@simulatte/doppler 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/BRANDING.md +14 -0
- package/LICENSE +201 -0
- package/NOTICE +5 -0
- package/README.md +85 -0
- package/SECURITY.md +19 -0
- package/package.json +144 -0
- package/src/adapters/adapter-manager.d.ts +200 -0
- package/src/adapters/adapter-manager.js +509 -0
- package/src/adapters/adapter-manifest.d.ts +290 -0
- package/src/adapters/adapter-manifest.js +320 -0
- package/src/adapters/adapter-registry.d.ts +192 -0
- package/src/adapters/adapter-registry.js +466 -0
- package/src/adapters/index.d.ts +89 -0
- package/src/adapters/index.js +42 -0
- package/src/adapters/lora-loader.d.ts +105 -0
- package/src/adapters/lora-loader.js +397 -0
- package/src/bootstrap.d.ts +1 -0
- package/src/bootstrap.js +30 -0
- package/src/bridge/extension/background.d.ts +14 -0
- package/src/bridge/extension/background.js +168 -0
- package/src/bridge/extension/manifest.json +34 -0
- package/src/bridge/extension-client.d.ts +109 -0
- package/src/bridge/extension-client.js +369 -0
- package/src/bridge/index.d.ts +68 -0
- package/src/bridge/index.js +51 -0
- package/src/bridge/protocol.d.ts +96 -0
- package/src/bridge/protocol.js +130 -0
- package/src/browser/browser-converter.d.ts +71 -0
- package/src/browser/browser-converter.js +947 -0
- package/src/browser/file-picker.d.ts +63 -0
- package/src/browser/file-picker.js +275 -0
- package/src/browser/gguf-importer.d.ts +136 -0
- package/src/browser/gguf-importer.js +532 -0
- package/src/browser/gguf-parser-browser.d.ts +14 -0
- package/src/browser/gguf-parser-browser.js +17 -0
- package/src/browser/quantization.d.ts +69 -0
- package/src/browser/quantization.js +328 -0
- package/src/browser/safetensors-parser-browser.d.ts +193 -0
- package/src/browser/safetensors-parser-browser.js +264 -0
- package/src/browser/shard-io-browser.d.ts +57 -0
- package/src/browser/shard-io-browser.js +89 -0
- package/src/browser/tensor-source-download.d.ts +27 -0
- package/src/browser/tensor-source-download.js +239 -0
- package/src/browser/tensor-source-file.d.ts +26 -0
- package/src/browser/tensor-source-file.js +53 -0
- package/src/browser/tensor-source-http.d.ts +28 -0
- package/src/browser/tensor-source-http.js +126 -0
- package/src/client/doppler-provider/generation.d.ts +25 -0
- package/src/client/doppler-provider/generation.js +114 -0
- package/src/client/doppler-provider/index.d.ts +2 -0
- package/src/client/doppler-provider/index.js +3 -0
- package/src/client/doppler-provider/model-manager.d.ts +61 -0
- package/src/client/doppler-provider/model-manager.js +667 -0
- package/src/client/doppler-provider/provider.d.ts +5 -0
- package/src/client/doppler-provider/provider.js +102 -0
- package/src/client/doppler-provider/source-runtime.d.ts +22 -0
- package/src/client/doppler-provider/source-runtime.js +522 -0
- package/src/client/doppler-provider/types.d.ts +127 -0
- package/src/client/doppler-provider/types.js +17 -0
- package/src/client/doppler-provider.d.ts +46 -0
- package/src/client/doppler-provider.js +36 -0
- package/src/config/README.md +69 -0
- package/src/config/backward-registry-loader.d.ts +3 -0
- package/src/config/backward-registry-loader.js +8 -0
- package/src/config/index.d.ts +63 -0
- package/src/config/index.js +31 -0
- package/src/config/kernel-path-loader.d.ts +149 -0
- package/src/config/kernel-path-loader.js +534 -0
- package/src/config/kernels/backward-registry.json +99 -0
- package/src/config/kernels/kernel-ref-digests.d.ts +1 -0
- package/src/config/kernels/kernel-ref-digests.js +214 -0
- package/src/config/kernels/kernel-ref.d.ts +17 -0
- package/src/config/kernels/kernel-ref.js +75 -0
- package/src/config/kernels/moe/gpt-oss.paths.json +49 -0
- package/src/config/kernels/registry.d.ts +86 -0
- package/src/config/kernels/registry.js +103 -0
- package/src/config/kernels/registry.json +6771 -0
- package/src/config/loader.d.ts +57 -0
- package/src/config/loader.js +513 -0
- package/src/config/merge.d.ts +142 -0
- package/src/config/merge.js +389 -0
- package/src/config/param-categories.d.ts +17 -0
- package/src/config/param-categories.js +72 -0
- package/src/config/param-validator.d.ts +26 -0
- package/src/config/param-validator.js +235 -0
- package/src/config/platforms/amd-rdna3.json +16 -0
- package/src/config/platforms/apple-m1.json +16 -0
- package/src/config/platforms/apple-m2.json +16 -0
- package/src/config/platforms/apple-m3.json +16 -0
- package/src/config/platforms/generic.json +14 -0
- package/src/config/platforms/loader.d.ts +65 -0
- package/src/config/platforms/loader.js +153 -0
- package/src/config/platforms/nvidia-rtx30.json +16 -0
- package/src/config/platforms/nvidia-rtx40.json +16 -0
- package/src/config/presets/kernel-paths/embeddinggemma-f16-f32a.json +60 -0
- package/src/config/presets/kernel-paths/embeddinggemma-f32-f32a.json +60 -0
- package/src/config/presets/kernel-paths/embeddinggemma-q4k-dequant-f32a.json +60 -0
- package/src/config/presets/kernel-paths/gemma2-f16-f16a.json +61 -0
- package/src/config/presets/kernel-paths/gemma2-f16-f32a.json +60 -0
- package/src/config/presets/kernel-paths/gemma2-q4k-dequant-f16a.json +61 -0
- package/src/config/presets/kernel-paths/gemma2-q4k-dequant-f32a.json +60 -0
- package/src/config/presets/kernel-paths/gemma2-q4k-fused-f32a.json +57 -0
- package/src/config/presets/kernel-paths/gemma3-f16-fused-f16a-online.json +200 -0
- package/src/config/presets/kernel-paths/gemma3-f16-fused-f32a-online.json +223 -0
- package/src/config/presets/kernel-paths/gemma3-q4k-dequant-f16a-online.json +60 -0
- package/src/config/presets/kernel-paths/gemma3-q4k-dequant-f32a-online.json +61 -0
- package/src/config/presets/kernel-paths/gemma3-q4k-dequant-f32a.json +61 -0
- package/src/config/presets/kernel-paths/lfm2-q4k-dequant-f32a-online.json +61 -0
- package/src/config/presets/kernel-paths/registry.json +103 -0
- package/src/config/presets/models/deepseek.json +20 -0
- package/src/config/presets/models/diffusion.json +10 -0
- package/src/config/presets/models/embeddinggemma.json +74 -0
- package/src/config/presets/models/functiongemma.json +31 -0
- package/src/config/presets/models/gemma2.json +59 -0
- package/src/config/presets/models/gemma3.json +75 -0
- package/src/config/presets/models/gpt-oss.json +68 -0
- package/src/config/presets/models/kimi-k2.json +25 -0
- package/src/config/presets/models/lfm2.json +83 -0
- package/src/config/presets/models/llama3.json +40 -0
- package/src/config/presets/models/mamba.json +34 -0
- package/src/config/presets/models/mixtral.json +37 -0
- package/src/config/presets/models/modernbert.json +32 -0
- package/src/config/presets/models/qwen3.json +41 -0
- package/src/config/presets/models/transformer.json +73 -0
- package/src/config/presets/models/translategemma.json +30 -0
- package/src/config/presets/platforms/nvidia-gb200-8gpu.json +45 -0
- package/src/config/presets/platforms/nvidia-gb200-nvl72.json +45 -0
- package/src/config/presets/platforms/nvidia-gh200-nvl2.json +44 -0
- package/src/config/presets/platforms/nvidia-gh200.json +44 -0
- package/src/config/presets/runtime/compute/f16-activations.json +30 -0
- package/src/config/presets/runtime/compute/f16-batched.json +32 -0
- package/src/config/presets/runtime/default.json +101 -0
- package/src/config/presets/runtime/diagnostics/debug-logits.json +53 -0
- package/src/config/presets/runtime/experiments/bench/gemma3-bench-q4k.json +53 -0
- package/src/config/presets/runtime/experiments/debug/gemma3-debug-q4k.json +210 -0
- package/src/config/presets/runtime/experiments/verify/gemma3-verify.json +39 -0
- package/src/config/presets/runtime/kernels/dequant-f16-q4k.json +20 -0
- package/src/config/presets/runtime/kernels/dequant-f32-q4k.json +20 -0
- package/src/config/presets/runtime/kernels/fused-q4k.json +20 -0
- package/src/config/presets/runtime/kernels/safe-q4k.json +20 -0
- package/src/config/presets/runtime/model/gemma2-debug.json +77 -0
- package/src/config/presets/runtime/model/gemma2-pipeline-debug.json +66 -0
- package/src/config/presets/runtime/model/gemma2-pipeline.json +75 -0
- package/src/config/presets/runtime/model/gemma3-layer-probe.json +85 -0
- package/src/config/presets/runtime/modes/bench.json +37 -0
- package/src/config/presets/runtime/modes/debug.json +39 -0
- package/src/config/presets/runtime/modes/default.json +10 -0
- package/src/config/presets/runtime/modes/embedding-bench.json +28 -0
- package/src/config/presets/runtime/modes/embedding.json +54 -0
- package/src/config/presets/runtime/modes/low-memory.json +40 -0
- package/src/config/presets/runtime/modes/production.json +48 -0
- package/src/config/presets/runtime/modes/simulation.json +30 -0
- package/src/config/presets/runtime/modes/trace-layers.json +126 -0
- package/src/config/presets/runtime/platform/metal-apple-q4k.json +11 -0
- package/src/config/runtime-merge.d.ts +5 -0
- package/src/config/runtime-merge.js +21 -0
- package/src/config/runtime.d.ts +28 -0
- package/src/config/runtime.js +56 -0
- package/src/config/schema/adapter.schema.d.ts +53 -0
- package/src/config/schema/adapter.schema.js +60 -0
- package/src/config/schema/backward-registry.schema.d.ts +14 -0
- package/src/config/schema/backward-registry.schema.js +46 -0
- package/src/config/schema/benchmark.schema.d.ts +54 -0
- package/src/config/schema/benchmark.schema.js +74 -0
- package/src/config/schema/bridge.schema.d.ts +25 -0
- package/src/config/schema/bridge.schema.js +22 -0
- package/src/config/schema/buffer-pool.schema.d.ts +92 -0
- package/src/config/schema/buffer-pool.schema.js +50 -0
- package/src/config/schema/conversion.schema.d.ts +183 -0
- package/src/config/schema/conversion.schema.js +13 -0
- package/src/config/schema/converter.schema.d.ts +123 -0
- package/src/config/schema/converter.schema.js +136 -0
- package/src/config/schema/debug.schema.d.ts +245 -0
- package/src/config/schema/debug.schema.js +106 -0
- package/src/config/schema/diffusion.schema.d.ts +88 -0
- package/src/config/schema/diffusion.schema.js +62 -0
- package/src/config/schema/distill-training.schema.d.ts +48 -0
- package/src/config/schema/distill-training.schema.js +139 -0
- package/src/config/schema/distribution.schema.d.ts +155 -0
- package/src/config/schema/distribution.schema.js +81 -0
- package/src/config/schema/doppler.schema.d.ts +75 -0
- package/src/config/schema/doppler.schema.js +352 -0
- package/src/config/schema/ecosystem.schema.d.ts +255 -0
- package/src/config/schema/ecosystem.schema.js +534 -0
- package/src/config/schema/emulation.schema.d.ts +351 -0
- package/src/config/schema/emulation.schema.js +299 -0
- package/src/config/schema/energy.schema.d.ts +102 -0
- package/src/config/schema/energy.schema.js +72 -0
- package/src/config/schema/execution-v0.schema.d.ts +187 -0
- package/src/config/schema/execution-v0.schema.js +55 -0
- package/src/config/schema/gpu-cache.schema.d.ts +26 -0
- package/src/config/schema/gpu-cache.schema.js +8 -0
- package/src/config/schema/harness.schema.d.ts +32 -0
- package/src/config/schema/harness.schema.js +20 -0
- package/src/config/schema/hotswap.schema.d.ts +55 -0
- package/src/config/schema/hotswap.schema.js +18 -0
- package/src/config/schema/index.d.ts +863 -0
- package/src/config/schema/index.js +471 -0
- package/src/config/schema/inference-defaults.schema.d.ts +276 -0
- package/src/config/schema/inference-defaults.schema.js +185 -0
- package/src/config/schema/inference.schema.d.ts +289 -0
- package/src/config/schema/inference.schema.js +39 -0
- package/src/config/schema/intent-bundle.schema.d.ts +28 -0
- package/src/config/schema/intent-bundle.schema.js +12 -0
- package/src/config/schema/kernel-path.schema.d.ts +173 -0
- package/src/config/schema/kernel-path.schema.js +9 -0
- package/src/config/schema/kernel-registry.schema.d.ts +199 -0
- package/src/config/schema/kernel-registry.schema.js +46 -0
- package/src/config/schema/kernel-thresholds.schema.d.ts +302 -0
- package/src/config/schema/kernel-thresholds.schema.js +187 -0
- package/src/config/schema/kernel-warmup.schema.d.ts +19 -0
- package/src/config/schema/kernel-warmup.schema.js +5 -0
- package/src/config/schema/kvcache.schema.d.ts +131 -0
- package/src/config/schema/kvcache.schema.js +31 -0
- package/src/config/schema/loading.schema.d.ts +153 -0
- package/src/config/schema/loading.schema.js +84 -0
- package/src/config/schema/lora.schema.d.ts +12 -0
- package/src/config/schema/lora.schema.js +12 -0
- package/src/config/schema/manifest.schema.d.ts +500 -0
- package/src/config/schema/manifest.schema.js +130 -0
- package/src/config/schema/memory-limits.schema.d.ts +107 -0
- package/src/config/schema/memory-limits.schema.js +57 -0
- package/src/config/schema/moe.schema.d.ts +78 -0
- package/src/config/schema/moe.schema.js +31 -0
- package/src/config/schema/platform.schema.d.ts +121 -0
- package/src/config/schema/platform.schema.js +1 -0
- package/src/config/schema/preset.schema.d.ts +124 -0
- package/src/config/schema/preset.schema.js +1 -0
- package/src/config/schema/quantization-defaults.schema.d.ts +34 -0
- package/src/config/schema/quantization-defaults.schema.js +5 -0
- package/src/config/schema/quantization.schema.d.ts +10 -0
- package/src/config/schema/quantization.schema.js +33 -0
- package/src/config/schema/shared-runtime.schema.d.ts +75 -0
- package/src/config/schema/shared-runtime.schema.js +45 -0
- package/src/config/schema/speculative.schema.d.ts +21 -0
- package/src/config/schema/speculative.schema.js +11 -0
- package/src/config/schema/storage.schema.d.ts +123 -0
- package/src/config/schema/storage.schema.js +66 -0
- package/src/config/schema/tooling.schema.d.ts +29 -0
- package/src/config/schema/tooling.schema.js +12 -0
- package/src/config/schema/training-metrics.schema.d.ts +89 -0
- package/src/config/schema/training-metrics.schema.js +374 -0
- package/src/config/schema/training.schema.d.ts +88 -0
- package/src/config/schema/training.schema.js +106 -0
- package/src/config/schema/tuner.schema.d.ts +39 -0
- package/src/config/schema/tuner.schema.js +13 -0
- package/src/config/schema/ul-training.schema.d.ts +61 -0
- package/src/config/schema/ul-training.schema.js +140 -0
- package/src/config/schema/units.schema.d.ts +27 -0
- package/src/config/schema/units.schema.js +26 -0
- package/src/config/training-defaults.d.ts +24 -0
- package/src/config/training-defaults.js +91 -0
- package/src/converter/conversion-plan.d.ts +64 -0
- package/src/converter/conversion-plan.js +472 -0
- package/src/converter/core.d.ts +247 -0
- package/src/converter/core.js +1329 -0
- package/src/converter/execution-v0-manifest.d.ts +15 -0
- package/src/converter/execution-v0-manifest.js +146 -0
- package/src/converter/index.d.ts +98 -0
- package/src/converter/index.js +59 -0
- package/src/converter/manifest-inference.d.ts +20 -0
- package/src/converter/manifest-inference.js +492 -0
- package/src/converter/parsers/diffusion.d.ts +50 -0
- package/src/converter/parsers/diffusion.js +270 -0
- package/src/converter/parsers/gguf.d.ts +22 -0
- package/src/converter/parsers/gguf.js +46 -0
- package/src/converter/parsers/index.d.ts +21 -0
- package/src/converter/parsers/index.js +12 -0
- package/src/converter/parsers/transformer.d.ts +16 -0
- package/src/converter/parsers/transformer.js +25 -0
- package/src/converter/quantization-info.d.ts +37 -0
- package/src/converter/quantization-info.js +398 -0
- package/src/converter/quantizer.d.ts +96 -0
- package/src/converter/quantizer.js +422 -0
- package/src/converter/rope-config.d.ts +15 -0
- package/src/converter/rope-config.js +218 -0
- package/src/converter/shard-packer.d.ts +138 -0
- package/src/converter/shard-packer.js +422 -0
- package/src/converter/tokenizer-utils.d.ts +11 -0
- package/src/converter/tokenizer-utils.js +87 -0
- package/src/debug/config.d.ts +78 -0
- package/src/debug/config.js +235 -0
- package/src/debug/history.d.ts +65 -0
- package/src/debug/history.js +71 -0
- package/src/debug/index.d.ts +268 -0
- package/src/debug/index.js +192 -0
- package/src/debug/log.d.ts +46 -0
- package/src/debug/log.js +132 -0
- package/src/debug/perf.d.ts +33 -0
- package/src/debug/perf.js +51 -0
- package/src/debug/reference/README.md +114 -0
- package/src/debug/reference/hf_attn_debug.py +114 -0
- package/src/debug/reference/hf_embed_check.py +89 -0
- package/src/debug/reference/hf_layer_out.py +100 -0
- package/src/debug/reference/hf_rope_check.py +116 -0
- package/src/debug/reference/hf_weights.py +75 -0
- package/src/debug/signals.d.ts +63 -0
- package/src/debug/signals.js +33 -0
- package/src/debug/stats.d.ts +47 -0
- package/src/debug/stats.js +160 -0
- package/src/debug/tensor.d.ts +123 -0
- package/src/debug/tensor.js +257 -0
- package/src/debug/trace.d.ts +17 -0
- package/src/debug/trace.js +167 -0
- package/src/diffusion/image-regression.d.ts +31 -0
- package/src/diffusion/image-regression.js +107 -0
- package/src/diffusion/index.d.ts +8 -0
- package/src/diffusion/index.js +8 -0
- package/src/distribution/p2p-control-plane.d.ts +52 -0
- package/src/distribution/p2p-control-plane.js +232 -0
- package/src/distribution/p2p-observability.d.ts +116 -0
- package/src/distribution/p2p-observability.js +267 -0
- package/src/distribution/p2p-transport-contract.d.ts +57 -0
- package/src/distribution/p2p-transport-contract.js +310 -0
- package/src/distribution/p2p-webrtc-browser.d.ts +37 -0
- package/src/distribution/p2p-webrtc-browser.js +434 -0
- package/src/distribution/shard-delivery.d.ts +251 -0
- package/src/distribution/shard-delivery.js +2096 -0
- package/src/energy/index.d.ts +2 -0
- package/src/energy/index.js +2 -0
- package/src/errors/doppler-error.d.ts +21 -0
- package/src/errors/doppler-error.js +25 -0
- package/src/errors/index.d.ts +1 -0
- package/src/errors/index.js +1 -0
- package/src/formats/gguf/index.d.ts +8 -0
- package/src/formats/gguf/index.js +4 -0
- package/src/formats/gguf/types.d.ts +137 -0
- package/src/formats/gguf/types.js +443 -0
- package/src/formats/index.d.ts +51 -0
- package/src/formats/index.js +13 -0
- package/src/formats/rdrr/classification.d.ts +39 -0
- package/src/formats/rdrr/classification.js +275 -0
- package/src/formats/rdrr/groups.d.ts +27 -0
- package/src/formats/rdrr/groups.js +76 -0
- package/src/formats/rdrr/index.d.ts +25 -0
- package/src/formats/rdrr/index.js +19 -0
- package/src/formats/rdrr/manifest.d.ts +32 -0
- package/src/formats/rdrr/manifest.js +108 -0
- package/src/formats/rdrr/parsing.d.ts +23 -0
- package/src/formats/rdrr/parsing.js +101 -0
- package/src/formats/rdrr/tensor-config-validator.d.ts +42 -0
- package/src/formats/rdrr/tensor-config-validator.js +156 -0
- package/src/formats/rdrr/types.d.ts +200 -0
- package/src/formats/rdrr/types.js +16 -0
- package/src/formats/rdrr/validation.d.ts +9 -0
- package/src/formats/rdrr/validation.js +200 -0
- package/src/formats/safetensors/index.d.ts +8 -0
- package/src/formats/safetensors/index.js +4 -0
- package/src/formats/safetensors/types.d.ts +67 -0
- package/src/formats/safetensors/types.js +102 -0
- package/src/formats/tokenizer/index.d.ts +5 -0
- package/src/formats/tokenizer/index.js +3 -0
- package/src/formats/tokenizer/types.d.ts +9 -0
- package/src/formats/tokenizer/types.js +22 -0
- package/src/generation/index.d.ts +18 -0
- package/src/generation/index.js +12 -0
- package/src/gpu/command-recorder.d.ts +175 -0
- package/src/gpu/command-recorder.js +473 -0
- package/src/gpu/device.d.ts +141 -0
- package/src/gpu/device.js +350 -0
- package/src/gpu/kernel-runtime.d.ts +20 -0
- package/src/gpu/kernel-runtime.js +37 -0
- package/src/gpu/kernel-selection-cache.d.ts +13 -0
- package/src/gpu/kernel-selection-cache.js +13 -0
- package/src/gpu/kernel-selection-log.d.ts +12 -0
- package/src/gpu/kernel-selection-log.js +28 -0
- package/src/gpu/kernel-selector.d.ts +11 -0
- package/src/gpu/kernel-selector.js +10 -0
- package/src/gpu/kernel-tuner/benchmarks.d.ts +144 -0
- package/src/gpu/kernel-tuner/benchmarks.js +892 -0
- package/src/gpu/kernel-tuner/cache.d.ts +55 -0
- package/src/gpu/kernel-tuner/cache.js +66 -0
- package/src/gpu/kernel-tuner/index.d.ts +59 -0
- package/src/gpu/kernel-tuner/index.js +38 -0
- package/src/gpu/kernel-tuner/tuner.d.ts +82 -0
- package/src/gpu/kernel-tuner/tuner.js +229 -0
- package/src/gpu/kernel-tuner/types.d.ts +101 -0
- package/src/gpu/kernel-tuner/types.js +4 -0
- package/src/gpu/kernel-tuner.d.ts +33 -0
- package/src/gpu/kernel-tuner.js +12 -0
- package/src/gpu/kernels/README.md +127 -0
- package/src/gpu/kernels/attention.d.ts +236 -0
- package/src/gpu/kernels/attention.js +1359 -0
- package/src/gpu/kernels/attention.wgsl +249 -0
- package/src/gpu/kernels/attention_bdpa_decode_f16.wgsl +246 -0
- package/src/gpu/kernels/attention_decode.wgsl +233 -0
- package/src/gpu/kernels/attention_decode_chunked_f16.wgsl +183 -0
- package/src/gpu/kernels/attention_decode_chunked_f16kv.wgsl +208 -0
- package/src/gpu/kernels/attention_decode_f16.wgsl +202 -0
- package/src/gpu/kernels/attention_decode_f16kv.wgsl +224 -0
- package/src/gpu/kernels/attention_decode_online_f16.wgsl +223 -0
- package/src/gpu/kernels/attention_decode_online_f16kv.wgsl +225 -0
- package/src/gpu/kernels/attention_decode_optimized.wgsl +445 -0
- package/src/gpu/kernels/attention_decode_paged_f16.wgsl +172 -0
- package/src/gpu/kernels/attention_decode_paged_f16kv.wgsl +174 -0
- package/src/gpu/kernels/attention_decode_subgroup.wgsl +233 -0
- package/src/gpu/kernels/attention_decode_tiered_f16.wgsl +218 -0
- package/src/gpu/kernels/attention_decode_tiered_f16kv.wgsl +220 -0
- package/src/gpu/kernels/attention_decode_tiered_int4_f16kv.wgsl +242 -0
- package/src/gpu/kernels/attention_decode_tiered_int8_f16kv.wgsl +242 -0
- package/src/gpu/kernels/attention_f16.wgsl +214 -0
- package/src/gpu/kernels/attention_f16kv.wgsl +242 -0
- package/src/gpu/kernels/attention_small.wgsl +260 -0
- package/src/gpu/kernels/attention_small_f16.wgsl +240 -0
- package/src/gpu/kernels/attention_small_f16kv.wgsl +266 -0
- package/src/gpu/kernels/attention_streaming.wgsl +149 -0
- package/src/gpu/kernels/attention_streaming_f16.wgsl +147 -0
- package/src/gpu/kernels/attention_streaming_f16kv.wgsl +151 -0
- package/src/gpu/kernels/backward/adam.d.ts +28 -0
- package/src/gpu/kernels/backward/adam.js +199 -0
- package/src/gpu/kernels/backward/adam.wgsl +50 -0
- package/src/gpu/kernels/backward/attention_backward.d.ts +22 -0
- package/src/gpu/kernels/backward/attention_backward.js +276 -0
- package/src/gpu/kernels/backward/attention_backward.wgsl +49 -0
- package/src/gpu/kernels/backward/bias_add_backward.d.ts +17 -0
- package/src/gpu/kernels/backward/bias_add_backward.js +24 -0
- package/src/gpu/kernels/backward/bias_add_backward.wgsl +33 -0
- package/src/gpu/kernels/backward/conv2d_backward.d.ts +31 -0
- package/src/gpu/kernels/backward/conv2d_backward.js +135 -0
- package/src/gpu/kernels/backward/conv2d_backward_input.wgsl +83 -0
- package/src/gpu/kernels/backward/conv2d_backward_weight.wgsl +70 -0
- package/src/gpu/kernels/backward/cross_entropy_backward.d.ts +23 -0
- package/src/gpu/kernels/backward/cross_entropy_backward.js +29 -0
- package/src/gpu/kernels/backward/cross_entropy_backward.wgsl +39 -0
- package/src/gpu/kernels/backward/embed_backward.d.ts +29 -0
- package/src/gpu/kernels/backward/embed_backward.js +118 -0
- package/src/gpu/kernels/backward/embed_backward.wgsl +73 -0
- package/src/gpu/kernels/backward/gelu_backward.d.ts +16 -0
- package/src/gpu/kernels/backward/gelu_backward.js +39 -0
- package/src/gpu/kernels/backward/gelu_backward.wgsl +38 -0
- package/src/gpu/kernels/backward/groupnorm_backward.d.ts +24 -0
- package/src/gpu/kernels/backward/groupnorm_backward.js +29 -0
- package/src/gpu/kernels/backward/groupnorm_backward.wgsl +143 -0
- package/src/gpu/kernels/backward/index.d.ts +17 -0
- package/src/gpu/kernels/backward/index.js +23 -0
- package/src/gpu/kernels/backward/layernorm_backward.d.ts +22 -0
- package/src/gpu/kernels/backward/layernorm_backward.js +135 -0
- package/src/gpu/kernels/backward/layernorm_backward.wgsl +194 -0
- package/src/gpu/kernels/backward/matmul_backward.d.ts +32 -0
- package/src/gpu/kernels/backward/matmul_backward.js +124 -0
- package/src/gpu/kernels/backward/matmul_backward.wgsl +90 -0
- package/src/gpu/kernels/backward/matmul_transpose_a.wgsl +84 -0
- package/src/gpu/kernels/backward/pixel_shuffle_backward.d.ts +22 -0
- package/src/gpu/kernels/backward/pixel_shuffle_backward.js +30 -0
- package/src/gpu/kernels/backward/pixel_shuffle_backward.wgsl +54 -0
- package/src/gpu/kernels/backward/rmsnorm_backward.d.ts +24 -0
- package/src/gpu/kernels/backward/rmsnorm_backward.js +101 -0
- package/src/gpu/kernels/backward/rmsnorm_backward.wgsl +78 -0
- package/src/gpu/kernels/backward/rope_backward.d.ts +25 -0
- package/src/gpu/kernels/backward/rope_backward.js +109 -0
- package/src/gpu/kernels/backward/rope_backward.wgsl +59 -0
- package/src/gpu/kernels/backward/scale_backward.d.ts +16 -0
- package/src/gpu/kernels/backward/scale_backward.js +84 -0
- package/src/gpu/kernels/backward/scale_backward.wgsl +27 -0
- package/src/gpu/kernels/backward/silu_backward.d.ts +16 -0
- package/src/gpu/kernels/backward/silu_backward.js +39 -0
- package/src/gpu/kernels/backward/silu_backward.wgsl +31 -0
- package/src/gpu/kernels/backward/softmax_backward.d.ts +16 -0
- package/src/gpu/kernels/backward/softmax_backward.js +43 -0
- package/src/gpu/kernels/backward/softmax_backward.wgsl +44 -0
- package/src/gpu/kernels/backward/upsample2d_backward.d.ts +21 -0
- package/src/gpu/kernels/backward/upsample2d_backward.js +30 -0
- package/src/gpu/kernels/backward/upsample2d_backward.wgsl +59 -0
- package/src/gpu/kernels/backward/utils.d.ts +45 -0
- package/src/gpu/kernels/backward/utils.js +371 -0
- package/src/gpu/kernels/bf16_to_f16.wgsl +54 -0
- package/src/gpu/kernels/bf16_to_f32.wgsl +70 -0
- package/src/gpu/kernels/bias_add.wgsl +40 -0
- package/src/gpu/kernels/bias_add_f16.wgsl +44 -0
- package/src/gpu/kernels/cast.d.ts +67 -0
- package/src/gpu/kernels/cast.js +422 -0
- package/src/gpu/kernels/cast_f16_to_f32.wgsl +31 -0
- package/src/gpu/kernels/cast_f32_to_f16.wgsl +36 -0
- package/src/gpu/kernels/check-finiteness.d.ts +15 -0
- package/src/gpu/kernels/check-finiteness.js +149 -0
- package/src/gpu/kernels/check-stop.d.ts +31 -0
- package/src/gpu/kernels/check-stop.js +181 -0
- package/src/gpu/kernels/clamp.d.ts +22 -0
- package/src/gpu/kernels/clamp.js +42 -0
- package/src/gpu/kernels/clamp.wgsl +24 -0
- package/src/gpu/kernels/constants.d.ts +168 -0
- package/src/gpu/kernels/constants.js +129 -0
- package/src/gpu/kernels/conv2d.d.ts +34 -0
- package/src/gpu/kernels/conv2d.js +81 -0
- package/src/gpu/kernels/conv2d.wgsl +71 -0
- package/src/gpu/kernels/conv2d_f16.wgsl +73 -0
- package/src/gpu/kernels/cross_entropy_loss.d.ts +21 -0
- package/src/gpu/kernels/cross_entropy_loss.js +54 -0
- package/src/gpu/kernels/cross_entropy_loss.wgsl +39 -0
- package/src/gpu/kernels/dequant.d.ts +108 -0
- package/src/gpu/kernels/dequant.js +524 -0
- package/src/gpu/kernels/dequant_f16_out.wgsl +151 -0
- package/src/gpu/kernels/dequant_f16_out_vec4.wgsl +149 -0
- package/src/gpu/kernels/dequant_f16_rowwise.wgsl +139 -0
- package/src/gpu/kernels/dequant_f32_rowwise.wgsl +133 -0
- package/src/gpu/kernels/dequant_mxfp4.wgsl +120 -0
- package/src/gpu/kernels/dequant_mxfp4_expert.wgsl +129 -0
- package/src/gpu/kernels/dequant_mxfp4_expert_f16.wgsl +105 -0
- package/src/gpu/kernels/dequant_mxfp4_vec4.wgsl +116 -0
- package/src/gpu/kernels/dequant_q6k.wgsl +140 -0
- package/src/gpu/kernels/dequant_q8_0.wgsl +98 -0
- package/src/gpu/kernels/dequant_shared.wgsl +202 -0
- package/src/gpu/kernels/dequant_shared_vec4.wgsl +153 -0
- package/src/gpu/kernels/dequant_subgroup.wgsl +202 -0
- package/src/gpu/kernels/dispatch.d.ts +157 -0
- package/src/gpu/kernels/dispatch.js +235 -0
- package/src/gpu/kernels/energy.d.ts +131 -0
- package/src/gpu/kernels/energy.js +425 -0
- package/src/gpu/kernels/energy_eval.wgsl +26 -0
- package/src/gpu/kernels/energy_eval_f16.wgsl +30 -0
- package/src/gpu/kernels/energy_quintel_grad.wgsl +92 -0
- package/src/gpu/kernels/energy_quintel_grad_f16.wgsl +96 -0
- package/src/gpu/kernels/energy_quintel_reduce.wgsl +112 -0
- package/src/gpu/kernels/energy_quintel_reduce_f16.wgsl +116 -0
- package/src/gpu/kernels/energy_quintel_update.wgsl +92 -0
- package/src/gpu/kernels/energy_quintel_update_f16.wgsl +96 -0
- package/src/gpu/kernels/energy_update.wgsl +25 -0
- package/src/gpu/kernels/energy_update_f16.wgsl +30 -0
- package/src/gpu/kernels/feature-check.d.ts +42 -0
- package/src/gpu/kernels/feature-check.js +70 -0
- package/src/gpu/kernels/fused_ffn.d.ts +65 -0
- package/src/gpu/kernels/fused_ffn.js +318 -0
- package/src/gpu/kernels/fused_ffn.wgsl +420 -0
- package/src/gpu/kernels/fused_ffn_f16.wgsl +213 -0
- package/src/gpu/kernels/fused_ffn_q4k.wgsl +375 -0
- package/src/gpu/kernels/fused_matmul_q4.wgsl +404 -0
- package/src/gpu/kernels/fused_matmul_q4_batched.wgsl +194 -0
- package/src/gpu/kernels/fused_matmul_q4_batched_f16.wgsl +170 -0
- package/src/gpu/kernels/fused_matmul_q4_batched_f16a.wgsl +154 -0
- package/src/gpu/kernels/fused_matmul_q4_f16a.wgsl +219 -0
- package/src/gpu/kernels/fused_matmul_q4_multicol_f16.wgsl +216 -0
- package/src/gpu/kernels/fused_matmul_q4_multicol_f16a.wgsl +204 -0
- package/src/gpu/kernels/fused_matmul_residual.d.ts +46 -0
- package/src/gpu/kernels/fused_matmul_residual.js +152 -0
- package/src/gpu/kernels/fused_matmul_rmsnorm.d.ts +64 -0
- package/src/gpu/kernels/fused_matmul_rmsnorm.js +273 -0
- package/src/gpu/kernels/fused_matmul_rmsnorm.wgsl +324 -0
- package/src/gpu/kernels/fused_matmul_rmsnorm_f16.wgsl +303 -0
- package/src/gpu/kernels/fused_swiglu.wgsl +63 -0
- package/src/gpu/kernels/fused_swiglu_f16.wgsl +57 -0
- package/src/gpu/kernels/gather.d.ts +64 -0
- package/src/gpu/kernels/gather.js +119 -0
- package/src/gpu/kernels/gather.wgsl +61 -0
- package/src/gpu/kernels/gather_f16.wgsl +65 -0
- package/src/gpu/kernels/gather_f16_f16_out.wgsl +55 -0
- package/src/gpu/kernels/gather_f16_out.wgsl +55 -0
- package/src/gpu/kernels/gather_f16_vec4.wgsl +76 -0
- package/src/gpu/kernels/gather_f16_vec4_f16_out.wgsl +68 -0
- package/src/gpu/kernels/gather_vec4.wgsl +74 -0
- package/src/gpu/kernels/gather_vec4_f16_out.wgsl +68 -0
- package/src/gpu/kernels/gelu.d.ts +33 -0
- package/src/gpu/kernels/gelu.js +47 -0
- package/src/gpu/kernels/gelu.wgsl +64 -0
- package/src/gpu/kernels/gelu_f16.wgsl +66 -0
- package/src/gpu/kernels/gptoss_mxfp4_expert_fused.wgsl +127 -0
- package/src/gpu/kernels/gptoss_router_topk.wgsl +119 -0
- package/src/gpu/kernels/groupnorm.d.ts +31 -0
- package/src/gpu/kernels/groupnorm.js +91 -0
- package/src/gpu/kernels/groupnorm_apply.wgsl +41 -0
- package/src/gpu/kernels/groupnorm_apply_f16.wgsl +46 -0
- package/src/gpu/kernels/groupnorm_stats.wgsl +76 -0
- package/src/gpu/kernels/groupnorm_stats_f16.wgsl +79 -0
- package/src/gpu/kernels/index.d.ts +336 -0
- package/src/gpu/kernels/index.js +284 -0
- package/src/gpu/kernels/kernel-base.d.ts +33 -0
- package/src/gpu/kernels/kernel-base.js +46 -0
- package/src/gpu/kernels/kernel-configs.d.ts +65 -0
- package/src/gpu/kernels/kernel-configs.js +50 -0
- package/src/gpu/kernels/kernel-tuning.d.ts +42 -0
- package/src/gpu/kernels/kernel-tuning.js +149 -0
- package/src/gpu/kernels/kv-quantize.d.ts +37 -0
- package/src/gpu/kernels/kv-quantize.js +138 -0
- package/src/gpu/kernels/kv_quantize_int4.wgsl +119 -0
- package/src/gpu/kernels/kv_quantize_int8.wgsl +119 -0
- package/src/gpu/kernels/layernorm.d.ts +37 -0
- package/src/gpu/kernels/layernorm.js +80 -0
- package/src/gpu/kernels/layernorm.wgsl +121 -0
- package/src/gpu/kernels/layernorm_f16.wgsl +103 -0
- package/src/gpu/kernels/linear-attention-core.d.ts +39 -0
- package/src/gpu/kernels/linear-attention-core.js +535 -0
- package/src/gpu/kernels/logit-merge.d.ts +110 -0
- package/src/gpu/kernels/logit-merge.js +392 -0
- package/src/gpu/kernels/matmul-dispatch.d.ts +38 -0
- package/src/gpu/kernels/matmul-dispatch.js +155 -0
- package/src/gpu/kernels/matmul-selection.d.ts +87 -0
- package/src/gpu/kernels/matmul-selection.js +474 -0
- package/src/gpu/kernels/matmul.d.ts +109 -0
- package/src/gpu/kernels/matmul.js +271 -0
- package/src/gpu/kernels/matmul_f16.wgsl +170 -0
- package/src/gpu/kernels/matmul_f16_tiled.wgsl +165 -0
- package/src/gpu/kernels/matmul_f16w_f32a.wgsl +89 -0
- package/src/gpu/kernels/matmul_f16w_f32a_tiled.wgsl +154 -0
- package/src/gpu/kernels/matmul_f32.wgsl +100 -0
- package/src/gpu/kernels/matmul_gemv.wgsl +80 -0
- package/src/gpu/kernels/matmul_gemv_f16a.wgsl +81 -0
- package/src/gpu/kernels/matmul_gemv_residual.wgsl +119 -0
- package/src/gpu/kernels/matmul_gemv_residual_f16.wgsl +78 -0
- package/src/gpu/kernels/matmul_gemv_subgroup.wgsl +345 -0
- package/src/gpu/kernels/matmul_gemv_subgroup_f16a.wgsl +514 -0
- package/src/gpu/kernels/modulate.d.ts +29 -0
- package/src/gpu/kernels/modulate.js +49 -0
- package/src/gpu/kernels/modulate.wgsl +40 -0
- package/src/gpu/kernels/modulate_f16.wgsl +43 -0
- package/src/gpu/kernels/moe.d.ts +164 -0
- package/src/gpu/kernels/moe.js +496 -0
- package/src/gpu/kernels/moe_gather.wgsl +170 -0
- package/src/gpu/kernels/moe_gather_f16.wgsl +82 -0
- package/src/gpu/kernels/moe_gather_vec4.wgsl +74 -0
- package/src/gpu/kernels/moe_offsets.wgsl +48 -0
- package/src/gpu/kernels/pipeline-cache.d.ts +88 -0
- package/src/gpu/kernels/pipeline-cache.js +305 -0
- package/src/gpu/kernels/pixel_shuffle.d.ts +27 -0
- package/src/gpu/kernels/pixel_shuffle.js +49 -0
- package/src/gpu/kernels/pixel_shuffle.wgsl +44 -0
- package/src/gpu/kernels/pixel_shuffle_f16.wgsl +47 -0
- package/src/gpu/kernels/residual.d.ts +74 -0
- package/src/gpu/kernels/residual.js +127 -0
- package/src/gpu/kernels/residual.wgsl +53 -0
- package/src/gpu/kernels/residual_f16.wgsl +35 -0
- package/src/gpu/kernels/residual_f16_vec4.wgsl +47 -0
- package/src/gpu/kernels/residual_vec4.wgsl +46 -0
- package/src/gpu/kernels/rmsnorm.d.ts +53 -0
- package/src/gpu/kernels/rmsnorm.js +140 -0
- package/src/gpu/kernels/rmsnorm.wgsl +417 -0
- package/src/gpu/kernels/rmsnorm_f16.wgsl +164 -0
- package/src/gpu/kernels/rope.d.ts +48 -0
- package/src/gpu/kernels/rope.js +53 -0
- package/src/gpu/kernels/rope.wgsl +328 -0
- package/src/gpu/kernels/rope_f16.wgsl +271 -0
- package/src/gpu/kernels/rule-matcher.d.ts +30 -0
- package/src/gpu/kernels/rule-matcher.js +42 -0
- package/src/gpu/kernels/rule-registry.d.ts +7 -0
- package/src/gpu/kernels/rule-registry.js +41 -0
- package/src/gpu/kernels/sample.d.ts +75 -0
- package/src/gpu/kernels/sample.js +578 -0
- package/src/gpu/kernels/sample.wgsl +377 -0
- package/src/gpu/kernels/sample_f16.wgsl +331 -0
- package/src/gpu/kernels/scale.d.ts +35 -0
- package/src/gpu/kernels/scale.js +37 -0
- package/src/gpu/kernels/scale.wgsl +38 -0
- package/src/gpu/kernels/scatter_add.wgsl +88 -0
- package/src/gpu/kernels/scatter_add_dynamic.wgsl +59 -0
- package/src/gpu/kernels/scatter_add_dynamic_f16.wgsl +52 -0
- package/src/gpu/kernels/scatter_add_dynamic_f16_weights.wgsl +50 -0
- package/src/gpu/kernels/scatter_add_vec4.wgsl +70 -0
- package/src/gpu/kernels/shader-cache.d.ts +56 -0
- package/src/gpu/kernels/shader-cache.js +206 -0
- package/src/gpu/kernels/silu.d.ts +75 -0
- package/src/gpu/kernels/silu.js +340 -0
- package/src/gpu/kernels/silu.wgsl +99 -0
- package/src/gpu/kernels/silu_f16.wgsl +98 -0
- package/src/gpu/kernels/softmax.d.ts +57 -0
- package/src/gpu/kernels/softmax.js +106 -0
- package/src/gpu/kernels/softmax.wgsl +388 -0
- package/src/gpu/kernels/softmax_subgroup.wgsl +175 -0
- package/src/gpu/kernels/split_qkv.d.ts +51 -0
- package/src/gpu/kernels/split_qkv.js +41 -0
- package/src/gpu/kernels/split_qkv.wgsl +71 -0
- package/src/gpu/kernels/split_qkv_f16.wgsl +75 -0
- package/src/gpu/kernels/topk.wgsl +243 -0
- package/src/gpu/kernels/topk_f16.wgsl +108 -0
- package/src/gpu/kernels/topk_f16_weights.wgsl +101 -0
- package/src/gpu/kernels/transpose.d.ts +21 -0
- package/src/gpu/kernels/transpose.js +30 -0
- package/src/gpu/kernels/transpose.wgsl +32 -0
- package/src/gpu/kernels/types.d.ts +21 -0
- package/src/gpu/kernels/types.js +4 -0
- package/src/gpu/kernels/uniform-utils.d.ts +48 -0
- package/src/gpu/kernels/uniform-utils.js +94 -0
- package/src/gpu/kernels/upsample2d.d.ts +25 -0
- package/src/gpu/kernels/upsample2d.js +58 -0
- package/src/gpu/kernels/upsample2d.wgsl +37 -0
- package/src/gpu/kernels/upsample2d_f16.wgsl +41 -0
- package/src/gpu/kernels/utils.d.ts +106 -0
- package/src/gpu/kernels/utils.js +224 -0
- package/src/gpu/multi-model-recorder.d.ts +21 -0
- package/src/gpu/multi-model-recorder.js +31 -0
- package/src/gpu/partitioned-buffer-pool.d.ts +28 -0
- package/src/gpu/partitioned-buffer-pool.js +49 -0
- package/src/gpu/perf-guards.d.ts +25 -0
- package/src/gpu/perf-guards.js +140 -0
- package/src/gpu/profiler.d.ts +114 -0
- package/src/gpu/profiler.js +391 -0
- package/src/gpu/submit-tracker.d.ts +111 -0
- package/src/gpu/submit-tracker.js +229 -0
- package/src/gpu/tensor.d.ts +69 -0
- package/src/gpu/tensor.js +75 -0
- package/src/gpu/uniform-cache.d.ts +108 -0
- package/src/gpu/uniform-cache.js +242 -0
- package/src/gpu/weight-buffer.d.ts +115 -0
- package/src/gpu/weight-buffer.js +118 -0
- package/src/hotswap/intent-bundle.d.ts +37 -0
- package/src/hotswap/intent-bundle.js +123 -0
- package/src/hotswap/manifest.d.ts +33 -0
- package/src/hotswap/manifest.js +114 -0
- package/src/hotswap/runtime.d.ts +31 -0
- package/src/hotswap/runtime.js +128 -0
- package/src/index-browser.d.ts +47 -0
- package/src/index-browser.js +53 -0
- package/src/index-internal.d.ts +2 -0
- package/src/index-internal.js +2 -0
- package/src/index.d.ts +102 -0
- package/src/index.js +75 -0
- package/src/inference/README.md +593 -0
- package/src/inference/browser-harness.d.ts +234 -0
- package/src/inference/browser-harness.js +2665 -0
- package/src/inference/decode-buffers.d.ts +108 -0
- package/src/inference/decode-buffers.js +181 -0
- package/src/inference/decode-ring.d.ts +52 -0
- package/src/inference/decode-ring.js +273 -0
- package/src/inference/expert-router.d.ts +27 -0
- package/src/inference/expert-router.js +55 -0
- package/src/inference/functiongemma.d.ts +15 -0
- package/src/inference/functiongemma.js +1 -0
- package/src/inference/kv-cache/base.d.ts +150 -0
- package/src/inference/kv-cache/base.js +1037 -0
- package/src/inference/kv-cache/basis-decomposed-paged.d.ts +50 -0
- package/src/inference/kv-cache/basis-decomposed-paged.js +276 -0
- package/src/inference/kv-cache/index.d.ts +35 -0
- package/src/inference/kv-cache/index.js +20 -0
- package/src/inference/kv-cache/sliding-window.d.ts +72 -0
- package/src/inference/kv-cache/sliding-window.js +243 -0
- package/src/inference/kv-cache/tiered.d.ts +89 -0
- package/src/inference/kv-cache/tiered.js +574 -0
- package/src/inference/kv-cache/types.d.ts +188 -0
- package/src/inference/kv-cache/types.js +80 -0
- package/src/inference/kv-cache.d.ts +36 -0
- package/src/inference/kv-cache.js +18 -0
- package/src/inference/moe-router.d.ts +212 -0
- package/src/inference/moe-router.js +553 -0
- package/src/inference/multi-model-network.d.ts +139 -0
- package/src/inference/multi-model-network.js +769 -0
- package/src/inference/multi-pipeline-pool.d.ts +62 -0
- package/src/inference/multi-pipeline-pool.js +161 -0
- package/src/inference/network-evolution.d.ts +46 -0
- package/src/inference/network-evolution.js +80 -0
- package/src/inference/pipelines/context.d.ts +18 -0
- package/src/inference/pipelines/context.js +44 -0
- package/src/inference/pipelines/diffusion/helpers.d.ts +29 -0
- package/src/inference/pipelines/diffusion/helpers.js +112 -0
- package/src/inference/pipelines/diffusion/index.d.ts +3 -0
- package/src/inference/pipelines/diffusion/index.js +3 -0
- package/src/inference/pipelines/diffusion/init.d.ts +24 -0
- package/src/inference/pipelines/diffusion/init.js +124 -0
- package/src/inference/pipelines/diffusion/pipeline.d.ts +38 -0
- package/src/inference/pipelines/diffusion/pipeline.js +632 -0
- package/src/inference/pipelines/diffusion/scheduler.d.ts +19 -0
- package/src/inference/pipelines/diffusion/scheduler.js +65 -0
- package/src/inference/pipelines/diffusion/sd3-transformer.d.ts +20 -0
- package/src/inference/pipelines/diffusion/sd3-transformer.js +1194 -0
- package/src/inference/pipelines/diffusion/sd3-weights.d.ts +21 -0
- package/src/inference/pipelines/diffusion/sd3-weights.js +287 -0
- package/src/inference/pipelines/diffusion/text-encoder-gpu.d.ts +80 -0
- package/src/inference/pipelines/diffusion/text-encoder-gpu.js +935 -0
- package/src/inference/pipelines/diffusion/text-encoder.d.ts +29 -0
- package/src/inference/pipelines/diffusion/text-encoder.js +178 -0
- package/src/inference/pipelines/diffusion/types.d.ts +112 -0
- package/src/inference/pipelines/diffusion/types.js +1 -0
- package/src/inference/pipelines/diffusion/vae.d.ts +20 -0
- package/src/inference/pipelines/diffusion/vae.js +675 -0
- package/src/inference/pipelines/diffusion/weights.d.ts +40 -0
- package/src/inference/pipelines/diffusion/weights.js +150 -0
- package/src/inference/pipelines/dream/energy-head-pipeline.d.ts +29 -0
- package/src/inference/pipelines/dream/energy-head-pipeline.js +6 -0
- package/src/inference/pipelines/dream/pipeline.d.ts +17 -0
- package/src/inference/pipelines/dream/pipeline.js +8 -0
- package/src/inference/pipelines/energy/index.d.ts +1 -0
- package/src/inference/pipelines/energy/index.js +1 -0
- package/src/inference/pipelines/energy/pipeline.d.ts +27 -0
- package/src/inference/pipelines/energy/pipeline.js +680 -0
- package/src/inference/pipelines/energy/quintel.d.ts +87 -0
- package/src/inference/pipelines/energy/quintel.js +207 -0
- package/src/inference/pipelines/energy/types.d.ts +63 -0
- package/src/inference/pipelines/energy/types.js +1 -0
- package/src/inference/pipelines/energy-head/index.d.ts +6 -0
- package/src/inference/pipelines/energy-head/index.js +6 -0
- package/src/inference/pipelines/energy-head/row-head-pipeline.d.ts +103 -0
- package/src/inference/pipelines/energy-head/row-head-pipeline.js +487 -0
- package/src/inference/pipelines/factory.d.ts +10 -0
- package/src/inference/pipelines/factory.js +6 -0
- package/src/inference/pipelines/index.d.ts +22 -0
- package/src/inference/pipelines/index.js +19 -0
- package/src/inference/pipelines/registry.d.ts +15 -0
- package/src/inference/pipelines/registry.js +23 -0
- package/src/inference/pipelines/rng.d.ts +2 -0
- package/src/inference/pipelines/rng.js +17 -0
- package/src/inference/pipelines/structured/index.d.ts +8 -0
- package/src/inference/pipelines/structured/index.js +8 -0
- package/src/inference/pipelines/structured/json-head-pipeline.d.ts +58 -0
- package/src/inference/pipelines/structured/json-head-pipeline.js +181 -0
- package/src/inference/pipelines/text/attention/index.d.ts +24 -0
- package/src/inference/pipelines/text/attention/index.js +17 -0
- package/src/inference/pipelines/text/attention/projections.d.ts +101 -0
- package/src/inference/pipelines/text/attention/projections.js +435 -0
- package/src/inference/pipelines/text/attention/record.d.ts +36 -0
- package/src/inference/pipelines/text/attention/record.js +613 -0
- package/src/inference/pipelines/text/attention/run.d.ts +38 -0
- package/src/inference/pipelines/text/attention/run.js +826 -0
- package/src/inference/pipelines/text/attention/types.d.ts +98 -0
- package/src/inference/pipelines/text/attention/types.js +67 -0
- package/src/inference/pipelines/text/attention.d.ts +23 -0
- package/src/inference/pipelines/text/attention.js +12 -0
- package/src/inference/pipelines/text/bdpa-steamroller.d.ts +22 -0
- package/src/inference/pipelines/text/bdpa-steamroller.js +158 -0
- package/src/inference/pipelines/text/buffer-types.d.ts +7 -0
- package/src/inference/pipelines/text/buffer-types.js +4 -0
- package/src/inference/pipelines/text/chat-format.d.ts +46 -0
- package/src/inference/pipelines/text/chat-format.js +366 -0
- package/src/inference/pipelines/text/config.d.ts +235 -0
- package/src/inference/pipelines/text/config.js +623 -0
- package/src/inference/pipelines/text/debug-utils/config.d.ts +144 -0
- package/src/inference/pipelines/text/debug-utils/config.js +156 -0
- package/src/inference/pipelines/text/debug-utils/index.d.ts +53 -0
- package/src/inference/pipelines/text/debug-utils/index.js +44 -0
- package/src/inference/pipelines/text/debug-utils/logging.d.ts +106 -0
- package/src/inference/pipelines/text/debug-utils/logging.js +152 -0
- package/src/inference/pipelines/text/debug-utils/tensor.d.ts +119 -0
- package/src/inference/pipelines/text/debug-utils/tensor.js +268 -0
- package/src/inference/pipelines/text/debug-utils/utils.d.ts +77 -0
- package/src/inference/pipelines/text/debug-utils/utils.js +139 -0
- package/src/inference/pipelines/text/debug-utils.d.ts +42 -0
- package/src/inference/pipelines/text/debug-utils.js +34 -0
- package/src/inference/pipelines/text/embed.d.ts +67 -0
- package/src/inference/pipelines/text/embed.js +461 -0
- package/src/inference/pipelines/text/execution-plan.d.ts +116 -0
- package/src/inference/pipelines/text/execution-plan.js +314 -0
- package/src/inference/pipelines/text/execution-v0.d.ts +66 -0
- package/src/inference/pipelines/text/execution-v0.js +1139 -0
- package/src/inference/pipelines/text/ffn/dense.d.ts +40 -0
- package/src/inference/pipelines/text/ffn/dense.js +759 -0
- package/src/inference/pipelines/text/ffn/index.d.ts +23 -0
- package/src/inference/pipelines/text/ffn/index.js +16 -0
- package/src/inference/pipelines/text/ffn/moe.d.ts +21 -0
- package/src/inference/pipelines/text/ffn/moe.js +49 -0
- package/src/inference/pipelines/text/ffn/sandwich.d.ts +25 -0
- package/src/inference/pipelines/text/ffn/sandwich.js +196 -0
- package/src/inference/pipelines/text/ffn/standard.d.ts +23 -0
- package/src/inference/pipelines/text/ffn/standard.js +84 -0
- package/src/inference/pipelines/text/ffn/types.d.ts +30 -0
- package/src/inference/pipelines/text/ffn/types.js +25 -0
- package/src/inference/pipelines/text/ffn.d.ts +31 -0
- package/src/inference/pipelines/text/ffn.js +18 -0
- package/src/inference/pipelines/text/finiteness-guard-status.d.ts +11 -0
- package/src/inference/pipelines/text/finiteness-guard-status.js +21 -0
- package/src/inference/pipelines/text/finiteness-policy.d.ts +35 -0
- package/src/inference/pipelines/text/finiteness-policy.js +45 -0
- package/src/inference/pipelines/text/generator-helpers.d.ts +34 -0
- package/src/inference/pipelines/text/generator-helpers.js +175 -0
- package/src/inference/pipelines/text/generator-runtime.d.ts +93 -0
- package/src/inference/pipelines/text/generator-runtime.js +373 -0
- package/src/inference/pipelines/text/generator-steps.d.ts +75 -0
- package/src/inference/pipelines/text/generator-steps.js +1078 -0
- package/src/inference/pipelines/text/generator.d.ts +41 -0
- package/src/inference/pipelines/text/generator.js +1345 -0
- package/src/inference/pipelines/text/index.d.ts +5 -0
- package/src/inference/pipelines/text/index.js +6 -0
- package/src/inference/pipelines/text/init.d.ts +295 -0
- package/src/inference/pipelines/text/init.js +965 -0
- package/src/inference/pipelines/text/kernel-path-auto-select.d.ts +12 -0
- package/src/inference/pipelines/text/kernel-path-auto-select.js +90 -0
- package/src/inference/pipelines/text/kernel-trace.d.ts +150 -0
- package/src/inference/pipelines/text/kernel-trace.js +324 -0
- package/src/inference/pipelines/text/layer-plan.d.ts +65 -0
- package/src/inference/pipelines/text/layer-plan.js +249 -0
- package/src/inference/pipelines/text/layer.d.ts +56 -0
- package/src/inference/pipelines/text/layer.js +916 -0
- package/src/inference/pipelines/text/linear-attention.d.ts +94 -0
- package/src/inference/pipelines/text/linear-attention.js +803 -0
- package/src/inference/pipelines/text/logits/cpu.d.ts +81 -0
- package/src/inference/pipelines/text/logits/cpu.js +91 -0
- package/src/inference/pipelines/text/logits/gpu.d.ts +113 -0
- package/src/inference/pipelines/text/logits/gpu.js +406 -0
- package/src/inference/pipelines/text/logits/index.d.ts +57 -0
- package/src/inference/pipelines/text/logits/index.js +305 -0
- package/src/inference/pipelines/text/logits/types.d.ts +46 -0
- package/src/inference/pipelines/text/logits/types.js +4 -0
- package/src/inference/pipelines/text/logits/utils.d.ts +49 -0
- package/src/inference/pipelines/text/logits/utils.js +59 -0
- package/src/inference/pipelines/text/logits.d.ts +27 -0
- package/src/inference/pipelines/text/logits.js +16 -0
- package/src/inference/pipelines/text/lora-apply.d.ts +28 -0
- package/src/inference/pipelines/text/lora-apply.js +58 -0
- package/src/inference/pipelines/text/lora-types.d.ts +39 -0
- package/src/inference/pipelines/text/lora-types.js +18 -0
- package/src/inference/pipelines/text/lora.d.ts +18 -0
- package/src/inference/pipelines/text/lora.js +12 -0
- package/src/inference/pipelines/text/model-load.d.ts +58 -0
- package/src/inference/pipelines/text/model-load.js +561 -0
- package/src/inference/pipelines/text/moe-cache.d.ts +32 -0
- package/src/inference/pipelines/text/moe-cache.js +107 -0
- package/src/inference/pipelines/text/moe-cpu-gptoss.d.ts +9 -0
- package/src/inference/pipelines/text/moe-cpu-gptoss.js +110 -0
- package/src/inference/pipelines/text/moe-cpu.d.ts +13 -0
- package/src/inference/pipelines/text/moe-cpu.js +116 -0
- package/src/inference/pipelines/text/moe-gpu.d.ts +13 -0
- package/src/inference/pipelines/text/moe-gpu.js +611 -0
- package/src/inference/pipelines/text/moe-helpers.d.ts +12 -0
- package/src/inference/pipelines/text/moe-helpers.js +21 -0
- package/src/inference/pipelines/text/moe-impl.d.ts +117 -0
- package/src/inference/pipelines/text/moe-impl.js +9 -0
- package/src/inference/pipelines/text/moe-shape-validator.d.ts +31 -0
- package/src/inference/pipelines/text/moe-shape-validator.js +78 -0
- package/src/inference/pipelines/text/ops.d.ts +167 -0
- package/src/inference/pipelines/text/ops.js +367 -0
- package/src/inference/pipelines/text/probes.d.ts +31 -0
- package/src/inference/pipelines/text/probes.js +170 -0
- package/src/inference/pipelines/text/sampling.d.ts +54 -0
- package/src/inference/pipelines/text/sampling.js +203 -0
- package/src/inference/pipelines/text/state.d.ts +112 -0
- package/src/inference/pipelines/text/state.js +152 -0
- package/src/inference/pipelines/text/types.d.ts +627 -0
- package/src/inference/pipelines/text/types.js +4 -0
- package/src/inference/pipelines/text/weights.d.ts +110 -0
- package/src/inference/pipelines/text/weights.js +163 -0
- package/src/inference/pipelines/text.d.ts +157 -0
- package/src/inference/pipelines/text.js +586 -0
- package/src/inference/speculative.d.ts +239 -0
- package/src/inference/speculative.js +416 -0
- package/src/inference/test-harness.d.ts +178 -0
- package/src/inference/test-harness.js +349 -0
- package/src/inference/tokenizer.d.ts +77 -0
- package/src/inference/tokenizer.js +258 -0
- package/src/inference/tokenizers/base.d.ts +39 -0
- package/src/inference/tokenizers/base.js +69 -0
- package/src/inference/tokenizers/bpe.d.ts +27 -0
- package/src/inference/tokenizers/bpe.js +171 -0
- package/src/inference/tokenizers/bundled.d.ts +63 -0
- package/src/inference/tokenizers/bundled.js +866 -0
- package/src/inference/tokenizers/sentencepiece.d.ts +28 -0
- package/src/inference/tokenizers/sentencepiece.js +389 -0
- package/src/inference/tokenizers/types.d.ts +166 -0
- package/src/inference/tokenizers/types.js +7 -0
- package/src/loader/doppler-loader.d.ts +134 -0
- package/src/loader/doppler-loader.js +1036 -0
- package/src/loader/dtype-utils.d.ts +40 -0
- package/src/loader/dtype-utils.js +102 -0
- package/src/loader/embedding-loader.d.ts +56 -0
- package/src/loader/embedding-loader.js +207 -0
- package/src/loader/experts/expert-cache.d.ts +156 -0
- package/src/loader/experts/expert-cache.js +375 -0
- package/src/loader/experts/expert-loader.d.ts +108 -0
- package/src/loader/experts/expert-loader.js +384 -0
- package/src/loader/final-weights-loader.d.ts +68 -0
- package/src/loader/final-weights-loader.js +262 -0
- package/src/loader/index.d.ts +150 -0
- package/src/loader/index.js +124 -0
- package/src/loader/layer-loader.d.ts +63 -0
- package/src/loader/layer-loader.js +417 -0
- package/src/loader/loader-state.d.ts +51 -0
- package/src/loader/loader-state.js +142 -0
- package/src/loader/loader-types.d.ts +236 -0
- package/src/loader/loader-types.js +4 -0
- package/src/loader/manifest-config.d.ts +97 -0
- package/src/loader/manifest-config.js +132 -0
- package/src/loader/memory-monitor.d.ts +112 -0
- package/src/loader/memory-monitor.js +276 -0
- package/src/loader/multi-model-loader.d.ts +37 -0
- package/src/loader/multi-model-loader.js +87 -0
- package/src/loader/quantization-constants.d.ts +23 -0
- package/src/loader/quantization-constants.js +14 -0
- package/src/loader/shard-cache.d.ts +60 -0
- package/src/loader/shard-cache.js +568 -0
- package/src/loader/shard-resolver.d.ts +12 -0
- package/src/loader/shard-resolver.js +83 -0
- package/src/loader/tensors/tensor-loader.d.ts +154 -0
- package/src/loader/tensors/tensor-loader.js +427 -0
- package/src/loader/tensors/tensor-reader.d.ts +22 -0
- package/src/loader/tensors/tensor-reader.js +56 -0
- package/src/loader/tensors/tensor-role.d.ts +7 -0
- package/src/loader/tensors/tensor-role.js +12 -0
- package/src/loader/weight-downcast.d.ts +62 -0
- package/src/loader/weight-downcast.js +213 -0
- package/src/loader/weights.d.ts +22 -0
- package/src/loader/weights.js +4 -0
- package/src/memory/address-table.d.ts +104 -0
- package/src/memory/address-table.js +114 -0
- package/src/memory/buffer-pool.d.ts +196 -0
- package/src/memory/buffer-pool.js +756 -0
- package/src/memory/capability.d.ts +49 -0
- package/src/memory/capability.js +95 -0
- package/src/memory/heap-manager.d.ts +104 -0
- package/src/memory/heap-manager.js +264 -0
- package/src/memory/unified-detect.d.ts +59 -0
- package/src/memory/unified-detect.js +192 -0
- package/src/rules/converter/execution.rules.json +20 -0
- package/src/rules/converter/tensor-roles.rules.json +13 -0
- package/src/rules/converter/tokenizer.rules.json +7 -0
- package/src/rules/inference/attention.rules.json +54 -0
- package/src/rules/inference/config.rules.json +58 -0
- package/src/rules/inference/dtype.rules.json +94 -0
- package/src/rules/inference/execution.rules.json +45 -0
- package/src/rules/inference/ffn.rules.json +35 -0
- package/src/rules/inference/kernel-path.rules.json +76 -0
- package/src/rules/inference/layer-pattern.rules.json +16 -0
- package/src/rules/inference/layer.rules.json +7 -0
- package/src/rules/inference/moe.rules.json +48 -0
- package/src/rules/kernels/attention.rules.json +61 -0
- package/src/rules/kernels/conv2d.rules.json +6 -0
- package/src/rules/kernels/dequant.rules.json +58 -0
- package/src/rules/kernels/energy.rules.json +22 -0
- package/src/rules/kernels/fused-ffn.rules.json +13 -0
- package/src/rules/kernels/fused-matmul-residual.rules.json +6 -0
- package/src/rules/kernels/fused-matmul-rmsnorm.rules.json +8 -0
- package/src/rules/kernels/gather.rules.json +12 -0
- package/src/rules/kernels/gelu.rules.json +11 -0
- package/src/rules/kernels/groupnorm.rules.json +10 -0
- package/src/rules/kernels/kernel-validator.d.ts +24 -0
- package/src/rules/kernels/kernel-validator.js +160 -0
- package/src/rules/kernels/kv_quantize.rules.json +7 -0
- package/src/rules/kernels/layernorm.rules.json +6 -0
- package/src/rules/kernels/matmul.rules.json +60 -0
- package/src/rules/kernels/modulate.rules.json +6 -0
- package/src/rules/kernels/moe.rules.gptoss.json +105 -0
- package/src/rules/kernels/moe.rules.json +11 -0
- package/src/rules/kernels/pixel_shuffle.rules.json +6 -0
- package/src/rules/kernels/residual.rules.json +12 -0
- package/src/rules/kernels/rmsnorm.rules.json +11 -0
- package/src/rules/kernels/rope.rules.json +6 -0
- package/src/rules/kernels/sample.rules.json +6 -0
- package/src/rules/kernels/scale.rules.json +6 -0
- package/src/rules/kernels/silu.rules.json +21 -0
- package/src/rules/kernels/softmax.rules.json +23 -0
- package/src/rules/kernels/split-qkv.rules.json +6 -0
- package/src/rules/kernels/upsample2d.rules.json +6 -0
- package/src/rules/loader/tensor-loader.rules.json +15 -0
- package/src/rules/loader/weights.rules.json +41 -0
- package/src/rules/rule-registry.d.ts +48 -0
- package/src/rules/rule-registry.js +177 -0
- package/src/rules/tooling/command-runtime.rules.json +38 -0
- package/src/storage/backends/idb-store.d.ts +52 -0
- package/src/storage/backends/idb-store.js +590 -0
- package/src/storage/backends/memory-store.d.ts +36 -0
- package/src/storage/backends/memory-store.js +242 -0
- package/src/storage/backends/opfs-store.d.ts +41 -0
- package/src/storage/backends/opfs-store.js +429 -0
- package/src/storage/blake3.d.ts +17 -0
- package/src/storage/blake3.js +269 -0
- package/src/storage/download-types.d.ts +157 -0
- package/src/storage/download-types.js +48 -0
- package/src/storage/downloader.d.ts +103 -0
- package/src/storage/downloader.js +839 -0
- package/src/storage/emulated-vram.d.ts +264 -0
- package/src/storage/emulated-vram.js +576 -0
- package/src/storage/export.d.ts +20 -0
- package/src/storage/export.js +159 -0
- package/src/storage/index.d.ts +253 -0
- package/src/storage/index.js +185 -0
- package/src/storage/inventory.d.ts +26 -0
- package/src/storage/inventory.js +218 -0
- package/src/storage/preflight.d.ts +144 -0
- package/src/storage/preflight.js +294 -0
- package/src/storage/quickstart-downloader.d.ts +154 -0
- package/src/storage/quickstart-downloader.js +265 -0
- package/src/storage/quota.d.ts +150 -0
- package/src/storage/quota.js +304 -0
- package/src/storage/registry.d.ts +28 -0
- package/src/storage/registry.js +125 -0
- package/src/storage/reports.d.ts +20 -0
- package/src/storage/reports.js +94 -0
- package/src/storage/shard-manager.d.ts +137 -0
- package/src/storage/shard-manager.js +801 -0
- package/src/sw.d.ts +1 -0
- package/src/sw.js +187 -0
- package/src/tooling/browser-command-runner.d.ts +28 -0
- package/src/tooling/browser-command-runner.js +82 -0
- package/src/tooling/command-api.d.ts +147 -0
- package/src/tooling/command-api.js +523 -0
- package/src/tooling/command-envelope.d.ts +81 -0
- package/src/tooling/command-envelope.js +195 -0
- package/src/tooling/command-runner-shared.d.ts +73 -0
- package/src/tooling/command-runner-shared.js +146 -0
- package/src/tooling/command-runner.html +45 -0
- package/src/tooling/node-browser-command-runner.d.ts +30 -0
- package/src/tooling/node-browser-command-runner.js +868 -0
- package/src/tooling/node-command-runner.d.ts +36 -0
- package/src/tooling/node-command-runner.js +127 -0
- package/src/tooling/node-convert-worker-pool.d.ts +16 -0
- package/src/tooling/node-convert-worker-pool.js +186 -0
- package/src/tooling/node-convert-worker.d.ts +1 -0
- package/src/tooling/node-convert-worker.js +60 -0
- package/src/tooling/node-convert.d.ts +44 -0
- package/src/tooling/node-converter.d.ts +1 -0
- package/src/tooling/node-converter.js +1227 -0
- package/src/tooling/node-file-fetch.d.ts +1 -0
- package/src/tooling/node-file-fetch.js +38 -0
- package/src/tooling/node-source-runtime.d.ts +19 -0
- package/src/tooling/node-source-runtime.js +469 -0
- package/src/tooling/node-webgpu.d.ts +6 -0
- package/src/tooling/node-webgpu.js +321 -0
- package/src/tooling/opfs-cache.d.ts +11 -0
- package/src/tooling/opfs-cache.js +174 -0
- package/src/tooling/source-runtime-bundle.d.ts +102 -0
- package/src/tooling/source-runtime-bundle.js +484 -0
- package/src/tooling-exports.browser.d.ts +7 -0
- package/src/tooling-exports.browser.js +2 -0
- package/src/tooling-exports.d.ts +22 -0
- package/src/tooling-exports.js +7 -0
- package/src/tooling-exports.shared.d.ts +105 -0
- package/src/tooling-exports.shared.js +92 -0
- package/src/training/README.md +153 -0
- package/src/training/artifacts.d.ts +160 -0
- package/src/training/artifacts.js +896 -0
- package/src/training/attention-backward.d.ts +30 -0
- package/src/training/attention-backward.js +217 -0
- package/src/training/attention-forward.d.ts +22 -0
- package/src/training/attention-forward.js +82 -0
- package/src/training/autograd.d.ts +51 -0
- package/src/training/autograd.js +380 -0
- package/src/training/checkpoint.d.ts +31 -0
- package/src/training/checkpoint.js +238 -0
- package/src/training/clip.d.ts +9 -0
- package/src/training/clip.js +54 -0
- package/src/training/dataloader.d.ts +8 -0
- package/src/training/dataloader.js +44 -0
- package/src/training/datasets/index.d.ts +12 -0
- package/src/training/datasets/index.js +6 -0
- package/src/training/datasets/jsonl.d.ts +11 -0
- package/src/training/datasets/jsonl.js +50 -0
- package/src/training/datasets/reploid.d.ts +3 -0
- package/src/training/datasets/reploid.js +36 -0
- package/src/training/datasets/text-pairs.d.ts +21 -0
- package/src/training/datasets/text-pairs.js +42 -0
- package/src/training/datasets/token-batch.d.ts +21 -0
- package/src/training/datasets/token-batch.js +40 -0
- package/src/training/datasets/translation-pairs.d.ts +34 -0
- package/src/training/datasets/translation-pairs.js +49 -0
- package/src/training/export.d.ts +32 -0
- package/src/training/export.js +112 -0
- package/src/training/index.d.ts +52 -0
- package/src/training/index.js +41 -0
- package/src/training/lora.d.ts +19 -0
- package/src/training/lora.js +57 -0
- package/src/training/loss-scaling.d.ts +21 -0
- package/src/training/loss-scaling.js +80 -0
- package/src/training/loss.d.ts +10 -0
- package/src/training/loss.js +41 -0
- package/src/training/objectives/base.d.ts +58 -0
- package/src/training/objectives/base.js +38 -0
- package/src/training/objectives/cross_entropy.d.ts +18 -0
- package/src/training/objectives/cross_entropy.js +37 -0
- package/src/training/objectives/distill_kd.d.ts +16 -0
- package/src/training/objectives/distill_kd.js +369 -0
- package/src/training/objectives/distill_triplet.d.ts +16 -0
- package/src/training/objectives/distill_triplet.js +412 -0
- package/src/training/objectives/index.d.ts +12 -0
- package/src/training/objectives/index.js +6 -0
- package/src/training/objectives/ul_stage1_joint.d.ts +16 -0
- package/src/training/objectives/ul_stage1_joint.js +188 -0
- package/src/training/objectives/ul_stage2_base.d.ts +16 -0
- package/src/training/objectives/ul_stage2_base.js +222 -0
- package/src/training/optimizer.d.ts +22 -0
- package/src/training/optimizer.js +115 -0
- package/src/training/runner.d.ts +196 -0
- package/src/training/runner.js +1194 -0
- package/src/training/suite.d.ts +187 -0
- package/src/training/suite.js +3156 -0
- package/src/training/trainer.d.ts +89 -0
- package/src/training/trainer.js +301 -0
- package/src/training/ul_dataset.d.ts +47 -0
- package/src/training/ul_dataset.js +153 -0
- package/src/training/ul_schedule.d.ts +6 -0
- package/src/training/ul_schedule.js +29 -0
- package/src/types/chrome.d.ts +36 -0
- package/src/types/chrome.js +1 -0
- package/src/types/gpu.d.ts +185 -0
- package/src/types/gpu.js +5 -0
- package/src/types/index.d.ts +3 -0
- package/src/types/index.js +3 -0
- package/src/types/inference.d.ts +197 -0
- package/src/types/inference.js +5 -0
- package/src/types/model.d.ts +125 -0
- package/src/types/model.js +5 -0
- package/src/utils/index.d.ts +7 -0
- package/src/utils/index.js +7 -0
- package/src/utils/load-json.d.ts +5 -0
- package/src/utils/load-json.js +23 -0
- package/src/utils/plain-object.d.ts +1 -0
- package/src/utils/plain-object.js +3 -0
- package/src/utils/sha256.d.ts +4 -0
- package/src/utils/sha256.js +135 -0
- package/tools/convert-safetensors-node.js +180 -0
- package/tools/doppler-cli.js +1170 -0
|
@@ -0,0 +1,1345 @@
|
|
|
1
|
+
|
|
2
|
+
|
|
3
|
+
import { getDevice, setTrackSubmits } from '../../../gpu/device.js';
|
|
4
|
+
import { releaseBuffer, readBuffer, readBufferSlice } from '../../../memory/buffer-pool.js';
|
|
5
|
+
import { isGPUSamplingAvailable } from '../../../gpu/kernels/sample.js';
|
|
6
|
+
import { markWarmed as markKernelCacheWarmed } from '../../../gpu/kernel-selection-cache.js';
|
|
7
|
+
import { resetSubmitStats, logSubmitStats } from '../../../gpu/submit-tracker.js';
|
|
8
|
+
import { createCommandRecorder, createProfilingRecorder, CommandRecorder } from '../../../gpu/command-recorder.js';
|
|
9
|
+
import { allowReadback } from '../../../gpu/perf-guards.js';
|
|
10
|
+
import { log, trace } from '../../../debug/index.js';
|
|
11
|
+
import { validateCallTimeOptions } from '../../../config/param-validator.js';
|
|
12
|
+
import { selectRuleValue } from '../../../rules/rule-registry.js';
|
|
13
|
+
|
|
14
|
+
// Pipeline sub-modules
|
|
15
|
+
import { sample, applyRepetitionPenalty, logitsSanity, getTopK } from './sampling.js';
|
|
16
|
+
import { enforceLogitDrift } from '../../../hotswap/intent-bundle.js';
|
|
17
|
+
import { applyChatTemplate, isStopToken } from './init.js';
|
|
18
|
+
import { formatChatMessages } from './chat-format.js';
|
|
19
|
+
import { embed } from './embed.js';
|
|
20
|
+
import { processLayer } from './layer.js';
|
|
21
|
+
import { computeLogits, recordLogitsGPU, extractLastPositionLogits, applySoftcapping } from './logits/index.js';
|
|
22
|
+
import { isWeightBuffer, isCpuWeightBuffer, getWeightDtype } from '../../../gpu/weight-buffer.js';
|
|
23
|
+
import {
|
|
24
|
+
decodeStep,
|
|
25
|
+
decodeStepLogits,
|
|
26
|
+
advanceWithToken,
|
|
27
|
+
generateNTokensGPU,
|
|
28
|
+
shouldUseBatchDecode,
|
|
29
|
+
sumProfileTimings,
|
|
30
|
+
FinitenessError,
|
|
31
|
+
advanceWithTokenAndEmbedding as runAdvanceWithTokenAndEmbedding,
|
|
32
|
+
} from './generator-steps.js';
|
|
33
|
+
import { buildLayerContext, debugCheckBuffer as debugCheckBufferHelper, getLogitsConfig, getLogitsWeights } from './generator-helpers.js';
|
|
34
|
+
import {
|
|
35
|
+
assertTokenIdsInRange,
|
|
36
|
+
assertTokenIdInRange,
|
|
37
|
+
resolveStepOptions,
|
|
38
|
+
resolveGenerateOptions,
|
|
39
|
+
resolvePrefillOptions,
|
|
40
|
+
resolvePrefillEmbeddingOptions,
|
|
41
|
+
resolveAdvanceEmbeddingMode,
|
|
42
|
+
getFinalNormWeights,
|
|
43
|
+
extractEmbeddingFromHidden,
|
|
44
|
+
} from './generator-runtime.js';
|
|
45
|
+
|
|
46
|
+
import { decodeReadback, getLogitsHealth } from './debug-utils/index.js';
|
|
47
|
+
import { parseFinitenessStatusWords } from './finiteness-guard-status.js';
|
|
48
|
+
import { resolveDeferredRoundingWindowTokens } from './finiteness-policy.js';
|
|
49
|
+
import {
|
|
50
|
+
activateFallbackExecutionPlan,
|
|
51
|
+
rebaseExecutionSessionPlan,
|
|
52
|
+
resetActiveExecutionPlan,
|
|
53
|
+
resolveActiveExecutionPlan,
|
|
54
|
+
setActiveExecutionPlan,
|
|
55
|
+
} from './execution-plan.js';
|
|
56
|
+
import {
|
|
57
|
+
cloneLinearAttentionRuntime,
|
|
58
|
+
hasLinearAttentionLayers,
|
|
59
|
+
resetLinearAttentionRuntime,
|
|
60
|
+
restoreLinearAttentionRuntime,
|
|
61
|
+
} from './linear-attention.js';
|
|
62
|
+
|
|
63
|
+
/**
 * Detect a structured chat request: a plain (non-array) object that carries a
 * `messages` array, i.e. the `{ messages: [...] }` prompt shape.
 *
 * @param {*} prompt - Candidate prompt value of any type.
 * @returns {boolean} True only for non-null, non-array objects whose
 *   `messages` property is an array.
 */
function isStructuredChatRequest(prompt) {
  if (prompt == null || typeof prompt !== 'object') {
    return false;
  }
  if (Array.isArray(prompt)) {
    return false;
  }
  return Array.isArray(prompt.messages);
}
|
|
69
|
+
|
|
70
|
+
/**
 * Normalize the user-supplied prompt into the raw text the tokenizer will see.
 *
 * Accepted prompt shapes:
 *   - string: returned as-is, or run through the model's chat template when
 *     `useChatTemplate` is set and the model declares a template type
 *     (translategemma is the exception: it requires structured messages).
 *   - chat message array, or `{ messages: [...] }`: formatted via
 *     `formatChatMessages`.
 *
 * @param {object} state - Pipeline state (reads `modelConfig.chatTemplateType`).
 * @param {string|Array|{messages: Array}} prompt - Prompt input.
 * @param {boolean} useChatTemplate - Whether to apply the model's chat template.
 * @param {string} contextLabel - Call-site label used in error messages.
 * @returns {string} The resolved prompt text.
 * @throws {Error} On unsupported prompt shapes, or a plain-string prompt with
 *   the translategemma template.
 */
function resolvePromptInput(state, prompt, useChatTemplate, contextLabel) {
  if (typeof prompt === 'string') {
    const templateType = useChatTemplate ? state.modelConfig.chatTemplateType : null;
    if (!templateType) {
      return prompt;
    }
    if (templateType === 'translategemma') {
      throw new Error(
        `[Pipeline] ${contextLabel}: translategemma chat template requires structured messages. ` +
        'Pass { messages: [...] } instead of a plain string prompt.'
      );
    }
    return applyChatTemplate(prompt, templateType);
  }

  let messages = null;
  if (isStructuredChatRequest(prompt)) {
    messages = prompt.messages;
  } else if (Array.isArray(prompt)) {
    messages = prompt;
  }
  if (!messages) {
    throw new Error(
      `[Pipeline] ${contextLabel}: prompt must be a string, chat message array, or { messages: [...] }.`
    );
  }

  const templateType = useChatTemplate ? state.modelConfig.chatTemplateType : null;
  return formatChatMessages(messages, templateType);
}
|
|
95
|
+
|
|
96
|
+
/**
 * Decode token ids into display text with a layered fallback strategy.
 *
 * Order of preference:
 *   1. `renderTokenText` (or `tokenizer.decode(ids)`) — the "primary" decode.
 *   2. `renderFallbackTokenText` (or `tokenizer.decode(ids, false)`), unless
 *      the primary decode returned exactly '' and the fallback looks like a
 *      special token (`<...>`): then '' is kept so skip-special filtering
 *      stays deterministic.
 *   3. `fallbackText` when neither produced a non-empty string.
 *
 * @param {object|null} tokenizer - Tokenizer with an optional `decode` method.
 * @param {number[]} tokenIds - Token ids to decode.
 * @param {string} [fallbackText='?'] - Last-resort display text.
 * @param {Function} [renderTokenText] - Custom primary renderer.
 * @param {Function} [renderFallbackTokenText] - Custom fallback renderer.
 * @returns {string} Decoded text, '' for filtered specials, or `fallbackText`.
 */
function resolveTokenText(tokenizer, tokenIds, fallbackText = '?', renderTokenText, renderFallbackTokenText) {
  const defaultPrimary = (ids) => tokenizer?.decode?.(ids);
  const defaultFallback = (ids) => tokenizer?.decode?.(ids, false);
  const primary = typeof renderTokenText === 'function' ? renderTokenText : defaultPrimary;
  const secondary = typeof renderFallbackTokenText === 'function' ? renderFallbackTokenText : defaultFallback;

  const primaryText = primary(tokenIds);
  if (typeof primaryText === 'string' && primaryText.length > 0) {
    return primaryText;
  }

  const fallback = secondary(tokenIds);
  if (typeof fallback === 'string' && fallback.length > 0) {
    // Keep skip-special behavior deterministic: if primary decoding filtered this
    // token to empty, do not reintroduce obvious special-token text via fallback.
    const looksLikeSpecialToken = /^<[^>\n]{1,80}>$/.test(fallback.trim());
    if (primaryText === '' && looksLikeSpecialToken) {
      return '';
    }
    return fallback;
  }

  return fallbackText;
}
|
|
124
|
+
|
|
125
|
+
export class PipelineGenerator {
|
|
126
|
+
|
|
127
|
+
#state;
|
|
128
|
+
#finitenessFallbackWindow;
|
|
129
|
+
|
|
130
|
+
// Validate a batch of token ids by delegating to the shared runtime helper
// with this pipeline's state. `context` labels the call site
// (e.g. 'generate.encode') so range errors identify their origin.
_assertTokenIdsInRange(tokenIds, context = 'encode') {
  assertTokenIdsInRange(this.#state, tokenIds, context);
}
|
|
133
|
+
|
|
134
|
+
// Single-token variant of _assertTokenIdsInRange: delegates validation of one
// token id to the shared runtime helper, tagging errors with `context`.
_assertTokenIdInRange(tokenId, context = 'token') {
  assertTokenIdInRange(this.#state, tokenId, context);
}
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
// @param {object} state - Shared pipeline state bag (model config, tokenizer,
//   KV cache, weights, stats, execution-plan state — see usages below).
constructor(state) {
  this.#state = state;
  // Open multi-token finiteness-fallback window, or null when inactive.
  this.#finitenessFallbackWindow = null;
}
|
|
143
|
+
|
|
144
|
+
_resolveDeferredRoundingWindowTokens() {
|
|
145
|
+
const activePlan = resolveActiveExecutionPlan(this.#state);
|
|
146
|
+
return activePlan?.deferredRoundingWindowTokens
|
|
147
|
+
?? resolveDeferredRoundingWindowTokens(this.#state.runtimeConfig?.inference?.compute);
|
|
148
|
+
}
|
|
149
|
+
|
|
150
|
+
// Activation dtype of whichever execution plan is currently active
// (primary or finiteness fallback).
_getEffectiveActivationDtype() {
  return resolveActiveExecutionPlan(this.#state).activationDtype;
}
|
|
153
|
+
|
|
154
|
+
// True while a multi-token finiteness-fallback window is open
// (opened by _openFinitenessFallbackWindow, drained by
// _consumeFinitenessFallbackToken).
_hasFinitenessFallbackWindow() {
  return this.#finitenessFallbackWindow !== null;
}
|
|
157
|
+
|
|
158
|
+
_openFinitenessFallbackWindow(opts, reasonLabel, tokenCount) {
|
|
159
|
+
const normalizedCount = Number.isFinite(tokenCount)
|
|
160
|
+
? Math.max(1, Math.floor(tokenCount))
|
|
161
|
+
: 1;
|
|
162
|
+
if (this.#finitenessFallbackWindow) {
|
|
163
|
+
this.#finitenessFallbackWindow.remainingTokens = Math.max(
|
|
164
|
+
this.#finitenessFallbackWindow.remainingTokens,
|
|
165
|
+
normalizedCount
|
|
166
|
+
);
|
|
167
|
+
return;
|
|
168
|
+
}
|
|
169
|
+
const original = this._beginFinitenessFallback(opts, reasonLabel);
|
|
170
|
+
this.#finitenessFallbackWindow = {
|
|
171
|
+
original,
|
|
172
|
+
remainingTokens: normalizedCount,
|
|
173
|
+
};
|
|
174
|
+
}
|
|
175
|
+
|
|
176
|
+
_closeFinitenessFallbackWindow(opts) {
|
|
177
|
+
if (!this.#finitenessFallbackWindow) {
|
|
178
|
+
return;
|
|
179
|
+
}
|
|
180
|
+
const original = this.#finitenessFallbackWindow.original;
|
|
181
|
+
this.#finitenessFallbackWindow = null;
|
|
182
|
+
this._endFinitenessFallback(opts, original);
|
|
183
|
+
}
|
|
184
|
+
|
|
185
|
+
_consumeFinitenessFallbackToken(opts) {
|
|
186
|
+
if (!this.#finitenessFallbackWindow) {
|
|
187
|
+
return;
|
|
188
|
+
}
|
|
189
|
+
this.#finitenessFallbackWindow.remainingTokens -= 1;
|
|
190
|
+
if (this.#finitenessFallbackWindow.remainingTokens <= 0) {
|
|
191
|
+
this._closeFinitenessFallbackWindow(opts);
|
|
192
|
+
}
|
|
193
|
+
}
|
|
194
|
+
|
|
195
|
+
// Delegate resolution of single-step options to the shared runtime helper,
// merging caller overrides with this pipeline's defaults.
_resolveStepOptions(options = {}) {
  return resolveStepOptions(this.#state, options);
}
|
|
198
|
+
|
|
199
|
+
// Bundle the callbacks decode-step helpers need, each closed over this
// pipeline's state. `debugCheckBuffer` is threaded through both as a direct
// property and into buildLayerContext.
_getDecodeHelpers(debugCheckBuffer) {
  return {
    buildLayerContext: (recorder, isDecodeMode, debugLayers, executionPlan) =>
      buildLayerContext(this.#state, recorder, isDecodeMode, debugLayers, debugCheckBuffer, executionPlan),
    getLogitsWeights: () => getLogitsWeights(this.#state),
    getLogitsConfig: () => getLogitsConfig(this.#state),
    debugCheckBuffer,
  };
}
|
|
208
|
+
|
|
209
|
+
// Async passthrough to the generator-helpers implementation that resolves the
// model's final-norm weights from pipeline state.
async _getFinalNormWeights() {
  return getFinalNormWeights(this.#state);
}
|
|
212
|
+
|
|
213
|
+
_extractEmbeddingFromHidden(hiddenStates, numTokens, hiddenSize, embeddingMode, finalNormWeights, config) {
|
|
214
|
+
return extractEmbeddingFromHidden(
|
|
215
|
+
hiddenStates,
|
|
216
|
+
numTokens,
|
|
217
|
+
hiddenSize,
|
|
218
|
+
embeddingMode,
|
|
219
|
+
finalNormWeights,
|
|
220
|
+
config
|
|
221
|
+
);
|
|
222
|
+
}
|
|
223
|
+
|
|
224
|
+
// Switch the pipeline onto its finiteness-fallback execution plan after a
// NaN/Inf detection. Returns a descriptor ({ activePlanId, seed }) of the
// original state so _endFinitenessFallback can restore it.
// NOTE: mutates `opts` in place (seed, executionPlan).
_beginFinitenessFallback(opts, reasonLabel) {
  const originalPlan = resolveActiveExecutionPlan(this.#state);
  // Capture what must be restored later: active plan id and caller's seed.
  const original = {
    activePlanId: this.#state.executionPlanState?.activePlanId ?? 'primary',
    seed: opts.seed,
  };

  const fallbackPlan = activateFallbackExecutionPlan(this.#state);
  if (!fallbackPlan) {
    throw new Error('[Pipeline] Finiteness fallback plan is unavailable for this model/runtime configuration.');
  }
  log.warn(
    'Pipeline',
    `FinitenessGuard fallback (${reasonLabel}): ` +
    `${originalPlan.kernelPathId ?? 'none'} -> ${fallbackPlan.kernelPathId ?? 'none'}`
  );

  // Re-size decode scratch buffers for the fallback plan's activation dtype.
  this.#state.decodeBuffers?.ensureBuffers({
    hiddenSize: this.#state.modelConfig.hiddenSize,
    intermediateSize: this.#state.modelConfig.intermediateSize,
    activationDtype: fallbackPlan.activationDtype,
    enablePingPong: true,
  });

  // Pin a deterministic retry seed when the caller supplied none, derived
  // from the current decode position (2654435761 is Knuth's multiplicative
  // hash constant; >>> 0 keeps values in uint32 range).
  if (opts.seed == null) {
    const fallbackSeedBase = (this.#state.decodeStepCount + this.#state.currentSeqLen + 1) >>> 0;
    opts.seed = (fallbackSeedBase * 2654435761) >>> 0;
  }
  opts.executionPlan = rebaseExecutionSessionPlan(this.#state, opts.executionPlan);

  return original;
}
|
|
256
|
+
|
|
257
|
+
// Undo _beginFinitenessFallback: restore the caller's seed and the original
// execution plan, rebase the session plan, then re-size decode buffers for
// the restored plan's activation dtype.
_endFinitenessFallback(opts, original) {
  opts.seed = original.seed;
  setActiveExecutionPlan(this.#state, original.activePlanId);
  opts.executionPlan = rebaseExecutionSessionPlan(this.#state, opts.executionPlan);
  const nextActivationDtype = this._getEffectiveActivationDtype();
  this.#state.decodeBuffers?.ensureBuffers({
    hiddenSize: this.#state.modelConfig.hiddenSize,
    intermediateSize: this.#state.modelConfig.intermediateSize,
    activationDtype: nextActivationDtype,
    enablePingPong: true,
  });
}
|
|
269
|
+
|
|
270
|
+
async _retryWithFinitenessFallback(opts, reasonLabel, retryFn) {
|
|
271
|
+
if (this._hasFinitenessFallbackWindow()) {
|
|
272
|
+
return retryFn();
|
|
273
|
+
}
|
|
274
|
+
this.#state.kvCache?.truncate(this.#state.currentSeqLen);
|
|
275
|
+
const original = this._beginFinitenessFallback(opts, reasonLabel);
|
|
276
|
+
try {
|
|
277
|
+
return await retryFn();
|
|
278
|
+
} finally {
|
|
279
|
+
this._endFinitenessFallback(opts, original);
|
|
280
|
+
}
|
|
281
|
+
}
|
|
282
|
+
|
|
283
|
+
// Retry a failed decode step on the fallback execution plan. With a
// single-token window the fallback covers only this step; with a larger
// configured window the fallback plan is kept active for the next
// `windowTokens` tokens (drained elsewhere by _consumeFinitenessFallbackToken).
// Note the asymmetry: on success the window is intentionally left open; only
// a failing retry closes it immediately.
async _retryDecodeStepWithFinitenessWindow(generatedIds, opts, reasonLabel) {
  const windowTokens = this._resolveDeferredRoundingWindowTokens();
  if (windowTokens <= 1) {
    return this._retryWithFinitenessFallback(
      opts,
      reasonLabel,
      () => this._decodeStep(generatedIds, opts)
    );
  }

  // Rewind KV entries from the failed attempt, then open the window.
  this.#state.kvCache?.truncate(this.#state.currentSeqLen);
  this._openFinitenessFallbackWindow(opts, reasonLabel, windowTokens);
  try {
    return await this._decodeStep(generatedIds, opts);
  } catch (error) {
    // The retry itself failed: close the window so later steps do not stay
    // pinned to the fallback plan.
    this._closeFinitenessFallbackWindow(opts);
    throw error;
  }
}
|
|
302
|
+
|
|
303
|
+
// ==========================================================================
|
|
304
|
+
// Generation Public API
|
|
305
|
+
// ==========================================================================
|
|
306
|
+
|
|
307
|
+
|
|
308
|
+
async *generate(prompt, options = {}) {
|
|
309
|
+
if (!this.#state.isLoaded) throw new Error('Model not loaded');
|
|
310
|
+
if (this.#state.isGenerating) throw new Error('Generation already in progress');
|
|
311
|
+
|
|
312
|
+
validateCallTimeOptions(options);
|
|
313
|
+
|
|
314
|
+
this.#state.isGenerating = true;
|
|
315
|
+
this.#state.decodeStepCount = 0;
|
|
316
|
+
this.#state.disableRecordedLogits = false;
|
|
317
|
+
this.#state.disableFusedDecode = false;
|
|
318
|
+
resetActiveExecutionPlan(this.#state);
|
|
319
|
+
this.#state.decodeRing?.reset();
|
|
320
|
+
this.#state.stats.gpuTimePrefillMs = undefined;
|
|
321
|
+
this.#state.stats.gpuTimeDecodeMs = undefined;
|
|
322
|
+
this.#state.stats.decodeRecordMs = 0;
|
|
323
|
+
this.#state.stats.decodeSubmitWaitMs = 0;
|
|
324
|
+
this.#state.stats.decodeReadbackWaitMs = 0;
|
|
325
|
+
this.#state.stats.ttftMs = 0;
|
|
326
|
+
const startTime = performance.now();
|
|
327
|
+
|
|
328
|
+
const opts = resolveGenerateOptions(this.#state, options);
|
|
329
|
+
|
|
330
|
+
if (opts.debug) {
|
|
331
|
+
log.debug('Pipeline', `ChatTemplate: options=${options.useChatTemplate}, final=${opts.useChatTemplate}`);
|
|
332
|
+
}
|
|
333
|
+
|
|
334
|
+
try {
|
|
335
|
+
const processedPrompt = resolvePromptInput(this.#state, prompt, opts.useChatTemplate, 'generate');
|
|
336
|
+
if (opts.debug && opts.useChatTemplate) {
|
|
337
|
+
log.debug('Pipeline', `Applied ${this.#state.modelConfig.chatTemplateType} chat template`);
|
|
338
|
+
}
|
|
339
|
+
|
|
340
|
+
const inputIds = this.#state.tokenizer.encode(processedPrompt);
|
|
341
|
+
this._assertTokenIdsInRange(inputIds, 'generate.encode');
|
|
342
|
+
const generatedIds = [...inputIds];
|
|
343
|
+
this.#state.stats.prefillTokens = inputIds.length;
|
|
344
|
+
|
|
345
|
+
if (opts.debug) {
|
|
346
|
+
log.debug('Pipeline', `Input: ${inputIds.length} tokens`);
|
|
347
|
+
}
|
|
348
|
+
|
|
349
|
+
const prefillStart = performance.now();
|
|
350
|
+
let prefillLogits;
|
|
351
|
+
try {
|
|
352
|
+
prefillLogits = await this._prefill(inputIds, opts);
|
|
353
|
+
} catch (error) {
|
|
354
|
+
if (error.name === 'FinitenessError') {
|
|
355
|
+
log.warn('Pipeline', `FinitenessGuard caught NaN/Inf during prefill. Retrying with F32 precision.`);
|
|
356
|
+
prefillLogits = await this._retryWithFinitenessFallback(
|
|
357
|
+
opts,
|
|
358
|
+
'prefill',
|
|
359
|
+
() => this._prefill(inputIds, opts)
|
|
360
|
+
);
|
|
361
|
+
} else {
|
|
362
|
+
throw error;
|
|
363
|
+
}
|
|
364
|
+
}
|
|
365
|
+
this.#state.stats.prefillTimeMs = performance.now() - prefillStart;
|
|
366
|
+
|
|
367
|
+
const intentBundleConfig = this.#state.runtimeConfig.shared.intentBundle;
|
|
368
|
+
const intentBundle = intentBundleConfig?.bundle;
|
|
369
|
+
const expectedTopK = intentBundle?.payload?.expectedTopK
|
|
370
|
+
?? intentBundle?.payload?.expected_top_k;
|
|
371
|
+
const maxDriftThreshold = intentBundle?.constraints?.maxDriftThreshold
|
|
372
|
+
?? intentBundle?.constraints?.max_drift_threshold;
|
|
373
|
+
|
|
374
|
+
if (intentBundleConfig?.enabled && Array.isArray(expectedTopK) && expectedTopK.length > 0) {
|
|
375
|
+
const actualTopK = getTopK(
|
|
376
|
+
prefillLogits,
|
|
377
|
+
expectedTopK.length,
|
|
378
|
+
(tokens) => resolveTokenText(this.#state.tokenizer, tokens),
|
|
379
|
+
).map((token) => token.token);
|
|
380
|
+
const driftResult = enforceLogitDrift(expectedTopK, actualTopK, maxDriftThreshold);
|
|
381
|
+
if (!driftResult.ok) {
|
|
382
|
+
throw new Error(`Intent bundle drift check failed: ${driftResult.reason}`);
|
|
383
|
+
}
|
|
384
|
+
}
|
|
385
|
+
|
|
386
|
+
applyRepetitionPenalty(prefillLogits, generatedIds, opts.repetitionPenalty);
|
|
387
|
+
const padTokenId = this.#state.tokenizer?.getSpecialTokens?.()?.pad;
|
|
388
|
+
|
|
389
|
+
if (opts.debug) {
|
|
390
|
+
const topAfterPenalty = getTopK(
|
|
391
|
+
prefillLogits,
|
|
392
|
+
5,
|
|
393
|
+
(tokens) => resolveTokenText(this.#state.tokenizer, tokens)
|
|
394
|
+
);
|
|
395
|
+
log.debug('Pipeline', `After rep penalty top-5: ${topAfterPenalty.map(t => `"${t.text}"(${(t.prob * 100).toFixed(1)}%)`).join(', ')}`);
|
|
396
|
+
}
|
|
397
|
+
|
|
398
|
+
const firstToken = sample(prefillLogits, {
|
|
399
|
+
temperature: opts.temperature,
|
|
400
|
+
topP: opts.topP,
|
|
401
|
+
topK: opts.topK,
|
|
402
|
+
padTokenId,
|
|
403
|
+
seed: opts.seed,
|
|
404
|
+
});
|
|
405
|
+
|
|
406
|
+
if (opts.debug) {
|
|
407
|
+
const firstTokenText = resolveTokenText(this.#state.tokenizer, [firstToken], `[${firstToken}]`, (tokens) => this.#state.tokenizer?.decode?.(tokens, true, false));
|
|
408
|
+
log.debug('Pipeline', `First token sampled: id=${firstToken} text="${firstTokenText}"`);
|
|
409
|
+
}
|
|
410
|
+
|
|
411
|
+
generatedIds.push(firstToken);
|
|
412
|
+
this.#state.stats.ttftMs = performance.now() - startTime;
|
|
413
|
+
|
|
414
|
+
const decodeToken = (tokenId) => resolveTokenText(
|
|
415
|
+
this.#state.tokenizer,
|
|
416
|
+
[tokenId],
|
|
417
|
+
`[${tokenId}]`,
|
|
418
|
+
(tokens) => this.#state.tokenizer?.decode?.(tokens, true, false),
|
|
419
|
+
(tokens) => this.#state.tokenizer?.decode?.(tokens, false, false)
|
|
420
|
+
);
|
|
421
|
+
|
|
422
|
+
const firstText = decodeToken(firstToken);
|
|
423
|
+
yield firstText;
|
|
424
|
+
if (options.onToken) options.onToken(firstToken, firstText);
|
|
425
|
+
|
|
426
|
+
yield* this._runDecodeLoop(generatedIds, opts, options, {
|
|
427
|
+
stopTokenIds: this.#state.modelConfig.stopTokenIds,
|
|
428
|
+
eosToken: this.#state.tokenizer.getSpecialTokens?.()?.eos,
|
|
429
|
+
stopSequenceStart: inputIds.length,
|
|
430
|
+
decodeToken,
|
|
431
|
+
logBatchPath: opts.debug,
|
|
432
|
+
});
|
|
433
|
+
const tokensGenerated = this.#state.stats.decodeTokens;
|
|
434
|
+
this.#state.stats.totalTimeMs = performance.now() - startTime;
|
|
435
|
+
|
|
436
|
+
if (opts.debug) {
|
|
437
|
+
log.debug('Pipeline', `Generated ${tokensGenerated} tokens in ${this.#state.stats.totalTimeMs.toFixed(0)}ms`);
|
|
438
|
+
}
|
|
439
|
+
|
|
440
|
+
const ttft = this.#state.stats.ttftMs ?? this.#state.stats.prefillTimeMs;
|
|
441
|
+
const decodeTokens = Math.max(0, tokensGenerated - 1);
|
|
442
|
+
const decodeSpeed = decodeTokens > 0 ? (decodeTokens / this.#state.stats.decodeTimeMs * 1000) : 0;
|
|
443
|
+
if (opts.benchmark) {
|
|
444
|
+
log.info('Benchmark', `TTFT: ${ttft.toFixed(0)}ms | Prefill: ${this.#state.stats.prefillTimeMs.toFixed(0)}ms | Decode: ${this.#state.stats.decodeTimeMs.toFixed(0)}ms (${decodeTokens} tokens @ ${decodeSpeed.toFixed(1)} tok/s)`);
|
|
445
|
+
} else {
|
|
446
|
+
log.info('Perf', `TTFT: ${ttft.toFixed(0)}ms | Prefill: ${this.#state.stats.prefillTimeMs.toFixed(0)}ms | Decode: ${this.#state.stats.decodeTimeMs.toFixed(0)}ms (${decodeTokens} tokens @ ${decodeSpeed.toFixed(1)} tok/s)`);
|
|
447
|
+
}
|
|
448
|
+
trace.perf('Decode summary', {
|
|
449
|
+
ttftMs: ttft,
|
|
450
|
+
prefillMs: this.#state.stats.prefillTimeMs,
|
|
451
|
+
decodeMs: this.#state.stats.decodeTimeMs,
|
|
452
|
+
decodeTokens,
|
|
453
|
+
decodeSpeed,
|
|
454
|
+
totalMs: this.#state.stats.totalTimeMs,
|
|
455
|
+
});
|
|
456
|
+
} finally {
|
|
457
|
+
this._closeFinitenessFallbackWindow(opts);
|
|
458
|
+
resetActiveExecutionPlan(this.#state);
|
|
459
|
+
this.#state.isGenerating = false;
|
|
460
|
+
}
|
|
461
|
+
}
|
|
462
|
+
|
|
463
|
+
|
|
464
|
+
// Run prefill for `prompt` and return a reusable KV-cache snapshot without
// sampling any token. The returned { cache, seqLen, tokens, linearAttention }
// can later seed generateWithPrefixKV. Finiteness-guard failures are retried
// once on the F32 fallback plan.
async prefillKVOnly(prompt, options = {}) {
  if (!this.#state.isLoaded) throw new Error('Model not loaded');
  resetActiveExecutionPlan(this.#state);
  this.#state.stats.gpuTimePrefillMs = undefined;
  const opts = resolvePrefillOptions(this.#state, options);

  const processedPrompt = resolvePromptInput(this.#state, prompt, opts.useChatTemplate, 'prefillKVOnly');

  const inputIds = this.#state.tokenizer.encode(processedPrompt);
  this._assertTokenIdsInRange(inputIds, 'prefillKVOnly.encode');
  if (opts.debug) {
    log.debug('Pipeline', `PrefillKVOnly: ${inputIds.length} tokens`);
  }

  // Prefill to hidden states, retrying once on the fallback plan if the
  // finiteness guard detects NaN/Inf.
  let prefillResult;
  try {
    prefillResult = await this._prefillToHidden(inputIds, opts);
  } catch (error) {
    if (error.name === 'FinitenessError') {
      log.warn('Pipeline', `FinitenessGuard caught NaN/Inf during prefillKVOnly. Retrying with F32 precision.`);
      prefillResult = await this._retryWithFinitenessFallback(
        opts,
        'prefillKVOnly',
        () => this._prefillToHidden(inputIds, opts)
      );
    } else {
      throw error;
    }
  }

  const {
    numTokens,
    startPos,
    currentRecorder,
    recordProfile,
    currentHiddenBuffer,
  } = prefillResult;

  // Ensure prefill work completes before returning a usable snapshot.
  if (currentRecorder) {
    await currentRecorder.submitAndWait();
    await recordProfile(currentRecorder);
  } else {
    // No recorder in play: wait on the device queue directly.
    const device = getDevice();
    if (device) {
      await device.queue.onSubmittedWorkDone();
    }
  }

  this.#state.currentSeqLen = startPos + numTokens;
  // Hidden states are not needed by this entry point; return the buffer to the pool.
  releaseBuffer(currentHiddenBuffer);

  const snapshot = this.#state.kvCache?.clone();
  if (!snapshot) {
    throw new Error('KV cache unavailable after prefill');
  }

  return {
    cache: snapshot,
    seqLen: this.#state.currentSeqLen,
    tokens: inputIds,
    linearAttention: await cloneLinearAttentionRuntime(this.#state.linearAttentionRuntime),
  };
}
|
|
528
|
+
|
|
529
|
+
// Run prefill and additionally read back the final hidden states to compute a
// prompt embedding (pooled per opts.embeddingMode). Returns the same snapshot
// shape as prefillKVOnly plus { embedding, embeddingMode }. Requires GPU
// readback to be permitted by the perf guards.
async prefillWithEmbedding(prompt, options = {}) {
  if (!this.#state.isLoaded) throw new Error('Model not loaded');
  resetActiveExecutionPlan(this.#state);
  this.#state.stats.gpuTimePrefillMs = undefined;
  const opts = resolvePrefillEmbeddingOptions(this.#state, options);

  const processedPrompt = resolvePromptInput(this.#state, prompt, opts.useChatTemplate, 'prefillWithEmbedding');

  const inputIds = this.#state.tokenizer.encode(processedPrompt);
  this._assertTokenIdsInRange(inputIds, 'prefillWithEmbedding.encode');
  if (opts.debug) {
    log.debug('Pipeline', `PrefillWithEmbedding: ${inputIds.length} tokens (mode=${opts.embeddingMode})`);
  }

  // Prefill to hidden states, retrying once on the fallback plan if the
  // finiteness guard detects NaN/Inf.
  let prefillResult;
  try {
    prefillResult = await this._prefillToHidden(inputIds, opts);
  } catch (error) {
    if (error.name === 'FinitenessError') {
      log.warn('Pipeline', `FinitenessGuard caught NaN/Inf during prefillWithEmbedding. Retrying with F32 precision.`);
      prefillResult = await this._retryWithFinitenessFallback(
        opts,
        'prefillWithEmbedding',
        () => this._prefillToHidden(inputIds, opts)
      );
    } else {
      throw error;
    }
  }

  const {
    numTokens,
    config,
    startPos,
    activationDtype,
    activationBytes,
    currentRecorder,
    recordProfile,
    currentHiddenBuffer,
  } = prefillResult;

  // Ensure prefill work completes before readback.
  if (currentRecorder) {
    await currentRecorder.submitAndWait();
    await recordProfile(currentRecorder);
  } else {
    const device = getDevice();
    if (device) {
      await device.queue.onSubmittedWorkDone();
    }
  }

  if (!allowReadback('pipeline.prefill.embedding')) {
    throw new Error('GPU readback disabled; cannot return embedding');
  }

  let embedding;
  try {
    const hiddenSize = config.hiddenSize;
    const hiddenBytes = numTokens * hiddenSize * activationBytes;
    const hiddenData = await readBuffer(currentHiddenBuffer, hiddenBytes);
    // An empty readback is treated the same as readback being disabled.
    if (hiddenData.byteLength === 0) {
      throw new Error('GPU readback disabled; cannot return embedding');
    }
    const hiddenStates = decodeReadback(hiddenData, activationDtype);
    const finalNormWeights = await this._getFinalNormWeights();
    embedding = this._extractEmbeddingFromHidden(
      hiddenStates,
      numTokens,
      hiddenSize,
      opts.embeddingMode,
      finalNormWeights,
      config
    );
  } finally {
    // Always return the hidden buffer to the pool, even if readback failed.
    releaseBuffer(currentHiddenBuffer);
  }

  this.#state.currentSeqLen = startPos + numTokens;

  const snapshot = this.#state.kvCache?.clone();
  if (!snapshot) {
    throw new Error('KV cache unavailable after prefill');
  }

  return {
    cache: snapshot,
    seqLen: this.#state.currentSeqLen,
    tokens: inputIds,
    embedding,
    embeddingMode: opts.embeddingMode,
    linearAttention: await cloneLinearAttentionRuntime(this.#state.linearAttentionRuntime),
  };
}
|
|
623
|
+
|
|
624
|
+
async prefillWithLogits(prompt, options = {}) {
|
|
625
|
+
if (!this.#state.isLoaded) throw new Error('Model not loaded');
|
|
626
|
+
resetActiveExecutionPlan(this.#state);
|
|
627
|
+
this.#state.stats.gpuTimePrefillMs = undefined;
|
|
628
|
+
const opts = resolvePrefillOptions(this.#state, options);
|
|
629
|
+
|
|
630
|
+
const processedPrompt = resolvePromptInput(this.#state, prompt, opts.useChatTemplate, 'prefillWithLogits');
|
|
631
|
+
|
|
632
|
+
const inputIds = this.#state.tokenizer.encode(processedPrompt);
|
|
633
|
+
this._assertTokenIdsInRange(inputIds, 'prefillWithLogits.encode');
|
|
634
|
+
if (opts.debug) {
|
|
635
|
+
log.debug('Pipeline', `PrefillWithLogits: ${inputIds.length} tokens`);
|
|
636
|
+
}
|
|
637
|
+
|
|
638
|
+
const logits = await this._prefill(inputIds, opts);
|
|
639
|
+
|
|
640
|
+
const snapshot = this.#state.kvCache?.clone();
|
|
641
|
+
if (!snapshot) {
|
|
642
|
+
throw new Error('KV cache unavailable after prefill');
|
|
643
|
+
}
|
|
644
|
+
|
|
645
|
+
return {
|
|
646
|
+
cache: snapshot,
|
|
647
|
+
seqLen: this.#state.currentSeqLen,
|
|
648
|
+
tokens: inputIds,
|
|
649
|
+
logits,
|
|
650
|
+
linearAttention: await cloneLinearAttentionRuntime(this.#state.linearAttentionRuntime),
|
|
651
|
+
};
|
|
652
|
+
}
|
|
653
|
+
|
|
654
|
+
|
|
655
|
+
async *generateWithPrefixKV(prefix, prompt, options = {}) {
|
|
656
|
+
if (!this.#state.isLoaded) throw new Error('Model not loaded');
|
|
657
|
+
if (this.#state.isGenerating) throw new Error('Generation already in progress');
|
|
658
|
+
|
|
659
|
+
validateCallTimeOptions(options);
|
|
660
|
+
|
|
661
|
+
// Apply snapshot
|
|
662
|
+
this.#state.kvCache = prefix.cache.clone();
|
|
663
|
+
if (this.#state.useGPU && this.#state.kvCache) {
|
|
664
|
+
const device = getDevice();
|
|
665
|
+
if (device) {
|
|
666
|
+
this.#state.kvCache.setGPUContext({ device });
|
|
667
|
+
}
|
|
668
|
+
}
|
|
669
|
+
if (
|
|
670
|
+
hasLinearAttentionLayers(this.#state.modelConfig.layerTypes)
|
|
671
|
+
&& prefix.linearAttention == null
|
|
672
|
+
) {
|
|
673
|
+
throw new Error(
|
|
674
|
+
'Prefix snapshot is missing linear_attention recurrent state. ' +
|
|
675
|
+
'Regenerate the prefix snapshot using the current runtime.'
|
|
676
|
+
);
|
|
677
|
+
}
|
|
678
|
+
this.#state.linearAttentionRuntime = restoreLinearAttentionRuntime(
|
|
679
|
+
this.#state.linearAttentionRuntime,
|
|
680
|
+
prefix.linearAttention ?? null
|
|
681
|
+
);
|
|
682
|
+
this.#state.currentSeqLen = prefix.seqLen;
|
|
683
|
+
|
|
684
|
+
this.#state.isGenerating = true;
|
|
685
|
+
this.#state.decodeStepCount = 0;
|
|
686
|
+
resetActiveExecutionPlan(this.#state);
|
|
687
|
+
this.#state.stats.gpuTimePrefillMs = undefined;
|
|
688
|
+
this.#state.stats.gpuTimeDecodeMs = undefined;
|
|
689
|
+
this.#state.decodeRing?.reset();
|
|
690
|
+
this.#state.stats.decodeRecordMs = 0;
|
|
691
|
+
this.#state.stats.decodeSubmitWaitMs = 0;
|
|
692
|
+
this.#state.stats.decodeReadbackWaitMs = 0;
|
|
693
|
+
this.#state.stats.ttftMs = 0;
|
|
694
|
+
const startTime = performance.now();
|
|
695
|
+
|
|
696
|
+
const opts = resolveGenerateOptions(this.#state, options);
|
|
697
|
+
|
|
698
|
+
try {
|
|
699
|
+
const processedPrompt = resolvePromptInput(this.#state, prompt, opts.useChatTemplate, 'generateWithPrefixKV');
|
|
700
|
+
|
|
701
|
+
const inputIds = this.#state.tokenizer.encode(processedPrompt);
|
|
702
|
+
this._assertTokenIdsInRange(inputIds, 'generateWithPrefixKV.encode');
|
|
703
|
+
const generatedIds = [...prefix.tokens, ...inputIds];
|
|
704
|
+
const promptTokenCount = generatedIds.length;
|
|
705
|
+
this.#state.stats.prefillTokens = inputIds.length;
|
|
706
|
+
|
|
707
|
+
const prefillStart = performance.now();
|
|
708
|
+
const prefillLogits = await this._prefill(inputIds, opts);
|
|
709
|
+
this.#state.stats.prefillTimeMs = performance.now() - prefillStart;
|
|
710
|
+
|
|
711
|
+
applyRepetitionPenalty(prefillLogits, generatedIds, opts.repetitionPenalty);
|
|
712
|
+
const padTokenId = this.#state.tokenizer?.getSpecialTokens?.()?.pad;
|
|
713
|
+
const firstToken = sample(prefillLogits, {
|
|
714
|
+
temperature: opts.temperature,
|
|
715
|
+
topP: opts.topP,
|
|
716
|
+
topK: opts.topK,
|
|
717
|
+
padTokenId,
|
|
718
|
+
seed: opts.seed,
|
|
719
|
+
});
|
|
720
|
+
|
|
721
|
+
generatedIds.push(firstToken);
|
|
722
|
+
this.#state.stats.ttftMs = performance.now() - startTime;
|
|
723
|
+
|
|
724
|
+
const firstText = resolveTokenText(
|
|
725
|
+
this.#state.tokenizer,
|
|
726
|
+
[firstToken],
|
|
727
|
+
`[${firstToken}]`,
|
|
728
|
+
(tokens) => this.#state.tokenizer?.decode?.(tokens, true, false),
|
|
729
|
+
(tokens) => this.#state.tokenizer?.decode?.(tokens, false, false)
|
|
730
|
+
);
|
|
731
|
+
yield firstText;
|
|
732
|
+
if (options.onToken) options.onToken(firstToken, firstText);
|
|
733
|
+
|
|
734
|
+
yield* this._runDecodeLoop(generatedIds, opts, options, {
|
|
735
|
+
stopTokenIds: this.#state.modelConfig.stopTokenIds,
|
|
736
|
+
eosToken: this.#state.tokenizer.getSpecialTokens?.()?.eos,
|
|
737
|
+
stopSequenceStart: promptTokenCount,
|
|
738
|
+
decodeToken: (tokenId) => this.#state.tokenizer.decode([tokenId], true, false),
|
|
739
|
+
logBatchPath: false,
|
|
740
|
+
});
|
|
741
|
+
this.#state.stats.totalTimeMs = performance.now() - startTime;
|
|
742
|
+
} finally {
|
|
743
|
+
this._closeFinitenessFallbackWindow(opts);
|
|
744
|
+
resetActiveExecutionPlan(this.#state);
|
|
745
|
+
this.#state.isGenerating = false;
|
|
746
|
+
}
|
|
747
|
+
}
|
|
748
|
+
|
|
749
|
+
// ==========================================================================
|
|
750
|
+
// Internal Methods (Prefill, Decode, Helpers)
|
|
751
|
+
// ==========================================================================
|
|
752
|
+
|
|
753
|
+
/**
 * Core decode loop shared by the generation entry points.
 *
 * Streams decoded token text via `yield`, pushing each sampled token id onto
 * `generatedIds` (mutated in place). Two paths are used:
 *  - a batched GPU path (`_generateNTokensGPU`) when `shouldUseBatchDecode`
 *    allows it, and
 *  - a single-token path (`_decodeStep`) otherwise, or after a batch failure.
 *
 * @param {number[]} generatedIds - prompt + generated token ids; appended to.
 * @param {object} opts - resolved generation options (maxTokens, stopSequences,
 *   executionPlan, debug/benchmark flags, ...).
 * @param {object} options - raw caller options (signal, onToken, onBatch).
 * @param {object} runtime - per-call wiring: stopTokenIds, eosToken,
 *   stopSequenceStart (index where generated text begins for stop-sequence
 *   matching), decodeToken (id -> text), logBatchPath.
 */
async *_runDecodeLoop(generatedIds, opts, options, runtime) {
  const {
    stopTokenIds,
    eosToken,
    stopSequenceStart,
    decodeToken,
    logBatchPath = false,
  } = runtime;

  // The first token was already produced by the caller (prefill + sample).
  let tokensGenerated = 1;
  markKernelCacheWarmed();

  const decodeStart = performance.now();
  // CPU-resident lm_head/embed weights disqualify GPU sampling below.
  const lmHead = this.#state.weights.get('lm_head');
  const embedBuffer = this.#state.weights.get('embed');
  const hasCpuWeights = isCpuWeightBuffer(lmHead)
    || isCpuWeightBuffer(embedBuffer)
    || lmHead instanceof Float32Array
    || embedBuffer instanceof Float32Array;
  const hasLinearLayers = hasLinearAttentionLayers(this.#state.modelConfig.layerTypes);
  const gpuSamplingAvailable = isGPUSamplingAvailable() && !hasCpuWeights;
  const executionPlan = opts.executionPlan;
  let useBatchPath = shouldUseBatchDecode({
    batchSize: executionPlan.batchSize,
    useGPU: this.#state.useGPU,
    gpuSamplingAvailable,
    disableMultiTokenDecode: executionPlan.disableMultiTokenDecode,
    disableCommandBatching: executionPlan.disableCommandBatching,
    isBdpaPagedLayout: this.#state.kvCache?.layout === 'bdpa_paged',
    finitenessFallbackWindowOpen: this._hasFinitenessFallbackWindow(),
  });
  // Linear-attention layers always take the single-token path.
  if (hasLinearLayers) {
    useBatchPath = false;
  }
  // readbackInterval multiplies how many batches run before a CPU readback.
  const readbackInterval = executionPlan.readbackInterval;
  const intervalBatches = readbackInterval == null ? 1 : readbackInterval;

  if (logBatchPath && useBatchPath) {
    log.debug(
      'Pipeline',
      `Using batch decode path with batchSize=${executionPlan.batchSize}, stopCheckMode=${executionPlan.stopCheckMode}, readbackInterval=${readbackInterval}`
    );
  }

  while (tokensGenerated < opts.maxTokens) {
    if (options.signal?.aborted) break;
    // An open finiteness-fallback window forces single-token decoding.
    if (this._hasFinitenessFallbackWindow() && useBatchPath) {
      useBatchPath = false;
    }

    if (useBatchPath) {
      const remaining = opts.maxTokens - tokensGenerated;
      const thisBatchSize = Math.min(executionPlan.batchSize * intervalBatches, remaining);
      const lastToken = generatedIds[generatedIds.length - 1];

      try {
        const batchResult = await this._generateNTokensGPU(lastToken, thisBatchSize, generatedIds, opts);
        // Stream every token in the batch; fire onBatch per executionPlan.batchSize chunk.
        let batchTokens = [];
        for (const tokenId of batchResult.tokens) {
          generatedIds.push(tokenId);
          tokensGenerated++;
          const tokenText = decodeToken(tokenId);
          yield tokenText;
          if (options.onToken) options.onToken(tokenId, tokenText);
          batchTokens.push({ id: tokenId, text: tokenText });
          if (batchTokens.length === executionPlan.batchSize) {
            if (options.onBatch) options.onBatch(batchTokens);
            batchTokens = [];
          }
        }
        if (batchTokens.length > 0 && options.onBatch) options.onBatch(batchTokens);
        // A short batch means the GPU path hit a stop condition — end generation.
        if (batchResult.actualCount < thisBatchSize) break;
        if (opts.stopSequences.length > 0) {
          const fullText = this.#state.tokenizer.decode(generatedIds.slice(stopSequenceStart), false);
          if (opts.stopSequences.some((seq) => fullText.endsWith(seq))) break;
        }
      } catch (error) {
        // Batch path failed: permanently switch to single-token decoding and
        // recover this step with one _decodeStep call.
        log.warn('Pipeline', `Batch decode failed, falling back to single-token: ${error}`);
        useBatchPath = false;
        let nextToken;
        try {
          nextToken = await this._decodeStep(generatedIds, opts);
        } catch (singleTokenError) {
          if (singleTokenError.name === 'FinitenessError') {
            // NaN/Inf detected: retry this token inside an F32 fallback window.
            log.warn('Pipeline', `FinitenessGuard caught NaN/Inf at batch step ${tokensGenerated}. Truncating KV cache and retrying token with F32 precision.`);
            nextToken = await this._retryDecodeStepWithFinitenessWindow(
              generatedIds,
              opts,
              `decode-batch-step-${tokensGenerated}`
            );
          } else {
            throw singleTokenError;
          }
        }
        generatedIds.push(nextToken);
        tokensGenerated++;
        const tokenText = decodeToken(nextToken);
        yield tokenText;
        if (options.onToken) options.onToken(nextToken, tokenText);
        this._consumeFinitenessFallbackToken(opts);
        if (isStopToken(nextToken, stopTokenIds, eosToken)) break;
      }
    } else {
      // Single-token path: one decode step, with finiteness retry on failure.
      const tokenStart = performance.now();
      let nextToken;
      try {
        nextToken = await this._decodeStep(generatedIds, opts);
      } catch (error) {
        if (error.name === 'FinitenessError') {
          log.warn('Pipeline', `FinitenessGuard caught NaN/Inf at step ${tokensGenerated}. Truncating KV cache and retrying token with F32 precision.`);
          nextToken = await this._retryDecodeStepWithFinitenessWindow(
            generatedIds,
            opts,
            `decode-step-${tokensGenerated}`
          );
        } else {
          throw error;
        }
      }
      const tokenTime = performance.now() - tokenStart;
      generatedIds.push(nextToken);
      tokensGenerated++;
      const tokenText = decodeToken(nextToken);
      yield tokenText;
      if (options.onToken) options.onToken(nextToken, tokenText);
      this._consumeFinitenessFallbackToken(opts);

      if (opts.debug || opts.benchmark) {
        const elapsedMs = performance.now() - decodeStart;
        const tokPerSec = (tokensGenerated / elapsedMs) * 1000;
        log.debug('Decode', `#${tokensGenerated} "${tokenText}" ${tokenTime.toFixed(0)}ms (${tokPerSec.toFixed(2)} tok/s avg)`);
      }

      if (isStopToken(nextToken, stopTokenIds, eosToken)) break;
      if (opts.stopSequences.length > 0) {
        const fullText = this.#state.tokenizer.decode(generatedIds.slice(stopSequenceStart), false);
        if (opts.stopSequences.some((seq) => fullText.endsWith(seq))) break;
      }
    }
  }

  // Record decode-phase stats (tokensGenerated includes the caller's first token).
  this.#state.stats.decodeTimeMs = performance.now() - decodeStart;
  this.#state.stats.tokensGenerated = tokensGenerated;
  this.#state.stats.decodeTokens = tokensGenerated;
}
|
|
898
|
+
|
|
899
|
+
async _prefillToHidden(inputIds, opts) {
|
|
900
|
+
// Internal-only: reuse the main prefill implementation but stop before logits.
|
|
901
|
+
return this._prefill(inputIds, { ...opts, _returnHidden: true });
|
|
902
|
+
}
|
|
903
|
+
|
|
904
|
+
|
|
905
|
+
/**
 * Prefill: embed `inputIds`, run every transformer layer, then (unless
 * `opts._returnHidden` is set) project the last position to logits.
 *
 * @param {number[]} inputIds - prompt token ids for this prefill call.
 * @param {object} opts - resolved options (debug, profile, debugLayers,
 *   executionPlan, disableCommandBatching, _returnHidden, ...).
 * @returns Float32-like logits for the last position, or (when
 *   `_returnHidden`) a bundle {numTokens, config, startPos, activationDtype,
 *   activationBytes, currentRecorder, recordProfile, debugCheckBuffer,
 *   currentHiddenBuffer} for callers that finish the pipeline themselves.
 * @throws {FinitenessError} if the GPU finiteness guard tripped during layers.
 * @throws {Error} if the embed weight is missing/unsupported, a layer returns
 *   a non-GPUBuffer, or fallback logits are still non-finite.
 */
async _prefill(inputIds, opts) {
  const numTokens = inputIds.length;
  const config = this.#state.modelConfig;
  const startPos = this.#state.currentSeqLen;
  const returnHidden = opts?._returnHidden === true;
  // Cleared up-front; only set again below if profiling produced timings.
  this.#state.stats.gpuTimePrefillMs = undefined;

  // Fresh sequence with linear-attention layers: reset their runtime state.
  if (startPos === 0 && hasLinearAttentionLayers(config.layerTypes)) {
    this.#state.linearAttentionRuntime = resetLinearAttentionRuntime(this.#state.linearAttentionRuntime);
  }

  // Resolve the embedding weight; it may be a raw GPUBuffer, a wrapped
  // weight buffer, a CPU weight buffer, or a plain Float32Array.
  const embedBufferRaw = this.#state.weights.get('embed');
  if (!(embedBufferRaw instanceof GPUBuffer) && !isWeightBuffer(embedBufferRaw) && !isCpuWeightBuffer(embedBufferRaw) && !(embedBufferRaw instanceof Float32Array)) {
    throw new Error('Embed buffer not found or not a supported buffer type');
  }
  const embedBuffer = isWeightBuffer(embedBufferRaw) ? embedBufferRaw.buffer : embedBufferRaw;
  const embedDtype = isWeightBuffer(embedBufferRaw)
    ? getWeightDtype(embedBufferRaw)
    : isCpuWeightBuffer(embedBufferRaw)
      ? embedBufferRaw.dtype
      : null;
  if (opts.debug) {
    const embedSize = embedBuffer instanceof GPUBuffer ? embedBuffer.size : 'N/A';
    log.debug('Pipeline', `Embed buffer: type=${embedBuffer?.constructor?.name}, size=${embedSize}, dtype=${embedDtype}`);
  }

  const device = getDevice();
  const useCheckpoints = opts.debugLayers && opts.debugLayers.length > 0;
  // Command batching is disabled for debug runs and the bdpa_paged KV layout.
  const disableCommandBatching = opts.disableCommandBatching === true
    || opts.debug === true
    || this.#state.kvCache?.layout === 'bdpa_paged';
  const createRecorder = (label) => {
    if (!device || disableCommandBatching) return undefined;
    return opts.profile ? createProfilingRecorder(label) : createCommandRecorder(label);
  };
  const recorder = createRecorder('prefill');
  const debugCheckBuffer = this.#state.debug
    ? (buffer, label, numTokens, expectedDim) =>
        debugCheckBufferHelper(this.#state, buffer, label, numTokens, expectedDim)
    : undefined;
  const context = buildLayerContext(
    this.#state,
    recorder,
    false,
    opts.debugLayers,
    debugCheckBuffer,
    opts.executionPlan
  );
  context.currentTokenIds = inputIds;
  // Accumulates GPU profile timings across recorder segments.
  let gpuTimePrefillMs = 0;
  let hasGpuTimePrefill = false;
  const recordProfile = async (rec) => {
    if (!opts.profile || !rec?.isProfilingEnabled()) return;
    const timings = await rec.resolveProfileTimings();
    const total = sumProfileTimings(timings);
    if (total !== null) {
      gpuTimePrefillMs += total;
      hasGpuTimePrefill = true;
    }
    if (timings) {
      log.warn('Profile', `Prefill (${rec.label}):`);
      log.warn('Profile', CommandRecorder.formatProfileReport(timings));
    }
  };

  // Debug runs also track queue submits to report submit counts afterwards.
  const benchmarkSubmits = opts.debug;
  if (benchmarkSubmits) {
    setTrackSubmits(true);
    resetSubmitStats();
  }

  const activationDtype = opts.executionPlan?.activationDtype ?? this._getEffectiveActivationDtype();
  const activationBytes = selectRuleValue('shared', 'dtype', 'bytesFromDtype', { dtype: activationDtype });
  let hiddenStates = await embed(inputIds, embedBuffer, {
    hiddenSize: config.hiddenSize,
    vocabSize: config.vocabSize,
    scaleEmbeddings: config.scaleEmbeddings,
    debug: opts.debug,
    recorder,
    transpose: this.#state.embeddingTranspose,
    debugProbes: this.#state.runtimeConfig.shared.debug.probes,
    activationDtype,
    embeddingDtype: selectRuleValue('inference', 'dtype', 'f16OrF32FromDtype', { dtype: embedDtype }),
  });

  // Debug: read back a small sample of the embedded activations.
  if (opts.debug && hiddenStates instanceof GPUBuffer) {
    if (recorder) {
      await recorder.submitAndWait();
      await recordProfile(recorder);
    }
    const debugReadbackSize = this.#state.runtimeConfig.shared.debug.pipeline.readbackSampleSize;
    const sample = await readBuffer(hiddenStates, Math.min(debugReadbackSize, hiddenStates.size));
    const f32 = decodeReadback(sample, activationDtype);
    const nanCount = f32.filter(x => !Number.isFinite(x)).length;
    let maxAbs = 0;
    for (let i = 0; i < f32.length; i++) {
      const abs = Math.abs(f32[i]);
      if (abs > maxAbs) maxAbs = abs;
    }
    const first8 = Array.from(f32).slice(0, 8).map(x => x.toFixed(4)).join(', ');
    log.debug('Pipeline', `After embed: buffer.label=${hiddenStates.label}, buffer.size=${hiddenStates.size}, maxAbs=${maxAbs.toFixed(4)}`);
    log.debug('Pipeline', `After embed first8=[${first8}], nan=${nanCount}/${f32.length}`);
  }

  if (opts.debug) {
    log.debug('Pipeline', `LAYER_LOOP_START: numLayers=${config.numLayers}, useGPU=${context.useGPU}`);
  }

  // Zero the finiteness-guard status words before running the layers.
  if (this.#state.finitenessBuffer) {
    const device = getDevice();
    if (device) {
      device.queue.writeBuffer(this.#state.finitenessBuffer, 0, new Uint32Array([0, 0, 0, 0]));
    }
  }

  let currentRecorder = recorder;

  // NOTE(review): the debug branch above treats hiddenStates as a GPUBuffer,
  // yet `.buffer` is read here — presumably embed() returns a wrapper object
  // with a `.buffer` GPUBuffer in the non-debug path. Confirm against embed().
  let currentHiddenBuffer = hiddenStates.buffer;
  for (let l = 0; l < config.numLayers; l++) {
    context.recorder = currentRecorder;

    const prevBuffer = currentHiddenBuffer;
    const layerOutput = await processLayer(l, currentHiddenBuffer, numTokens, true, context);
    if (!(layerOutput instanceof GPUBuffer)) throw new Error('Expected GPUBuffer from processLayer');
    currentHiddenBuffer = layerOutput;

    // A "checkpoint" layer flushes the recorder so its output can be inspected.
    const isCheckpoint = useCheckpoints && opts.debugLayers?.includes(l);

    if (isCheckpoint && currentRecorder) {
      await currentRecorder.submitAndWait();
      await recordProfile(currentRecorder);
      currentRecorder = undefined;
    }

    // Debug readback of the last position's hidden vector for this layer.
    const shouldDebug = opts.debug && currentHiddenBuffer && (!recorder || isCheckpoint);
    if (shouldDebug && !currentRecorder) {
      const device = getDevice();
      if (device) {
        if (allowReadback(`pipeline.prefill.layer-${l}`)) {
          try {
            const sampleSize = config.hiddenSize * activationBytes;
            const staging = device.createBuffer({
              size: sampleSize,
              usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ,
            });
            const enc = device.createCommandEncoder();
            const lastTokenOffset = (numTokens - 1) * config.hiddenSize * activationBytes;
            enc.copyBufferToBuffer(currentHiddenBuffer, lastTokenOffset, staging, 0, sampleSize);
            device.queue.submit([enc.finish()]);
            await staging.mapAsync(GPUMapMode.READ);
            const data = decodeReadback(staging.getMappedRange().slice(0), activationDtype);
            staging.unmap();
            staging.destroy();
            let min = Infinity;
            let max = -Infinity;
            let maxAbs = 0;
            for (const v of data) {
              if (!Number.isFinite(v)) continue;
              if (v < min) min = v;
              if (v > max) max = v;
              const av = Math.abs(v);
              if (av > maxAbs) maxAbs = av;
            }
            const sample = Array.from(data).slice(0, 3).map(x => x.toFixed(3)).join(', ');
            log.debug('Pipeline', `LAYER_${l}_LAST[pos=${numTokens - 1}]: min=${min.toFixed(3)}, max=${max.toFixed(3)}, maxAbs=${maxAbs.toFixed(2)}, sample=[${sample}]`);
          } catch (e) {
            log.debug('Pipeline', `LAYER_${l}_LAST: error reading buffer: ${e}`);
          }
        }
      }
    }

    // After a checkpoint flush, start a new recorder for the remaining layers.
    if (isCheckpoint && useCheckpoints && l < config.numLayers - 1) {
      currentRecorder = createRecorder('prefill-cont');
    }

    // Release (or defer release of) the previous layer's hidden buffer.
    if (prevBuffer !== currentHiddenBuffer) {
      if (currentRecorder) {
        currentRecorder.trackTemporaryBuffer(prevBuffer);
      } else {
        releaseBuffer(prevBuffer);
      }
    }
  }

  // Check the finiteness guard; requires flushing any pending recorder first.
  if (this.#state.finitenessBuffer) {
    if (currentRecorder) {
      await currentRecorder.submitAndWait();
      await recordProfile(currentRecorder);
      currentRecorder = undefined;
    }
    const isInfiniteData = await readBuffer(this.#state.finitenessBuffer, 16);
    const u32 = new Uint32Array(isInfiniteData.buffer, isInfiniteData.byteOffset, 4);
    const finitenessStatus = parseFinitenessStatusWords(u32, 0);
    if (finitenessStatus.triggered) {
      if (currentHiddenBuffer instanceof GPUBuffer) {
        releaseBuffer(currentHiddenBuffer);
      }
      throw new FinitenessError(`F16 bounds exceeded during prefill${finitenessStatus.metadata}`);
    }
  }

  if (benchmarkSubmits) {
    logSubmitStats(`Prefill (${numTokens} tokens, ${config.numLayers} layers)`);
    setTrackSubmits(false);
  }

  // Debug: read back the final hidden vector at the last position.
  if (opts.debug) {
    log.debug('Pipeline', `LAYER_LOOP_DONE, currentHiddenBuffer type=${currentHiddenBuffer?.constructor?.name}`);
    if (currentHiddenBuffer && allowReadback('pipeline.prefill.final-hidden')) {
      // NOTE(review): unlike the per-layer readback above, `device` is not
      // null-checked here — confirm getDevice() cannot return undefined on
      // this path.
      const device = getDevice();
      const lastTokenOffset = (numTokens - 1) * config.hiddenSize * activationBytes;
      const sampleSize = config.hiddenSize * activationBytes;
      const staging = device.createBuffer({
        size: sampleSize,
        usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ,
      });
      const enc = device.createCommandEncoder();
      enc.copyBufferToBuffer(currentHiddenBuffer, lastTokenOffset, staging, 0, sampleSize);
      device.queue.submit([enc.finish()]);
      await staging.mapAsync(GPUMapMode.READ);
      const data = decodeReadback(staging.getMappedRange().slice(0), activationDtype);
      staging.unmap();
      staging.destroy();
      const nanCount = Array.from(data).filter(x => !Number.isFinite(x)).length;
      const nonZero = Array.from(data).filter(x => Number.isFinite(x) && x !== 0).slice(0, 5);
      log.debug('Pipeline', `FINAL_HIDDEN[pos=${numTokens - 1}]: nan=${nanCount}/${data.length}, sample=[${nonZero.map(x => x.toFixed(4)).join(', ')}]`);
    }
  }

  if (hasGpuTimePrefill) {
    this.#state.stats.gpuTimePrefillMs = gpuTimePrefillMs;
  }

  // Hidden-only mode: hand everything the caller needs to finish the pipeline.
  // Note: currentSeqLen is NOT advanced on this path.
  if (returnHidden) {
    return {
      numTokens,
      config,
      startPos,
      activationDtype,
      activationBytes,
      currentRecorder,
      recordProfile,
      debugCheckBuffer,
      currentHiddenBuffer,
    };
  }

  // Logits: prefer a GPU-recorded logits pass (single-token, GPU lm_head,
  // not disabled); otherwise compute logits via the general path.
  let lastLogits;
  let logitsVocabSize = config.vocabSize;
  let usedRecordedLogits = false;
  const lmHead = this.#state.weights.get('lm_head');
  const canRecordLogits = !!currentRecorder
    && !!lmHead
    && !isCpuWeightBuffer(lmHead)
    && !this.#state.disableRecordedLogits
    && numTokens === 1;
  if (currentRecorder && canRecordLogits) {
    const recorded = await recordLogitsGPU(
      currentRecorder,
      currentHiddenBuffer,
      numTokens,
      getLogitsWeights(this.#state),
      getLogitsConfig(this.#state)
    );
    logitsVocabSize = recorded.vocabSize;
    usedRecordedLogits = true;

    await currentRecorder.submitAndWait();
    await recordProfile(currentRecorder);

    // Read back only the last position's logits slice.
    const logitsBytes = selectRuleValue('shared', 'dtype', 'bytesFromDtype', { dtype: recorded.logitsDtype });
    const lastLogitsSize = logitsVocabSize * logitsBytes;
    const lastLogitsOffset = (numTokens - 1) * lastLogitsSize;
    const logitsData = await readBufferSlice(recorded.logitsBuffer, lastLogitsOffset, lastLogitsSize);
    releaseBuffer(recorded.logitsBuffer);
    lastLogits = decodeReadback(logitsData, recorded.logitsDtype);

    // Health check: if recorded logits are NaN/Inf/all-zero, permanently
    // disable the recorded path and recompute without a recorder.
    const health = getLogitsHealth(lastLogits);
    if (health.nanCount > 0 || health.infCount > 0 || health.nonZeroCount === 0) {
      log.warn(
        'Logits',
        `Recorded logits invalid (nan=${health.nanCount} inf=${health.infCount} nonZero=${health.nonZeroCount}, maxAbs=${health.maxAbs.toFixed(3)}); recomputing without recorder.`
      );
      this.#state.disableRecordedLogits = true;
      this.#state.disableFusedDecode = true;
      const fallbackLogits = await computeLogits(
        currentHiddenBuffer,
        numTokens,
        getLogitsWeights(this.#state),
        getLogitsConfig(this.#state),
        this.#state.useGPU,
        this.#state.debugFlags,
        undefined,
        debugCheckBuffer,
        this.#state.runtimeConfig.shared.debug.probes,
        { lastPositionOnly: true }
      );
      const fallbackHealth = getLogitsHealth(fallbackLogits);
      if (fallbackHealth.nanCount > 0 || fallbackHealth.infCount > 0 || fallbackHealth.nonZeroCount === 0) {
        throw new Error(
          `[Logits] Fallback logits invalid (nan=${fallbackHealth.nanCount} inf=${fallbackHealth.infCount} nonZero=${fallbackHealth.nonZeroCount}, maxAbs=${fallbackHealth.maxAbs.toFixed(3)}). ` +
          'This indicates upstream kernel output is NaN/Inf (often prefill attention/matmul).'
        );
      }
      logitsVocabSize = config.vocabSize;
      usedRecordedLogits = false;
      lastLogits = fallbackLogits.length === logitsVocabSize
        ? fallbackLogits
        : extractLastPositionLogits(fallbackLogits, numTokens, logitsVocabSize);
    }

    releaseBuffer(currentHiddenBuffer);
  } else {
    // General path: flush any pending recorder, then compute logits directly.
    if (currentRecorder) {
      await currentRecorder.submitAndWait();
      await recordProfile(currentRecorder);
    }
    const logits = await computeLogits(
      currentHiddenBuffer,
      numTokens,
      getLogitsWeights(this.#state),
      getLogitsConfig(this.#state),
      this.#state.useGPU,
      this.#state.debugFlags,
      undefined,
      debugCheckBuffer,
      this.#state.runtimeConfig.shared.debug.probes,
      { lastPositionOnly: true }
    );

    lastLogits = logits.length === logitsVocabSize
      ? logits
      : extractLastPositionLogits(logits, numTokens, logitsVocabSize);
    releaseBuffer(currentHiddenBuffer);
  }

  // Advance the sequence position past the prefilled tokens.
  this.#state.currentSeqLen = startPos + numTokens;

  // Recorded logits may use a truncated vocab and skip softcapping; pad the
  // tail with -Infinity and apply final softcapping here.
  if (usedRecordedLogits) {
    if (logitsVocabSize < config.vocabSize) {
      const padded = new Float32Array(config.vocabSize);
      padded.set(lastLogits);
      padded.fill(-Infinity, logitsVocabSize);
      lastLogits = padded;
    }
    if (config.finalLogitSoftcapping != null) {
      applySoftcapping(lastLogits, config.finalLogitSoftcapping);
    }
  }

  if (opts.debug) {
    logitsSanity(lastLogits, 'Prefill', (tokens) => resolveTokenText(this.#state.tokenizer, tokens));
  }

  if (opts.debug) {
    if (this.#state.kvCache?.hasGPUCache?.()) {
      log.debug('Pipeline', `KV cache active after prefill: seqLen=${this.#state.kvCache.getKeyCache(0)?.constructor.name ?? '?'}`);
    } else {
      log.warn('Pipeline', `KV cache NOT active after prefill! hasGPUCache=${this.#state.kvCache?.hasGPUCache?.()}`);
    }
  }

  return lastLogits;
}
|
|
1271
|
+
|
|
1272
|
+
|
|
1273
|
+
async _decodeStep(currentIds, opts) {
|
|
1274
|
+
const debugCheckBuffer = this.#state.debug
|
|
1275
|
+
? (buffer, label, numTokens, expectedDim) =>
|
|
1276
|
+
debugCheckBufferHelper(this.#state, buffer, label, numTokens, expectedDim)
|
|
1277
|
+
: undefined;
|
|
1278
|
+
return decodeStep(this.#state, currentIds, opts, this._getDecodeHelpers(debugCheckBuffer));
|
|
1279
|
+
}
|
|
1280
|
+
|
|
1281
|
+
async decodeStepLogits(currentIds, options = {}) {
|
|
1282
|
+
if (!this.#state.isLoaded) throw new Error('Model not loaded');
|
|
1283
|
+
if (this.#state.isGenerating) throw new Error('Generation already in progress');
|
|
1284
|
+
resetActiveExecutionPlan(this.#state);
|
|
1285
|
+
|
|
1286
|
+
validateCallTimeOptions(options);
|
|
1287
|
+
|
|
1288
|
+
const opts = this._resolveStepOptions(options);
|
|
1289
|
+
const debugCheckBuffer = this.#state.debug
|
|
1290
|
+
? (buffer, label, numTokens, expectedDim) =>
|
|
1291
|
+
debugCheckBufferHelper(this.#state, buffer, label, numTokens, expectedDim)
|
|
1292
|
+
: undefined;
|
|
1293
|
+
|
|
1294
|
+
return decodeStepLogits(this.#state, currentIds, opts, this._getDecodeHelpers(debugCheckBuffer));
|
|
1295
|
+
}
|
|
1296
|
+
|
|
1297
|
+
async advanceWithToken(tokenId, options = {}) {
|
|
1298
|
+
if (!this.#state.isLoaded) throw new Error('Model not loaded');
|
|
1299
|
+
if (this.#state.isGenerating) throw new Error('Generation already in progress');
|
|
1300
|
+
resetActiveExecutionPlan(this.#state);
|
|
1301
|
+
|
|
1302
|
+
validateCallTimeOptions(options);
|
|
1303
|
+
|
|
1304
|
+
const opts = this._resolveStepOptions(options);
|
|
1305
|
+
const debugCheckBuffer = this.#state.debug
|
|
1306
|
+
? (buffer, label, numTokens, expectedDim) =>
|
|
1307
|
+
debugCheckBufferHelper(this.#state, buffer, label, numTokens, expectedDim)
|
|
1308
|
+
: undefined;
|
|
1309
|
+
|
|
1310
|
+
this._assertTokenIdInRange(tokenId, 'advanceWithToken');
|
|
1311
|
+
await advanceWithToken(this.#state, tokenId, opts, this._getDecodeHelpers(debugCheckBuffer));
|
|
1312
|
+
}
|
|
1313
|
+
|
|
1314
|
+
async advanceWithTokenAndEmbedding(tokenId, options = {}) {
|
|
1315
|
+
if (!this.#state.isLoaded) throw new Error('Model not loaded');
|
|
1316
|
+
if (this.#state.isGenerating) throw new Error('Generation already in progress');
|
|
1317
|
+
resetActiveExecutionPlan(this.#state);
|
|
1318
|
+
|
|
1319
|
+
validateCallTimeOptions(options);
|
|
1320
|
+
|
|
1321
|
+
const opts = this._resolveStepOptions(options);
|
|
1322
|
+
const embeddingMode = resolveAdvanceEmbeddingMode(this.#state, options);
|
|
1323
|
+
const debugCheckBuffer = this.#state.debug
|
|
1324
|
+
? (buffer, label, numTokens, expectedDim) =>
|
|
1325
|
+
debugCheckBufferHelper(this.#state, buffer, label, numTokens, expectedDim)
|
|
1326
|
+
: undefined;
|
|
1327
|
+
|
|
1328
|
+
this._assertTokenIdInRange(tokenId, 'advanceWithTokenAndEmbedding');
|
|
1329
|
+
return runAdvanceWithTokenAndEmbedding(
|
|
1330
|
+
this.#state,
|
|
1331
|
+
tokenId,
|
|
1332
|
+
opts,
|
|
1333
|
+
this._getDecodeHelpers(debugCheckBuffer),
|
|
1334
|
+
embeddingMode
|
|
1335
|
+
);
|
|
1336
|
+
}
|
|
1337
|
+
|
|
1338
|
+
async _generateNTokensGPU(startToken, N, currentIds, opts) {
|
|
1339
|
+
const debugCheckBuffer = this.#state.debug
|
|
1340
|
+
? (buffer, label, numTokens, expectedDim) =>
|
|
1341
|
+
debugCheckBufferHelper(this.#state, buffer, label, numTokens, expectedDim)
|
|
1342
|
+
: undefined;
|
|
1343
|
+
return generateNTokensGPU(this.#state, startToken, N, currentIds, opts, this._getDecodeHelpers(debugCheckBuffer));
|
|
1344
|
+
}
|
|
1345
|
+
}
|