@simulatte/doppler 0.1.7 → 0.1.8

Files changed (88)
  1. package/CHANGELOG.md +19 -0
  2. package/package.json +21 -36
  3. package/src/browser/browser-converter.js +5 -0
  4. package/src/client/doppler-registry.json +1 -17
  5. package/src/config/kernel-path-loader.d.ts +5 -0
  6. package/src/config/kernel-path-loader.js +13 -0
  7. package/src/config/kernels/registry.json +74 -0
  8. package/src/config/loader.js +3 -0
  9. package/src/config/merge-contract-check.js +7 -0
  10. package/src/config/presets/kernel-paths/gemma3-q4k-dequant-f32w-f32a-online.json +56 -0
  11. package/src/config/presets/kernel-paths/lfm2-q4k-dequant-f32a-nosubgroups.json +61 -0
  12. package/src/config/presets/kernel-paths/registry.json +14 -0
  13. package/src/config/presets/models/gemma2.json +2 -1
  14. package/src/config/presets/models/gemma3.json +2 -0
  15. package/src/config/presets/models/qwen3.json +4 -3
  16. package/src/config/presets/models/qwen3_5.json +16 -0
  17. package/src/config/presets/runtime/model/qwen3-5-layer-probe.json +52 -0
  18. package/src/config/presets/runtime/model/qwen3-5-linear-attn-debug.json +90 -0
  19. package/src/config/schema/conversion.schema.d.ts +1 -0
  20. package/src/config/schema/manifest.schema.d.ts +1 -1
  21. package/src/config/schema/manifest.schema.js +1 -1
  22. package/src/config/schema/storage.schema.js +1 -1
  23. package/src/converter/conversion-plan.js +10 -2
  24. package/src/converter/core.js +2 -0
  25. package/src/converter/manifest-inference.js +12 -22
  26. package/src/converter/parsers/transformer.js +4 -0
  27. package/src/converter/quantization-info.js +5 -1
  28. package/src/converter/quantizer.js +19 -12
  29. package/src/converter/rope-config.js +8 -6
  30. package/src/converter/tokenizer-utils.d.ts +1 -0
  31. package/src/converter/tokenizer-utils.js +4 -1
  32. package/src/debug/reference/hf_qwen35_linear_attn_debug.py +268 -0
  33. package/src/distribution/shard-delivery.js +6 -1
  34. package/src/formats/rdrr/parsing.d.ts +4 -0
  35. package/src/formats/rdrr/parsing.js +14 -1
  36. package/src/gpu/kernels/index.d.ts +8 -0
  37. package/src/gpu/kernels/index.js +6 -0
  38. package/src/gpu/kernels/matmul-selection.js +47 -4
  39. package/src/gpu/kernels/matmul.d.ts +2 -0
  40. package/src/gpu/kernels/matmul.js +1 -1
  41. package/src/gpu/kernels/rmsnorm.js +9 -2
  42. package/src/gpu/kernels/split_qg.d.ts +50 -0
  43. package/src/gpu/kernels/split_qg.js +46 -0
  44. package/src/gpu/kernels/split_qg.wgsl +58 -0
  45. package/src/gpu/kernels/split_qg_f16.wgsl +62 -0
  46. package/src/gpu/weight-buffer.d.ts +1 -1
  47. package/src/gpu/weight-buffer.js +1 -1
  48. package/src/inference/browser-harness.d.ts +2 -0
  49. package/src/inference/browser-harness.js +20 -1
  50. package/src/inference/pipelines/diffusion/helpers.js +3 -0
  51. package/src/inference/pipelines/diffusion/text-encoder-gpu.js +8 -2
  52. package/src/inference/pipelines/text/attention/output-projection.d.ts +12 -0
  53. package/src/inference/pipelines/text/attention/output-projection.js +8 -0
  54. package/src/inference/pipelines/text/attention/projections.d.ts +10 -1
  55. package/src/inference/pipelines/text/attention/projections.js +41 -11
  56. package/src/inference/pipelines/text/attention/record.js +15 -6
  57. package/src/inference/pipelines/text/attention/run.js +50 -6
  58. package/src/inference/pipelines/text/config.js +14 -0
  59. package/src/inference/pipelines/text/execution-plan.js +5 -4
  60. package/src/inference/pipelines/text/generator-runtime.js +5 -0
  61. package/src/inference/pipelines/text/generator-steps.d.ts +6 -0
  62. package/src/inference/pipelines/text/generator-steps.js +43 -15
  63. package/src/inference/pipelines/text/generator.js +50 -17
  64. package/src/inference/pipelines/text/init.d.ts +13 -0
  65. package/src/inference/pipelines/text/init.js +16 -5
  66. package/src/inference/pipelines/text/layer.js +1 -0
  67. package/src/inference/pipelines/text/linear-attention.d.ts +5 -0
  68. package/src/inference/pipelines/text/linear-attention.js +33 -3
  69. package/src/inference/pipelines/text/logits/gpu.js +2 -2
  70. package/src/inference/pipelines/text/logits/index.d.ts +6 -1
  71. package/src/inference/pipelines/text/logits/index.js +3 -1
  72. package/src/inference/pipelines/text/model-load.js +3 -0
  73. package/src/inference/pipelines/text/sampling.js +52 -6
  74. package/src/inference/test-harness.js +2 -2
  75. package/src/loader/final-weights-loader.js +2 -0
  76. package/src/loader/shard-cache.js +3 -2
  77. package/src/loader/tensors/tensor-loader.js +6 -1
  78. package/src/rules/inference/dtype.rules.json +5 -0
  79. package/src/rules/inference/kernel-path.rules.json +2 -2
  80. package/src/rules/kernels/split-qg.rules.json +6 -0
  81. package/src/rules/rule-registry.js +2 -0
  82. package/src/storage/downloader.js +2 -1
  83. package/src/storage/shard-manager.js +4 -3
  84. package/src/tooling/conversion-config-materializer.js +3 -5
  85. package/src/tooling/node-converter.js +3 -0
  86. package/src/tooling/node-source-runtime.js +36 -0
  87. package/src/types/model.d.ts +5 -0
  88. package/tools/doppler-cli.js +6 -1
@@ -0,0 +1,90 @@
+{
+  "id": "model/qwen3-5-linear-attn-debug",
+  "name": "qwen3-5-linear-attn-debug",
+  "description": "Probe linear attention intermediates in Qwen 3.5 layer 0 for comparison with HF reference.",
+  "intent": "investigate",
+  "stability": "canonical",
+  "owner": "doppler-core",
+  "createdAtUtc": "2026-03-13T00:00:00Z",
+  "extends": "modes/debug",
+  "runtime": {
+    "inference": {
+      "prompt": "Hello",
+      "batching": {
+        "maxTokens": 1
+      },
+      "sampling": {
+        "temperature": 0
+      },
+      "chatTemplate": {
+        "enabled": false
+      }
+    },
+    "shared": {
+      "debug": {
+        "trace": {
+          "enabled": true,
+          "categories": ["attn", "logits"],
+          "layers": null,
+          "maxDecodeSteps": 1
+        },
+        "probes": [
+          {
+            "id": "embed",
+            "stage": "embed_out",
+            "tokens": [-1],
+            "dims": [0, 1, 2, 3, 4, 5, 6, 7]
+          },
+          {
+            "id": "qkv",
+            "stage": "linear_qkv_proj",
+            "layers": [0],
+            "tokens": [-1],
+            "dims": [0, 1, 2, 3, 4, 5, 6, 7]
+          },
+          {
+            "id": "z",
+            "stage": "linear_z_proj",
+            "layers": [0],
+            "tokens": [-1],
+            "dims": [0, 1, 2, 3, 4, 5, 6, 7]
+          },
+          {
+            "id": "a",
+            "stage": "linear_a_proj",
+            "layers": [0],
+            "tokens": [-1],
+            "dims": [0, 1, 2, 3, 4, 5, 6, 7]
+          },
+          {
+            "id": "b",
+            "stage": "linear_b_proj",
+            "layers": [0],
+            "tokens": [-1],
+            "dims": [0, 1, 2, 3, 4, 5, 6, 7]
+          },
+          {
+            "id": "core",
+            "stage": "linear_core_out",
+            "layers": [0],
+            "tokens": [-1],
+            "dims": [0, 1, 2, 3, 4, 5, 6, 7]
+          },
+          {
+            "id": "layer0",
+            "stage": "layer_out",
+            "layers": [0],
+            "tokens": [-1],
+            "dims": [0, 1, 2, 3, 4, 5, 6, 7]
+          },
+          {
+            "id": "logits",
+            "stage": "logits_final",
+            "tokens": [-1],
+            "dims": [0, 1, 2, 3]
+          }
+        ]
+      }
+    }
+  }
+}
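Each probe entry above pairs a pipeline stage with the layers, tokens, and dims to capture; `tokens: [-1]` selects the last token, and omitting `layers` (as in the `embed` and `logits` probes) applies to every layer. A minimal sketch of how a runner might match probes against an activation event, where `matchProbes` and the event field names are illustrative, not this package's actual API:

```js
// Hypothetical helper: pick the probes that apply to one activation event.
// Negative token indices count from the end, Python-style, so [-1] = last token.
function matchProbes(probes, { stage, layer, tokenIndex, lastTokenIndex }) {
  return probes.filter((probe) => {
    if (probe.stage !== stage) return false;
    if (Array.isArray(probe.layers) && !probe.layers.includes(layer)) return false;
    const tokens = (probe.tokens ?? []).map((t) => (t < 0 ? lastTokenIndex + 1 + t : t));
    return tokens.includes(tokenIndex);
  });
}

// With the preset above, a linear_qkv_proj activation in layer 0 on the final
// token matches the "qkv" probe; probe.dims then selects which components to record.
```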
@@ -32,6 +32,7 @@ export interface ParsedModelSchema {
   config: RawModelConfigSchema;
   architecture?: string;
   quantization?: string;
+  generationConfig?: unknown;
   tokenizerJson?: unknown;
   tokenizerConfig?: unknown;
   tokenizerModel?: unknown;
@@ -163,7 +163,7 @@ export interface ArchitectureSchema {
  * Use `null` to indicate "not applicable" (e.g., no softcapping).
  */
 export interface ManifestAttentionSchema {
-  /** Query pre-attention scalar (Gemma 2: 256, standard: sqrt(headDim)) */
+  /** Query pre-attention scalar: attnScale = 1/sqrt(scalar). Standard = headDim. */
   queryPreAttnScalar: number;
   /** Attention logit softcapping (Gemma 2: 50, null = disabled) */
   attnLogitSoftcapping: number | null;
@@ -40,7 +40,7 @@ export const DEFAULT_MANIFEST_INFERENCE = {
   schema: null,
   presetId: null,
   attention: {
-    queryPreAttnScalar: 8, // sqrt(64) for standard 64-dim heads
+    queryPreAttnScalar: 64, // headDim for standard 64-dim heads; attnScale = 1/sqrt(scalar)
     attnLogitSoftcapping: null, // No softcapping (null = disabled)
     slidingWindow: null, // Full attention (null = no sliding window)
     queryKeyNorm: false,
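The old default of 8 baked in sqrt(headDim) where the runtime expects the scalar itself. Since attnScale = 1/sqrt(queryPreAttnScalar), a quick check shows how far off that was for standard 64-dim heads:

```js
// attnScale = 1 / Math.sqrt(queryPreAttnScalar)
console.log(1 / Math.sqrt(64));  // 0.125, the standard 1/sqrt(headDim) for 64-dim heads
console.log(1 / Math.sqrt(8));   // ~0.354, what the old default of 8 produced (about 2.8x too hot)
console.log(1 / Math.sqrt(256)); // 0.0625, Gemma 2's documented override of 256
```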
@@ -35,7 +35,7 @@ export const DEFAULT_STORAGE_ALIGNMENT_CONFIG = {
 export const DEFAULT_STORAGE_BACKEND_CONFIG = {
   backend: 'auto', // auto | opfs | indexeddb | memory
   opfs: {
-    useSyncAccessHandle: true,
+    useSyncAccessHandle: false,
     maxConcurrentHandles: 2,
   },
   indexeddb: {
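One plausible reason for defaulting useSyncAccessHandle off, stated here as an assumption rather than anything this diff confirms: FileSystemFileHandle.createSyncAccessHandle() is only exposed in dedicated worker contexts, so a sync-first default fails for OPFS access from the main thread. A feature-detection sketch (not this package's code):

```js
// Sketch: sync OPFS access handles require a dedicated worker plus API support.
function canUseSyncAccessHandle() {
  const inWorker = typeof WorkerGlobalScope !== 'undefined'
    && typeof self !== 'undefined'
    && self instanceof WorkerGlobalScope;
  const hasApi = typeof FileSystemFileHandle !== 'undefined'
    && 'createSyncAccessHandle' in FileSystemFileHandle.prototype;
  return inWorker && hasApi;
}
```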
@@ -117,7 +117,10 @@ function isLikelyEmbeddingGemma(rawConfig, architectureHint) {

 export function inferSourceWeightQuantization(tensors) {
   if (!Array.isArray(tensors) || tensors.length === 0) {
-    return 'f16';
+    throw new Error(
+      'Cannot infer source weight quantization: no tensors provided. ' +
+      'Set converterConfig.quantization.weights explicitly.'
+    );
   }
   const weightTensors = [];
   for (const tensor of tensors) {
@@ -128,7 +131,12 @@ export function inferSourceWeightQuantization(tensors) {
     weightTensors.push({ name, dtype });
   }
   const dtypes = new Set(weightTensors.map((tensor) => tensor.dtype));
-  if (dtypes.size === 0) return 'f16';
+  if (dtypes.size === 0) {
+    throw new Error(
+      'Cannot infer source weight quantization: no recognizable weight dtypes found. ' +
+      'Set converterConfig.quantization.weights explicitly.'
+    );
+  }
   if (dtypes.size > 1) {
     const detail = Array.from(dtypes)
       .sort()
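Inference now fails fast instead of silently defaulting to f16. A hedged sketch of a caller-side guard; the `model` and `converterConfig` shapes are assumed, with the override path taken from the new error text:

```js
let sourceQuant;
try {
  sourceQuant = inferSourceWeightQuantization(model.tensors);
} catch (err) {
  // Per the new error message: supply the dtype explicitly rather than letting
  // a silent 'f16' fallback mask an empty or mis-parsed tensor list.
  sourceQuant = converterConfig?.quantization?.weights;
  if (!sourceQuant) throw err;
}
```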
@@ -983,6 +983,7 @@ export function createManifest(
     isDiffusion ? 'diffusion' : extractArchitecture(model.config, model.ggufConfig)
   );
   const rawConfig = model.config || {};
+  const generationConfig = model.generationConfig ?? null;
   const resolvedArchitecture = isDiffusion
     ? architecture
     : resolveIntermediateSizeFromTensors(architecture, model, tensorLocations, rawConfig, modelId);
@@ -1037,6 +1038,7 @@
       ? null
       : resolveEosTokenId({
           config: rawConfig,
+          generationConfig,
           tokenizer: model.tokenizer ?? model.tokenizerConfig ?? null,
           tokenizerJson: model.tokenizerJson ?? null,
         });
@@ -240,16 +240,6 @@ function detectAttentionOutputGate(presetInference, modelConfig, defaults) {
     return modelConfig.attn_output_gate;
   }

-  const modelType = normalizeLayerTypeName(modelConfig?.model_type);
-  const hasLinearAttentionLayers = Array.isArray(modelConfig?.layer_types)
-    && modelConfig.layer_types.some((entry) => normalizeCustomLayerType(entry) === 'linear_attention');
-  if (
-    hasLinearAttentionLayers
-    && (modelType === 'qwen2' || modelType === 'qwen3_5' || modelType === 'qwen3_5_text')
-  ) {
-    return true;
-  }
-
   return defaults.attention.attentionOutputGate;
 }

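With the hard-coded Qwen detection deleted here (and from detectRmsNormWeightOffset in the next hunk), the model-specific answers presumably move into preset data; this release also adds presets/models/qwen3_5.json. A speculative sketch of the preset fields that could carry them, with field names taken from the resolver code in this diff but contents not confirmed:

```js
// Hypothetical preset excerpt encoding what the deleted heuristics returned.
const qwen35Preset = {
  id: 'qwen3_5',
  inference: {
    attention: {
      attentionOutputGate: true, // was inferred from model_type + layer_types
    },
    normalization: {
      rmsNormWeightOffset: true, // was forced for model_type qwen3_5 / qwen3_5_text
    },
  },
};
```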
@@ -259,21 +249,18 @@ function resolveQueryPreAttnScalar(preset, modelConfig, headDim) {
     return explicit;
   }

-  const modelType = normalizeLayerTypeName(modelConfig?.model_type);
-  const presetId = normalizeLayerTypeName(preset?.id);
-  if (modelType.startsWith('qwen') || presetId === 'qwen3') {
-    return headDim;
+  // Standard attention scaling: attnScale = 1/sqrt(queryPreAttnScalar).
+  // For standard transformers queryPreAttnScalar = headDim, giving 1/sqrt(headDim).
+  // Preset may override for non-standard models.
+  const presetScalar = Number(preset?.inference?.attention?.queryPreAttnScalar);
+  if (Number.isFinite(presetScalar) && presetScalar > 0) {
+    return presetScalar;
   }

-  return Math.sqrt(headDim);
+  return headDim;
 }

 function detectRmsNormWeightOffset(presetInference, modelConfig, defaults) {
-  const modelType = normalizeLayerTypeName(modelConfig?.model_type);
-  if (modelType === 'qwen3_5' || modelType === 'qwen3_5_text') {
-    return true;
-  }
-
   if (typeof presetInference?.normalization?.rmsNormWeightOffset === 'boolean') {
     return presetInference.normalization.rmsNormWeightOffset;
   }
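Restating the new resolution order for queryPreAttnScalar: an explicit model-config value wins (handled before the lines shown), then a preset override, then headDim. Illustrative calls, assuming an empty model config takes the fallback path:

```js
resolveQueryPreAttnScalar({ inference: { attention: { queryPreAttnScalar: 256 } } }, {}, 64); // 256 (preset override)
resolveQueryPreAttnScalar({}, {}, 64); // 64, i.e. standard 1/sqrt(headDim) scaling
```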
@@ -385,8 +372,8 @@ export function buildManifestInference(preset, config, headDim = 64, quantizatio
       queryPreAttnScalar: resolveQueryPreAttnScalar(preset, modelConfig, headDim),
       attnLogitSoftcapping: presetInference.attention?.attnLogitSoftcapping ??
         modelConfig.attn_logit_softcapping ?? defaults.attention.attnLogitSoftcapping,
-      slidingWindow: presetInference.attention?.slidingWindow ??
-        modelConfig.sliding_window ?? defaults.attention.slidingWindow,
+      slidingWindow: modelConfig.sliding_window ??
+        presetInference.attention?.slidingWindow ?? defaults.attention.slidingWindow,
       queryKeyNorm: presetInference.attention?.queryKeyNorm ?? defaults.attention.queryKeyNorm,
       attentionOutputGate: detectAttentionOutputGate(presetInference, modelConfig, defaults),
       causal: detectedCausalAttention ?? presetInference.attention?.causal ?? defaults.attention.causal,
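The reordered coalesce means a checkpoint's own sliding_window can no longer be shadowed by a stale preset value. Illustrative only:

```js
const presetInference = { attention: { slidingWindow: 4096 } };
const modelConfig = { sliding_window: 8192 };

const resolved = modelConfig.sliding_window ??
  presetInference.attention?.slidingWindow ?? null;
console.log(resolved); // 8192; the old order would have produced 4096
// Note: ?? skips only null/undefined, so sliding_window: null in config.json
// still falls through to the preset, then to the default.
```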
@@ -459,6 +446,9 @@
     );
   }
   globalPattern = null;
+  // Default offset 0 means first global layer at index 0 (most common pattern).
+  // This is the every_n pattern default, distinct from layerPattern.offset=null
+  // which means "not applicable" in the schema.
   offset = (
     detectEveryNOffsetFromLayerTypes(modelConfig.layer_types, period)
     ?? normalizeEveryNOffset(presetPattern.offset, period)
@@ -7,6 +7,9 @@ export async function parseTransformerModel(adapter) {
   } = adapter;

   const config = await readJson('config.json', 'config.json');
+  const generationConfig = await fileExists('generation_config.json')
+    ? await readJson('generation_config.json', 'generation_config.json')
+    : null;
   const architectureHint = config.architectures?.[0] ?? config.model_type ?? '';

   let tensors = null;
@@ -19,6 +22,7 @@ export async function parseTransformerModel(adapter) {

   return {
     config,
+    generationConfig,
     tensors,
     architectureHint,
   };
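For reference, a typical Hugging Face generation_config.json carries the EOS ids that now flow into manifest creation; eos_token_id may be a single number or an array. Illustrative contents, not taken from this package:

```js
// Shape of a typical generation_config.json (HF convention):
const generationConfig = {
  bos_token_id: 1,
  eos_token_id: [2, 32000], // a single number is equally common
  do_sample: true,
  temperature: 0.7,
};
```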
@@ -2,6 +2,10 @@
 import { DEFAULT_QUANTIZATION_DEFAULTS, DEFAULT_Q4K_LAYOUT } from '../config/index.js';
 import { classifyTensorRole } from '../formats/rdrr/index.js';

+// Default quantization tag when no explicit dtype is provided.
+// F16 is the canonical unquantized storage format for WebGPU inference.
+const DEFAULT_QUANT_TAG = 'f16';
+
 // Quantization tag aliases mapped to canonical names.
 // Add new aliases here rather than adding if/else branches.
 const QUANT_TAG_ALIASES = {
@@ -47,7 +51,7 @@ const QUANT_TAG_ALIASES = {
 };

 export function normalizeQuantTag(value) {
-  if (!value) return 'f16';
+  if (!value) return DEFAULT_QUANT_TAG;
   const lower = value.toLowerCase();
   return QUANT_TAG_ALIASES[lower] ?? lower;
 }
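Behavior is unchanged; the literal just gains a name. Expected results given the shown implementation (alias-table contents beyond this hunk are assumed):

```js
normalizeQuantTag(undefined);  // 'f16' (DEFAULT_QUANT_TAG)
normalizeQuantTag('F16');      // 'f16' via lowercasing, or an alias-table hit
normalizeQuantTag('custom_x'); // 'custom_x': unknown tags pass through lowercased
```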
@@ -74,9 +74,10 @@ function findMinMax(data, offset, length) {
   return { min, max };
 }

-export function quantizeQ4KBlock(data, offset) {
+function quantizeQ4KBlockWithValidLength(data, offset, validLength = QK_K) {
   const block = new Uint8Array(QK4_K_BLOCK_SIZE);
   const blockView = new DataView(block.buffer);
+  const clampedValidLength = Math.max(0, Math.min(QK_K, Math.trunc(validLength)));

   const scales = new Float32Array(8);
   const minOffsets = new Float32Array(8);
@@ -84,14 +85,22 @@ export function quantizeQ4KBlock(data, offset) {

   for (let sb = 0; sb < 8; sb++) {
     const sbOffset = offset + sb * 32;
-    const { min, max } = findMinMax(data, sbOffset, 32);
+    const subblockStart = sb * 32;
+    const validInSubblock = Math.max(0, Math.min(32, clampedValidLength - subblockStart));
+    if (validInSubblock === 0) {
+      scales[sb] = 0;
+      minOffsets[sb] = 0;
+      continue;
+    }
+
+    const { min, max } = findMinMax(data, sbOffset, validInSubblock);

     minOffsets[sb] = -min;
     const range = max - min;
     scales[sb] = range > 0 ? range / 15 : 0;

     const invScale = scales[sb] > 0 ? 1 / scales[sb] : 0;
-    for (let i = 0; i < 32; i++) {
+    for (let i = 0; i < validInSubblock; i++) {
       const val = data[sbOffset + i];
       let q = Math.round((val - min) * invScale);
       q = Math.max(0, Math.min(15, q));
@@ -155,6 +164,10 @@ export function quantizeQ4KBlock(data, offset) {
   return block;
 }

+export function quantizeQ4KBlock(data, offset) {
+  return quantizeQ4KBlockWithValidLength(data, offset, QK_K);
+}
+
 function dequantizeQ4KBlock(block) {
   const blockView = new DataView(block.buffer, block.byteOffset);
   const result = new Float32Array(256);
@@ -245,22 +258,16 @@ export function quantizeToQ4KMRowWise(data, shape) {
   }

   const blocksPerRow = Math.ceil(cols / QK_K);
-  const paddedColsPerRow = blocksPerRow * QK_K;
   const totalBlocks = rows * blocksPerRow;

   const quantized = new Uint8Array(totalBlocks * QK4_K_BLOCK_SIZE);

   for (let row = 0; row < rows; row++) {
-    // Extract and pad this row
-    const rowData = new Float32Array(paddedColsPerRow);
-    const srcOffset = row * cols;
-    for (let c = 0; c < cols; c++) {
-      rowData[c] = data[srcOffset + c];
-    }
-
     // Quantize each block in this row
     for (let b = 0; b < blocksPerRow; b++) {
-      const block = quantizeQ4KBlock(rowData, b * QK_K);
+      const validLength = Math.max(0, Math.min(QK_K, cols - b * QK_K));
+      const srcOffset = row * cols + b * QK_K;
+      const block = quantizeQ4KBlockWithValidLength(data, srcOffset, validLength);
       const dstOffset = (row * blocksPerRow + b) * QK4_K_BLOCK_SIZE;
       quantized.set(block, dstOffset);
     }
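Net effect of the three quantizer hunks: ragged rows are no longer copied into a zero-padded scratch buffer. The final block of each row quantizes only its valid prefix, padding zeros no longer drag the partially valid sub-block's min/max (which skewed its scale whenever the real values did not straddle zero), and fully padded sub-blocks store scale 0 so they dequantize to 0. A worked example using QK_K = 256, the standard Q4_K super-block size:

```js
const QK_K = 256;
const cols = 300; // ragged row: not a multiple of 256
const blocksPerRow = Math.ceil(cols / QK_K); // 2

for (let b = 0; b < blocksPerRow; b++) {
  const validLength = Math.max(0, Math.min(QK_K, cols - b * QK_K));
  console.log(b, validLength);
}
// 0 256  -> full block: all 8 sub-blocks of 32 values quantized
// 1 44   -> sub-block 0 gets 32 valid values, sub-block 1 gets 12,
//           sub-blocks 2..7 are skipped with scales[sb] = 0
```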
@@ -1,3 +1,5 @@
+import { DEFAULT_MANIFEST_INFERENCE } from '../config/schema/index.js';
+
 function asObject(value) {
   if (value == null || typeof value !== 'object' || Array.isArray(value)) {
     return null;
@@ -50,7 +52,7 @@ function resolveScalingConfig(ropeScalingConfig, options = {}) {
   }
   return {
     ropeScalingType: null,
-    ropeScalingFactor: 1.0,
+    ropeScalingFactor: DEFAULT_MANIFEST_INFERENCE.rope.ropeScalingFactor,
     yarnBetaFast: null,
     yarnBetaSlow: null,
     yarnOriginalMaxPos: null,
@@ -58,7 +60,7 @@ function resolveScalingConfig(ropeScalingConfig, options = {}) {
   }

   let ropeScalingType = scalingType;
-  let ropeScalingFactor = 1.0;
+  let ropeScalingFactor = DEFAULT_MANIFEST_INFERENCE.rope.ropeScalingFactor;
   let yarnBetaFast = null;
   let yarnBetaSlow = null;
   let yarnOriginalMaxPos = null;
@@ -110,7 +112,7 @@ function hasScalingDirective(ropeScalingConfig) {
 function hasMeaningfulScalingConfig(resolvedScaling) {
   if (!resolvedScaling) return false;
   return resolvedScaling.ropeScalingType != null
-    || resolvedScaling.ropeScalingFactor !== 1.0
+    || resolvedScaling.ropeScalingFactor !== DEFAULT_MANIFEST_INFERENCE.rope.ropeScalingFactor
     || resolvedScaling.yarnBetaFast != null
     || resolvedScaling.yarnBetaSlow != null
     || resolvedScaling.yarnOriginalMaxPos != null;
@@ -159,7 +161,7 @@ export function buildRoPEConfig(presetInference, config) {
       ?? null,
     ropeScalingFactor: presetRoPE.ropeScalingFactor
       ?? presetAttn?.ropeScalingFactor // Deprecated location
-      ?? 1.0,
+      ?? DEFAULT_MANIFEST_INFERENCE.rope.ropeScalingFactor,
     yarnBetaFast: presetRoPE.yarnBetaFast ?? null,
     yarnBetaSlow: presetRoPE.yarnBetaSlow ?? null,
     yarnOriginalMaxPos: presetRoPE.yarnOriginalMaxPos ?? null,
@@ -223,7 +225,7 @@ export function buildRoPEConfig(presetInference, config) {
     ?? asFiniteNumber(flatRoPEParameters?.rope_theta)
     ?? asFiniteNumber(config.rope_theta)
     ?? presetInference.rope?.ropeTheta
-    ?? 10000;
+    ?? DEFAULT_MANIFEST_INFERENCE.rope.ropeTheta;

   // For Gemma 3, local sliding attention theta comes from rope_parameters.sliding_attention.
   const ropeLocalTheta = asFiniteNumber(slidingAttentionRoPE?.rope_theta)
@@ -232,7 +234,7 @@ export function buildRoPEConfig(presetInference, config) {

   const mropeInterleaved = asBoolean(flatRoPEParameters?.mrope_interleaved)
     ?? presetInference.rope?.mropeInterleaved
-    ?? false;
+    ?? DEFAULT_MANIFEST_INFERENCE.rope.mropeInterleaved;
   const mropeSection = asNumberArray(flatRoPEParameters?.mrope_section)
     ?? presetInference.rope?.mropeSection
     ?? null;
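From the literals these hunks replace, the centralized defaults can be inferred; only these three fields are confirmed by this diff:

```js
// Shape implied by the replaced literals:
const ropeDefaults = {
  ropeScalingFactor: 1.0,  // was hard-coded 1.0
  ropeTheta: 10000,        // was hard-coded 10000
  mropeInterleaved: false, // was hard-coded false
  // plus whatever other rope defaults the schema defines
};
```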
@@ -1,5 +1,6 @@
 export declare function resolveEosTokenId(options: {
   config?: Record<string, unknown> | null;
+  generationConfig?: Record<string, unknown> | null;
   tokenizer?: {
     eosTokenId?: number;
     eos_token_id?: number;
@@ -1,6 +1,8 @@
-export function resolveEosTokenId({ config, tokenizer, tokenizerJson }) {
+export function resolveEosTokenId({ config, generationConfig, tokenizer, tokenizerJson }) {
   const nestedTextConfig = getNestedTextConfig(config);
   const candidateSources = [
+    generationConfig?.eos_token_id,
+    generationConfig?.eos_token_ids,
     tokenizer?.eosTokenId,
     tokenizer?.eos_token_id,
     tokenizerJson?.specialTokens?.eos,
@@ -19,6 +21,7 @@ export function resolveEosTokenId({ config, tokenizer, tokenizerJson }) {
   }

   const eosTokenStringCandidates = [
+    generationConfig?.eos_token,
     tokenizer?.eosToken,
     tokenizer?.eos_token,
     tokenizerJson?.specialTokens?.eos_token,
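Combined with the createManifest change above, generation_config.json becomes the highest-priority EOS source. A usage sketch, assuming the first valid candidate wins (which the array ordering suggests but this hunk does not show):

```js
resolveEosTokenId({
  config: {},
  generationConfig: { eos_token_id: 2 },
  tokenizer: { eos_token_id: 0 },
  tokenizerJson: null,
}); // 2: generationConfig?.eos_token_id precedes the tokenizer candidates
```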