@fugood/llama.node 1.4.7 → 1.4.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. package/lib/binding.ts +8 -0
  2. package/package.json +15 -15
  3. package/scripts/llama.cpp.patch +23 -24
  4. package/src/LlamaContext.cpp +4 -2
  5. package/src/llama.cpp/common/CMakeLists.txt +2 -0
  6. package/src/llama.cpp/common/arg.cpp +470 -223
  7. package/src/llama.cpp/common/arg.h +43 -2
  8. package/src/llama.cpp/common/chat-peg-parser.cpp +16 -2
  9. package/src/llama.cpp/common/chat.cpp +140 -0
  10. package/src/llama.cpp/common/common.cpp +130 -67
  11. package/src/llama.cpp/common/common.h +44 -17
  12. package/src/llama.cpp/common/console.cpp +98 -18
  13. package/src/llama.cpp/common/console.h +30 -8
  14. package/src/llama.cpp/common/download.cpp +69 -25
  15. package/src/llama.cpp/common/json-schema-to-grammar.cpp +132 -3
  16. package/src/llama.cpp/common/json-schema-to-grammar.h +20 -0
  17. package/src/llama.cpp/common/log.cpp +5 -0
  18. package/src/llama.cpp/common/log.h +1 -0
  19. package/src/llama.cpp/common/peg-parser.cpp +1 -1
  20. package/src/llama.cpp/common/preset.cpp +206 -0
  21. package/src/llama.cpp/common/preset.h +32 -0
  22. package/src/llama.cpp/common/sampling.cpp +67 -54
  23. package/src/llama.cpp/common/sampling.h +8 -0
  24. package/src/llama.cpp/ggml/CMakeLists.txt +4 -0
  25. package/src/llama.cpp/ggml/include/ggml-alloc.h +9 -0
  26. package/src/llama.cpp/ggml/include/ggml-backend.h +1 -0
  27. package/src/llama.cpp/ggml/include/ggml-cpu.h +1 -0
  28. package/src/llama.cpp/ggml/include/ggml.h +7 -8
  29. package/src/llama.cpp/ggml/src/CMakeLists.txt +3 -0
  30. package/src/llama.cpp/ggml/src/ggml-cpu/CMakeLists.txt +4 -0
  31. package/src/llama.cpp/ggml/src/ggml-cpu/arch/arm/repack.cpp +285 -0
  32. package/src/llama.cpp/ggml/src/ggml-cpu/arch-fallback.h +28 -0
  33. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.c +111 -45
  34. package/src/llama.cpp/ggml/src/ggml-cpu/ggml-cpu.cpp +4 -0
  35. package/src/llama.cpp/ggml/src/ggml-cpu/repack.cpp +288 -1
  36. package/src/llama.cpp/ggml/src/ggml-cpu/repack.h +8 -0
  37. package/src/llama.cpp/ggml/src/ggml-cpu/vec.cpp +41 -1
  38. package/src/llama.cpp/ggml/src/ggml-cpu/vec.h +125 -22
  39. package/src/llama.cpp/include/llama.h +18 -1
  40. package/src/llama.cpp/src/llama-arch.cpp +1890 -2248
  41. package/src/llama.cpp/src/llama-arch.h +9 -2
  42. package/src/llama.cpp/src/llama-batch.cpp +12 -2
  43. package/src/llama.cpp/src/llama-batch.h +4 -2
  44. package/src/llama.cpp/src/llama-context.cpp +93 -23
  45. package/src/llama.cpp/src/llama-context.h +8 -2
  46. package/src/llama.cpp/src/llama-graph.cpp +84 -16
  47. package/src/llama.cpp/src/llama-graph.h +17 -4
  48. package/src/llama.cpp/src/llama-hparams.cpp +6 -0
  49. package/src/llama.cpp/src/llama-hparams.h +5 -1
  50. package/src/llama.cpp/src/llama-impl.cpp +4 -0
  51. package/src/llama.cpp/src/llama-kv-cache.cpp +90 -42
  52. package/src/llama.cpp/src/llama-kv-cache.h +19 -2
  53. package/src/llama.cpp/src/llama-memory-hybrid.cpp +1 -1
  54. package/src/llama.cpp/src/llama-mmap.cpp +123 -28
  55. package/src/llama.cpp/src/llama-mmap.h +5 -1
  56. package/src/llama.cpp/src/llama-model-loader.cpp +58 -13
  57. package/src/llama.cpp/src/llama-model-loader.h +2 -0
  58. package/src/llama.cpp/src/llama-model.cpp +110 -49
  59. package/src/llama.cpp/src/llama-model.h +1 -0
  60. package/src/llama.cpp/src/llama-quant.cpp +1 -1
  61. package/src/llama.cpp/src/llama-sampling.cpp +16 -0
  62. package/src/llama.cpp/src/llama-vocab.cpp +2 -1
  63. package/src/llama.cpp/src/llama.cpp +665 -1
  64. package/src/llama.cpp/src/models/deepseek2.cpp +9 -5
  65. package/src/llama.cpp/src/models/glm4-moe.cpp +28 -11
  66. package/src/llama.cpp/src/models/glm4.cpp +27 -4
  67. package/src/llama.cpp/src/models/models.h +5 -5
  68. package/src/llama.cpp/src/models/nemotron-h.cpp +35 -6
  69. package/src/llama.cpp/src/models/qwen2.cpp +12 -3
  70. package/src/llama.cpp/src/models/qwen3next.cpp +81 -266
@@ -3,6 +3,7 @@
3
3
  #include "llama-impl.h"
4
4
 
5
5
  #include <map>
6
+ #include <set>
6
7
 
7
8
  static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
8
9
  { LLM_ARCH_CLIP, "clip" }, // dummy, only used by llama-quantize
@@ -75,6 +76,7 @@ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
75
76
  { LLM_ARCH_JAIS, "jais" },
76
77
  { LLM_ARCH_NEMOTRON, "nemotron" },
77
78
  { LLM_ARCH_NEMOTRON_H, "nemotron_h" },
79
+ { LLM_ARCH_NEMOTRON_H_MOE, "nemotron_h_moe" },
78
80
  { LLM_ARCH_EXAONE, "exaone" },
79
81
  { LLM_ARCH_EXAONE4, "exaone4" },
80
82
  { LLM_ARCH_RWKV6, "rwkv6" },
@@ -301,2253 +303,1884 @@ static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
301
303
  { LLM_KV_TOKENIZER_MIDDLE_ID, "tokenizer.ggml.middle_token_id" },
302
304
  };
303
305
 
304
- static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_NAMES = {
305
- {
306
- LLM_ARCH_CLIP,
307
- {},
308
- },
309
- {
310
- LLM_ARCH_LLAMA,
311
- {
312
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
313
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
314
- { LLM_TENSOR_OUTPUT, "output" },
315
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
316
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
317
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
318
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
319
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
320
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
321
- { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
322
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
323
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
324
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
325
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
326
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
327
- { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" },
328
- { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" },
329
- { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" },
330
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
331
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
332
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
333
- },
334
- },
335
- {
336
- LLM_ARCH_ARCEE,
337
- {
338
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
339
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
340
- { LLM_TENSOR_OUTPUT, "output" },
341
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
342
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
343
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
344
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
345
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
346
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
347
- { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
348
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
349
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
350
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
351
- },
352
- },
353
- {
354
- LLM_ARCH_AFMOE,
355
- {
356
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
357
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
358
- { LLM_TENSOR_OUTPUT, "output" },
359
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
360
- { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" },
361
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
362
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
363
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
364
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
365
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
366
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
367
- { LLM_TENSOR_ATTN_GATE, "blk.%d.attn_gate" },
368
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
369
- { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" },
370
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
371
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
372
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
373
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
374
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
375
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
376
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
377
- { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" },
378
- { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" },
379
- { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" },
380
- { LLM_TENSOR_FFN_EXP_PROBS_B, "blk.%d.exp_probs_b" },
381
- },
382
- },
383
- {
384
- LLM_ARCH_LLAMA4,
385
- {
386
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
387
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
388
- { LLM_TENSOR_OUTPUT, "output" },
389
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
390
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
391
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
392
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
393
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
394
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
395
- { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
396
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
397
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
398
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
399
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
400
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
401
- { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" },
402
- { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" },
403
- { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" },
404
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
405
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
406
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
407
- { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" },
408
- { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" },
409
- { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" },
410
- },
411
- },
412
- {
413
- LLM_ARCH_DECI,
414
- {
415
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
416
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
417
- { LLM_TENSOR_OUTPUT, "output" },
418
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
419
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
420
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
421
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
422
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
423
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
424
- { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
425
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
426
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
427
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
428
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
429
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
430
- { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" },
431
- { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" },
432
- { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" },
433
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
434
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
435
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
436
- },
437
- },
438
- {
439
- LLM_ARCH_BAICHUAN,
440
- {
441
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
442
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
443
- { LLM_TENSOR_OUTPUT, "output" },
444
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
445
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
446
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
447
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
448
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
449
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
450
- { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
451
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
452
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
453
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
454
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
455
- },
456
- },
457
- {
458
- LLM_ARCH_FALCON,
459
- {
460
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
461
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
462
- { LLM_TENSOR_OUTPUT, "output" },
463
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
464
- { LLM_TENSOR_ATTN_NORM_2, "blk.%d.attn_norm_2" },
465
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
466
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
467
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
468
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
469
- },
470
- },
471
- {
472
- LLM_ARCH_GROK,
473
- {
474
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
475
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
476
- { LLM_TENSOR_OUTPUT, "output" },
477
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
478
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
479
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
480
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
481
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
482
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
483
- { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
484
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
485
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
486
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
487
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
488
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
489
- { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" },
490
- { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" },
491
- { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" },
492
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
493
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
494
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
495
- { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" },
496
- { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" },
497
- { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
498
- },
499
- },
500
- {
501
- LLM_ARCH_GPT2,
502
- {
503
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
504
- { LLM_TENSOR_POS_EMBD, "position_embd" },
505
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
506
- { LLM_TENSOR_OUTPUT, "output" },
507
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
508
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
509
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
510
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
511
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
512
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
513
- },
514
- },
515
- {
516
- LLM_ARCH_GPTJ,
517
- {
518
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
519
- },
520
- },
521
- {
522
- LLM_ARCH_GPTNEOX,
523
- {
524
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
525
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
526
- { LLM_TENSOR_OUTPUT, "output" },
527
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
528
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
529
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
530
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
531
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
532
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
533
- },
534
- },
535
- {
536
- LLM_ARCH_MPT,
537
- {
538
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
539
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
540
- { LLM_TENSOR_OUTPUT, "output"},
541
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
542
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
543
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
544
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
545
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
546
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
547
- { LLM_TENSOR_FFN_ACT, "blk.%d.ffn.act" },
548
- { LLM_TENSOR_POS_EMBD, "position_embd" },
549
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm"},
550
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm"},
551
- },
552
- },
553
- {
554
- LLM_ARCH_STARCODER,
555
- {
556
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
557
- { LLM_TENSOR_POS_EMBD, "position_embd" },
558
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
559
- { LLM_TENSOR_OUTPUT, "output" },
560
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
561
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
562
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
563
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
564
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
565
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
566
- },
567
- },
568
- {
569
- LLM_ARCH_REFACT,
570
- {
571
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
572
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
573
- { LLM_TENSOR_OUTPUT, "output" },
574
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
575
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
576
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
577
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
578
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
579
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
580
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
581
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
582
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
583
- },
584
- },
585
- {
586
- LLM_ARCH_BERT,
587
- {
588
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
589
- { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
590
- { LLM_TENSOR_TOKEN_TYPES, "token_types" },
591
- { LLM_TENSOR_POS_EMBD, "position_embd" },
592
- { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
593
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
594
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
595
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
596
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
597
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
598
- { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" },
599
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
600
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
601
- { LLM_TENSOR_CLS, "cls" },
602
- { LLM_TENSOR_CLS_OUT, "cls.output" },
603
- },
604
- },
605
- {
606
- LLM_ARCH_NOMIC_BERT,
607
- {
608
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
609
- { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
610
- { LLM_TENSOR_TOKEN_TYPES, "token_types" },
611
- { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
612
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
613
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
614
- { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" },
615
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
616
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
617
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
618
- },
619
- },
620
- {
621
- LLM_ARCH_NOMIC_BERT_MOE,
622
- {
623
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
624
- { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
625
- { LLM_TENSOR_TOKEN_TYPES, "token_types" },
626
- { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
627
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
628
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
629
- { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" },
630
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
631
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
632
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
633
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
634
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
635
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
636
- },
637
- },
638
- {
639
- LLM_ARCH_NEO_BERT,
640
- {
641
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
642
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
643
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
644
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
645
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
646
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
647
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
648
- { LLM_TENSOR_ENC_OUTPUT_NORM, "enc.output_norm" },
649
- { LLM_TENSOR_CLS, "cls" },
650
- { LLM_TENSOR_CLS_OUT, "cls.output" },
651
- },
652
- },
653
- {
654
- LLM_ARCH_JINA_BERT_V2,
655
- {
656
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
657
- { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
658
- { LLM_TENSOR_TOKEN_TYPES, "token_types" },
659
- { LLM_TENSOR_ATTN_NORM_2, "blk.%d.attn_norm_2" },
660
- { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
661
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
662
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
663
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
664
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
665
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
666
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
667
- { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" },
668
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
669
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
670
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
671
- { LLM_TENSOR_CLS, "cls" },
672
- },
673
- },
674
- {
675
- LLM_ARCH_JINA_BERT_V3,
676
- {
677
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
678
- { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
679
- { LLM_TENSOR_TOKEN_TYPES, "token_types" },
680
- { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
681
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
682
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
683
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
684
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
685
- { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" },
686
- },
687
- },
688
- {
689
- LLM_ARCH_BLOOM,
690
- {
691
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
692
- { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
693
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
694
- { LLM_TENSOR_OUTPUT, "output" },
695
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
696
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
697
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
698
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
699
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
700
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
701
- },
702
- },
703
- {
704
- LLM_ARCH_STABLELM,
705
- {
706
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
707
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
708
- { LLM_TENSOR_OUTPUT, "output" },
709
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
710
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
711
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
712
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
713
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
714
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
715
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
716
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
717
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
718
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
719
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
720
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
721
- },
722
- },
723
- {
724
- LLM_ARCH_QWEN,
725
- {
726
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
727
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
728
- { LLM_TENSOR_OUTPUT, "output" },
729
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
730
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
731
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
732
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
733
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
734
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
735
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
736
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
737
- },
738
- },
739
- {
740
- LLM_ARCH_QWEN2,
741
- {
742
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
743
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
744
- { LLM_TENSOR_OUTPUT, "output" },
745
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
746
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
747
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
748
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
749
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
750
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
751
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
752
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
753
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
754
- },
755
- },
756
- {
757
- LLM_ARCH_QWEN2VL,
758
- {
759
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
760
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
761
- { LLM_TENSOR_OUTPUT, "output" },
762
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
763
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
764
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
765
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
766
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
767
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
768
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
769
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
770
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
771
- },
772
- },
773
- {
774
- LLM_ARCH_QWEN2MOE,
775
- {
776
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
777
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
778
- { LLM_TENSOR_OUTPUT, "output" },
779
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
780
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
781
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
782
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
783
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
784
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
785
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
786
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
787
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
788
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
789
- { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
790
- { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" },
791
- { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" },
792
- { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" },
793
- },
794
- },
795
- {
796
- LLM_ARCH_QWEN3,
797
- {
798
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
799
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
800
- { LLM_TENSOR_OUTPUT, "output" },
801
- { LLM_TENSOR_CLS_OUT, "cls.output" },
802
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
803
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
804
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
805
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
806
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
807
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
808
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
809
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
810
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
811
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
812
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
813
- },
814
- },
815
- {
816
- LLM_ARCH_QWEN3MOE,
817
- {
818
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
819
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
820
- { LLM_TENSOR_OUTPUT, "output" },
821
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
822
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
823
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
824
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
825
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
826
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
827
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
828
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
829
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
830
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
831
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
832
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
833
- },
834
- },
835
- {
836
- LLM_ARCH_QWEN3NEXT,
837
- {
838
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
839
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
840
- { LLM_TENSOR_OUTPUT, "output" },
841
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
842
- { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" },
843
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
844
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
845
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
846
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
847
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
848
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
849
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
850
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
851
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
852
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
853
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
854
- { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
855
- { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" },
856
- { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" },
857
- { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" },
858
- { LLM_TENSOR_SSM_A_NOSCAN, "blk.%d.ssm_a" },
859
- { LLM_TENSOR_SSM_CONV1D, "blk.%d.ssm_conv1d" },
860
- { LLM_TENSOR_SSM_DT, "blk.%d.ssm_dt" },
861
- { LLM_TENSOR_SSM_BETA_ALPHA, "blk.%d.ssm_ba" },
862
- { LLM_TENSOR_SSM_IN, "blk.%d.ssm_in" },
863
- { LLM_TENSOR_SSM_NORM, "blk.%d.ssm_norm" },
864
- { LLM_TENSOR_SSM_OUT, "blk.%d.ssm_out" },
865
- },
866
- },
867
- {
868
- LLM_ARCH_QWEN3VL,
869
- {
870
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
871
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
872
- { LLM_TENSOR_OUTPUT, "output" },
873
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
874
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
875
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
876
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
877
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
878
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
879
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
880
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
881
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
882
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
883
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
884
- },
885
- },
886
- {
887
- LLM_ARCH_QWEN3VLMOE,
888
- {
889
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
890
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
891
- { LLM_TENSOR_OUTPUT, "output" },
892
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
893
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
894
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
895
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
896
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
897
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
898
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
899
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
900
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
901
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
902
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
903
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
904
- },
905
- },
906
- {
907
- LLM_ARCH_PHI2,
908
- {
909
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
910
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
911
- { LLM_TENSOR_OUTPUT, "output" },
912
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
913
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
914
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
915
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
916
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
917
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
918
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
919
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
920
- },
921
- },
922
- {
923
- LLM_ARCH_PHI3,
924
- {
925
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
926
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
927
- { LLM_TENSOR_OUTPUT, "output" },
928
- { LLM_TENSOR_ROPE_FACTORS_LONG, "rope_factors_long" },
929
- { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" },
930
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
931
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
932
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
933
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
934
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
935
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
936
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
937
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
938
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
939
- },
940
- },
941
- {
942
- LLM_ARCH_PHIMOE,
943
- {
944
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
945
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
946
- { LLM_TENSOR_OUTPUT, "output" },
947
- { LLM_TENSOR_ROPE_FACTORS_LONG, "rope_factors_long" },
948
- { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" },
949
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
950
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
951
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
952
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
953
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
954
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
955
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
956
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
957
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
958
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
959
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
960
- },
961
- },
962
- {
963
- LLM_ARCH_PLAMO,
964
- {
965
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
966
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
967
- { LLM_TENSOR_OUTPUT, "output" },
968
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
969
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
970
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
971
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
972
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
973
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
974
- { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
975
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
976
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
977
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
978
- },
979
- },
980
- {
981
- LLM_ARCH_PLAMO2,
982
- {
983
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
984
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
985
- { LLM_TENSOR_OUTPUT, "output" },
986
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
987
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
988
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
989
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
990
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
991
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
992
- { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
993
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
994
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
995
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
996
- { LLM_TENSOR_SSM_IN, "blk.%d.ssm_in" },
997
- { LLM_TENSOR_SSM_CONV1D, "blk.%d.ssm_conv1d" },
998
- { LLM_TENSOR_SSM_X, "blk.%d.ssm_x" },
999
- { LLM_TENSOR_SSM_DT, "blk.%d.ssm_dt" },
1000
- { LLM_TENSOR_SSM_A, "blk.%d.ssm_a" },
1001
- { LLM_TENSOR_SSM_D, "blk.%d.ssm_d" },
1002
- { LLM_TENSOR_SSM_OUT, "blk.%d.ssm_out" },
1003
- { LLM_TENSOR_SSM_DT_NORM, "blk.%d.ssm_dt_norm" },
1004
- { LLM_TENSOR_SSM_B_NORM, "blk.%d.ssm_b_norm" },
1005
- { LLM_TENSOR_SSM_C_NORM, "blk.%d.ssm_c_norm" },
1006
- { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" },
1007
- { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" },
1008
- },
1009
- },
1010
- {
1011
- LLM_ARCH_CODESHELL,
1012
- {
1013
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1014
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1015
- { LLM_TENSOR_OUTPUT, "output" },
1016
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
1017
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1018
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1019
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1020
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1021
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
1022
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1023
- { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
1024
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1025
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1026
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1027
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1028
- },
1029
- },
1030
- {
1031
- LLM_ARCH_ORION,
1032
- {
1033
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1034
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1035
- { LLM_TENSOR_OUTPUT, "output" },
1036
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
1037
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1038
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1039
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1040
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1041
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1042
- { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
1043
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1044
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1045
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1046
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1047
- },
1048
- },
1049
- {
1050
- LLM_ARCH_INTERNLM2,
1051
- {
1052
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1053
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1054
- { LLM_TENSOR_OUTPUT, "output" },
1055
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1056
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1057
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1058
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1059
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1060
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1061
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1062
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1063
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1064
- },
1065
- },
1066
- {
1067
- LLM_ARCH_MINICPM,
1068
- {
1069
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1070
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1071
- { LLM_TENSOR_OUTPUT, "output" },
1072
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
1073
- { LLM_TENSOR_ROPE_FACTORS_LONG, "rope_factors_long" },
1074
- { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" },
1075
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1076
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1077
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1078
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1079
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1080
- { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
1081
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
1082
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1083
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1084
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1085
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1086
- { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" },
1087
- { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" },
1088
- { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" },
1089
- },
1090
- },
1091
- {
1092
- LLM_ARCH_MINICPM3,
1093
- {
1094
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1095
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1096
- { LLM_TENSOR_OUTPUT, "output" },
1097
- { LLM_TENSOR_ROPE_FACTORS_LONG, "rope_factors_long" },
1098
- { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" },
1099
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1100
- { LLM_TENSOR_ATTN_Q_A_NORM, "blk.%d.attn_q_a_norm" },
1101
- { LLM_TENSOR_ATTN_KV_A_NORM, "blk.%d.attn_kv_a_norm" },
1102
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1103
- { LLM_TENSOR_ATTN_Q_A, "blk.%d.attn_q_a" },
1104
- { LLM_TENSOR_ATTN_Q_B, "blk.%d.attn_q_b" },
1105
- { LLM_TENSOR_ATTN_KV_A_MQA, "blk.%d.attn_kv_a_mqa" },
1106
- { LLM_TENSOR_ATTN_KV_B, "blk.%d.attn_kv_b" },
1107
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1108
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1109
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1110
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1111
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1112
- },
1113
- },
1114
- {
1115
- LLM_ARCH_GEMMA,
1116
- {
1117
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1118
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1119
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1120
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1121
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1122
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1123
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1124
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1125
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1126
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1127
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1128
- },
1129
- },
1130
- {
1131
- LLM_ARCH_GEMMA2,
1132
- {
1133
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1134
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1135
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1136
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1137
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1138
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1139
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1140
- { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" },
1141
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1142
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1143
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1144
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1145
- { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" },
1146
- },
1147
- },
1148
- {
1149
- LLM_ARCH_GEMMA3,
1150
- {
1151
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1152
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1153
- { LLM_TENSOR_OUTPUT, "output" },
1154
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1155
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1156
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
1157
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1158
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
1159
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1160
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1161
- { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" },
1162
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1163
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1164
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1165
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1166
- { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" },
1167
- },
1168
- },
1169
- {
1170
- LLM_ARCH_GEMMA3N,
1171
- {
1172
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1173
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1174
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1175
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1176
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
1177
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1178
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
1179
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1180
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1181
- { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" },
1182
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1183
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1184
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1185
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1186
- { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" },
1187
- { LLM_TENSOR_PER_LAYER_TOKEN_EMBD, "per_layer_token_embd" },
1188
- { LLM_TENSOR_PER_LAYER_MODEL_PROJ, "per_layer_model_proj" },
1189
- { LLM_TENSOR_PER_LAYER_PROJ_NORM, "per_layer_proj_norm" },
1190
- { LLM_TENSOR_ALTUP_UNEMBD_PROJ, "altup_unembd_proj" },
1191
- { LLM_TENSOR_ALTUP_PROJ, "altup_proj" },
1192
- { LLM_TENSOR_PER_LAYER_INP_GATE, "blk.%d.inp_gate" },
1193
- { LLM_TENSOR_PER_LAYER_PROJ, "blk.%d.proj" },
1194
- { LLM_TENSOR_PER_LAYER_POST_NORM, "blk.%d.post_norm" },
1195
- { LLM_TENSOR_ALTUP_CORRECT_COEF, "blk.%d.altup_correct_coef" },
1196
- { LLM_TENSOR_ALTUP_CORRECT_SCALE, "blk.%d.altup_correct_scale" },
1197
- { LLM_TENSOR_ALTUP_PREDICT_COEF, "blk.%d.altup_predict_coef" },
1198
- { LLM_TENSOR_ALTUP_ROUTER, "blk.%d.altup_router" },
1199
- { LLM_TENSOR_ALTUP_ROUTER_NORM, "blk.%d.altup_router_norm" },
1200
- { LLM_TENSOR_LAUREL_L, "blk.%d.laurel_l" },
1201
- { LLM_TENSOR_LAUREL_R, "blk.%d.laurel_r" },
1202
- { LLM_TENSOR_LAUREL_POST_NORM, "blk.%d.laurel_post_norm" },
1203
- },
1204
- },
1205
- {
1206
- LLM_ARCH_GEMMA_EMBEDDING,
1207
- {
1208
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1209
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1210
- { LLM_TENSOR_OUTPUT, "output" },
1211
- { LLM_TENSOR_DENSE_2_OUT, "dense_2" },
1212
- { LLM_TENSOR_DENSE_3_OUT, "dense_3" },
1213
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1214
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1215
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
1216
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1217
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
1218
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1219
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1220
- { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" },
1221
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1222
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1223
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1224
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1225
- { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" },
1226
- },
1227
- },
1228
- {
1229
- LLM_ARCH_STARCODER2,
1230
- {
1231
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1232
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1233
- { LLM_TENSOR_OUTPUT, "output" },
1234
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
1235
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1236
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1237
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1238
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1239
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1240
- { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
1241
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1242
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1243
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1244
- },
1245
- },
1246
- {
1247
- LLM_ARCH_MAMBA,
1248
- {
1249
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1250
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1251
- { LLM_TENSOR_OUTPUT, "output" },
1252
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1253
- { LLM_TENSOR_SSM_IN, "blk.%d.ssm_in" },
1254
- { LLM_TENSOR_SSM_CONV1D, "blk.%d.ssm_conv1d" },
1255
- { LLM_TENSOR_SSM_X, "blk.%d.ssm_x" },
1256
- { LLM_TENSOR_SSM_DT, "blk.%d.ssm_dt" },
1257
- { LLM_TENSOR_SSM_A, "blk.%d.ssm_a" },
1258
- { LLM_TENSOR_SSM_D, "blk.%d.ssm_d" },
1259
- { LLM_TENSOR_SSM_OUT, "blk.%d.ssm_out" },
1260
- },
1261
- },
1262
- {
1263
- LLM_ARCH_MAMBA2,
1264
- {
1265
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1266
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1267
- { LLM_TENSOR_OUTPUT, "output" },
1268
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1269
- { LLM_TENSOR_SSM_IN, "blk.%d.ssm_in" },
1270
- { LLM_TENSOR_SSM_CONV1D, "blk.%d.ssm_conv1d" },
1271
- { LLM_TENSOR_SSM_DT, "blk.%d.ssm_dt" },
1272
- { LLM_TENSOR_SSM_A, "blk.%d.ssm_a" },
1273
- { LLM_TENSOR_SSM_D, "blk.%d.ssm_d" },
1274
- { LLM_TENSOR_SSM_NORM, "blk.%d.ssm_norm" },
1275
- { LLM_TENSOR_SSM_OUT, "blk.%d.ssm_out" },
1276
- },
1277
- },
1278
- {
1279
- LLM_ARCH_JAMBA,
1280
- {
1281
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1282
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1283
- { LLM_TENSOR_OUTPUT, "output" },
1284
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1285
- { LLM_TENSOR_SSM_IN, "blk.%d.ssm_in" },
1286
- { LLM_TENSOR_SSM_CONV1D, "blk.%d.ssm_conv1d" },
1287
- { LLM_TENSOR_SSM_X, "blk.%d.ssm_x" },
1288
- { LLM_TENSOR_SSM_DT, "blk.%d.ssm_dt" },
1289
- { LLM_TENSOR_SSM_DT_NORM, "blk.%d.ssm_dt_norm" },
1290
- { LLM_TENSOR_SSM_A, "blk.%d.ssm_a" },
1291
- { LLM_TENSOR_SSM_B_NORM, "blk.%d.ssm_b_norm" },
1292
- { LLM_TENSOR_SSM_C_NORM, "blk.%d.ssm_c_norm" },
1293
- { LLM_TENSOR_SSM_D, "blk.%d.ssm_d" },
1294
- { LLM_TENSOR_SSM_OUT, "blk.%d.ssm_out" },
1295
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1296
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1297
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1298
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1299
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
1300
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1301
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1302
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1303
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1304
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
1305
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
1306
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
1307
- },
1308
- },
1309
- {
1310
- LLM_ARCH_FALCON_H1,
1311
- {
1312
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1313
- { LLM_TENSOR_OUTPUT, "output" },
1314
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1315
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1316
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1317
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1318
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1319
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1320
- { LLM_TENSOR_SSM_IN, "blk.%d.ssm_in" },
1321
- { LLM_TENSOR_SSM_CONV1D, "blk.%d.ssm_conv1d" },
1322
- { LLM_TENSOR_SSM_DT, "blk.%d.ssm_dt" },
1323
- { LLM_TENSOR_SSM_A, "blk.%d.ssm_a" },
1324
- { LLM_TENSOR_SSM_D, "blk.%d.ssm_d" },
1325
- { LLM_TENSOR_SSM_NORM, "blk.%d.ssm_norm" },
1326
- { LLM_TENSOR_SSM_OUT, "blk.%d.ssm_out" },
1327
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1328
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1329
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1330
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1331
- },
1332
- },
1333
- {
1334
- LLM_ARCH_XVERSE,
1335
- {
1336
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1337
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1338
- { LLM_TENSOR_OUTPUT, "output" },
1339
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
1340
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1341
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1342
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1343
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1344
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1345
- { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
1346
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1347
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1348
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1349
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1350
- },
1351
- },
1352
- {
1353
- LLM_ARCH_COMMAND_R,
1354
- {
1355
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1356
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1357
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1358
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1359
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1360
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1361
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1362
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1363
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1364
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1365
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
1366
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
1367
- },
1368
- },
1369
- {
1370
- LLM_ARCH_COHERE2,
1371
- {
1372
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1373
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1374
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1375
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1376
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1377
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1378
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1379
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1380
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1381
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1382
- },
1383
- },
1384
- {
1385
- LLM_ARCH_DBRX,
1386
- {
1387
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1388
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1389
- { LLM_TENSOR_OUTPUT, "output" },
1390
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
1391
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1392
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1393
- { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
1394
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
1395
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
1396
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
1397
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
1398
- },
1399
- },
1400
- {
1401
- LLM_ARCH_OLMO,
1402
- {
1403
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1404
- { LLM_TENSOR_OUTPUT, "output" },
1405
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1406
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1407
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1408
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1409
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1410
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1411
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1412
- },
1413
- },
1414
- {
1415
- LLM_ARCH_OLMO2,
1416
- {
1417
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1418
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1419
- { LLM_TENSOR_OUTPUT, "output" },
1420
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1421
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1422
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1423
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1424
- { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" },
1425
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
1426
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
1427
- { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" },
1428
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1429
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1430
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1431
- },
1432
- },
1433
- {
1434
- LLM_ARCH_OLMOE,
1435
- {
1436
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1437
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1438
- { LLM_TENSOR_OUTPUT, "output" },
1439
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1440
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1441
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1442
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1443
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1444
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
1445
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
1446
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1447
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
1448
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
1449
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
1450
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
1451
- },
1452
- },
1453
- {
1454
- LLM_ARCH_OPENELM,
1455
- {
1456
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1457
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1458
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1459
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
1460
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
1461
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
1462
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1463
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1464
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1465
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1466
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1467
- },
1468
- },
1469
- {
1470
- LLM_ARCH_ARCTIC,
1471
- {
1472
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1473
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1474
- { LLM_TENSOR_OUTPUT, "output" },
1475
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1476
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1477
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1478
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1479
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1480
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
1481
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1482
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1483
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1484
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1485
- { LLM_TENSOR_FFN_NORM_EXPS, "blk.%d.ffn_norm_exps" },
1486
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
1487
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
1488
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
1489
- },
1490
- },
1491
- {
1492
- LLM_ARCH_DEEPSEEK,
1493
- {
1494
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1495
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1496
- { LLM_TENSOR_OUTPUT, "output" },
1497
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
1498
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1499
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1500
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1501
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1502
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1503
- { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
1504
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
1505
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1506
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1507
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1508
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1509
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
1510
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
1511
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
1512
- { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
1513
- { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" },
1514
- { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" },
1515
- { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" },
1516
- },
1517
- },
1518
- {
1519
- LLM_ARCH_DEEPSEEK2,
1520
- {
1521
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1522
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1523
- { LLM_TENSOR_OUTPUT, "output" },
1524
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1525
- { LLM_TENSOR_ATTN_Q_A_NORM, "blk.%d.attn_q_a_norm" },
1526
- { LLM_TENSOR_ATTN_KV_A_NORM, "blk.%d.attn_kv_a_norm" },
1527
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1528
- { LLM_TENSOR_ATTN_Q_A, "blk.%d.attn_q_a" },
1529
- { LLM_TENSOR_ATTN_Q_B, "blk.%d.attn_q_b" },
1530
- { LLM_TENSOR_ATTN_KV_A_MQA, "blk.%d.attn_kv_a_mqa" },
1531
- { LLM_TENSOR_ATTN_KV_B, "blk.%d.attn_kv_b" },
1532
- { LLM_TENSOR_ATTN_K_B, "blk.%d.attn_k_b" },
1533
- { LLM_TENSOR_ATTN_V_B, "blk.%d.attn_v_b" },
1534
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1535
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1536
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1537
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1538
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1539
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
1540
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
1541
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
1542
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
1543
- { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
1544
- { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" },
1545
- { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" },
1546
- { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" },
1547
- { LLM_TENSOR_FFN_EXP_PROBS_B, "blk.%d.exp_probs_b" },
1548
- },
1549
- },
1550
- {
1551
- LLM_ARCH_PLM,
1552
- {
1553
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1554
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1555
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1556
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1557
- { LLM_TENSOR_ATTN_KV_A_MQA, "blk.%d.attn_kv_a_mqa" },
1558
- { LLM_TENSOR_ATTN_KV_A_NORM, "blk.%d.attn_kv_a_norm" },
1559
- { LLM_TENSOR_ATTN_KV_B, "blk.%d.attn_kv_b" },
1560
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1561
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1562
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1563
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1564
- },
1565
- },
1566
- {
1567
- LLM_ARCH_CHATGLM,
1568
- {
1569
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1570
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
1571
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1572
- { LLM_TENSOR_OUTPUT, "output" },
1573
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1574
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
1575
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1576
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1577
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1578
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1579
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1580
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1581
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1582
- },
1583
- },
1584
- {
1585
- LLM_ARCH_GLM4,
1586
- {
1587
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1588
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
1589
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1590
- { LLM_TENSOR_OUTPUT, "output" },
1591
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1592
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1593
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1594
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1595
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1596
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1597
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1598
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1599
- { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" },
1600
- { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" },
1601
- },
1602
- },
1603
- {
1604
- LLM_ARCH_GLM4_MOE,
1605
- {
1606
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1607
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1608
- { LLM_TENSOR_OUTPUT, "output" },
1609
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1610
- { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" },
1611
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1612
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1613
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1614
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1615
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
1616
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
1617
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1618
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1619
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1620
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
1621
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
1622
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
1623
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
1624
- { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" },
1625
- { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" },
1626
- { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" },
1627
- { LLM_TENSOR_FFN_EXP_PROBS_B, "blk.%d.exp_probs_b" },
1628
- // NextN/MTP tensors - preserved but unused (in final layer, dynamic layer number)
1629
- { LLM_TENSOR_NEXTN_EH_PROJ, "blk.%d.nextn.eh_proj" },
1630
- { LLM_TENSOR_NEXTN_EMBED_TOKENS, "blk.%d.nextn.embed_tokens" },
1631
- { LLM_TENSOR_NEXTN_ENORM, "blk.%d.nextn.enorm" },
1632
- { LLM_TENSOR_NEXTN_HNORM, "blk.%d.nextn.hnorm" },
1633
- { LLM_TENSOR_NEXTN_SHARED_HEAD_HEAD, "blk.%d.nextn.shared_head_head" },
1634
- { LLM_TENSOR_NEXTN_SHARED_HEAD_NORM, "blk.%d.nextn.shared_head_norm" },
1635
- },
1636
- },
1637
- {
1638
- LLM_ARCH_BITNET,
1639
- {
1640
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1641
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1642
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1643
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1644
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1645
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1646
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1647
- { LLM_TENSOR_ATTN_SUB_NORM, "blk.%d.attn_sub_norm" },
1648
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1649
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1650
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1651
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1652
- { LLM_TENSOR_FFN_SUB_NORM, "blk.%d.ffn_sub_norm" },
1653
- },
1654
- },
1655
- {
1656
- LLM_ARCH_T5,
1657
- {
1658
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1659
- { LLM_TENSOR_OUTPUT, "output" },
1660
- { LLM_TENSOR_DEC_OUTPUT_NORM, "dec.output_norm" },
1661
- { LLM_TENSOR_DEC_ATTN_NORM, "dec.blk.%d.attn_norm" },
1662
- { LLM_TENSOR_DEC_ATTN_Q, "dec.blk.%d.attn_q" },
1663
- { LLM_TENSOR_DEC_ATTN_K, "dec.blk.%d.attn_k" },
1664
- { LLM_TENSOR_DEC_ATTN_V, "dec.blk.%d.attn_v" },
1665
- { LLM_TENSOR_DEC_ATTN_OUT, "dec.blk.%d.attn_o" },
1666
- { LLM_TENSOR_DEC_ATTN_REL_B, "dec.blk.%d.attn_rel_b" },
1667
- { LLM_TENSOR_DEC_CROSS_ATTN_NORM, "dec.blk.%d.cross_attn_norm" },
1668
- { LLM_TENSOR_DEC_CROSS_ATTN_Q, "dec.blk.%d.cross_attn_q" },
1669
- { LLM_TENSOR_DEC_CROSS_ATTN_K, "dec.blk.%d.cross_attn_k" },
1670
- { LLM_TENSOR_DEC_CROSS_ATTN_V, "dec.blk.%d.cross_attn_v" },
1671
- { LLM_TENSOR_DEC_CROSS_ATTN_OUT, "dec.blk.%d.cross_attn_o" },
1672
- { LLM_TENSOR_DEC_CROSS_ATTN_REL_B, "dec.blk.%d.cross_attn_rel_b" },
1673
- { LLM_TENSOR_DEC_FFN_NORM, "dec.blk.%d.ffn_norm" },
1674
- { LLM_TENSOR_DEC_FFN_GATE, "dec.blk.%d.ffn_gate" },
1675
- { LLM_TENSOR_DEC_FFN_DOWN, "dec.blk.%d.ffn_down" },
1676
- { LLM_TENSOR_DEC_FFN_UP, "dec.blk.%d.ffn_up" },
1677
- { LLM_TENSOR_ENC_OUTPUT_NORM, "enc.output_norm" },
1678
- { LLM_TENSOR_ENC_ATTN_NORM, "enc.blk.%d.attn_norm" },
1679
- { LLM_TENSOR_ENC_ATTN_Q, "enc.blk.%d.attn_q" },
1680
- { LLM_TENSOR_ENC_ATTN_K, "enc.blk.%d.attn_k" },
1681
- { LLM_TENSOR_ENC_ATTN_V, "enc.blk.%d.attn_v" },
1682
- { LLM_TENSOR_ENC_ATTN_OUT, "enc.blk.%d.attn_o" },
1683
- { LLM_TENSOR_ENC_ATTN_REL_B, "enc.blk.%d.attn_rel_b" },
1684
- { LLM_TENSOR_ENC_FFN_NORM, "enc.blk.%d.ffn_norm" },
1685
- { LLM_TENSOR_ENC_FFN_GATE, "enc.blk.%d.ffn_gate" },
1686
- { LLM_TENSOR_ENC_FFN_DOWN, "enc.blk.%d.ffn_down" },
1687
- { LLM_TENSOR_ENC_FFN_UP, "enc.blk.%d.ffn_up" },
1688
- },
1689
- },
1690
- {
1691
- LLM_ARCH_T5ENCODER,
1692
- {
1693
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1694
- { LLM_TENSOR_OUTPUT, "output" },
1695
- { LLM_TENSOR_ENC_OUTPUT_NORM, "enc.output_norm" },
1696
- { LLM_TENSOR_ENC_ATTN_NORM, "enc.blk.%d.attn_norm" },
1697
- { LLM_TENSOR_ENC_ATTN_Q, "enc.blk.%d.attn_q" },
1698
- { LLM_TENSOR_ENC_ATTN_K, "enc.blk.%d.attn_k" },
1699
- { LLM_TENSOR_ENC_ATTN_V, "enc.blk.%d.attn_v" },
1700
- { LLM_TENSOR_ENC_ATTN_OUT, "enc.blk.%d.attn_o" },
1701
- { LLM_TENSOR_ENC_ATTN_REL_B, "enc.blk.%d.attn_rel_b" },
1702
- { LLM_TENSOR_ENC_FFN_NORM, "enc.blk.%d.ffn_norm" },
1703
- { LLM_TENSOR_ENC_FFN_GATE, "enc.blk.%d.ffn_gate" },
1704
- { LLM_TENSOR_ENC_FFN_DOWN, "enc.blk.%d.ffn_down" },
1705
- { LLM_TENSOR_ENC_FFN_UP, "enc.blk.%d.ffn_up" },
1706
- },
1707
- },
1708
- {
1709
- LLM_ARCH_JAIS,
1710
- {
1711
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1712
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1713
- { LLM_TENSOR_OUTPUT, "output" },
1714
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1715
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
1716
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1717
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1718
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1719
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1720
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1721
- },
1722
- },
1723
- {
1724
- LLM_ARCH_NEMOTRON,
1725
- {
1726
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1727
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1728
- { LLM_TENSOR_OUTPUT, "output" },
1729
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
1730
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1731
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1732
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1733
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1734
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1735
- { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
1736
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1737
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1738
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1739
- },
1740
- },
1741
- {
1742
- LLM_ARCH_NEMOTRON_H,
1743
- {
1744
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1745
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1746
- { LLM_TENSOR_OUTPUT, "output" },
1747
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1748
- // mamba(2) ssm layers
1749
- { LLM_TENSOR_SSM_IN, "blk.%d.ssm_in" },
1750
- { LLM_TENSOR_SSM_CONV1D, "blk.%d.ssm_conv1d" },
1751
- { LLM_TENSOR_SSM_DT, "blk.%d.ssm_dt" },
1752
- { LLM_TENSOR_SSM_A, "blk.%d.ssm_a" },
1753
- { LLM_TENSOR_SSM_D, "blk.%d.ssm_d" },
1754
- { LLM_TENSOR_SSM_NORM, "blk.%d.ssm_norm" },
1755
- { LLM_TENSOR_SSM_OUT, "blk.%d.ssm_out" },
1756
- // attention layers
1757
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1758
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1759
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1760
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1761
- // dense FFN
1762
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1763
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1764
- },
1765
- },
1766
- {
1767
- LLM_ARCH_EXAONE,
1768
- {
1769
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1770
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1771
- { LLM_TENSOR_OUTPUT, "output" },
1772
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
1773
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1774
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1775
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1776
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1777
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1778
- { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
1779
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1780
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1781
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1782
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1783
- },
1784
- },
1785
- {
1786
- LLM_ARCH_EXAONE4,
1787
- {
1788
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1789
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1790
- { LLM_TENSOR_OUTPUT, "output" },
1791
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
1792
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1793
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
1794
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1795
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
1796
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1797
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1798
- { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" },
1799
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1800
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1801
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1802
- { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" },
1803
- }
1804
- },
1805
- {
1806
- LLM_ARCH_RWKV6,
1807
- {
1808
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1809
- { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
1810
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1811
- { LLM_TENSOR_OUTPUT, "output" },
1812
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1813
- { LLM_TENSOR_ATTN_NORM_2, "blk.%d.attn_norm_2" },
1814
- { LLM_TENSOR_TIME_MIX_W1, "blk.%d.time_mix_w1" },
1815
- { LLM_TENSOR_TIME_MIX_W2, "blk.%d.time_mix_w2" },
1816
- { LLM_TENSOR_TIME_MIX_LERP_X, "blk.%d.time_mix_lerp_x" },
1817
- { LLM_TENSOR_TIME_MIX_LERP_W, "blk.%d.time_mix_lerp_w" },
1818
- { LLM_TENSOR_TIME_MIX_LERP_K, "blk.%d.time_mix_lerp_k" },
1819
- { LLM_TENSOR_TIME_MIX_LERP_V, "blk.%d.time_mix_lerp_v" },
1820
- { LLM_TENSOR_TIME_MIX_LERP_R, "blk.%d.time_mix_lerp_r" },
1821
- { LLM_TENSOR_TIME_MIX_LERP_G, "blk.%d.time_mix_lerp_g" },
1822
- { LLM_TENSOR_TIME_MIX_LERP_FUSED, "blk.%d.time_mix_lerp_fused" },
1823
- { LLM_TENSOR_TIME_MIX_FIRST, "blk.%d.time_mix_first" },
1824
- { LLM_TENSOR_TIME_MIX_DECAY, "blk.%d.time_mix_decay" },
1825
- { LLM_TENSOR_TIME_MIX_DECAY_W1, "blk.%d.time_mix_decay_w1" },
1826
- { LLM_TENSOR_TIME_MIX_DECAY_W2, "blk.%d.time_mix_decay_w2" },
1827
- { LLM_TENSOR_TIME_MIX_KEY, "blk.%d.time_mix_key" },
1828
- { LLM_TENSOR_TIME_MIX_VALUE, "blk.%d.time_mix_value" },
1829
- { LLM_TENSOR_TIME_MIX_RECEPTANCE, "blk.%d.time_mix_receptance" },
1830
- { LLM_TENSOR_TIME_MIX_GATE, "blk.%d.time_mix_gate" },
1831
- { LLM_TENSOR_TIME_MIX_LN, "blk.%d.time_mix_ln" },
1832
- { LLM_TENSOR_TIME_MIX_OUTPUT, "blk.%d.time_mix_output" },
1833
- { LLM_TENSOR_CHANNEL_MIX_LERP_K, "blk.%d.channel_mix_lerp_k" },
1834
- { LLM_TENSOR_CHANNEL_MIX_LERP_R, "blk.%d.channel_mix_lerp_r" },
1835
- { LLM_TENSOR_CHANNEL_MIX_KEY, "blk.%d.channel_mix_key" },
1836
- { LLM_TENSOR_CHANNEL_MIX_VALUE, "blk.%d.channel_mix_value" },
1837
- { LLM_TENSOR_CHANNEL_MIX_RECEPTANCE, "blk.%d.channel_mix_receptance" },
1838
- },
1839
- },
1840
- {
1841
- LLM_ARCH_RWKV6QWEN2,
1842
- {
1843
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1844
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1845
- { LLM_TENSOR_OUTPUT, "output" },
1846
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1847
- { LLM_TENSOR_TIME_MIX_W1, "blk.%d.time_mix_w1" },
1848
- { LLM_TENSOR_TIME_MIX_W2, "blk.%d.time_mix_w2" },
1849
- { LLM_TENSOR_TIME_MIX_LERP_X, "blk.%d.time_mix_lerp_x" },
1850
- { LLM_TENSOR_TIME_MIX_LERP_FUSED, "blk.%d.time_mix_lerp_fused" },
1851
- { LLM_TENSOR_TIME_MIX_FIRST, "blk.%d.time_mix_first" },
1852
- { LLM_TENSOR_TIME_MIX_DECAY, "blk.%d.time_mix_decay" },
1853
- { LLM_TENSOR_TIME_MIX_DECAY_W1, "blk.%d.time_mix_decay_w1" },
1854
- { LLM_TENSOR_TIME_MIX_DECAY_W2, "blk.%d.time_mix_decay_w2" },
1855
- { LLM_TENSOR_TIME_MIX_KEY, "blk.%d.time_mix_key" },
1856
- { LLM_TENSOR_TIME_MIX_VALUE, "blk.%d.time_mix_value" },
1857
- { LLM_TENSOR_TIME_MIX_RECEPTANCE, "blk.%d.time_mix_receptance" },
1858
- { LLM_TENSOR_TIME_MIX_GATE, "blk.%d.time_mix_gate" },
1859
- { LLM_TENSOR_TIME_MIX_OUTPUT, "blk.%d.time_mix_output" },
1860
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1861
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1862
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1863
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1864
- },
1865
- },
1866
- {
1867
- LLM_ARCH_RWKV7,
1868
- {
1869
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1870
- { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
1871
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1872
- { LLM_TENSOR_OUTPUT, "output" },
1873
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1874
- { LLM_TENSOR_ATTN_NORM_2, "blk.%d.attn_norm_2" },
1875
- { LLM_TENSOR_TIME_MIX_W0, "blk.%d.time_mix_w0" },
1876
- { LLM_TENSOR_TIME_MIX_W1, "blk.%d.time_mix_w1" },
1877
- { LLM_TENSOR_TIME_MIX_W2, "blk.%d.time_mix_w2" },
1878
- { LLM_TENSOR_TIME_MIX_A0, "blk.%d.time_mix_a0" },
1879
- { LLM_TENSOR_TIME_MIX_A1, "blk.%d.time_mix_a1" },
1880
- { LLM_TENSOR_TIME_MIX_A2, "blk.%d.time_mix_a2" },
1881
- { LLM_TENSOR_TIME_MIX_V0, "blk.%d.time_mix_v0" },
1882
- { LLM_TENSOR_TIME_MIX_V1, "blk.%d.time_mix_v1" },
1883
- { LLM_TENSOR_TIME_MIX_V2, "blk.%d.time_mix_v2" },
1884
- { LLM_TENSOR_TIME_MIX_G1, "blk.%d.time_mix_g1" },
1885
- { LLM_TENSOR_TIME_MIX_G2, "blk.%d.time_mix_g2" },
1886
- { LLM_TENSOR_TIME_MIX_K_K, "blk.%d.time_mix_k_k" },
1887
- { LLM_TENSOR_TIME_MIX_K_A, "blk.%d.time_mix_k_a" },
1888
- { LLM_TENSOR_TIME_MIX_R_K, "blk.%d.time_mix_r_k" },
1889
- { LLM_TENSOR_TIME_MIX_LERP_FUSED, "blk.%d.time_mix_lerp_fused" },
1890
- { LLM_TENSOR_TIME_MIX_KEY, "blk.%d.time_mix_key" },
1891
- { LLM_TENSOR_TIME_MIX_VALUE, "blk.%d.time_mix_value" },
1892
- { LLM_TENSOR_TIME_MIX_RECEPTANCE, "blk.%d.time_mix_receptance" },
1893
- { LLM_TENSOR_TIME_MIX_LN, "blk.%d.time_mix_ln" },
1894
- { LLM_TENSOR_TIME_MIX_OUTPUT, "blk.%d.time_mix_output" },
1895
- { LLM_TENSOR_CHANNEL_MIX_LERP_K, "blk.%d.channel_mix_lerp_k" },
1896
- { LLM_TENSOR_CHANNEL_MIX_KEY, "blk.%d.channel_mix_key" },
1897
- { LLM_TENSOR_CHANNEL_MIX_VALUE, "blk.%d.channel_mix_value" },
1898
- },
1899
- },
1900
- {
1901
- LLM_ARCH_ARWKV7,
1902
- {
1903
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1904
- { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
1905
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1906
- { LLM_TENSOR_OUTPUT, "output" },
1907
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1908
- { LLM_TENSOR_TIME_MIX_W0, "blk.%d.time_mix_w0" },
1909
- { LLM_TENSOR_TIME_MIX_W1, "blk.%d.time_mix_w1" },
1910
- { LLM_TENSOR_TIME_MIX_W2, "blk.%d.time_mix_w2" },
1911
- { LLM_TENSOR_TIME_MIX_A0, "blk.%d.time_mix_a0" },
1912
- { LLM_TENSOR_TIME_MIX_A1, "blk.%d.time_mix_a1" },
1913
- { LLM_TENSOR_TIME_MIX_A2, "blk.%d.time_mix_a2" },
1914
- { LLM_TENSOR_TIME_MIX_V0, "blk.%d.time_mix_v0" },
1915
- { LLM_TENSOR_TIME_MIX_V1, "blk.%d.time_mix_v1" },
1916
- { LLM_TENSOR_TIME_MIX_V2, "blk.%d.time_mix_v2" },
1917
- { LLM_TENSOR_TIME_MIX_G1, "blk.%d.time_mix_g1" },
1918
- { LLM_TENSOR_TIME_MIX_G2, "blk.%d.time_mix_g2" },
1919
- { LLM_TENSOR_TIME_MIX_K_K, "blk.%d.time_mix_k_k" },
1920
- { LLM_TENSOR_TIME_MIX_K_A, "blk.%d.time_mix_k_a" },
1921
- { LLM_TENSOR_TIME_MIX_R_K, "blk.%d.time_mix_r_k" },
1922
- { LLM_TENSOR_TIME_MIX_LERP_FUSED, "blk.%d.time_mix_lerp_fused" },
1923
- { LLM_TENSOR_TIME_MIX_KEY, "blk.%d.time_mix_key" },
1924
- { LLM_TENSOR_TIME_MIX_VALUE, "blk.%d.time_mix_value" },
1925
- { LLM_TENSOR_TIME_MIX_RECEPTANCE, "blk.%d.time_mix_receptance" },
1926
- { LLM_TENSOR_TIME_MIX_LN, "blk.%d.time_mix_ln" },
1927
- { LLM_TENSOR_TIME_MIX_OUTPUT, "blk.%d.time_mix_output" },
1928
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1929
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1930
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1931
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1932
- },
1933
- },
1934
- {
1935
- LLM_ARCH_GRANITE,
1936
- {
1937
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1938
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1939
- { LLM_TENSOR_OUTPUT, "output" },
1940
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1941
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1942
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1943
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1944
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1945
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1946
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1947
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1948
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1949
- },
1950
- },
1951
- {
1952
- LLM_ARCH_GRANITE_MOE,
1953
- {
1954
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1955
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1956
- { LLM_TENSOR_OUTPUT, "output" },
1957
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1958
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1959
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1960
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1961
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1962
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1963
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
1964
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
1965
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
1966
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
1967
- { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" },
1968
- { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" },
1969
- { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" },
1970
- },
1971
- },
1972
- {
1973
- LLM_ARCH_GRANITE_HYBRID,
1974
- {
1975
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
1976
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
1977
- { LLM_TENSOR_OUTPUT, "output" },
1978
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
1979
- // mamba(2) ssm layers
1980
- { LLM_TENSOR_SSM_IN, "blk.%d.ssm_in" },
1981
- { LLM_TENSOR_SSM_CONV1D, "blk.%d.ssm_conv1d" },
1982
- { LLM_TENSOR_SSM_DT, "blk.%d.ssm_dt" },
1983
- { LLM_TENSOR_SSM_A, "blk.%d.ssm_a" },
1984
- { LLM_TENSOR_SSM_D, "blk.%d.ssm_d" },
1985
- { LLM_TENSOR_SSM_NORM, "blk.%d.ssm_norm" },
1986
- { LLM_TENSOR_SSM_OUT, "blk.%d.ssm_out" },
1987
- // attention layers
1988
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
1989
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
1990
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
1991
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
1992
- // dense FFN
1993
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1994
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
1995
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
1996
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
1997
- // moe FFN
1998
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
1999
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
2000
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
2001
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
2002
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
2003
- // shared expert
2004
- { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" },
2005
- { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" },
2006
- { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" },
2007
- },
2008
- },
2009
- {
2010
- LLM_ARCH_CHAMELEON,
2011
- {
2012
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
2013
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
2014
- { LLM_TENSOR_OUTPUT, "output" },
2015
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
2016
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
2017
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
2018
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
2019
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
2020
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
2021
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
2022
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
2023
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
2024
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
2025
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
2026
- },
2027
- },
2028
- {
2029
- LLM_ARCH_WAVTOKENIZER_DEC,
2030
- {
2031
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
2032
- { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
2033
- { LLM_TENSOR_CONV1D, "conv1d" },
2034
- { LLM_TENSOR_CONVNEXT_DW, "convnext.%d.dw" },
2035
- { LLM_TENSOR_CONVNEXT_NORM, "convnext.%d.norm" },
2036
- { LLM_TENSOR_CONVNEXT_PW1, "convnext.%d.pw1" },
2037
- { LLM_TENSOR_CONVNEXT_PW2, "convnext.%d.pw2" },
2038
- { LLM_TENSOR_CONVNEXT_GAMMA, "convnext.%d.gamma" },
2039
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
2040
- { LLM_TENSOR_OUTPUT, "output" },
2041
- { LLM_TENSOR_POS_NET_CONV1, "posnet.%d.conv1" },
2042
- { LLM_TENSOR_POS_NET_CONV2, "posnet.%d.conv2" },
2043
- { LLM_TENSOR_POS_NET_NORM, "posnet.%d.norm" },
2044
- { LLM_TENSOR_POS_NET_NORM1, "posnet.%d.norm1" },
2045
- { LLM_TENSOR_POS_NET_NORM2, "posnet.%d.norm2" },
2046
- { LLM_TENSOR_POS_NET_ATTN_NORM, "posnet.%d.attn_norm" },
2047
- { LLM_TENSOR_POS_NET_ATTN_Q, "posnet.%d.attn_q" },
2048
- { LLM_TENSOR_POS_NET_ATTN_K, "posnet.%d.attn_k" },
2049
- { LLM_TENSOR_POS_NET_ATTN_V, "posnet.%d.attn_v" },
2050
- { LLM_TENSOR_POS_NET_ATTN_OUT, "posnet.%d.attn_output" },
2051
- },
2052
- },
2053
- {
2054
- LLM_ARCH_BAILINGMOE,
2055
- {
2056
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
2057
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
2058
- { LLM_TENSOR_OUTPUT, "output" },
2059
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
2060
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
2061
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
2062
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
2063
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
2064
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
2065
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
2066
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
2067
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
2068
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
2069
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
2070
- { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
2071
- { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" },
2072
- { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" },
2073
- { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" },
2074
- },
2075
- },
2076
- {
2077
- LLM_ARCH_BAILINGMOE2,
2078
- {
2079
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
2080
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
2081
- { LLM_TENSOR_OUTPUT, "output" },
2082
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
2083
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
2084
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
2085
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
2086
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
2087
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
2088
- { LLM_TENSOR_FFN_EXP_PROBS_B, "blk.%d.exp_probs_b" },
2089
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
2090
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
2091
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
2092
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
2093
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
2094
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
2095
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
2096
- { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" },
2097
- { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" },
2098
- { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" },
2099
- { LLM_TENSOR_NEXTN_EH_PROJ, "blk.%d.nextn.eh_proj" },
2100
- { LLM_TENSOR_NEXTN_EMBED_TOKENS, "blk.%d.nextn.embed_tokens" },
2101
- { LLM_TENSOR_NEXTN_ENORM, "blk.%d.nextn.enorm" },
2102
- { LLM_TENSOR_NEXTN_HNORM, "blk.%d.nextn.hnorm" },
2103
- { LLM_TENSOR_NEXTN_SHARED_HEAD_HEAD, "blk.%d.nextn.shared_head_head" },
2104
- { LLM_TENSOR_NEXTN_SHARED_HEAD_NORM, "blk.%d.nextn.shared_head_norm" },
2105
- { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" },
2106
- },
2107
- },
2108
- {
2109
- LLM_ARCH_DOTS1,
2110
- {
2111
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
2112
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
2113
- { LLM_TENSOR_OUTPUT, "output" },
2114
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
2115
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
2116
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
2117
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
2118
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
2119
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
2120
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
2121
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
2122
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
2123
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
2124
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
2125
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
2126
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
2127
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
2128
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
2129
- { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
2130
- { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" },
2131
- { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" },
2132
- { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" },
2133
- { LLM_TENSOR_FFN_EXP_PROBS_B, "blk.%d.exp_probs_b" },
2134
- }
2135
- },
2136
- {
2137
- LLM_ARCH_ERNIE4_5,
2138
- {
2139
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
2140
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
2141
- { LLM_TENSOR_OUTPUT, "output" },
2142
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
2143
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
2144
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
2145
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
2146
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
2147
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
2148
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
2149
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
2150
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
2151
- },
2152
- },
2153
- {
2154
- LLM_ARCH_ERNIE4_5_MOE,
2155
- {
2156
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
2157
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
2158
- { LLM_TENSOR_OUTPUT, "output" },
2159
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
2160
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
2161
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
2162
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
2163
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
2164
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
2165
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
2166
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
2167
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
2168
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
2169
- { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" },
2170
- { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" },
2171
- { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" },
2172
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
2173
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
2174
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
2175
- { LLM_TENSOR_FFN_EXP_PROBS_B, "blk.%d.exp_probs_b" },
2176
- },
2177
- },
2178
- {
2179
- LLM_ARCH_HUNYUAN_MOE,
2180
- {
2181
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
2182
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
2183
- { LLM_TENSOR_OUTPUT, "output" },
2184
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
2185
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
2186
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
2187
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
2188
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
2189
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
2190
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
2191
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
2192
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
2193
- { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" },
2194
- { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" },
2195
- { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" },
2196
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
2197
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
2198
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
2199
- },
2200
- },
2201
- {
2202
- LLM_ARCH_HUNYUAN_DENSE,
2203
- {
2204
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
2205
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
2206
- { LLM_TENSOR_OUTPUT, "output" },
2207
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
2208
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
2209
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
2210
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
2211
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
2212
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
2213
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
2214
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
2215
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
2216
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
2217
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
2218
-
2219
- },
2220
- },
2221
- {
2222
- LLM_ARCH_SMOLLM3,
2223
- {
2224
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
2225
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
2226
- { LLM_TENSOR_OUTPUT, "output" },
2227
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
2228
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
2229
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
2230
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
2231
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
2232
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
2233
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
2234
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
2235
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
2236
- },
2237
- },
2238
- {
2239
- LLM_ARCH_OPENAI_MOE,
2240
- {
2241
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
2242
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
2243
- { LLM_TENSOR_OUTPUT, "output" },
2244
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
2245
- { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" },
2246
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
2247
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
2248
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
2249
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
2250
- { LLM_TENSOR_ATTN_SINKS, "blk.%d.attn_sinks" },
2251
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
2252
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
2253
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
2254
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
2255
- },
2256
- },
2257
- {
2258
- LLM_ARCH_LFM2,
2259
- {
2260
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
2261
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
2262
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
2263
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
2264
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
2265
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
2266
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
2267
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
2268
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
2269
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
2270
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
2271
- { LLM_TENSOR_SHORTCONV_CONV, "blk.%d.shortconv.conv" },
2272
- { LLM_TENSOR_SHORTCONV_INPROJ, "blk.%d.shortconv.in_proj" },
2273
- { LLM_TENSOR_SHORTCONV_OUTPROJ, "blk.%d.shortconv.out_proj" },
2274
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
2275
- { LLM_TENSOR_OUTPUT_NORM, "token_embd_norm" }, // note: wrong tensor name
2276
- { LLM_TENSOR_OUTPUT, "output" },
2277
- }
2278
- },
2279
- {
2280
- LLM_ARCH_LFM2MOE,
2281
- {
2282
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
2283
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
2284
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
2285
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
2286
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
2287
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
2288
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
2289
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
2290
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
2291
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
2292
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
2293
- { LLM_TENSOR_SHORTCONV_CONV, "blk.%d.shortconv.conv" },
2294
- { LLM_TENSOR_SHORTCONV_INPROJ, "blk.%d.shortconv.in_proj" },
2295
- { LLM_TENSOR_SHORTCONV_OUTPROJ, "blk.%d.shortconv.out_proj" },
2296
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
2297
- { LLM_TENSOR_OUTPUT_NORM, "token_embd_norm" }, // note: wrong tensor name
2298
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
2299
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
2300
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
2301
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
2302
- { LLM_TENSOR_FFN_EXP_PROBS_B, "blk.%d.exp_probs_b" },
2303
- }
2304
- },
2305
- {
2306
- LLM_ARCH_SMALLTHINKER,
2307
- {
2308
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
2309
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
2310
- { LLM_TENSOR_OUTPUT, "output" },
2311
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
2312
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
2313
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
2314
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
2315
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
2316
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
2317
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
2318
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
2319
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
2320
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
2321
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
2322
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
2323
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }
2324
- },
2325
- },
2326
- {
2327
- LLM_ARCH_APERTUS,
2328
- {
2329
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
2330
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
2331
- { LLM_TENSOR_OUTPUT, "output" },
2332
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
2333
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
2334
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
2335
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
2336
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
2337
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
2338
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
2339
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
2340
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
2341
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
2342
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
2343
- },
2344
- },
2345
- {
2346
- LLM_ARCH_DREAM,
2347
- {
2348
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
2349
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
2350
- { LLM_TENSOR_OUTPUT, "output" },
2351
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
2352
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
2353
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
2354
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
2355
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
2356
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
2357
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
2358
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
2359
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
2360
- },
2361
- },
2362
- {
2363
- LLM_ARCH_LLADA,
2364
- {
2365
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
2366
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
2367
- { LLM_TENSOR_OUTPUT, "output" },
2368
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
2369
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
2370
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
2371
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
2372
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
2373
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
2374
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
2375
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
2376
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
2377
- },
2378
- },
2379
- {
2380
- LLM_ARCH_LLADA_MOE,
2381
- {
2382
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
2383
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
2384
- { LLM_TENSOR_OUTPUT, "output" },
2385
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
2386
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
2387
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
2388
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
2389
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
2390
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
2391
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
2392
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
2393
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
2394
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
2395
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
2396
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
2397
- },
2398
- },
2399
- {
2400
- LLM_ARCH_SEED_OSS,
2401
- {
2402
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
2403
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
2404
- { LLM_TENSOR_OUTPUT, "output" },
2405
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
2406
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
2407
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
2408
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
2409
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
2410
- { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" },
2411
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
2412
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
2413
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
2414
- },
2415
- },
2416
- {
2417
- LLM_ARCH_GROVEMOE,
2418
- {
2419
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
2420
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
2421
- { LLM_TENSOR_OUTPUT, "output" },
2422
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
2423
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
2424
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
2425
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
2426
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
2427
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
2428
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
2429
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
2430
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
2431
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
2432
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
2433
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
2434
- { LLM_TENSOR_FFN_GATE_CHEXPS, "blk.%d.ffn_gate_chexps" },
2435
- { LLM_TENSOR_FFN_DOWN_CHEXPS, "blk.%d.ffn_down_chexps" },
2436
- { LLM_TENSOR_FFN_UP_CHEXPS, "blk.%d.ffn_up_chexps" },
2437
- },
2438
- },
2439
- {
2440
- LLM_ARCH_MINIMAX_M2,
2441
- {
2442
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
2443
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
2444
- { LLM_TENSOR_OUTPUT, "output" },
2445
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
2446
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
2447
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
2448
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
2449
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
2450
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
2451
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
2452
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
2453
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
2454
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
2455
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
2456
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
2457
- { LLM_TENSOR_FFN_EXP_PROBS_B, "blk.%d.exp_probs_b" },
2458
- },
2459
- },
2460
- {
2461
- LLM_ARCH_PANGU_EMBED,
2462
- {
2463
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
2464
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
2465
- { LLM_TENSOR_OUTPUT, "output" },
2466
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
2467
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
2468
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
2469
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
2470
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
2471
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
2472
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
2473
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
2474
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
2475
- },
2476
- },
2477
- {
2478
- LLM_ARCH_COGVLM,
2479
- {
2480
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
2481
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
2482
- { LLM_TENSOR_OUTPUT, "output" },
2483
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
2484
- { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
2485
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
2486
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
2487
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
2488
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
2489
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
2490
- { LLM_TENSOR_VISEXP_ATTN_QKV, "blk.%d.vis_attn_qkv" },
2491
- { LLM_TENSOR_VISEXP_ATTN_OUT, "blk.%d.vis_attn_output" },
2492
- { LLM_TENSOR_VISEXP_FFN_GATE, "blk.%d.vis_gate" },
2493
- { LLM_TENSOR_VISEXP_FFN_DOWN, "blk.%d.vis_down" },
2494
- { LLM_TENSOR_VISEXP_FFN_UP, "blk.%d.vis_up" },
2495
- },
2496
- },
2497
- {
2498
- LLM_ARCH_RND1,
2499
- {
2500
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
2501
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
2502
- { LLM_TENSOR_OUTPUT, "output" },
2503
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
2504
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
2505
- { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
2506
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
2507
- { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
2508
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
2509
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
2510
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
2511
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
2512
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
2513
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
2514
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
2515
- },
2516
- },
2517
- {
2518
- LLM_ARCH_MISTRAL3,
2519
- {
2520
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
2521
- { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
2522
- { LLM_TENSOR_OUTPUT, "output" },
2523
- { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
2524
- { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
2525
- { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
2526
- { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
2527
- { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
2528
- { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
2529
- { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
2530
- { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
2531
- { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
2532
- { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
2533
- { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
2534
- { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
2535
- { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" },
2536
- { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" },
2537
- { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" },
2538
- { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
2539
- { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
2540
- { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
2541
- },
2542
- },
2543
- {
2544
- LLM_ARCH_UNKNOWN,
2545
- {
2546
- { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
2547
- },
2548
- },
306
+ static const std::map<llm_tensor, const char *> LLM_TENSOR_NAMES = {
307
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
308
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
309
+ { LLM_TENSOR_OUTPUT_NORM_LFM2, "token_embd_norm" }, // fix for wrong tensor name
310
+ { LLM_TENSOR_OUTPUT, "output" },
311
+ { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
312
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
313
+ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
314
+ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
315
+ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
316
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
317
+ { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
318
+ { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
319
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
320
+ { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
321
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
322
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
323
+ { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" },
324
+ { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" },
325
+ { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" },
326
+ { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" },
327
+ { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" },
328
+ { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" },
329
+ { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" },
330
+ { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
331
+ { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
332
+ { LLM_TENSOR_ATTN_GATE, "blk.%d.attn_gate" },
333
+ { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" },
334
+ { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" },
335
+ { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" },
336
+ { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" },
337
+ { LLM_TENSOR_FFN_EXP_PROBS_B, "blk.%d.exp_probs_b" },
338
+ { LLM_TENSOR_ATTN_NORM_2, "blk.%d.attn_norm_2" },
339
+ { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
340
+ { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" },
341
+ { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
342
+ { LLM_TENSOR_POS_EMBD, "position_embd" },
343
+ { LLM_TENSOR_FFN_ACT, "blk.%d.ffn.act" },
344
+ { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
345
+ { LLM_TENSOR_TOKEN_TYPES, "token_types" },
346
+ { LLM_TENSOR_CLS, "cls" },
347
+ { LLM_TENSOR_CLS_OUT, "cls.output" },
348
+ { LLM_TENSOR_ENC_OUTPUT_NORM, "enc.output_norm" },
349
+ { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
350
+ { LLM_TENSOR_SSM_A_NOSCAN, "blk.%d.ssm_a" },
351
+ { LLM_TENSOR_SSM_CONV1D, "blk.%d.ssm_conv1d" },
352
+ { LLM_TENSOR_SSM_DT, "blk.%d.ssm_dt" },
353
+ { LLM_TENSOR_SSM_BETA_ALPHA, "blk.%d.ssm_ba" },
354
+ { LLM_TENSOR_SSM_IN, "blk.%d.ssm_in" },
355
+ { LLM_TENSOR_SSM_NORM, "blk.%d.ssm_norm" },
356
+ { LLM_TENSOR_SSM_OUT, "blk.%d.ssm_out" },
357
+ { LLM_TENSOR_ROPE_FACTORS_LONG, "rope_factors_long" },
358
+ { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" },
359
+ { LLM_TENSOR_SSM_X, "blk.%d.ssm_x" },
360
+ { LLM_TENSOR_SSM_A, "blk.%d.ssm_a" },
361
+ { LLM_TENSOR_SSM_D, "blk.%d.ssm_d" },
362
+ { LLM_TENSOR_SSM_DT_NORM, "blk.%d.ssm_dt_norm" },
363
+ { LLM_TENSOR_SSM_B_NORM, "blk.%d.ssm_b_norm" },
364
+ { LLM_TENSOR_SSM_C_NORM, "blk.%d.ssm_c_norm" },
365
+ { LLM_TENSOR_ATTN_Q_A_NORM, "blk.%d.attn_q_a_norm" },
366
+ { LLM_TENSOR_ATTN_KV_A_NORM, "blk.%d.attn_kv_a_norm" },
367
+ { LLM_TENSOR_ATTN_Q_A, "blk.%d.attn_q_a" },
368
+ { LLM_TENSOR_ATTN_Q_B, "blk.%d.attn_q_b" },
369
+ { LLM_TENSOR_ATTN_KV_A_MQA, "blk.%d.attn_kv_a_mqa" },
370
+ { LLM_TENSOR_ATTN_KV_B, "blk.%d.attn_kv_b" },
371
+ { LLM_TENSOR_PER_LAYER_TOKEN_EMBD, "per_layer_token_embd" },
372
+ { LLM_TENSOR_PER_LAYER_MODEL_PROJ, "per_layer_model_proj" },
373
+ { LLM_TENSOR_PER_LAYER_PROJ_NORM, "per_layer_proj_norm" },
374
+ { LLM_TENSOR_ALTUP_UNEMBD_PROJ, "altup_unembd_proj" },
375
+ { LLM_TENSOR_ALTUP_PROJ, "altup_proj" },
376
+ { LLM_TENSOR_PER_LAYER_INP_GATE, "blk.%d.inp_gate" },
377
+ { LLM_TENSOR_PER_LAYER_PROJ, "blk.%d.proj" },
378
+ { LLM_TENSOR_PER_LAYER_POST_NORM, "blk.%d.post_norm" },
379
+ { LLM_TENSOR_ALTUP_CORRECT_COEF, "blk.%d.altup_correct_coef" },
380
+ { LLM_TENSOR_ALTUP_CORRECT_SCALE, "blk.%d.altup_correct_scale" },
381
+ { LLM_TENSOR_ALTUP_PREDICT_COEF, "blk.%d.altup_predict_coef" },
382
+ { LLM_TENSOR_ALTUP_ROUTER, "blk.%d.altup_router" },
383
+ { LLM_TENSOR_ALTUP_ROUTER_NORM, "blk.%d.altup_router_norm" },
384
+ { LLM_TENSOR_LAUREL_L, "blk.%d.laurel_l" },
385
+ { LLM_TENSOR_LAUREL_R, "blk.%d.laurel_r" },
386
+ { LLM_TENSOR_LAUREL_POST_NORM, "blk.%d.laurel_post_norm" },
387
+ { LLM_TENSOR_DENSE_2_OUT, "dense_2" },
388
+ { LLM_TENSOR_DENSE_3_OUT, "dense_3" },
389
+ { LLM_TENSOR_FFN_NORM_EXPS, "blk.%d.ffn_norm_exps" },
390
+ { LLM_TENSOR_ATTN_K_B, "blk.%d.attn_k_b" },
391
+ { LLM_TENSOR_ATTN_V_B, "blk.%d.attn_v_b" },
392
+ { LLM_TENSOR_NEXTN_EH_PROJ, "blk.%d.nextn.eh_proj" },
393
+ { LLM_TENSOR_NEXTN_EMBED_TOKENS, "blk.%d.nextn.embed_tokens" },
394
+ { LLM_TENSOR_NEXTN_ENORM, "blk.%d.nextn.enorm" },
395
+ { LLM_TENSOR_NEXTN_HNORM, "blk.%d.nextn.hnorm" },
396
+ { LLM_TENSOR_NEXTN_SHARED_HEAD_HEAD, "blk.%d.nextn.shared_head_head" },
397
+ { LLM_TENSOR_NEXTN_SHARED_HEAD_NORM, "blk.%d.nextn.shared_head_norm" },
398
+ { LLM_TENSOR_ATTN_SUB_NORM, "blk.%d.attn_sub_norm" },
399
+ { LLM_TENSOR_FFN_SUB_NORM, "blk.%d.ffn_sub_norm" },
400
+ { LLM_TENSOR_DEC_OUTPUT_NORM, "dec.output_norm" },
401
+ { LLM_TENSOR_DEC_ATTN_NORM, "dec.blk.%d.attn_norm" },
402
+ { LLM_TENSOR_DEC_ATTN_Q, "dec.blk.%d.attn_q" },
403
+ { LLM_TENSOR_DEC_ATTN_K, "dec.blk.%d.attn_k" },
404
+ { LLM_TENSOR_DEC_ATTN_V, "dec.blk.%d.attn_v" },
405
+ { LLM_TENSOR_DEC_ATTN_OUT, "dec.blk.%d.attn_o" },
406
+ { LLM_TENSOR_DEC_ATTN_REL_B, "dec.blk.%d.attn_rel_b" },
407
+ { LLM_TENSOR_DEC_CROSS_ATTN_NORM, "dec.blk.%d.cross_attn_norm" },
408
+ { LLM_TENSOR_DEC_CROSS_ATTN_Q, "dec.blk.%d.cross_attn_q" },
409
+ { LLM_TENSOR_DEC_CROSS_ATTN_K, "dec.blk.%d.cross_attn_k" },
410
+ { LLM_TENSOR_DEC_CROSS_ATTN_V, "dec.blk.%d.cross_attn_v" },
411
+ { LLM_TENSOR_DEC_CROSS_ATTN_OUT, "dec.blk.%d.cross_attn_o" },
412
+ { LLM_TENSOR_DEC_CROSS_ATTN_REL_B, "dec.blk.%d.cross_attn_rel_b" },
413
+ { LLM_TENSOR_DEC_FFN_NORM, "dec.blk.%d.ffn_norm" },
414
+ { LLM_TENSOR_DEC_FFN_GATE, "dec.blk.%d.ffn_gate" },
415
+ { LLM_TENSOR_DEC_FFN_DOWN, "dec.blk.%d.ffn_down" },
416
+ { LLM_TENSOR_DEC_FFN_UP, "dec.blk.%d.ffn_up" },
417
+ { LLM_TENSOR_ENC_ATTN_NORM, "enc.blk.%d.attn_norm" },
418
+ { LLM_TENSOR_ENC_ATTN_Q, "enc.blk.%d.attn_q" },
419
+ { LLM_TENSOR_ENC_ATTN_K, "enc.blk.%d.attn_k" },
420
+ { LLM_TENSOR_ENC_ATTN_V, "enc.blk.%d.attn_v" },
421
+ { LLM_TENSOR_ENC_ATTN_OUT, "enc.blk.%d.attn_o" },
422
+ { LLM_TENSOR_ENC_ATTN_REL_B, "enc.blk.%d.attn_rel_b" },
423
+ { LLM_TENSOR_ENC_FFN_NORM, "enc.blk.%d.ffn_norm" },
424
+ { LLM_TENSOR_ENC_FFN_GATE, "enc.blk.%d.ffn_gate" },
425
+ { LLM_TENSOR_ENC_FFN_DOWN, "enc.blk.%d.ffn_down" },
426
+ { LLM_TENSOR_ENC_FFN_UP, "enc.blk.%d.ffn_up" },
427
+ { LLM_TENSOR_TIME_MIX_W1, "blk.%d.time_mix_w1" },
428
+ { LLM_TENSOR_TIME_MIX_W2, "blk.%d.time_mix_w2" },
429
+ { LLM_TENSOR_TIME_MIX_LERP_X, "blk.%d.time_mix_lerp_x" },
430
+ { LLM_TENSOR_TIME_MIX_LERP_W, "blk.%d.time_mix_lerp_w" },
431
+ { LLM_TENSOR_TIME_MIX_LERP_K, "blk.%d.time_mix_lerp_k" },
432
+ { LLM_TENSOR_TIME_MIX_LERP_V, "blk.%d.time_mix_lerp_v" },
433
+ { LLM_TENSOR_TIME_MIX_LERP_R, "blk.%d.time_mix_lerp_r" },
434
+ { LLM_TENSOR_TIME_MIX_LERP_G, "blk.%d.time_mix_lerp_g" },
435
+ { LLM_TENSOR_TIME_MIX_LERP_FUSED, "blk.%d.time_mix_lerp_fused" },
436
+ { LLM_TENSOR_TIME_MIX_FIRST, "blk.%d.time_mix_first" },
437
+ { LLM_TENSOR_TIME_MIX_DECAY, "blk.%d.time_mix_decay" },
438
+ { LLM_TENSOR_TIME_MIX_DECAY_W1, "blk.%d.time_mix_decay_w1" },
439
+ { LLM_TENSOR_TIME_MIX_DECAY_W2, "blk.%d.time_mix_decay_w2" },
440
+ { LLM_TENSOR_TIME_MIX_KEY, "blk.%d.time_mix_key" },
441
+ { LLM_TENSOR_TIME_MIX_VALUE, "blk.%d.time_mix_value" },
442
+ { LLM_TENSOR_TIME_MIX_RECEPTANCE, "blk.%d.time_mix_receptance" },
443
+ { LLM_TENSOR_TIME_MIX_GATE, "blk.%d.time_mix_gate" },
444
+ { LLM_TENSOR_TIME_MIX_LN, "blk.%d.time_mix_ln" },
445
+ { LLM_TENSOR_TIME_MIX_OUTPUT, "blk.%d.time_mix_output" },
446
+ { LLM_TENSOR_CHANNEL_MIX_LERP_K, "blk.%d.channel_mix_lerp_k" },
447
+ { LLM_TENSOR_CHANNEL_MIX_LERP_R, "blk.%d.channel_mix_lerp_r" },
448
+ { LLM_TENSOR_CHANNEL_MIX_KEY, "blk.%d.channel_mix_key" },
449
+ { LLM_TENSOR_CHANNEL_MIX_VALUE, "blk.%d.channel_mix_value" },
450
+ { LLM_TENSOR_CHANNEL_MIX_RECEPTANCE, "blk.%d.channel_mix_receptance" },
451
+ { LLM_TENSOR_TIME_MIX_W0, "blk.%d.time_mix_w0" },
452
+ { LLM_TENSOR_TIME_MIX_A0, "blk.%d.time_mix_a0" },
453
+ { LLM_TENSOR_TIME_MIX_A1, "blk.%d.time_mix_a1" },
454
+ { LLM_TENSOR_TIME_MIX_A2, "blk.%d.time_mix_a2" },
455
+ { LLM_TENSOR_TIME_MIX_V0, "blk.%d.time_mix_v0" },
456
+ { LLM_TENSOR_TIME_MIX_V1, "blk.%d.time_mix_v1" },
457
+ { LLM_TENSOR_TIME_MIX_V2, "blk.%d.time_mix_v2" },
458
+ { LLM_TENSOR_TIME_MIX_G1, "blk.%d.time_mix_g1" },
459
+ { LLM_TENSOR_TIME_MIX_G2, "blk.%d.time_mix_g2" },
460
+ { LLM_TENSOR_TIME_MIX_K_K, "blk.%d.time_mix_k_k" },
461
+ { LLM_TENSOR_TIME_MIX_K_A, "blk.%d.time_mix_k_a" },
462
+ { LLM_TENSOR_TIME_MIX_R_K, "blk.%d.time_mix_r_k" },
463
+ { LLM_TENSOR_CONV1D, "conv1d" },
464
+ { LLM_TENSOR_CONVNEXT_DW, "convnext.%d.dw" },
465
+ { LLM_TENSOR_CONVNEXT_NORM, "convnext.%d.norm" },
466
+ { LLM_TENSOR_CONVNEXT_PW1, "convnext.%d.pw1" },
467
+ { LLM_TENSOR_CONVNEXT_PW2, "convnext.%d.pw2" },
468
+ { LLM_TENSOR_CONVNEXT_GAMMA, "convnext.%d.gamma" },
469
+ { LLM_TENSOR_POS_NET_CONV1, "posnet.%d.conv1" },
470
+ { LLM_TENSOR_POS_NET_CONV2, "posnet.%d.conv2" },
471
+ { LLM_TENSOR_POS_NET_NORM, "posnet.%d.norm" },
472
+ { LLM_TENSOR_POS_NET_NORM1, "posnet.%d.norm1" },
473
+ { LLM_TENSOR_POS_NET_NORM2, "posnet.%d.norm2" },
474
+ { LLM_TENSOR_POS_NET_ATTN_NORM, "posnet.%d.attn_norm" },
475
+ { LLM_TENSOR_POS_NET_ATTN_Q, "posnet.%d.attn_q" },
476
+ { LLM_TENSOR_POS_NET_ATTN_K, "posnet.%d.attn_k" },
477
+ { LLM_TENSOR_POS_NET_ATTN_V, "posnet.%d.attn_v" },
478
+ { LLM_TENSOR_POS_NET_ATTN_OUT, "posnet.%d.attn_output" },
479
+ { LLM_TENSOR_ATTN_SINKS, "blk.%d.attn_sinks" },
480
+ { LLM_TENSOR_SHORTCONV_CONV, "blk.%d.shortconv.conv" },
481
+ { LLM_TENSOR_SHORTCONV_INPROJ, "blk.%d.shortconv.in_proj" },
482
+ { LLM_TENSOR_SHORTCONV_OUTPROJ, "blk.%d.shortconv.out_proj" },
483
+ { LLM_TENSOR_FFN_GATE_CHEXPS, "blk.%d.ffn_gate_chexps" },
484
+ { LLM_TENSOR_FFN_DOWN_CHEXPS, "blk.%d.ffn_down_chexps" },
485
+ { LLM_TENSOR_FFN_UP_CHEXPS, "blk.%d.ffn_up_chexps" },
486
+ { LLM_TENSOR_VISEXP_ATTN_QKV, "blk.%d.vis_attn_qkv" },
487
+ { LLM_TENSOR_VISEXP_ATTN_OUT, "blk.%d.vis_attn_output" },
488
+ { LLM_TENSOR_VISEXP_FFN_GATE, "blk.%d.vis_gate" },
489
+ { LLM_TENSOR_VISEXP_FFN_DOWN, "blk.%d.vis_down" },
490
+ { LLM_TENSOR_VISEXP_FFN_UP, "blk.%d.vis_up" },
2549
491
  };
2550
492
 
493
+ static std::set<llm_tensor> llm_get_tensor_names(llm_arch arch) {
494
+ switch (arch) {
495
+ case LLM_ARCH_CLIP:
496
+ return {};
497
+ case LLM_ARCH_LLAMA:
498
+ case LLM_ARCH_DECI:
499
+ case LLM_ARCH_MISTRAL3:
500
+ return {
501
+ LLM_TENSOR_TOKEN_EMBD,
502
+ LLM_TENSOR_OUTPUT_NORM,
503
+ LLM_TENSOR_OUTPUT,
504
+ LLM_TENSOR_ROPE_FREQS,
505
+ LLM_TENSOR_ATTN_NORM,
506
+ LLM_TENSOR_ATTN_Q,
507
+ LLM_TENSOR_ATTN_K,
508
+ LLM_TENSOR_ATTN_V,
509
+ LLM_TENSOR_ATTN_OUT,
510
+ LLM_TENSOR_ATTN_ROT_EMBD,
511
+ LLM_TENSOR_FFN_GATE_INP,
512
+ LLM_TENSOR_FFN_NORM,
513
+ LLM_TENSOR_FFN_GATE,
514
+ LLM_TENSOR_FFN_DOWN,
515
+ LLM_TENSOR_FFN_UP,
516
+ LLM_TENSOR_FFN_GATE_EXP,
517
+ LLM_TENSOR_FFN_DOWN_EXP,
518
+ LLM_TENSOR_FFN_UP_EXP,
519
+ LLM_TENSOR_FFN_GATE_EXPS,
520
+ LLM_TENSOR_FFN_DOWN_EXPS,
521
+ LLM_TENSOR_FFN_UP_EXPS,
522
+ };
523
+ case LLM_ARCH_ARCEE:
524
+ case LLM_ARCH_STARCODER2:
525
+ case LLM_ARCH_NEMOTRON:
526
+ return {
527
+ LLM_TENSOR_TOKEN_EMBD,
528
+ LLM_TENSOR_OUTPUT_NORM,
529
+ LLM_TENSOR_OUTPUT,
530
+ LLM_TENSOR_ROPE_FREQS,
531
+ LLM_TENSOR_ATTN_NORM,
532
+ LLM_TENSOR_ATTN_Q,
533
+ LLM_TENSOR_ATTN_K,
534
+ LLM_TENSOR_ATTN_V,
535
+ LLM_TENSOR_ATTN_OUT,
536
+ LLM_TENSOR_ATTN_ROT_EMBD,
537
+ LLM_TENSOR_FFN_NORM,
538
+ LLM_TENSOR_FFN_DOWN,
539
+ LLM_TENSOR_FFN_UP,
540
+ };
541
+ case LLM_ARCH_AFMOE:
542
+ return {
543
+ LLM_TENSOR_TOKEN_EMBD,
544
+ LLM_TENSOR_OUTPUT_NORM,
545
+ LLM_TENSOR_OUTPUT,
546
+ LLM_TENSOR_ATTN_NORM,
547
+ LLM_TENSOR_ATTN_POST_NORM,
548
+ LLM_TENSOR_ATTN_Q,
549
+ LLM_TENSOR_ATTN_K,
550
+ LLM_TENSOR_ATTN_V,
551
+ LLM_TENSOR_ATTN_OUT,
552
+ LLM_TENSOR_ATTN_Q_NORM,
553
+ LLM_TENSOR_ATTN_K_NORM,
554
+ LLM_TENSOR_ATTN_GATE,
555
+ LLM_TENSOR_FFN_NORM,
556
+ LLM_TENSOR_FFN_POST_NORM,
557
+ LLM_TENSOR_FFN_GATE_INP,
558
+ LLM_TENSOR_FFN_GATE,
559
+ LLM_TENSOR_FFN_DOWN,
560
+ LLM_TENSOR_FFN_UP,
561
+ LLM_TENSOR_FFN_GATE_EXPS,
562
+ LLM_TENSOR_FFN_DOWN_EXPS,
563
+ LLM_TENSOR_FFN_UP_EXPS,
564
+ LLM_TENSOR_FFN_GATE_SHEXP,
565
+ LLM_TENSOR_FFN_UP_SHEXP,
566
+ LLM_TENSOR_FFN_DOWN_SHEXP,
567
+ LLM_TENSOR_FFN_EXP_PROBS_B,
568
+ };
569
+ case LLM_ARCH_LLAMA4:
570
+ return {
571
+ LLM_TENSOR_TOKEN_EMBD,
572
+ LLM_TENSOR_OUTPUT_NORM,
573
+ LLM_TENSOR_OUTPUT,
574
+ LLM_TENSOR_ROPE_FREQS,
575
+ LLM_TENSOR_ATTN_NORM,
576
+ LLM_TENSOR_ATTN_Q,
577
+ LLM_TENSOR_ATTN_K,
578
+ LLM_TENSOR_ATTN_V,
579
+ LLM_TENSOR_ATTN_OUT,
580
+ LLM_TENSOR_ATTN_ROT_EMBD,
581
+ LLM_TENSOR_FFN_GATE_INP,
582
+ LLM_TENSOR_FFN_NORM,
583
+ LLM_TENSOR_FFN_GATE,
584
+ LLM_TENSOR_FFN_DOWN,
585
+ LLM_TENSOR_FFN_UP,
586
+ LLM_TENSOR_FFN_GATE_EXP,
587
+ LLM_TENSOR_FFN_DOWN_EXP,
588
+ LLM_TENSOR_FFN_UP_EXP,
589
+ LLM_TENSOR_FFN_GATE_EXPS,
590
+ LLM_TENSOR_FFN_DOWN_EXPS,
591
+ LLM_TENSOR_FFN_UP_EXPS,
592
+ LLM_TENSOR_FFN_GATE_SHEXP,
593
+ LLM_TENSOR_FFN_DOWN_SHEXP,
594
+ LLM_TENSOR_FFN_UP_SHEXP,
595
+ };
596
+ case LLM_ARCH_BAICHUAN:
597
+ case LLM_ARCH_ORION:
598
+ case LLM_ARCH_XVERSE:
599
+ case LLM_ARCH_EXAONE:
600
+ return {
601
+ LLM_TENSOR_TOKEN_EMBD,
602
+ LLM_TENSOR_OUTPUT_NORM,
603
+ LLM_TENSOR_OUTPUT,
604
+ LLM_TENSOR_ROPE_FREQS,
605
+ LLM_TENSOR_ATTN_NORM,
606
+ LLM_TENSOR_ATTN_Q,
607
+ LLM_TENSOR_ATTN_K,
608
+ LLM_TENSOR_ATTN_V,
609
+ LLM_TENSOR_ATTN_OUT,
610
+ LLM_TENSOR_ATTN_ROT_EMBD,
611
+ LLM_TENSOR_FFN_NORM,
612
+ LLM_TENSOR_FFN_GATE,
613
+ LLM_TENSOR_FFN_DOWN,
614
+ LLM_TENSOR_FFN_UP,
615
+ };
616
+ case LLM_ARCH_FALCON:
617
+ return {
618
+ LLM_TENSOR_TOKEN_EMBD,
619
+ LLM_TENSOR_OUTPUT_NORM,
620
+ LLM_TENSOR_OUTPUT,
621
+ LLM_TENSOR_ATTN_NORM,
622
+ LLM_TENSOR_ATTN_NORM_2,
623
+ LLM_TENSOR_ATTN_QKV,
624
+ LLM_TENSOR_ATTN_OUT,
625
+ LLM_TENSOR_FFN_DOWN,
626
+ LLM_TENSOR_FFN_UP,
627
+ };
628
+ case LLM_ARCH_GROK:
629
+ return {
630
+ LLM_TENSOR_TOKEN_EMBD,
631
+ LLM_TENSOR_OUTPUT_NORM,
632
+ LLM_TENSOR_OUTPUT,
633
+ LLM_TENSOR_ROPE_FREQS,
634
+ LLM_TENSOR_ATTN_NORM,
635
+ LLM_TENSOR_ATTN_Q,
636
+ LLM_TENSOR_ATTN_K,
637
+ LLM_TENSOR_ATTN_V,
638
+ LLM_TENSOR_ATTN_OUT,
639
+ LLM_TENSOR_ATTN_ROT_EMBD,
640
+ LLM_TENSOR_FFN_GATE_INP,
641
+ LLM_TENSOR_FFN_NORM,
642
+ LLM_TENSOR_FFN_GATE,
643
+ LLM_TENSOR_FFN_DOWN,
644
+ LLM_TENSOR_FFN_UP,
645
+ LLM_TENSOR_FFN_GATE_EXP,
646
+ LLM_TENSOR_FFN_DOWN_EXP,
647
+ LLM_TENSOR_FFN_UP_EXP,
648
+ LLM_TENSOR_FFN_GATE_EXPS,
649
+ LLM_TENSOR_FFN_DOWN_EXPS,
650
+ LLM_TENSOR_FFN_UP_EXPS,
651
+ LLM_TENSOR_FFN_POST_NORM,
652
+ LLM_TENSOR_LAYER_OUT_NORM,
653
+ LLM_TENSOR_ATTN_OUT_NORM,
654
+ };
655
+ case LLM_ARCH_GPT2:
656
+ case LLM_ARCH_STARCODER:
657
+ return {
658
+ LLM_TENSOR_TOKEN_EMBD,
659
+ LLM_TENSOR_POS_EMBD,
660
+ LLM_TENSOR_OUTPUT_NORM,
661
+ LLM_TENSOR_OUTPUT,
662
+ LLM_TENSOR_ATTN_NORM,
663
+ LLM_TENSOR_ATTN_QKV,
664
+ LLM_TENSOR_ATTN_OUT,
665
+ LLM_TENSOR_FFN_NORM,
666
+ LLM_TENSOR_FFN_UP,
667
+ LLM_TENSOR_FFN_DOWN,
668
+ };
669
+ case LLM_ARCH_GPTNEOX:
670
+ return {
671
+ LLM_TENSOR_TOKEN_EMBD,
672
+ LLM_TENSOR_OUTPUT_NORM,
673
+ LLM_TENSOR_OUTPUT,
674
+ LLM_TENSOR_ATTN_NORM,
675
+ LLM_TENSOR_ATTN_QKV,
676
+ LLM_TENSOR_ATTN_OUT,
677
+ LLM_TENSOR_FFN_NORM,
678
+ LLM_TENSOR_FFN_DOWN,
679
+ LLM_TENSOR_FFN_UP,
680
+ };
681
+ case LLM_ARCH_MPT:
682
+ return {
683
+ LLM_TENSOR_TOKEN_EMBD,
684
+ LLM_TENSOR_OUTPUT_NORM,
685
+ LLM_TENSOR_OUTPUT,
686
+ LLM_TENSOR_ATTN_NORM,
687
+ LLM_TENSOR_FFN_NORM,
688
+ LLM_TENSOR_ATTN_QKV,
689
+ LLM_TENSOR_ATTN_OUT,
690
+ LLM_TENSOR_FFN_DOWN,
691
+ LLM_TENSOR_FFN_UP,
692
+ LLM_TENSOR_FFN_ACT,
693
+ LLM_TENSOR_POS_EMBD,
694
+ LLM_TENSOR_ATTN_Q_NORM,
695
+ LLM_TENSOR_ATTN_K_NORM,
696
+ };
697
+ case LLM_ARCH_REFACT:
698
+ case LLM_ARCH_QWEN2:
699
+ case LLM_ARCH_QWEN2VL:
700
+ case LLM_ARCH_INTERNLM2:
701
+ case LLM_ARCH_GRANITE:
702
+ case LLM_ARCH_ERNIE4_5:
703
+ case LLM_ARCH_SMOLLM3:
704
+ case LLM_ARCH_DREAM:
705
+ case LLM_ARCH_LLADA:
706
+ case LLM_ARCH_PANGU_EMBED:
707
+ return {
708
+ LLM_TENSOR_TOKEN_EMBD,
709
+ LLM_TENSOR_OUTPUT_NORM,
710
+ LLM_TENSOR_OUTPUT,
711
+ LLM_TENSOR_ATTN_NORM,
712
+ LLM_TENSOR_ATTN_Q,
713
+ LLM_TENSOR_ATTN_K,
714
+ LLM_TENSOR_ATTN_V,
715
+ LLM_TENSOR_ATTN_OUT,
716
+ LLM_TENSOR_FFN_NORM,
717
+ LLM_TENSOR_FFN_GATE,
718
+ LLM_TENSOR_FFN_DOWN,
719
+ LLM_TENSOR_FFN_UP,
720
+ };
721
+ case LLM_ARCH_BERT:
722
+ return {
723
+ LLM_TENSOR_TOKEN_EMBD,
724
+ LLM_TENSOR_TOKEN_EMBD_NORM,
725
+ LLM_TENSOR_TOKEN_TYPES,
726
+ LLM_TENSOR_POS_EMBD,
727
+ LLM_TENSOR_ATTN_OUT_NORM,
728
+ LLM_TENSOR_ATTN_QKV,
729
+ LLM_TENSOR_ATTN_Q,
730
+ LLM_TENSOR_ATTN_K,
731
+ LLM_TENSOR_ATTN_V,
732
+ LLM_TENSOR_ATTN_OUT,
733
+ LLM_TENSOR_LAYER_OUT_NORM,
734
+ LLM_TENSOR_FFN_DOWN,
735
+ LLM_TENSOR_FFN_UP,
736
+ LLM_TENSOR_CLS,
737
+ LLM_TENSOR_CLS_OUT,
738
+ };
739
+ case LLM_ARCH_NOMIC_BERT:
740
+ return {
741
+ LLM_TENSOR_TOKEN_EMBD,
742
+ LLM_TENSOR_TOKEN_EMBD_NORM,
743
+ LLM_TENSOR_TOKEN_TYPES,
744
+ LLM_TENSOR_ATTN_OUT_NORM,
745
+ LLM_TENSOR_ATTN_QKV,
746
+ LLM_TENSOR_ATTN_OUT,
747
+ LLM_TENSOR_LAYER_OUT_NORM,
748
+ LLM_TENSOR_FFN_GATE,
749
+ LLM_TENSOR_FFN_DOWN,
750
+ LLM_TENSOR_FFN_UP,
751
+ };
752
+ case LLM_ARCH_NOMIC_BERT_MOE:
753
+ return {
754
+ LLM_TENSOR_TOKEN_EMBD,
755
+ LLM_TENSOR_TOKEN_EMBD_NORM,
756
+ LLM_TENSOR_TOKEN_TYPES,
757
+ LLM_TENSOR_ATTN_OUT_NORM,
758
+ LLM_TENSOR_ATTN_QKV,
759
+ LLM_TENSOR_ATTN_OUT,
760
+ LLM_TENSOR_LAYER_OUT_NORM,
761
+ LLM_TENSOR_FFN_GATE,
762
+ LLM_TENSOR_FFN_DOWN,
763
+ LLM_TENSOR_FFN_UP,
764
+ LLM_TENSOR_FFN_GATE_INP,
765
+ LLM_TENSOR_FFN_DOWN_EXPS,
766
+ LLM_TENSOR_FFN_UP_EXPS,
767
+ };
768
+ case LLM_ARCH_NEO_BERT:
769
+ return {
770
+ LLM_TENSOR_TOKEN_EMBD,
771
+ LLM_TENSOR_ATTN_NORM,
772
+ LLM_TENSOR_ATTN_QKV,
773
+ LLM_TENSOR_ATTN_OUT,
774
+ LLM_TENSOR_FFN_NORM,
775
+ LLM_TENSOR_FFN_DOWN,
776
+ LLM_TENSOR_FFN_UP,
777
+ LLM_TENSOR_ENC_OUTPUT_NORM,
778
+ LLM_TENSOR_CLS,
779
+ LLM_TENSOR_CLS_OUT,
780
+ };
781
+ case LLM_ARCH_JINA_BERT_V2:
782
+ return {
783
+ LLM_TENSOR_TOKEN_EMBD,
784
+ LLM_TENSOR_TOKEN_EMBD_NORM,
785
+ LLM_TENSOR_TOKEN_TYPES,
786
+ LLM_TENSOR_ATTN_NORM_2,
787
+ LLM_TENSOR_ATTN_OUT_NORM,
788
+ LLM_TENSOR_ATTN_Q,
789
+ LLM_TENSOR_ATTN_Q_NORM,
790
+ LLM_TENSOR_ATTN_K,
791
+ LLM_TENSOR_ATTN_K_NORM,
792
+ LLM_TENSOR_ATTN_V,
793
+ LLM_TENSOR_ATTN_OUT,
794
+ LLM_TENSOR_LAYER_OUT_NORM,
795
+ LLM_TENSOR_FFN_DOWN,
796
+ LLM_TENSOR_FFN_GATE,
797
+ LLM_TENSOR_FFN_UP,
798
+ LLM_TENSOR_CLS,
799
+ };
800
+ case LLM_ARCH_JINA_BERT_V3:
801
+ return {
802
+ LLM_TENSOR_TOKEN_EMBD,
803
+ LLM_TENSOR_TOKEN_EMBD_NORM,
804
+ LLM_TENSOR_TOKEN_TYPES,
805
+ LLM_TENSOR_ATTN_OUT_NORM,
806
+ LLM_TENSOR_ATTN_QKV,
807
+ LLM_TENSOR_ATTN_OUT,
808
+ LLM_TENSOR_FFN_DOWN,
809
+ LLM_TENSOR_FFN_UP,
810
+ LLM_TENSOR_LAYER_OUT_NORM,
811
+ };
812
+ case LLM_ARCH_BLOOM:
813
+ return {
814
+ LLM_TENSOR_TOKEN_EMBD,
815
+ LLM_TENSOR_TOKEN_EMBD_NORM,
816
+ LLM_TENSOR_OUTPUT_NORM,
817
+ LLM_TENSOR_OUTPUT,
818
+ LLM_TENSOR_ATTN_NORM,
819
+ LLM_TENSOR_ATTN_QKV,
820
+ LLM_TENSOR_ATTN_OUT,
821
+ LLM_TENSOR_FFN_NORM,
822
+ LLM_TENSOR_FFN_UP,
823
+ LLM_TENSOR_FFN_DOWN,
824
+ };
825
+ case LLM_ARCH_STABLELM:
826
+ return {
827
+ LLM_TENSOR_TOKEN_EMBD,
828
+ LLM_TENSOR_OUTPUT_NORM,
829
+ LLM_TENSOR_OUTPUT,
830
+ LLM_TENSOR_ROPE_FREQS,
831
+ LLM_TENSOR_ATTN_NORM,
832
+ LLM_TENSOR_ATTN_Q,
833
+ LLM_TENSOR_ATTN_K,
834
+ LLM_TENSOR_ATTN_V,
835
+ LLM_TENSOR_ATTN_OUT,
836
+ LLM_TENSOR_FFN_NORM,
837
+ LLM_TENSOR_FFN_GATE,
838
+ LLM_TENSOR_FFN_DOWN,
839
+ LLM_TENSOR_FFN_UP,
840
+ LLM_TENSOR_ATTN_Q_NORM,
841
+ LLM_TENSOR_ATTN_K_NORM,
842
+ };
843
+ case LLM_ARCH_QWEN:
844
+ return {
845
+ LLM_TENSOR_TOKEN_EMBD,
846
+ LLM_TENSOR_OUTPUT_NORM,
847
+ LLM_TENSOR_OUTPUT,
848
+ LLM_TENSOR_ROPE_FREQS,
849
+ LLM_TENSOR_ATTN_NORM,
850
+ LLM_TENSOR_ATTN_QKV,
851
+ LLM_TENSOR_ATTN_OUT,
852
+ LLM_TENSOR_FFN_NORM,
853
+ LLM_TENSOR_FFN_GATE,
854
+ LLM_TENSOR_FFN_DOWN,
855
+ LLM_TENSOR_FFN_UP,
856
+ };
857
+ case LLM_ARCH_QWEN2MOE:
858
+ return {
859
+ LLM_TENSOR_TOKEN_EMBD,
860
+ LLM_TENSOR_OUTPUT_NORM,
861
+ LLM_TENSOR_OUTPUT,
862
+ LLM_TENSOR_ATTN_NORM,
863
+ LLM_TENSOR_ATTN_Q,
864
+ LLM_TENSOR_ATTN_K,
865
+ LLM_TENSOR_ATTN_V,
866
+ LLM_TENSOR_ATTN_OUT,
867
+ LLM_TENSOR_FFN_NORM,
868
+ LLM_TENSOR_FFN_GATE_INP,
869
+ LLM_TENSOR_FFN_GATE_EXPS,
870
+ LLM_TENSOR_FFN_DOWN_EXPS,
871
+ LLM_TENSOR_FFN_UP_EXPS,
872
+ LLM_TENSOR_FFN_GATE_INP_SHEXP,
873
+ LLM_TENSOR_FFN_GATE_SHEXP,
874
+ LLM_TENSOR_FFN_DOWN_SHEXP,
875
+ LLM_TENSOR_FFN_UP_SHEXP,
876
+ };
877
+ case LLM_ARCH_QWEN3:
878
+ return {
879
+ LLM_TENSOR_TOKEN_EMBD,
880
+ LLM_TENSOR_OUTPUT_NORM,
881
+ LLM_TENSOR_OUTPUT,
882
+ LLM_TENSOR_CLS_OUT,
883
+ LLM_TENSOR_ATTN_NORM,
884
+ LLM_TENSOR_ATTN_Q,
885
+ LLM_TENSOR_ATTN_Q_NORM,
886
+ LLM_TENSOR_ATTN_K,
887
+ LLM_TENSOR_ATTN_K_NORM,
888
+ LLM_TENSOR_ATTN_V,
889
+ LLM_TENSOR_ATTN_OUT,
890
+ LLM_TENSOR_FFN_NORM,
891
+ LLM_TENSOR_FFN_GATE,
892
+ LLM_TENSOR_FFN_DOWN,
893
+ LLM_TENSOR_FFN_UP,
894
+ };
895
+ case LLM_ARCH_QWEN3MOE:
896
+ case LLM_ARCH_QWEN3VLMOE:
897
+ case LLM_ARCH_OLMOE:
898
+ case LLM_ARCH_LLADA_MOE:
899
+ case LLM_ARCH_RND1:
900
+ return {
901
+ LLM_TENSOR_TOKEN_EMBD,
902
+ LLM_TENSOR_OUTPUT_NORM,
903
+ LLM_TENSOR_OUTPUT,
904
+ LLM_TENSOR_ATTN_NORM,
905
+ LLM_TENSOR_ATTN_Q,
906
+ LLM_TENSOR_ATTN_Q_NORM,
907
+ LLM_TENSOR_ATTN_K,
908
+ LLM_TENSOR_ATTN_K_NORM,
909
+ LLM_TENSOR_ATTN_V,
910
+ LLM_TENSOR_ATTN_OUT,
911
+ LLM_TENSOR_FFN_NORM,
912
+ LLM_TENSOR_FFN_GATE_INP,
913
+ LLM_TENSOR_FFN_GATE_EXPS,
914
+ LLM_TENSOR_FFN_DOWN_EXPS,
915
+ LLM_TENSOR_FFN_UP_EXPS,
916
+ };
917
+ case LLM_ARCH_QWEN3NEXT:
918
+ return {
919
+ LLM_TENSOR_TOKEN_EMBD,
920
+ LLM_TENSOR_OUTPUT_NORM,
921
+ LLM_TENSOR_OUTPUT,
922
+ LLM_TENSOR_ATTN_NORM,
923
+ LLM_TENSOR_ATTN_POST_NORM,
924
+ LLM_TENSOR_ATTN_Q,
925
+ LLM_TENSOR_ATTN_Q_NORM,
926
+ LLM_TENSOR_ATTN_K,
927
+ LLM_TENSOR_ATTN_K_NORM,
928
+ LLM_TENSOR_ATTN_V,
929
+ LLM_TENSOR_ATTN_OUT,
930
+ LLM_TENSOR_FFN_NORM,
931
+ LLM_TENSOR_FFN_GATE_INP,
932
+ LLM_TENSOR_FFN_GATE_EXPS,
933
+ LLM_TENSOR_FFN_DOWN_EXPS,
934
+ LLM_TENSOR_FFN_UP_EXPS,
935
+ LLM_TENSOR_FFN_GATE_INP_SHEXP,
936
+ LLM_TENSOR_FFN_GATE_SHEXP,
937
+ LLM_TENSOR_FFN_DOWN_SHEXP,
938
+ LLM_TENSOR_FFN_UP_SHEXP,
939
+ LLM_TENSOR_SSM_A_NOSCAN,
940
+ LLM_TENSOR_SSM_CONV1D,
941
+ LLM_TENSOR_SSM_DT,
942
+ LLM_TENSOR_SSM_BETA_ALPHA,
943
+ LLM_TENSOR_SSM_IN,
944
+ LLM_TENSOR_SSM_NORM,
945
+ LLM_TENSOR_SSM_OUT,
946
+ };
947
+ case LLM_ARCH_QWEN3VL:
948
+ case LLM_ARCH_CHAMELEON:
949
+ case LLM_ARCH_HUNYUAN_DENSE:
950
+ return {
951
+ LLM_TENSOR_TOKEN_EMBD,
952
+ LLM_TENSOR_OUTPUT_NORM,
953
+ LLM_TENSOR_OUTPUT,
954
+ LLM_TENSOR_ATTN_NORM,
955
+ LLM_TENSOR_ATTN_Q,
956
+ LLM_TENSOR_ATTN_Q_NORM,
957
+ LLM_TENSOR_ATTN_K,
958
+ LLM_TENSOR_ATTN_K_NORM,
959
+ LLM_TENSOR_ATTN_V,
960
+ LLM_TENSOR_ATTN_OUT,
961
+ LLM_TENSOR_FFN_NORM,
962
+ LLM_TENSOR_FFN_GATE,
963
+ LLM_TENSOR_FFN_DOWN,
964
+ LLM_TENSOR_FFN_UP,
965
+ };
966
+ case LLM_ARCH_PHI2:
967
+ return {
968
+ LLM_TENSOR_TOKEN_EMBD,
969
+ LLM_TENSOR_OUTPUT_NORM,
970
+ LLM_TENSOR_OUTPUT,
971
+ LLM_TENSOR_ATTN_NORM,
972
+ LLM_TENSOR_ATTN_QKV,
973
+ LLM_TENSOR_ATTN_Q,
974
+ LLM_TENSOR_ATTN_K,
975
+ LLM_TENSOR_ATTN_V,
976
+ LLM_TENSOR_ATTN_OUT,
977
+ LLM_TENSOR_FFN_DOWN,
978
+ LLM_TENSOR_FFN_UP,
979
+ };
980
+ case LLM_ARCH_PHI3:
981
+ return {
982
+ LLM_TENSOR_TOKEN_EMBD,
983
+ LLM_TENSOR_OUTPUT_NORM,
984
+ LLM_TENSOR_OUTPUT,
985
+ LLM_TENSOR_ROPE_FACTORS_LONG,
986
+ LLM_TENSOR_ROPE_FACTORS_SHORT,
987
+ LLM_TENSOR_ATTN_NORM,
988
+ LLM_TENSOR_ATTN_QKV,
989
+ LLM_TENSOR_ATTN_Q,
990
+ LLM_TENSOR_ATTN_K,
991
+ LLM_TENSOR_ATTN_V,
992
+ LLM_TENSOR_ATTN_OUT,
993
+ LLM_TENSOR_FFN_NORM,
994
+ LLM_TENSOR_FFN_DOWN,
995
+ LLM_TENSOR_FFN_UP,
996
+ };
997
+ case LLM_ARCH_PHIMOE:
998
+ return {
999
+ LLM_TENSOR_TOKEN_EMBD,
1000
+ LLM_TENSOR_OUTPUT_NORM,
1001
+ LLM_TENSOR_OUTPUT,
1002
+ LLM_TENSOR_ROPE_FACTORS_LONG,
1003
+ LLM_TENSOR_ROPE_FACTORS_SHORT,
1004
+ LLM_TENSOR_ATTN_NORM,
1005
+ LLM_TENSOR_ATTN_QKV,
1006
+ LLM_TENSOR_ATTN_Q,
1007
+ LLM_TENSOR_ATTN_K,
1008
+ LLM_TENSOR_ATTN_V,
1009
+ LLM_TENSOR_ATTN_OUT,
1010
+ LLM_TENSOR_FFN_NORM,
1011
+ LLM_TENSOR_FFN_GATE_INP,
1012
+ LLM_TENSOR_FFN_GATE_EXPS,
1013
+ LLM_TENSOR_FFN_DOWN_EXPS,
1014
+ LLM_TENSOR_FFN_UP_EXPS,
1015
+ };
1016
+ case LLM_ARCH_PLAMO:
1017
+ return {
1018
+ LLM_TENSOR_TOKEN_EMBD,
1019
+ LLM_TENSOR_OUTPUT_NORM,
1020
+ LLM_TENSOR_OUTPUT,
1021
+ LLM_TENSOR_ROPE_FREQS,
1022
+ LLM_TENSOR_ATTN_NORM,
1023
+ LLM_TENSOR_ATTN_Q,
1024
+ LLM_TENSOR_ATTN_K,
1025
+ LLM_TENSOR_ATTN_V,
1026
+ LLM_TENSOR_ATTN_OUT,
1027
+ LLM_TENSOR_ATTN_ROT_EMBD,
1028
+ LLM_TENSOR_FFN_GATE,
1029
+ LLM_TENSOR_FFN_DOWN,
1030
+ LLM_TENSOR_FFN_UP,
1031
+ };
1032
+ case LLM_ARCH_PLAMO2:
1033
+ return {
1034
+ LLM_TENSOR_TOKEN_EMBD,
1035
+ LLM_TENSOR_OUTPUT_NORM,
1036
+ LLM_TENSOR_OUTPUT,
1037
+ LLM_TENSOR_ROPE_FREQS,
1038
+ LLM_TENSOR_ATTN_NORM,
1039
+ LLM_TENSOR_ATTN_QKV,
1040
+ LLM_TENSOR_ATTN_Q_NORM,
1041
+ LLM_TENSOR_ATTN_K_NORM,
1042
+ LLM_TENSOR_ATTN_OUT,
1043
+ LLM_TENSOR_ATTN_ROT_EMBD,
1044
+ LLM_TENSOR_FFN_NORM,
1045
+ LLM_TENSOR_FFN_DOWN,
1046
+ LLM_TENSOR_FFN_UP,
1047
+ LLM_TENSOR_SSM_IN,
1048
+ LLM_TENSOR_SSM_CONV1D,
1049
+ LLM_TENSOR_SSM_X,
1050
+ LLM_TENSOR_SSM_DT,
1051
+ LLM_TENSOR_SSM_A,
1052
+ LLM_TENSOR_SSM_D,
1053
+ LLM_TENSOR_SSM_OUT,
1054
+ LLM_TENSOR_SSM_DT_NORM,
1055
+ LLM_TENSOR_SSM_B_NORM,
1056
+ LLM_TENSOR_SSM_C_NORM,
1057
+ LLM_TENSOR_ATTN_POST_NORM,
1058
+ LLM_TENSOR_FFN_POST_NORM,
1059
+ };
1060
+ case LLM_ARCH_CODESHELL:
1061
+ return {
1062
+ LLM_TENSOR_TOKEN_EMBD,
1063
+ LLM_TENSOR_OUTPUT_NORM,
1064
+ LLM_TENSOR_OUTPUT,
1065
+ LLM_TENSOR_ROPE_FREQS,
1066
+ LLM_TENSOR_ATTN_NORM,
1067
+ LLM_TENSOR_ATTN_Q,
1068
+ LLM_TENSOR_ATTN_K,
1069
+ LLM_TENSOR_ATTN_V,
1070
+ LLM_TENSOR_ATTN_QKV,
1071
+ LLM_TENSOR_ATTN_OUT,
1072
+ LLM_TENSOR_ATTN_ROT_EMBD,
1073
+ LLM_TENSOR_FFN_NORM,
1074
+ LLM_TENSOR_FFN_GATE,
1075
+ LLM_TENSOR_FFN_DOWN,
1076
+ LLM_TENSOR_FFN_UP,
1077
+ };
1078
+ case LLM_ARCH_MINICPM:
1079
+ return {
1080
+ LLM_TENSOR_TOKEN_EMBD,
1081
+ LLM_TENSOR_OUTPUT_NORM,
1082
+ LLM_TENSOR_OUTPUT,
1083
+ LLM_TENSOR_ROPE_FREQS,
1084
+ LLM_TENSOR_ROPE_FACTORS_LONG,
1085
+ LLM_TENSOR_ROPE_FACTORS_SHORT,
1086
+ LLM_TENSOR_ATTN_NORM,
1087
+ LLM_TENSOR_ATTN_Q,
1088
+ LLM_TENSOR_ATTN_K,
1089
+ LLM_TENSOR_ATTN_V,
1090
+ LLM_TENSOR_ATTN_OUT,
1091
+ LLM_TENSOR_ATTN_ROT_EMBD,
1092
+ LLM_TENSOR_FFN_GATE_INP,
1093
+ LLM_TENSOR_FFN_NORM,
1094
+ LLM_TENSOR_FFN_GATE,
1095
+ LLM_TENSOR_FFN_DOWN,
1096
+ LLM_TENSOR_FFN_UP,
1097
+ LLM_TENSOR_FFN_GATE_EXP,
1098
+ LLM_TENSOR_FFN_DOWN_EXP,
1099
+ LLM_TENSOR_FFN_UP_EXP,
1100
+ };
1101
+ case LLM_ARCH_MINICPM3:
1102
+ return {
1103
+ LLM_TENSOR_TOKEN_EMBD,
1104
+ LLM_TENSOR_OUTPUT_NORM,
1105
+ LLM_TENSOR_OUTPUT,
1106
+ LLM_TENSOR_ROPE_FACTORS_LONG,
1107
+ LLM_TENSOR_ROPE_FACTORS_SHORT,
1108
+ LLM_TENSOR_ATTN_NORM,
1109
+ LLM_TENSOR_ATTN_Q_A_NORM,
1110
+ LLM_TENSOR_ATTN_KV_A_NORM,
1111
+ LLM_TENSOR_ATTN_Q,
1112
+ LLM_TENSOR_ATTN_Q_A,
1113
+ LLM_TENSOR_ATTN_Q_B,
1114
+ LLM_TENSOR_ATTN_KV_A_MQA,
1115
+ LLM_TENSOR_ATTN_KV_B,
1116
+ LLM_TENSOR_ATTN_OUT,
1117
+ LLM_TENSOR_FFN_NORM,
1118
+ LLM_TENSOR_FFN_GATE,
1119
+ LLM_TENSOR_FFN_UP,
1120
+ LLM_TENSOR_FFN_DOWN,
1121
+ };
1122
+ case LLM_ARCH_GEMMA:
1123
+ return {
1124
+ LLM_TENSOR_TOKEN_EMBD,
1125
+ LLM_TENSOR_OUTPUT_NORM,
1126
+ LLM_TENSOR_ATTN_NORM,
1127
+ LLM_TENSOR_ATTN_Q,
1128
+ LLM_TENSOR_ATTN_K,
1129
+ LLM_TENSOR_ATTN_V,
1130
+ LLM_TENSOR_ATTN_OUT,
1131
+ LLM_TENSOR_FFN_NORM,
1132
+ LLM_TENSOR_FFN_GATE,
1133
+ LLM_TENSOR_FFN_DOWN,
1134
+ LLM_TENSOR_FFN_UP,
1135
+ };
1136
+ case LLM_ARCH_GEMMA2:
1137
+ return {
1138
+ LLM_TENSOR_TOKEN_EMBD,
1139
+ LLM_TENSOR_OUTPUT_NORM,
1140
+ LLM_TENSOR_ATTN_NORM,
1141
+ LLM_TENSOR_ATTN_Q,
1142
+ LLM_TENSOR_ATTN_K,
1143
+ LLM_TENSOR_ATTN_V,
1144
+ LLM_TENSOR_ATTN_OUT,
1145
+ LLM_TENSOR_ATTN_POST_NORM,
1146
+ LLM_TENSOR_FFN_NORM,
1147
+ LLM_TENSOR_FFN_GATE,
1148
+ LLM_TENSOR_FFN_DOWN,
1149
+ LLM_TENSOR_FFN_UP,
1150
+ LLM_TENSOR_FFN_POST_NORM,
1151
+ };
1152
+ case LLM_ARCH_GEMMA3:
1153
+ return {
1154
+ LLM_TENSOR_TOKEN_EMBD,
1155
+ LLM_TENSOR_OUTPUT_NORM,
1156
+ LLM_TENSOR_OUTPUT,
1157
+ LLM_TENSOR_ATTN_NORM,
1158
+ LLM_TENSOR_ATTN_Q,
1159
+ LLM_TENSOR_ATTN_Q_NORM,
1160
+ LLM_TENSOR_ATTN_K,
1161
+ LLM_TENSOR_ATTN_K_NORM,
1162
+ LLM_TENSOR_ATTN_V,
1163
+ LLM_TENSOR_ATTN_OUT,
1164
+ LLM_TENSOR_ATTN_POST_NORM,
1165
+ LLM_TENSOR_FFN_NORM,
1166
+ LLM_TENSOR_FFN_GATE,
1167
+ LLM_TENSOR_FFN_DOWN,
1168
+ LLM_TENSOR_FFN_UP,
1169
+ LLM_TENSOR_FFN_POST_NORM,
1170
+ };
1171
+ case LLM_ARCH_GEMMA3N:
1172
+ return {
1173
+ LLM_TENSOR_TOKEN_EMBD,
1174
+ LLM_TENSOR_OUTPUT_NORM,
1175
+ LLM_TENSOR_ATTN_NORM,
1176
+ LLM_TENSOR_ATTN_Q,
1177
+ LLM_TENSOR_ATTN_Q_NORM,
1178
+ LLM_TENSOR_ATTN_K,
1179
+ LLM_TENSOR_ATTN_K_NORM,
1180
+ LLM_TENSOR_ATTN_V,
1181
+ LLM_TENSOR_ATTN_OUT,
1182
+ LLM_TENSOR_ATTN_POST_NORM,
1183
+ LLM_TENSOR_FFN_NORM,
1184
+ LLM_TENSOR_FFN_GATE,
1185
+ LLM_TENSOR_FFN_DOWN,
1186
+ LLM_TENSOR_FFN_UP,
1187
+ LLM_TENSOR_FFN_POST_NORM,
1188
+ LLM_TENSOR_PER_LAYER_TOKEN_EMBD,
1189
+ LLM_TENSOR_PER_LAYER_MODEL_PROJ,
1190
+ LLM_TENSOR_PER_LAYER_PROJ_NORM,
1191
+ LLM_TENSOR_ALTUP_UNEMBD_PROJ,
1192
+ LLM_TENSOR_ALTUP_PROJ,
1193
+ LLM_TENSOR_PER_LAYER_INP_GATE,
1194
+ LLM_TENSOR_PER_LAYER_PROJ,
1195
+ LLM_TENSOR_PER_LAYER_POST_NORM,
1196
+ LLM_TENSOR_ALTUP_CORRECT_COEF,
1197
+ LLM_TENSOR_ALTUP_CORRECT_SCALE,
1198
+ LLM_TENSOR_ALTUP_PREDICT_COEF,
1199
+ LLM_TENSOR_ALTUP_ROUTER,
1200
+ LLM_TENSOR_ALTUP_ROUTER_NORM,
1201
+ LLM_TENSOR_LAUREL_L,
1202
+ LLM_TENSOR_LAUREL_R,
1203
+ LLM_TENSOR_LAUREL_POST_NORM,
1204
+ };
1205
+ case LLM_ARCH_GEMMA_EMBEDDING:
1206
+ return {
1207
+ LLM_TENSOR_TOKEN_EMBD,
1208
+ LLM_TENSOR_OUTPUT_NORM,
1209
+ LLM_TENSOR_OUTPUT,
1210
+ LLM_TENSOR_DENSE_2_OUT,
1211
+ LLM_TENSOR_DENSE_3_OUT,
1212
+ LLM_TENSOR_ATTN_NORM,
1213
+ LLM_TENSOR_ATTN_Q,
1214
+ LLM_TENSOR_ATTN_Q_NORM,
1215
+ LLM_TENSOR_ATTN_K,
1216
+ LLM_TENSOR_ATTN_K_NORM,
1217
+ LLM_TENSOR_ATTN_V,
1218
+ LLM_TENSOR_ATTN_OUT,
1219
+ LLM_TENSOR_ATTN_POST_NORM,
1220
+ LLM_TENSOR_FFN_NORM,
1221
+ LLM_TENSOR_FFN_GATE,
1222
+ LLM_TENSOR_FFN_DOWN,
1223
+ LLM_TENSOR_FFN_UP,
1224
+ LLM_TENSOR_FFN_POST_NORM,
1225
+ };
1226
+ case LLM_ARCH_MAMBA:
1227
+ return {
1228
+ LLM_TENSOR_TOKEN_EMBD,
1229
+ LLM_TENSOR_OUTPUT_NORM,
1230
+ LLM_TENSOR_OUTPUT,
1231
+ LLM_TENSOR_ATTN_NORM,
1232
+ LLM_TENSOR_SSM_IN,
1233
+ LLM_TENSOR_SSM_CONV1D,
1234
+ LLM_TENSOR_SSM_X,
1235
+ LLM_TENSOR_SSM_DT,
1236
+ LLM_TENSOR_SSM_A,
1237
+ LLM_TENSOR_SSM_D,
1238
+ LLM_TENSOR_SSM_OUT,
1239
+ };
1240
+ case LLM_ARCH_MAMBA2:
1241
+ return {
1242
+ LLM_TENSOR_TOKEN_EMBD,
1243
+ LLM_TENSOR_OUTPUT_NORM,
1244
+ LLM_TENSOR_OUTPUT,
1245
+ LLM_TENSOR_ATTN_NORM,
1246
+ LLM_TENSOR_SSM_IN,
1247
+ LLM_TENSOR_SSM_CONV1D,
1248
+ LLM_TENSOR_SSM_DT,
1249
+ LLM_TENSOR_SSM_A,
1250
+ LLM_TENSOR_SSM_D,
1251
+ LLM_TENSOR_SSM_NORM,
1252
+ LLM_TENSOR_SSM_OUT,
1253
+ };
1254
+ case LLM_ARCH_JAMBA:
1255
+ return {
1256
+ LLM_TENSOR_TOKEN_EMBD,
1257
+ LLM_TENSOR_OUTPUT_NORM,
1258
+ LLM_TENSOR_OUTPUT,
1259
+ LLM_TENSOR_ATTN_NORM,
1260
+ LLM_TENSOR_SSM_IN,
1261
+ LLM_TENSOR_SSM_CONV1D,
1262
+ LLM_TENSOR_SSM_X,
1263
+ LLM_TENSOR_SSM_DT,
1264
+ LLM_TENSOR_SSM_DT_NORM,
1265
+ LLM_TENSOR_SSM_A,
1266
+ LLM_TENSOR_SSM_B_NORM,
1267
+ LLM_TENSOR_SSM_C_NORM,
1268
+ LLM_TENSOR_SSM_D,
1269
+ LLM_TENSOR_SSM_OUT,
1270
+ LLM_TENSOR_ATTN_Q,
1271
+ LLM_TENSOR_ATTN_K,
1272
+ LLM_TENSOR_ATTN_V,
1273
+ LLM_TENSOR_ATTN_OUT,
1274
+ LLM_TENSOR_FFN_GATE_INP,
1275
+ LLM_TENSOR_FFN_NORM,
1276
+ LLM_TENSOR_FFN_GATE,
1277
+ LLM_TENSOR_FFN_DOWN,
1278
+ LLM_TENSOR_FFN_UP,
1279
+ LLM_TENSOR_FFN_GATE_EXPS,
1280
+ LLM_TENSOR_FFN_DOWN_EXPS,
1281
+ LLM_TENSOR_FFN_UP_EXPS,
1282
+ };
1283
+ case LLM_ARCH_FALCON_H1:
1284
+ return {
1285
+ LLM_TENSOR_TOKEN_EMBD,
1286
+ LLM_TENSOR_OUTPUT,
1287
+ LLM_TENSOR_OUTPUT_NORM,
1288
+ LLM_TENSOR_ATTN_NORM,
1289
+ LLM_TENSOR_ATTN_Q,
1290
+ LLM_TENSOR_ATTN_K,
1291
+ LLM_TENSOR_ATTN_V,
1292
+ LLM_TENSOR_ATTN_OUT,
1293
+ LLM_TENSOR_SSM_IN,
1294
+ LLM_TENSOR_SSM_CONV1D,
1295
+ LLM_TENSOR_SSM_DT,
1296
+ LLM_TENSOR_SSM_A,
1297
+ LLM_TENSOR_SSM_D,
1298
+ LLM_TENSOR_SSM_NORM,
1299
+ LLM_TENSOR_SSM_OUT,
1300
+ LLM_TENSOR_FFN_NORM,
1301
+ LLM_TENSOR_FFN_GATE,
1302
+ LLM_TENSOR_FFN_DOWN,
1303
+ LLM_TENSOR_FFN_UP,
1304
+ };
1305
+ case LLM_ARCH_COMMAND_R:
1306
+ return {
1307
+ LLM_TENSOR_TOKEN_EMBD,
1308
+ LLM_TENSOR_OUTPUT_NORM,
1309
+ LLM_TENSOR_ATTN_NORM,
1310
+ LLM_TENSOR_ATTN_Q,
1311
+ LLM_TENSOR_ATTN_K,
1312
+ LLM_TENSOR_ATTN_V,
1313
+ LLM_TENSOR_ATTN_OUT,
1314
+ LLM_TENSOR_FFN_GATE,
1315
+ LLM_TENSOR_FFN_DOWN,
1316
+ LLM_TENSOR_FFN_UP,
1317
+ LLM_TENSOR_ATTN_Q_NORM,
1318
+ LLM_TENSOR_ATTN_K_NORM,
1319
+ };
1320
+ case LLM_ARCH_COHERE2:
1321
+ return {
1322
+ LLM_TENSOR_TOKEN_EMBD,
1323
+ LLM_TENSOR_OUTPUT_NORM,
1324
+ LLM_TENSOR_ATTN_NORM,
1325
+ LLM_TENSOR_ATTN_Q,
1326
+ LLM_TENSOR_ATTN_K,
1327
+ LLM_TENSOR_ATTN_V,
1328
+ LLM_TENSOR_ATTN_OUT,
1329
+ LLM_TENSOR_FFN_GATE,
1330
+ LLM_TENSOR_FFN_DOWN,
1331
+ LLM_TENSOR_FFN_UP,
1332
+ };
1333
+ case LLM_ARCH_DBRX:
1334
+ return {
1335
+ LLM_TENSOR_TOKEN_EMBD,
1336
+ LLM_TENSOR_OUTPUT_NORM,
1337
+ LLM_TENSOR_OUTPUT,
1338
+ LLM_TENSOR_ATTN_QKV,
1339
+ LLM_TENSOR_ATTN_NORM,
1340
+ LLM_TENSOR_ATTN_OUT,
1341
+ LLM_TENSOR_ATTN_OUT_NORM,
1342
+ LLM_TENSOR_FFN_GATE_INP,
1343
+ LLM_TENSOR_FFN_GATE_EXPS,
1344
+ LLM_TENSOR_FFN_DOWN_EXPS,
1345
+ LLM_TENSOR_FFN_UP_EXPS,
1346
+ };
1347
+ case LLM_ARCH_OLMO:
1348
+ return {
1349
+ LLM_TENSOR_TOKEN_EMBD,
1350
+ LLM_TENSOR_OUTPUT,
1351
+ LLM_TENSOR_ATTN_Q,
1352
+ LLM_TENSOR_ATTN_K,
1353
+ LLM_TENSOR_ATTN_V,
1354
+ LLM_TENSOR_ATTN_OUT,
1355
+ LLM_TENSOR_FFN_GATE,
1356
+ LLM_TENSOR_FFN_DOWN,
1357
+ LLM_TENSOR_FFN_UP,
1358
+ };
1359
+ case LLM_ARCH_OLMO2:
1360
+ return {
1361
+ LLM_TENSOR_TOKEN_EMBD,
1362
+ LLM_TENSOR_OUTPUT_NORM,
1363
+ LLM_TENSOR_OUTPUT,
1364
+ LLM_TENSOR_ATTN_Q,
1365
+ LLM_TENSOR_ATTN_K,
1366
+ LLM_TENSOR_ATTN_V,
1367
+ LLM_TENSOR_ATTN_OUT,
1368
+ LLM_TENSOR_ATTN_POST_NORM,
1369
+ LLM_TENSOR_ATTN_Q_NORM,
1370
+ LLM_TENSOR_ATTN_K_NORM,
1371
+ LLM_TENSOR_FFN_POST_NORM,
1372
+ LLM_TENSOR_FFN_GATE,
1373
+ LLM_TENSOR_FFN_DOWN,
1374
+ LLM_TENSOR_FFN_UP,
1375
+ };
1376
+ case LLM_ARCH_OPENELM:
1377
+ return {
1378
+ LLM_TENSOR_TOKEN_EMBD,
1379
+ LLM_TENSOR_OUTPUT_NORM,
1380
+ LLM_TENSOR_ATTN_NORM,
1381
+ LLM_TENSOR_ATTN_QKV,
1382
+ LLM_TENSOR_ATTN_Q_NORM,
1383
+ LLM_TENSOR_ATTN_K_NORM,
1384
+ LLM_TENSOR_ATTN_OUT,
1385
+ LLM_TENSOR_FFN_NORM,
1386
+ LLM_TENSOR_FFN_GATE,
1387
+ LLM_TENSOR_FFN_DOWN,
1388
+ LLM_TENSOR_FFN_UP,
1389
+ };
1390
+ case LLM_ARCH_ARCTIC:
1391
+ return {
1392
+ LLM_TENSOR_TOKEN_EMBD,
1393
+ LLM_TENSOR_OUTPUT_NORM,
1394
+ LLM_TENSOR_OUTPUT,
1395
+ LLM_TENSOR_ATTN_NORM,
1396
+ LLM_TENSOR_ATTN_Q,
1397
+ LLM_TENSOR_ATTN_K,
1398
+ LLM_TENSOR_ATTN_V,
1399
+ LLM_TENSOR_ATTN_OUT,
1400
+ LLM_TENSOR_FFN_GATE_INP,
1401
+ LLM_TENSOR_FFN_NORM,
1402
+ LLM_TENSOR_FFN_GATE,
1403
+ LLM_TENSOR_FFN_DOWN,
1404
+ LLM_TENSOR_FFN_UP,
1405
+ LLM_TENSOR_FFN_NORM_EXPS,
1406
+ LLM_TENSOR_FFN_GATE_EXPS,
1407
+ LLM_TENSOR_FFN_DOWN_EXPS,
1408
+ LLM_TENSOR_FFN_UP_EXPS,
1409
+ };
1410
+ case LLM_ARCH_DEEPSEEK:
1411
+ return {
1412
+ LLM_TENSOR_TOKEN_EMBD,
1413
+ LLM_TENSOR_OUTPUT_NORM,
1414
+ LLM_TENSOR_OUTPUT,
1415
+ LLM_TENSOR_ROPE_FREQS,
1416
+ LLM_TENSOR_ATTN_NORM,
1417
+ LLM_TENSOR_ATTN_Q,
1418
+ LLM_TENSOR_ATTN_K,
1419
+ LLM_TENSOR_ATTN_V,
1420
+ LLM_TENSOR_ATTN_OUT,
1421
+ LLM_TENSOR_ATTN_ROT_EMBD,
1422
+ LLM_TENSOR_FFN_GATE_INP,
1423
+ LLM_TENSOR_FFN_NORM,
1424
+ LLM_TENSOR_FFN_GATE,
1425
+ LLM_TENSOR_FFN_DOWN,
1426
+ LLM_TENSOR_FFN_UP,
1427
+ LLM_TENSOR_FFN_GATE_EXPS,
1428
+ LLM_TENSOR_FFN_DOWN_EXPS,
1429
+ LLM_TENSOR_FFN_UP_EXPS,
1430
+ LLM_TENSOR_FFN_GATE_INP_SHEXP,
1431
+ LLM_TENSOR_FFN_GATE_SHEXP,
1432
+ LLM_TENSOR_FFN_DOWN_SHEXP,
1433
+ LLM_TENSOR_FFN_UP_SHEXP,
1434
+ };
1435
+ case LLM_ARCH_DEEPSEEK2:
1436
+ return {
1437
+ LLM_TENSOR_TOKEN_EMBD,
1438
+ LLM_TENSOR_OUTPUT_NORM,
1439
+ LLM_TENSOR_OUTPUT,
1440
+ LLM_TENSOR_ATTN_NORM,
1441
+ LLM_TENSOR_ATTN_Q_A_NORM,
1442
+ LLM_TENSOR_ATTN_KV_A_NORM,
1443
+ LLM_TENSOR_ATTN_Q,
1444
+ LLM_TENSOR_ATTN_Q_A,
1445
+ LLM_TENSOR_ATTN_Q_B,
1446
+ LLM_TENSOR_ATTN_KV_A_MQA,
1447
+ LLM_TENSOR_ATTN_KV_B,
1448
+ LLM_TENSOR_ATTN_K_B,
1449
+ LLM_TENSOR_ATTN_V_B,
1450
+ LLM_TENSOR_ATTN_OUT,
1451
+ LLM_TENSOR_FFN_NORM,
1452
+ LLM_TENSOR_FFN_GATE,
1453
+ LLM_TENSOR_FFN_UP,
1454
+ LLM_TENSOR_FFN_DOWN,
1455
+ LLM_TENSOR_FFN_GATE_INP,
1456
+ LLM_TENSOR_FFN_GATE_EXPS,
1457
+ LLM_TENSOR_FFN_DOWN_EXPS,
1458
+ LLM_TENSOR_FFN_UP_EXPS,
1459
+ LLM_TENSOR_FFN_GATE_INP_SHEXP,
1460
+ LLM_TENSOR_FFN_GATE_SHEXP,
1461
+ LLM_TENSOR_FFN_DOWN_SHEXP,
1462
+ LLM_TENSOR_FFN_UP_SHEXP,
1463
+ LLM_TENSOR_FFN_EXP_PROBS_B,
1464
+ };
1465
+ case LLM_ARCH_PLM:
1466
+ return {
1467
+ LLM_TENSOR_TOKEN_EMBD,
1468
+ LLM_TENSOR_OUTPUT_NORM,
1469
+ LLM_TENSOR_ATTN_NORM,
1470
+ LLM_TENSOR_ATTN_Q,
1471
+ LLM_TENSOR_ATTN_KV_A_MQA,
1472
+ LLM_TENSOR_ATTN_KV_A_NORM,
1473
+ LLM_TENSOR_ATTN_KV_B,
1474
+ LLM_TENSOR_ATTN_OUT,
1475
+ LLM_TENSOR_FFN_NORM,
1476
+ LLM_TENSOR_FFN_DOWN,
1477
+ LLM_TENSOR_FFN_UP,
1478
+ };
1479
+ case LLM_ARCH_CHATGLM:
1480
+ return {
1481
+ LLM_TENSOR_TOKEN_EMBD,
1482
+ LLM_TENSOR_ROPE_FREQS,
1483
+ LLM_TENSOR_OUTPUT_NORM,
1484
+ LLM_TENSOR_OUTPUT,
1485
+ LLM_TENSOR_ATTN_NORM,
1486
+ LLM_TENSOR_ATTN_QKV,
1487
+ LLM_TENSOR_ATTN_Q,
1488
+ LLM_TENSOR_ATTN_K,
1489
+ LLM_TENSOR_ATTN_V,
1490
+ LLM_TENSOR_ATTN_OUT,
1491
+ LLM_TENSOR_FFN_NORM,
1492
+ LLM_TENSOR_FFN_UP,
1493
+ LLM_TENSOR_FFN_DOWN,
1494
+ };
1495
+ case LLM_ARCH_GLM4:
1496
+ return {
1497
+ LLM_TENSOR_TOKEN_EMBD,
1498
+ LLM_TENSOR_ROPE_FREQS,
1499
+ LLM_TENSOR_OUTPUT_NORM,
1500
+ LLM_TENSOR_OUTPUT,
1501
+ LLM_TENSOR_ATTN_NORM,
1502
+ LLM_TENSOR_ATTN_Q,
1503
+ LLM_TENSOR_ATTN_K,
1504
+ LLM_TENSOR_ATTN_V,
1505
+ LLM_TENSOR_ATTN_OUT,
1506
+ LLM_TENSOR_FFN_NORM,
1507
+ LLM_TENSOR_FFN_UP,
1508
+ LLM_TENSOR_FFN_DOWN,
1509
+ LLM_TENSOR_ATTN_POST_NORM,
1510
+ LLM_TENSOR_FFN_POST_NORM,
1511
+ };
1512
+ case LLM_ARCH_GLM4_MOE:
1513
+ return {
1514
+ LLM_TENSOR_TOKEN_EMBD,
1515
+ LLM_TENSOR_OUTPUT_NORM,
1516
+ LLM_TENSOR_OUTPUT,
1517
+ LLM_TENSOR_ATTN_NORM,
1518
+ LLM_TENSOR_ATTN_POST_NORM,
1519
+ LLM_TENSOR_ATTN_Q,
1520
+ LLM_TENSOR_ATTN_K,
1521
+ LLM_TENSOR_ATTN_V,
1522
+ LLM_TENSOR_ATTN_OUT,
1523
+ LLM_TENSOR_ATTN_Q_NORM,
1524
+ LLM_TENSOR_ATTN_K_NORM,
1525
+ LLM_TENSOR_FFN_GATE,
1526
+ LLM_TENSOR_FFN_DOWN,
1527
+ LLM_TENSOR_FFN_UP,
1528
+ LLM_TENSOR_FFN_GATE_INP,
1529
+ LLM_TENSOR_FFN_GATE_EXPS,
1530
+ LLM_TENSOR_FFN_DOWN_EXPS,
1531
+ LLM_TENSOR_FFN_UP_EXPS,
1532
+ LLM_TENSOR_FFN_GATE_SHEXP,
1533
+ LLM_TENSOR_FFN_DOWN_SHEXP,
1534
+ LLM_TENSOR_FFN_UP_SHEXP,
1535
+ LLM_TENSOR_FFN_EXP_PROBS_B,
1536
+ LLM_TENSOR_NEXTN_EH_PROJ,
1537
+ LLM_TENSOR_NEXTN_EMBED_TOKENS,
1538
+ LLM_TENSOR_NEXTN_ENORM,
1539
+ LLM_TENSOR_NEXTN_HNORM,
1540
+ LLM_TENSOR_NEXTN_SHARED_HEAD_HEAD,
1541
+ LLM_TENSOR_NEXTN_SHARED_HEAD_NORM,
1542
+ };
1543
+ case LLM_ARCH_BITNET:
1544
+ return {
1545
+ LLM_TENSOR_TOKEN_EMBD,
1546
+ LLM_TENSOR_OUTPUT_NORM,
1547
+ LLM_TENSOR_ATTN_Q,
1548
+ LLM_TENSOR_ATTN_K,
1549
+ LLM_TENSOR_ATTN_V,
1550
+ LLM_TENSOR_ATTN_OUT,
1551
+ LLM_TENSOR_ATTN_NORM,
1552
+ LLM_TENSOR_ATTN_SUB_NORM,
1553
+ LLM_TENSOR_FFN_GATE,
1554
+ LLM_TENSOR_FFN_DOWN,
1555
+ LLM_TENSOR_FFN_UP,
1556
+ LLM_TENSOR_FFN_NORM,
1557
+ LLM_TENSOR_FFN_SUB_NORM,
1558
+ };
1559
+ case LLM_ARCH_T5:
1560
+ return {
1561
+ LLM_TENSOR_TOKEN_EMBD,
1562
+ LLM_TENSOR_OUTPUT,
1563
+ LLM_TENSOR_DEC_OUTPUT_NORM,
1564
+ LLM_TENSOR_DEC_ATTN_NORM,
1565
+ LLM_TENSOR_DEC_ATTN_Q,
1566
+ LLM_TENSOR_DEC_ATTN_K,
1567
+ LLM_TENSOR_DEC_ATTN_V,
1568
+ LLM_TENSOR_DEC_ATTN_OUT,
1569
+ LLM_TENSOR_DEC_ATTN_REL_B,
1570
+ LLM_TENSOR_DEC_CROSS_ATTN_NORM,
1571
+ LLM_TENSOR_DEC_CROSS_ATTN_Q,
1572
+ LLM_TENSOR_DEC_CROSS_ATTN_K,
1573
+ LLM_TENSOR_DEC_CROSS_ATTN_V,
1574
+ LLM_TENSOR_DEC_CROSS_ATTN_OUT,
1575
+ LLM_TENSOR_DEC_CROSS_ATTN_REL_B,
1576
+ LLM_TENSOR_DEC_FFN_NORM,
1577
+ LLM_TENSOR_DEC_FFN_GATE,
1578
+ LLM_TENSOR_DEC_FFN_DOWN,
1579
+ LLM_TENSOR_DEC_FFN_UP,
1580
+ LLM_TENSOR_ENC_OUTPUT_NORM,
1581
+ LLM_TENSOR_ENC_ATTN_NORM,
1582
+ LLM_TENSOR_ENC_ATTN_Q,
1583
+ LLM_TENSOR_ENC_ATTN_K,
1584
+ LLM_TENSOR_ENC_ATTN_V,
1585
+ LLM_TENSOR_ENC_ATTN_OUT,
1586
+ LLM_TENSOR_ENC_ATTN_REL_B,
1587
+ LLM_TENSOR_ENC_FFN_NORM,
1588
+ LLM_TENSOR_ENC_FFN_GATE,
1589
+ LLM_TENSOR_ENC_FFN_DOWN,
1590
+ LLM_TENSOR_ENC_FFN_UP,
1591
+ };
1592
+ case LLM_ARCH_T5ENCODER:
1593
+ return {
1594
+ LLM_TENSOR_TOKEN_EMBD,
1595
+ LLM_TENSOR_OUTPUT,
1596
+ LLM_TENSOR_ENC_OUTPUT_NORM,
1597
+ LLM_TENSOR_ENC_ATTN_NORM,
1598
+ LLM_TENSOR_ENC_ATTN_Q,
1599
+ LLM_TENSOR_ENC_ATTN_K,
1600
+ LLM_TENSOR_ENC_ATTN_V,
1601
+ LLM_TENSOR_ENC_ATTN_OUT,
1602
+ LLM_TENSOR_ENC_ATTN_REL_B,
1603
+ LLM_TENSOR_ENC_FFN_NORM,
1604
+ LLM_TENSOR_ENC_FFN_GATE,
1605
+ LLM_TENSOR_ENC_FFN_DOWN,
1606
+ LLM_TENSOR_ENC_FFN_UP,
1607
+ };
1608
+ case LLM_ARCH_JAIS:
1609
+ return {
1610
+ LLM_TENSOR_TOKEN_EMBD,
1611
+ LLM_TENSOR_OUTPUT_NORM,
1612
+ LLM_TENSOR_OUTPUT,
1613
+ LLM_TENSOR_ATTN_NORM,
1614
+ LLM_TENSOR_ATTN_QKV,
1615
+ LLM_TENSOR_ATTN_OUT,
1616
+ LLM_TENSOR_FFN_NORM,
1617
+ LLM_TENSOR_FFN_UP,
1618
+ LLM_TENSOR_FFN_GATE,
1619
+ LLM_TENSOR_FFN_DOWN,
1620
+ };
1621
+ case LLM_ARCH_NEMOTRON_H:
1622
+ return {
1623
+ LLM_TENSOR_TOKEN_EMBD,
1624
+ LLM_TENSOR_OUTPUT_NORM,
1625
+ LLM_TENSOR_OUTPUT,
1626
+ LLM_TENSOR_ATTN_NORM,
1627
+ LLM_TENSOR_SSM_IN,
1628
+ LLM_TENSOR_SSM_CONV1D,
1629
+ LLM_TENSOR_SSM_DT,
1630
+ LLM_TENSOR_SSM_A,
1631
+ LLM_TENSOR_SSM_D,
1632
+ LLM_TENSOR_SSM_NORM,
1633
+ LLM_TENSOR_SSM_OUT,
1634
+ LLM_TENSOR_ATTN_Q,
1635
+ LLM_TENSOR_ATTN_K,
1636
+ LLM_TENSOR_ATTN_V,
1637
+ LLM_TENSOR_ATTN_OUT,
1638
+ LLM_TENSOR_FFN_DOWN,
1639
+ LLM_TENSOR_FFN_UP,
1640
+ };
1641
+ case LLM_ARCH_NEMOTRON_H_MOE:
1642
+ return {
1643
+ LLM_TENSOR_TOKEN_EMBD,
1644
+ LLM_TENSOR_OUTPUT_NORM,
1645
+ LLM_TENSOR_OUTPUT,
1646
+ LLM_TENSOR_ATTN_NORM,
1647
+ // mamba(2) ssm layers
1648
+ LLM_TENSOR_SSM_IN,
1649
+ LLM_TENSOR_SSM_CONV1D,
1650
+ LLM_TENSOR_SSM_DT,
1651
+ LLM_TENSOR_SSM_A,
1652
+ LLM_TENSOR_SSM_D,
1653
+ LLM_TENSOR_SSM_NORM,
1654
+ LLM_TENSOR_SSM_OUT,
1655
+ // attention layers
1656
+ LLM_TENSOR_ATTN_Q,
1657
+ LLM_TENSOR_ATTN_K,
1658
+ LLM_TENSOR_ATTN_V,
1659
+ LLM_TENSOR_ATTN_OUT,
1660
+ // dense FFN
1661
+ LLM_TENSOR_FFN_DOWN,
1662
+ LLM_TENSOR_FFN_UP,
1663
+ // MoE FFN (for MoE layers)
1664
+ LLM_TENSOR_FFN_GATE_INP,
1665
+ LLM_TENSOR_FFN_UP_EXPS,
1666
+ LLM_TENSOR_FFN_DOWN_EXPS,
1667
+ LLM_TENSOR_FFN_EXP_PROBS_B,
1668
+ // MoE shared expert layer
1669
+ LLM_TENSOR_FFN_DOWN_SHEXP,
1670
+ LLM_TENSOR_FFN_UP_SHEXP,
1671
+ };
1672
+ case LLM_ARCH_EXAONE4:
1673
+ return {
1674
+ LLM_TENSOR_TOKEN_EMBD,
1675
+ LLM_TENSOR_OUTPUT_NORM,
1676
+ LLM_TENSOR_OUTPUT,
1677
+ LLM_TENSOR_ROPE_FREQS,
1678
+ LLM_TENSOR_ATTN_Q,
1679
+ LLM_TENSOR_ATTN_Q_NORM,
1680
+ LLM_TENSOR_ATTN_K,
1681
+ LLM_TENSOR_ATTN_K_NORM,
1682
+ LLM_TENSOR_ATTN_V,
1683
+ LLM_TENSOR_ATTN_OUT,
1684
+ LLM_TENSOR_ATTN_POST_NORM,
1685
+ LLM_TENSOR_FFN_GATE,
1686
+ LLM_TENSOR_FFN_DOWN,
1687
+ LLM_TENSOR_FFN_UP,
1688
+ LLM_TENSOR_FFN_POST_NORM,
1689
+ };
1690
+ case LLM_ARCH_RWKV6:
1691
+ return {
1692
+ LLM_TENSOR_TOKEN_EMBD,
1693
+ LLM_TENSOR_TOKEN_EMBD_NORM,
1694
+ LLM_TENSOR_OUTPUT_NORM,
1695
+ LLM_TENSOR_OUTPUT,
1696
+ LLM_TENSOR_ATTN_NORM,
1697
+ LLM_TENSOR_ATTN_NORM_2,
1698
+ LLM_TENSOR_TIME_MIX_W1,
1699
+ LLM_TENSOR_TIME_MIX_W2,
1700
+ LLM_TENSOR_TIME_MIX_LERP_X,
1701
+ LLM_TENSOR_TIME_MIX_LERP_W,
1702
+ LLM_TENSOR_TIME_MIX_LERP_K,
1703
+ LLM_TENSOR_TIME_MIX_LERP_V,
1704
+ LLM_TENSOR_TIME_MIX_LERP_R,
1705
+ LLM_TENSOR_TIME_MIX_LERP_G,
1706
+ LLM_TENSOR_TIME_MIX_LERP_FUSED,
1707
+ LLM_TENSOR_TIME_MIX_FIRST,
1708
+ LLM_TENSOR_TIME_MIX_DECAY,
1709
+ LLM_TENSOR_TIME_MIX_DECAY_W1,
1710
+ LLM_TENSOR_TIME_MIX_DECAY_W2,
1711
+ LLM_TENSOR_TIME_MIX_KEY,
1712
+ LLM_TENSOR_TIME_MIX_VALUE,
1713
+ LLM_TENSOR_TIME_MIX_RECEPTANCE,
1714
+ LLM_TENSOR_TIME_MIX_GATE,
1715
+ LLM_TENSOR_TIME_MIX_LN,
1716
+ LLM_TENSOR_TIME_MIX_OUTPUT,
1717
+ LLM_TENSOR_CHANNEL_MIX_LERP_K,
1718
+ LLM_TENSOR_CHANNEL_MIX_LERP_R,
1719
+ LLM_TENSOR_CHANNEL_MIX_KEY,
1720
+ LLM_TENSOR_CHANNEL_MIX_VALUE,
1721
+ LLM_TENSOR_CHANNEL_MIX_RECEPTANCE,
1722
+ };
1723
+ case LLM_ARCH_RWKV6QWEN2:
1724
+ return {
1725
+ LLM_TENSOR_TOKEN_EMBD,
1726
+ LLM_TENSOR_OUTPUT_NORM,
1727
+ LLM_TENSOR_OUTPUT,
1728
+ LLM_TENSOR_ATTN_NORM,
1729
+ LLM_TENSOR_TIME_MIX_W1,
1730
+ LLM_TENSOR_TIME_MIX_W2,
1731
+ LLM_TENSOR_TIME_MIX_LERP_X,
1732
+ LLM_TENSOR_TIME_MIX_LERP_FUSED,
1733
+ LLM_TENSOR_TIME_MIX_FIRST,
1734
+ LLM_TENSOR_TIME_MIX_DECAY,
1735
+ LLM_TENSOR_TIME_MIX_DECAY_W1,
1736
+ LLM_TENSOR_TIME_MIX_DECAY_W2,
1737
+ LLM_TENSOR_TIME_MIX_KEY,
1738
+ LLM_TENSOR_TIME_MIX_VALUE,
1739
+ LLM_TENSOR_TIME_MIX_RECEPTANCE,
1740
+ LLM_TENSOR_TIME_MIX_GATE,
1741
+ LLM_TENSOR_TIME_MIX_OUTPUT,
1742
+ LLM_TENSOR_FFN_NORM,
1743
+ LLM_TENSOR_FFN_GATE,
1744
+ LLM_TENSOR_FFN_DOWN,
1745
+ LLM_TENSOR_FFN_UP,
1746
+ };
1747
+ case LLM_ARCH_RWKV7:
1748
+ return {
1749
+ LLM_TENSOR_TOKEN_EMBD,
1750
+ LLM_TENSOR_TOKEN_EMBD_NORM,
1751
+ LLM_TENSOR_OUTPUT_NORM,
1752
+ LLM_TENSOR_OUTPUT,
1753
+ LLM_TENSOR_ATTN_NORM,
1754
+ LLM_TENSOR_ATTN_NORM_2,
1755
+ LLM_TENSOR_TIME_MIX_W0,
1756
+ LLM_TENSOR_TIME_MIX_W1,
1757
+ LLM_TENSOR_TIME_MIX_W2,
1758
+ LLM_TENSOR_TIME_MIX_A0,
1759
+ LLM_TENSOR_TIME_MIX_A1,
1760
+ LLM_TENSOR_TIME_MIX_A2,
1761
+ LLM_TENSOR_TIME_MIX_V0,
1762
+ LLM_TENSOR_TIME_MIX_V1,
1763
+ LLM_TENSOR_TIME_MIX_V2,
1764
+ LLM_TENSOR_TIME_MIX_G1,
1765
+ LLM_TENSOR_TIME_MIX_G2,
1766
+ LLM_TENSOR_TIME_MIX_K_K,
1767
+ LLM_TENSOR_TIME_MIX_K_A,
1768
+ LLM_TENSOR_TIME_MIX_R_K,
1769
+ LLM_TENSOR_TIME_MIX_LERP_FUSED,
1770
+ LLM_TENSOR_TIME_MIX_KEY,
1771
+ LLM_TENSOR_TIME_MIX_VALUE,
1772
+ LLM_TENSOR_TIME_MIX_RECEPTANCE,
1773
+ LLM_TENSOR_TIME_MIX_LN,
1774
+ LLM_TENSOR_TIME_MIX_OUTPUT,
1775
+ LLM_TENSOR_CHANNEL_MIX_LERP_K,
1776
+ LLM_TENSOR_CHANNEL_MIX_KEY,
1777
+ LLM_TENSOR_CHANNEL_MIX_VALUE,
1778
+ };
1779
+ case LLM_ARCH_ARWKV7:
1780
+ return {
1781
+ LLM_TENSOR_TOKEN_EMBD,
1782
+ LLM_TENSOR_TOKEN_EMBD_NORM,
1783
+ LLM_TENSOR_OUTPUT_NORM,
1784
+ LLM_TENSOR_OUTPUT,
1785
+ LLM_TENSOR_ATTN_NORM,
1786
+ LLM_TENSOR_TIME_MIX_W0,
1787
+ LLM_TENSOR_TIME_MIX_W1,
1788
+ LLM_TENSOR_TIME_MIX_W2,
1789
+ LLM_TENSOR_TIME_MIX_A0,
1790
+ LLM_TENSOR_TIME_MIX_A1,
1791
+ LLM_TENSOR_TIME_MIX_A2,
1792
+ LLM_TENSOR_TIME_MIX_V0,
1793
+ LLM_TENSOR_TIME_MIX_V1,
1794
+ LLM_TENSOR_TIME_MIX_V2,
1795
+ LLM_TENSOR_TIME_MIX_G1,
1796
+ LLM_TENSOR_TIME_MIX_G2,
1797
+ LLM_TENSOR_TIME_MIX_K_K,
1798
+ LLM_TENSOR_TIME_MIX_K_A,
1799
+ LLM_TENSOR_TIME_MIX_R_K,
1800
+ LLM_TENSOR_TIME_MIX_LERP_FUSED,
1801
+ LLM_TENSOR_TIME_MIX_KEY,
1802
+ LLM_TENSOR_TIME_MIX_VALUE,
1803
+ LLM_TENSOR_TIME_MIX_RECEPTANCE,
1804
+ LLM_TENSOR_TIME_MIX_LN,
1805
+ LLM_TENSOR_TIME_MIX_OUTPUT,
1806
+ LLM_TENSOR_FFN_NORM,
1807
+ LLM_TENSOR_FFN_GATE,
1808
+ LLM_TENSOR_FFN_DOWN,
1809
+ LLM_TENSOR_FFN_UP,
1810
+ };
1811
+ case LLM_ARCH_GRANITE_MOE:
1812
+ return {
1813
+ LLM_TENSOR_TOKEN_EMBD,
1814
+ LLM_TENSOR_OUTPUT_NORM,
1815
+ LLM_TENSOR_OUTPUT,
1816
+ LLM_TENSOR_ATTN_NORM,
1817
+ LLM_TENSOR_ATTN_Q,
1818
+ LLM_TENSOR_ATTN_K,
1819
+ LLM_TENSOR_ATTN_V,
1820
+ LLM_TENSOR_ATTN_OUT,
1821
+ LLM_TENSOR_FFN_NORM,
1822
+ LLM_TENSOR_FFN_GATE_INP,
1823
+ LLM_TENSOR_FFN_GATE_EXPS,
1824
+ LLM_TENSOR_FFN_DOWN_EXPS,
1825
+ LLM_TENSOR_FFN_UP_EXPS,
1826
+ LLM_TENSOR_FFN_GATE_SHEXP,
1827
+ LLM_TENSOR_FFN_DOWN_SHEXP,
1828
+ LLM_TENSOR_FFN_UP_SHEXP,
1829
+ };
1830
+ case LLM_ARCH_GRANITE_HYBRID:
1831
+ return {
1832
+ LLM_TENSOR_TOKEN_EMBD,
1833
+ LLM_TENSOR_OUTPUT_NORM,
1834
+ LLM_TENSOR_OUTPUT,
1835
+ LLM_TENSOR_ATTN_NORM,
1836
+ LLM_TENSOR_SSM_IN,
1837
+ LLM_TENSOR_SSM_CONV1D,
1838
+ LLM_TENSOR_SSM_DT,
1839
+ LLM_TENSOR_SSM_A,
1840
+ LLM_TENSOR_SSM_D,
1841
+ LLM_TENSOR_SSM_NORM,
1842
+ LLM_TENSOR_SSM_OUT,
1843
+ LLM_TENSOR_ATTN_Q,
1844
+ LLM_TENSOR_ATTN_K,
1845
+ LLM_TENSOR_ATTN_V,
1846
+ LLM_TENSOR_ATTN_OUT,
1847
+ LLM_TENSOR_FFN_NORM,
1848
+ LLM_TENSOR_FFN_GATE,
1849
+ LLM_TENSOR_FFN_DOWN,
1850
+ LLM_TENSOR_FFN_UP,
1851
+ LLM_TENSOR_FFN_NORM,
1852
+ LLM_TENSOR_FFN_GATE_INP,
1853
+ LLM_TENSOR_FFN_GATE_EXPS,
1854
+ LLM_TENSOR_FFN_DOWN_EXPS,
1855
+ LLM_TENSOR_FFN_UP_EXPS,
1856
+ LLM_TENSOR_FFN_GATE_SHEXP,
1857
+ LLM_TENSOR_FFN_DOWN_SHEXP,
1858
+ LLM_TENSOR_FFN_UP_SHEXP,
1859
+ };
1860
+ case LLM_ARCH_WAVTOKENIZER_DEC:
1861
+ return {
1862
+ LLM_TENSOR_TOKEN_EMBD,
1863
+ LLM_TENSOR_TOKEN_EMBD_NORM,
1864
+ LLM_TENSOR_CONV1D,
1865
+ LLM_TENSOR_CONVNEXT_DW,
1866
+ LLM_TENSOR_CONVNEXT_NORM,
1867
+ LLM_TENSOR_CONVNEXT_PW1,
1868
+ LLM_TENSOR_CONVNEXT_PW2,
1869
+ LLM_TENSOR_CONVNEXT_GAMMA,
1870
+ LLM_TENSOR_OUTPUT_NORM,
1871
+ LLM_TENSOR_OUTPUT,
1872
+ LLM_TENSOR_POS_NET_CONV1,
1873
+ LLM_TENSOR_POS_NET_CONV2,
1874
+ LLM_TENSOR_POS_NET_NORM,
1875
+ LLM_TENSOR_POS_NET_NORM1,
1876
+ LLM_TENSOR_POS_NET_NORM2,
1877
+ LLM_TENSOR_POS_NET_ATTN_NORM,
1878
+ LLM_TENSOR_POS_NET_ATTN_Q,
1879
+ LLM_TENSOR_POS_NET_ATTN_K,
1880
+ LLM_TENSOR_POS_NET_ATTN_V,
1881
+ LLM_TENSOR_POS_NET_ATTN_OUT,
1882
+ };
1883
+ case LLM_ARCH_BAILINGMOE:
1884
+ return {
1885
+ LLM_TENSOR_TOKEN_EMBD,
1886
+ LLM_TENSOR_OUTPUT_NORM,
1887
+ LLM_TENSOR_OUTPUT,
1888
+ LLM_TENSOR_ROPE_FREQS,
1889
+ LLM_TENSOR_ATTN_NORM,
1890
+ LLM_TENSOR_ATTN_Q,
1891
+ LLM_TENSOR_ATTN_K,
1892
+ LLM_TENSOR_ATTN_V,
1893
+ LLM_TENSOR_ATTN_OUT,
1894
+ LLM_TENSOR_FFN_GATE_INP,
1895
+ LLM_TENSOR_FFN_NORM,
1896
+ LLM_TENSOR_FFN_GATE_EXPS,
1897
+ LLM_TENSOR_FFN_DOWN_EXPS,
1898
+ LLM_TENSOR_FFN_UP_EXPS,
1899
+ LLM_TENSOR_FFN_GATE_INP_SHEXP,
1900
+ LLM_TENSOR_FFN_GATE_SHEXP,
1901
+ LLM_TENSOR_FFN_DOWN_SHEXP,
1902
+ LLM_TENSOR_FFN_UP_SHEXP,
1903
+ };
1904
+ case LLM_ARCH_BAILINGMOE2:
1905
+ return {
1906
+ LLM_TENSOR_TOKEN_EMBD,
1907
+ LLM_TENSOR_OUTPUT_NORM,
1908
+ LLM_TENSOR_OUTPUT,
1909
+ LLM_TENSOR_ATTN_NORM,
1910
+ LLM_TENSOR_ATTN_Q_NORM,
1911
+ LLM_TENSOR_ATTN_K_NORM,
1912
+ LLM_TENSOR_ATTN_QKV,
1913
+ LLM_TENSOR_ATTN_OUT,
1914
+ LLM_TENSOR_FFN_GATE_INP,
1915
+ LLM_TENSOR_FFN_EXP_PROBS_B,
1916
+ LLM_TENSOR_FFN_NORM,
1917
+ LLM_TENSOR_FFN_GATE,
1918
+ LLM_TENSOR_FFN_DOWN,
1919
+ LLM_TENSOR_FFN_UP,
1920
+ LLM_TENSOR_FFN_GATE_EXPS,
1921
+ LLM_TENSOR_FFN_DOWN_EXPS,
1922
+ LLM_TENSOR_FFN_UP_EXPS,
1923
+ LLM_TENSOR_FFN_GATE_SHEXP,
1924
+ LLM_TENSOR_FFN_DOWN_SHEXP,
1925
+ LLM_TENSOR_FFN_UP_SHEXP,
1926
+ LLM_TENSOR_NEXTN_EH_PROJ,
1927
+ LLM_TENSOR_NEXTN_EMBED_TOKENS,
1928
+ LLM_TENSOR_NEXTN_ENORM,
1929
+ LLM_TENSOR_NEXTN_HNORM,
1930
+ LLM_TENSOR_NEXTN_SHARED_HEAD_HEAD,
1931
+ LLM_TENSOR_NEXTN_SHARED_HEAD_NORM,
1932
+ LLM_TENSOR_LAYER_OUT_NORM,
1933
+ };
1934
+ case LLM_ARCH_DOTS1:
1935
+ return {
1936
+ LLM_TENSOR_TOKEN_EMBD,
1937
+ LLM_TENSOR_OUTPUT_NORM,
1938
+ LLM_TENSOR_OUTPUT,
1939
+ LLM_TENSOR_ATTN_NORM,
1940
+ LLM_TENSOR_ATTN_Q,
1941
+ LLM_TENSOR_ATTN_Q_NORM,
1942
+ LLM_TENSOR_ATTN_K,
1943
+ LLM_TENSOR_ATTN_K_NORM,
1944
+ LLM_TENSOR_ATTN_V,
1945
+ LLM_TENSOR_ATTN_OUT,
1946
+ LLM_TENSOR_FFN_NORM,
1947
+ LLM_TENSOR_FFN_GATE,
1948
+ LLM_TENSOR_FFN_UP,
1949
+ LLM_TENSOR_FFN_DOWN,
1950
+ LLM_TENSOR_FFN_GATE_INP,
1951
+ LLM_TENSOR_FFN_GATE_EXPS,
1952
+ LLM_TENSOR_FFN_DOWN_EXPS,
1953
+ LLM_TENSOR_FFN_UP_EXPS,
1954
+ LLM_TENSOR_FFN_GATE_INP_SHEXP,
1955
+ LLM_TENSOR_FFN_GATE_SHEXP,
1956
+ LLM_TENSOR_FFN_DOWN_SHEXP,
1957
+ LLM_TENSOR_FFN_UP_SHEXP,
1958
+ LLM_TENSOR_FFN_EXP_PROBS_B,
1959
+ };
1960
+ case LLM_ARCH_ERNIE4_5_MOE:
1961
+ return {
1962
+ LLM_TENSOR_TOKEN_EMBD,
1963
+ LLM_TENSOR_OUTPUT_NORM,
1964
+ LLM_TENSOR_OUTPUT,
1965
+ LLM_TENSOR_ATTN_NORM,
1966
+ LLM_TENSOR_ATTN_Q,
1967
+ LLM_TENSOR_ATTN_K,
1968
+ LLM_TENSOR_ATTN_V,
1969
+ LLM_TENSOR_ATTN_OUT,
1970
+ LLM_TENSOR_FFN_NORM,
1971
+ LLM_TENSOR_FFN_GATE,
1972
+ LLM_TENSOR_FFN_DOWN,
1973
+ LLM_TENSOR_FFN_UP,
1974
+ LLM_TENSOR_FFN_GATE_INP,
1975
+ LLM_TENSOR_FFN_GATE_SHEXP,
1976
+ LLM_TENSOR_FFN_DOWN_SHEXP,
1977
+ LLM_TENSOR_FFN_UP_SHEXP,
1978
+ LLM_TENSOR_FFN_GATE_EXPS,
1979
+ LLM_TENSOR_FFN_DOWN_EXPS,
1980
+ LLM_TENSOR_FFN_UP_EXPS,
1981
+ LLM_TENSOR_FFN_EXP_PROBS_B,
1982
+ };
1983
+ case LLM_ARCH_HUNYUAN_MOE:
1984
+ return {
1985
+ LLM_TENSOR_TOKEN_EMBD,
1986
+ LLM_TENSOR_OUTPUT_NORM,
1987
+ LLM_TENSOR_OUTPUT,
1988
+ LLM_TENSOR_ATTN_NORM,
1989
+ LLM_TENSOR_ATTN_Q,
1990
+ LLM_TENSOR_ATTN_Q_NORM,
1991
+ LLM_TENSOR_ATTN_K,
1992
+ LLM_TENSOR_ATTN_K_NORM,
1993
+ LLM_TENSOR_ATTN_V,
1994
+ LLM_TENSOR_ATTN_OUT,
1995
+ LLM_TENSOR_FFN_GATE_INP,
1996
+ LLM_TENSOR_FFN_NORM,
1997
+ LLM_TENSOR_FFN_GATE_SHEXP,
1998
+ LLM_TENSOR_FFN_DOWN_SHEXP,
1999
+ LLM_TENSOR_FFN_UP_SHEXP,
2000
+ LLM_TENSOR_FFN_GATE_EXPS,
2001
+ LLM_TENSOR_FFN_DOWN_EXPS,
2002
+ LLM_TENSOR_FFN_UP_EXPS,
2003
+ };
2004
+ case LLM_ARCH_OPENAI_MOE:
2005
+ return {
2006
+ LLM_TENSOR_TOKEN_EMBD,
2007
+ LLM_TENSOR_OUTPUT_NORM,
2008
+ LLM_TENSOR_OUTPUT,
2009
+ LLM_TENSOR_ATTN_NORM,
2010
+ LLM_TENSOR_ATTN_POST_NORM,
2011
+ LLM_TENSOR_ATTN_Q,
2012
+ LLM_TENSOR_ATTN_K,
2013
+ LLM_TENSOR_ATTN_V,
2014
+ LLM_TENSOR_ATTN_OUT,
2015
+ LLM_TENSOR_ATTN_SINKS,
2016
+ LLM_TENSOR_FFN_GATE_INP,
2017
+ LLM_TENSOR_FFN_GATE_EXPS,
2018
+ LLM_TENSOR_FFN_DOWN_EXPS,
2019
+ LLM_TENSOR_FFN_UP_EXPS,
2020
+ };
2021
+ case LLM_ARCH_LFM2:
2022
+ return {
2023
+ LLM_TENSOR_ATTN_NORM,
2024
+ LLM_TENSOR_ATTN_Q,
2025
+ LLM_TENSOR_ATTN_K,
2026
+ LLM_TENSOR_ATTN_V,
2027
+ LLM_TENSOR_ATTN_OUT,
2028
+ LLM_TENSOR_ATTN_K_NORM,
2029
+ LLM_TENSOR_ATTN_Q_NORM,
2030
+ LLM_TENSOR_FFN_DOWN,
2031
+ LLM_TENSOR_FFN_GATE,
2032
+ LLM_TENSOR_FFN_NORM,
2033
+ LLM_TENSOR_FFN_UP,
2034
+ LLM_TENSOR_SHORTCONV_CONV,
2035
+ LLM_TENSOR_SHORTCONV_INPROJ,
2036
+ LLM_TENSOR_SHORTCONV_OUTPROJ,
2037
+ LLM_TENSOR_TOKEN_EMBD,
2038
+ LLM_TENSOR_OUTPUT_NORM_LFM2,
2039
+ LLM_TENSOR_OUTPUT,
2040
+ };
2041
+ case LLM_ARCH_LFM2MOE:
2042
+ return {
2043
+ LLM_TENSOR_ATTN_NORM,
2044
+ LLM_TENSOR_ATTN_Q,
2045
+ LLM_TENSOR_ATTN_K,
2046
+ LLM_TENSOR_ATTN_V,
2047
+ LLM_TENSOR_ATTN_OUT,
2048
+ LLM_TENSOR_ATTN_K_NORM,
2049
+ LLM_TENSOR_ATTN_Q_NORM,
2050
+ LLM_TENSOR_FFN_DOWN,
2051
+ LLM_TENSOR_FFN_GATE,
2052
+ LLM_TENSOR_FFN_NORM,
2053
+ LLM_TENSOR_FFN_UP,
2054
+ LLM_TENSOR_SHORTCONV_CONV,
2055
+ LLM_TENSOR_SHORTCONV_INPROJ,
2056
+ LLM_TENSOR_SHORTCONV_OUTPROJ,
2057
+ LLM_TENSOR_TOKEN_EMBD,
2058
+ LLM_TENSOR_OUTPUT_NORM_LFM2,
2059
+ LLM_TENSOR_FFN_GATE_INP,
2060
+ LLM_TENSOR_FFN_GATE_EXPS,
2061
+ LLM_TENSOR_FFN_DOWN_EXPS,
2062
+ LLM_TENSOR_FFN_UP_EXPS,
2063
+ LLM_TENSOR_FFN_EXP_PROBS_B,
2064
+ };
2065
+ case LLM_ARCH_SMALLTHINKER:
2066
+ return {
2067
+ LLM_TENSOR_TOKEN_EMBD,
2068
+ LLM_TENSOR_OUTPUT_NORM,
2069
+ LLM_TENSOR_OUTPUT,
2070
+ LLM_TENSOR_ATTN_NORM,
2071
+ LLM_TENSOR_ATTN_Q,
2072
+ LLM_TENSOR_ATTN_K,
2073
+ LLM_TENSOR_ATTN_V,
2074
+ LLM_TENSOR_ATTN_OUT,
2075
+ LLM_TENSOR_FFN_NORM,
2076
+ LLM_TENSOR_FFN_GATE,
2077
+ LLM_TENSOR_FFN_DOWN,
2078
+ LLM_TENSOR_FFN_UP,
2079
+ LLM_TENSOR_FFN_GATE_INP,
2080
+ LLM_TENSOR_FFN_GATE_EXPS,
2081
+ LLM_TENSOR_FFN_DOWN_EXPS,
2082
+ LLM_TENSOR_FFN_UP_EXPS,
2083
+ };
2084
+ case LLM_ARCH_APERTUS:
2085
+ return {
2086
+ LLM_TENSOR_TOKEN_EMBD,
2087
+ LLM_TENSOR_OUTPUT_NORM,
2088
+ LLM_TENSOR_OUTPUT,
2089
+ LLM_TENSOR_ROPE_FREQS,
2090
+ LLM_TENSOR_ATTN_NORM,
2091
+ LLM_TENSOR_ATTN_Q,
2092
+ LLM_TENSOR_ATTN_K,
2093
+ LLM_TENSOR_ATTN_V,
2094
+ LLM_TENSOR_ATTN_OUT,
2095
+ LLM_TENSOR_ATTN_Q_NORM,
2096
+ LLM_TENSOR_ATTN_K_NORM,
2097
+ LLM_TENSOR_FFN_NORM,
2098
+ LLM_TENSOR_FFN_DOWN,
2099
+ LLM_TENSOR_FFN_UP,
2100
+ };
2101
+ case LLM_ARCH_SEED_OSS:
2102
+ return {
2103
+ LLM_TENSOR_TOKEN_EMBD,
2104
+ LLM_TENSOR_OUTPUT_NORM,
2105
+ LLM_TENSOR_OUTPUT,
2106
+ LLM_TENSOR_ATTN_NORM,
2107
+ LLM_TENSOR_ATTN_Q,
2108
+ LLM_TENSOR_ATTN_K,
2109
+ LLM_TENSOR_ATTN_V,
2110
+ LLM_TENSOR_ATTN_OUT,
2111
+ LLM_TENSOR_ATTN_POST_NORM,
2112
+ LLM_TENSOR_FFN_GATE,
2113
+ LLM_TENSOR_FFN_DOWN,
2114
+ LLM_TENSOR_FFN_UP,
2115
+ };
2116
+ case LLM_ARCH_GROVEMOE:
2117
+ return {
2118
+ LLM_TENSOR_TOKEN_EMBD,
2119
+ LLM_TENSOR_OUTPUT_NORM,
2120
+ LLM_TENSOR_OUTPUT,
2121
+ LLM_TENSOR_ATTN_NORM,
2122
+ LLM_TENSOR_ATTN_Q,
2123
+ LLM_TENSOR_ATTN_Q_NORM,
2124
+ LLM_TENSOR_ATTN_K,
2125
+ LLM_TENSOR_ATTN_K_NORM,
2126
+ LLM_TENSOR_ATTN_V,
2127
+ LLM_TENSOR_ATTN_OUT,
2128
+ LLM_TENSOR_FFN_NORM,
2129
+ LLM_TENSOR_FFN_GATE_INP,
2130
+ LLM_TENSOR_FFN_GATE_EXPS,
2131
+ LLM_TENSOR_FFN_DOWN_EXPS,
2132
+ LLM_TENSOR_FFN_UP_EXPS,
2133
+ LLM_TENSOR_FFN_GATE_CHEXPS,
2134
+ LLM_TENSOR_FFN_DOWN_CHEXPS,
2135
+ LLM_TENSOR_FFN_UP_CHEXPS,
2136
+ };
2137
+ case LLM_ARCH_MINIMAX_M2:
2138
+ return {
2139
+ LLM_TENSOR_TOKEN_EMBD,
2140
+ LLM_TENSOR_OUTPUT_NORM,
2141
+ LLM_TENSOR_OUTPUT,
2142
+ LLM_TENSOR_ATTN_NORM,
2143
+ LLM_TENSOR_ATTN_Q,
2144
+ LLM_TENSOR_ATTN_K,
2145
+ LLM_TENSOR_ATTN_V,
2146
+ LLM_TENSOR_ATTN_OUT,
2147
+ LLM_TENSOR_ATTN_Q_NORM,
2148
+ LLM_TENSOR_ATTN_K_NORM,
2149
+ LLM_TENSOR_FFN_NORM,
2150
+ LLM_TENSOR_FFN_GATE_INP,
2151
+ LLM_TENSOR_FFN_GATE_EXPS,
2152
+ LLM_TENSOR_FFN_DOWN_EXPS,
2153
+ LLM_TENSOR_FFN_UP_EXPS,
2154
+ LLM_TENSOR_FFN_EXP_PROBS_B,
2155
+ };
2156
+ case LLM_ARCH_COGVLM:
2157
+ return {
2158
+ LLM_TENSOR_TOKEN_EMBD,
2159
+ LLM_TENSOR_OUTPUT_NORM,
2160
+ LLM_TENSOR_OUTPUT,
2161
+ LLM_TENSOR_ATTN_NORM,
2162
+ LLM_TENSOR_ATTN_QKV,
2163
+ LLM_TENSOR_ATTN_OUT,
2164
+ LLM_TENSOR_FFN_NORM,
2165
+ LLM_TENSOR_FFN_GATE,
2166
+ LLM_TENSOR_FFN_DOWN,
2167
+ LLM_TENSOR_FFN_UP,
2168
+ LLM_TENSOR_VISEXP_ATTN_QKV,
2169
+ LLM_TENSOR_VISEXP_ATTN_OUT,
2170
+ LLM_TENSOR_VISEXP_FFN_GATE,
2171
+ LLM_TENSOR_VISEXP_FFN_DOWN,
2172
+ LLM_TENSOR_VISEXP_FFN_UP,
2173
+ };
2174
+ case LLM_ARCH_GPTJ:
2175
+ case LLM_ARCH_UNKNOWN:
2176
+ return {
2177
+ LLM_TENSOR_TOKEN_EMBD,
2178
+ };
2179
+ default:
2180
+ GGML_ABORT("unknown architecture for tensor mapping");
2181
+ }
2182
+ }
2183
+
2551
2184
  // declare information about the model weight tensors:
2552
2185
  // - the layer in which the tensor is going to be used. this is needed in order to assign the correct buffer type for the weight
2553
2186
  // - the operator which is going to use the weight. this is needed to determine if the respective backend supports the operator
@@ -2569,6 +2202,7 @@ static const std::map<llm_tensor, llm_tensor_info> LLM_TENSOR_INFOS = {
2569
2202
  {LLM_TENSOR_DENSE_2_OUT, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}}, // Dense layer output
2570
2203
  {LLM_TENSOR_DENSE_3_OUT, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}}, // Dense layer output
2571
2204
  {LLM_TENSOR_OUTPUT_NORM, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
2205
+ {LLM_TENSOR_OUTPUT_NORM_LFM2, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
2572
2206
  {LLM_TENSOR_DEC_OUTPUT_NORM, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
2573
2207
  {LLM_TENSOR_ENC_OUTPUT_NORM, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
2574
2208
  {LLM_TENSOR_ROPE_FREQS, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ROPE}},
@@ -2757,13 +2391,20 @@ std::string LLM_KV::operator()(llm_kv kv) const {
2757
2391
  return name;
2758
2392
  }
2759
2393
 
2394
+ LLM_TN_IMPL::LLM_TN_IMPL(llm_arch arch, llm_tensor tensor, const char * suffix, int bid, int xid)
2395
+ : arch(arch), tensor(tensor), suffix(suffix), bid(bid), xid(xid),
2396
+ model_tensors(llm_get_tensor_names(arch)) {}
2397
+
2760
2398
  std::string LLM_TN_IMPL::str() const {
2761
- if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
2762
- return "__missing__";
2399
+ if (LLM_TENSOR_NAMES.find(tensor) == LLM_TENSOR_NAMES.end()) {
2400
+ GGML_ABORT("unknown tensor name for tensor id %d", static_cast<int>(tensor));
2763
2401
  }
2764
2402
 
2765
- std::string name = ::format(LLM_TENSOR_NAMES.at(arch).at(tensor), bid, xid);
2403
+ if (model_tensors.find(tensor) == model_tensors.end()) {
2404
+ return LLM_TENSOR_NAMES.at(tensor);
2405
+ }
2766
2406
 
2407
+ std::string name = ::format(LLM_TENSOR_NAMES.at(tensor), bid, xid);
2767
2408
  if (suffix != nullptr) {
2768
2409
  name += ".";
2769
2410
  name += suffix;
@@ -2817,6 +2458,7 @@ bool llm_arch_is_hybrid(const llm_arch & arch) {
2817
2458
  case LLM_ARCH_LFM2:
2818
2459
  case LLM_ARCH_LFM2MOE:
2819
2460
  case LLM_ARCH_NEMOTRON_H:
2461
+ case LLM_ARCH_NEMOTRON_H_MOE:
2820
2462
  case LLM_ARCH_QWEN3NEXT:
2821
2463
  return true;
2822
2464
  default: