@mariozechner/pi-ai 0.20.2 → 0.22.0

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
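The file being diffed is a generated TypeScript table keyed by provider and model id. As a minimal sketch of how such a table can be read (the import path is an assumption, not confirmed by this diff; all field names and values below are taken from entries visible in it):

```ts
// Minimal sketch, assuming the generated MODELS table is exported from the
// package root (the import path is an assumption, not confirmed by this diff).
import { MODELS } from "@mariozechner/pi-ai";

// Every entry carries the fields visible in this diff: id, name, api, provider,
// baseUrl, optional headers/compat, reasoning, input modalities, cost,
// contextWindow and maxTokens.
const codex = MODELS["github-copilot"]["gpt-5.1-codex"];
console.log(codex.name);          // "GPT-5.1-Codex"
console.log(codex.api);           // "openai-responses"
console.log(codex.contextWindow); // 128000
console.log(codex.maxTokens);     // 128000

// Free OpenRouter entries added in 0.22.0 report zero cost across the board.
const gemma = MODELS["openrouter"]["google/gemma-3-27b-it:free"];
console.log(gemma.cost.input, gemma.cost.output); // 0 0
```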
@@ -1,7 +1,7 @@
  // This file is auto-generated by scripts/generate-models.ts
  // Do not edit manually - run 'npm run generate-models' to update
  export const MODELS = {
- anthropic: {
+ "anthropic": {
  "claude-opus-4-0": {
  id: "claude-opus-4-0",
  name: "Claude Opus 4 (latest)",
@@ -360,7 +360,7 @@ export const MODELS = {
  maxTokens: 64000,
  },
  },
- google: {
+ "google": {
  "gemini-2.5-flash-preview-05-20": {
  id: "gemini-2.5-flash-preview-05-20",
  name: "Gemini 2.5 Flash Preview 05-20",
@@ -702,7 +702,7 @@ export const MODELS = {
  maxTokens: 8192,
  },
  },
- openai: {
+ "openai": {
  "gpt-4.1-nano": {
  id: "gpt-4.1-nano",
  name: "GPT-4.1 nano",
@@ -1265,7 +1265,7 @@ export const MODELS = {
  maxTokens: 16384,
  },
  },
- groq: {
+ "groq": {
  "llama-3.1-8b-instant": {
  id: "llama-3.1-8b-instant",
  name: "Llama 3.1 8B Instant",
@@ -1522,7 +1522,7 @@ export const MODELS = {
  maxTokens: 8192,
  },
  },
- cerebras: {
+ "cerebras": {
  "qwen-3-235b-a22b-instruct-2507": {
  id: "qwen-3-235b-a22b-instruct-2507",
  name: "Qwen 3 235B Instruct",
@@ -1575,7 +1575,7 @@ export const MODELS = {
  maxTokens: 32768,
  },
  },
- xai: {
+ "xai": {
  "grok-4-fast-non-reasoning": {
  id: "grok-4-fast-non-reasoning",
  name: "Grok 4 Fast (Non-Reasoning)",
@@ -1951,7 +1951,7 @@ export const MODELS = {
  maxTokens: 8192,
  },
  },
- zai: {
+ "zai": {
  "glm-4.5-flash": {
  id: "glm-4.5-flash",
  name: "GLM-4.5-Flash",
@@ -2055,7 +2055,7 @@ export const MODELS = {
  maxTokens: 32768,
  },
  },
- mistral: {
+ "mistral": {
  "devstral-medium-2507": {
  id: "devstral-medium-2507",
  name: "Devstral Medium",
@@ -2465,7 +2465,360 @@ export const MODELS = {
  maxTokens: 16384,
  },
  },
- openrouter: {
+ "github-copilot": {
+ "grok-code-fast-1": {
+ id: "grok-code-fast-1",
+ name: "Grok Code Fast 1",
+ api: "openai-completions",
+ provider: "github-copilot",
+ baseUrl: "https://api.individual.githubcopilot.com",
+ headers: { "User-Agent": "GitHubCopilotChat/0.35.0", "Editor-Version": "vscode/1.107.0", "Editor-Plugin-Version": "copilot-chat/0.35.0", "Copilot-Integration-Id": "vscode-chat" },
+ compat: { "supportsStore": false, "supportsDeveloperRole": false, "supportsReasoningEffort": false },
+ reasoning: true,
+ input: ["text"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 128000,
+ maxTokens: 64000,
+ },
+ "gpt-5.1-codex": {
+ id: "gpt-5.1-codex",
+ name: "GPT-5.1-Codex",
+ api: "openai-responses",
+ provider: "github-copilot",
+ baseUrl: "https://api.individual.githubcopilot.com",
+ headers: { "User-Agent": "GitHubCopilotChat/0.35.0", "Editor-Version": "vscode/1.107.0", "Editor-Plugin-Version": "copilot-chat/0.35.0", "Copilot-Integration-Id": "vscode-chat" },
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 128000,
+ maxTokens: 128000,
+ },
+ "claude-haiku-4.5": {
+ id: "claude-haiku-4.5",
+ name: "Claude Haiku 4.5",
+ api: "openai-completions",
+ provider: "github-copilot",
+ baseUrl: "https://api.individual.githubcopilot.com",
+ headers: { "User-Agent": "GitHubCopilotChat/0.35.0", "Editor-Version": "vscode/1.107.0", "Editor-Plugin-Version": "copilot-chat/0.35.0", "Copilot-Integration-Id": "vscode-chat" },
+ compat: { "supportsStore": false, "supportsDeveloperRole": false, "supportsReasoningEffort": false },
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 128000,
+ maxTokens: 16000,
+ },
+ "gemini-3-pro-preview": {
+ id: "gemini-3-pro-preview",
+ name: "Gemini 3 Pro Preview",
+ api: "openai-completions",
+ provider: "github-copilot",
+ baseUrl: "https://api.individual.githubcopilot.com",
+ headers: { "User-Agent": "GitHubCopilotChat/0.35.0", "Editor-Version": "vscode/1.107.0", "Editor-Plugin-Version": "copilot-chat/0.35.0", "Copilot-Integration-Id": "vscode-chat" },
+ compat: { "supportsStore": false, "supportsDeveloperRole": false, "supportsReasoningEffort": false },
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 128000,
+ maxTokens: 64000,
+ },
+ "oswe-vscode-prime": {
+ id: "oswe-vscode-prime",
+ name: "Raptor Mini (Preview)",
+ api: "openai-completions",
+ provider: "github-copilot",
+ baseUrl: "https://api.individual.githubcopilot.com",
+ headers: { "User-Agent": "GitHubCopilotChat/0.35.0", "Editor-Version": "vscode/1.107.0", "Editor-Plugin-Version": "copilot-chat/0.35.0", "Copilot-Integration-Id": "vscode-chat" },
+ compat: { "supportsStore": false, "supportsDeveloperRole": false, "supportsReasoningEffort": false },
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 200000,
+ maxTokens: 64000,
+ },
+ "gpt-5.1-codex-mini": {
+ id: "gpt-5.1-codex-mini",
+ name: "GPT-5.1-Codex-mini",
+ api: "openai-responses",
+ provider: "github-copilot",
+ baseUrl: "https://api.individual.githubcopilot.com",
+ headers: { "User-Agent": "GitHubCopilotChat/0.35.0", "Editor-Version": "vscode/1.107.0", "Editor-Plugin-Version": "copilot-chat/0.35.0", "Copilot-Integration-Id": "vscode-chat" },
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 128000,
+ maxTokens: 100000,
+ },
+ "gpt-5.1": {
+ id: "gpt-5.1",
+ name: "GPT-5.1",
+ api: "openai-responses",
+ provider: "github-copilot",
+ baseUrl: "https://api.individual.githubcopilot.com",
+ headers: { "User-Agent": "GitHubCopilotChat/0.35.0", "Editor-Version": "vscode/1.107.0", "Editor-Plugin-Version": "copilot-chat/0.35.0", "Copilot-Integration-Id": "vscode-chat" },
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 128000,
+ maxTokens: 128000,
+ },
+ "gpt-5-codex": {
+ id: "gpt-5-codex",
+ name: "GPT-5-Codex",
+ api: "openai-responses",
+ provider: "github-copilot",
+ baseUrl: "https://api.individual.githubcopilot.com",
+ headers: { "User-Agent": "GitHubCopilotChat/0.35.0", "Editor-Version": "vscode/1.107.0", "Editor-Plugin-Version": "copilot-chat/0.35.0", "Copilot-Integration-Id": "vscode-chat" },
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 128000,
+ maxTokens: 128000,
+ },
+ "gpt-4o": {
+ id: "gpt-4o",
+ name: "GPT-4o",
+ api: "openai-completions",
+ provider: "github-copilot",
+ baseUrl: "https://api.individual.githubcopilot.com",
+ headers: { "User-Agent": "GitHubCopilotChat/0.35.0", "Editor-Version": "vscode/1.107.0", "Editor-Plugin-Version": "copilot-chat/0.35.0", "Copilot-Integration-Id": "vscode-chat" },
+ compat: { "supportsStore": false, "supportsDeveloperRole": false, "supportsReasoningEffort": false },
+ reasoning: false,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 64000,
+ maxTokens: 16384,
+ },
+ "gpt-4.1": {
+ id: "gpt-4.1",
+ name: "GPT-4.1",
+ api: "openai-completions",
+ provider: "github-copilot",
+ baseUrl: "https://api.individual.githubcopilot.com",
+ headers: { "User-Agent": "GitHubCopilotChat/0.35.0", "Editor-Version": "vscode/1.107.0", "Editor-Plugin-Version": "copilot-chat/0.35.0", "Copilot-Integration-Id": "vscode-chat" },
+ compat: { "supportsStore": false, "supportsDeveloperRole": false, "supportsReasoningEffort": false },
+ reasoning: false,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 128000,
+ maxTokens: 16384,
+ },
+ "gpt-5-mini": {
+ id: "gpt-5-mini",
+ name: "GPT-5-mini",
+ api: "openai-responses",
+ provider: "github-copilot",
+ baseUrl: "https://api.individual.githubcopilot.com",
+ headers: { "User-Agent": "GitHubCopilotChat/0.35.0", "Editor-Version": "vscode/1.107.0", "Editor-Plugin-Version": "copilot-chat/0.35.0", "Copilot-Integration-Id": "vscode-chat" },
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 128000,
+ maxTokens: 64000,
+ },
+ "gemini-2.5-pro": {
+ id: "gemini-2.5-pro",
+ name: "Gemini 2.5 Pro",
+ api: "openai-completions",
+ provider: "github-copilot",
+ baseUrl: "https://api.individual.githubcopilot.com",
+ headers: { "User-Agent": "GitHubCopilotChat/0.35.0", "Editor-Version": "vscode/1.107.0", "Editor-Plugin-Version": "copilot-chat/0.35.0", "Copilot-Integration-Id": "vscode-chat" },
+ compat: { "supportsStore": false, "supportsDeveloperRole": false, "supportsReasoningEffort": false },
+ reasoning: false,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 128000,
+ maxTokens: 64000,
+ },
+ "gpt-5.1-codex-max": {
+ id: "gpt-5.1-codex-max",
+ name: "GPT-5.1-Codex-max",
+ api: "openai-responses",
+ provider: "github-copilot",
+ baseUrl: "https://api.individual.githubcopilot.com",
+ headers: { "User-Agent": "GitHubCopilotChat/0.35.0", "Editor-Version": "vscode/1.107.0", "Editor-Plugin-Version": "copilot-chat/0.35.0", "Copilot-Integration-Id": "vscode-chat" },
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 128000,
+ maxTokens: 128000,
+ },
+ "claude-sonnet-4": {
+ id: "claude-sonnet-4",
+ name: "Claude Sonnet 4",
+ api: "openai-completions",
+ provider: "github-copilot",
+ baseUrl: "https://api.individual.githubcopilot.com",
+ headers: { "User-Agent": "GitHubCopilotChat/0.35.0", "Editor-Version": "vscode/1.107.0", "Editor-Plugin-Version": "copilot-chat/0.35.0", "Copilot-Integration-Id": "vscode-chat" },
+ compat: { "supportsStore": false, "supportsDeveloperRole": false, "supportsReasoningEffort": false },
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 128000,
+ maxTokens: 16000,
+ },
+ "gpt-5": {
+ id: "gpt-5",
+ name: "GPT-5",
+ api: "openai-responses",
+ provider: "github-copilot",
+ baseUrl: "https://api.individual.githubcopilot.com",
+ headers: { "User-Agent": "GitHubCopilotChat/0.35.0", "Editor-Version": "vscode/1.107.0", "Editor-Plugin-Version": "copilot-chat/0.35.0", "Copilot-Integration-Id": "vscode-chat" },
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 128000,
+ maxTokens: 128000,
+ },
+ "claude-opus-4.5": {
+ id: "claude-opus-4.5",
+ name: "Claude Opus 4.5",
+ api: "openai-completions",
+ provider: "github-copilot",
+ baseUrl: "https://api.individual.githubcopilot.com",
+ headers: { "User-Agent": "GitHubCopilotChat/0.35.0", "Editor-Version": "vscode/1.107.0", "Editor-Plugin-Version": "copilot-chat/0.35.0", "Copilot-Integration-Id": "vscode-chat" },
+ compat: { "supportsStore": false, "supportsDeveloperRole": false, "supportsReasoningEffort": false },
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 128000,
+ maxTokens: 16000,
+ },
+ "gpt-5.2": {
+ id: "gpt-5.2",
+ name: "GPT-5.2",
+ api: "openai-responses",
+ provider: "github-copilot",
+ baseUrl: "https://api.individual.githubcopilot.com",
+ headers: { "User-Agent": "GitHubCopilotChat/0.35.0", "Editor-Version": "vscode/1.107.0", "Editor-Plugin-Version": "copilot-chat/0.35.0", "Copilot-Integration-Id": "vscode-chat" },
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 128000,
+ maxTokens: 64000,
+ },
+ "claude-sonnet-4.5": {
+ id: "claude-sonnet-4.5",
+ name: "Claude Sonnet 4.5",
+ api: "openai-completions",
+ provider: "github-copilot",
+ baseUrl: "https://api.individual.githubcopilot.com",
+ headers: { "User-Agent": "GitHubCopilotChat/0.35.0", "Editor-Version": "vscode/1.107.0", "Editor-Plugin-Version": "copilot-chat/0.35.0", "Copilot-Integration-Id": "vscode-chat" },
+ compat: { "supportsStore": false, "supportsDeveloperRole": false, "supportsReasoningEffort": false },
+ reasoning: true,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 128000,
+ maxTokens: 16000,
+ },
+ },
+ "openrouter": {
+ "nvidia/nemotron-3-nano-30b-a3b:free": {
+ id: "nvidia/nemotron-3-nano-30b-a3b:free",
+ name: "NVIDIA: Nemotron 3 Nano 30B A3B (free)",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: true,
+ input: ["text"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 256000,
+ maxTokens: 4096,
+ },
  "openai/gpt-5.2-chat": {
  id: "openai/gpt-5.2-chat",
  name: "OpenAI: GPT-5.2 Chat",
@@ -2764,7 +3117,7 @@ export const MODELS = {
  reasoning: true,
  input: ["text"],
  cost: {
- input: 0.25,
+ input: 0.24,
  output: 0.38,
  cacheRead: 0.19,
  cacheWrite: 0,
@@ -3070,13 +3423,13 @@ export const MODELS = {
  reasoning: true,
  input: ["text"],
  cost: {
- input: 0.254,
- output: 1.02,
- cacheRead: 0.127,
+ input: 0.19999999999999998,
+ output: 1,
+ cacheRead: 0,
  cacheWrite: 0,
  },
- contextWindow: 262144,
- maxTokens: 4096,
+ contextWindow: 196608,
+ maxTokens: 131072,
  },
  "deepcogito/cogito-v2-preview-llama-405b": {
  id: "deepcogito/cogito-v2-preview-llama-405b",
@@ -3291,9 +3644,9 @@ export const MODELS = {
  reasoning: true,
  input: ["text"],
  cost: {
- input: 0.39999999999999997,
- output: 1.75,
- cacheRead: 0,
+ input: 0.38,
+ output: 1.69,
+ cacheRead: 0.06,
  cacheWrite: 0,
  },
  contextWindow: 202752,
@@ -3344,11 +3697,11 @@ export const MODELS = {
  cost: {
  input: 0.21,
  output: 0.32,
- cacheRead: 0.16799999999999998,
+ cacheRead: 0,
  cacheWrite: 0,
  },
  contextWindow: 163840,
- maxTokens: 4096,
+ maxTokens: 65536,
  },
  "google/gemini-2.5-flash-preview-09-2025": {
  id: "google/gemini-2.5-flash-preview-09-2025",
@@ -5067,6 +5420,23 @@ export const MODELS = {
  contextWindow: 131072,
  maxTokens: 131072,
  },
+ "google/gemma-3-27b-it:free": {
+ id: "google/gemma-3-27b-it:free",
+ name: "Google: Gemma 3 27B (free)",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text", "image"],
+ cost: {
+ input: 0,
+ output: 0,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 131072,
+ maxTokens: 4096,
+ },
  "google/gemma-3-27b-it": {
  id: "google/gemma-3-27b-it",
  name: "Google: Gemma 3 27B",
@@ -5560,9 +5930,9 @@ export const MODELS = {
  contextWindow: 32768,
  maxTokens: 4096,
  },
- "anthropic/claude-3.5-haiku-20241022": {
- id: "anthropic/claude-3.5-haiku-20241022",
- name: "Anthropic: Claude 3.5 Haiku (2024-10-22)",
+ "anthropic/claude-3.5-haiku": {
+ id: "anthropic/claude-3.5-haiku",
+ name: "Anthropic: Claude 3.5 Haiku",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
@@ -5577,9 +5947,9 @@ export const MODELS = {
  contextWindow: 200000,
  maxTokens: 8192,
  },
- "anthropic/claude-3.5-haiku": {
- id: "anthropic/claude-3.5-haiku",
- name: "Anthropic: Claude 3.5 Haiku",
+ "anthropic/claude-3.5-haiku-20241022": {
+ id: "anthropic/claude-3.5-haiku-20241022",
+ name: "Anthropic: Claude 3.5 Haiku (2024-10-22)",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
@@ -5611,34 +5981,34 @@ export const MODELS = {
  contextWindow: 200000,
  maxTokens: 8192,
  },
- "mistralai/ministral-8b": {
- id: "mistralai/ministral-8b",
- name: "Mistral: Ministral 8B",
+ "mistralai/ministral-3b": {
+ id: "mistralai/ministral-3b",
+ name: "Mistral: Ministral 3B",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.09999999999999999,
- output: 0.09999999999999999,
+ input: 0.04,
+ output: 0.04,
  cacheRead: 0,
  cacheWrite: 0,
  },
  contextWindow: 131072,
  maxTokens: 4096,
  },
- "mistralai/ministral-3b": {
- id: "mistralai/ministral-3b",
- name: "Mistral: Ministral 3B",
+ "mistralai/ministral-8b": {
+ id: "mistralai/ministral-8b",
+ name: "Mistral: Ministral 8B",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.04,
- output: 0.04,
+ input: 0.09999999999999999,
+ output: 0.09999999999999999,
  cacheRead: 0,
  cacheWrite: 0,
  },
@@ -5832,38 +6202,38 @@ export const MODELS = {
  contextWindow: 131072,
  maxTokens: 16384,
  },
- "meta-llama/llama-3.1-405b-instruct": {
- id: "meta-llama/llama-3.1-405b-instruct",
- name: "Meta: Llama 3.1 405B Instruct",
+ "meta-llama/llama-3.1-70b-instruct": {
+ id: "meta-llama/llama-3.1-70b-instruct",
+ name: "Meta: Llama 3.1 70B Instruct",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 3.5,
- output: 3.5,
+ input: 0.39999999999999997,
+ output: 0.39999999999999997,
  cacheRead: 0,
  cacheWrite: 0,
  },
- contextWindow: 130815,
+ contextWindow: 131072,
  maxTokens: 4096,
  },
- "meta-llama/llama-3.1-70b-instruct": {
- id: "meta-llama/llama-3.1-70b-instruct",
- name: "Meta: Llama 3.1 70B Instruct",
+ "meta-llama/llama-3.1-405b-instruct": {
+ id: "meta-llama/llama-3.1-405b-instruct",
+ name: "Meta: Llama 3.1 405B Instruct",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.39999999999999997,
- output: 0.39999999999999997,
+ input: 3.5,
+ output: 3.5,
  cacheRead: 0,
  cacheWrite: 0,
  },
- contextWindow: 131072,
+ contextWindow: 130815,
  maxTokens: 4096,
  },
  "mistralai/mistral-nemo": {
@@ -6002,23 +6372,6 @@ export const MODELS = {
  contextWindow: 128000,
  maxTokens: 4096,
  },
- "openai/gpt-4o-2024-05-13": {
- id: "openai/gpt-4o-2024-05-13",
- name: "OpenAI: GPT-4o (2024-05-13)",
- api: "openai-completions",
- provider: "openrouter",
- baseUrl: "https://openrouter.ai/api/v1",
- reasoning: false,
- input: ["text", "image"],
- cost: {
- input: 5,
- output: 15,
- cacheRead: 0,
- cacheWrite: 0,
- },
- contextWindow: 128000,
- maxTokens: 4096,
- },
  "openai/gpt-4o": {
  id: "openai/gpt-4o",
  name: "OpenAI: GPT-4o",
@@ -6053,22 +6406,22 @@ export const MODELS = {
  contextWindow: 128000,
  maxTokens: 64000,
  },
- "meta-llama/llama-3-70b-instruct": {
- id: "meta-llama/llama-3-70b-instruct",
- name: "Meta: Llama 3 70B Instruct",
+ "openai/gpt-4o-2024-05-13": {
+ id: "openai/gpt-4o-2024-05-13",
+ name: "OpenAI: GPT-4o (2024-05-13)",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
- input: ["text"],
+ input: ["text", "image"],
  cost: {
- input: 0.3,
- output: 0.39999999999999997,
+ input: 5,
+ output: 15,
  cacheRead: 0,
  cacheWrite: 0,
  },
- contextWindow: 8192,
- maxTokens: 16384,
+ contextWindow: 128000,
+ maxTokens: 4096,
  },
  "meta-llama/llama-3-8b-instruct": {
  id: "meta-llama/llama-3-8b-instruct",
@@ -6087,6 +6440,23 @@ export const MODELS = {
  contextWindow: 8192,
  maxTokens: 16384,
  },
+ "meta-llama/llama-3-70b-instruct": {
+ id: "meta-llama/llama-3-70b-instruct",
+ name: "Meta: Llama 3 70B Instruct",
+ api: "openai-completions",
+ provider: "openrouter",
+ baseUrl: "https://openrouter.ai/api/v1",
+ reasoning: false,
+ input: ["text"],
+ cost: {
+ input: 0.3,
+ output: 0.39999999999999997,
+ cacheRead: 0,
+ cacheWrite: 0,
+ },
+ contextWindow: 8192,
+ maxTokens: 16384,
+ },
  "mistralai/mixtral-8x22b-instruct": {
  id: "mistralai/mixtral-8x22b-instruct",
  name: "Mistral: Mixtral 8x22B Instruct",
@@ -6291,38 +6661,38 @@ export const MODELS = {
  contextWindow: 8191,
  maxTokens: 4096,
  },
- "openai/gpt-4": {
- id: "openai/gpt-4",
- name: "OpenAI: GPT-4",
+ "openai/gpt-3.5-turbo": {
+ id: "openai/gpt-3.5-turbo",
+ name: "OpenAI: GPT-3.5 Turbo",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 30,
- output: 60,
+ input: 0.5,
+ output: 1.5,
  cacheRead: 0,
  cacheWrite: 0,
  },
- contextWindow: 8191,
+ contextWindow: 16385,
  maxTokens: 4096,
  },
- "openai/gpt-3.5-turbo": {
- id: "openai/gpt-3.5-turbo",
- name: "OpenAI: GPT-3.5 Turbo",
+ "openai/gpt-4": {
+ id: "openai/gpt-4",
+ name: "OpenAI: GPT-4",
  api: "openai-completions",
  provider: "openrouter",
  baseUrl: "https://openrouter.ai/api/v1",
  reasoning: false,
  input: ["text"],
  cost: {
- input: 0.5,
- output: 1.5,
+ input: 30,
+ output: 60,
  cacheRead: 0,
  cacheWrite: 0,
  },
- contextWindow: 16385,
+ contextWindow: 8191,
  maxTokens: 4096,
  },
  "openrouter/auto": {