@mariozechner/pi-ai 0.21.0 → 0.22.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -14,6 +14,7 @@ Unified LLM API with automatic model discovery, provider configuration, token an
  - **Cerebras**
  - **xAI**
  - **OpenRouter**
+ - **GitHub Copilot** (requires OAuth, see below)
  - **Any OpenAI-compatible API**: Ollama, vLLM, LM Studio, etc.
 
  ## Installation
@@ -1073,6 +1074,30 @@ setApiKey('anthropic', 'sk-ant-...');
  const key = getApiKey('openai');
  ```
 
+ ## GitHub Copilot
+
+ GitHub Copilot is available as a provider; it requires OAuth authentication via GitHub's device flow.
+
+ **Using with `@mariozechner/pi-coding-agent`**: Run `/login` and select "GitHub Copilot" to authenticate. All models are enabled automatically after login, and the token is stored in `~/.pi/agent/oauth.json`.
+
+ **Using standalone**: If you have a valid Copilot OAuth token (e.g., from the coding agent's `oauth.json`):
+
+ ```typescript
+ import { getModel, complete } from '@mariozechner/pi-ai';
+
+ const model = getModel('github-copilot', 'gpt-4o');
+
+ const response = await complete(model, {
+   messages: [{ role: 'user', content: 'Hello!' }]
+ }, {
+   apiKey: 'tid=...;exp=...;proxy-ep=...' // OAuth token from ~/.pi/agent/oauth.json
+ });
+ ```
+
+ **Note**: OAuth tokens expire and need to be refreshed periodically. The coding agent handles this automatically.
+
+ If you get a "The requested model is not supported" error, enable the model manually in VS Code: open Copilot Chat, click the model selector, choose the model (it shows a warning icon), and click "Enable".
+
  ## License
 
  MIT
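
The README addition above passes the OAuth token on every call. As a minimal standalone sketch (not part of the package docs), the token could instead be registered once with `setApiKey`, which the README uses for other providers just above this hunk; whether `setApiKey` accepts the `github-copilot` provider id and whether `complete` falls back to the stored key are assumptions.

```typescript
// Sketch: register the Copilot OAuth token once instead of passing apiKey per call.
// Assumes setApiKey accepts the 'github-copilot' provider id and that complete()
// falls back to the stored key when no per-call apiKey is given.
import { complete, getModel, setApiKey } from '@mariozechner/pi-ai';

// Hypothetical placeholder: a real token comes from GitHub's device flow (e.g. the
// coding agent's ~/.pi/agent/oauth.json) and has the form 'tid=...;exp=...;proxy-ep=...'.
setApiKey('github-copilot', 'tid=...;exp=...;proxy-ep=...');

const model = getModel('github-copilot', 'gpt-4o');
const response = await complete(model, {
  messages: [{ role: 'user', content: 'Hello!' }],
});
console.log(response);
```

If that assumption does not hold, the per-call `apiKey` option shown in the README hunk above remains the documented path.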
@@ -2463,7 +2463,490 @@ export declare const MODELS: {
  maxTokens: number;
  };
  };
+ readonly "github-copilot": {
+ readonly "grok-code-fast-1": {
+ id: string;
+ name: string;
+ api: "openai-completions";
+ provider: string;
+ baseUrl: string;
+ headers: {
+ "User-Agent": string;
+ "Editor-Version": string;
+ "Editor-Plugin-Version": string;
+ "Copilot-Integration-Id": string;
+ };
+ compat: {
+ supportsStore: false;
+ supportsDeveloperRole: false;
+ supportsReasoningEffort: false;
+ };
+ reasoning: true;
+ input: "text"[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
+ readonly "gpt-5.1-codex": {
+ id: string;
+ name: string;
+ api: "openai-responses";
+ provider: string;
+ baseUrl: string;
+ headers: {
+ "User-Agent": string;
+ "Editor-Version": string;
+ "Editor-Plugin-Version": string;
+ "Copilot-Integration-Id": string;
+ };
+ reasoning: true;
+ input: ("image" | "text")[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
+ readonly "claude-haiku-4.5": {
+ id: string;
+ name: string;
+ api: "openai-completions";
+ provider: string;
+ baseUrl: string;
+ headers: {
+ "User-Agent": string;
+ "Editor-Version": string;
+ "Editor-Plugin-Version": string;
+ "Copilot-Integration-Id": string;
+ };
+ compat: {
+ supportsStore: false;
+ supportsDeveloperRole: false;
+ supportsReasoningEffort: false;
+ };
+ reasoning: true;
+ input: ("image" | "text")[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
+ readonly "gemini-3-pro-preview": {
+ id: string;
+ name: string;
+ api: "openai-completions";
+ provider: string;
+ baseUrl: string;
+ headers: {
+ "User-Agent": string;
+ "Editor-Version": string;
+ "Editor-Plugin-Version": string;
+ "Copilot-Integration-Id": string;
+ };
+ compat: {
+ supportsStore: false;
+ supportsDeveloperRole: false;
+ supportsReasoningEffort: false;
+ };
+ reasoning: true;
+ input: ("image" | "text")[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
+ readonly "oswe-vscode-prime": {
+ id: string;
+ name: string;
+ api: "openai-completions";
+ provider: string;
+ baseUrl: string;
+ headers: {
+ "User-Agent": string;
+ "Editor-Version": string;
+ "Editor-Plugin-Version": string;
+ "Copilot-Integration-Id": string;
+ };
+ compat: {
+ supportsStore: false;
+ supportsDeveloperRole: false;
+ supportsReasoningEffort: false;
+ };
+ reasoning: true;
+ input: ("image" | "text")[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
+ readonly "gpt-5.1-codex-mini": {
+ id: string;
+ name: string;
+ api: "openai-responses";
+ provider: string;
+ baseUrl: string;
+ headers: {
+ "User-Agent": string;
+ "Editor-Version": string;
+ "Editor-Plugin-Version": string;
+ "Copilot-Integration-Id": string;
+ };
+ reasoning: true;
+ input: ("image" | "text")[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
+ readonly "gpt-5.1": {
+ id: string;
+ name: string;
+ api: "openai-responses";
+ provider: string;
+ baseUrl: string;
+ headers: {
+ "User-Agent": string;
+ "Editor-Version": string;
+ "Editor-Plugin-Version": string;
+ "Copilot-Integration-Id": string;
+ };
+ reasoning: true;
+ input: ("image" | "text")[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
+ readonly "gpt-5-codex": {
+ id: string;
+ name: string;
+ api: "openai-responses";
+ provider: string;
+ baseUrl: string;
+ headers: {
+ "User-Agent": string;
+ "Editor-Version": string;
+ "Editor-Plugin-Version": string;
+ "Copilot-Integration-Id": string;
+ };
+ reasoning: true;
+ input: ("image" | "text")[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
+ readonly "gpt-4o": {
+ id: string;
+ name: string;
+ api: "openai-completions";
+ provider: string;
+ baseUrl: string;
+ headers: {
+ "User-Agent": string;
+ "Editor-Version": string;
+ "Editor-Plugin-Version": string;
+ "Copilot-Integration-Id": string;
+ };
+ compat: {
+ supportsStore: false;
+ supportsDeveloperRole: false;
+ supportsReasoningEffort: false;
+ };
+ reasoning: false;
+ input: ("image" | "text")[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
+ readonly "gpt-4.1": {
+ id: string;
+ name: string;
+ api: "openai-completions";
+ provider: string;
+ baseUrl: string;
+ headers: {
+ "User-Agent": string;
+ "Editor-Version": string;
+ "Editor-Plugin-Version": string;
+ "Copilot-Integration-Id": string;
+ };
+ compat: {
+ supportsStore: false;
+ supportsDeveloperRole: false;
+ supportsReasoningEffort: false;
+ };
+ reasoning: false;
+ input: ("image" | "text")[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
+ readonly "gpt-5-mini": {
+ id: string;
+ name: string;
+ api: "openai-responses";
+ provider: string;
+ baseUrl: string;
+ headers: {
+ "User-Agent": string;
+ "Editor-Version": string;
+ "Editor-Plugin-Version": string;
+ "Copilot-Integration-Id": string;
+ };
+ reasoning: true;
+ input: ("image" | "text")[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
+ readonly "gemini-2.5-pro": {
+ id: string;
+ name: string;
+ api: "openai-completions";
+ provider: string;
+ baseUrl: string;
+ headers: {
+ "User-Agent": string;
+ "Editor-Version": string;
+ "Editor-Plugin-Version": string;
+ "Copilot-Integration-Id": string;
+ };
+ compat: {
+ supportsStore: false;
+ supportsDeveloperRole: false;
+ supportsReasoningEffort: false;
+ };
+ reasoning: false;
+ input: ("image" | "text")[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
+ readonly "gpt-5.1-codex-max": {
+ id: string;
+ name: string;
+ api: "openai-responses";
+ provider: string;
+ baseUrl: string;
+ headers: {
+ "User-Agent": string;
+ "Editor-Version": string;
+ "Editor-Plugin-Version": string;
+ "Copilot-Integration-Id": string;
+ };
+ reasoning: true;
+ input: ("image" | "text")[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
+ readonly "claude-sonnet-4": {
+ id: string;
+ name: string;
+ api: "openai-completions";
+ provider: string;
+ baseUrl: string;
+ headers: {
+ "User-Agent": string;
+ "Editor-Version": string;
+ "Editor-Plugin-Version": string;
+ "Copilot-Integration-Id": string;
+ };
+ compat: {
+ supportsStore: false;
+ supportsDeveloperRole: false;
+ supportsReasoningEffort: false;
+ };
+ reasoning: true;
+ input: ("image" | "text")[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
+ readonly "gpt-5": {
+ id: string;
+ name: string;
+ api: "openai-responses";
+ provider: string;
+ baseUrl: string;
+ headers: {
+ "User-Agent": string;
+ "Editor-Version": string;
+ "Editor-Plugin-Version": string;
+ "Copilot-Integration-Id": string;
+ };
+ reasoning: true;
+ input: ("image" | "text")[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
+ readonly "claude-opus-4.5": {
+ id: string;
+ name: string;
+ api: "openai-completions";
+ provider: string;
+ baseUrl: string;
+ headers: {
+ "User-Agent": string;
+ "Editor-Version": string;
+ "Editor-Plugin-Version": string;
+ "Copilot-Integration-Id": string;
+ };
+ compat: {
+ supportsStore: false;
+ supportsDeveloperRole: false;
+ supportsReasoningEffort: false;
+ };
+ reasoning: true;
+ input: ("image" | "text")[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
+ readonly "gpt-5.2": {
+ id: string;
+ name: string;
+ api: "openai-responses";
+ provider: string;
+ baseUrl: string;
+ headers: {
+ "User-Agent": string;
+ "Editor-Version": string;
+ "Editor-Plugin-Version": string;
+ "Copilot-Integration-Id": string;
+ };
+ reasoning: true;
+ input: ("image" | "text")[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
+ readonly "claude-sonnet-4.5": {
+ id: string;
+ name: string;
+ api: "openai-completions";
+ provider: string;
+ baseUrl: string;
+ headers: {
+ "User-Agent": string;
+ "Editor-Version": string;
+ "Editor-Plugin-Version": string;
+ "Copilot-Integration-Id": string;
+ };
+ compat: {
+ supportsStore: false;
+ supportsDeveloperRole: false;
+ supportsReasoningEffort: false;
+ };
+ reasoning: true;
+ input: ("image" | "text")[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
+ };
  readonly openrouter: {
+ readonly "nvidia/nemotron-3-nano-30b-a3b:free": {
+ id: string;
+ name: string;
+ api: "openai-completions";
+ provider: string;
+ baseUrl: string;
+ reasoning: true;
+ input: "text"[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
  readonly "openai/gpt-5.2-chat": {
  id: string;
  name: string;
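
Beyond `getModel`, the `MODELS` constant declared above can be inspected directly. A small sketch (assuming `MODELS` is importable from the package entry point, as the `export declare` suggests) that lists the newly added `github-copilot` entries and their declared limits:

```typescript
// Sketch: enumerate the new github-copilot entries in MODELS and print the metadata
// fields declared above (api, reasoning flag, contextWindow, maxTokens, per-token cost).
import { MODELS } from '@mariozechner/pi-ai';

for (const [id, model] of Object.entries(MODELS['github-copilot'])) {
  console.log(id, {
    api: model.api,                      // "openai-completions" or "openai-responses"
    reasoning: model.reasoning,          // reasoning flag as declared
    contextWindow: model.contextWindow,  // assumed to be a token count
    maxTokens: model.maxTokens,
    inputCost: model.cost.input,
  });
}
```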
@@ -5065,6 +5548,23 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
+ readonly "google/gemma-3-27b-it:free": {
+ id: string;
+ name: string;
+ api: "openai-completions";
+ provider: string;
+ baseUrl: string;
+ reasoning: false;
+ input: ("image" | "text")[];
+ cost: {
+ input: number;
+ output: number;
+ cacheRead: number;
+ cacheWrite: number;
+ };
+ contextWindow: number;
+ maxTokens: number;
+ };
  readonly "google/gemma-3-27b-it": {
  id: string;
  name: string;
@@ -5558,7 +6058,7 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "anthropic/claude-3.5-haiku-20241022": {
+ readonly "anthropic/claude-3.5-haiku": {
  id: string;
  name: string;
  api: "openai-completions";
@@ -5575,7 +6075,7 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "anthropic/claude-3.5-haiku": {
+ readonly "anthropic/claude-3.5-haiku-20241022": {
  id: string;
  name: string;
  api: "openai-completions";
@@ -5609,7 +6109,7 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "mistralai/ministral-8b": {
+ readonly "mistralai/ministral-3b": {
  id: string;
  name: string;
  api: "openai-completions";
@@ -5626,7 +6126,7 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "mistralai/ministral-3b": {
+ readonly "mistralai/ministral-8b": {
  id: string;
  name: string;
  api: "openai-completions";
@@ -5830,7 +6330,7 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "meta-llama/llama-3.1-405b-instruct": {
+ readonly "meta-llama/llama-3.1-70b-instruct": {
  id: string;
  name: string;
  api: "openai-completions";
@@ -5847,7 +6347,7 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "meta-llama/llama-3.1-70b-instruct": {
+ readonly "meta-llama/llama-3.1-405b-instruct": {
  id: string;
  name: string;
  api: "openai-completions";
@@ -6000,7 +6500,7 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "openai/gpt-4o-2024-05-13": {
+ readonly "openai/gpt-4o": {
  id: string;
  name: string;
  api: "openai-completions";
@@ -6017,7 +6517,7 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "openai/gpt-4o": {
+ readonly "openai/gpt-4o:extended": {
  id: string;
  name: string;
  api: "openai-completions";
@@ -6034,7 +6534,7 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "openai/gpt-4o:extended": {
+ readonly "openai/gpt-4o-2024-05-13": {
  id: string;
  name: string;
  api: "openai-completions";
@@ -6051,7 +6551,7 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "meta-llama/llama-3-70b-instruct": {
+ readonly "meta-llama/llama-3-8b-instruct": {
  id: string;
  name: string;
  api: "openai-completions";
@@ -6068,7 +6568,7 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "meta-llama/llama-3-8b-instruct": {
+ readonly "meta-llama/llama-3-70b-instruct": {
  id: string;
  name: string;
  api: "openai-completions";
@@ -6289,7 +6789,7 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "openai/gpt-4": {
+ readonly "openai/gpt-3.5-turbo": {
  id: string;
  name: string;
  api: "openai-completions";
@@ -6306,7 +6806,7 @@ export declare const MODELS: {
  contextWindow: number;
  maxTokens: number;
  };
- readonly "openai/gpt-3.5-turbo": {
+ readonly "openai/gpt-4": {
  id: string;
  name: string;
  api: "openai-completions";