@mariozechner/pi-ai 0.49.1 → 0.49.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. package/README.md +1 -1
  2. package/dist/models.generated.d.ts +68 -74
  3. package/dist/models.generated.d.ts.map +1 -1
  4. package/dist/models.generated.js +77 -78
  5. package/dist/models.generated.js.map +1 -1
  6. package/dist/providers/anthropic.d.ts.map +1 -1
  7. package/dist/providers/anthropic.js +15 -8
  8. package/dist/providers/anthropic.js.map +1 -1
  9. package/dist/providers/google-gemini-cli.d.ts.map +1 -1
  10. package/dist/providers/google-gemini-cli.js +1 -0
  11. package/dist/providers/google-gemini-cli.js.map +1 -1
  12. package/dist/providers/google-vertex.d.ts.map +1 -1
  13. package/dist/providers/google-vertex.js +4 -4
  14. package/dist/providers/google-vertex.js.map +1 -1
  15. package/dist/providers/google.d.ts.map +1 -1
  16. package/dist/providers/google.js +4 -4
  17. package/dist/providers/google.js.map +1 -1
  18. package/dist/providers/openai-codex-responses.d.ts.map +1 -1
  19. package/dist/providers/openai-codex-responses.js +17 -7
  20. package/dist/providers/openai-codex-responses.js.map +1 -1
  21. package/dist/providers/openai-completions.d.ts.map +1 -1
  22. package/dist/providers/openai-completions.js +6 -2
  23. package/dist/providers/openai-completions.js.map +1 -1
  24. package/dist/providers/openai-responses.d.ts.map +1 -1
  25. package/dist/providers/openai-responses.js +25 -23
  26. package/dist/providers/openai-responses.js.map +1 -1
  27. package/dist/providers/transform-messages.d.ts.map +1 -1
  28. package/dist/providers/transform-messages.js +10 -13
  29. package/dist/providers/transform-messages.js.map +1 -1
  30. package/dist/stream.d.ts.map +1 -1
  31. package/dist/stream.js +32 -6
  32. package/dist/stream.js.map +1 -1
  33. package/dist/types.d.ts +6 -2
  34. package/dist/types.d.ts.map +1 -1
  35. package/dist/types.js.map +1 -1
  36. package/dist/utils/oauth/openai-codex.d.ts +5 -0
  37. package/dist/utils/oauth/openai-codex.d.ts.map +1 -1
  38. package/dist/utils/oauth/openai-codex.js +27 -8
  39. package/dist/utils/oauth/openai-codex.js.map +1 -1
  40. package/package.json +1 -1
package/README.md CHANGED
@@ -729,7 +729,7 @@ interface OpenAICompletionsCompat {
729
729
  }
730
730
 
731
731
  interface OpenAIResponsesCompat {
732
- strictResponsesPairing?: boolean; // Enforce strict reasoning/message pairing for OpenAI Responses history replay on providers like Azure (default: false)
732
+ // Reserved for future use
733
733
  }
734
734
  ```
735
735
 
@@ -1624,29 +1624,6 @@ export declare const MODELS: {
1624
1624
  contextWindow: number;
1625
1625
  maxTokens: number;
1626
1626
  };
1627
- readonly "gpt-5-codex": {
1628
- id: string;
1629
- name: string;
1630
- api: "openai-responses";
1631
- provider: string;
1632
- baseUrl: string;
1633
- headers: {
1634
- "User-Agent": string;
1635
- "Editor-Version": string;
1636
- "Editor-Plugin-Version": string;
1637
- "Copilot-Integration-Id": string;
1638
- };
1639
- reasoning: true;
1640
- input: ("image" | "text")[];
1641
- cost: {
1642
- input: number;
1643
- output: number;
1644
- cacheRead: number;
1645
- cacheWrite: number;
1646
- };
1647
- contextWindow: number;
1648
- maxTokens: number;
1649
- };
1650
1627
  readonly "gpt-5-mini": {
1651
1628
  id: string;
1652
1629
  name: string;
@@ -7419,6 +7396,23 @@ export declare const MODELS: {
7419
7396
  contextWindow: number;
7420
7397
  maxTokens: number;
7421
7398
  };
7399
+ readonly "qwen/qwen2.5-vl-72b-instruct": {
7400
+ id: string;
7401
+ name: string;
7402
+ api: "openai-completions";
7403
+ provider: string;
7404
+ baseUrl: string;
7405
+ reasoning: false;
7406
+ input: ("image" | "text")[];
7407
+ cost: {
7408
+ input: number;
7409
+ output: number;
7410
+ cacheRead: number;
7411
+ cacheWrite: number;
7412
+ };
7413
+ contextWindow: number;
7414
+ maxTokens: number;
7415
+ };
7422
7416
  readonly "qwen/qwen3-14b": {
7423
7417
  id: string;
7424
7418
  name: string;
@@ -7776,6 +7770,23 @@ export declare const MODELS: {
7776
7770
  contextWindow: number;
7777
7771
  maxTokens: number;
7778
7772
  };
7773
+ readonly "qwen/qwen3-vl-235b-a22b-thinking": {
7774
+ id: string;
7775
+ name: string;
7776
+ api: "openai-completions";
7777
+ provider: string;
7778
+ baseUrl: string;
7779
+ reasoning: true;
7780
+ input: ("image" | "text")[];
7781
+ cost: {
7782
+ input: number;
7783
+ output: number;
7784
+ cacheRead: number;
7785
+ cacheWrite: number;
7786
+ };
7787
+ contextWindow: number;
7788
+ maxTokens: number;
7789
+ };
7779
7790
  readonly "qwen/qwen3-vl-30b-a3b-instruct": {
7780
7791
  id: string;
7781
7792
  name: string;
@@ -8337,6 +8348,23 @@ export declare const MODELS: {
8337
8348
  contextWindow: number;
8338
8349
  maxTokens: number;
8339
8350
  };
8351
+ readonly "z-ai/glm-4.7-flash": {
8352
+ id: string;
8353
+ name: string;
8354
+ api: "openai-completions";
8355
+ provider: string;
8356
+ baseUrl: string;
8357
+ reasoning: true;
8358
+ input: "text"[];
8359
+ cost: {
8360
+ input: number;
8361
+ output: number;
8362
+ cacheRead: number;
8363
+ cacheWrite: number;
8364
+ };
8365
+ contextWindow: number;
8366
+ maxTokens: number;
8367
+ };
8340
8368
  };
8341
8369
  readonly "vercel-ai-gateway": {
8342
8370
  readonly "alibaba/qwen-3-14b": {
@@ -8526,23 +8554,6 @@ export declare const MODELS: {
8526
8554
  contextWindow: number;
8527
8555
  maxTokens: number;
8528
8556
  };
8529
- readonly "anthropic/claude-3-opus": {
8530
- id: string;
8531
- name: string;
8532
- api: "anthropic-messages";
8533
- provider: string;
8534
- baseUrl: string;
8535
- reasoning: false;
8536
- input: ("image" | "text")[];
8537
- cost: {
8538
- input: number;
8539
- output: number;
8540
- cacheRead: number;
8541
- cacheWrite: number;
8542
- };
8543
- contextWindow: number;
8544
- maxTokens: number;
8545
- };
8546
8557
  readonly "anthropic/claude-3.5-haiku": {
8547
8558
  id: string;
8548
8559
  name: string;
@@ -8832,40 +8843,6 @@ export declare const MODELS: {
8832
8843
  contextWindow: number;
8833
8844
  maxTokens: number;
8834
8845
  };
8835
- readonly "google/gemini-2.0-flash": {
8836
- id: string;
8837
- name: string;
8838
- api: "anthropic-messages";
8839
- provider: string;
8840
- baseUrl: string;
8841
- reasoning: false;
8842
- input: ("image" | "text")[];
8843
- cost: {
8844
- input: number;
8845
- output: number;
8846
- cacheRead: number;
8847
- cacheWrite: number;
8848
- };
8849
- contextWindow: number;
8850
- maxTokens: number;
8851
- };
8852
- readonly "google/gemini-2.0-flash-lite": {
8853
- id: string;
8854
- name: string;
8855
- api: "anthropic-messages";
8856
- provider: string;
8857
- baseUrl: string;
8858
- reasoning: false;
8859
- input: ("image" | "text")[];
8860
- cost: {
8861
- input: number;
8862
- output: number;
8863
- cacheRead: number;
8864
- cacheWrite: number;
8865
- };
8866
- contextWindow: number;
8867
- maxTokens: number;
8868
- };
8869
8846
  readonly "google/gemini-2.5-flash": {
8870
8847
  id: string;
8871
8848
  name: string;
@@ -10447,6 +10424,23 @@ export declare const MODELS: {
10447
10424
  contextWindow: number;
10448
10425
  maxTokens: number;
10449
10426
  };
10427
+ readonly "zai/glm-4.7-flashx": {
10428
+ id: string;
10429
+ name: string;
10430
+ api: "anthropic-messages";
10431
+ provider: string;
10432
+ baseUrl: string;
10433
+ reasoning: true;
10434
+ input: "text"[];
10435
+ cost: {
10436
+ input: number;
10437
+ output: number;
10438
+ cacheRead: number;
10439
+ cacheWrite: number;
10440
+ };
10441
+ contextWindow: number;
10442
+ maxTokens: number;
10443
+ };
10450
10444
  };
10451
10445
  readonly xai: {
10452
10446
  readonly "grok-2": {