@mariozechner/pi-ai 0.37.2 → 0.37.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. package/README.md +61 -12
  2. package/dist/models.generated.d.ts +0 -51
  3. package/dist/models.generated.d.ts.map +1 -1
  4. package/dist/models.generated.js +41 -92
  5. package/dist/models.generated.js.map +1 -1
  6. package/dist/providers/google-gemini-cli.d.ts.map +1 -1
  7. package/dist/providers/google-gemini-cli.js +3 -3
  8. package/dist/providers/google-gemini-cli.js.map +1 -1
  9. package/dist/providers/google-shared.d.ts +26 -1
  10. package/dist/providers/google-shared.d.ts.map +1 -1
  11. package/dist/providers/google-shared.js +31 -0
  12. package/dist/providers/google-shared.js.map +1 -1
  13. package/dist/providers/google-vertex.d.ts.map +1 -1
  14. package/dist/providers/google-vertex.js +3 -3
  15. package/dist/providers/google-vertex.js.map +1 -1
  16. package/dist/providers/google.d.ts.map +1 -1
  17. package/dist/providers/google.js +3 -3
  18. package/dist/providers/google.js.map +1 -1
  19. package/dist/providers/openai-codex/prompts/codex.d.ts +0 -1
  20. package/dist/providers/openai-codex/prompts/codex.d.ts.map +1 -1
  21. package/dist/providers/openai-codex/prompts/codex.js +1 -42
  22. package/dist/providers/openai-codex/prompts/codex.js.map +1 -1
  23. package/dist/providers/openai-codex/prompts/pi-codex-bridge.d.ts +2 -1
  24. package/dist/providers/openai-codex/prompts/pi-codex-bridge.d.ts.map +1 -1
  25. package/dist/providers/openai-codex/prompts/pi-codex-bridge.js +42 -42
  26. package/dist/providers/openai-codex/prompts/pi-codex-bridge.js.map +1 -1
  27. package/dist/providers/openai-codex/prompts/system-prompt.d.ts +10 -0
  28. package/dist/providers/openai-codex/prompts/system-prompt.d.ts.map +1 -0
  29. package/dist/providers/openai-codex/prompts/system-prompt.js +15 -0
  30. package/dist/providers/openai-codex/prompts/system-prompt.js.map +1 -0
  31. package/dist/providers/openai-codex/request-transformer.d.ts +5 -1
  32. package/dist/providers/openai-codex/request-transformer.d.ts.map +1 -1
  33. package/dist/providers/openai-codex/request-transformer.js +9 -41
  34. package/dist/providers/openai-codex/request-transformer.js.map +1 -1
  35. package/dist/providers/openai-codex-responses.d.ts.map +1 -1
  36. package/dist/providers/openai-codex-responses.js +13 -2
  37. package/dist/providers/openai-codex-responses.js.map +1 -1
  38. package/dist/stream.d.ts.map +1 -1
  39. package/dist/stream.js +1 -0
  40. package/dist/stream.js.map +1 -1
  41. package/dist/types.d.ts +6 -0
  42. package/dist/types.d.ts.map +1 -1
  43. package/dist/types.js.map +1 -1
  44. package/package.json +1 -1
package/README.md CHANGED
@@ -4,9 +4,50 @@ Unified LLM API with automatic model discovery, provider configuration, token an
4
4
 
5
5
  **Note**: This library only includes models that support tool calling (function calling), as this is essential for agentic workflows.
6
6
 
7
+ ## Table of Contents
8
+
9
+ - [Supported Providers](#supported-providers)
10
+ - [Installation](#installation)
11
+ - [Quick Start](#quick-start)
12
+ - [Tools](#tools)
13
+ - [Defining Tools](#defining-tools)
14
+ - [Handling Tool Calls](#handling-tool-calls)
15
+ - [Streaming Tool Calls with Partial JSON](#streaming-tool-calls-with-partial-json)
16
+ - [Validating Tool Arguments](#validating-tool-arguments)
17
+ - [Complete Event Reference](#complete-event-reference)
18
+ - [Image Input](#image-input)
19
+ - [Thinking/Reasoning](#thinkingreasoning)
20
+ - [Unified Interface](#unified-interface-streamsimplecompletesimple)
21
+ - [Provider-Specific Options](#provider-specific-options-streamcomplete)
22
+ - [Streaming Thinking Content](#streaming-thinking-content)
23
+ - [Stop Reasons](#stop-reasons)
24
+ - [Error Handling](#error-handling)
25
+ - [Aborting Requests](#aborting-requests)
26
+ - [Continuing After Abort](#continuing-after-abort)
27
+ - [APIs, Models, and Providers](#apis-models-and-providers)
28
+ - [Providers and Models](#providers-and-models)
29
+ - [Querying Providers and Models](#querying-providers-and-models)
30
+ - [Custom Models](#custom-models)
31
+ - [OpenAI Compatibility Settings](#openai-compatibility-settings)
32
+ - [Type Safety](#type-safety)
33
+ - [Cross-Provider Handoffs](#cross-provider-handoffs)
34
+ - [Context Serialization](#context-serialization)
35
+ - [Browser Usage](#browser-usage)
36
+ - [Environment Variables](#environment-variables-nodejs-only)
37
+ - [Checking Environment Variables](#checking-environment-variables)
38
+ - [OAuth Providers](#oauth-providers)
39
+ - [Vertex AI (ADC)](#vertex-ai-adc)
40
+ - [CLI Login](#cli-login)
41
+ - [Programmatic OAuth](#programmatic-oauth)
42
+ - [Login Flow Example](#login-flow-example)
43
+ - [Using OAuth Tokens](#using-oauth-tokens)
44
+ - [Provider Notes](#provider-notes)
45
+ - [License](#license)
46
+
7
47
  ## Supported Providers
8
48
 
9
49
  - **OpenAI**
50
+ - **OpenAI Codex** (ChatGPT Plus/Pro subscription, requires OAuth, see below)
10
51
  - **Anthropic**
11
52
  - **Google**
12
53
  - **Vertex AI** (Gemini via Vertex AI)
@@ -16,6 +57,8 @@ Unified LLM API with automatic model discovery, provider configuration, token an
16
57
  - **xAI**
17
58
  - **OpenRouter**
18
59
  - **GitHub Copilot** (requires OAuth, see below)
60
+ - **Google Gemini CLI** (requires OAuth, see below)
61
+ - **Antigravity** (requires OAuth, see below)
19
62
  - **Any OpenAI-compatible API**: Ollama, vLLM, LM Studio, etc.
20
63
 
21
64
  ## Installation
@@ -806,17 +849,19 @@ const response = await complete(model, {
806
849
 
807
850
  In Node.js environments, you can set environment variables to avoid passing API keys:
808
851
 
809
- ```bash
810
- OPENAI_API_KEY=sk-...
811
- ANTHROPIC_API_KEY=sk-ant-...
812
- GEMINI_API_KEY=...
813
- MISTRAL_API_KEY=...
814
- GROQ_API_KEY=gsk_...
815
- CEREBRAS_API_KEY=csk-...
816
- XAI_API_KEY=xai-...
817
- ZAI_API_KEY=...
818
- OPENROUTER_API_KEY=sk-or-...
819
- ```
852
+ | Provider | Environment Variable(s) |
853
+ |----------|------------------------|
854
+ | OpenAI | `OPENAI_API_KEY` |
855
+ | Anthropic | `ANTHROPIC_API_KEY` or `ANTHROPIC_OAUTH_TOKEN` |
856
+ | Google | `GEMINI_API_KEY` |
857
+ | Vertex AI | `GOOGLE_CLOUD_PROJECT` (or `GCLOUD_PROJECT`) + `GOOGLE_CLOUD_LOCATION` + ADC |
858
+ | Mistral | `MISTRAL_API_KEY` |
859
+ | Groq | `GROQ_API_KEY` |
860
+ | Cerebras | `CEREBRAS_API_KEY` |
861
+ | xAI | `XAI_API_KEY` |
862
+ | OpenRouter | `OPENROUTER_API_KEY` |
863
+ | zAI | `ZAI_API_KEY` |
864
+ | GitHub Copilot | `COPILOT_GITHUB_TOKEN` or `GH_TOKEN` or `GITHUB_TOKEN` |
820
865
 
821
866
  When set, the library automatically uses these keys:
822
867
 
@@ -845,6 +890,7 @@ const key = getEnvApiKey('openai'); // checks OPENAI_API_KEY
845
890
  Several providers require OAuth authentication instead of static API keys:
846
891
 
847
892
  - **Anthropic** (Claude Pro/Max subscription)
893
+ - **OpenAI Codex** (ChatGPT Plus/Pro subscription, access to GPT-5.x Codex models)
848
894
  - **GitHub Copilot** (Copilot subscription)
849
895
  - **Google Gemini CLI** (Free Gemini 2.0/2.5 via Google Cloud Code Assist)
850
896
  - **Antigravity** (Free Gemini 3, Claude, GPT-OSS via Google Cloud)
@@ -873,6 +919,7 @@ The library provides login and token refresh functions. Credential storage is th
873
919
  import {
874
920
  // Login functions (return credentials, do not store)
875
921
  loginAnthropic,
922
+ loginOpenAICodex,
876
923
  loginGitHubCopilot,
877
924
  loginGeminiCli,
878
925
  loginAntigravity,
@@ -882,7 +929,7 @@ import {
882
929
  getOAuthApiKey, // (provider, credentialsMap) => { newCredentials, apiKey } | null
883
930
 
884
931
  // Types
885
- type OAuthProvider, // 'anthropic' | 'github-copilot' | 'google-gemini-cli' | 'google-antigravity'
932
+ type OAuthProvider, // 'anthropic' | 'openai-codex' | 'github-copilot' | 'google-gemini-cli' | 'google-antigravity'
886
933
  type OAuthCredentials,
887
934
  } from '@mariozechner/pi-ai';
888
935
  ```
@@ -937,6 +984,8 @@ const response = await complete(model, {
937
984
 
938
985
  ### Provider Notes
939
986
 
987
+ **OpenAI Codex**: Requires a ChatGPT Plus or Pro subscription. Provides access to GPT-5.x Codex models with extended context windows and reasoning capabilities. The library automatically handles session-based prompt caching when `sessionId` is provided in stream options.
988
+
940
989
  **GitHub Copilot**: If you get "The requested model is not supported" error, enable the model manually in VS Code: open Copilot Chat, click the model selector, select the model (warning icon), and click "Enable".
941
990
 
942
991
  **Google Gemini CLI / Antigravity**: These use Google Cloud OAuth. The `apiKey` returned by `getOAuthApiKey()` is a JSON string containing both the token and project ID, which the library handles automatically.
@@ -4334,57 +4334,6 @@ export declare const MODELS: {
4334
4334
  contextWindow: number;
4335
4335
  maxTokens: number;
4336
4336
  };
4337
- readonly "microsoft/phi-3-medium-128k-instruct": {
4338
- id: string;
4339
- name: string;
4340
- api: "openai-completions";
4341
- provider: string;
4342
- baseUrl: string;
4343
- reasoning: false;
4344
- input: "text"[];
4345
- cost: {
4346
- input: number;
4347
- output: number;
4348
- cacheRead: number;
4349
- cacheWrite: number;
4350
- };
4351
- contextWindow: number;
4352
- maxTokens: number;
4353
- };
4354
- readonly "microsoft/phi-3-mini-128k-instruct": {
4355
- id: string;
4356
- name: string;
4357
- api: "openai-completions";
4358
- provider: string;
4359
- baseUrl: string;
4360
- reasoning: false;
4361
- input: "text"[];
4362
- cost: {
4363
- input: number;
4364
- output: number;
4365
- cacheRead: number;
4366
- cacheWrite: number;
4367
- };
4368
- contextWindow: number;
4369
- maxTokens: number;
4370
- };
4371
- readonly "microsoft/phi-3.5-mini-128k-instruct": {
4372
- id: string;
4373
- name: string;
4374
- api: "openai-completions";
4375
- provider: string;
4376
- baseUrl: string;
4377
- reasoning: false;
4378
- input: "text"[];
4379
- cost: {
4380
- input: number;
4381
- output: number;
4382
- cacheRead: number;
4383
- cacheWrite: number;
4384
- };
4385
- contextWindow: number;
4386
- maxTokens: number;
4387
- };
4388
4337
  readonly "minimax/minimax-m1": {
4389
4338
  id: string;
4390
4339
  name: string;