@oh-my-pi/pi-ai 11.1.0 → 11.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md
CHANGED
@@ -10,38 +10,38 @@ Unified LLM API with automatic model discovery, provider configuration, token an
 - [Installation](#installation)
 - [Quick Start](#quick-start)
 - [Tools](#tools)
-…
-…
-…
-…
-…
+  - [Defining Tools](#defining-tools)
+  - [Handling Tool Calls](#handling-tool-calls)
+  - [Streaming Tool Calls with Partial JSON](#streaming-tool-calls-with-partial-json)
+  - [Validating Tool Arguments](#validating-tool-arguments)
+  - [Complete Event Reference](#complete-event-reference)
 - [Image Input](#image-input)
 - [Thinking/Reasoning](#thinkingreasoning)
-…
-…
-…
+  - [Unified Interface](#unified-interface-streamsimplecompletesimple)
+  - [Provider-Specific Options](#provider-specific-options-streamcomplete)
+  - [Streaming Thinking Content](#streaming-thinking-content)
 - [Stop Reasons](#stop-reasons)
 - [Error Handling](#error-handling)
-…
-…
+  - [Aborting Requests](#aborting-requests)
+  - [Continuing After Abort](#continuing-after-abort)
 - [APIs, Models, and Providers](#apis-models-and-providers)
-…
-…
-…
-…
-…
+  - [Providers and Models](#providers-and-models)
+  - [Querying Providers and Models](#querying-providers-and-models)
+  - [Custom Models](#custom-models)
+  - [OpenAI Compatibility Settings](#openai-compatibility-settings)
+  - [Type Safety](#type-safety)
 - [Cross-Provider Handoffs](#cross-provider-handoffs)
 - [Context Serialization](#context-serialization)
 - [Browser Usage](#browser-usage)
-…
-…
+  - [Environment Variables](#environment-variables-nodejs-only)
+  - [Checking Environment Variables](#checking-environment-variables)
 - [OAuth Providers](#oauth-providers)
-…
-…
-…
-…
-…
-…
+  - [Vertex AI (ADC)](#vertex-ai-adc)
+  - [CLI Login](#cli-login)
+  - [Programmatic OAuth](#programmatic-oauth)
+  - [Login Flow Example](#login-flow-example)
+  - [Using OAuth Tokens](#using-oauth-tokens)
+  - [Provider Notes](#provider-notes)
 - [License](#license)
 
 ## Supported Providers
@@ -267,7 +267,7 @@ context.messages.push({
   toolName: "generate_chart",
   content: [
     { type: "text", text: "Generated chart showing temperature trends" },
-    { type: "image", data: imageBuffer.…
+    { type: "image", data: imageBuffer.toBase64(), mimeType: "image/png" },
   ],
   isError: false,
   timestamp: Date.now(),
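For readers tracking the change: `Buffer` inherits `Uint8Array.prototype.toBase64()`, which exists in Bun and recent Node. A minimal sketch of building such a tool-result message; the fields mirror the hunk above, while `role` and `toolCallId` are assumptions not visible in this diff:

```ts
import * as fs from "node:fs";

const imageBuffer = fs.readFileSync("chart.png");

// `toBase64()` requires Bun or a recent Node; on older runtimes use
// imageBuffer.toString("base64") instead.
const toolResultMessage = {
  role: "toolResult", // assumed; not shown in the hunk
  toolCallId: "call_123", // assumed; use the id from the model's tool call
  toolName: "generate_chart",
  content: [
    { type: "text", text: "Generated chart showing temperature trends" },
    { type: "image", data: imageBuffer.toBase64(), mimeType: "image/png" },
  ],
  isError: false,
  timestamp: Date.now(),
};

// Push onto your conversation context as in the README example:
// context.messages.push(toolResultMessage);
```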
@@ -390,7 +390,7 @@ if (model.input.includes("image")) {
 }
 
 const imageBuffer = fs.readFileSync("image.png");
-const base64Image = imageBuffer.…
+const base64Image = imageBuffer.toBase64();
 
 const response = await complete(model, {
   messages: [
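The hunk only shows the opening of the `complete` call. A fuller sketch under stated assumptions: the user-message content blocks reuse the `{ type: "image", data, mimeType }` shape from the tool-result example, and the model id is a placeholder:

```ts
import * as fs from "node:fs";
import { getModel, complete } from "@oh-my-pi/pi-ai";

// Placeholder model id; pick any model whose `input` includes "image".
const model = getModel("anthropic", "claude-sonnet-4-5");
if (!model.input.includes("image")) throw new Error("model lacks image input");

const imageBuffer = fs.readFileSync("image.png");
const base64Image = imageBuffer.toBase64(); // Bun / recent Node

const response = await complete(model, {
  messages: [
    {
      role: "user",
      content: [
        { type: "text", text: "Describe this image." },
        { type: "image", data: base64Image, mimeType: "image/png" },
      ],
    },
  ],
});
console.log(response);
```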
@@ -443,7 +443,7 @@ const response = await completeSimple(
   },
   {
     reasoning: "medium", // 'minimal' | 'low' | 'medium' | 'high' | 'xhigh' (xhigh maps to high on non-OpenAI providers)
-  }…
+  }
 );
 
 // Access thinking and text blocks
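For context, the `reasoning` option rides in the third argument of `completeSimple`, next to the message payload in the second. A minimal sketch; the model id is a placeholder, and the exact response shape is not shown by this diff:

```ts
import { getModel, completeSimple } from "@oh-my-pi/pi-ai";

const model = getModel("openai", "gpt-5.1"); // placeholder id

const response = await completeSimple(
  model,
  {
    messages: [{ role: "user", content: "Why is the sky blue?" }],
  },
  {
    reasoning: "medium", // 'minimal' | 'low' | 'medium' | 'high' | 'xhigh'
  }
);

// Thinking and text blocks are accessible on the response; see the
// README's "Streaming Thinking Content" section for details.
console.log(response);
```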
@@ -562,7 +562,7 @@ const s = stream(
   },
   {
     signal,
-  }…
+  }
 );
 
 for await (const event of s) {
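The `signal` here is a standard `AbortSignal`. A sketch of the wiring, assuming only what the hunk shows (a `stream(model, request, options)` call and a `for await` loop over events):

```ts
import { getModel, stream } from "@oh-my-pi/pi-ai";

const model = getModel("anthropic", "claude-sonnet-4-5"); // placeholder id
const controller = new AbortController();
const { signal } = controller;

// Cancel after 5 seconds (or on any user action).
setTimeout(() => controller.abort(), 5_000);

const s = stream(
  model,
  {
    messages: [{ role: "user", content: "Write a very long story." }],
  },
  {
    signal,
  }
);

for await (const event of s) {
  // The loop ends once the signal fires; see "Continuing After Abort".
  console.log(event);
}
```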
@@ -877,7 +877,7 @@ const response = await complete(
   },
   {
     apiKey: "your-api-key",
-  }…
+  }
 );
 ```
 
@@ -983,7 +983,7 @@ bunx @oh-my-pi/pi-ai login anthropic # login to specific provider
 bunx @oh-my-pi/pi-ai list # list available providers
 ```
 
-Credentials are saved to `…
+Credentials are saved to `agent.db` in the agent directory.
 
 ### Programmatic OAuth
 
@@ -1036,7 +1036,7 @@ const credentials = await loginGitHubCopilot({
 
 // Store credentials yourself
 const auth = { "github-copilot": { type: "oauth", ...credentials } };
-fs.writeFileSync("…
+fs.writeFileSync("credentials.json", JSON.stringify(auth, null, 2));
 ```
 
 ### Using OAuth Tokens
@@ -1048,7 +1048,7 @@ import { getModel, complete, getOAuthApiKey } from "@oh-my-pi/pi-ai";
 import * as fs from "node:fs";
 
 // Load your stored credentials
-const auth = JSON.parse(fs.readFileSync("…
+const auth = JSON.parse(fs.readFileSync("credentials.json", "utf-8"));
 
 // Get API key (refreshes if expired)
 const result = await getOAuthApiKey("github-copilot", auth);
@@ -1056,7 +1056,7 @@ if (!result) throw new Error("Not logged in");
 
 // Save refreshed credentials
 auth["github-copilot"] = { type: "oauth", ...result.newCredentials };
-fs.writeFileSync("…
+fs.writeFileSync("credentials.json", JSON.stringify(auth, null, 2));
 
 // Use the API key
 const model = getModel("github-copilot", "gpt-4o");
@@ -1065,7 +1065,7 @@ const response = await complete(
   {
     messages: [{ role: "user", content: "Hello!" }],
   },
-  { apiKey: result.apiKey }…
+  { apiKey: result.apiKey }
 );
 ```
 
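Stitched together, the last three hunks form one round trip: load stored credentials, mint or refresh an API key, persist the refreshed credentials, and call the model. Assembled below exactly as the README snippets show it; the `credentials.json` filename is whatever you chose when storing credentials:

```ts
import { getModel, complete, getOAuthApiKey } from "@oh-my-pi/pi-ai";
import * as fs from "node:fs";

// Load your stored credentials
const auth = JSON.parse(fs.readFileSync("credentials.json", "utf-8"));

// Get an API key (refreshes the OAuth token if expired)
const result = await getOAuthApiKey("github-copilot", auth);
if (!result) throw new Error("Not logged in");

// Save refreshed credentials for the next run
auth["github-copilot"] = { type: "oauth", ...result.newCredentials };
fs.writeFileSync("credentials.json", JSON.stringify(auth, null, 2));

// Use the API key
const model = getModel("github-copilot", "gpt-4o");
const response = await complete(
  model,
  { messages: [{ role: "user", content: "Hello!" }] },
  { apiKey: result.apiKey }
);
console.log(response);
```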
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@oh-my-pi/pi-ai",
-  "version": "11.1.0",
+  "version": "11.2.1",
   "description": "Unified LLM API with automatic model discovery and provider configuration",
   "type": "module",
   "main": "./src/index.ts",
@@ -63,7 +63,7 @@
     "@connectrpc/connect-node": "^2.1.1",
     "@google/genai": "^1.39.0",
     "@mistralai/mistralai": "^1.13.0",
-    "@oh-my-pi/pi-utils": "11.1.0",
+    "@oh-my-pi/pi-utils": "11.2.1",
     "@sinclair/typebox": "^0.34.48",
     "@smithy/node-http-handler": "^4.4.9",
     "ajv": "^8.17.1",
@@ -93,6 +93,6 @@
     "bun": ">=1.3.7"
   },
   "devDependencies": {
-    "@types/…
+    "@types/bun": "^1.3.8"
   }
 }
package/src/…/openai-codex/response-handler.ts
CHANGED
@@ -1,5 +1,3 @@
-import { readSseData } from "@oh-my-pi/pi-utils";
-
 export type CodexRateLimit = {
   used_percent?: number;
   window_minutes?: number;
@@ -71,16 +69,6 @@ export async function parseCodexError(response: Response): Promise<CodexErrorInf
   };
 }
 
-export async function* parseCodexSseStream(response: Response): AsyncGenerator<Record<string, unknown>> {
-  if (!response.body) {
-    return;
-  }
-
-  for await (const data of readSseData<Record<string, unknown>>(response.body)) {
-    yield data;
-  }
-}
-
 function toNumber(v: string | null): number | undefined {
   if (v == null) return undefined;
   const n = Number(v);
package/src/… (streamOpenAICodexResponses)
CHANGED
@@ -1,5 +1,5 @@
 import * as os from "node:os";
-import { $env, abortableSleep } from "@oh-my-pi/pi-utils";
+import { $env, abortableSleep, readSseJson } from "@oh-my-pi/pi-utils";
 import type {
   ResponseFunctionToolCall,
   ResponseInput,
@@ -38,7 +38,7 @@ import {
   URL_PATHS,
 } from "./openai-codex/constants";
 import { type CodexRequestOptions, type RequestBody, transformRequestBody } from "./openai-codex/request-transformer";
-import { parseCodexError, parseCodexSseStream } from "./openai-codex/response-handler";
+import { parseCodexError } from "./openai-codex/response-handler";
 import { transformMessages } from "./transform-messages";
 
 export interface OpenAICodexResponsesOptions extends StreamOptions {
@@ -234,7 +234,7 @@ export const streamOpenAICodexResponses: StreamFunction<"openai-codex-responses"
   const blocks = output.content;
   const blockIndex = () => blocks.length - 1;
 
-  for await (const rawEvent of parseCodexSseStream(response)) {
+  for await (const rawEvent of readSseJson<Record<string, unknown>>(response.body!, options?.signal)) {
     const eventType = typeof rawEvent.type === "string" ? rawEvent.type : "";
     if (!eventType) continue;
 
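Net effect of the source changes: the package drops its local `parseCodexSseStream` wrapper (and its `readSseData` import) in favor of `readSseJson` from `@oh-my-pi/pi-utils`, which additionally threads through the abort signal. A sketch of the before/after pattern, assuming only the signature visible in the hunk, `readSseJson<T>(body, signal?)`, yielding parsed SSE JSON payloads:

```ts
import { readSseJson } from "@oh-my-pi/pi-utils";

// Placeholder endpoint standing in for the Codex responses API.
const response = await fetch("https://example.com/events", {
  headers: { accept: "text/event-stream" },
});
if (!response.body) throw new Error("no response body");

const controller = new AbortController();

// Before: for await (const rawEvent of parseCodexSseStream(response)) { ... }
// After, as used in streamOpenAICodexResponses:
for await (const rawEvent of readSseJson<Record<string, unknown>>(response.body, controller.signal)) {
  const eventType = typeof rawEvent.type === "string" ? rawEvent.type : "";
  if (!eventType) continue;
  console.log(eventType, rawEvent);
}
```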