@oh-my-pi/pi-ai 3.15.1 → 3.20.0
This diff shows the contents of publicly released versions of the package, as published to their public registries, and is provided for informational purposes only.
- package/package.json +1 -1
- package/src/bun-imports.d.ts +14 -0
- package/src/cli.ts +16 -1
- package/src/index.ts +2 -0
- package/src/models.generated.ts +20 -20
- package/src/models.ts +16 -9
- package/src/providers/google-shared.ts +1 -1
- package/src/providers/google-vertex.ts +355 -0
- package/src/providers/openai-codex/constants.ts +25 -0
- package/src/providers/openai-codex/prompts/codex-instructions.md +105 -0
- package/src/providers/openai-codex/prompts/codex.ts +217 -0
- package/src/providers/openai-codex/prompts/pi-codex-bridge.ts +48 -0
- package/src/providers/openai-codex/request-transformer.ts +328 -0
- package/src/providers/openai-codex/response-handler.ts +133 -0
- package/src/providers/openai-codex-responses.ts +619 -0
- package/src/stream.ts +116 -7
- package/src/types.ts +9 -1
- package/src/utils/oauth/index.ts +14 -0
- package/src/utils/oauth/openai-codex.ts +334 -0
- package/src/utils/oauth/types.ts +7 -1
package/package.json
CHANGED
package/src/bun-imports.d.ts
ADDED
@@ -0,0 +1,14 @@
+/**
+ * Type declarations for Bun's import attributes.
+ * These allow importing non-JS files as text at build time.
+ */
+
+declare module "*.md" {
+  const content: string;
+  export default content;
+}
+
+declare module "*.txt" {
+  const content: string;
+  export default content;
+}
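These ambient declarations give text imports a `string` type, which the new Codex provider presumably relies on to inline its Markdown prompt (`prompts/codex-instructions.md` in the file list above). A minimal sketch of what they enable, assuming Bun's `type: "text"` import attribute; the import path is illustrative:

// Bun inlines the file contents as a plain string at build time;
// the "*.md" wildcard declaration above types the default export.
import codexInstructions from "./providers/openai-codex/prompts/codex-instructions.md" with { type: "text" };

console.log(codexInstructions.split("\n")[0]); // first line of the prompt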
package/src/cli.ts
CHANGED
@@ -6,6 +6,7 @@ import { loginGitHubCopilot } from "./utils/oauth/github-copilot";
 import { loginAntigravity } from "./utils/oauth/google-antigravity";
 import { loginGeminiCli } from "./utils/oauth/google-gemini-cli";
 import { getOAuthProviders } from "./utils/oauth/index";
+import { loginOpenAICodex } from "./utils/oauth/openai-codex";
 import type { OAuthCredentials, OAuthProvider } from "./utils/oauth/types";

 const AUTH_FILE = "auth.json";

@@ -89,6 +90,19 @@ async function login(provider: OAuthProvider): Promise<void> {
        (msg) => console.log(msg),
      );
      break;
+    case "openai-codex":
+      credentials = await loginOpenAICodex({
+        onAuth: (info) => {
+          console.log(`\nOpen this URL in your browser:\n${info.url}`);
+          if (info.instructions) console.log(info.instructions);
+          console.log();
+        },
+        onPrompt: async (p) => {
+          return await promptFn(`${p.message}${p.placeholder ? ` (${p.placeholder})` : ""}:`);
+        },
+        onProgress: (msg) => console.log(msg),
+      });
+      break;
  }

  const auth = await loadAuth();

@@ -114,6 +128,7 @@ Providers:
   github-copilot      GitHub Copilot
   google-gemini-cli   Google Gemini CLI
   google-antigravity  Antigravity (Gemini 3, Claude, GPT-OSS)
+  openai-codex        OpenAI Codex (ChatGPT Plus/Pro)

 Examples:
   npx @oh-my-pi/pi-ai login         # interactive provider selection

@@ -141,7 +156,7 @@ Examples:
  }
  console.log();

-  const choice = await prompt(
+  const choice = await prompt(`Enter number (1-${PROVIDERS.length}): `);

  const index = parseInt(choice, 10) - 1;
  if (index < 0 || index >= PROVIDERS.length) {
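The new `openai-codex` branch mirrors the other OAuth providers: the library drives the flow and the caller supplies I/O callbacks. A sketch of driving the same login outside the CLI, assuming `loginOpenAICodex` is importable as in the diff above; the readline-based `ask` helper is hypothetical, standing in for the CLI's `promptFn`:

import * as readline from "node:readline/promises";
import { loginOpenAICodex } from "./utils/oauth/openai-codex";

// Hypothetical prompt helper; the real CLI wires in its own promptFn.
async function ask(question: string): Promise<string> {
  const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
  const answer = await rl.question(question);
  rl.close();
  return answer;
}

const credentials = await loginOpenAICodex({
  onAuth: (info) => console.log(`Open this URL in your browser:\n${info.url}`),
  onPrompt: (p) => ask(`${p.message}: `),
  onProgress: (msg) => console.log(msg),
});
// credentials can then be persisted the same way the CLI writes auth.json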
package/src/index.ts
CHANGED
package/src/models.generated.ts
CHANGED
@@ -2755,23 +2755,6 @@ export const MODELS = {
     contextWindow: 200000,
     maxTokens: 4096,
   } satisfies Model<"openai-completions">,
-  "anthropic/claude-3-opus": {
-    id: "anthropic/claude-3-opus",
-    name: "Anthropic: Claude 3 Opus",
-    api: "openai-completions",
-    provider: "openrouter",
-    baseUrl: "https://openrouter.ai/api/v1",
-    reasoning: false,
-    input: ["text", "image"],
-    cost: {
-      input: 15,
-      output: 75,
-      cacheRead: 1.5,
-      cacheWrite: 18.75,
-    },
-    contextWindow: 200000,
-    maxTokens: 4096,
-  } satisfies Model<"openai-completions">,
   "anthropic/claude-3.5-haiku": {
     id: "anthropic/claude-3.5-haiku",
     name: "Anthropic: Claude 3.5 Haiku",

@@ -3637,7 +3620,7 @@
       cacheWrite: 0,
     },
     contextWindow: 256000,
-    maxTokens:
+    maxTokens: 128000,
   } satisfies Model<"openai-completions">,
   "meta-llama/llama-3-70b-instruct": {
     id: "meta-llama/llama-3-70b-instruct",

@@ -3717,11 +3700,11 @@
     input: ["text"],
     cost: {
       input: 0.02,
-      output: 0.
+      output: 0.049999999999999996,
       cacheRead: 0,
       cacheWrite: 0,
     },
-    contextWindow:
+    contextWindow: 16384,
     maxTokens: 16384,
   } satisfies Model<"openai-completions">,
   "meta-llama/llama-3.2-3b-instruct": {

@@ -6257,6 +6240,23 @@
     contextWindow: 163840,
     maxTokens: 65536,
   } satisfies Model<"openai-completions">,
+  "tngtech/tng-r1t-chimera:free": {
+    id: "tngtech/tng-r1t-chimera:free",
+    name: "TNG: R1T Chimera (free)",
+    api: "openai-completions",
+    provider: "openrouter",
+    baseUrl: "https://openrouter.ai/api/v1",
+    reasoning: true,
+    input: ["text"],
+    cost: {
+      input: 0,
+      output: 0,
+      cacheRead: 0,
+      cacheWrite: 0,
+    },
+    contextWindow: 163840,
+    maxTokens: 65536,
+  } satisfies Model<"openai-completions">,
   "x-ai/grok-3": {
     id: "x-ai/grok-3",
     name: "xAI: Grok 3",
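Net effect for consumers: `anthropic/claude-3-opus` leaves the OpenRouter map, several numeric fields change (`maxTokens`, an `output` cost, a `contextWindow`), and a free R1T Chimera entry arrives. A quick sketch of reading the new entry, assuming the generated map is keyed by provider id (which is how `models.ts` iterates it):

import { MODELS } from "./models.generated";

const chimera = MODELS.openrouter["tngtech/tng-r1t-chimera:free"];
console.log(chimera.contextWindow); // 163840
console.log(chimera.cost.input);    // 0 (the :free variant is not billed)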
package/src/models.ts
CHANGED
@@ -12,27 +12,34 @@ for (const [provider, models] of Object.entries(MODELS)) {
   modelRegistry.set(provider, providerModels);
 }

+type ProviderModels = typeof MODELS;
+type ProviderWithModels = keyof ProviderModels;
+
 type ModelApi<
-  TProvider extends
-  TModelId extends keyof
-> =
+  TProvider extends ProviderWithModels,
+  TModelId extends keyof ProviderModels[TProvider],
+> = ProviderModels[TProvider][TModelId] extends { api: infer TApi } ? (TApi extends Api ? TApi : never) : never;

-export function getModel<TProvider extends
+export function getModel<TProvider extends ProviderWithModels, TModelId extends keyof ProviderModels[TProvider]>(
   provider: TProvider,
   modelId: TModelId,
-): Model<ModelApi<TProvider, TModelId
-
+): Model<ModelApi<TProvider, TModelId>>;
+export function getModel(provider: KnownProvider, modelId: string): Model<Api> | undefined;
+export function getModel(provider: KnownProvider, modelId: string): Model<Api> | undefined {
+  return modelRegistry.get(provider)?.get(modelId as string) as Model<Api> | undefined;
 }

 export function getProviders(): KnownProvider[] {
   return Array.from(modelRegistry.keys()) as KnownProvider[];
 }

-export function getModels<TProvider extends
+export function getModels<TProvider extends ProviderWithModels>(
   provider: TProvider,
-): Model<ModelApi<TProvider, keyof
+): Model<ModelApi<TProvider, keyof ProviderModels[TProvider]>>[];
+export function getModels(provider: KnownProvider): Model<Api>[];
+export function getModels(provider: KnownProvider): Model<Api>[] {
   const models = modelRegistry.get(provider);
-  return models ? (Array.from(models.values()) as Model<
+  return models ? (Array.from(models.values()) as Model<Api>[]) : [];
 }

 export function calculateCost<TApi extends Api>(model: Model<TApi>, usage: Usage): Usage["cost"] {
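The rewritten signatures pair a literal-typed overload with a string-based one, so known ids stay precisely typed while dynamic ids become legal and simply may come back undefined. Both call styles in a sketch (the ids come from the generated map above):

import { getModel, getModels } from "./models";

// Literal ids resolve through the typed overload; the result is not optional.
const grok = getModel("openrouter", "x-ai/grok-3");
console.log(grok.name); // "xAI: Grok 3"

// A runtime string falls through to the new overload and may be undefined.
const maybe = getModel("openrouter", process.argv[2] ?? "");
if (maybe) console.log(maybe.contextWindow);

// getModels keeps the precise per-provider element type for literal providers.
const openrouterModels = getModels("openrouter");
console.log(`${openrouterModels.length} OpenRouter models`);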
package/src/providers/google-shared.ts
CHANGED
@@ -7,7 +7,7 @@ import type { Context, ImageContent, Model, StopReason, TextContent, Tool } from
 import { sanitizeSurrogates } from "../utils/sanitize-unicode";
 import { transformMessages } from "./transorm-messages";

-type GoogleApiType = "google-generative-ai" | "google-gemini-cli";
+type GoogleApiType = "google-generative-ai" | "google-gemini-cli" | "google-vertex";

 /**
  * Convert internal messages to Gemini Content[] format.
package/src/providers/google-vertex.ts
ADDED
@@ -0,0 +1,355 @@
+import {
+  type GenerateContentConfig,
+  type GenerateContentParameters,
+  GoogleGenAI,
+  type ThinkingConfig,
+  ThinkingLevel,
+} from "@google/genai";
+import { calculateCost } from "../models";
+import type {
+  Api,
+  AssistantMessage,
+  Context,
+  Model,
+  StreamFunction,
+  StreamOptions,
+  TextContent,
+  ThinkingContent,
+  ToolCall,
+} from "../types";
+import { AssistantMessageEventStream } from "../utils/event-stream";
+import { sanitizeSurrogates } from "../utils/sanitize-unicode";
+import type { GoogleThinkingLevel } from "./google-gemini-cli";
+import { convertMessages, convertTools, mapStopReason, mapToolChoice } from "./google-shared";
+
+export interface GoogleVertexOptions extends StreamOptions {
+  toolChoice?: "auto" | "none" | "any";
+  thinking?: {
+    enabled: boolean;
+    budgetTokens?: number; // -1 for dynamic, 0 to disable
+    level?: GoogleThinkingLevel;
+  };
+  project?: string;
+  location?: string;
+}
+
+const API_VERSION = "v1";
+
+const THINKING_LEVEL_MAP: Record<GoogleThinkingLevel, ThinkingLevel> = {
+  THINKING_LEVEL_UNSPECIFIED: ThinkingLevel.THINKING_LEVEL_UNSPECIFIED,
+  MINIMAL: ThinkingLevel.MINIMAL,
+  LOW: ThinkingLevel.LOW,
+  MEDIUM: ThinkingLevel.MEDIUM,
+  HIGH: ThinkingLevel.HIGH,
+};
+
+// Counter for generating unique tool call IDs
+let toolCallCounter = 0;
+
+export const streamGoogleVertex: StreamFunction<"google-vertex"> = (
+  model: Model<"google-vertex">,
+  context: Context,
+  options?: GoogleVertexOptions,
+): AssistantMessageEventStream => {
+  const stream = new AssistantMessageEventStream();
+
+  (async () => {
+    const output: AssistantMessage = {
+      role: "assistant",
+      content: [],
+      api: "google-vertex" as Api,
+      provider: model.provider,
+      model: model.id,
+      usage: {
+        input: 0,
+        output: 0,
+        cacheRead: 0,
+        cacheWrite: 0,
+        totalTokens: 0,
+        cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
+      },
+      stopReason: "stop",
+      timestamp: Date.now(),
+    };
+
+    try {
+      const project = resolveProject(options);
+      const location = resolveLocation(options);
+      const client = createClient(model, project, location);
+      const params = buildParams(model, context, options);
+      const googleStream = await client.models.generateContentStream(params);
+
+      stream.push({ type: "start", partial: output });
+      let currentBlock: TextContent | ThinkingContent | null = null;
+      const blocks = output.content;
+      const blockIndex = () => blocks.length - 1;
+      for await (const chunk of googleStream) {
+        const candidate = chunk.candidates?.[0];
+        if (candidate?.content?.parts) {
+          for (const part of candidate.content.parts) {
+            if (part.text !== undefined) {
+              const isThinking = part.thought === true;
+              if (
+                !currentBlock ||
+                (isThinking && currentBlock.type !== "thinking") ||
+                (!isThinking && currentBlock.type !== "text")
+              ) {
+                if (currentBlock) {
+                  if (currentBlock.type === "text") {
+                    stream.push({
+                      type: "text_end",
+                      contentIndex: blocks.length - 1,
+                      content: currentBlock.text,
+                      partial: output,
+                    });
+                  } else {
+                    stream.push({
+                      type: "thinking_end",
+                      contentIndex: blockIndex(),
+                      content: currentBlock.thinking,
+                      partial: output,
+                    });
+                  }
+                }
+                if (isThinking) {
+                  currentBlock = { type: "thinking", thinking: "", thinkingSignature: undefined };
+                  output.content.push(currentBlock);
+                  stream.push({ type: "thinking_start", contentIndex: blockIndex(), partial: output });
+                } else {
+                  currentBlock = { type: "text", text: "" };
+                  output.content.push(currentBlock);
+                  stream.push({ type: "text_start", contentIndex: blockIndex(), partial: output });
+                }
+              }
+              if (currentBlock.type === "thinking") {
+                currentBlock.thinking += part.text;
+                currentBlock.thinkingSignature = part.thoughtSignature;
+                stream.push({
+                  type: "thinking_delta",
+                  contentIndex: blockIndex(),
+                  delta: part.text,
+                  partial: output,
+                });
+              } else {
+                currentBlock.text += part.text;
+                stream.push({
+                  type: "text_delta",
+                  contentIndex: blockIndex(),
+                  delta: part.text,
+                  partial: output,
+                });
+              }
+            }
+
+            if (part.functionCall) {
+              if (currentBlock) {
+                if (currentBlock.type === "text") {
+                  stream.push({
+                    type: "text_end",
+                    contentIndex: blockIndex(),
+                    content: currentBlock.text,
+                    partial: output,
+                  });
+                } else {
+                  stream.push({
+                    type: "thinking_end",
+                    contentIndex: blockIndex(),
+                    content: currentBlock.thinking,
+                    partial: output,
+                  });
+                }
+                currentBlock = null;
+              }
+
+              const providedId = part.functionCall.id;
+              const needsNewId =
+                !providedId || output.content.some((b) => b.type === "toolCall" && b.id === providedId);
+              const toolCallId = needsNewId
+                ? `${part.functionCall.name}_${Date.now()}_${++toolCallCounter}`
+                : providedId;
+
+              const toolCall: ToolCall = {
+                type: "toolCall",
+                id: toolCallId,
+                name: part.functionCall.name || "",
+                arguments: part.functionCall.args as Record<string, any>,
+                ...(part.thoughtSignature && { thoughtSignature: part.thoughtSignature }),
+              };
+
+              output.content.push(toolCall);
+              stream.push({ type: "toolcall_start", contentIndex: blockIndex(), partial: output });
+              stream.push({
+                type: "toolcall_delta",
+                contentIndex: blockIndex(),
+                delta: JSON.stringify(toolCall.arguments),
+                partial: output,
+              });
+              stream.push({ type: "toolcall_end", contentIndex: blockIndex(), toolCall, partial: output });
+            }
+          }
+        }
+
+        if (candidate?.finishReason) {
+          output.stopReason = mapStopReason(candidate.finishReason);
+          if (output.content.some((b) => b.type === "toolCall")) {
+            output.stopReason = "toolUse";
+          }
+        }
+
+        if (chunk.usageMetadata) {
+          output.usage = {
+            input: chunk.usageMetadata.promptTokenCount || 0,
+            output:
+              (chunk.usageMetadata.candidatesTokenCount || 0) + (chunk.usageMetadata.thoughtsTokenCount || 0),
+            cacheRead: chunk.usageMetadata.cachedContentTokenCount || 0,
+            cacheWrite: 0,
+            totalTokens: chunk.usageMetadata.totalTokenCount || 0,
+            cost: {
+              input: 0,
+              output: 0,
+              cacheRead: 0,
+              cacheWrite: 0,
+              total: 0,
+            },
+          };
+          calculateCost(model, output.usage);
+        }
+      }
+
+      if (currentBlock) {
+        if (currentBlock.type === "text") {
+          stream.push({
+            type: "text_end",
+            contentIndex: blockIndex(),
+            content: currentBlock.text,
+            partial: output,
+          });
+        } else {
+          stream.push({
+            type: "thinking_end",
+            contentIndex: blockIndex(),
+            content: currentBlock.thinking,
+            partial: output,
+          });
+        }
+      }
+
+      if (options?.signal?.aborted) {
+        throw new Error("Request was aborted");
+      }
+
+      if (output.stopReason === "aborted" || output.stopReason === "error") {
+        throw new Error("An unknown error occurred");
+      }
+
+      stream.push({ type: "done", reason: output.stopReason, message: output });
+      stream.end();
+    } catch (error) {
+      // Remove internal index property used during streaming
+      for (const block of output.content) {
+        if ("index" in block) {
+          delete (block as { index?: number }).index;
+        }
+      }
+      output.stopReason = options?.signal?.aborted ? "aborted" : "error";
+      output.errorMessage = error instanceof Error ? error.message : JSON.stringify(error);
+      stream.push({ type: "error", reason: output.stopReason, error: output });
+      stream.end();
+    }
+  })();
+
+  return stream;
+};
+
+function createClient(model: Model<"google-vertex">, project: string, location: string): GoogleGenAI {
+  const httpOptions: { headers?: Record<string, string> } = {};
+
+  if (model.headers) {
+    httpOptions.headers = { ...model.headers };
+  }
+
+  const hasHttpOptions = Object.values(httpOptions).some(Boolean);
+
+  return new GoogleGenAI({
+    vertexai: true,
+    project,
+    location,
+    apiVersion: API_VERSION,
+    httpOptions: hasHttpOptions ? httpOptions : undefined,
+  });
+}
+
+function resolveProject(options?: GoogleVertexOptions): string {
+  const project = options?.project || process.env.GOOGLE_CLOUD_PROJECT || process.env.GCLOUD_PROJECT;
+  if (!project) {
+    throw new Error(
+      "Vertex AI requires a project ID. Set GOOGLE_CLOUD_PROJECT/GCLOUD_PROJECT or pass project in options.",
+    );
+  }
+  return project;
+}
+
+function resolveLocation(options?: GoogleVertexOptions): string {
+  const location = options?.location || process.env.GOOGLE_CLOUD_LOCATION;
+  if (!location) {
+    throw new Error("Vertex AI requires a location. Set GOOGLE_CLOUD_LOCATION or pass location in options.");
+  }
+  return location;
+}
+
+function buildParams(
+  model: Model<"google-vertex">,
+  context: Context,
+  options: GoogleVertexOptions = {},
+): GenerateContentParameters {
+  const contents = convertMessages(model, context);
+
+  const generationConfig: GenerateContentConfig = {};
+  if (options.temperature !== undefined) {
+    generationConfig.temperature = options.temperature;
+  }
+  if (options.maxTokens !== undefined) {
+    generationConfig.maxOutputTokens = options.maxTokens;
+  }
+
+  const config: GenerateContentConfig = {
+    ...(Object.keys(generationConfig).length > 0 && generationConfig),
+    ...(context.systemPrompt && { systemInstruction: sanitizeSurrogates(context.systemPrompt) }),
+    ...(context.tools && context.tools.length > 0 && { tools: convertTools(context.tools) }),
+  };
+
+  if (context.tools && context.tools.length > 0 && options.toolChoice) {
+    config.toolConfig = {
+      functionCallingConfig: {
+        mode: mapToolChoice(options.toolChoice),
+      },
+    };
+  } else {
+    config.toolConfig = undefined;
+  }
+
+  if (options.thinking?.enabled && model.reasoning) {
+    const thinkingConfig: ThinkingConfig = { includeThoughts: true };
+    if (options.thinking.level !== undefined) {
+      thinkingConfig.thinkingLevel = THINKING_LEVEL_MAP[options.thinking.level];
+    } else if (options.thinking.budgetTokens !== undefined) {
+      thinkingConfig.thinkingBudget = options.thinking.budgetTokens;
+    }
+    config.thinkingConfig = thinkingConfig;
+  }
+
+  if (options.signal) {
+    if (options.signal.aborted) {
+      throw new Error("Request aborted");
+    }
+    config.abortSignal = options.signal;
+  }
+
+  const params: GenerateContentParameters = {
+    model: model.id,
+    contents,
+    config,
+  };
+
+  return params;
+}
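A consumption sketch for the new backend. It assumes `streamGoogleVertex` is reachable from the package exports and that `AssistantMessageEventStream` is async-iterable; neither is confirmed by this diff, and the model/context values are placeholders:

import { streamGoogleVertex } from "./providers/google-vertex";
import type { Context, Model } from "./types";

// resolveProject/resolveLocation fall back to these when options omit them.
process.env.GOOGLE_CLOUD_PROJECT ??= "my-gcp-project";
process.env.GOOGLE_CLOUD_LOCATION ??= "us-central1";

declare const model: Model<"google-vertex">; // obtained from the model registry in practice
declare const context: Context;              // system prompt, messages, tools

const stream = streamGoogleVertex(model, context, {
  toolChoice: "auto",
  thinking: { enabled: true, budgetTokens: -1 }, // -1 requests a dynamic budget
});

for await (const event of stream) {
  if (event.type === "text_delta") process.stdout.write(event.delta);
  if (event.type === "done") console.log("\nstop reason:", event.reason);
}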
package/src/providers/openai-codex/constants.ts
ADDED
@@ -0,0 +1,25 @@
+/**
+ * Constants for OpenAI Codex (ChatGPT OAuth) backend
+ */
+
+export const CODEX_BASE_URL = "https://chatgpt.com/backend-api";
+
+export const OPENAI_HEADERS = {
+  BETA: "OpenAI-Beta",
+  ACCOUNT_ID: "chatgpt-account-id",
+  ORIGINATOR: "originator",
+  SESSION_ID: "session_id",
+  CONVERSATION_ID: "conversation_id",
+} as const;
+
+export const OPENAI_HEADER_VALUES = {
+  BETA_RESPONSES: "responses=experimental",
+  ORIGINATOR_CODEX: "codex_cli_rs",
+} as const;
+
+export const URL_PATHS = {
+  RESPONSES: "/responses",
+  CODEX_RESPONSES: "/codex/responses",
+} as const;
+
+export const JWT_CLAIM_PATH = "https://api.openai.com/auth" as const;
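A sketch of how these constants might compose into a request against the Codex backend. The account and session values are placeholders; the real wiring lives in `request-transformer.ts`, which this excerpt does not show:

import { CODEX_BASE_URL, OPENAI_HEADERS, OPENAI_HEADER_VALUES, URL_PATHS } from "./providers/openai-codex/constants";

// Placeholders; real values come from the OAuth credentials (see utils/oauth/openai-codex.ts).
const accountId = "acct_0000";
const sessionId = crypto.randomUUID(); // global in Bun and Node 19+

const headers: Record<string, string> = {
  [OPENAI_HEADERS.BETA]: OPENAI_HEADER_VALUES.BETA_RESPONSES,         // OpenAI-Beta: responses=experimental
  [OPENAI_HEADERS.ORIGINATOR]: OPENAI_HEADER_VALUES.ORIGINATOR_CODEX, // originator: codex_cli_rs
  [OPENAI_HEADERS.ACCOUNT_ID]: accountId,
  [OPENAI_HEADERS.SESSION_ID]: sessionId,
};

const url = `${CODEX_BASE_URL}${URL_PATHS.CODEX_RESPONSES}`;
// => "https://chatgpt.com/backend-api/codex/responses"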
|