@oh-my-pi/pi-ai 8.1.0 → 8.2.1
This diff reflects the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- package/README.md +11 -12
- package/package.json +38 -14
- package/src/cli.ts +6 -6
- package/src/providers/amazon-bedrock.ts +12 -13
- package/src/providers/anthropic.ts +25 -26
- package/src/providers/cursor.ts +57 -57
- package/src/providers/google-gemini-cli-usage.ts +2 -2
- package/src/providers/google-gemini-cli.ts +8 -10
- package/src/providers/google-shared.ts +12 -13
- package/src/providers/google-vertex.ts +7 -7
- package/src/providers/google.ts +8 -8
- package/src/providers/openai-codex/request-transformer.ts +6 -6
- package/src/providers/openai-codex-responses.ts +28 -28
- package/src/providers/openai-completions.ts +39 -39
- package/src/providers/openai-responses.ts +44 -33
- package/src/providers/transform-messages.ts +3 -3
- package/src/storage.ts +29 -19
- package/src/stream.ts +6 -6
- package/src/types.ts +1 -2
- package/src/usage/claude.ts +4 -4
- package/src/usage/github-copilot.ts +3 -4
- package/src/usage/google-antigravity.ts +3 -3
- package/src/usage/openai-codex.ts +4 -4
- package/src/usage/zai.ts +3 -3
- package/src/usage.ts +0 -1
- package/src/utils/event-stream.ts +4 -4
- package/src/utils/oauth/anthropic.ts +0 -1
- package/src/utils/oauth/callback-server.ts +2 -3
- package/src/utils/oauth/github-copilot.ts +2 -3
- package/src/utils/oauth/google-antigravity.ts +0 -1
- package/src/utils/oauth/google-gemini-cli.ts +2 -3
- package/src/utils/oauth/index.ts +11 -12
- package/src/utils/oauth/openai-codex.ts +0 -1
- package/src/utils/overflow.ts +2 -2
- package/src/utils/validation.ts +4 -5
package/src/providers/openai-codex-responses.ts
CHANGED

@@ -1,6 +1,17 @@
-import os from "node:os";
-import {
-import {
+import * as os from "node:os";
+import { abortableSleep } from "@oh-my-pi/pi-utils";
+import type {
+ResponseFunctionToolCall,
+ResponseInput,
+ResponseInputContent,
+ResponseInputImage,
+ResponseInputText,
+ResponseOutputMessage,
+ResponseReasoningItem,
+} from "openai/resources/responses/responses";
+import packageJson from "../../package.json" with { type: "json" };
+import { calculateCost } from "../models";
+import { getEnvApiKey } from "../stream";
 import type {
 Api,
 AssistantMessage,
@@ -13,22 +24,11 @@ import type {
 ThinkingContent,
 Tool,
 ToolCall,
-} from "
-import { AssistantMessageEventStream } from "
-import { parseStreamingJson } from "
-import { formatErrorMessageWithRetryAfter } from "
-import { sanitizeSurrogates } from "
-import { abortableSleep } from "@oh-my-pi/pi-utils";
-import type {
-ResponseFunctionToolCall,
-ResponseInput,
-ResponseInputContent,
-ResponseInputImage,
-ResponseInputText,
-ResponseOutputMessage,
-ResponseReasoningItem,
-} from "openai/resources/responses/responses";
-import packageJson from "../../package.json" with { type: "json" };
+} from "../types";
+import { AssistantMessageEventStream } from "../utils/event-stream";
+import { parseStreamingJson } from "../utils/json-parse";
+import { formatErrorMessageWithRetryAfter } from "../utils/retry-after";
+import { sanitizeSurrogates } from "../utils/sanitize-unicode";
 import {
 CODEX_BASE_URL,
 JWT_CLAIM_PATH,
@@ -341,7 +341,7 @@ export const streamOpenAICodexResponses: StreamFunction<"openai-codex-responses"
 } else if (eventType === "response.output_item.done") {
 const item = rawEvent.item as ResponseReasoningItem | ResponseOutputMessage | ResponseFunctionToolCall;
 if (item.type === "reasoning" && currentBlock?.type === "thinking") {
-currentBlock.thinking = item.summary?.map(
+currentBlock.thinking = item.summary?.map(s => s.text).join("\n\n") || "";
 currentBlock.thinkingSignature = JSON.stringify(item);
 stream.push({
 type: "thinking_end",
@@ -351,7 +351,7 @@ export const streamOpenAICodexResponses: StreamFunction<"openai-codex-responses"
 });
 currentBlock = null;
 } else if (item.type === "message" && currentBlock?.type === "text") {
-currentBlock.text = item.content.map(
+currentBlock.text = item.content.map(c => (c.type === "output_text" ? c.text : c.refusal)).join("");
 currentBlock.textSignature = item.id;
 stream.push({
 type: "text_end",
@@ -396,7 +396,7 @@ export const streamOpenAICodexResponses: StreamFunction<"openai-codex-responses"
 }
 calculateCost(model, output.usage);
 output.stopReason = mapStopReason(response?.status);
-if (output.content.some(
+if (output.content.some(b => b.type === "toolCall") && output.stopReason === "stop") {
 output.stopReason = "toolUse";
 }
 } else if (eventType === "error") {
@@ -593,9 +593,9 @@ function convertMessages(model: Model<"openai-codex-responses">, context: Contex
 });
 // Filter out images if model doesn't support them, and empty text blocks
 let filteredContent = !model.input.includes("image")
-? content.filter(
+? content.filter(c => c.type !== "input_image")
 : content;
-filteredContent = filteredContent.filter(
+filteredContent = filteredContent.filter(c => {
 if (c.type === "input_text") {
 return c.text.trim().length > 0;
 }
@@ -647,10 +647,10 @@ function convertMessages(model: Model<"openai-codex-responses">, context: Contex
 messages.push(...output);
 } else if (msg.role === "toolResult") {
 const textResult = msg.content
-.filter(
-.map(
+.filter(c => c.type === "text")
+.map(c => (c as { text: string }).text)
 .join("\n");
-const hasImages = msg.content.some(
+const hasImages = msg.content.some(c => c.type === "image");
 const normalized = normalizeResponsesToolCallId(msg.toolCallId);

 const hasText = textResult.length > 0;
@@ -692,7 +692,7 @@ function convertMessages(model: Model<"openai-codex-responses">, context: Contex
 function convertTools(
 tools: Tool[],
 ): Array<{ type: "function"; name: string; description: string; parameters: Record<string, unknown>; strict: null }> {
-return tools.map(
+return tools.map(tool => ({
 type: "function",
 name: tool.name,
 description: tool.description,
package/src/providers/openai-completions.ts
CHANGED

@@ -1,5 +1,15 @@
-import
-import {
+import OpenAI from "openai";
+import type {
+ChatCompletionAssistantMessageParam,
+ChatCompletionChunk,
+ChatCompletionContentPart,
+ChatCompletionContentPartImage,
+ChatCompletionContentPartText,
+ChatCompletionMessageParam,
+ChatCompletionToolMessageParam,
+} from "openai/resources/chat/completions";
+import { calculateCost } from "../models";
+import { getEnvApiKey } from "../stream";
 import type {
 AssistantMessage,
 Context,
@@ -14,21 +24,11 @@ import type {
 Tool,
 ToolCall,
 ToolResultMessage,
-} from "
-import { AssistantMessageEventStream } from "
-import { parseStreamingJson } from "
-import { formatErrorMessageWithRetryAfter } from "
-import { sanitizeSurrogates } from "
-import OpenAI from "openai";
-import type {
-ChatCompletionAssistantMessageParam,
-ChatCompletionChunk,
-ChatCompletionContentPart,
-ChatCompletionContentPartImage,
-ChatCompletionContentPartText,
-ChatCompletionMessageParam,
-ChatCompletionToolMessageParam,
-} from "openai/resources/chat/completions";
+} from "../types";
+import { AssistantMessageEventStream } from "../utils/event-stream";
+import { parseStreamingJson } from "../utils/json-parse";
+import { formatErrorMessageWithRetryAfter } from "../utils/retry-after";
+import { sanitizeSurrogates } from "../utils/sanitize-unicode";
 import { transformMessages } from "./transform-messages";

 /**
@@ -61,7 +61,7 @@ function hasToolHistory(messages: Message[]): boolean {
 return true;
 }
 if (msg.role === "assistant") {
-if (msg.content.some(
+if (msg.content.some(block => block.type === "toolCall")) {
 return true;
 }
 }
@@ -287,7 +287,7 @@ export const streamOpenAICompletions: StreamFunction<"openai-completions"> = (
 for (const detail of reasoningDetails) {
 if (detail.type === "reasoning.encrypted" && detail.id && detail.data) {
 const matchingToolCall = output.content.find(
-
+b => b.type === "toolCall" && b.id === detail.id,
 ) as ToolCall | undefined;
 if (matchingToolCall) {
 matchingToolCall.thoughtSignature = JSON.stringify(detail);
@@ -356,12 +356,12 @@ function createClient(
 headers["Openai-Intent"] = "conversation-edits";

 // Copilot requires this header when sending images
-const hasImages = messages.some(
+const hasImages = messages.some(msg => {
 if (msg.role === "user" && Array.isArray(msg.content)) {
-return msg.content.some(
+return msg.content.some(c => c.type === "image");
 }
 if (msg.role === "toolResult" && Array.isArray(msg.content)) {
-return msg.content.some(
+return msg.content.some(c => c.type === "image");
 }
 return false;
 });
@@ -516,7 +516,7 @@ export function convertMessages(
 }
 });
 const filteredContent = !model.input.includes("image")
-? content.filter(
+? content.filter(c => c.type !== "image_url")
 : content;
 if (filteredContent.length === 0) continue;
 params.push({
@@ -531,29 +531,29 @@ export function convertMessages(
 content: compat.requiresAssistantAfterToolResult ? "" : null,
 };

-const textBlocks = msg.content.filter(
+const textBlocks = msg.content.filter(b => b.type === "text") as TextContent[];
 // Filter out empty text blocks to avoid API validation errors
-const nonEmptyTextBlocks = textBlocks.filter(
+const nonEmptyTextBlocks = textBlocks.filter(b => b.text && b.text.trim().length > 0);
 if (nonEmptyTextBlocks.length > 0) {
 // GitHub Copilot requires assistant content as a string, not an array.
 // Sending as array causes Claude models to re-answer all previous prompts.
 if (model.provider === "github-copilot") {
-assistantMsg.content = nonEmptyTextBlocks.map(
+assistantMsg.content = nonEmptyTextBlocks.map(b => sanitizeSurrogates(b.text)).join("");
 } else {
-assistantMsg.content = nonEmptyTextBlocks.map(
+assistantMsg.content = nonEmptyTextBlocks.map(b => {
 return { type: "text", text: sanitizeSurrogates(b.text) };
 });
 }
 }

 // Handle thinking blocks
-const thinkingBlocks = msg.content.filter(
+const thinkingBlocks = msg.content.filter(b => b.type === "thinking") as ThinkingContent[];
 // Filter out empty thinking blocks to avoid API validation errors
-const nonEmptyThinkingBlocks = thinkingBlocks.filter(
+const nonEmptyThinkingBlocks = thinkingBlocks.filter(b => b.thinking && b.thinking.trim().length > 0);
 if (nonEmptyThinkingBlocks.length > 0) {
 if (compat.requiresThinkingAsText) {
 // Convert thinking blocks to plain text (no tags to avoid model mimicking them)
-const thinkingText = nonEmptyThinkingBlocks.map(
+const thinkingText = nonEmptyThinkingBlocks.map(b => b.thinking).join("\n\n");
 const textContent = assistantMsg.content as Array<{ type: "text"; text: string }> | null;
 if (textContent) {
 textContent.unshift({ type: "text", text: thinkingText });
@@ -564,14 +564,14 @@ export function convertMessages(
 // Use the signature from the first thinking block if available (for llama.cpp server + gpt-oss)
 const signature = nonEmptyThinkingBlocks[0].thinkingSignature;
 if (signature && signature.length > 0) {
-(assistantMsg as any)[signature] = nonEmptyThinkingBlocks.map(
+(assistantMsg as any)[signature] = nonEmptyThinkingBlocks.map(b => b.thinking).join("\n");
 }
 }
 }

-const toolCalls = msg.content.filter(
+const toolCalls = msg.content.filter(b => b.type === "toolCall") as ToolCall[];
 if (toolCalls.length > 0) {
-assistantMsg.tool_calls = toolCalls.map(
+assistantMsg.tool_calls = toolCalls.map(tc => ({
 id: normalizeMistralToolId(tc.id, compat.requiresMistralToolIds),
 type: "function" as const,
 function: {
@@ -580,8 +580,8 @@ export function convertMessages(
 },
 }));
 const reasoningDetails = toolCalls
-.filter(
-.map(
+.filter(tc => tc.thoughtSignature)
+.map(tc => {
 try {
 return JSON.parse(tc.thoughtSignature!);
 } catch {
@@ -616,10 +616,10 @@ export function convertMessages(

 // Extract text and image content
 const textResult = toolMsg.content
-.filter(
-.map(
+.filter(c => c.type === "text")
+.map(c => (c as any).text)
 .join("\n");
-const hasImages = toolMsg.content.some(
+const hasImages = toolMsg.content.some(c => c.type === "image");

 // Always send tool result with text (or placeholder if only images)
 const hasText = textResult.length > 0;
@@ -683,7 +683,7 @@ export function convertMessages(
 }

 function convertTools(tools: Tool[]): OpenAI.Chat.Completions.ChatCompletionTool[] {
-return tools.map(
+return tools.map(tool => ({
 type: "function",
 function: {
 name: tool.name,
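The assistant-message conversion above keeps the Copilot special case noted in the code comments: assistant content is sent as a single string, while other OpenAI-compatible providers get an array of text parts. A minimal sketch of the two shapes, using a simplified stand-in for the package's text blocks (illustrative only):

```ts
// Simplified stand-in for the package's TextContent blocks; the real conversion
// also handles thinking blocks and tool calls.
type Block = { type: "text"; text: string };
const blocks: Block[] = [
	{ type: "text", text: "Hello " },
	{ type: "text", text: "world" },
];

// GitHub Copilot: assistant content must be a single string.
const copilotContent: string = blocks.map(b => b.text).join("");

// Other providers: assistant content as an array of text parts.
const defaultContent = blocks.map(b => ({ type: "text" as const, text: b.text }));
```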
package/src/providers/openai-responses.ts
CHANGED

@@ -1,22 +1,3 @@
-import { calculateCost } from "@oh-my-pi/pi-ai/models";
-import { getEnvApiKey } from "@oh-my-pi/pi-ai/stream";
-import type {
-Api,
-AssistantMessage,
-Context,
-Model,
-StopReason,
-StreamFunction,
-StreamOptions,
-TextContent,
-ThinkingContent,
-Tool,
-ToolCall,
-} from "@oh-my-pi/pi-ai/types";
-import { AssistantMessageEventStream } from "@oh-my-pi/pi-ai/utils/event-stream";
-import { parseStreamingJson } from "@oh-my-pi/pi-ai/utils/json-parse";
-import { formatErrorMessageWithRetryAfter } from "@oh-my-pi/pi-ai/utils/retry-after";
-import { sanitizeSurrogates } from "@oh-my-pi/pi-ai/utils/sanitize-unicode";
 import OpenAI from "openai";
 import type {
 Tool as OpenAITool,
@@ -29,6 +10,25 @@ import type {
 ResponseOutputMessage,
 ResponseReasoningItem,
 } from "openai/resources/responses/responses";
+import { calculateCost } from "../models";
+import { getEnvApiKey } from "../stream";
+import type {
+Api,
+AssistantMessage,
+Context,
+Model,
+StopReason,
+StreamFunction,
+StreamOptions,
+TextContent,
+ThinkingContent,
+Tool,
+ToolCall,
+} from "../types";
+import { AssistantMessageEventStream } from "../utils/event-stream";
+import { parseStreamingJson } from "../utils/json-parse";
+import { formatErrorMessageWithRetryAfter } from "../utils/retry-after";
+import { sanitizeSurrogates } from "../utils/sanitize-unicode";
 import { transformMessages } from "./transform-messages";

 /** Fast deterministic hash to shorten long strings */
@@ -240,12 +240,19 @@ export const streamOpenAIResponses: StreamFunction<"openai-responses"> = (
 });
 }
 }
+// Handle function call arguments done (some providers send this instead of deltas)
+else if (event.type === "response.function_call_arguments.done") {
+if (currentItem?.type === "function_call" && currentBlock?.type === "toolCall") {
+currentBlock.partialJson = event.arguments;
+currentBlock.arguments = parseStreamingJson(currentBlock.partialJson);
+}
+}
 // Handle output item completion
 else if (event.type === "response.output_item.done") {
 const item = event.item;

 if (item.type === "reasoning" && currentBlock && currentBlock.type === "thinking") {
-currentBlock.thinking = item.summary?.map(
+currentBlock.thinking = item.summary?.map(s => s.text).join("\n\n") || "";
 currentBlock.thinkingSignature = JSON.stringify(item);
 stream.push({
 type: "thinking_end",
@@ -255,7 +262,7 @@ export const streamOpenAIResponses: StreamFunction<"openai-responses"> = (
 });
 currentBlock = null;
 } else if (item.type === "message" && currentBlock && currentBlock.type === "text") {
-currentBlock.text = item.content.map(
+currentBlock.text = item.content.map(c => (c.type === "output_text" ? c.text : c.refusal)).join("");
 currentBlock.textSignature = item.id;
 stream.push({
 type: "text_end",
@@ -265,13 +272,17 @@ export const streamOpenAIResponses: StreamFunction<"openai-responses"> = (
 });
 currentBlock = null;
 } else if (item.type === "function_call") {
+const args =
+currentBlock?.type === "toolCall" && currentBlock.partialJson
+? JSON.parse(currentBlock.partialJson)
+: JSON.parse(item.arguments);
 const toolCall: ToolCall = {
 type: "toolCall",
 id: `${item.call_id}|${item.id}`,
 name: item.name,
-arguments:
+arguments: args,
 };
-
+currentBlock = null;
 stream.push({ type: "toolcall_end", contentIndex: blockIndex(), toolCall, partial: output });
 }
 }
@@ -293,7 +304,7 @@ export const streamOpenAIResponses: StreamFunction<"openai-responses"> = (
 calculateCost(model, output.usage);
 // Map status to stop reason
 output.stopReason = mapStopReason(response?.status);
-if (output.content.some(
+if (output.content.some(b => b.type === "toolCall") && output.stopReason === "stop") {
 output.stopReason = "toolUse";
 }
 }
@@ -358,12 +369,12 @@ function createClient(
 headers["Openai-Intent"] = "conversation-edits";

 // Copilot requires this header when sending images
-const hasImages = messages.some(
+const hasImages = messages.some(msg => {
 if (msg.role === "user" && Array.isArray(msg.content)) {
-return msg.content.some(
+return msg.content.some(c => c.type === "image");
 }
 if (msg.role === "toolResult" && Array.isArray(msg.content)) {
-return msg.content.some(
+return msg.content.some(c => c.type === "image");
 }
 return false;
 });
@@ -491,9 +502,9 @@ function convertMessages(
 });
 // Filter out images if model doesn't support them, and empty text blocks
 let filteredContent = !model.input.includes("image")
-? content.filter(
+? content.filter(c => c.type !== "input_image")
 : content;
-filteredContent = filteredContent.filter(
+filteredContent = filteredContent.filter(c => {
 if (c.type === "input_text") {
 return c.text.trim().length > 0;
 }
@@ -567,10 +578,10 @@ function convertMessages(
 } else if (msg.role === "toolResult") {
 // Extract text and image content
 const textResult = msg.content
-.filter(
-.map(
+.filter(c => c.type === "text")
+.map(c => (c as any).text)
 .join("\n");
-const hasImages = msg.content.some(
+const hasImages = msg.content.some(c => c.type === "image");
 const normalized = normalizeResponsesToolCallId(msg.toolCallId);
 if (strictResponsesPairing && !knownCallIds.has(normalized.callId)) {
 continue;
@@ -618,7 +629,7 @@ function convertMessages(
 }

 function convertTools(tools: Tool[]): OpenAITool[] {
-return tools.map(
+return tools.map(tool => ({
 type: "function",
 name: tool.name,
 description: tool.description,
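The new `response.function_call_arguments.done` branch above covers providers that deliver the full argument string in a single event instead of streaming deltas; when the `function_call` item later completes, the JSON accumulated in `partialJson` is preferred over the item's own `arguments`. A small sketch of that fallback in isolation (hypothetical helper, not part of the package):

```ts
// Hypothetical helper mirroring the fallback added above: prefer the JSON
// accumulated while streaming, otherwise parse the finished item's arguments.
function resolveToolCallArguments(
	partialJson: string | undefined,
	itemArguments: string,
): Record<string, unknown> {
	return partialJson ? JSON.parse(partialJson) : JSON.parse(itemArguments);
}

// e.g. a provider that only emits the ".done" event still yields parsed arguments:
resolveToolCallArguments('{"path":"README.md"}', '{"path":"README.md"}');
```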
package/src/providers/transform-messages.ts
CHANGED

@@ -1,4 +1,4 @@
-import type { Api, AssistantMessage, Message, Model, ToolCall, ToolResultMessage } from "
+import type { Api, AssistantMessage, Message, Model, ToolCall, ToolResultMessage } from "../types";

 /**
 * Normalize tool call ID for cross-provider compatibility.
@@ -98,7 +98,7 @@ export function transformMessages<TApi extends Api>(messages: Message[], model:
 const needsToolCallIdNormalization = targetRequiresStrictIds && (crossProviderSwitch || copilotCrossApiSwitch);

 // Transform message from different provider/model
-const transformedContent = assistantMsg.content.flatMap(
+const transformedContent = assistantMsg.content.flatMap(block => {
 if (block.type === "thinking") {
 // Skip empty thinking blocks, convert others to plain text
 if (!block.thinking || block.thinking.trim() === "") return [];
@@ -173,7 +173,7 @@ export function transformMessages<TApi extends Api>(messages: Message[], model:

 const assistantMsg = msg as AssistantMessage;
 const isErroredAssistant = assistantMsg.stopReason === "error" || assistantMsg.stopReason === "aborted";
-const toolCalls = assistantMsg.content.filter(
+const toolCalls = assistantMsg.content.filter(b => b.type === "toolCall") as ToolCall[];

 result.push(msg);

package/src/storage.ts
CHANGED

@@ -4,9 +4,9 @@
 */

 import { Database } from "bun:sqlite";
-import
-import
-import
+import * as fs from "node:fs/promises";
+import * as os from "node:os";
+import * as path from "node:path";
 import type { OAuthCredentials } from "./utils/oauth/types";

 type AuthCredential = { type: "api_key"; key: string } | ({ type: "oauth" } & OAuthCredentials);
@@ -24,7 +24,7 @@ type AuthRow = {
 * Get the agent config directory (e.g., ~/.omp/agent/)
 */
 function getAgentDir(): string {
-const configDir = process.env.OMP_CODING_AGENT_DIR || join(homedir(), ".omp", "agent");
+const configDir = process.env.OMP_CODING_AGENT_DIR || path.join(os.homedir(), ".omp", "agent");
 return configDir;
 }

@@ -32,7 +32,7 @@ function getAgentDir(): string {
 * Get path to agent.db
 */
 function getAgentDbPath(): string {
-return join(getAgentDir(), "agent.db");
+return path.join(getAgentDir(), "agent.db");
 }

 function serializeCredential(credential: AuthCredential): { credentialType: string; data: string } | null {
@@ -79,6 +79,8 @@ function deserializeCredential(row: AuthRow): AuthCredential | null {

 /**
 * Simple storage class for CLI auth credentials.
+ *
+ * Use `CliAuthStorage.create()` to instantiate (async initialization).
 */
 export class CliAuthStorage {
 private db: Database;
@@ -87,20 +89,8 @@ export class CliAuthStorage {
 private listAllStmt: ReturnType<Database["prepare"]>;
 private deleteByProviderStmt: ReturnType<Database["prepare"]>;

-constructor(
-
-const dir = dirname(dbPath);
-if (!existsSync(dir)) {
-mkdirSync(dir, { recursive: true, mode: 0o700 });
-}
-
-this.db = new Database(dbPath);
-// Harden database file permissions to prevent credential leakage
-try {
-chmodSync(dbPath, 0o600);
-} catch {
-// Ignore chmod failures (e.g., Windows)
-}
+private constructor(db: Database) {
+this.db = db;
 this.initializeSchema();

 this.insertStmt = this.db.prepare(
@@ -111,6 +101,26 @@ export class CliAuthStorage {
 this.deleteByProviderStmt = this.db.prepare("DELETE FROM auth_credentials WHERE provider = ?");
 }

+static async create(dbPath: string = getAgentDbPath()): Promise<CliAuthStorage> {
+const dir = path.dirname(dbPath);
+const dirExists = await fs
+.stat(dir)
+.then(s => s.isDirectory())
+.catch(() => false);
+if (!dirExists) {
+await fs.mkdir(dir, { recursive: true, mode: 0o700 });
+}
+
+const db = new Database(dbPath);
+try {
+await fs.chmod(dbPath, 0o600);
+} catch {
+// Ignore chmod failures (e.g., Windows)
+}
+
+return new CliAuthStorage(db);
+}
+
 private initializeSchema(): void {
 this.db.exec(`
 PRAGMA journal_mode=WAL;
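The constructor is now private, and the filesystem setup (directory creation and chmod hardening) moves into an async `create()` factory built on `node:fs/promises`. A minimal migration sketch for callers, assuming the surrounding CliAuthStorage API is otherwise unchanged; the import path is illustrative, not confirmed by this diff:

```ts
// Illustrative import path; adjust to wherever CliAuthStorage is exported in your setup.
import { CliAuthStorage } from "@oh-my-pi/pi-ai/storage";

// 8.1.x: const storage = new CliAuthStorage();  // synchronous constructor
// 8.2.x: the factory awaits mkdir/chmod before opening the SQLite database.
const storage = await CliAuthStorage.create(); // defaults to ~/.omp/agent/agent.db
```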
package/src/stream.ts
CHANGED

@@ -1,6 +1,6 @@
-import
-import
-import
+import * as fs from "node:fs";
+import * as os from "node:os";
+import * as path from "node:path";
 import { supportsXhigh } from "./models";
 import { type BedrockOptions, streamBedrock } from "./providers/amazon-bedrock";
 import { type AnthropicOptions, streamAnthropic } from "./providers/anthropic";
@@ -34,10 +34,10 @@ function hasVertexAdcCredentials(): boolean {
 if (cachedVertexAdcCredentialsExists === null) {
 const gacPath = process.env.GOOGLE_APPLICATION_CREDENTIALS;
 if (gacPath) {
-cachedVertexAdcCredentialsExists = existsSync(gacPath);
+cachedVertexAdcCredentialsExists = fs.existsSync(gacPath);
 } else {
-cachedVertexAdcCredentialsExists = existsSync(
-join(homedir(), ".config", "gcloud", "application_default_credentials.json"),
+cachedVertexAdcCredentialsExists = fs.existsSync(
+path.join(os.homedir(), ".config", "gcloud", "application_default_credentials.json"),
 );
 }
 }
package/src/types.ts
CHANGED

@@ -1,3 +1,4 @@
+import type { TSchema } from "@sinclair/typebox";
 import type { BedrockOptions } from "./providers/amazon-bedrock";
 import type { AnthropicOptions } from "./providers/anthropic";
 import type { CursorOptions } from "./providers/cursor";
@@ -237,8 +238,6 @@ export interface CursorExecHandlers {
 onToolResult?: CursorToolResultHandler;
 }

-import type { TSchema } from "@sinclair/typebox";
-
 export interface Tool<TParameters extends TSchema = TSchema> {
 name: string;
 description: string;
package/src/usage/claude.ts
CHANGED

@@ -7,7 +7,7 @@ import type {
 UsageReport,
 UsageStatus,
 UsageWindow,
-} from "
+} from "../usage";

 const DEFAULT_ENDPOINT = "https://api.anthropic.com/api/oauth";
 const DEFAULT_CACHE_TTL_MS = 60_000;
@@ -241,10 +241,10 @@ function buildCacheKey(params: UsageFetchParams): string {

 function resolveCacheExpiry(now: number, limits: UsageLimit[]): number {
 const earliestReset = limits
-.map(
+.map(limit => limit.window?.resetsAt)
 .filter((value): value is number => typeof value === "number" && Number.isFinite(value))
 .reduce((min, value) => (min === undefined ? value : Math.min(min, value)), undefined as number | undefined);
-const exhausted = limits.some(
+const exhausted = limits.some(limit => limit.status === "exhausted");
 if (earliestReset === undefined) return now + DEFAULT_CACHE_TTL_MS;
 if (exhausted) return earliestReset;
 return Math.min(now + DEFAULT_CACHE_TTL_MS, earliestReset);
@@ -351,5 +351,5 @@ async function fetchClaudeUsage(params: UsageFetchParams, ctx: UsageFetchContext
 export const claudeUsageProvider: UsageProvider = {
 id: "anthropic",
 fetchUsage: fetchClaudeUsage,
-supports:
+supports: params => params.provider === "anthropic" && params.credential.type === "oauth",
 };
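The cache-expiry rule visible in `resolveCacheExpiry` above: take the earliest window reset across all limits; with no reset information, cache for the 60s default TTL; if any limit is exhausted, cache until that reset; otherwise cap the expiry at the default TTL. A worked example with hypothetical values (milliseconds):

```ts
// Hypothetical inputs illustrating the rule shown in the hunk above.
const now = 0;
const DEFAULT_CACHE_TTL_MS = 60_000;
const earliestReset: number | undefined = 30_000; // nearest limit.window?.resetsAt
const exhausted = false; // any limit.status === "exhausted"

const expiresAt =
	earliestReset === undefined
		? now + DEFAULT_CACHE_TTL_MS // no reset info: default TTL
		: exhausted
			? earliestReset // exhausted: cache until the window resets
			: Math.min(now + DEFAULT_CACHE_TTL_MS, earliestReset); // => 30_000 here
```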
package/src/usage/github-copilot.ts
CHANGED

@@ -3,7 +3,6 @@
 *
 * Normalizes Copilot quota usage into the shared UsageReport schema.
 */
-
 import type {
 UsageAmount,
 UsageCacheEntry,
@@ -14,7 +13,7 @@ import type {
 UsageReport,
 UsageStatus,
 UsageWindow,
-} from "
+} from "../usage";

 const COPILOT_HEADERS = {
 "User-Agent": "GitHubCopilotChat/0.35.0",
@@ -316,7 +315,7 @@ function normalizeBillingUsage(data: BillingUsageResponse): UsageLimit[] {
 };

 const premiumItems = data.usageItems.filter(
-
+item => item.sku === "Copilot Premium Request" || item.sku.includes("Premium"),
 );
 const totalUsed = premiumItems.reduce((sum, item) => sum + item.grossQuantity, 0);
 const totalLimit = premiumItems.reduce((sum, item) => sum + (item.limit ?? 0), 0) || undefined;
@@ -359,7 +358,7 @@ function normalizeBillingUsage(data: BillingUsageResponse): UsageLimit[] {
 function resolveCacheTtl(now: number, report: UsageReport | null): UsageCacheEntry["expiresAt"] {
 if (!report) return now + DEFAULT_CACHE_TTL_MS;
 const resetInMs = report.limits
-.map(
+.map(limit => limit.window?.resetInMs)
 .find((value): value is number => typeof value === "number" && Number.isFinite(value));
 if (!resetInMs || resetInMs <= 0) return now + DEFAULT_CACHE_TTL_MS;
 return now + Math.min(MAX_CACHE_TTL_MS, resetInMs);