@oh-my-pi/pi-ai 8.1.0 → 8.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,17 @@
-import os from "node:os";
-import { calculateCost } from "@oh-my-pi/pi-ai/models";
-import { getEnvApiKey } from "@oh-my-pi/pi-ai/stream";
+import * as os from "node:os";
+import { abortableSleep } from "@oh-my-pi/pi-utils";
+import type {
+  ResponseFunctionToolCall,
+  ResponseInput,
+  ResponseInputContent,
+  ResponseInputImage,
+  ResponseInputText,
+  ResponseOutputMessage,
+  ResponseReasoningItem,
+} from "openai/resources/responses/responses";
+import packageJson from "../../package.json" with { type: "json" };
+import { calculateCost } from "../models";
+import { getEnvApiKey } from "../stream";
 import type {
   Api,
   AssistantMessage,
@@ -13,22 +24,11 @@ import type {
   ThinkingContent,
   Tool,
   ToolCall,
-} from "@oh-my-pi/pi-ai/types";
-import { AssistantMessageEventStream } from "@oh-my-pi/pi-ai/utils/event-stream";
-import { parseStreamingJson } from "@oh-my-pi/pi-ai/utils/json-parse";
-import { formatErrorMessageWithRetryAfter } from "@oh-my-pi/pi-ai/utils/retry-after";
-import { sanitizeSurrogates } from "@oh-my-pi/pi-ai/utils/sanitize-unicode";
-import { abortableSleep } from "@oh-my-pi/pi-utils";
-import type {
-  ResponseFunctionToolCall,
-  ResponseInput,
-  ResponseInputContent,
-  ResponseInputImage,
-  ResponseInputText,
-  ResponseOutputMessage,
-  ResponseReasoningItem,
-} from "openai/resources/responses/responses";
-import packageJson from "../../package.json" with { type: "json" };
+} from "../types";
+import { AssistantMessageEventStream } from "../utils/event-stream";
+import { parseStreamingJson } from "../utils/json-parse";
+import { formatErrorMessageWithRetryAfter } from "../utils/retry-after";
+import { sanitizeSurrogates } from "../utils/sanitize-unicode";
 import {
   CODEX_BASE_URL,
   JWT_CLAIM_PATH,
@@ -341,7 +341,7 @@ export const streamOpenAICodexResponses: StreamFunction<"openai-codex-responses"
 } else if (eventType === "response.output_item.done") {
   const item = rawEvent.item as ResponseReasoningItem | ResponseOutputMessage | ResponseFunctionToolCall;
   if (item.type === "reasoning" && currentBlock?.type === "thinking") {
-    currentBlock.thinking = item.summary?.map((s) => s.text).join("\n\n") || "";
+    currentBlock.thinking = item.summary?.map(s => s.text).join("\n\n") || "";
     currentBlock.thinkingSignature = JSON.stringify(item);
     stream.push({
       type: "thinking_end",
@@ -351,7 +351,7 @@ export const streamOpenAICodexResponses: StreamFunction<"openai-codex-responses"
   });
   currentBlock = null;
 } else if (item.type === "message" && currentBlock?.type === "text") {
-  currentBlock.text = item.content.map((c) => (c.type === "output_text" ? c.text : c.refusal)).join("");
+  currentBlock.text = item.content.map(c => (c.type === "output_text" ? c.text : c.refusal)).join("");
   currentBlock.textSignature = item.id;
   stream.push({
     type: "text_end",
@@ -396,7 +396,7 @@ export const streamOpenAICodexResponses: StreamFunction<"openai-codex-responses"
   }
   calculateCost(model, output.usage);
   output.stopReason = mapStopReason(response?.status);
-  if (output.content.some((b) => b.type === "toolCall") && output.stopReason === "stop") {
+  if (output.content.some(b => b.type === "toolCall") && output.stopReason === "stop") {
     output.stopReason = "toolUse";
   }
 } else if (eventType === "error") {
@@ -593,9 +593,9 @@ function convertMessages(model: Model<"openai-codex-responses">, context: Contex
 });
 // Filter out images if model doesn't support them, and empty text blocks
 let filteredContent = !model.input.includes("image")
-  ? content.filter((c) => c.type !== "input_image")
+  ? content.filter(c => c.type !== "input_image")
   : content;
-filteredContent = filteredContent.filter((c) => {
+filteredContent = filteredContent.filter(c => {
   if (c.type === "input_text") {
     return c.text.trim().length > 0;
   }
@@ -647,10 +647,10 @@ function convertMessages(model: Model<"openai-codex-responses">, context: Contex
   messages.push(...output);
 } else if (msg.role === "toolResult") {
   const textResult = msg.content
-    .filter((c) => c.type === "text")
-    .map((c) => (c as { text: string }).text)
+    .filter(c => c.type === "text")
+    .map(c => (c as { text: string }).text)
     .join("\n");
-  const hasImages = msg.content.some((c) => c.type === "image");
+  const hasImages = msg.content.some(c => c.type === "image");
   const normalized = normalizeResponsesToolCallId(msg.toolCallId);

   const hasText = textResult.length > 0;
@@ -692,7 +692,7 @@ function convertMessages(model: Model<"openai-codex-responses">, context: Contex
 function convertTools(
   tools: Tool[],
 ): Array<{ type: "function"; name: string; description: string; parameters: Record<string, unknown>; strict: null }> {
-  return tools.map((tool) => ({
+  return tools.map(tool => ({
     type: "function",
     name: tool.name,
     description: tool.description,

@@ -1,5 +1,15 @@
-import { calculateCost } from "@oh-my-pi/pi-ai/models";
-import { getEnvApiKey } from "@oh-my-pi/pi-ai/stream";
+import OpenAI from "openai";
+import type {
+  ChatCompletionAssistantMessageParam,
+  ChatCompletionChunk,
+  ChatCompletionContentPart,
+  ChatCompletionContentPartImage,
+  ChatCompletionContentPartText,
+  ChatCompletionMessageParam,
+  ChatCompletionToolMessageParam,
+} from "openai/resources/chat/completions";
+import { calculateCost } from "../models";
+import { getEnvApiKey } from "../stream";
 import type {
   AssistantMessage,
   Context,
@@ -14,21 +24,11 @@ import type {
   Tool,
   ToolCall,
   ToolResultMessage,
-} from "@oh-my-pi/pi-ai/types";
-import { AssistantMessageEventStream } from "@oh-my-pi/pi-ai/utils/event-stream";
-import { parseStreamingJson } from "@oh-my-pi/pi-ai/utils/json-parse";
-import { formatErrorMessageWithRetryAfter } from "@oh-my-pi/pi-ai/utils/retry-after";
-import { sanitizeSurrogates } from "@oh-my-pi/pi-ai/utils/sanitize-unicode";
-import OpenAI from "openai";
-import type {
-  ChatCompletionAssistantMessageParam,
-  ChatCompletionChunk,
-  ChatCompletionContentPart,
-  ChatCompletionContentPartImage,
-  ChatCompletionContentPartText,
-  ChatCompletionMessageParam,
-  ChatCompletionToolMessageParam,
-} from "openai/resources/chat/completions";
+} from "../types";
+import { AssistantMessageEventStream } from "../utils/event-stream";
+import { parseStreamingJson } from "../utils/json-parse";
+import { formatErrorMessageWithRetryAfter } from "../utils/retry-after";
+import { sanitizeSurrogates } from "../utils/sanitize-unicode";
 import { transformMessages } from "./transform-messages";

 /**
@@ -61,7 +61,7 @@ function hasToolHistory(messages: Message[]): boolean {
     return true;
   }
   if (msg.role === "assistant") {
-    if (msg.content.some((block) => block.type === "toolCall")) {
+    if (msg.content.some(block => block.type === "toolCall")) {
       return true;
     }
   }
@@ -287,7 +287,7 @@ export const streamOpenAICompletions: StreamFunction<"openai-completions"> = (
 for (const detail of reasoningDetails) {
   if (detail.type === "reasoning.encrypted" && detail.id && detail.data) {
     const matchingToolCall = output.content.find(
-      (b) => b.type === "toolCall" && b.id === detail.id,
+      b => b.type === "toolCall" && b.id === detail.id,
     ) as ToolCall | undefined;
     if (matchingToolCall) {
       matchingToolCall.thoughtSignature = JSON.stringify(detail);
@@ -356,12 +356,12 @@ function createClient(
 headers["Openai-Intent"] = "conversation-edits";

 // Copilot requires this header when sending images
-const hasImages = messages.some((msg) => {
+const hasImages = messages.some(msg => {
   if (msg.role === "user" && Array.isArray(msg.content)) {
-    return msg.content.some((c) => c.type === "image");
+    return msg.content.some(c => c.type === "image");
   }
   if (msg.role === "toolResult" && Array.isArray(msg.content)) {
-    return msg.content.some((c) => c.type === "image");
+    return msg.content.some(c => c.type === "image");
   }
   return false;
 });
@@ -516,7 +516,7 @@ export function convertMessages(
   }
 });
 const filteredContent = !model.input.includes("image")
-  ? content.filter((c) => c.type !== "image_url")
+  ? content.filter(c => c.type !== "image_url")
   : content;
 if (filteredContent.length === 0) continue;
 params.push({
@@ -531,29 +531,29 @@
   content: compat.requiresAssistantAfterToolResult ? "" : null,
 };

-const textBlocks = msg.content.filter((b) => b.type === "text") as TextContent[];
+const textBlocks = msg.content.filter(b => b.type === "text") as TextContent[];
 // Filter out empty text blocks to avoid API validation errors
-const nonEmptyTextBlocks = textBlocks.filter((b) => b.text && b.text.trim().length > 0);
+const nonEmptyTextBlocks = textBlocks.filter(b => b.text && b.text.trim().length > 0);
 if (nonEmptyTextBlocks.length > 0) {
   // GitHub Copilot requires assistant content as a string, not an array.
   // Sending as array causes Claude models to re-answer all previous prompts.
   if (model.provider === "github-copilot") {
-    assistantMsg.content = nonEmptyTextBlocks.map((b) => sanitizeSurrogates(b.text)).join("");
+    assistantMsg.content = nonEmptyTextBlocks.map(b => sanitizeSurrogates(b.text)).join("");
   } else {
-    assistantMsg.content = nonEmptyTextBlocks.map((b) => {
+    assistantMsg.content = nonEmptyTextBlocks.map(b => {
       return { type: "text", text: sanitizeSurrogates(b.text) };
     });
   }
 }

 // Handle thinking blocks
-const thinkingBlocks = msg.content.filter((b) => b.type === "thinking") as ThinkingContent[];
+const thinkingBlocks = msg.content.filter(b => b.type === "thinking") as ThinkingContent[];
 // Filter out empty thinking blocks to avoid API validation errors
-const nonEmptyThinkingBlocks = thinkingBlocks.filter((b) => b.thinking && b.thinking.trim().length > 0);
+const nonEmptyThinkingBlocks = thinkingBlocks.filter(b => b.thinking && b.thinking.trim().length > 0);
 if (nonEmptyThinkingBlocks.length > 0) {
   if (compat.requiresThinkingAsText) {
     // Convert thinking blocks to plain text (no tags to avoid model mimicking them)
-    const thinkingText = nonEmptyThinkingBlocks.map((b) => b.thinking).join("\n\n");
+    const thinkingText = nonEmptyThinkingBlocks.map(b => b.thinking).join("\n\n");
     const textContent = assistantMsg.content as Array<{ type: "text"; text: string }> | null;
     if (textContent) {
       textContent.unshift({ type: "text", text: thinkingText });
@@ -564,14 +564,14 @@ export function convertMessages(
     // Use the signature from the first thinking block if available (for llama.cpp server + gpt-oss)
     const signature = nonEmptyThinkingBlocks[0].thinkingSignature;
     if (signature && signature.length > 0) {
-      (assistantMsg as any)[signature] = nonEmptyThinkingBlocks.map((b) => b.thinking).join("\n");
+      (assistantMsg as any)[signature] = nonEmptyThinkingBlocks.map(b => b.thinking).join("\n");
     }
   }
 }

-const toolCalls = msg.content.filter((b) => b.type === "toolCall") as ToolCall[];
+const toolCalls = msg.content.filter(b => b.type === "toolCall") as ToolCall[];
 if (toolCalls.length > 0) {
-  assistantMsg.tool_calls = toolCalls.map((tc) => ({
+  assistantMsg.tool_calls = toolCalls.map(tc => ({
     id: normalizeMistralToolId(tc.id, compat.requiresMistralToolIds),
     type: "function" as const,
     function: {
@@ -580,8 +580,8 @@ export function convertMessages(
     },
   }));
   const reasoningDetails = toolCalls
-    .filter((tc) => tc.thoughtSignature)
-    .map((tc) => {
+    .filter(tc => tc.thoughtSignature)
+    .map(tc => {
       try {
         return JSON.parse(tc.thoughtSignature!);
       } catch {
@@ -616,10 +616,10 @@ export function convertMessages(

 // Extract text and image content
 const textResult = toolMsg.content
-  .filter((c) => c.type === "text")
-  .map((c) => (c as any).text)
+  .filter(c => c.type === "text")
+  .map(c => (c as any).text)
   .join("\n");
-const hasImages = toolMsg.content.some((c) => c.type === "image");
+const hasImages = toolMsg.content.some(c => c.type === "image");

 // Always send tool result with text (or placeholder if only images)
 const hasText = textResult.length > 0;
@@ -683,7 +683,7 @@ export function convertMessages(
 }

 function convertTools(tools: Tool[]): OpenAI.Chat.Completions.ChatCompletionTool[] {
-  return tools.map((tool) => ({
+  return tools.map(tool => ({
     type: "function",
     function: {
       name: tool.name,

@@ -1,22 +1,3 @@
-import { calculateCost } from "@oh-my-pi/pi-ai/models";
-import { getEnvApiKey } from "@oh-my-pi/pi-ai/stream";
-import type {
-  Api,
-  AssistantMessage,
-  Context,
-  Model,
-  StopReason,
-  StreamFunction,
-  StreamOptions,
-  TextContent,
-  ThinkingContent,
-  Tool,
-  ToolCall,
-} from "@oh-my-pi/pi-ai/types";
-import { AssistantMessageEventStream } from "@oh-my-pi/pi-ai/utils/event-stream";
-import { parseStreamingJson } from "@oh-my-pi/pi-ai/utils/json-parse";
-import { formatErrorMessageWithRetryAfter } from "@oh-my-pi/pi-ai/utils/retry-after";
-import { sanitizeSurrogates } from "@oh-my-pi/pi-ai/utils/sanitize-unicode";
 import OpenAI from "openai";
 import type {
   Tool as OpenAITool,
@@ -29,6 +10,25 @@ import type {
   ResponseOutputMessage,
   ResponseReasoningItem,
 } from "openai/resources/responses/responses";
+import { calculateCost } from "../models";
+import { getEnvApiKey } from "../stream";
+import type {
+  Api,
+  AssistantMessage,
+  Context,
+  Model,
+  StopReason,
+  StreamFunction,
+  StreamOptions,
+  TextContent,
+  ThinkingContent,
+  Tool,
+  ToolCall,
+} from "../types";
+import { AssistantMessageEventStream } from "../utils/event-stream";
+import { parseStreamingJson } from "../utils/json-parse";
+import { formatErrorMessageWithRetryAfter } from "../utils/retry-after";
+import { sanitizeSurrogates } from "../utils/sanitize-unicode";
 import { transformMessages } from "./transform-messages";

 /** Fast deterministic hash to shorten long strings */
@@ -245,7 +245,7 @@ export const streamOpenAIResponses: StreamFunction<"openai-responses"> = (
 const item = event.item;

 if (item.type === "reasoning" && currentBlock && currentBlock.type === "thinking") {
-  currentBlock.thinking = item.summary?.map((s) => s.text).join("\n\n") || "";
+  currentBlock.thinking = item.summary?.map(s => s.text).join("\n\n") || "";
   currentBlock.thinkingSignature = JSON.stringify(item);
   stream.push({
     type: "thinking_end",
@@ -255,7 +255,7 @@ export const streamOpenAIResponses: StreamFunction<"openai-responses"> = (
   });
   currentBlock = null;
 } else if (item.type === "message" && currentBlock && currentBlock.type === "text") {
-  currentBlock.text = item.content.map((c) => (c.type === "output_text" ? c.text : c.refusal)).join("");
+  currentBlock.text = item.content.map(c => (c.type === "output_text" ? c.text : c.refusal)).join("");
   currentBlock.textSignature = item.id;
   stream.push({
     type: "text_end",
@@ -293,7 +293,7 @@ export const streamOpenAIResponses: StreamFunction<"openai-responses"> = (
   calculateCost(model, output.usage);
   // Map status to stop reason
   output.stopReason = mapStopReason(response?.status);
-  if (output.content.some((b) => b.type === "toolCall") && output.stopReason === "stop") {
+  if (output.content.some(b => b.type === "toolCall") && output.stopReason === "stop") {
     output.stopReason = "toolUse";
   }
 }
@@ -358,12 +358,12 @@ function createClient(
 headers["Openai-Intent"] = "conversation-edits";

 // Copilot requires this header when sending images
-const hasImages = messages.some((msg) => {
+const hasImages = messages.some(msg => {
   if (msg.role === "user" && Array.isArray(msg.content)) {
-    return msg.content.some((c) => c.type === "image");
+    return msg.content.some(c => c.type === "image");
   }
   if (msg.role === "toolResult" && Array.isArray(msg.content)) {
-    return msg.content.some((c) => c.type === "image");
+    return msg.content.some(c => c.type === "image");
   }
   return false;
 });
@@ -491,9 +491,9 @@ function convertMessages(
 });
 // Filter out images if model doesn't support them, and empty text blocks
 let filteredContent = !model.input.includes("image")
-  ? content.filter((c) => c.type !== "input_image")
+  ? content.filter(c => c.type !== "input_image")
   : content;
-filteredContent = filteredContent.filter((c) => {
+filteredContent = filteredContent.filter(c => {
   if (c.type === "input_text") {
     return c.text.trim().length > 0;
   }
@@ -567,10 +567,10 @@ function convertMessages(
 } else if (msg.role === "toolResult") {
   // Extract text and image content
   const textResult = msg.content
-    .filter((c) => c.type === "text")
-    .map((c) => (c as any).text)
+    .filter(c => c.type === "text")
+    .map(c => (c as any).text)
     .join("\n");
-  const hasImages = msg.content.some((c) => c.type === "image");
+  const hasImages = msg.content.some(c => c.type === "image");
   const normalized = normalizeResponsesToolCallId(msg.toolCallId);
   if (strictResponsesPairing && !knownCallIds.has(normalized.callId)) {
     continue;
@@ -618,7 +618,7 @@ function convertMessages(
 }

 function convertTools(tools: Tool[]): OpenAITool[] {
-  return tools.map((tool) => ({
+  return tools.map(tool => ({
     type: "function",
     name: tool.name,
     description: tool.description,

@@ -1,4 +1,4 @@
-import type { Api, AssistantMessage, Message, Model, ToolCall, ToolResultMessage } from "@oh-my-pi/pi-ai/types";
+import type { Api, AssistantMessage, Message, Model, ToolCall, ToolResultMessage } from "../types";

 /**
  * Normalize tool call ID for cross-provider compatibility.
@@ -98,7 +98,7 @@ export function transformMessages<TApi extends Api>(messages: Message[], model:
 const needsToolCallIdNormalization = targetRequiresStrictIds && (crossProviderSwitch || copilotCrossApiSwitch);

 // Transform message from different provider/model
-const transformedContent = assistantMsg.content.flatMap((block) => {
+const transformedContent = assistantMsg.content.flatMap(block => {
   if (block.type === "thinking") {
     // Skip empty thinking blocks, convert others to plain text
     if (!block.thinking || block.thinking.trim() === "") return [];
@@ -173,7 +173,7 @@ export function transformMessages<TApi extends Api>(messages: Message[], model:

 const assistantMsg = msg as AssistantMessage;
 const isErroredAssistant = assistantMsg.stopReason === "error" || assistantMsg.stopReason === "aborted";
-const toolCalls = assistantMsg.content.filter((b) => b.type === "toolCall") as ToolCall[];
+const toolCalls = assistantMsg.content.filter(b => b.type === "toolCall") as ToolCall[];

 result.push(msg);

package/src/storage.ts CHANGED
@@ -4,9 +4,9 @@
  */

 import { Database } from "bun:sqlite";
-import { chmodSync, existsSync, mkdirSync } from "node:fs";
-import { homedir } from "node:os";
-import { dirname, join } from "node:path";
+import * as fs from "node:fs/promises";
+import * as os from "node:os";
+import * as path from "node:path";
 import type { OAuthCredentials } from "./utils/oauth/types";

 type AuthCredential = { type: "api_key"; key: string } | ({ type: "oauth" } & OAuthCredentials);
@@ -24,7 +24,7 @@ type AuthRow = {
  * Get the agent config directory (e.g., ~/.omp/agent/)
  */
 function getAgentDir(): string {
-  const configDir = process.env.OMP_CODING_AGENT_DIR || join(homedir(), ".omp", "agent");
+  const configDir = process.env.OMP_CODING_AGENT_DIR || path.join(os.homedir(), ".omp", "agent");
   return configDir;
 }

@@ -32,7 +32,7 @@ function getAgentDir(): string {
  * Get path to agent.db
  */
 function getAgentDbPath(): string {
-  return join(getAgentDir(), "agent.db");
+  return path.join(getAgentDir(), "agent.db");
 }

 function serializeCredential(credential: AuthCredential): { credentialType: string; data: string } | null {
@@ -79,6 +79,8 @@ function deserializeCredential(row: AuthRow): AuthCredential | null {

 /**
  * Simple storage class for CLI auth credentials.
+ *
+ * Use `CliAuthStorage.create()` to instantiate (async initialization).
  */
 export class CliAuthStorage {
   private db: Database;
@@ -87,20 +89,8 @@ export class CliAuthStorage {
   private listAllStmt: ReturnType<Database["prepare"]>;
   private deleteByProviderStmt: ReturnType<Database["prepare"]>;

-  constructor(dbPath: string = getAgentDbPath()) {
-    // Ensure directory exists with secure permissions
-    const dir = dirname(dbPath);
-    if (!existsSync(dir)) {
-      mkdirSync(dir, { recursive: true, mode: 0o700 });
-    }
-
-    this.db = new Database(dbPath);
-    // Harden database file permissions to prevent credential leakage
-    try {
-      chmodSync(dbPath, 0o600);
-    } catch {
-      // Ignore chmod failures (e.g., Windows)
-    }
+  private constructor(db: Database) {
+    this.db = db;
     this.initializeSchema();

     this.insertStmt = this.db.prepare(
@@ -111,6 +101,26 @@ export class CliAuthStorage {
     this.deleteByProviderStmt = this.db.prepare("DELETE FROM auth_credentials WHERE provider = ?");
   }

+  static async create(dbPath: string = getAgentDbPath()): Promise<CliAuthStorage> {
+    const dir = path.dirname(dbPath);
+    const dirExists = await fs
+      .stat(dir)
+      .then(s => s.isDirectory())
+      .catch(() => false);
+    if (!dirExists) {
+      await fs.mkdir(dir, { recursive: true, mode: 0o700 });
+    }
+
+    const db = new Database(dbPath);
+    try {
+      await fs.chmod(dbPath, 0o600);
+    } catch {
+      // Ignore chmod failures (e.g., Windows)
+    }
+
+    return new CliAuthStorage(db);
+  }
+
   private initializeSchema(): void {
     this.db.exec(`
       PRAGMA journal_mode=WAL;
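
Note: with this change, constructing the storage becomes async. A minimal usage sketch; only `create()` and the private constructor appear in the hunks above, so anything else about the caller is an assumption:

    // 8.1.0: const storage = new CliAuthStorage();
    // 8.2.0: the constructor is private, so callers go through the async
    // factory, which awaits fs.mkdir()/fs.chmod() from node:fs/promises
    // before opening the SQLite database.
    const storage = await CliAuthStorage.create();
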
package/src/stream.ts CHANGED
@@ -1,6 +1,6 @@
-import { existsSync } from "node:fs";
-import { homedir } from "node:os";
-import { join } from "node:path";
+import * as fs from "node:fs";
+import * as os from "node:os";
+import * as path from "node:path";
 import { supportsXhigh } from "./models";
 import { type BedrockOptions, streamBedrock } from "./providers/amazon-bedrock";
 import { type AnthropicOptions, streamAnthropic } from "./providers/anthropic";
@@ -34,10 +34,10 @@ function hasVertexAdcCredentials(): boolean {
 if (cachedVertexAdcCredentialsExists === null) {
   const gacPath = process.env.GOOGLE_APPLICATION_CREDENTIALS;
   if (gacPath) {
-    cachedVertexAdcCredentialsExists = existsSync(gacPath);
+    cachedVertexAdcCredentialsExists = fs.existsSync(gacPath);
   } else {
-    cachedVertexAdcCredentialsExists = existsSync(
-      join(homedir(), ".config", "gcloud", "application_default_credentials.json"),
+    cachedVertexAdcCredentialsExists = fs.existsSync(
+      path.join(os.homedir(), ".config", "gcloud", "application_default_credentials.json"),
     );
   }
 }
package/src/types.ts CHANGED
@@ -1,3 +1,4 @@
+import type { TSchema } from "@sinclair/typebox";
 import type { BedrockOptions } from "./providers/amazon-bedrock";
 import type { AnthropicOptions } from "./providers/anthropic";
 import type { CursorOptions } from "./providers/cursor";
@@ -237,8 +238,6 @@ export interface CursorExecHandlers {
   onToolResult?: CursorToolResultHandler;
 }

-import type { TSchema } from "@sinclair/typebox";
-
 export interface Tool<TParameters extends TSchema = TSchema> {
   name: string;
   description: string;

@@ -7,7 +7,7 @@ import type {
   UsageReport,
   UsageStatus,
   UsageWindow,
-} from "@oh-my-pi/pi-ai/usage";
+} from "../usage";

 const DEFAULT_ENDPOINT = "https://api.anthropic.com/api/oauth";
 const DEFAULT_CACHE_TTL_MS = 60_000;
@@ -241,10 +241,10 @@ function buildCacheKey(params: UsageFetchParams): string {

 function resolveCacheExpiry(now: number, limits: UsageLimit[]): number {
   const earliestReset = limits
-    .map((limit) => limit.window?.resetsAt)
+    .map(limit => limit.window?.resetsAt)
     .filter((value): value is number => typeof value === "number" && Number.isFinite(value))
     .reduce((min, value) => (min === undefined ? value : Math.min(min, value)), undefined as number | undefined);
-  const exhausted = limits.some((limit) => limit.status === "exhausted");
+  const exhausted = limits.some(limit => limit.status === "exhausted");
   if (earliestReset === undefined) return now + DEFAULT_CACHE_TTL_MS;
   if (exhausted) return earliestReset;
   return Math.min(now + DEFAULT_CACHE_TTL_MS, earliestReset);
@@ -351,5 +351,5 @@ async function fetchClaudeUsage(params: UsageFetchParams, ctx: UsageFetchContext
 export const claudeUsageProvider: UsageProvider = {
   id: "anthropic",
   fetchUsage: fetchClaudeUsage,
-  supports: (params) => params.provider === "anthropic" && params.credential.type === "oauth",
+  supports: params => params.provider === "anthropic" && params.credential.type === "oauth",
 };

@@ -3,7 +3,6 @@
  *
  * Normalizes Copilot quota usage into the shared UsageReport schema.
  */
-
 import type {
   UsageAmount,
   UsageCacheEntry,
@@ -14,7 +13,7 @@ import type {
   UsageReport,
   UsageStatus,
   UsageWindow,
-} from "@oh-my-pi/pi-ai/usage";
+} from "../usage";

 const COPILOT_HEADERS = {
   "User-Agent": "GitHubCopilotChat/0.35.0",
@@ -316,7 +315,7 @@ function normalizeBillingUsage(data: BillingUsageResponse): UsageLimit[] {
 };

 const premiumItems = data.usageItems.filter(
-  (item) => item.sku === "Copilot Premium Request" || item.sku.includes("Premium"),
+  item => item.sku === "Copilot Premium Request" || item.sku.includes("Premium"),
 );
 const totalUsed = premiumItems.reduce((sum, item) => sum + item.grossQuantity, 0);
 const totalLimit = premiumItems.reduce((sum, item) => sum + (item.limit ?? 0), 0) || undefined;
@@ -359,7 +358,7 @@ function normalizeBillingUsage(data: BillingUsageResponse): UsageLimit[] {
 function resolveCacheTtl(now: number, report: UsageReport | null): UsageCacheEntry["expiresAt"] {
   if (!report) return now + DEFAULT_CACHE_TTL_MS;
   const resetInMs = report.limits
-    .map((limit) => limit.window?.resetInMs)
+    .map(limit => limit.window?.resetInMs)
     .find((value): value is number => typeof value === "number" && Number.isFinite(value));
   if (!resetInMs || resetInMs <= 0) return now + DEFAULT_CACHE_TTL_MS;
   return now + Math.min(MAX_CACHE_TTL_MS, resetInMs);

@@ -7,8 +7,8 @@ import type {
   UsageReport,
   UsageStatus,
   UsageWindow,
-} from "@oh-my-pi/pi-ai/usage";
-import { refreshAntigravityToken } from "@oh-my-pi/pi-ai/utils/oauth/google-antigravity";
+} from "../usage";
+import { refreshAntigravityToken } from "../utils/oauth/google-antigravity";

 interface AntigravityQuotaInfo {
   remainingFraction?: number;
@@ -214,5 +214,5 @@ async function fetchAntigravityUsage(params: UsageFetchParams, ctx: UsageFetchCo
 export const antigravityUsageProvider: UsageProvider = {
   id: "google-antigravity",
   fetchUsage: fetchAntigravityUsage,
-  supports: (params) => params.provider === "google-antigravity",
+  supports: params => params.provider === "google-antigravity",
 };
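
Note: the Claude, Copilot, and Antigravity usage modules above all register through the same UsageProvider shape. A minimal sketch of that pattern, assuming only the fields visible in these hunks (id, fetchUsage, supports); the provider id and fetch function below are hypothetical:

    // Hypothetical provider mirroring claudeUsageProvider / antigravityUsageProvider.
    const exampleUsageProvider: UsageProvider = {
      id: "example",
      fetchUsage: fetchExampleUsage, // assumed async (params, ctx) => usage report
      // supports() gates which provider/credential combinations this module handles.
      supports: params => params.provider === "example",
    };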