@jeffreycao/copilot-api 1.9.4 → 1.9.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -62,7 +62,7 @@ Compared with routing everything through plain Chat Completions compatibility, t
  - **Opencode OAuth Support**: Use opencode GitHub Copilot authentication by setting the `COPILOT_API_OAUTH_APP=opencode` environment variable or using the `--oauth-app=opencode` command line option.
  - **GitHub Enterprise Support**: Connect to GHE.com by setting the `COPILOT_API_ENTERPRISE_URL` environment variable (e.g., `company.ghe.com`) or using the `--enterprise-url=company.ghe.com` command line option.
  - **Custom Data Directory**: Change the default data directory (where tokens and config are stored) by setting the `COPILOT_API_HOME` environment variable or using the `--api-home=/path/to/dir` command line option.
- - **Multi-Provider Anthropic Proxy Routes**: Add global provider configs and call external Anthropic-compatible APIs via `/:provider/v1/messages` and `/:provider/v1/models`.
+ - **Multi-Provider Messages Proxy Routes**: Add global provider configs and call external Anthropic-compatible or OpenAI-compatible APIs via `/:provider/v1/messages` and `/:provider/v1/models`.
  - **Accurate Claude Token Counting**: Optionally forward `/v1/messages/count_tokens` requests for Claude models to Anthropic's free token counting endpoint for exact counts instead of GPT tokenizer estimation.
  - **GPT Context Management**: Configurable context compaction for long-running GPT conversations via `responsesApiContextManagementModels`, reducing unnecessary premium requests when approaching token limits. See [Configuration](#configuration-configjson) for details.
 
@@ -309,6 +309,21 @@ The following command line options are available for the `start` command:
            "topP": 0.95
          }
        }
+     },
+     "dashscope": {
+       "type": "openai-compatible",
+       "enabled": true,
+       "baseUrl": "https://dashscope.aliyuncs.com/compatible-mode",
+       "apiKey": "sk-your-dashscope-key",
+       "models": {
+         "qwen3.6-plus": {
+           "extraBody": {
+             "preserve_thinking": true
+           },
+           "supportPdf": false,
+           "toolContentSupportType": []
+         }
+       }
+     }
      }
    },
    "extraPrompts": {
@@ -332,16 +347,19 @@ The following command line options are available for the `start` command:
  ```
  - **auth.apiKeys:** API keys used for request authentication. Supports multiple keys for rotation. Requests can authenticate with either `x-api-key: <key>` or `Authorization: Bearer <key>`. If empty or omitted, authentication is disabled.
  - **extraPrompts:** Map of `model -> prompt` appended to the first system prompt when translating Anthropic-style requests to Copilot. Use this to inject guardrails or guidance per model. Missing default entries are auto-added without overwriting your custom prompts. The built-in prompts for `gpt-5.3-codex` and `gpt-5.4` enable phase-aware commentary, which lets the model emit a short user-facing progress update before tools or deeper reasoning.
- - **providers:** Global upstream provider map. Each provider key (for example `custom`) becomes a route prefix (`/custom/v1/messages`). Currently only `type: "anthropic"` is supported.
+ - **providers:** Global upstream provider map. Each provider key (for example `custom`) becomes a route prefix (`/custom/v1/messages`). Supports `type: "anthropic"` and `type: "openai-compatible"`.
    - `enabled` defaults to `true` if omitted.
-   - `baseUrl` should be the provider API base URL without the trailing `/v1/messages`.
+   - `baseUrl` should be the provider API base URL without the final endpoint segment. For Anthropic providers, omit `/v1/messages`; for OpenAI-compatible providers, omit `/v1/chat/completions`.
    - `apiKey` is used as the upstream credential value.
-   - `authType` (optional): Controls how `apiKey` is sent upstream. Supports `x-api-key` (default) and `authorization`. When set to `authorization`, the proxy sends `Authorization: Bearer <apiKey>`.
+   - `authType` (optional): Controls how `apiKey` is sent upstream. Supports `x-api-key` and `authorization`. Anthropic providers default to `x-api-key`; OpenAI-compatible providers default to `authorization`. When set to `authorization`, the proxy sends `Authorization: Bearer <apiKey>`.
    - `adjustInputTokens` (optional): When `true`, the proxy adjusts `input_tokens` in the usage response by subtracting `cache_read_input_tokens` and `cache_creation_input_tokens`.
    - `models` (optional): Per-model configuration map. Each key is a model ID (matching the model name in requests), and the value is:
      - `temperature` (optional): Default temperature value used when the request does not specify one.
      - `topP` (optional): Default top_p value used when the request does not specify one.
      - `topK` (optional): Default top_k value used when the request does not specify one.
+     - `extraBody` (optional): Dynamic fields merged into the upstream request body for that model; request body fields with the same name take precedence (see the sketch after this list). OpenAI-compatible providers can use this for fields such as `enable_thinking`, `preserve_thinking`, and `reasoning_effort`.
+     - `supportPdf` (optional): Controls whether the model supports PDF/document content. Defaults to `false`, in which case unsupported PDFs are converted to a text notice. Set it to `true` to send PDF/document blocks as OpenAI Chat Completions file parts.
+     - `toolContentSupportType` (optional): Tool result content capabilities for that model, as an array drawn from `array`, `image`, and `pdf`. Provider routes default to string-only tool content when omitted. If `supportPdf` is `true` but this list does not include `pdf`, file parts in tool results are moved to user-role messages. This default applies only to provider routes; the main Copilot flow continues to support array and image tool content but not PDF.
  - **smallModel:** Fallback model used for tool-less warmup messages (e.g., Claude Code probe requests) to avoid spending premium requests; defaults to `gpt-5-mini`.
  - **responsesApiContextManagementModels:** List of GPT model IDs that should receive Responses API `context_management` compaction instructions. This defaults to `[]`, so you need to opt in explicitly. A good starting point is `["gpt-5-mini", "gpt-5.3-codex", "gpt-5.4-mini", "gpt-5.4"]`. When enabled, the request includes `context_management` in the body and keeps only the latest compaction carrier on follow-up turns. The actual compaction is handled server-side and appears to begin when usage approaches roughly 90% of the model's `maxPromptTokens`, which makes it especially useful for long-running tasks without consuming additional premium requests. In practice, the effective `compact_threshold` also appears to be fixed on the server side, so changing it in this project does not currently alter compaction behavior. At the moment, this optimization is intended for GPT-family models only.
  - **modelReasoningEfforts:** Per-model `reasoning.effort` sent to the Copilot Responses API. Allowed values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. If a model isn't listed, `high` is used by default.
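
For reference, the merge order documented for `extraBody` and the per-model defaults can be pictured as a plain object spread. This is only an illustrative sketch of that documented behavior; the type and function names here are hypothetical, not the package's actual implementation:

```typescript
// Hypothetical sketch: model-level defaults and extraBody are applied
// first, then the incoming request body, so request fields with the
// same name take precedence.
interface ModelConfig {
  temperature?: number
  topP?: number
  topK?: number
  extraBody?: Record<string, unknown>
}

function buildUpstreamBody(
  requestBody: Record<string, unknown>,
  model: ModelConfig,
): Record<string, unknown> {
  return {
    temperature: model.temperature, // used only when the request omits it
    top_p: model.topP,
    top_k: model.topK,
    ...model.extraBody, // e.g. { preserve_thinking: true }
    ...requestBody, // explicit request fields win
  }
}
```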
@@ -392,8 +410,8 @@ These endpoints are designed to be compatible with the Anthropic Messages API.
  | -------------------------------- | ------ | ------------------------------------------------------------ |
  | `POST /v1/messages` | `POST` | Creates a model response for a given conversation. |
  | `POST /v1/messages/count_tokens` | `POST` | Calculates the number of tokens for a given set of messages. |
- | `POST /:provider/v1/messages` | `POST` | Proxies Anthropic Messages API to the configured provider. |
- | `GET /:provider/v1/models` | `GET` | Proxies Anthropic Models API to the configured provider. |
+ | `POST /:provider/v1/messages` | `POST` | Proxies Anthropic Messages requests to the configured Anthropic or OpenAI-compatible provider. |
+ | `GET /:provider/v1/models` | `GET` | Proxies model listing requests to the configured provider. |
  | `POST /:provider/v1/messages/count_tokens` | `POST` | Calculates tokens locally for provider route requests. |
 
  ### Usage Monitoring Endpoints
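
As a quick usage sketch for the provider routes: assuming the server runs on `http://localhost:4141` (as in the curl examples elsewhere in the README), that an `auth.apiKeys` entry is configured, and that the `dashscope` provider from the example config exists, a call might look like this (illustrative only, adjust to your setup):

```typescript
// The provider key in the URL ("dashscope") must match a key under
// "providers" in config.json, and the model must be one the provider serves.
const res = await fetch("http://localhost:4141/dashscope/v1/messages", {
  method: "POST",
  headers: {
    "content-type": "application/json",
    "x-api-key": "<one of your auth.apiKeys>",
  },
  body: JSON.stringify({
    model: "qwen3.6-plus",
    max_tokens: 256,
    messages: [{ role: "user", content: "Hello" }],
  }),
})
console.log(await res.json())
```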
package/README.zh-CN.md CHANGED
@@ -62,7 +62,7 @@
  - **Opencode OAuth Support**: Enable opencode GitHub Copilot authentication by setting the `COPILOT_API_OAUTH_APP=opencode` environment variable or using the `--oauth-app=opencode` command line option.
  - **GitHub Enterprise Support**: Connect to GHE.com by setting the `COPILOT_API_ENTERPRISE_URL` environment variable (e.g., `company.ghe.com`) or the `--enterprise-url=company.ghe.com` command line option.
  - **Custom Data Directory**: Change the default data directory (where tokens and config are stored) via the `COPILOT_API_HOME` environment variable or the `--api-home=/path/to/dir` command line option.
- - **Multi-Provider Anthropic Proxy Routes**: Add global provider configs and call external Anthropic-compatible APIs via `/:provider/v1/messages` and `/:provider/v1/models`.
+ - **Multi-Provider Messages Proxy Routes**: Add global provider configs and call external Anthropic-compatible or OpenAI-compatible APIs via `/:provider/v1/messages` and `/:provider/v1/models`.
  - **Accurate Claude Token Counting**: Optionally forward `/v1/messages/count_tokens` requests for Claude models to Anthropic's free token counting endpoint for exact counts instead of GPT tokenizer estimation.
  - **GPT Context Management**: Configurable context compaction for long-context GPT conversations via `responsesApiContextManagementModels`, reducing unnecessary premium requests when approaching token limits. See [Configuration](#configuration-configjson) for details.
 
@@ -313,6 +313,21 @@ Copilot API now uses a subcommand structure; the main commands include:
            "topP": 0.95
          }
        }
+     },
+     "dashscope": {
+       "type": "openai-compatible",
+       "enabled": true,
+       "baseUrl": "https://dashscope.aliyuncs.com/compatible-mode",
+       "apiKey": "sk-your-dashscope-key",
+       "models": {
+         "qwen3.6-plus": {
+           "extraBody": {
+             "preserve_thinking": true
+           },
+           "supportPdf": false,
+           "toolContentSupportType": []
+         }
+       }
+     }
      }
    },
    "extraPrompts": {
@@ -336,16 +351,19 @@ Copilot API now uses a subcommand structure; the main commands include:
  ```
  - **auth.apiKeys:** API keys used for request authentication. Multiple keys are supported for rotation. Requests can authenticate with either `x-api-key: <key>` or `Authorization: Bearer <key>`. If empty or omitted, authentication is disabled.
  - **extraPrompts:** A `model -> prompt` map. When translating Anthropic-style requests for Copilot, the prompt is appended to the first system prompt. Use this to inject guardrails or guidance per model. Missing default entries are auto-added without overwriting your custom prompts. The built-in prompts for `gpt-5.3-codex` and `gpt-5.4` enable phase-aware commentary, letting the model emit a short user-facing progress update before tool calls or deeper reasoning.
- - **providers:** Global upstream provider map. Each provider key (for example `custom`) becomes a route prefix (`/custom/v1/messages`). Currently only `type: "anthropic"` is supported.
+ - **providers:** Global upstream provider map. Each provider key (for example `custom`) becomes a route prefix (`/custom/v1/messages`). Supports `type: "anthropic"` and `type: "openai-compatible"`.
    - `enabled`: Optional; defaults to `true` if omitted.
-   - `baseUrl`: The provider API base URL, without the trailing `/v1/messages`.
+   - `baseUrl`: The provider API base URL, without the final endpoint. Anthropic providers omit `/v1/messages`; OpenAI-compatible providers omit `/v1/chat/completions`.
    - `apiKey`: Used as the upstream credential value.
-   - `authType`: Optional; controls how `apiKey` is sent upstream. Supports `x-api-key` (default) and `authorization`. When set to `authorization`, the proxy sends `Authorization: Bearer <apiKey>`.
+   - `authType`: Optional; controls how `apiKey` is sent upstream. Supports `x-api-key` and `authorization`. Anthropic providers default to `x-api-key`; OpenAI-compatible providers default to `authorization`. When set to `authorization`, the proxy sends `Authorization: Bearer <apiKey>`.
    - `adjustInputTokens`: Optional; when `true`, the proxy subtracts `cache_read_input_tokens` and `cache_creation_input_tokens` from `input_tokens` in the usage response.
    - `models`: Optional; a per-model-ID configuration map. Each key is the model name used in requests, and the value supports:
      - `temperature`: Optional; default temperature used when the request does not specify one.
      - `topP`: Optional; default `top_p` used when the request does not specify one.
      - `topK`: Optional; default `top_k` used when the request does not specify one.
+     - `extraBody`: Optional; dynamic fields merged into the upstream request body for that model; fields explicitly set in the request body take precedence. OpenAI-compatible providers can use it for fields such as `enable_thinking`, `preserve_thinking`, and `reasoning_effort`.
+     - `supportPdf`: Optional; controls whether the model supports PDF/document content. Defaults to `false`, in which case PDFs are converted to a text notice; when `true`, PDF/document blocks are sent as OpenAI Chat Completions file parts.
+     - `toolContentSupportType`: Optional; tool result content capabilities for the model, as an array drawn from `array`, `image`, and `pdf`. When not configured on the provider side, only string tool content is sent. If `supportPdf` is `true` but `pdf` is not included here, file parts in tool results are converted to user-role messages. The main Copilot flow does not use this provider default and still handles array + image tool content without PDF support.
  - **smallModel:** Fallback model for tool-less warmup messages (e.g., Claude Code probe requests), used to avoid consuming premium requests; defaults to `gpt-5-mini`.
  - **responsesApiContextManagementModels:** List of GPT model IDs that should receive Responses API `context_management` compaction instructions. Defaults to `[]`, so you must opt in explicitly. A good starting point is `["gpt-5-mini", "gpt-5.3-codex", "gpt-5.4-mini", "gpt-5.4"]`. When enabled, the request body carries `context_management`, and follow-up turns keep only the latest compaction carrier. The actual compaction is done server-side and appears to start when usage approaches roughly 90% of the model's `maxPromptTokens`, making it well suited to long-running tasks without consuming extra premium requests. In practice, `compact_threshold` also appears to be fixed server-side, so changing it in this project currently does not alter compaction behavior. This optimization currently targets GPT-family models only.
  - **modelReasoningEfforts:** Per-model `reasoning.effort` sent to the Copilot Responses API. Allowed values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. If a model is not configured, `high` is used by default.
@@ -396,8 +414,8 @@ curl http://localhost:4141/v1/models \
  | --- | --- | --- |
  | `POST /v1/messages` | `POST` | Creates a model response for a given conversation. |
  | `POST /v1/messages/count_tokens` | `POST` | Calculates the number of tokens for a set of messages. |
- | `POST /:provider/v1/messages` | `POST` | Proxies Anthropic Messages API requests to the configured provider. |
- | `GET /:provider/v1/models` | `GET` | Proxies Anthropic Models API requests to the configured provider. |
+ | `POST /:provider/v1/messages` | `POST` | Proxies Anthropic Messages requests to the configured Anthropic or OpenAI-compatible provider. |
+ | `GET /:provider/v1/models` | `GET` | Proxies model listing requests to the configured provider. |
  | `POST /:provider/v1/messages/count_tokens` | `POST` | Calculates tokens locally for provider route requests. |
 
  ### Usage Monitoring Endpoints
package/dist/config-BQvWqYh_.js → package/dist/config-BJt9unC0.js RENAMED
@@ -140,11 +140,15 @@ function getReasoningEffortForModel(model) {
  function normalizeProviderBaseUrl(url) {
    return url.trim().replace(/\/+$/u, "");
  }
- function resolveProviderAuthType(providerName, authType) {
-   if (authType === void 0 || authType === "x-api-key") return "x-api-key";
+ function getDefaultProviderAuthType(providerType) {
+   return providerType === "openai-compatible" ? "authorization" : "x-api-key";
+ }
+ function resolveProviderAuthType(providerName, authType, providerType) {
+   if (authType === void 0) return getDefaultProviderAuthType(providerType);
+   if (authType === "x-api-key") return "x-api-key";
    if (authType === "authorization") return authType;
-   consola.warn(`Provider ${providerName} has invalid authType '${authType}', falling back to x-api-key`);
-   return "x-api-key";
+   consola.warn(`Provider ${providerName} has invalid authType '${authType}', falling back to ${getDefaultProviderAuthType(providerType)}`);
+   return getDefaultProviderAuthType(providerType);
  }
  function getProviderConfig(name) {
    const providerName = name.trim();
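
Taken together, the new helpers make the `authType` default depend on the provider type. Some illustrative calls against the code above (results follow directly from the branches shown):

```typescript
resolveProviderAuthType("custom", undefined, "anthropic")             // "x-api-key"
resolveProviderAuthType("dashscope", undefined, "openai-compatible")  // "authorization"
resolveProviderAuthType("custom", "bearer", "anthropic")              // warns, returns "x-api-key"
```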
@@ -153,13 +157,13 @@ function getProviderConfig(name) {
    if (!provider) return null;
    if (provider.enabled === false) return null;
    const type = provider.type ?? "anthropic";
-   if (type !== "anthropic") {
-     consola.warn(`Provider ${providerName} is ignored because only anthropic type is supported`);
+   if (type !== "anthropic" && type !== "openai-compatible") {
+     consola.warn(`Provider ${providerName} is ignored because type '${type}' is not supported`);
      return null;
    }
    const baseUrl = normalizeProviderBaseUrl(provider.baseUrl ?? "");
    const apiKey = (provider.apiKey ?? "").trim();
-   const authType = resolveProviderAuthType(providerName, provider.authType);
+   const authType = resolveProviderAuthType(providerName, provider.authType, type);
    if (!baseUrl || !apiKey) {
      consola.warn(`Provider ${providerName} is enabled but missing baseUrl or apiKey`);
      return null;
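
With the `dashscope` example from the README, `getProviderConfig("dashscope")` would now resolve to roughly the following shape (illustrative, assuming that exact config):

```typescript
const resolved = {
  name: "dashscope",
  type: "openai-compatible",
  baseUrl: "https://dashscope.aliyuncs.com/compatible-mode",
  apiKey: "sk-your-dashscope-key",
  authType: "authorization", // new default for openai-compatible providers
  models: {
    "qwen3.6-plus": {
      extraBody: { preserve_thinking: true },
      supportPdf: false,
      toolContentSupportType: [],
    },
  },
  adjustInputTokens: undefined,
}
```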
@@ -189,4 +193,4 @@ function getClaudeTokenMultiplier() {
 
  //#endregion
  export { getAnthropicApiKey, getClaudeTokenMultiplier, getConfig, getExtraPromptForModel, getProviderConfig, getReasoningEffortForModel, getSmallModel, isMessagesApiEnabled, isResponsesApiContextManagementModel, isResponsesApiWebSearchEnabled, mergeConfigWithDefaults };
- //# sourceMappingURL=config-BQvWqYh_.js.map
+ //# sourceMappingURL=config-BJt9unC0.js.map
package/dist/config-BQvWqYh_.js.map → package/dist/config-BJt9unC0.js.map RENAMED
@@ -1 +1 @@
- {"version":3,"file":"config-BQvWqYh_.js","names":["defaultConfig: AppConfig","cachedConfig: AppConfig | null"],"sources":["../src/lib/config.ts"],"sourcesContent":["import consola from \"consola\"\nimport fs from \"node:fs\"\n\nimport { PATHS } from \"./paths\"\n\nexport interface AppConfig {\n auth?: {\n apiKeys?: Array<string>\n }\n providers?: Record<string, ProviderConfig>\n extraPrompts?: Record<string, string>\n smallModel?: string\n responsesApiContextManagementModels?: Array<string>\n modelReasoningEfforts?: Record<\n string,\n \"none\" | \"minimal\" | \"low\" | \"medium\" | \"high\" | \"xhigh\"\n >\n useFunctionApplyPatch?: boolean\n useMessagesApi?: boolean\n anthropicApiKey?: string\n useResponsesApiWebSearch?: boolean\n claudeTokenMultiplier?: number\n}\n\nexport interface ModelConfig {\n temperature?: number\n topP?: number\n topK?: number\n}\n\nexport type ProviderAuthType = \"authorization\" | \"x-api-key\"\n\nexport interface ProviderConfig {\n type?: string\n enabled?: boolean\n baseUrl?: string\n apiKey?: string\n authType?: ProviderAuthType\n models?: Record<string, ModelConfig>\n adjustInputTokens?: boolean\n}\n\nexport interface ResolvedProviderConfig {\n name: string\n type: \"anthropic\"\n baseUrl: string\n apiKey: string\n authType: ProviderAuthType\n models?: Record<string, ModelConfig>\n adjustInputTokens?: boolean\n}\n\nconst gpt5ExplorationPrompt = `## Exploration and reading files\n- **Think first.** Before any tool call, decide ALL files/resources you will need.\n- **Batch everything.** If you need multiple files (even from different places), read them together.\n- **multi_tool_use.parallel** Use multi_tool_use.parallel to parallelize tool calls and only this.\n- **Only make sequential calls if you truly cannot know the next file without seeing a result first.**\n- **Workflow:** (a) plan all needed reads → (b) issue one parallel batch → (c) analyze results → (d) repeat if new, unpredictable reads arise.`\n\nconst gpt5CommentaryPrompt = `# Working with the user\n\nYou interact with the user through a terminal. You have 2 ways of communicating with the users: \n- Share intermediary updates in \\`commentary\\` channel. \n- After you have completed all your work, send a message to the \\`final\\` channel. \n\n## Intermediary updates\n\n- Intermediary updates go to the \\`commentary\\` channel.\n- User updates are short updates while you are working, they are NOT final answers.\n- You use 1-2 sentence user updates to communicate progress and new information to the user as you are doing work.\n- Do not begin responses with conversational interjections or meta commentary. Avoid openers such as acknowledgements (“Done —”, “Got it”, “Great question, ”) or framing phrases.\n- You provide user updates frequently, every 20s.\n- Before exploring or doing substantial work, you start with a user update acknowledging the request and explaining your first step. You should include your understanding of the user request and explain what you will do. Avoid commenting on the request or using starters such as \"Got it -\" or \"Understood -\" etc.\n- When exploring, e.g. searching, reading files, you provide user updates as you go, every 20s, explaining what context you are gathering and what you've learned. 
Vary your sentence structure when providing these updates to avoid sounding repetitive - in particular, don't start each sentence the same way.\n- After you have sufficient context, and the work is substantial, you provide a longer plan (this is the only user update that may be longer than 2 sentences and can contain formatting).\n- Before performing file edits of any kind, you provide updates explaining what edits you are making.\n- As you are thinking, you very frequently provide updates even if not taking any actions, informing the user of your progress. You interrupt your thinking and send multiple updates in a row if thinking for more than 100 words.\n- Tone of your updates MUST match your personality.`\n\nconst defaultConfig: AppConfig = {\n auth: {\n apiKeys: [],\n },\n providers: {},\n extraPrompts: {\n \"gpt-5-mini\": gpt5ExplorationPrompt,\n \"gpt-5.3-codex\": gpt5CommentaryPrompt,\n \"gpt-5.4-mini\": gpt5CommentaryPrompt,\n \"gpt-5.4\": gpt5CommentaryPrompt,\n \"gpt-5.5\": gpt5CommentaryPrompt,\n },\n smallModel: \"gpt-5-mini\",\n responsesApiContextManagementModels: [],\n modelReasoningEfforts: {\n \"gpt-5-mini\": \"low\",\n \"gpt-5.3-codex\": \"xhigh\",\n \"gpt-5.4-mini\": \"xhigh\",\n \"gpt-5.4\": \"xhigh\",\n \"gpt-5.5\": \"xhigh\",\n },\n useFunctionApplyPatch: true,\n useMessagesApi: true,\n useResponsesApiWebSearch: true,\n}\n\nlet cachedConfig: AppConfig | null = null\n\nfunction ensureConfigFile(): void {\n try {\n fs.accessSync(PATHS.CONFIG_PATH, fs.constants.R_OK | fs.constants.W_OK)\n } catch {\n fs.mkdirSync(PATHS.APP_DIR, { recursive: true })\n fs.writeFileSync(\n PATHS.CONFIG_PATH,\n `${JSON.stringify(defaultConfig, null, 2)}\\n`,\n \"utf8\",\n )\n try {\n fs.chmodSync(PATHS.CONFIG_PATH, 0o600)\n } catch {\n return\n }\n }\n}\n\nfunction readConfigFromDisk(): AppConfig {\n ensureConfigFile()\n try {\n const raw = fs.readFileSync(PATHS.CONFIG_PATH, \"utf8\")\n if (!raw.trim()) {\n fs.writeFileSync(\n PATHS.CONFIG_PATH,\n `${JSON.stringify(defaultConfig, null, 2)}\\n`,\n \"utf8\",\n )\n return defaultConfig\n }\n return JSON.parse(raw) as AppConfig\n } catch (error) {\n consola.error(\"Failed to read config file, using default config\", error)\n return defaultConfig\n }\n}\n\nfunction mergeDefaultConfig(config: AppConfig): {\n mergedConfig: AppConfig\n changed: boolean\n} {\n const extraPrompts = config.extraPrompts ?? {}\n const defaultExtraPrompts = defaultConfig.extraPrompts ?? {}\n const modelReasoningEfforts = config.modelReasoningEfforts ?? {}\n const defaultModelReasoningEfforts = defaultConfig.modelReasoningEfforts ?? 
{}\n\n const missingExtraPromptModels = Object.keys(defaultExtraPrompts).filter(\n (model) => !Object.hasOwn(extraPrompts, model),\n )\n\n const missingReasoningEffortModels = Object.keys(\n defaultModelReasoningEfforts,\n ).filter((model) => !Object.hasOwn(modelReasoningEfforts, model))\n\n const hasExtraPromptChanges = missingExtraPromptModels.length > 0\n const hasReasoningEffortChanges = missingReasoningEffortModels.length > 0\n\n if (!hasExtraPromptChanges && !hasReasoningEffortChanges) {\n return { mergedConfig: config, changed: false }\n }\n\n return {\n mergedConfig: {\n ...config,\n extraPrompts: {\n ...defaultExtraPrompts,\n ...extraPrompts,\n },\n modelReasoningEfforts: {\n ...defaultModelReasoningEfforts,\n ...modelReasoningEfforts,\n },\n },\n changed: true,\n }\n}\n\nexport function mergeConfigWithDefaults(): AppConfig {\n const config = readConfigFromDisk()\n const { mergedConfig, changed } = mergeDefaultConfig(config)\n\n if (changed) {\n try {\n fs.writeFileSync(\n PATHS.CONFIG_PATH,\n `${JSON.stringify(mergedConfig, null, 2)}\\n`,\n \"utf8\",\n )\n } catch (writeError) {\n consola.warn(\n \"Failed to write merged extraPrompts to config file\",\n writeError,\n )\n }\n }\n\n cachedConfig = mergedConfig\n return mergedConfig\n}\n\nexport function getConfig(): AppConfig {\n cachedConfig ??= readConfigFromDisk()\n return cachedConfig\n}\n\nexport function getExtraPromptForModel(model: string): string {\n const config = getConfig()\n return config.extraPrompts?.[model] ?? \"\"\n}\n\nexport function getSmallModel(): string {\n const config = getConfig()\n return config.smallModel ?? \"gpt-5-mini\"\n}\n\nexport function getResponsesApiContextManagementModels(): Array<string> {\n const config = getConfig()\n return (\n config.responsesApiContextManagementModels\n ?? defaultConfig.responsesApiContextManagementModels\n ?? []\n )\n}\n\nexport function isResponsesApiContextManagementModel(model: string): boolean {\n return getResponsesApiContextManagementModels().includes(model)\n}\n\nexport function getReasoningEffortForModel(\n model: string,\n): \"none\" | \"minimal\" | \"low\" | \"medium\" | \"high\" | \"xhigh\" {\n const config = getConfig()\n return config.modelReasoningEfforts?.[model] ?? \"high\"\n}\n\nexport function normalizeProviderBaseUrl(url: string): string {\n return url.trim().replace(/\\/+$/u, \"\")\n}\n\nfunction resolveProviderAuthType(\n providerName: string,\n authType: string | undefined,\n): ProviderAuthType {\n if (authType === undefined || authType === \"x-api-key\") {\n return \"x-api-key\"\n }\n\n if (authType === \"authorization\") {\n return authType\n }\n\n consola.warn(\n `Provider ${providerName} has invalid authType '${authType}', falling back to x-api-key`,\n )\n return \"x-api-key\"\n}\n\nexport function getProviderConfig(name: string): ResolvedProviderConfig | null {\n const providerName = name.trim()\n if (!providerName) {\n return null\n }\n\n const config = getConfig()\n const provider = config.providers?.[providerName]\n if (!provider) {\n return null\n }\n\n if (provider.enabled === false) {\n return null\n }\n\n const type = provider.type ?? \"anthropic\"\n if (type !== \"anthropic\") {\n consola.warn(\n `Provider ${providerName} is ignored because only anthropic type is supported`,\n )\n return null\n }\n\n const baseUrl = normalizeProviderBaseUrl(provider.baseUrl ?? \"\")\n const apiKey = (provider.apiKey ?? 
\"\").trim()\n const authType = resolveProviderAuthType(providerName, provider.authType)\n if (!baseUrl || !apiKey) {\n consola.warn(\n `Provider ${providerName} is enabled but missing baseUrl or apiKey`,\n )\n return null\n }\n\n return {\n name: providerName,\n type,\n baseUrl,\n apiKey,\n authType,\n models: provider.models,\n adjustInputTokens: provider.adjustInputTokens,\n }\n}\n\nexport function listEnabledProviders(): Array<string> {\n const config = getConfig()\n const providerNames = Object.keys(config.providers ?? {})\n return providerNames.filter((name) => getProviderConfig(name) !== null)\n}\n\nexport function isMessagesApiEnabled(): boolean {\n const config = getConfig()\n return config.useMessagesApi ?? true\n}\n\nexport function getAnthropicApiKey(): string | undefined {\n const config = getConfig()\n return config.anthropicApiKey ?? process.env.ANTHROPIC_API_KEY ?? undefined\n}\n\nexport function isResponsesApiWebSearchEnabled(): boolean {\n const config = getConfig()\n return config.useResponsesApiWebSearch ?? true\n}\n\nexport function getClaudeTokenMultiplier(): number {\n const config = getConfig()\n return config.claudeTokenMultiplier ?? 1.15\n}\n"],"mappings":";;;;;AAoDA,MAAM,wBAAwB;;;;;;AAO9B,MAAM,uBAAuB;;;;;;;;;;;;;;;;;;;AAoB7B,MAAMA,gBAA2B;CAC/B,MAAM,EACJ,SAAS,EAAE,EACZ;CACD,WAAW,EAAE;CACb,cAAc;EACZ,cAAc;EACd,iBAAiB;EACjB,gBAAgB;EAChB,WAAW;EACX,WAAW;EACZ;CACD,YAAY;CACZ,qCAAqC,EAAE;CACvC,uBAAuB;EACrB,cAAc;EACd,iBAAiB;EACjB,gBAAgB;EAChB,WAAW;EACX,WAAW;EACZ;CACD,uBAAuB;CACvB,gBAAgB;CAChB,0BAA0B;CAC3B;AAED,IAAIC,eAAiC;AAErC,SAAS,mBAAyB;AAChC,KAAI;AACF,KAAG,WAAW,MAAM,aAAa,GAAG,UAAU,OAAO,GAAG,UAAU,KAAK;SACjE;AACN,KAAG,UAAU,MAAM,SAAS,EAAE,WAAW,MAAM,CAAC;AAChD,KAAG,cACD,MAAM,aACN,GAAG,KAAK,UAAU,eAAe,MAAM,EAAE,CAAC,KAC1C,OACD;AACD,MAAI;AACF,MAAG,UAAU,MAAM,aAAa,IAAM;UAChC;AACN;;;;AAKN,SAAS,qBAAgC;AACvC,mBAAkB;AAClB,KAAI;EACF,MAAM,MAAM,GAAG,aAAa,MAAM,aAAa,OAAO;AACtD,MAAI,CAAC,IAAI,MAAM,EAAE;AACf,MAAG,cACD,MAAM,aACN,GAAG,KAAK,UAAU,eAAe,MAAM,EAAE,CAAC,KAC1C,OACD;AACD,UAAO;;AAET,SAAO,KAAK,MAAM,IAAI;UACf,OAAO;AACd,UAAQ,MAAM,oDAAoD,MAAM;AACxE,SAAO;;;AAIX,SAAS,mBAAmB,QAG1B;CACA,MAAM,eAAe,OAAO,gBAAgB,EAAE;CAC9C,MAAM,sBAAsB,cAAc,gBAAgB,EAAE;CAC5D,MAAM,wBAAwB,OAAO,yBAAyB,EAAE;CAChE,MAAM,+BAA+B,cAAc,yBAAyB,EAAE;CAE9E,MAAM,2BAA2B,OAAO,KAAK,oBAAoB,CAAC,QAC/D,UAAU,CAAC,OAAO,OAAO,cAAc,MAAM,CAC/C;CAED,MAAM,+BAA+B,OAAO,KAC1C,6BACD,CAAC,QAAQ,UAAU,CAAC,OAAO,OAAO,uBAAuB,MAAM,CAAC;CAEjE,MAAM,wBAAwB,yBAAyB,SAAS;CAChE,MAAM,4BAA4B,6BAA6B,SAAS;AAExE,KAAI,CAAC,yBAAyB,CAAC,0BAC7B,QAAO;EAAE,cAAc;EAAQ,SAAS;EAAO;AAGjD,QAAO;EACL,cAAc;GACZ,GAAG;GACH,cAAc;IACZ,GAAG;IACH,GAAG;IACJ;GACD,uBAAuB;IACrB,GAAG;IACH,GAAG;IACJ;GACF;EACD,SAAS;EACV;;AAGH,SAAgB,0BAAqC;CACnD,MAAM,SAAS,oBAAoB;CACnC,MAAM,EAAE,cAAc,YAAY,mBAAmB,OAAO;AAE5D,KAAI,QACF,KAAI;AACF,KAAG,cACD,MAAM,aACN,GAAG,KAAK,UAAU,cAAc,MAAM,EAAE,CAAC,KACzC,OACD;UACM,YAAY;AACnB,UAAQ,KACN,sDACA,WACD;;AAIL,gBAAe;AACf,QAAO;;AAGT,SAAgB,YAAuB;AACrC,kBAAiB,oBAAoB;AACrC,QAAO;;AAGT,SAAgB,uBAAuB,OAAuB;AAE5D,QADe,WAAW,CACZ,eAAe,UAAU;;AAGzC,SAAgB,gBAAwB;AAEtC,QADe,WAAW,CACZ,cAAc;;AAG9B,SAAgB,yCAAwD;AAEtE,QADe,WAAW,CAEjB,uCACJ,cAAc,uCACd,EAAE;;AAIT,SAAgB,qCAAqC,OAAwB;AAC3E,QAAO,wCAAwC,CAAC,SAAS,MAAM;;AAGjE,SAAgB,2BACd,OAC0D;AAE1D,QADe,WAAW,CACZ,wBAAwB,UAAU;;AAGlD,SAAgB,yBAAyB,KAAqB;AAC5D,QAAO,IAAI,MAAM,CAAC,QAAQ,SAAS,GAAG;;AAGxC,SAAS,wBACP,cACA,UACkB;AAClB,KAAI,aAAa,UAAa,aAAa,YACzC,QAAO;AAGT,KAAI,aAAa,gBACf,QAAO;AAGT,SAAQ,KACN,YAAY,aAAa,yBAAyB,SAAS,8BAC5D;AACD,QAAO;;AAGT,SAAgB,kBAAkB,MAA6C;CAC7E,MAAM,eAAe,KAAK,MAAM;AAChC,KAAI,CAAC,aACH,QAAO;CAIT,MAAM,WADS,WAAW,CACF,YAAY;AACpC,
KAAI,CAAC,SACH,QAAO;AAGT,KAAI,SAAS,YAAY,MACvB,QAAO;CAGT,MAAM,OAAO,SAAS,QAAQ;AAC9B,KAAI,SAAS,aAAa;AACxB,UAAQ,KACN,YAAY,aAAa,sDAC1B;AACD,SAAO;;CAGT,MAAM,UAAU,yBAAyB,SAAS,WAAW,GAAG;CAChE,MAAM,UAAU,SAAS,UAAU,IAAI,MAAM;CAC7C,MAAM,WAAW,wBAAwB,cAAc,SAAS,SAAS;AACzE,KAAI,CAAC,WAAW,CAAC,QAAQ;AACvB,UAAQ,KACN,YAAY,aAAa,2CAC1B;AACD,SAAO;;AAGT,QAAO;EACL,MAAM;EACN;EACA;EACA;EACA;EACA,QAAQ,SAAS;EACjB,mBAAmB,SAAS;EAC7B;;AASH,SAAgB,uBAAgC;AAE9C,QADe,WAAW,CACZ,kBAAkB;;AAGlC,SAAgB,qBAAyC;AAEvD,QADe,WAAW,CACZ,mBAAmB,QAAQ,IAAI,qBAAqB;;AAGpE,SAAgB,iCAA0C;AAExD,QADe,WAAW,CACZ,4BAA4B;;AAG5C,SAAgB,2BAAmC;AAEjD,QADe,WAAW,CACZ,yBAAyB"}
+ {"version":3,"file":"config-BJt9unC0.js","names":["defaultConfig: AppConfig","cachedConfig: AppConfig | null"],"sources":["../src/lib/config.ts"],"sourcesContent":["import consola from \"consola\"\nimport fs from \"node:fs\"\n\nimport { PATHS } from \"./paths\"\n\nexport interface AppConfig {\n auth?: {\n apiKeys?: Array<string>\n }\n providers?: Record<string, ProviderConfig>\n extraPrompts?: Record<string, string>\n smallModel?: string\n responsesApiContextManagementModels?: Array<string>\n modelReasoningEfforts?: Record<\n string,\n \"none\" | \"minimal\" | \"low\" | \"medium\" | \"high\" | \"xhigh\"\n >\n useFunctionApplyPatch?: boolean\n useMessagesApi?: boolean\n anthropicApiKey?: string\n useResponsesApiWebSearch?: boolean\n claudeTokenMultiplier?: number\n}\n\nexport interface ModelConfig {\n temperature?: number\n topP?: number\n topK?: number\n extraBody?: Record<string, unknown>\n supportPdf?: boolean\n toolContentSupportType?: Array<ToolContentSupportType>\n}\n\nexport type ProviderAuthType = \"authorization\" | \"x-api-key\"\nexport type ProviderType = \"anthropic\" | \"openai-compatible\"\nexport type ToolContentSupportType = \"array\" | \"image\" | \"pdf\"\n\nexport interface ProviderConfig {\n type?: string\n enabled?: boolean\n baseUrl?: string\n apiKey?: string\n authType?: ProviderAuthType\n models?: Record<string, ModelConfig>\n adjustInputTokens?: boolean\n}\n\nexport interface ResolvedProviderConfig {\n name: string\n type: ProviderType\n baseUrl: string\n apiKey: string\n authType: ProviderAuthType\n models?: Record<string, ModelConfig>\n adjustInputTokens?: boolean\n}\n\nconst gpt5ExplorationPrompt = `## Exploration and reading files\n- **Think first.** Before any tool call, decide ALL files/resources you will need.\n- **Batch everything.** If you need multiple files (even from different places), read them together.\n- **multi_tool_use.parallel** Use multi_tool_use.parallel to parallelize tool calls and only this.\n- **Only make sequential calls if you truly cannot know the next file without seeing a result first.**\n- **Workflow:** (a) plan all needed reads → (b) issue one parallel batch → (c) analyze results → (d) repeat if new, unpredictable reads arise.`\n\nconst gpt5CommentaryPrompt = `# Working with the user\n\nYou interact with the user through a terminal. You have 2 ways of communicating with the users: \n- Share intermediary updates in \\`commentary\\` channel. \n- After you have completed all your work, send a message to the \\`final\\` channel. \n\n## Intermediary updates\n\n- Intermediary updates go to the \\`commentary\\` channel.\n- User updates are short updates while you are working, they are NOT final answers.\n- You use 1-2 sentence user updates to communicate progress and new information to the user as you are doing work.\n- Do not begin responses with conversational interjections or meta commentary. Avoid openers such as acknowledgements (“Done —”, “Got it”, “Great question, ”) or framing phrases.\n- You provide user updates frequently, every 20s.\n- Before exploring or doing substantial work, you start with a user update acknowledging the request and explaining your first step. You should include your understanding of the user request and explain what you will do. Avoid commenting on the request or using starters such as \"Got it -\" or \"Understood -\" etc.\n- When exploring, e.g. searching, reading files, you provide user updates as you go, every 20s, explaining what context you are gathering and what you've learned. 
Vary your sentence structure when providing these updates to avoid sounding repetitive - in particular, don't start each sentence the same way.\n- After you have sufficient context, and the work is substantial, you provide a longer plan (this is the only user update that may be longer than 2 sentences and can contain formatting).\n- Before performing file edits of any kind, you provide updates explaining what edits you are making.\n- As you are thinking, you very frequently provide updates even if not taking any actions, informing the user of your progress. You interrupt your thinking and send multiple updates in a row if thinking for more than 100 words.\n- Tone of your updates MUST match your personality.`\n\nconst defaultConfig: AppConfig = {\n auth: {\n apiKeys: [],\n },\n providers: {},\n extraPrompts: {\n \"gpt-5-mini\": gpt5ExplorationPrompt,\n \"gpt-5.3-codex\": gpt5CommentaryPrompt,\n \"gpt-5.4-mini\": gpt5CommentaryPrompt,\n \"gpt-5.4\": gpt5CommentaryPrompt,\n \"gpt-5.5\": gpt5CommentaryPrompt,\n },\n smallModel: \"gpt-5-mini\",\n responsesApiContextManagementModels: [],\n modelReasoningEfforts: {\n \"gpt-5-mini\": \"low\",\n \"gpt-5.3-codex\": \"xhigh\",\n \"gpt-5.4-mini\": \"xhigh\",\n \"gpt-5.4\": \"xhigh\",\n \"gpt-5.5\": \"xhigh\",\n },\n useFunctionApplyPatch: true,\n useMessagesApi: true,\n useResponsesApiWebSearch: true,\n}\n\nlet cachedConfig: AppConfig | null = null\n\nfunction ensureConfigFile(): void {\n try {\n fs.accessSync(PATHS.CONFIG_PATH, fs.constants.R_OK | fs.constants.W_OK)\n } catch {\n fs.mkdirSync(PATHS.APP_DIR, { recursive: true })\n fs.writeFileSync(\n PATHS.CONFIG_PATH,\n `${JSON.stringify(defaultConfig, null, 2)}\\n`,\n \"utf8\",\n )\n try {\n fs.chmodSync(PATHS.CONFIG_PATH, 0o600)\n } catch {\n return\n }\n }\n}\n\nfunction readConfigFromDisk(): AppConfig {\n ensureConfigFile()\n try {\n const raw = fs.readFileSync(PATHS.CONFIG_PATH, \"utf8\")\n if (!raw.trim()) {\n fs.writeFileSync(\n PATHS.CONFIG_PATH,\n `${JSON.stringify(defaultConfig, null, 2)}\\n`,\n \"utf8\",\n )\n return defaultConfig\n }\n return JSON.parse(raw) as AppConfig\n } catch (error) {\n consola.error(\"Failed to read config file, using default config\", error)\n return defaultConfig\n }\n}\n\nfunction mergeDefaultConfig(config: AppConfig): {\n mergedConfig: AppConfig\n changed: boolean\n} {\n const extraPrompts = config.extraPrompts ?? {}\n const defaultExtraPrompts = defaultConfig.extraPrompts ?? {}\n const modelReasoningEfforts = config.modelReasoningEfforts ?? {}\n const defaultModelReasoningEfforts = defaultConfig.modelReasoningEfforts ?? 
{}\n\n const missingExtraPromptModels = Object.keys(defaultExtraPrompts).filter(\n (model) => !Object.hasOwn(extraPrompts, model),\n )\n\n const missingReasoningEffortModels = Object.keys(\n defaultModelReasoningEfforts,\n ).filter((model) => !Object.hasOwn(modelReasoningEfforts, model))\n\n const hasExtraPromptChanges = missingExtraPromptModels.length > 0\n const hasReasoningEffortChanges = missingReasoningEffortModels.length > 0\n\n if (!hasExtraPromptChanges && !hasReasoningEffortChanges) {\n return { mergedConfig: config, changed: false }\n }\n\n return {\n mergedConfig: {\n ...config,\n extraPrompts: {\n ...defaultExtraPrompts,\n ...extraPrompts,\n },\n modelReasoningEfforts: {\n ...defaultModelReasoningEfforts,\n ...modelReasoningEfforts,\n },\n },\n changed: true,\n }\n}\n\nexport function mergeConfigWithDefaults(): AppConfig {\n const config = readConfigFromDisk()\n const { mergedConfig, changed } = mergeDefaultConfig(config)\n\n if (changed) {\n try {\n fs.writeFileSync(\n PATHS.CONFIG_PATH,\n `${JSON.stringify(mergedConfig, null, 2)}\\n`,\n \"utf8\",\n )\n } catch (writeError) {\n consola.warn(\n \"Failed to write merged extraPrompts to config file\",\n writeError,\n )\n }\n }\n\n cachedConfig = mergedConfig\n return mergedConfig\n}\n\nexport function getConfig(): AppConfig {\n cachedConfig ??= readConfigFromDisk()\n return cachedConfig\n}\n\nexport function getExtraPromptForModel(model: string): string {\n const config = getConfig()\n return config.extraPrompts?.[model] ?? \"\"\n}\n\nexport function getSmallModel(): string {\n const config = getConfig()\n return config.smallModel ?? \"gpt-5-mini\"\n}\n\nexport function getResponsesApiContextManagementModels(): Array<string> {\n const config = getConfig()\n return (\n config.responsesApiContextManagementModels\n ?? defaultConfig.responsesApiContextManagementModels\n ?? []\n )\n}\n\nexport function isResponsesApiContextManagementModel(model: string): boolean {\n return getResponsesApiContextManagementModels().includes(model)\n}\n\nexport function getReasoningEffortForModel(\n model: string,\n): \"none\" | \"minimal\" | \"low\" | \"medium\" | \"high\" | \"xhigh\" {\n const config = getConfig()\n return config.modelReasoningEfforts?.[model] ?? \"high\"\n}\n\nexport function normalizeProviderBaseUrl(url: string): string {\n return url.trim().replace(/\\/+$/u, \"\")\n}\n\nfunction getDefaultProviderAuthType(\n providerType: ProviderType,\n): ProviderAuthType {\n return providerType === \"openai-compatible\" ? \"authorization\" : \"x-api-key\"\n}\n\nexport function resolveProviderAuthType(\n providerName: string,\n authType: string | undefined,\n providerType: ProviderType,\n): ProviderAuthType {\n if (authType === undefined) {\n return getDefaultProviderAuthType(providerType)\n }\n\n if (authType === \"x-api-key\") {\n return \"x-api-key\"\n }\n\n if (authType === \"authorization\") {\n return authType\n }\n\n consola.warn(\n `Provider ${providerName} has invalid authType '${authType}', falling back to ${getDefaultProviderAuthType(providerType)}`,\n )\n return getDefaultProviderAuthType(providerType)\n}\n\nexport function getProviderConfig(name: string): ResolvedProviderConfig | null {\n const providerName = name.trim()\n if (!providerName) {\n return null\n }\n\n const config = getConfig()\n const provider = config.providers?.[providerName]\n if (!provider) {\n return null\n }\n\n if (provider.enabled === false) {\n return null\n }\n\n const type = provider.type ?? 
\"anthropic\"\n if (type !== \"anthropic\" && type !== \"openai-compatible\") {\n consola.warn(\n `Provider ${providerName} is ignored because type '${type}' is not supported`,\n )\n return null\n }\n\n const baseUrl = normalizeProviderBaseUrl(provider.baseUrl ?? \"\")\n const apiKey = (provider.apiKey ?? \"\").trim()\n const authType = resolveProviderAuthType(\n providerName,\n provider.authType,\n type,\n )\n if (!baseUrl || !apiKey) {\n consola.warn(\n `Provider ${providerName} is enabled but missing baseUrl or apiKey`,\n )\n return null\n }\n\n return {\n name: providerName,\n type,\n baseUrl,\n apiKey,\n authType,\n models: provider.models,\n adjustInputTokens: provider.adjustInputTokens,\n }\n}\n\nexport function listEnabledProviders(): Array<string> {\n const config = getConfig()\n const providerNames = Object.keys(config.providers ?? {})\n return providerNames.filter((name) => getProviderConfig(name) !== null)\n}\n\nexport function isMessagesApiEnabled(): boolean {\n const config = getConfig()\n return config.useMessagesApi ?? true\n}\n\nexport function getAnthropicApiKey(): string | undefined {\n const config = getConfig()\n return config.anthropicApiKey ?? process.env.ANTHROPIC_API_KEY ?? undefined\n}\n\nexport function isResponsesApiWebSearchEnabled(): boolean {\n const config = getConfig()\n return config.useResponsesApiWebSearch ?? true\n}\n\nexport function getClaudeTokenMultiplier(): number {\n const config = getConfig()\n return config.claudeTokenMultiplier ?? 1.15\n}\n"],"mappings":";;;;;AAyDA,MAAM,wBAAwB;;;;;;AAO9B,MAAM,uBAAuB;;;;;;;;;;;;;;;;;;;AAoB7B,MAAMA,gBAA2B;CAC/B,MAAM,EACJ,SAAS,EAAE,EACZ;CACD,WAAW,EAAE;CACb,cAAc;EACZ,cAAc;EACd,iBAAiB;EACjB,gBAAgB;EAChB,WAAW;EACX,WAAW;EACZ;CACD,YAAY;CACZ,qCAAqC,EAAE;CACvC,uBAAuB;EACrB,cAAc;EACd,iBAAiB;EACjB,gBAAgB;EAChB,WAAW;EACX,WAAW;EACZ;CACD,uBAAuB;CACvB,gBAAgB;CAChB,0BAA0B;CAC3B;AAED,IAAIC,eAAiC;AAErC,SAAS,mBAAyB;AAChC,KAAI;AACF,KAAG,WAAW,MAAM,aAAa,GAAG,UAAU,OAAO,GAAG,UAAU,KAAK;SACjE;AACN,KAAG,UAAU,MAAM,SAAS,EAAE,WAAW,MAAM,CAAC;AAChD,KAAG,cACD,MAAM,aACN,GAAG,KAAK,UAAU,eAAe,MAAM,EAAE,CAAC,KAC1C,OACD;AACD,MAAI;AACF,MAAG,UAAU,MAAM,aAAa,IAAM;UAChC;AACN;;;;AAKN,SAAS,qBAAgC;AACvC,mBAAkB;AAClB,KAAI;EACF,MAAM,MAAM,GAAG,aAAa,MAAM,aAAa,OAAO;AACtD,MAAI,CAAC,IAAI,MAAM,EAAE;AACf,MAAG,cACD,MAAM,aACN,GAAG,KAAK,UAAU,eAAe,MAAM,EAAE,CAAC,KAC1C,OACD;AACD,UAAO;;AAET,SAAO,KAAK,MAAM,IAAI;UACf,OAAO;AACd,UAAQ,MAAM,oDAAoD,MAAM;AACxE,SAAO;;;AAIX,SAAS,mBAAmB,QAG1B;CACA,MAAM,eAAe,OAAO,gBAAgB,EAAE;CAC9C,MAAM,sBAAsB,cAAc,gBAAgB,EAAE;CAC5D,MAAM,wBAAwB,OAAO,yBAAyB,EAAE;CAChE,MAAM,+BAA+B,cAAc,yBAAyB,EAAE;CAE9E,MAAM,2BAA2B,OAAO,KAAK,oBAAoB,CAAC,QAC/D,UAAU,CAAC,OAAO,OAAO,cAAc,MAAM,CAC/C;CAED,MAAM,+BAA+B,OAAO,KAC1C,6BACD,CAAC,QAAQ,UAAU,CAAC,OAAO,OAAO,uBAAuB,MAAM,CAAC;CAEjE,MAAM,wBAAwB,yBAAyB,SAAS;CAChE,MAAM,4BAA4B,6BAA6B,SAAS;AAExE,KAAI,CAAC,yBAAyB,CAAC,0BAC7B,QAAO;EAAE,cAAc;EAAQ,SAAS;EAAO;AAGjD,QAAO;EACL,cAAc;GACZ,GAAG;GACH,cAAc;IACZ,GAAG;IACH,GAAG;IACJ;GACD,uBAAuB;IACrB,GAAG;IACH,GAAG;IACJ;GACF;EACD,SAAS;EACV;;AAGH,SAAgB,0BAAqC;CACnD,MAAM,SAAS,oBAAoB;CACnC,MAAM,EAAE,cAAc,YAAY,mBAAmB,OAAO;AAE5D,KAAI,QACF,KAAI;AACF,KAAG,cACD,MAAM,aACN,GAAG,KAAK,UAAU,cAAc,MAAM,EAAE,CAAC,KACzC,OACD;UACM,YAAY;AACnB,UAAQ,KACN,sDACA,WACD;;AAIL,gBAAe;AACf,QAAO;;AAGT,SAAgB,YAAuB;AACrC,kBAAiB,oBAAoB;AACrC,QAAO;;AAGT,SAAgB,uBAAuB,OAAuB;AAE5D,QADe,WAAW,CACZ,eAAe,UAAU;;AAGzC,SAAgB,gBAAwB;AAEtC,QADe,WAAW,CACZ,cAAc;;AAG9B,SAAgB,yCAAwD;AAEtE,QADe,WAAW,CAEjB,uCACJ,cAAc,uCACd,EAAE;;AAIT,SAAgB,qCAAqC,OAAwB;AAC3E,QAAO,wCAAwC,CAAC,SAAS,MAAM;;AAGjE,SAAgB,2BACd,OAC0D;AAE1D,QADe,WAAW,CACZ,wBAAwB,UAAU;;A
AGlD,SAAgB,yBAAyB,KAAqB;AAC5D,QAAO,IAAI,MAAM,CAAC,QAAQ,SAAS,GAAG;;AAGxC,SAAS,2BACP,cACkB;AAClB,QAAO,iBAAiB,sBAAsB,kBAAkB;;AAGlE,SAAgB,wBACd,cACA,UACA,cACkB;AAClB,KAAI,aAAa,OACf,QAAO,2BAA2B,aAAa;AAGjD,KAAI,aAAa,YACf,QAAO;AAGT,KAAI,aAAa,gBACf,QAAO;AAGT,SAAQ,KACN,YAAY,aAAa,yBAAyB,SAAS,qBAAqB,2BAA2B,aAAa,GACzH;AACD,QAAO,2BAA2B,aAAa;;AAGjD,SAAgB,kBAAkB,MAA6C;CAC7E,MAAM,eAAe,KAAK,MAAM;AAChC,KAAI,CAAC,aACH,QAAO;CAIT,MAAM,WADS,WAAW,CACF,YAAY;AACpC,KAAI,CAAC,SACH,QAAO;AAGT,KAAI,SAAS,YAAY,MACvB,QAAO;CAGT,MAAM,OAAO,SAAS,QAAQ;AAC9B,KAAI,SAAS,eAAe,SAAS,qBAAqB;AACxD,UAAQ,KACN,YAAY,aAAa,4BAA4B,KAAK,oBAC3D;AACD,SAAO;;CAGT,MAAM,UAAU,yBAAyB,SAAS,WAAW,GAAG;CAChE,MAAM,UAAU,SAAS,UAAU,IAAI,MAAM;CAC7C,MAAM,WAAW,wBACf,cACA,SAAS,UACT,KACD;AACD,KAAI,CAAC,WAAW,CAAC,QAAQ;AACvB,UAAQ,KACN,YAAY,aAAa,2CAC1B;AACD,SAAO;;AAGT,QAAO;EACL,MAAM;EACN;EACA;EACA;EACA;EACA,QAAQ,SAAS;EACjB,mBAAmB,SAAS;EAC7B;;AASH,SAAgB,uBAAgC;AAE9C,QADe,WAAW,CACZ,kBAAkB;;AAGlC,SAAgB,qBAAyC;AAEvD,QADe,WAAW,CACZ,mBAAmB,QAAQ,IAAI,qBAAqB;;AAGpE,SAAgB,iCAA0C;AAExD,QADe,WAAW,CACZ,4BAA4B;;AAG5C,SAAgB,2BAAmC;AAEjD,QADe,WAAW,CACZ,yBAAyB"}
package/dist/main.js CHANGED
@@ -44,7 +44,7 @@ bindElectronFetch();
  const { auth } = await import("./auth-CWEhhJYn.js");
  const { checkUsage } = await import("./check-usage-B5yr4fpk.js");
  const { debug } = await import("./debug-DcC7ZPH0.js");
- const { start } = await import("./start-1ogeufrV.js");
+ const { start } = await import("./start-DNhkQhQZ.js");
  const main = defineCommand({
    meta: {
      name: "copilot-api",