@eminent337/aery-ai 0.67.126 → 0.67.128
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +98 -39
- package/dist/env-api-keys.d.ts.map +1 -1
- package/dist/env-api-keys.js +9 -1
- package/dist/env-api-keys.js.map +1 -1
- package/dist/image-models.d.ts +10 -0
- package/dist/image-models.d.ts.map +1 -0
- package/dist/image-models.generated.d.ts +425 -0
- package/dist/image-models.generated.d.ts.map +1 -0
- package/dist/image-models.generated.js +427 -0
- package/dist/image-models.generated.js.map +1 -0
- package/dist/image-models.js +23 -0
- package/dist/image-models.js.map +1 -0
- package/dist/images-api-registry.d.ts +14 -0
- package/dist/images-api-registry.d.ts.map +1 -0
- package/dist/images-api-registry.js +22 -0
- package/dist/images-api-registry.js.map +1 -0
- package/dist/images.d.ts +4 -0
- package/dist/images.d.ts.map +1 -0
- package/dist/images.js +14 -0
- package/dist/images.js.map +1 -0
- package/dist/index.d.ts +8 -3
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +5 -0
- package/dist/index.js.map +1 -1
- package/dist/models.d.ts +5 -7
- package/dist/models.d.ts.map +1 -1
- package/dist/models.generated.d.ts +4541 -2236
- package/dist/models.generated.d.ts.map +1 -1
- package/dist/models.generated.js +3425 -1895
- package/dist/models.generated.js.map +1 -1
- package/dist/models.js +35 -20
- package/dist/models.js.map +1 -1
- package/dist/providers/amazon-bedrock.d.ts.map +1 -1
- package/dist/providers/amazon-bedrock.js +18 -27
- package/dist/providers/amazon-bedrock.js.map +1 -1
- package/dist/providers/anthropic.d.ts.map +1 -1
- package/dist/providers/anthropic.js +62 -33
- package/dist/providers/anthropic.js.map +1 -1
- package/dist/providers/azure-openai-responses.d.ts.map +1 -1
- package/dist/providers/azure-openai-responses.js +14 -8
- package/dist/providers/azure-openai-responses.js.map +1 -1
- package/dist/providers/cloudflare.d.ts +9 -3
- package/dist/providers/cloudflare.d.ts.map +1 -1
- package/dist/providers/cloudflare.js +10 -3
- package/dist/providers/cloudflare.js.map +1 -1
- package/dist/providers/google-shared.d.ts +7 -2
- package/dist/providers/google-shared.d.ts.map +1 -1
- package/dist/providers/google-shared.js +4 -13
- package/dist/providers/google-shared.js.map +1 -1
- package/dist/providers/google-vertex.d.ts +1 -1
- package/dist/providers/google-vertex.d.ts.map +1 -1
- package/dist/providers/google-vertex.js +8 -7
- package/dist/providers/google-vertex.js.map +1 -1
- package/dist/providers/google.d.ts +1 -1
- package/dist/providers/google.d.ts.map +1 -1
- package/dist/providers/google.js +5 -4
- package/dist/providers/google.js.map +1 -1
- package/dist/providers/images/openrouter.d.ts +3 -0
- package/dist/providers/images/openrouter.d.ts.map +1 -0
- package/dist/providers/images/openrouter.js +129 -0
- package/dist/providers/images/openrouter.js.map +1 -0
- package/dist/providers/images/register-builtins.d.ts +4 -0
- package/dist/providers/images/register-builtins.d.ts.map +1 -0
- package/dist/providers/images/register-builtins.js +34 -0
- package/dist/providers/images/register-builtins.js.map +1 -0
- package/dist/providers/mistral.d.ts.map +1 -1
- package/dist/providers/mistral.js +8 -7
- package/dist/providers/mistral.js.map +1 -1
- package/dist/providers/openai-codex-responses.d.ts +19 -0
- package/dist/providers/openai-codex-responses.d.ts.map +1 -1
- package/dist/providers/openai-codex-responses.js +347 -46
- package/dist/providers/openai-codex-responses.js.map +1 -1
- package/dist/providers/openai-completions.d.ts.map +1 -1
- package/dist/providers/openai-completions.js +185 -159
- package/dist/providers/openai-completions.js.map +1 -1
- package/dist/providers/openai-responses-shared.d.ts.map +1 -1
- package/dist/providers/openai-responses-shared.js +14 -1
- package/dist/providers/openai-responses-shared.js.map +1 -1
- package/dist/providers/openai-responses.d.ts.map +1 -1
- package/dist/providers/openai-responses.js +22 -8
- package/dist/providers/openai-responses.js.map +1 -1
- package/dist/providers/register-builtins.d.ts +0 -3
- package/dist/providers/register-builtins.d.ts.map +1 -1
- package/dist/providers/register-builtins.js +0 -18
- package/dist/providers/register-builtins.js.map +1 -1
- package/dist/providers/simple-options.d.ts.map +1 -1
- package/dist/providers/simple-options.js +1 -0
- package/dist/providers/simple-options.js.map +1 -1
- package/dist/session-resources.d.ts +4 -0
- package/dist/session-resources.d.ts.map +1 -0
- package/dist/session-resources.js +22 -0
- package/dist/session-resources.js.map +1 -0
- package/dist/types.d.ts +95 -6
- package/dist/types.d.ts.map +1 -1
- package/dist/types.js.map +1 -1
- package/dist/utils/node-http-proxy.d.ts +10 -0
- package/dist/utils/node-http-proxy.d.ts.map +1 -0
- package/dist/utils/node-http-proxy.js +34 -0
- package/dist/utils/node-http-proxy.js.map +1 -0
- package/dist/utils/oauth/anthropic.d.ts.map +1 -1
- package/dist/utils/oauth/anthropic.js +3 -3
- package/dist/utils/oauth/anthropic.js.map +1 -1
- package/dist/utils/oauth/index.d.ts +0 -4
- package/dist/utils/oauth/index.d.ts.map +1 -1
- package/dist/utils/oauth/index.js +0 -10
- package/dist/utils/oauth/index.js.map +1 -1
- package/dist/utils/oauth/openai-codex.d.ts +1 -1
- package/dist/utils/oauth/openai-codex.d.ts.map +1 -1
- package/dist/utils/oauth/openai-codex.js +26 -15
- package/dist/utils/oauth/openai-codex.js.map +1 -1
- package/dist/utils/oauth/types.d.ts +10 -0
- package/dist/utils/oauth/types.d.ts.map +1 -1
- package/dist/utils/oauth/types.js.map +1 -1
- package/dist/utils/overflow.d.ts +3 -0
- package/dist/utils/overflow.d.ts.map +1 -1
- package/dist/utils/overflow.js +17 -0
- package/dist/utils/overflow.js.map +1 -1
- package/package.json +8 -12
package/README.md
CHANGED
|
@@ -16,6 +16,9 @@ Unified LLM API with automatic model discovery, provider configuration, token an
|
|
|
16
16
|
- [Validating Tool Arguments](#validating-tool-arguments)
|
|
17
17
|
- [Complete Event Reference](#complete-event-reference)
|
|
18
18
|
- [Image Input](#image-input)
|
|
19
|
+
- [Image Generation](#image-generation)
|
|
20
|
+
- [Basic Image Generation](#basic-image-generation)
|
|
21
|
+
- [Notes and Limitations](#notes-and-limitations)
|
|
19
22
|
- [Thinking/Reasoning](#thinkingreasoning)
|
|
20
23
|
- [Unified Interface](#unified-interface-streamsimplecompletesimple)
|
|
21
24
|
- [Provider-Specific Options](#provider-specific-options-streamcomplete)
|
|
@@ -57,19 +60,20 @@ Unified LLM API with automatic model discovery, provider configuration, token an
|
|
|
57
60
|
- **Mistral**
|
|
58
61
|
- **Groq**
|
|
59
62
|
- **Cerebras**
|
|
63
|
+
- **Cloudflare AI Gateway**
|
|
60
64
|
- **Cloudflare Workers AI**
|
|
61
65
|
- **xAI**
|
|
62
66
|
- **OpenRouter**
|
|
63
67
|
- **Vercel AI Gateway**
|
|
64
68
|
- **MiniMax**
|
|
69
|
+
- **Together AI**
|
|
65
70
|
- **GitHub Copilot** (requires OAuth, see below)
|
|
66
|
-
- **Google Gemini CLI** (requires OAuth, see below)
|
|
67
|
-
- **Antigravity** (requires OAuth, see below)
|
|
68
71
|
- **Amazon Bedrock**
|
|
69
72
|
- **OpenCode Zen**
|
|
70
73
|
- **OpenCode Go**
|
|
71
74
|
- **Fireworks** (uses Anthropic-compatible API)
|
|
72
75
|
- **Kimi For Coding** (Moonshot AI, uses Anthropic-compatible API)
|
|
76
|
+
- **Xiaomi MiMo** (uses Anthropic-compatible API; defaults to API billing endpoint, with separate Token Plan providers for `cn`/`ams`/`sgp` regions)
|
|
73
77
|
- **Any OpenAI-compatible API**: Ollama, vLLM, LM Studio, etc.
|
|
74
78
|
|
|
75
79
|
## Installation
|
|
@@ -383,6 +387,8 @@ All streaming events emitted during assistant message generation:
|
|
|
383
387
|
| `done` | Stream complete | `reason`: Stop reason ("stop", "length", "toolUse"), `message`: Final assistant message |
|
|
384
388
|
| `error` | Error occurred | `reason`: Error type ("error" or "aborted"), `error`: AssistantMessage with partial content |
|
|
385
389
|
|
|
390
|
+
Streaming events for different content blocks are not guaranteed to be contiguous. Providers may emit deltas for text, thinking, and tool calls in the same upstream chunk, and the library may surface corresponding events interleaved, for example `text_start`, `text_delta`, `toolcall_start`, `text_delta`, `toolcall_delta`. Consumers must use `contentIndex` to associate each delta/end event with its block and must not assume that a block's `*_start`/`*_delta`/`*_end` sequence is uninterrupted by events for other blocks.
|
|
391
|
+
|
|
386
392
|
## Image Input
|
|
387
393
|
|
|
388
394
|
Models with vision capabilities can process images. You can check if a model supports images via the `input` property. If you pass images to a non-vision model, they are silently ignored.
|
|
@@ -419,6 +425,70 @@ for (const block of response.content) {
|
|
|
419
425
|
}
|
|
420
426
|
```
|
|
421
427
|
|
|
428
|
+
## Image Generation
|
|
429
|
+
|
|
430
|
+
Image generation uses a separate API surface from text/chat generation. Use `getImageModel()` / `getImageModels()` / `getImageProviders()` to discover image-generation models, and `generateImages()` to get the final result.
|
|
431
|
+
|
|
432
|
+
Do not use `stream()` or `complete()` for image generation. Image generation is a one-shot API: `generateImages()` waits for the provider response and returns the final `AssistantImages` result.
|
|
433
|
+
|
|
434
|
+
### Basic Image Generation
|
|
435
|
+
|
|
436
|
+
```typescript
|
|
437
|
+
import { getImageModel, generateImages } from '@eminent337/aery-ai';
|
|
438
|
+
|
|
439
|
+
const model = getImageModel('openrouter', 'google/gemini-2.5-flash-image');
|
|
440
|
+
|
|
441
|
+
const result = await generateImages(model, {
|
|
442
|
+
input: [{ type: 'text', text: 'Generate a red circle on a plain white background.' }]
|
|
443
|
+
}, {
|
|
444
|
+
apiKey: process.env.OPENROUTER_API_KEY
|
|
445
|
+
});
|
|
446
|
+
|
|
447
|
+
for (const block of result.output) {
|
|
448
|
+
if (block.type === 'text') {
|
|
449
|
+
console.log(block.text);
|
|
450
|
+
} else if (block.type === 'image') {
|
|
451
|
+
console.log(block.mimeType);
|
|
452
|
+
console.log(block.data.substring(0, 32));
|
|
453
|
+
}
|
|
454
|
+
}
|
|
455
|
+
```
|
|
456
|
+
|
|
457
|
+
Some models also support image input:
|
|
458
|
+
|
|
459
|
+
```typescript
|
|
460
|
+
import { readFileSync } from 'fs';
|
|
461
|
+
|
|
462
|
+
const imageBuffer = readFileSync('input.png');
|
|
463
|
+
const result = await generateImages(model, {
|
|
464
|
+
input: [
|
|
465
|
+
{ type: 'text', text: 'Create a variation of this image with a blue background.' },
|
|
466
|
+
{ type: 'image', data: imageBuffer.toString('base64'), mimeType: 'image/png' }
|
|
467
|
+
]
|
|
468
|
+
}, {
|
|
469
|
+
apiKey: process.env.OPENROUTER_API_KEY
|
|
470
|
+
});
|
|
471
|
+
```
|
|
472
|
+
|
|
473
|
+
Check capabilities on the model metadata:
|
|
474
|
+
|
|
475
|
+
```typescript
|
|
476
|
+
console.log(model.input); // ['text', 'image']
|
|
477
|
+
console.log(model.output); // ['image'] or ['image', 'text']
|
|
478
|
+
```
|
|
479
|
+
|
|
480
|
+
### Notes and Limitations
|
|
481
|
+
|
|
482
|
+
- Use `getImageModel(...)`, not `getModel(...)`.
|
|
483
|
+
- Use `generateImages()`, not `stream()` / `complete()`.
|
|
484
|
+
- Image-generation models do not participate in tool calling.
|
|
485
|
+
- Outputs are returned in `AssistantImages.output` and can include both base64-encoded `ImageContent` blocks and `TextContent` blocks.
|
|
486
|
+
- Some models return only images, others return images plus text. Check `model.output`.
|
|
487
|
+
- Some models accept image input, others are text-to-image only. Check `model.input`.
|
|
488
|
+
- Like the streaming APIs, image generation supports options such as `apiKey`, `signal`, `headers`, `onPayload`, and `onResponse`, and results may include `stopReason`, `responseId`, and `usage`.
|
|
489
|
+
- If you want a model to analyze images in a conversation or call tools, use the regular `stream()` / `complete()` APIs with a model that supports image input.
|
|
490
|
+
- At the moment, image generation is available through only one provider, OpenRouter.
|
|
491
|
+
|
|
422
492
|
## Thinking/Reasoning
|
|
423
493
|
|
|
424
494
|
Many models support thinking/reasoning capabilities where they can show their internal thought process. You can check if a model supports reasoning via the `reasoning` property. If you pass reasoning options to a non-reasoning model, they are silently ignored.
|
|
@@ -446,7 +516,7 @@ if (model.reasoning) {
|
|
|
446
516
|
const response = await completeSimple(model, {
|
|
447
517
|
messages: [{ role: 'user', content: 'Solve: 2x + 5 = 13' }]
|
|
448
518
|
}, {
|
|
449
|
-
reasoning: 'medium' // 'minimal' | 'low' | 'medium' | 'high' | 'xhigh'
|
|
519
|
+
reasoning: 'medium' // 'minimal' | 'low' | 'medium' | 'high' | 'xhigh'
|
|
450
520
|
});
|
|
451
521
|
|
|
452
522
|
// Access thinking and text blocks
|
|
@@ -630,7 +700,6 @@ The library uses a registry of API implementations. Built-in APIs include:
|
|
|
630
700
|
|
|
631
701
|
- **`anthropic-messages`**: Anthropic Messages API (`streamAnthropic`, `AnthropicOptions`)
|
|
632
702
|
- **`google-generative-ai`**: Google Generative AI API (`streamGoogle`, `GoogleOptions`)
|
|
633
|
-
- **`google-gemini-cli`**: Google Cloud Code Assist API (`streamGoogleGeminiCli`, `GoogleGeminiCliOptions`)
|
|
634
703
|
- **`google-vertex`**: Google Vertex AI API (`streamGoogleVertex`, `GoogleVertexOptions`)
|
|
635
704
|
- **`mistral-conversations`**: Mistral Conversations API (`streamMistral`, `MistralOptions`)
|
|
636
705
|
- **`openai-completions`**: OpenAI Chat Completions API (`streamOpenAICompletions`, `OpenAICompletionsOptions`)
|
|
@@ -732,7 +801,7 @@ A **provider** offers models through a specific API. For example:
|
|
|
732
801
|
- **Google** models use the `google-generative-ai` API
|
|
733
802
|
- **OpenAI** models use the `openai-responses` API
|
|
734
803
|
- **Mistral** models use the `mistral-conversations` API
|
|
735
|
-
- **xAI, Cerebras, Groq, etc.** models use the `openai-completions` API (OpenAI-compatible)
|
|
804
|
+
- **xAI, Cerebras, Groq, Together AI, etc.** models use the `openai-completions` API (OpenAI-compatible)
|
|
736
805
|
|
|
737
806
|
### Querying Providers and Models
|
|
738
807
|
|
|
@@ -822,6 +891,8 @@ const response = await stream(ollamaModel, context, {
|
|
|
822
891
|
|
|
823
892
|
Some OpenAI-compatible servers do not understand the `developer` role used for reasoning-capable models. For those providers, set `compat.supportsDeveloperRole` to `false` so the system prompt is sent as a `system` message instead. If the server also does not support `reasoning_effort`, set `compat.supportsReasoningEffort` to `false` too.
|
|
824
893
|
|
|
894
|
+
Use model-level `thinkingLevelMap` to describe model-specific thinking controls. Keys are the library's thinking levels (`off`, `minimal`, `low`, `medium`, `high`, `xhigh`). Missing keys use provider defaults, string values are sent to the provider, and `null` marks a level as unsupported.
|
|
895
|
+
|
|
825
896
|
This commonly applies to Ollama, vLLM, SGLang, and similar OpenAI-compatible servers. You can set `compat` at the provider level or per model.
|
|
826
897
|
|
|
827
898
|
```typescript
|
|
@@ -836,6 +907,13 @@ const ollamaReasoningModel: Model<'openai-completions'> = {
|
|
|
836
907
|
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
|
837
908
|
contextWindow: 131072,
|
|
838
909
|
maxTokens: 32000,
|
|
910
|
+
thinkingLevelMap: {
|
|
911
|
+
minimal: null,
|
|
912
|
+
low: null,
|
|
913
|
+
medium: null,
|
|
914
|
+
high: 'high',
|
|
915
|
+
xhigh: null,
|
|
916
|
+
},
|
|
839
917
|
compat: {
|
|
840
918
|
supportsDeveloperRole: false,
|
|
841
919
|
supportsReasoningEffort: false,
|
|
@@ -845,7 +923,7 @@ const ollamaReasoningModel: Model<'openai-completions'> = {
|
|
|
845
923
|
|
|
846
924
|
### OpenAI Compatibility Settings
|
|
847
925
|
|
|
848
|
-
The `openai-completions` API is implemented by many providers with minor differences. By default, the library auto-detects compatibility settings based on `baseUrl` for a small set of known OpenAI-compatible providers (Cerebras, xAI, Chutes, DeepSeek, zAi, OpenCode, Cloudflare Workers AI, etc.). For custom proxies or unknown endpoints, you can override these settings via the `compat` field. For `openai-responses` models, the compat field only supports Responses-specific flags.
|
|
926
|
+
The `openai-completions` API is implemented by many providers with minor differences. By default, the library auto-detects compatibility settings based on `baseUrl` for a small set of known OpenAI-compatible providers (Cerebras, xAI, Chutes, DeepSeek, Together AI, zAi, OpenCode, Cloudflare Workers AI, etc.). For custom proxies or unknown endpoints, you can override these settings via the `compat` field. For `openai-responses` models, the compat field only supports Responses-specific flags.
|
|
849
927
|
|
|
850
928
|
```typescript
|
|
851
929
|
interface OpenAICompletionsCompat {
|
|
@@ -860,7 +938,7 @@ interface OpenAICompletionsCompat {
|
|
|
860
938
|
requiresAssistantAfterToolResult?: boolean; // Whether tool results must be followed by an assistant message (default: false)
|
|
861
939
|
requiresThinkingAsText?: boolean; // Whether thinking blocks must be converted to text (default: false)
|
|
862
940
|
requiresReasoningContentOnAssistantMessages?: boolean; // Whether all replayed assistant messages must include empty reasoning_content when reasoning is enabled (default: auto-detected for DeepSeek)
|
|
863
|
-
thinkingFormat?: 'openai' | 'deepseek' | 'zai' | 'qwen' | 'qwen-chat-template'; // Format for reasoning param: 'openai' uses reasoning_effort, 'deepseek' uses thinking: { type } plus reasoning_effort, 'zai' uses enable_thinking, 'qwen' uses enable_thinking, 'qwen-chat-template' uses chat_template_kwargs.enable_thinking (default: openai)
|
|
941
|
+
thinkingFormat?: 'openai' | 'openrouter' | 'deepseek' | 'together' | 'zai' | 'qwen' | 'qwen-chat-template'; // Format for reasoning param: 'openai' uses reasoning_effort, 'openrouter' uses reasoning: { effort }, 'deepseek' uses thinking: { type } plus reasoning_effort, 'together' uses reasoning: { enabled } plus reasoning_effort when supported, 'zai' uses enable_thinking, 'qwen' uses enable_thinking, 'qwen-chat-template' uses chat_template_kwargs.enable_thinking (default: openai)
|
|
864
942
|
cacheControlFormat?: 'anthropic'; // Anthropic-style cache_control on system prompt, last tool, and last user/assistant text content
|
|
865
943
|
openRouterRouting?: OpenRouterRouting; // OpenRouter routing preferences (default: {})
|
|
866
944
|
vercelGatewayRouting?: VercelGatewayRouting; // Vercel AI Gateway routing preferences (default: {})
|
|
@@ -1021,7 +1099,7 @@ In Node.js environments, you can set environment variables to avoid passing API
|
|
|
1021
1099
|
| Provider | Environment Variable(s) |
|
|
1022
1100
|
|----------|------------------------|
|
|
1023
1101
|
| OpenAI | `OPENAI_API_KEY` |
|
|
1024
|
-
| Azure OpenAI | `AZURE_OPENAI_API_KEY` + `AZURE_OPENAI_BASE_URL` (e.g. `https://{resource}.openai.azure.com`) or `AZURE_OPENAI_RESOURCE_NAME`. Supports `*.openai.azure.com` and `*.cognitiveservices.azure.com`; root endpoints auto-normalize to `/openai/v1`. Optional: `
|
|
1102
|
+
| Azure OpenAI | `AZURE_OPENAI_API_KEY` + `AZURE_OPENAI_BASE_URL` (e.g. `https://{resource}.openai.azure.com`) or `AZURE_OPENAI_RESOURCE_NAME`. Supports `*.openai.azure.com` and `*.cognitiveservices.azure.com`; root endpoints auto-normalize to `/openai/v1`. Optional: `AZURE_OPENAI_API_VERSION` (default `v1`), `AZURE_OPENAI_DEPLOYMENT_NAME_MAP`. |
|
|
1025
1103
|
| Anthropic | `ANTHROPIC_API_KEY` or `ANTHROPIC_OAUTH_TOKEN` |
|
|
1026
1104
|
| DeepSeek | `DEEPSEEK_API_KEY` |
|
|
1027
1105
|
| Google | `GEMINI_API_KEY` |
|
|
@@ -1029,16 +1107,22 @@ In Node.js environments, you can set environment variables to avoid passing API
|
|
|
1029
1107
|
| Mistral | `MISTRAL_API_KEY` |
|
|
1030
1108
|
| Groq | `GROQ_API_KEY` |
|
|
1031
1109
|
| Cerebras | `CEREBRAS_API_KEY` |
|
|
1110
|
+
| Cloudflare AI Gateway | `CLOUDFLARE_API_KEY` + `CLOUDFLARE_ACCOUNT_ID` + `CLOUDFLARE_GATEWAY_ID` |
|
|
1032
1111
|
| Cloudflare Workers AI | `CLOUDFLARE_API_KEY` + `CLOUDFLARE_ACCOUNT_ID` |
|
|
1033
1112
|
| xAI | `XAI_API_KEY` |
|
|
1034
1113
|
| Fireworks | `FIREWORKS_API_KEY` |
|
|
1114
|
+
| Together AI | `TOGETHER_API_KEY` |
|
|
1035
1115
|
| OpenRouter | `OPENROUTER_API_KEY` |
|
|
1036
1116
|
| Vercel AI Gateway | `AI_GATEWAY_API_KEY` |
|
|
1037
1117
|
| zAI | `ZAI_API_KEY` |
|
|
1038
1118
|
| MiniMax | `MINIMAX_API_KEY` |
|
|
1039
1119
|
| OpenCode Zen / OpenCode Go | `OPENCODE_API_KEY` |
|
|
1040
1120
|
| Kimi For Coding | `KIMI_API_KEY` |
|
|
1041
|
-
|
|
|
1121
|
+
| Xiaomi MiMo (API billing) | `XIAOMI_API_KEY` |
|
|
1122
|
+
| Xiaomi MiMo Token Plan (China) | `XIAOMI_TOKEN_PLAN_CN_API_KEY` |
|
|
1123
|
+
| Xiaomi MiMo Token Plan (Amsterdam) | `XIAOMI_TOKEN_PLAN_AMS_API_KEY` |
|
|
1124
|
+
| Xiaomi MiMo Token Plan (Singapore) | `XIAOMI_TOKEN_PLAN_SGP_API_KEY` |
|
|
1125
|
+
| GitHub Copilot | `COPILOT_GITHUB_TOKEN` |
|
|
1042
1126
|
|
|
1043
1127
|
When set, the library automatically uses these keys:
|
|
1044
1128
|
|
|
@@ -1053,27 +1137,6 @@ const response = await complete(model, context, {
|
|
|
1053
1137
|
});
|
|
1054
1138
|
```
|
|
1055
1139
|
|
|
1056
|
-
#### Antigravity Version Override
|
|
1057
|
-
|
|
1058
|
-
Set `PI_AI_ANTIGRAVITY_VERSION` to override the Antigravity User-Agent version when Google updates their requirements:
|
|
1059
|
-
|
|
1060
|
-
```bash
|
|
1061
|
-
export PI_AI_ANTIGRAVITY_VERSION="1.23.0"
|
|
1062
|
-
```
|
|
1063
|
-
|
|
1064
|
-
#### Cache Retention
|
|
1065
|
-
|
|
1066
|
-
Set `PI_CACHE_RETENTION=long` to extend prompt cache retention:
|
|
1067
|
-
|
|
1068
|
-
| Provider | Default | With `PI_CACHE_RETENTION=long` |
|
|
1069
|
-
|----------|---------|-------------------------------|
|
|
1070
|
-
| Anthropic | 5 minutes | 1 hour |
|
|
1071
|
-
| OpenAI | in-memory | 24 hours |
|
|
1072
|
-
|
|
1073
|
-
This only affects direct API calls to `api.anthropic.com` and `api.openai.com`. Proxies and other providers are unaffected.
|
|
1074
|
-
|
|
1075
|
-
> **Note**: Extended cache retention may increase costs for Anthropic (cache writes are charged at a higher rate). OpenAI's 24h retention has no additional cost.
|
|
1076
|
-
|
|
1077
1140
|
### Checking Environment Variables
|
|
1078
1141
|
|
|
1079
1142
|
```typescript
|
|
@@ -1090,8 +1153,6 @@ Several providers require OAuth authentication instead of static API keys:
|
|
|
1090
1153
|
- **Anthropic** (Claude Pro/Max subscription)
|
|
1091
1154
|
- **OpenAI Codex** (ChatGPT Plus/Pro subscription, access to GPT-5.x Codex models)
|
|
1092
1155
|
- **GitHub Copilot** (Copilot subscription)
|
|
1093
|
-
- **Google Gemini CLI** (Gemini 2.0/2.5 via Google Cloud Code Assist; free tier or paid subscription)
|
|
1094
|
-
- **Antigravity** (Free Gemini 3, Claude, GPT-OSS via Google Cloud)
|
|
1095
1156
|
|
|
1096
1157
|
For paid Cloud Code Assist subscriptions, set `GOOGLE_CLOUD_PROJECT` or `GOOGLE_CLOUD_PROJECT_ID` to your project ID.
|
|
1097
1158
|
|
|
@@ -1159,14 +1220,13 @@ import {
|
|
|
1159
1220
|
loginOpenAICodex,
|
|
1160
1221
|
loginGitHubCopilot,
|
|
1161
1222
|
loginGeminiCli,
|
|
1162
|
-
loginAntigravity,
|
|
1163
1223
|
|
|
1164
1224
|
// Token management
|
|
1165
1225
|
refreshOAuthToken, // (provider, credentials) => new credentials
|
|
1166
1226
|
getOAuthApiKey, // (provider, credentialsMap) => { newCredentials, apiKey } | null
|
|
1167
1227
|
|
|
1168
1228
|
// Types
|
|
1169
|
-
type OAuthProvider,
|
|
1229
|
+
type OAuthProvider,
|
|
1170
1230
|
type OAuthCredentials,
|
|
1171
1231
|
} from '@eminent337/aery-ai/oauth';
|
|
1172
1232
|
```
|
|
@@ -1224,12 +1284,10 @@ const response = await complete(model, {
|
|
|
1224
1284
|
|
|
1225
1285
|
**OpenAI Codex**: Requires a ChatGPT Plus or Pro subscription. Provides access to GPT-5.x Codex models with extended context windows and reasoning capabilities. The library automatically handles session-based prompt caching when `sessionId` is provided in stream options. You can set `transport` in stream options to `"sse"`, `"websocket"`, or `"auto"` for Codex Responses transport selection. When using WebSocket with a `sessionId`, connections are reused per session and expire after 5 minutes of inactivity.
|
|
1226
1286
|
|
|
1227
|
-
**Azure OpenAI (Responses)**: Uses the Responses API only. Set `AZURE_OPENAI_API_KEY` and either `AZURE_OPENAI_BASE_URL` or `AZURE_OPENAI_RESOURCE_NAME`. `AZURE_OPENAI_BASE_URL` supports both `https://<resource>.openai.azure.com` and `https://<resource>.cognitiveservices.azure.com`; root endpoints are normalized to `.../openai/v1` automatically. Use `
|
|
1287
|
+
**Azure OpenAI (Responses)**: Uses the Responses API only. Set `AZURE_OPENAI_API_KEY` and either `AZURE_OPENAI_BASE_URL` or `AZURE_OPENAI_RESOURCE_NAME`. `AZURE_OPENAI_BASE_URL` supports both `https://<resource>.openai.azure.com` and `https://<resource>.cognitiveservices.azure.com`; root endpoints are normalized to `.../openai/v1` automatically. Use `AZURE_OPENAI_API_VERSION` (defaults to `v1`) to override the API version if needed. Deployment names are treated as model IDs by default, override with `azureDeploymentName` or `AZURE_OPENAI_DEPLOYMENT_NAME_MAP` using comma-separated `model-id=deployment` pairs (for example `gpt-4o-mini=my-deployment,gpt-4o=prod`). Legacy deployment-based URLs are intentionally unsupported.
|
|
1228
1288
|
|
|
1229
1289
|
**GitHub Copilot**: If you get "The requested model is not supported" error, enable the model manually in VS Code: open Copilot Chat, click the model selector, select the model (warning icon), and click "Enable".
|
|
1230
1290
|
|
|
1231
|
-
**Google Gemini CLI / Antigravity**: These use Google Cloud OAuth. The `apiKey` returned by `getOAuthApiKey()` is a JSON string containing both the token and project ID, which the library handles automatically.
|
|
1232
|
-
|
|
1233
1291
|
## Development
|
|
1234
1292
|
|
|
1235
1293
|
### Adding a New Provider
|
|
@@ -1262,10 +1320,11 @@ Create a new provider file (for example `amazon-bedrock.ts`) that exports:
|
|
|
1262
1320
|
- Add credential detection in `env-api-keys.ts` for the new provider
|
|
1263
1321
|
- Ensure `streamSimple` handles auth lookup via `getEnvApiKey()` or provider-specific auth
|
|
1264
1322
|
|
|
1265
|
-
#### 4. Model Generation (`scripts/generate-models.ts`)
|
|
1323
|
+
#### 4. Model Generation (`scripts/generate-models.ts`, `scripts/generate-image-models.ts`)
|
|
1266
1324
|
|
|
1267
1325
|
- Add logic to fetch and parse models from the provider's source (e.g., models.dev API)
|
|
1268
|
-
- Map provider model data to the standardized `Model` interface
|
|
1326
|
+
- Map chat/tool-capable provider model data to the standardized `Model` interface via `scripts/generate-models.ts`
|
|
1327
|
+
- Map image-generation provider model data to the standardized `ImagesModel` interface via `scripts/generate-image-models.ts`
|
|
1269
1328
|
- Handle provider-specific quirks (pricing format, capability flags, model ID transformations)
|
|
1270
1329
|
|
|
1271
1330
|
#### 5. Tests (`test/`)
|
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"env-api-keys.d.ts","sourceRoot":"","sources":["../src/env-api-keys.ts"],"names":[],"mappings":"AAyBA,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,YAAY,CAAC;
|
|
1
|
+
{"version":3,"file":"env-api-keys.d.ts","sourceRoot":"","sources":["../src/env-api-keys.ts"],"names":[],"mappings":"AAyBA,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,YAAY,CAAC;AA8GhD;;;;;;GAMG;AACH,wBAAgB,WAAW,CAAC,QAAQ,EAAE,aAAa,GAAG,MAAM,EAAE,GAAG,SAAS,CAAC;AAC3E,wBAAgB,WAAW,CAAC,QAAQ,EAAE,MAAM,GAAG,MAAM,EAAE,GAAG,SAAS,CAAC;AASpE;;;;GAIG;AACH,wBAAgB,YAAY,CAAC,QAAQ,EAAE,aAAa,GAAG,MAAM,GAAG,SAAS,CAAC;AAC1E,wBAAgB,YAAY,CAAC,QAAQ,EAAE,MAAM,GAAG,MAAM,GAAG,SAAS,CAAC","sourcesContent":["// NEVER convert to top-level imports - breaks browser/Vite builds (web-ui)\nlet _existsSync: typeof import(\"node:fs\").existsSync | null = null;\nlet _homedir: typeof import(\"node:os\").homedir | null = null;\nlet _join: typeof import(\"node:path\").join | null = null;\n\ntype DynamicImport = (specifier: string) => Promise<unknown>;\n\nconst dynamicImport: DynamicImport = (specifier) => import(specifier);\nconst NODE_FS_SPECIFIER = \"node:\" + \"fs\";\nconst NODE_OS_SPECIFIER = \"node:\" + \"os\";\nconst NODE_PATH_SPECIFIER = \"node:\" + \"path\";\n\n// Eagerly load in Node.js/Bun environment only\nif (typeof process !== \"undefined\" && (process.versions?.node || process.versions?.bun)) {\n\tdynamicImport(NODE_FS_SPECIFIER).then((m) => {\n\t\t_existsSync = (m as typeof import(\"node:fs\")).existsSync;\n\t});\n\tdynamicImport(NODE_OS_SPECIFIER).then((m) => {\n\t\t_homedir = (m as typeof import(\"node:os\")).homedir;\n\t});\n\tdynamicImport(NODE_PATH_SPECIFIER).then((m) => {\n\t\t_join = (m as typeof import(\"node:path\")).join;\n\t});\n}\n\nimport type { KnownProvider } from \"./types.js\";\n\nlet _procEnvCache: Map<string, string> | null = null;\n\n/**\n * Fallback for https://github.com/oven-sh/bun/issues/27802\n * Bun compiled binaries have an empty `process.env` inside sandbox\n * environments on Linux. 
We can recover the env from `/proc/self/environ`.\n */\nfunction getProcEnv(key: string): string | undefined {\n\tif (!process.versions?.bun) return undefined;\n\tif (typeof process === \"undefined\") return undefined;\n\n\t// If process.env already has entries, the bug is not triggered.\n\tif (Object.keys(process.env).length > 0) return undefined;\n\n\tif (_procEnvCache === null) {\n\t\t_procEnvCache = new Map();\n\t\ttry {\n\t\t\tconst { readFileSync } = require(\"node:fs\") as typeof import(\"node:fs\");\n\t\t\tconst data = readFileSync(\"/proc/self/environ\", \"utf-8\");\n\t\t\tfor (const entry of data.split(\"\\0\")) {\n\t\t\t\tconst idx = entry.indexOf(\"=\");\n\t\t\t\tif (idx > 0) {\n\t\t\t\t\t_procEnvCache.set(entry.slice(0, idx), entry.slice(idx + 1));\n\t\t\t\t}\n\t\t\t}\n\t\t} catch {\n\t\t\t// /proc/self/environ may not be readable.\n\t\t}\n\t}\n\n\treturn _procEnvCache.get(key);\n}\n\nlet cachedVertexAdcCredentialsExists: boolean | null = null;\n\nfunction hasVertexAdcCredentials(): boolean {\n\tif (cachedVertexAdcCredentialsExists === null) {\n\t\t// If node modules haven't loaded yet (async import race at startup),\n\t\t// return false WITHOUT caching so the next call retries once they're ready.\n\t\t// Only cache false permanently in a browser environment where fs is never available.\n\t\tif (!_existsSync || !_homedir || !_join) {\n\t\t\tconst isNode = typeof process !== \"undefined\" && (process.versions?.node || process.versions?.bun);\n\t\t\tif (!isNode) {\n\t\t\t\t// Definitively in a browser — safe to cache false permanently\n\t\t\t\tcachedVertexAdcCredentialsExists = false;\n\t\t\t}\n\t\t\treturn false;\n\t\t}\n\n\t\t// Check GOOGLE_APPLICATION_CREDENTIALS env var first (standard way)\n\t\tconst gacPath = process.env.GOOGLE_APPLICATION_CREDENTIALS || getProcEnv(\"GOOGLE_APPLICATION_CREDENTIALS\");\n\t\tif (gacPath) {\n\t\t\tcachedVertexAdcCredentialsExists = _existsSync(gacPath);\n\t\t} else {\n\t\t\t// Fall back to default ADC path (lazy 
evaluation)\n\t\t\tcachedVertexAdcCredentialsExists = _existsSync(\n\t\t\t\t_join(_homedir(), \".config\", \"gcloud\", \"application_default_credentials.json\"),\n\t\t\t);\n\t\t}\n\t}\n\treturn cachedVertexAdcCredentialsExists;\n}\n\nfunction getApiKeyEnvVars(provider: string): readonly string[] | undefined {\n\tif (provider === \"github-copilot\") {\n\t\treturn [\"COPILOT_GITHUB_TOKEN\"];\n\t}\n\n\t// ANTHROPIC_OAUTH_TOKEN takes precedence over ANTHROPIC_API_KEY\n\tif (provider === \"anthropic\") {\n\t\treturn [\"ANTHROPIC_OAUTH_TOKEN\", \"ANTHROPIC_API_KEY\"];\n\t}\n\n\tconst envMap: Record<string, string> = {\n\t\topenai: \"OPENAI_API_KEY\",\n\t\t\"azure-openai-responses\": \"AZURE_OPENAI_API_KEY\",\n\t\tdeepseek: \"DEEPSEEK_API_KEY\",\n\t\tgoogle: \"GEMINI_API_KEY\",\n\t\t\"google-vertex\": \"GOOGLE_CLOUD_API_KEY\",\n\t\tgroq: \"GROQ_API_KEY\",\n\t\tcerebras: \"CEREBRAS_API_KEY\",\n\t\txai: \"XAI_API_KEY\",\n\t\topenrouter: \"OPENROUTER_API_KEY\",\n\t\t\"vercel-ai-gateway\": \"AI_GATEWAY_API_KEY\",\n\t\tzai: \"ZAI_API_KEY\",\n\t\tmistral: \"MISTRAL_API_KEY\",\n\t\tminimax: \"MINIMAX_API_KEY\",\n\t\t\"minimax-cn\": \"MINIMAX_CN_API_KEY\",\n\t\tmoonshotai: \"MOONSHOT_API_KEY\",\n\t\t\"moonshotai-cn\": \"MOONSHOT_API_KEY\",\n\t\thuggingface: \"HF_TOKEN\",\n\t\tfireworks: \"FIREWORKS_API_KEY\",\n\t\ttogether: \"TOGETHER_API_KEY\",\n\t\topencode: \"OPENCODE_API_KEY\",\n\t\t\"opencode-go\": \"OPENCODE_API_KEY\",\n\t\t\"kimi-coding\": \"KIMI_API_KEY\",\n\t\t\"cloudflare-workers-ai\": \"CLOUDFLARE_API_KEY\",\n\t\t\"cloudflare-ai-gateway\": \"CLOUDFLARE_API_KEY\",\n\t\txiaomi: \"XIAOMI_API_KEY\",\n\t\t\"xiaomi-token-plan-cn\": \"XIAOMI_TOKEN_PLAN_CN_API_KEY\",\n\t\t\"xiaomi-token-plan-ams\": \"XIAOMI_TOKEN_PLAN_AMS_API_KEY\",\n\t\t\"xiaomi-token-plan-sgp\": \"XIAOMI_TOKEN_PLAN_SGP_API_KEY\",\n\t};\n\n\tconst envVar = envMap[provider];\n\treturn envVar ? 
[envVar] : undefined;\n}\n\n/**\n * Find configured environment variables that can provide an API key for a provider.\n *\n * This only reports actual API key variables. It intentionally excludes ambient\n * credential sources such as AWS profiles, AWS IAM credentials, and Google\n * Application Default Credentials.\n */\nexport function findEnvKeys(provider: KnownProvider): string[] | undefined;\nexport function findEnvKeys(provider: string): string[] | undefined;\nexport function findEnvKeys(provider: string): string[] | undefined {\n\tconst envVars = getApiKeyEnvVars(provider);\n\tif (!envVars) return undefined;\n\n\tconst found = envVars.filter((envVar) => !!process.env[envVar] || !!getProcEnv(envVar));\n\treturn found.length > 0 ? found : undefined;\n}\n\n/**\n * Get API key for provider from known environment variables, e.g. OPENAI_API_KEY.\n *\n * Will not return API keys for providers that require OAuth tokens.\n */\nexport function getEnvApiKey(provider: KnownProvider): string | undefined;\nexport function getEnvApiKey(provider: string): string | undefined;\nexport function getEnvApiKey(provider: string): string | undefined {\n\tconst envKeys = findEnvKeys(provider);\n\tif (envKeys?.[0]) {\n\t\treturn process.env[envKeys[0]] || getProcEnv(envKeys[0]);\n\t}\n\n\t// Vertex AI supports either an explicit API key or Application Default Credentials.\n\t// Auth is configured via `gcloud auth application-default login`.\n\tif (provider === \"google-vertex\") {\n\t\tconst hasCredentials = hasVertexAdcCredentials();\n\t\tconst hasProject = !!(\n\t\t\tprocess.env.GOOGLE_CLOUD_PROJECT ||\n\t\t\tprocess.env.GCLOUD_PROJECT ||\n\t\t\tgetProcEnv(\"GOOGLE_CLOUD_PROJECT\") ||\n\t\t\tgetProcEnv(\"GCLOUD_PROJECT\")\n\t\t);\n\t\tconst hasLocation = !!(process.env.GOOGLE_CLOUD_LOCATION || getProcEnv(\"GOOGLE_CLOUD_LOCATION\"));\n\n\t\tif (hasCredentials && hasProject && hasLocation) {\n\t\t\treturn \"<authenticated>\";\n\t\t}\n\t}\n\n\tif (provider === \"amazon-bedrock\") 
{\n\t\t// Amazon Bedrock supports multiple credential sources:\n\t\t// 1. AWS_PROFILE - named profile from ~/.aws/credentials\n\t\t// 2. AWS_ACCESS_KEY_ID + AWS_SECRET_ACCESS_KEY - standard IAM keys\n\t\t// 3. AWS_BEARER_TOKEN_BEDROCK - Bedrock bearer token\n\t\t// 4. AWS_CONTAINER_CREDENTIALS_RELATIVE_URI - ECS task roles\n\t\t// 5. AWS_CONTAINER_CREDENTIALS_FULL_URI - ECS task roles (full URI)\n\t\t// 6. AWS_WEB_IDENTITY_TOKEN_FILE - IRSA (IAM Roles for Service Accounts)\n\t\tif (\n\t\t\tprocess.env.AWS_PROFILE ||\n\t\t\t(process.env.AWS_ACCESS_KEY_ID && process.env.AWS_SECRET_ACCESS_KEY) ||\n\t\t\tprocess.env.AWS_BEARER_TOKEN_BEDROCK ||\n\t\t\tprocess.env.AWS_CONTAINER_CREDENTIALS_RELATIVE_URI ||\n\t\t\tprocess.env.AWS_CONTAINER_CREDENTIALS_FULL_URI ||\n\t\t\tprocess.env.AWS_WEB_IDENTITY_TOKEN_FILE ||\n\t\t\tgetProcEnv(\"AWS_PROFILE\") ||\n\t\t\t(getProcEnv(\"AWS_ACCESS_KEY_ID\") && getProcEnv(\"AWS_SECRET_ACCESS_KEY\")) ||\n\t\t\tgetProcEnv(\"AWS_BEARER_TOKEN_BEDROCK\") ||\n\t\t\tgetProcEnv(\"AWS_CONTAINER_CREDENTIALS_RELATIVE_URI\") ||\n\t\t\tgetProcEnv(\"AWS_CONTAINER_CREDENTIALS_FULL_URI\") ||\n\t\t\tgetProcEnv(\"AWS_WEB_IDENTITY_TOKEN_FILE\")\n\t\t) {\n\t\t\treturn \"<authenticated>\";\n\t\t}\n\t}\n\n\treturn undefined;\n}\n"]}
|
package/dist/env-api-keys.js
CHANGED
|
@@ -78,7 +78,7 @@ function hasVertexAdcCredentials() {
|
|
|
78
78
|
}
|
|
79
79
|
function getApiKeyEnvVars(provider) {
|
|
80
80
|
if (provider === "github-copilot") {
|
|
81
|
-
return ["COPILOT_GITHUB_TOKEN"
|
|
81
|
+
return ["COPILOT_GITHUB_TOKEN"];
|
|
82
82
|
}
|
|
83
83
|
// ANTHROPIC_OAUTH_TOKEN takes precedence over ANTHROPIC_API_KEY
|
|
84
84
|
if (provider === "anthropic") {
|
|
@@ -99,12 +99,20 @@ function getApiKeyEnvVars(provider) {
|
|
|
99
99
|
mistral: "MISTRAL_API_KEY",
|
|
100
100
|
minimax: "MINIMAX_API_KEY",
|
|
101
101
|
"minimax-cn": "MINIMAX_CN_API_KEY",
|
|
102
|
+
moonshotai: "MOONSHOT_API_KEY",
|
|
103
|
+
"moonshotai-cn": "MOONSHOT_API_KEY",
|
|
102
104
|
huggingface: "HF_TOKEN",
|
|
103
105
|
fireworks: "FIREWORKS_API_KEY",
|
|
106
|
+
together: "TOGETHER_API_KEY",
|
|
104
107
|
opencode: "OPENCODE_API_KEY",
|
|
105
108
|
"opencode-go": "OPENCODE_API_KEY",
|
|
106
109
|
"kimi-coding": "KIMI_API_KEY",
|
|
107
110
|
"cloudflare-workers-ai": "CLOUDFLARE_API_KEY",
|
|
111
|
+
"cloudflare-ai-gateway": "CLOUDFLARE_API_KEY",
|
|
112
|
+
xiaomi: "XIAOMI_API_KEY",
|
|
113
|
+
"xiaomi-token-plan-cn": "XIAOMI_TOKEN_PLAN_CN_API_KEY",
|
|
114
|
+
"xiaomi-token-plan-ams": "XIAOMI_TOKEN_PLAN_AMS_API_KEY",
|
|
115
|
+
"xiaomi-token-plan-sgp": "XIAOMI_TOKEN_PLAN_SGP_API_KEY",
|
|
108
116
|
};
|
|
109
117
|
const envVar = envMap[provider];
|
|
110
118
|
return envVar ? [envVar] : undefined;
|
package/dist/env-api-keys.js.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"env-api-keys.js","sourceRoot":"","sources":["../src/env-api-keys.ts"],"names":[],"mappings":"AAAA,2EAA2E;AAC3E,IAAI,WAAW,GAA+C,IAAI,CAAC;AACnE,IAAI,QAAQ,GAA4C,IAAI,CAAC;AAC7D,IAAI,KAAK,GAA2C,IAAI,CAAC;AAIzD,MAAM,aAAa,GAAkB,CAAC,SAAS,EAAE,EAAE,CAAC,MAAM,CAAC,SAAS,CAAC,CAAC;AACtE,MAAM,iBAAiB,GAAG,OAAO,GAAG,IAAI,CAAC;AACzC,MAAM,iBAAiB,GAAG,OAAO,GAAG,IAAI,CAAC;AACzC,MAAM,mBAAmB,GAAG,OAAO,GAAG,MAAM,CAAC;AAE7C,+CAA+C;AAC/C,IAAI,OAAO,OAAO,KAAK,WAAW,IAAI,CAAC,OAAO,CAAC,QAAQ,EAAE,IAAI,IAAI,OAAO,CAAC,QAAQ,EAAE,GAAG,CAAC,EAAE,CAAC;IACzF,aAAa,CAAC,iBAAiB,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC;QAC5C,WAAW,GAAI,CAA8B,CAAC,UAAU,CAAC;IAAA,CACzD,CAAC,CAAC;IACH,aAAa,CAAC,iBAAiB,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC;QAC5C,QAAQ,GAAI,CAA8B,CAAC,OAAO,CAAC;IAAA,CACnD,CAAC,CAAC;IACH,aAAa,CAAC,mBAAmB,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC;QAC9C,KAAK,GAAI,CAAgC,CAAC,IAAI,CAAC;IAAA,CAC/C,CAAC,CAAC;AACJ,CAAC;AAID,IAAI,aAAa,GAA+B,IAAI,CAAC;AAErD;;;;GAIG;AACH,SAAS,UAAU,CAAC,GAAW,EAAsB;IACpD,IAAI,CAAC,OAAO,CAAC,QAAQ,EAAE,GAAG;QAAE,OAAO,SAAS,CAAC;IAC7C,IAAI,OAAO,OAAO,KAAK,WAAW;QAAE,OAAO,SAAS,CAAC;IAErD,gEAAgE;IAChE,IAAI,MAAM,CAAC,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC,MAAM,GAAG,CAAC;QAAE,OAAO,SAAS,CAAC;IAE1D,IAAI,aAAa,KAAK,IAAI,EAAE,CAAC;QAC5B,aAAa,GAAG,IAAI,GAAG,EAAE,CAAC;QAC1B,IAAI,CAAC;YACJ,MAAM,EAAE,YAAY,EAAE,GAAG,OAAO,CAAC,SAAS,CAA6B,CAAC;YACxE,MAAM,IAAI,GAAG,YAAY,CAAC,oBAAoB,EAAE,OAAO,CAAC,CAAC;YACzD,KAAK,MAAM,KAAK,IAAI,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,EAAE,CAAC;gBACtC,MAAM,GAAG,GAAG,KAAK,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC;gBAC/B,IAAI,GAAG,GAAG,CAAC,EAAE,CAAC;oBACb,aAAa,CAAC,GAAG,CAAC,KAAK,CAAC,KAAK,CAAC,CAAC,EAAE,GAAG,CAAC,EAAE,KAAK,CAAC,KAAK,CAAC,GAAG,GAAG,CAAC,CAAC,CAAC,CAAC;gBAC9D,CAAC;YACF,CAAC;QACF,CAAC;QAAC,MAAM,CAAC;YACR,0CAA0C;QAC3C,CAAC;IACF,CAAC;IAED,OAAO,aAAa,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC;AAAA,CAC9B;AAED,IAAI,gCAAgC,GAAmB,IAAI,CAAC;AAE5D,SAAS,uBAAuB,GAAY;IAC3C,IAAI,gCAAgC,KAAK,IAAI,EAAE,CAAC;QAC/C,qEAAqE;QACrE,4EAA4E;QAC5E,qFAAqF;QACrF,IAAI,CAAC,WAAW,IA
AI,CAAC,QAAQ,IAAI,CAAC,KAAK,EAAE,CAAC;YACzC,MAAM,MAAM,GAAG,OAAO,OAAO,KAAK,WAAW,IAAI,CAAC,OAAO,CAAC,QAAQ,EAAE,IAAI,IAAI,OAAO,CAAC,QAAQ,EAAE,GAAG,CAAC,CAAC;YACnG,IAAI,CAAC,MAAM,EAAE,CAAC;gBACb,gEAA8D;gBAC9D,gCAAgC,GAAG,KAAK,CAAC;YAC1C,CAAC;YACD,OAAO,KAAK,CAAC;QACd,CAAC;QAED,oEAAoE;QACpE,MAAM,OAAO,GAAG,OAAO,CAAC,GAAG,CAAC,8BAA8B,IAAI,UAAU,CAAC,gCAAgC,CAAC,CAAC;QAC3G,IAAI,OAAO,EAAE,CAAC;YACb,gCAAgC,GAAG,WAAW,CAAC,OAAO,CAAC,CAAC;QACzD,CAAC;aAAM,CAAC;YACP,kDAAkD;YAClD,gCAAgC,GAAG,WAAW,CAC7C,KAAK,CAAC,QAAQ,EAAE,EAAE,SAAS,EAAE,QAAQ,EAAE,sCAAsC,CAAC,CAC9E,CAAC;QACH,CAAC;IACF,CAAC;IACD,OAAO,gCAAgC,CAAC;AAAA,CACxC;AAED,SAAS,gBAAgB,CAAC,QAAgB,EAAiC;IAC1E,IAAI,QAAQ,KAAK,gBAAgB,EAAE,CAAC;QACnC,OAAO,CAAC,sBAAsB,EAAE,UAAU,EAAE,cAAc,CAAC,CAAC;IAC7D,CAAC;IAED,gEAAgE;IAChE,IAAI,QAAQ,KAAK,WAAW,EAAE,CAAC;QAC9B,OAAO,CAAC,uBAAuB,EAAE,mBAAmB,CAAC,CAAC;IACvD,CAAC;IAED,MAAM,MAAM,GAA2B;QACtC,MAAM,EAAE,gBAAgB;QACxB,wBAAwB,EAAE,sBAAsB;QAChD,QAAQ,EAAE,kBAAkB;QAC5B,MAAM,EAAE,gBAAgB;QACxB,eAAe,EAAE,sBAAsB;QACvC,IAAI,EAAE,cAAc;QACpB,QAAQ,EAAE,kBAAkB;QAC5B,GAAG,EAAE,aAAa;QAClB,UAAU,EAAE,oBAAoB;QAChC,mBAAmB,EAAE,oBAAoB;QACzC,GAAG,EAAE,aAAa;QAClB,OAAO,EAAE,iBAAiB;QAC1B,OAAO,EAAE,iBAAiB;QAC1B,YAAY,EAAE,oBAAoB;QAClC,WAAW,EAAE,UAAU;QACvB,SAAS,EAAE,mBAAmB;QAC9B,QAAQ,EAAE,kBAAkB;QAC5B,aAAa,EAAE,kBAAkB;QACjC,aAAa,EAAE,cAAc;QAC7B,uBAAuB,EAAE,oBAAoB;KAC7C,CAAC;IAEF,MAAM,MAAM,GAAG,MAAM,CAAC,QAAQ,CAAC,CAAC;IAChC,OAAO,MAAM,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,SAAS,CAAC;AAAA,CACrC;AAWD,MAAM,UAAU,WAAW,CAAC,QAAgB,EAAwB;IACnE,MAAM,OAAO,GAAG,gBAAgB,CAAC,QAAQ,CAAC,CAAC;IAC3C,IAAI,CAAC,OAAO;QAAE,OAAO,SAAS,CAAC;IAE/B,MAAM,KAAK,GAAG,OAAO,CAAC,MAAM,CAAC,CAAC,MAAM,EAAE,EAAE,CAAC,CAAC,CAAC,OAAO,CAAC,GAAG,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,UAAU,CAAC,MAAM,CAAC,CAAC,CAAC;IACxF,OAAO,KAAK,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,SAAS,CAAC;AAAA,CAC5C;AASD,MAAM,UAAU,YAAY,CAAC,QAAgB,EAAsB;IAClE,MAAM,OAAO,GAAG,WAAW,CAAC,QAAQ,CAAC,CAAC;IACtC,IAAI,OAAO,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC;QAClB,OAAO,OAAO,CAAC,GAAG,CAAC,OAAO,
CAAC,CAAC,CAAC,CAAC,IAAI,UAAU,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,CAAC;IAC1D,CAAC;IAED,oFAAoF;IACpF,kEAAkE;IAClE,IAAI,QAAQ,KAAK,eAAe,EAAE,CAAC;QAClC,MAAM,cAAc,GAAG,uBAAuB,EAAE,CAAC;QACjD,MAAM,UAAU,GAAG,CAAC,CAAC,CACpB,OAAO,CAAC,GAAG,CAAC,oBAAoB;YAChC,OAAO,CAAC,GAAG,CAAC,cAAc;YAC1B,UAAU,CAAC,sBAAsB,CAAC;YAClC,UAAU,CAAC,gBAAgB,CAAC,CAC5B,CAAC;QACF,MAAM,WAAW,GAAG,CAAC,CAAC,CAAC,OAAO,CAAC,GAAG,CAAC,qBAAqB,IAAI,UAAU,CAAC,uBAAuB,CAAC,CAAC,CAAC;QAEjG,IAAI,cAAc,IAAI,UAAU,IAAI,WAAW,EAAE,CAAC;YACjD,OAAO,iBAAiB,CAAC;QAC1B,CAAC;IACF,CAAC;IAED,IAAI,QAAQ,KAAK,gBAAgB,EAAE,CAAC;QACnC,uDAAuD;QACvD,yDAAyD;QACzD,mEAAmE;QACnE,qDAAqD;QACrD,6DAA6D;QAC7D,oEAAoE;QACpE,yEAAyE;QACzE,IACC,OAAO,CAAC,GAAG,CAAC,WAAW;YACvB,CAAC,OAAO,CAAC,GAAG,CAAC,iBAAiB,IAAI,OAAO,CAAC,GAAG,CAAC,qBAAqB,CAAC;YACpE,OAAO,CAAC,GAAG,CAAC,wBAAwB;YACpC,OAAO,CAAC,GAAG,CAAC,sCAAsC;YAClD,OAAO,CAAC,GAAG,CAAC,kCAAkC;YAC9C,OAAO,CAAC,GAAG,CAAC,2BAA2B;YACvC,UAAU,CAAC,aAAa,CAAC;YACzB,CAAC,UAAU,CAAC,mBAAmB,CAAC,IAAI,UAAU,CAAC,uBAAuB,CAAC,CAAC;YACxE,UAAU,CAAC,0BAA0B,CAAC;YACtC,UAAU,CAAC,wCAAwC,CAAC;YACpD,UAAU,CAAC,oCAAoC,CAAC;YAChD,UAAU,CAAC,6BAA6B,CAAC,EACxC,CAAC;YACF,OAAO,iBAAiB,CAAC;QAC1B,CAAC;IACF,CAAC;IAED,OAAO,SAAS,CAAC;AAAA,CACjB","sourcesContent":["// NEVER convert to top-level imports - breaks browser/Vite builds (web-ui)\nlet _existsSync: typeof import(\"node:fs\").existsSync | null = null;\nlet _homedir: typeof import(\"node:os\").homedir | null = null;\nlet _join: typeof import(\"node:path\").join | null = null;\n\ntype DynamicImport = (specifier: string) => Promise<unknown>;\n\nconst dynamicImport: DynamicImport = (specifier) => import(specifier);\nconst NODE_FS_SPECIFIER = \"node:\" + \"fs\";\nconst NODE_OS_SPECIFIER = \"node:\" + \"os\";\nconst NODE_PATH_SPECIFIER = \"node:\" + \"path\";\n\n// Eagerly load in Node.js/Bun environment only\nif (typeof process !== \"undefined\" && (process.versions?.node || process.versions?.bun)) {\n\tdynamicImport(NODE_FS_SPECIFIER).then((m) => {\n\t\t_existsSync = (m as typeof 
import(\"node:fs\")).existsSync;\n\t});\n\tdynamicImport(NODE_OS_SPECIFIER).then((m) => {\n\t\t_homedir = (m as typeof import(\"node:os\")).homedir;\n\t});\n\tdynamicImport(NODE_PATH_SPECIFIER).then((m) => {\n\t\t_join = (m as typeof import(\"node:path\")).join;\n\t});\n}\n\nimport type { KnownProvider } from \"./types.js\";\n\nlet _procEnvCache: Map<string, string> | null = null;\n\n/**\n * Fallback for https://github.com/oven-sh/bun/issues/27802\n * Bun compiled binaries have an empty `process.env` inside sandbox\n * environments on Linux. We can recover the env from `/proc/self/environ`.\n */\nfunction getProcEnv(key: string): string | undefined {\n\tif (!process.versions?.bun) return undefined;\n\tif (typeof process === \"undefined\") return undefined;\n\n\t// If process.env already has entries, the bug is not triggered.\n\tif (Object.keys(process.env).length > 0) return undefined;\n\n\tif (_procEnvCache === null) {\n\t\t_procEnvCache = new Map();\n\t\ttry {\n\t\t\tconst { readFileSync } = require(\"node:fs\") as typeof import(\"node:fs\");\n\t\t\tconst data = readFileSync(\"/proc/self/environ\", \"utf-8\");\n\t\t\tfor (const entry of data.split(\"\\0\")) {\n\t\t\t\tconst idx = entry.indexOf(\"=\");\n\t\t\t\tif (idx > 0) {\n\t\t\t\t\t_procEnvCache.set(entry.slice(0, idx), entry.slice(idx + 1));\n\t\t\t\t}\n\t\t\t}\n\t\t} catch {\n\t\t\t// /proc/self/environ may not be readable.\n\t\t}\n\t}\n\n\treturn _procEnvCache.get(key);\n}\n\nlet cachedVertexAdcCredentialsExists: boolean | null = null;\n\nfunction hasVertexAdcCredentials(): boolean {\n\tif (cachedVertexAdcCredentialsExists === null) {\n\t\t// If node modules haven't loaded yet (async import race at startup),\n\t\t// return false WITHOUT caching so the next call retries once they're ready.\n\t\t// Only cache false permanently in a browser environment where fs is never available.\n\t\tif (!_existsSync || !_homedir || !_join) {\n\t\t\tconst isNode = typeof process !== \"undefined\" && (process.versions?.node 
|| process.versions?.bun);\n\t\t\tif (!isNode) {\n\t\t\t\t// Definitively in a browser — safe to cache false permanently\n\t\t\t\tcachedVertexAdcCredentialsExists = false;\n\t\t\t}\n\t\t\treturn false;\n\t\t}\n\n\t\t// Check GOOGLE_APPLICATION_CREDENTIALS env var first (standard way)\n\t\tconst gacPath = process.env.GOOGLE_APPLICATION_CREDENTIALS || getProcEnv(\"GOOGLE_APPLICATION_CREDENTIALS\");\n\t\tif (gacPath) {\n\t\t\tcachedVertexAdcCredentialsExists = _existsSync(gacPath);\n\t\t} else {\n\t\t\t// Fall back to default ADC path (lazy evaluation)\n\t\t\tcachedVertexAdcCredentialsExists = _existsSync(\n\t\t\t\t_join(_homedir(), \".config\", \"gcloud\", \"application_default_credentials.json\"),\n\t\t\t);\n\t\t}\n\t}\n\treturn cachedVertexAdcCredentialsExists;\n}\n\nfunction getApiKeyEnvVars(provider: string): readonly string[] | undefined {\n\tif (provider === \"github-copilot\") {\n\t\treturn [\"COPILOT_GITHUB_TOKEN\", \"GH_TOKEN\", \"GITHUB_TOKEN\"];\n\t}\n\n\t// ANTHROPIC_OAUTH_TOKEN takes precedence over ANTHROPIC_API_KEY\n\tif (provider === \"anthropic\") {\n\t\treturn [\"ANTHROPIC_OAUTH_TOKEN\", \"ANTHROPIC_API_KEY\"];\n\t}\n\n\tconst envMap: Record<string, string> = {\n\t\topenai: \"OPENAI_API_KEY\",\n\t\t\"azure-openai-responses\": \"AZURE_OPENAI_API_KEY\",\n\t\tdeepseek: \"DEEPSEEK_API_KEY\",\n\t\tgoogle: \"GEMINI_API_KEY\",\n\t\t\"google-vertex\": \"GOOGLE_CLOUD_API_KEY\",\n\t\tgroq: \"GROQ_API_KEY\",\n\t\tcerebras: \"CEREBRAS_API_KEY\",\n\t\txai: \"XAI_API_KEY\",\n\t\topenrouter: \"OPENROUTER_API_KEY\",\n\t\t\"vercel-ai-gateway\": \"AI_GATEWAY_API_KEY\",\n\t\tzai: \"ZAI_API_KEY\",\n\t\tmistral: \"MISTRAL_API_KEY\",\n\t\tminimax: \"MINIMAX_API_KEY\",\n\t\t\"minimax-cn\": \"MINIMAX_CN_API_KEY\",\n\t\thuggingface: \"HF_TOKEN\",\n\t\tfireworks: \"FIREWORKS_API_KEY\",\n\t\topencode: \"OPENCODE_API_KEY\",\n\t\t\"opencode-go\": \"OPENCODE_API_KEY\",\n\t\t\"kimi-coding\": \"KIMI_API_KEY\",\n\t\t\"cloudflare-workers-ai\": 
\"CLOUDFLARE_API_KEY\",\n\t};\n\n\tconst envVar = envMap[provider];\n\treturn envVar ? [envVar] : undefined;\n}\n\n/**\n * Find configured environment variables that can provide an API key for a provider.\n *\n * This only reports actual API key variables. It intentionally excludes ambient\n * credential sources such as AWS profiles, AWS IAM credentials, and Google\n * Application Default Credentials.\n */\nexport function findEnvKeys(provider: KnownProvider): string[] | undefined;\nexport function findEnvKeys(provider: string): string[] | undefined;\nexport function findEnvKeys(provider: string): string[] | undefined {\n\tconst envVars = getApiKeyEnvVars(provider);\n\tif (!envVars) return undefined;\n\n\tconst found = envVars.filter((envVar) => !!process.env[envVar] || !!getProcEnv(envVar));\n\treturn found.length > 0 ? found : undefined;\n}\n\n/**\n * Get API key for provider from known environment variables, e.g. OPENAI_API_KEY.\n *\n * Will not return API keys for providers that require OAuth tokens.\n */\nexport function getEnvApiKey(provider: KnownProvider): string | undefined;\nexport function getEnvApiKey(provider: string): string | undefined;\nexport function getEnvApiKey(provider: string): string | undefined {\n\tconst envKeys = findEnvKeys(provider);\n\tif (envKeys?.[0]) {\n\t\treturn process.env[envKeys[0]] || getProcEnv(envKeys[0]);\n\t}\n\n\t// Vertex AI supports either an explicit API key or Application Default Credentials.\n\t// Auth is configured via `gcloud auth application-default login`.\n\tif (provider === \"google-vertex\") {\n\t\tconst hasCredentials = hasVertexAdcCredentials();\n\t\tconst hasProject = !!(\n\t\t\tprocess.env.GOOGLE_CLOUD_PROJECT ||\n\t\t\tprocess.env.GCLOUD_PROJECT ||\n\t\t\tgetProcEnv(\"GOOGLE_CLOUD_PROJECT\") ||\n\t\t\tgetProcEnv(\"GCLOUD_PROJECT\")\n\t\t);\n\t\tconst hasLocation = !!(process.env.GOOGLE_CLOUD_LOCATION || getProcEnv(\"GOOGLE_CLOUD_LOCATION\"));\n\n\t\tif (hasCredentials && hasProject && hasLocation) 
{\n\t\t\treturn \"<authenticated>\";\n\t\t}\n\t}\n\n\tif (provider === \"amazon-bedrock\") {\n\t\t// Amazon Bedrock supports multiple credential sources:\n\t\t// 1. AWS_PROFILE - named profile from ~/.aws/credentials\n\t\t// 2. AWS_ACCESS_KEY_ID + AWS_SECRET_ACCESS_KEY - standard IAM keys\n\t\t// 3. AWS_BEARER_TOKEN_BEDROCK - Bedrock bearer token\n\t\t// 4. AWS_CONTAINER_CREDENTIALS_RELATIVE_URI - ECS task roles\n\t\t// 5. AWS_CONTAINER_CREDENTIALS_FULL_URI - ECS task roles (full URI)\n\t\t// 6. AWS_WEB_IDENTITY_TOKEN_FILE - IRSA (IAM Roles for Service Accounts)\n\t\tif (\n\t\t\tprocess.env.AWS_PROFILE ||\n\t\t\t(process.env.AWS_ACCESS_KEY_ID && process.env.AWS_SECRET_ACCESS_KEY) ||\n\t\t\tprocess.env.AWS_BEARER_TOKEN_BEDROCK ||\n\t\t\tprocess.env.AWS_CONTAINER_CREDENTIALS_RELATIVE_URI ||\n\t\t\tprocess.env.AWS_CONTAINER_CREDENTIALS_FULL_URI ||\n\t\t\tprocess.env.AWS_WEB_IDENTITY_TOKEN_FILE ||\n\t\t\tgetProcEnv(\"AWS_PROFILE\") ||\n\t\t\t(getProcEnv(\"AWS_ACCESS_KEY_ID\") && getProcEnv(\"AWS_SECRET_ACCESS_KEY\")) ||\n\t\t\tgetProcEnv(\"AWS_BEARER_TOKEN_BEDROCK\") ||\n\t\t\tgetProcEnv(\"AWS_CONTAINER_CREDENTIALS_RELATIVE_URI\") ||\n\t\t\tgetProcEnv(\"AWS_CONTAINER_CREDENTIALS_FULL_URI\") ||\n\t\t\tgetProcEnv(\"AWS_WEB_IDENTITY_TOKEN_FILE\")\n\t\t) {\n\t\t\treturn \"<authenticated>\";\n\t\t}\n\t}\n\n\treturn undefined;\n}\n"]}
|
|
1
|
+
{"version":3,"file":"env-api-keys.js","sourceRoot":"","sources":["../src/env-api-keys.ts"],"names":[],"mappings":"AAAA,2EAA2E;AAC3E,IAAI,WAAW,GAA+C,IAAI,CAAC;AACnE,IAAI,QAAQ,GAA4C,IAAI,CAAC;AAC7D,IAAI,KAAK,GAA2C,IAAI,CAAC;AAIzD,MAAM,aAAa,GAAkB,CAAC,SAAS,EAAE,EAAE,CAAC,MAAM,CAAC,SAAS,CAAC,CAAC;AACtE,MAAM,iBAAiB,GAAG,OAAO,GAAG,IAAI,CAAC;AACzC,MAAM,iBAAiB,GAAG,OAAO,GAAG,IAAI,CAAC;AACzC,MAAM,mBAAmB,GAAG,OAAO,GAAG,MAAM,CAAC;AAE7C,+CAA+C;AAC/C,IAAI,OAAO,OAAO,KAAK,WAAW,IAAI,CAAC,OAAO,CAAC,QAAQ,EAAE,IAAI,IAAI,OAAO,CAAC,QAAQ,EAAE,GAAG,CAAC,EAAE,CAAC;IACzF,aAAa,CAAC,iBAAiB,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC;QAC5C,WAAW,GAAI,CAA8B,CAAC,UAAU,CAAC;IAAA,CACzD,CAAC,CAAC;IACH,aAAa,CAAC,iBAAiB,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC;QAC5C,QAAQ,GAAI,CAA8B,CAAC,OAAO,CAAC;IAAA,CACnD,CAAC,CAAC;IACH,aAAa,CAAC,mBAAmB,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC;QAC9C,KAAK,GAAI,CAAgC,CAAC,IAAI,CAAC;IAAA,CAC/C,CAAC,CAAC;AACJ,CAAC;AAID,IAAI,aAAa,GAA+B,IAAI,CAAC;AAErD;;;;GAIG;AACH,SAAS,UAAU,CAAC,GAAW,EAAsB;IACpD,IAAI,CAAC,OAAO,CAAC,QAAQ,EAAE,GAAG;QAAE,OAAO,SAAS,CAAC;IAC7C,IAAI,OAAO,OAAO,KAAK,WAAW;QAAE,OAAO,SAAS,CAAC;IAErD,gEAAgE;IAChE,IAAI,MAAM,CAAC,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC,MAAM,GAAG,CAAC;QAAE,OAAO,SAAS,CAAC;IAE1D,IAAI,aAAa,KAAK,IAAI,EAAE,CAAC;QAC5B,aAAa,GAAG,IAAI,GAAG,EAAE,CAAC;QAC1B,IAAI,CAAC;YACJ,MAAM,EAAE,YAAY,EAAE,GAAG,OAAO,CAAC,SAAS,CAA6B,CAAC;YACxE,MAAM,IAAI,GAAG,YAAY,CAAC,oBAAoB,EAAE,OAAO,CAAC,CAAC;YACzD,KAAK,MAAM,KAAK,IAAI,IAAI,CAAC,KAAK,CAAC,IAAI,CAAC,EAAE,CAAC;gBACtC,MAAM,GAAG,GAAG,KAAK,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC;gBAC/B,IAAI,GAAG,GAAG,CAAC,EAAE,CAAC;oBACb,aAAa,CAAC,GAAG,CAAC,KAAK,CAAC,KAAK,CAAC,CAAC,EAAE,GAAG,CAAC,EAAE,KAAK,CAAC,KAAK,CAAC,GAAG,GAAG,CAAC,CAAC,CAAC,CAAC;gBAC9D,CAAC;YACF,CAAC;QACF,CAAC;QAAC,MAAM,CAAC;YACR,0CAA0C;QAC3C,CAAC;IACF,CAAC;IAED,OAAO,aAAa,CAAC,GAAG,CAAC,GAAG,CAAC,CAAC;AAAA,CAC9B;AAED,IAAI,gCAAgC,GAAmB,IAAI,CAAC;AAE5D,SAAS,uBAAuB,GAAY;IAC3C,IAAI,gCAAgC,KAAK,IAAI,EAAE,CAAC;QAC/C,qEAAqE;QACrE,4EAA4E;QAC5E,qFAAqF;QACrF,IAAI,CAAC,WAAW,IA
AI,CAAC,QAAQ,IAAI,CAAC,KAAK,EAAE,CAAC;YACzC,MAAM,MAAM,GAAG,OAAO,OAAO,KAAK,WAAW,IAAI,CAAC,OAAO,CAAC,QAAQ,EAAE,IAAI,IAAI,OAAO,CAAC,QAAQ,EAAE,GAAG,CAAC,CAAC;YACnG,IAAI,CAAC,MAAM,EAAE,CAAC;gBACb,gEAA8D;gBAC9D,gCAAgC,GAAG,KAAK,CAAC;YAC1C,CAAC;YACD,OAAO,KAAK,CAAC;QACd,CAAC;QAED,oEAAoE;QACpE,MAAM,OAAO,GAAG,OAAO,CAAC,GAAG,CAAC,8BAA8B,IAAI,UAAU,CAAC,gCAAgC,CAAC,CAAC;QAC3G,IAAI,OAAO,EAAE,CAAC;YACb,gCAAgC,GAAG,WAAW,CAAC,OAAO,CAAC,CAAC;QACzD,CAAC;aAAM,CAAC;YACP,kDAAkD;YAClD,gCAAgC,GAAG,WAAW,CAC7C,KAAK,CAAC,QAAQ,EAAE,EAAE,SAAS,EAAE,QAAQ,EAAE,sCAAsC,CAAC,CAC9E,CAAC;QACH,CAAC;IACF,CAAC;IACD,OAAO,gCAAgC,CAAC;AAAA,CACxC;AAED,SAAS,gBAAgB,CAAC,QAAgB,EAAiC;IAC1E,IAAI,QAAQ,KAAK,gBAAgB,EAAE,CAAC;QACnC,OAAO,CAAC,sBAAsB,CAAC,CAAC;IACjC,CAAC;IAED,gEAAgE;IAChE,IAAI,QAAQ,KAAK,WAAW,EAAE,CAAC;QAC9B,OAAO,CAAC,uBAAuB,EAAE,mBAAmB,CAAC,CAAC;IACvD,CAAC;IAED,MAAM,MAAM,GAA2B;QACtC,MAAM,EAAE,gBAAgB;QACxB,wBAAwB,EAAE,sBAAsB;QAChD,QAAQ,EAAE,kBAAkB;QAC5B,MAAM,EAAE,gBAAgB;QACxB,eAAe,EAAE,sBAAsB;QACvC,IAAI,EAAE,cAAc;QACpB,QAAQ,EAAE,kBAAkB;QAC5B,GAAG,EAAE,aAAa;QAClB,UAAU,EAAE,oBAAoB;QAChC,mBAAmB,EAAE,oBAAoB;QACzC,GAAG,EAAE,aAAa;QAClB,OAAO,EAAE,iBAAiB;QAC1B,OAAO,EAAE,iBAAiB;QAC1B,YAAY,EAAE,oBAAoB;QAClC,UAAU,EAAE,kBAAkB;QAC9B,eAAe,EAAE,kBAAkB;QACnC,WAAW,EAAE,UAAU;QACvB,SAAS,EAAE,mBAAmB;QAC9B,QAAQ,EAAE,kBAAkB;QAC5B,QAAQ,EAAE,kBAAkB;QAC5B,aAAa,EAAE,kBAAkB;QACjC,aAAa,EAAE,cAAc;QAC7B,uBAAuB,EAAE,oBAAoB;QAC7C,uBAAuB,EAAE,oBAAoB;QAC7C,MAAM,EAAE,gBAAgB;QACxB,sBAAsB,EAAE,8BAA8B;QACtD,uBAAuB,EAAE,+BAA+B;QACxD,uBAAuB,EAAE,+BAA+B;KACxD,CAAC;IAEF,MAAM,MAAM,GAAG,MAAM,CAAC,QAAQ,CAAC,CAAC;IAChC,OAAO,MAAM,CAAC,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,SAAS,CAAC;AAAA,CACrC;AAWD,MAAM,UAAU,WAAW,CAAC,QAAgB,EAAwB;IACnE,MAAM,OAAO,GAAG,gBAAgB,CAAC,QAAQ,CAAC,CAAC;IAC3C,IAAI,CAAC,OAAO;QAAE,OAAO,SAAS,CAAC;IAE/B,MAAM,KAAK,GAAG,OAAO,CAAC,MAAM,CAAC,CAAC,MAAM,EAAE,EAAE,CAAC,CAAC,CAAC,OAAO,CAAC,GAAG,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,UAAU,CAAC,MAAM,CAAC,CAAC,CAAC;IACxF,OAAO,KAAK,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,SAAS,CAAC;AAAA
,CAC5C;AASD,MAAM,UAAU,YAAY,CAAC,QAAgB,EAAsB;IAClE,MAAM,OAAO,GAAG,WAAW,CAAC,QAAQ,CAAC,CAAC;IACtC,IAAI,OAAO,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC;QAClB,OAAO,OAAO,CAAC,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,IAAI,UAAU,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,CAAC;IAC1D,CAAC;IAED,oFAAoF;IACpF,kEAAkE;IAClE,IAAI,QAAQ,KAAK,eAAe,EAAE,CAAC;QAClC,MAAM,cAAc,GAAG,uBAAuB,EAAE,CAAC;QACjD,MAAM,UAAU,GAAG,CAAC,CAAC,CACpB,OAAO,CAAC,GAAG,CAAC,oBAAoB;YAChC,OAAO,CAAC,GAAG,CAAC,cAAc;YAC1B,UAAU,CAAC,sBAAsB,CAAC;YAClC,UAAU,CAAC,gBAAgB,CAAC,CAC5B,CAAC;QACF,MAAM,WAAW,GAAG,CAAC,CAAC,CAAC,OAAO,CAAC,GAAG,CAAC,qBAAqB,IAAI,UAAU,CAAC,uBAAuB,CAAC,CAAC,CAAC;QAEjG,IAAI,cAAc,IAAI,UAAU,IAAI,WAAW,EAAE,CAAC;YACjD,OAAO,iBAAiB,CAAC;QAC1B,CAAC;IACF,CAAC;IAED,IAAI,QAAQ,KAAK,gBAAgB,EAAE,CAAC;QACnC,uDAAuD;QACvD,yDAAyD;QACzD,mEAAmE;QACnE,qDAAqD;QACrD,6DAA6D;QAC7D,oEAAoE;QACpE,yEAAyE;QACzE,IACC,OAAO,CAAC,GAAG,CAAC,WAAW;YACvB,CAAC,OAAO,CAAC,GAAG,CAAC,iBAAiB,IAAI,OAAO,CAAC,GAAG,CAAC,qBAAqB,CAAC;YACpE,OAAO,CAAC,GAAG,CAAC,wBAAwB;YACpC,OAAO,CAAC,GAAG,CAAC,sCAAsC;YAClD,OAAO,CAAC,GAAG,CAAC,kCAAkC;YAC9C,OAAO,CAAC,GAAG,CAAC,2BAA2B;YACvC,UAAU,CAAC,aAAa,CAAC;YACzB,CAAC,UAAU,CAAC,mBAAmB,CAAC,IAAI,UAAU,CAAC,uBAAuB,CAAC,CAAC;YACxE,UAAU,CAAC,0BAA0B,CAAC;YACtC,UAAU,CAAC,wCAAwC,CAAC;YACpD,UAAU,CAAC,oCAAoC,CAAC;YAChD,UAAU,CAAC,6BAA6B,CAAC,EACxC,CAAC;YACF,OAAO,iBAAiB,CAAC;QAC1B,CAAC;IACF,CAAC;IAED,OAAO,SAAS,CAAC;AAAA,CACjB","sourcesContent":["// NEVER convert to top-level imports - breaks browser/Vite builds (web-ui)\nlet _existsSync: typeof import(\"node:fs\").existsSync | null = null;\nlet _homedir: typeof import(\"node:os\").homedir | null = null;\nlet _join: typeof import(\"node:path\").join | null = null;\n\ntype DynamicImport = (specifier: string) => Promise<unknown>;\n\nconst dynamicImport: DynamicImport = (specifier) => import(specifier);\nconst NODE_FS_SPECIFIER = \"node:\" + \"fs\";\nconst NODE_OS_SPECIFIER = \"node:\" + \"os\";\nconst NODE_PATH_SPECIFIER = \"node:\" + \"path\";\n\n// Eagerly load in Node.js/Bun environment only\nif (typeof 
process !== \"undefined\" && (process.versions?.node || process.versions?.bun)) {\n\tdynamicImport(NODE_FS_SPECIFIER).then((m) => {\n\t\t_existsSync = (m as typeof import(\"node:fs\")).existsSync;\n\t});\n\tdynamicImport(NODE_OS_SPECIFIER).then((m) => {\n\t\t_homedir = (m as typeof import(\"node:os\")).homedir;\n\t});\n\tdynamicImport(NODE_PATH_SPECIFIER).then((m) => {\n\t\t_join = (m as typeof import(\"node:path\")).join;\n\t});\n}\n\nimport type { KnownProvider } from \"./types.js\";\n\nlet _procEnvCache: Map<string, string> | null = null;\n\n/**\n * Fallback for https://github.com/oven-sh/bun/issues/27802\n * Bun compiled binaries have an empty `process.env` inside sandbox\n * environments on Linux. We can recover the env from `/proc/self/environ`.\n */\nfunction getProcEnv(key: string): string | undefined {\n\tif (!process.versions?.bun) return undefined;\n\tif (typeof process === \"undefined\") return undefined;\n\n\t// If process.env already has entries, the bug is not triggered.\n\tif (Object.keys(process.env).length > 0) return undefined;\n\n\tif (_procEnvCache === null) {\n\t\t_procEnvCache = new Map();\n\t\ttry {\n\t\t\tconst { readFileSync } = require(\"node:fs\") as typeof import(\"node:fs\");\n\t\t\tconst data = readFileSync(\"/proc/self/environ\", \"utf-8\");\n\t\t\tfor (const entry of data.split(\"\\0\")) {\n\t\t\t\tconst idx = entry.indexOf(\"=\");\n\t\t\t\tif (idx > 0) {\n\t\t\t\t\t_procEnvCache.set(entry.slice(0, idx), entry.slice(idx + 1));\n\t\t\t\t}\n\t\t\t}\n\t\t} catch {\n\t\t\t// /proc/self/environ may not be readable.\n\t\t}\n\t}\n\n\treturn _procEnvCache.get(key);\n}\n\nlet cachedVertexAdcCredentialsExists: boolean | null = null;\n\nfunction hasVertexAdcCredentials(): boolean {\n\tif (cachedVertexAdcCredentialsExists === null) {\n\t\t// If node modules haven't loaded yet (async import race at startup),\n\t\t// return false WITHOUT caching so the next call retries once they're ready.\n\t\t// Only cache false permanently in a browser 
environment where fs is never available.\n\t\tif (!_existsSync || !_homedir || !_join) {\n\t\t\tconst isNode = typeof process !== \"undefined\" && (process.versions?.node || process.versions?.bun);\n\t\t\tif (!isNode) {\n\t\t\t\t// Definitively in a browser — safe to cache false permanently\n\t\t\t\tcachedVertexAdcCredentialsExists = false;\n\t\t\t}\n\t\t\treturn false;\n\t\t}\n\n\t\t// Check GOOGLE_APPLICATION_CREDENTIALS env var first (standard way)\n\t\tconst gacPath = process.env.GOOGLE_APPLICATION_CREDENTIALS || getProcEnv(\"GOOGLE_APPLICATION_CREDENTIALS\");\n\t\tif (gacPath) {\n\t\t\tcachedVertexAdcCredentialsExists = _existsSync(gacPath);\n\t\t} else {\n\t\t\t// Fall back to default ADC path (lazy evaluation)\n\t\t\tcachedVertexAdcCredentialsExists = _existsSync(\n\t\t\t\t_join(_homedir(), \".config\", \"gcloud\", \"application_default_credentials.json\"),\n\t\t\t);\n\t\t}\n\t}\n\treturn cachedVertexAdcCredentialsExists;\n}\n\nfunction getApiKeyEnvVars(provider: string): readonly string[] | undefined {\n\tif (provider === \"github-copilot\") {\n\t\treturn [\"COPILOT_GITHUB_TOKEN\"];\n\t}\n\n\t// ANTHROPIC_OAUTH_TOKEN takes precedence over ANTHROPIC_API_KEY\n\tif (provider === \"anthropic\") {\n\t\treturn [\"ANTHROPIC_OAUTH_TOKEN\", \"ANTHROPIC_API_KEY\"];\n\t}\n\n\tconst envMap: Record<string, string> = {\n\t\topenai: \"OPENAI_API_KEY\",\n\t\t\"azure-openai-responses\": \"AZURE_OPENAI_API_KEY\",\n\t\tdeepseek: \"DEEPSEEK_API_KEY\",\n\t\tgoogle: \"GEMINI_API_KEY\",\n\t\t\"google-vertex\": \"GOOGLE_CLOUD_API_KEY\",\n\t\tgroq: \"GROQ_API_KEY\",\n\t\tcerebras: \"CEREBRAS_API_KEY\",\n\t\txai: \"XAI_API_KEY\",\n\t\topenrouter: \"OPENROUTER_API_KEY\",\n\t\t\"vercel-ai-gateway\": \"AI_GATEWAY_API_KEY\",\n\t\tzai: \"ZAI_API_KEY\",\n\t\tmistral: \"MISTRAL_API_KEY\",\n\t\tminimax: \"MINIMAX_API_KEY\",\n\t\t\"minimax-cn\": \"MINIMAX_CN_API_KEY\",\n\t\tmoonshotai: \"MOONSHOT_API_KEY\",\n\t\t\"moonshotai-cn\": \"MOONSHOT_API_KEY\",\n\t\thuggingface: 
\"HF_TOKEN\",\n\t\tfireworks: \"FIREWORKS_API_KEY\",\n\t\ttogether: \"TOGETHER_API_KEY\",\n\t\topencode: \"OPENCODE_API_KEY\",\n\t\t\"opencode-go\": \"OPENCODE_API_KEY\",\n\t\t\"kimi-coding\": \"KIMI_API_KEY\",\n\t\t\"cloudflare-workers-ai\": \"CLOUDFLARE_API_KEY\",\n\t\t\"cloudflare-ai-gateway\": \"CLOUDFLARE_API_KEY\",\n\t\txiaomi: \"XIAOMI_API_KEY\",\n\t\t\"xiaomi-token-plan-cn\": \"XIAOMI_TOKEN_PLAN_CN_API_KEY\",\n\t\t\"xiaomi-token-plan-ams\": \"XIAOMI_TOKEN_PLAN_AMS_API_KEY\",\n\t\t\"xiaomi-token-plan-sgp\": \"XIAOMI_TOKEN_PLAN_SGP_API_KEY\",\n\t};\n\n\tconst envVar = envMap[provider];\n\treturn envVar ? [envVar] : undefined;\n}\n\n/**\n * Find configured environment variables that can provide an API key for a provider.\n *\n * This only reports actual API key variables. It intentionally excludes ambient\n * credential sources such as AWS profiles, AWS IAM credentials, and Google\n * Application Default Credentials.\n */\nexport function findEnvKeys(provider: KnownProvider): string[] | undefined;\nexport function findEnvKeys(provider: string): string[] | undefined;\nexport function findEnvKeys(provider: string): string[] | undefined {\n\tconst envVars = getApiKeyEnvVars(provider);\n\tif (!envVars) return undefined;\n\n\tconst found = envVars.filter((envVar) => !!process.env[envVar] || !!getProcEnv(envVar));\n\treturn found.length > 0 ? found : undefined;\n}\n\n/**\n * Get API key for provider from known environment variables, e.g. 
OPENAI_API_KEY.\n *\n * Will not return API keys for providers that require OAuth tokens.\n */\nexport function getEnvApiKey(provider: KnownProvider): string | undefined;\nexport function getEnvApiKey(provider: string): string | undefined;\nexport function getEnvApiKey(provider: string): string | undefined {\n\tconst envKeys = findEnvKeys(provider);\n\tif (envKeys?.[0]) {\n\t\treturn process.env[envKeys[0]] || getProcEnv(envKeys[0]);\n\t}\n\n\t// Vertex AI supports either an explicit API key or Application Default Credentials.\n\t// Auth is configured via `gcloud auth application-default login`.\n\tif (provider === \"google-vertex\") {\n\t\tconst hasCredentials = hasVertexAdcCredentials();\n\t\tconst hasProject = !!(\n\t\t\tprocess.env.GOOGLE_CLOUD_PROJECT ||\n\t\t\tprocess.env.GCLOUD_PROJECT ||\n\t\t\tgetProcEnv(\"GOOGLE_CLOUD_PROJECT\") ||\n\t\t\tgetProcEnv(\"GCLOUD_PROJECT\")\n\t\t);\n\t\tconst hasLocation = !!(process.env.GOOGLE_CLOUD_LOCATION || getProcEnv(\"GOOGLE_CLOUD_LOCATION\"));\n\n\t\tif (hasCredentials && hasProject && hasLocation) {\n\t\t\treturn \"<authenticated>\";\n\t\t}\n\t}\n\n\tif (provider === \"amazon-bedrock\") {\n\t\t// Amazon Bedrock supports multiple credential sources:\n\t\t// 1. AWS_PROFILE - named profile from ~/.aws/credentials\n\t\t// 2. AWS_ACCESS_KEY_ID + AWS_SECRET_ACCESS_KEY - standard IAM keys\n\t\t// 3. AWS_BEARER_TOKEN_BEDROCK - Bedrock bearer token\n\t\t// 4. AWS_CONTAINER_CREDENTIALS_RELATIVE_URI - ECS task roles\n\t\t// 5. AWS_CONTAINER_CREDENTIALS_FULL_URI - ECS task roles (full URI)\n\t\t// 6. 
AWS_WEB_IDENTITY_TOKEN_FILE - IRSA (IAM Roles for Service Accounts)\n\t\tif (\n\t\t\tprocess.env.AWS_PROFILE ||\n\t\t\t(process.env.AWS_ACCESS_KEY_ID && process.env.AWS_SECRET_ACCESS_KEY) ||\n\t\t\tprocess.env.AWS_BEARER_TOKEN_BEDROCK ||\n\t\t\tprocess.env.AWS_CONTAINER_CREDENTIALS_RELATIVE_URI ||\n\t\t\tprocess.env.AWS_CONTAINER_CREDENTIALS_FULL_URI ||\n\t\t\tprocess.env.AWS_WEB_IDENTITY_TOKEN_FILE ||\n\t\t\tgetProcEnv(\"AWS_PROFILE\") ||\n\t\t\t(getProcEnv(\"AWS_ACCESS_KEY_ID\") && getProcEnv(\"AWS_SECRET_ACCESS_KEY\")) ||\n\t\t\tgetProcEnv(\"AWS_BEARER_TOKEN_BEDROCK\") ||\n\t\t\tgetProcEnv(\"AWS_CONTAINER_CREDENTIALS_RELATIVE_URI\") ||\n\t\t\tgetProcEnv(\"AWS_CONTAINER_CREDENTIALS_FULL_URI\") ||\n\t\t\tgetProcEnv(\"AWS_WEB_IDENTITY_TOKEN_FILE\")\n\t\t) {\n\t\t\treturn \"<authenticated>\";\n\t\t}\n\t}\n\n\treturn undefined;\n}\n"]}
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
import { IMAGE_MODELS } from "./image-models.generated.js";
|
|
2
|
+
import type { ImagesApi, ImagesModel, KnownImagesProvider } from "./types.js";
|
|
3
|
+
type ImageModelApi<TProvider extends KnownImagesProvider, TModelId extends keyof (typeof IMAGE_MODELS)[TProvider]> = (typeof IMAGE_MODELS)[TProvider][TModelId] extends {
|
|
4
|
+
api: infer TApi;
|
|
5
|
+
} ? TApi extends ImagesApi ? TApi : never : never;
|
|
6
|
+
export declare function getImageModel<TProvider extends KnownImagesProvider, TModelId extends keyof (typeof IMAGE_MODELS)[TProvider]>(provider: TProvider, modelId: TModelId): ImagesModel<ImageModelApi<TProvider, TModelId>>;
|
|
7
|
+
export declare function getImageProviders(): KnownImagesProvider[];
|
|
8
|
+
export declare function getImageModels<TProvider extends KnownImagesProvider>(provider: TProvider): ImagesModel<ImageModelApi<TProvider, keyof (typeof IMAGE_MODELS)[TProvider]>>[];
|
|
9
|
+
export {};
|
|
10
|
+
//# sourceMappingURL=image-models.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"image-models.d.ts","sourceRoot":"","sources":["../src/image-models.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,YAAY,EAAE,MAAM,6BAA6B,CAAC;AAC3D,OAAO,KAAK,EAAE,SAAS,EAAE,WAAW,EAAE,mBAAmB,EAAE,MAAM,YAAY,CAAC;AAY9E,KAAK,aAAa,CACjB,SAAS,SAAS,mBAAmB,EACrC,QAAQ,SAAS,MAAM,CAAC,OAAO,YAAY,CAAC,CAAC,SAAS,CAAC,IACpD,CAAC,OAAO,YAAY,CAAC,CAAC,SAAS,CAAC,CAAC,QAAQ,CAAC,SAAS;IAAE,GAAG,EAAE,MAAM,IAAI,CAAA;CAAE,GACvE,IAAI,SAAS,SAAS,GACrB,IAAI,GACJ,KAAK,GACN,KAAK,CAAC;AAET,wBAAgB,aAAa,CAC5B,SAAS,SAAS,mBAAmB,EACrC,QAAQ,SAAS,MAAM,CAAC,OAAO,YAAY,CAAC,CAAC,SAAS,CAAC,EACtD,QAAQ,EAAE,SAAS,EAAE,OAAO,EAAE,QAAQ,GAAG,WAAW,CAAC,aAAa,CAAC,SAAS,EAAE,QAAQ,CAAC,CAAC,CAGzF;AAED,wBAAgB,iBAAiB,IAAI,mBAAmB,EAAE,CAEzD;AAED,wBAAgB,cAAc,CAAC,SAAS,SAAS,mBAAmB,EACnE,QAAQ,EAAE,SAAS,GACjB,WAAW,CAAC,aAAa,CAAC,SAAS,EAAE,MAAM,CAAC,OAAO,YAAY,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,EAAE,CAKjF","sourcesContent":["import { IMAGE_MODELS } from \"./image-models.generated.js\";\nimport type { ImagesApi, ImagesModel, KnownImagesProvider } from \"./types.js\";\n\nconst imageModelRegistry: Map<string, Map<string, ImagesModel<ImagesApi>>> = new Map();\n\nfor (const [provider, models] of Object.entries(IMAGE_MODELS)) {\n\tconst providerModels = new Map<string, ImagesModel<ImagesApi>>();\n\tfor (const [id, model] of Object.entries(models)) {\n\t\tproviderModels.set(id, model as ImagesModel<ImagesApi>);\n\t}\n\timageModelRegistry.set(provider, providerModels);\n}\n\ntype ImageModelApi<\n\tTProvider extends KnownImagesProvider,\n\tTModelId extends keyof (typeof IMAGE_MODELS)[TProvider],\n> = (typeof IMAGE_MODELS)[TProvider][TModelId] extends { api: infer TApi }\n\t? TApi extends ImagesApi\n\t\t? 
TApi\n\t\t: never\n\t: never;\n\nexport function getImageModel<\n\tTProvider extends KnownImagesProvider,\n\tTModelId extends keyof (typeof IMAGE_MODELS)[TProvider],\n>(provider: TProvider, modelId: TModelId): ImagesModel<ImageModelApi<TProvider, TModelId>> {\n\tconst providerModels = imageModelRegistry.get(provider);\n\treturn providerModels?.get(modelId as string) as ImagesModel<ImageModelApi<TProvider, TModelId>>;\n}\n\nexport function getImageProviders(): KnownImagesProvider[] {\n\treturn Array.from(imageModelRegistry.keys()) as KnownImagesProvider[];\n}\n\nexport function getImageModels<TProvider extends KnownImagesProvider>(\n\tprovider: TProvider,\n): ImagesModel<ImageModelApi<TProvider, keyof (typeof IMAGE_MODELS)[TProvider]>>[] {\n\tconst models = imageModelRegistry.get(provider);\n\treturn models\n\t\t? (Array.from(models.values()) as ImagesModel<ImageModelApi<TProvider, keyof (typeof IMAGE_MODELS)[TProvider]>>[])\n\t\t: [];\n}\n"]}
|