@fgv/ts-extras 5.1.0-15 → 5.1.0-17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/dist/index.browser.js +2 -1
  2. package/dist/packlets/ai-assist/apiClient.js +570 -58
  3. package/dist/packlets/ai-assist/chatRequestBuilders.js +180 -0
  4. package/dist/packlets/ai-assist/index.js +4 -3
  5. package/dist/packlets/ai-assist/model.js +20 -3
  6. package/dist/packlets/ai-assist/registry.js +66 -10
  7. package/dist/packlets/ai-assist/sseParser.js +122 -0
  8. package/dist/packlets/ai-assist/streamingAdapters/anthropic.js +192 -0
  9. package/dist/packlets/ai-assist/streamingAdapters/common.js +77 -0
  10. package/dist/packlets/ai-assist/streamingAdapters/gemini.js +160 -0
  11. package/dist/packlets/ai-assist/streamingAdapters/openaiChat.js +149 -0
  12. package/dist/packlets/ai-assist/streamingAdapters/openaiResponses.js +163 -0
  13. package/dist/packlets/ai-assist/streamingAdapters/proxy.js +157 -0
  14. package/dist/packlets/ai-assist/streamingClient.js +88 -0
  15. package/dist/packlets/conversion/converters.js +1 -1
  16. package/dist/packlets/crypto-utils/keystore/keyStore.js +74 -42
  17. package/dist/packlets/zip-file-tree/zipFileTreeAccessors.js +2 -2
  18. package/dist/ts-extras.d.ts +531 -5
  19. package/lib/index.browser.d.ts +2 -1
  20. package/lib/index.browser.js +3 -1
  21. package/lib/packlets/ai-assist/apiClient.d.ts +103 -1
  22. package/lib/packlets/ai-assist/apiClient.js +574 -58
  23. package/lib/packlets/ai-assist/chatRequestBuilders.d.ts +89 -0
  24. package/lib/packlets/ai-assist/chatRequestBuilders.js +189 -0
  25. package/lib/packlets/ai-assist/index.d.ts +4 -3
  26. package/lib/packlets/ai-assist/index.js +10 -1
  27. package/lib/packlets/ai-assist/model.d.ts +271 -2
  28. package/lib/packlets/ai-assist/model.js +21 -3
  29. package/lib/packlets/ai-assist/registry.d.ts +10 -1
  30. package/lib/packlets/ai-assist/registry.js +67 -11
  31. package/lib/packlets/ai-assist/sseParser.d.ts +45 -0
  32. package/lib/packlets/ai-assist/sseParser.js +127 -0
  33. package/lib/packlets/ai-assist/streamingAdapters/anthropic.d.ts +18 -0
  34. package/lib/packlets/ai-assist/streamingAdapters/anthropic.js +195 -0
  35. package/lib/packlets/ai-assist/streamingAdapters/common.d.ts +71 -0
  36. package/lib/packlets/ai-assist/streamingAdapters/common.js +81 -0
  37. package/lib/packlets/ai-assist/streamingAdapters/gemini.d.ts +19 -0
  38. package/lib/packlets/ai-assist/streamingAdapters/gemini.js +163 -0
  39. package/lib/packlets/ai-assist/streamingAdapters/openaiChat.d.ts +18 -0
  40. package/lib/packlets/ai-assist/streamingAdapters/openaiChat.js +152 -0
  41. package/lib/packlets/ai-assist/streamingAdapters/openaiResponses.d.ts +19 -0
  42. package/lib/packlets/ai-assist/streamingAdapters/openaiResponses.js +166 -0
  43. package/lib/packlets/ai-assist/streamingAdapters/proxy.d.ts +34 -0
  44. package/lib/packlets/ai-assist/streamingAdapters/proxy.js +160 -0
  45. package/lib/packlets/ai-assist/streamingClient.d.ts +33 -0
  46. package/lib/packlets/ai-assist/streamingClient.js +93 -0
  47. package/lib/packlets/conversion/converters.d.ts +1 -1
  48. package/lib/packlets/conversion/converters.js +1 -1
  49. package/lib/packlets/crypto-utils/keystore/keyStore.d.ts +19 -0
  50. package/lib/packlets/crypto-utils/keystore/keyStore.js +74 -42
  51. package/lib/packlets/zip-file-tree/zipFileTreeAccessors.d.ts +2 -2
  52. package/lib/packlets/zip-file-tree/zipFileTreeAccessors.js +2 -2
  53. package/package.json +7 -7
@@ -0,0 +1,89 @@
1
+ /**
2
+ * Per-format chat request shape builders. Shared between the synchronous
3
+ * (`apiClient.ts`) and streaming (`streamingClient.ts`) paths so the wire
4
+ * shapes stay consistent.
5
+ *
6
+ * @packageDocumentation
7
+ */
8
+ import { AiPrompt, type IChatMessage } from './model';
9
+ /**
10
+ * Optional head/tail messages to weave around the prompt's user message.
11
+ *
12
+ * @internal
13
+ */
14
+ export interface IBuildMessagesOptions {
15
+ /**
16
+ * Messages inserted between the system prompt and the prompt's user
17
+ * message (e.g. prior conversation history for multi-turn chat).
18
+ */
19
+ readonly head?: ReadonlyArray<IChatMessage>;
20
+ /**
21
+ * Messages appended after the prompt's user message (e.g. assistant
22
+ * + correction turns for the JSON-validation retry loop).
23
+ */
24
+ readonly tail?: ReadonlyArray<IChatMessage>;
25
+ }
26
+ /**
27
+ * Builds the messages array from prompt + optional head/tail messages.
28
+ * The caller supplies the user content (string for text-only, parts array
29
+ * for vision prompts) since the parts shape differs by format.
30
+ *
31
+ * @internal
32
+ */
33
+ export declare function buildMessages(systemPrompt: string, userContent: string | unknown[], options?: IBuildMessagesOptions): Array<{
34
+ role: string;
35
+ content: string | unknown[];
36
+ }>;
37
+ /**
38
+ * Builds the user content for OpenAI Chat Completions when attachments are
39
+ * present. Returns a string when there are no attachments.
40
+ *
41
+ * @internal
42
+ */
43
+ export declare function buildOpenAiChatUserContent(prompt: AiPrompt): string | unknown[];
44
+ /**
45
+ * Builds the user content for OpenAI / xAI Responses API when attachments
46
+ * are present. Responses API uses `input_text` / `input_image` part types,
47
+ * distinct from Chat Completions' `text` / `image_url`.
48
+ *
49
+ * @internal
50
+ */
51
+ export declare function buildOpenAiResponsesUserContent(prompt: AiPrompt): string | unknown[];
52
+ /**
53
+ * Builds the user-message content for Anthropic when attachments are present.
54
+ *
55
+ * @internal
56
+ */
57
+ export declare function buildAnthropicUserContent(prompt: AiPrompt): string | unknown[];
58
+ /**
59
+ * Builds the Gemini `parts` array for the user turn, including any image
60
+ * attachments as `inlineData` parts.
61
+ *
62
+ * @internal
63
+ */
64
+ export declare function buildGeminiUserParts(prompt: AiPrompt): Array<Record<string, unknown>>;
65
+ /**
66
+ * Builds the Anthropic messages array, weaving any `head` messages between
67
+ * implicit system + the prompt's user message and appending `tail` messages
68
+ * after. System messages are filtered out (Anthropic uses a top-level system
69
+ * field).
70
+ *
71
+ * @internal
72
+ */
73
+ export declare function buildAnthropicMessages(prompt: AiPrompt, options?: IBuildMessagesOptions): Array<{
74
+ role: string;
75
+ content: string | unknown[];
76
+ }>;
77
+ /**
78
+ * Builds the Gemini `contents` array, weaving any `head` messages before the
79
+ * prompt's user parts and appending `tail` messages after. System messages
80
+ * are filtered out (Gemini uses a top-level systemInstruction field) and
81
+ * assistant roles are mapped to Gemini's `model` role.
82
+ *
83
+ * @internal
84
+ */
85
+ export declare function buildGeminiContents(prompt: AiPrompt, options?: IBuildMessagesOptions): Array<{
86
+ role: string;
87
+ parts: Array<Record<string, unknown>>;
88
+ }>;
89
+ //# sourceMappingURL=chatRequestBuilders.d.ts.map
@@ -0,0 +1,189 @@
1
+ "use strict";
2
+ // Copyright (c) 2026 Erik Fortune
3
+ //
4
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
5
+ // of this software and associated documentation files (the "Software"), to deal
6
+ // in the Software without restriction, including without limitation the rights
7
+ // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8
+ // copies of the Software, and to permit persons to whom the Software is
9
+ // furnished to do so, subject to the following conditions:
10
+ //
11
+ // The above copyright notice and this permission notice shall be included in all
12
+ // copies or substantial portions of the Software.
13
+ //
14
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
20
+ // SOFTWARE.
21
+ Object.defineProperty(exports, "__esModule", { value: true });
22
+ exports.buildMessages = buildMessages;
23
+ exports.buildOpenAiChatUserContent = buildOpenAiChatUserContent;
24
+ exports.buildOpenAiResponsesUserContent = buildOpenAiResponsesUserContent;
25
+ exports.buildAnthropicUserContent = buildAnthropicUserContent;
26
+ exports.buildGeminiUserParts = buildGeminiUserParts;
27
+ exports.buildAnthropicMessages = buildAnthropicMessages;
28
+ exports.buildGeminiContents = buildGeminiContents;
29
+ /**
30
+ * Per-format chat request shape builders. Shared between the synchronous
31
+ * (`apiClient.ts`) and streaming (`streamingClient.ts`) paths so the wire
32
+ * shapes stay consistent.
33
+ *
34
+ * @packageDocumentation
35
+ */
36
+ const model_1 = require("./model");
37
+ /**
38
+ * Builds the messages array from prompt + optional head/tail messages.
39
+ * The caller supplies the user content (string for text-only, parts array
40
+ * for vision prompts) since the parts shape differs by format.
41
+ *
42
+ * @internal
43
+ */
44
+ function buildMessages(systemPrompt, userContent, options) {
45
+ const messages = [
46
+ { role: 'system', content: systemPrompt }
47
+ ];
48
+ if (options === null || options === void 0 ? void 0 : options.head) {
49
+ for (const msg of options.head) {
50
+ messages.push({ role: msg.role, content: msg.content });
51
+ }
52
+ }
53
+ messages.push({ role: 'user', content: userContent });
54
+ if (options === null || options === void 0 ? void 0 : options.tail) {
55
+ for (const msg of options.tail) {
56
+ messages.push({ role: msg.role, content: msg.content });
57
+ }
58
+ }
59
+ return messages;
60
+ }
61
+ /**
62
+ * Builds the user content for OpenAI Chat Completions when attachments are
63
+ * present. Returns a string when there are no attachments.
64
+ *
65
+ * @internal
66
+ */
67
+ function buildOpenAiChatUserContent(prompt) {
68
+ if (prompt.attachments.length === 0) {
69
+ return prompt.user;
70
+ }
71
+ return [
72
+ { type: 'text', text: prompt.user },
73
+ ...prompt.attachments.map((att) => ({
74
+ type: 'image_url',
75
+ image_url: Object.assign({ url: (0, model_1.toDataUrl)(att) }, (att.detail !== undefined ? { detail: att.detail } : {}))
76
+ }))
77
+ ];
78
+ }
79
+ /**
80
+ * Builds the user content for OpenAI / xAI Responses API when attachments
81
+ * are present. Responses API uses `input_text` / `input_image` part types,
82
+ * distinct from Chat Completions' `text` / `image_url`.
83
+ *
84
+ * @internal
85
+ */
86
+ function buildOpenAiResponsesUserContent(prompt) {
87
+ if (prompt.attachments.length === 0) {
88
+ return prompt.user;
89
+ }
90
+ return [
91
+ { type: 'input_text', text: prompt.user },
92
+ ...prompt.attachments.map((att) => (Object.assign({ type: 'input_image', image_url: (0, model_1.toDataUrl)(att) }, (att.detail !== undefined ? { detail: att.detail } : {}))))
93
+ ];
94
+ }
95
+ /**
96
+ * Builds the user-message content for Anthropic when attachments are present.
97
+ *
98
+ * @internal
99
+ */
100
+ function buildAnthropicUserContent(prompt) {
101
+ if (prompt.attachments.length === 0) {
102
+ return prompt.user;
103
+ }
104
+ return [
105
+ { type: 'text', text: prompt.user },
106
+ ...prompt.attachments.map((att) => ({
107
+ type: 'image',
108
+ source: {
109
+ type: 'base64',
110
+ media_type: att.mimeType,
111
+ data: att.base64
112
+ }
113
+ }))
114
+ ];
115
+ }
116
+ /**
117
+ * Builds the Gemini `parts` array for the user turn, including any image
118
+ * attachments as `inlineData` parts.
119
+ *
120
+ * @internal
121
+ */
122
+ function buildGeminiUserParts(prompt) {
123
+ const parts = [{ text: prompt.user }];
124
+ for (const att of prompt.attachments) {
125
+ parts.push({ inlineData: { mimeType: att.mimeType, data: att.base64 } });
126
+ }
127
+ return parts;
128
+ }
129
+ /**
130
+ * Builds the Anthropic messages array, weaving any `head` messages between
131
+ * implicit system + the prompt's user message and appending `tail` messages
132
+ * after. System messages are filtered out (Anthropic uses a top-level system
133
+ * field).
134
+ *
135
+ * @internal
136
+ */
137
+ function buildAnthropicMessages(prompt, options) {
138
+ const messages = [];
139
+ if (options === null || options === void 0 ? void 0 : options.head) {
140
+ for (const msg of options.head) {
141
+ if (msg.role !== 'system') {
142
+ messages.push({ role: msg.role, content: msg.content });
143
+ }
144
+ }
145
+ }
146
+ messages.push({ role: 'user', content: buildAnthropicUserContent(prompt) });
147
+ if (options === null || options === void 0 ? void 0 : options.tail) {
148
+ for (const msg of options.tail) {
149
+ if (msg.role !== 'system') {
150
+ messages.push({ role: msg.role, content: msg.content });
151
+ }
152
+ }
153
+ }
154
+ return messages;
155
+ }
156
+ /**
157
+ * Builds the Gemini `contents` array, weaving any `head` messages before the
158
+ * prompt's user parts and appending `tail` messages after. System messages
159
+ * are filtered out (Gemini uses a top-level systemInstruction field) and
160
+ * assistant roles are mapped to Gemini's `model` role.
161
+ *
162
+ * @internal
163
+ */
164
+ function buildGeminiContents(prompt, options) {
165
+ const contents = [];
166
+ if (options === null || options === void 0 ? void 0 : options.head) {
167
+ for (const msg of options.head) {
168
+ if (msg.role !== 'system') {
169
+ contents.push({
170
+ role: msg.role === 'assistant' ? 'model' : msg.role,
171
+ parts: [{ text: msg.content }]
172
+ });
173
+ }
174
+ }
175
+ }
176
+ contents.push({ role: 'user', parts: buildGeminiUserParts(prompt) });
177
+ if (options === null || options === void 0 ? void 0 : options.tail) {
178
+ for (const msg of options.tail) {
179
+ if (msg.role !== 'system') {
180
+ contents.push({
181
+ role: msg.role === 'assistant' ? 'model' : msg.role,
182
+ parts: [{ text: msg.content }]
183
+ });
184
+ }
185
+ }
186
+ }
187
+ return contents;
188
+ }
189
+ //# sourceMappingURL=chatRequestBuilders.js.map
@@ -2,9 +2,10 @@
2
2
  * AI assist packlet - provider registry, prompt class, settings, and API client.
3
3
  * @packageDocumentation
4
4
  */
5
- export { AiPrompt, type AiProviderId, type AiServerToolType, type AiServerToolConfig, type IAiWebSearchToolConfig, type IAiToolEnablement, type IAiCompletionResponse, type IChatMessage, type AiApiFormat, type IAiProviderDescriptor, type IAiAssistProviderConfig, type IAiAssistSettings, DEFAULT_AI_ASSIST, type IAiAssistKeyStore, type ModelSpec, type ModelSpecKey, type IModelSpecMap, allModelSpecKeys, MODEL_SPEC_BASE_KEY, resolveModel } from './model';
6
- export { allProviderIds, getProviderDescriptors, getProviderDescriptor } from './registry';
7
- export { callProviderCompletion, callProxiedCompletion, type IProviderCompletionParams } from './apiClient';
5
+ export { AiPrompt, type AiModelCapability, type AiProviderId, type AiServerToolType, type AiServerToolConfig, type IAiWebSearchToolConfig, type IAiToolEnablement, type IAiCompletionResponse, type IChatMessage, type AiApiFormat, type AiImageApiFormat, type IAiProviderDescriptor, type IAiAssistProviderConfig, type IAiAssistSettings, DEFAULT_AI_ASSIST, type IAiAssistKeyStore, type IAiImageAttachment, type IAiImageData, type IAiImageGenerationOptions, type IAiImageGenerationParams, type IAiGeneratedImage, type IAiImageGenerationResponse, type IAiModelCapabilityRule, type IAiModelCapabilityConfig, type IAiModelInfo, type IAiStreamEvent, type IAiStreamTextDelta, type IAiStreamToolEvent, type IAiStreamDone, type IAiStreamError, type ModelSpec, type ModelSpecKey, type IModelSpecMap, allModelSpecKeys, MODEL_SPEC_BASE_KEY, resolveModel, toDataUrl } from './model';
6
+ export { allProviderIds, getProviderDescriptors, getProviderDescriptor, DEFAULT_MODEL_CAPABILITY_CONFIG } from './registry';
7
+ export { callProviderCompletion, callProxiedCompletion, callProviderImageGeneration, callProxiedImageGeneration, callProviderListModels, callProxiedListModels, type IProviderCompletionParams, type IProviderImageGenerationParams, type IProviderListModelsParams } from './apiClient';
8
+ export { callProviderCompletionStream, callProxiedCompletionStream, type IProviderCompletionStreamParams } from './streamingClient';
8
9
  export { aiProviderId, aiServerToolType, aiWebSearchToolConfig, aiServerToolConfig, aiToolEnablement, aiAssistProviderConfig, aiAssistSettings, modelSpecKey, modelSpec } from './converters';
9
10
  export { resolveEffectiveTools } from './toolFormats';
10
11
  //# sourceMappingURL=index.d.ts.map
@@ -4,20 +4,29 @@
4
4
  * @packageDocumentation
5
5
  */
6
6
  Object.defineProperty(exports, "__esModule", { value: true });
7
- exports.resolveEffectiveTools = exports.modelSpec = exports.modelSpecKey = exports.aiAssistSettings = exports.aiAssistProviderConfig = exports.aiToolEnablement = exports.aiServerToolConfig = exports.aiWebSearchToolConfig = exports.aiServerToolType = exports.aiProviderId = exports.callProxiedCompletion = exports.callProviderCompletion = exports.getProviderDescriptor = exports.getProviderDescriptors = exports.allProviderIds = exports.resolveModel = exports.MODEL_SPEC_BASE_KEY = exports.allModelSpecKeys = exports.DEFAULT_AI_ASSIST = exports.AiPrompt = void 0;
7
+ exports.resolveEffectiveTools = exports.modelSpec = exports.modelSpecKey = exports.aiAssistSettings = exports.aiAssistProviderConfig = exports.aiToolEnablement = exports.aiServerToolConfig = exports.aiWebSearchToolConfig = exports.aiServerToolType = exports.aiProviderId = exports.callProxiedCompletionStream = exports.callProviderCompletionStream = exports.callProxiedListModels = exports.callProviderListModels = exports.callProxiedImageGeneration = exports.callProviderImageGeneration = exports.callProxiedCompletion = exports.callProviderCompletion = exports.DEFAULT_MODEL_CAPABILITY_CONFIG = exports.getProviderDescriptor = exports.getProviderDescriptors = exports.allProviderIds = exports.toDataUrl = exports.resolveModel = exports.MODEL_SPEC_BASE_KEY = exports.allModelSpecKeys = exports.DEFAULT_AI_ASSIST = exports.AiPrompt = void 0;
8
8
  var model_1 = require("./model");
9
9
  Object.defineProperty(exports, "AiPrompt", { enumerable: true, get: function () { return model_1.AiPrompt; } });
10
10
  Object.defineProperty(exports, "DEFAULT_AI_ASSIST", { enumerable: true, get: function () { return model_1.DEFAULT_AI_ASSIST; } });
11
11
  Object.defineProperty(exports, "allModelSpecKeys", { enumerable: true, get: function () { return model_1.allModelSpecKeys; } });
12
12
  Object.defineProperty(exports, "MODEL_SPEC_BASE_KEY", { enumerable: true, get: function () { return model_1.MODEL_SPEC_BASE_KEY; } });
13
13
  Object.defineProperty(exports, "resolveModel", { enumerable: true, get: function () { return model_1.resolveModel; } });
14
+ Object.defineProperty(exports, "toDataUrl", { enumerable: true, get: function () { return model_1.toDataUrl; } });
14
15
  var registry_1 = require("./registry");
15
16
  Object.defineProperty(exports, "allProviderIds", { enumerable: true, get: function () { return registry_1.allProviderIds; } });
16
17
  Object.defineProperty(exports, "getProviderDescriptors", { enumerable: true, get: function () { return registry_1.getProviderDescriptors; } });
17
18
  Object.defineProperty(exports, "getProviderDescriptor", { enumerable: true, get: function () { return registry_1.getProviderDescriptor; } });
19
+ Object.defineProperty(exports, "DEFAULT_MODEL_CAPABILITY_CONFIG", { enumerable: true, get: function () { return registry_1.DEFAULT_MODEL_CAPABILITY_CONFIG; } });
18
20
  var apiClient_1 = require("./apiClient");
19
21
  Object.defineProperty(exports, "callProviderCompletion", { enumerable: true, get: function () { return apiClient_1.callProviderCompletion; } });
20
22
  Object.defineProperty(exports, "callProxiedCompletion", { enumerable: true, get: function () { return apiClient_1.callProxiedCompletion; } });
23
+ Object.defineProperty(exports, "callProviderImageGeneration", { enumerable: true, get: function () { return apiClient_1.callProviderImageGeneration; } });
24
+ Object.defineProperty(exports, "callProxiedImageGeneration", { enumerable: true, get: function () { return apiClient_1.callProxiedImageGeneration; } });
25
+ Object.defineProperty(exports, "callProviderListModels", { enumerable: true, get: function () { return apiClient_1.callProviderListModels; } });
26
+ Object.defineProperty(exports, "callProxiedListModels", { enumerable: true, get: function () { return apiClient_1.callProxiedListModels; } });
27
+ var streamingClient_1 = require("./streamingClient");
28
+ Object.defineProperty(exports, "callProviderCompletionStream", { enumerable: true, get: function () { return streamingClient_1.callProviderCompletionStream; } });
29
+ Object.defineProperty(exports, "callProxiedCompletionStream", { enumerable: true, get: function () { return streamingClient_1.callProxiedCompletionStream; } });
21
30
  var converters_1 = require("./converters");
22
31
  Object.defineProperty(exports, "aiProviderId", { enumerable: true, get: function () { return converters_1.aiProviderId; } });
23
32
  Object.defineProperty(exports, "aiServerToolType", { enumerable: true, get: function () { return converters_1.aiServerToolType; } });
@@ -3,6 +3,49 @@
3
3
  * @packageDocumentation
4
4
  */
5
5
  import { type Result } from '@fgv/ts-utils';
6
+ /**
7
+ * Universal image representation used for both image input (vision prompts)
8
+ * and image output (generation responses).
9
+ *
10
+ * @remarks
11
+ * The base64 string is raw — no `data:` URL prefix. Use {@link AiAssist.toDataUrl} to
12
+ * format it for browser-display contexts.
13
+ *
14
+ * @public
15
+ */
16
+ export interface IAiImageData {
17
+ /** MIME type, e.g. `'image/png'`, `'image/jpeg'`, `'image/webp'`. */
18
+ readonly mimeType: string;
19
+ /** Base64-encoded image bytes (no `data:` prefix). */
20
+ readonly base64: string;
21
+ }
22
+ /**
23
+ * Formats an {@link IAiImageData} as a `data:` URL suitable for browser display.
24
+ * @param image - The image to format
25
+ * @returns A `data:<mime>;base64,<data>` URL string
26
+ * @public
27
+ */
28
+ export declare function toDataUrl(image: IAiImageData): string;
29
+ /**
30
+ * Image attachment for a vision (image-input) prompt.
31
+ *
32
+ * @remarks
33
+ * Extends {@link IAiImageData} with an OpenAI-specific `detail` hint that is
34
+ * silently ignored by Anthropic, Gemini, and other providers.
35
+ *
36
+ * @public
37
+ */
38
+ export interface IAiImageAttachment extends IAiImageData {
39
+ /**
40
+ * OpenAI vision detail hint:
41
+ * - `'low'`: faster, cheaper, lower fidelity
42
+ * - `'high'`: slower, more expensive, higher fidelity
43
+ * - `'auto'` (default): provider chooses
44
+ *
45
+ * Ignored by providers other than OpenAI.
46
+ */
47
+ readonly detail?: 'low' | 'high' | 'auto';
48
+ }
6
49
  /**
7
50
  * A structured AI prompt with system/user split for direct API calls,
8
51
  * and a lazily-constructed combined version for copy/paste workflows.
@@ -13,8 +56,18 @@ export declare class AiPrompt {
13
56
  readonly system: string;
14
57
  /** User request: the specific entity generation request. */
15
58
  readonly user: string;
16
- constructor(user: string, system: string);
17
- /** Combined single-string version (user + system joined) for copy/paste. */
59
+ /**
60
+ * Optional image attachments. When present, vision-capable providers will
61
+ * include them in the user message; non-vision providers will reject the
62
+ * call up front (see {@link AiAssist.IAiProviderDescriptor.acceptsImageInput}).
63
+ */
64
+ readonly attachments: ReadonlyArray<IAiImageAttachment>;
65
+ constructor(user: string, system: string, attachments?: ReadonlyArray<IAiImageAttachment>);
66
+ /**
67
+ * Combined single-string version (user + system joined) for copy/paste.
68
+ * When attachments are present, includes a sentinel noting they aren't
69
+ * part of the copied text.
70
+ */
18
71
  get combined(): string;
19
72
  }
20
73
  /**
@@ -139,6 +192,11 @@ export type AiProviderId = 'copy-paste' | 'xai-grok' | 'openai' | 'anthropic' |
139
192
  * @public
140
193
  */
141
194
  export type AiApiFormat = 'openai' | 'anthropic' | 'gemini';
195
+ /**
196
+ * API format categories for image-generation provider routing.
197
+ * @public
198
+ */
199
+ export type AiImageApiFormat = 'openai-images' | 'gemini-imagen' | 'xai-images';
142
200
  /**
143
201
  * Result of an AI provider completion call.
144
202
  * @public
@@ -149,6 +207,67 @@ export interface IAiCompletionResponse {
149
207
  /** Whether the response was truncated due to token limits */
150
208
  readonly truncated: boolean;
151
209
  }
210
+ /**
211
+ * A text-content delta arriving during a streaming completion.
212
+ * @public
213
+ */
214
+ export interface IAiStreamTextDelta {
215
+ readonly type: 'text-delta';
216
+ /** The newly arrived text fragment. */
217
+ readonly delta: string;
218
+ }
219
+ /**
220
+ * A server-side tool progress event arriving during a streaming completion.
221
+ * Surfaced for providers that emit explicit tool-progress markers (OpenAI
222
+ * Responses API, Anthropic). Gemini's grounding doesn't emit these.
223
+ * @public
224
+ */
225
+ export interface IAiStreamToolEvent {
226
+ readonly type: 'tool-event';
227
+ /** Which server-side tool this event describes. */
228
+ readonly toolType: AiServerToolType;
229
+ /** Tool lifecycle phase. */
230
+ readonly phase: 'started' | 'completed';
231
+ /**
232
+ * Optional provider-specific detail. For web_search this is typically the
233
+ * search query when available; format varies by provider.
234
+ */
235
+ readonly detail?: string;
236
+ }
237
+ /**
238
+ * Terminal success event for a streaming completion. Carries the aggregated
239
+ * full text and truncation status for callers that want both the progressive
240
+ * UI and the complete result.
241
+ * @public
242
+ */
243
+ export interface IAiStreamDone {
244
+ readonly type: 'done';
245
+ /** Whether the response was truncated due to token limits. */
246
+ readonly truncated: boolean;
247
+ /** The full concatenated text from all `text-delta` events. */
248
+ readonly fullText: string;
249
+ }
250
+ /**
251
+ * Terminal failure event for a streaming completion. After this event no
252
+ * further events are emitted.
253
+ *
254
+ * @remarks
255
+ * Connection-time failures (auth, network, pre-flight CORS rejection) are
256
+ * surfaced via the outer `Result.fail` returned by
257
+ * `callProviderCompletionStream` rather than as an `error` event, so callers
258
+ * can distinguish "didn't start" from "started but errored mid-stream."
259
+ *
260
+ * @public
261
+ */
262
+ export interface IAiStreamError {
263
+ readonly type: 'error';
264
+ readonly message: string;
265
+ }
266
+ /**
267
+ * Discriminated union of events emitted by a streaming completion.
268
+ * @public
269
+ */
270
+ export type IAiStreamEvent = IAiStreamTextDelta | IAiStreamToolEvent | IAiStreamDone | IAiStreamError;
152
271
  /**
153
272
  * Describes a single AI provider — single source of truth for all metadata.
154
273
  * @public
@@ -172,6 +291,156 @@ export interface IAiProviderDescriptor {
172
291
  readonly supportedTools: ReadonlyArray<AiServerToolType>;
173
292
  /** Whether this provider's API enforces CORS restrictions that prevent direct browser calls. */
174
293
  readonly corsRestricted: boolean;
294
+ /**
295
+ * Whether this provider's streaming completion endpoint requires a proxy
296
+ * for direct browser calls. Some providers gate streaming separately from
297
+ * non-streaming (rare), so this is tracked independently from
298
+ * {@link IAiProviderDescriptor.corsRestricted}.
299
+ *
300
+ * @remarks
301
+ * When `true`, `callProviderCompletionStream` rejects up front unless the
302
+ * call is being routed through a proxy.
303
+ */
304
+ readonly streamingCorsRestricted: boolean;
305
+ /**
306
+ * Whether this provider's chat completions API accepts image input
307
+ * (i.e. supports vision prompts). When false, calls with
308
+ * `prompt.attachments` are rejected up front.
309
+ */
310
+ readonly acceptsImageInput: boolean;
311
+ /**
312
+ * Which image-generation API format this provider uses, or undefined if it
313
+ * does not support image generation.
314
+ *
315
+ * @remarks
316
+ * Image-model selection reuses the existing `image` {@link ModelSpecKey}.
317
+ * Providers with `imageApiFormat` set should declare a model in
318
+ * `defaultModel.image`, e.g. `{ base: 'gpt-4o', image: 'dall-e-3' }`.
319
+ */
320
+ readonly imageApiFormat?: AiImageApiFormat;
321
+ }
322
+ /**
323
+ * Options for image generation requests.
324
+ *
325
+ * @remarks
326
+ * Provider compatibility is documented per field. The library does not
327
+ * pre-validate against per-model constraints (e.g. `dall-e-3` rejects
328
+ * `count > 1`); provider 400 errors surface through the failure path.
329
+ *
330
+ * @public
331
+ */
332
+ export interface IAiImageGenerationOptions {
333
+ /**
334
+ * Image dimensions. Used by openai-format providers (mapped to the
335
+ * provider's `size` field). Ignored by Imagen — use
336
+ * {@link IAiImageGenerationOptions.imagen} `aspectRatio` instead.
337
+ *
338
+ * Note: each model has its own accepted set; `dall-e-3` only accepts the
339
+ * values listed here.
340
+ */
341
+ readonly size?: '1024x1024' | '1024x1792' | '1792x1024' | 'auto';
342
+ /**
343
+ * Number of images to generate. Default 1.
344
+ *
345
+ * Note: `dall-e-3` rejects `count > 1`.
346
+ */
347
+ readonly count?: number;
348
+ /** Generation quality hint where supported. */
349
+ readonly quality?: 'standard' | 'high';
350
+ /** Random seed for reproducibility, where supported. */
351
+ readonly seed?: number;
352
+ /**
353
+ * Imagen-specific options. Ignored by other providers.
354
+ */
355
+ readonly imagen?: {
356
+ readonly negativePrompt?: string;
357
+ readonly aspectRatio?: '1:1' | '3:4' | '4:3' | '9:16' | '16:9';
358
+ };
359
+ }
360
+ /**
361
+ * Parameters for an image-generation request.
362
+ * @public
363
+ */
364
+ export interface IAiImageGenerationParams {
365
+ /** The text prompt describing the desired image. */
366
+ readonly prompt: string;
367
+ /** Optional generation options. */
368
+ readonly options?: IAiImageGenerationOptions;
369
+ }
370
+ /**
371
+ * A single generated image.
372
+ * @public
373
+ */
374
+ export interface IAiGeneratedImage extends IAiImageData {
375
+ /**
376
+ * The prompt as rewritten by the provider, if any. OpenAI's image models
377
+ * commonly rewrite prompts; other providers do not.
378
+ */
379
+ readonly revisedPrompt?: string;
380
+ }
381
+ /**
382
+ * Capability vocabulary used to describe what a model can do. Used as both
383
+ * a filter and as a tag in {@link AiAssist.IAiModelInfo.capabilities}.
384
+ *
385
+ * @remarks
386
+ * Adding a new capability is cheap; adding the *first* one after consumers
387
+ * already exist forces churn. The initial vocabulary is intentionally broad
388
+ * even though only `image-generation` is fully exercised today.
389
+ *
390
+ * @public
391
+ */
392
+ export type AiModelCapability = 'chat' | 'tools' | 'vision' | 'image-generation';
393
+ /**
394
+ * Information about a single model returned by a provider's list endpoint,
395
+ * with capabilities already resolved (native + config rules).
396
+ * @public
397
+ */
398
+ export interface IAiModelInfo {
399
+ /** Provider-native model identifier. */
400
+ readonly id: string;
401
+ /** Resolved capability set — union of native declarations and config rules. */
402
+ readonly capabilities: ReadonlySet<AiModelCapability>;
403
+ /** Friendly name for display, when known. */
404
+ readonly displayName?: string;
405
+ }
406
+ /**
407
+ * One rule in an {@link IAiModelCapabilityConfig}. Multiple rules can match
408
+ * a single model — their capability arrays are unioned.
409
+ * @public
410
+ */
411
+ export interface IAiModelCapabilityRule {
412
+ /** RegExp tested against the model id (using `.test`). */
413
+ readonly idPattern: RegExp;
414
+ /** Capabilities this rule attributes to matching models. */
415
+ readonly capabilities: ReadonlyArray<AiModelCapability>;
416
+ /**
417
+ * Friendly display-name override for matching models. The function form
418
+ * lets one rule format many ids (e.g. `(id) => id.toUpperCase()`).
419
+ * If multiple matching rules supply `displayName`, the first match wins.
420
+ */
421
+ readonly displayName?: string | ((id: string) => string);
422
+ }
423
+ /**
424
+ * Configuration that maps model id patterns to capabilities. Used to
425
+ * augment (or, where the provider supplies no capability info, fully
426
+ * derive) the capability set for each listed model.
427
+ * @public
428
+ */
429
+ export interface IAiModelCapabilityConfig {
430
+ /** Per-provider rules. Tried before {@link AiAssist.IAiModelCapabilityConfig.global}. */
431
+ readonly perProvider?: {
432
+ readonly [P in AiProviderId]?: ReadonlyArray<IAiModelCapabilityRule>;
433
+ };
434
+ /** Cross-provider fallback rules. */
435
+ readonly global?: ReadonlyArray<IAiModelCapabilityRule>;
436
+ }
437
+ /**
438
+ * Result of an image-generation call.
439
+ * @public
440
+ */
441
+ export interface IAiImageGenerationResponse {
442
+ /** The generated images, in provider-returned order. */
443
+ readonly images: ReadonlyArray<IAiGeneratedImage>;
175
444
  }
176
445
  /**
177
446
  * Configuration for a single AI assist provider.
@@ -20,7 +20,17 @@
20
20
  // SOFTWARE.
21
21
  Object.defineProperty(exports, "__esModule", { value: true });
22
22
  exports.DEFAULT_AI_ASSIST = exports.MODEL_SPEC_BASE_KEY = exports.allModelSpecKeys = exports.AiPrompt = void 0;
23
+ exports.toDataUrl = toDataUrl;
23
24
  exports.resolveModel = resolveModel;
25
+ /**
26
+ * Formats an {@link IAiImageData} as a `data:` URL suitable for browser display.
27
+ * @param image - The image to format
28
+ * @returns A `data:<mime>;base64,<data>` URL string
29
+ * @public
30
+ */
31
+ function toDataUrl(image) {
32
+ return `data:${image.mimeType};base64,${image.base64}`;
33
+ }
24
34
  // ============================================================================
25
35
  // AiPrompt
26
36
  // ============================================================================
@@ -30,13 +40,21 @@ exports.resolveModel = resolveModel;
30
40
  * @public
31
41
  */
32
42
  class AiPrompt {
33
- constructor(user, system) {
43
+ constructor(user, system, attachments) {
34
44
  this.system = system;
35
45
  this.user = user;
46
+ this.attachments = attachments !== null && attachments !== void 0 ? attachments : [];
36
47
  }
37
- /** Combined single-string version (user + system joined) for copy/paste. */
48
+ /**
49
+ * Combined single-string version (user + system joined) for copy/paste.
50
+ * When attachments are present, includes a sentinel noting they aren't
51
+ * part of the copied text.
52
+ */
38
53
  get combined() {
39
- return `${this.user}\n\n${this.system}`;
54
+ const sentinel = this.attachments.length > 0
55
+ ? `\n\n[${this.attachments.length} image attachment(s) — not included in copied text]`
56
+ : '';
57
+ return `${this.user}${sentinel}\n\n${this.system}`;
40
58
  }
41
59
  }
42
60
  exports.AiPrompt = AiPrompt;