@eminent337/aery-ai 0.1.119

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (210)
  1. package/README.md +1383 -0
  2. package/dist/api-registry.d.ts +20 -0
  3. package/dist/api-registry.d.ts.map +1 -0
  4. package/dist/api-registry.js +44 -0
  5. package/dist/api-registry.js.map +1 -0
  6. package/dist/bedrock-provider.d.ts +5 -0
  7. package/dist/bedrock-provider.d.ts.map +1 -0
  8. package/dist/bedrock-provider.js +6 -0
  9. package/dist/bedrock-provider.js.map +1 -0
  10. package/dist/cli.d.ts +3 -0
  11. package/dist/cli.d.ts.map +1 -0
  12. package/dist/cli.js +116 -0
  13. package/dist/cli.js.map +1 -0
  14. package/dist/env-api-keys.d.ts +18 -0
  15. package/dist/env-api-keys.d.ts.map +1 -0
  16. package/dist/env-api-keys.js +170 -0
  17. package/dist/env-api-keys.js.map +1 -0
  18. package/dist/image-models.d.ts +10 -0
  19. package/dist/image-models.d.ts.map +1 -0
  20. package/dist/image-models.generated.d.ts +260 -0
  21. package/dist/image-models.generated.d.ts.map +1 -0
  22. package/dist/image-models.generated.js +262 -0
  23. package/dist/image-models.generated.js.map +1 -0
  24. package/dist/image-models.js +23 -0
  25. package/dist/image-models.js.map +1 -0
  26. package/dist/images-api-registry.d.ts +14 -0
  27. package/dist/images-api-registry.d.ts.map +1 -0
  28. package/dist/images-api-registry.js +22 -0
  29. package/dist/images-api-registry.js.map +1 -0
  30. package/dist/images.d.ts +4 -0
  31. package/dist/images.d.ts.map +1 -0
  32. package/dist/images.js +14 -0
  33. package/dist/images.js.map +1 -0
  34. package/dist/index.d.ts +32 -0
  35. package/dist/index.d.ts.map +1 -0
  36. package/dist/index.js +20 -0
  37. package/dist/index.js.map +1 -0
  38. package/dist/models.d.ts +18 -0
  39. package/dist/models.d.ts.map +1 -0
  40. package/dist/models.generated.d.ts +18237 -0
  41. package/dist/models.generated.d.ts.map +1 -0
  42. package/dist/models.generated.js +17251 -0
  43. package/dist/models.generated.js.map +1 -0
  44. package/dist/models.js +71 -0
  45. package/dist/models.js.map +1 -0
  46. package/dist/oauth.d.ts +2 -0
  47. package/dist/oauth.d.ts.map +1 -0
  48. package/dist/oauth.js +2 -0
  49. package/dist/oauth.js.map +1 -0
  50. package/dist/providers/aery-error-formatting.d.ts +13 -0
  51. package/dist/providers/aery-error-formatting.d.ts.map +1 -0
  52. package/dist/providers/aery-error-formatting.js +112 -0
  53. package/dist/providers/aery-error-formatting.js.map +1 -0
  54. package/dist/providers/amazon-bedrock.d.ts +38 -0
  55. package/dist/providers/amazon-bedrock.d.ts.map +1 -0
  56. package/dist/providers/amazon-bedrock.js +750 -0
  57. package/dist/providers/amazon-bedrock.js.map +1 -0
  58. package/dist/providers/anthropic.d.ts +54 -0
  59. package/dist/providers/anthropic.d.ts.map +1 -0
  60. package/dist/providers/anthropic.js +960 -0
  61. package/dist/providers/anthropic.js.map +1 -0
  62. package/dist/providers/azure-openai-responses.d.ts +15 -0
  63. package/dist/providers/azure-openai-responses.d.ts.map +1 -0
  64. package/dist/providers/azure-openai-responses.js +208 -0
  65. package/dist/providers/azure-openai-responses.js.map +1 -0
  66. package/dist/providers/cloudflare.d.ts +13 -0
  67. package/dist/providers/cloudflare.d.ts.map +1 -0
  68. package/dist/providers/cloudflare.js +26 -0
  69. package/dist/providers/cloudflare.js.map +1 -0
  70. package/dist/providers/faux.d.ts +56 -0
  71. package/dist/providers/faux.d.ts.map +1 -0
  72. package/dist/providers/faux.js +368 -0
  73. package/dist/providers/faux.js.map +1 -0
  74. package/dist/providers/github-copilot-headers.d.ts +8 -0
  75. package/dist/providers/github-copilot-headers.d.ts.map +1 -0
  76. package/dist/providers/github-copilot-headers.js +29 -0
  77. package/dist/providers/github-copilot-headers.js.map +1 -0
  78. package/dist/providers/google-shared.d.ts +70 -0
  79. package/dist/providers/google-shared.d.ts.map +1 -0
  80. package/dist/providers/google-shared.js +329 -0
  81. package/dist/providers/google-shared.js.map +1 -0
  82. package/dist/providers/google-vertex.d.ts +15 -0
  83. package/dist/providers/google-vertex.d.ts.map +1 -0
  84. package/dist/providers/google-vertex.js +442 -0
  85. package/dist/providers/google-vertex.js.map +1 -0
  86. package/dist/providers/google.d.ts +13 -0
  87. package/dist/providers/google.d.ts.map +1 -0
  88. package/dist/providers/google.js +400 -0
  89. package/dist/providers/google.js.map +1 -0
  90. package/dist/providers/images/openrouter.d.ts +3 -0
  91. package/dist/providers/images/openrouter.d.ts.map +1 -0
  92. package/dist/providers/images/openrouter.js +129 -0
  93. package/dist/providers/images/openrouter.js.map +1 -0
  94. package/dist/providers/images/register-builtins.d.ts +4 -0
  95. package/dist/providers/images/register-builtins.d.ts.map +1 -0
  96. package/dist/providers/images/register-builtins.js +34 -0
  97. package/dist/providers/images/register-builtins.js.map +1 -0
  98. package/dist/providers/mistral.d.ts +25 -0
  99. package/dist/providers/mistral.d.ts.map +1 -0
  100. package/dist/providers/mistral.js +535 -0
  101. package/dist/providers/mistral.js.map +1 -0
  102. package/dist/providers/openai-codex-responses.d.ts +30 -0
  103. package/dist/providers/openai-codex-responses.d.ts.map +1 -0
  104. package/dist/providers/openai-codex-responses.js +1080 -0
  105. package/dist/providers/openai-codex-responses.js.map +1 -0
  106. package/dist/providers/openai-completions.d.ts +19 -0
  107. package/dist/providers/openai-completions.d.ts.map +1 -0
  108. package/dist/providers/openai-completions.js +936 -0
  109. package/dist/providers/openai-completions.js.map +1 -0
  110. package/dist/providers/openai-responses-shared.d.ts +18 -0
  111. package/dist/providers/openai-responses-shared.d.ts.map +1 -0
  112. package/dist/providers/openai-responses-shared.js +492 -0
  113. package/dist/providers/openai-responses-shared.js.map +1 -0
  114. package/dist/providers/openai-responses.d.ts +13 -0
  115. package/dist/providers/openai-responses.d.ts.map +1 -0
  116. package/dist/providers/openai-responses.js +220 -0
  117. package/dist/providers/openai-responses.js.map +1 -0
  118. package/dist/providers/register-builtins.d.ts +35 -0
  119. package/dist/providers/register-builtins.d.ts.map +1 -0
  120. package/dist/providers/register-builtins.js +243 -0
  121. package/dist/providers/register-builtins.js.map +1 -0
  122. package/dist/providers/simple-options.d.ts +8 -0
  123. package/dist/providers/simple-options.d.ts.map +1 -0
  124. package/dist/providers/simple-options.js +39 -0
  125. package/dist/providers/simple-options.js.map +1 -0
  126. package/dist/providers/transform-messages.d.ts +8 -0
  127. package/dist/providers/transform-messages.d.ts.map +1 -0
  128. package/dist/providers/transform-messages.js +184 -0
  129. package/dist/providers/transform-messages.js.map +1 -0
  130. package/dist/session-resources.d.ts +4 -0
  131. package/dist/session-resources.d.ts.map +1 -0
  132. package/dist/session-resources.js +22 -0
  133. package/dist/session-resources.js.map +1 -0
  134. package/dist/stream.d.ts +8 -0
  135. package/dist/stream.d.ts.map +1 -0
  136. package/dist/stream.js +27 -0
  137. package/dist/stream.js.map +1 -0
  138. package/dist/types.d.ts +488 -0
  139. package/dist/types.d.ts.map +1 -0
  140. package/dist/types.js +2 -0
  141. package/dist/types.js.map +1 -0
  142. package/dist/utils/diagnostics.d.ts +19 -0
  143. package/dist/utils/diagnostics.d.ts.map +1 -0
  144. package/dist/utils/diagnostics.js +25 -0
  145. package/dist/utils/diagnostics.js.map +1 -0
  146. package/dist/utils/event-stream.d.ts +21 -0
  147. package/dist/utils/event-stream.d.ts.map +1 -0
  148. package/dist/utils/event-stream.js +81 -0
  149. package/dist/utils/event-stream.js.map +1 -0
  150. package/dist/utils/hash.d.ts +3 -0
  151. package/dist/utils/hash.d.ts.map +1 -0
  152. package/dist/utils/hash.js +14 -0
  153. package/dist/utils/hash.js.map +1 -0
  154. package/dist/utils/headers.d.ts +2 -0
  155. package/dist/utils/headers.d.ts.map +1 -0
  156. package/dist/utils/headers.js +8 -0
  157. package/dist/utils/headers.js.map +1 -0
  158. package/dist/utils/json-parse.d.ts +16 -0
  159. package/dist/utils/json-parse.d.ts.map +1 -0
  160. package/dist/utils/json-parse.js +113 -0
  161. package/dist/utils/json-parse.js.map +1 -0
  162. package/dist/utils/node-http-proxy.d.ts +10 -0
  163. package/dist/utils/node-http-proxy.d.ts.map +1 -0
  164. package/dist/utils/node-http-proxy.js +34 -0
  165. package/dist/utils/node-http-proxy.js.map +1 -0
  166. package/dist/utils/oauth/anthropic.d.ts +25 -0
  167. package/dist/utils/oauth/anthropic.d.ts.map +1 -0
  168. package/dist/utils/oauth/anthropic.js +335 -0
  169. package/dist/utils/oauth/anthropic.js.map +1 -0
  170. package/dist/utils/oauth/github-copilot.d.ts +30 -0
  171. package/dist/utils/oauth/github-copilot.d.ts.map +1 -0
  172. package/dist/utils/oauth/github-copilot.js +292 -0
  173. package/dist/utils/oauth/github-copilot.js.map +1 -0
  174. package/dist/utils/oauth/index.d.ts +57 -0
  175. package/dist/utils/oauth/index.d.ts.map +1 -0
  176. package/dist/utils/oauth/index.js +121 -0
  177. package/dist/utils/oauth/index.js.map +1 -0
  178. package/dist/utils/oauth/oauth-page.d.ts +3 -0
  179. package/dist/utils/oauth/oauth-page.d.ts.map +1 -0
  180. package/dist/utils/oauth/oauth-page.js +105 -0
  181. package/dist/utils/oauth/oauth-page.js.map +1 -0
  182. package/dist/utils/oauth/openai-codex.d.ts +34 -0
  183. package/dist/utils/oauth/openai-codex.d.ts.map +1 -0
  184. package/dist/utils/oauth/openai-codex.js +385 -0
  185. package/dist/utils/oauth/openai-codex.js.map +1 -0
  186. package/dist/utils/oauth/pkce.d.ts +13 -0
  187. package/dist/utils/oauth/pkce.d.ts.map +1 -0
  188. package/dist/utils/oauth/pkce.js +31 -0
  189. package/dist/utils/oauth/pkce.js.map +1 -0
  190. package/dist/utils/oauth/types.d.ts +57 -0
  191. package/dist/utils/oauth/types.d.ts.map +1 -0
  192. package/dist/utils/oauth/types.js +2 -0
  193. package/dist/utils/oauth/types.js.map +1 -0
  194. package/dist/utils/overflow.d.ts +56 -0
  195. package/dist/utils/overflow.d.ts.map +1 -0
  196. package/dist/utils/overflow.js +149 -0
  197. package/dist/utils/overflow.js.map +1 -0
  198. package/dist/utils/sanitize-unicode.d.ts +22 -0
  199. package/dist/utils/sanitize-unicode.d.ts.map +1 -0
  200. package/dist/utils/sanitize-unicode.js +26 -0
  201. package/dist/utils/sanitize-unicode.js.map +1 -0
  202. package/dist/utils/typebox-helpers.d.ts +17 -0
  203. package/dist/utils/typebox-helpers.d.ts.map +1 -0
  204. package/dist/utils/typebox-helpers.js +21 -0
  205. package/dist/utils/typebox-helpers.js.map +1 -0
  206. package/dist/utils/validation.d.ts +18 -0
  207. package/dist/utils/validation.d.ts.map +1 -0
  208. package/dist/utils/validation.js +281 -0
  209. package/dist/utils/validation.js.map +1 -0
  210. package/package.json +108 -0
package/README.md ADDED
@@ -0,0 +1,1383 @@
1
+ # @eminent337/aery-ai
2
+
3
+ Unified LLM API with automatic model discovery, provider configuration, token and cost tracking, and simple context persistence and hand-off to other models mid-session.
4
+
5
+ **Note**: This library only includes models that support tool calling (function calling), as this is essential for agentic workflows.
6
+
7
+ ## Table of Contents
8
+
9
+ - [Supported Providers](#supported-providers)
10
+ - [Installation](#installation)
11
+ - [Quick Start](#quick-start)
12
+ - [Tools](#tools)
13
+ - [Defining Tools](#defining-tools)
14
+ - [Handling Tool Calls](#handling-tool-calls)
15
+ - [Streaming Tool Calls with Partial JSON](#streaming-tool-calls-with-partial-json)
16
+ - [Validating Tool Arguments](#validating-tool-arguments)
17
+ - [Complete Event Reference](#complete-event-reference)
18
+ - [Image Input](#image-input)
19
+ - [Image Generation](#image-generation)
20
+ - [Basic Image Generation](#basic-image-generation)
21
+ - [Notes and Limitations](#notes-and-limitations)
22
+ - [Thinking/Reasoning](#thinkingreasoning)
23
+ - [Unified Interface](#unified-interface-streamsimplecompletesimple)
24
+ - [Provider-Specific Options](#provider-specific-options-streamcomplete)
25
+ - [Streaming Thinking Content](#streaming-thinking-content)
26
+ - [Stop Reasons](#stop-reasons)
27
+ - [Error Handling](#error-handling)
28
+ - [Aborting Requests](#aborting-requests)
29
+ - [Continuing After Abort](#continuing-after-abort)
30
+ - [APIs, Models, and Providers](#apis-models-and-providers)
31
+ - [Providers and Models](#providers-and-models)
32
+ - [Querying Providers and Models](#querying-providers-and-models)
33
+ - [Custom Models](#custom-models)
34
+ - [OpenAI Compatibility Settings](#openai-compatibility-settings)
35
+ - [Type Safety](#type-safety)
36
+ - [Cross-Provider Handoffs](#cross-provider-handoffs)
37
+ - [Context Serialization](#context-serialization)
38
+ - [Browser Usage](#browser-usage)
39
+ - [Browser Compatibility Notes](#browser-compatibility-notes)
40
+ - [Environment Variables](#environment-variables-nodejs-only)
41
+ - [Checking Environment Variables](#checking-environment-variables)
42
+ - [OAuth Providers](#oauth-providers)
43
+ - [Vertex AI](#vertex-ai)
44
+ - [CLI Login](#cli-login)
45
+ - [Programmatic OAuth](#programmatic-oauth)
46
+ - [Login Flow Example](#login-flow-example)
47
+ - [Using OAuth Tokens](#using-oauth-tokens)
48
+ - [Provider Notes](#provider-notes)
49
+ - [License](#license)
50
+
51
+ ## Supported Providers
52
+
53
+ - **OpenAI**
54
+ - **Azure OpenAI (Responses)**
55
+ - **OpenAI Codex** (ChatGPT Plus/Pro subscription, requires OAuth, see below)
56
+ - **DeepSeek**
57
+ - **Anthropic**
58
+ - **Google**
59
+ - **Vertex AI** (Gemini via Vertex AI)
60
+ - **Mistral**
61
+ - **Groq**
62
+ - **Cerebras**
63
+ - **Cloudflare AI Gateway**
64
+ - **Cloudflare Workers AI**
65
+ - **xAI**
66
+ - **OpenRouter**
67
+ - **Vercel AI Gateway**
68
+ - **MiniMax**
69
+ - **Together AI**
70
+ - **GitHub Copilot** (requires OAuth, see below)
71
+ - **Amazon Bedrock**
72
+ - **OpenCode Zen**
73
+ - **OpenCode Go**
74
+ - **Fireworks** (uses Anthropic-compatible API)
75
+ - **Kimi For Coding** (Moonshot AI, uses Anthropic-compatible API)
76
+ - **Xiaomi MiMo** (uses Anthropic-compatible API; defaults to API billing endpoint, with separate Token Plan providers for `cn`/`ams`/`sgp` regions)
77
+ - **Any OpenAI-compatible API**: Ollama, vLLM, LM Studio, etc.
78
+
79
+ ## Installation
80
+
81
+ ```bash
82
+ npm install @eminent337/aery-ai
83
+ ```
84
+
85
+ TypeBox exports are re-exported from `@eminent337/aery-ai`: `Type`, `Static`, and `TSchema`.
86
+
87
+ ## Quick Start
88
+
89
+ ```typescript
90
+ import { Type, getModel, stream, complete, Context, Tool, StringEnum } from '@eminent337/aery-ai';
91
+
92
+ // Fully typed with auto-complete support for both providers and models
93
+ const model = getModel('openai', 'gpt-4o-mini');
94
+
95
+ // Define tools with TypeBox schemas for type safety and validation
96
+ const tools: Tool[] = [{
97
+ name: 'get_time',
98
+ description: 'Get the current time',
99
+ parameters: Type.Object({
100
+ timezone: Type.Optional(Type.String({ description: 'Optional timezone (e.g., America/New_York)' }))
101
+ })
102
+ }];
103
+
104
+ // Build a conversation context (easily serializable and transferable between models)
105
+ const context: Context = {
106
+ systemPrompt: 'You are a helpful assistant.',
107
+ messages: [{ role: 'user', content: 'What time is it?' }],
108
+ tools
109
+ };
110
+
111
+ // Option 1: Streaming with all event types
112
+ const s = stream(model, context);
113
+
114
+ for await (const event of s) {
115
+ switch (event.type) {
116
+ case 'start':
117
+ console.log(`Starting with ${event.partial.model}`);
118
+ break;
119
+ case 'text_start':
120
+ console.log('\n[Text started]');
121
+ break;
122
+ case 'text_delta':
123
+ process.stdout.write(event.delta);
124
+ break;
125
+ case 'text_end':
126
+ console.log('\n[Text ended]');
127
+ break;
128
+ case 'thinking_start':
129
+ console.log('[Model is thinking...]');
130
+ break;
131
+ case 'thinking_delta':
132
+ process.stdout.write(event.delta);
133
+ break;
134
+ case 'thinking_end':
135
+ console.log('[Thinking complete]');
136
+ break;
137
+ case 'toolcall_start':
138
+ console.log(`\n[Tool call started: index ${event.contentIndex}]`);
139
+ break;
140
+ case 'toolcall_delta':
141
+ // Partial tool arguments are being streamed
142
+ const partialCall = event.partial.content[event.contentIndex];
143
+ if (partialCall.type === 'toolCall') {
144
+ console.log(`[Streaming args for ${partialCall.name}]`);
145
+ }
146
+ break;
147
+ case 'toolcall_end':
148
+ console.log(`\nTool called: ${event.toolCall.name}`);
149
+ console.log(`Arguments: ${JSON.stringify(event.toolCall.arguments)}`);
150
+ break;
151
+ case 'done':
152
+ console.log(`\nFinished: ${event.reason}`);
153
+ break;
154
+ case 'error':
155
+ console.error(`Error: ${event.error}`);
156
+ break;
157
+ }
158
+ }
159
+
160
+ // Get the final message after streaming, add it to the context
161
+ const finalMessage = await s.result();
162
+ context.messages.push(finalMessage);
163
+
164
+ // Handle tool calls if any
165
+ const toolCalls = finalMessage.content.filter(b => b.type === 'toolCall');
166
+ for (const call of toolCalls) {
167
+ // Execute the tool
168
+ const result = call.name === 'get_time'
169
+ ? new Date().toLocaleString('en-US', {
170
+ timeZone: call.arguments.timezone || 'UTC',
171
+ dateStyle: 'full',
172
+ timeStyle: 'long'
173
+ })
174
+ : 'Unknown tool';
175
+
176
+ // Add tool result to context (supports text and images)
177
+ context.messages.push({
178
+ role: 'toolResult',
179
+ toolCallId: call.id,
180
+ toolName: call.name,
181
+ content: [{ type: 'text', text: result }],
182
+ isError: false,
183
+ timestamp: Date.now()
184
+ });
185
+ }
186
+
187
+ // Continue if there were tool calls
188
+ if (toolCalls.length > 0) {
189
+ const continuation = await complete(model, context);
190
+ context.messages.push(continuation);
191
+ console.log('After tool execution:', continuation.content);
192
+ }
193
+
194
+ console.log(`Total tokens: ${finalMessage.usage.input} in, ${finalMessage.usage.output} out`);
195
+ console.log(`Cost: $${finalMessage.usage.cost.total.toFixed(4)}`);
196
+
197
+ // Option 2: Get complete response without streaming
198
+ const response = await complete(model, context);
199
+
200
+ for (const block of response.content) {
201
+ if (block.type === 'text') {
202
+ console.log(block.text);
203
+ } else if (block.type === 'toolCall') {
204
+ console.log(`Tool: ${block.name}(${JSON.stringify(block.arguments)})`);
205
+ }
206
+ }
207
+ ```
208
+
209
+ ## Tools
210
+
211
+ Tools enable LLMs to interact with external systems. This library uses TypeBox schemas for type-safe tool definitions with automatic validation using TypeBox's built-in validator and value conversion utilities. TypeBox schemas can be serialized and deserialized as plain JSON, making them ideal for distributed systems.
212
+
213
+ ### Defining Tools
214
+
215
+ ```typescript
216
+ import { Type, Tool, StringEnum } from '@eminent337/aery-ai';
217
+
218
+ // Define tool parameters with TypeBox
219
+ const weatherTool: Tool = {
220
+ name: 'get_weather',
221
+ description: 'Get current weather for a location',
222
+ parameters: Type.Object({
223
+ location: Type.String({ description: 'City name or coordinates' }),
224
+ units: StringEnum(['celsius', 'fahrenheit'], { default: 'celsius' })
225
+ })
226
+ };
227
+
228
+ // Note: For Google API compatibility, use StringEnum helper instead of Type.Enum
229
+ // Type.Enum generates anyOf/const patterns that Google doesn't support
230
+
231
+ const bookMeetingTool: Tool = {
232
+ name: 'book_meeting',
233
+ description: 'Schedule a meeting',
234
+ parameters: Type.Object({
235
+ title: Type.String({ minLength: 1 }),
236
+ startTime: Type.String({ format: 'date-time' }),
237
+ endTime: Type.String({ format: 'date-time' }),
238
+ attendees: Type.Array(Type.String({ format: 'email' }), { minItems: 1 })
239
+ })
240
+ };
241
+ ```
242
+
243
+ ### Handling Tool Calls
244
+
245
+ Tool results use content blocks and can include both text and images:
246
+
247
+ ```typescript
248
+ import { readFileSync } from 'fs';
249
+
250
+ const context: Context = {
251
+ messages: [{ role: 'user', content: 'What is the weather in London?' }],
252
+ tools: [weatherTool]
253
+ };
254
+
255
+ const response = await complete(model, context);
256
+
257
+ // Check for tool calls in the response
258
+ for (const block of response.content) {
259
+ if (block.type === 'toolCall') {
260
+ // Execute your tool with the arguments
261
+ // See "Validating Tool Arguments" section for validation
262
+ const result = await executeWeatherApi(block.arguments);
263
+
264
+ // Add tool result with text content
265
+ context.messages.push({
266
+ role: 'toolResult',
267
+ toolCallId: block.id,
268
+ toolName: block.name,
269
+ content: [{ type: 'text', text: JSON.stringify(result) }],
270
+ isError: false,
271
+ timestamp: Date.now()
272
+ });
273
+ }
274
+ }
275
+
276
+ // Tool results can also include images (for vision-capable models)
277
+ const imageBuffer = readFileSync('chart.png');
278
+ context.messages.push({
279
+ role: 'toolResult',
280
+ toolCallId: 'tool_xyz',
281
+ toolName: 'generate_chart',
282
+ content: [
283
+ { type: 'text', text: 'Generated chart showing temperature trends' },
284
+ { type: 'image', data: imageBuffer.toString('base64'), mimeType: 'image/png' }
285
+ ],
286
+ isError: false,
287
+ timestamp: Date.now()
288
+ });
289
+ ```
290
+
291
+ ### Streaming Tool Calls with Partial JSON
292
+
293
+ During streaming, tool call arguments are progressively parsed as they arrive. This enables real-time UI updates before the complete arguments are available:
294
+
295
+ ```typescript
296
+ const s = stream(model, context);
297
+
298
+ for await (const event of s) {
299
+ if (event.type === 'toolcall_delta') {
300
+ const toolCall = event.partial.content[event.contentIndex];
301
+
302
+ // toolCall.arguments contains partially parsed JSON during streaming
303
+ // This allows for progressive UI updates
304
+ if (toolCall.type === 'toolCall' && toolCall.arguments) {
305
+ // BE DEFENSIVE: arguments may be incomplete
306
+ // Example: Show file path being written even before content is complete
307
+ if (toolCall.name === 'write_file' && toolCall.arguments.path) {
308
+ console.log(`Writing to: ${toolCall.arguments.path}`);
309
+
310
+ // Content might be partial or missing
311
+ if (toolCall.arguments.content) {
312
+ console.log(`Content preview: ${toolCall.arguments.content.substring(0, 100)}...`);
313
+ }
314
+ }
315
+ }
316
+ }
317
+
318
+ if (event.type === 'toolcall_end') {
319
+ // Here toolCall.arguments is complete (but not yet validated)
320
+ const toolCall = event.toolCall;
321
+ console.log(`Tool completed: ${toolCall.name}`, toolCall.arguments);
322
+ }
323
+ }
324
+ ```
325
+
326
+ **Important notes about partial tool arguments:**
327
+ - During `toolcall_delta` events, `arguments` contains the best-effort parse of partial JSON
328
+ - Fields may be missing or incomplete - always check for existence before use
329
+ - String values may be truncated mid-word
330
+ - Arrays may be incomplete
331
+ - Nested objects may be partially populated
332
+ - At minimum, `arguments` will be an empty object `{}`, never `undefined`
333
+ - The Google provider does not support function call streaming. Instead, you will receive a single `toolcall_delta` event with the full arguments.
334
+
335
+ ### Validating Tool Arguments
336
+
337
+ When using `agentLoop`, tool arguments are automatically validated against your TypeBox schemas before execution. If validation fails, the error is returned to the model as a tool result, allowing it to retry.
338
+
339
+ When implementing your own tool execution loop with `stream()` or `complete()`, use `validateToolCall` to validate arguments before passing them to your tools:
340
+
341
+ ```typescript
342
+ import { stream, validateToolCall, Tool } from '@eminent337/aery-ai';
343
+
344
+ const tools: Tool[] = [weatherTool, calculatorTool];
345
+ const s = stream(model, { messages, tools });
346
+
347
+ for await (const event of s) {
348
+ if (event.type === 'toolcall_end') {
349
+ const toolCall = event.toolCall;
350
+
351
+ try {
352
+ // Validate arguments against the tool's schema (throws on invalid args)
353
+ const validatedArgs = validateToolCall(tools, toolCall);
354
+ const result = await executeMyTool(toolCall.name, validatedArgs);
355
+ // ... add tool result to context
356
+ } catch (error) {
357
+ // Validation failed - return error as tool result so model can retry
358
+ context.messages.push({
359
+ role: 'toolResult',
360
+ toolCallId: toolCall.id,
361
+ toolName: toolCall.name,
362
+ content: [{ type: 'text', text: error.message }],
363
+ isError: true,
364
+ timestamp: Date.now()
365
+ });
366
+ }
367
+ }
368
+ }
369
+ ```
370
+
371
+ ### Complete Event Reference
372
+
373
+ All streaming events emitted during assistant message generation:
374
+
375
+ | Event Type | Description | Key Properties |
376
+ |------------|-------------|----------------|
377
+ | `start` | Stream begins | `partial`: Initial assistant message structure |
378
+ | `text_start` | Text block starts | `contentIndex`: Position in content array |
379
+ | `text_delta` | Text chunk received | `delta`: New text, `contentIndex`: Position |
380
+ | `text_end` | Text block complete | `content`: Full text, `contentIndex`: Position |
381
+ | `thinking_start` | Thinking block starts | `contentIndex`: Position in content array |
382
+ | `thinking_delta` | Thinking chunk received | `delta`: New text, `contentIndex`: Position |
383
+ | `thinking_end` | Thinking block complete | `content`: Full thinking, `contentIndex`: Position |
384
+ | `toolcall_start` | Tool call begins | `contentIndex`: Position in content array |
385
+ | `toolcall_delta` | Tool arguments streaming | `delta`: JSON chunk, `partial.content[contentIndex].arguments`: Partial parsed args |
386
+ | `toolcall_end` | Tool call complete | `toolCall`: Complete validated tool call with `id`, `name`, `arguments` |
387
+ | `done` | Stream complete | `reason`: Stop reason ("stop", "length", "toolUse"), `message`: Final assistant message |
388
+ | `error` | Error occurred | `reason`: Error type ("error" or "aborted"), `error`: AssistantMessage with partial content |
389
+
390
+ Streaming events for different content blocks are not guaranteed to be contiguous. Providers may emit deltas for text, thinking, and tool calls in the same upstream chunk, and the library may surface corresponding events interleaved, for example `text_start`, `text_delta`, `toolcall_start`, `text_delta`, `toolcall_delta`. Consumers must use `contentIndex` to associate each delta/end event with its block and must not assume that a block's `*_start`/`*_delta`/`*_end` sequence is uninterrupted by events for other blocks.
391
+
392
+ ## Image Input
393
+
394
+ Models with vision capabilities can process images. You can check if a model supports images via the `input` property. If you pass images to a non-vision model, they are silently ignored.
395
+
396
+ ```typescript
397
+ import { readFileSync } from 'fs';
398
+ import { getModel, complete } from '@eminent337/aery-ai';
399
+
400
+ const model = getModel('openai', 'gpt-4o-mini');
401
+
402
+ // Check if model supports images
403
+ if (model.input.includes('image')) {
404
+ console.log('Model supports vision');
405
+ }
406
+
407
+ const imageBuffer = readFileSync('image.png');
408
+ const base64Image = imageBuffer.toString('base64');
409
+
410
+ const response = await complete(model, {
411
+ messages: [{
412
+ role: 'user',
413
+ content: [
414
+ { type: 'text', text: 'What is in this image?' },
415
+ { type: 'image', data: base64Image, mimeType: 'image/png' }
416
+ ]
417
+ }]
418
+ });
419
+
420
+ // Access the response
421
+ for (const block of response.content) {
422
+ if (block.type === 'text') {
423
+ console.log(block.text);
424
+ }
425
+ }
426
+ ```
427
+
428
+ ## Image Generation
429
+
430
+ Image generation uses a separate API surface from text/chat generation. Use `getImageModel()` / `getImageModels()` / `getImageProviders()` to discover image-generation models, and `generateImages()` to get the final result.
431
+
432
+ Do not use `stream()` or `complete()` for image generation. Image generation is a one-shot API: `generateImages()` waits for the provider response and returns the final `AssistantImages` result.
433
+
434
+ ### Basic Image Generation
435
+
436
+ ```typescript
437
+ import { getImageModel, generateImages } from '@eminent337/aery-ai';
438
+
439
+ const model = getImageModel('openrouter', 'google/gemini-2.5-flash-image');
440
+
441
+ const result = await generateImages(model, {
442
+ input: [{ type: 'text', text: 'Generate a red circle on a plain white background.' }]
443
+ }, {
444
+ apiKey: process.env.OPENROUTER_API_KEY
445
+ });
446
+
447
+ for (const block of result.output) {
448
+ if (block.type === 'text') {
449
+ console.log(block.text);
450
+ } else if (block.type === 'image') {
451
+ console.log(block.mimeType);
452
+ console.log(block.data.substring(0, 32));
453
+ }
454
+ }
455
+ ```
456
+
457
+ Some models also support image input:
458
+
459
+ ```typescript
460
+ import { readFileSync } from 'fs';
461
+
462
+ const imageBuffer = readFileSync('input.png');
463
+ const result = await generateImages(model, {
464
+ input: [
465
+ { type: 'text', text: 'Create a variation of this image with a blue background.' },
466
+ { type: 'image', data: imageBuffer.toString('base64'), mimeType: 'image/png' }
467
+ ]
468
+ }, {
469
+ apiKey: process.env.OPENROUTER_API_KEY
470
+ });
471
+ ```
472
+
473
+ Check capabilities on the model metadata:
474
+
475
+ ```typescript
476
+ console.log(model.input); // ['text', 'image']
477
+ console.log(model.output); // ['image'] or ['image', 'text']
478
+ ```
479
+
480
+ ### Notes and Limitations
481
+
482
+ - Use `getImageModel(...)`, not `getModel(...)`.
483
+ - Use `generateImages()`, not `stream()` / `complete()`.
484
+ - Image-generation models do not participate in tool calling.
485
+ - Outputs are returned in `AssistantImages.output` and can include both base64-encoded `ImageContent` blocks and `TextContent` blocks.
486
+ - Some models return only images, others return images plus text. Check `model.output`.
487
+ - Some models accept image input, others are text-to-image only. Check `model.input`.
488
+ - Like the streaming APIs, image generation supports options such as `apiKey`, `signal`, `headers`, `onPayload`, and `onResponse`, and results may include `stopReason`, `responseId`, and `usage`.
489
+ - If you want a model to analyze images in a conversation or call tools, use the regular `stream()` / `complete()` APIs with a model that supports image input.
490
+ - At the moment, image generation is available through only one provider, OpenRouter.
491
+
492
+ ## Thinking/Reasoning
493
+
494
+ Many models support thinking/reasoning capabilities where they can show their internal thought process. You can check if a model supports reasoning via the `reasoning` property. If you pass reasoning options to a non-reasoning model, they are silently ignored.
495
+
496
+ ### Unified Interface (streamSimple/completeSimple)
497
+
498
+ ```typescript
499
+ import { getModel, streamSimple, completeSimple } from '@eminent337/aery-ai';
500
+
501
+ // Many models across providers support thinking/reasoning
502
+ const model = getModel('anthropic', 'claude-sonnet-4-20250514');
503
+ // or getModel('openai', 'gpt-5-mini');
504
+ // or getModel('google', 'gemini-2.5-flash');
505
+ // or getModel('xai', 'grok-code-fast-1');
506
+ // or getModel('groq', 'openai/gpt-oss-20b');
507
+ // or getModel('cerebras', 'gpt-oss-120b');
508
+ // or getModel('openrouter', 'z-ai/glm-4.5v');
509
+
510
+ // Check if model supports reasoning
511
+ if (model.reasoning) {
512
+ console.log('Model supports reasoning/thinking');
513
+ }
514
+
515
+ // Use the simplified reasoning option
516
+ const response = await completeSimple(model, {
517
+ messages: [{ role: 'user', content: 'Solve: 2x + 5 = 13' }]
518
+ }, {
519
+ reasoning: 'medium' // 'minimal' | 'low' | 'medium' | 'high' | 'xhigh'
520
+ });
521
+
522
+ // Access thinking and text blocks
523
+ for (const block of response.content) {
524
+ if (block.type === 'thinking') {
525
+ console.log('Thinking:', block.thinking);
526
+ } else if (block.type === 'text') {
527
+ console.log('Response:', block.text);
528
+ }
529
+ }
530
+ ```
531
+
532
+ ### Provider-Specific Options (stream/complete)
533
+
534
+ For fine-grained control, use the provider-specific options:
535
+
536
+ ```typescript
537
+ import { getModel, complete } from '@eminent337/aery-ai';
538
+
539
+ // OpenAI Reasoning (o1, o3, gpt-5)
540
+ const openaiModel = getModel('openai', 'gpt-5-mini');
541
+ await complete(openaiModel, context, {
542
+ reasoningEffort: 'medium',
543
+ reasoningSummary: 'detailed' // OpenAI Responses API only
544
+ });
545
+
546
+ // Anthropic Thinking (Claude Sonnet 4)
547
+ const anthropicModel = getModel('anthropic', 'claude-sonnet-4-20250514');
548
+ await complete(anthropicModel, context, {
549
+ thinkingEnabled: true,
550
+ thinkingBudgetTokens: 8192 // Optional token limit
551
+ });
552
+
553
+ // Google Gemini Thinking
554
+ const googleModel = getModel('google', 'gemini-2.5-flash');
555
+ await complete(googleModel, context, {
556
+ thinking: {
557
+ enabled: true,
558
+ budgetTokens: 8192 // -1 for dynamic, 0 to disable
559
+ }
560
+ });
561
+ ```
562
+
563
+ ### Streaming Thinking Content
564
+
565
+ When streaming, thinking content is delivered through specific events:
566
+
567
+ ```typescript
568
+ const s = streamSimple(model, context, { reasoning: 'high' });
569
+
570
+ for await (const event of s) {
571
+ switch (event.type) {
572
+ case 'thinking_start':
573
+ console.log('[Model started thinking]');
574
+ break;
575
+ case 'thinking_delta':
576
+ process.stdout.write(event.delta); // Stream thinking content
577
+ break;
578
+ case 'thinking_end':
579
+ console.log('\n[Thinking complete]');
580
+ break;
581
+ }
582
+ }
583
+ ```
584
+
585
+ ## Stop Reasons
586
+
587
+ Every `AssistantMessage` includes a `stopReason` field that indicates how the generation ended:
588
+
589
+ - `"stop"` - Normal completion, the model finished its response
590
+ - `"length"` - Output hit the maximum token limit
591
+ - `"toolUse"` - Model is calling tools and expects tool results
592
+ - `"error"` - An error occurred during generation
593
+ - `"aborted"` - Request was cancelled via abort signal
594
+
595
+ `AssistantMessage` may also include `responseId`, a provider-specific upstream response or message identifier when the underlying API exposes one. Do not assume it is always present across providers.
596
+
597
+ ## Error Handling
598
+
599
+ When a request ends with an error (including aborts and tool call validation errors), the streaming API emits an error event:
600
+
601
+ ```typescript
602
+ // In streaming
603
+ for await (const event of stream) {
604
+ if (event.type === 'error') {
605
+ // event.reason is either "error" or "aborted"
606
+ // event.error is the AssistantMessage with partial content
607
+ console.error(`Error (${event.reason}):`, event.error.errorMessage);
608
+ console.log('Partial content:', event.error.content);
609
+ }
610
+ }
611
+
612
+ // The final message will have the error details
613
+ const message = await stream.result();
614
+ if (message.stopReason === 'error' || message.stopReason === 'aborted') {
615
+ console.error('Request failed:', message.errorMessage);
616
+ // message.content contains any partial content received before the error
617
+ // message.usage contains partial token counts and costs
618
+ }
619
+ ```
620
+
621
+ ### Aborting Requests
622
+
623
+ The abort signal allows you to cancel in-progress requests. Aborted requests have `stopReason === 'aborted'`:
624
+
625
+ ```typescript
626
+ import { getModel, stream } from '@eminent337/aery-ai';
627
+
628
+ const model = getModel('openai', 'gpt-4o-mini');
629
+ const controller = new AbortController();
630
+
631
+ // Abort after 2 seconds
632
+ setTimeout(() => controller.abort(), 2000);
633
+
634
+ const s = stream(model, {
635
+ messages: [{ role: 'user', content: 'Write a long story' }]
636
+ }, {
637
+ signal: controller.signal
638
+ });
639
+
640
+ for await (const event of s) {
641
+ if (event.type === 'text_delta') {
642
+ process.stdout.write(event.delta);
643
+ } else if (event.type === 'error') {
644
+ // event.reason tells you if it was "error" or "aborted"
645
+ console.log(`${event.reason === 'aborted' ? 'Aborted' : 'Error'}:`, event.error.errorMessage);
646
+ }
647
+ }
648
+
649
+ // Get results (may be partial if aborted)
650
+ const response = await s.result();
651
+ if (response.stopReason === 'aborted') {
652
+ console.log('Request was aborted:', response.errorMessage);
653
+ console.log('Partial content received:', response.content);
654
+ console.log('Tokens used:', response.usage);
655
+ }
656
+ ```
657
+
658
+ ### Continuing After Abort
659
+
660
+ Aborted messages can be added to the conversation context and continued in subsequent requests:
661
+
662
+ ```typescript
663
+ const context = {
664
+ messages: [
665
+ { role: 'user', content: 'Explain quantum computing in detail' }
666
+ ]
667
+ };
668
+
669
+ // First request gets aborted after 2 seconds
670
+ const controller1 = new AbortController();
671
+ setTimeout(() => controller1.abort(), 2000);
672
+
673
+ const partial = await complete(model, context, { signal: controller1.signal });
674
+
675
+ // Add the partial response to context
676
+ context.messages.push(partial);
677
+ context.messages.push({ role: 'user', content: 'Please continue' });
678
+
679
+ // Continue the conversation
680
+ const continuation = await complete(model, context);
681
+ ```
682
+
683
+ ### Debugging Provider Payloads
684
+
685
+ Use the `onPayload` callback to inspect the request payload sent to the provider. This is useful for debugging request formatting issues or provider validation errors.
686
+
687
+ ```typescript
688
+ const response = await complete(model, context, {
689
+ onPayload: (payload) => {
690
+ console.log('Provider payload:', JSON.stringify(payload, null, 2));
691
+ }
692
+ });
693
+ ```
694
+
695
+ The callback is supported by `stream`, `complete`, `streamSimple`, and `completeSimple`.
696
+
697
+ ## APIs, Models, and Providers
698
+
699
+ The library uses a registry of API implementations. Built-in APIs include:
700
+
701
+ - **`anthropic-messages`**: Anthropic Messages API (`streamAnthropic`, `AnthropicOptions`)
702
+ - **`google-generative-ai`**: Google Generative AI API (`streamGoogle`, `GoogleOptions`)
703
+ - **`google-vertex`**: Google Vertex AI API (`streamGoogleVertex`, `GoogleVertexOptions`)
704
+ - **`mistral-conversations`**: Mistral Conversations API (`streamMistral`, `MistralOptions`)
705
+ - **`openai-completions`**: OpenAI Chat Completions API (`streamOpenAICompletions`, `OpenAICompletionsOptions`)
706
+ - **`openai-responses`**: OpenAI Responses API (`streamOpenAIResponses`, `OpenAIResponsesOptions`)
707
+ - **`openai-codex-responses`**: OpenAI Codex Responses API (`streamOpenAICodexResponses`, `OpenAICodexResponsesOptions`)
708
+ - **`azure-openai-responses`**: Azure OpenAI Responses API (`streamAzureOpenAIResponses`, `AzureOpenAIResponsesOptions`)
709
+ - **`bedrock-converse-stream`**: Amazon Bedrock Converse API (`streamBedrock`, `BedrockOptions`)
710
+
711
+ ### Faux provider for tests
712
+
713
+ `registerFauxProvider()` registers a temporary in-memory provider for tests and demos. It is opt-in and not part of the built-in provider set.
714
+
715
+ ```typescript
716
+ import {
717
+ complete,
718
+ fauxAssistantMessage,
719
+ fauxText,
720
+ fauxThinking,
721
+ fauxToolCall,
722
+ registerFauxProvider,
723
+ stream,
724
+ } from '@eminent337/aery-ai';
725
+
726
+ const registration = registerFauxProvider({
727
+ tokensPerSecond: 50 // optional
728
+ });
729
+
730
+ const model = registration.getModel();
731
+ const context = {
732
+ messages: [{ role: 'user', content: 'Summarize package.json and then call echo', timestamp: Date.now() }]
733
+ };
734
+
735
+ registration.setResponses([
736
+ fauxAssistantMessage([
737
+ fauxThinking('Need to inspect package metadata first.'),
738
+ fauxToolCall('echo', { text: 'package.json' })
739
+ ], { stopReason: 'toolUse' })
740
+ ]);
741
+
742
+ const first = await complete(model, context, {
743
+ sessionId: 'session-1',
744
+ cacheRetention: 'short'
745
+ });
746
+ context.messages.push(first);
747
+
748
+ context.messages.push({
749
+ role: 'toolResult',
750
+ toolCallId: first.content.find((block) => block.type === 'toolCall')!.id,
751
+ toolName: 'echo',
752
+ content: [{ type: 'text', text: 'package.json contents here' }],
753
+ isError: false,
754
+ timestamp: Date.now()
755
+ });
756
+
757
+ registration.setResponses([
758
+ fauxAssistantMessage([
759
+ fauxThinking('Now I can summarize the tool output.'),
760
+ fauxText('Here is the summary.')
761
+ ])
762
+ ]);
763
+
764
+ const s = stream(model, context);
765
+ for await (const event of s) {
766
+ console.log(event.type);
767
+ }
768
+
769
+ // Optional: register multiple faux models for model-switching tests
770
+ const multiModel = registerFauxProvider({
771
+ models: [
772
+ { id: 'faux-fast', reasoning: false },
773
+ { id: 'faux-thinker', reasoning: true }
774
+ ]
775
+ });
776
+ const thinker = multiModel.getModel('faux-thinker');
777
+
778
+ console.log(thinker?.reasoning);
779
+ console.log(registration.getPendingResponseCount());
780
+ console.log(registration.state.callCount);
781
+ registration.unregister();
782
+ multiModel.unregister();
783
+ ```
784
+
785
+ Notes:
786
+ - Responses are consumed from a queue in request start order.
787
+ - If the queue is empty, the faux provider returns an assistant error message with `errorMessage: "No more faux responses queued"`.
788
+ - Use `registration.setResponses([...])` to replace the remaining queue and `registration.appendResponses([...])` to add more responses.
789
+ - `registration.models` exposes all registered faux models. `registration.getModel()` returns the first one, and `registration.getModel(id)` returns a specific one.
790
+ - Use `fauxAssistantMessage(...)` for scripted assistant replies. Use `fauxText(...)`, `fauxThinking(...)`, and `fauxToolCall(...)` to build content blocks without filling in low-level fields manually.
791
+ - `registration.unregister()` removes the temporary provider from the global API registry.
792
+ - Usage is estimated at roughly 1 token per 4 characters. When `sessionId` is present and `cacheRetention` is not `"none"`, prompt cache reads and writes are simulated automatically.
793
+ - Tool call arguments stream incrementally via `toolcall_delta` chunks.
794
+ - By default, each streamed chunk is emitted on its own microtask. Set `tokensPerSecond` to pace chunk delivery in real time.
795
+ - The intended use is one deterministic scripted flow per registration. If you need independent concurrent flows, register separate faux providers.
796
+
797
+ ### Providers and Models
798
+
799
+ A **provider** offers models through a specific API. For example:
800
+ - **Anthropic** models use the `anthropic-messages` API
801
+ - **Google** models use the `google-generative-ai` API
802
+ - **OpenAI** models use the `openai-responses` API
803
+ - **Mistral** models use the `mistral-conversations` API
804
+ - **xAI, Cerebras, Groq, Together AI, etc.** models use the `openai-completions` API (OpenAI-compatible)
805
+
806
+ ### Querying Providers and Models
807
+
808
+ ```typescript
809
+ import { getProviders, getModels, getModel } from '@eminent337/aery-ai';
810
+
811
+ // Get all available providers
812
+ const providers = getProviders();
813
+ console.log(providers); // ['openai', 'anthropic', 'google', 'xai', 'groq', ...]
814
+
815
+ // Get all models from a provider (fully typed)
816
+ const anthropicModels = getModels('anthropic');
817
+ for (const model of anthropicModels) {
818
+ console.log(`${model.id}: ${model.name}`);
819
+ console.log(` API: ${model.api}`); // 'anthropic-messages'
820
+ console.log(` Context: ${model.contextWindow} tokens`);
821
+ console.log(` Vision: ${model.input.includes('image')}`);
822
+ console.log(` Reasoning: ${model.reasoning}`);
823
+ }
824
+
825
+ // Get a specific model (both provider and model ID are auto-completed in IDEs)
826
+ const model = getModel('openai', 'gpt-4o-mini');
827
+ console.log(`Using ${model.name} via ${model.api} API`);
828
+ ```
829
+
830
+ ### Custom Models
831
+
832
+ You can create custom models for local inference servers or custom endpoints:
833
+
834
+ ```typescript
835
+ import { Model, stream } from '@eminent337/aery-ai';
836
+
837
+ // Example: Ollama using OpenAI-compatible API
838
+ const ollamaModel: Model<'openai-completions'> = {
839
+ id: 'llama-3.1-8b',
840
+ name: 'Llama 3.1 8B (Ollama)',
841
+ api: 'openai-completions',
842
+ provider: 'ollama',
843
+ baseUrl: 'http://localhost:11434/v1',
844
+ reasoning: false,
845
+ input: ['text'],
846
+ cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
847
+ contextWindow: 128000,
848
+ maxTokens: 32000
849
+ };
850
+
851
+ // Example: LiteLLM proxy with explicit compat settings
852
+ const litellmModel: Model<'openai-completions'> = {
853
+ id: 'gpt-4o',
854
+ name: 'GPT-4o (via LiteLLM)',
855
+ api: 'openai-completions',
856
+ provider: 'litellm',
857
+ baseUrl: 'http://localhost:4000/v1',
858
+ reasoning: false,
859
+ input: ['text', 'image'],
860
+ cost: { input: 2.5, output: 10, cacheRead: 0, cacheWrite: 0 },
861
+ contextWindow: 128000,
862
+ maxTokens: 16384,
863
+ compat: {
864
+ supportsStore: false, // LiteLLM doesn't support the store field
865
+ }
866
+ };
867
+
868
+ // Example: Custom endpoint with headers (bypassing Cloudflare bot detection)
869
+ const proxyModel: Model<'anthropic-messages'> = {
870
+ id: 'claude-sonnet-4',
871
+ name: 'Claude Sonnet 4 (Proxied)',
872
+ api: 'anthropic-messages',
873
+ provider: 'custom-proxy',
874
+ baseUrl: 'https://proxy.example.com/v1',
875
+ reasoning: true,
876
+ input: ['text', 'image'],
877
+ cost: { input: 3, output: 15, cacheRead: 0.3, cacheWrite: 3.75 },
878
+ contextWindow: 200000,
879
+ maxTokens: 8192,
880
+ headers: {
881
+ 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36',
882
+ 'X-Custom-Auth': 'bearer-token-here'
883
+ }
884
+ };
885
+
886
+ // Use the custom model
887
+ const response = await stream(ollamaModel, context, {
888
+ apiKey: 'dummy' // Ollama doesn't need a real key
889
+ });
890
+ ```
891
+
892
+ Some OpenAI-compatible servers do not understand the `developer` role used for reasoning-capable models. For those providers, set `compat.supportsDeveloperRole` to `false` so the system prompt is sent as a `system` message instead. If the server also does not support `reasoning_effort`, set `compat.supportsReasoningEffort` to `false` too.
893
+
894
+ Use model-level `thinkingLevelMap` to describe model-specific thinking controls. Keys are the unified thinking levels (`off`, `minimal`, `low`, `medium`, `high`, `xhigh`). Missing keys use provider defaults, string values are sent to the provider, and `null` marks a level as unsupported.
895
+
896
+ This commonly applies to Ollama, vLLM, SGLang, and similar OpenAI-compatible servers. You can set `compat` at the provider level or per model.
897
+
898
+ ```typescript
899
+ const ollamaReasoningModel: Model<'openai-completions'> = {
900
+ id: 'gpt-oss:20b',
901
+ name: 'GPT-OSS 20B (Ollama)',
902
+ api: 'openai-completions',
903
+ provider: 'ollama',
904
+ baseUrl: 'http://localhost:11434/v1',
905
+ reasoning: true,
906
+ input: ['text'],
907
+ cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
908
+ contextWindow: 131072,
909
+ maxTokens: 32000,
910
+ thinkingLevelMap: {
911
+ minimal: null,
912
+ low: null,
913
+ medium: null,
914
+ high: 'high',
915
+ xhigh: null,
916
+ },
917
+ compat: {
918
+ supportsDeveloperRole: false,
919
+ supportsReasoningEffort: false,
920
+ }
921
+ };
922
+ ```
923
+
924
+ ### OpenAI Compatibility Settings
925
+
926
+ The `openai-completions` API is implemented by many providers with minor differences. By default, the library auto-detects compatibility settings based on `baseUrl` for a small set of known OpenAI-compatible providers (Cerebras, xAI, Chutes, DeepSeek, Together AI, zAi, OpenCode, Cloudflare Workers AI, etc.). For custom proxies or unknown endpoints, you can override these settings via the `compat` field. For `openai-responses` models, the compat field only supports Responses-specific flags.
927
+
928
+ ```typescript
929
+ interface OpenAICompletionsCompat {
930
+ supportsStore?: boolean; // Whether provider supports the `store` field (default: true)
931
+ supportsDeveloperRole?: boolean; // Whether provider supports `developer` role vs `system` (default: true)
932
+ supportsReasoningEffort?: boolean; // Whether provider supports `reasoning_effort` (default: true)
933
+ supportsUsageInStreaming?: boolean; // Whether provider supports `stream_options: { include_usage: true }` (default: true)
934
+ supportsStrictMode?: boolean; // Whether provider supports `strict` in tool definitions (default: true)
935
+ sendSessionAffinityHeaders?: boolean; // Whether to send `session_id`, `x-client-request-id`, and `x-session-affinity` from `sessionId` when caching is enabled (default: false)
936
+ maxTokensField?: 'max_completion_tokens' | 'max_tokens'; // Which field name to use (default: max_completion_tokens)
937
+ requiresToolResultName?: boolean; // Whether tool results require the `name` field (default: false)
938
+ requiresAssistantAfterToolResult?: boolean; // Whether tool results must be followed by an assistant message (default: false)
939
+ requiresThinkingAsText?: boolean; // Whether thinking blocks must be converted to text (default: false)
940
+ requiresReasoningContentOnAssistantMessages?: boolean; // Whether all replayed assistant messages must include empty reasoning_content when reasoning is enabled (default: auto-detected for DeepSeek)
941
+ thinkingFormat?: 'openai' | 'openrouter' | 'deepseek' | 'together' | 'zai' | 'qwen' | 'qwen-chat-template'; // Format for reasoning param: 'openai' uses reasoning_effort, 'openrouter' uses reasoning: { effort }, 'deepseek' uses thinking: { type } plus reasoning_effort, 'together' uses reasoning: { enabled } plus reasoning_effort when supported, 'zai' uses enable_thinking, 'qwen' uses enable_thinking, 'qwen-chat-template' uses chat_template_kwargs.enable_thinking (default: openai)
942
+ cacheControlFormat?: 'anthropic'; // Anthropic-style cache_control on system prompt, last tool, and last user/assistant text content
943
+ openRouterRouting?: OpenRouterRouting; // OpenRouter routing preferences (default: {})
944
+ vercelGatewayRouting?: VercelGatewayRouting; // Vercel AI Gateway routing preferences (default: {})
945
+ }
946
+
947
+ interface OpenAIResponsesCompat {
948
+ // Reserved for future use
949
+ }
950
+ ```
951
+
952
+ If `compat` is not set, the library falls back to URL-based detection. If `compat` is partially set, unspecified fields use the detected defaults. This is useful for:
953
+
954
+ - **LiteLLM proxies**: May not support `store` field
955
+ - **Custom inference servers**: May use non-standard field names
956
+ - **Self-hosted endpoints**: May have different feature support
957
+
958
+ ### Type Safety
959
+
960
+ Models are typed by their API, which keeps the model metadata accurate. Provider-specific option types are enforced when you call the provider functions directly. The generic `stream` and `complete` functions accept `StreamOptions` with additional provider fields.
961
+
962
+ ```typescript
963
+ import { streamAnthropic, type AnthropicOptions } from '@eminent337/aery-ai';
964
+
965
+ // TypeScript knows this is an Anthropic model
966
+ const claude = getModel('anthropic', 'claude-sonnet-4-20250514');
967
+
968
+ const options: AnthropicOptions = {
969
+ thinkingEnabled: true,
970
+ thinkingBudgetTokens: 2048
971
+ };
972
+
973
+ await streamAnthropic(claude, context, options);
974
+ ```
975
+
976
+ ## Cross-Provider Handoffs
977
+
978
+ The library supports seamless handoffs between different LLM providers within the same conversation. This allows you to switch models mid-conversation while preserving context, including thinking blocks, tool calls, and tool results.
979
+
980
+ ### How It Works
981
+
982
+ When messages from one provider are sent to a different provider, the library automatically transforms them for compatibility:
983
+
984
+ - **User and tool result messages** are passed through unchanged
985
+ - **Assistant messages from the same provider/API** are preserved as-is
986
+ - **Assistant messages from different providers** have their thinking blocks converted to text with `<thinking>` tags
987
+ - **Tool calls and regular text** are preserved unchanged
988
+
989
+ ### Example: Multi-Provider Conversation
990
+
991
+ ```typescript
992
+ import { getModel, complete, Context } from '@eminent337/aery-ai';
993
+
994
+ // Start with Claude
995
+ const claude = getModel('anthropic', 'claude-sonnet-4-20250514');
996
+ const context: Context = {
997
+ messages: []
998
+ };
999
+
1000
+ context.messages.push({ role: 'user', content: 'What is 25 * 18?' });
1001
+ const claudeResponse = await complete(claude, context, {
1002
+ thinkingEnabled: true
1003
+ });
1004
+ context.messages.push(claudeResponse);
1005
+
1006
+ // Switch to GPT-5 - it will see Claude's thinking as <thinking> tagged text
1007
+ const gpt5 = getModel('openai', 'gpt-5-mini');
1008
+ context.messages.push({ role: 'user', content: 'Is that calculation correct?' });
1009
+ const gptResponse = await complete(gpt5, context);
1010
+ context.messages.push(gptResponse);
1011
+
1012
+ // Switch to Gemini
1013
+ const gemini = getModel('google', 'gemini-2.5-flash');
1014
+ context.messages.push({ role: 'user', content: 'What was the original question?' });
1015
+ const geminiResponse = await complete(gemini, context);
1016
+ ```
1017
+
1018
+ ### Provider Compatibility
1019
+
1020
+ All providers can handle messages from other providers, including:
1021
+ - Text content
1022
+ - Tool calls and tool results (including images in tool results)
1023
+ - Thinking/reasoning blocks (transformed to tagged text for cross-provider compatibility)
1024
+ - Aborted messages with partial content
1025
+
1026
+ This enables flexible workflows where you can:
1027
+ - Start with a fast model for initial responses
1028
+ - Switch to a more capable model for complex reasoning
1029
+ - Use specialized models for specific tasks
1030
+ - Maintain conversation continuity across provider outages
1031
+
1032
+ ## Context Serialization
1033
+
1034
+ The `Context` object can be easily serialized and deserialized using standard JSON methods, making it simple to persist conversations, implement chat history, or transfer contexts between services:
1035
+
1036
+ ```typescript
1037
+ import { Context, getModel, complete } from '@eminent337/aery-ai';
1038
+
1039
+ // Create and use a context
1040
+ const context: Context = {
1041
+ systemPrompt: 'You are a helpful assistant.',
1042
+ messages: [
1043
+ { role: 'user', content: 'What is TypeScript?' }
1044
+ ]
1045
+ };
1046
+
1047
+ const model = getModel('openai', 'gpt-4o-mini');
1048
+ const response = await complete(model, context);
1049
+ context.messages.push(response);
1050
+
1051
+ // Serialize the entire context
1052
+ const serialized = JSON.stringify(context);
1053
+ console.log('Serialized context size:', serialized.length, 'bytes');
1054
+
1055
+ // Save to database, localStorage, file, etc.
1056
+ localStorage.setItem('conversation', serialized);
1057
+
1058
+ // Later: deserialize and continue the conversation
1059
+ const restored: Context = JSON.parse(localStorage.getItem('conversation')!);
1060
+ restored.messages.push({ role: 'user', content: 'Tell me more about its type system' });
1061
+
1062
+ // Continue with any model
1063
+ const newModel = getModel('anthropic', 'claude-3-5-haiku-20241022');
1064
+ const continuation = await complete(newModel, restored);
1065
+ ```
1066
+
1067
+ > **Note**: If the context contains images (encoded as base64 as shown in the Image Input section), those will also be serialized.
1068
+
1069
+ ## Browser Usage
1070
+
1071
+ The library supports browser environments. You must pass the API key explicitly since environment variables are not available in browsers:
1072
+
1073
+ ```typescript
1074
+ import { getModel, complete } from '@eminent337/aery-ai';
1075
+
1076
+ // API key must be passed explicitly in browser
1077
+ const model = getModel('anthropic', 'claude-3-5-haiku-20241022');
1078
+
1079
+ const response = await complete(model, {
1080
+ messages: [{ role: 'user', content: 'Hello!' }]
1081
+ }, {
1082
+ apiKey: 'your-api-key'
1083
+ });
1084
+ ```
1085
+
1086
+ > **Security Warning**: Exposing API keys in frontend code is dangerous. Anyone can extract and abuse your keys. Only use this approach for internal tools or demos. For production applications, use a backend proxy that keeps your API keys secure.
1087
+
1088
+ ### Browser Compatibility Notes
1089
+
1090
+ - Amazon Bedrock (`bedrock-converse-stream`) is not supported in browser environments.
1091
+ - OAuth login flows are not supported in browser environments. Use the `@eminent337/aery-ai/oauth` entry point in Node.js.
1092
+ - In browser builds, Bedrock can still appear in model lists. Calls to Bedrock models fail at runtime.
1093
+ - Use a server-side proxy or backend service if you need Bedrock or OAuth-based auth from a web app.
1094
+
1095
+ ### Environment Variables (Node.js only)
1096
+
1097
+ In Node.js environments, you can set environment variables to avoid passing API keys:
1098
+
1099
+ | Provider | Environment Variable(s) |
1100
+ |----------|------------------------|
1101
+ | OpenAI | `OPENAI_API_KEY` |
1102
+ | Azure OpenAI | `AZURE_OPENAI_API_KEY` + `AZURE_OPENAI_BASE_URL` (e.g. `https://{resource}.openai.azure.com`) or `AZURE_OPENAI_RESOURCE_NAME`. Supports `*.openai.azure.com` and `*.cognitiveservices.azure.com`; root endpoints auto-normalize to `/openai/v1`. Optional: `AZURE_OPENAI_API_VERSION` (default `v1`), `AZURE_OPENAI_DEPLOYMENT_NAME_MAP`. |
1103
+ | Anthropic | `ANTHROPIC_API_KEY` or `ANTHROPIC_OAUTH_TOKEN` |
1104
+ | DeepSeek | `DEEPSEEK_API_KEY` |
1105
+ | Google | `GEMINI_API_KEY` |
1106
+ | Vertex AI | `GOOGLE_CLOUD_API_KEY` or `GOOGLE_CLOUD_PROJECT` (or `GCLOUD_PROJECT`) + `GOOGLE_CLOUD_LOCATION` + ADC |
1107
+ | Mistral | `MISTRAL_API_KEY` |
1108
+ | Groq | `GROQ_API_KEY` |
1109
+ | Cerebras | `CEREBRAS_API_KEY` |
1110
+ | Cloudflare AI Gateway | `CLOUDFLARE_API_KEY` + `CLOUDFLARE_ACCOUNT_ID` + `CLOUDFLARE_GATEWAY_ID` |
1111
+ | Cloudflare Workers AI | `CLOUDFLARE_API_KEY` + `CLOUDFLARE_ACCOUNT_ID` |
1112
+ | xAI | `XAI_API_KEY` |
1113
+ | Fireworks | `FIREWORKS_API_KEY` |
1114
+ | Together AI | `TOGETHER_API_KEY` |
1115
+ | OpenRouter | `OPENROUTER_API_KEY` |
1116
+ | Vercel AI Gateway | `AI_GATEWAY_API_KEY` |
1117
+ | zAI | `ZAI_API_KEY` |
1118
+ | MiniMax | `MINIMAX_API_KEY` |
1119
+ | OpenCode Zen / OpenCode Go | `OPENCODE_API_KEY` |
1120
+ | Kimi For Coding | `KIMI_API_KEY` |
1121
+ | Xiaomi MiMo (API billing) | `XIAOMI_API_KEY` |
1122
+ | Xiaomi MiMo Token Plan (China) | `XIAOMI_TOKEN_PLAN_CN_API_KEY` |
1123
+ | Xiaomi MiMo Token Plan (Amsterdam) | `XIAOMI_TOKEN_PLAN_AMS_API_KEY` |
1124
+ | Xiaomi MiMo Token Plan (Singapore) | `XIAOMI_TOKEN_PLAN_SGP_API_KEY` |
1125
+ | GitHub Copilot | `COPILOT_GITHUB_TOKEN` |
1126
+
1127
+ When set, the library automatically uses these keys:
1128
+
1129
+ ```typescript
1130
+ // Uses OPENAI_API_KEY from environment
1131
+ const model = getModel('openai', 'gpt-4o-mini');
1132
+ const response = await complete(model, context);
1133
+
1134
+ // Or override with explicit key
1135
+ const response = await complete(model, context, {
1136
+ apiKey: 'sk-different-key'
1137
+ });
1138
+ ```
1139
+
1140
+ ### Checking Environment Variables
1141
+
1142
+ ```typescript
1143
+ import { getEnvApiKey } from '@eminent337/aery-ai';
1144
+
1145
+ // Check if an API key is set in environment variables
1146
+ const key = getEnvApiKey('openai'); // checks OPENAI_API_KEY
1147
+ ```
1148
+
1149
+ ## OAuth Providers
1150
+
1151
+ Several providers require OAuth authentication instead of static API keys:
1152
+
1153
+ - **Anthropic** (Claude Pro/Max subscription)
1154
+ - **OpenAI Codex** (ChatGPT Plus/Pro subscription, access to GPT-5.x Codex models)
1155
+ - **GitHub Copilot** (Copilot subscription)
1156
+
1157
+ For paid Cloud Code Assist subscriptions, set `GOOGLE_CLOUD_PROJECT` or `GOOGLE_CLOUD_PROJECT_ID` to your project ID.
1158
+
1159
+ ### Vertex AI
1160
+
1161
+ Vertex AI models support either a Google Cloud API key or Application Default Credentials (ADC):
1162
+
1163
+ - **API key**: Set `GOOGLE_CLOUD_API_KEY` or pass `apiKey` in the call options.
1164
+ - **Local development (ADC)**: Run `gcloud auth application-default login`
1165
+ - **CI/Production (ADC)**: Set `GOOGLE_APPLICATION_CREDENTIALS` to point to a service account JSON key file
1166
+
1167
+ When using ADC, also set `GOOGLE_CLOUD_PROJECT` (or `GCLOUD_PROJECT`) and `GOOGLE_CLOUD_LOCATION`. You can also pass `project`/`location` in the call options. When using `GOOGLE_CLOUD_API_KEY`, `project` and `location` are not required.
1168
+
1169
+ Example:
1170
+
1171
+ ```bash
1172
+ # Local (uses your user credentials)
1173
+ gcloud auth application-default login
1174
+ export GOOGLE_CLOUD_PROJECT="my-project"
1175
+ export GOOGLE_CLOUD_LOCATION="us-central1"
1176
+
1177
+ # CI/Production (service account key file)
1178
+ export GOOGLE_APPLICATION_CREDENTIALS="/path/to/service-account.json"
1179
+ ```
1180
+
1181
+ ```typescript
1182
+ import { getModel, complete } from '@eminent337/aery-ai';
1183
+
1184
+ (async () => {
1185
+ const model = getModel('google-vertex', 'gemini-2.5-flash');
1186
+ const response = await complete(model, {
1187
+ messages: [{ role: 'user', content: 'Hello from Vertex AI' }]
1188
+ }, {
1189
+ apiKey: process.env.GOOGLE_CLOUD_API_KEY,
1190
+ });
1191
+
1192
+ for (const block of response.content) {
1193
+ if (block.type === 'text') console.log(block.text);
1194
+ }
1195
+ })().catch(console.error);
1196
+ ```
1197
+
1198
+ Official docs: [Application Default Credentials](https://cloud.google.com/docs/authentication/application-default-credentials)
1199
+
1200
+ ### CLI Login
1201
+
1202
+ The quickest way to authenticate:
1203
+
1204
+ ```bash
1205
+ npx @eminent337/aery-ai login # interactive provider selection
1206
+ npx @eminent337/aery-ai login anthropic # login to specific provider
1207
+ npx @eminent337/aery-ai list # list available providers
1208
+ ```
1209
+
1210
+ Credentials are saved to `auth.json` in the current directory.
1211
+
1212
+ ### Programmatic OAuth
1213
+
1214
+ The library provides login and token refresh functions via the `@eminent337/aery-ai/oauth` entry point. Credential storage is the caller's responsibility.
1215
+
1216
+ ```typescript
1217
+ import {
1218
+ // Login functions (return credentials, do not store)
1219
+ loginAnthropic,
1220
+ loginOpenAICodex,
1221
+ loginGitHubCopilot,
1222
+ loginGeminiCli,
1223
+
1224
+ // Token management
1225
+ refreshOAuthToken, // (provider, credentials) => new credentials
1226
+ getOAuthApiKey, // (provider, credentialsMap) => { newCredentials, apiKey } | null
1227
+
1228
+ // Types
1229
+ type OAuthProvider,
1230
+ type OAuthCredentials,
1231
+ } from '@eminent337/aery-ai/oauth';
1232
+ ```
1233
+
1234
+ ### Login Flow Example
1235
+
1236
+ ```typescript
1237
+ import { loginGitHubCopilot } from '@eminent337/aery-ai/oauth';
1238
+ import { writeFileSync } from 'fs';
1239
+
1240
+ const credentials = await loginGitHubCopilot({
1241
+ onAuth: (url, instructions) => {
1242
+ console.log(`Open: ${url}`);
1243
+ if (instructions) console.log(instructions);
1244
+ },
1245
+ onPrompt: async (prompt) => {
1246
+ return await getUserInput(prompt.message);
1247
+ },
1248
+ onProgress: (message) => console.log(message)
1249
+ });
1250
+
1251
+ // Store credentials yourself
1252
+ const auth = { 'github-copilot': { type: 'oauth', ...credentials } };
1253
+ writeFileSync('auth.json', JSON.stringify(auth, null, 2));
1254
+ ```
1255
+
1256
+ ### Using OAuth Tokens
1257
+
1258
+ Use `getOAuthApiKey()` to get an API key, automatically refreshing if expired:
1259
+
1260
+ ```typescript
1261
+ import { getModel, complete } from '@eminent337/aery-ai';
1262
+ import { getOAuthApiKey } from '@eminent337/aery-ai/oauth';
1263
+ import { readFileSync, writeFileSync } from 'fs';
1264
+
1265
+ // Load your stored credentials
1266
+ const auth = JSON.parse(readFileSync('auth.json', 'utf-8'));
1267
+
1268
+ // Get API key (refreshes if expired)
1269
+ const result = await getOAuthApiKey('github-copilot', auth);
1270
+ if (!result) throw new Error('Not logged in');
1271
+
1272
+ // Save refreshed credentials
1273
+ auth['github-copilot'] = { type: 'oauth', ...result.newCredentials };
1274
+ writeFileSync('auth.json', JSON.stringify(auth, null, 2));
1275
+
1276
+ // Use the API key
1277
+ const model = getModel('github-copilot', 'gpt-4o');
1278
+ const response = await complete(model, {
1279
+ messages: [{ role: 'user', content: 'Hello!' }]
1280
+ }, { apiKey: result.apiKey });
1281
+ ```
1282
+
1283
+ ### Provider Notes
1284
+
1285
+ **OpenAI Codex**: Requires a ChatGPT Plus or Pro subscription. Provides access to GPT-5.x Codex models with extended context windows and reasoning capabilities. The library automatically handles session-based prompt caching when `sessionId` is provided in stream options. You can set `transport` in stream options to `"sse"`, `"websocket"`, or `"auto"` for Codex Responses transport selection. When using WebSocket with a `sessionId`, connections are reused per session and expire after 5 minutes of inactivity.
1286
+
1287
+ **Azure OpenAI (Responses)**: Uses the Responses API only. Set `AZURE_OPENAI_API_KEY` and either `AZURE_OPENAI_BASE_URL` or `AZURE_OPENAI_RESOURCE_NAME`. `AZURE_OPENAI_BASE_URL` supports both `https://<resource>.openai.azure.com` and `https://<resource>.cognitiveservices.azure.com`; root endpoints are normalized to `.../openai/v1` automatically. Use `AZURE_OPENAI_API_VERSION` (defaults to `v1`) to override the API version if needed. Deployment names are treated as model IDs by default; override with `azureDeploymentName` or `AZURE_OPENAI_DEPLOYMENT_NAME_MAP` using comma-separated `model-id=deployment` pairs (for example `gpt-4o-mini=my-deployment,gpt-4o=prod`). Legacy deployment-based URLs are intentionally unsupported.
1288
+
1289
+ **GitHub Copilot**: If you get a "The requested model is not supported" error, enable the model manually in VS Code: open Copilot Chat, click the model selector, select the model (warning icon), and click "Enable".
1290
+
1291
+ ## Development
1292
+
1293
+ ### Adding a New Provider
1294
+
1295
+ Adding a new LLM provider requires changes across multiple files. This checklist covers all necessary steps:
1296
+
1297
+ #### 1. Core Types (`src/types.ts`)
1298
+
1299
+ - Add the API identifier to `KnownApi` (for example `"bedrock-converse-stream"`)
1300
+ - Create an options interface extending `StreamOptions` (for example `BedrockOptions`)
1301
+ - Add the provider name to `KnownProvider` (for example `"amazon-bedrock"`)
1302
+
1303
+ #### 2. Provider Implementation (`src/providers/`)
1304
+
1305
+ Create a new provider file (for example `amazon-bedrock.ts`) that exports:
1306
+
1307
+ - `stream<Provider>()` function returning `AssistantMessageEventStream`
1308
+ - `streamSimple<Provider>()` for `SimpleStreamOptions` mapping
1309
+ - Provider-specific options interface
1310
+ - Message conversion functions to transform `Context` to provider format
1311
+ - Tool conversion if the provider supports tools
1312
+ - Response parsing to emit standardized events (`text`, `tool_call`, `thinking`, `usage`, `stop`)
1313
+
1314
+ #### 3. API Registry Integration (`src/providers/register-builtins.ts`)
1315
+
1316
+ - Register the API with `registerApiProvider()`
1317
+ - Add a package subpath export in `package.json` for the provider module (`./dist/providers/<provider>.js`)
1318
+ - Add lazy loader wrappers in `src/providers/register-builtins.ts`; do not statically import provider implementation modules there
1319
+ - Add any root-level `export type` re-exports in `src/index.ts` that should remain available from `@eminent337/aery-ai`
1320
+ - Add credential detection in `env-api-keys.ts` for the new provider
1321
+ - Ensure `streamSimple` handles auth lookup via `getEnvApiKey()` or provider-specific auth
1322
+
1323
+ #### 4. Model Generation (`scripts/generate-models.ts`, `scripts/generate-image-models.ts`)
1324
+
1325
+ - Add logic to fetch and parse models from the provider's source (e.g., models.dev API)
1326
+ - Map chat/tool-capable provider model data to the standardized `Model` interface via `scripts/generate-models.ts`
1327
+ - Map image-generation provider model data to the standardized `ImagesModel` interface via `scripts/generate-image-models.ts`
1328
+ - Handle provider-specific quirks (pricing format, capability flags, model ID transformations)
1329
+
1330
+ #### 5. Tests (`test/`)
1331
+
1332
+ Create or update test files to cover the new provider:
1333
+
1334
+ - `stream.test.ts` - Basic streaming and tool use
1335
+ - `tokens.test.ts` - Token usage reporting
1336
+ - `abort.test.ts` - Request cancellation
1337
+ - `empty.test.ts` - Empty message handling
1338
+ - `context-overflow.test.ts` - Context limit errors
1339
+ - `image-limits.test.ts` - Image support (if applicable)
1340
+ - `unicode-surrogate.test.ts` - Unicode handling
1341
+ - `tool-call-without-result.test.ts` - Orphaned tool calls
1342
+ - `image-tool-result.test.ts` - Images in tool results
1343
+ - `total-tokens.test.ts` - Token counting accuracy
1344
+ - `cross-provider-handoff.test.ts` - Cross-provider context replay
1345
+
1346
+ For `cross-provider-handoff.test.ts`, add at least one provider/model pair. If the provider exposes multiple model families (for example GPT and Claude), add at least one pair per family.
1347
+
1348
+ For providers with non-standard auth (AWS, Google Vertex), create a utility like `bedrock-utils.ts` with credential detection helpers.
1349
+
1350
+ #### 6. Coding Agent Integration (`../coding-agent/`)
1351
+
1352
+ Update `src/core/model-resolver.ts`:
1353
+
1354
+ - Add a default model ID for the provider in `DEFAULT_MODELS`
1355
+
1356
+ Update `src/cli/args.ts`:
1357
+
1358
+ - Add environment variable documentation in the help text
1359
+
1360
+ Update `README.md`:
1361
+
1362
+ - Add the provider to the providers section with setup instructions
1363
+
1364
+ #### 7. Documentation
1365
+
1366
+ Update `packages/ai/README.md`:
1367
+
1368
+ - Add to the Supported Providers table
1369
+ - Document any provider-specific options or authentication requirements
1370
+ - Add environment variable to the Environment Variables section
1371
+
1372
+ #### 8. Changelog
1373
+
1374
+ Add an entry to `packages/ai/CHANGELOG.md` under `## [Unreleased]`:
1375
+
1376
+ ```markdown
1377
+ ### Added
1378
+ - Added support for [Provider Name] provider ([#PR](link) by [@author](link))
1379
+ ```
1380
+
1381
+ ## License
1382
+
1383
+ MIT