confused-ai-core 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (114)
  1. package/FEATURES.md +169 -0
  2. package/package.json +119 -0
  3. package/src/agent.ts +187 -0
  4. package/src/agentic/index.ts +87 -0
  5. package/src/agentic/runner.ts +386 -0
  6. package/src/agentic/types.ts +91 -0
  7. package/src/artifacts/artifact.ts +417 -0
  8. package/src/artifacts/index.ts +42 -0
  9. package/src/artifacts/media.ts +304 -0
  10. package/src/cli/index.ts +122 -0
  11. package/src/core/base-agent.ts +151 -0
  12. package/src/core/context-builder.ts +106 -0
  13. package/src/core/index.ts +8 -0
  14. package/src/core/schemas.ts +17 -0
  15. package/src/core/types.ts +158 -0
  16. package/src/create-agent.ts +309 -0
  17. package/src/debug-logger.ts +188 -0
  18. package/src/dx/agent.ts +88 -0
  19. package/src/dx/define-agent.ts +183 -0
  20. package/src/dx/dev-logger.ts +57 -0
  21. package/src/dx/index.ts +11 -0
  22. package/src/errors.ts +175 -0
  23. package/src/execution/engine.ts +522 -0
  24. package/src/execution/graph-builder.ts +362 -0
  25. package/src/execution/index.ts +8 -0
  26. package/src/execution/types.ts +257 -0
  27. package/src/execution/worker-pool.ts +308 -0
  28. package/src/extensions/index.ts +123 -0
  29. package/src/guardrails/allowlist.ts +155 -0
  30. package/src/guardrails/index.ts +17 -0
  31. package/src/guardrails/types.ts +159 -0
  32. package/src/guardrails/validator.ts +265 -0
  33. package/src/index.ts +74 -0
  34. package/src/knowledge/index.ts +5 -0
  35. package/src/knowledge/types.ts +52 -0
  36. package/src/learning/in-memory-store.ts +72 -0
  37. package/src/learning/index.ts +6 -0
  38. package/src/learning/types.ts +42 -0
  39. package/src/llm/cache.ts +300 -0
  40. package/src/llm/index.ts +22 -0
  41. package/src/llm/model-resolver.ts +81 -0
  42. package/src/llm/openai-provider.ts +313 -0
  43. package/src/llm/openrouter-provider.ts +29 -0
  44. package/src/llm/types.ts +131 -0
  45. package/src/memory/in-memory-store.ts +255 -0
  46. package/src/memory/index.ts +7 -0
  47. package/src/memory/types.ts +193 -0
  48. package/src/memory/vector-store.ts +251 -0
  49. package/src/observability/console-logger.ts +123 -0
  50. package/src/observability/index.ts +12 -0
  51. package/src/observability/metrics.ts +85 -0
  52. package/src/observability/otlp-exporter.ts +417 -0
  53. package/src/observability/tracer.ts +105 -0
  54. package/src/observability/types.ts +341 -0
  55. package/src/orchestration/agent-adapter.ts +33 -0
  56. package/src/orchestration/index.ts +34 -0
  57. package/src/orchestration/load-balancer.ts +151 -0
  58. package/src/orchestration/mcp-types.ts +59 -0
  59. package/src/orchestration/message-bus.ts +192 -0
  60. package/src/orchestration/orchestrator.ts +349 -0
  61. package/src/orchestration/pipeline.ts +66 -0
  62. package/src/orchestration/supervisor.ts +107 -0
  63. package/src/orchestration/swarm.ts +1099 -0
  64. package/src/orchestration/toolkit.ts +47 -0
  65. package/src/orchestration/types.ts +339 -0
  66. package/src/planner/classical-planner.ts +383 -0
  67. package/src/planner/index.ts +8 -0
  68. package/src/planner/llm-planner.ts +353 -0
  69. package/src/planner/types.ts +227 -0
  70. package/src/planner/validator.ts +297 -0
  71. package/src/production/circuit-breaker.ts +290 -0
  72. package/src/production/graceful-shutdown.ts +251 -0
  73. package/src/production/health.ts +333 -0
  74. package/src/production/index.ts +57 -0
  75. package/src/production/latency-eval.ts +62 -0
  76. package/src/production/rate-limiter.ts +287 -0
  77. package/src/production/resumable-stream.ts +289 -0
  78. package/src/production/types.ts +81 -0
  79. package/src/sdk/index.ts +374 -0
  80. package/src/session/db-driver.ts +50 -0
  81. package/src/session/in-memory-store.ts +235 -0
  82. package/src/session/index.ts +12 -0
  83. package/src/session/sql-store.ts +315 -0
  84. package/src/session/sqlite-store.ts +61 -0
  85. package/src/session/types.ts +153 -0
  86. package/src/tools/base-tool.ts +223 -0
  87. package/src/tools/browser-tool.ts +123 -0
  88. package/src/tools/calculator-tool.ts +265 -0
  89. package/src/tools/file-tools.ts +394 -0
  90. package/src/tools/github-tool.ts +432 -0
  91. package/src/tools/hackernews-tool.ts +187 -0
  92. package/src/tools/http-tool.ts +118 -0
  93. package/src/tools/index.ts +99 -0
  94. package/src/tools/jira-tool.ts +373 -0
  95. package/src/tools/notion-tool.ts +322 -0
  96. package/src/tools/openai-tool.ts +236 -0
  97. package/src/tools/registry.ts +131 -0
  98. package/src/tools/serpapi-tool.ts +234 -0
  99. package/src/tools/shell-tool.ts +118 -0
  100. package/src/tools/slack-tool.ts +327 -0
  101. package/src/tools/telegram-tool.ts +127 -0
  102. package/src/tools/types.ts +229 -0
  103. package/src/tools/websearch-tool.ts +335 -0
  104. package/src/tools/wikipedia-tool.ts +177 -0
  105. package/src/tools/yfinance-tool.ts +33 -0
  106. package/src/voice/index.ts +17 -0
  107. package/src/voice/voice-provider.ts +228 -0
  108. package/tests/artifact.test.ts +241 -0
  109. package/tests/circuit-breaker.test.ts +171 -0
  110. package/tests/health.test.ts +192 -0
  111. package/tests/llm-cache.test.ts +186 -0
  112. package/tests/rate-limiter.test.ts +161 -0
  113. package/tsconfig.json +29 -0
  114. package/vitest.config.ts +47 -0
package/src/llm/openai-provider.ts
@@ -0,0 +1,313 @@
+ /**
+  * OpenAI LLM provider.
+  * Requires: npm install openai
+  */
+
+ import type {
+   LLMProvider,
+   Message,
+   GenerateResult,
+   GenerateOptions,
+   LLMToolDefinition,
+   ToolCall,
+   StreamOptions,
+ } from './types.js';
+ import { DebugLogger, createDebugLogger } from '../debug-logger.js';
+
+ // Minimal types so we don't require openai at compile time (peer dependency at runtime)
+ interface OpenAIClient {
+   chat: {
+     completions: {
+       create(params: OpenAICreateParams): Promise<OpenAIResponse | AsyncIterable<OpenAIStreamChunk>>;
+     };
+   };
+ }
+ interface OpenAICreateParams {
+   model: string;
+   messages: OpenAIMessageParam[];
+   temperature?: number;
+   max_tokens?: number;
+   stop?: string[];
+   tools?: OpenAITool[];
+   tool_choice?: 'auto' | 'none' | { type: 'function'; function: { name: string } };
+   stream?: boolean;
+ }
+ // Content: string or multimodal parts (text, image_url, etc.) per the OpenAI API
+ type OpenAIContent = string | Array<{ type: string; text?: string; image_url?: { url: string; detail?: string }; file?: { url: string }; audio?: { url: string }; video?: { url: string } }> | null;
+ type OpenAIMessageParam =
+   | { role: 'system' | 'user'; content: OpenAIContent }
+   | { role: 'assistant'; content: OpenAIContent; tool_calls?: { id: string; type: 'function'; function: { name: string; arguments: string } }[] }
+   | { role: 'tool'; content: string; tool_call_id: string };
+ interface OpenAITool {
+   type: 'function';
+   function: { name: string; description: string; parameters: Record<string, unknown> };
+ }
+ interface OpenAIResponse {
+   choices?: { message?: { content?: string | null; tool_calls?: { id: string; function?: { name?: string; arguments?: string } }[] }; finish_reason?: string }[];
+   usage?: { prompt_tokens?: number; completion_tokens?: number; total_tokens?: number };
+ }
+ interface OpenAIStreamChunk {
+   choices?: {
+     delta?: {
+       content?: string | null;
+       tool_calls?: { index: number; id?: string; function?: { name?: string; arguments?: string } }[];
+     };
+     finish_reason?: string | null;
+   }[];
+   usage?: { prompt_tokens?: number; completion_tokens?: number; total_tokens?: number };
+ }
+
+ export interface OpenAIProviderConfig {
+   /** Pre-built OpenAI client instance; if omitted, one is created from the options below. */
+   client?: OpenAIClient;
+   /** Model name (default: gpt-4o). Any model id the target server accepts works, e.g. llama3.2 on a local server. */
+   model?: string;
+   /** API key (used only if client is not provided). Optional when baseURL points to a local server (e.g. Ollama). */
+   apiKey?: string;
+   /** Base URL for the API (e.g. https://api.openai.com/v1, or http://localhost:11434/v1 for Ollama). */
+   baseURL?: string;
+   /** Enable debug logging */
+   debug?: boolean;
+ }
+
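The config above covers both hosted and self-hosted setups. A minimal sketch of each (model ids are illustrative; the import assumes the class is re-exported from the package root via src/index.ts):

import { OpenAIProvider } from 'confused-ai-core'; // assumed re-export

// Hosted OpenAI: an API key (here from the environment) is required.
const hosted = new OpenAIProvider({ apiKey: process.env.OPENAI_API_KEY, model: 'gpt-4o' });

// Local OpenAI-compatible server (e.g. Ollama): baseURL alone is enough, no key needed.
const local = new OpenAIProvider({ baseURL: 'http://localhost:11434/v1', model: 'llama3.2' });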
+ /**
+  * Map framework Message[] to OpenAI format
+  */
+ function toOpenAIMessages(messages: Message[]): OpenAIMessageParam[] {
+   return messages.map((m) => {
+     if (m.role === 'assistant' && 'toolCalls' in m && (m as { toolCalls?: ToolCall[] }).toolCalls?.length) {
+       const content = m.content;
+       const normalized = Array.isArray(content) ? content : (content ?? null);
+       return {
+         role: 'assistant',
+         content: normalized as OpenAIContent,
+         tool_calls: (m as { toolCalls: ToolCall[] }).toolCalls.map((tc) => ({
+           id: tc.id,
+           type: 'function' as const,
+           function: { name: tc.name, arguments: JSON.stringify(tc.arguments) },
+         })),
+       };
+     }
+     if (m.role === 'tool') {
+       const toolMsg = m as Message & { toolCallId?: string };
+       const content = typeof m.content === 'string' ? m.content : (Array.isArray(m.content) ? (m.content.find((p: { type: string; text?: string }) => p.type === 'text') as { text?: string } | undefined)?.text ?? '' : '');
+       return {
+         role: 'tool',
+         content,
+         tool_call_id: toolMsg.toolCallId ?? '',
+       };
+     }
+     const content = m.content;
+     const normalized = Array.isArray(content) ? content : (content ?? null);
+     return { role: m.role as 'system' | 'user' | 'assistant', content: normalized as OpenAIContent };
+   });
+ }
+
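To make the translation concrete, one round of tool use maps as follows (a sketch; toOpenAIMessages is module-private, so its result is shown in comments for illustration only):

import type { Message, AssistantMessage, MessageWithToolId } from './types.js';

const assistant: AssistantMessage = {
  role: 'assistant',
  content: '',
  toolCalls: [{ id: 'call_1', name: 'calc', arguments: { expr: '2+2' } }],
};
const toolResult: MessageWithToolId = { role: 'tool', content: '4', toolCallId: 'call_1' };
const turn: Message[] = [{ role: 'user', content: 'What is 2 + 2?' }, assistant, toolResult];
// toOpenAIMessages(turn) yields:
// [ { role: 'user', content: 'What is 2 + 2?' },
//   { role: 'assistant', content: '', tool_calls: [{ id: 'call_1', type: 'function',
//       function: { name: 'calc', arguments: '{"expr":"2+2"}' } }] },
//   { role: 'tool', content: '4', tool_call_id: 'call_1' } ]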
+ /**
+  * Map framework LLMToolDefinition to OpenAI format
+  */
+ function toOpenAITools(tools?: LLMToolDefinition[]): OpenAITool[] | undefined {
+   if (!tools?.length) return undefined;
+   return tools.map((t) => ({
+     type: 'function' as const,
+     function: {
+       name: t.name,
+       description: t.description,
+       parameters: t.parameters as Record<string, unknown>,
+     },
+   }));
+ }
+
+ /**
+  * OpenAI implementation of LLMProvider.
+  * Install the openai package: npm install openai
+  */
+ export class OpenAIProvider implements LLMProvider {
+   private client: OpenAIClient;
+   private model: string;
+   private logger: DebugLogger;
+
+   constructor(config: OpenAIProviderConfig = {}) {
+     this.logger = createDebugLogger('OpenAIProvider', config.debug ?? false);
+
+     if (config.client) {
+       this.client = config.client;
+     } else {
+       const apiKey = config.apiKey ?? process.env.OPENAI_API_KEY;
+       const baseURL = config.baseURL ?? process.env.OPENAI_BASE_URL;
+       if (!baseURL && !apiKey) {
+         throw new Error('OpenAIProvider requires apiKey (or OPENAI_API_KEY) or baseURL (or OPENAI_BASE_URL)');
+       }
+       // Lazy-load the openai peer dependency only when a client must be constructed.
+       const { OpenAI } = require('openai') as {
+         OpenAI: new (opts: { apiKey?: string; baseURL?: string }) => OpenAIClient;
+       };
+       this.client = new OpenAI({
+         apiKey: apiKey ?? 'not-needed',
+         ...(baseURL && { baseURL }),
+       });
+     }
+     this.model = config.model ?? 'gpt-4o';
+     this.logger.debug('OpenAIProvider initialized', undefined, { model: this.model });
+   }
+
+   async generateText(messages: Message[], options?: GenerateOptions): Promise<GenerateResult> {
+     const startTime = Date.now();
+     this.logger.logStart('LLM generateText', {
+       messageCount: messages.length,
+       model: this.model,
+     });
+
+     const body: Record<string, unknown> = {
+       model: this.model,
+       messages: toOpenAIMessages(messages),
+       temperature: options?.temperature ?? 0.7,
+       max_tokens: options?.maxTokens,
+       stop: options?.stop,
+     };
+
+     const tools = toOpenAITools(options?.tools);
+     if (tools?.length) {
+       body.tools = tools;
+       body.tool_choice = options?.toolChoice === 'none' ? 'none' : typeof options?.toolChoice === 'object' ? { type: 'function', function: { name: options.toolChoice.name } } : 'auto';
+       this.logger.debug('Including tools in request', undefined, { toolCount: tools.length });
+     }
+
+     const response = await this.client.chat.completions.create(body as unknown as OpenAICreateParams) as OpenAIResponse;
+
+     const choice = response.choices?.[0];
+     if (!choice?.message) {
+       this.logger.warn('Empty response from LLM');
+       return { text: '', finishReason: choice?.finish_reason ?? 'unknown' };
+     }
+
+     const msg = choice.message;
+     const text = typeof msg.content === 'string' ? msg.content : '';
+
+     const toolCalls: ToolCall[] | undefined = msg.tool_calls?.map((tc: { id: string; function?: { name?: string; arguments?: string } }) => ({
+       id: tc.id,
+       name: tc.function?.name ?? '',
+       arguments: (() => {
+         try {
+           return JSON.parse(tc.function?.arguments ?? '{}') as Record<string, unknown>;
+         } catch {
+           return {};
+         }
+       })(),
+     }));
+
+     const duration = Date.now() - startTime;
+     this.logger.logComplete('LLM generateText', duration, {
+       textLength: text.length,
+       toolCallsCount: toolCalls?.length ?? 0,
+       tokens: response.usage?.total_tokens,
+     });
+
+     return {
+       text,
+       toolCalls: toolCalls?.length ? toolCalls : undefined,
+       finishReason: choice.finish_reason ?? undefined,
+       usage: response.usage
+         ? {
+             promptTokens: response.usage.prompt_tokens,
+             completionTokens: response.usage.completion_tokens,
+             totalTokens: response.usage.total_tokens,
+           }
+         : undefined,
+     };
+   }
+
+   async streamText(messages: Message[], options?: StreamOptions): Promise<GenerateResult> {
+     const body: Record<string, unknown> = {
+       model: this.model,
+       messages: toOpenAIMessages(messages),
+       temperature: options?.temperature ?? 0.7,
+       max_tokens: options?.maxTokens,
+       stop: options?.stop,
+       stream: true,
+     };
+
+     const tools = toOpenAITools(options?.tools);
+     if (tools?.length) {
+       body.tools = tools;
+       body.tool_choice = options?.toolChoice === 'none' ? 'none' : typeof options?.toolChoice === 'object' ? { type: 'function', function: { name: options.toolChoice.name } } : 'auto';
+     }
+
+     const stream = await this.client.chat.completions.create(body as unknown as OpenAICreateParams) as AsyncIterable<OpenAIStreamChunk>;
+
+     let fullText = '';
+     const toolCallsMap = new Map<number, { id: string; name: string; args: string }>();
+     let finishReason: string | undefined;
+     let usage: GenerateResult['usage'];
+
+     for await (const chunk of stream) {
+       const delta = chunk.choices?.[0]?.delta;
+       if (!delta) continue;
+
+       // Handle text content
+       if (delta.content) {
+         const textDelta = delta.content;
+         fullText += textDelta;
+         options?.onChunk?.({ type: 'text', text: textDelta });
+       }
+
+       // Handle tool calls: the first chunk for an index carries the id and name,
+       // later chunks append raw JSON argument fragments.
+       if (delta.tool_calls) {
+         for (const tc of delta.tool_calls) {
+           if (tc.id) {
+             toolCallsMap.set(tc.index, {
+               id: tc.id,
+               name: tc.function?.name ?? '',
+               args: tc.function?.arguments ?? '',
+             });
+           } else if (tc.function?.arguments) {
+             const existing = toolCallsMap.get(tc.index);
+             if (existing) {
+               existing.args += tc.function.arguments;
+             }
+           }
+
+           if (tc.function?.arguments) {
+             options?.onChunk?.({
+               type: 'tool_call',
+               id: tc.id ?? toolCallsMap.get(tc.index)?.id ?? '',
+               name: tc.function.name ?? toolCallsMap.get(tc.index)?.name ?? '',
+               argsDelta: tc.function.arguments,
+             });
+           }
+         }
+       }
+
+       if (chunk.choices?.[0]?.finish_reason) {
+         finishReason = chunk.choices[0].finish_reason ?? undefined;
+       }
+
+       // Only present if the server reports usage on the final streamed chunk.
+       if (chunk.usage) {
+         usage = {
+           promptTokens: chunk.usage.prompt_tokens,
+           completionTokens: chunk.usage.completion_tokens,
+           totalTokens: chunk.usage.total_tokens,
+         };
+       }
+     }
+
+     // Parse accumulated tool-call arguments
+     const toolCalls: ToolCall[] = Array.from(toolCallsMap.values()).map(tc => ({
+       id: tc.id,
+       name: tc.name,
+       arguments: (() => {
+         try {
+           return JSON.parse(tc.args) as Record<string, unknown>;
+         } catch {
+           return {};
+         }
+       })(),
+     }));
+
+     return {
+       text: fullText,
+       toolCalls: toolCalls.length ? toolCalls : undefined,
+       finishReason,
+       usage,
+     };
+   }
+ }
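End to end, a call with tool definitions looks like this (a sketch: the root import and the get_weather tool are assumptions for illustration, not part of the package):

import { OpenAIProvider } from 'confused-ai-core'; // assumed re-export

const llm = new OpenAIProvider({ apiKey: process.env.OPENAI_API_KEY, model: 'gpt-4o' });

const result = await llm.generateText(
  [{ role: 'user', content: 'What is the weather in Paris?' }],
  {
    tools: [{
      name: 'get_weather', // hypothetical tool
      description: 'Current weather for a city',
      parameters: { type: 'object', properties: { city: { type: 'string' } }, required: ['city'] },
    }],
  },
);

// If the model requested the tool, execute it and send the result back as a
// { role: 'tool', content, toolCallId } message on the next generateText call.
for (const call of result.toolCalls ?? []) {
  console.log(call.name, call.arguments); // e.g. get_weather { city: 'Paris' }
}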
package/src/llm/openrouter-provider.ts
@@ -0,0 +1,29 @@
+ /**
+  * OpenRouter LLM provider.
+  * OpenRouter (https://openrouter.ai) routes to many models (OpenAI, Anthropic, Google, Meta, etc.) via one API.
+  * Uses the OpenAI-compatible endpoint; this is a thin wrapper around OpenAIProvider.
+  */
+
+ import type { LLMProvider } from './types.js';
+ import { OpenAIProvider } from './openai-provider.js';
+
+ const OPENROUTER_BASE_URL = 'https://openrouter.ai/api/v1';
+
+ export interface OpenRouterProviderConfig {
+   /** OpenRouter API key (falls back to OPENROUTER_API_KEY). */
+   apiKey?: string;
+   /** Model id, e.g. openai/gpt-4o, anthropic/claude-3-sonnet, google/gemini-pro. Default: openai/gpt-4o */
+   model?: string;
+ }
+
+ /**
+  * Create an LLM provider that uses OpenRouter.
+  * Gives access to many best-in-class models (OpenAI, Anthropic, Google, etc.) through one API.
+  */
+ export function createOpenRouterProvider(config: OpenRouterProviderConfig = {}): LLMProvider {
+   return new OpenAIProvider({
+     apiKey: config.apiKey ?? process.env.OPENROUTER_API_KEY,
+     baseURL: OPENROUTER_BASE_URL,
+     model: config.model ?? 'openai/gpt-4o',
+   });
+ }
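Usage mirrors OpenAIProvider; with the environment fallback, configuration can be a single model id (a sketch, assuming OPENROUTER_API_KEY is set and the factory is re-exported from the package root):

import { createOpenRouterProvider } from 'confused-ai-core'; // assumed re-export

const llm = createOpenRouterProvider({ model: 'anthropic/claude-3-sonnet' });
const res = await llm.generateText([{ role: 'user', content: 'Say hello in French.' }]);
console.log(res.text);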
package/src/llm/types.ts
@@ -0,0 +1,131 @@
+ /**
+  * LLM provider abstraction for production-grade agent frameworks.
+  * Implement this interface to plug in OpenAI, Anthropic, Google, or custom backends.
+  */
+
+ /**
+  * Role of a message in a conversation
+  */
+ export type MessageRole = 'system' | 'user' | 'assistant' | 'tool';
+
+ /**
+  * Multimodal content part: text, image_url, etc. (OpenAI-style)
+  */
+ export type ContentPart =
+   | { readonly type: 'text'; readonly text: string }
+   | { readonly type: 'image_url'; readonly image_url: { readonly url: string; readonly detail?: 'low' | 'high' | 'auto' } }
+   | { readonly type: 'file'; readonly file: { readonly url: string; readonly filename?: string } }
+   | { readonly type: 'audio'; readonly audio: { readonly url: string } }
+   | { readonly type: 'video'; readonly video: { readonly url: string } };
+
+ /**
+  * A single message in a conversation.
+  * content can be string (text-only) or ContentPart[] for multimodal (text, images, audio, video, files).
+  */
+ export interface Message {
+   readonly role: MessageRole;
+   readonly content: string | ContentPart[];
+ }
+
+ /**
+  * Message with optional toolCallId (for role 'tool')
+  */
+ export interface MessageWithToolId extends Message {
+   readonly toolCallId?: string;
+ }
+
+ /**
+  * Tool call requested by the model (name + arguments)
+  */
+ export interface ToolCall {
+   readonly id: string;
+   readonly name: string;
+   readonly arguments: Record<string, unknown>;
+ }
+
+ /**
+  * Tool result to send back to the model
+  */
+ export interface ToolResultMessage {
+   readonly toolCallId: string;
+   readonly content: string;
+ }
+
+ /**
+  * Assistant message that may include tool calls
+  */
+ export interface AssistantMessage extends Message {
+   readonly role: 'assistant';
+   readonly content: string;
+   readonly toolCalls?: ToolCall[];
+ }
+
+ /**
+  * Tool definition for the LLM (name, description, parameters schema as JSON Schema)
+  */
+ export interface LLMToolDefinition {
+   readonly name: string;
+   readonly description: string;
+   readonly parameters: Record<string, unknown>; // JSON Schema
+ }
+
+ /**
+  * Result of a single generation (no streaming)
+  */
+ export interface GenerateResult {
+   readonly text: string;
+   readonly toolCalls?: ToolCall[];
+   readonly finishReason?: string;
+   readonly usage?: { promptTokens?: number; completionTokens?: number; totalTokens?: number };
+ }
+
+ /**
+  * Options for generateText
+  */
+ export interface GenerateOptions {
+   readonly temperature?: number;
+   readonly maxTokens?: number;
+   readonly tools?: LLMToolDefinition[];
+   readonly toolChoice?: 'auto' | 'none' | { type: 'tool'; name: string };
+   readonly stop?: string[];
+ }
+
+ /**
+  * Streaming deltas: StreamChunk carries a text delta, StreamToolCallChunk a tool-call delta.
+  */
+ export interface StreamChunk {
+   readonly type: 'text';
+   readonly text: string;
+ }
+
+ export interface StreamToolCallChunk {
+   readonly type: 'tool_call';
+   readonly id: string;
+   readonly name: string;
+   readonly argsDelta: string;
+ }
+
+ export type StreamDelta = StreamChunk | StreamToolCallChunk;
+
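StreamDelta is discriminated on type, so an onChunk handler narrows with a plain check (a minimal sketch):

const onChunk = (delta: StreamDelta): void => {
  if (delta.type === 'text') {
    process.stdout.write(delta.text); // incremental assistant text
  } else {
    // Tool-call arguments arrive as raw JSON fragments: buffer argsDelta per id
    // and JSON.parse only once the stream ends (as OpenAIProvider.streamText does).
    console.log(`tool ${delta.name} [${delta.id}] += ${delta.argsDelta}`);
  }
};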
+ /**
+  * Options for streamText
+  */
+ export interface StreamOptions extends GenerateOptions {
+   readonly onChunk?: (delta: StreamDelta) => void;
+ }
+
+ /**
+  * LLM provider interface.
+  * Implement for OpenAI, Anthropic, Google, local models, etc.
+  */
+ export interface LLMProvider {
+   /**
+    * Generate a single response (and optional tool calls) from messages.
+    */
+   generateText(messages: Message[], options?: GenerateOptions): Promise<GenerateResult>;
+
+   /**
+    * Stream response tokens and optional tool calls. Call onChunk for each delta.
+    */
+   streamText?(messages: Message[], options?: StreamOptions): Promise<GenerateResult>;
+ }
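Since streamText is optional, a complete provider can be a single method. A minimal sketch of a canned provider for tests (every name below is local to the example, not part of the package):

import type { LLMProvider, Message, GenerateOptions, GenerateResult } from './types.js';

class CannedProvider implements LLMProvider {
  constructor(private readonly reply: string) {}

  async generateText(messages: Message[], _options?: GenerateOptions): Promise<GenerateResult> {
    const last = messages[messages.length - 1];
    const prompt = typeof last?.content === 'string' ? last.content : '';
    // Return a fixed reply; zeroed usage keeps usage-reading callers happy.
    return {
      text: `${this.reply} (prompt was ${prompt.length} chars)`,
      finishReason: 'stop',
      usage: { promptTokens: 0, completionTokens: 0, totalTokens: 0 },
    };
  }
}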