@yourgpt/llm-sdk 0.1.0 → 0.1.1

This diff compares publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (56)
  1. package/README.md +61 -40
  2. package/dist/adapters/index.d.mts +4 -258
  3. package/dist/adapters/index.d.ts +4 -258
  4. package/dist/adapters/index.js +0 -113
  5. package/dist/adapters/index.js.map +1 -1
  6. package/dist/adapters/index.mjs +1 -112
  7. package/dist/adapters/index.mjs.map +1 -1
  8. package/dist/base-D_FyHFKj.d.mts +235 -0
  9. package/dist/base-D_FyHFKj.d.ts +235 -0
  10. package/dist/index.d.mts +145 -450
  11. package/dist/index.d.ts +145 -450
  12. package/dist/index.js +1837 -307
  13. package/dist/index.js.map +1 -1
  14. package/dist/index.mjs +1827 -305
  15. package/dist/index.mjs.map +1 -1
  16. package/dist/providers/anthropic/index.d.mts +61 -0
  17. package/dist/providers/anthropic/index.d.ts +61 -0
  18. package/dist/providers/anthropic/index.js +939 -0
  19. package/dist/providers/anthropic/index.js.map +1 -0
  20. package/dist/providers/anthropic/index.mjs +934 -0
  21. package/dist/providers/anthropic/index.mjs.map +1 -0
  22. package/dist/providers/azure/index.d.mts +38 -0
  23. package/dist/providers/azure/index.d.ts +38 -0
  24. package/dist/providers/azure/index.js +380 -0
  25. package/dist/providers/azure/index.js.map +1 -0
  26. package/dist/providers/azure/index.mjs +377 -0
  27. package/dist/providers/azure/index.mjs.map +1 -0
  28. package/dist/providers/google/index.d.mts +72 -0
  29. package/dist/providers/google/index.d.ts +72 -0
  30. package/dist/providers/google/index.js +790 -0
  31. package/dist/providers/google/index.js.map +1 -0
  32. package/dist/providers/google/index.mjs +785 -0
  33. package/dist/providers/google/index.mjs.map +1 -0
  34. package/dist/providers/ollama/index.d.mts +24 -0
  35. package/dist/providers/ollama/index.d.ts +24 -0
  36. package/dist/providers/ollama/index.js +235 -0
  37. package/dist/providers/ollama/index.js.map +1 -0
  38. package/dist/providers/ollama/index.mjs +232 -0
  39. package/dist/providers/ollama/index.mjs.map +1 -0
  40. package/dist/providers/openai/index.d.mts +82 -0
  41. package/dist/providers/openai/index.d.ts +82 -0
  42. package/dist/providers/openai/index.js +679 -0
  43. package/dist/providers/openai/index.js.map +1 -0
  44. package/dist/providers/openai/index.mjs +674 -0
  45. package/dist/providers/openai/index.mjs.map +1 -0
  46. package/dist/providers/xai/index.d.mts +78 -0
  47. package/dist/providers/xai/index.d.ts +78 -0
  48. package/dist/providers/xai/index.js +671 -0
  49. package/dist/providers/xai/index.js.map +1 -0
  50. package/dist/providers/xai/index.mjs +666 -0
  51. package/dist/providers/xai/index.mjs.map +1 -0
  52. package/dist/types-BBCZ3Fxy.d.mts +308 -0
  53. package/dist/types-CdORv1Yu.d.mts +338 -0
  54. package/dist/types-CdORv1Yu.d.ts +338 -0
  55. package/dist/types-DcoCaVVC.d.ts +308 -0
  56. package/package.json +34 -3
package/dist/types-CdORv1Yu.d.mts (package/dist/types-CdORv1Yu.d.ts is identical)
@@ -0,0 +1,338 @@
+ import { z } from 'zod';
+
+ /**
+  * Core Types for @yourgpt/llm-sdk
+  *
+  * Modern, instance-based types following Vercel AI SDK patterns.
+  */
+
+ /**
+  * A language model instance that can generate text.
+  * This is what provider functions like `openai('gpt-4o')` return.
+  */
+ interface LanguageModel {
+   /** Provider identifier (e.g., 'openai', 'anthropic') */
+   readonly provider: string;
+   /** Model identifier (e.g., 'gpt-4o', 'claude-3-5-sonnet') */
+   readonly modelId: string;
+   /** Model capabilities for feature detection */
+   readonly capabilities: ModelCapabilities;
+   /**
+    * Generate a complete response (non-streaming).
+    * Used internally by generateText().
+    */
+   doGenerate(params: DoGenerateParams): Promise<DoGenerateResult>;
+   /**
+    * Stream a response.
+    * Used internally by streamText().
+    */
+   doStream(params: DoGenerateParams): AsyncGenerator<StreamChunk>;
+ }
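
These two methods are the entire provider contract, which makes a model easy to stub. A minimal sketch of a conforming "echo" model — assuming the package root re-exports these types by their full names (this chunk file only exports them under single-letter aliases; the public barrel presumably restores the names):

import type { LanguageModel } from '@yourgpt/llm-sdk';

const echoModel: LanguageModel = {
  provider: 'echo',
  modelId: 'echo-1',
  capabilities: {
    supportsVision: false,
    supportsTools: false,
    supportsStreaming: true,
    supportsJsonMode: false,
    supportsThinking: false,
    supportsPDF: false,
    maxTokens: 4096,
    supportedImageTypes: [],
  },
  // echo the last message back, with zeroed usage
  async doGenerate(params) {
    const last = params.messages[params.messages.length - 1];
    const text = last && typeof last.content === 'string' ? last.content : '';
    return {
      text,
      toolCalls: [],
      finishReason: 'stop',
      usage: { promptTokens: 0, completionTokens: 0, totalTokens: 0 },
    };
  },
  // stream the same answer as one text-delta followed by a finish chunk
  async *doStream(params) {
    const result = await this.doGenerate(params);
    yield { type: 'text-delta', text: result.text };
    yield { type: 'finish', finishReason: result.finishReason, usage: result.usage };
  },
};
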
+ /**
+  * Model capabilities for UI feature flags
+  */
+ interface ModelCapabilities {
+   /** Supports image inputs */
+   supportsVision: boolean;
+   /** Supports tool/function calling */
+   supportsTools: boolean;
+   /** Supports streaming responses */
+   supportsStreaming: boolean;
+   /** Supports JSON mode / structured output */
+   supportsJsonMode: boolean;
+   /** Supports extended thinking (Claude) */
+   supportsThinking: boolean;
+   /** Supports PDF document inputs */
+   supportsPDF: boolean;
+   /** Maximum context tokens */
+   maxTokens: number;
+   /** Supported image MIME types */
+   supportedImageTypes: string[];
+ }
+ /**
+  * Default capabilities for unknown models
+  */
+ declare const DEFAULT_CAPABILITIES: ModelCapabilities;
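
The capabilities object is what UI code is expected to branch on. A small sketch (not from the package) of gating an attachment button:

import type { LanguageModel } from '@yourgpt/llm-sdk';

function canAttach(model: LanguageModel, mimeType: string): boolean {
  const caps = model.capabilities;
  if (mimeType === 'application/pdf') return caps.supportsPDF;
  return caps.supportsVision && caps.supportedImageTypes.includes(mimeType);
}
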
+ /**
+  * Core message types for LLM conversations
+  */
+ type CoreMessage = SystemMessage | UserMessage | AssistantMessage | ToolMessage;
+ interface SystemMessage {
+   role: "system";
+   content: string;
+ }
+ interface UserMessage {
+   role: "user";
+   content: string | UserContentPart[];
+ }
+ interface AssistantMessage {
+   role: "assistant";
+   content: string | null;
+   toolCalls?: ToolCall[];
+ }
+ interface ToolMessage {
+   role: "tool";
+   toolCallId: string;
+   content: string;
+ }
+ /**
+  * Content parts for multimodal user messages
+  */
+ type UserContentPart = TextPart | ImagePart | FilePart;
+ interface TextPart {
+   type: "text";
+   text: string;
+ }
+ interface ImagePart {
+   type: "image";
+   /** Base64 data or URL */
+   image: string | Uint8Array;
+   /** MIME type (e.g., 'image/png') */
+   mimeType?: string;
+ }
+ interface FilePart {
+   type: "file";
+   /** Base64 data or URL */
+   data: string;
+   /** MIME type (e.g., 'application/pdf') */
+   mimeType: string;
+ }
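
A user message is either a plain string or a list of parts, so a multimodal turn looks like this (values are illustrative):

import type { UserMessage } from '@yourgpt/llm-sdk';

declare const base64Pdf: string; // assume a base64-encoded PDF obtained elsewhere

const question: UserMessage = {
  role: 'user',
  content: [
    { type: 'text', text: 'Summarize this report and describe the chart.' },
    { type: 'file', data: base64Pdf, mimeType: 'application/pdf' },
    { type: 'image', image: 'https://example.com/chart.png', mimeType: 'image/png' },
  ],
};
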
+ /**
+  * Tool definition with Zod schema support
+  */
+ interface Tool<TParams = unknown, TResult = unknown> {
+   /** Tool description for the LLM */
+   description: string;
+   /** Zod schema for parameters */
+   parameters: z.ZodType<TParams>;
+   /** Execute function */
+   execute: (params: TParams, context: ToolContext) => Promise<TResult>;
+ }
+ /**
+  * Context passed to tool execute function
+  */
+ interface ToolContext {
+   /** Abort signal for cancellation */
+   abortSignal?: AbortSignal;
+   /** Unique tool call ID */
+   toolCallId: string;
+   /** Optional: messages in conversation */
+   messages?: CoreMessage[];
+ }
+ /**
+  * Tool call from LLM response
+  */
+ interface ToolCall {
+   /** Unique ID for this tool call */
+   id: string;
+   /** Tool name */
+   name: string;
+   /** Parsed arguments */
+   args: Record<string, unknown>;
+ }
+ /**
+  * Tool execution result
+  */
+ interface ToolResult {
+   /** Tool call ID this result corresponds to */
+   toolCallId: string;
+   /** Result data (will be JSON stringified for LLM) */
+   result: unknown;
+ }
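
A tool pairs a Zod schema with a typed execute function, so TParams can be inferred from the schema. A sketch of a hypothetical weather tool (the tool name, schema, and body are invented for illustration):

import { z } from 'zod';
import type { Tool } from '@yourgpt/llm-sdk';

const weatherParams = z.object({
  city: z.string().describe('City to look up'),
});

// TParams is inferred from the schema, TResult from the annotation
const getWeather: Tool<z.infer<typeof weatherParams>, { tempC: number }> = {
  description: 'Get the current temperature in a city',
  parameters: weatherParams,
  async execute({ city }, context) {
    // a real tool would call a weather API here and honor context.abortSignal;
    // this stub returns a fixed reading
    if (context.abortSignal?.aborted) throw new Error(`aborted: ${city}`);
    return { tempC: 21 };
  },
};
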
+ /**
+  * Parameters for model.doGenerate() and model.doStream()
+  */
+ interface DoGenerateParams {
+   /** Messages to send to LLM */
+   messages: CoreMessage[];
+   /** Tools available to the LLM (already formatted for provider) */
+   tools?: unknown[];
+   /** Temperature (0-2) */
+   temperature?: number;
+   /** Maximum tokens to generate */
+   maxTokens?: number;
+   /** Abort signal */
+   signal?: AbortSignal;
+ }
+ /**
+  * Result from model.doGenerate()
+  */
+ interface DoGenerateResult {
+   /** Generated text content */
+   text: string;
+   /** Tool calls requested by the LLM */
+   toolCalls: ToolCall[];
+   /** Why generation stopped */
+   finishReason: FinishReason;
+   /** Token usage */
+   usage: TokenUsage;
+   /** Raw provider response (for debugging) */
+   rawResponse?: unknown;
+ }
+ /**
+  * Finish reason for generation
+  */
+ type FinishReason = "stop" | "length" | "tool-calls" | "content-filter" | "error" | "unknown";
+ /**
+  * Token usage statistics
+  */
+ interface TokenUsage {
+   promptTokens: number;
+   completionTokens: number;
+   totalTokens: number;
+ }
+ /**
+  * Stream chunk from model.doStream()
+  */
+ type StreamChunk = TextDeltaChunk | ToolCallChunk | ToolResultChunk | FinishChunk | ErrorChunk;
+ interface TextDeltaChunk {
+   type: "text-delta";
+   text: string;
+ }
+ interface ToolCallChunk {
+   type: "tool-call";
+   toolCall: ToolCall;
+ }
+ interface ToolResultChunk {
+   type: "tool-result";
+   toolCallId: string;
+   result: unknown;
+ }
+ interface FinishChunk {
+   type: "finish";
+   finishReason: FinishReason;
+   usage?: TokenUsage;
+ }
+ interface ErrorChunk {
+   type: "error";
+   error: Error;
+ }
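
StreamChunk is a discriminated union on its type field, so consumers can switch over it exhaustively. A Node-flavored sketch of draining doStream() directly:

import type { CoreMessage, LanguageModel } from '@yourgpt/llm-sdk';

async function drain(model: LanguageModel, messages: CoreMessage[]): Promise<void> {
  for await (const chunk of model.doStream({ messages })) {
    switch (chunk.type) {
      case 'text-delta':
        process.stdout.write(chunk.text);
        break;
      case 'tool-call':
        console.log('\ntool requested:', chunk.toolCall.name, chunk.toolCall.args);
        break;
      case 'tool-result':
        console.log('tool result for', chunk.toolCallId, chunk.result);
        break;
      case 'finish':
        console.log('\nfinished:', chunk.finishReason, chunk.usage ?? '(no usage reported)');
        break;
      case 'error':
        throw chunk.error;
    }
  }
}
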
+ /**
+  * Parameters for generateText()
+  */
+ interface GenerateTextParams {
+   /** Language model to use */
+   model: LanguageModel;
+   /** Simple prompt (converted to user message) */
+   prompt?: string;
+   /** System prompt */
+   system?: string;
+   /** Full message history */
+   messages?: CoreMessage[];
+   /** Tools available to the LLM */
+   tools?: Record<string, Tool>;
+   /** Maximum agentic steps (tool call loops) */
+   maxSteps?: number;
+   /** Temperature (0-2) */
+   temperature?: number;
+   /** Maximum tokens to generate */
+   maxTokens?: number;
+   /** Abort signal */
+   signal?: AbortSignal;
+ }
+ /**
+  * Result from generateText()
+  */
+ interface GenerateTextResult {
+   /** Final text output */
+   text: string;
+   /** Token usage */
+   usage: TokenUsage;
+   /** Why generation stopped */
+   finishReason: FinishReason;
+   /** All steps taken (for agentic workflows) */
+   steps: GenerateStep[];
+   /** All tool calls made across all steps */
+   toolCalls: ToolCall[];
+   /** All tool results across all steps */
+   toolResults: ToolResult[];
+   /** Final message list including tool interactions */
+   response: {
+     messages: CoreMessage[];
+   };
+ }
+ /**
+  * A single step in the generation process
+  */
+ interface GenerateStep {
+   /** Text generated in this step */
+   text: string;
+   /** Tool calls made in this step */
+   toolCalls: ToolCall[];
+   /** Tool results from this step */
+   toolResults: ToolResult[];
+   /** Finish reason for this step */
+   finishReason: FinishReason;
+   /** Token usage for this step */
+   usage: TokenUsage;
+ }
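
Put together, a tool-calling loop reads like the Vercel AI SDK it mirrors. A sketch, assuming generateText is exported from the package root and an openai provider factory from a providers/openai subpath (the new dist/providers/openai entry in the file list suggests this, but the exact import paths are not confirmed by this diff):

import { generateText } from '@yourgpt/llm-sdk';
import { openai } from '@yourgpt/llm-sdk/providers/openai';
import type { Tool } from '@yourgpt/llm-sdk';

declare const getWeather: Tool; // e.g. the weather tool sketched above

const result = await generateText({
  model: openai('gpt-4o'),
  system: 'You are a terse weather assistant.',
  prompt: 'How warm is it in Paris right now?',
  tools: { getWeather },
  maxSteps: 3, // allow up to three tool-call round trips
});

console.log(result.text);
console.log(`${result.steps.length} steps, ${result.usage.totalTokens} tokens`);
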
+ /**
+  * Parameters for streamText() - same as generateText()
+  */
+ type StreamTextParams = GenerateTextParams;
+ /**
+  * Result from streamText()
+  */
+ interface StreamTextResult {
+   /** Async iterable of text chunks only */
+   textStream: AsyncIterable<string>;
+   /** Async iterable of all stream parts */
+   fullStream: AsyncIterable<StreamPart>;
+   /** Promise that resolves to full text when complete */
+   readonly text: Promise<string>;
+   /** Promise that resolves to usage when complete */
+   readonly usage: Promise<TokenUsage>;
+   /** Promise that resolves to finish reason when complete */
+   readonly finishReason: Promise<FinishReason>;
+   /** Convert to plain text streaming Response */
+   toTextStreamResponse(options?: ResponseOptions): Response;
+   /** Convert to data stream Response (SSE with tool calls) */
+   toDataStreamResponse(options?: ResponseOptions): Response;
+ }
+ /**
+  * Stream part for fullStream
+  */
+ type StreamPart = {
+   type: "text-delta";
+   text: string;
+ } | {
+   type: "tool-call-start";
+   toolCallId: string;
+   toolName: string;
+ } | {
+   type: "tool-call-delta";
+   toolCallId: string;
+   argsText: string;
+ } | {
+   type: "tool-call-complete";
+   toolCall: ToolCall;
+ } | {
+   type: "tool-result";
+   toolCallId: string;
+   result: unknown;
+ } | {
+   type: "step-start";
+   step: number;
+ } | {
+   type: "step-finish";
+   step: number;
+   finishReason: FinishReason;
+ } | {
+   type: "finish";
+   finishReason: FinishReason;
+   usage: TokenUsage;
+ } | {
+   type: "error";
+   error: Error;
+ };
+ /**
+  * Options for Response helpers
+  */
+ interface ResponseOptions {
+   /** Additional headers */
+   headers?: Record<string, string>;
+   /** Response status (default: 200) */
+   status?: number;
+ }
+
+ export { type AssistantMessage as A, type CoreMessage as C, type DoGenerateParams as D, type ErrorChunk as E, type FilePart as F, type GenerateTextParams as G, type ImagePart as I, type LanguageModel as L, type ModelCapabilities as M, type ResponseOptions as R, type StreamTextParams as S, type ToolContext as T, type UserMessage as U, type GenerateTextResult as a, type StreamTextResult as b, type Tool as c, type DoGenerateResult as d, type SystemMessage as e, type ToolMessage as f, type UserContentPart as g, type TextPart as h, type ToolCall as i, type ToolResult as j, type GenerateStep as k, type StreamPart as l, type StreamChunk as m, type TextDeltaChunk as n, type ToolCallChunk as o, type ToolResultChunk as p, type FinishChunk as q, type TokenUsage as r, type FinishReason as s, DEFAULT_CAPABILITIES as t };
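
streamText() shares its parameters with generateText() and is what the Response helpers hang off. A sketch of a web-standard route handler; the import paths are assumed as above, and the diff does not show whether streamText returns its result synchronously or via a promise — a synchronous return, as in the Vercel AI SDK, is assumed here:

import { streamText } from '@yourgpt/llm-sdk';
import { openai } from '@yourgpt/llm-sdk/providers/openai';

export async function POST(req: Request): Promise<Response> {
  const { messages } = await req.json();
  const result = streamText({ model: openai('gpt-4o'), messages });
  // per its doc comment, toDataStreamResponse emits SSE carrying tool-call
  // parts as well as text; toTextStreamResponse would send plain text only
  return result.toDataStreamResponse({ headers: { 'cache-control': 'no-store' } });
}

On the server or in a CLI, the same result can instead be drained via `for await (const delta of result.textStream)`, with result.text, result.usage, and result.finishReason resolving once the stream completes.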