@yourgpt/llm-sdk 2.0.1 → 2.0.2-beta.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. package/dist/adapters/index.d.mts +27 -4
  2. package/dist/adapters/index.d.ts +27 -4
  3. package/dist/adapters/index.js +395 -25
  4. package/dist/adapters/index.js.map +1 -1
  5. package/dist/adapters/index.mjs +395 -25
  6. package/dist/adapters/index.mjs.map +1 -1
  7. package/dist/index.d.mts +24 -5
  8. package/dist/index.d.ts +24 -5
  9. package/dist/index.js +20 -4
  10. package/dist/index.js.map +1 -1
  11. package/dist/index.mjs +20 -4
  12. package/dist/index.mjs.map +1 -1
  13. package/dist/providers/anthropic/index.d.mts +1 -2
  14. package/dist/providers/anthropic/index.d.ts +1 -2
  15. package/dist/providers/anthropic/index.js +108 -12
  16. package/dist/providers/anthropic/index.js.map +1 -1
  17. package/dist/providers/anthropic/index.mjs +108 -12
  18. package/dist/providers/anthropic/index.mjs.map +1 -1
  19. package/dist/providers/azure/index.d.mts +1 -2
  20. package/dist/providers/azure/index.d.ts +1 -2
  21. package/dist/providers/azure/index.js.map +1 -1
  22. package/dist/providers/azure/index.mjs.map +1 -1
  23. package/dist/providers/google/index.d.mts +1 -2
  24. package/dist/providers/google/index.d.ts +1 -2
  25. package/dist/providers/google/index.js +61 -2
  26. package/dist/providers/google/index.js.map +1 -1
  27. package/dist/providers/google/index.mjs +61 -2
  28. package/dist/providers/google/index.mjs.map +1 -1
  29. package/dist/providers/ollama/index.d.mts +8 -3
  30. package/dist/providers/ollama/index.d.ts +8 -3
  31. package/dist/providers/ollama/index.js +227 -17
  32. package/dist/providers/ollama/index.js.map +1 -1
  33. package/dist/providers/ollama/index.mjs +227 -17
  34. package/dist/providers/ollama/index.mjs.map +1 -1
  35. package/dist/providers/openai/index.d.mts +1 -2
  36. package/dist/providers/openai/index.d.ts +1 -2
  37. package/dist/providers/openai/index.js +57 -3
  38. package/dist/providers/openai/index.js.map +1 -1
  39. package/dist/providers/openai/index.mjs +57 -3
  40. package/dist/providers/openai/index.mjs.map +1 -1
  41. package/dist/providers/openrouter/index.d.mts +56 -3
  42. package/dist/providers/openrouter/index.d.ts +56 -3
  43. package/dist/providers/openrouter/index.js +90 -276
  44. package/dist/providers/openrouter/index.js.map +1 -1
  45. package/dist/providers/openrouter/index.mjs +89 -277
  46. package/dist/providers/openrouter/index.mjs.map +1 -1
  47. package/dist/providers/xai/index.d.mts +1 -2
  48. package/dist/providers/xai/index.d.ts +1 -2
  49. package/dist/providers/xai/index.js.map +1 -1
  50. package/dist/providers/xai/index.mjs.map +1 -1
  51. package/dist/{base-DdxolpKP.d.mts → types-C_f95PKp.d.mts} +434 -3
  52. package/dist/{base-DdxolpKP.d.ts → types-C_f95PKp.d.ts} +434 -3
  53. package/package.json +1 -1
  54. package/dist/types-Ck25ZYma.d.mts +0 -323
  55. package/dist/types-Dsz8SpdB.d.ts +0 -323
@@ -107,6 +107,13 @@ interface Tool<TParams = unknown, TResult = unknown> {
107
107
  parameters: z.ZodType<TParams>;
108
108
  /** Execute function */
109
109
  execute: (params: TParams, context: ToolContext$1) => Promise<TResult>;
110
+ /**
111
+ * Hide this tool's execution from the chat UI.
112
+ * When true, tool calls and results won't be displayed to the user,
113
+ * but the tool will still execute normally.
114
+ * @default false
115
+ */
116
+ hidden?: boolean;
110
117
  }
111
118
  /**
112
119
  * Context passed to tool execute function
@@ -342,7 +349,7 @@ interface ResponseOptions {
342
349
  /**
343
350
  * Stream event types
344
351
  */
345
- type StreamEventType = "message:start" | "message:delta" | "message:end" | "thinking:start" | "thinking:delta" | "thinking:end" | "action:start" | "action:args" | "action:end" | "tool_calls" | "tool:result" | "loop:iteration" | "loop:complete" | "error" | "done";
352
+ type StreamEventType = "message:start" | "message:delta" | "message:end" | "thinking:start" | "thinking:delta" | "thinking:end" | "action:start" | "action:args" | "action:end" | "tool_calls" | "tool:result" | "citation" | "loop:iteration" | "loop:complete" | "error" | "done";
346
353
  /**
347
354
  * Base event interface
348
355
  */
@@ -479,6 +486,30 @@ interface LoopCompleteEvent extends BaseEvent {
479
486
  aborted?: boolean;
480
487
  maxIterationsReached?: boolean;
481
488
  }
489
+ /**
490
+ * Citation from web search (unified format for all providers)
491
+ */
492
+ interface Citation {
493
+ /** Unique citation index (1-based) */
494
+ index: number;
495
+ /** Source URL */
496
+ url: string;
497
+ /** Page title */
498
+ title: string;
499
+ /** Cited text snippet (optional) */
500
+ citedText?: string;
501
+ /** Source domain (extracted from URL) */
502
+ domain?: string;
503
+ /** Favicon URL (generated from domain) */
504
+ favicon?: string;
505
+ }
506
+ /**
507
+ * Citation event - web search returned citations
508
+ */
509
+ interface CitationEvent extends BaseEvent {
510
+ type: "citation";
511
+ citations: Citation[];
512
+ }
482
513
  /**
483
514
  * Message format for done event (API format with snake_case)
484
515
  */
@@ -516,7 +547,7 @@ interface DoneEvent extends BaseEvent {
516
547
  /**
517
548
  * Union of all stream events
518
549
  */
519
- type StreamEvent = MessageStartEvent | MessageDeltaEvent | MessageEndEvent | ThinkingStartEvent | ThinkingDeltaEvent | ThinkingEndEvent | ActionStartEvent | ActionArgsEvent | ActionEndEvent | ToolCallsEvent | ToolResultEvent | LoopIterationEvent | LoopCompleteEvent | ErrorEvent | DoneEvent;
550
+ type StreamEvent = MessageStartEvent | MessageDeltaEvent | MessageEndEvent | ThinkingStartEvent | ThinkingDeltaEvent | ThinkingEndEvent | ActionStartEvent | ActionArgsEvent | ActionEndEvent | ToolCallsEvent | ToolResultEvent | CitationEvent | LoopIterationEvent | LoopCompleteEvent | ErrorEvent | DoneEvent;
520
551
  /**
521
552
  * LLM configuration
522
553
  */
@@ -671,6 +702,49 @@ interface AgentLoopConfig {
671
702
  debug?: boolean;
672
703
  enabled?: boolean;
673
704
  }
705
+ /**
706
+ * Web search configuration for native provider search
707
+ *
708
+ * Enables native web search for supported providers:
709
+ * - Anthropic: Uses Claude's built-in web search tool
710
+ * - OpenAI: Uses GPT's web search preview
711
+ * - Google: Uses Gemini's Google Search grounding
712
+ *
713
+ * @example
714
+ * ```typescript
715
+ * const runtime = createRuntime({
716
+ * provider: createAnthropic({ apiKey: '...' }),
717
+ * model: 'claude-sonnet-4-20250514',
718
+ * webSearch: true, // Enable with defaults
719
+ * });
720
+ *
721
+ * // Or with configuration
722
+ * const runtime = createRuntime({
723
+ * provider: createOpenAI({ apiKey: '...' }),
724
+ * model: 'gpt-4o',
725
+ * webSearch: {
726
+ * maxUses: 5,
727
+ * allowedDomains: ['docs.anthropic.com', 'openai.com'],
728
+ * },
729
+ * });
730
+ * ```
731
+ */
732
+ interface WebSearchConfig {
733
+ /** Maximum number of search uses per request (default: unlimited) */
734
+ maxUses?: number;
735
+ /** Only search these domains (provider-specific support) */
736
+ allowedDomains?: string[];
737
+ /** Exclude these domains from search (provider-specific support) */
738
+ blockedDomains?: string[];
739
+ /** User location for localized results (Anthropic only) */
740
+ userLocation?: {
741
+ type: "approximate";
742
+ city?: string;
743
+ region?: string;
744
+ country?: string;
745
+ timezone?: string;
746
+ };
747
+ }
674
748
  /**
675
749
  * Unified tool call format
676
750
  */
@@ -741,6 +815,11 @@ interface ChatCompletionRequest {
741
815
  config?: RequestLLMConfig;
742
816
  /** Abort signal for cancellation */
743
817
  signal?: AbortSignal;
818
+ /**
819
+ * Enable native web search for the provider.
820
+ * When true or configured, the provider's native search is enabled.
821
+ */
822
+ webSearch?: boolean | WebSearchConfig;
744
823
  }
745
824
  /**
746
825
  * Non-streaming completion result
@@ -955,4 +1034,356 @@ type OpenAIMessage = {
955
1034
  */
956
1035
  declare function formatMessagesForOpenAI(messages: Message[], systemPrompt?: string): OpenAIMessage[];
957
1036
 
958
- export { formatTools as $, type ActionDefinition as A, type FinishChunk as B, type CoreMessage as C, type DoneEventMessage as D, type ErrorChunk as E, type FilePart as F, type GenerateTextParams as G, type TokenUsage as H, type ImagePart as I, type FinishReason as J, type KnowledgeBaseConfig as K, type LLMAdapter as L, type Message as M, DEFAULT_CAPABILITIES as N, type ChatCompletionRequest as O, type AdapterFactory as P, type LLMConfig as Q, type ResponseOptions as R, type StreamTextParams as S, type ToolContext$1 as T, type UserMessage as U, type ToolLocation as V, type UnifiedToolCall as W, type UnifiedToolResult as X, type ToolExecution as Y, type CompletionResult as Z, formatMessages as _, type GenerateTextResult as a, formatMessagesForAnthropic as a0, formatMessagesForOpenAI as a1, messageToAnthropicContent as a2, messageToOpenAIContent as a3, hasImageAttachments as a4, hasMediaAttachments as a5, attachmentToAnthropicImage as a6, attachmentToAnthropicDocument as a7, attachmentToOpenAIImage as a8, type AnthropicContentBlock as a9, type OpenAIContentBlock as aa, type StreamTextResult as b, type Tool as c, type ToolDefinition as d, type AgentLoopConfig as e, type StreamEvent as f, type ToolCallInfo as g, type TokenUsageRaw as h, type ToolResponse as i, type LanguageModel as j, type ModelCapabilities as k, type DoGenerateParams as l, type DoGenerateResult as m, type SystemMessage as n, type AssistantMessage as o, type ToolMessage as p, type UserContentPart as q, type TextPart as r, type ToolCall$1 as s, type ToolResult as t, type GenerateStep as u, type StreamPart as v, type StreamChunk as w, type TextDeltaChunk as x, type ToolCallChunk as y, type ToolResultChunk as z };
1037
+ /**
1038
+ * Provider Types
1039
+ *
1040
+ * Defines interfaces for:
1041
+ * 1. Provider Formatters (for tool transformations in agent loop)
1042
+ * 2. Multi-provider architecture (AIProvider, capabilities, configs)
1043
+ */
1044
+
1045
+ /**
1046
+ * Provider formatter interface
1047
+ *
1048
+ * Each provider implements this interface to handle:
1049
+ * - Tool definition transformation
1050
+ * - Tool call parsing from responses
1051
+ * - Tool result formatting
1052
+ * - Stop reason detection
1053
+ */
1054
+ interface ProviderFormatter {
1055
+ /**
1056
+ * Transform unified tool definitions to provider format
1057
+ */
1058
+ transformTools(tools: ToolDefinition[]): unknown[];
1059
+ /**
1060
+ * Parse tool calls from provider response
1061
+ */
1062
+ parseToolCalls(response: unknown): UnifiedToolCall[];
1063
+ /**
1064
+ * Format tool results for provider
1065
+ */
1066
+ formatToolResults(results: UnifiedToolResult[]): unknown[];
1067
+ /**
1068
+ * Check if response indicates tool use is requested
1069
+ */
1070
+ isToolUseStop(response: unknown): boolean;
1071
+ /**
1072
+ * Check if response indicates end of turn
1073
+ */
1074
+ isEndTurnStop(response: unknown): boolean;
1075
+ /**
1076
+ * Get stop reason string from response
1077
+ */
1078
+ getStopReason(response: unknown): string;
1079
+ /**
1080
+ * Extract text content from response
1081
+ */
1082
+ extractTextContent(response: unknown): string;
1083
+ /**
1084
+ * Build assistant message with tool calls for conversation history
1085
+ */
1086
+ buildAssistantToolMessage(toolCalls: UnifiedToolCall[], textContent?: string): unknown;
1087
+ /**
1088
+ * Build user message with tool results for conversation history
1089
+ */
1090
+ buildToolResultMessage(results: UnifiedToolResult[]): unknown;
1091
+ }
1092
+ /**
1093
+ * Anthropic tool definition format
1094
+ */
1095
+ interface AnthropicTool {
1096
+ name: string;
1097
+ description: string;
1098
+ input_schema: {
1099
+ type: "object";
1100
+ properties: Record<string, unknown>;
1101
+ required?: string[];
1102
+ };
1103
+ }
1104
+ /**
1105
+ * Anthropic tool_use block from response
1106
+ */
1107
+ interface AnthropicToolUse {
1108
+ type: "tool_use";
1109
+ id: string;
1110
+ name: string;
1111
+ input: Record<string, unknown>;
1112
+ }
1113
+ /**
1114
+ * Anthropic tool_result block
1115
+ */
1116
+ interface AnthropicToolResult {
1117
+ type: "tool_result";
1118
+ tool_use_id: string;
1119
+ content: string;
1120
+ }
1121
+ /**
1122
+ * OpenAI tool definition format
1123
+ */
1124
+ interface OpenAITool {
1125
+ type: "function";
1126
+ function: {
1127
+ name: string;
1128
+ description: string;
1129
+ parameters: {
1130
+ type: "object";
1131
+ properties: Record<string, unknown>;
1132
+ required?: string[];
1133
+ };
1134
+ };
1135
+ }
1136
+ /**
1137
+ * OpenAI tool call from response
1138
+ */
1139
+ interface OpenAIToolCall {
1140
+ id: string;
1141
+ type: "function";
1142
+ function: {
1143
+ name: string;
1144
+ arguments: string;
1145
+ };
1146
+ }
1147
+ /**
1148
+ * OpenAI tool result message
1149
+ */
1150
+ interface OpenAIToolResult {
1151
+ role: "tool";
1152
+ tool_call_id: string;
1153
+ content: string;
1154
+ }
1155
+ /**
1156
+ * Google Gemini function declaration
1157
+ */
1158
+ interface GeminiFunctionDeclaration {
1159
+ name: string;
1160
+ description: string;
1161
+ parameters?: {
1162
+ type: "object";
1163
+ properties: Record<string, unknown>;
1164
+ required?: string[];
1165
+ };
1166
+ }
1167
+ /**
1168
+ * Gemini function call from response
1169
+ */
1170
+ interface GeminiFunctionCall {
1171
+ name: string;
1172
+ args: Record<string, unknown>;
1173
+ }
1174
+ /**
1175
+ * Gemini function response
1176
+ */
1177
+ interface GeminiFunctionResponse {
1178
+ name: string;
1179
+ response: Record<string, unknown>;
1180
+ }
1181
+ /**
1182
+ * Capabilities of a model for UI feature flags
1183
+ * UI components can use this to enable/disable features
1184
+ */
1185
+ interface ProviderCapabilities {
1186
+ /** Supports image inputs */
1187
+ supportsVision: boolean;
1188
+ /** Supports tool/function calling */
1189
+ supportsTools: boolean;
1190
+ /** Supports extended thinking (Claude, DeepSeek) */
1191
+ supportsThinking: boolean;
1192
+ /** Supports streaming responses */
1193
+ supportsStreaming: boolean;
1194
+ /** Supports PDF document inputs */
1195
+ supportsPDF: boolean;
1196
+ /** Supports audio inputs */
1197
+ supportsAudio: boolean;
1198
+ /** Supports video inputs */
1199
+ supportsVideo: boolean;
1200
+ /** Maximum context tokens */
1201
+ maxTokens: number;
1202
+ /** Supported image MIME types */
1203
+ supportedImageTypes: string[];
1204
+ /** Supported audio MIME types */
1205
+ supportedAudioTypes?: string[];
1206
+ /** Supported video MIME types */
1207
+ supportedVideoTypes?: string[];
1208
+ /** Supports JSON mode / structured output */
1209
+ supportsJsonMode?: boolean;
1210
+ /** Supports system messages */
1211
+ supportsSystemMessages?: boolean;
1212
+ }
1213
+ /**
1214
+ * AI Provider interface (object form)
1215
+ *
1216
+ * Wraps existing LLMAdapter with additional metadata:
1217
+ * - Supported models list
1218
+ * - Per-model capabilities
1219
+ * - Provider name
1220
+ */
1221
+ interface AIProviderObject {
1222
+ /** Provider name (e.g., 'openai', 'anthropic') */
1223
+ readonly name: string;
1224
+ /** List of supported model IDs */
1225
+ readonly supportedModels: string[];
1226
+ /**
1227
+ * Get a language model adapter for the given model ID
1228
+ * Returns the existing LLMAdapter interface - no breaking changes
1229
+ */
1230
+ languageModel(modelId: string): LLMAdapter;
1231
+ /**
1232
+ * Get capabilities for a specific model
1233
+ * UI components use this to enable/disable features
1234
+ */
1235
+ getCapabilities(modelId: string): ProviderCapabilities;
1236
+ /**
1237
+ * Optional: Get an embedding model (future expansion)
1238
+ */
1239
+ embeddingModel?(modelId: string): EmbeddingModel;
1240
+ }
1241
+ /**
1242
+ * Callable AI Provider (Vercel AI SDK style)
1243
+ *
1244
+ * A function that returns a LanguageModel when called with a model ID,
1245
+ * but also has properties for provider metadata and methods.
1246
+ *
1247
+ * @example
1248
+ * ```typescript
1249
+ * const openai = createOpenAI({ apiKey: '...' });
1250
+ *
1251
+ * // Callable - returns LanguageModel directly (Vercel AI SDK style)
1252
+ * const model = openai('gpt-4o');
1253
+ *
1254
+ * // Also supports method calls (backward compatible)
1255
+ * const model2 = openai.languageModel('gpt-4o');
1256
+ *
1257
+ * // Check capabilities
1258
+ * const caps = openai.getCapabilities('gpt-4o');
1259
+ * if (caps.supportsVision) {
1260
+ * // Show image upload button
1261
+ * }
1262
+ * ```
1263
+ */
1264
+ interface AIProvider extends AIProviderObject {
1265
+ /**
1266
+ * Call the provider directly with a model ID to get a LanguageModel
1267
+ * This is the Vercel AI SDK style pattern
1268
+ */
1269
+ (modelId: string): LLMAdapter;
1270
+ }
1271
+ /**
1272
+ * Embedding model interface (for future expansion)
1273
+ */
1274
+ interface EmbeddingModel {
1275
+ readonly provider: string;
1276
+ readonly modelId: string;
1277
+ embed(texts: string[]): Promise<number[][]>;
1278
+ }
1279
+ /**
1280
+ * Base provider configuration
1281
+ */
1282
+ interface BaseProviderConfig {
1283
+ /** API key (falls back to environment variable) */
1284
+ apiKey?: string;
1285
+ /** Custom base URL */
1286
+ baseUrl?: string;
1287
+ /** Request timeout in milliseconds */
1288
+ timeout?: number;
1289
+ /** Custom headers to include */
1290
+ headers?: Record<string, string>;
1291
+ }
1292
+ /**
1293
+ * OpenAI provider configuration
1294
+ */
1295
+ interface OpenAIProviderConfig extends BaseProviderConfig {
1296
+ /** OpenAI organization ID */
1297
+ organization?: string;
1298
+ /** OpenAI project ID */
1299
+ project?: string;
1300
+ /** Vision detail level for images */
1301
+ imageDetail?: "auto" | "low" | "high";
1302
+ }
1303
+ /**
1304
+ * Anthropic provider configuration
1305
+ */
1306
+ interface AnthropicProviderConfig extends BaseProviderConfig {
1307
+ /** Extended thinking budget in tokens (minimum 1024) */
1308
+ thinkingBudget?: number;
1309
+ /** Enable prompt caching */
1310
+ cacheControl?: boolean;
1311
+ }
1312
+ /**
1313
+ * Google provider configuration
1314
+ */
1315
+ interface GoogleProviderConfig extends BaseProviderConfig {
1316
+ /** Safety settings */
1317
+ safetySettings?: GoogleSafetySetting[];
1318
+ /** Grounding configuration (for web search) */
1319
+ groundingConfig?: GoogleGroundingConfig;
1320
+ }
1321
+ /**
1322
+ * Google safety setting
1323
+ */
1324
+ interface GoogleSafetySetting {
1325
+ category: "HARM_CATEGORY_HARASSMENT" | "HARM_CATEGORY_HATE_SPEECH" | "HARM_CATEGORY_SEXUALLY_EXPLICIT" | "HARM_CATEGORY_DANGEROUS_CONTENT";
1326
+ threshold: "BLOCK_NONE" | "BLOCK_LOW_AND_ABOVE" | "BLOCK_MEDIUM_AND_ABOVE" | "BLOCK_HIGH_AND_ABOVE";
1327
+ }
1328
+ /**
1329
+ * Google grounding configuration
1330
+ */
1331
+ interface GoogleGroundingConfig {
1332
+ /** Enable Google Search grounding */
1333
+ googleSearchRetrieval?: boolean;
1334
+ }
1335
+ /**
1336
+ * xAI provider configuration
1337
+ */
1338
+ interface XAIProviderConfig extends BaseProviderConfig {
1339
+ }
1340
+ /**
1341
+ * Azure OpenAI provider configuration
1342
+ */
1343
+ interface AzureProviderConfig extends BaseProviderConfig {
1344
+ /** Azure resource name */
1345
+ resourceName: string;
1346
+ /** Deployment name */
1347
+ deploymentName: string;
1348
+ /** API version (default: 2024-02-15-preview) */
1349
+ apiVersion?: string;
1350
+ }
1351
+ /**
1352
+ * Ollama model-specific options
1353
+ * These map to Ollama's native API options
1354
+ */
1355
+ interface OllamaModelOptions {
1356
+ /** Context window size (default varies by model) */
1357
+ num_ctx?: number;
1358
+ /** Max tokens to predict (-1 = infinite, -2 = fill context) */
1359
+ num_predict?: number;
1360
+ /** Mirostat sampling (0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) */
1361
+ mirostat?: 0 | 1 | 2;
1362
+ /** Mirostat learning rate (default: 0.1) */
1363
+ mirostat_eta?: number;
1364
+ /** Mirostat target entropy (default: 5.0) */
1365
+ mirostat_tau?: number;
1366
+ /** Repeat penalty (default: 1.1) */
1367
+ repeat_penalty?: number;
1368
+ /** Random seed for reproducibility (-1 = random) */
1369
+ seed?: number;
1370
+ /** Top-k sampling (default: 40) */
1371
+ top_k?: number;
1372
+ /** Top-p (nucleus) sampling (default: 0.9) */
1373
+ top_p?: number;
1374
+ /** Min-p sampling (default: 0.0) */
1375
+ min_p?: number;
1376
+ /** Stop sequences */
1377
+ stop?: string[];
1378
+ /** Temperature override (also available in config) */
1379
+ temperature?: number;
1380
+ }
1381
+ /**
1382
+ * Ollama provider configuration
1383
+ */
1384
+ interface OllamaProviderConfig extends BaseProviderConfig {
1385
+ /** Default Ollama-specific model options */
1386
+ options?: OllamaModelOptions;
1387
+ }
1388
+
1389
+ export { type XAIProviderConfig as $, type AIProvider as A, type ToolResultChunk as B, type CoreMessage as C, type DoneEventMessage as D, type FinishChunk as E, type FilePart as F, type GenerateTextParams as G, type ErrorChunk as H, type ImagePart as I, type TokenUsage as J, type KnowledgeBaseConfig as K, type LLMAdapter as L, type Message as M, type FinishReason as N, DEFAULT_CAPABILITIES as O, type ChatCompletionRequest as P, type AdapterFactory as Q, type ResponseOptions as R, type StreamTextParams as S, type ToolContext$1 as T, type UserMessage as U, type ProviderCapabilities as V, type WebSearchConfig as W, type BaseProviderConfig as X, type OpenAIProviderConfig as Y, type AnthropicProviderConfig as Z, type GoogleProviderConfig as _, type GenerateTextResult as a, type AzureProviderConfig as a0, type OllamaProviderConfig as a1, type OllamaModelOptions as a2, type ProviderFormatter as a3, type AnthropicTool as a4, type AnthropicToolUse as a5, type AnthropicToolResult as a6, type OpenAITool as a7, type OpenAIToolCall as a8, type OpenAIToolResult as a9, type GeminiFunctionDeclaration as aa, type GeminiFunctionCall as ab, type GeminiFunctionResponse as ac, type LLMConfig as ad, type ToolLocation as ae, type UnifiedToolCall as af, type UnifiedToolResult as ag, type ToolExecution as ah, type Citation as ai, type CompletionResult as aj, formatMessages as ak, formatTools as al, formatMessagesForAnthropic as am, formatMessagesForOpenAI as an, messageToAnthropicContent as ao, messageToOpenAIContent as ap, hasImageAttachments as aq, hasMediaAttachments as ar, attachmentToAnthropicImage as as, attachmentToAnthropicDocument as at, attachmentToOpenAIImage as au, type AnthropicContentBlock as av, type OpenAIContentBlock as aw, type StreamTextResult as b, type Tool as c, type ActionDefinition as d, type ToolDefinition as e, type AgentLoopConfig as f, type StreamEvent as g, type ToolCallInfo as h, type TokenUsageRaw as i, type ToolResponse as j, type LanguageModel as k, type ModelCapabilities as l, type DoGenerateParams as m, type DoGenerateResult as n, type SystemMessage as o, type AssistantMessage as p, type ToolMessage as q, type UserContentPart as r, type TextPart as s, type ToolCall$1 as t, type ToolResult as u, type GenerateStep as v, type StreamPart as w, type StreamChunk as x, type TextDeltaChunk as y, type ToolCallChunk as z };
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@yourgpt/llm-sdk",
3
- "version": "2.0.1",
3
+ "version": "2.0.2-beta.2",
4
4
  "description": "AI SDK for building AI Agents with any LLM",
5
5
  "main": "./dist/index.js",
6
6
  "module": "./dist/index.mjs",