@yourgpt/llm-sdk 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +74 -0
- package/dist/adapters/index.d.mts +497 -0
- package/dist/adapters/index.d.ts +497 -0
- package/dist/adapters/index.js +1642 -0
- package/dist/adapters/index.js.map +1 -0
- package/dist/adapters/index.mjs +1616 -0
- package/dist/adapters/index.mjs.map +1 -0
- package/dist/index.d.mts +1048 -0
- package/dist/index.d.ts +1048 -0
- package/dist/index.js +4216 -0
- package/dist/index.js.map +1 -0
- package/dist/index.mjs +4170 -0
- package/dist/index.mjs.map +1 -0
- package/package.json +83 -0
package/dist/index.d.ts
ADDED
@@ -0,0 +1,1048 @@
import { ToolDefinition, UnifiedToolCall, UnifiedToolResult, LLMConfig, ActionDefinition, AgentLoopConfig, KnowledgeBaseConfig, StreamEvent, DoneEventMessage, Message, AIProvider as AIProvider$1, ToolResponse } from '@yourgpt/copilot-sdk/core';
export { ActionDefinition, AgentLoopConfig, LLMConfig, LLMProvider, Message, StreamEvent, ToolDefinition, ToolExecution, ToolLocation, ToolResponse, UnifiedToolCall, UnifiedToolResult } from '@yourgpt/copilot-sdk/core';
import { LLMAdapter } from './adapters/index.js';
export { AdapterFactory, AnthropicAdapter, AnthropicAdapterConfig, AzureAdapter, AzureAdapterConfig, ChatCompletionRequest, GoogleAdapter, GoogleAdapterConfig, GroqAdapter, GroqAdapterConfig, OllamaAdapter, OllamaAdapterConfig, OpenAIAdapter, OpenAIAdapterConfig, XAIAdapter, XAIAdapterConfig, createAnthropicAdapter, createAzureAdapter, createGoogleAdapter, createGroqAdapter, createOllamaAdapter, createOpenAIAdapter, createXAIAdapter } from './adapters/index.js';
import * as hono from 'hono';
import { Hono } from 'hono';

/**
 * Provider Types
 *
 * Defines interfaces for:
 * 1. Provider Formatters (for tool transformations in the agent loop)
 * 2. Multi-provider architecture (AIProvider, capabilities, configs)
 */

/**
 * Provider formatter interface
 *
 * Each provider implements this interface to handle:
 * - Tool definition transformation
 * - Tool call parsing from responses
 * - Tool result formatting
 * - Stop reason detection
 */
interface ProviderFormatter {
    /**
     * Transform unified tool definitions to provider format
     */
    transformTools(tools: ToolDefinition[]): unknown[];
    /**
     * Parse tool calls from provider response
     */
    parseToolCalls(response: unknown): UnifiedToolCall[];
    /**
     * Format tool results for provider
     */
    formatToolResults(results: UnifiedToolResult[]): unknown[];
    /**
     * Check if response indicates tool use is requested
     */
    isToolUseStop(response: unknown): boolean;
    /**
     * Check if response indicates end of turn
     */
    isEndTurnStop(response: unknown): boolean;
    /**
     * Get stop reason string from response
     */
    getStopReason(response: unknown): string;
    /**
     * Extract text content from response
     */
    extractTextContent(response: unknown): string;
    /**
     * Build assistant message with tool calls for conversation history
     */
    buildAssistantToolMessage(toolCalls: UnifiedToolCall[], textContent?: string): unknown;
    /**
     * Build user message with tool results for conversation history
     */
    buildToolResultMessage(results: UnifiedToolResult[]): unknown;
}
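/*
 * Illustrative sketch (not part of the published typings): a minimal
 * ProviderFormatter for an OpenAI-compatible API. The response shape assumed
 * here (`choices[0].message.tool_calls`, `finish_reason`) follows OpenAI's
 * chat completions format, and the field names used on ToolDefinition,
 * UnifiedToolCall, and UnifiedToolResult (`inputSchema`, `id`, `name`,
 * `arguments`, `toolCallId`, `result`) are assumptions inferred from the
 * shapes declared in this file — not guarantees of this SDK.
 *
 *   const openaiLikeFormatter: ProviderFormatter = {
 *     transformTools: (tools) =>
 *       tools.map((t: any) => ({
 *         type: 'function',
 *         function: { name: t.name, description: t.description, parameters: t.inputSchema },
 *       })),
 *     parseToolCalls: (response) => {
 *       const msg = (response as any).choices?.[0]?.message;
 *       return (msg?.tool_calls ?? []).map((c: any) => ({
 *         id: c.id,
 *         name: c.function.name,
 *         arguments: JSON.parse(c.function.arguments || '{}'),
 *       }));
 *     },
 *     formatToolResults: (results) =>
 *       results.map((r: any) => ({ role: 'tool', tool_call_id: r.toolCallId, content: JSON.stringify(r.result) })),
 *     isToolUseStop: (r) => (r as any).choices?.[0]?.finish_reason === 'tool_calls',
 *     isEndTurnStop: (r) => (r as any).choices?.[0]?.finish_reason === 'stop',
 *     getStopReason: (r) => (r as any).choices?.[0]?.finish_reason ?? 'unknown',
 *     extractTextContent: (r) => (r as any).choices?.[0]?.message?.content ?? '',
 *     buildAssistantToolMessage: (toolCalls, textContent) => ({
 *       role: 'assistant',
 *       content: textContent ?? null,
 *       tool_calls: toolCalls.map((c: any) => ({
 *         id: c.id,
 *         type: 'function',
 *         function: { name: c.name, arguments: JSON.stringify(c.arguments) },
 *       })),
 *     }),
 *     buildToolResultMessage: (results) =>
 *       results.map((r: any) => ({ role: 'tool', tool_call_id: r.toolCallId, content: JSON.stringify(r.result) })),
 *   };
 */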
/**
 * Anthropic tool definition format
 */
interface AnthropicTool {
    name: string;
    description: string;
    input_schema: {
        type: "object";
        properties: Record<string, unknown>;
        required?: string[];
    };
}
/**
 * Anthropic tool_use block from response
 */
interface AnthropicToolUse {
    type: "tool_use";
    id: string;
    name: string;
    input: Record<string, unknown>;
}
/**
 * Anthropic tool_result block
 */
interface AnthropicToolResult {
    type: "tool_result";
    tool_use_id: string;
    content: string;
}
/**
 * OpenAI tool definition format
 */
interface OpenAITool {
    type: "function";
    function: {
        name: string;
        description: string;
        parameters: {
            type: "object";
            properties: Record<string, unknown>;
            required?: string[];
        };
    };
}
/**
 * OpenAI tool call from response
 */
interface OpenAIToolCall {
    id: string;
    type: "function";
    function: {
        name: string;
        arguments: string;
    };
}
/**
 * OpenAI tool result message
 */
interface OpenAIToolResult {
    role: "tool";
    tool_call_id: string;
    content: string;
}
/**
 * Google Gemini function declaration
 */
interface GeminiFunctionDeclaration {
    name: string;
    description: string;
    parameters?: {
        type: "object";
        properties: Record<string, unknown>;
        required?: string[];
    };
}
/**
 * Gemini function call from response
 */
interface GeminiFunctionCall {
    name: string;
    args: Record<string, unknown>;
}
/**
 * Gemini function response
 */
interface GeminiFunctionResponse {
    name: string;
    response: Record<string, unknown>;
}
/**
 * Capabilities of a model for UI feature flags
 * UI components can use this to enable/disable features
 */
interface ProviderCapabilities {
    /** Supports image inputs */
    supportsVision: boolean;
    /** Supports tool/function calling */
    supportsTools: boolean;
    /** Supports extended thinking (Claude, DeepSeek) */
    supportsThinking: boolean;
    /** Supports streaming responses */
    supportsStreaming: boolean;
    /** Supports PDF document inputs */
    supportsPDF: boolean;
    /** Supports audio inputs */
    supportsAudio: boolean;
    /** Supports video inputs */
    supportsVideo: boolean;
    /** Maximum context tokens */
    maxTokens: number;
    /** Supported image MIME types */
    supportedImageTypes: string[];
    /** Supported audio MIME types */
    supportedAudioTypes?: string[];
    /** Supported video MIME types */
    supportedVideoTypes?: string[];
    /** Supports JSON mode / structured output */
    supportsJsonMode?: boolean;
    /** Supports system messages */
    supportsSystemMessages?: boolean;
}
/**
 * AI Provider interface
 *
 * Wraps existing LLMAdapter with additional metadata:
 * - Supported models list
 * - Per-model capabilities
 * - Provider name
 *
 * @example
 * ```typescript
 * const openai = createOpenAI({ apiKey: '...' });
 *
 * // Get adapter for a specific model
 * const adapter = openai.languageModel('gpt-4o');
 *
 * // Check capabilities
 * const caps = openai.getCapabilities('gpt-4o');
 * if (caps.supportsVision) {
 *   // Show image upload button
 * }
 * ```
 */
interface AIProvider {
    /** Provider name (e.g., 'openai', 'anthropic') */
    readonly name: string;
    /** List of supported model IDs */
    readonly supportedModels: string[];
    /**
     * Get a language model adapter for the given model ID
     * Returns the existing LLMAdapter interface - no breaking changes
     */
    languageModel(modelId: string): LLMAdapter;
    /**
     * Get capabilities for a specific model
     * UI components use this to enable/disable features
     */
    getCapabilities(modelId: string): ProviderCapabilities;
    /**
     * Optional: Get an embedding model (future expansion)
     */
    embeddingModel?(modelId: string): EmbeddingModel;
}
/**
 * Embedding model interface (for future expansion)
 */
interface EmbeddingModel {
    readonly provider: string;
    readonly modelId: string;
    embed(texts: string[]): Promise<number[][]>;
}
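/*
 * Illustrative sketch (not part of the published typings): an EmbeddingModel
 * backed by OpenAI's public embeddings endpoint. The endpoint URL, request
 * body, and response shape follow OpenAI's documented API; the factory
 * function and its `apiKey` parameter are hypothetical scaffolding for this
 * sketch.
 *
 *   function makeOpenAIEmbeddings(apiKey: string, modelId = 'text-embedding-3-small'): EmbeddingModel {
 *     return {
 *       provider: 'openai',
 *       modelId,
 *       async embed(texts) {
 *         const res = await fetch('https://api.openai.com/v1/embeddings', {
 *           method: 'POST',
 *           headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${apiKey}` },
 *           body: JSON.stringify({ model: modelId, input: texts }),
 *         });
 *         const json = await res.json();
 *         // OpenAI returns { data: [{ embedding: number[] }, ...] }
 *         return json.data.map((d: { embedding: number[] }) => d.embedding);
 *       },
 *     };
 *   }
 */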
/**
 * Base provider configuration
 */
interface BaseProviderConfig {
    /** API key (falls back to environment variable) */
    apiKey?: string;
    /** Custom base URL */
    baseUrl?: string;
    /** Request timeout in milliseconds */
    timeout?: number;
    /** Custom headers to include */
    headers?: Record<string, string>;
}
/**
 * OpenAI provider configuration
 */
interface OpenAIProviderConfig extends BaseProviderConfig {
    /** OpenAI organization ID */
    organization?: string;
    /** OpenAI project ID */
    project?: string;
    /** Vision detail level for images */
    imageDetail?: "auto" | "low" | "high";
}
/**
 * Anthropic provider configuration
 */
interface AnthropicProviderConfig extends BaseProviderConfig {
    /** Extended thinking budget in tokens (minimum 1024) */
    thinkingBudget?: number;
    /** Enable prompt caching */
    cacheControl?: boolean;
}
/**
 * Google provider configuration
 */
interface GoogleProviderConfig extends BaseProviderConfig {
    /** Safety settings */
    safetySettings?: GoogleSafetySetting[];
    /** Grounding configuration (for web search) */
    groundingConfig?: GoogleGroundingConfig;
}
/**
 * Google safety setting
 */
interface GoogleSafetySetting {
    category: "HARM_CATEGORY_HARASSMENT" | "HARM_CATEGORY_HATE_SPEECH" | "HARM_CATEGORY_SEXUALLY_EXPLICIT" | "HARM_CATEGORY_DANGEROUS_CONTENT";
    threshold: "BLOCK_NONE" | "BLOCK_LOW_AND_ABOVE" | "BLOCK_MEDIUM_AND_ABOVE" | "BLOCK_HIGH_AND_ABOVE";
}
/**
 * Google grounding configuration
 */
interface GoogleGroundingConfig {
    /** Enable Google Search grounding */
    googleSearchRetrieval?: boolean;
}
/**
 * xAI provider configuration
 */
interface XAIProviderConfig extends BaseProviderConfig {
}
/**
 * Azure OpenAI provider configuration
 */
interface AzureProviderConfig extends BaseProviderConfig {
    /** Azure resource name */
    resourceName: string;
    /** Deployment name */
    deploymentName: string;
    /** API version (default: 2024-02-15-preview) */
    apiVersion?: string;
}
/**
 * Groq provider configuration
 */
interface GroqProviderConfig extends BaseProviderConfig {
}
/**
 * Ollama provider configuration
 */
interface OllamaProviderConfig extends BaseProviderConfig {
}

/**
 * Runtime configuration with LLM config
 */
interface RuntimeConfigWithLLM {
    /** LLM configuration */
    llm: LLMConfig & {
        apiKey: string;
    };
    /** Custom LLM adapter (overrides llm config) */
    adapter?: LLMAdapter;
    /** System prompt */
    systemPrompt?: string;
    /** Available actions (legacy) */
    actions?: ActionDefinition[];
    /** Available tools (new - supports location: server/client) */
    tools?: ToolDefinition[];
    /** Agent loop configuration */
    agentLoop?: AgentLoopConfig;
    /** Knowledge base configuration (enables search_knowledge tool) */
    knowledgeBase?: KnowledgeBaseConfig;
    /** Enable debug logging */
    debug?: boolean;
    /**
     * Custom context data passed to all tool handlers.
     * Useful for passing auth tokens, user info, tenant data, etc.
     *
     * @example
     * ```typescript
     * const runtime = createRuntime({
     *   llm: { ... },
     *   toolContext: { userId: session.userId, tenantId: tenant.id },
     * });
     * ```
     */
    toolContext?: Record<string, unknown>;
}
/**
 * Runtime configuration with adapter
 */
interface RuntimeConfigWithAdapter {
    /** Custom LLM adapter */
    adapter: LLMAdapter;
    /** LLM configuration (optional when adapter provided) */
    llm?: LLMConfig & {
        apiKey: string;
    };
    /** System prompt */
    systemPrompt?: string;
    /** Available actions (legacy) */
    actions?: ActionDefinition[];
    /** Available tools (new - supports location: server/client) */
    tools?: ToolDefinition[];
    /** Agent loop configuration */
    agentLoop?: AgentLoopConfig;
    /** Knowledge base configuration (enables search_knowledge tool) */
    knowledgeBase?: KnowledgeBaseConfig;
    /** Enable debug logging */
    debug?: boolean;
    /** Custom context data passed to all tool handlers */
    toolContext?: Record<string, unknown>;
}
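/*
 * Illustrative sketch (not part of the published typings): the adapter form
 * of RuntimeConfig, bringing your own LLMAdapter instead of an `llm` block.
 * createOpenAIAdapter is re-exported from this package's adapters module; the
 * exact shape of its config argument is an assumption of this sketch.
 *
 *   import { createOpenAIAdapter } from '@yourgpt/llm-sdk';
 *
 *   const adapter = createOpenAIAdapter({ apiKey: process.env.OPENAI_API_KEY! });
 *   const config: RuntimeConfigWithAdapter = {
 *     adapter,
 *     systemPrompt: 'You are a helpful assistant.',
 *     debug: true,
 *   };
 */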
/**
 * Runtime configuration with AIProvider
 *
 * @example
 * ```typescript
 * import { createOpenAI } from '@yourgpt/llm-sdk';
 *
 * const openai = createOpenAI({ apiKey: '...' });
 * const runtime = createRuntime({
 *   provider: openai,
 *   model: 'gpt-4o',
 * });
 * ```
 */
interface RuntimeConfigWithProvider {
    /** AI Provider instance */
    provider: AIProvider;
    /** Model ID to use (required when using provider) */
    model: string;
    /** System prompt */
    systemPrompt?: string;
    /** Available actions (legacy) */
    actions?: ActionDefinition[];
    /** Available tools (new - supports location: server/client) */
    tools?: ToolDefinition[];
    /** Agent loop configuration */
    agentLoop?: AgentLoopConfig;
    /** Knowledge base configuration (enables search_knowledge tool) */
    knowledgeBase?: KnowledgeBaseConfig;
    /** Enable debug logging */
    debug?: boolean;
    /** Custom context data passed to all tool handlers */
    toolContext?: Record<string, unknown>;
}
/**
 * Runtime configuration - either provide llm config, adapter, or provider
 */
type RuntimeConfig = RuntimeConfigWithLLM | RuntimeConfigWithAdapter | RuntimeConfigWithProvider;
/**
 * Message attachment (images, files, etc.)
 */
interface MessageAttachment {
    type: "image" | "file" | "audio" | "video";
    data: string;
    mimeType?: string;
    filename?: string;
}
/**
 * Chat request body
 */
interface ChatRequest {
    /** Conversation messages */
    messages: Array<{
        role: string;
        content: string;
        /** Attachments like images (for vision support) */
        attachments?: MessageAttachment[];
        /** Tool call ID (for tool result messages) */
        tool_call_id?: string;
        /** Tool calls from assistant (for continuing agent loop) */
        tool_calls?: Array<{
            id: string;
            type: string;
            function: {
                name: string;
                arguments: string;
            };
        }>;
    }>;
    /** Thread/conversation ID */
    threadId?: string;
    /** Bot ID (for cloud) */
    botId?: string;
    /** LLM config overrides */
    config?: Partial<LLMConfig>;
    /** System prompt override */
    systemPrompt?: string;
    /** Actions from client (legacy) */
    actions?: Array<{
        name: string;
        description: string;
        parameters?: Record<string, unknown>;
    }>;
    /** Tools from client (location is always "client" for tools from request) */
    tools?: Array<{
        name: string;
        description: string;
        inputSchema: {
            type: "object";
            properties: Record<string, unknown>;
            required?: string[];
        };
    }>;
    /** Enable agentic loop mode */
    useAgentLoop?: boolean;
    /** Enable streaming responses (default: true). Set to false for non-streaming mode. */
    streaming?: boolean;
    /** Knowledge Base configuration (enables search_knowledge tool) */
    knowledgeBase?: {
        /** Project UID for the knowledge base */
        projectUid: string;
        /** Auth token for API calls */
        token: string;
        /** App ID (default: "1") */
        appId?: string;
        /** Results limit (default: 5) */
        limit?: number;
    };
}
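/*
 * Illustrative sketch (not part of the published typings): a minimal
 * ChatRequest body as a client might POST it to the chat endpoint. All values
 * are hypothetical; the field names come from the ChatRequest type above.
 *
 *   const body: ChatRequest = {
 *     messages: [{ role: 'user', content: 'What is the weather in Paris?' }],
 *     tools: [{
 *       name: 'get_weather',
 *       description: 'Look up current weather for a city',
 *       inputSchema: {
 *         type: 'object',
 *         properties: { city: { type: 'string' } },
 *         required: ['city'],
 *       },
 *     }],
 *     useAgentLoop: true,
 *     streaming: true,
 *   };
 */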
/**
 * Action execution request
 */
interface ActionRequest {
    /** Action name */
    name: string;
    /** Action arguments */
    args: Record<string, unknown>;
}
/**
 * Request context
 */
interface RequestContext {
    /** Request headers */
    headers: Record<string, string>;
    /** Parsed request body */
    body: ChatRequest;
}

/**
 * Copilot SDK Runtime
 *
 * Handles chat requests and manages LLM interactions.
 */
declare class Runtime {
    private adapter;
    private config;
    private actions;
    private tools;
    constructor(config: RuntimeConfig);
    /**
     * Create LLM adapter based on config
     */
    private createAdapter;
    /**
     * Process a chat request and return streaming response
     */
    processChat(request: ChatRequest, signal?: AbortSignal): AsyncGenerator<StreamEvent>;
    /**
     * Handle HTTP request (for use with any framework)
     */
    handleRequest(request: Request): Promise<Response>;
    /**
     * Handle non-streaming request - returns JSON instead of SSE
     */
    private handleNonStreamingRequest;
    /**
     * Get registered actions
     */
    getActions(): ActionDefinition[];
    /**
     * Register a new action
     */
    registerAction(action: ActionDefinition): void;
    /**
     * Unregister an action
     */
    unregisterAction(name: string): void;
    /**
     * Register a new tool
     */
    registerTool(tool: ToolDefinition): void;
    /**
     * Unregister a tool
     */
    unregisterTool(name: string): void;
    /**
     * Get registered tools
     */
    getTools(): ToolDefinition[];
    /**
     * Get the AI provider name from config
     */
    private getProviderName;
    /**
     * Get the AI provider instance (if using provider config)
     */
    getProvider(): AIProvider | null;
    /**
     * Get the current model ID
     */
    getModel(): string;
    /**
     * Process a chat request with tool support (Vercel AI SDK pattern)
     *
     * This method:
     * 1. Streams the response from the adapter
     * 2. Detects tool calls from streaming events
     * 3. Executes server-side tools immediately
     * 4. Yields client-side tool calls for the client to execute
     * 5. Continues the loop until there are no more tool calls or max iterations is reached
     * 6. Returns all new messages in the done event for the client to append
     */
    processChatWithLoop(request: ChatRequest, signal?: AbortSignal, _accumulatedMessages?: DoneEventMessage[], _isRecursive?: boolean, _httpRequest?: Request): AsyncGenerator<StreamEvent>;
    /**
     * Non-streaming agent loop implementation
     *
     * Uses adapter.complete() instead of stream() for:
     * - Better comparison with original studio-ai behavior
     * - Easier debugging (full response at once)
     * - More predictable retry behavior
     */
    private processChatWithLoopNonStreaming;
    /**
     * Convert tools to legacy action format (for adapter compatibility)
     */
    private convertToolsToActions;
    /**
     * Convert a JSON Schema property to ActionParameter format recursively
     */
    private convertSchemaProperty;
    /**
     * Convert JSON Schema to legacy parameters format
     */
    private convertInputSchemaToParameters;
}
/**
 * Create runtime instance
 */
declare function createRuntime(config: RuntimeConfig): Runtime;
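/*
 * Illustrative sketch (not part of the published typings): creating a runtime
 * and serving it from any fetch-style host. Bun.serve is used here only as a
 * convenient Request/Response server; any framework that hands you a standard
 * Request can delegate to runtime.handleRequest the same way.
 *
 *   import { createRuntime, createOpenAI } from '@yourgpt/llm-sdk';
 *
 *   const runtime = createRuntime({
 *     provider: createOpenAI({ apiKey: process.env.OPENAI_API_KEY! }),
 *     model: 'gpt-4o',
 *     systemPrompt: 'You are a helpful assistant.',
 *   });
 *
 *   Bun.serve({
 *     port: 3001,
 *     fetch: (req) => runtime.handleRequest(req),
 *   });
 */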

/**
 * Create SSE response headers
 */
declare function createSSEHeaders(): Record<string, string>;
/**
 * Format event as SSE data
 */
declare function formatSSEData(event: StreamEvent): string;
/**
 * Create a ReadableStream from an async generator of events
 */
declare function createEventStream(generator: AsyncGenerator<StreamEvent>): ReadableStream<Uint8Array>;
/**
 * Create SSE Response object
 */
declare function createSSEResponse(generator: AsyncGenerator<StreamEvent>): Response;
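/*
 * Illustrative sketch (not part of the published typings): wiring a runtime's
 * event stream into an SSE response by hand, to show how the helpers compose.
 * createSSEResponse does the equivalent in one call. Assumes a `runtime`
 * instance is in scope.
 *
 *   async function chatHandler(req: Request): Promise<Response> {
 *     const body = (await req.json()) as ChatRequest;
 *     const events = runtime.processChat(body); // AsyncGenerator<StreamEvent>
 *     return new Response(createEventStream(events), {
 *       headers: createSSEHeaders(),
 *     });
 *   }
 *
 *   // Or simply:
 *   //   return createSSEResponse(runtime.processChat(body));
 */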

/**
 * Create Hono app with chat endpoint
 */
declare function createHonoApp(runtime: Runtime): Hono;
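/*
 * Illustrative sketch (not part of the published typings): serving the Hono
 * app returned by createHonoApp from Node. The chat route it registers is not
 * documented in these typings; the serve() call follows @hono/node-server's
 * public API, which is an extra dependency assumed by this sketch.
 *
 *   import { serve } from '@hono/node-server';
 *
 *   const app = createHonoApp(runtime);
 *   serve({ fetch: app.fetch, port: 3001 });
 */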
/**
 * Next.js App Router handler
 *
 * @example
 * ```ts
 * // app/api/chat/route.ts
 * import { createNextHandler } from '@yourgpt/llm-sdk';
 *
 * const handler = createNextHandler({
 *   llm: { provider: 'openai', apiKey: process.env.OPENAI_API_KEY! },
 * });
 *
 * export const POST = handler;
 * ```
 */
declare function createNextHandler(config: RuntimeConfig): (request: Request) => Promise<Response>;
/**
 * Express middleware
 *
 * @example
 * ```ts
 * import express from 'express';
 * import { createExpressMiddleware } from '@yourgpt/llm-sdk';
 *
 * const app = express();
 *
 * app.use('/api/chat', createExpressMiddleware({
 *   llm: { provider: 'openai', apiKey: process.env.OPENAI_API_KEY! },
 * }));
 * ```
 */
declare function createExpressMiddleware(config: RuntimeConfig): (req: {
    method: string;
    url: string;
    headers: Record<string, string>;
    body: unknown;
}, res: {
    status: (code: number) => {
        json: (data: unknown) => void;
    };
    setHeader: (name: string, value: string) => void;
    write: (data: string) => void;
    end: () => void;
}) => Promise<void>;
/**
 * Node.js HTTP handler
 *
 * @example
 * ```ts
 * import http from 'http';
 * import { createNodeHandler } from '@yourgpt/llm-sdk';
 *
 * const handler = createNodeHandler({
 *   llm: { provider: 'openai', apiKey: process.env.OPENAI_API_KEY! },
 * });
 *
 * const server = http.createServer(handler);
 * server.listen(3001);
 * ```
 */
declare function createNodeHandler(config: RuntimeConfig): (request: Request, Env?: unknown, executionCtx?: hono.ExecutionContext) => Response | Promise<Response>;

/**
 * Agent Loop Implementation
 *
 * Server-side agentic loop that:
 * 1. Calls the LLM with tools
 * 2. Parses tool calls from the response
 * 3. Executes server-side tools or requests client execution
 * 4. Loops until the LLM returns end_turn or max iterations is reached
 *
 * Streams events to the client via SSE for real-time updates
 */

/** Default maximum iterations */
declare const DEFAULT_MAX_ITERATIONS = 20;
/**
 * Agent loop options
 */
interface AgentLoopOptions {
    /** Initial messages */
    messages: Message[];
    /** Available tools */
    tools: ToolDefinition[];
    /** System prompt */
    systemPrompt?: string;
    /** AI provider */
    provider: AIProvider$1;
    /** Abort signal for cancellation */
    signal?: AbortSignal;
    /** Loop configuration */
    config?: AgentLoopConfig;
    /**
     * LLM call function
     * Should call the LLM and return the raw response
     */
    callLLM: (messages: unknown[], tools: unknown[]) => Promise<unknown>;
    /**
     * Server-side tool executor
     * Called when a server-side tool needs to be executed
     */
    executeServerTool?: (name: string, args: Record<string, unknown>) => Promise<ToolResponse>;
    /**
     * Callback to wait for a client tool result
     * Called when a client-side tool needs to be executed
     * Should return a Promise that resolves when the client sends the result
     */
    waitForClientToolResult?: (toolCallId: string, name: string, args: Record<string, unknown>) => Promise<ToolResponse>;
}
/**
 * Run the agentic loop
 *
 * @yields Stream events for each step of the loop
 */
declare function runAgentLoop(options: AgentLoopOptions): AsyncGenerator<StreamEvent>;
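/*
 * Illustrative sketch (not part of the published typings): driving
 * runAgentLoop directly. In practice the Runtime wires this up for you. The
 * provider value (a provider-name string), the ToolResponse shape
 * ({ success, data, error }), and the helpers history, weatherTool,
 * openaiClient, and lookupWeather are all hypothetical assumptions of this
 * sketch, not documented behavior.
 *
 *   const events = runAgentLoop({
 *     messages: history,        // Message[] from the conversation so far
 *     tools: [weatherTool],     // ToolDefinition[]
 *     provider: 'openai' as any,
 *     callLLM: (messages, tools) =>
 *       openaiClient.chat.completions.create({ model: 'gpt-4o', messages, tools } as any),
 *     executeServerTool: async (name, args) => {
 *       if (name === 'get_weather') {
 *         return { success: true, data: await lookupWeather(args) } as any;
 *       }
 *       return { success: false, error: `Unknown tool: ${name}` } as any;
 *     },
 *   });
 *
 *   for await (const event of events) {
 *     // Forward each StreamEvent to the client (e.g., via formatSSEData)
 *   }
 */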

/**
 * Provider Registry
 *
 * Central registry for AI providers.
 * Allows dynamic registration and lookup of providers.
 */

/**
 * Register a provider factory
 *
 * @param name Provider name (e.g., 'openai')
 * @param factory Factory function that creates the provider
 *
 * @example
 * ```typescript
 * registerProvider('openai', (config) => createOpenAI(config));
 * ```
 */
declare function registerProvider(name: string, factory: (config?: Record<string, unknown>) => AIProvider): void;
/**
 * Get a provider by name
 *
 * @param name Provider name
 * @param config Optional configuration
 * @returns Provider instance or undefined if not found
 *
 * @example
 * ```typescript
 * const openai = getProvider('openai', { apiKey: '...' });
 * ```
 */
declare function getProvider(name: string, config?: Record<string, unknown>): AIProvider | undefined;
/**
 * Check if a provider is registered
 */
declare function hasProvider(name: string): boolean;
/**
 * List all registered provider names
 */
declare function listProviders(): string[];
/**
 * Get all available providers with their models
 * Useful for building UI model selectors
 */
declare function getAvailableProviders(): Array<{
    name: string;
    models: string[];
}>;
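/*
 * Illustrative sketch (not part of the published typings): flattening the
 * registry into options for a UI model selector, pairing each model with its
 * capabilities.
 *
 *   const options = getAvailableProviders().flatMap(({ name, models }) =>
 *     models.map((model) => ({
 *       label: `${name} / ${model}`,
 *       value: { provider: name, model },
 *       capabilities: getModelCapabilities(name, model),
 *     }))
 *   );
 */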
/**
 * Get capabilities for a specific provider and model
 */
declare function getModelCapabilities(providerName: string, modelId: string): ProviderCapabilities | undefined;

/**
 * OpenAI Provider
 *
 * Wraps the existing OpenAIAdapter with provider interface.
 * Adds model capabilities and metadata.
 */

/**
 * Create an OpenAI provider
 *
 * @example
 * ```typescript
 * const openai = createOpenAI({ apiKey: '...' });
 * const adapter = openai.languageModel('gpt-4o');
 * const caps = openai.getCapabilities('gpt-4o');
 * ```
 */
declare function createOpenAI(config?: OpenAIProviderConfig): AIProvider;

/**
 * Anthropic Provider
 *
 * Wraps the existing AnthropicAdapter with provider interface.
 * Adds model capabilities and metadata including extended thinking support.
 */

/**
 * Create an Anthropic provider
 *
 * @example
 * ```typescript
 * const anthropic = createAnthropic({
 *   apiKey: '...',
 *   thinkingBudget: 10000, // Enable extended thinking
 * });
 * const adapter = anthropic.languageModel('claude-sonnet-4-20250514');
 * const caps = anthropic.getCapabilities('claude-sonnet-4-20250514');
 * ```
 */
declare function createAnthropic(config?: AnthropicProviderConfig): AIProvider;

/**
 * Groq Provider
 *
 * Wraps the existing GroqAdapter with provider interface.
 * Groq provides fast inference for open-source models.
 */

/**
 * Create a Groq provider
 *
 * @example
 * ```typescript
 * const groq = createGroq({ apiKey: '...' });
 * const adapter = groq.languageModel('llama-3.3-70b-versatile');
 * ```
 */
declare function createGroq(config?: GroqProviderConfig): AIProvider;

/**
 * Ollama Provider
 *
 * Wraps the existing OllamaAdapter with provider interface.
 * Ollama runs models locally on your machine.
 */

/**
 * Create an Ollama provider
 *
 * @example
 * ```typescript
 * const ollama = createOllama({ baseUrl: 'http://localhost:11434' });
 * const adapter = ollama.languageModel('llama3');
 * ```
 */
declare function createOllama(config?: OllamaProviderConfig): AIProvider;

/**
 * Google Provider
 *
 * Wraps the GoogleAdapter with provider interface.
 * Adds model capabilities and metadata for multimodal support.
 *
 * Features:
 * - Vision (images)
 * - Audio input
 * - Video input
 * - PDF documents
 * - Tools/Function calling
 * - Massive context windows (up to 2M tokens)
 */

/**
 * Create a Google provider
 *
 * @example
 * ```typescript
 * const google = createGoogle({
 *   apiKey: '...',
 * });
 * const adapter = google.languageModel('gemini-2.0-flash');
 * const caps = google.getCapabilities('gemini-2.0-flash');
 *
 * if (caps.supportsVideo) {
 *   // Show video upload button
 * }
 * ```
 */
declare function createGoogle(config?: GoogleProviderConfig): AIProvider;

/**
 * xAI Provider
 *
 * Wraps the XAIAdapter with provider interface.
 * xAI's Grok models support vision and tool calling.
 *
 * Features:
 * - Vision (images)
 * - Tools/Function calling
 * - Real-time information (trained on X/Twitter data)
 */

/**
 * Create an xAI provider
 *
 * @example
 * ```typescript
 * const xai = createXAI({
 *   apiKey: '...',
 * });
 * const adapter = xai.languageModel('grok-2');
 * const caps = xai.getCapabilities('grok-2');
 * ```
 */
declare function createXAI(config?: XAIProviderConfig): AIProvider;

/**
 * Azure OpenAI Provider
 *
 * Wraps the AzureAdapter with provider interface.
 * Azure OpenAI provides enterprise-grade OpenAI models with Azure security.
 *
 * Features:
 * - Vision (for supported deployments)
 * - Tools/Function calling
 * - Enterprise security & compliance
 * - Private networking options
 *
 * Note: Capabilities depend on which model is deployed behind a deployment
 * name, not on a model ID. The provider attempts to detect capabilities from
 * the deployment name.
 */

/**
 * Create an Azure OpenAI provider
 *
 * @example
 * ```typescript
 * const azure = createAzure({
 *   apiKey: '...',
 *   resourceName: 'my-azure-resource',
 *   deploymentName: 'gpt-4o-deployment',
 * });
 * const adapter = azure.languageModel('gpt-4o-deployment');
 * const caps = azure.getCapabilities('gpt-4o-deployment');
 * ```
 */
declare function createAzure(config: AzureProviderConfig): AIProvider;

/**
 * OpenAI Provider Formatter
 *
 * Transformation functions for the OpenAI API format.
 * Also used by xAI/Grok (they use an OpenAI-compatible format).
 */

/**
 * OpenAI provider formatter
 */
declare const openaiFormatter: ProviderFormatter;

/**
 * Anthropic/Claude Provider Formatter
 *
 * Transformation functions for the Anthropic API format
 */

/**
 * Anthropic provider formatter
 */
declare const anthropicFormatter: ProviderFormatter;

/**
 * Google Gemini Provider Formatter
 *
 * Transformation functions for the Google Gemini API format
 */

/**
 * Gemini provider formatter
 */
declare const geminiFormatter: ProviderFormatter;

/**
 * Provider Formatter Registry
 *
 * Maps provider names to their formatters for the agent loop.
 * Formatters handle tool transformations between the unified format and provider-specific formats.
 */

/**
 * Get a formatter for a specific provider
 *
 * @param provider - Provider name (e.g., 'openai', 'anthropic', 'google')
 * @returns The provider's formatter
 * @throws Error if provider is not supported
 *
 * @example
 * ```typescript
 * const formatter = getFormatter('openai');
 * const tools = formatter.transformTools(unifiedTools);
 * ```
 */
declare function getFormatter(provider: string): ProviderFormatter;
/**
 * Check if a provider is supported
 *
 * @param provider - Provider name to check
 * @returns True if provider has a formatter
 *
 * @example
 * ```typescript
 * if (isProviderSupported('openai')) {
 *   // Use the provider
 * }
 * ```
 */
declare function isProviderSupported(provider: string): boolean;
/**
 * Get list of supported providers
 *
 * @returns Array of supported provider names
 *
 * @example
 * ```typescript
 * const providers = getSupportedProviders();
 * // ['openai', 'anthropic', 'google', ...]
 * ```
 */
declare function getSupportedProviders(): string[];

export { type AIProvider, type ActionRequest, type AgentLoopOptions, type AnthropicProviderConfig, type AnthropicTool, type AnthropicToolResult, type AnthropicToolUse, type AzureProviderConfig, type BaseProviderConfig, type ChatRequest, DEFAULT_MAX_ITERATIONS, type GeminiFunctionCall, type GeminiFunctionDeclaration, type GeminiFunctionResponse, type GoogleProviderConfig, type GroqProviderConfig, LLMAdapter, type OllamaProviderConfig, type OpenAIProviderConfig, type OpenAITool, type OpenAIToolCall, type OpenAIToolResult, type ProviderCapabilities, type ProviderFormatter, type RequestContext, Runtime, type RuntimeConfig, type XAIProviderConfig, anthropicFormatter, createAnthropic, createAzure, createEventStream, createExpressMiddleware, createGoogle, createGroq, createHonoApp, createNextHandler, createNodeHandler, createOllama, createOpenAI, createRuntime, createSSEHeaders, createSSEResponse, createXAI, formatSSEData, geminiFormatter, getAvailableProviders, getFormatter, getModelCapabilities, getProvider, getSupportedProviders, hasProvider, isProviderSupported, listProviders, openaiFormatter, registerProvider, runAgentLoop };