@yourgpt/llm-sdk 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +74 -0
- package/dist/adapters/index.d.mts +497 -0
- package/dist/adapters/index.d.ts +497 -0
- package/dist/adapters/index.js +1642 -0
- package/dist/adapters/index.js.map +1 -0
- package/dist/adapters/index.mjs +1616 -0
- package/dist/adapters/index.mjs.map +1 -0
- package/dist/index.d.mts +1048 -0
- package/dist/index.d.ts +1048 -0
- package/dist/index.js +4216 -0
- package/dist/index.js.map +1 -0
- package/dist/index.mjs +4170 -0
- package/dist/index.mjs.map +1 -0
- package/package.json +83 -0
package/README.md
ADDED
@@ -0,0 +1,74 @@
# @yourgpt/llm-sdk

Multi-provider LLM integration for Copilot SDK.

## Installation

```bash
npm install @yourgpt/llm-sdk
```

## Quick Start

```typescript
import { createRuntime, createOpenAI } from "@yourgpt/llm-sdk";

const openai = createOpenAI({
  apiKey: process.env.OPENAI_API_KEY,
});

const runtime = createRuntime({
  provider: openai,
  model: "gpt-4o",
  systemPrompt: "You are a helpful assistant.",
});

// Next.js App Router
export async function POST(request: Request) {
  return runtime.handleRequest(request);
}
```

## Supported Providers

| Provider      | Factory             |
| ------------- | ------------------- |
| OpenAI        | `createOpenAI()`    |
| Anthropic     | `createAnthropic()` |
| Google Gemini | `createGoogle()`    |
| Groq          | `createGroq()`      |
| Ollama        | `createOllama()`    |
| xAI (Grok)    | `createXAI()`       |
| Azure OpenAI  | `createAzure()`     |
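
Every factory plugs into `createRuntime` the same way. A minimal sketch swapping in Anthropic — the config shape is assumed to mirror `createOpenAI`, and the model id is illustrative:

```typescript
import { createRuntime, createAnthropic } from "@yourgpt/llm-sdk";

const runtime = createRuntime({
  provider: createAnthropic({ apiKey: process.env.ANTHROPIC_API_KEY }),
  model: "claude-3-5-sonnet-latest", // illustrative model id
  systemPrompt: "You are a helpful assistant.",
});
```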

## Framework Integrations

```typescript
// Next.js
import { createNextHandler } from "@yourgpt/llm-sdk";

// Express
import { createExpressMiddleware } from "@yourgpt/llm-sdk";

// Hono
import { createHonoApp } from "@yourgpt/llm-sdk";

// Node.js HTTP
import { createNodeHandler } from "@yourgpt/llm-sdk";
```

## Features

- Multi-provider support (7 LLM providers)
- Streaming responses (SSE)
- Agent loop for multi-step tool execution (see the sketch below)
- Framework-agnostic with built-in integrations
- TypeScript-first
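
A minimal sketch of the agent loop with one tool. The `actions` option on `createRuntime` and the handler shape are assumptions based on the exported `ActionDefinition` type, not confirmed by this README:

```typescript
import { createRuntime, createOpenAI } from "@yourgpt/llm-sdk";

const runtime = createRuntime({
  provider: createOpenAI({ apiKey: process.env.OPENAI_API_KEY }),
  model: "gpt-4o",
  systemPrompt: "You are a helpful assistant.",
  // Hypothetical option: tools the agent loop can call across steps.
  actions: [
    {
      name: "get_weather",
      description: "Get the current weather for a city",
      parameters: {
        type: "object",
        properties: { city: { type: "string" } },
        required: ["city"],
      },
      // Hypothetical handler invoked when the model requests the tool.
      handler: async ({ city }: { city: string }) => ({ city, tempC: 21 }),
    },
  ],
});
```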

## Documentation

Visit [copilot-sdk.yourgpt.ai](https://copilot-sdk.yourgpt.ai)

## License

MIT
package/dist/adapters/index.d.mts
ADDED
@@ -0,0 +1,497 @@
import { Message, ActionDefinition, LLMConfig, StreamEvent, MessageAttachment } from '@yourgpt/copilot-sdk/core';

/**
 * Chat completion request
 */
interface ChatCompletionRequest {
    /** Conversation messages */
    messages: Message[];
    /**
     * Raw provider-formatted messages (for the agent loop with tool calls).
     * When provided, these are used instead of converting from Message[];
     * this allows passing messages with tool_calls and the tool role.
     */
    rawMessages?: Array<Record<string, unknown>>;
    /** Available actions/tools */
    actions?: ActionDefinition[];
    /** System prompt */
    systemPrompt?: string;
    /** LLM configuration overrides */
    config?: Partial<LLMConfig>;
    /** Abort signal for cancellation */
    signal?: AbortSignal;
}
/**
 * Non-streaming completion result
 */
interface CompletionResult {
    /** Text content */
    content: string;
    /** Tool calls */
    toolCalls: Array<{
        id: string;
        name: string;
        args: Record<string, unknown>;
    }>;
    /** Thinking content (if extended thinking is enabled) */
    thinking?: string;
    /** Raw provider response for debugging */
    rawResponse: Record<string, unknown>;
}
/**
 * Base LLM adapter interface
 */
interface LLMAdapter {
    /** Provider name */
    readonly provider: string;
    /** Model name */
    readonly model: string;
    /**
     * Stream a chat completion
     */
    stream(request: ChatCompletionRequest): AsyncGenerator<StreamEvent>;
    /**
     * Non-streaming chat completion (for debugging/comparison)
     */
    complete?(request: ChatCompletionRequest): Promise<CompletionResult>;
}
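// --- Editorial sketch (not part of the package): driving any adapter
// through the LLMAdapter interface. The Message and StreamEvent shapes
// live in @yourgpt/copilot-sdk/core and are not shown in this diff, so
// the message literal below is an assumption.
declare const adapter: LLMAdapter;
async function runOnce(): Promise<void> {
    const controller = new AbortController();
    for await (const event of adapter.stream({
        messages: [{ role: "user", content: "Hello!" } as unknown as Message],
        systemPrompt: "You are a helpful assistant.",
        signal: controller.signal, // abort to cancel mid-stream
    })) {
        console.log(event); // exact StreamEvent variants are defined in core
    }
}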
/**
 * Adapter factory function type
 */
type AdapterFactory = (config: LLMConfig) => LLMAdapter;
/**
 * Convert messages to provider format (simple text only)
 */
declare function formatMessages(messages: Message[], systemPrompt?: string): Array<{
    role: string;
    content: string;
}>;
/**
 * Convert actions to OpenAI tool format
 */
declare function formatTools(actions: ActionDefinition[]): Array<{
    type: "function";
    function: {
        name: string;
        description: string;
        parameters: object;
    };
}>;
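// --- Editorial sketch: what formatTools produces. The input assumes
// ActionDefinition carries name/description/parameters (mirroring the
// output shape); the core type itself is not shown in this diff.
//
// const tools = formatTools([{
//     name: "search_docs",
//     description: "Search the documentation",
//     parameters: {
//         type: "object",
//         properties: { query: { type: "string" } },
//         required: ["query"],
//     },
// }]);
// // => [{ type: "function",
// //       function: { name: "search_docs",
// //                   description: "Search the documentation",
// //                   parameters: { type: "object", ... } } }]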
/**
 * Content block types for multimodal messages
 */
type AnthropicContentBlock = {
    type: "text";
    text: string;
} | {
    type: "image";
    source: {
        type: "base64";
        media_type: string;
        data: string;
    } | {
        type: "url";
        url: string;
    };
} | {
    type: "document";
    source: {
        type: "base64";
        media_type: string;
        data: string;
    } | {
        type: "url";
        url: string;
    };
};
type OpenAIContentBlock = {
    type: "text";
    text: string;
} | {
    type: "image_url";
    image_url: {
        url: string;
        detail?: "low" | "high" | "auto";
    };
};
/**
 * Check if a message has image attachments.
 * Supports both the new format (metadata.attachments) and the legacy one (attachments).
 */
declare function hasImageAttachments(message: Message): boolean;
/**
 * Check if a message has media attachments (images or PDFs)
 */
declare function hasMediaAttachments(message: Message): boolean;
/**
 * Convert MessageAttachment to Anthropic image content block
 *
 * Anthropic format:
 * {
 *   type: "image",
 *   source: {
 *     type: "base64",
 *     media_type: "image/png",
 *     data: "base64data..."
 *   }
 * }
 */
declare function attachmentToAnthropicImage(attachment: MessageAttachment): AnthropicContentBlock | null;
/**
 * Convert MessageAttachment to OpenAI image_url content block
 *
 * OpenAI format:
 * {
 *   type: "image_url",
 *   image_url: {
 *     url: "data:image/png;base64,..."
 *   }
 * }
 */
declare function attachmentToOpenAIImage(attachment: MessageAttachment): OpenAIContentBlock | null;
/**
 * Convert MessageAttachment (PDF) to Anthropic document content block
 *
 * Anthropic format:
 * {
 *   type: "document",
 *   source: {
 *     type: "base64",
 *     media_type: "application/pdf",
 *     data: "base64data..."
 *   }
 * }
 */
declare function attachmentToAnthropicDocument(attachment: MessageAttachment): AnthropicContentBlock | null;
/**
 * Convert a Message to Anthropic multimodal content blocks
 */
declare function messageToAnthropicContent(message: Message): string | AnthropicContentBlock[];
/**
 * Convert a Message to OpenAI multimodal content blocks
 */
declare function messageToOpenAIContent(message: Message): string | OpenAIContentBlock[];
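// --- Editorial sketch: the same PNG attachment rendered as each provider's
// block type (values illustrative; the MessageAttachment input shape lives
// in core and is not shown in this diff).
const anthropicImage: AnthropicContentBlock = {
    type: "image",
    source: { type: "base64", media_type: "image/png", data: "iVBORw0KGgo..." },
};
const openaiImage: OpenAIContentBlock = {
    type: "image_url",
    image_url: { url: "data:image/png;base64,iVBORw0KGgo...", detail: "auto" },
};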
/**
 * Anthropic content block types (extended for tools)
 */
type AnthropicToolUseBlock = {
    type: "tool_use";
    id: string;
    name: string;
    input: Record<string, unknown>;
};
type AnthropicToolResultBlock = {
    type: "tool_result";
    tool_use_id: string;
    content: string;
};
type AnthropicMessageContent = string | Array<AnthropicContentBlock | AnthropicToolUseBlock | AnthropicToolResultBlock>;
/**
 * Format messages for Anthropic with full tool support.
 * Handles: text, images, tool_use, and tool_result.
 *
 * Key differences from OpenAI:
 * - tool_calls become tool_use blocks in assistant content
 * - tool results become tool_result blocks in user content
 */
declare function formatMessagesForAnthropic(messages: Message[], systemPrompt?: string): {
    system: string;
    messages: Array<{
        role: "user" | "assistant";
        content: AnthropicMessageContent;
    }>;
};
/**
 * OpenAI message format with tool support
 */
type OpenAIMessage = {
    role: "system";
    content: string;
} | {
    role: "user";
    content: string | OpenAIContentBlock[];
} | {
    role: "assistant";
    content: string | null;
    tool_calls?: Array<{
        id: string;
        type: "function";
        function: {
            name: string;
            arguments: string;
        };
    }>;
} | {
    role: "tool";
    content: string;
    tool_call_id: string;
};
/**
 * Format messages for OpenAI with full tool support.
 * Handles: text, images, tool_calls, and tool results.
 */
declare function formatMessagesForOpenAI(messages: Message[], systemPrompt?: string): OpenAIMessage[];
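// --- Editorial sketch: one tool round-trip typed as OpenAIMessage[] (ids
// and arguments illustrative). Note that function.arguments is a JSON
// string, not an object.
const toolTurn: OpenAIMessage[] = [
    { role: "system", content: "You are a helpful assistant." },
    { role: "user", content: "What's the weather in Paris?" },
    {
        role: "assistant",
        content: null, // no text while the model is calling a tool
        tool_calls: [{
            id: "call_1",
            type: "function",
            function: { name: "get_weather", arguments: "{\"city\":\"Paris\"}" },
        }],
    },
    { role: "tool", tool_call_id: "call_1", content: "{\"tempC\":21}" },
];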
/**
 * OpenAI adapter configuration
 */
interface OpenAIAdapterConfig extends Partial<LLMConfig> {
    apiKey: string;
    model?: string;
    baseUrl?: string;
}
/**
 * OpenAI LLM Adapter
 *
 * Supports: GPT-4, GPT-4o, GPT-3.5-turbo, etc.
 */
declare class OpenAIAdapter implements LLMAdapter {
    readonly provider = "openai";
    readonly model: string;
    private client;
    private config;
    constructor(config: OpenAIAdapterConfig);
    private getClient;
    stream(request: ChatCompletionRequest): AsyncGenerator<StreamEvent>;
}
/**
 * Create OpenAI adapter
 */
declare function createOpenAIAdapter(config: OpenAIAdapterConfig): OpenAIAdapter;
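// --- Editorial sketch: constructing the adapter directly. The default
// model used when `model` is omitted is not stated in this diff.
const openaiAdapter = createOpenAIAdapter({
    apiKey: process.env.OPENAI_API_KEY!,
    model: "gpt-4o",
    // baseUrl is optional; useful for proxies or OpenAI-compatible endpoints.
});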

/**
 * Extended thinking configuration
 */
interface ThinkingConfig {
    type: "enabled";
    /** Budget for thinking tokens (minimum 1024) */
    budgetTokens?: number;
}
/**
 * Anthropic adapter configuration
 */
interface AnthropicAdapterConfig extends Partial<LLMConfig> {
    apiKey: string;
    model?: string;
    /** Enable extended thinking (for Claude 3.7 Sonnet, Claude 4) */
    thinking?: ThinkingConfig;
}
/**
 * Anthropic LLM Adapter
 *
 * Supports: Claude 3.5 Sonnet, Claude 3 Opus, Claude 3 Haiku, etc.
 */
declare class AnthropicAdapter implements LLMAdapter {
    readonly provider = "anthropic";
    readonly model: string;
    private client;
    private config;
    constructor(config: AnthropicAdapterConfig);
    private getClient;
    /**
     * Convert OpenAI-style messages to Anthropic format
     *
     * OpenAI format:
     * - { role: "assistant", content: "...", tool_calls: [...] }
     * - { role: "tool", tool_call_id: "...", content: "..." }
     *
     * Anthropic format:
     * - { role: "assistant", content: [{ type: "text", text: "..." }, { type: "tool_use", id: "...", name: "...", input: {...} }] }
     * - { role: "user", content: [{ type: "tool_result", tool_use_id: "...", content: "..." }] }
     */
    private convertToAnthropicMessages;
    /**
     * Build common request options for both streaming and non-streaming
     */
    private buildRequestOptions;
    /**
     * Non-streaming completion (for debugging/comparison with the original studio-ai)
     */
    complete(request: ChatCompletionRequest): Promise<CompletionResult>;
    stream(request: ChatCompletionRequest): AsyncGenerator<StreamEvent>;
}
/**
 * Create Anthropic adapter
 */
declare function createAnthropicAdapter(config: AnthropicAdapterConfig): AnthropicAdapter;
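// --- Editorial sketch: opting into extended thinking (model id
// illustrative; the 1024-token minimum budget comes from the doc
// comment above).
const claude = createAnthropicAdapter({
    apiKey: process.env.ANTHROPIC_API_KEY!,
    model: "claude-3-7-sonnet-latest", // illustrative thinking-capable model
    thinking: { type: "enabled", budgetTokens: 2048 }, // >= 1024
});
// When thinking is enabled, CompletionResult.thinking carries the thinking text.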

/**
 * Groq adapter configuration
 */
interface GroqAdapterConfig extends Partial<LLMConfig> {
    apiKey: string;
    model?: string;
}
/**
 * Groq LLM Adapter (Fast inference)
 *
 * Supports: Llama 3.1, Mixtral, Gemma, etc.
 */
declare class GroqAdapter implements LLMAdapter {
    readonly provider = "groq";
    readonly model: string;
    private config;
    constructor(config: GroqAdapterConfig);
    stream(request: ChatCompletionRequest): AsyncGenerator<StreamEvent>;
}
/**
 * Create Groq adapter
 */
declare function createGroqAdapter(config: GroqAdapterConfig): GroqAdapter;

/**
 * Ollama adapter configuration
 */
interface OllamaAdapterConfig extends Partial<LLMConfig> {
    model?: string;
    baseUrl?: string;
}
/**
 * Ollama LLM Adapter (Local models)
 *
 * Supports: Llama 3, Mistral, Phi, Gemma, CodeLlama, etc.
 */
declare class OllamaAdapter implements LLMAdapter {
    readonly provider = "ollama";
    readonly model: string;
    private baseUrl;
    private config;
    constructor(config?: OllamaAdapterConfig);
    stream(request: ChatCompletionRequest): AsyncGenerator<StreamEvent>;
}
/**
 * Create Ollama adapter
 */
declare function createOllamaAdapter(config?: OllamaAdapterConfig): OllamaAdapter;
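// --- Editorial sketch: Ollama is the one adapter that needs no API key.
// The baseUrl shown is Ollama's conventional local endpoint; the adapter's
// actual default is not stated in this diff.
const ollama = createOllamaAdapter({
    model: "llama3",                   // illustrative local model tag
    baseUrl: "http://localhost:11434", // Ollama's standard local port
});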

/**
 * Google Gemini LLM Adapter
 *
 * Supports: Gemini 2.0, 1.5 Pro, 1.5 Flash, etc.
 * Features: Vision, Audio, Video, PDF, Tools/Function Calling
 */

/**
 * Google adapter configuration
 */
interface GoogleAdapterConfig extends Partial<LLMConfig> {
    apiKey: string;
    model?: string;
    baseUrl?: string;
    /** Safety settings */
    safetySettings?: Array<{
        category: string;
        threshold: string;
    }>;
}
/**
 * Google Gemini LLM Adapter
 */
declare class GoogleAdapter implements LLMAdapter {
    readonly provider = "google";
    readonly model: string;
    private client;
    private config;
    constructor(config: GoogleAdapterConfig);
    private getClient;
    stream(request: ChatCompletionRequest): AsyncGenerator<StreamEvent>;
    /**
     * Non-streaming completion (optional, for debugging)
     */
    complete(request: ChatCompletionRequest): Promise<CompletionResult>;
}
/**
 * Create Google Gemini adapter
 */
declare function createGoogleAdapter(config: GoogleAdapterConfig): GoogleAdapter;
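// --- Editorial sketch: the safety-setting strings pass through to Gemini;
// the category/threshold values below are Gemini API names, not defined by
// this package.
const gemini = createGoogleAdapter({
    apiKey: process.env.GOOGLE_API_KEY!,
    model: "gemini-1.5-flash",
    safetySettings: [
        { category: "HARM_CATEGORY_HARASSMENT", threshold: "BLOCK_ONLY_HIGH" },
    ],
});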

/**
 * xAI Grok LLM Adapter
 *
 * xAI exposes an OpenAI-compatible API, so this adapter mirrors the
 * OpenAIAdapter with a different base URL.
 *
 * Supports: Grok-2, Grok-2-mini, Grok-beta
 * Features: Vision, Tools/Function Calling
 */

/**
 * xAI adapter configuration
 */
interface XAIAdapterConfig extends Partial<LLMConfig> {
    apiKey: string;
    model?: string;
    baseUrl?: string;
}
/**
 * xAI Grok LLM Adapter
 *
 * Uses the OpenAI-compatible API with xAI's endpoint
 */
declare class XAIAdapter implements LLMAdapter {
    readonly provider = "xai";
    readonly model: string;
    private client;
    private config;
    constructor(config: XAIAdapterConfig);
    private getClient;
    stream(request: ChatCompletionRequest): AsyncGenerator<StreamEvent>;
    /**
     * Non-streaming completion (optional, for debugging)
     */
    complete(request: ChatCompletionRequest): Promise<CompletionResult>;
}
/**
 * Create xAI Grok adapter
 */
declare function createXAIAdapter(config: XAIAdapterConfig): XAIAdapter;

/**
 * Azure OpenAI LLM Adapter
 *
 * Azure OpenAI uses Microsoft's cloud infrastructure with
 * different authentication and URL patterns than standard OpenAI.
 *
 * Supports: Any OpenAI model deployed on Azure (GPT-4, GPT-4o, etc.)
 * Features: Vision, Tools/Function Calling (depends on the deployed model)
 */

/**
 * Azure OpenAI adapter configuration
 */
interface AzureAdapterConfig extends Partial<LLMConfig> {
    /** Azure OpenAI API key */
    apiKey: string;
    /** Azure resource name (e.g., 'my-resource') */
    resourceName: string;
    /** Azure deployment name (e.g., 'gpt-4o-deployment') */
    deploymentName: string;
    /** API version (default: 2024-08-01-preview) */
    apiVersion?: string;
    /** Custom endpoint URL (optional, overrides resourceName) */
    baseUrl?: string;
}
/**
 * Azure OpenAI LLM Adapter
 *
 * Uses Azure's OpenAI service with Azure-specific authentication
 */
declare class AzureAdapter implements LLMAdapter {
    readonly provider = "azure";
    readonly model: string;
    private client;
    private config;
    constructor(config: AzureAdapterConfig);
    private getClient;
    stream(request: ChatCompletionRequest): AsyncGenerator<StreamEvent>;
    /**
     * Non-streaming completion (optional, for debugging)
     */
    complete(request: ChatCompletionRequest): Promise<CompletionResult>;
}
/**
 * Create Azure OpenAI adapter
 */
declare function createAzureAdapter(config: AzureAdapterConfig): AzureAdapter;
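// --- Editorial sketch: Azure routing is derived from the resource and
// deployment names rather than a raw model id. The resulting endpoint
// shape (https://{resourceName}.openai.azure.com/...) is Azure's
// documented pattern, not something this diff shows.
const azure = createAzureAdapter({
    apiKey: process.env.AZURE_OPENAI_API_KEY!,
    resourceName: "my-resource",         // the Azure OpenAI resource
    deploymentName: "gpt-4o-deployment", // the deployed model, not a model id
    apiVersion: "2024-08-01-preview",    // matches the documented default
});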

export { type AdapterFactory, AnthropicAdapter, type AnthropicAdapterConfig, type AnthropicContentBlock, AzureAdapter, type AzureAdapterConfig, type ChatCompletionRequest, type CompletionResult, GoogleAdapter, type GoogleAdapterConfig, GroqAdapter, type GroqAdapterConfig, type LLMAdapter, OllamaAdapter, type OllamaAdapterConfig, OpenAIAdapter, type OpenAIAdapterConfig, type OpenAIContentBlock, XAIAdapter, type XAIAdapterConfig, attachmentToAnthropicDocument, attachmentToAnthropicImage, attachmentToOpenAIImage, createAnthropicAdapter, createAzureAdapter, createGoogleAdapter, createGroqAdapter, createOllamaAdapter, createOpenAIAdapter, createXAIAdapter, formatMessages, formatMessagesForAnthropic, formatMessagesForOpenAI, formatTools, hasImageAttachments, hasMediaAttachments, messageToAnthropicContent, messageToOpenAIContent };