@yourgpt/llm-sdk 0.1.0 → 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. package/README.md +61 -40
  2. package/dist/adapters/index.d.mts +4 -258
  3. package/dist/adapters/index.d.ts +4 -258
  4. package/dist/adapters/index.js +0 -113
  5. package/dist/adapters/index.js.map +1 -1
  6. package/dist/adapters/index.mjs +1 -112
  7. package/dist/adapters/index.mjs.map +1 -1
  8. package/dist/base-D_FyHFKj.d.mts +235 -0
  9. package/dist/base-D_FyHFKj.d.ts +235 -0
  10. package/dist/index.d.mts +145 -450
  11. package/dist/index.d.ts +145 -450
  12. package/dist/index.js +1837 -307
  13. package/dist/index.js.map +1 -1
  14. package/dist/index.mjs +1827 -305
  15. package/dist/index.mjs.map +1 -1
  16. package/dist/providers/anthropic/index.d.mts +61 -0
  17. package/dist/providers/anthropic/index.d.ts +61 -0
  18. package/dist/providers/anthropic/index.js +939 -0
  19. package/dist/providers/anthropic/index.js.map +1 -0
  20. package/dist/providers/anthropic/index.mjs +934 -0
  21. package/dist/providers/anthropic/index.mjs.map +1 -0
  22. package/dist/providers/azure/index.d.mts +38 -0
  23. package/dist/providers/azure/index.d.ts +38 -0
  24. package/dist/providers/azure/index.js +380 -0
  25. package/dist/providers/azure/index.js.map +1 -0
  26. package/dist/providers/azure/index.mjs +377 -0
  27. package/dist/providers/azure/index.mjs.map +1 -0
  28. package/dist/providers/google/index.d.mts +72 -0
  29. package/dist/providers/google/index.d.ts +72 -0
  30. package/dist/providers/google/index.js +790 -0
  31. package/dist/providers/google/index.js.map +1 -0
  32. package/dist/providers/google/index.mjs +785 -0
  33. package/dist/providers/google/index.mjs.map +1 -0
  34. package/dist/providers/ollama/index.d.mts +24 -0
  35. package/dist/providers/ollama/index.d.ts +24 -0
  36. package/dist/providers/ollama/index.js +235 -0
  37. package/dist/providers/ollama/index.js.map +1 -0
  38. package/dist/providers/ollama/index.mjs +232 -0
  39. package/dist/providers/ollama/index.mjs.map +1 -0
  40. package/dist/providers/openai/index.d.mts +82 -0
  41. package/dist/providers/openai/index.d.ts +82 -0
  42. package/dist/providers/openai/index.js +679 -0
  43. package/dist/providers/openai/index.js.map +1 -0
  44. package/dist/providers/openai/index.mjs +674 -0
  45. package/dist/providers/openai/index.mjs.map +1 -0
  46. package/dist/providers/xai/index.d.mts +78 -0
  47. package/dist/providers/xai/index.d.ts +78 -0
  48. package/dist/providers/xai/index.js +671 -0
  49. package/dist/providers/xai/index.js.map +1 -0
  50. package/dist/providers/xai/index.mjs +666 -0
  51. package/dist/providers/xai/index.mjs.map +1 -0
  52. package/dist/types-BBCZ3Fxy.d.mts +308 -0
  53. package/dist/types-CdORv1Yu.d.mts +338 -0
  54. package/dist/types-CdORv1Yu.d.ts +338 -0
  55. package/dist/types-DcoCaVVC.d.ts +308 -0
  56. package/package.json +34 -3
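The new `dist/providers/*` outputs plus the `package.json` change (+34 -3) indicate per-provider subpath entry points alongside the main bundle. Below is a sketch of the kind of `exports` map that would produce this layout; the exact field values are an assumption inferred from the file list, not the published manifest, and entries for `./google`, `./xai`, `./ollama`, and `./azure` would follow the same pattern:

```json
{
  "exports": {
    ".": {
      "import": { "types": "./dist/index.d.mts", "default": "./dist/index.mjs" },
      "require": { "types": "./dist/index.d.ts", "default": "./dist/index.js" }
    },
    "./openai": {
      "import": { "types": "./dist/providers/openai/index.d.mts", "default": "./dist/providers/openai/index.mjs" },
      "require": { "types": "./dist/providers/openai/index.d.ts", "default": "./dist/providers/openai/index.js" }
    },
    "./anthropic": {
      "import": { "types": "./dist/providers/anthropic/index.d.mts", "default": "./dist/providers/anthropic/index.mjs" },
      "require": { "types": "./dist/providers/anthropic/index.d.ts", "default": "./dist/providers/anthropic/index.js" }
    }
  }
}
```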
package/README.md CHANGED
@@ -1,6 +1,6 @@
 # @yourgpt/llm-sdk
 
-Multi-provider LLM integration for Copilot SDK.
+Multi-provider LLM SDK with streaming. One API, any provider.
 
 ## Installation
 
@@ -10,60 +10,81 @@ npm install @yourgpt/llm-sdk
 
 ## Quick Start
 
-```typescript
-import { createRuntime, createOpenAI } from "@yourgpt/llm-sdk";
+```ts
+import { streamText } from "@yourgpt/llm-sdk";
+import { openai } from "@yourgpt/llm-sdk/openai";
 
-const openai = createOpenAI({
-  apiKey: process.env.OPENAI_API_KEY,
-});
+export async function POST(req: Request) {
+  const { messages } = await req.json();
 
-const runtime = createRuntime({
-  provider: openai,
-  model: "gpt-4o",
-  systemPrompt: "You are a helpful assistant.",
-});
+  const result = await streamText({
+    model: openai("gpt-5"),
+    system: "You are a helpful assistant.",
+    messages,
+  });
 
-// Next.js App Router
-export async function POST(request: Request) {
-  return runtime.handleRequest(request);
+  return result.toTextStreamResponse();
 }
 ```
 
-## Supported Providers
+## Multi-Provider Support
 
-| Provider      | Factory             |
-| ------------- | ------------------- |
-| OpenAI        | `createOpenAI()`    |
-| Anthropic     | `createAnthropic()` |
-| Google Gemini | `createGoogle()`    |
-| Groq          | `createGroq()`      |
-| Ollama        | `createOllama()`    |
-| xAI (Grok)    | `createXAI()`       |
-| Azure OpenAI  | `createAzure()`     |
+```ts
+import { openai } from "@yourgpt/llm-sdk/openai";
+import { anthropic } from "@yourgpt/llm-sdk/anthropic";
+import { google } from "@yourgpt/llm-sdk/google";
+import { xai } from "@yourgpt/llm-sdk/xai";
 
-## Framework Integrations
+// OpenAI
+await streamText({ model: openai("gpt-5"), messages });
 
-```typescript
-// Next.js
-import { createNextHandler } from "@yourgpt/llm-sdk";
+// Anthropic
+await streamText({ model: anthropic("claude-sonnet-4-20250514"), messages });
 
-// Express
-import { createExpressMiddleware } from "@yourgpt/llm-sdk";
+// Google
+await streamText({ model: google("gemini-2.0-flash"), messages });
 
-// Hono
-import { createHonoApp } from "@yourgpt/llm-sdk";
+// xAI
+await streamText({ model: xai("grok-3"), messages });
+```
 
-// Node.js HTTP
-import { createNodeHandler } from "@yourgpt/llm-sdk";
+## Server-Side Tools
+
+```ts
+import { streamText, tool } from "@yourgpt/llm-sdk";
+import { openai } from "@yourgpt/llm-sdk/openai";
+import { z } from "zod";
+
+const result = await streamText({
+  model: openai("gpt-5"),
+  messages,
+  tools: {
+    getWeather: tool({
+      description: "Get current weather for a city",
+      parameters: z.object({
+        city: z.string().describe("City name"),
+      }),
+      execute: async ({ city }) => {
+        return { temperature: 72, condition: "sunny" };
+      },
+    }),
+  },
+  maxSteps: 5,
+});
+
+return result.toDataStreamResponse();
 ```
 
-## Features
+## Supported Providers
 
-- Multi-provider support (7 LLM providers)
-- Streaming responses (SSE)
-- Agent loop for multi-step tool execution
-- Framework-agnostic with built-in integrations
-- TypeScript-first
+| Provider      | Import                       |
+| ------------- | ---------------------------- |
+| OpenAI        | `@yourgpt/llm-sdk/openai`    |
+| Anthropic     | `@yourgpt/llm-sdk/anthropic` |
+| Google Gemini | `@yourgpt/llm-sdk/google`    |
+| xAI (Grok)    | `@yourgpt/llm-sdk/xai`       |
+| Ollama        | `@yourgpt/llm-sdk/ollama`    |
+| Azure OpenAI  | `@yourgpt/llm-sdk/azure`     |
 
 ## Documentation
 
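The rewritten Quick Start returns `result.toTextStreamResponse()` from a route handler, so the route responds with a standard `Response` whose body is a plain text stream. A minimal consumer sketch using only web-standard APIs (`fetch`, `TextDecoderStream`, available in browsers and Node 18+); the `/api/chat` path and the `onChunk` callback are placeholders for illustration, not part of the package:

```ts
// Read the streamed text from the route shown in the new README quick start.
async function readChat(
  messages: Array<{ role: string; content: string }>,
  onChunk: (text: string) => void,
): Promise<string> {
  const res = await fetch("/api/chat", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ messages }),
  });
  if (!res.ok || !res.body) throw new Error(`request failed: ${res.status}`);

  // toTextStreamResponse() streams plain text, so decoding bytes to text
  // is all the client needs to do.
  const reader = res.body.pipeThrough(new TextDecoderStream()).getReader();
  let full = "";
  for (;;) {
    const { value, done } = await reader.read();
    if (done) return full;
    full += value;
    onChunk(value); // render each chunk as it arrives
  }
}
```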
package/dist/adapters/index.d.mts CHANGED
@@ -1,236 +1,6 @@
-import { Message, ActionDefinition, LLMConfig, StreamEvent, MessageAttachment } from '@yourgpt/copilot-sdk/core';
-
-/**
- * Chat completion request
- */
-interface ChatCompletionRequest {
-    /** Conversation messages */
-    messages: Message[];
-    /**
-     * Raw provider-formatted messages (for agent loop with tool calls)
-     * When provided, these are used instead of converting from Message[]
-     * This allows passing messages with tool_calls and tool role
-     */
-    rawMessages?: Array<Record<string, unknown>>;
-    /** Available actions/tools */
-    actions?: ActionDefinition[];
-    /** System prompt */
-    systemPrompt?: string;
-    /** LLM configuration overrides */
-    config?: Partial<LLMConfig>;
-    /** Abort signal for cancellation */
-    signal?: AbortSignal;
-}
-/**
- * Non-streaming completion result
- */
-interface CompletionResult {
-    /** Text content */
-    content: string;
-    /** Tool calls */
-    toolCalls: Array<{
-        id: string;
-        name: string;
-        args: Record<string, unknown>;
-    }>;
-    /** Thinking content (if extended thinking enabled) */
-    thinking?: string;
-    /** Raw provider response for debugging */
-    rawResponse: Record<string, unknown>;
-}
-/**
- * Base LLM adapter interface
- */
-interface LLMAdapter {
-    /** Provider name */
-    readonly provider: string;
-    /** Model name */
-    readonly model: string;
-    /**
-     * Stream a chat completion
-     */
-    stream(request: ChatCompletionRequest): AsyncGenerator<StreamEvent>;
-    /**
-     * Non-streaming chat completion (for debugging/comparison)
-     */
-    complete?(request: ChatCompletionRequest): Promise<CompletionResult>;
-}
-/**
- * Adapter factory function type
- */
-type AdapterFactory = (config: LLMConfig) => LLMAdapter;
-/**
- * Convert messages to provider format (simple text only)
- */
-declare function formatMessages(messages: Message[], systemPrompt?: string): Array<{
-    role: string;
-    content: string;
-}>;
-/**
- * Convert actions to OpenAI tool format
- */
-declare function formatTools(actions: ActionDefinition[]): Array<{
-    type: "function";
-    function: {
-        name: string;
-        description: string;
-        parameters: object;
-    };
-}>;
-/**
- * Content block types for multimodal messages
- */
-type AnthropicContentBlock = {
-    type: "text";
-    text: string;
-} | {
-    type: "image";
-    source: {
-        type: "base64";
-        media_type: string;
-        data: string;
-    } | {
-        type: "url";
-        url: string;
-    };
-} | {
-    type: "document";
-    source: {
-        type: "base64";
-        media_type: string;
-        data: string;
-    } | {
-        type: "url";
-        url: string;
-    };
-};
-type OpenAIContentBlock = {
-    type: "text";
-    text: string;
-} | {
-    type: "image_url";
-    image_url: {
-        url: string;
-        detail?: "low" | "high" | "auto";
-    };
-};
-/**
- * Check if a message has image attachments
- * Supports both new format (metadata.attachments) and legacy (attachments)
- */
-declare function hasImageAttachments(message: Message): boolean;
-/**
- * Check if a message has media attachments (images or PDFs)
- */
-declare function hasMediaAttachments(message: Message): boolean;
-/**
- * Convert MessageAttachment to Anthropic image content block
- *
- * Anthropic format:
- * {
- *   type: "image",
- *   source: {
- *     type: "base64",
- *     media_type: "image/png",
- *     data: "base64data..."
- *   }
- * }
- */
-declare function attachmentToAnthropicImage(attachment: MessageAttachment): AnthropicContentBlock | null;
-/**
- * Convert MessageAttachment to OpenAI image_url content block
- *
- * OpenAI format:
- * {
- *   type: "image_url",
- *   image_url: {
- *     url: "data:image/png;base64,..."
- *   }
- * }
- */
-declare function attachmentToOpenAIImage(attachment: MessageAttachment): OpenAIContentBlock | null;
-/**
- * Convert MessageAttachment (PDF) to Anthropic document content block
- *
- * Anthropic format:
- * {
- *   type: "document",
- *   source: {
- *     type: "base64",
- *     media_type: "application/pdf",
- *     data: "base64data..."
- *   }
- * }
- */
-declare function attachmentToAnthropicDocument(attachment: MessageAttachment): AnthropicContentBlock | null;
-/**
- * Convert a Message to Anthropic multimodal content blocks
- */
-declare function messageToAnthropicContent(message: Message): string | AnthropicContentBlock[];
-/**
- * Convert a Message to OpenAI multimodal content blocks
- */
-declare function messageToOpenAIContent(message: Message): string | OpenAIContentBlock[];
-/**
- * Anthropic content block types (extended for tools)
- */
-type AnthropicToolUseBlock = {
-    type: "tool_use";
-    id: string;
-    name: string;
-    input: Record<string, unknown>;
-};
-type AnthropicToolResultBlock = {
-    type: "tool_result";
-    tool_use_id: string;
-    content: string;
-};
-type AnthropicMessageContent = string | Array<AnthropicContentBlock | AnthropicToolUseBlock | AnthropicToolResultBlock>;
-/**
- * Format messages for Anthropic with full tool support
- * Handles: text, images, tool_use, and tool_result
- *
- * Key differences from OpenAI:
- * - tool_calls become tool_use blocks in assistant content
- * - tool results become tool_result blocks in user content
- */
-declare function formatMessagesForAnthropic(messages: Message[], systemPrompt?: string): {
-    system: string;
-    messages: Array<{
-        role: "user" | "assistant";
-        content: AnthropicMessageContent;
-    }>;
-};
-/**
- * OpenAI message format with tool support
- */
-type OpenAIMessage = {
-    role: "system";
-    content: string;
-} | {
-    role: "user";
-    content: string | OpenAIContentBlock[];
-} | {
-    role: "assistant";
-    content: string | null;
-    tool_calls?: Array<{
-        id: string;
-        type: "function";
-        function: {
-            name: string;
-            arguments: string;
-        };
-    }>;
-} | {
-    role: "tool";
-    content: string;
-    tool_call_id: string;
-};
-/**
- * Format messages for OpenAI with full tool support
- * Handles: text, images, tool_calls, and tool results
- */
-declare function formatMessagesForOpenAI(messages: Message[], systemPrompt?: string): OpenAIMessage[];
+import { L as LLMAdapter, C as ChatCompletionRequest, a as CompletionResult } from '../base-D_FyHFKj.mjs';
+export { A as AdapterFactory, l as AnthropicContentBlock, O as OpenAIContentBlock, j as attachmentToAnthropicDocument, i as attachmentToAnthropicImage, k as attachmentToOpenAIImage, f as formatMessages, c as formatMessagesForAnthropic, d as formatMessagesForOpenAI, b as formatTools, h as hasImageAttachments, g as hasMediaAttachments, m as messageToAnthropicContent, e as messageToOpenAIContent } from '../base-D_FyHFKj.mjs';
+import { LLMConfig, StreamEvent } from '@yourgpt/copilot-sdk/core';
 
 /**
  * OpenAI adapter configuration
@@ -315,30 +85,6 @@ declare class AnthropicAdapter implements LLMAdapter {
  */
 declare function createAnthropicAdapter(config: AnthropicAdapterConfig): AnthropicAdapter;
 
-/**
- * Groq adapter configuration
- */
-interface GroqAdapterConfig extends Partial<LLMConfig> {
-    apiKey: string;
-    model?: string;
-}
-/**
- * Groq LLM Adapter (Fast inference)
- *
- * Supports: Llama 3.1, Mixtral, Gemma, etc.
- */
-declare class GroqAdapter implements LLMAdapter {
-    readonly provider = "groq";
-    readonly model: string;
-    private config;
-    constructor(config: GroqAdapterConfig);
-    stream(request: ChatCompletionRequest): AsyncGenerator<StreamEvent>;
-}
-/**
- * Create Groq adapter
- */
-declare function createGroqAdapter(config: GroqAdapterConfig): GroqAdapter;
-
 /**
  * Ollama adapter configuration
  */
@@ -494,4 +240,4 @@ declare class AzureAdapter implements LLMAdapter {
  */
 declare function createAzureAdapter(config: AzureAdapterConfig): AzureAdapter;
 
-export { type AdapterFactory, AnthropicAdapter, type AnthropicAdapterConfig, type AnthropicContentBlock, AzureAdapter, type AzureAdapterConfig, type ChatCompletionRequest, type CompletionResult, GoogleAdapter, type GoogleAdapterConfig, GroqAdapter, type GroqAdapterConfig, type LLMAdapter, OllamaAdapter, type OllamaAdapterConfig, OpenAIAdapter, type OpenAIAdapterConfig, type OpenAIContentBlock, XAIAdapter, type XAIAdapterConfig, attachmentToAnthropicDocument, attachmentToAnthropicImage, attachmentToOpenAIImage, createAnthropicAdapter, createAzureAdapter, createGoogleAdapter, createGroqAdapter, createOllamaAdapter, createOpenAIAdapter, createXAIAdapter, formatMessages, formatMessagesForAnthropic, formatMessagesForOpenAI, formatTools, hasImageAttachments, hasMediaAttachments, messageToAnthropicContent, messageToOpenAIContent };
+export { AnthropicAdapter, type AnthropicAdapterConfig, AzureAdapter, type AzureAdapterConfig, ChatCompletionRequest, CompletionResult, GoogleAdapter, type GoogleAdapterConfig, LLMAdapter, OllamaAdapter, type OllamaAdapterConfig, OpenAIAdapter, type OpenAIAdapterConfig, XAIAdapter, type XAIAdapterConfig, createAnthropicAdapter, createAzureAdapter, createGoogleAdapter, createOllamaAdapter, createOpenAIAdapter, createXAIAdapter };
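The adapter surface itself is not deleted here: the declarations move into the shared `base-D_FyHFKj` chunk and are re-exported under their public names via the minified aliases above. For orientation, a sketch of a custom adapter implementing the `LLMAdapter` interface exactly as declared in the removed block; the `Message` and `StreamEvent` stand-ins are assumptions, since the real types live in `@yourgpt/copilot-sdk/core` and only their names appear in this diff:

```ts
// Local stand-ins for the @yourgpt/copilot-sdk/core types (shapes assumed).
type Message = { role: string; content: string };
type StreamEvent = { type: "text" | "done"; text?: string };

// Trimmed copies of the interfaces declared in the removed block above.
interface ChatCompletionRequest {
  messages: Message[];
  systemPrompt?: string;
  signal?: AbortSignal;
}

interface LLMAdapter {
  readonly provider: string;
  readonly model: string;
  stream(request: ChatCompletionRequest): AsyncGenerator<StreamEvent>;
}

// Toy adapter: streams the last user message back word by word to mimic
// token streaming, honoring the abort signal for cancellation.
class EchoAdapter implements LLMAdapter {
  readonly provider = "echo";
  readonly model = "echo-1";

  async *stream(request: ChatCompletionRequest): AsyncGenerator<StreamEvent> {
    const last = request.messages.at(-1)?.content ?? "";
    for (const word of last.split(/\s+/)) {
      if (request.signal?.aborted) return;
      yield { type: "text", text: word + " " };
    }
    yield { type: "done" };
  }
}
```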
package/dist/adapters/index.d.ts CHANGED
@@ -1,236 +1,6 @@
-import { Message, ActionDefinition, LLMConfig, StreamEvent, MessageAttachment } from '@yourgpt/copilot-sdk/core';
-
-/**
- * Chat completion request
- */
-interface ChatCompletionRequest {
-    /** Conversation messages */
-    messages: Message[];
-    /**
-     * Raw provider-formatted messages (for agent loop with tool calls)
-     * When provided, these are used instead of converting from Message[]
-     * This allows passing messages with tool_calls and tool role
-     */
-    rawMessages?: Array<Record<string, unknown>>;
-    /** Available actions/tools */
-    actions?: ActionDefinition[];
-    /** System prompt */
-    systemPrompt?: string;
-    /** LLM configuration overrides */
-    config?: Partial<LLMConfig>;
-    /** Abort signal for cancellation */
-    signal?: AbortSignal;
-}
-/**
- * Non-streaming completion result
- */
-interface CompletionResult {
-    /** Text content */
-    content: string;
-    /** Tool calls */
-    toolCalls: Array<{
-        id: string;
-        name: string;
-        args: Record<string, unknown>;
-    }>;
-    /** Thinking content (if extended thinking enabled) */
-    thinking?: string;
-    /** Raw provider response for debugging */
-    rawResponse: Record<string, unknown>;
-}
-/**
- * Base LLM adapter interface
- */
-interface LLMAdapter {
-    /** Provider name */
-    readonly provider: string;
-    /** Model name */
-    readonly model: string;
-    /**
-     * Stream a chat completion
-     */
-    stream(request: ChatCompletionRequest): AsyncGenerator<StreamEvent>;
-    /**
-     * Non-streaming chat completion (for debugging/comparison)
-     */
-    complete?(request: ChatCompletionRequest): Promise<CompletionResult>;
-}
-/**
- * Adapter factory function type
- */
-type AdapterFactory = (config: LLMConfig) => LLMAdapter;
-/**
- * Convert messages to provider format (simple text only)
- */
-declare function formatMessages(messages: Message[], systemPrompt?: string): Array<{
-    role: string;
-    content: string;
-}>;
-/**
- * Convert actions to OpenAI tool format
- */
-declare function formatTools(actions: ActionDefinition[]): Array<{
-    type: "function";
-    function: {
-        name: string;
-        description: string;
-        parameters: object;
-    };
-}>;
-/**
- * Content block types for multimodal messages
- */
-type AnthropicContentBlock = {
-    type: "text";
-    text: string;
-} | {
-    type: "image";
-    source: {
-        type: "base64";
-        media_type: string;
-        data: string;
-    } | {
-        type: "url";
-        url: string;
-    };
-} | {
-    type: "document";
-    source: {
-        type: "base64";
-        media_type: string;
-        data: string;
-    } | {
-        type: "url";
-        url: string;
-    };
-};
-type OpenAIContentBlock = {
-    type: "text";
-    text: string;
-} | {
-    type: "image_url";
-    image_url: {
-        url: string;
-        detail?: "low" | "high" | "auto";
-    };
-};
-/**
- * Check if a message has image attachments
- * Supports both new format (metadata.attachments) and legacy (attachments)
- */
-declare function hasImageAttachments(message: Message): boolean;
-/**
- * Check if a message has media attachments (images or PDFs)
- */
-declare function hasMediaAttachments(message: Message): boolean;
-/**
- * Convert MessageAttachment to Anthropic image content block
- *
- * Anthropic format:
- * {
- *   type: "image",
- *   source: {
- *     type: "base64",
- *     media_type: "image/png",
- *     data: "base64data..."
- *   }
- * }
- */
-declare function attachmentToAnthropicImage(attachment: MessageAttachment): AnthropicContentBlock | null;
-/**
- * Convert MessageAttachment to OpenAI image_url content block
- *
- * OpenAI format:
- * {
- *   type: "image_url",
- *   image_url: {
- *     url: "data:image/png;base64,..."
- *   }
- * }
- */
-declare function attachmentToOpenAIImage(attachment: MessageAttachment): OpenAIContentBlock | null;
-/**
- * Convert MessageAttachment (PDF) to Anthropic document content block
- *
- * Anthropic format:
- * {
- *   type: "document",
- *   source: {
- *     type: "base64",
- *     media_type: "application/pdf",
- *     data: "base64data..."
- *   }
- * }
- */
-declare function attachmentToAnthropicDocument(attachment: MessageAttachment): AnthropicContentBlock | null;
-/**
- * Convert a Message to Anthropic multimodal content blocks
- */
-declare function messageToAnthropicContent(message: Message): string | AnthropicContentBlock[];
-/**
- * Convert a Message to OpenAI multimodal content blocks
- */
-declare function messageToOpenAIContent(message: Message): string | OpenAIContentBlock[];
-/**
- * Anthropic content block types (extended for tools)
- */
-type AnthropicToolUseBlock = {
-    type: "tool_use";
-    id: string;
-    name: string;
-    input: Record<string, unknown>;
-};
-type AnthropicToolResultBlock = {
-    type: "tool_result";
-    tool_use_id: string;
-    content: string;
-};
-type AnthropicMessageContent = string | Array<AnthropicContentBlock | AnthropicToolUseBlock | AnthropicToolResultBlock>;
-/**
- * Format messages for Anthropic with full tool support
- * Handles: text, images, tool_use, and tool_result
- *
- * Key differences from OpenAI:
- * - tool_calls become tool_use blocks in assistant content
- * - tool results become tool_result blocks in user content
- */
-declare function formatMessagesForAnthropic(messages: Message[], systemPrompt?: string): {
-    system: string;
-    messages: Array<{
-        role: "user" | "assistant";
-        content: AnthropicMessageContent;
-    }>;
-};
-/**
- * OpenAI message format with tool support
- */
-type OpenAIMessage = {
-    role: "system";
-    content: string;
-} | {
-    role: "user";
-    content: string | OpenAIContentBlock[];
-} | {
-    role: "assistant";
-    content: string | null;
-    tool_calls?: Array<{
-        id: string;
-        type: "function";
-        function: {
-            name: string;
-            arguments: string;
-        };
-    }>;
-} | {
-    role: "tool";
-    content: string;
-    tool_call_id: string;
-};
-/**
- * Format messages for OpenAI with full tool support
- * Handles: text, images, tool_calls, and tool results
- */
-declare function formatMessagesForOpenAI(messages: Message[], systemPrompt?: string): OpenAIMessage[];
+import { L as LLMAdapter, C as ChatCompletionRequest, a as CompletionResult } from '../base-D_FyHFKj.js';
+export { A as AdapterFactory, l as AnthropicContentBlock, O as OpenAIContentBlock, j as attachmentToAnthropicDocument, i as attachmentToAnthropicImage, k as attachmentToOpenAIImage, f as formatMessages, c as formatMessagesForAnthropic, d as formatMessagesForOpenAI, b as formatTools, h as hasImageAttachments, g as hasMediaAttachments, m as messageToAnthropicContent, e as messageToOpenAIContent } from '../base-D_FyHFKj.js';
+import { LLMConfig, StreamEvent } from '@yourgpt/copilot-sdk/core';
 
 /**
  * OpenAI adapter configuration
@@ -315,30 +85,6 @@ declare class AnthropicAdapter implements LLMAdapter {
  */
 declare function createAnthropicAdapter(config: AnthropicAdapterConfig): AnthropicAdapter;
 
-/**
- * Groq adapter configuration
- */
-interface GroqAdapterConfig extends Partial<LLMConfig> {
-    apiKey: string;
-    model?: string;
-}
-/**
- * Groq LLM Adapter (Fast inference)
- *
- * Supports: Llama 3.1, Mixtral, Gemma, etc.
- */
-declare class GroqAdapter implements LLMAdapter {
-    readonly provider = "groq";
-    readonly model: string;
-    private config;
-    constructor(config: GroqAdapterConfig);
-    stream(request: ChatCompletionRequest): AsyncGenerator<StreamEvent>;
-}
-/**
- * Create Groq adapter
- */
-declare function createGroqAdapter(config: GroqAdapterConfig): GroqAdapter;
-
 /**
  * Ollama adapter configuration
  */
@@ -494,4 +240,4 @@ declare class AzureAdapter implements LLMAdapter {
  */
 declare function createAzureAdapter(config: AzureAdapterConfig): AzureAdapter;
 
-export { type AdapterFactory, AnthropicAdapter, type AnthropicAdapterConfig, type AnthropicContentBlock, AzureAdapter, type AzureAdapterConfig, type ChatCompletionRequest, type CompletionResult, GoogleAdapter, type GoogleAdapterConfig, GroqAdapter, type GroqAdapterConfig, type LLMAdapter, OllamaAdapter, type OllamaAdapterConfig, OpenAIAdapter, type OpenAIAdapterConfig, type OpenAIContentBlock, XAIAdapter, type XAIAdapterConfig, attachmentToAnthropicDocument, attachmentToAnthropicImage, attachmentToOpenAIImage, createAnthropicAdapter, createAzureAdapter, createGoogleAdapter, createGroqAdapter, createOllamaAdapter, createOpenAIAdapter, createXAIAdapter, formatMessages, formatMessagesForAnthropic, formatMessagesForOpenAI, formatTools, hasImageAttachments, hasMediaAttachments, messageToAnthropicContent, messageToOpenAIContent };
+export { AnthropicAdapter, type AnthropicAdapterConfig, AzureAdapter, type AzureAdapterConfig, ChatCompletionRequest, CompletionResult, GoogleAdapter, type GoogleAdapterConfig, LLMAdapter, OllamaAdapter, type OllamaAdapterConfig, OpenAIAdapter, type OpenAIAdapterConfig, XAIAdapter, type XAIAdapterConfig, createAnthropicAdapter, createAzureAdapter, createGoogleAdapter, createOllamaAdapter, createOpenAIAdapter, createXAIAdapter };
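Taken together, the two README versions above capture the 0.1.0 → 0.1.1 API break: the runtime/adapter wiring is replaced by a single `streamText` call with a provider subpath import. A condensed migration of the same route file, with the 0.1.0 version kept as comments for comparison (both halves are drawn from the README diff, not from additional APIs):

```ts
// app/api/chat/route.ts
//
// 0.1.0 (removed README example):
//   import { createRuntime, createOpenAI } from "@yourgpt/llm-sdk";
//   const openai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });
//   const runtime = createRuntime({
//     provider: openai,
//     model: "gpt-4o",
//     systemPrompt: "You are a helpful assistant.",
//   });
//   export async function POST(request: Request) {
//     return runtime.handleRequest(request);
//   }

// 0.1.1: the model is chosen per call via a provider subpath import, and the
// runtime object disappears in favor of one streamText call.
import { streamText } from "@yourgpt/llm-sdk";
import { openai } from "@yourgpt/llm-sdk/openai";

export async function POST(req: Request) {
  const { messages } = await req.json();
  const result = await streamText({
    model: openai("gpt-5"),
    system: "You are a helpful assistant.",
    messages,
  });
  return result.toTextStreamResponse();
}
```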