@yolo-labs/core-providers 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/anthropic/index.d.ts +34 -0
- package/dist/anthropic/index.js +191 -0
- package/dist/anthropic/index.js.map +1 -0
- package/dist/google/index.d.ts +38 -0
- package/dist/google/index.js +223 -0
- package/dist/google/index.js.map +1 -0
- package/dist/index.d.ts +60 -0
- package/dist/index.js +81 -0
- package/dist/index.js.map +1 -0
- package/dist/openai/index.d.ts +36 -0
- package/dist/openai/index.js +252 -0
- package/dist/openai/index.js.map +1 -0
- package/package.json +73 -0
package/dist/anthropic/index.d.ts
ADDED
@@ -0,0 +1,34 @@
+import { AIProvider, CreateMessageParams, StreamEvent } from '@yolo-labs/core-types';
+
+/** Configuration options for the Anthropic provider. */
+interface AnthropicProviderOptions {
+    apiKey?: string;
+    baseURL?: string;
+    defaultModel?: string;
+}
+/**
+ * {@link AIProvider} adapter for the Anthropic Claude API.
+ *
+ * @remarks
+ * Translates canonical {@link CreateMessageParams} into Anthropic SDK calls and
+ * maps Anthropic stream events back to canonical {@link StreamEvent} types.
+ * Requires `@anthropic-ai/sdk` as a peer dependency.
+ */
+declare class AnthropicProvider implements AIProvider {
+    private client;
+    private defaultModel;
+    /** Creates a new AnthropicProvider with the given options. */
+    constructor(options?: AnthropicProviderOptions);
+    /**
+     * Creates a streaming message using the Anthropic SDK.
+     *
+     * @param params - Canonical message creation parameters.
+     * @returns An async iterable of canonical {@link StreamEvent} objects.
+     */
+    createMessage(params: CreateMessageParams): AsyncIterable<StreamEvent>;
+    private mapEvent;
+    private toAnthropicTool;
+    private toAnthropicMessage;
+}
+
+export { AnthropicProvider, type AnthropicProviderOptions };
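A minimal usage sketch for the declaration above, not an official sample from this package: the subpath import is an assumption based on the dist/ layout (the actual exports map lives in package.json, which this diff lists but does not display), and the request fields mirror the `CreateMessageParams` properties the compiled adapter reads (`system`, `messages`, `maxTokens`).

```ts
// Sketch only. Assumptions: the subpath import path, the ANTHROPIC_API_KEY env var
// (the Anthropic SDK also reads it implicitly), and the exact CreateMessageParams field set.
import { AnthropicProvider } from '@yolo-labs/core-providers/anthropic';

const provider = new AnthropicProvider({ apiKey: process.env.ANTHROPIC_API_KEY });

async function main() {
  const stream = provider.createMessage({
    system: 'You are a terse assistant.',
    messages: [{ role: 'user', content: 'Say hello.' }],
    maxTokens: 256,
  });
  for await (const event of stream) {
    // Treat the event loosely; the canonical StreamEvent union lives in @yolo-labs/core-types.
    const e = event as { type: string; delta?: { type: string; text?: string } };
    if (e.type === 'content_block_delta' && e.delta?.type === 'text_delta') {
      process.stdout.write(e.delta.text ?? '');
    }
  }
}

main();
```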
package/dist/anthropic/index.js
ADDED
@@ -0,0 +1,191 @@
+// src/anthropic/index.ts
+import Anthropic from "@anthropic-ai/sdk";
+var AnthropicProvider = class {
+  client;
+  defaultModel;
+  /** Creates a new AnthropicProvider with the given options. */
+  constructor(options = {}) {
+    this.client = new Anthropic({
+      apiKey: options.apiKey,
+      baseURL: options.baseURL
+    });
+    this.defaultModel = options.defaultModel ?? "claude-sonnet-4-20250514";
+  }
+  /**
+   * Creates a streaming message using the Anthropic SDK.
+   *
+   * @param params - Canonical message creation parameters.
+   * @returns An async iterable of canonical {@link StreamEvent} objects.
+   */
+  async *createMessage(params) {
+    const model = params.model || this.defaultModel;
+    const tools = params.tools?.map((t) => this.toAnthropicTool(t));
+    const messages = params.messages.map((m) => this.toAnthropicMessage(m));
+    const requestParams = {
+      model,
+      messages,
+      max_tokens: params.maxTokens ?? 4096,
+      stream: true
+    };
+    if (params.system) {
+      requestParams.system = params.system;
+    }
+    if (tools && tools.length > 0) {
+      requestParams.tools = tools;
+    }
+    if (params.temperature !== void 0) {
+      requestParams.temperature = params.temperature;
+    }
+    if (params.topP !== void 0) {
+      requestParams.top_p = params.topP;
+    }
+    if (params.stopSequences && params.stopSequences.length > 0) {
+      requestParams.stop_sequences = params.stopSequences;
+    }
+    const stream = this.client.messages.stream(requestParams);
+    for await (const event of stream) {
+      const mapped = this.mapEvent(event);
+      if (mapped) {
+        yield mapped;
+      }
+    }
+  }
+  // 47b: Map Anthropic raw stream events to canonical StreamEvent
+  mapEvent(event) {
+    switch (event.type) {
+      case "message_start":
+        return {
+          type: "message_start",
+          message: {
+            id: event.message.id,
+            model: event.message.model,
+            role: "assistant",
+            usage: event.message.usage ? {
+              input_tokens: event.message.usage.input_tokens,
+              output_tokens: event.message.usage.output_tokens
+            } : void 0
+          }
+        };
+      case "content_block_start":
+        return {
+          type: "content_block_start",
+          index: event.index,
+          contentBlock: {
+            type: event.content_block.type === "tool_use" ? "tool_use" : "text",
+            id: event.content_block.type === "tool_use" ? event.content_block.id : void 0,
+            name: event.content_block.type === "tool_use" ? event.content_block.name : void 0
+          }
+        };
+      case "content_block_delta":
+        if (event.delta.type === "text_delta") {
+          return {
+            type: "content_block_delta",
+            index: event.index,
+            delta: { type: "text_delta", text: event.delta.text }
+          };
+        } else if (event.delta.type === "input_json_delta") {
+          return {
+            type: "content_block_delta",
+            index: event.index,
+            delta: { type: "input_json_delta", partial_json: event.delta.partial_json }
+          };
+        } else if (event.delta.type === "thinking_delta") {
+          return {
+            type: "content_block_delta",
+            index: event.index,
+            delta: { type: "thinking_delta", thinking: event.delta.thinking }
+          };
+        }
+        return null;
+      case "content_block_stop":
+        return {
+          type: "content_block_stop",
+          index: event.index
+        };
+      case "message_delta":
+        return {
+          type: "message_delta",
+          delta: {
+            stop_reason: event.delta.stop_reason ?? null
+          },
+          usage: event.usage ? { output_tokens: event.usage.output_tokens } : void 0
+        };
+      case "message_stop":
+        return { type: "message_stop" };
+      default:
+        return null;
+    }
+  }
+  // 47c: Translate canonical ToolDefinition → Anthropic tool format
+  toAnthropicTool(tool) {
+    return {
+      name: tool.name,
+      description: tool.description,
+      input_schema: tool.inputSchema
+    };
+  }
+  // 47d: Map multimodal content blocks to Anthropic message format
+  toAnthropicMessage(message) {
+    if (typeof message.content === "string") {
+      return { role: message.role, content: message.content };
+    }
+    const contentParts = message.content.map(
+      (block) => {
+        switch (block.type) {
+          case "text":
+            return { type: "text", text: block.text };
+          case "image":
+            if (block.source.type === "base64") {
+              return {
+                type: "image",
+                source: {
+                  type: "base64",
+                  media_type: block.source.media_type,
+                  data: block.source.data
+                }
+              };
+            }
+            return {
+              type: "image",
+              source: {
+                type: "url",
+                url: block.source.url
+              }
+            };
+          case "file":
+            return {
+              type: "document",
+              source: {
+                type: "base64",
+                media_type: block.source.media_type,
+                data: block.source.data
+              }
+            };
+          // Task 98: Map ToolUseBlock → Anthropic native tool_use format
+          case "tool_use":
+            return {
+              type: "tool_use",
+              id: block.id,
+              name: block.name,
+              input: block.input
+            };
+          // Task 98: Map ToolResultBlock → Anthropic native tool_result format
+          case "tool_result":
+            return {
+              type: "tool_result",
+              tool_use_id: block.tool_use_id,
+              content: block.content,
+              ...block.is_error ? { is_error: true } : {}
+            };
+          default:
+            return { type: "text", text: "[unsupported content type]" };
+        }
+      }
+    );
+    return { role: message.role, content: contentParts };
+  }
+};
+export {
+  AnthropicProvider
+};
+//# sourceMappingURL=index.js.map
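Because the adapter above maps every Anthropic event into the canonical `StreamEvent` shape, one consumer can reassemble a complete turn regardless of provider. A sketch of such a consumer follows; the accumulator shape and the loose event typing are assumptions, while the event and field names come from the mapping code above.

```ts
import type { AIProvider, CreateMessageParams } from '@yolo-labs/core-types';

// Illustrative accumulator; the exact StreamEvent union lives in
// @yolo-labs/core-types and is treated loosely here.
async function collectTurn(provider: AIProvider, params: CreateMessageParams) {
  const blocks: Array<{ type: string; id?: string; name?: string; text: string; inputJson: string }> = [];
  for await (const event of provider.createMessage(params)) {
    const e = event as any;
    if (e.type === 'content_block_start') {
      blocks[e.index] = { ...e.contentBlock, text: '', inputJson: '' };
    } else if (e.type === 'content_block_delta') {
      if (e.delta.type === 'text_delta') blocks[e.index].text += e.delta.text;
      if (e.delta.type === 'input_json_delta') blocks[e.index].inputJson += e.delta.partial_json;
    }
  }
  // tool_use input arrives as JSON fragments; parse once the stream is done.
  return blocks.map((b) =>
    b.type === 'tool_use' ? { ...b, input: JSON.parse(b.inputJson || '{}') } : b
  );
}
```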
package/dist/anthropic/index.js.map
ADDED
@@ -0,0 +1 @@
+{"version":3,"sources":["../../src/anthropic/index.ts"],"sourcesContent":["…"],"mappings":"…","names":[]}
package/dist/google/index.d.ts
ADDED
@@ -0,0 +1,38 @@
+import { AIProvider, CreateMessageParams, StreamEvent } from '@yolo-labs/core-types';
+
+/** Configuration options for the Google Gemini provider. */
+interface GoogleProviderOptions {
+    apiKey: string;
+    defaultModel?: string;
+}
+/**
+ * {@link AIProvider} adapter for the Google Gemini generative AI API.
+ *
+ * @remarks
+ * Translates canonical {@link CreateMessageParams} into Google `generateContentStream`
+ * calls and maps response chunks back to canonical {@link StreamEvent} types.
+ * Google returns function call arguments in full (not streamed), so tool use
+ * blocks emit a single `input_json_delta` with the complete JSON.
+ * Requires `@google/generative-ai` as a peer dependency.
+ */
+declare class GoogleProvider implements AIProvider {
+    private genAI;
+    private defaultModel;
+    /** Creates a new GoogleProvider with the given options. */
+    constructor(options: GoogleProviderOptions);
+    /**
+     * Creates a streaming message using the Google Generative AI SDK.
+     *
+     * @param params - Canonical message creation parameters.
+     * @returns An async iterable of canonical {@link StreamEvent} objects.
+     */
+    createMessage(params: CreateMessageParams): AsyncIterable<StreamEvent>;
+    private mapFinishReason;
+    private toGoogleTools;
+    private mapSchemaType;
+    private toGoogleSchema;
+    private toGoogleContents;
+    private toGoogleParts;
+}
+
+export { GoogleProvider, type GoogleProviderOptions };
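A brief construction sketch for this provider: note that `apiKey` is required here, unlike the Anthropic and OpenAI options. The subpath import, the environment variable name, and the JSON-Schema-like `inputSchema` are assumptions; the schema fields (`type`, `properties`, `required`) match what the compiled adapter's `toGoogleSchema` helper below reads.

```ts
// Sketch only; see the assumptions noted above.
import { GoogleProvider } from '@yolo-labs/core-providers/google';

const provider = new GoogleProvider({
  apiKey: process.env.GOOGLE_API_KEY ?? '', // required, not optional
  defaultModel: 'gemini-2.0-flash',
});

// A canonical tool definition as the adapters consume it (name, description, inputSchema).
const getWeather = {
  name: 'get_weather',
  description: 'Look up current weather for a city',
  inputSchema: {
    type: 'object',
    properties: { city: { type: 'string', description: 'City name' } },
    required: ['city'],
  },
};
// Passed via params.tools, this becomes a functionDeclarations entry in the adapter below.
```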
package/dist/google/index.js
ADDED
@@ -0,0 +1,223 @@
+// src/google/index.ts
+import {
+  GoogleGenerativeAI,
+  SchemaType
+} from "@google/generative-ai";
+var GoogleProvider = class {
+  genAI;
+  defaultModel;
+  /** Creates a new GoogleProvider with the given options. */
+  constructor(options) {
+    this.genAI = new GoogleGenerativeAI(options.apiKey);
+    this.defaultModel = options.defaultModel ?? "gemini-2.0-flash";
+  }
+  /**
+   * Creates a streaming message using the Google Generative AI SDK.
+   *
+   * @param params - Canonical message creation parameters.
+   * @returns An async iterable of canonical {@link StreamEvent} objects.
+   */
+  async *createMessage(params) {
+    const modelName = params.model || this.defaultModel;
+    const tools = params.tools?.length ? [this.toGoogleTools(params.tools)] : void 0;
+    const model = this.genAI.getGenerativeModel({
+      model: modelName,
+      systemInstruction: params.system ? { role: "user", parts: [{ text: params.system }] } : void 0,
+      tools,
+      generationConfig: {
+        maxOutputTokens: params.maxTokens,
+        temperature: params.temperature,
+        topP: params.topP,
+        stopSequences: params.stopSequences
+      }
+    });
+    const contents = this.toGoogleContents(params.messages);
+    const messageId = `msg_${Date.now()}`;
+    yield {
+      type: "message_start",
+      message: {
+        id: messageId,
+        model: modelName,
+        role: "assistant"
+      }
+    };
+    const result = await model.generateContentStream({
+      contents
+    });
+    let contentBlockIndex = 0;
+    let hasToolCalls = false;
+    for await (const chunk of result.stream) {
+      const candidates = chunk.candidates;
+      if (!candidates?.length) continue;
+      const candidate = candidates[0];
+      const parts = candidate.content?.parts;
+      if (!parts) continue;
+      for (const part of parts) {
+        if ("text" in part && part.text) {
+          yield {
+            type: "content_block_start",
+            index: contentBlockIndex,
+            contentBlock: { type: "text" }
+          };
+          yield {
+            type: "content_block_delta",
+            index: contentBlockIndex,
+            delta: { type: "text_delta", text: part.text }
+          };
+          yield { type: "content_block_stop", index: contentBlockIndex };
+          contentBlockIndex++;
+        }
+        if ("functionCall" in part && part.functionCall) {
+          hasToolCalls = true;
+          const toolCallId = `call_${Date.now()}_${contentBlockIndex}`;
+          yield {
+            type: "content_block_start",
+            index: contentBlockIndex,
+            contentBlock: {
+              type: "tool_use",
+              id: toolCallId,
+              name: part.functionCall.name
+            }
+          };
+          const argsJson = JSON.stringify(part.functionCall.args ?? {});
+          yield {
+            type: "content_block_delta",
+            index: contentBlockIndex,
+            delta: { type: "input_json_delta", partial_json: argsJson }
+          };
+          yield { type: "content_block_stop", index: contentBlockIndex };
+          contentBlockIndex++;
+        }
+      }
+      if (candidate.finishReason) {
+        const stopReason = this.mapFinishReason(candidate.finishReason, hasToolCalls);
+        yield {
+          type: "message_delta",
+          delta: { stop_reason: stopReason },
+          usage: chunk.usageMetadata ? { output_tokens: chunk.usageMetadata.candidatesTokenCount ?? 0 } : void 0
+        };
+      }
+    }
+    yield { type: "message_stop" };
+  }
+  mapFinishReason(reason, hasToolCalls) {
+    switch (reason) {
+      case "STOP":
+        return hasToolCalls ? "tool_use" : "end_turn";
+      case "MAX_TOKENS":
+        return "max_tokens";
+      case "SAFETY":
+        return "content_filter";
+      case "RECITATION":
+        return "content_filter";
+      default:
+        return "end_turn";
+    }
+  }
+  // 49c: Translate canonical ToolDefinition → Google functionDeclarations
+  toGoogleTools(tools) {
+    return {
+      functionDeclarations: tools.map((t) => ({
+        name: t.name,
+        description: t.description,
+        parameters: this.toGoogleSchema(t.inputSchema)
+      }))
+    };
+  }
+  mapSchemaType(type) {
+    switch (type.toLowerCase()) {
+      case "string":
+        return SchemaType.STRING;
+      case "number":
+        return SchemaType.NUMBER;
+      case "integer":
+        return SchemaType.INTEGER;
+      case "boolean":
+        return SchemaType.BOOLEAN;
+      case "array":
+        return SchemaType.ARRAY;
+      case "object":
+        return SchemaType.OBJECT;
+      default:
+        return SchemaType.STRING;
+    }
+  }
+  toGoogleSchema(schema) {
+    const result = {};
+    result.type = this.mapSchemaType(schema.type ?? "object");
+    if (schema.description) result.description = schema.description;
+    if (schema.properties) {
+      const props = {};
+      for (const [key, value] of Object.entries(schema.properties)) {
+        props[key] = this.toGoogleSchema(value);
+      }
+      result.properties = props;
+    }
+    if (schema.required) result.required = schema.required;
+    if (schema.items) result.items = this.toGoogleSchema(schema.items);
+    if (schema.enum) result.enum = schema.enum;
+    return result;
+  }
+  // 49e: Map multimodal content blocks to Google Content/Part types
+  toGoogleContents(messages) {
+    return messages.map((msg) => ({
+      role: msg.role === "assistant" ? "model" : "user",
+      parts: this.toGoogleParts(msg.content)
+    }));
+  }
+  toGoogleParts(content) {
+    if (typeof content === "string") {
+      return [{ text: content }];
+    }
+    return content.map((block) => {
+      switch (block.type) {
+        case "text":
+          return { text: block.text };
+        case "image":
+          if (block.source.type === "base64") {
+            return {
+              inlineData: {
+                mimeType: block.source.media_type,
+                data: block.source.data
+              }
+            };
+          }
+          return {
+            fileData: {
+              mimeType: "image/jpeg",
+              fileUri: block.source.url
+            }
+          };
+        case "file":
+          return {
+            inlineData: {
+              mimeType: block.source.media_type,
+              data: block.source.data
+            }
+          };
+        // Task 100: Map ToolUseBlock → Google functionCall part
+        case "tool_use":
+          return {
+            functionCall: {
+              name: block.name,
+              args: block.input
+            }
+          };
+        // Task 100: Map ToolResultBlock → Google functionResponse part
+        case "tool_result":
+          return {
+            functionResponse: {
+              name: block.tool_use_id,
+              response: { content: block.content }
+            }
+          };
+        default:
+          return { text: "[unsupported content type]" };
+      }
+    });
+  }
+};
+export {
+  GoogleProvider
+};
+//# sourceMappingURL=index.js.map
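A hand-worked example (derived from the code above, not captured from a run) of what `toGoogleTools`, `mapSchemaType`, and `toGoogleSchema` build from the hypothetical `get_weather` schema sketched earlier: type strings become `SchemaType` members, `properties` are mapped recursively, and `required` is copied through.

```ts
import { SchemaType } from '@google/generative-ai';

// Canonical, JSON-Schema-like input ...
const inputSchema = {
  type: 'object',
  properties: { city: { type: 'string', description: 'City name' } },
  required: ['city'],
};

// ... and the parameters object the helpers above would produce for the
// corresponding functionDeclarations entry.
const expectedParameters = {
  type: SchemaType.OBJECT,
  properties: {
    city: { type: SchemaType.STRING, description: 'City name' },
  },
  required: ['city'],
};
```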
package/dist/google/index.js.map
ADDED
@@ -0,0 +1 @@
+{"version":3,"sources":["../../src/google/index.ts"],"sourcesContent":["…"],"mappings":"…","names":[]}
package/dist/index.d.ts
ADDED
@@ -0,0 +1,60 @@
+import { AIProvider, CreateMessageParams, StreamEvent } from '@yolo-labs/core-types';
+
+/** Predicate that decides whether a given error should trigger failover to the next provider. */
+interface FailoverTrigger {
+    (error: unknown): boolean;
+}
+/**
+ * Configuration options for the FailoverRouter.
+ *
+ * @remarks
+ * Providers are tried in order. Each provider is retried up to
+ * `maxRetriesPerProvider` times with exponential backoff before
+ * failing over to the next. Supply `shouldFailover` for custom
+ * error classification; the default triggers on network errors,
+ * rate limits (429), and server errors (5xx).
+ */
+interface FailoverRouterOptions {
+    providers: AIProvider[];
+    maxRetriesPerProvider?: number;
+    backoffMs?: number;
+    backoffMultiplier?: number;
+    shouldFailover?: FailoverTrigger;
+}
+/**
+ * Transparent failover across multiple AI providers with retry and exponential backoff.
+ *
+ * @remarks
+ * Implements {@link AIProvider}, so it can be used as a drop-in replacement
+ * anywhere a single provider is expected. Non-recoverable errors (as
+ * determined by the `shouldFailover` predicate) are thrown immediately
+ * without trying additional providers.
+ *
+ * @example
+ * ```ts
+ * const router = new FailoverRouter({
+ *   providers: [anthropic, openai, google],
+ *   maxRetriesPerProvider: 2,
+ * });
+ * for await (const event of router.createMessage(params)) { ... }
+ * ```
+ */
+declare class FailoverRouter implements AIProvider {
+    private providers;
+    private maxRetriesPerProvider;
+    private backoffMs;
+    private backoffMultiplier;
+    private shouldFailover;
+    /** Creates a new FailoverRouter; requires at least one provider. */
+    constructor(options: FailoverRouterOptions);
+    /**
+     * Streams a message, failing over to the next provider on recoverable errors.
+     *
+     * @param params - Canonical message creation parameters.
+     * @returns An async iterable of canonical {@link StreamEvent} objects.
+     * @throws The last error encountered if all providers and retries are exhausted.
+     */
+    createMessage(params: CreateMessageParams): AsyncIterable<StreamEvent>;
+}
+
+export { FailoverRouter, type FailoverRouterOptions, type FailoverTrigger };
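Beyond the `@example` above, the `shouldFailover` hook accepts any `FailoverTrigger`. A sketch of a stricter, status-code-only trigger follows; it assumes the package root export maps to `dist/index.js` and that the individual providers are constructed elsewhere.

```ts
import { FailoverRouter, type FailoverTrigger } from '@yolo-labs/core-providers';
import type { AIProvider } from '@yolo-labs/core-types';

declare const anthropic: AIProvider; // providers built elsewhere
declare const openai: AIProvider;

// Fail over only on HTTP-style status codes, never on message-text heuristics.
const statusOnly: FailoverTrigger = (error) => {
  const status = (error as { status?: number }).status;
  return status === 429 || (typeof status === 'number' && status >= 500);
};

const router = new FailoverRouter({
  providers: [anthropic, openai],
  maxRetriesPerProvider: 2,
  shouldFailover: statusOnly,
});
```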
package/dist/index.js
ADDED
@@ -0,0 +1,81 @@
+// src/failover-router.ts
+var defaultFailoverTrigger = (error) => {
+  if (error instanceof Error) {
+    const msg = error.message.toLowerCase();
+    if (msg.includes("econnrefused") || msg.includes("enotfound") || msg.includes("timeout")) {
+      return true;
+    }
+    if (msg.includes("429") || msg.includes("rate limit")) return true;
+    if (msg.includes("500") || msg.includes("502") || msg.includes("503") || msg.includes("504")) {
+      return true;
+    }
+    if (msg.includes("internal server error") || msg.includes("service unavailable")) {
+      return true;
+    }
+  }
+  const statusError = error;
+  if (statusError.status && (statusError.status === 429 || statusError.status >= 500)) {
+    return true;
+  }
+  return false;
+};
+var FailoverRouter = class {
+  providers;
+  maxRetriesPerProvider;
+  backoffMs;
+  backoffMultiplier;
+  shouldFailover;
+  /** Creates a new FailoverRouter; requires at least one provider. */
+  constructor(options) {
+    if (options.providers.length === 0) {
+      throw new Error("FailoverRouter requires at least one provider");
+    }
+    this.providers = options.providers;
+    this.maxRetriesPerProvider = options.maxRetriesPerProvider ?? 1;
+    this.backoffMs = options.backoffMs ?? 1e3;
+    this.backoffMultiplier = options.backoffMultiplier ?? 2;
+    this.shouldFailover = options.shouldFailover ?? defaultFailoverTrigger;
+  }
+  /**
+   * Streams a message, failing over to the next provider on recoverable errors.
+   *
+   * @param params - Canonical message creation parameters.
+   * @returns An async iterable of canonical {@link StreamEvent} objects.
+   * @throws The last error encountered if all providers and retries are exhausted.
+   */
+  async *createMessage(params) {
+    let lastError;
+    for (let providerIndex = 0; providerIndex < this.providers.length; providerIndex++) {
+      const provider = this.providers[providerIndex];
+      let retries = 0;
+      while (retries <= this.maxRetriesPerProvider) {
+        try {
+          const stream = provider.createMessage(params);
+          const events = [];
+          for await (const event of stream) {
+            yield event;
+          }
+          return;
+        } catch (err) {
+          lastError = err;
+          if (!this.shouldFailover(err)) {
+            throw err;
+          }
+          retries++;
+          if (retries <= this.maxRetriesPerProvider) {
+            const delay = this.backoffMs * Math.pow(this.backoffMultiplier, retries - 1);
+            await sleep(delay);
+          }
+        }
+      }
+    }
+    throw lastError ?? new Error("All providers failed");
+  }
+};
+function sleep(ms) {
+  return new Promise((resolve) => setTimeout(resolve, ms));
+}
+export {
+  FailoverRouter
+};
+//# sourceMappingURL=index.js.map
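A worked example of the backoff arithmetic in the retry loop above, using the shipped defaults (`backoffMs` 1000, `backoffMultiplier` 2): the delay before retry n is `backoffMs * backoffMultiplier^(n - 1)`.

```ts
// With the default maxRetriesPerProvider of 1, each provider gets an initial
// attempt plus one retry after a 1000 ms sleep before the router moves on.
// With maxRetriesPerProvider set to 3, the waits grow to 1000, 2000, 4000 ms.
const backoffMs = 1000;
const backoffMultiplier = 2;
const delays = [1, 2, 3].map((retry) => backoffMs * Math.pow(backoffMultiplier, retry - 1));
// delays === [1000, 2000, 4000]
```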
package/dist/index.js.map
ADDED
@@ -0,0 +1 @@
+{"version":3,"sources":["../src/failover-router.ts"],"sourcesContent":["…"],"mappings":"…","names":[]}
package/dist/openai/index.d.ts
ADDED
@@ -0,0 +1,36 @@
+import { AIProvider, CreateMessageParams, StreamEvent } from '@yolo-labs/core-types';
+
+/** Configuration options for the OpenAI provider. */
+interface OpenAIProviderOptions {
+    apiKey?: string;
+    baseURL?: string;
+    organization?: string;
+    defaultModel?: string;
+}
+/**
+ * {@link AIProvider} adapter for the OpenAI chat completions API.
+ *
+ * @remarks
+ * Translates canonical {@link CreateMessageParams} into OpenAI chat completion
+ * calls and maps streaming chunks back to canonical {@link StreamEvent} types.
+ * Handles OpenAI's `tool_calls` response format and finish reason mapping.
+ * Requires `openai` as a peer dependency.
+ */
+declare class OpenAIProvider implements AIProvider {
+    private client;
+    private defaultModel;
+    /** Creates a new OpenAIProvider with the given options. */
+    constructor(options?: OpenAIProviderOptions);
+    /**
+     * Creates a streaming message using the OpenAI SDK.
+     *
+     * @param params - Canonical message creation parameters.
+     * @returns An async iterable of canonical {@link StreamEvent} objects.
+     */
+    createMessage(params: CreateMessageParams): AsyncIterable<StreamEvent>;
+    private mapFinishReason;
+    private toOpenAITool;
+    private toOpenAIMessages;
+}
+
+export { OpenAIProvider, type OpenAIProviderOptions };
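A construction sketch showing the OpenAI-specific options (`organization`, and `baseURL` for any OpenAI-compatible endpoint, since it is passed straight through to the OpenAI client). The subpath import, environment variable names, and gateway URL are placeholders, not values taken from the package; the compiled adapter follows.

```ts
// Sketch only; see the placeholder caveats above.
import { OpenAIProvider } from '@yolo-labs/core-providers/openai';

const provider = new OpenAIProvider({
  apiKey: process.env.OPENAI_API_KEY,
  organization: process.env.OPENAI_ORG_ID,
  baseURL: 'https://example-gateway.internal/v1', // forwarded to the OpenAI client as-is
  defaultModel: 'gpt-4o',
});
```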
package/dist/openai/index.js
ADDED
@@ -0,0 +1,252 @@
+// src/openai/index.ts
+import OpenAI from "openai";
+var OpenAIProvider = class {
+  client;
+  defaultModel;
+  /** Creates a new OpenAIProvider with the given options. */
+  constructor(options = {}) {
+    this.client = new OpenAI({
+      apiKey: options.apiKey,
+      baseURL: options.baseURL,
+      organization: options.organization
+    });
+    this.defaultModel = options.defaultModel ?? "gpt-4o";
+  }
+  /**
+   * Creates a streaming message using the OpenAI SDK.
+   *
+   * @param params - Canonical message creation parameters.
+   * @returns An async iterable of canonical {@link StreamEvent} objects.
+   */
+  async *createMessage(params) {
+    const model = params.model || this.defaultModel;
+    const messages = [];
+    if (params.system) {
+      messages.push({ role: "system", content: params.system });
+    }
+    for (const msg of params.messages) {
+      messages.push(...this.toOpenAIMessages(msg));
+    }
+    const tools = params.tools?.map((t) => this.toOpenAITool(t));
+    const requestParams = {
+      model,
+      messages,
+      stream: true,
+      stream_options: { include_usage: true }
+    };
+    if (params.maxTokens) {
+      requestParams.max_tokens = params.maxTokens;
+    }
+    if (tools && tools.length > 0) {
+      requestParams.tools = tools;
+    }
+    if (params.temperature !== void 0) {
+      requestParams.temperature = params.temperature;
+    }
+    if (params.topP !== void 0) {
+      requestParams.top_p = params.topP;
+    }
+    if (params.stopSequences && params.stopSequences.length > 0) {
+      requestParams.stop = params.stopSequences;
+    }
+    const messageId = `msg_${Date.now()}`;
+    yield {
+      type: "message_start",
+      message: {
+        id: messageId,
+        model,
+        role: "assistant"
+      }
+    };
+    const stream = await this.client.chat.completions.create(requestParams);
+    let contentBlockIndex = 0;
+    let textBlockStarted = false;
+    const toolCallBlocks = /* @__PURE__ */ new Map();
+    let finishReason = null;
+    for await (const chunk of stream) {
+      const choice = chunk.choices?.[0];
+      if (!choice) {
+        if (chunk.usage) {
+          yield {
+            type: "message_delta",
+            delta: { stop_reason: finishReason },
+            usage: { output_tokens: chunk.usage.completion_tokens }
+          };
+        }
+        continue;
+      }
+      const delta = choice.delta;
+      if (delta?.content) {
+        if (!textBlockStarted) {
+          yield {
+            type: "content_block_start",
+            index: contentBlockIndex,
+            contentBlock: { type: "text" }
+          };
+          textBlockStarted = true;
+        }
+        yield {
+          type: "content_block_delta",
+          index: contentBlockIndex,
+          delta: { type: "text_delta", text: delta.content }
+        };
+      }
+      if (delta?.tool_calls) {
+        for (const tc of delta.tool_calls) {
+          const tcIndex = tc.index;
+          if (!toolCallBlocks.has(tcIndex)) {
+            if (textBlockStarted) {
+              yield { type: "content_block_stop", index: contentBlockIndex };
+              contentBlockIndex++;
+              textBlockStarted = false;
+            }
+            toolCallBlocks.set(tcIndex, {
+              id: tc.id ?? "",
+              name: tc.function?.name ?? "",
+              started: false
+            });
+          }
+          const block = toolCallBlocks.get(tcIndex);
+          if (tc.id) block.id = tc.id;
+          if (tc.function?.name) block.name = tc.function.name;
+          if (!block.started && block.id && block.name) {
+            yield {
+              type: "content_block_start",
+              index: contentBlockIndex + tcIndex,
+              contentBlock: {
+                type: "tool_use",
+                id: block.id,
+                name: block.name
+              }
+            };
+            block.started = true;
+          }
+          if (tc.function?.arguments) {
+            yield {
+              type: "content_block_delta",
+              index: contentBlockIndex + tcIndex,
+              delta: {
+                type: "input_json_delta",
+                partial_json: tc.function.arguments
+              }
+            };
+          }
+        }
+      }
+      if (choice.finish_reason) {
+        finishReason = this.mapFinishReason(choice.finish_reason);
+        if (textBlockStarted) {
+          yield { type: "content_block_stop", index: contentBlockIndex };
+          contentBlockIndex++;
+          textBlockStarted = false;
+        }
+        for (const [tcIdx, block] of toolCallBlocks) {
+          if (block.started) {
+            yield { type: "content_block_stop", index: contentBlockIndex + tcIdx };
+          }
+        }
+      }
+    }
+    if (finishReason) {
+      yield {
+        type: "message_delta",
+        delta: { stop_reason: finishReason }
+      };
+    }
+    yield { type: "message_stop" };
+  }
+  mapFinishReason(reason) {
+    switch (reason) {
+      case "stop":
+        return "end_turn";
+      case "tool_calls":
+        return "tool_use";
+      case "length":
+        return "max_tokens";
+      case "content_filter":
+        return "content_filter";
+      default:
+        return reason;
+    }
+  }
+  // 48c: Translate canonical ToolDefinition → OpenAI tool format
+  toOpenAITool(tool) {
+    return {
+      type: "function",
+      function: {
+        name: tool.name,
+        description: tool.description,
+        parameters: tool.inputSchema
+      }
+    };
+  }
+  // 48e: Map multimodal content blocks to OpenAI message format
+  // Task 99: Returns an array because ToolResultBlocks expand into multiple 'tool' role messages
+  toOpenAIMessages(message) {
+    if (typeof message.content === "string") {
+      return [{ role: message.role === "user" ? "user" : "assistant", content: message.content }];
+    }
+    if (message.role === "assistant") {
+      const textBlocks = message.content.filter((b) => b.type === "text");
+      const toolUseBlocks = message.content.filter((b) => b.type === "tool_use");
+      const text = textBlocks.filter((b) => b.type === "text").map((b) => b.text).join("");
+      if (toolUseBlocks.length > 0) {
+        const toolCalls = toolUseBlocks.filter((b) => b.type === "tool_use").map((b) => ({
+          id: b.id,
+          type: "function",
+          function: {
+            name: b.name,
+            arguments: JSON.stringify(b.input)
+          }
+        }));
+        return [{
+          role: "assistant",
+          content: text || null,
+          tool_calls: toolCalls
+        }];
+      }
+      return [{ role: "assistant", content: text }];
+    }
+    const toolResultBlocks = message.content.filter((b) => b.type === "tool_result");
+    if (toolResultBlocks.length > 0) {
+      return toolResultBlocks.filter((b) => b.type === "tool_result").map((b) => ({
+        role: "tool",
+        tool_call_id: b.tool_use_id,
+        content: b.content
+      }));
+    }
+    const parts = message.content.map(
+      (block) => {
+        switch (block.type) {
+          case "text":
+            return { type: "text", text: block.text };
+          case "image":
+            if (block.source.type === "base64") {
+              return {
+                type: "image_url",
+                image_url: {
+                  url: `data:${block.source.media_type};base64,${block.source.data}`
+                }
+              };
+            }
+            return {
+              type: "image_url",
+              image_url: { url: block.source.url }
+            };
+          case "file":
+            return {
+              type: "text",
+              text: `[File: ${block.source.name ?? "attachment"} (${block.source.media_type})]`
+            };
+          default:
+            return { type: "text", text: "[unsupported content type]" };
+        }
+      }
+    );
+    return [{ role: "user", content: parts }];
+  }
+};
+export {
+  OpenAIProvider
+};
+//# sourceMappingURL=index.js.map
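The adapter above streams OpenAI tool-call arguments as `input_json_delta` events carrying `partial_json` fragments, so a consumer has to concatenate the fragments per block index and parse them once the block stops. A hedged sketch of that reassembly (event typing is loosened for brevity):

// Sketch: rebuild complete tool calls from the canonical events emitted by the adapter above.
const pendingArgs = new Map<number, string>();
const pendingNames = new Map<number, string>();

function onStreamEvent(event: any): void {
  if (event.type === 'content_block_start' && event.contentBlock?.type === 'tool_use') {
    pendingNames.set(event.index, event.contentBlock.name);
    pendingArgs.set(event.index, '');
  } else if (event.type === 'content_block_delta' && event.delta?.type === 'input_json_delta') {
    pendingArgs.set(event.index, (pendingArgs.get(event.index) ?? '') + event.delta.partial_json);
  } else if (event.type === 'content_block_stop' && pendingArgs.has(event.index)) {
    const name = pendingNames.get(event.index);
    const input = JSON.parse(pendingArgs.get(event.index) || '{}');
    console.log('tool call:', name, input);
    pendingArgs.delete(event.index);
    pendingNames.delete(event.index);
  }
}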
package/dist/openai/index.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"sources":["../../src/openai/index.ts"],"sourcesContent":["// Task 48: OpenAIProvider — adapter for openai SDK\n\nimport OpenAI from 'openai';\nimport type {\n AIProvider,\n CreateMessageParams,\n StreamEvent,\n ToolDefinition,\n ContentBlock,\n Message,\n} from '@yolo-labs/core-types';\n\n/** Configuration options for the OpenAI provider. */\nexport interface OpenAIProviderOptions {\n apiKey?: string;\n baseURL?: string;\n organization?: string;\n defaultModel?: string;\n}\n\n/**\n * {@link AIProvider} adapter for the OpenAI chat completions API.\n *\n * @remarks\n * Translates canonical {@link CreateMessageParams} into OpenAI chat completion\n * calls and maps streaming chunks back to canonical {@link StreamEvent} types.\n * Handles OpenAI's `tool_calls` response format and finish reason mapping.\n * Requires `openai` as a peer dependency.\n */\nexport class OpenAIProvider implements AIProvider {\n private client: OpenAI;\n private defaultModel: string;\n\n /** Creates a new OpenAIProvider with the given options. */\n constructor(options: OpenAIProviderOptions = {}) {\n this.client = new OpenAI({\n apiKey: options.apiKey,\n baseURL: options.baseURL,\n organization: options.organization,\n });\n this.defaultModel = options.defaultModel ?? 'gpt-4o';\n }\n\n /**\n * Creates a streaming message using the OpenAI SDK.\n *\n * @param params - Canonical message creation parameters.\n * @returns An async iterable of canonical {@link StreamEvent} objects.\n */\n async *createMessage(params: CreateMessageParams): AsyncIterable<StreamEvent> {\n const model = params.model || this.defaultModel;\n\n // Build OpenAI messages array\n const messages: OpenAI.ChatCompletionMessageParam[] = [];\n\n // System message\n if (params.system) {\n messages.push({ role: 'system', content: params.system });\n }\n\n // 48e: Map multimodal content blocks to OpenAI content array format\n // Task 99: toOpenAIMessages returns an array (ToolResultBlocks expand to multiple messages)\n for (const msg of params.messages) {\n messages.push(...this.toOpenAIMessages(msg));\n }\n\n // 48c: Translate canonical ToolDefinition → OpenAI tools with function.parameters\n const tools = params.tools?.map((t) => this.toOpenAITool(t));\n\n const requestParams: OpenAI.ChatCompletionCreateParamsStreaming = {\n model,\n messages,\n stream: true,\n stream_options: { include_usage: true },\n };\n\n if (params.maxTokens) {\n requestParams.max_tokens = params.maxTokens;\n }\n if (tools && tools.length > 0) {\n requestParams.tools = tools;\n }\n if (params.temperature !== undefined) {\n requestParams.temperature = params.temperature;\n }\n if (params.topP !== undefined) {\n requestParams.top_p = params.topP;\n }\n if (params.stopSequences && params.stopSequences.length > 0) {\n requestParams.stop = params.stopSequences;\n }\n\n // Emit message_start\n const messageId = `msg_${Date.now()}`;\n yield {\n type: 'message_start',\n message: {\n id: messageId,\n model,\n role: 'assistant',\n },\n };\n\n // 48b: Map OpenAI stream chunks to canonical StreamEvent types\n const stream = await this.client.chat.completions.create(requestParams);\n\n let contentBlockIndex = 0;\n let textBlockStarted = false;\n // 48d: Track tool calls by index\n const toolCallBlocks = new Map<number, { id: string; name: string; started: boolean }>();\n let finishReason: string | null = null;\n\n for await (const chunk of stream) {\n const choice = chunk.choices?.[0];\n if (!choice) {\n // Usage-only chunk at the end\n if (chunk.usage) {\n yield {\n type: 
'message_delta',\n delta: { stop_reason: finishReason },\n usage: { output_tokens: chunk.usage.completion_tokens },\n };\n }\n continue;\n }\n\n const delta = choice.delta;\n\n // Handle text content\n if (delta?.content) {\n if (!textBlockStarted) {\n yield {\n type: 'content_block_start',\n index: contentBlockIndex,\n contentBlock: { type: 'text' },\n };\n textBlockStarted = true;\n }\n yield {\n type: 'content_block_delta',\n index: contentBlockIndex,\n delta: { type: 'text_delta', text: delta.content },\n };\n }\n\n // 48d: Handle OpenAI's tool_calls response format\n if (delta?.tool_calls) {\n for (const tc of delta.tool_calls) {\n const tcIndex = tc.index;\n\n if (!toolCallBlocks.has(tcIndex)) {\n // Close text block if it was open\n if (textBlockStarted) {\n yield { type: 'content_block_stop', index: contentBlockIndex };\n contentBlockIndex++;\n textBlockStarted = false;\n }\n\n toolCallBlocks.set(tcIndex, {\n id: tc.id ?? '',\n name: tc.function?.name ?? '',\n started: false,\n });\n }\n\n const block = toolCallBlocks.get(tcIndex)!;\n\n // Update id/name if provided in this chunk\n if (tc.id) block.id = tc.id;\n if (tc.function?.name) block.name = tc.function.name;\n\n if (!block.started && block.id && block.name) {\n yield {\n type: 'content_block_start',\n index: contentBlockIndex + tcIndex,\n contentBlock: {\n type: 'tool_use',\n id: block.id,\n name: block.name,\n },\n };\n block.started = true;\n }\n\n // Stream function arguments as input_json_delta\n if (tc.function?.arguments) {\n yield {\n type: 'content_block_delta',\n index: contentBlockIndex + tcIndex,\n delta: {\n type: 'input_json_delta',\n partial_json: tc.function.arguments,\n },\n };\n }\n }\n }\n\n // Finish reason\n if (choice.finish_reason) {\n finishReason = this.mapFinishReason(choice.finish_reason);\n\n // Close any open blocks\n if (textBlockStarted) {\n yield { type: 'content_block_stop', index: contentBlockIndex };\n contentBlockIndex++;\n textBlockStarted = false;\n }\n for (const [tcIdx, block] of toolCallBlocks) {\n if (block.started) {\n yield { type: 'content_block_stop', index: contentBlockIndex + tcIdx };\n }\n }\n }\n }\n\n // Emit message_delta with stop reason if not already emitted by usage chunk\n if (finishReason) {\n yield {\n type: 'message_delta',\n delta: { stop_reason: finishReason },\n };\n }\n\n yield { type: 'message_stop' };\n }\n\n private mapFinishReason(reason: string): string {\n switch (reason) {\n case 'stop':\n return 'end_turn';\n case 'tool_calls':\n return 'tool_use';\n case 'length':\n return 'max_tokens';\n case 'content_filter':\n return 'content_filter';\n default:\n return reason;\n }\n }\n\n // 48c: Translate canonical ToolDefinition → OpenAI tool format\n private toOpenAITool(\n tool: ToolDefinition,\n ): OpenAI.ChatCompletionTool {\n return {\n type: 'function',\n function: {\n name: tool.name,\n description: tool.description,\n parameters: tool.inputSchema as Record<string, unknown>,\n },\n };\n }\n\n // 48e: Map multimodal content blocks to OpenAI message format\n // Task 99: Returns an array because ToolResultBlocks expand into multiple 'tool' role messages\n private toOpenAIMessages(\n message: Message,\n ): OpenAI.ChatCompletionMessageParam[] {\n if (typeof message.content === 'string') {\n return [{ role: message.role === 'user' ? 
'user' : 'assistant', content: message.content }];\n }\n\n if (message.role === 'assistant') {\n // Task 99: Assistant messages with ToolUseBlock → set tool_calls array\n const textBlocks = message.content.filter((b) => b.type === 'text');\n const toolUseBlocks = message.content.filter((b) => b.type === 'tool_use');\n\n const text = textBlocks\n .filter((b): b is { type: 'text'; text: string } => b.type === 'text')\n .map((b) => b.text)\n .join('');\n\n if (toolUseBlocks.length > 0) {\n const toolCalls: OpenAI.ChatCompletionMessageToolCall[] = toolUseBlocks\n .filter((b): b is import('@yolo-labs/core-types').ToolUseBlock => b.type === 'tool_use')\n .map((b) => ({\n id: b.id,\n type: 'function' as const,\n function: {\n name: b.name,\n arguments: JSON.stringify(b.input),\n },\n }));\n return [{\n role: 'assistant' as const,\n content: text || null,\n tool_calls: toolCalls,\n }];\n }\n\n return [{ role: 'assistant' as const, content: text }];\n }\n\n // Task 99: User messages with ToolResultBlock → emit as separate 'tool' role messages\n const toolResultBlocks = message.content.filter((b) => b.type === 'tool_result');\n if (toolResultBlocks.length > 0) {\n return toolResultBlocks\n .filter((b): b is import('@yolo-labs/core-types').ToolResultBlock => b.type === 'tool_result')\n .map((b) => ({\n role: 'tool' as const,\n tool_call_id: b.tool_use_id,\n content: b.content,\n }));\n }\n\n // User messages support multimodal content\n const parts: OpenAI.ChatCompletionContentPart[] = message.content.map(\n (block: ContentBlock) => {\n switch (block.type) {\n case 'text':\n return { type: 'text' as const, text: block.text };\n case 'image':\n if (block.source.type === 'base64') {\n return {\n type: 'image_url' as const,\n image_url: {\n url: `data:${block.source.media_type};base64,${block.source.data}`,\n },\n };\n }\n return {\n type: 'image_url' as const,\n image_url: { url: block.source.url },\n };\n case 'file':\n // OpenAI doesn't natively support file blocks, convert to text reference\n return {\n type: 'text' as const,\n text: `[File: ${block.source.name ?? 
'attachment'} (${block.source.media_type})]`,\n };\n default:\n return { type: 'text' as const, text: '[unsupported content type]' };\n }\n },\n );\n\n return [{ role: 'user', content: parts }];\n }\n}\n"],"mappings":";AAEA,OAAO,YAAY;AA2BZ,IAAM,iBAAN,MAA2C;AAAA,EACxC;AAAA,EACA;AAAA;AAAA,EAGR,YAAY,UAAiC,CAAC,GAAG;AAC/C,SAAK,SAAS,IAAI,OAAO;AAAA,MACvB,QAAQ,QAAQ;AAAA,MAChB,SAAS,QAAQ;AAAA,MACjB,cAAc,QAAQ;AAAA,IACxB,CAAC;AACD,SAAK,eAAe,QAAQ,gBAAgB;AAAA,EAC9C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,OAAO,cAAc,QAAyD;AAC5E,UAAM,QAAQ,OAAO,SAAS,KAAK;AAGnC,UAAM,WAAgD,CAAC;AAGvD,QAAI,OAAO,QAAQ;AACjB,eAAS,KAAK,EAAE,MAAM,UAAU,SAAS,OAAO,OAAO,CAAC;AAAA,IAC1D;AAIA,eAAW,OAAO,OAAO,UAAU;AACjC,eAAS,KAAK,GAAG,KAAK,iBAAiB,GAAG,CAAC;AAAA,IAC7C;AAGA,UAAM,QAAQ,OAAO,OAAO,IAAI,CAAC,MAAM,KAAK,aAAa,CAAC,CAAC;AAE3D,UAAM,gBAA4D;AAAA,MAChE;AAAA,MACA;AAAA,MACA,QAAQ;AAAA,MACR,gBAAgB,EAAE,eAAe,KAAK;AAAA,IACxC;AAEA,QAAI,OAAO,WAAW;AACpB,oBAAc,aAAa,OAAO;AAAA,IACpC;AACA,QAAI,SAAS,MAAM,SAAS,GAAG;AAC7B,oBAAc,QAAQ;AAAA,IACxB;AACA,QAAI,OAAO,gBAAgB,QAAW;AACpC,oBAAc,cAAc,OAAO;AAAA,IACrC;AACA,QAAI,OAAO,SAAS,QAAW;AAC7B,oBAAc,QAAQ,OAAO;AAAA,IAC/B;AACA,QAAI,OAAO,iBAAiB,OAAO,cAAc,SAAS,GAAG;AAC3D,oBAAc,OAAO,OAAO;AAAA,IAC9B;AAGA,UAAM,YAAY,OAAO,KAAK,IAAI,CAAC;AACnC,UAAM;AAAA,MACJ,MAAM;AAAA,MACN,SAAS;AAAA,QACP,IAAI;AAAA,QACJ;AAAA,QACA,MAAM;AAAA,MACR;AAAA,IACF;AAGA,UAAM,SAAS,MAAM,KAAK,OAAO,KAAK,YAAY,OAAO,aAAa;AAEtE,QAAI,oBAAoB;AACxB,QAAI,mBAAmB;AAEvB,UAAM,iBAAiB,oBAAI,IAA4D;AACvF,QAAI,eAA8B;AAElC,qBAAiB,SAAS,QAAQ;AAChC,YAAM,SAAS,MAAM,UAAU,CAAC;AAChC,UAAI,CAAC,QAAQ;AAEX,YAAI,MAAM,OAAO;AACf,gBAAM;AAAA,YACJ,MAAM;AAAA,YACN,OAAO,EAAE,aAAa,aAAa;AAAA,YACnC,OAAO,EAAE,eAAe,MAAM,MAAM,kBAAkB;AAAA,UACxD;AAAA,QACF;AACA;AAAA,MACF;AAEA,YAAM,QAAQ,OAAO;AAGrB,UAAI,OAAO,SAAS;AAClB,YAAI,CAAC,kBAAkB;AACrB,gBAAM;AAAA,YACJ,MAAM;AAAA,YACN,OAAO;AAAA,YACP,cAAc,EAAE,MAAM,OAAO;AAAA,UAC/B;AACA,6BAAmB;AAAA,QACrB;AACA,cAAM;AAAA,UACJ,MAAM;AAAA,UACN,OAAO;AAAA,UACP,OAAO,EAAE,MAAM,cAAc,MAAM,MAAM,QAAQ;AAAA,QACnD;AAAA,MACF;AAGA,UAAI,OAAO,YAAY;AACrB,mBAAW,MAAM,MAAM,YAAY;AACjC,gBAAM,UAAU,GAAG;AAEnB,cAAI,CAAC,eAAe,IAAI,OAAO,GAAG;AAEhC,gBAAI,kBAAkB;AACpB,oBAAM,EAAE,MAAM,sBAAsB,OAAO,kBAAkB;AAC7D;AACA,iCAAmB;AAAA,YACrB;AAEA,2BAAe,IAAI,SAAS;AAAA,cAC1B,IAAI,GAAG,MAAM;AAAA,cACb,MAAM,GAAG,UAAU,QAAQ;AAAA,cAC3B,SAAS;AAAA,YACX,CAAC;AAAA,UACH;AAEA,gBAAM,QAAQ,eAAe,IAAI,OAAO;AAGxC,cAAI,GAAG,GAAI,OAAM,KAAK,GAAG;AACzB,cAAI,GAAG,UAAU,KAAM,OAAM,OAAO,GAAG,SAAS;AAEhD,cAAI,CAAC,MAAM,WAAW,MAAM,MAAM,MAAM,MAAM;AAC5C,kBAAM;AAAA,cACJ,MAAM;AAAA,cACN,OAAO,oBAAoB;AAAA,cAC3B,cAAc;AAAA,gBACZ,MAAM;AAAA,gBACN,IAAI,MAAM;AAAA,gBACV,MAAM,MAAM;AAAA,cACd;AAAA,YACF;AACA,kBAAM,UAAU;AAAA,UAClB;AAGA,cAAI,GAAG,UAAU,WAAW;AAC1B,kBAAM;AAAA,cACJ,MAAM;AAAA,cACN,OAAO,oBAAoB;AAAA,cAC3B,OAAO;AAAA,gBACL,MAAM;AAAA,gBACN,cAAc,GAAG,SAAS;AAAA,cAC5B;AAAA,YACF;AAAA,UACF;AAAA,QACF;AAAA,MACF;AAGA,UAAI,OAAO,eAAe;AACxB,uBAAe,KAAK,gBAAgB,OAAO,aAAa;AAGxD,YAAI,kBAAkB;AACpB,gBAAM,EAAE,MAAM,sBAAsB,OAAO,kBAAkB;AAC7D;AACA,6BAAmB;AAAA,QACrB;AACA,mBAAW,CAAC,OAAO,KAAK,KAAK,gBAAgB;AAC3C,cAAI,MAAM,SAAS;AACjB,kBAAM,EAAE,MAAM,sBAAsB,OAAO,oBAAoB,MAAM;AAAA,UACvE;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAGA,QAAI,cAAc;AAChB,YAAM;AAAA,QACJ,MAAM;AAAA,QACN,OAAO,EAAE,aAAa,aAAa;AAAA,MACrC;AAAA,IACF;AAEA,UAAM,EAAE,MAAM,eAAe;AAAA,EAC/B;AAAA,EAEQ,gBAAgB,QAAwB;AAC9C,YAAQ,QAAQ;AAAA,MACd,KAAK;AACH,eAAO;AAAA,MACT,KAAK;AACH,eAAO;AAAA,MACT,KAAK;AACH,eAAO;AAAA,MACT,KAAK;AACH,eAAO;AAAA,MACT;AACE,eAAO;AAAA,IACX;AAAA,EACF;AAAA;AAAA,EAGQ,aACN,MAC2B;AAC3B,WAAO;AAAA,MACL,MAAM;AAAA,MACN,UAAU;AAAA,QACR,MAAM,KAAK;AAAA,QACX,aAAa,KAAK;AAAA,QAClB,YAAY,KAAK;AAAA,MACnB;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAA
A,EAIQ,iBACN,SACqC;AACrC,QAAI,OAAO,QAAQ,YAAY,UAAU;AACvC,aAAO,CAAC,EAAE,MAAM,QAAQ,SAAS,SAAS,SAAS,aAAa,SAAS,QAAQ,QAAQ,CAAC;AAAA,IAC5F;AAEA,QAAI,QAAQ,SAAS,aAAa;AAEhC,YAAM,aAAa,QAAQ,QAAQ,OAAO,CAAC,MAAM,EAAE,SAAS,MAAM;AAClE,YAAM,gBAAgB,QAAQ,QAAQ,OAAO,CAAC,MAAM,EAAE,SAAS,UAAU;AAEzE,YAAM,OAAO,WACV,OAAO,CAAC,MAA2C,EAAE,SAAS,MAAM,EACpE,IAAI,CAAC,MAAM,EAAE,IAAI,EACjB,KAAK,EAAE;AAEV,UAAI,cAAc,SAAS,GAAG;AAC5B,cAAM,YAAoD,cACvD,OAAO,CAAC,MAAyD,EAAE,SAAS,UAAU,EACtF,IAAI,CAAC,OAAO;AAAA,UACX,IAAI,EAAE;AAAA,UACN,MAAM;AAAA,UACN,UAAU;AAAA,YACR,MAAM,EAAE;AAAA,YACR,WAAW,KAAK,UAAU,EAAE,KAAK;AAAA,UACnC;AAAA,QACF,EAAE;AACJ,eAAO,CAAC;AAAA,UACN,MAAM;AAAA,UACN,SAAS,QAAQ;AAAA,UACjB,YAAY;AAAA,QACd,CAAC;AAAA,MACH;AAEA,aAAO,CAAC,EAAE,MAAM,aAAsB,SAAS,KAAK,CAAC;AAAA,IACvD;AAGA,UAAM,mBAAmB,QAAQ,QAAQ,OAAO,CAAC,MAAM,EAAE,SAAS,aAAa;AAC/E,QAAI,iBAAiB,SAAS,GAAG;AAC/B,aAAO,iBACJ,OAAO,CAAC,MAA4D,EAAE,SAAS,aAAa,EAC5F,IAAI,CAAC,OAAO;AAAA,QACX,MAAM;AAAA,QACN,cAAc,EAAE;AAAA,QAChB,SAAS,EAAE;AAAA,MACb,EAAE;AAAA,IACN;AAGA,UAAM,QAA4C,QAAQ,QAAQ;AAAA,MAChE,CAAC,UAAwB;AACvB,gBAAQ,MAAM,MAAM;AAAA,UAClB,KAAK;AACH,mBAAO,EAAE,MAAM,QAAiB,MAAM,MAAM,KAAK;AAAA,UACnD,KAAK;AACH,gBAAI,MAAM,OAAO,SAAS,UAAU;AAClC,qBAAO;AAAA,gBACL,MAAM;AAAA,gBACN,WAAW;AAAA,kBACT,KAAK,QAAQ,MAAM,OAAO,UAAU,WAAW,MAAM,OAAO,IAAI;AAAA,gBAClE;AAAA,cACF;AAAA,YACF;AACA,mBAAO;AAAA,cACL,MAAM;AAAA,cACN,WAAW,EAAE,KAAK,MAAM,OAAO,IAAI;AAAA,YACrC;AAAA,UACF,KAAK;AAEH,mBAAO;AAAA,cACL,MAAM;AAAA,cACN,MAAM,UAAU,MAAM,OAAO,QAAQ,YAAY,KAAK,MAAM,OAAO,UAAU;AAAA,YAC/E;AAAA,UACF;AACE,mBAAO,EAAE,MAAM,QAAiB,MAAM,6BAA6B;AAAA,QACvE;AAAA,MACF;AAAA,IACF;AAEA,WAAO,CAAC,EAAE,MAAM,QAAQ,SAAS,MAAM,CAAC;AAAA,EAC1C;AACF;","names":[]}
package/package.json
ADDED
@@ -0,0 +1,73 @@
+{
+  "name": "@yolo-labs/core-providers",
+  "version": "1.0.0",
+  "type": "module",
+  "exports": {
+    ".": {
+      "types": "./dist/index.d.ts",
+      "import": "./dist/index.js"
+    },
+    "./anthropic": {
+      "types": "./dist/anthropic/index.d.ts",
+      "import": "./dist/anthropic/index.js"
+    },
+    "./openai": {
+      "types": "./dist/openai/index.d.ts",
+      "import": "./dist/openai/index.js"
+    },
+    "./google": {
+      "types": "./dist/google/index.d.ts",
+      "import": "./dist/google/index.js"
+    }
+  },
+  "main": "./dist/index.js",
+  "types": "./dist/index.d.ts",
+  "files": [
+    "dist"
+  ],
+  "dependencies": {
+    "@yolo-labs/core-types": "1.0.0"
+  },
+  "peerDependencies": {
+    "@anthropic-ai/sdk": ">=0.30.0",
+    "@google/generative-ai": ">=0.21.0",
+    "openai": ">=4.0.0"
+  },
+  "peerDependenciesMeta": {
+    "@anthropic-ai/sdk": {
+      "optional": true
+    },
+    "openai": {
+      "optional": true
+    },
+    "@google/generative-ai": {
+      "optional": true
+    }
+  },
+  "devDependencies": {
+    "@anthropic-ai/sdk": "^0.72.1",
+    "@google/generative-ai": "^0.24.1",
+    "openai": "^6.17.0",
+    "tsup": "^8.0.0",
+    "typescript": "^5.5.0"
+  },
+  "description": "AI provider adapters (Anthropic, OpenAI, Google) with failover",
+  "license": "MIT",
+  "repository": {
+    "type": "git",
+    "url": "git+https://github.com/yolo-labs-hq/monorepo.git",
+    "directory": "yolo-core/packages/providers"
+  },
+  "publishConfig": {
+    "access": "public",
+    "registry": "https://registry.npmjs.org/"
+  },
+  "homepage": "https://github.com/yolo-labs-hq/monorepo/tree/main/yolo-core#readme",
+  "bugs": {
+    "url": "https://github.com/yolo-labs-hq/monorepo/issues"
+  },
+  "scripts": {
+    "build": "tsup",
+    "clean": "rm -rf dist *.tsbuildinfo"
+  }
+}
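Because every provider SDK above is declared as an optional peer dependency, a consumer only needs to install the SDK that matches the subpath it imports. A hedged sketch of lazily resolving one provider so the other SDKs never enter the module graph:

// Sketch: load the OpenAI adapter on demand; only the 'openai' peer dependency
// must be installed for this path (the Anthropic and Google SDKs stay optional).
async function loadOpenAIProvider() {
  const { OpenAIProvider } = await import('@yolo-labs/core-providers/openai');
  return new OpenAIProvider({ apiKey: process.env.OPENAI_API_KEY });
}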