praisonai 1.0.18 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agent/context.d.ts +68 -0
- package/dist/agent/context.js +119 -0
- package/dist/agent/enhanced.d.ts +92 -0
- package/dist/agent/enhanced.js +267 -0
- package/dist/agent/handoff.d.ts +82 -0
- package/dist/agent/handoff.js +124 -0
- package/dist/agent/router.d.ts +77 -0
- package/dist/agent/router.js +113 -0
- package/dist/agent/simple.d.ts +1 -1
- package/dist/agent/simple.js +40 -4
- package/dist/agent/types.js +2 -2
- package/dist/cli/index.d.ts +20 -0
- package/dist/cli/index.js +150 -0
- package/dist/db/index.d.ts +23 -0
- package/dist/db/index.js +72 -0
- package/dist/db/memory-adapter.d.ts +42 -0
- package/dist/db/memory-adapter.js +146 -0
- package/dist/db/types.d.ts +113 -0
- package/dist/db/types.js +5 -0
- package/dist/eval/index.d.ts +61 -0
- package/dist/eval/index.js +157 -0
- package/dist/guardrails/index.d.ts +82 -0
- package/dist/guardrails/index.js +202 -0
- package/dist/index.d.ts +16 -1
- package/dist/index.js +72 -1
- package/dist/knowledge/rag.d.ts +80 -0
- package/dist/knowledge/rag.js +147 -0
- package/dist/llm/openai.js +11 -3
- package/dist/llm/providers/anthropic.d.ts +33 -0
- package/dist/llm/providers/anthropic.js +291 -0
- package/dist/llm/providers/base.d.ts +25 -0
- package/dist/llm/providers/base.js +43 -0
- package/dist/llm/providers/google.d.ts +27 -0
- package/dist/llm/providers/google.js +275 -0
- package/dist/llm/providers/index.d.ts +43 -0
- package/dist/llm/providers/index.js +116 -0
- package/dist/llm/providers/openai.d.ts +18 -0
- package/dist/llm/providers/openai.js +203 -0
- package/dist/llm/providers/types.d.ts +94 -0
- package/dist/llm/providers/types.js +5 -0
- package/dist/observability/index.d.ts +86 -0
- package/dist/observability/index.js +166 -0
- package/dist/session/index.d.ts +111 -0
- package/dist/session/index.js +250 -0
- package/dist/skills/index.d.ts +70 -0
- package/dist/skills/index.js +233 -0
- package/dist/tools/decorator.d.ts +91 -0
- package/dist/tools/decorator.js +165 -0
- package/dist/tools/index.d.ts +2 -0
- package/dist/tools/index.js +3 -0
- package/dist/tools/mcpSse.d.ts +41 -0
- package/dist/tools/mcpSse.js +108 -0
- package/dist/workflows/index.d.ts +97 -0
- package/dist/workflows/index.js +216 -0
- package/package.json +6 -2
|
@@ -0,0 +1,291 @@
|
|
|
1
|
+
"use strict";
/**
 * Anthropic Provider - Implementation for the Anthropic Claude Messages API.
 *
 * Supports blocking generation, SSE streaming, and schema-guided JSON output,
 * including tool (function) calling in both the blocking and streaming paths.
 */
Object.defineProperty(exports, "__esModule", { value: true });
exports.AnthropicProvider = void 0;
const base_1 = require("./base");
// Pinned API version sent with every request (required header for the Messages API).
const ANTHROPIC_VERSION = '2023-06-01';
class AnthropicProvider extends base_1.BaseProvider {
    /**
     * @param {string} modelId - Claude model identifier (e.g. "claude-3-opus-...").
     * @param {object} [config] - Provider config; `apiKey` falls back to the
     *   ANTHROPIC_API_KEY env var, `baseUrl` to the public API endpoint.
     * @throws {Error} When no API key can be resolved.
     */
    constructor(modelId, config = {}) {
        super(modelId, config);
        this.providerId = 'anthropic';
        this.apiKey = config.apiKey || process.env.ANTHROPIC_API_KEY || '';
        this.baseUrl = config.baseUrl || 'https://api.anthropic.com';
        if (!this.apiKey) {
            throw new Error('ANTHROPIC_API_KEY is required');
        }
    }
    /** Build the common request headers for the Messages API. */
    buildHeaders() {
        return {
            'Content-Type': 'application/json',
            'x-api-key': this.apiKey,
            'anthropic-version': ANTHROPIC_VERSION,
        };
    }
    /**
     * Parse an error response and throw an Error tagged with the HTTP status.
     *
     * Attaching `status` lets BaseProvider.withRetry recognize retryable
     * failures (429 / 5xx). Previously streamText/generateObject threw
     * untagged errors, so generateObject's retry wrapper never retried them.
     *
     * @param {Response} response - A non-ok fetch Response.
     * @throws {Error} Always; message comes from the API body when parseable.
     */
    async raiseApiError(response) {
        const body = await response.json().catch(() => ({}));
        throw Object.assign(new Error(body.error?.message || `Anthropic API error: ${response.status}`), {
            status: response.status,
        });
    }
    /**
     * Single blocking completion. Collects text and tool_use blocks from the
     * response content and normalizes usage/finish-reason fields.
     */
    async generateText(options) {
        return this.withRetry(async () => {
            const { systemPrompt, messages } = this.extractSystemPrompt(options.messages);
            const response = await fetch(`${this.baseUrl}/v1/messages`, {
                method: 'POST',
                headers: this.buildHeaders(),
                body: JSON.stringify({
                    model: this.modelId,
                    max_tokens: options.maxTokens || 4096,
                    system: systemPrompt,
                    messages: this.formatMessages(messages),
                    temperature: options.temperature ?? 0.7,
                    tools: options.tools ? this.formatTools(options.tools) : undefined,
                    stop_sequences: options.stop,
                    top_p: options.topP,
                }),
            });
            if (!response.ok) {
                await this.raiseApiError(response);
            }
            const data = await response.json();
            let text = '';
            const toolCalls = [];
            for (const block of data.content || []) {
                if (block.type === 'text') {
                    text += block.text;
                }
                else if (block.type === 'tool_use') {
                    // Normalize Anthropic tool_use blocks to OpenAI-style tool calls.
                    toolCalls.push({
                        id: block.id,
                        type: 'function',
                        function: {
                            name: block.name,
                            arguments: JSON.stringify(block.input),
                        },
                    });
                }
            }
            return {
                text,
                toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
                usage: {
                    promptTokens: data.usage?.input_tokens || 0,
                    completionTokens: data.usage?.output_tokens || 0,
                    totalTokens: (data.usage?.input_tokens || 0) + (data.usage?.output_tokens || 0),
                },
                finishReason: this.mapStopReason(data.stop_reason),
                raw: data,
            };
        });
    }
    /**
     * Streaming completion over SSE. Yields { text } chunks as text deltas
     * arrive, accumulates tool calls across content blocks, and yields a final
     * chunk with finishReason/usage when the API sends message_delta.
     * Note: not wrapped in withRetry — a partially consumed stream cannot be
     * safely replayed.
     */
    async streamText(options) {
        const self = this;
        const { systemPrompt, messages } = this.extractSystemPrompt(options.messages);
        return {
            async *[Symbol.asyncIterator]() {
                const response = await fetch(`${self.baseUrl}/v1/messages`, {
                    method: 'POST',
                    headers: self.buildHeaders(),
                    body: JSON.stringify({
                        model: self.modelId,
                        max_tokens: options.maxTokens || 4096,
                        system: systemPrompt,
                        messages: self.formatMessages(messages),
                        temperature: options.temperature ?? 0.7,
                        tools: options.tools ? self.formatTools(options.tools) : undefined,
                        stream: true,
                    }),
                });
                if (!response.ok) {
                    await self.raiseApiError(response);
                }
                const reader = response.body?.getReader();
                if (!reader)
                    throw new Error('No response body');
                const decoder = new TextDecoder();
                let buffer = '';
                const toolCalls = [];
                let currentToolCall = null;
                while (true) {
                    const { done, value } = await reader.read();
                    if (done)
                        break;
                    buffer += decoder.decode(value, { stream: true });
                    // SSE frames are newline-delimited; keep the trailing partial line.
                    const lines = buffer.split('\n');
                    buffer = lines.pop() || '';
                    for (const line of lines) {
                        if (!line.startsWith('data: '))
                            continue;
                        const data = line.slice(6);
                        if (data === '[DONE]')
                            continue;
                        try {
                            const event = JSON.parse(data);
                            if (event.type === 'content_block_delta') {
                                if (event.delta?.type === 'text_delta') {
                                    const text = event.delta.text;
                                    if (options.onToken)
                                        options.onToken(text);
                                    yield { text };
                                }
                                else if (event.delta?.type === 'input_json_delta') {
                                    // Tool-call arguments stream as partial JSON fragments.
                                    if (currentToolCall) {
                                        currentToolCall.function.arguments += event.delta.partial_json;
                                    }
                                }
                            }
                            else if (event.type === 'content_block_start') {
                                if (event.content_block?.type === 'tool_use') {
                                    currentToolCall = {
                                        id: event.content_block.id,
                                        type: 'function',
                                        function: {
                                            name: event.content_block.name,
                                            arguments: '',
                                        },
                                    };
                                }
                            }
                            else if (event.type === 'content_block_stop') {
                                if (currentToolCall) {
                                    toolCalls.push(currentToolCall);
                                    currentToolCall = null;
                                }
                            }
                            else if (event.type === 'message_delta') {
                                yield {
                                    finishReason: self.mapStopReason(event.delta?.stop_reason),
                                    toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
                                    // message_delta only reports output tokens; prompt tokens
                                    // arrive in message_start, which is not tracked here.
                                    usage: event.usage ? {
                                        promptTokens: 0,
                                        completionTokens: event.usage.output_tokens,
                                        totalTokens: event.usage.output_tokens,
                                    } : undefined,
                                };
                            }
                        }
                        catch (e) {
                            // Skip malformed JSON frames rather than aborting the stream.
                        }
                    }
                }
            },
        };
    }
    /**
     * Generate a JSON object matching `options.schema`. The schema is appended
     * to the system prompt as an instruction; the first {...} span in the
     * response text is extracted and parsed.
     * @throws {Error} When no JSON is found or parsing fails.
     */
    async generateObject(options) {
        const { systemPrompt, messages } = this.extractSystemPrompt(options.messages);
        // Add JSON instruction to system prompt
        const jsonSystemPrompt = `${systemPrompt}\n\nYou must respond with valid JSON matching this schema:\n${JSON.stringify(options.schema, null, 2)}`;
        return this.withRetry(async () => {
            const response = await fetch(`${this.baseUrl}/v1/messages`, {
                method: 'POST',
                headers: this.buildHeaders(),
                body: JSON.stringify({
                    model: this.modelId,
                    max_tokens: options.maxTokens || 4096,
                    system: jsonSystemPrompt,
                    messages: this.formatMessages(messages),
                    temperature: options.temperature ?? 0.7,
                }),
            });
            if (!response.ok) {
                // Tagged with status so withRetry can retry 429/5xx (bug fix:
                // previously an untagged Error defeated the retry wrapper).
                await this.raiseApiError(response);
            }
            const data = await response.json();
            let text = '';
            for (const block of data.content || []) {
                if (block.type === 'text') {
                    text += block.text;
                }
            }
            // Extract JSON from response (greedy: first "{" to last "}").
            const jsonMatch = text.match(/\{[\s\S]*\}/);
            if (!jsonMatch) {
                throw new Error(`No JSON found in response: ${text}`);
            }
            let parsed;
            try {
                parsed = JSON.parse(jsonMatch[0]);
            }
            catch (e) {
                // Preserve the original parse error for diagnostics.
                throw new Error(`Failed to parse JSON: ${jsonMatch[0]}`, { cause: e });
            }
            return {
                object: parsed,
                usage: {
                    promptTokens: data.usage?.input_tokens || 0,
                    completionTokens: data.usage?.output_tokens || 0,
                    totalTokens: (data.usage?.input_tokens || 0) + (data.usage?.output_tokens || 0),
                },
                raw: data,
            };
        });
    }
    /**
     * Split out system messages: Anthropic takes the system prompt as a
     * top-level `system` field, not as a message role.
     * @returns {{systemPrompt: string, messages: Array}} Joined system text and
     *   the remaining non-system messages.
     */
    extractSystemPrompt(messages) {
        const systemMessages = messages.filter(m => m.role === 'system');
        const otherMessages = messages.filter(m => m.role !== 'system');
        const systemPrompt = systemMessages.map(m => m.content).join('\n');
        return { systemPrompt, messages: otherMessages };
    }
    /**
     * Convert normalized chat messages to Anthropic message format:
     * tool results become user-role tool_result blocks, assistant tool calls
     * become tool_use content blocks.
     */
    formatMessages(messages) {
        const result = [];
        for (const msg of messages) {
            if (msg.role === 'system')
                continue; // Handled separately
            if (msg.role === 'tool') {
                // Tool results need to be part of user message in Anthropic
                result.push({
                    role: 'user',
                    content: [{
                            type: 'tool_result',
                            tool_use_id: msg.tool_call_id || '',
                            content: msg.content || '',
                        }],
                });
            }
            else if (msg.role === 'assistant' && msg.tool_calls) {
                const content = [];
                if (msg.content) {
                    content.push({ type: 'text', text: msg.content });
                }
                for (const tc of msg.tool_calls) {
                    content.push({
                        type: 'tool_use',
                        id: tc.id,
                        name: tc.function.name,
                        input: JSON.parse(tc.function.arguments),
                    });
                }
                result.push({ role: 'assistant', content });
            }
            else {
                result.push({
                    role: msg.role,
                    content: msg.content || '',
                });
            }
        }
        return result;
    }
    /** Convert normalized tool definitions to Anthropic's input_schema format. */
    formatTools(tools) {
        return tools.map(tool => ({
            name: tool.name,
            description: tool.description || `Function ${tool.name}`,
            input_schema: tool.parameters || { type: 'object', properties: {} },
        }));
    }
    /** Map Anthropic stop reasons onto the provider-neutral finish reasons. */
    mapStopReason(reason) {
        switch (reason) {
            case 'end_turn': return 'stop';
            case 'max_tokens': return 'length';
            case 'tool_use': return 'tool_calls';
            default: return 'stop';
        }
    }
}
exports.AnthropicProvider = AnthropicProvider;
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
/**
 * Base Provider - Abstract base class for LLM providers
 */
import type { LLMProvider, ProviderConfig, GenerateTextOptions, GenerateTextResult, StreamTextOptions, StreamChunk, GenerateObjectOptions, GenerateObjectResult, Message, ToolDefinition } from './types';
export declare abstract class BaseProvider implements LLMProvider {
    /** Stable identifier of the concrete provider (e.g. "anthropic", "google"). */
    abstract readonly providerId: string;
    /** Model identifier passed to the underlying API on every request. */
    readonly modelId: string;
    /** Merged provider configuration (API key, base URL, retries, timeout, ...). */
    protected config: ProviderConfig;
    constructor(modelId: string, config?: ProviderConfig);
    /** Single blocking completion request. */
    abstract generateText(options: GenerateTextOptions): Promise<GenerateTextResult>;
    /** Streaming completion; resolves to an async iterable of incremental chunks. */
    abstract streamText(options: StreamTextOptions): Promise<AsyncIterable<StreamChunk>>;
    /** Schema-guided structured (JSON) generation. */
    abstract generateObject<T = any>(options: GenerateObjectOptions<T>): Promise<GenerateObjectResult<T>>;
    /**
     * Convert tool definitions to provider-specific format
     */
    protected abstract formatTools(tools: ToolDefinition[]): any[];
    /**
     * Convert messages to provider-specific format
     */
    protected abstract formatMessages(messages: Message[]): any[];
    /**
     * Retry logic with exponential backoff
     */
    protected withRetry<T>(fn: () => Promise<T>, maxRetries?: number): Promise<T>;
}
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
"use strict";
/**
 * Base Provider - Abstract base class for LLM providers.
 *
 * Holds the shared model/config state and the retry policy used by the
 * concrete provider implementations.
 */
Object.defineProperty(exports, "__esModule", { value: true });
exports.BaseProvider = void 0;
class BaseProvider {
    /**
     * @param {string} modelId - Model identifier used on every API request.
     * @param {object} [config] - Provider configuration; defaults:
     *   maxRetries = 3, timeout = 60000 ms. Caller-supplied keys win.
     */
    constructor(modelId, config = {}) {
        this.modelId = modelId;
        this.config = {
            maxRetries: 3,
            timeout: 60000,
            ...config,
        };
    }
    /**
     * Retry logic with exponential backoff.
     *
     * Retries only errors that look transient: HTTP 429, HTTP 5xx (read from
     * `error.status`), or connection resets/timeouts (`error.code`). Delay
     * doubles per attempt (1s, 2s, 4s, ...) capped at 30s.
     *
     * @param {Function} fn - Async operation to run.
     * @param {number} [maxRetries] - Extra attempts after the first. Uses `??`
     *   so an explicitly configured 0 means "no retries" (the previous `||`
     *   silently replaced 0 with 3).
     * @returns {Promise<*>} Result of the first successful attempt.
     * @throws The last error when retries are exhausted, or immediately for
     *   non-retryable errors.
     */
    async withRetry(fn, maxRetries = this.config.maxRetries ?? 3) {
        let lastError;
        for (let attempt = 0; attempt <= maxRetries; attempt++) {
            try {
                return await fn();
            }
            catch (error) {
                lastError = error;
                // Check if error is retryable (rate limit, server error)
                const isRetryable = error.status === 429 ||
                    error.status >= 500 ||
                    error.code === 'ECONNRESET' ||
                    error.code === 'ETIMEDOUT';
                if (!isRetryable || attempt === maxRetries) {
                    throw error;
                }
                // Exponential backoff
                const delay = Math.min(1000 * Math.pow(2, attempt), 30000);
                await new Promise(resolve => setTimeout(resolve, delay));
            }
        }
        throw lastError;
    }
}
exports.BaseProvider = BaseProvider;
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
/**
 * Google Provider - Implementation for Google Gemini API
 */
import { BaseProvider } from './base';
import type { ProviderConfig, GenerateTextOptions, GenerateTextResult, StreamTextOptions, StreamChunk, GenerateObjectOptions, GenerateObjectResult, Message, ToolDefinition } from './types';
/** A single Gemini conversation turn; Gemini uses 'user'/'model' roles only. */
interface GeminiContent {
    role: 'user' | 'model';
    parts: Array<{
        text?: string;
        functionCall?: any;
        functionResponse?: any;
    }>;
}
export declare class GoogleProvider extends BaseProvider {
    readonly providerId = "google";
    private apiKey;
    private baseUrl;
    constructor(modelId: string, config?: ProviderConfig);
    /** Single blocking completion via models/{model}:generateContent. */
    generateText(options: GenerateTextOptions): Promise<GenerateTextResult>;
    /** SSE streaming via models/{model}:streamGenerateContent. */
    streamText(options: StreamTextOptions): Promise<AsyncIterable<StreamChunk>>;
    /** Schema-guided JSON generation (responseMimeType: application/json). */
    generateObject<T = any>(options: GenerateObjectOptions<T>): Promise<GenerateObjectResult<T>>;
    private formatRequest;
    protected formatMessages(messages: Message[]): GeminiContent[];
    protected formatTools(tools: ToolDefinition[]): any[];
    private mapFinishReason;
}
export {};
|
|
@@ -0,0 +1,275 @@
|
|
|
1
|
+
"use strict";
/**
 * Google Provider - Implementation for the Google Gemini (Generative Language)
 * API. Supports blocking generation, SSE streaming, and JSON-object output,
 * including function calling.
 */
Object.defineProperty(exports, "__esModule", { value: true });
exports.GoogleProvider = void 0;
const base_1 = require("./base");
class GoogleProvider extends base_1.BaseProvider {
    /**
     * @param {string} modelId - Gemini model identifier (e.g. "gemini-1.5-pro").
     * @param {object} [config] - Provider config; `apiKey` falls back to the
     *   GOOGLE_API_KEY env var.
     * @throws {Error} When no API key can be resolved.
     */
    constructor(modelId, config = {}) {
        super(modelId, config);
        this.providerId = 'google';
        this.apiKey = config.apiKey || process.env.GOOGLE_API_KEY || '';
        // NOTE(review): the key is sent as a URL query parameter below, which can
        // leak into logs/proxies; Google also accepts an `x-goog-api-key` header —
        // consider switching (kept as-is here to preserve request shape).
        this.baseUrl = config.baseUrl || 'https://generativelanguage.googleapis.com/v1beta';
        if (!this.apiKey) {
            throw new Error('GOOGLE_API_KEY is required');
        }
    }
    /**
     * Gemini function calls carry no id; synthesize an OpenAI-style call id.
     * Uses slice() instead of the deprecated String.prototype.substr().
     */
    newCallId() {
        return `call_${Math.random().toString(36).slice(2, 11)}`;
    }
    /** Single blocking completion via models/{model}:generateContent. */
    async generateText(options) {
        return this.withRetry(async () => {
            const { systemInstruction, contents } = this.formatRequest(options.messages);
            const response = await fetch(`${this.baseUrl}/models/${this.modelId}:generateContent?key=${this.apiKey}`, {
                method: 'POST',
                headers: { 'Content-Type': 'application/json' },
                body: JSON.stringify({
                    systemInstruction: systemInstruction ? { parts: [{ text: systemInstruction }] } : undefined,
                    contents,
                    generationConfig: {
                        temperature: options.temperature ?? 0.7,
                        maxOutputTokens: options.maxTokens,
                        topP: options.topP,
                        stopSequences: options.stop,
                    },
                    tools: options.tools ? this.formatTools(options.tools) : undefined,
                }),
            });
            if (!response.ok) {
                const error = await response.json().catch(() => ({}));
                throw Object.assign(new Error(error.error?.message || `Google API error: ${response.status}`), {
                    status: response.status,
                });
            }
            const data = await response.json();
            const candidate = data.candidates?.[0];
            let text = '';
            const toolCalls = [];
            for (const part of candidate?.content?.parts || []) {
                if (part.text) {
                    text += part.text;
                }
                else if (part.functionCall) {
                    toolCalls.push({
                        id: this.newCallId(),
                        type: 'function',
                        function: {
                            name: part.functionCall.name,
                            arguments: JSON.stringify(part.functionCall.args || {}),
                        },
                    });
                }
            }
            return {
                text,
                toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
                usage: {
                    promptTokens: data.usageMetadata?.promptTokenCount || 0,
                    completionTokens: data.usageMetadata?.candidatesTokenCount || 0,
                    totalTokens: data.usageMetadata?.totalTokenCount || 0,
                },
                finishReason: this.mapFinishReason(candidate?.finishReason),
                raw: data,
            };
        });
    }
    /**
     * SSE streaming via models/{model}:streamGenerateContent (&alt=sse).
     * Yields { text } chunks as parts arrive and a final chunk carrying
     * finishReason / toolCalls / usage. Not wrapped in withRetry — a partially
     * consumed stream cannot be safely replayed.
     */
    async streamText(options) {
        const self = this;
        const { systemInstruction, contents } = this.formatRequest(options.messages);
        return {
            async *[Symbol.asyncIterator]() {
                const response = await fetch(`${self.baseUrl}/models/${self.modelId}:streamGenerateContent?key=${self.apiKey}&alt=sse`, {
                    method: 'POST',
                    headers: { 'Content-Type': 'application/json' },
                    body: JSON.stringify({
                        systemInstruction: systemInstruction ? { parts: [{ text: systemInstruction }] } : undefined,
                        contents,
                        generationConfig: {
                            temperature: options.temperature ?? 0.7,
                            maxOutputTokens: options.maxTokens,
                        },
                        tools: options.tools ? self.formatTools(options.tools) : undefined,
                    }),
                });
                if (!response.ok) {
                    const error = await response.json().catch(() => ({}));
                    // Tag with status for consistency with generateText's error shape.
                    throw Object.assign(new Error(error.error?.message || `Google API error: ${response.status}`), {
                        status: response.status,
                    });
                }
                const reader = response.body?.getReader();
                if (!reader)
                    throw new Error('No response body');
                const decoder = new TextDecoder();
                let buffer = '';
                const toolCalls = [];
                while (true) {
                    const { done, value } = await reader.read();
                    if (done)
                        break;
                    buffer += decoder.decode(value, { stream: true });
                    // SSE frames are newline-delimited; keep the trailing partial line.
                    const lines = buffer.split('\n');
                    buffer = lines.pop() || '';
                    for (const line of lines) {
                        if (!line.startsWith('data: '))
                            continue;
                        const data = line.slice(6);
                        if (data === '[DONE]')
                            continue;
                        try {
                            const event = JSON.parse(data);
                            const candidate = event.candidates?.[0];
                            for (const part of candidate?.content?.parts || []) {
                                if (part.text) {
                                    if (options.onToken)
                                        options.onToken(part.text);
                                    yield { text: part.text };
                                }
                                else if (part.functionCall) {
                                    toolCalls.push({
                                        id: self.newCallId(),
                                        type: 'function',
                                        function: {
                                            name: part.functionCall.name,
                                            arguments: JSON.stringify(part.functionCall.args || {}),
                                        },
                                    });
                                }
                            }
                            if (candidate?.finishReason) {
                                yield {
                                    finishReason: self.mapFinishReason(candidate.finishReason),
                                    toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
                                    usage: event.usageMetadata ? {
                                        promptTokens: event.usageMetadata.promptTokenCount || 0,
                                        completionTokens: event.usageMetadata.candidatesTokenCount || 0,
                                        totalTokens: event.usageMetadata.totalTokenCount || 0,
                                    } : undefined,
                                };
                            }
                        }
                        catch (e) {
                            // Skip malformed JSON frames rather than aborting the stream.
                        }
                    }
                }
            },
        };
    }
    /**
     * Generate a JSON object matching `options.schema` using Gemini's native
     * JSON mode (responseMimeType) plus a schema instruction in the system
     * prompt; falls back to extracting the first {...} span if the response is
     * not pure JSON.
     * @throws {Error} When no JSON can be found or parsed.
     */
    async generateObject(options) {
        const { systemInstruction, contents } = this.formatRequest(options.messages);
        return this.withRetry(async () => {
            const response = await fetch(`${this.baseUrl}/models/${this.modelId}:generateContent?key=${this.apiKey}`, {
                method: 'POST',
                headers: { 'Content-Type': 'application/json' },
                body: JSON.stringify({
                    systemInstruction: systemInstruction
                        ? { parts: [{ text: `${systemInstruction}\n\nRespond with valid JSON matching this schema:\n${JSON.stringify(options.schema, null, 2)}` }] }
                        : { parts: [{ text: `Respond with valid JSON matching this schema:\n${JSON.stringify(options.schema, null, 2)}` }] },
                    contents,
                    generationConfig: {
                        temperature: options.temperature ?? 0.7,
                        maxOutputTokens: options.maxTokens,
                        responseMimeType: 'application/json',
                    },
                }),
            });
            if (!response.ok) {
                const error = await response.json().catch(() => ({}));
                // Bug fix: attach `status` so withRetry can actually retry 429/5xx;
                // the previous untagged Error defeated the retry wrapper.
                throw Object.assign(new Error(error.error?.message || `Google API error: ${response.status}`), {
                    status: response.status,
                });
            }
            const data = await response.json();
            const candidate = data.candidates?.[0];
            let text = '';
            for (const part of candidate?.content?.parts || []) {
                if (part.text)
                    text += part.text;
            }
            let parsed;
            try {
                parsed = JSON.parse(text);
            }
            catch (e) {
                // Try to extract JSON from text
                const jsonMatch = text.match(/\{[\s\S]*\}/);
                if (!jsonMatch)
                    throw new Error(`No JSON found: ${text}`);
                parsed = JSON.parse(jsonMatch[0]);
            }
            return {
                object: parsed,
                usage: {
                    promptTokens: data.usageMetadata?.promptTokenCount || 0,
                    completionTokens: data.usageMetadata?.candidatesTokenCount || 0,
                    totalTokens: data.usageMetadata?.totalTokenCount || 0,
                },
                raw: data,
            };
        });
    }
    /**
     * Split system messages into Gemini's top-level systemInstruction and
     * convert the rest to `contents`.
     * @returns {{systemInstruction: (string|null), contents: Array}}
     */
    formatRequest(messages) {
        const systemMessages = messages.filter(m => m.role === 'system');
        const otherMessages = messages.filter(m => m.role !== 'system');
        const systemInstruction = systemMessages.length > 0
            ? systemMessages.map(m => m.content).join('\n')
            : null;
        return {
            systemInstruction,
            contents: this.formatMessages(otherMessages),
        };
    }
    /**
     * Convert normalized chat messages to Gemini contents. Gemini has only
     * 'user' and 'model' roles: tool results become user functionResponse
     * parts, assistant tool calls become model functionCall parts.
     */
    formatMessages(messages) {
        const result = [];
        for (const msg of messages) {
            if (msg.role === 'system')
                continue;
            const role = msg.role === 'assistant' ? 'model' : 'user';
            if (msg.role === 'tool') {
                result.push({
                    role: 'user',
                    parts: [{
                            functionResponse: {
                                name: msg.name || 'function',
                                response: { result: msg.content },
                            },
                        }],
                });
            }
            else if (msg.role === 'assistant' && msg.tool_calls) {
                const parts = [];
                if (msg.content)
                    parts.push({ text: msg.content });
                for (const tc of msg.tool_calls) {
                    parts.push({
                        functionCall: {
                            name: tc.function.name,
                            args: JSON.parse(tc.function.arguments),
                        },
                    });
                }
                result.push({ role: 'model', parts });
            }
            else {
                result.push({
                    role,
                    parts: [{ text: msg.content || '' }],
                });
            }
        }
        return result;
    }
    /** Wrap tool definitions in Gemini's functionDeclarations envelope. */
    formatTools(tools) {
        return [{
                functionDeclarations: tools.map(tool => ({
                    name: tool.name,
                    description: tool.description || `Function ${tool.name}`,
                    parameters: tool.parameters || { type: 'object', properties: {} },
                })),
            }];
    }
    /** Map Gemini finish reasons onto the provider-neutral finish reasons. */
    mapFinishReason(reason) {
        switch (reason) {
            case 'STOP': return 'stop';
            case 'MAX_TOKENS': return 'length';
            case 'SAFETY': return 'content_filter';
            default: return 'stop';
        }
    }
}
exports.GoogleProvider = GoogleProvider;
|