recker 1.0.29-next.cf0cafb → 1.0.30
This diff shows the contents of publicly available package versions as published to their respective public registries. It is provided for informational purposes only.
- package/dist/ai/client-ai.d.ts +41 -0
- package/dist/ai/client-ai.js +391 -0
- package/dist/ai/index.d.ts +2 -0
- package/dist/ai/index.js +2 -0
- package/dist/browser/ai/client-ai.d.ts +41 -0
- package/dist/browser/ai/client-ai.js +391 -0
- package/dist/browser/ai/memory.d.ts +35 -0
- package/dist/browser/ai/memory.js +136 -0
- package/dist/browser/core/client.d.ts +6 -1
- package/dist/browser/core/client.js +18 -0
- package/dist/browser/transport/undici.js +11 -2
- package/dist/browser/types/ai-client.d.ts +3 -0
- package/dist/browser/types/ai.d.ts +1 -1
- package/dist/cli/index.js +261 -1
- package/dist/cli/tui/shell.d.ts +3 -0
- package/dist/cli/tui/shell.js +126 -2
- package/dist/core/client.d.ts +6 -1
- package/dist/core/client.js +18 -0
- package/dist/presets/anthropic.d.ts +3 -1
- package/dist/presets/anthropic.js +11 -1
- package/dist/presets/azure-openai.d.ts +3 -1
- package/dist/presets/azure-openai.js +11 -1
- package/dist/presets/cohere.d.ts +3 -1
- package/dist/presets/cohere.js +8 -2
- package/dist/presets/deepseek.d.ts +3 -1
- package/dist/presets/deepseek.js +8 -2
- package/dist/presets/fireworks.d.ts +3 -1
- package/dist/presets/fireworks.js +8 -2
- package/dist/presets/gemini.d.ts +3 -1
- package/dist/presets/gemini.js +8 -1
- package/dist/presets/groq.d.ts +3 -1
- package/dist/presets/groq.js +8 -2
- package/dist/presets/huggingface.d.ts +3 -1
- package/dist/presets/huggingface.js +8 -1
- package/dist/presets/mistral.d.ts +3 -1
- package/dist/presets/mistral.js +8 -2
- package/dist/presets/openai.d.ts +3 -1
- package/dist/presets/openai.js +9 -2
- package/dist/presets/perplexity.d.ts +3 -1
- package/dist/presets/perplexity.js +8 -2
- package/dist/presets/registry.d.ts +4 -0
- package/dist/presets/registry.js +48 -0
- package/dist/presets/replicate.d.ts +3 -1
- package/dist/presets/replicate.js +8 -1
- package/dist/presets/together.d.ts +3 -1
- package/dist/presets/together.js +8 -2
- package/dist/presets/xai.d.ts +3 -1
- package/dist/presets/xai.js +8 -2
- package/dist/transport/undici.js +11 -2
- package/dist/types/ai-client.d.ts +3 -0
- package/dist/types/ai.d.ts +1 -1
- package/dist/utils/colors.d.ts +2 -0
- package/dist/utils/colors.js +4 -0
- package/package.json +1 -1
package/dist/ai/client-ai.d.ts
ADDED
@@ -0,0 +1,41 @@
+import type { AIProvider, AIResponse, AIStream, ChatMessage } from '../types/ai.js';
+import type { ClientAI, PresetAIConfig, AIMemoryConfig } from '../types/ai-client.js';
+import type { Client } from '../core/client.js';
+export declare class AIConfigurationError extends Error {
+    constructor(message: string);
+}
+export declare class ClientAIImpl implements ClientAI {
+    private readonly client;
+    private readonly config;
+    private readonly memory;
+    constructor(client: Client, config: PresetAIConfig);
+    get provider(): AIProvider;
+    get model(): string;
+    chat(prompt: string): Promise<AIResponse>;
+    chatStream(prompt: string): Promise<AIStream>;
+    prompt(prompt: string): Promise<AIResponse>;
+    promptStream(prompt: string): Promise<AIStream>;
+    clearMemory(): void;
+    getMemory(): readonly ChatMessage[];
+    setMemoryConfig(config: Partial<AIMemoryConfig>): void;
+    getMemoryConfig(): AIMemoryConfig;
+    private makeRequest;
+    private makeStreamRequest;
+    private buildRequestBody;
+    private buildAnthropicBody;
+    private buildGoogleBody;
+    private buildCohereBody;
+    private getEndpoint;
+    private getExtraHeaders;
+    private parseResponse;
+    private parseOpenAIResponse;
+    private parseAnthropicResponse;
+    private parseGoogleResponse;
+    private parseCohereResponse;
+    private buildLatency;
+    private parseSSEStream;
+    private parseStreamChunk;
+    private parseOpenAIStreamChunk;
+    private parseAnthropicStreamChunk;
+    private wrapStreamWithMemory;
+}
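In plain terms, the new surface gives each preset client a memory-backed conversation API: chat()/chatStream() run through ConversationMemory, while prompt()/promptStream() are stateless one-shots. A minimal usage sketch, assuming a configured recker Client; the import specifiers, config values, and construction path are illustrative (the real wiring lives in the registry and preset changes listed above), and only the class and method names come from the declarations:

// Sketch only. Assumes hypothetical 'recker' / 'recker/ai' import specifiers,
// a configured Client instance, and a minimal PresetAIConfig.
import type { Client } from 'recker';
import { ClientAIImpl } from 'recker/ai';

declare const client: Client; // obtained elsewhere, e.g. from a preset factory

const ai = new ClientAIImpl(client, { provider: 'openai', model: 'gpt-4o-mini' });

// chat() threads conversation memory through the request and records the reply.
const reply = await ai.chat('Summarize the 1.0.30 changes.');
console.log(reply.content, reply.usage.totalTokens, reply.latency.total);

// chatStream() yields events as they arrive and commits the full text to
// memory once the stream completes (see wrapStreamWithMemory below).
for await (const event of await ai.chatStream('Now one sentence.')) {
  if (event.type === 'text') process.stdout.write(event.content);
}

// prompt()/promptStream() bypass memory entirely; clearMemory() resets it.
ai.clearMemory();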
package/dist/ai/client-ai.js
ADDED
@@ -0,0 +1,391 @@
+import { ConversationMemory } from './memory.js';
+export class AIConfigurationError extends Error {
+    constructor(message) {
+        super(message);
+        this.name = 'AIConfigurationError';
+    }
+}
+const PROVIDER_ENDPOINTS = {
+    openai: '/chat/completions',
+    anthropic: '/messages',
+    google: '/models/{model}:generateContent',
+    groq: '/chat/completions',
+    mistral: '/chat/completions',
+    cohere: '/chat',
+    together: '/chat/completions',
+    perplexity: '/chat/completions',
+    deepseek: '/chat/completions',
+    fireworks: '/chat/completions',
+    xai: '/chat/completions',
+    replicate: '/predictions',
+    huggingface: '/models/{model}/v1/chat/completions',
+    ollama: '/api/chat',
+    'azure-openai': '/chat/completions',
+    'cloudflare-workers-ai': '/ai/run/@cf/meta/llama-2-7b-chat-int8',
+    custom: '/chat/completions',
+};
+export class ClientAIImpl {
+    client;
+    config;
+    memory;
+    constructor(client, config) {
+        this.client = client;
+        this.config = config;
+        this.memory = new ConversationMemory(config.memory);
+    }
+    get provider() {
+        return this.config.provider;
+    }
+    get model() {
+        return this.config.model;
+    }
+    async chat(prompt) {
+        const messages = this.memory.buildMessages(prompt);
+        const response = await this.makeRequest(messages, false);
+        this.memory.recordResponse(response.content);
+        return response;
+    }
+    async chatStream(prompt) {
+        const messages = this.memory.buildMessages(prompt);
+        const stream = await this.makeStreamRequest(messages);
+        return this.wrapStreamWithMemory(stream);
+    }
+    async prompt(prompt) {
+        const messages = [{ role: 'user', content: prompt }];
+        return this.makeRequest(messages, false);
+    }
+    async promptStream(prompt) {
+        const messages = [{ role: 'user', content: prompt }];
+        return this.makeStreamRequest(messages);
+    }
+    clearMemory() {
+        this.memory.clear();
+    }
+    getMemory() {
+        return this.memory.getConversation();
+    }
+    setMemoryConfig(config) {
+        this.memory.setConfig(config);
+    }
+    getMemoryConfig() {
+        return this.memory.getConfig();
+    }
+    async makeRequest(messages, stream) {
+        const startTime = performance.now();
+        const body = this.buildRequestBody(messages, stream);
+        const endpoint = this.getEndpoint();
+        const response = await this.client.post(endpoint, {
+            json: body,
+            headers: this.getExtraHeaders(),
+        });
+        const data = await response.json();
+        return this.parseResponse(data, startTime);
+    }
+    async makeStreamRequest(messages) {
+        const body = this.buildRequestBody(messages, true);
+        const endpoint = this.getEndpoint();
+        const response = await this.client.post(endpoint, {
+            json: body,
+            headers: this.getExtraHeaders(),
+        });
+        return this.parseSSEStream(response.raw);
+    }
+    buildRequestBody(messages, stream) {
+        const provider = this.config.provider;
+        if (provider === 'anthropic') {
+            return this.buildAnthropicBody(messages, stream);
+        }
+        if (provider === 'google') {
+            return this.buildGoogleBody(messages, stream);
+        }
+        if (provider === 'cohere') {
+            return this.buildCohereBody(messages, stream);
+        }
+        return {
+            model: this.config.model,
+            messages: messages.map(m => ({
+                role: m.role,
+                content: m.content,
+            })),
+            stream,
+            ...(stream && { stream_options: { include_usage: true } }),
+        };
+    }
+    buildAnthropicBody(messages, stream) {
+        const systemMessages = messages.filter(m => m.role === 'system');
+        const otherMessages = messages.filter(m => m.role !== 'system');
+        return {
+            model: this.config.model,
+            max_tokens: 4096,
+            system: systemMessages.map(m => m.content).join('\n') || undefined,
+            messages: otherMessages.map(m => ({
+                role: m.role === 'assistant' ? 'assistant' : 'user',
+                content: m.content,
+            })),
+            stream,
+        };
+    }
+    buildGoogleBody(messages, stream) {
+        const contents = messages
+            .filter(m => m.role !== 'system')
+            .map(m => ({
+            role: m.role === 'assistant' ? 'model' : 'user',
+            parts: [{ text: m.content }],
+        }));
+        const systemInstruction = messages
+            .filter(m => m.role === 'system')
+            .map(m => m.content)
+            .join('\n');
+        return {
+            contents,
+            ...(systemInstruction && {
+                systemInstruction: { parts: [{ text: systemInstruction }] },
+            }),
+            generationConfig: {
+                maxOutputTokens: 4096,
+            },
+        };
+    }
+    buildCohereBody(messages, stream) {
+        const chatHistory = messages.slice(0, -1).map(m => ({
+            role: m.role === 'assistant' ? 'CHATBOT' : 'USER',
+            message: m.content,
+        }));
+        const lastMessage = messages[messages.length - 1];
+        return {
+            model: this.config.model,
+            message: typeof lastMessage.content === 'string' ? lastMessage.content : '',
+            chat_history: chatHistory.length > 0 ? chatHistory : undefined,
+            stream,
+        };
+    }
+    getEndpoint() {
+        let endpoint = PROVIDER_ENDPOINTS[this.config.provider] || '/chat/completions';
+        endpoint = endpoint.replace('{model}', this.config.model);
+        return endpoint;
+    }
+    getExtraHeaders() {
+        const headers = {};
+        if (this.config.headers) {
+            Object.assign(headers, this.config.headers);
+        }
+        return headers;
+    }
+    parseResponse(data, startTime) {
+        const provider = this.config.provider;
+        const endTime = performance.now();
+        if (provider === 'anthropic') {
+            return this.parseAnthropicResponse(data, startTime, endTime);
+        }
+        if (provider === 'google') {
+            return this.parseGoogleResponse(data, startTime, endTime);
+        }
+        if (provider === 'cohere') {
+            return this.parseCohereResponse(data, startTime, endTime);
+        }
+        return this.parseOpenAIResponse(data, startTime, endTime);
+    }
+    parseOpenAIResponse(data, startTime, endTime) {
+        const d = data;
+        const content = d.choices?.[0]?.message?.content || '';
+        const usage = {
+            inputTokens: d.usage?.prompt_tokens || 0,
+            outputTokens: d.usage?.completion_tokens || 0,
+            totalTokens: d.usage?.total_tokens || 0,
+        };
+        return {
+            content,
+            usage,
+            latency: this.buildLatency(startTime, endTime, usage.outputTokens),
+            model: d.model || this.config.model,
+            provider: this.config.provider,
+            cached: false,
+            finishReason: d.choices?.[0]?.finish_reason,
+            raw: data,
+        };
+    }
+    parseAnthropicResponse(data, startTime, endTime) {
+        const d = data;
+        const textContent = d.content?.find(c => c.type === 'text');
+        const content = textContent?.text || '';
+        const usage = {
+            inputTokens: d.usage?.input_tokens || 0,
+            outputTokens: d.usage?.output_tokens || 0,
+            totalTokens: (d.usage?.input_tokens || 0) + (d.usage?.output_tokens || 0),
+        };
+        return {
+            content,
+            usage,
+            latency: this.buildLatency(startTime, endTime, usage.outputTokens),
+            model: d.model || this.config.model,
+            provider: 'anthropic',
+            cached: false,
+            finishReason: d.stop_reason === 'end_turn' ? 'stop' : undefined,
+            raw: data,
+        };
+    }
+    parseGoogleResponse(data, startTime, endTime) {
+        const d = data;
+        const content = d.candidates?.[0]?.content?.parts?.[0]?.text || '';
+        const usage = {
+            inputTokens: d.usageMetadata?.promptTokenCount || 0,
+            outputTokens: d.usageMetadata?.candidatesTokenCount || 0,
+            totalTokens: d.usageMetadata?.totalTokenCount || 0,
+        };
+        return {
+            content,
+            usage,
+            latency: this.buildLatency(startTime, endTime, usage.outputTokens),
+            model: this.config.model,
+            provider: 'google',
+            cached: false,
+            finishReason: d.candidates?.[0]?.finishReason === 'STOP' ? 'stop' : undefined,
+            raw: data,
+        };
+    }
+    parseCohereResponse(data, startTime, endTime) {
+        const d = data;
+        const content = d.text || '';
+        const usage = {
+            inputTokens: d.meta?.tokens?.input_tokens || 0,
+            outputTokens: d.meta?.tokens?.output_tokens || 0,
+            totalTokens: (d.meta?.tokens?.input_tokens || 0) + (d.meta?.tokens?.output_tokens || 0),
+        };
+        return {
+            content,
+            usage,
+            latency: this.buildLatency(startTime, endTime, usage.outputTokens),
+            model: this.config.model,
+            provider: 'cohere',
+            cached: false,
+            finishReason: d.finish_reason === 'COMPLETE' ? 'stop' : undefined,
+            raw: data,
+        };
+    }
+    buildLatency(startTime, endTime, outputTokens) {
+        const total = endTime - startTime;
+        return {
+            ttft: total,
+            tps: outputTokens > 0 ? (outputTokens / (total / 1000)) : 0,
+            total,
+        };
+    }
+    async *parseSSEStream(response) {
+        const reader = response.body?.getReader();
+        if (!reader) {
+            throw new Error('No response body');
+        }
+        const decoder = new TextDecoder();
+        let buffer = '';
+        let firstChunkTime;
+        const startTime = performance.now();
+        try {
+            while (true) {
+                const { done, value } = await reader.read();
+                if (done) {
+                    break;
+                }
+                buffer += decoder.decode(value, { stream: true });
+                const lines = buffer.split('\n');
+                buffer = lines.pop() || '';
+                for (const line of lines) {
+                    const trimmed = line.trim();
+                    if (!trimmed || trimmed === 'data: [DONE]') {
+                        continue;
+                    }
+                    if (trimmed.startsWith('data: ')) {
+                        const jsonStr = trimmed.slice(6);
+                        try {
+                            const event = this.parseStreamChunk(jsonStr);
+                            if (event) {
+                                if (!firstChunkTime && event.type === 'text') {
+                                    firstChunkTime = performance.now();
+                                }
+                                yield event;
+                            }
+                        }
+                        catch {
+                        }
+                    }
+                }
+            }
+        }
+        finally {
+            reader.releaseLock();
+        }
+    }
+    parseStreamChunk(jsonStr) {
+        const data = JSON.parse(jsonStr);
+        const provider = this.config.provider;
+        if (provider === 'anthropic') {
+            return this.parseAnthropicStreamChunk(data);
+        }
+        return this.parseOpenAIStreamChunk(data);
+    }
+    parseOpenAIStreamChunk(data) {
+        const choice = data.choices?.[0];
+        if (!choice) {
+            if (data.usage) {
+                return {
+                    type: 'usage',
+                    usage: {
+                        inputTokens: data.usage.prompt_tokens || 0,
+                        outputTokens: data.usage.completion_tokens || 0,
+                        totalTokens: data.usage.total_tokens || 0,
+                    },
+                };
+            }
+            return null;
+        }
+        if (choice.delta?.content) {
+            return {
+                type: 'text',
+                content: choice.delta.content,
+            };
+        }
+        if (choice.finish_reason) {
+            return {
+                type: 'done',
+                finishReason: choice.finish_reason,
+            };
+        }
+        return null;
+    }
+    parseAnthropicStreamChunk(data) {
+        if (data.type === 'content_block_delta' && data.delta?.type === 'text_delta') {
+            return {
+                type: 'text',
+                content: data.delta.text || '',
+            };
+        }
+        if (data.type === 'message_stop') {
+            return {
+                type: 'done',
+                finishReason: 'stop',
+            };
+        }
+        if (data.type === 'message_delta' && data.message?.usage) {
+            return {
+                type: 'usage',
+                usage: {
+                    inputTokens: data.message.usage.input_tokens || 0,
+                    outputTokens: data.message.usage.output_tokens || 0,
+                    totalTokens: (data.message.usage.input_tokens || 0) + (data.message.usage.output_tokens || 0),
+                },
+            };
+        }
+        return null;
+    }
+    async *wrapStreamWithMemory(stream) {
+        let fullContent = '';
+        for await (const event of stream) {
+            if (event.type === 'text') {
+                fullContent += event.content;
+            }
+            yield event;
+        }
+        if (fullContent) {
+            this.memory.recordResponse(fullContent);
+        }
+    }
+}
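One implementation detail worth noting in parseSSEStream above: decoded network chunks can end mid-line, so the parser splits on newlines and carries the incomplete tail into the next read instead of parsing it early. A standalone sketch of that split-and-carry pattern (the helper name and inputs are illustrative, not part of the package):

// Illustrative helper showing the same buffering technique parseSSEStream uses.
function* splitSSELines(chunks: Iterable<string>): Generator<string> {
  let buffer = '';
  for (const chunk of chunks) {
    buffer += chunk;
    const lines = buffer.split('\n');
    buffer = lines.pop() || ''; // incomplete tail waits for the next chunk
    for (const line of lines) {
      if (line.trim()) yield line;
    }
  }
}

// A data line split across two reads is reassembled before parsing:
console.log([...splitSSELines(['data: {"a"', ':1}\n\ndata: [DONE]\n'])]);
// → [ 'data: {"a":1}', 'data: [DONE]' ]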
package/dist/ai/index.d.ts
CHANGED
@@ -1,4 +1,6 @@
 export { UnifiedAIClient, createAI } from './client.js';
+export { ClientAIImpl, AIConfigurationError } from './client-ai.js';
+export { ConversationMemory } from './memory.js';
 export { BaseAIProvider, AIError, RateLimitError, ContextLengthError, OverloadedError, AuthenticationError } from './providers/base.js';
 export { OpenAIProvider } from './providers/openai.js';
 export { AnthropicProvider } from './providers/anthropic.js';
package/dist/ai/index.js
CHANGED
@@ -1,4 +1,6 @@
 export { UnifiedAIClient, createAI } from './client.js';
+export { ClientAIImpl, AIConfigurationError } from './client-ai.js';
+export { ConversationMemory } from './memory.js';
 export { BaseAIProvider, AIError, RateLimitError, ContextLengthError, OverloadedError, AuthenticationError } from './providers/base.js';
 export { OpenAIProvider } from './providers/openai.js';
 export { AnthropicProvider } from './providers/anthropic.js';
package/dist/browser/ai/client-ai.d.ts
ADDED
@@ -0,0 +1,41 @@
+import type { AIProvider, AIResponse, AIStream, ChatMessage } from '../types/ai.js';
+import type { ClientAI, PresetAIConfig, AIMemoryConfig } from '../types/ai-client.js';
+import type { Client } from '../core/client.js';
+export declare class AIConfigurationError extends Error {
+    constructor(message: string);
+}
+export declare class ClientAIImpl implements ClientAI {
+    private readonly client;
+    private readonly config;
+    private readonly memory;
+    constructor(client: Client, config: PresetAIConfig);
+    get provider(): AIProvider;
+    get model(): string;
+    chat(prompt: string): Promise<AIResponse>;
+    chatStream(prompt: string): Promise<AIStream>;
+    prompt(prompt: string): Promise<AIResponse>;
+    promptStream(prompt: string): Promise<AIStream>;
+    clearMemory(): void;
+    getMemory(): readonly ChatMessage[];
+    setMemoryConfig(config: Partial<AIMemoryConfig>): void;
+    getMemoryConfig(): AIMemoryConfig;
+    private makeRequest;
+    private makeStreamRequest;
+    private buildRequestBody;
+    private buildAnthropicBody;
+    private buildGoogleBody;
+    private buildCohereBody;
+    private getEndpoint;
+    private getExtraHeaders;
+    private parseResponse;
+    private parseOpenAIResponse;
+    private parseAnthropicResponse;
+    private parseGoogleResponse;
+    private parseCohereResponse;
+    private buildLatency;
+    private parseSSEStream;
+    private parseStreamChunk;
+    private parseOpenAIStreamChunk;
+    private parseAnthropicStreamChunk;
+    private wrapStreamWithMemory;
+}
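For orientation, the body builders above map one and the same message list onto four different wire formats. A sketch of the payloads buildRequestBody produces for messages = [{ role: 'system', content: 'Be terse.' }, { role: 'user', content: 'Hi' }], derived from the client-ai.js hunk (model names are placeholders, not defaults from the package):

// OpenAI-compatible providers (openai, groq, mistral, together, ...):
// messages pass through unchanged; stream_options is added only when streaming.
const openaiBody = {
  model: 'gpt-4o-mini',
  messages: [
    { role: 'system', content: 'Be terse.' },
    { role: 'user', content: 'Hi' },
  ],
  stream: false,
};

// Anthropic: system prompts are hoisted into a top-level `system` field
// and max_tokens is pinned at 4096.
const anthropicBody = {
  model: 'claude-3-5-haiku-latest',
  max_tokens: 4096,
  system: 'Be terse.',
  messages: [{ role: 'user', content: 'Hi' }],
  stream: false,
};

// Google: roles become user/model, content becomes parts, the system prompt
// moves to systemInstruction, and no stream flag is sent at all.
const googleBody = {
  contents: [{ role: 'user', parts: [{ text: 'Hi' }] }],
  systemInstruction: { parts: [{ text: 'Be terse.' }] },
  generationConfig: { maxOutputTokens: 4096 },
};

// Cohere: everything but the last message becomes chat_history; note that a
// leading system message lands in history with role USER under this mapping.
const cohereBody = {
  model: 'command-r',
  message: 'Hi',
  chat_history: [{ role: 'USER', message: 'Be terse.' }],
  stream: false,
};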