@olane/o-intelligence 0.7.12-alpha.9 → 0.7.12
This diff shows the changes between publicly released versions of this package as they appear in their public registry. It is provided for informational purposes only.
- package/dist/src/anthropic-intelligence.tool.d.ts +11 -3
- package/dist/src/anthropic-intelligence.tool.d.ts.map +1 -1
- package/dist/src/anthropic-intelligence.tool.js +230 -4
- package/dist/src/gemini-intelligence.tool.d.ts +11 -3
- package/dist/src/gemini-intelligence.tool.d.ts.map +1 -1
- package/dist/src/gemini-intelligence.tool.js +234 -2
- package/dist/src/grok-intelligence.tool.d.ts +5 -3
- package/dist/src/grok-intelligence.tool.d.ts.map +1 -1
- package/dist/src/grok-intelligence.tool.js +207 -2
- package/dist/src/interfaces/prompt.request.d.ts +2 -2
- package/dist/src/interfaces/prompt.request.d.ts.map +1 -1
- package/dist/src/methods/intelligence.methods.d.ts.map +1 -1
- package/dist/src/methods/intelligence.methods.js +7 -0
- package/dist/src/methods/llm.methods.d.ts.map +1 -1
- package/dist/src/methods/llm.methods.js +14 -0
- package/dist/src/o-intelligence.tool.d.ts.map +1 -1
- package/dist/src/o-intelligence.tool.js +42 -38
- package/dist/src/ollama-intelligence.tool.d.ts +11 -3
- package/dist/src/ollama-intelligence.tool.d.ts.map +1 -1
- package/dist/src/ollama-intelligence.tool.js +188 -2
- package/dist/src/openai-intelligence.tool.d.ts +11 -3
- package/dist/src/openai-intelligence.tool.d.ts.map +1 -1
- package/dist/src/openai-intelligence.tool.js +221 -2
- package/dist/src/perplexity-intelligence.tool.d.ts +11 -3
- package/dist/src/perplexity-intelligence.tool.d.ts.map +1 -1
- package/dist/src/perplexity-intelligence.tool.js +288 -3
- package/dist/src/types/streaming.types.d.ts +197 -0
- package/dist/src/types/streaming.types.d.ts.map +1 -0
- package/dist/src/types/streaming.types.js +4 -0
- package/dist/src/utils/sse-parser.d.ts +66 -0
- package/dist/src/utils/sse-parser.d.ts.map +1 -0
- package/dist/src/utils/sse-parser.js +255 -0
- package/dist/src/utils/streaming-helpers.d.ts +16 -0
- package/dist/src/utils/streaming-helpers.d.ts.map +1 -0
- package/dist/src/utils/streaming-helpers.js +129 -0
- package/package.json +7 -7
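
The substance of this release, judging by the file list, is streaming support across the provider tools: each completion/generate method now checks an _isStreaming request parameter and, when set, hands a new async generator to StreamUtils.processGenerator from @olane/o-node. The Perplexity tool diffed below is representative. As a minimal sketch of the new surface, assuming a hand-built request (real requests arrive through the olane tool runtime; the config and request literals here are illustrative assumptions that mirror only the fields the diffed code actually reads):

    import { PerplexityIntelligenceTool } from '@olane/o-intelligence';

    // Sketch only: drives the new generator directly. The config and
    // request literals are assumptions for illustration; in practice
    // both come from the olane runtime.
    const tool = new PerplexityIntelligenceTool({} as any);
    const request: any = {
      params: {
        apiKey: process.env.PERPLEXITY_API_KEY,
        messages: [{ role: 'user', content: 'What changed in 0.7.12?' }],
      },
    };

    let text = '';
    for await (const chunk of (tool as any)._streamCompletion(request)) {
      if (chunk.delta) text += chunk.delta;               // incremental tokens
      if (chunk.citations) console.log(chunk.citations);  // Perplexity-specific
      if (chunk.usage) console.log(chunk.usage);          // usually the final chunk
      if (chunk.success === false) throw new Error(chunk.error);
    }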
package/dist/src/perplexity-intelligence.tool.js
@@ -1,12 +1,13 @@
 import { oAddress } from '@olane/o-core';
 import { LLM_PARAMS } from './methods/llm.methods.js';
 import { oLaneTool } from '@olane/o-lane';
+import { StreamUtils } from '@olane/o-node';
 export class PerplexityIntelligenceTool extends oLaneTool {
     constructor(config) {
         super({
             ...config,
             address: new oAddress('o://perplexity'),
-            description: 'Perplexity
+            description: 'Perplexity intelligence tool is useful for searching the web or providing information about any unknown entity or topic',
             methods: LLM_PARAMS,
             dependencies: [],
         });
@@ -17,8 +18,13 @@ export class PerplexityIntelligenceTool extends oLaneTool {
      * Chat completion with Perplexity
      */
     async _tool_completion(request) {
+        const params = request.params;
+        const { _isStreaming = false } = params;
+        if (_isStreaming) {
+            this.logger.debug('Streaming completion...');
+            return StreamUtils.processGenerator(request, this._streamCompletion(request), request.stream);
+        }
         try {
-            const params = request.params;
             const { model = this.defaultModel, messages, max_tokens, temperature, top_p, top_k, presence_penalty, frequency_penalty, apiKey = this.defaultApiKey, search_domain, return_citations, return_images, return_related_questions, } = params;
             if (!messages || !Array.isArray(messages)) {
                 return {
@@ -89,12 +95,151 @@
             };
         }
     }
+    /**
+     * Stream chat completion with Perplexity
+     */
+    async *_streamCompletion(request) {
+        try {
+            const params = request.params;
+            const { model = this.defaultModel, messages, max_tokens, temperature, top_p, top_k, presence_penalty, frequency_penalty, apiKey = this.defaultApiKey, search_domain, return_citations, return_images, return_related_questions, } = params;
+            if (!messages || !Array.isArray(messages)) {
+                yield {
+                    success: false,
+                    error: '"messages" array is required',
+                };
+                return;
+            }
+            if (!apiKey) {
+                yield {
+                    success: false,
+                    error: 'Perplexity API key is required',
+                };
+                return;
+            }
+            const chatRequest = {
+                model: model,
+                messages: messages,
+                stream: true,
+            };
+            // Add optional parameters if provided
+            if (max_tokens !== undefined)
+                chatRequest.max_tokens = max_tokens;
+            if (temperature !== undefined)
+                chatRequest.temperature = temperature;
+            if (top_p !== undefined)
+                chatRequest.top_p = top_p;
+            if (top_k !== undefined)
+                chatRequest.top_k = top_k;
+            if (presence_penalty !== undefined)
+                chatRequest.presence_penalty = presence_penalty;
+            if (frequency_penalty !== undefined)
+                chatRequest.frequency_penalty = frequency_penalty;
+            if (search_domain !== undefined)
+                chatRequest.search_domain = search_domain;
+            if (return_citations !== undefined)
+                chatRequest.return_citations = return_citations;
+            if (return_images !== undefined)
+                chatRequest.return_images = return_images;
+            if (return_related_questions !== undefined)
+                chatRequest.return_related_questions = return_related_questions;
+            const response = await fetch(`https://api.perplexity.ai/chat/completions`, {
+                method: 'POST',
+                headers: {
+                    'Content-Type': 'application/json',
+                    Authorization: `Bearer ${apiKey}`,
+                },
+                body: JSON.stringify(chatRequest),
+            });
+            if (!response.ok) {
+                const errorText = await response.text();
+                yield {
+                    success: false,
+                    error: `Perplexity API error: ${response.status} - ${errorText}`,
+                };
+                return;
+            }
+            if (!response.body) {
+                yield { success: false, error: 'Response body is null' };
+                return;
+            }
+            const reader = response.body.getReader();
+            const decoder = new TextDecoder();
+            let buffer = '';
+            while (true) {
+                const { done, value } = await reader.read();
+                if (done)
+                    break;
+                buffer += decoder.decode(value, { stream: true });
+                const lines = buffer.split('\n');
+                buffer = lines.pop() || '';
+                for (const line of lines) {
+                    const trimmedLine = line.trim();
+                    if (!trimmedLine || !trimmedLine.startsWith('data: '))
+                        continue;
+                    const data = trimmedLine.slice(6);
+                    if (data === '[DONE]')
+                        break;
+                    try {
+                        const parsed = JSON.parse(data);
+                        const choice = parsed.choices?.[0];
+                        if (choice?.delta?.content) {
+                            yield {
+                                delta: choice.delta.content,
+                                model: parsed.model || model,
+                            };
+                        }
+                        if (choice?.finish_reason) {
+                            yield {
+                                finish_reason: choice.finish_reason,
+                            };
+                        }
+                        if (parsed.usage) {
+                            yield {
+                                usage: parsed.usage,
+                            };
+                        }
+                        // Handle Perplexity-specific fields
+                        if (parsed.citations) {
+                            yield {
+                                citations: parsed.citations,
+                            };
+                        }
+                        if (parsed.images) {
+                            yield {
+                                images: parsed.images,
+                            };
+                        }
+                        if (parsed.related_questions) {
+                            yield {
+                                related_questions: parsed.related_questions,
+                            };
+                        }
+                    }
+                    catch (parseError) {
+                        // Skip invalid JSON
+                        continue;
+                    }
+                }
+            }
+        }
+        catch (error) {
+            yield {
+                success: false,
+                error: `Failed to stream chat: ${error.message}`,
+            };
+        }
+    }
     /**
      * Generate text with Perplexity (alias for completion)
      */
     async _tool_generate(request) {
+        const params = request.params;
+        const { _isStreaming = false } = params;
+        if (_isStreaming) {
+            this.logger.debug('Streaming generate...');
+            return StreamUtils.processGenerator(request, this._streamGenerate(request), request.stream);
+        }
         try {
-            const params = request.params;
             const { model = this.defaultModel, prompt, system, max_tokens, temperature, top_p, top_k, presence_penalty, frequency_penalty, search_domain, return_citations, return_images, return_related_questions, apiKey = this.defaultApiKey, } = params;
             if (!prompt) {
                 return {
@@ -171,6 +316,146 @@ export class PerplexityIntelligenceTool extends oLaneTool {
             };
         }
     }
+    /**
+     * Stream text generation with Perplexity
+     */
+    async *_streamGenerate(request) {
+        try {
+            const params = request.params;
+            const { model = this.defaultModel, prompt, system, max_tokens, temperature, top_p, top_k, presence_penalty, frequency_penalty, search_domain, return_citations, return_images, return_related_questions, apiKey = this.defaultApiKey, } = params;
+            if (!prompt) {
+                yield {
+                    success: false,
+                    error: 'Prompt is required',
+                };
+                return;
+            }
+            if (!apiKey) {
+                yield {
+                    success: false,
+                    error: 'Perplexity API key is required',
+                };
+                return;
+            }
+            // Convert prompt to messages format
+            const messages = [];
+            if (system) {
+                messages.push({ role: 'system', content: system });
+            }
+            messages.push({ role: 'user', content: prompt });
+            const chatRequest = {
+                model: model,
+                messages,
+                stream: true,
+            };
+            // Add optional parameters if provided
+            if (max_tokens !== undefined)
+                chatRequest.max_tokens = max_tokens;
+            if (temperature !== undefined)
+                chatRequest.temperature = temperature;
+            if (top_p !== undefined)
+                chatRequest.top_p = top_p;
+            if (top_k !== undefined)
+                chatRequest.top_k = top_k;
+            if (presence_penalty !== undefined)
+                chatRequest.presence_penalty = presence_penalty;
+            if (frequency_penalty !== undefined)
+                chatRequest.frequency_penalty = frequency_penalty;
+            if (search_domain !== undefined)
+                chatRequest.search_domain = search_domain;
+            if (return_citations !== undefined)
+                chatRequest.return_citations = return_citations;
+            if (return_images !== undefined)
+                chatRequest.return_images = return_images;
+            if (return_related_questions !== undefined)
+                chatRequest.return_related_questions = return_related_questions;
+            const response = await fetch(`https://api.perplexity.ai/chat/completions`, {
+                method: 'POST',
+                headers: {
+                    'Content-Type': 'application/json',
+                    Authorization: `Bearer ${apiKey}`,
+                },
+                body: JSON.stringify(chatRequest),
+            });
+            if (!response.ok) {
+                const errorText = await response.text();
+                yield {
+                    success: false,
+                    error: `Perplexity API error: ${response.status} - ${errorText}`,
+                };
+                return;
+            }
+            if (!response.body) {
+                yield { success: false, error: 'Response body is null' };
+                return;
+            }
+            const reader = response.body.getReader();
+            const decoder = new TextDecoder();
+            let buffer = '';
+            while (true) {
+                const { done, value } = await reader.read();
+                if (done)
+                    break;
+                buffer += decoder.decode(value, { stream: true });
+                const lines = buffer.split('\n');
+                buffer = lines.pop() || '';
+                for (const line of lines) {
+                    const trimmedLine = line.trim();
+                    if (!trimmedLine || !trimmedLine.startsWith('data: '))
+                        continue;
+                    const data = trimmedLine.slice(6);
+                    if (data === '[DONE]')
+                        break;
+                    try {
+                        const parsed = JSON.parse(data);
+                        const choice = parsed.choices?.[0];
+                        if (choice?.delta?.content) {
+                            yield {
+                                delta: choice.delta.content,
+                                model: parsed.model || model,
+                            };
+                        }
+                        if (choice?.finish_reason) {
+                            yield {
+                                finish_reason: choice.finish_reason,
+                            };
+                        }
+                        if (parsed.usage) {
+                            yield {
+                                usage: parsed.usage,
+                            };
+                        }
+                        // Handle Perplexity-specific fields
+                        if (parsed.citations) {
+                            yield {
+                                citations: parsed.citations,
+                            };
+                        }
+                        if (parsed.images) {
+                            yield {
+                                images: parsed.images,
+                            };
+                        }
+                        if (parsed.related_questions) {
+                            yield {
+                                related_questions: parsed.related_questions,
+                            };
+                        }
+                    }
+                    catch (parseError) {
+                        // Skip invalid JSON
+                        continue;
+                    }
+                }
+            }
+        }
+        catch (error) {
+            yield {
+                success: false,
+                error: `Failed to stream generate: ${error.message}`,
+            };
+        }
+    }
     /**
      * List available models
      */
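
Both generators above embed the same hand-rolled SSE read loop: decode bytes, split on newlines, carry the trailing partial line into the next read, and treat "data: " lines as JSON payloads with "[DONE]" as the completion sentinel. Distilled into a standalone helper for reference (the name sseJsonEvents is hypothetical; the package's own shared helpers live in utils/sse-parser and utils/streaming-helpers, whose declarations appear below):

    // Distilled sketch of the SSE read loop both generators share.
    async function* sseJsonEvents(body: ReadableStream<Uint8Array>): AsyncGenerator<any> {
      const reader = body.getReader();
      const decoder = new TextDecoder();
      let buffer = '';
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split('\n');
        buffer = lines.pop() || ''; // carry the trailing partial line into the next read
        for (const line of lines) {
          const trimmed = line.trim();
          if (!trimmed.startsWith('data: ')) continue;
          const data = trimmed.slice(6);
          if (data === '[DONE]') return; // completion sentinel
          try {
            yield JSON.parse(data);
          } catch {
            // skip partial/malformed JSON, as the tool code above does
          }
        }
      }
    }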
package/dist/src/types/streaming.types.d.ts
@@ -0,0 +1,197 @@
+/**
+ * Streaming-related type definitions for o-intelligence package
+ */
+/**
+ * Represents a single chunk of streaming data
+ */
+export interface StreamChunk {
+    /**
+     * The text content of this chunk (token/text fragment)
+     */
+    text: string;
+    /**
+     * Whether this is a delta (incremental) chunk
+     */
+    delta?: boolean;
+    /**
+     * Current position in the stream (optional)
+     */
+    position?: number;
+    /**
+     * Whether this is the final chunk
+     */
+    isComplete?: boolean;
+    /**
+     * Model information
+     */
+    model?: string;
+    /**
+     * Additional metadata from the provider
+     */
+    metadata?: {
+        /**
+         * Finish reason (if available)
+         */
+        finish_reason?: string;
+        /**
+         * Token usage information (usually in final chunk)
+         */
+        usage?: {
+            prompt_tokens?: number;
+            completion_tokens?: number;
+            total_tokens?: number;
+        };
+        /**
+         * Provider-specific data
+         */
+        [key: string]: any;
+    };
+}
+/**
+ * Configuration options for streaming
+ */
+export interface StreamingOptions {
+    /**
+     * Whether to enable streaming
+     */
+    enabled?: boolean;
+    /**
+     * Buffer size for streaming chunks (in bytes)
+     */
+    bufferSize?: number;
+    /**
+     * Timeout for reading from stream (in milliseconds)
+     */
+    readTimeoutMs?: number;
+    /**
+     * Whether to include metadata in each chunk
+     */
+    includeMetadata?: boolean;
+    /**
+     * Callback for handling errors during streaming
+     */
+    onError?: (error: Error) => void;
+    /**
+     * Callback for when streaming starts
+     */
+    onStart?: () => void;
+    /**
+     * Callback for when streaming completes
+     */
+    onComplete?: (totalChunks: number) => void;
+}
+/**
+ * Streaming request parameters (extends standard completion params)
+ */
+export interface StreamingRequestParams {
+    /**
+     * The model to use
+     */
+    model?: string;
+    /**
+     * Messages for chat completion
+     */
+    messages?: Array<{
+        role: string;
+        content: string;
+    }>;
+    /**
+     * System message
+     */
+    system?: string;
+    /**
+     * Maximum tokens to generate
+     */
+    max_tokens?: number;
+    /**
+     * Temperature (0-1 or 0-2 depending on provider)
+     */
+    temperature?: number;
+    /**
+     * Top-p sampling
+     */
+    top_p?: number;
+    /**
+     * API key for the provider
+     */
+    apiKey?: string;
+    /**
+     * Whether to stream the response
+     */
+    stream?: boolean;
+    /**
+     * Streaming-specific options
+     */
+    streamingOptions?: StreamingOptions;
+    /**
+     * Additional provider-specific parameters
+     */
+    [key: string]: any;
+}
+/**
+ * Accumulator for building complete response from chunks
+ */
+export interface StreamAccumulator {
+    /**
+     * Full text accumulated so far
+     */
+    fullText: string;
+    /**
+     * Number of chunks received
+     */
+    chunkCount: number;
+    /**
+     * Timestamp when streaming started
+     */
+    startTime: number;
+    /**
+     * Whether streaming is complete
+     */
+    isComplete: boolean;
+    /**
+     * Final metadata (if available)
+     */
+    metadata?: StreamChunk['metadata'];
+}
+/**
+ * Result from a completed stream
+ */
+export interface StreamResult {
+    /**
+     * Complete generated text
+     */
+    text: string;
+    /**
+     * Model used
+     */
+    model?: string;
+    /**
+     * Total chunks received
+     */
+    totalChunks: number;
+    /**
+     * Time taken (in milliseconds)
+     */
+    duration: number;
+    /**
+     * Usage information
+     */
+    usage?: {
+        prompt_tokens: number;
+        completion_tokens: number;
+        total_tokens: number;
+    };
+    /**
+     * Finish reason
+     */
+    finish_reason?: string;
+    /**
+     * Whether the stream completed successfully
+     */
+    success: boolean;
+    /**
+     * Error message if stream failed
+     */
+    error?: string;
+}
+//# sourceMappingURL=streaming.types.d.ts.map
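
These declarations suggest the intended consumption pattern: fold StreamChunks through a StreamAccumulator and emit a StreamResult at the end. A minimal sketch against the shapes above (the import path is an assumption; the declarations ship in dist/src/types/streaming.types.d.ts):

    import type { StreamChunk, StreamAccumulator, StreamResult } from '@olane/o-intelligence';

    // Sketch: fold chunks through an accumulator into a final result,
    // using only the fields declared above.
    function foldChunks(chunks: StreamChunk[]): StreamResult {
      const acc: StreamAccumulator = {
        fullText: '',
        chunkCount: 0,
        startTime: Date.now(),
        isComplete: false,
      };
      for (const chunk of chunks) {
        acc.fullText += chunk.text;
        acc.chunkCount += 1;
        if (chunk.metadata) acc.metadata = { ...acc.metadata, ...chunk.metadata };
        if (chunk.isComplete) acc.isComplete = true;
      }
      return {
        text: acc.fullText,
        totalChunks: acc.chunkCount,
        duration: Date.now() - acc.startTime,
        finish_reason: acc.metadata?.finish_reason,
        success: acc.isComplete,
      };
    }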
package/dist/src/types/streaming.types.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"streaming.types.d.ts","sourceRoot":"","sources":["../../../src/types/streaming.types.ts"],"names":[],"mappings":"AAAA;;GAEG;AAEH;;GAEG;AACH,MAAM,WAAW,WAAW;IAC1B;;OAEG;IACH,IAAI,EAAE,MAAM,CAAC;IAEb;;OAEG;IACH,KAAK,CAAC,EAAE,OAAO,CAAC;IAEhB;;OAEG;IACH,QAAQ,CAAC,EAAE,MAAM,CAAC;IAElB;;OAEG;IACH,UAAU,CAAC,EAAE,OAAO,CAAC;IAErB;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IAEf;;OAEG;IACH,QAAQ,CAAC,EAAE;QACT;;WAEG;QACH,aAAa,CAAC,EAAE,MAAM,CAAC;QAEvB;;WAEG;QACH,KAAK,CAAC,EAAE;YACN,aAAa,CAAC,EAAE,MAAM,CAAC;YACvB,iBAAiB,CAAC,EAAE,MAAM,CAAC;YAC3B,YAAY,CAAC,EAAE,MAAM,CAAC;SACvB,CAAC;QAEF;;WAEG;QACH,CAAC,GAAG,EAAE,MAAM,GAAG,GAAG,CAAC;KACpB,CAAC;CACH;AAED;;GAEG;AACH,MAAM,WAAW,gBAAgB;IAC/B;;OAEG;IACH,OAAO,CAAC,EAAE,OAAO,CAAC;IAElB;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IAEpB;;OAEG;IACH,aAAa,CAAC,EAAE,MAAM,CAAC;IAEvB;;OAEG;IACH,eAAe,CAAC,EAAE,OAAO,CAAC;IAE1B;;OAEG;IACH,OAAO,CAAC,EAAE,CAAC,KAAK,EAAE,KAAK,KAAK,IAAI,CAAC;IAEjC;;OAEG;IACH,OAAO,CAAC,EAAE,MAAM,IAAI,CAAC;IAErB;;OAEG;IACH,UAAU,CAAC,EAAE,CAAC,WAAW,EAAE,MAAM,KAAK,IAAI,CAAC;CAC5C;AAED;;GAEG;AACH,MAAM,WAAW,sBAAsB;IACrC;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IAEf;;OAEG;IACH,QAAQ,CAAC,EAAE,KAAK,CAAC;QACf,IAAI,EAAE,MAAM,CAAC;QACb,OAAO,EAAE,MAAM,CAAC;KACjB,CAAC,CAAC;IAEH;;OAEG;IACH,MAAM,CAAC,EAAE,MAAM,CAAC;IAEhB;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IAEpB;;OAEG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IAEf;;OAEG;IACH,MAAM,CAAC,EAAE,MAAM,CAAC;IAEhB;;OAEG;IACH,MAAM,CAAC,EAAE,OAAO,CAAC;IAEjB;;OAEG;IACH,gBAAgB,CAAC,EAAE,gBAAgB,CAAC;IAEpC;;OAEG;IACH,CAAC,GAAG,EAAE,MAAM,GAAG,GAAG,CAAC;CACpB;AAED;;GAEG;AACH,MAAM,WAAW,iBAAiB;IAChC;;OAEG;IACH,QAAQ,EAAE,MAAM,CAAC;IAEjB;;OAEG;IACH,UAAU,EAAE,MAAM,CAAC;IAEnB;;OAEG;IACH,SAAS,EAAE,MAAM,CAAC;IAElB;;OAEG;IACH,UAAU,EAAE,OAAO,CAAC;IAEpB;;OAEG;IACH,QAAQ,CAAC,EAAE,WAAW,CAAC,UAAU,CAAC,CAAC;CACpC;AAED;;GAEG;AACH,MAAM,WAAW,YAAY;IAC3B;;OAEG;IACH,IAAI,EAAE,MAAM,CAAC;IAEb;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IAEf;;OAEG;IACH,WAAW,EAAE,MAAM,CAAC;IAEpB;;OAEG;IACH,QAAQ,EAAE,MAAM,CAAC;IAEjB;;OAEG;IACH,KAAK,CAAC,EAAE;QACN,aAAa,EAAE,MAAM,CAAC;QACtB,iBAAiB,EAAE,MAAM,CAAC;QAC1B,YAAY,EAAE,MAAM,CAAC;KACtB,CAAC;IAEF;;OAEG;IACH,aAAa,CAAC,EAAE,MAAM,CAAC;IAEvB;;OAEG;IACH,OAAO,EAAE,OAAO,CAAC;IAEjB;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;CAChB"}
package/dist/src/utils/sse-parser.d.ts
@@ -0,0 +1,66 @@
+/**
+ * Server-Sent Events (SSE) Parser Utility
+ *
+ * Parses streaming responses from various LLM providers that use SSE format.
+ * Most providers (Anthropic, OpenAI, Perplexity, Grok) follow a similar pattern:
+ * - Lines starting with "data: " contain JSON payloads
+ * - "data: [DONE]" signals stream completion
+ * - Empty lines separate events
+ */
+/// <reference types="node" />
+/**
+ * Represents a parsed SSE message
+ */
+export interface SSEMessage {
+    event?: string;
+    data?: string;
+    id?: string;
+    retry?: number;
+}
+/**
+ * Parse a buffer of SSE text into individual messages
+ *
+ * @param buffer - The text buffer containing SSE data
+ * @returns Array of parsed SSE messages
+ */
+export declare function parseSSE(buffer: string): SSEMessage[];
+/**
+ * Check if SSE data indicates stream completion
+ */
+export declare function isStreamDone(data: string): boolean;
+/**
+ * AsyncGenerator that yields parsed chunks from a ReadableStream
+ *
+ * @param stream - The response stream from fetch
+ * @yields Parsed JSON data from each SSE message
+ */
+export declare function streamSSEChunks(stream: ReadableStream<Uint8Array>): AsyncGenerator<any, void, unknown>;
+/**
+ * Extract text content from Anthropic streaming chunk
+ */
+export declare function extractAnthropicContent(chunk: any): string;
+/**
+ * Extract text content from OpenAI streaming chunk
+ */
+export declare function extractOpenAIContent(chunk: any): string;
+/**
+ * Extract text content from Ollama streaming chunk
+ */
+export declare function extractOllamaContent(chunk: any): string;
+/**
+ * Extract text content from Perplexity streaming chunk
+ */
+export declare function extractPerplexityContent(chunk: any): string;
+/**
+ * Extract text content from Grok streaming chunk
+ */
+export declare function extractGrokContent(chunk: any): string;
+/**
+ * Extract text content from Gemini streaming chunk
+ */
+export declare function extractGeminiContent(chunk: any): string;
+/**
+ * Generic content extractor that tries multiple patterns
+ */
+export declare function extractContent(chunk: any, provider?: string): string;
+//# sourceMappingURL=sse-parser.d.ts.map
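
The header comment above pins down the wire format these helpers target. A short usage sketch, assuming the import path and inferring parseSSE's output from the SSEMessage interface and that comment:

    import { parseSSE, isStreamDone } from '@olane/o-intelligence';

    // Two events in the documented wire format: "data: " payload lines,
    // blank lines between events, "[DONE]" as the terminator.
    const wire =
      'data: {"choices":[{"delta":{"content":"Hel"}}]}\n\n' +
      'data: [DONE]\n\n';

    for (const msg of parseSSE(wire)) {
      if (msg.data === undefined) continue;
      if (isStreamDone(msg.data)) break;
      const parsed = JSON.parse(msg.data);
      console.log(parsed.choices?.[0]?.delta?.content); // "Hel"
    }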
package/dist/src/utils/sse-parser.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"sse-parser.d.ts","sourceRoot":"","sources":["../../../src/utils/sse-parser.ts"],"names":[],"mappings":"AAAA;;;;;;;;GAQG;;AAEH;;GAEG;AACH,MAAM,WAAW,UAAU;IACzB,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,EAAE,CAAC,EAAE,MAAM,CAAC;IACZ,KAAK,CAAC,EAAE,MAAM,CAAC;CAChB;AAsBD;;;;;GAKG;AACH,wBAAgB,QAAQ,CAAC,MAAM,EAAE,MAAM,GAAG,UAAU,EAAE,CAkDrD;AAED;;GAEG;AACH,wBAAgB,YAAY,CAAC,IAAI,EAAE,MAAM,GAAG,OAAO,CAElD;AAED;;;;;GAKG;AACH,wBAAuB,eAAe,CACpC,MAAM,EAAE,cAAc,CAAC,UAAU,CAAC,GACjC,cAAc,CAAC,GAAG,EAAE,IAAI,EAAE,OAAO,CAAC,CAoEpC;AAED;;GAEG;AACH,wBAAgB,uBAAuB,CAAC,KAAK,EAAE,GAAG,GAAG,MAAM,CAK1D;AAED;;GAEG;AACH,wBAAgB,oBAAoB,CAAC,KAAK,EAAE,GAAG,GAAG,MAAM,CAKvD;AAED;;GAEG;AACH,wBAAgB,oBAAoB,CAAC,KAAK,EAAE,GAAG,GAAG,MAAM,CAQvD;AAED;;GAEG;AACH,wBAAgB,wBAAwB,CAAC,KAAK,EAAE,GAAG,GAAG,MAAM,CAK3D;AAED;;GAEG;AACH,wBAAgB,kBAAkB,CAAC,KAAK,EAAE,GAAG,GAAG,MAAM,CAKrD;AAED;;GAEG;AACH,wBAAgB,oBAAoB,CAAC,KAAK,EAAE,GAAG,GAAG,MAAM,CAKvD;AAED;;GAEG;AACH,wBAAgB,cAAc,CAAC,KAAK,EAAE,GAAG,EAAE,QAAQ,CAAC,EAAE,MAAM,GAAG,MAAM,CAiDpE"}