@olane/o-intelligence 0.7.12-alpha.9 → 0.7.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. package/dist/src/anthropic-intelligence.tool.d.ts +11 -3
  2. package/dist/src/anthropic-intelligence.tool.d.ts.map +1 -1
  3. package/dist/src/anthropic-intelligence.tool.js +230 -4
  4. package/dist/src/gemini-intelligence.tool.d.ts +11 -3
  5. package/dist/src/gemini-intelligence.tool.d.ts.map +1 -1
  6. package/dist/src/gemini-intelligence.tool.js +234 -2
  7. package/dist/src/grok-intelligence.tool.d.ts +5 -3
  8. package/dist/src/grok-intelligence.tool.d.ts.map +1 -1
  9. package/dist/src/grok-intelligence.tool.js +207 -2
  10. package/dist/src/interfaces/prompt.request.d.ts +2 -2
  11. package/dist/src/interfaces/prompt.request.d.ts.map +1 -1
  12. package/dist/src/methods/intelligence.methods.d.ts.map +1 -1
  13. package/dist/src/methods/intelligence.methods.js +7 -0
  14. package/dist/src/methods/llm.methods.d.ts.map +1 -1
  15. package/dist/src/methods/llm.methods.js +14 -0
  16. package/dist/src/o-intelligence.tool.d.ts.map +1 -1
  17. package/dist/src/o-intelligence.tool.js +42 -38
  18. package/dist/src/ollama-intelligence.tool.d.ts +11 -3
  19. package/dist/src/ollama-intelligence.tool.d.ts.map +1 -1
  20. package/dist/src/ollama-intelligence.tool.js +188 -2
  21. package/dist/src/openai-intelligence.tool.d.ts +11 -3
  22. package/dist/src/openai-intelligence.tool.d.ts.map +1 -1
  23. package/dist/src/openai-intelligence.tool.js +221 -2
  24. package/dist/src/perplexity-intelligence.tool.d.ts +11 -3
  25. package/dist/src/perplexity-intelligence.tool.d.ts.map +1 -1
  26. package/dist/src/perplexity-intelligence.tool.js +288 -3
  27. package/dist/src/types/streaming.types.d.ts +197 -0
  28. package/dist/src/types/streaming.types.d.ts.map +1 -0
  29. package/dist/src/types/streaming.types.js +4 -0
  30. package/dist/src/utils/sse-parser.d.ts +66 -0
  31. package/dist/src/utils/sse-parser.d.ts.map +1 -0
  32. package/dist/src/utils/sse-parser.js +255 -0
  33. package/dist/src/utils/streaming-helpers.d.ts +16 -0
  34. package/dist/src/utils/streaming-helpers.d.ts.map +1 -0
  35. package/dist/src/utils/streaming-helpers.js +129 -0
  36. package/package.json +7 -7
@@ -1,7 +1,7 @@
1
1
  import { oRequest } from '@olane/o-core';
2
2
  import { ToolResult } from '@olane/o-tool';
3
3
  import { oLaneTool } from '@olane/o-lane';
4
- import { oNodeToolConfig } from '@olane/o-node';
4
+ import { oNodeToolConfig, oStreamRequest } from '@olane/o-node';
5
5
  export declare class AnthropicIntelligenceTool extends oLaneTool {
6
6
  private defaultModel;
7
7
  private apiKey;
@@ -9,11 +9,19 @@ export declare class AnthropicIntelligenceTool extends oLaneTool {
9
9
  /**
10
10
  * Chat completion with Anthropic
11
11
  */
12
- _tool_completion(request: oRequest): Promise<ToolResult>;
12
+ _tool_completion(request: oStreamRequest): Promise<ToolResult>;
13
+ /**
14
+ * Stream chat completion with Anthropic
15
+ */
16
+ private _streamCompletion;
13
17
  /**
14
18
  * Generate text with Anthropic (using messages endpoint)
15
19
  */
16
- _tool_generate(request: oRequest): Promise<ToolResult>;
20
+ _tool_generate(request: oStreamRequest): Promise<ToolResult>;
21
+ /**
22
+ * Stream text generation with Anthropic
23
+ */
24
+ private _streamGenerate;
17
25
  /**
18
26
  * List available models
19
27
  */
@@ -1 +1 @@
1
- {"version":3,"file":"anthropic-intelligence.tool.d.ts","sourceRoot":"","sources":["../../src/anthropic-intelligence.tool.ts"],"names":[],"mappings":"AAAA,OAAO,EAAY,QAAQ,EAAE,MAAM,eAAe,CAAC;AACnD,OAAO,EAAE,UAAU,EAAE,MAAM,eAAe,CAAC;AAE3C,OAAO,EAAE,SAAS,EAAE,MAAM,eAAe,CAAC;AAC1C,OAAO,EAAe,eAAe,EAAE,MAAM,eAAe,CAAC;AAkG7D,qBAAa,yBAA0B,SAAQ,SAAS;IACtD,OAAO,CAAC,YAAY,CAAgC;IACpD,OAAO,CAAC,MAAM,CAA+C;gBAEjD,MAAM,EAAE,eAAe;IAWnC;;OAEG;IACG,gBAAgB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAqE9D;;OAEG;IACG,cAAc,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IA+E5D;;OAEG;IACG,iBAAiB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IA4C/D;;OAEG;IACG,gBAAgB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAoD9D;;OAEG;IACG,YAAY,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;CAuC3D"}
1
+ {"version":3,"file":"anthropic-intelligence.tool.d.ts","sourceRoot":"","sources":["../../src/anthropic-intelligence.tool.ts"],"names":[],"mappings":"AAAA,OAAO,EAAuB,QAAQ,EAAa,MAAM,eAAe,CAAC;AACzE,OAAO,EAAE,UAAU,EAAE,MAAM,eAAe,CAAC;AAE3C,OAAO,EAAE,SAAS,EAAE,MAAM,eAAe,CAAC;AAC1C,OAAO,EAEL,eAAe,EACf,cAAc,EAEf,MAAM,eAAe,CAAC;AAmGvB,qBAAa,yBAA0B,SAAQ,SAAS;IACtD,OAAO,CAAC,YAAY,CAAgC;IACpD,OAAO,CAAC,MAAM,CAA+C;gBAEjD,MAAM,EAAE,eAAe;IAWnC;;OAEG;IACG,gBAAgB,CAAC,OAAO,EAAE,cAAc,GAAG,OAAO,CAAC,UAAU,CAAC;IA6EpE;;OAEG;YACY,iBAAiB;IAoHhC;;OAEG;IACG,cAAc,CAAC,OAAO,EAAE,cAAc,GAAG,OAAO,CAAC,UAAU,CAAC;IAyFlE;;OAEG;YACY,eAAe;IA6H9B;;OAEG;IACG,iBAAiB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IA4C/D;;OAEG;IACG,gBAAgB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAoD9D;;OAEG;IACG,YAAY,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;CAuC3D"}
@@ -1,6 +1,7 @@
1
1
  import { oAddress } from '@olane/o-core';
2
2
  import { LLM_PARAMS } from './methods/llm.methods.js';
3
3
  import { oLaneTool } from '@olane/o-lane';
4
+ import { StreamUtils, } from '@olane/o-node';
4
5
  export class AnthropicIntelligenceTool extends oLaneTool {
5
6
  constructor(config) {
6
7
  super({
@@ -18,9 +19,15 @@ export class AnthropicIntelligenceTool extends oLaneTool {
18
19
  * Chat completion with Anthropic
19
20
  */
20
21
  async _tool_completion(request) {
22
+ const params = request.params;
23
+ const { _isStreaming = false } = params;
24
+ if (_isStreaming) {
25
+ this.logger.debug('Streaming completion...');
26
+ const gen = this._streamCompletion(request);
27
+ return StreamUtils.processGenerator(request, gen, request.stream);
28
+ }
21
29
  try {
22
- const params = request.params;
23
- const { model = this.defaultModel, messages, system, max_tokens = 1000, apiKey = this.apiKey, } = params;
30
+ const { model = this.defaultModel, messages, system, max_tokens = 10000, apiKey = this.apiKey, } = params;
24
31
  if (!apiKey) {
25
32
  return {
26
33
  success: false,
@@ -72,13 +79,121 @@ export class AnthropicIntelligenceTool extends oLaneTool {
72
79
  };
73
80
  }
74
81
  }
82
+ /**
83
+ * Stream chat completion with Anthropic
84
+ */
85
+ async *_streamCompletion(request) {
86
+ try {
87
+ const params = request.params;
88
+ const { model = this.defaultModel, messages, system, max_tokens = 10000, apiKey = this.apiKey, } = params;
89
+ if (!apiKey) {
90
+ yield {
91
+ success: false,
92
+ error: 'Anthropic API key is required',
93
+ };
94
+ return;
95
+ }
96
+ if (!messages || !Array.isArray(messages)) {
97
+ yield {
98
+ success: false,
99
+ error: '"messages" array is required',
100
+ };
101
+ return;
102
+ }
103
+ const chatRequest = {
104
+ model: model,
105
+ max_tokens: max_tokens,
106
+ messages: messages,
107
+ system: system,
108
+ stream: true,
109
+ };
110
+ const response = await fetch(`https://api.anthropic.com/v1/messages`, {
111
+ method: 'POST',
112
+ headers: {
113
+ 'Content-Type': 'application/json',
114
+ 'x-api-key': apiKey,
115
+ 'anthropic-version': '2023-06-01',
116
+ },
117
+ body: JSON.stringify(chatRequest),
118
+ });
119
+ if (!response.ok) {
120
+ const errorText = await response.text();
121
+ yield {
122
+ success: false,
123
+ error: `Anthropic API error: ${response.status} - ${errorText}`,
124
+ };
125
+ return;
126
+ }
127
+ if (!response.body) {
128
+ yield {
129
+ success: false,
130
+ error: 'Response body is null',
131
+ };
132
+ return;
133
+ }
134
+ const reader = response.body.getReader();
135
+ const decoder = new TextDecoder();
136
+ let buffer = '';
137
+ while (true) {
138
+ const { done, value } = await reader.read();
139
+ if (done)
140
+ break;
141
+ buffer += decoder.decode(value, { stream: true });
142
+ const lines = buffer.split('\n');
143
+ buffer = lines.pop() || '';
144
+ for (const line of lines) {
145
+ const trimmedLine = line.trim();
146
+ if (!trimmedLine || !trimmedLine.startsWith('data: '))
147
+ continue;
148
+ const data = trimmedLine.slice(6);
149
+ if (data === '[DONE]')
150
+ break;
151
+ try {
152
+ const parsed = JSON.parse(data);
153
+ if (parsed.type === 'content_block_delta' && parsed.delta?.text) {
154
+ yield {
155
+ delta: parsed.delta.text,
156
+ model: model,
157
+ };
158
+ }
159
+ else if (parsed.type === 'message_start' && parsed.message) {
160
+ yield {
161
+ model: parsed.message.model,
162
+ usage: parsed.message.usage,
163
+ };
164
+ }
165
+ else if (parsed.type === 'message_delta' && parsed.delta) {
166
+ yield {
167
+ stop_reason: parsed.delta.stop_reason,
168
+ usage: parsed.usage,
169
+ };
170
+ }
171
+ }
172
+ catch (parseError) {
173
+ // Skip invalid JSON
174
+ continue;
175
+ }
176
+ }
177
+ }
178
+ }
179
+ catch (error) {
180
+ yield {
181
+ success: false,
182
+ error: `Failed to stream chat: ${error.message}`,
183
+ };
184
+ }
185
+ }
75
186
  /**
76
187
  * Generate text with Anthropic (using messages endpoint)
77
188
  */
78
189
  async _tool_generate(request) {
190
+ const params = request.params;
191
+ const { _isStreaming = false } = params;
192
+ if (_isStreaming) {
193
+ return StreamUtils.processGenerator(request, this._streamGenerate(request), request.stream);
194
+ }
79
195
  try {
80
- const params = request.params;
81
- const { model = this.defaultModel, prompt, system, max_tokens = 1000, apiKey = this.apiKey, ...options } = params;
196
+ const { model = this.defaultModel, prompt, system, max_tokens = 10000, apiKey = this.apiKey, ...options } = params;
82
197
  if (!apiKey) {
83
198
  return {
84
199
  success: false,
@@ -138,6 +253,117 @@ export class AnthropicIntelligenceTool extends oLaneTool {
138
253
  };
139
254
  }
140
255
  }
256
+ /**
257
+ * Stream text generation with Anthropic
258
+ */
259
+ async *_streamGenerate(request) {
260
+ try {
261
+ const params = request.params;
262
+ const { model = this.defaultModel, prompt, system, max_tokens = 10000, apiKey = this.apiKey, ...options } = params;
263
+ if (!apiKey) {
264
+ yield {
265
+ success: false,
266
+ error: 'Anthropic API key is required',
267
+ };
268
+ return;
269
+ }
270
+ if (!prompt) {
271
+ yield {
272
+ success: false,
273
+ error: 'Prompt is required',
274
+ };
275
+ return;
276
+ }
277
+ const messages = [
278
+ {
279
+ role: 'user',
280
+ content: prompt,
281
+ },
282
+ ];
283
+ const generateRequest = {
284
+ model: model,
285
+ max_tokens: max_tokens,
286
+ messages,
287
+ system: system,
288
+ stream: true,
289
+ ...options,
290
+ };
291
+ const response = await fetch(`https://api.anthropic.com/v1/messages`, {
292
+ method: 'POST',
293
+ headers: {
294
+ 'Content-Type': 'application/json',
295
+ 'x-api-key': apiKey,
296
+ 'anthropic-version': '2023-06-01',
297
+ },
298
+ body: JSON.stringify(generateRequest),
299
+ });
300
+ if (!response.ok) {
301
+ const errorText = await response.text();
302
+ yield {
303
+ success: false,
304
+ error: `Anthropic API error: ${response.status} - ${errorText}`,
305
+ };
306
+ return;
307
+ }
308
+ if (!response.body) {
309
+ yield {
310
+ success: false,
311
+ error: 'Response body is null',
312
+ };
313
+ return;
314
+ }
315
+ const reader = response.body.getReader();
316
+ const decoder = new TextDecoder();
317
+ let buffer = '';
318
+ while (true) {
319
+ const { done, value } = await reader.read();
320
+ if (done)
321
+ break;
322
+ buffer += decoder.decode(value, { stream: true });
323
+ const lines = buffer.split('\n');
324
+ buffer = lines.pop() || '';
325
+ for (const line of lines) {
326
+ const trimmedLine = line.trim();
327
+ if (!trimmedLine || !trimmedLine.startsWith('data: '))
328
+ continue;
329
+ const data = trimmedLine.slice(6);
330
+ if (data === '[DONE]')
331
+ break;
332
+ try {
333
+ const parsed = JSON.parse(data);
334
+ if (parsed.type === 'content_block_delta' && parsed.delta?.text) {
335
+ yield {
336
+ delta: parsed.delta.text,
337
+ model: model,
338
+ };
339
+ }
340
+ else if (parsed.type === 'message_start' && parsed.message) {
341
+ yield {
342
+ model: parsed.message.model,
343
+ usage: parsed.message.usage,
344
+ };
345
+ }
346
+ else if (parsed.type === 'message_delta' && parsed.delta) {
347
+ yield {
348
+ stop_reason: parsed.delta.stop_reason,
349
+ usage: parsed.usage,
350
+ };
351
+ }
352
+ }
353
+ catch (parseError) {
354
+ // Skip invalid JSON
355
+ continue;
356
+ }
357
+ }
358
+ }
359
+ }
360
+ catch (error) {
361
+ yield {
362
+ success: false,
363
+ error: `Failed to stream generate: ${error.message}`,
364
+ };
365
+ }
366
+ }
141
367
  /**
142
368
  * List available models
143
369
  */
@@ -1,7 +1,7 @@
1
1
  import { oRequest } from '@olane/o-core';
2
2
  import { ToolResult } from '@olane/o-tool';
3
3
  import { oLaneTool } from '@olane/o-lane';
4
- import { oNodeToolConfig } from '@olane/o-node';
4
+ import { oNodeToolConfig, oStreamRequest } from '@olane/o-node';
5
5
  export declare class GeminiIntelligenceTool extends oLaneTool {
6
6
  private apiKey;
7
7
  private baseUrl;
@@ -10,11 +10,19 @@ export declare class GeminiIntelligenceTool extends oLaneTool {
10
10
  /**
11
11
  * Chat completion with Gemini
12
12
  */
13
- _tool_completion(request: oRequest): Promise<ToolResult>;
13
+ _tool_completion(request: oStreamRequest): Promise<ToolResult>;
14
+ /**
15
+ * Stream chat completion with Gemini
16
+ */
17
+ private _streamCompletion;
14
18
  /**
15
19
  * Generate text with Gemini
16
20
  */
17
- _tool_generate(request: oRequest): Promise<ToolResult>;
21
+ _tool_generate(request: oStreamRequest): Promise<ToolResult>;
22
+ /**
23
+ * Stream text generation with Gemini
24
+ */
25
+ private _streamGenerate;
18
26
  /**
19
27
  * List available models
20
28
  */
@@ -1 +1 @@
1
- {"version":3,"file":"gemini-intelligence.tool.d.ts","sourceRoot":"","sources":["../../src/gemini-intelligence.tool.ts"],"names":[],"mappings":"AAAA,OAAO,EAAY,QAAQ,EAAE,MAAM,eAAe,CAAC;AACnD,OAAO,EAAE,UAAU,EAAE,MAAM,eAAe,CAAC;AAE3C,OAAO,EAAE,SAAS,EAAE,MAAM,eAAe,CAAC;AAC1C,OAAO,EAAe,eAAe,EAAE,MAAM,eAAe,CAAC;AAgH7D,qBAAa,sBAAuB,SAAQ,SAAS;IACnD,OAAO,CAAC,MAAM,CAA4C;IAC1D,OAAO,CAAC,OAAO,CAAU;IACzB,OAAO,CAAC,YAAY,CAAU;gBAElB,MAAM,EAAE,eAAe;IAUnC;;OAEG;IACG,gBAAgB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAkF9D;;OAEG;IACG,cAAc,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAmF5D;;OAEG;IACG,iBAAiB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IA0C/D;;OAEG;IACG,gBAAgB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IA4C9D;;OAEG;IACG,YAAY,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;CAiC3D"}
1
+ {"version":3,"file":"gemini-intelligence.tool.d.ts","sourceRoot":"","sources":["../../src/gemini-intelligence.tool.ts"],"names":[],"mappings":"AAAA,OAAO,EAAY,QAAQ,EAAE,MAAM,eAAe,CAAC;AACnD,OAAO,EAAE,UAAU,EAAE,MAAM,eAAe,CAAC;AAE3C,OAAO,EAAE,SAAS,EAAE,MAAM,eAAe,CAAC;AAC1C,OAAO,EAEL,eAAe,EACf,cAAc,EAEf,MAAM,eAAe,CAAC;AAgHvB,qBAAa,sBAAuB,SAAQ,SAAS;IACnD,OAAO,CAAC,MAAM,CAA4C;IAC1D,OAAO,CAAC,OAAO,CAAU;IACzB,OAAO,CAAC,YAAY,CAAU;gBAElB,MAAM,EAAE,eAAe;IAUnC;;OAEG;IACG,gBAAgB,CAAC,OAAO,EAAE,cAAc,GAAG,OAAO,CAAC,UAAU,CAAC;IA6FpE;;OAEG;YACY,iBAAiB;IA8HhC;;OAEG;IACG,cAAc,CAAC,OAAO,EAAE,cAAc,GAAG,OAAO,CAAC,UAAU,CAAC;IA8FlE;;OAEG;YACY,eAAe;IA+H9B;;OAEG;IACG,iBAAiB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IA0C/D;;OAEG;IACG,gBAAgB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IA4C9D;;OAEG;IACG,YAAY,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;CAiC3D"}
@@ -1,6 +1,7 @@
1
1
  import { oAddress } from '@olane/o-core';
2
2
  import { LLM_PARAMS } from './methods/llm.methods.js';
3
3
  import { oLaneTool } from '@olane/o-lane';
4
+ import { StreamUtils, } from '@olane/o-node';
4
5
  export class GeminiIntelligenceTool extends oLaneTool {
5
6
  constructor(config) {
6
7
  super({
@@ -16,8 +17,13 @@ export class GeminiIntelligenceTool extends oLaneTool {
16
17
  * Chat completion with Gemini
17
18
  */
18
19
  async _tool_completion(request) {
20
+ const params = request.params;
21
+ const { _isStreaming = false } = params;
22
+ if (_isStreaming) {
23
+ this.logger.debug('Streaming completion...');
24
+ return StreamUtils.processGenerator(request, this._streamCompletion(request), request.stream);
25
+ }
19
26
  try {
20
- const params = request.params;
21
27
  const { model = this.defaultModel, messages, ...options } = params;
22
28
  if (!this.apiKey) {
23
29
  return {
@@ -84,12 +90,127 @@ export class GeminiIntelligenceTool extends oLaneTool {
84
90
  };
85
91
  }
86
92
  }
93
+ /**
94
+ * Stream chat completion with Gemini
95
+ */
96
+ async *_streamCompletion(request) {
97
+ try {
98
+ const params = request.params;
99
+ const { model = this.defaultModel, messages, ...options } = params;
100
+ if (!this.apiKey) {
101
+ yield {
102
+ success: false,
103
+ error: 'Gemini API key is required',
104
+ };
105
+ return;
106
+ }
107
+ if (!messages || !Array.isArray(messages)) {
108
+ yield {
109
+ success: false,
110
+ error: '"messages" array is required',
111
+ };
112
+ return;
113
+ }
114
+ // Convert messages to Gemini format
115
+ const contents = messages.map((msg) => ({
116
+ role: msg.role === 'assistant' ? 'model' : 'user',
117
+ parts: [{ text: msg.content }],
118
+ }));
119
+ const chatRequest = {
120
+ contents,
121
+ generationConfig: {
122
+ temperature: options.temperature,
123
+ topK: options.topK,
124
+ topP: options.topP,
125
+ maxOutputTokens: options.maxOutputTokens,
126
+ stopSequences: options.stopSequences,
127
+ },
128
+ safetySettings: options.safetySettings,
129
+ };
130
+ const response = await fetch(`${this.baseUrl}/models/${model}:streamGenerateContent?key=${this.apiKey}&alt=sse`, {
131
+ method: 'POST',
132
+ headers: {
133
+ 'Content-Type': 'application/json',
134
+ },
135
+ body: JSON.stringify(chatRequest),
136
+ });
137
+ if (!response.ok) {
138
+ const errorText = await response.text();
139
+ yield {
140
+ success: false,
141
+ error: `Gemini API error: ${response.status} - ${errorText}`,
142
+ };
143
+ return;
144
+ }
145
+ if (!response.body) {
146
+ yield {
147
+ success: false,
148
+ error: 'Response body is null',
149
+ };
150
+ return;
151
+ }
152
+ const reader = response.body.getReader();
153
+ const decoder = new TextDecoder();
154
+ let buffer = '';
155
+ while (true) {
156
+ const { done, value } = await reader.read();
157
+ if (done)
158
+ break;
159
+ buffer += decoder.decode(value, { stream: true });
160
+ const lines = buffer.split('\n');
161
+ buffer = lines.pop() || '';
162
+ for (const line of lines) {
163
+ const trimmedLine = line.trim();
164
+ if (!trimmedLine || !trimmedLine.startsWith('data: '))
165
+ continue;
166
+ const data = trimmedLine.slice(6);
167
+ if (data === '[DONE]')
168
+ break;
169
+ try {
170
+ const parsed = JSON.parse(data);
171
+ if (parsed.candidates?.[0]?.content?.parts?.[0]?.text !== undefined) {
172
+ yield {
173
+ delta: parsed.candidates[0].content.parts[0].text,
174
+ model: model,
175
+ };
176
+ }
177
+ // Track usage and finish reason in final chunk
178
+ if (parsed.usageMetadata) {
179
+ yield {
180
+ usage: parsed.usageMetadata,
181
+ };
182
+ }
183
+ if (parsed.candidates?.[0]?.finishReason) {
184
+ yield {
185
+ finish_reason: parsed.candidates[0].finishReason,
186
+ };
187
+ }
188
+ }
189
+ catch (parseError) {
190
+ // Skip invalid JSON
191
+ continue;
192
+ }
193
+ }
194
+ }
195
+ }
196
+ catch (error) {
197
+ yield {
198
+ success: false,
199
+ error: `Failed to stream chat: ${error.message}`,
200
+ };
201
+ }
202
+ }
87
203
  /**
88
204
  * Generate text with Gemini
89
205
  */
90
206
  async _tool_generate(request) {
207
+ const params = request.params;
208
+ const { _isStreaming = false } = params;
209
+ if (_isStreaming) {
210
+ this.logger.debug('Streaming generate...');
211
+ return StreamUtils.processGenerator(request, this._streamGenerate(request), request.stream);
212
+ }
91
213
  try {
92
- const params = request.params;
93
214
  const { model = this.defaultModel, prompt, system, ...options } = params;
94
215
  if (!this.apiKey) {
95
216
  return {
@@ -157,6 +278,117 @@ export class GeminiIntelligenceTool extends oLaneTool {
157
278
  };
158
279
  }
159
280
  }
281
+ /**
282
+ * Stream text generation with Gemini
283
+ */
284
+ async *_streamGenerate(request) {
285
+ try {
286
+ const params = request.params;
287
+ const { model = this.defaultModel, prompt, system, ...options } = params;
288
+ if (!this.apiKey) {
289
+ yield {
290
+ success: false,
291
+ error: 'Gemini API key is required',
292
+ };
293
+ return;
294
+ }
295
+ if (!prompt) {
296
+ yield {
297
+ success: false,
298
+ error: 'Prompt is required',
299
+ };
300
+ return;
301
+ }
302
+ // Combine system and user prompt
303
+ const fullPrompt = system ? `${system}\n\n${prompt}` : prompt;
304
+ const generateRequest = {
305
+ contents: [
306
+ {
307
+ parts: [{ text: fullPrompt }],
308
+ },
309
+ ],
310
+ generationConfig: {
311
+ temperature: options.temperature,
312
+ topK: options.topK,
313
+ topP: options.topP,
314
+ maxOutputTokens: options.maxOutputTokens,
315
+ stopSequences: options.stopSequences,
316
+ },
317
+ safetySettings: options.safetySettings,
318
+ };
319
+ const response = await fetch(`${this.baseUrl}/models/${model}:streamGenerateContent?key=${this.apiKey}&alt=sse`, {
320
+ method: 'POST',
321
+ headers: {
322
+ 'Content-Type': 'application/json',
323
+ },
324
+ body: JSON.stringify(generateRequest),
325
+ });
326
+ if (!response.ok) {
327
+ const errorText = await response.text();
328
+ yield {
329
+ success: false,
330
+ error: `Gemini API error: ${response.status} - ${errorText}`,
331
+ };
332
+ return;
333
+ }
334
+ if (!response.body) {
335
+ yield {
336
+ success: false,
337
+ error: 'Response body is null',
338
+ };
339
+ return;
340
+ }
341
+ const reader = response.body.getReader();
342
+ const decoder = new TextDecoder();
343
+ let buffer = '';
344
+ while (true) {
345
+ const { done, value } = await reader.read();
346
+ if (done)
347
+ break;
348
+ buffer += decoder.decode(value, { stream: true });
349
+ const lines = buffer.split('\n');
350
+ buffer = lines.pop() || '';
351
+ for (const line of lines) {
352
+ const trimmedLine = line.trim();
353
+ if (!trimmedLine || !trimmedLine.startsWith('data: '))
354
+ continue;
355
+ const data = trimmedLine.slice(6);
356
+ if (data === '[DONE]')
357
+ break;
358
+ try {
359
+ const parsed = JSON.parse(data);
360
+ if (parsed.candidates?.[0]?.content?.parts?.[0]?.text !== undefined) {
361
+ yield {
362
+ delta: parsed.candidates[0].content.parts[0].text,
363
+ model: model,
364
+ };
365
+ }
366
+ // Track usage and finish reason in final chunk
367
+ if (parsed.usageMetadata) {
368
+ yield {
369
+ usage: parsed.usageMetadata,
370
+ };
371
+ }
372
+ if (parsed.candidates?.[0]?.finishReason) {
373
+ yield {
374
+ finish_reason: parsed.candidates[0].finishReason,
375
+ };
376
+ }
377
+ }
378
+ catch (parseError) {
379
+ // Skip invalid JSON
380
+ continue;
381
+ }
382
+ }
383
+ }
384
+ }
385
+ catch (error) {
386
+ yield {
387
+ success: false,
388
+ error: `Failed to stream generate: ${error.message}`,
389
+ };
390
+ }
391
+ }
160
392
  /**
161
393
  * List available models
162
394
  */
@@ -1,14 +1,16 @@
1
1
  import { oRequest } from '@olane/o-core';
2
2
  import { ToolResult } from '@olane/o-tool';
3
3
  import { oLaneTool } from '@olane/o-lane';
4
- import { oNodeToolConfig } from '@olane/o-node';
4
+ import { oNodeToolConfig, oStreamRequest } from '@olane/o-node';
5
5
  export declare class GrokIntelligenceTool extends oLaneTool {
6
6
  private baseUrl;
7
7
  private defaultModel;
8
8
  private apiKey;
9
9
  constructor(config: oNodeToolConfig);
10
- _tool_completion(request: oRequest): Promise<ToolResult>;
11
- _tool_generate(request: oRequest): Promise<ToolResult>;
10
+ _tool_completion(request: oStreamRequest): Promise<ToolResult>;
11
+ private _streamCompletion;
12
+ _tool_generate(request: oStreamRequest): Promise<ToolResult>;
13
+ private _streamGenerate;
12
14
  _tool_list_models(request: oRequest): Promise<ToolResult>;
13
15
  _tool_model_info(request: oRequest): Promise<ToolResult>;
14
16
  _tool_status(request: oRequest): Promise<ToolResult>;
@@ -1 +1 @@
1
- {"version":3,"file":"grok-intelligence.tool.d.ts","sourceRoot":"","sources":["../../src/grok-intelligence.tool.ts"],"names":[],"mappings":"AAAA,OAAO,EAAY,QAAQ,EAAE,MAAM,eAAe,CAAC;AACnD,OAAO,EAAE,UAAU,EAAE,MAAM,eAAe,CAAC;AAE3C,OAAO,EAAE,SAAS,EAAE,MAAM,eAAe,CAAC;AAC1C,OAAO,EAAE,eAAe,EAAE,MAAM,eAAe,CAAC;AA6ChD,qBAAa,oBAAqB,SAAQ,SAAS;IACjD,OAAO,CAAC,OAAO,CAAiC;IAChD,OAAO,CAAC,YAAY,CAAyB;IAC7C,OAAO,CAAC,MAAM,CAA0C;gBAE5C,MAAM,EAAE,eAAe;IAU7B,gBAAgB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAiExD,cAAc,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAqEtD,iBAAiB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAiCzD,gBAAgB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAgCxD,YAAY,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;CA+B3D"}
1
+ {"version":3,"file":"grok-intelligence.tool.d.ts","sourceRoot":"","sources":["../../src/grok-intelligence.tool.ts"],"names":[],"mappings":"AAAA,OAAO,EAAY,QAAQ,EAAE,MAAM,eAAe,CAAC;AACnD,OAAO,EAAE,UAAU,EAAE,MAAM,eAAe,CAAC;AAE3C,OAAO,EAAE,SAAS,EAAE,MAAM,eAAe,CAAC;AAC1C,OAAO,EAAE,eAAe,EAAE,cAAc,EAAe,MAAM,eAAe,CAAC;AA6C7E,qBAAa,oBAAqB,SAAQ,SAAS;IACjD,OAAO,CAAC,OAAO,CAAiC;IAChD,OAAO,CAAC,YAAY,CAAyB;IAC7C,OAAO,CAAC,MAAM,CAA0C;gBAE5C,MAAM,EAAE,eAAe;IAU7B,gBAAgB,CAAC,OAAO,EAAE,cAAc,GAAG,OAAO,CAAC,UAAU,CAAC;YA4ErD,iBAAiB;IAgH1B,cAAc,CAAC,OAAO,EAAE,cAAc,GAAG,OAAO,CAAC,UAAU,CAAC;YAgFnD,eAAe;IAoHxB,iBAAiB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAiCzD,gBAAgB,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;IAgCxD,YAAY,CAAC,OAAO,EAAE,QAAQ,GAAG,OAAO,CAAC,UAAU,CAAC;CA+B3D"}