@olane/os 0.7.12-alpha.22 → 0.7.12-alpha.24

This diff shows the changes between publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
@@ -3,8 +3,7 @@ import { expect } from 'chai';
 import dotenv from 'dotenv';
 import { defaultOSInstance } from '../utils/os.default.js';
 import { OlaneOSSystemStatus } from '../../src/o-olane-os/enum/o-os.status-enum.js';
-import { oNodeAddress, oNodeTransport } from '@olane/o-node';
-import { multiaddr } from '@olane/o-config';
+import { oNodeAddress } from '@olane/o-node';
 dotenv.config();
 const network = defaultOSInstance;
 describe('basic-usage @initialize', async () => {
@@ -26,7 +25,7 @@ describe('basic-usage @initialize', async () => {
   // expect(data.error).to.be.undefined;
   // expect(data.message).to.equal('Network indexed!');
   // });
-  it('should be able to use olane remote services', async () => {
+  it('should be able to use stream from a provider service', async () => {
     const entryNode = network.entryNode();
     expect(entryNode).to.exist;
     expect(entryNode.state).to.equal(NodeState.RUNNING);
@@ -41,12 +40,10 @@ describe('basic-usage @initialize', async () => {
     // },
     // });
     // use the intelligence tool
-    const response2 = await entryNode.use(new oNodeAddress('o://perplexity', [
-      new oNodeTransport(multiaddr('/ip4/127.0.0.1/tcp/4000/ws/p2p/12D3KooWPHdsHhEdyBd9DS2zHJ1vRSyqSkZ97iT7F8ByYJ7U7bw8')),
-    ]), {
+    await entryNode.useStream(new oNodeAddress('o://anthropic'), {
       method: 'completion',
       params: {
-        model: 'sonar',
+        _isStream: true,
         messages: [
           {
             role: 'user',
@@ -54,8 +51,11 @@ describe('basic-usage @initialize', async () => {
           },
         ],
       },
+    }, {
+      onChunk: (chunk) => {
+        console.log('Received chunk: ', chunk.result.data);
+      },
     });
-    console.log(response2.result.data);
   });
 });
 // describe('olane network usage', async () => {
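
The rewritten test drops the hard-coded libp2p multiaddr for `o://perplexity` and instead calls a callback-style streaming API against `o://anthropic`. A minimal sketch of the call shape this test now exercises, assuming a started OlaneOS instance; the `StreamChunk` shape and the `useStream` signature below are inferred from how the tests read chunks (`chunk.text`, `chunk.position`, `chunk.isComplete`, `chunk.result.data`), not taken from published typings:

```ts
import { oNodeAddress } from '@olane/o-node';

// Assumed chunk shape: the specs read chunk.text, chunk.position,
// chunk.isComplete, and chunk.result.data, so something like this is implied.
interface StreamChunk {
  text?: string;
  position?: number;
  isComplete?: boolean;
  result: { data: unknown };
}

// `entryNode` is assumed to come from a running OlaneOS instance,
// e.g. const entryNode = network.entryNode() as in the tests.
// The signature is a hypothesis based on the call site in the diff.
declare const entryNode: {
  useStream(
    address: oNodeAddress,
    request: { method: string; params: Record<string, unknown> },
    handlers: { onChunk: (chunk: StreamChunk) => void },
  ): Promise<void>;
};

// Callback style, mirroring the rewritten basic-usage test: each streamed
// chunk is delivered to onChunk rather than returned as a single response.
await entryNode.useStream(
  new oNodeAddress('o://anthropic'),
  {
    method: 'completion',
    params: {
      _isStream: true,
      messages: [{ role: 'user', content: 'Say hello in 5 words' }],
    },
  },
  {
    onChunk: (chunk) => console.log('Received chunk:', chunk.result.data),
  },
);
```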
package/dist/test/basic/streaming-usage.spec.d.ts ADDED
@@ -0,0 +1,2 @@
+export {};
+//# sourceMappingURL=streaming-usage.spec.d.ts.map
package/dist/test/basic/streaming-usage.spec.d.ts.map ADDED
@@ -0,0 +1 @@
+{"version":3,"file":"streaming-usage.spec.d.ts","sourceRoot":"","sources":["../../../test/basic/streaming-usage.spec.ts"],"names":[],"mappings":""}
package/dist/test/basic/streaming-usage.spec.js ADDED
@@ -0,0 +1,314 @@
+import { oAddress } from '@olane/o-core';
+import { expect } from 'chai';
+import dotenv from 'dotenv';
+import { defaultOSInstance } from '../utils/os.default.js';
+import { OlaneOSSystemStatus } from '../../src/o-olane-os/enum/o-os.status-enum.js';
+dotenv.config();
+const network = defaultOSInstance;
+describe('streaming-usage @initialize', async () => {
+    it('should be able to startup the network', async () => {
+        await network.start();
+        expect(network.status).to.equal(OlaneOSSystemStatus.RUNNING);
+    });
+});
+describe('intelligence provider streaming tests', () => {
+    it('should stream from Intelligence Router (default provider)', async function () {
+        const entryNode = network.entryNode();
+        let fullText = '';
+        let chunkCount = 0;
+        let lastChunk = null;
+        console.log('\n Testing Intelligence Router streaming...');
+        for await (const chunk of entryNode.useStreaming(new oAddress('o://intelligence'), {
+            method: 'stream_prompt',
+            params: {
+                prompt: 'Say hello in 5 words',
+            },
+        })) {
+            chunkCount++;
+            if (chunk.text) {
+                fullText += chunk.text;
+                console.log('Chunk:', chunk.text);
+                process.stdout.write(chunk.text);
+            }
+            lastChunk = chunk;
+        }
+        console.log(`\n Received ${chunkCount} chunks`);
+        console.log(` Full response: "${fullText}"\n`);
+        expect(chunkCount).to.be.greaterThan(0);
+        expect(fullText).to.not.be.empty;
+        expect(lastChunk?.isComplete).to.be.true;
+    });
+    it('should stream from Anthropic', async function () {
+        const entryNode = network.entryNode();
+        let fullText = '';
+        let chunkCount = 0;
+        let lastChunk = null;
+        console.log('\n Testing Anthropic streaming...');
+        for await (const chunk of entryNode.useStreaming(new oAddress('o://anthropic'), {
+            method: 'stream_completion',
+            params: {
+                messages: [{ role: 'user', content: 'Say hello in 5 words' }],
+                max_tokens: 50,
+            },
+        })) {
+            chunkCount++;
+            if (chunk.text) {
+                fullText += chunk.text;
+                process.stdout.write(chunk.text);
+            }
+            lastChunk = chunk;
+        }
+        console.log(`\n Received ${chunkCount} chunks`);
+        console.log(` Full response: "${fullText}"\n`);
+        expect(chunkCount).to.be.greaterThan(0);
+        expect(fullText).to.not.be.empty;
+        expect(lastChunk?.isComplete).to.be.true;
+    });
+    it('should stream from OpenAI', async function () {
+        const entryNode = network.entryNode();
+        let fullText = '';
+        let chunkCount = 0;
+        let lastChunk = null;
+        console.log('\n Testing OpenAI streaming...');
+        for await (const chunk of entryNode.useStreaming(new oAddress('o://openai'), {
+            method: 'stream_completion',
+            params: {
+                messages: [{ role: 'user', content: 'Say hello in 5 words' }],
+                max_tokens: 50,
+            },
+        })) {
+            chunkCount++;
+            if (chunk.text) {
+                fullText += chunk.text;
+                process.stdout.write(chunk.text);
+            }
+            lastChunk = chunk;
+        }
+        console.log(`\n Received ${chunkCount} chunks`);
+        console.log(` Full response: "${fullText}"\n`);
+        expect(chunkCount).to.be.greaterThan(0);
+        expect(fullText).to.not.be.empty;
+        expect(lastChunk?.isComplete).to.be.true;
+    });
+    it('should stream from Ollama', async function () {
+        const entryNode = network.entryNode();
+        let fullText = '';
+        let chunkCount = 0;
+        let lastChunk = null;
+        console.log('\n Testing Ollama streaming...');
+        for await (const chunk of entryNode.useStreaming(new oAddress('o://ollama'), {
+            method: 'stream_completion',
+            params: {
+                messages: [{ role: 'user', content: 'Say hello in 5 words' }],
+                model: 'llama3.2:latest', // Default model
+            },
+        })) {
+            chunkCount++;
+            if (chunk.text) {
+                fullText += chunk.text;
+                process.stdout.write(chunk.text);
+            }
+            lastChunk = chunk;
+        }
+        console.log(`\n Received ${chunkCount} chunks`);
+        console.log(` Full response: "${fullText}"\n`);
+        expect(chunkCount).to.be.greaterThan(0);
+        expect(fullText).to.not.be.empty;
+        expect(lastChunk?.isComplete).to.be.true;
+    });
+    it('should stream from Perplexity', async function () {
+        const entryNode = network.entryNode();
+        let fullText = '';
+        let chunkCount = 0;
+        let lastChunk = null;
+        console.log('\n Testing Perplexity streaming...');
+        for await (const chunk of entryNode.useStreaming(new oAddress('o://perplexity'), {
+            method: 'stream_completion',
+            params: {
+                messages: [{ role: 'user', content: 'Say hello in 5 words' }],
+                max_tokens: 50,
+            },
+        })) {
+            chunkCount++;
+            if (chunk.text) {
+                fullText += chunk.text;
+                process.stdout.write(chunk.text);
+            }
+            lastChunk = chunk;
+        }
+        console.log(`\n Received ${chunkCount} chunks`);
+        console.log(` Full response: "${fullText}"\n`);
+        expect(chunkCount).to.be.greaterThan(0);
+        expect(fullText).to.not.be.empty;
+        expect(lastChunk?.isComplete).to.be.true;
+    });
+    it('should stream from Grok', async function () {
+        const entryNode = network.entryNode();
+        let fullText = '';
+        let chunkCount = 0;
+        let lastChunk = null;
+        console.log('\n Testing Grok streaming...');
+        for await (const chunk of entryNode.useStreaming(new oAddress('o://grok'), {
+            method: 'stream_completion',
+            params: {
+                messages: [{ role: 'user', content: 'Say hello in 5 words' }],
+                max_tokens: 50,
+            },
+        })) {
+            chunkCount++;
+            if (chunk.text) {
+                fullText += chunk.text;
+                process.stdout.write(chunk.text);
+            }
+            lastChunk = chunk;
+        }
+        console.log(`\n Received ${chunkCount} chunks`);
+        console.log(` Full response: "${fullText}"\n`);
+        expect(chunkCount).to.be.greaterThan(0);
+        expect(fullText).to.not.be.empty;
+        expect(lastChunk?.isComplete).to.be.true;
+    });
+    it('should stream from Gemini', async function () {
+        const entryNode = network.entryNode();
+        let fullText = '';
+        let chunkCount = 0;
+        let lastChunk = null;
+        console.log('\n Testing Gemini streaming...');
+        for await (const chunk of entryNode.useStreaming(new oAddress('o://gemini'), {
+            method: 'stream_completion',
+            params: {
+                messages: [{ role: 'user', content: 'Say hello in 5 words' }],
+                max_tokens: 50,
+            },
+        })) {
+            chunkCount++;
+            if (chunk.text) {
+                fullText += chunk.text;
+                process.stdout.write(chunk.text);
+            }
+            lastChunk = chunk;
+        }
+        console.log(`\n Received ${chunkCount} chunks`);
+        console.log(` Full response: "${fullText}"\n`);
+        expect(chunkCount).to.be.greaterThan(0);
+        expect(fullText).to.not.be.empty;
+        expect(lastChunk?.isComplete).to.be.true;
+    });
+});
+describe('multi-hop streaming tests', () => {
+    it('should stream through Intelligence Router to Anthropic (2-hop)', async function () {
+        const entryNode = network.entryNode();
+        let fullText = '';
+        let chunkCount = 0;
+        let lastChunk = null;
+        console.log('\n Testing 2-hop streaming (Intelligence Router → Anthropic)...');
+        // The intelligence router will route to a specific provider
+        for await (const chunk of entryNode.useStreaming(new oAddress('o://intelligence'), {
+            method: 'stream_prompt',
+            params: {
+                prompt: 'Count to 5',
+                provider: 'anthropic', // Force routing to Anthropic
+            },
+        })) {
+            chunkCount++;
+            if (chunk.text) {
+                fullText += chunk.text;
+                process.stdout.write(chunk.text);
+            }
+            lastChunk = chunk;
+        }
+        console.log(`\n Received ${chunkCount} chunks through 2-hop routing`);
+        console.log(` Full response: "${fullText}"\n`);
+        expect(chunkCount).to.be.greaterThan(0);
+        expect(fullText).to.not.be.empty;
+        expect(lastChunk?.isComplete).to.be.true;
+    });
+    it('should stream through Intelligence Router to OpenAI (2-hop)', async function () {
+        const entryNode = network.entryNode();
+        let fullText = '';
+        let chunkCount = 0;
+        let lastChunk = null;
+        console.log('\n Testing 2-hop streaming (Intelligence Router → OpenAI)...');
+        for await (const chunk of entryNode.useStreaming(new oAddress('o://intelligence'), {
+            method: 'stream_prompt',
+            params: {
+                prompt: 'Say hello',
+                provider: 'openai', // Force routing to OpenAI
+            },
+        })) {
+            chunkCount++;
+            if (chunk.text) {
+                fullText += chunk.text;
+                process.stdout.write(chunk.text);
+            }
+            lastChunk = chunk;
+        }
+        console.log(`\n Received ${chunkCount} chunks through 2-hop routing`);
+        console.log(` Full response: "${fullText}"\n`);
+        expect(chunkCount).to.be.greaterThan(0);
+        expect(fullText).to.not.be.empty;
+        expect(lastChunk?.isComplete).to.be.true;
+    });
+    it('should handle streaming with routing correctly (verify chunk order)', async function () {
+        const entryNode = network.entryNode();
+        const chunks = [];
+        console.log('\n Testing chunk ordering through routing...');
+        for await (const chunk of entryNode.useStreaming(new oAddress('o://anthropic'), {
+            method: 'stream_completion',
+            params: {
+                messages: [{ role: 'user', content: 'Count: 1, 2, 3, 4, 5' }],
+                max_tokens: 50,
+            },
+        })) {
+            chunks.push(chunk);
+            if (chunk.text) {
+                process.stdout.write(chunk.text);
+            }
+        }
+        console.log(`\n Received ${chunks.length} chunks\n`);
+        // Verify chunks are in order (sequence numbers should increment)
+        for (let i = 0; i < chunks.length - 1; i++) {
+            if (chunks[i].position !== undefined && chunks[i + 1].position !== undefined) {
+                expect(chunks[i + 1].position).to.be.greaterThanOrEqual(chunks[i].position);
+            }
+        }
+        // Verify last chunk is marked as complete
+        const lastChunk = chunks[chunks.length - 1];
+        expect(lastChunk?.isComplete).to.be.true;
+        console.log(' ✓ Chunks are in correct order');
+    });
+    it('should handle backpressure correctly during streaming', async function () {
+        const entryNode = network.entryNode();
+        let chunkCount = 0;
+        let processingTime = 0;
+        console.log('\n Testing backpressure handling...');
+        const startTime = Date.now();
+        for await (const chunk of entryNode.useStreaming(new oAddress('o://anthropic'), {
+            method: 'stream_completion',
+            params: {
+                messages: [{ role: 'user', content: 'Write a short poem' }],
+                max_tokens: 100,
+            },
+        })) {
+            chunkCount++;
+            if (chunk.text) {
+                process.stdout.write(chunk.text);
+            }
+            // Simulate slow consumer (backpressure)
+            if (chunkCount % 5 === 0) {
+                await new Promise(resolve => setTimeout(resolve, 10));
+            }
+        }
+        processingTime = Date.now() - startTime;
+        console.log(`\n Processed ${chunkCount} chunks in ${processingTime}ms`);
+        console.log(` Average: ${(processingTime / chunkCount).toFixed(2)}ms per chunk\n`);
+        expect(chunkCount).to.be.greaterThan(0);
+        // Backpressure should work without errors
+    });
+});
+describe('streaming-usage @stop-network', async () => {
+    it('should be able to stop the network', async () => {
+        await network.stop();
+        expect(network.status).to.equal(OlaneOSSystemStatus.STOPPED);
+    });
+});
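
The new compiled spec exercises a second, async-iterator streaming API, `useStreaming`, against each intelligence provider (`o://intelligence`, `o://anthropic`, `o://openai`, `o://ollama`, `o://perplexity`, `o://grok`, `o://gemini`), plus multi-hop routing, chunk-ordering, and backpressure cases. A minimal consumer sketch under the same assumptions as above (a running OlaneOS instance; the chunk fields are inferred from what the spec asserts on):

```ts
import { oAddress } from '@olane/o-core';

// `entryNode` and this signature are assumptions inferred from the spec,
// which iterates useStreaming(...) with for-await and asserts on
// chunk.text and chunk.isComplete.
declare const entryNode: {
  useStreaming(
    address: oAddress,
    request: { method: string; params: Record<string, unknown> },
  ): AsyncIterable<{ text?: string; isComplete?: boolean }>;
};

let fullText = '';
for await (const chunk of entryNode.useStreaming(new oAddress('o://anthropic'), {
  method: 'stream_completion',
  params: {
    messages: [{ role: 'user', content: 'Say hello in 5 words' }],
    max_tokens: 50,
  },
})) {
  if (chunk.text) fullText += chunk.text; // accumulate streamed tokens
  if (chunk.isComplete) break;            // final chunk carries a completion flag
}
console.log(fullText);
```

The spec itself never breaks early; it lets the iterator finish and then checks `isComplete` on the last chunk, so the early `break` above is just one consumer choice.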
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@olane/os",
-  "version": "0.7.12-alpha.22",
+  "version": "0.7.12-alpha.24",
   "type": "module",
   "main": "dist/src/index.js",
   "types": "dist/src/index.d.ts",
@@ -57,21 +57,21 @@
     "typescript": "5.4.5"
   },
   "dependencies": {
-    "@olane/o-config": "0.7.12-alpha.22",
-    "@olane/o-core": "0.7.12-alpha.22",
-    "@olane/o-intelligence": "0.7.12-alpha.22",
-    "@olane/o-lane": "0.7.12-alpha.22",
-    "@olane/o-leader": "0.7.12-alpha.22",
-    "@olane/o-protocol": "0.7.12-alpha.22",
-    "@olane/o-storage": "0.7.12-alpha.22",
-    "@olane/o-tool": "0.7.12-alpha.22",
-    "@olane/o-tool-registry": "0.7.12-alpha.22",
-    "@olane/o-tools-common": "0.7.12-alpha.22",
+    "@olane/o-config": "0.7.12-alpha.24",
+    "@olane/o-core": "0.7.12-alpha.24",
+    "@olane/o-intelligence": "0.7.12-alpha.24",
+    "@olane/o-lane": "0.7.12-alpha.24",
+    "@olane/o-leader": "0.7.12-alpha.24",
+    "@olane/o-protocol": "0.7.12-alpha.24",
+    "@olane/o-storage": "0.7.12-alpha.24",
+    "@olane/o-tool": "0.7.12-alpha.24",
+    "@olane/o-tool-registry": "0.7.12-alpha.24",
+    "@olane/o-tools-common": "0.7.12-alpha.24",
     "chalk": "^5.4.1",
     "debug": "^4.4.1",
     "dotenv": "^16.5.0",
     "fs-extra": "^11.3.0",
     "touch": "^3.1.1"
   },
-  "gitHead": "8224ba551626751995e60a2064243a5976588d6d"
+  "gitHead": "ac75e0acc3a0a232a5cdeb9271060d90cf1e5a2c"
 }