@synnaxlabs/client 0.49.0 → 0.49.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,865 @@
1
+ // Copyright 2025 Synnax Labs, Inc.
2
+ //
3
+ // Use of this software is governed by the Business Source License included in the file
4
+ // licenses/BSL.txt.
5
+ //
6
+ // As of the Change Date specified in that file, in accordance with the Business Source
7
+ // License, use of this software will be governed by the Apache License, Version 2.0,
8
+ // included in the file licenses/APL.txt.
9
+
10
+ import { DataType, id, runtime, TimeSpan, TimeStamp } from "@synnaxlabs/x";
11
+ import { describe, expect, it } from "vitest";
12
+
13
+ import { type channel } from "@/channel";
14
+ import { createTestClient } from "@/testutil/client";
15
+ import { secondsLinspace } from "@/testutil/telem";
16
+
17
+ const client = createTestClient();
18
+
19
+ const delimiter = runtime.getOS() === "Windows" ? "\r\n" : "\n";
20
+
21
+ /** Helper to collect stream into a string */
22
+ const streamToString = async (stream: ReadableStream<Uint8Array>): Promise<string> => {
23
+ const reader = stream.getReader();
24
+ const chunks: Uint8Array[] = [];
25
+ while (true) {
26
+ const { done, value } = await reader.read();
27
+ if (done) break;
28
+ chunks.push(value);
29
+ }
30
+ const decoder = new TextDecoder();
31
+ return chunks.map((c) => decoder.decode(c)).join("");
32
+ };
33
+
34
+ const parseCSV = (csv: string): string[][] => {
35
+ const lines = csv.trim().split(delimiter);
36
+ return lines.map((line) => line.split(","));
37
+ };
38
+
39
+ const streamToRecords = async (
40
+ stream: ReadableStream<Uint8Array>,
41
+ ): Promise<string[][]> => {
42
+ const csv = await streamToString(stream);
43
+ return parseCSV(csv);
44
+ };
45
+
46
// Integration tests for CSV export via client.read(). Each spec creates fresh
// channels (randomized names avoid collisions across runs), writes known frames,
// then asserts on the exact CSV rows streamed back by the server.
describe("Reader", () => {
  describe("CSV", () => {
    it("should export channels with the same index", async () => {
      const index = await client.channels.create({
        name: id.create(),
        dataType: DataType.TIMESTAMP,
        isIndex: true,
      });
      const data1 = await client.channels.create({
        name: id.create(),
        dataType: DataType.FLOAT64,
        index: index.key,
      });
      const data2 = await client.channels.create({
        name: id.create(),
        dataType: DataType.FLOAT64,
        index: index.key,
      });
      const start = TimeStamp.seconds(1);
      const writer = await client.openWriter({
        start,
        channels: [index.key, data1.key, data2.key],
      });
      await writer.write({
        [index.key]: [TimeStamp.seconds(1), TimeStamp.seconds(2), TimeStamp.seconds(3)],
        [data1.key]: [10, 20, 30],
        [data2.key]: [100, 200, 300],
      });
      await writer.commit();
      await writer.close();
      const stream = await client.read({
        channels: [index.key, data1.key, data2.key],
        timeRange: { start: TimeStamp.seconds(0), end: TimeStamp.seconds(10) },
        // channelNames overrides the default header (the channel's own name).
        channelNames: {
          [index.key]: "Time",
          [data1.key]: "Sensor1",
          [data2.key]: "Sensor2",
        },
        responseType: "csv",
      });
      const records = await streamToRecords(stream);
      // Timestamps are serialized as nanoseconds since the epoch.
      expect(records).toEqual([
        ["Time", "Sensor1", "Sensor2"],
        ["1000000000", "10", "100"],
        ["2000000000", "20", "200"],
        ["3000000000", "30", "300"],
      ]);
    });
    it("should export multiple channels with different indexes", async () => {
      const index1 = await client.channels.create({
        name: id.create(),
        dataType: DataType.TIMESTAMP,
        isIndex: true,
      });
      const data1 = await client.channels.create({
        name: id.create(),
        dataType: DataType.FLOAT64,
        index: index1.key,
      });
      const index2 = await client.channels.create({
        name: id.create(),
        dataType: DataType.TIMESTAMP,
        isIndex: true,
      });
      const data2 = await client.channels.create({
        name: id.create(),
        dataType: DataType.FLOAT64,
        index: index2.key,
      });
      // Write to first group - timestamps 1, 3, 5
      const writer1 = await client.openWriter({
        start: TimeStamp.seconds(1),
        channels: [index1.key, data1.key],
      });
      await writer1.write({
        [index1.key]: [
          TimeStamp.seconds(1),
          TimeStamp.seconds(3),
          TimeStamp.seconds(5),
        ],
        [data1.key]: [100, 300, 500],
      });
      await writer1.commit();
      await writer1.close();

      // Write to second group - timestamps 2, 4, 6
      const writer2 = await client.openWriter({
        start: TimeStamp.seconds(2),
        channels: [index2.key, data2.key],
      });
      await writer2.write({
        [index2.key]: [
          TimeStamp.seconds(2),
          TimeStamp.seconds(4),
          TimeStamp.seconds(6),
        ],
        [data2.key]: [200, 400, 600],
      });
      await writer2.commit();
      await writer2.close();
      const stream = await client.read({
        channels: [data1.key, data2.key], // Just data channels - indexes auto-included
        timeRange: { start: TimeStamp.seconds(0), end: TimeStamp.seconds(10) },
        channelNames: {
          [index1.key]: "Time1",
          [data1.key]: "Data1",
          [index2.key]: "Time2",
          [data2.key]: "Data2",
        },
        responseType: "csv",
      });
      const records = await streamToRecords(stream);
      // Rows are merged in global time order; cells for the group with no
      // sample at that timestamp are left empty.
      expect(records).toEqual([
        ["Time1", "Data1", "Time2", "Data2"],
        ["1000000000", "100", "", ""],
        ["", "", "2000000000", "200"],
        ["3000000000", "300", "", ""],
        ["", "", "4000000000", "400"],
        ["5000000000", "500", "", ""],
        ["", "", "6000000000", "600"],
      ]);
    });
    it("should allow downsampling", async () => {
      const index = await client.channels.create({
        name: id.create(),
        dataType: DataType.TIMESTAMP,
        isIndex: true,
      });
      const data = await client.channels.create({
        name: id.create(),
        dataType: DataType.FLOAT64,
        index: index.key,
      });
      const writer = await client.openWriter({
        start: TimeStamp.seconds(1),
        channels: [index.key, data.key],
      });
      await writer.write({
        [index.key]: [
          TimeStamp.seconds(1),
          TimeStamp.seconds(2),
          TimeStamp.seconds(3),
          TimeStamp.seconds(4),
          TimeStamp.seconds(5),
        ],
        [data.key]: [10, 20, 30, 40, 50],
      });
      await writer.commit();
      await writer.close();
      const stream = await client.read({
        channels: [data.key],
        timeRange: { start: TimeStamp.seconds(0), end: TimeStamp.seconds(10) },
        responseType: "csv",
        // Keep every 2nd sample.
        iteratorConfig: { downsampleFactor: 2 },
      });
      const records = await streamToRecords(stream);
      expect(records).toEqual([
        [index.name, data.name],
        ["1000000000", "10"],
        ["3000000000", "30"],
        ["5000000000", "50"],
      ]);
    });
    it("should handle channels at different uneven rates with correct row ordering", async () => {
      const indexFast = await client.channels.create({
        name: id.create(),
        dataType: DataType.TIMESTAMP,
        isIndex: true,
      });
      const dataFast = await client.channels.create({
        name: id.create(),
        dataType: DataType.FLOAT64,
        index: indexFast.key,
      });
      const indexSlow = await client.channels.create({
        name: id.create(),
        dataType: DataType.TIMESTAMP,
        isIndex: true,
      });
      const dataSlow = await client.channels.create({
        name: id.create(),
        dataType: DataType.FLOAT64,
        index: indexSlow.key,
      });
      const baseTime = TimeStamp.nanoseconds(0);
      // Write fast data: 0ns, 1ns, 2ns, 3ns, 4ns, 5ns
      const writerFast = await client.openWriter({
        start: baseTime,
        channels: [indexFast.key, dataFast.key],
      });
      await writerFast.write({
        [indexFast.key]: [
          TimeStamp.nanoseconds(0),
          TimeStamp.nanoseconds(1),
          TimeStamp.nanoseconds(2),
          TimeStamp.nanoseconds(3),
          TimeStamp.nanoseconds(4),
          TimeStamp.nanoseconds(5),
        ],
        [dataFast.key]: [1.0, 1.1, 1.2, 1.3, 1.4, 1.5],
      });
      await writerFast.commit();
      await writerFast.close();

      // Write slow data: 0ns, 5ns
      const writerSlow = await client.openWriter({
        start: baseTime,
        channels: [indexSlow.key, dataSlow.key],
      });
      await writerSlow.write({
        [indexSlow.key]: [TimeStamp.nanoseconds(0), TimeStamp.nanoseconds(5)],
        [dataSlow.key]: [2.0, 2.5],
      });
      await writerSlow.commit();
      await writerSlow.close();

      const stream = await client.read({
        channels: [dataFast.key, dataSlow.key],
        timeRange: {
          start: baseTime,
          end: TimeStamp.nanoseconds(6),
        },
        responseType: "csv",
      });
      const records = await streamToRecords(stream);
      // Coincident timestamps (0ns and 5ns) merge into a single row; the slow
      // group's cells are empty everywhere else.
      expect(records).toEqual([
        [indexFast.name, dataFast.name, indexSlow.name, dataSlow.name],
        ["0", "1", "0", "2"],
        ["1", "1.1", "", ""],
        ["2", "1.2", "", ""],
        ["3", "1.3", "", ""],
        ["4", "1.4", "", ""],
        ["5", "1.5", "5", "2.5"],
      ]);
    });
    it("should handle large amounts of channels", async () => {
      const numGroups = 5;
      const channelsPerGroup = 3;
      const dataKeys: channel.Keys = [];
      const expectedColumns = numGroups * (1 + channelsPerGroup);

      // Store timestamps written per group for building expected rows later
      interface GroupWrite {
        groupIdx: number;
        timestamps: bigint[];
        values: number[][]; // values[sampleIdx][channelIdx]
      }
      const groupWrites: GroupWrite[] = [];

      for (let g = 0; g < numGroups; g++) {
        const index = await client.channels.create({
          name: id.create(),
          dataType: DataType.TIMESTAMP,
          isIndex: true,
        });
        const groupChannels: channel.Keys = [index.key];
        for (let c = 0; c < channelsPerGroup; c++) {
          const data = await client.channels.create({
            name: id.create(),
            dataType: DataType.FLOAT64,
            index: index.key,
          });
          dataKeys.push(data.key);
          groupChannels.push(data.key);
        }
        const writer = await client.openWriter({
          start: TimeStamp.seconds(g + 1),
          channels: groupChannels,
        });
        // Write two timestamps for this group
        const ts1 = TimeStamp.seconds(g + 1);
        const ts2 = TimeStamp.seconds(g + 2);
        const writeData: Record<number, unknown[]> = {
          [index.key]: [ts1, ts2],
        };
        // Write sample values for all channels
        for (let c = 0; c < channelsPerGroup; c++)
          writeData[groupChannels[c + 1]] = [g * 10 + c, g * 10 + c + 1];

        await writer.write(writeData);
        await writer.commit();
        await writer.close();

        // Store the write info
        groupWrites.push({
          groupIdx: g,
          timestamps: [ts1.valueOf(), ts2.valueOf()],
          values: [
            Array.from({ length: channelsPerGroup }, (_, c) => g * 10 + c),
            Array.from({ length: channelsPerGroup }, (_, c) => g * 10 + c + 1),
          ],
        });
      }

      // Build expected rows AFTER all groups created (now we know total columns)
      const rowsByTime = new Map<string, string[]>();
      for (const gw of groupWrites)
        for (let i = 0; i < gw.timestamps.length; i++) {
          const timeStr = gw.timestamps[i].toString();
          if (!rowsByTime.has(timeStr))
            rowsByTime.set(timeStr, Array(expectedColumns).fill(""));

          const row = rowsByTime.get(timeStr)!;
          const colOffset = gw.groupIdx * (1 + channelsPerGroup);
          row[colOffset] = timeStr; // index timestamp
          for (let c = 0; c < channelsPerGroup; c++)
            row[colOffset + 1 + c] = gw.values[i][c].toString();
        }

      // Compose expected rows in time order (ascending)
      const sortedTimes = Array.from(rowsByTime.keys())
        .map((k) => BigInt(k))
        .sort((a, b) => (a < b ? -1 : a > b ? 1 : 0))
        .map((k) => k.toString());

      const expectedRows: string[][] = [];
      for (const timeStr of sortedTimes) expectedRows.push(rowsByTime.get(timeStr)!);

      const stream = await client.read({
        channels: dataKeys,
        timeRange: { start: TimeStamp.seconds(0), end: TimeStamp.seconds(20) },
        responseType: "csv",
      });
      const rows = await streamToRecords(stream);
      // There should be a header and at least the expected number of rows
      expect(rows.length).toBeGreaterThan(1);
      expect(rows.slice(1)).toEqual(expectedRows);
      // Each row should have columns for all groups (index + data channels each)
      rows.forEach((row) => {
        expect(row).toHaveLength(expectedColumns);
      });
    });

    it("should handle empty data gracefully", async () => {
      const index = await client.channels.create({
        name: id.create(),
        dataType: DataType.TIMESTAMP,
        isIndex: true,
      });
      const data = await client.channels.create({
        name: id.create(),
        dataType: DataType.FLOAT64,
        index: index.key,
      });
      const stream = await client.read({
        channels: [data.key],
        timeRange: { start: TimeStamp.seconds(0), end: TimeStamp.seconds(10) },
        responseType: "csv",
      });
      const rows = await streamToRecords(stream);
      // No samples written: only the header row comes back.
      expect(rows).toEqual([[index.name, data.name]]);
    });

    it("should use channel names as default headers", async () => {
      const uniqueSuffix = id.create();
      const indexName = `my_timestamp_${uniqueSuffix}`;
      const dataName = `my_sensor_data_${uniqueSuffix}`;
      const index = await client.channels.create({
        name: indexName,
        dataType: DataType.TIMESTAMP,
        isIndex: true,
      });
      const data = await client.channels.create({
        name: dataName,
        dataType: DataType.FLOAT64,
        index: index.key,
      });
      const writer = await client.openWriter({
        start: TimeStamp.nanoseconds(1),
        channels: [index.key, data.key],
      });
      await writer.write({
        [index.key]: [TimeStamp.nanoseconds(1)],
        [data.key]: [42],
      });
      await writer.commit();
      await writer.close();
      const stream = await client.read({
        channels: [data.key],
        timeRange: { start: TimeStamp.seconds(0), end: TimeStamp.seconds(100000) },
        responseType: "csv",
      });
      const records = await streamToRecords(stream);
      // No channelNames override passed, so the channels' own names are used.
      expect(records).toEqual([
        [index.name, data.name],
        ["1", "42"],
      ]);
    });
    it("should handle channels across domains with gaps in them", async () => {
      const index = await client.channels.create({
        name: id.create(),
        dataType: DataType.TIMESTAMP,
        isIndex: true,
      });
      const data = await client.channels.create({
        name: id.create(),
        dataType: DataType.FLOAT64,
        index: index.key,
      });
      // First domain: 101-103ns (written before the earlier domain on purpose).
      let writer = await client.openWriter({
        start: TimeStamp.nanoseconds(101),
        channels: [index.key, data.key],
      });
      await writer.write({
        [index.key]: [
          TimeStamp.nanoseconds(101),
          TimeStamp.nanoseconds(102),
          TimeStamp.nanoseconds(103),
        ],
        [data.key]: [10, 11, 12],
      });
      await writer.commit();
      await writer.close();
      // Second domain: 1-3ns, leaving a gap between 3ns and 101ns.
      writer = await client.openWriter({
        start: TimeStamp.nanoseconds(1),
        channels: [index.key, data.key],
      });
      await writer.write({
        [index.key]: [
          TimeStamp.nanoseconds(1),
          TimeStamp.nanoseconds(2),
          TimeStamp.nanoseconds(3),
        ],
        [data.key]: [1, 2, 3],
      });
      await writer.commit();
      await writer.close();
      const stream = await client.read({
        channels: [data.key],
        // Start is inclusive, end is exclusive: 3ns in, 103ns out.
        timeRange: { start: TimeStamp.nanoseconds(3), end: TimeStamp.nanoseconds(103) },
        responseType: "csv",
      });
      const rows = await streamToRecords(stream);
      expect(rows).toEqual([
        [index.name, data.name],
        ["3", "3"],
        ["101", "10"],
        ["102", "11"],
      ]);
    });
    it("should handle non-overlapping data across domains", async () => {
      // first index will get written from times 10-15, second index from times 13-18
      const index1 = await client.channels.create({
        name: id.create(),
        dataType: DataType.TIMESTAMP,
        isIndex: true,
      });
      const index2 = await client.channels.create({
        name: id.create(),
        dataType: DataType.TIMESTAMP,
        isIndex: true,
      });
      const data1 = await client.channels.create({
        name: id.create(),
        dataType: DataType.FLOAT64,
        index: index1.key,
      });
      const data2 = await client.channels.create({
        name: id.create(),
        dataType: DataType.FLOAT64,
        index: index2.key,
      });
      const writer1 = await client.openWriter({
        start: TimeStamp.nanoseconds(10),
        channels: [index1.key, data1.key],
      });
      await writer1.write({
        [index1.key]: [
          TimeStamp.nanoseconds(10),
          TimeStamp.nanoseconds(11),
          TimeStamp.nanoseconds(12),
          TimeStamp.nanoseconds(13),
          TimeStamp.nanoseconds(14),
          TimeStamp.nanoseconds(15),
        ],
        [data1.key]: [1, 2, 3, 4, 5, 6],
      });
      await writer1.commit();
      await writer1.close();
      const writer2 = await client.openWriter({
        start: TimeStamp.nanoseconds(15),
        channels: [index2.key, data2.key],
      });
      await writer2.write({
        [index2.key]: [
          TimeStamp.nanoseconds(13),
          TimeStamp.nanoseconds(14),
          TimeStamp.nanoseconds(15),
          TimeStamp.nanoseconds(16),
          TimeStamp.nanoseconds(17),
          TimeStamp.nanoseconds(18),
        ],
        [data2.key]: [11, 12, 13, 14, 15, 16],
      });
      await writer2.commit();
      await writer2.close();
      const stream = await client.read({
        channels: [data1.key, data2.key],
        timeRange: { start: TimeStamp.nanoseconds(0), end: TimeStamp.nanoseconds(19) },
        responseType: "csv",
      });
      const rows = await streamToRecords(stream);
      // Overlapping timestamps (13-15ns) merge into shared rows; the rest are
      // exclusive to one group.
      expect(rows).toEqual([
        [index1.name, data1.name, index2.name, data2.name],
        ["10", "1", "", ""],
        ["11", "2", "", ""],
        ["12", "3", "", ""],
        ["13", "4", "13", "11"],
        ["14", "5", "14", "12"],
        ["15", "6", "15", "13"],
        ["", "", "16", "14"],
        ["", "", "17", "15"],
        ["", "", "18", "16"],
      ]);
    });
    it("should handle large dataset requiring multiple iterator calls", async () => {
      // Create 4 groups with different indexes at different rates
      const numGroups = 4;
      const samplesPerGroup = [3000, 2500, 2000, 1500]; // Different sample counts
      const channelsPerGroup = 3;

      interface GroupInfo {
        indexKey: number;
        dataKeys: number[];
        baseTime: TimeStamp;
        sampleCount: number;
        intervalMs: number;
      }

      const groups: GroupInfo[] = [];
      const allDataKeys: number[] = [];

      // Create channels for each group
      for (let g = 0; g < numGroups; g++) {
        const index = await client.channels.create({
          name: `stress_index_${id.create()}`,
          dataType: DataType.TIMESTAMP,
          isIndex: true,
        });

        const dataKeys: number[] = [];
        for (let c = 0; c < channelsPerGroup; c++) {
          const data = await client.channels.create({
            name: `stress_data_${id.create()}`,
            dataType: DataType.FLOAT64,
            index: index.key,
          });
          dataKeys.push(data.key);
          allDataKeys.push(data.key);
        }

        // Different base times and intervals to create interleaving
        const baseTime = TimeStamp.seconds(1000).add(TimeSpan.milliseconds(g * 7));
        const intervalMs = 10 + g * 3; // 10ms, 13ms, 16ms, 19ms intervals

        groups.push({
          indexKey: index.key,
          dataKeys,
          baseTime,
          sampleCount: samplesPerGroup[g],
          intervalMs,
        });
      }

      // Write data to each group in parallel using Promise.all
      await Promise.all(
        groups.map(async (group) => {
          const writer = await client.openWriter({
            start: group.baseTime,
            channels: [group.indexKey, ...group.dataKeys],
          });

          // Write in batches to avoid memory issues
          const batchSize = 500;
          for (
            let batchStart = 0;
            batchStart < group.sampleCount;
            batchStart += batchSize
          ) {
            const batchEnd = Math.min(batchStart + batchSize, group.sampleCount);
            const timestamps: TimeStamp[] = [];
            const dataArrays: number[][] = group.dataKeys.map(() => []);

            for (let i = batchStart; i < batchEnd; i++) {
              timestamps.push(
                group.baseTime.add(TimeSpan.milliseconds(i * group.intervalMs)),
              );
              group.dataKeys.forEach((_, c) => {
                dataArrays[c].push(i * 100 + c);
              });
            }

            const writeData: Record<number, unknown[]> = {
              [group.indexKey]: timestamps,
            };
            group.dataKeys.forEach((key, c) => {
              writeData[key] = dataArrays[c];
            });

            await writer.write(writeData);
          }
          await writer.commit();
          await writer.close();
        }),
      );
      // Calculate expected total samples across all groups
      const totalSamples = samplesPerGroup.reduce((a, b) => a + b, 0);

      // Export the data
      const stream = await client.read({
        channels: allDataKeys,
        timeRange: {
          start: TimeStamp.seconds(999),
          end: TimeStamp.seconds(1100),
        },
        responseType: "csv",
      });

      // Collect all chunks and track streaming behavior
      const reader = stream.getReader();
      const chunks: Uint8Array[] = [];
      let chunkCount = 0;

      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        chunks.push(value);
        chunkCount++;
      }

      // Verify multiple chunks were produced (proves streaming worked)
      expect(chunkCount).toBeGreaterThan(1);

      // Decode and parse the full CSV
      const decoder = new TextDecoder();
      const csv = chunks.map((c) => decoder.decode(c)).join("");
      const lines = csv.trim().split(delimiter);

      // Header + data rows (some timestamps may align, so rows <= totalSamples)
      expect(lines.length).toBeGreaterThan(1);
      expect(lines.length).toBeLessThanOrEqual(totalSamples + 1);

      // Verify header has correct number of columns
      // Each group has: 1 index + channelsPerGroup data channels
      const expectedColumns = numGroups * (1 + channelsPerGroup);
      const headerColumns = lines[0].split(",");
      expect(headerColumns).toHaveLength(expectedColumns);

      // Verify all data rows have correct column count
      for (let i = 1; i < lines.length; i++) {
        const cols = lines[i].split(",");
        expect(cols).toHaveLength(expectedColumns);
      }

      // Verify timestamps are in ascending order
      const rows = parseCSV(csv);
      let lastTimestamp: bigint | null = null;
      for (let i = 1; i < rows.length; i++)
        // Find the first non-empty timestamp in this row
        for (let g = 0; g < numGroups; g++) {
          const tsCol = g * (1 + channelsPerGroup);
          const tsStr = rows[i][tsCol];
          if (tsStr === "") continue;
          const ts = BigInt(tsStr);
          if (lastTimestamp !== null) expect(ts).toBeGreaterThanOrEqual(lastTimestamp);
          lastTimestamp = ts;
          break;
        }

      // Verify some specific data integrity
      // First row should have data from at least one group
      const firstDataRow = rows[1];
      const nonEmptyValues = firstDataRow.filter((v) => v !== "");
      expect(nonEmptyValues.length).toBeGreaterThan(0);
    });
    it(
      "should handle large dense and sparse indexes with correct ordering and merging",
      { timeout: 15_000 },
      async () => {
        const denseSamples = 100_000;
        const sparseStep = 1_000;
        const sparseSamples = denseSamples / sparseStep;

        const indexFast = await client.channels.create({
          name: `dense_index_${id.create()}`,
          dataType: DataType.TIMESTAMP,
          isIndex: true,
        });
        const dataFast = await client.channels.create({
          name: `dense_data_${id.create()}`,
          dataType: DataType.FLOAT64,
          index: indexFast.key,
        });

        const indexSlow = await client.channels.create({
          name: `sparse_index_${id.create()}`,
          dataType: DataType.TIMESTAMP,
          isIndex: true,
        });
        const dataSlow = await client.channels.create({
          name: `sparse_data_${id.create()}`,
          dataType: DataType.FLOAT64,
          index: indexSlow.key,
        });
        const start = TimeStamp.seconds(0);
        const denseWriter = await client.openWriter({
          start,
          channels: [indexFast.key, dataFast.key],
        });

        // Write the dense channel in capped batches (100k samples total).
        const maxBatchSize = 10_000;
        for (
          let batchStart = 1;
          batchStart <= denseSamples;
          batchStart += maxBatchSize
        ) {
          const batchEnd = Math.min(batchStart + maxBatchSize - 1, denseSamples);
          const batchSize = batchEnd - batchStart + 1;
          const times = secondsLinspace(batchStart, batchSize);
          const data = Array.from({ length: batchSize }, (_, i) => i + batchStart);
          await denseWriter.write({ [indexFast.key]: times, [dataFast.key]: data });
        }
        await denseWriter.commit();
        await denseWriter.close();

        const sparseWriter = await client.openWriter({
          start,
          channels: [indexSlow.key, dataSlow.key],
        });

        // Sparse channel: one sample every sparseStep seconds, aligned with
        // every 1000th dense timestamp.
        for (
          let batchStart = 1;
          batchStart < sparseSamples;
          batchStart += maxBatchSize
        ) {
          const batchEnd = Math.min(
            batchStart + (maxBatchSize - 1) * sparseStep,
            batchStart + sparseSamples * sparseStep,
          );
          const times: TimeStamp[] = [];
          const data: number[] = [];

          for (let j = batchStart; j < batchEnd; j += sparseStep) {
            times.push(start.add(TimeSpan.seconds(j)));
            data.push(j); // arbitrary data value
          }
          await sparseWriter.write({ [indexSlow.key]: times, [dataSlow.key]: data });
        }
        await sparseWriter.commit();
        await sparseWriter.close();

        const stream = await client.read({
          channels: [dataFast.key, dataSlow.key],
          timeRange: {
            start: TimeStamp.seconds(0),
            end: start.add(TimeSpan.seconds(denseSamples + 1)),
          },
          responseType: "csv",
        });

        const reader = stream.getReader();
        const decoder = new TextDecoder();

        // Incrementally parse the CSV line-by-line to avoid holding the whole
        // (large) export in memory as parsed rows.
        let buffer = "";
        let chunkCount = 0;
        let isHeader = true;
        let totalRows = 0; // data rows only (exclude header)
        let sparseRows = 0;
        let lastTimestamp: bigint | null = null;

        while (true) {
          const { done, value } = await reader.read();
          if (done) break;
          chunkCount++;
          buffer += decoder.decode(value);
          while (true) {
            const idx = buffer.indexOf(delimiter);
            if (idx === -1) break;
            const line = buffer.slice(0, idx);
            buffer = buffer.slice(idx + delimiter.length);
            if (line === "") continue;
            if (isHeader) {
              const headerCols = line.split(",");
              expect(headerCols).toEqual([
                indexFast.name,
                dataFast.name,
                indexSlow.name,
                dataSlow.name,
              ]);
              isHeader = false;
              continue;
            }

            totalRows++;
            const cols = line.split(",");
            expect(cols).toHaveLength(4);
            const [fastTsStr, fastValStr, slowTsStr, slowValStr] = cols;

            // The dense channel has a sample in every row.
            expect(fastTsStr).not.toBe("");
            expect(fastValStr).not.toBe("");

            const ts = BigInt(fastTsStr);
            if (lastTimestamp !== null) expect(ts).toBeGreaterThan(lastTimestamp);
            lastTimestamp = ts;

            if (slowValStr !== "") {
              sparseRows++;
              // When sparse has data, its timestamp should match dense's timestamp
              expect(slowTsStr).toBe(fastTsStr);
              expect(slowValStr).not.toBe("");
            }
          }
        }
        expect(chunkCount).toBeGreaterThan(1);
        expect(totalRows).toBe(denseSamples);
        expect(sparseRows).toBe(sparseSamples);
      },
    );
  });
});