@kapeta/local-cluster-service 0.19.6 → 0.19.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +7 -0
- package/dist/cjs/src/containerManager.d.ts +39 -32
- package/dist/cjs/src/containerManager.js +138 -108
- package/dist/cjs/src/instanceManager.js +28 -18
- package/dist/cjs/src/operatorManager.js +3 -0
- package/dist/cjs/src/taskManager.js +4 -1
- package/dist/cjs/src/utils/BlockInstanceRunner.js +9 -0
- package/dist/esm/src/containerManager.d.ts +39 -32
- package/dist/esm/src/containerManager.js +138 -108
- package/dist/esm/src/instanceManager.js +28 -18
- package/dist/esm/src/operatorManager.js +3 -0
- package/dist/esm/src/taskManager.js +4 -1
- package/dist/esm/src/utils/BlockInstanceRunner.js +9 -0
- package/package.json +5 -2
- package/src/containerManager.ts +188 -140
- package/src/instanceManager.ts +44 -20
- package/src/operatorManager.ts +11 -1
- package/src/taskManager.ts +4 -1
- package/src/utils/BlockInstanceRunner.ts +19 -3
package/src/containerManager.ts
CHANGED
@@ -3,10 +3,9 @@ import { storageService } from './storageService';
 import os from 'os';
 import _ from 'lodash';
 import FSExtra, { ReadStream } from 'fs-extra';
-import
+import Docker from 'dockerode';
 import { parseKapetaUri } from '@kapeta/nodejs-utils';
 import ClusterConfiguration from '@kapeta/local-cluster-config';
-import { Container } from 'node-docker-api/lib/container';
 import uuid from 'node-uuid';
 import md5 from 'md5';
 import { getBlockInstanceContainerName } from './utils/utils';
@@ -14,6 +13,8 @@ import { InstanceInfo, LogEntry, LogSource } from './types';
 import { KapetaAPI } from '@kapeta/nodejs-api-client';
 import { taskManager, Task } from './taskManager';
 import { EventEmitter } from 'node:events';
+import StreamValues from 'stream-json/streamers/StreamValues';
+import { Stream } from 'stream';
 
 type StringMap = { [key: string]: string };
 
@@ -33,23 +34,40 @@ export interface DockerMounts {
     Consistency: string;
 }
 
-interface
+interface JSONProgress {
+    // Current is the current status and value of the progress made towards Total.
+    current: number;
+    // Total is the end value describing when we made 100% progress for an operation.
+    total: number;
+    // Start is the initial value for the operation.
+    start: number;
+    // HideCounts. if true, hides the progress count indicator (xB/yB).
+    hidecounts: boolean;
+    // Units is the unit to print for progress. It defaults to "bytes" if empty.
+    units: string;
+}
+
+interface JSONError {
+    code: number;
+    message: string;
+}
+
+export type DockerContainerStatus = 'created' | 'running' | 'paused' | 'restarting' | 'removing' | 'exited' | 'dead';
+export type DockerContainerHealth = 'starting' | 'healthy' | 'unhealthy' | 'none';
+
+interface JSONMessage<T = string> {
+    stream?: string;
+    status: T;
+    progressDetail?: JSONProgress;
+    progress?: string;
+    id: string;
+    from: string;
+    time: number;
+    timeNano: number;
+    errorDetail?: JSONError;
+    error?: string;
+    // Aux contains out-of-band data, such as digests for push signing and image id after building.
+    aux?: any;
 }
 
 interface Health {
@@ -63,14 +81,45 @@ export const CONTAINER_LABEL_PORT_PREFIX = 'kapeta_port-';
 const NANO_SECOND = 1000000;
 const HEALTH_CHECK_INTERVAL = 3000;
 const HEALTH_CHECK_MAX = 20;
+export const COMPOSE_LABEL_PROJECT = 'com.docker.compose.project';
+export const COMPOSE_LABEL_SERVICE = 'com.docker.compose.service';
 
 export const HEALTH_CHECK_TIMEOUT = HEALTH_CHECK_INTERVAL * HEALTH_CHECK_MAX * 2;
 
+enum DockerPullEventTypes {
+    PreparingPhase = 'Preparing',
+    WaitingPhase = 'Waiting',
+    PullingFsPhase = 'Pulling fs layer',
+    DownloadingPhase = 'Downloading',
+    DownloadCompletePhase = 'Download complete',
+    ExtractingPhase = 'Extracting',
+    VerifyingChecksumPhase = 'Verifying Checksum',
+    AlreadyExistsPhase = 'Already exists',
+    PullCompletePhase = 'Pull complete',
+}
+
+type DockerPullEventType = DockerPullEventTypes | string;
+
+const processJsonStream = <T>(purpose: string, stream: Stream, handler: (d: JSONMessage<T>) => void) =>
+    new Promise<void>((resolve, reject) => {
+        const jsonStream = StreamValues.withParser();
+        jsonStream.on('data', (data: any) => {
+            try {
+                handler(data.value as JSONMessage<T>);
+            } catch (e) {
+                console.error('Failed while processing data for stream: %s', purpose, e);
+            }
+        });
+        jsonStream.on('end', () => {
+            console.log('Docker stream ended: %s', purpose);
+            resolve();
+        });
+        jsonStream.on('error', (err) => {
+            console.error('Docker stream failed: %s', purpose, err);
+            reject(err);
+        });
+
+        stream.pipe(jsonStream);
     });
 
 class ContainerManager {
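The new `processJsonStream` helper above pipes Docker's newline-delimited JSON events through `stream-json` instead of hand-parsing chunks. A minimal standalone sketch of the same pattern, assuming only `dockerode` and `stream-json` are installed (the image name and logging are illustrative, not part of the package):

```typescript
import Docker from 'dockerode';
import StreamValues from 'stream-json/streamers/StreamValues';

async function logPullEvents(image: string): Promise<void> {
    const docker = new Docker();
    // With no callback, dockerode's pull() resolves to a readable stream of JSON events
    const stream = await docker.pull(image);

    await new Promise<void>((resolve, reject) => {
        const parser = StreamValues.withParser();
        // Each 'data' event carries { key, value } where value is one JSON message
        parser.on('data', (data: { key: number; value: { status?: string; id?: string } }) => {
            console.log('[%s] %s', data.value.id ?? '-', data.value.status ?? '');
        });
        parser.on('end', resolve);
        parser.on('error', reject);
        stream.pipe(parser);
    });
}

// Usage: logPullEvents('alpine:latest').catch(console.error);
```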
@@ -92,7 +141,7 @@ class ContainerManager {
     async initialize() {
         // Use the value from cluster-service.yml if configured
        const dockerConfig = ClusterConfiguration.getDockerConfig();
-        const connectOptions =
+        const connectOptions: any[] =
             Object.keys(dockerConfig).length > 0
                 ? [dockerConfig]
                 : [
@@ -114,7 +163,7 @@ class ContainerManager {
             try {
                 const client = new Docker({
                     ...opts,
-                    timeout:
+                    timeout: 15 * 60 * 1000, //15 minutes should be enough for any operation
                 });
                 await client.ping();
                 this._docker = client;
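`initialize()` now types the candidate connection options as `any[]` and raises the per-request timeout to 15 minutes so long pulls are not cut off mid-operation. The surrounding code tries each candidate and keeps the first client whose `ping()` succeeds; a simplified sketch of that probe loop, with placeholder socket paths (the package builds its list from `ClusterConfiguration.getDockerConfig()` and platform defaults):

```typescript
import Docker from 'dockerode';
import os from 'os';

async function findWorkingDockerClient(): Promise<Docker | undefined> {
    // Candidate connection options; these paths are illustrative only
    const candidates: Docker.DockerOptions[] = [
        { socketPath: '/var/run/docker.sock' },
        { socketPath: `${os.homedir()}/.docker/run/docker.sock` },
        { host: '127.0.0.1', port: 2375 },
    ];

    for (const opts of candidates) {
        try {
            const client = new Docker({
                ...opts,
                timeout: 15 * 60 * 1000, // long timeout so slow pulls don't abort
            });
            await client.ping(); // throws if the daemon is not reachable with these options
            return client;
        } catch (err) {
            // Try the next candidate
        }
    }
    return undefined;
}
```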
@@ -210,14 +259,13 @@ class ContainerManager {
     }
 
     async getContainerByName(containerName: string): Promise<ContainerInfo | undefined> {
-        const containers = await this.docker().
+        const containers = await this.docker().listContainers({ all: true });
         const out = containers.find((container) => {
-            return containerData.Names.indexOf(`/${containerName}`) > -1;
+            return container.Names.indexOf(`/${containerName}`) > -1;
         });
 
         if (out) {
-            return
+            return this.get(out.Id);
         }
         return undefined;
     }
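Container lookup now goes through `listContainers({ all: true })` and matches against the `Names` array, where Docker prefixes every name with a slash. The same lookup as a standalone helper:

```typescript
import Docker from 'dockerode';

// Sketch of the name lookup shown above; Docker reports names with a leading '/'
async function findContainerByName(docker: Docker, name: string): Promise<Docker.Container | undefined> {
    const containers = await docker.listContainers({ all: true }); // include stopped containers
    const match = containers.find((info) => info.Names.includes(`/${name}`));
    return match ? docker.getContainer(match.Id) : undefined;
}
```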
@@ -228,8 +276,7 @@ class ContainerManager {
             tag = 'latest';
         }
 
-        const imageTagList = (await this.docker().
-            .map((image) => image.data as any)
+        const imageTagList = (await this.docker().listImages({}))
             .filter((imageData) => !!imageData.RepoTags)
             .map((imageData) => imageData.RepoTags as string[]);
 
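With `listImages({})` the image metadata is used directly, so checking whether an `image:tag` already exists locally reduces to scanning the `RepoTags` arrays. One way to express that check, as a sketch (the helper name is hypothetical):

```typescript
import Docker from 'dockerode';

// Returns true if the given image:tag is already present in the local image cache
async function hasImageLocally(docker: Docker, image: string, tag = 'latest'): Promise<boolean> {
    const images = await docker.listImages({});
    return images
        .filter((info) => !!info.RepoTags)
        .some((info) => (info.RepoTags as string[]).includes(`${image}:${tag}`));
}
```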
@@ -261,10 +308,9 @@ class ContainerManager {
               }
             : {};
 
-        const stream =
-        })) as ReadStream;
+        const stream = await this.docker().pull(image, {
+            authconfig: auth,
+        });
 
         const chunks: {
             [p: string]: {
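The pull itself is now a single `dockerode` call with registry credentials passed as `authconfig`. In the package the `auth` object is assembled elsewhere (via the Kapeta API); the sketch below substitutes placeholder username, password and serveraddress values purely for illustration:

```typescript
import Docker from 'dockerode';

// Sketch: pull a private image, passing credentials via dockerode's authconfig option.
// The credential values here are placeholders, not the package's actual auth flow.
async function pullPrivateImage(docker: Docker, image: string): Promise<NodeJS.ReadableStream> {
    return docker.pull(image, {
        authconfig: {
            username: 'example-user',
            password: 'example-token',
            serveraddress: 'https://index.docker.io/v1/',
        },
    });
}
```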
@@ -281,66 +327,61 @@ class ContainerManager {
         } = {};
 
         let lastEmitted = Date.now();
-        await
-                return;
-            }
+        await processJsonStream<DockerPullEventType>(`image:pull:${image}`, stream, (data) => {
+            if (!chunks[data.id]) {
+                chunks[data.id] = {
+                    downloading: {
+                        total: 0,
+                        current: 0,
+                    },
+                    extracting: {
+                        total: 0,
+                        current: 0,
+                    },
+                    done: false,
+                };
+            }
 
+            const chunk = chunks[data.id];
+
+            switch (data.status) {
+                case DockerPullEventTypes.PreparingPhase:
+                case DockerPullEventTypes.WaitingPhase:
+                case DockerPullEventTypes.PullingFsPhase:
+                    //Do nothing
+                    break;
+                case DockerPullEventTypes.DownloadingPhase:
+                case DockerPullEventTypes.VerifyingChecksumPhase:
+                    chunk.downloading = {
+                        total: data.progressDetail?.total ?? 0,
+                        current: data.progressDetail?.current ?? 0,
                     };
-                    chunk.extracting.current = chunks[data.id].extracting.total;
-                    chunk.done = true;
-                    break;
-                case 'Already exists':
-                    // Force layer to be done
-                    chunk.downloading.current = 1;
-                    chunk.downloading.total = 1;
-                    chunk.extracting.current = 1;
-                    chunk.extracting.total = 1;
-                    chunk.done = true;
-                    break;
-            }
-        });
+                    break;
+                case DockerPullEventTypes.ExtractingPhase:
+                    chunk.extracting = {
+                        total: data.progressDetail?.total ?? 0,
+                        current: data.progressDetail?.current ?? 0,
+                    };
+                    break;
+                case DockerPullEventTypes.DownloadCompletePhase:
+                    chunk.downloading.current = chunks[data.id].downloading.total;
+                    break;
+                case DockerPullEventTypes.PullCompletePhase:
+                    chunk.extracting.current = chunks[data.id].extracting.total;
+                    chunk.done = true;
+                    break;
+            }
 
-            if (
+            if (
+                data.status === DockerPullEventTypes.AlreadyExistsPhase ||
+                data.status.includes('Image is up to date') ||
+                data.status.includes('Downloaded newer image')
+            ) {
+                chunk.downloading.current = 1;
+                chunk.downloading.total = 1;
+                chunk.extracting.current = 1;
+                chunk.extracting.total = 1;
+                chunk.done = true;
             }
 
             const chunkList = Object.values(chunks);
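The switch above is driven by Docker's per-layer pull status messages. For reference, a hedged example of the kind of events it consumes; the layer id and byte counts are made up, and only the `status` strings and the overall shape follow the `JSONMessage` interface defined earlier:

```typescript
// Illustrative pull events as Docker emits them during `docker pull` (values are invented)
const exampleEvents = [
    { status: 'Pulling fs layer', id: 'a3ed95caeb02' },
    { status: 'Downloading', id: 'a3ed95caeb02', progressDetail: { current: 52428, total: 2097152 } },
    { status: 'Download complete', id: 'a3ed95caeb02' },
    { status: 'Extracting', id: 'a3ed95caeb02', progressDetail: { current: 1048576, total: 2097152 } },
    { status: 'Pull complete', id: 'a3ed95caeb02' },
    { status: 'Status: Downloaded newer image for alpine:latest' },
];
```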
@@ -353,6 +394,7 @@ class ContainerManager {
                     total: 0,
                     current: 0,
                 },
+                percent: 0,
                 total: chunkList.length,
                 done: 0,
             };
@@ -379,15 +421,19 @@ class ContainerManager {
                 }
             });
 
+            totals.percent = totals.total > 0 ? (totals.done / totals.total) * 100 : 0;
 
             task.metadata = {
                 ...task.metadata,
                 image,
-                progress,
+                progress: totals.percent,
                 status: totals,
                 timeTaken: Date.now() - timeStarted,
             };
+
+            if (Date.now() - lastEmitted < 1000) {
+                return;
+            }
             task.emitUpdate();
             lastEmitted = Date.now();
             //console.log('Pulling image %s: %s % [done: %s, total: %s]', image, Math.round(percent), totals.done, totals.total);
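Progress is now rolled up into `totals.percent` (completed layers over total layers) and task updates are throttled to at most one emit per second. A compact sketch of the same rollup, under the assumption that each layer chunk has the shape created in the handler above:

```typescript
// Sketch of the rollup used above: overall percent is completed layers over total layers
interface LayerProgress {
    downloading: { current: number; total: number };
    extracting: { current: number; total: number };
    done: boolean;
}

function summarize(chunks: Record<string, LayerProgress>) {
    const list = Object.values(chunks);
    const done = list.filter((c) => c.done).length;
    return {
        total: list.length,
        done,
        percent: list.length > 0 ? (done / list.length) * 100 : 0,
    };
}
```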
@@ -406,6 +452,7 @@ class ContainerManager {
             name: taskName,
             image,
             progress: -1,
+            group: 'docker:pull', //It's faster to pull images one at a time
         });
 
         await task.wait();
@@ -462,40 +509,40 @@ class ContainerManager {
             console.log('Starting unnamed container: %s', opts.Image);
             return this.startContainer(opts);
         }
-        const
+        const container = await this.getContainerByName(opts.name);
         if (imagePulled) {
+            // If image was pulled always recreate
             console.log('New version of image was pulled: %s', opts.Image);
         } else {
-            if (!containerInfo) {
+            if (!container) {
                 console.log('Starting new container: %s', opts.name);
                 return this.startContainer(opts);
             }
 
-            const containerData =
+            const containerData = await container.inspect();
 
-            if (containerData?.Labels?.HASH === opts.Labels.HASH) {
-                if (!(await
+            if (containerData?.Config.Labels?.HASH === opts.Labels.HASH) {
+                if (!(await container.isRunning())) {
                     console.log('Starting previously created container: %s', opts.name);
-                    await
+                    await container.start();
                 } else {
                     console.log('Previously created container already running: %s', opts.name);
                 }
-                return
+                return container.native;
             }
         }
 
-        if (
+        if (container) {
             // Remove the container and start a new one
             console.log('Replacing previously created container: %s', opts.name);
-            await
+            await container.remove({ force: true });
         }
 
         console.log('Starting new container: %s', opts.name);
         return this.startContainer(opts);
     }
 
-    async startContainer(opts: any) {
+    private async startContainer(opts: any) {
         const extraHosts = getExtraHosts(this._version);
 
         if (extraHosts && extraHosts.length > 0) {
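Reuse in this method hinges on a `HASH` label: if the inspected container's `Config.Labels.HASH` matches the requested options, the existing container is started (or left running); otherwise it is force-removed and recreated. The hashing scheme itself is not part of this hunk; a hedged sketch of one way such a label could be derived with the already-imported `md5`:

```typescript
import md5 from 'md5';

// Hedged sketch: the package compares a HASH label, but the exact fields it hashes
// are not shown in this hunk; the payload below is illustrative only.
function configHash(opts: { Image: string; Env?: string[]; Labels?: Record<string, string> }): string {
    return md5(JSON.stringify({ image: opts.Image, env: opts.Env ?? [], labels: opts.Labels ?? {} }));
}

// Attach it when creating the container, then compare on the next start:
// opts.Labels = { ...opts.Labels, HASH: configHash(opts) };
// const sameConfig = inspected.Config.Labels?.HASH === opts.Labels.HASH;
```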
@@ -510,12 +557,12 @@ class ContainerManager {
             opts.HostConfig.ExtraHosts = opts.HostConfig.ExtraHosts.concat(extraHosts);
         }
 
-        const dockerContainer = await this.docker().
+        const dockerContainer = await this.docker().createContainer(opts);
         await dockerContainer.start();
         return dockerContainer;
     }
 
-    async waitForReady(container: Container, attempt: number = 0): Promise<void> {
+    async waitForReady(container: Docker.Container, attempt: number = 0): Promise<void> {
         if (!attempt) {
             attempt = 0;
         }
@@ -540,34 +587,33 @@ class ContainerManager {
         });
     }
 
-    async _isReady(container: Container) {
-        let info:
+    async _isReady(container: Docker.Container) {
+        let info: Docker.ContainerInspectInfo;
         try {
-            info = await container.
+            info = await container.inspect();
         } catch (err) {
            return false;
         }
-        const infoData: any = info?.data;
-        const state = infoData?.State as DockerState;
 
+        const state = info.State;
+
+        if (state.Status === 'exited' || state?.Status === 'removing' || state?.Status === 'dead') {
             throw new Error('Container exited unexpectedly');
         }
 
-        if (
+        if (state.Health) {
             // If container has health info - wait for it to become healthy
-            return
+            return state.Health.Status === 'healthy';
         } else {
-            return
+            return state.Running ?? false;
         }
     }
 
-    async remove(container: Container, opts?: { force?: boolean }) {
+    async remove(container: Docker.Container, opts?: { force?: boolean }) {
         const newName = 'deleting-' + uuid.v4();
-        const containerData = container.data as any;
         // Rename the container first to avoid name conflicts if people start the same container
         await container.rename({ name: newName });
-        await container.
+        await container.remove({ force: !!opts?.force });
     }
 
     /**
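`_isReady` now reads state straight from `dockerode`'s inspect result: `exited`, `removing` or `dead` is treated as a hard failure, a `Health` block means "wait until healthy", and otherwise `Running` is enough. Combined with `waitForReady`'s retry loop this amounts to the following sketch (the interval and attempt limit mirror the constants defined at the top of the file):

```typescript
import Docker from 'dockerode';

// Sketch of a readiness poll along the same lines as waitForReady/_isReady above
async function waitUntilReady(container: Docker.Container, intervalMs = 3000, maxAttempts = 20): Promise<void> {
    for (let attempt = 0; attempt < maxAttempts; attempt++) {
        const info = await container.inspect();
        const state = info.State;
        if (['exited', 'removing', 'dead'].includes(state.Status)) {
            throw new Error('Container exited unexpectedly');
        }
        // Prefer the health check verdict when the image defines one
        const ready = state.Health ? state.Health.Status === 'healthy' : state.Running;
        if (ready) {
            return;
        }
        await new Promise((resolve) => setTimeout(resolve, intervalMs));
    }
    throw new Error('Container did not become ready in time');
}
```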
@@ -575,19 +621,19 @@ class ContainerManager {
      * @param name
      * @return {Promise<ContainerInfo>}
      */
-    async get(name: string): Promise<ContainerInfo |
+    async get(name: string): Promise<ContainerInfo | undefined> {
         let dockerContainer = null;
 
         try {
-            dockerContainer = await this.docker().
-            await dockerContainer.
+            dockerContainer = await this.docker().getContainer(name);
+            await dockerContainer.stats();
         } catch (err) {
             //Ignore
             dockerContainer = null;
         }
 
         if (!dockerContainer) {
-            return
+            return undefined;
         }
 
         return new ContainerInfo(dockerContainer);
@@ -797,16 +843,16 @@ class ClosableLogStream {
 }
 
 export class ContainerInfo {
-    private readonly _container: Container;
+    private readonly _container: Docker.Container;
 
     /**
      *
-     * @param {Container} dockerContainer
+     * @param {Docker.Container} dockerContainer
      */
-    constructor(dockerContainer: Container) {
+    constructor(dockerContainer: Docker.Container) {
         /**
          *
-         * @type {Container}
+         * @type {Docker.Container}
          * @private
         */
         this._container = dockerContainer;
@@ -827,14 +873,23 @@ export class ContainerInfo {
     }
 
     async start() {
+        if (await this.isRunning()) {
+            return;
+        }
         await this._container.start();
     }
 
     async restart() {
+        if (!(await this.isRunning())) {
+            return this.start();
+        }
         await this._container.restart();
     }
 
     async stop() {
+        if (!(await this.isRunning())) {
+            return;
+        }
         await this._container.stop();
     }
 
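The lifecycle methods on `ContainerInfo` now guard on `isRunning()`, making `start`, `restart` and `stop` effectively idempotent. `isRunning()` itself is not shown in this hunk; a plausible implementation on top of `inspect()` would be:

```typescript
import Docker from 'dockerode';

// Hedged sketch: isRunning() is referenced above but not part of this hunk.
// A plausible implementation simply checks the Running flag from inspect().
async function isRunning(container: Docker.Container): Promise<boolean> {
    try {
        const info = await container.inspect();
        return info.State.Running === true;
    } catch (err) {
        return false; // container no longer exists
    }
}
```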
@@ -854,18 +909,16 @@ export class ContainerInfo {
 
     async inspect() {
         try {
-            return result ? (result.data as any) : null;
+            return await this._container.inspect();
         } catch (err) {
-            return
+            return undefined;
         }
     }
 
     async status() {
         const result = await this.inspect();
 
-        return result
+        return result?.State;
     }
 
     async getPorts(): Promise<PortMap | false> {
@@ -923,19 +976,14 @@ export class ContainerInfo {
     }
 
     async getLogs(): Promise<LogEntry[]> {
-        const
+        const logs = await this.native.logs({
             stdout: true,
             stderr: true,
             follow: false,
             timestamps: true,
-        })) as ReadStream;
-
-        const chunks: Buffer[] = [];
-        await promisifyStream(logStream, (data) => {
-            chunks.push(data as Buffer);
         });
 
-        const out = readLogBuffer(
+        const out = readLogBuffer(logs);
 
         if (out.length === 0) {
             out.push({
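With `follow: false`, `dockerode`'s `logs()` resolves to a single `Buffer` rather than a stream, which `readLogBuffer` then splits into log entries. `readLogBuffer` is not part of this diff; as a hedged sketch, Docker's multiplexed log format can be demultiplexed like this (each frame carries an 8-byte header: stream type in byte 0, big-endian payload length in bytes 4 to 7; containers started with a TTY skip this framing and return plain text):

```typescript
// Hedged sketch, not the package's readLogBuffer: split a multiplexed Docker log buffer
// into per-stream lines based on the 8-byte frame headers.
function demuxLogBuffer(buffer: Buffer): Array<{ source: 'stdout' | 'stderr'; line: string }> {
    const entries: Array<{ source: 'stdout' | 'stderr'; line: string }> = [];
    let offset = 0;
    while (offset + 8 <= buffer.length) {
        const streamType = buffer.readUInt8(offset); // 1 = stdout, 2 = stderr
        const length = buffer.readUInt32BE(offset + 4); // payload length
        const payload = buffer.subarray(offset + 8, offset + 8 + length).toString('utf-8');
        entries.push({ source: streamType === 2 ? 'stderr' : 'stdout', line: payload });
        offset += 8 + length;
    }
    return entries;
}
```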