@milaboratories/pl-drivers 1.5.16 → 1.5.17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@milaboratories/pl-drivers",
3
- "version": "1.5.16",
3
+ "version": "1.5.17",
4
4
  "engines": {
5
5
  "node": ">=20"
6
6
  },
@@ -30,11 +30,11 @@
30
30
  "tar-fs": "^3.0.8",
31
31
  "undici": "^7.2.3",
32
32
  "zod": "~3.23.8",
33
- "@milaboratories/ts-helpers": "^1.1.3",
34
- "@milaboratories/pl-tree": "^1.4.23",
35
- "@milaboratories/computable": "^2.3.4",
33
+ "@milaboratories/ts-helpers": "^1.1.4",
34
+ "@milaboratories/pl-client": "^2.7.5",
35
+ "@milaboratories/computable": "^2.3.5",
36
36
  "@milaboratories/pl-model-common": "^1.10.5",
37
- "@milaboratories/pl-client": "^2.7.4"
37
+ "@milaboratories/pl-tree": "^1.4.24"
38
38
  },
39
39
  "devDependencies": {
40
40
  "eslint": "^9.16.0",
@@ -47,8 +47,8 @@
47
47
  "jest": "^29.7.0",
48
48
  "@jest/globals": "^29.7.0",
49
49
  "ts-jest": "^29.2.5",
50
- "@milaboratories/eslint-config": "^1.0.1",
51
- "@milaboratories/platforma-build-configs": "1.0.2"
50
+ "@milaboratories/platforma-build-configs": "1.0.2",
51
+ "@milaboratories/eslint-config": "^1.0.1"
52
52
  },
53
53
  "scripts": {
54
54
  "type-check": "tsc --noEmit --composite false",
@@ -77,6 +77,12 @@ export class ClientUpload {
77
77
  } = await request(info.uploadUrl, {
78
78
  dispatcher: this.httpClient,
79
79
  body: chunk,
80
+ // We get the response headers only after the whole body has been sent
81
+ // (for S3 PUT requests that is 5 MB).
82
+ // This might be slow with a slow connection (or over SSH),
83
+ // which is why we use a big timeout here.
84
+ headersTimeout: 60000,
85
+ bodyTimeout: 60000,
80
86
  headers: toHeadersMap(info.headers),
81
87
  method: info.method.toUpperCase() as Dispatcher.HttpMethod,
82
88
  });
@@ -85,10 +91,13 @@ export class ClientUpload {
85
91
  const body = await rawBody.text();
86
92
  checkStatusCodeOk(statusCode, body, headers, info);
87
93
  } catch (e: unknown) {
94
+ if (e instanceof NetworkError)
95
+ throw e;
96
+
88
97
  throw new Error(`partUpload: error ${JSON.stringify(e)} happened while trying to do part upload to the url ${info.uploadUrl}, headers: ${JSON.stringify(info.headers)}`);
89
98
  }
90
99
 
91
- await this.grpcUpdateProgress({ id, type }, info.chunkEnd - info.chunkStart, options);
100
+ await this.grpcUpdateProgress({ id, type }, BigInt(info.chunkEnd - info.chunkStart), options);
92
101
  }
93
102
 
94
103
  public async finalize(info: ResourceInfo, options?: RpcOptions) {
@@ -179,7 +179,7 @@ export class DownloadAndUnarchiveTask {
179
179
  async function dirSize(dir: string): Promise<number> {
180
180
  const files = await fsp.readdir(dir, { withFileTypes: true });
181
181
  const sizes = await Promise.all(
182
- files.map(async (file) => {
182
+ files.map(async (file: any) => {
183
183
  const fPath = path.join(dir, file.name);
184
184
 
185
185
  if (file.isDirectory()) return await dirSize(fPath);
@@ -189,7 +189,7 @@ async function dirSize(dir: string): Promise<number> {
189
189
  }),
190
190
  );
191
191
 
192
- return sizes.reduce((sum, size) => sum + size, 0);
192
+ return sizes.reduce((sum: any, size: any) => sum + size, 0);
193
193
  }
194
194
 
195
195
  /** Do rm -rf on dir. */
@@ -295,7 +295,7 @@ class URLAborted extends Error {}
295
295
  async function dirSize(dir: string): Promise<number> {
296
296
  const files = await fsp.readdir(dir, { withFileTypes: true });
297
297
  const sizes = await Promise.all(
298
- files.map(async (file) => {
298
+ files.map(async (file: any) => {
299
299
  const fPath = path.join(dir, file.name);
300
300
 
301
301
  if (file.isDirectory()) return await dirSize(fPath);
@@ -305,7 +305,7 @@ async function dirSize(dir: string): Promise<number> {
305
305
  }),
306
306
  );
307
307
 
308
- return sizes.reduce((sum, size) => sum + size, 0);
308
+ return sizes.reduce((sum: any, size: any) => sum + size, 0);
309
309
  }
310
310
 
311
311
  /** Do rm -rf on dir. */
@@ -2,7 +2,7 @@ import type { Watcher } from '@milaboratories/computable';
2
2
  import { ChangeSource } from '@milaboratories/computable';
3
3
  import { stringifyWithResourceId } from '@milaboratories/pl-client';
4
4
  import type * as sdk from '@milaboratories/pl-model-common';
5
- import type { MiLogger, Signer } from '@milaboratories/ts-helpers';
5
+ import type { AsyncPoolController, MiLogger, Signer } from '@milaboratories/ts-helpers';
6
6
  import { asyncPool, CallersCounter } from '@milaboratories/ts-helpers';
7
7
  import type { ClientProgress, ProgressStatus } from '../clients/progress';
8
8
  import type { ClientUpload } from '../clients/upload';
@@ -17,6 +17,9 @@ import assert from 'node:assert';
17
17
  export class UploadTask {
18
18
  private readonly change: ChangeSource = new ChangeSource();
19
19
  private readonly counter: CallersCounter = new CallersCounter();
20
+ private nMaxUploads: number;
21
+ private nPartsWithThisUploadSpeed = 0;
22
+ private nPartsToIncreaseUpload = 10; // how many parts must succeed before we increase concurrency: 50 MB, i.e. 10 parts of 5 MB each.
20
23
 
21
24
  /** If this is upload progress this field will be defined */
22
25
  private uploadData?: ImportFileHandleUploadData;
@@ -33,10 +36,11 @@ export class UploadTask {
33
36
  private readonly logger: MiLogger,
34
37
  private readonly clientBlob: ClientUpload,
35
38
  private readonly clientProgress: ClientProgress,
36
- private readonly nConcurrentPartsUpload: number,
39
+ private readonly maxNConcurrentPartsUpload: number,
37
40
  signer: Signer,
38
41
  public readonly res: ImportResourceSnapshot,
39
42
  ) {
43
+ this.nMaxUploads = this.maxNConcurrentPartsUpload;
40
44
  const { uploadData, progress } = newProgress(res, signer);
41
45
  this.uploadData = uploadData;
42
46
  this.progress = progress;
@@ -60,30 +64,41 @@ export class UploadTask {
60
64
  /** Uploads a blob if it's not BlobIndex. */
61
65
  public async uploadBlobTask() {
62
66
  assert(isUpload(this.res), 'the upload operation can be done only for BlobUploads');
67
+ const timeout = 10000; // 10 sec instead of the standard 5 sec; things might be slow with a slow connection.
63
68
 
64
69
  try {
65
70
  if (this.isComputableDone()) return;
66
- const parts = await this.clientBlob.initUpload(this.res);
71
+ const parts = await this.clientBlob.initUpload(this.res, { timeout });
67
72
  this.logger.info(
68
73
  `started to upload blob ${this.res.id},`
69
- + ` parts overall: ${parts.overall}, parts remained: ${parts.toUpload.length}`,
74
+ + ` parts overall: ${parts.overall}, parts remained: ${parts.toUpload.length},`
75
+ + ` number of concurrent uploads: ${this.nMaxUploads}`,
70
76
  );
71
77
 
72
- const partUploadFn = (part: bigint) => async () => {
78
+ const partUploadFn = (part: bigint) => async (controller: AsyncPoolController) => {
73
79
  if (this.isComputableDone()) return;
74
80
  await this.clientBlob.partUpload(
75
81
  this.res,
76
82
  this.uploadData!.localPath,
77
83
  BigInt(this.uploadData!.modificationTime),
78
84
  part,
85
+ { timeout }
79
86
  );
80
87
  this.logger.info(`uploaded chunk ${part}/${parts.overall} of resource: ${this.res.id}`);
88
+
89
+ // if we had a network freeze, concurrency will be ramped back up slowly.
90
+ this.nPartsWithThisUploadSpeed++;
91
+ if (this.nPartsWithThisUploadSpeed >= this.nPartsToIncreaseUpload) {
92
+ this.nPartsWithThisUploadSpeed = 0;
93
+ this.nMaxUploads = increaseConcurrency(this.logger, this.nMaxUploads, this.maxNConcurrentPartsUpload);
94
+ controller.setConcurrency(this.nMaxUploads);
95
+ }
81
96
  };
82
97
 
83
- await asyncPool(this.nConcurrentPartsUpload, parts.toUpload.map(partUploadFn));
98
+ await asyncPool(this.nMaxUploads, parts.toUpload.map(partUploadFn));
84
99
 
85
100
  if (this.isComputableDone()) return;
86
- await this.clientBlob.finalize(this.res);
101
+ await this.clientBlob.finalize(this.res, { timeout });
87
102
 
88
103
  this.logger.info(`uploading of resource ${this.res.id} finished.`);
89
104
  this.change.markChanged();
@@ -106,13 +121,19 @@ export class UploadTask {
106
121
  return;
107
122
  }
108
123
 
124
+ if (isHeadersTimeoutError(e)) {
125
+ // we probably have a slow connection, so slow things down a bit.
126
+ this.nMaxUploads = decreaseConcurrency(this.logger, this.nMaxUploads, 1);
127
+ }
128
+
109
129
  throw e;
110
130
  }
111
131
  }
112
132
 
113
133
  public async updateStatus() {
114
134
  try {
115
- const status = await this.clientProgress.getStatus(this.res);
135
+ // we do it with a timeout in case the connection is slow.
136
+ const status = await this.clientProgress.getStatus(this.res, { timeout: 10000 });
116
137
 
117
138
  const oldStatus = this.progress.status;
118
139
  const newStatus = doneProgressIfExisted(this.alreadyExisted, protoToStatus(status));
@@ -125,7 +146,7 @@ export class UploadTask {
125
146
  } catch (e: any) {
126
147
  this.setRetriableError(e);
127
148
 
128
- if (e.name == 'RpcError' && e.code == 'DEADLINE_EXCEEDED') {
149
+ if ((e.name == 'RpcError' && e.code == 'DEADLINE_EXCEEDED') || e?.message?.includes('DEADLINE_EXCEEDED')) {
129
150
  this.logger.warn(`deadline exceeded while getting a status of BlobImport`);
130
151
  return;
131
152
  }
@@ -139,9 +160,12 @@ export class UploadTask {
139
160
  return;
140
161
  }
141
162
 
142
- this.logger.error(`error while updating a status of BlobImport: ${e}`);
143
- this.change.markChanged();
144
- this.setTerminalError(e);
163
+ this.logger.error(`retryable error while updating a status of BlobImport: ${e}`);
164
+ // It was a terminal error, but when a connection drops,
165
+ // this will stop the whole task, so we make it retryable.
166
+ // Previously the code was:
167
+ // this.change.markChanged();
168
+ // this.setTerminalError(e);
145
169
  }
146
170
  }
147
171
 
@@ -280,3 +304,26 @@ export function isResourceWasDeletedError(e: any) {
280
304
  export function nonRecoverableError(e: any) {
281
305
  return e instanceof MTimeError || e instanceof UnexpectedEOF || e instanceof NoFileForUploading;
282
306
  }
307
+
308
+ function isHeadersTimeoutError(e: any) {
309
+ return (e as Error)?.message.includes(`UND_ERR_HEADERS_TIMEOUT`);
310
+ }
311
+
312
+ /** Called after every successfully uploaded part; once enough parts succeed, this raises the concurrency by 2, capped at max. */
313
+ function increaseConcurrency(logger: MiLogger, current: number, max: number): number {
314
+ const newConcurrency = Math.min(current + 2, max);
315
+ if (newConcurrency != current)
316
+ logger.info(`uploadTask.increaseConcurrency: increased from ${current} to ${newConcurrency}`)
317
+
318
+ return newConcurrency;
319
+ }
320
+
321
+ /** When an error happens, this halves the concurrency level (not going below min), so the next time
322
+ * we'll try to upload blobs more slowly. */
323
+ function decreaseConcurrency(logger: MiLogger, current: number, min: number): number {
324
+ const newConcurrency = Math.max(Math.round(current / 2), min);
325
+ if (newConcurrency != current)
326
+ logger.info(`uploadTask.decreaseConcurrency: decreased from ${current} to ${newConcurrency}`)
327
+
328
+ return newConcurrency;
329
+ }