@aj-archipelago/cortex 1.1.3 → 1.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. package/.eslintignore +3 -3
  2. package/README.md +17 -4
  3. package/config.js +45 -9
  4. package/{helper_apps/CortexFileHandler → helper-apps/cortex-file-handler}/Dockerfile +1 -1
  5. package/{helper_apps/CortexFileHandler → helper-apps/cortex-file-handler}/fileChunker.js +4 -1
  6. package/{helper_apps/CortexFileHandler → helper-apps/cortex-file-handler}/package-lock.json +25 -216
  7. package/{helper_apps/CortexFileHandler → helper-apps/cortex-file-handler}/package.json +2 -2
  8. package/helper-apps/cortex-whisper-wrapper/.dockerignore +27 -0
  9. package/helper-apps/cortex-whisper-wrapper/Dockerfile +32 -0
  10. package/helper-apps/cortex-whisper-wrapper/app.py +104 -0
  11. package/helper-apps/cortex-whisper-wrapper/docker-compose.debug.yml +12 -0
  12. package/helper-apps/cortex-whisper-wrapper/docker-compose.yml +10 -0
  13. package/helper-apps/cortex-whisper-wrapper/models/.gitkeep +0 -0
  14. package/helper-apps/cortex-whisper-wrapper/requirements.txt +5 -0
  15. package/lib/cortexRequest.js +117 -0
  16. package/lib/pathwayTools.js +2 -1
  17. package/lib/redisSubscription.js +2 -2
  18. package/lib/requestExecutor.js +360 -0
  19. package/lib/requestMonitor.js +131 -28
  20. package/package.json +2 -1
  21. package/pathways/summary.js +3 -3
  22. package/server/graphql.js +6 -6
  23. package/server/{pathwayPrompter.js → modelExecutor.js} +24 -21
  24. package/server/pathwayResolver.js +22 -17
  25. package/server/plugins/azureCognitivePlugin.js +25 -20
  26. package/server/plugins/azureTranslatePlugin.js +6 -10
  27. package/server/plugins/cohereGeneratePlugin.js +5 -12
  28. package/server/plugins/cohereSummarizePlugin.js +5 -12
  29. package/server/plugins/localModelPlugin.js +3 -3
  30. package/server/plugins/modelPlugin.js +18 -12
  31. package/server/plugins/openAiChatExtensionPlugin.js +5 -5
  32. package/server/plugins/openAiChatPlugin.js +8 -10
  33. package/server/plugins/openAiCompletionPlugin.js +9 -12
  34. package/server/plugins/openAiDallE3Plugin.js +14 -31
  35. package/server/plugins/openAiEmbeddingsPlugin.js +6 -9
  36. package/server/plugins/openAiImagePlugin.js +19 -15
  37. package/server/plugins/openAiWhisperPlugin.js +168 -100
  38. package/server/plugins/palmChatPlugin.js +9 -10
  39. package/server/plugins/palmCodeCompletionPlugin.js +2 -2
  40. package/server/plugins/palmCompletionPlugin.js +11 -12
  41. package/server/resolver.js +2 -2
  42. package/server/rest.js +1 -1
  43. package/tests/config.test.js +1 -1
  44. package/tests/mocks.js +5 -0
  45. package/tests/modelPlugin.test.js +3 -10
  46. package/tests/openAiChatPlugin.test.js +9 -8
  47. package/tests/openai_api.test.js +3 -3
  48. package/tests/palmChatPlugin.test.js +1 -1
  49. package/tests/palmCompletionPlugin.test.js +1 -1
  50. package/tests/pathwayResolver.test.js +2 -1
  51. package/tests/requestMonitor.test.js +94 -0
  52. package/tests/{requestDurationEstimator.test.js → requestMonitorDurationEstimator.test.js} +21 -17
  53. package/tests/truncateMessages.test.js +1 -1
  54. package/lib/request.js +0 -259
  55. package/lib/requestDurationEstimator.js +0 -90
  56. /package/{helper_apps/CortexFileHandler → helper-apps/cortex-file-handler}/blobHandler.js +0 -0
  57. /package/{helper_apps/CortexFileHandler → helper-apps/cortex-file-handler}/docHelper.js +0 -0
  58. /package/{helper_apps/CortexFileHandler → helper-apps/cortex-file-handler}/function.json +0 -0
  59. /package/{helper_apps/CortexFileHandler → helper-apps/cortex-file-handler}/helper.js +0 -0
  60. /package/{helper_apps/CortexFileHandler → helper-apps/cortex-file-handler}/index.js +0 -0
  61. /package/{helper_apps/CortexFileHandler → helper-apps/cortex-file-handler}/localFileHandler.js +0 -0
  62. /package/{helper_apps/CortexFileHandler → helper-apps/cortex-file-handler}/redis.js +0 -0
  63. /package/{helper_apps/CortexFileHandler → helper-apps/cortex-file-handler}/start.js +0 -0
@@ -1,7 +1,7 @@
1
1
  import test from 'ava';
2
2
  import { PathwayResolver } from '../server/pathwayResolver.js';
3
3
  import sinon from 'sinon';
4
- import { mockConfig, mockPathwayString } from './mocks.js';
4
+ import { mockConfig, mockPathwayString, mockModelEndpoints } from './mocks.js';
5
5
 
6
6
  const mockPathway = mockPathwayString;
7
7
  mockPathway.useInputChunking = false;
@@ -16,6 +16,7 @@ test.beforeEach((t) => {
16
16
  config: mockConfig,
17
17
  pathway: mockPathway,
18
18
  args: mockArgs,
19
+ endpoints: mockModelEndpoints,
19
20
  });
20
21
  });
21
22
 
@@ -0,0 +1,94 @@
1
+ import test from 'ava';
2
+ import RequestMonitor from '../lib/requestMonitor.js'; // replace with actual path
3
+
4
+ test('RequestMonitor: startCall', t => {
5
+ const rm = new RequestMonitor();
6
+
7
+ const callId = rm.startCall();
8
+
9
+ t.is(rm.callStartTimes.has(callId), true);
10
+ });
11
+
12
+ test('RequestMonitor: endCall', t => {
13
+ const rm = new RequestMonitor();
14
+
15
+ const callId = rm.startCall();
16
+ rm.endCall(callId);
17
+
18
+ t.is(rm.callStartTimes.has(callId), false);
19
+ t.is(rm.callCount.size(), 1);
20
+ });
21
+
22
+ test('RequestMonitor: getAverageCallDuration', async t => {
23
+ const rm = new RequestMonitor();
24
+
25
+ const callId1 = rm.startCall();
26
+ await new Promise(resolve => setTimeout(resolve, 1000));
27
+ rm.endCall(callId1);
28
+
29
+ const callId2 = rm.startCall();
30
+ await new Promise(resolve => setTimeout(resolve, 2000));
31
+ rm.endCall(callId2);
32
+
33
+ const average = rm.getAverageCallDuration();
34
+ t.truthy(average > 1400 && average < 1600);
35
+ });
36
+
37
+ test('RequestMonitor: incrementError429Count', t => {
38
+ const rm = new RequestMonitor();
39
+
40
+ rm.incrementError429Count();
41
+
42
+ t.is(rm.error429Count.size(), 1);
43
+ });
44
+
45
+ test('RequestMonitor: getCallRate', async t => {
46
+ const rm = new RequestMonitor();
47
+
48
+ rm.startCall();
49
+ rm.endCall();
50
+
51
+ await new Promise(resolve => setTimeout(resolve, 1000));
52
+
53
+ const callRate = rm.getCallRate();
54
+ t.truthy(callRate > 0.9 && callRate < 1.1);
55
+ });
56
+
57
+ test('RequestMonitor: getPeakCallRate', async t => {
58
+ const rm = new RequestMonitor();
59
+
60
+ rm.startCall();
61
+ rm.endCall();
62
+
63
+ await new Promise(resolve => setTimeout(resolve, 1000));
64
+
65
+ rm.startCall();
66
+ rm.endCall();
67
+
68
+ const peakCallRate = rm.getPeakCallRate();
69
+ t.truthy(peakCallRate > 1.9 && peakCallRate < 2.1);
70
+ });
71
+
72
+ test('RequestMonitor: getError429Rate', t => {
73
+ const rm = new RequestMonitor();
74
+
75
+ rm.startCall();
76
+ rm.endCall();
77
+ rm.incrementError429Count();
78
+
79
+ t.is(rm.getError429Rate(), 1);
80
+ });
81
+
82
+ test('RequestMonitor: reset', t => {
83
+ const rm = new RequestMonitor();
84
+
85
+ rm.startCall();
86
+ rm.endCall();
87
+ rm.incrementError429Count();
88
+
89
+ rm.reset();
90
+
91
+ t.is(rm.callCount.size(), 0);
92
+ t.is(rm.error429Count.size(), 0);
93
+ t.is(rm.peakCallRate, 0);
94
+ });
@@ -1,14 +1,14 @@
1
1
  import test from 'ava';
2
- import RequestDurationEstimator from '../lib/requestDurationEstimator.js';
2
+ import RequestMonitor from '../lib/requestMonitor.js';
3
3
 
4
4
  test('add and get average request duration', async (t) => {
5
- const estimator = new RequestDurationEstimator(5);
5
+ const estimator = new RequestMonitor(5);
6
6
 
7
- estimator.startRequest('req1');
7
+ const callid = estimator.startCall();
8
8
  await new Promise(resolve => setTimeout(() => {
9
- estimator.endRequest();
9
+ estimator.endCall(callid);
10
10
 
11
- const average = estimator.calculatePercentComplete();
11
+ const average = estimator.calculatePercentComplete(callid);
12
12
 
13
13
  // An average should be calculated after the first completed request
14
14
  t.not(average, 0);
@@ -17,31 +17,31 @@ test('add and get average request duration', async (t) => {
17
17
  });
18
18
 
19
19
  test('add more requests than size of durations array', (t) => {
20
- const estimator = new RequestDurationEstimator(5);
20
+ const estimator = new RequestMonitor(5);
21
21
 
22
22
  for (let i = 0; i < 10; i++) {
23
- estimator.startRequest(`req${i}`);
24
- estimator.endRequest();
23
+ const callid = estimator.startCall();
24
+ estimator.endCall(callid);
25
25
  }
26
26
 
27
27
  // Array size should not exceed maximum length (5 in this case)
28
- t.is(estimator.durations.length, 5);
28
+ t.is(estimator.callDurations.size(), 5);
29
29
  });
30
30
 
31
31
  test('calculate percent complete of current request based on average of past durations', async (t) => {
32
- const estimator = new RequestDurationEstimator(5);
32
+ const estimator = new RequestMonitor(5);
33
33
 
34
34
  for (let i = 0; i < 4; i++) {
35
- estimator.startRequest(`req${i}`);
35
+ const callid = estimator.startCall();
36
36
  // wait 1 second
37
37
  await new Promise(resolve => setTimeout(resolve, 1000));
38
- estimator.endRequest();
38
+ estimator.endCall(callid);
39
39
  }
40
40
 
41
- estimator.startRequest('req5');
41
+ const callid = estimator.startCall();
42
42
 
43
43
  await new Promise(resolve => setTimeout(() => {
44
- const percentComplete = estimator.calculatePercentComplete();
44
+ const percentComplete = estimator.calculatePercentComplete(callid);
45
45
 
46
46
  // Depending on how fast the operations are,
47
47
  // the percentage may not be exactly 50%, but
@@ -52,8 +52,12 @@ test('calculate percent complete of current request based on average of past dur
52
52
  });
53
53
 
54
54
  test('calculate percent complete based on average of past durations', async (t) => {
55
- const estimator = new RequestDurationEstimator(5);
56
- estimator.durations = [1000, 2000, 3000];
57
- const average = estimator.getAverage();
55
+ const estimator = new RequestMonitor(5);
56
+ estimator.callDurations.clear;
57
+ estimator.callDurations.pushBack({endTime: new Date(), callDuration: 1000});
58
+ estimator.callDurations.pushBack({endTime: new Date(), callDuration: 2000});
59
+ estimator.callDurations.pushBack({endTime: new Date(), callDuration: 3000});
60
+
61
+ const average = estimator.getAverageCallDuration();
58
62
  t.is(average, 2000);
59
63
  });
@@ -6,7 +6,7 @@ import { mockPathwayResolverString } from './mocks.js';
6
6
 
7
7
  const { config, pathway, modelName, model } = mockPathwayResolverString;
8
8
 
9
- const modelPlugin = new ModelPlugin(config, pathway, modelName, model);
9
+ const modelPlugin = new ModelPlugin(pathway, model);
10
10
 
11
11
  const generateMessage = (role, content) => ({ role, content });
12
12
 
package/lib/request.js DELETED
@@ -1,259 +0,0 @@
1
- import Bottleneck from 'bottleneck/es5.js';
2
- import RequestMonitor from './requestMonitor.js';
3
- import { config } from '../config.js';
4
- import axios from 'axios';
5
- import { setupCache } from 'axios-cache-interceptor';
6
- import Redis from 'ioredis';
7
- import logger from './logger.js';
8
-
9
- const connectionString = config.get('storageConnectionString');
10
-
11
- if (!connectionString) {
12
- logger.info('No STORAGE_CONNECTION_STRING found in environment. Redis features (caching, pubsub, clustered limiters) disabled.')
13
- } else {
14
- logger.info('Using Redis connection specified in STORAGE_CONNECTION_STRING.');
15
- }
16
-
17
- let client;
18
-
19
- if (connectionString) {
20
- try {
21
- client = new Redis(connectionString);
22
- } catch (error) {
23
- logger.error(`Redis connection error: ${error}`);
24
- }
25
- }
26
-
27
- const cortexId = config.get('cortexId');
28
- const connection = client && new Bottleneck.IORedisConnection({ client: client });
29
-
30
- const limiters = {};
31
- const monitors = {};
32
-
33
- const buildLimiters = (config) => {
34
- logger.info(`Building ${connection ? 'Redis clustered' : 'local'} model rate limiters for ${cortexId}...`);
35
- for (const [name, model] of Object.entries(config.get('models'))) {
36
- const rps = model.requestsPerSecond ?? 100;
37
- let limiterOptions = {
38
- minTime: 1000 / rps,
39
- maxConcurrent: rps,
40
- reservoir: rps, // Number of tokens available initially
41
- reservoirRefreshAmount: rps, // Number of tokens added per interval
42
- reservoirRefreshInterval: 1000, // Interval in milliseconds
43
- };
44
-
45
- // If Redis connection exists, add id and connection to enable clustering
46
- if (connection) {
47
- limiterOptions.id = `${cortexId}-${name}-limiter`; // Unique id for each limiter
48
- limiterOptions.connection = connection; // Shared Redis connection
49
- }
50
-
51
- limiters[name] = new Bottleneck(limiterOptions);
52
- limiters[name].on('error', (err) => {
53
- logger.error(`Limiter error for ${cortexId}-${name}: ${err}`);
54
- });
55
- monitors[name] = new RequestMonitor();
56
- }
57
- }
58
-
59
- let cortexAxios = axios;
60
-
61
- if (config.get('enableCache')) {
62
- // Setup cache
63
- cortexAxios = setupCache(axios, {
64
- // enable cache for all requests by default
65
- methods: ['get', 'post', 'put', 'delete', 'patch'],
66
- interpretHeader: false,
67
- ttl: 1000 * 60 * 60 * 24 * 7, // 7 days
68
- });
69
- }
70
-
71
- setInterval(() => {
72
- const monitorKeys = Object.keys(monitors);
73
-
74
- // Skip logging if the monitors object does not exist or is empty
75
- if (!monitorKeys || monitorKeys.length === 0) {
76
- return;
77
- }
78
-
79
- monitorKeys.forEach((monitorName) => {
80
- const monitor = monitors[monitorName];
81
- const callRate = monitor.getPeakCallRate();
82
- const error429Rate = monitor.getError429Rate();
83
- if (callRate > 0) {
84
- logger.info('------------------------');
85
- logger.info(`${monitorName} Call rate: ${callRate} calls/sec, 429 errors: ${error429Rate * 100}%`);
86
- logger.info('------------------------');
87
- // Reset the rate monitor to start a new monitoring interval.
88
- monitor.reset();
89
- }
90
- });
91
- }, 10000); // Log rates every 10 seconds (10000 ms).
92
-
93
- const postWithMonitor = async (model, url, data, axiosConfigObj) => {
94
- const monitor = monitors[model];
95
- monitor.incrementCallCount();
96
- return cortexAxios.post(url, data, axiosConfigObj);
97
- }
98
-
99
- const MAX_RETRY = 10; // retries for error handling
100
- const MAX_DUPLICATE_REQUESTS = 3; // duplicate requests to manage latency spikes
101
- const DUPLICATE_REQUEST_AFTER = 10; // 10 seconds
102
-
103
- const postRequest = async ({ url, data, params, headers, cache }, model, requestId, pathway) => {
104
- let promises = [];
105
- for (let i = 0; i < MAX_RETRY; i++) {
106
- const modelProperties = config.get('models')[model];
107
- const enableDuplicateRequests = pathway?.enableDuplicateRequests !== undefined ? pathway.enableDuplicateRequests : config.get('enableDuplicateRequests');
108
- let maxDuplicateRequests = enableDuplicateRequests ? MAX_DUPLICATE_REQUESTS : 1;
109
- let duplicateRequestAfter = (pathway?.duplicateRequestAfter || DUPLICATE_REQUEST_AFTER) * 1000;
110
-
111
- if (enableDuplicateRequests) {
112
- //logger.info(`>>> [${requestId}] Duplicate requests enabled after ${duplicateRequestAfter / 1000} seconds`);
113
- }
114
-
115
- const axiosConfigObj = { params, headers, cache };
116
- const streamRequested = (params?.stream || data?.stream);
117
- if (streamRequested && modelProperties.supportsStreaming) {
118
- axiosConfigObj.responseType = 'stream';
119
- promises.push(limiters[model].schedule(() => postWithMonitor(model, url, data, axiosConfigObj)));
120
- } else {
121
- if (streamRequested) {
122
- logger.info(`>>> [${requestId}] ${model} does not support streaming - sending non-streaming request`);
123
- axiosConfigObj.params.stream = false;
124
- data.stream = false;
125
- }
126
- const controllers = Array.from({ length: maxDuplicateRequests }, () => new AbortController());
127
- promises = controllers.map((controller, index) =>
128
- new Promise((resolve, reject) => {
129
- const duplicateRequestTime = duplicateRequestAfter * Math.pow(2, index) - duplicateRequestAfter;
130
- const jitter = duplicateRequestTime * 0.2 * Math.random();
131
- const duplicateRequestTimeout = Math.max(0, duplicateRequestTime + jitter);
132
- setTimeout(async () => {
133
- try {
134
- if (!limiters[model]) {
135
- throw new Error(`No limiter for model ${model}!`);
136
- }
137
- const axiosConfigObj = { params, headers, cache };
138
-
139
- let response = null;
140
-
141
- if (!controller.signal?.aborted) {
142
-
143
- axiosConfigObj.signal = controller.signal;
144
- axiosConfigObj.headers['X-Cortex-Request-Index'] = index;
145
-
146
- if (index === 0) {
147
- //logger.info(`>>> [${requestId}] sending request to ${model} API ${axiosConfigObj.responseType === 'stream' ? 'with streaming' : ''}`);
148
- } else {
149
- if (modelProperties.supportsStreaming) {
150
- axiosConfigObj.responseType = 'stream';
151
- axiosConfigObj.cache = false;
152
- }
153
- const logMessage = `>>> [${requestId}] taking too long - sending duplicate request ${index} to ${model} API ${axiosConfigObj.responseType === 'stream' ? 'with streaming' : ''}`;
154
- const header = '>'.repeat(logMessage.length);
155
- logger.info(`\n${header}\n${logMessage}`);
156
- }
157
-
158
- response = await limiters[model].schedule(() => postWithMonitor(model, url, data, axiosConfigObj));
159
-
160
- if (!controller.signal?.aborted) {
161
-
162
- //logger.info(`<<< [${requestId}] received response for request ${index}`);
163
-
164
- if (axiosConfigObj.responseType === 'stream') {
165
- // Buffering and collecting the stream data
166
- logger.info(`<<< [${requestId}] buffering streaming response for request ${index}`);
167
- response = await new Promise((resolve, reject) => {
168
- let responseData = '';
169
- response.data.on('data', (chunk) => {
170
- responseData += chunk;
171
- //logger.info(`<<< [${requestId}] received chunk for request ${index}`);
172
- });
173
- response.data.on('end', () => {
174
- response.data = JSON.parse(responseData);
175
- resolve(response);
176
- });
177
- response.data.on('error', (error) => {
178
- reject(error);
179
- });
180
- });
181
- }
182
- }
183
- }
184
-
185
- resolve(response);
186
-
187
- } catch (error) {
188
- if (error.name === 'AbortError' || error.name === 'CanceledError') {
189
- //logger.info(`XXX [${requestId}] request ${index} was cancelled`);
190
- reject(error);
191
- } else {
192
- logger.info(`!!! [${requestId}] request ${index} failed with error: ${error?.response?.data?.error?.message || error}`);
193
- reject(error);
194
- }
195
- } finally {
196
- controllers.forEach(controller => controller.abort());
197
- }
198
- }, duplicateRequestTimeout);
199
- })
200
- );
201
- }
202
-
203
- try {
204
- const response = await Promise.race(promises);
205
-
206
- // if response status is 2xx
207
- if (response.status >= 200 && response.status < 300) {
208
- return response;
209
- } else {
210
- throw new Error(`Received error response: ${response.status}`);
211
- }
212
- } catch (error) {
213
- //logger.error(`!!! [${requestId}] failed request with data ${JSON.stringify(data)}: ${error}`);
214
- if (error.response) {
215
- const status = error.response.status;
216
- if ((status === 429) || (status >= 500 && status < 600)) {
217
- if (status === 429) {
218
- monitors[model].incrementError429Count();
219
- }
220
- logger.info(`>>> [${requestId}] retrying request due to ${status} response. Retry count: ${i + 1}`);
221
- if (i < MAX_RETRY - 1) {
222
- const backoffTime = 200 * Math.pow(2, i);
223
- const jitter = backoffTime * 0.2 * Math.random();
224
- await new Promise(r => setTimeout(r, backoffTime + jitter));
225
- } else {
226
- throw error;
227
- }
228
- } else {
229
- throw error;
230
- }
231
- } else {
232
- throw error;
233
- }
234
- }
235
- }
236
- };
237
-
238
- const request = async (params, model, requestId, pathway) => {
239
- try {
240
- const response = await postRequest(params, model, requestId, pathway);
241
- const { error, data, cached } = response;
242
- if (cached) {
243
- logger.info(`<<< [${requestId}] served with cached response.`);
244
- }
245
- if (error && error.length > 0) {
246
- const lastError = error[error.length - 1];
247
- return { error: lastError.toJSON() ?? lastError ?? error };
248
- }
249
- //logger.info(`<<< [${requestId}] response: ${data.choices[0].delta || data.choices[0]}`)
250
- return data;
251
- } catch (error) {
252
- logger.error(`Error in request: ${error.message || error}`);
253
- return { error: error };
254
- }
255
- }
256
-
257
- export {
258
- axios, request, postRequest, buildLimiters
259
- };
@@ -1,90 +0,0 @@
1
- /**
2
- * A class to get request durations and estimate their average.
3
- */
4
- export default class RequestDurationEstimator {
5
- // Initializing the class with given number of durations to track.
6
- constructor(n = 10) {
7
- this.n = n; // Number of last durations to consider
8
- this.durations = []; // List to keep track of last n durations
9
- }
10
-
11
- /**
12
- * Private method to add a request duration to the durations list.
13
- * If the list is full (n durations already), the oldest duration is removed.
14
- * @param {number} duration - The duration of the request
15
- */
16
- #add(duration) {
17
- this.durations.push(duration);
18
- // Remove the oldest duration if we have stored n durations
19
- if (this.durations.length > this.n) {
20
- this.durations.shift();
21
- }
22
- }
23
-
24
- /**
25
- * To be invoked when a request starts.
26
- * If there is an ongoing request, it ends that request.
27
- * @param {string} requestId - The ID of the request
28
- */
29
- startRequest(requestId) {
30
- // If there is an ongoing request, end it
31
- if (this.requestId) {
32
- this.endRequest();
33
- }
34
-
35
- // Store the starting details of the new request
36
- this.requestId = requestId;
37
- this.startTime = Date.now();
38
- }
39
-
40
- /**
41
- * To be invoked when a request ends.
42
- * Calculates the duration of the request and adds it to the durations list.
43
- */
44
- endRequest() {
45
- // If there is an ongoing request, add its duration to the durations list
46
- if (this.requestId) {
47
- this.#add(Date.now() - this.startTime);
48
- this.requestId = null;
49
- }
50
- }
51
-
52
- /**
53
- * Calculate and return the average of the request durations.
54
- * @return {number} The average request duration
55
- */
56
- getAverage() {
57
- // If no duration is stored, return 0
58
- if (!this.durations.length) {
59
- return 0;
60
- }
61
-
62
- // Calculate the sum of the durations and divide by the number of durations to get the average
63
- return this.durations.reduce((a, b) => a + b) / this.durations.length;
64
- }
65
-
66
- /**
67
- * Calculate the percentage completion of the current request based on the average of past durations.
68
- * @return {number} The estimated percent completion of the ongoing request
69
- */
70
- calculatePercentComplete() {
71
- // If no duration is stored, return 0
72
- if (!this.durations.length) {
73
- return 0;
74
- }
75
-
76
-
77
- // Calculate the duration of the current request
78
- const duration = Date.now() - this.startTime;
79
- // Get the average of the durations
80
- const average = this.getAverage();
81
- // Calculate the percentage completion
82
- let percentComplete = duration / average;
83
-
84
- if (percentComplete > .8) {
85
- percentComplete = 0.8;
86
- }
87
-
88
- return percentComplete;
89
- }
90
- }