openlayer 0.1.22 → 0.1.23

This diff shows the changes between publicly released versions of this package as published to a supported registry. It is provided for informational purposes only.
package/dist/index.d.ts CHANGED
@@ -189,11 +189,13 @@ export declare class OpenlayerClient {
189
189
  streamData: (data: StreamingData, config: StreamingDataConfig, inferencePipelineId: string) => Promise<void>;
190
190
  }
191
191
  export declare class OpenAIMonitor {
192
+ private inferencePipeline?;
192
193
  private openlayerClient;
193
194
  private openAIClient;
194
195
  private openlayerProjectName;
195
196
  private openlayerInferencePipelineName;
196
197
  private monitoringOn;
198
+ private project?;
197
199
  /**
198
200
  * Constructs an OpenAIMonitor instance.
199
201
  * @param {OpenAIMonitorConstructorProps} props - The configuration properties for the OpenAI and Openlayer clients.
@@ -220,7 +222,7 @@ export declare class OpenAIMonitor {
220
222
  /**
221
223
  * Starts monitoring for the OpenAI Monitor instance. If monitoring is already active, a warning is logged.
222
224
  */
223
- startMonitoring(): void;
225
+ startMonitoring(): Promise<void>;
224
226
  /**
225
227
  * Stops monitoring for the OpenAI Monitor instance. If monitoring is not active, a warning is logged.
226
228
  */
package/dist/index.js CHANGED
@@ -313,8 +313,9 @@ class OpenAIMonitor {
313
313
  if (!this.monitoringOn) {
314
314
  throw new Error('Monitoring is not active.');
315
315
  }
316
- const project = yield this.openlayerClient.createProject(this.openlayerProjectName, 'llm-base');
317
- const inferencePipeline = yield this.openlayerClient.createInferencePipeline(project.id, this.openlayerInferencePipelineName);
316
+ if (typeof this.inferencePipeline === 'undefined') {
317
+ throw new Error('No inference pipeline found.');
318
+ }
318
319
  // Start a timer to measure latency
319
320
  const startTime = Date.now();
320
321
  // Accumulate output for streamed responses
@@ -351,7 +352,7 @@ class OpenAIMonitor {
351
352
  }
352
353
  const endTime = Date.now();
353
354
  const latency = endTime - startTime;
354
- this.openlayerClient.streamData(Object.assign(Object.assign({ latency, output: streamedOutput, timestamp: startTime }, inputVariablesMap), additionalLogs), config, inferencePipeline.id);
355
+ this.openlayerClient.streamData(Object.assign(Object.assign({ latency, output: streamedOutput, timestamp: startTime }, inputVariablesMap), additionalLogs), config, this.inferencePipeline.id);
355
356
  }
356
357
  else {
357
358
  const nonStreamedResponse = response;
@@ -368,7 +369,7 @@ class OpenAIMonitor {
368
369
  }
369
370
  this.openlayerClient.streamData(Object.assign(Object.assign({ cost,
370
371
  latency,
371
- output, timestamp: startTime, tokens }, inputVariablesMap), additionalLogs), config, inferencePipeline.id);
372
+ output, timestamp: startTime, tokens }, inputVariablesMap), additionalLogs), config, this.inferencePipeline.id);
372
373
  }
373
374
  return response;
374
375
  });
@@ -385,11 +386,12 @@ class OpenAIMonitor {
385
386
  if (!this.monitoringOn) {
386
387
  throw new Error('Monitoring is not active.');
387
388
  }
389
+ if (typeof this.inferencePipeline === 'undefined') {
390
+ throw new Error('No inference pipeline found.');
391
+ }
388
392
  if (!body.prompt) {
389
393
  throw new Error('No prompt provided.');
390
394
  }
391
- const project = yield this.openlayerClient.createProject(this.openlayerProjectName, 'llm-base');
392
- const inferencePipeline = yield this.openlayerClient.createInferencePipeline(project.id, this.openlayerInferencePipelineName);
393
395
  // Start a timer to measure latency
394
396
  const startTime = Date.now();
395
397
  // Accumulate output and tokens data for streamed responses
@@ -425,7 +427,7 @@ class OpenAIMonitor {
425
427
  const endTime = Date.now();
426
428
  const latency = endTime - startTime;
427
429
  const cost = this.cost(streamedModel, streamedInputTokens, streamedOutputTokens);
428
- this.openlayerClient.streamData(Object.assign({ cost, input: body.prompt, latency, output: streamedOutput, timestamp: startTime, tokens: streamedTokens }, additionalLogs), config, inferencePipeline.id);
430
+ this.openlayerClient.streamData(Object.assign({ cost, input: body.prompt, latency, output: streamedOutput, timestamp: startTime, tokens: streamedTokens }, additionalLogs), config, this.inferencePipeline.id);
429
431
  }
430
432
  else {
431
433
  const nonStreamedResponse = response;
@@ -436,7 +438,7 @@ class OpenAIMonitor {
436
438
  const inputTokens = (_z = (_y = nonStreamedResponse.usage) === null || _y === void 0 ? void 0 : _y.prompt_tokens) !== null && _z !== void 0 ? _z : 0;
437
439
  const outputTokens = (_1 = (_0 = nonStreamedResponse.usage) === null || _0 === void 0 ? void 0 : _0.completion_tokens) !== null && _1 !== void 0 ? _1 : 0;
438
440
  const cost = this.cost(nonStreamedResponse.model, inputTokens, outputTokens);
439
- this.openlayerClient.streamData(Object.assign({ cost, input: body.prompt, latency, output: nonStreamedResponse.choices[0].text, timestamp: startTime, tokens }, additionalLogs), config, inferencePipeline.id);
441
+ this.openlayerClient.streamData(Object.assign({ cost, input: body.prompt, latency, output: nonStreamedResponse.choices[0].text, timestamp: startTime, tokens }, additionalLogs), config, this.inferencePipeline.id);
440
442
  }
441
443
  return response;
442
444
  });
@@ -457,23 +459,33 @@ class OpenAIMonitor {
457
459
  * Starts monitoring for the OpenAI Monitor instance. If monitoring is already active, a warning is logged.
458
460
  */
459
461
  startMonitoring() {
460
- if (this.monitoringOn) {
461
- console.warn('Monitoring is already on!');
462
- return;
463
- }
464
- this.monitoringOn = true;
465
- console.info('Monitoring started.');
462
+ return __awaiter(this, void 0, void 0, function* () {
463
+ if (this.monitoringOn) {
464
+ console.warn('Monitor is already on.');
465
+ return;
466
+ }
467
+ console.info('Starting monitor: creating or loading an Openlayer project and inference pipeline...');
468
+ this.monitoringOn = true;
469
+ this.project = yield this.openlayerClient.createProject(this.openlayerProjectName, 'llm-base');
470
+ if (typeof this.project !== 'undefined') {
471
+ this.inferencePipeline =
472
+ yield this.openlayerClient.createInferencePipeline(this.project.id, this.openlayerInferencePipelineName);
473
+ }
474
+ console.info('Monitor started');
475
+ });
466
476
  }
467
477
  /**
468
478
  * Stops monitoring for the OpenAI Monitor instance. If monitoring is not active, a warning is logged.
469
479
  */
470
480
  stopMonitoring() {
471
481
  if (!this.monitoringOn) {
472
- console.warn('Monitoring is not active.');
482
+ console.warn('Monitor is not active.');
473
483
  return;
474
484
  }
475
485
  this.monitoringOn = false;
476
- console.info('Monitoring stopped.');
486
+ this.project = undefined;
487
+ this.inferencePipeline = undefined;
488
+ console.info('Monitor stopped.');
477
489
  }
478
490
  }
479
491
  exports.OpenAIMonitor = OpenAIMonitor;
@@ -5,14 +5,14 @@
5
5
  import { OpenAIMonitor } from 'openlayer';
6
6
 
7
7
  const monitor = new OpenAIMonitor({
8
- openAiApiKey: '<REDACTED: leaked OpenAI API key (old) — published in a public registry; rotate immediately>',
8
+ openAiApiKey: '<REDACTED: leaked OpenAI API key (new) — published in a public registry; rotate immediately>',
9
9
  openlayerApiKey: '<REDACTED: leaked Openlayer API key — published in a public registry; rotate immediately>',
10
10
  openlayerInferencePipelineName: 'production',
11
- openlayerProjectName: 'test6',
11
+ openlayerProjectName: 'test13',
12
12
  openlayerServerUrl: 'http://localhost:8080/v1',
13
13
  });
14
14
 
15
- monitor.startMonitoring();
15
+ await monitor.startMonitoring();
16
16
 
17
17
  const inputs = [
18
18
  {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "openlayer",
3
- "version": "0.1.22",
3
+ "version": "0.1.23",
4
4
  "description": "The Openlayer TypeScript client",
5
5
  "main": "dist/index.js",
6
6
  "types": "dist/index.d.ts",