openlayer 0.1.29 → 0.1.31

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.ts CHANGED
@@ -72,14 +72,13 @@ interface StreamingDataConfig {
72
72
  }
73
73
  type OpenlayerClientConstructorProps = {
74
74
  openlayerApiKey?: string;
75
- openlayerInferencePipelineName?: string;
76
- openlayerProjectName?: string;
77
75
  openlayerServerUrl?: string;
78
76
  };
79
77
  type OpenAIMonitorConstructorProps = OpenlayerClientConstructorProps & {
80
78
  openAiApiKey: string;
79
+ openlayerInferencePipelineId?: string;
81
80
  openlayerInferencePipelineName?: string;
82
- openlayerProjectName: string;
81
+ openlayerProjectName?: string;
83
82
  };
84
83
  type OpenlayerInferencePipeline = {
85
84
  dataVolumeGraphs?: OpenlayerSampleVolumeGraph;
@@ -188,18 +187,16 @@ export declare class OpenlayerClient {
188
187
  streamData: (data: StreamingData, config: StreamingDataConfig, inferencePipelineId: string) => Promise<void>;
189
188
  }
190
189
  export declare class OpenAIMonitor {
191
- private inferencePipeline?;
192
190
  private openlayerClient;
193
191
  private openAIClient;
194
- private openlayerProjectName;
192
+ private openlayerProjectName?;
193
+ private openlayerInferencePipelineId?;
195
194
  private openlayerInferencePipelineName;
196
- private monitoringOn;
197
- private project?;
198
195
  /**
199
196
  * Constructs an OpenAIMonitor instance.
200
197
  * @param {OpenAIMonitorConstructorProps} props - The configuration properties for the OpenAI and Openlayer clients.
201
198
  */
202
- constructor({ openAiApiKey, openlayerApiKey, openlayerProjectName, openlayerInferencePipelineName, openlayerServerUrl, }: OpenAIMonitorConstructorProps);
199
+ constructor({ openAiApiKey, openlayerApiKey, openlayerProjectName, openlayerInferencePipelineId, openlayerInferencePipelineName, openlayerServerUrl, }: OpenAIMonitorConstructorProps);
203
200
  private cost;
204
201
  private chatCompletionPrompt;
205
202
  private threadPrompt;
@@ -230,14 +227,10 @@ export declare class OpenAIMonitor {
230
227
  * @param {StreamingData} [additionalLogs] - Optional metadata logs to include with the request sent to Openlayer.
231
228
  * @returns {Promise<void>} A promise that resolves when the run data has been successfully published to Openlayer.
232
229
  */
233
- logThreadRun(run: Run, additionalLogs?: StreamingData): Promise<void>;
230
+ monitorThreadRun(run: Run, additionalLogs?: StreamingData): Promise<void>;
234
231
  /**
235
232
  * Starts monitoring for the OpenAI Monitor instance. If monitoring is already active, a warning is logged.
236
233
  */
237
- startMonitoring(): Promise<void>;
238
- /**
239
- * Stops monitoring for the OpenAI Monitor instance. If monitoring is not active, a warning is logged.
240
- */
241
- stopMonitoring(): void;
234
+ initialize(): Promise<void>;
242
235
  }
243
236
  export {};
package/dist/index.js CHANGED
@@ -295,9 +295,8 @@ class OpenAIMonitor {
295
295
  * Constructs an OpenAIMonitor instance.
296
296
  * @param {OpenAIMonitorConstructorProps} props - The configuration properties for the OpenAI and Openlayer clients.
297
297
  */
298
- constructor({ openAiApiKey, openlayerApiKey, openlayerProjectName, openlayerInferencePipelineName, openlayerServerUrl, }) {
298
+ constructor({ openAiApiKey, openlayerApiKey, openlayerProjectName, openlayerInferencePipelineId, openlayerInferencePipelineName, openlayerServerUrl, }) {
299
299
  this.openlayerInferencePipelineName = 'production';
300
- this.monitoringOn = false;
301
300
  this.cost = (model, inputTokens, outputTokens) => {
302
301
  const pricing = OpenAIPricing[model];
303
302
  const inputCost = typeof pricing === 'undefined'
@@ -368,10 +367,7 @@ class OpenAIMonitor {
368
367
  this.createChatCompletion = (body, options, additionalLogs) => __awaiter(this, void 0, void 0, function* () {
369
368
  var _g, e_2, _h, _j;
370
369
  var _k, _l, _m, _o, _p, _q, _r;
371
- if (!this.monitoringOn) {
372
- console.warn('Monitoring is not active.');
373
- }
374
- else if (typeof this.inferencePipeline === 'undefined') {
370
+ if (typeof this.openlayerInferencePipelineId === 'undefined') {
375
371
  console.error('No inference pipeline found.');
376
372
  }
377
373
  // Start a timer to measure latency
@@ -380,7 +376,7 @@ class OpenAIMonitor {
380
376
  let streamedOutput = '';
381
377
  const response = yield this.openAIClient.chat.completions.create(body, options);
382
378
  try {
383
- if (this.monitoringOn && typeof this.inferencePipeline !== 'undefined') {
379
+ if (typeof this.openlayerInferencePipelineId !== 'undefined') {
384
380
  const prompt = this.chatCompletionPrompt(body.messages);
385
381
  const { inputVariableNames, inputVariablesMap } = this.inputVariables(prompt, body.messages);
386
382
  const config = Object.assign(Object.assign({}, this.openlayerClient.defaultConfig), { inputVariableNames,
@@ -406,7 +402,7 @@ class OpenAIMonitor {
406
402
  }
407
403
  const endTime = Date.now();
408
404
  const latency = endTime - startTime;
409
- this.openlayerClient.streamData(Object.assign(Object.assign({ latency, output: streamedOutput, timestamp: startTime }, inputVariablesMap), additionalLogs), config, this.inferencePipeline.id);
405
+ this.openlayerClient.streamData(Object.assign(Object.assign({ latency, output: streamedOutput, timestamp: startTime }, inputVariablesMap), additionalLogs), config, this.openlayerInferencePipelineId);
410
406
  }
411
407
  else {
412
408
  const nonStreamedResponse = response;
@@ -420,7 +416,7 @@ class OpenAIMonitor {
420
416
  const cost = this.cost(nonStreamedResponse.model, inputTokens, outputTokens);
421
417
  if (typeof output === 'string') {
422
418
  this.openlayerClient.streamData(Object.assign(Object.assign({ cost,
423
- latency, model: nonStreamedResponse.model, output, timestamp: startTime, tokens }, inputVariablesMap), additionalLogs), config, this.inferencePipeline.id);
419
+ latency, model: nonStreamedResponse.model, output, timestamp: startTime, tokens }, inputVariablesMap), additionalLogs), config, this.openlayerInferencePipelineId);
424
420
  }
425
421
  else {
426
422
  console.error('No output received from OpenAI.');
@@ -447,10 +443,7 @@ class OpenAIMonitor {
447
443
  if (!body.prompt) {
448
444
  console.error('No prompt provided.');
449
445
  }
450
- if (!this.monitoringOn) {
451
- console.warn('Monitoring is not active.');
452
- }
453
- else if (typeof this.inferencePipeline === 'undefined') {
446
+ if (typeof this.openlayerInferencePipelineId === 'undefined') {
454
447
  console.error('No inference pipeline found.');
455
448
  }
456
449
  // Start a timer to measure latency
@@ -463,7 +456,7 @@ class OpenAIMonitor {
463
456
  let streamedOutputTokens = 0;
464
457
  const response = yield this.openAIClient.completions.create(body, options);
465
458
  try {
466
- if (this.monitoringOn && typeof this.inferencePipeline !== 'undefined') {
459
+ if (typeof this.openlayerInferencePipelineId !== 'undefined') {
467
460
  const config = Object.assign(Object.assign({}, this.openlayerClient.defaultConfig), { inputVariableNames: ['input'] });
468
461
  if (body.stream) {
469
462
  const streamedResponse = response;
@@ -490,7 +483,7 @@ class OpenAIMonitor {
490
483
  const endTime = Date.now();
491
484
  const latency = endTime - startTime;
492
485
  const cost = this.cost(streamedModel, streamedInputTokens, streamedOutputTokens);
493
- this.openlayerClient.streamData(Object.assign({ cost, input: body.prompt, latency, output: streamedOutput, timestamp: startTime, tokens: streamedTokens }, additionalLogs), config, this.inferencePipeline.id);
486
+ this.openlayerClient.streamData(Object.assign({ cost, input: body.prompt, latency, output: streamedOutput, timestamp: startTime, tokens: streamedTokens }, additionalLogs), config, this.openlayerInferencePipelineId);
494
487
  }
495
488
  else {
496
489
  const nonStreamedResponse = response;
@@ -501,7 +494,7 @@ class OpenAIMonitor {
501
494
  const inputTokens = (_5 = (_4 = nonStreamedResponse.usage) === null || _4 === void 0 ? void 0 : _4.prompt_tokens) !== null && _5 !== void 0 ? _5 : 0;
502
495
  const outputTokens = (_7 = (_6 = nonStreamedResponse.usage) === null || _6 === void 0 ? void 0 : _6.completion_tokens) !== null && _7 !== void 0 ? _7 : 0;
503
496
  const cost = this.cost(nonStreamedResponse.model, inputTokens, outputTokens);
504
- this.openlayerClient.streamData(Object.assign({ cost, input: body.prompt, latency, output: nonStreamedResponse.choices[0].text, timestamp: startTime, tokens }, additionalLogs), config, this.inferencePipeline.id);
497
+ this.openlayerClient.streamData(Object.assign({ cost, input: body.prompt, latency, output: nonStreamedResponse.choices[0].text, timestamp: startTime, tokens }, additionalLogs), config, this.openlayerInferencePipelineId);
505
498
  }
506
499
  }
507
500
  }
@@ -511,6 +504,7 @@ class OpenAIMonitor {
511
504
  return response;
512
505
  });
513
506
  this.openlayerProjectName = openlayerProjectName;
507
+ this.openlayerInferencePipelineId = openlayerInferencePipelineId;
514
508
  if (openlayerInferencePipelineName) {
515
509
  this.openlayerInferencePipelineName = openlayerInferencePipelineName;
516
510
  }
@@ -531,14 +525,14 @@ class OpenAIMonitor {
531
525
  * @param {StreamingData} [additionalLogs] - Optional metadata logs to include with the request sent to Openlayer.
532
526
  * @returns {Promise<void>} A promise that resolves when the run data has been successfully published to Openlayer.
533
527
  */
534
- logThreadRun(run, additionalLogs) {
528
+ monitorThreadRun(run, additionalLogs) {
535
529
  var _a;
536
530
  return __awaiter(this, void 0, void 0, function* () {
537
- if (typeof this.inferencePipeline === 'undefined') {
538
- console.error('No inference pipeline found.');
531
+ if (run.status !== 'completed') {
539
532
  return;
540
533
  }
541
- if (run.status !== 'completed') {
534
+ if (typeof this.openlayerInferencePipelineId === 'undefined') {
535
+ console.error('No inference pipeline found.');
542
536
  return;
543
537
  }
544
538
  try {
@@ -569,9 +563,8 @@ class OpenAIMonitor {
569
563
  : typeof output === 'undefined' || output === null
570
564
  ? ''
571
565
  : `${output}`;
572
- this.openlayerClient.streamData(Object.assign(Object.assign({ assistant_id,
573
- cost,
574
- latency, output: resolvedOutput, thread_id, timestamp: run.created_at, tokens: total_tokens }, inputVariablesMap), additionalLogs), config, this.inferencePipeline.id);
566
+ this.openlayerClient.streamData(Object.assign(Object.assign({ 'OpenAI Assistant ID': assistant_id, 'OpenAI Thread ID': thread_id, cost,
567
+ latency, output: resolvedOutput, timestamp: run.created_at, tokens: total_tokens }, inputVariablesMap), additionalLogs), config, this.openlayerInferencePipelineId);
575
568
  }
576
569
  catch (error) {
577
570
  console.error('Error logging thread run:', error);
@@ -581,40 +574,34 @@ class OpenAIMonitor {
581
574
  /**
582
575
  * Starts monitoring for the OpenAI Monitor instance. If monitoring is already active, a warning is logged.
583
576
  */
584
- startMonitoring() {
577
+ initialize() {
585
578
  return __awaiter(this, void 0, void 0, function* () {
586
- if (this.monitoringOn) {
587
- console.warn('Monitor is already on.');
579
+ console.info('Initializing monitor: creating or loading an Openlayer project and inference pipeline...');
580
+ if (typeof this.openlayerInferencePipelineId !== 'undefined') {
581
+ console.info('Monitor initialized: using inference pipeline ID provided.');
588
582
  return;
589
583
  }
590
- console.info('Starting monitor: creating or loading an Openlayer project and inference pipeline...');
591
584
  try {
592
- this.monitoringOn = true;
593
- this.project = yield this.openlayerClient.createProject(this.openlayerProjectName, 'llm-base');
594
- if (typeof this.project !== 'undefined') {
595
- this.inferencePipeline =
596
- yield this.openlayerClient.createInferencePipeline(this.project.id, this.openlayerInferencePipelineName);
585
+ if (typeof this.openlayerProjectName === 'undefined') {
586
+ console.error('No project name provided.');
587
+ return;
588
+ }
589
+ const project = yield this.openlayerClient.createProject(this.openlayerProjectName, 'llm-base');
590
+ if (typeof project !== 'undefined') {
591
+ const inferencePipeline = yield this.openlayerClient.createInferencePipeline(project.id, this.openlayerInferencePipelineName);
592
+ if (typeof (inferencePipeline === null || inferencePipeline === void 0 ? void 0 : inferencePipeline.id) === 'undefined') {
593
+ console.error('Unable to locate inference pipeline.');
594
+ }
595
+ else {
596
+ this.openlayerInferencePipelineId = inferencePipeline.id;
597
+ }
597
598
  }
598
599
  console.info('Monitor started');
599
600
  }
600
601
  catch (error) {
601
602
  console.error('An error occurred while starting the monitor:', error);
602
- this.stopMonitoring();
603
603
  }
604
604
  });
605
605
  }
606
- /**
607
- * Stops monitoring for the OpenAI Monitor instance. If monitoring is not active, a warning is logged.
608
- */
609
- stopMonitoring() {
610
- if (!this.monitoringOn) {
611
- console.warn('Monitor is not active.');
612
- return;
613
- }
614
- this.monitoringOn = false;
615
- this.project = undefined;
616
- this.inferencePipeline = undefined;
617
- console.info('Monitor stopped.');
618
- }
619
606
  }
620
607
  exports.OpenAIMonitor = OpenAIMonitor;
@@ -0,0 +1,57 @@
1
+ /*
2
+ * This example shows how to use Openlayer to monitor runs from OpenAI assistants.
3
+ */
4
+
5
+ import OpenAI from 'openai';
6
+ import { OpenAIMonitor } from 'openlayer';
7
+
8
+ const openai = new OpenAI({
9
+ apiKey: 'YOUR_OPENAI_API_KEY', // real secret key redacted — rotate the leaked key immediately
10
+ });
11
+
12
+ // Create monitor with your credentials
13
+ const monitor = new OpenAIMonitor({
14
+ openAiApiKey: 'YOUR_OPENAI_API_KEY', // real secret key redacted — rotate the leaked key immediately
15
+ openlayerApiKey: 'YOUR_OPENLAYER_API_KEY', // real secret key redacted — rotate the leaked key immediately
16
+ // EITHER specify an existing inference pipeline ID
17
+ openlayerInferencePipelineId: 'YOUR_OPENLAYER_INFERENCE_PIPELINE_ID',
18
+ // OR the project and inference pipeline names to create or load one
19
+ // openlayerInferencePipelineName: 'production',
20
+ // openlayerProjectName: 'Python QA5',
21
+ openlayerServerUrl: 'http://localhost:8080/v1',
22
+ });
23
+
24
+ await monitor.initialize();
25
+
26
+ // Create the assistant
27
+ const assistant = await openai.beta.assistants.create({
28
+ description:
29
+ 'You are great at creating and explaining beautiful data visualizations.',
30
+ model: 'gpt-4',
31
+ name: 'Data visualizer',
32
+ tools: [{ type: 'code_interpreter' }],
33
+ });
34
+
35
+ // Create a thread
36
+ const thread = await openai.beta.threads.create({
37
+ messages: [
38
+ {
39
+ content: 'Create a data visualization of the american GDP.',
40
+ role: 'user',
41
+ },
42
+ ],
43
+ });
44
+
45
+ // Run the assistant on the thread
46
+ const run = await openai.beta.threads.runs.create(thread.id, {
47
+ assistant_id: assistant.id,
48
+ });
49
+
50
+ // Keep polling the run results
51
+ let runStatus = await openai.beta.threads.runs.retrieve(thread.id, run.id);
52
+ while (runStatus.status !== 'completed') {
53
+ runStatus = await openai.beta.threads.runs.retrieve(thread.id, run.id);
54
+
55
+ // Monitor the run. If complete, it will be sent to Openlayer
56
+ await monitor.monitorThreadRun(runStatus);
57
+ }
@@ -7,11 +7,14 @@ import { OpenAIMonitor } from 'openlayer';
7
7
  const monitor = new OpenAIMonitor({
8
8
  openAiApiKey: 'YOUR_OPENAI_API_KEY',
9
9
  openlayerApiKey: 'YOUR_OPENLAYER_API_KEY',
10
+ // EITHER specify an existing inference pipeline ID
11
+ openlayerInferencePipelineId: 'YOUR_OPENLAYER_INFERENCE_PIPELINE_ID',
12
+ // OR the project and inference pipeline names to create or load one
10
13
  openlayerInferencePipelineName: 'production',
11
14
  openlayerProjectName: 'YOUR_OPENLAYER_PROJECT_NAME',
12
15
  });
13
16
 
14
- await monitor.startMonitoring();
17
+ await monitor.initialize();
15
18
 
16
19
  const inputs = [
17
20
  {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "openlayer",
3
- "version": "0.1.29",
3
+ "version": "0.1.31",
4
4
  "description": "The Openlayer TypeScript client",
5
5
  "main": "dist/index.js",
6
6
  "types": "dist/index.d.ts",
@@ -20,7 +20,7 @@
20
20
  "eslint-plugin-typescript-sort-keys": "^3.1.0",
21
21
  "node-fetch": "^3.3.2",
22
22
  "openai": "^4.19.0",
23
- "openlayer": "^0.1.27",
23
+ "openlayer": "^0.1.30",
24
24
  "uuid": "^9.0.1"
25
25
  },
26
26
  "devDependencies": {