openlayer 0.1.26 → 0.1.28

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.ts CHANGED
@@ -143,7 +143,6 @@ export declare class OpenlayerClient {
  /**
  * Constructs an OpenlayerClient instance.
  * @param {OpenlayerClientConstructorProps} props - The config for the Openlayer client. The API key is required.
- * @throws {Error} Throws an error if the Openlayer API key is not provided.
  */
  constructor({ openlayerApiKey, openlayerServerUrl, }: OpenlayerClientConstructorProps);
  private resolvedQuery;
@@ -184,7 +183,6 @@ export declare class OpenlayerClient {
  * @param {StreamingData} data - The chat completion data to be streamed.
  * @param {string} inferencePipelineId - The ID of the Openlayer inference pipeline to which data is streamed.
  * @returns {Promise<void>} A promise that resolves when the data has been successfully streamed.
- * @throws {Error} Throws an error if the Openlayer API key is not set or an error occurs in the streaming process.
  */
  streamData: (data: StreamingData, config: StreamingDataConfig, inferencePipelineId: string) => Promise<void>;
  }
@@ -208,7 +206,7 @@ export declare class OpenAIMonitor {
  * @param {ChatCompletionCreateParams} body - The parameters for creating a chat completion.
  * @param {RequestOptions} [options] - Optional request options.
  * @returns {Promise<ChatCompletion | Stream<ChatCompletionChunk>>} Promise of a ChatCompletion or a Stream
- * @throws {Error} Throws an error if monitoring is not active or if no output is received from OpenAI.
+ * @throws {Error} Throws errors from the OpenAI client.
  */
  createChatCompletion: (body: ChatCompletionCreateParams, options?: RequestOptions, additionalLogs?: StreamingData) => Promise<ChatCompletion | Stream<ChatCompletionChunk>>;
  /**
@@ -216,7 +214,7 @@ export declare class OpenAIMonitor {
  * @param {CompletionCreateParams} body - The parameters for creating a completion.
  * @param {RequestOptions} [options] - Optional request options.
  * @returns {Promise<Completion | Stream<Completion>>} Promise that resolves to a Completion or a Stream.
- * @throws {Error} Throws an error if monitoring is not active or if no prompt is provided.
+ * @throws {Error} Throws errors from the OpenAI client.
  */
  createCompletion: (body: CompletionCreateParams, options?: RequestOptions, additionalLogs?: StreamingData) => Promise<Completion | Stream<Completion>>;
  /**
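
For context, a minimal sketch of how the declarations above can be exercised. The API key and pipeline ID are placeholders, the `StreamingData` fields (`output`, `timestamp`, `latency`) are inferred from the `streamData` calls in `index.js` below, and passing an empty `StreamingDataConfig` assumes the client's defaults suffice:

```ts
import { OpenlayerClient } from 'openlayer';

async function main() {
  // Placeholder key; as of 0.1.28 a missing key is logged via
  // console.error instead of throwing from the constructor.
  const client = new OpenlayerClient({
    openlayerApiKey: process.env.OPENLAYER_API_KEY ?? '',
  });

  // streamData now resolves even when the request fails; errors are
  // reported via console.error rather than thrown.
  await client.streamData(
    { output: 'Hello, world!', timestamp: Date.now(), latency: 120 },
    {}, // StreamingDataConfig: assumed safe to leave empty here
    'my-inference-pipeline-id', // placeholder pipeline ID
  );
}

main();
```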
package/dist/index.js CHANGED
@@ -98,7 +98,6 @@ class OpenlayerClient {
  /**
  * Constructs an OpenlayerClient instance.
  * @param {OpenlayerClientConstructorProps} props - The config for the Openlayer client. The API key is required.
- * @throws {Error} Throws an error if the Openlayer API key is not provided.
  */
  constructor({ openlayerApiKey, openlayerServerUrl, }) {
  this.defaultConfig = {
@@ -248,12 +247,12 @@ class OpenlayerClient {
  * @param {StreamingData} data - The chat completion data to be streamed.
  * @param {string} inferencePipelineId - The ID of the Openlayer inference pipeline to which data is streamed.
  * @returns {Promise<void>} A promise that resolves when the data has been successfully streamed.
- * @throws {Error} Throws an error if the Openlayer API key is not set or an error occurs in the streaming process.
  */
  this.streamData = (data, config, inferencePipelineId) => __awaiter(this, void 0, void 0, function* () {
  var _c;
  if (!this.openlayerApiKey) {
- throw new Error('Openlayer API key are required for streaming data.');
+ console.error('Openlayer API key are required for streaming data.');
+ return;
  }
  try {
  const dataStreamEndpoint = `/inference-pipelines/${inferencePipelineId}/data-stream`;
@@ -273,13 +272,12 @@ class OpenlayerClient {
  });
  if (!response.ok) {
  console.error('Error making POST request:', response.status);
- throw new Error(`Error: ${response.status}`);
+ console.error(`Error: ${response.status}`);
  }
- return yield response.json();
+ yield response.json();
  }
  catch (error) {
  console.error('Error streaming data to Openlayer:', error);
- throw error;
  }
  });
  this.openlayerApiKey = openlayerApiKey;
@@ -287,7 +285,7 @@ class OpenlayerClient {
  this.openlayerServerUrl = openlayerServerUrl;
  }
  if (!this.openlayerApiKey) {
- throw new Error('Openlayer API key are required for publishing.');
+ console.error('Openlayer API key are required for publishing.');
  }
  }
  }
@@ -321,70 +319,79 @@ class OpenAIMonitor {
  * @param {ChatCompletionCreateParams} body - The parameters for creating a chat completion.
  * @param {RequestOptions} [options] - Optional request options.
  * @returns {Promise<ChatCompletion | Stream<ChatCompletionChunk>>} Promise of a ChatCompletion or a Stream
- * @throws {Error} Throws an error if monitoring is not active or if no output is received from OpenAI.
+ * @throws {Error} Throws errors from the OpenAI client.
  */
  this.createChatCompletion = (body, options, additionalLogs) => __awaiter(this, void 0, void 0, function* () {
  var _a, e_1, _b, _c;
  var _d, _e, _f, _g, _h, _j, _k;
  if (!this.monitoringOn) {
- throw new Error('Monitoring is not active.');
+ console.warn('Monitoring is not active.');
  }
- if (typeof this.inferencePipeline === 'undefined') {
- throw new Error('No inference pipeline found.');
+ else if (typeof this.inferencePipeline === 'undefined') {
+ console.error('No inference pipeline found.');
  }
  // Start a timer to measure latency
  const startTime = Date.now();
  // Accumulate output for streamed responses
  let streamedOutput = '';
  const response = yield this.openAIClient.chat.completions.create(body, options);
- const prompt = this.formatChatCompletionInput(body.messages);
- const inputVariableNames = prompt
- .filter(({ role }) => role === 'user')
- .map(({ content }) => String(content).replace(/{{\s*|\s*}}/g, ''));
- const inputVariables = body.messages
- .filter(({ role }) => role === 'user')
- .map(({ content }) => content);
- const inputVariablesMap = inputVariableNames.reduce((acc, name, i) => (Object.assign(Object.assign({}, acc), { [name]: inputVariables[i] })), {});
- const config = Object.assign(Object.assign({}, this.openlayerClient.defaultConfig), { inputVariableNames,
- prompt });
- if (body.stream) {
- const streamedResponse = response;
- try {
- for (var _l = true, streamedResponse_1 = __asyncValues(streamedResponse), streamedResponse_1_1; streamedResponse_1_1 = yield streamedResponse_1.next(), _a = streamedResponse_1_1.done, !_a; _l = true) {
- _c = streamedResponse_1_1.value;
- _l = false;
- const chunk = _c;
- // Process each chunk - for example, accumulate input data
- const chunkOutput = (_d = chunk.choices[0].delta.content) !== null && _d !== void 0 ? _d : '';
- streamedOutput += chunkOutput;
+ try {
+ if (this.monitoringOn && typeof this.inferencePipeline !== 'undefined') {
+ const prompt = this.formatChatCompletionInput(body.messages);
+ const inputVariableNames = prompt
+ .filter(({ role }) => role === 'user')
+ .map(({ content }) => String(content).replace(/{{\s*|\s*}}/g, ''));
+ const inputVariables = body.messages
+ .filter(({ role }) => role === 'user')
+ .map(({ content }) => content);
+ const inputVariablesMap = inputVariableNames.reduce((acc, name, i) => (Object.assign(Object.assign({}, acc), { [name]: inputVariables[i] })), {});
+ const config = Object.assign(Object.assign({}, this.openlayerClient.defaultConfig), { inputVariableNames,
+ prompt });
+ if (body.stream) {
+ const streamedResponse = response;
+ try {
+ for (var _l = true, streamedResponse_1 = __asyncValues(streamedResponse), streamedResponse_1_1; streamedResponse_1_1 = yield streamedResponse_1.next(), _a = streamedResponse_1_1.done, !_a; _l = true) {
+ _c = streamedResponse_1_1.value;
+ _l = false;
+ const chunk = _c;
+ // Process each chunk - for example, accumulate input data
+ const chunkOutput = (_d = chunk.choices[0].delta.content) !== null && _d !== void 0 ? _d : '';
+ streamedOutput += chunkOutput;
+ }
+ }
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
+ finally {
+ try {
+ if (!_l && !_a && (_b = streamedResponse_1.return)) yield _b.call(streamedResponse_1);
+ }
+ finally { if (e_1) throw e_1.error; }
+ }
+ const endTime = Date.now();
+ const latency = endTime - startTime;
+ this.openlayerClient.streamData(Object.assign(Object.assign({ latency, output: streamedOutput, timestamp: startTime }, inputVariablesMap), additionalLogs), config, this.inferencePipeline.id);
  }
- }
- catch (e_1_1) { e_1 = { error: e_1_1 }; }
- finally {
- try {
- if (!_l && !_a && (_b = streamedResponse_1.return)) yield _b.call(streamedResponse_1);
+ else {
+ const nonStreamedResponse = response;
+ // Handle regular (non-streamed) response
+ const endTime = Date.now();
+ const latency = endTime - startTime;
+ const output = nonStreamedResponse.choices[0].message.content;
+ const tokens = (_f = (_e = nonStreamedResponse.usage) === null || _e === void 0 ? void 0 : _e.total_tokens) !== null && _f !== void 0 ? _f : 0;
+ const inputTokens = (_h = (_g = nonStreamedResponse.usage) === null || _g === void 0 ? void 0 : _g.prompt_tokens) !== null && _h !== void 0 ? _h : 0;
+ const outputTokens = (_k = (_j = nonStreamedResponse.usage) === null || _j === void 0 ? void 0 : _j.completion_tokens) !== null && _k !== void 0 ? _k : 0;
+ const cost = this.cost(nonStreamedResponse.model, inputTokens, outputTokens);
+ if (typeof output === 'string') {
+ this.openlayerClient.streamData(Object.assign(Object.assign({ cost,
+ latency, model: nonStreamedResponse.model, output, timestamp: startTime, tokens }, inputVariablesMap), additionalLogs), config, this.inferencePipeline.id);
+ }
+ else {
+ console.error('No output received from OpenAI.');
+ }
  }
- finally { if (e_1) throw e_1.error; }
  }
- const endTime = Date.now();
- const latency = endTime - startTime;
- this.openlayerClient.streamData(Object.assign(Object.assign({ latency, output: streamedOutput, timestamp: startTime }, inputVariablesMap), additionalLogs), config, this.inferencePipeline.id);
  }
- else {
- const nonStreamedResponse = response;
- // Handle regular (non-streamed) response
- const endTime = Date.now();
- const latency = endTime - startTime;
- const output = nonStreamedResponse.choices[0].message.content;
- const tokens = (_f = (_e = nonStreamedResponse.usage) === null || _e === void 0 ? void 0 : _e.total_tokens) !== null && _f !== void 0 ? _f : 0;
- const inputTokens = (_h = (_g = nonStreamedResponse.usage) === null || _g === void 0 ? void 0 : _g.prompt_tokens) !== null && _h !== void 0 ? _h : 0;
- const outputTokens = (_k = (_j = nonStreamedResponse.usage) === null || _j === void 0 ? void 0 : _j.completion_tokens) !== null && _k !== void 0 ? _k : 0;
- const cost = this.cost(nonStreamedResponse.model, inputTokens, outputTokens);
- if (typeof output !== 'string') {
- throw new Error('No output received from OpenAI.');
- }
- this.openlayerClient.streamData(Object.assign(Object.assign({ cost,
- latency, model: nonStreamedResponse.model, output, timestamp: startTime, tokens }, inputVariablesMap), additionalLogs), config, this.inferencePipeline.id);
+ catch (error) {
+ console.error(error);
  }
  return response;
  });
@@ -393,19 +400,19 @@ class OpenAIMonitor {
  * @param {CompletionCreateParams} body - The parameters for creating a completion.
  * @param {RequestOptions} [options] - Optional request options.
  * @returns {Promise<Completion | Stream<Completion>>} Promise that resolves to a Completion or a Stream.
- * @throws {Error} Throws an error if monitoring is not active or if no prompt is provided.
+ * @throws {Error} Throws errors from the OpenAI client.
  */
  this.createCompletion = (body, options, additionalLogs) => __awaiter(this, void 0, void 0, function* () {
  var _m, e_2, _o, _p;
  var _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _0, _1;
- if (!this.monitoringOn) {
- throw new Error('Monitoring is not active.');
+ if (!body.prompt) {
+ console.error('No prompt provided.');
  }
- if (typeof this.inferencePipeline === 'undefined') {
- throw new Error('No inference pipeline found.');
+ if (!this.monitoringOn) {
+ console.warn('Monitoring is not active.');
  }
- if (!body.prompt) {
- throw new Error('No prompt provided.');
+ else if (typeof this.inferencePipeline === 'undefined') {
+ console.error('No inference pipeline found.');
  }
  // Start a timer to measure latency
  const startTime = Date.now();
@@ -416,44 +423,51 @@ class OpenAIMonitor {
  let streamedInputTokens = 0;
  let streamedOutputTokens = 0;
  const response = yield this.openAIClient.completions.create(body, options);
- const config = Object.assign(Object.assign({}, this.openlayerClient.defaultConfig), { inputVariableNames: ['input'] });
- if (body.stream) {
- const streamedResponse = response;
- try {
- for (var _2 = true, streamedResponse_2 = __asyncValues(streamedResponse), streamedResponse_2_1; streamedResponse_2_1 = yield streamedResponse_2.next(), _m = streamedResponse_2_1.done, !_m; _2 = true) {
- _p = streamedResponse_2_1.value;
- _2 = false;
- const chunk = _p;
- // Process each chunk - for example, accumulate input data
- streamedModel = chunk.model;
- streamedOutput += chunk.choices[0].text.trim();
- streamedTokens += (_r = (_q = chunk.usage) === null || _q === void 0 ? void 0 : _q.total_tokens) !== null && _r !== void 0 ? _r : 0;
- streamedInputTokens += (_t = (_s = chunk.usage) === null || _s === void 0 ? void 0 : _s.prompt_tokens) !== null && _t !== void 0 ? _t : 0;
- streamedOutputTokens += (_v = (_u = chunk.usage) === null || _u === void 0 ? void 0 : _u.completion_tokens) !== null && _v !== void 0 ? _v : 0;
+ try {
+ if (this.monitoringOn && typeof this.inferencePipeline !== 'undefined') {
+ const config = Object.assign(Object.assign({}, this.openlayerClient.defaultConfig), { inputVariableNames: ['input'] });
+ if (body.stream) {
+ const streamedResponse = response;
+ try {
+ for (var _2 = true, streamedResponse_2 = __asyncValues(streamedResponse), streamedResponse_2_1; streamedResponse_2_1 = yield streamedResponse_2.next(), _m = streamedResponse_2_1.done, !_m; _2 = true) {
+ _p = streamedResponse_2_1.value;
+ _2 = false;
+ const chunk = _p;
+ // Process each chunk - for example, accumulate input data
+ streamedModel = chunk.model;
+ streamedOutput += chunk.choices[0].text.trim();
+ streamedTokens += (_r = (_q = chunk.usage) === null || _q === void 0 ? void 0 : _q.total_tokens) !== null && _r !== void 0 ? _r : 0;
+ streamedInputTokens += (_t = (_s = chunk.usage) === null || _s === void 0 ? void 0 : _s.prompt_tokens) !== null && _t !== void 0 ? _t : 0;
+ streamedOutputTokens += (_v = (_u = chunk.usage) === null || _u === void 0 ? void 0 : _u.completion_tokens) !== null && _v !== void 0 ? _v : 0;
+ }
+ }
+ catch (e_2_1) { e_2 = { error: e_2_1 }; }
+ finally {
+ try {
+ if (!_2 && !_m && (_o = streamedResponse_2.return)) yield _o.call(streamedResponse_2);
+ }
+ finally { if (e_2) throw e_2.error; }
+ }
+ const endTime = Date.now();
+ const latency = endTime - startTime;
+ const cost = this.cost(streamedModel, streamedInputTokens, streamedOutputTokens);
+ this.openlayerClient.streamData(Object.assign({ cost, input: body.prompt, latency, output: streamedOutput, timestamp: startTime, tokens: streamedTokens }, additionalLogs), config, this.inferencePipeline.id);
  }
- }
- catch (e_2_1) { e_2 = { error: e_2_1 }; }
- finally {
- try {
- if (!_2 && !_m && (_o = streamedResponse_2.return)) yield _o.call(streamedResponse_2);
+ else {
+ const nonStreamedResponse = response;
+ // Handle regular (non-streamed) response
+ const endTime = Date.now();
+ const latency = endTime - startTime;
+ const tokens = (_x = (_w = nonStreamedResponse.usage) === null || _w === void 0 ? void 0 : _w.total_tokens) !== null && _x !== void 0 ? _x : 0;
+ const inputTokens = (_z = (_y = nonStreamedResponse.usage) === null || _y === void 0 ? void 0 : _y.prompt_tokens) !== null && _z !== void 0 ? _z : 0;
+ const outputTokens = (_1 = (_0 = nonStreamedResponse.usage) === null || _0 === void 0 ? void 0 : _0.completion_tokens) !== null && _1 !== void 0 ? _1 : 0;
+ const cost = this.cost(nonStreamedResponse.model, inputTokens, outputTokens);
+ this.openlayerClient.streamData(Object.assign({ cost, input: body.prompt, latency, output: nonStreamedResponse.choices[0].text, timestamp: startTime, tokens }, additionalLogs), config, this.inferencePipeline.id);
  }
- finally { if (e_2) throw e_2.error; }
  }
- const endTime = Date.now();
- const latency = endTime - startTime;
- const cost = this.cost(streamedModel, streamedInputTokens, streamedOutputTokens);
- this.openlayerClient.streamData(Object.assign({ cost, input: body.prompt, latency, output: streamedOutput, timestamp: startTime, tokens: streamedTokens }, additionalLogs), config, this.inferencePipeline.id);
  }
- else {
- const nonStreamedResponse = response;
- // Handle regular (non-streamed) response
- const endTime = Date.now();
- const latency = endTime - startTime;
- const tokens = (_x = (_w = nonStreamedResponse.usage) === null || _w === void 0 ? void 0 : _w.total_tokens) !== null && _x !== void 0 ? _x : 0;
- const inputTokens = (_z = (_y = nonStreamedResponse.usage) === null || _y === void 0 ? void 0 : _y.prompt_tokens) !== null && _z !== void 0 ? _z : 0;
- const outputTokens = (_1 = (_0 = nonStreamedResponse.usage) === null || _0 === void 0 ? void 0 : _0.completion_tokens) !== null && _1 !== void 0 ? _1 : 0;
- const cost = this.cost(nonStreamedResponse.model, inputTokens, outputTokens);
- this.openlayerClient.streamData(Object.assign({ cost, input: body.prompt, latency, output: nonStreamedResponse.choices[0].text, timestamp: startTime, tokens }, additionalLogs), config, this.inferencePipeline.id);
+ catch (error) {
+ console.error(error);
  }
  return response;
  });
@@ -480,13 +494,19 @@ class OpenAIMonitor {
  return;
  }
  console.info('Starting monitor: creating or loading an Openlayer project and inference pipeline...');
- this.monitoringOn = true;
- this.project = yield this.openlayerClient.createProject(this.openlayerProjectName, 'llm-base');
- if (typeof this.project !== 'undefined') {
- this.inferencePipeline =
- yield this.openlayerClient.createInferencePipeline(this.project.id, this.openlayerInferencePipelineName);
+ try {
+ this.monitoringOn = true;
+ this.project = yield this.openlayerClient.createProject(this.openlayerProjectName, 'llm-base');
+ if (typeof this.project !== 'undefined') {
+ this.inferencePipeline =
+ yield this.openlayerClient.createInferencePipeline(this.project.id, this.openlayerInferencePipelineName);
+ }
+ console.info('Monitor started');
+ }
+ catch (error) {
+ console.error('An error occurred while starting the monitor:', error);
+ this.stopMonitoring();
  }
- console.info('Monitor started');
  });
  }
  /**
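
The practical effect of the `index.js` changes is that monitoring problems no longer interrupt the wrapped OpenAI call. A sketch of the new contract for `createChatCompletion`, assuming a monitor constructed elsewhere (its constructor options are not part of this diff):

```ts
import { OpenAIMonitor } from 'openlayer';

// Constructed elsewhere; the constructor options are outside this diff.
declare const monitor: OpenAIMonitor;

async function demo() {
  // In 0.1.26 this threw if monitoring was inactive, the inference
  // pipeline was missing, or OpenAI returned no output. In 0.1.28 those
  // conditions are logged (console.warn / console.error) and the OpenAI
  // response is still returned; only errors from the OpenAI client
  // itself propagate to the caller.
  const completion = await monitor.createChatCompletion({
    model: 'gpt-3.5-turbo',
    messages: [{ role: 'user', content: 'Say hello.' }],
  });
  console.log(completion);
}
```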
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "openlayer",
- "version": "0.1.26",
+ "version": "0.1.28",
  "description": "The Openlayer TypeScript client",
  "main": "dist/index.js",
  "types": "dist/index.d.ts",
@@ -20,6 +20,7 @@
  "eslint-plugin-typescript-sort-keys": "^3.1.0",
  "node-fetch": "^3.3.2",
  "openai": "^4.19.0",
+ "openlayer": "^0.1.27",
  "uuid": "^9.0.1"
  },
  "devDependencies": {