openlayer 0.1.21 → 0.1.23

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.ts CHANGED
@@ -189,11 +189,13 @@ export declare class OpenlayerClient {
189
189
  streamData: (data: StreamingData, config: StreamingDataConfig, inferencePipelineId: string) => Promise<void>;
190
190
  }
191
191
  export declare class OpenAIMonitor {
192
+ private inferencePipeline?;
192
193
  private openlayerClient;
193
194
  private openAIClient;
194
195
  private openlayerProjectName;
195
196
  private openlayerInferencePipelineName;
196
197
  private monitoringOn;
198
+ private project?;
197
199
  /**
198
200
  * Constructs an OpenAIMonitor instance.
199
201
  * @param {OpenAIMonitorConstructorProps} props - The configuration properties for the OpenAI and Openlayer clients.
@@ -220,7 +222,7 @@ export declare class OpenAIMonitor {
220
222
  /**
221
223
  * Starts monitoring for the OpenAI Monitor instance. If monitoring is already active, a warning is logged.
222
224
  */
223
- startMonitoring(): void;
225
+ startMonitoring(): Promise<void>;
224
226
  /**
225
227
  * Stops monitoring for the OpenAI Monitor instance. If monitoring is not active, a warning is logged.
226
228
  */
package/dist/index.js CHANGED
@@ -95,7 +95,7 @@ class OpenlayerClient {
95
95
  timestampColumnName: 'timestamp',
96
96
  };
97
97
  this.openlayerServerUrl = 'https://api.openlayer.com/v1';
98
- this.version = '0.1.0a20';
98
+ this.version = '0.1.0a21';
99
99
  this.resolvedQuery = (endpoint, args = {}) => (0, request_1.resolvedQuery)(this.openlayerServerUrl, endpoint, args);
100
100
  /**
101
101
  * Creates a new inference pipeline in Openlayer or loads an existing one.
@@ -108,26 +108,25 @@ class OpenlayerClient {
108
108
  try {
109
109
  return yield this.loadInferencePipeline(projectId, name);
110
110
  }
111
- catch (_a) {
112
- const createInferencePipelineEndpoint = `/projects/${projectId}/inference-pipelines`;
113
- const createInferencePipelineQuery = this.resolvedQuery(createInferencePipelineEndpoint, { version: this.version });
114
- const createInferencePipelineResponse = yield fetch(createInferencePipelineQuery, {
115
- body: JSON.stringify({
116
- description: '',
117
- name,
118
- }),
119
- headers: {
120
- Authorization: `Bearer ${this.openlayerApiKey}`,
121
- 'Content-Type': 'application/json',
122
- },
123
- method: 'POST',
124
- });
125
- const inferencePipeline = yield createInferencePipelineResponse.json();
126
- if (!(inferencePipeline === null || inferencePipeline === void 0 ? void 0 : inferencePipeline.id)) {
127
- throw new Error('Error creating inference pipeline');
128
- }
129
- return inferencePipeline;
111
+ catch (_a) { }
112
+ const createInferencePipelineEndpoint = `/projects/${projectId}/inference-pipelines`;
113
+ const createInferencePipelineQuery = this.resolvedQuery(createInferencePipelineEndpoint, { version: this.version });
114
+ const createInferencePipelineResponse = yield fetch(createInferencePipelineQuery, {
115
+ body: JSON.stringify({
116
+ description: '',
117
+ name,
118
+ }),
119
+ headers: {
120
+ Authorization: `Bearer ${this.openlayerApiKey}`,
121
+ 'Content-Type': 'application/json',
122
+ },
123
+ method: 'POST',
124
+ });
125
+ const inferencePipeline = yield createInferencePipelineResponse.json();
126
+ if (!(inferencePipeline === null || inferencePipeline === void 0 ? void 0 : inferencePipeline.id)) {
127
+ throw new Error('Error creating inference pipeline');
130
128
  }
129
+ return inferencePipeline;
131
130
  });
132
131
  /**
133
132
  * Creates a new project in Openlayer or loads an existing one.
@@ -141,32 +140,31 @@ class OpenlayerClient {
141
140
  try {
142
141
  return yield this.loadProject(name);
143
142
  }
144
- catch (_b) {
145
- const projectsEndpoint = '/projects';
146
- const projectsQuery = this.resolvedQuery(projectsEndpoint);
147
- const response = yield fetch(projectsQuery, {
148
- body: JSON.stringify({
149
- description,
150
- name,
151
- taskType,
152
- }),
153
- headers: {
154
- Authorization: `Bearer ${this.openlayerApiKey}`,
155
- 'Content-Type': 'application/json',
156
- },
157
- method: 'POST',
158
- });
159
- const data = yield response.json();
160
- const { items: projects, error } = data;
161
- if (!Array.isArray(projects)) {
162
- throw new Error(typeof error === 'string' ? error : 'Invalid response from Openlayer');
163
- }
164
- const project = projects.find((p) => p.name === name);
165
- if (!(project === null || project === void 0 ? void 0 : project.id)) {
166
- throw new Error('Project not found');
167
- }
168
- return project;
143
+ catch (_b) { }
144
+ const projectsEndpoint = '/projects';
145
+ const projectsQuery = this.resolvedQuery(projectsEndpoint);
146
+ const response = yield fetch(projectsQuery, {
147
+ body: JSON.stringify({
148
+ description,
149
+ name,
150
+ taskType,
151
+ }),
152
+ headers: {
153
+ Authorization: `Bearer ${this.openlayerApiKey}`,
154
+ 'Content-Type': 'application/json',
155
+ },
156
+ method: 'POST',
157
+ });
158
+ const data = yield response.json();
159
+ const { items: projects, error } = data;
160
+ if (!Array.isArray(projects)) {
161
+ throw new Error(typeof error === 'string' ? error : 'Invalid response from Openlayer');
169
162
  }
163
+ const project = projects.find((p) => p.name === name);
164
+ if (!(project === null || project === void 0 ? void 0 : project.id)) {
165
+ throw new Error('Project not found');
166
+ }
167
+ return project;
170
168
  });
171
169
  /**
172
170
  * Loads an existing inference pipeline from Openlayer based on its name and project ID.
@@ -189,12 +187,12 @@ class OpenlayerClient {
189
187
  },
190
188
  method: 'GET',
191
189
  });
192
- const { items: inferencePipelines } = yield inferencePipelineResponse.json();
190
+ const { items: inferencePipelines, error } = yield inferencePipelineResponse.json();
193
191
  const inferencePipeline = Array.isArray(inferencePipelines)
194
192
  ? inferencePipelines.find((p) => p.name === name)
195
193
  : undefined;
196
194
  if (!(inferencePipeline === null || inferencePipeline === void 0 ? void 0 : inferencePipeline.id)) {
197
- throw new Error('Inference pipeline not found');
195
+ throw new Error(typeof error === 'string' ? error : 'Inference pipeline not found');
198
196
  }
199
197
  return inferencePipeline;
200
198
  });
@@ -315,8 +313,9 @@ class OpenAIMonitor {
315
313
  if (!this.monitoringOn) {
316
314
  throw new Error('Monitoring is not active.');
317
315
  }
318
- const project = yield this.openlayerClient.createProject(this.openlayerProjectName, 'llm-base');
319
- const inferencePipeline = yield this.openlayerClient.createInferencePipeline(project.id, this.openlayerInferencePipelineName);
316
+ if (typeof this.inferencePipeline === 'undefined') {
317
+ throw new Error('No inference pipeline found.');
318
+ }
320
319
  // Start a timer to measure latency
321
320
  const startTime = Date.now();
322
321
  // Accumulate output for streamed responses
@@ -325,7 +324,7 @@ class OpenAIMonitor {
325
324
  const prompt = this.formatChatCompletionInput(body.messages);
326
325
  const inputVariableNames = prompt
327
326
  .filter(({ role }) => role === 'user')
328
- .map(({ content }) => content.replace(/{{\s*|\s*}}/g, ''));
327
+ .map(({ content }) => String(content).replace(/{{\s*|\s*}}/g, ''));
329
328
  const inputVariables = body.messages
330
329
  .filter(({ role }) => role === 'user')
331
330
  .map(({ content }) => content);
@@ -353,7 +352,7 @@ class OpenAIMonitor {
353
352
  }
354
353
  const endTime = Date.now();
355
354
  const latency = endTime - startTime;
356
- this.openlayerClient.streamData(Object.assign(Object.assign({ latency, output: streamedOutput, timestamp: startTime }, inputVariablesMap), additionalLogs), config, inferencePipeline.id);
355
+ this.openlayerClient.streamData(Object.assign(Object.assign({ latency, output: streamedOutput, timestamp: startTime }, inputVariablesMap), additionalLogs), config, this.inferencePipeline.id);
357
356
  }
358
357
  else {
359
358
  const nonStreamedResponse = response;
@@ -370,7 +369,7 @@ class OpenAIMonitor {
370
369
  }
371
370
  this.openlayerClient.streamData(Object.assign(Object.assign({ cost,
372
371
  latency,
373
- output, timestamp: startTime, tokens }, inputVariablesMap), additionalLogs), config, inferencePipeline.id);
372
+ output, timestamp: startTime, tokens }, inputVariablesMap), additionalLogs), config, this.inferencePipeline.id);
374
373
  }
375
374
  return response;
376
375
  });
@@ -387,11 +386,12 @@ class OpenAIMonitor {
387
386
  if (!this.monitoringOn) {
388
387
  throw new Error('Monitoring is not active.');
389
388
  }
389
+ if (typeof this.inferencePipeline === 'undefined') {
390
+ throw new Error('No inference pipeline found.');
391
+ }
390
392
  if (!body.prompt) {
391
393
  throw new Error('No prompt provided.');
392
394
  }
393
- const project = yield this.openlayerClient.createProject(this.openlayerProjectName, 'llm-base');
394
- const inferencePipeline = yield this.openlayerClient.createInferencePipeline(project.id, this.openlayerInferencePipelineName);
395
395
  // Start a timer to measure latency
396
396
  const startTime = Date.now();
397
397
  // Accumulate output and tokens data for streamed responses
@@ -427,7 +427,7 @@ class OpenAIMonitor {
427
427
  const endTime = Date.now();
428
428
  const latency = endTime - startTime;
429
429
  const cost = this.cost(streamedModel, streamedInputTokens, streamedOutputTokens);
430
- this.openlayerClient.streamData(Object.assign({ cost, input: body.prompt, latency, output: streamedOutput, timestamp: startTime, tokens: streamedTokens }, additionalLogs), config, inferencePipeline.id);
430
+ this.openlayerClient.streamData(Object.assign({ cost, input: body.prompt, latency, output: streamedOutput, timestamp: startTime, tokens: streamedTokens }, additionalLogs), config, this.inferencePipeline.id);
431
431
  }
432
432
  else {
433
433
  const nonStreamedResponse = response;
@@ -438,7 +438,7 @@ class OpenAIMonitor {
438
438
  const inputTokens = (_z = (_y = nonStreamedResponse.usage) === null || _y === void 0 ? void 0 : _y.prompt_tokens) !== null && _z !== void 0 ? _z : 0;
439
439
  const outputTokens = (_1 = (_0 = nonStreamedResponse.usage) === null || _0 === void 0 ? void 0 : _0.completion_tokens) !== null && _1 !== void 0 ? _1 : 0;
440
440
  const cost = this.cost(nonStreamedResponse.model, inputTokens, outputTokens);
441
- this.openlayerClient.streamData(Object.assign({ cost, input: body.prompt, latency, output: nonStreamedResponse.choices[0].text, timestamp: startTime, tokens }, additionalLogs), config, inferencePipeline.id);
441
+ this.openlayerClient.streamData(Object.assign({ cost, input: body.prompt, latency, output: nonStreamedResponse.choices[0].text, timestamp: startTime, tokens }, additionalLogs), config, this.inferencePipeline.id);
442
442
  }
443
443
  return response;
444
444
  });
@@ -459,23 +459,33 @@ class OpenAIMonitor {
459
459
  * Starts monitoring for the OpenAI Monitor instance. If monitoring is already active, a warning is logged.
460
460
  */
461
461
  startMonitoring() {
462
- if (this.monitoringOn) {
463
- console.warn('Monitoring is already on!');
464
- return;
465
- }
466
- this.monitoringOn = true;
467
- console.info('Monitoring started.');
462
+ return __awaiter(this, void 0, void 0, function* () {
463
+ if (this.monitoringOn) {
464
+ console.warn('Monitor is already on.');
465
+ return;
466
+ }
467
+ console.info('Starting monitor: creating or loading an Openlayer project and inference pipeline...');
468
+ this.monitoringOn = true;
469
+ this.project = yield this.openlayerClient.createProject(this.openlayerProjectName, 'llm-base');
470
+ if (typeof this.project !== 'undefined') {
471
+ this.inferencePipeline =
472
+ yield this.openlayerClient.createInferencePipeline(this.project.id, this.openlayerInferencePipelineName);
473
+ }
474
+ console.info('Monitor started');
475
+ });
468
476
  }
469
477
  /**
470
478
  * Stops monitoring for the OpenAI Monitor instance. If monitoring is not active, a warning is logged.
471
479
  */
472
480
  stopMonitoring() {
473
481
  if (!this.monitoringOn) {
474
- console.warn('Monitoring is not active.');
482
+ console.warn('Monitor is not active.');
475
483
  return;
476
484
  }
477
485
  this.monitoringOn = false;
478
- console.info('Monitoring stopped.');
486
+ this.project = undefined;
487
+ this.inferencePipeline = undefined;
488
+ console.info('Monitor stopped.');
479
489
  }
480
490
  }
481
491
  exports.OpenAIMonitor = OpenAIMonitor;
@@ -24,28 +24,27 @@ const inputs = [
24
24
  'What would be a good name for a company that makes colorful socks?',
25
25
  ];
26
26
 
27
- await Promise.all(
28
- inputs.map(async (input) => {
29
- // Call the LLM
30
- const output = await chatModel.predict(input);
27
+ for (let i = 0; i < inputs.length; i++) {
28
+ const input = inputs[i];
29
+ // Call the LLM
30
+ const output = await chatModel.predict(input);
31
31
 
32
- // Stream the results to Openlayer
33
- await openlayer.streamData(
34
- {
35
- input,
36
- output,
37
- },
38
- {
39
- ...openlayer.defaultConfig,
40
- inputVariableNames: ['input'],
41
- prompt: [
42
- {
43
- content: '{{ input }}',
44
- role: 'user',
45
- },
46
- ],
47
- },
48
- inferencePipeline.id
49
- );
50
- })
51
- );
32
+ // Stream the results to Openlayer
33
+ await openlayer.streamData(
34
+ {
35
+ input,
36
+ output,
37
+ },
38
+ {
39
+ ...openlayer.defaultConfig,
40
+ inputVariableNames: ['input'],
41
+ prompt: [
42
+ {
43
+ content: '{{ input }}',
44
+ role: 'user',
45
+ },
46
+ ],
47
+ },
48
+ inferencePipeline.id
49
+ );
50
+ }
@@ -5,12 +5,14 @@
5
5
  import { OpenAIMonitor } from 'openlayer';
6
6
 
7
7
  const monitor = new OpenAIMonitor({
8
- openAiApiKey: 'YOUR_OPENAI_API_KEY',
9
- openlayerApiKey: 'YOUR_OPENLAYER_API_KEY',
10
- openlayerProjectName: 'YOUR_PROJECT_NAME',
8
+ openAiApiKey: 'YOUR_OPENAI_API_KEY', // redacted: a live-looking secret key was committed here and should be revoked
9
+ openlayerApiKey: 'YOUR_OPENLAYER_API_KEY', // redacted: a live-looking API key was committed here and should be revoked
10
+ openlayerInferencePipelineName: 'production',
11
+ openlayerProjectName: 'test13',
12
+ openlayerServerUrl: 'http://localhost:8080/v1',
11
13
  });
12
14
 
13
- monitor.startMonitoring();
15
+ await monitor.startMonitoring();
14
16
 
15
17
  const inputs = [
16
18
  {
@@ -27,9 +29,10 @@ const inputs = [
27
29
  },
28
30
  ];
29
31
 
30
- inputs.map(async ({ promptVersion, systemMessage, userMessage }) => {
32
+ for (let i = 0; i < inputs.length; i++) {
33
+ const { promptVersion, systemMessage, userMessage } = inputs[i];
31
34
  // Stream the results to Openlayer
32
- const chatCompletion = await monitor.createChatCompletion(
35
+ await monitor.createChatCompletion(
33
36
  {
34
37
  messages: [
35
38
  {
@@ -49,5 +52,4 @@ inputs.map(async ({ promptVersion, systemMessage, userMessage }) => {
49
52
  promptVersion,
50
53
  }
51
54
  );
52
- console.log(chatCompletion);
53
- });
55
+ }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "openlayer",
3
- "version": "0.1.21",
3
+ "version": "0.1.23",
4
4
  "description": "The Openlayer TypeScript client",
5
5
  "main": "dist/index.js",
6
6
  "types": "dist/index.d.ts",