openlayer 0.1.5 → 0.1.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/dist/index.d.ts +33 -27
  2. package/dist/index.js +168 -156
  3. package/package.json +1 -1
package/dist/index.d.ts CHANGED
@@ -61,13 +61,15 @@ export interface ChatCompletionConfig {
61
61
  */
62
62
  timestampColumnName: string | null;
63
63
  }
64
- type ConstructorProps = {
65
- openAiApiKey: string;
64
+ type OpenlayerClientConstructorProps = {
66
65
  openlayerApiKey?: string;
67
66
  openlayerInferencePipelineName?: string;
68
67
  openlayerProjectName?: string;
69
68
  openlayerServerUrl?: string;
70
69
  };
70
+ type OpenAIMonitorConstructorProps = OpenlayerClientConstructorProps & {
71
+ openAiApiKey: string;
72
+ };
71
73
  type OpenlayerInferencePipeline = {
72
74
  dataVolumeGraphs?: OpenlayerSampleVolumeGraph;
73
75
  dateCreated: string;
@@ -123,17 +125,13 @@ type OpenlayerSampleVolumeGraph = {
123
125
  weekly: OpenlayerSampleVolumeGraphBucket;
124
126
  };
125
127
  type OpenlayerTaskType = 'llm-base' | 'tabular-classification' | 'tabular-regression' | 'text-classification';
126
- declare class OpenAIMonitor {
127
- private OpenAIClient;
128
- private monitoringOn;
128
+ declare class OpenlayerClient {
129
129
  private openlayerApiKey?;
130
- private openlayerDefaultDataConfig;
131
130
  private openlayerProjectName?;
132
131
  private openlayerInferencePipelineName?;
133
132
  private openlayerServerUrl;
134
133
  private version;
135
- constructor({ openAiApiKey, openlayerApiKey, openlayerInferencePipelineName, openlayerProjectName, openlayerServerUrl, }: ConstructorProps);
136
- private formatChatCompletionInput;
134
+ constructor({ openlayerApiKey, openlayerInferencePipelineName, openlayerProjectName, openlayerServerUrl, }: OpenlayerClientConstructorProps);
137
135
  private resolvedQuery;
138
136
  private uploadToInferencePipeline;
139
137
  /**
@@ -145,24 +143,6 @@ declare class OpenAIMonitor {
145
143
  * @returns A promise that resolves when the data has been successfully uploaded.
146
144
  */
147
145
  uploadDataToOpenlayer: (data: ChatCompletionData, config: ChatCompletionConfig) => Promise<void>;
148
- /**
149
- * Creates a new ChatCompletion instance. If monitoring is not active, an error is thrown.
150
- * This function also measures latency and uploads data to Openlayer.
151
- * @param body The parameters for creating a chat completion.
152
- * @param options Optional request options.
153
- * @throws Throws an error if monitoring is not active or if there is no output received from OpenAI.
154
- * @returns A promise that resolves to a ChatCompletion or a Stream of ChatCompletionChunks.
155
- */
156
- createChatCompletion: (body: ChatCompletionCreateParams, options?: RequestOptions) => Promise<ChatCompletion | Stream<ChatCompletionChunk>>;
157
- /**
158
- * Creates a new Completion instance. If monitoring is not active, an error is thrown.
159
- * This function also measures latency and uploads data to Openlayer.
160
- * @param body The parameters for creating a completion.
161
- * @param options Optional request options.
162
- * @throws Throws an error if monitoring is not active or if no prompt is provided.
163
- * @returns A promise that resolves to a Completion or a Stream of Completions.
164
- */
165
- createCompletion: (body: CompletionCreateParams, options?: RequestOptions) => Promise<Completion | Stream<Completion>>;
166
146
  /**
167
147
  * Creates a new inference pipeline in Openlayer, or loads an existing one if it already exists.
168
148
  * @param name The name of the inference pipeline.
@@ -189,6 +169,32 @@ declare class OpenAIMonitor {
189
169
  */
190
170
  loadInferencePipeline: (name: string, projectId: string) => Promise<OpenlayerInferencePipeline>;
191
171
  loadProject: (name: string) => Promise<OpenlayerProject>;
172
+ }
173
+ export declare class OpenAIMonitor {
174
+ private openlayerClient;
175
+ private openAIClient;
176
+ private openlayerDefaultDataConfig;
177
+ private monitoringOn;
178
+ constructor({ openAiApiKey, openlayerApiKey, openlayerInferencePipelineName, openlayerProjectName, openlayerServerUrl, }: OpenAIMonitorConstructorProps);
179
+ private formatChatCompletionInput;
180
+ /**
181
+ * Creates a new ChatCompletion instance. If monitoring is not active, an error is thrown.
182
+ * This function also measures latency and uploads data to Openlayer.
183
+ * @param body The parameters for creating a chat completion.
184
+ * @param options Optional request options.
185
+ * @throws Throws an error if monitoring is not active or if there is no output received from OpenAI.
186
+ * @returns A promise that resolves to a ChatCompletion or a Stream of ChatCompletionChunks.
187
+ */
188
+ createChatCompletion: (body: ChatCompletionCreateParams, options?: RequestOptions) => Promise<ChatCompletion | Stream<ChatCompletionChunk>>;
189
+ /**
190
+ * Creates a new Completion instance. If monitoring is not active, an error is thrown.
191
+ * This function also measures latency and uploads data to Openlayer.
192
+ * @param body The parameters for creating a completion.
193
+ * @param options Optional request options.
194
+ * @throws Throws an error if monitoring is not active or if no prompt is provided.
195
+ * @returns A promise that resolves to a Completion or a Stream of Completions.
196
+ */
197
+ createCompletion: (body: CompletionCreateParams, options?: RequestOptions) => Promise<Completion | Stream<Completion>>;
192
198
  /**
193
199
  * Starts monitoring for the OpenAI Monitor instance. If monitoring is already active, a warning is logged.
194
200
  */
@@ -198,4 +204,4 @@ declare class OpenAIMonitor {
198
204
  */
199
205
  stopMonitoring(): void;
200
206
  }
201
- export default OpenAIMonitor;
207
+ export default OpenlayerClient;
package/dist/index.js CHANGED
@@ -16,28 +16,14 @@ var __asyncValues = (this && this.__asyncValues) || function (o) {
16
16
  function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }
17
17
  };
18
18
  Object.defineProperty(exports, "__esModule", { value: true });
19
+ exports.OpenAIMonitor = void 0;
19
20
  const openai_1 = require("openai");
20
21
  const uuid_1 = require("uuid");
21
22
  const request_1 = require("./utils/request");
22
- class OpenAIMonitor {
23
- constructor({ openAiApiKey, openlayerApiKey, openlayerInferencePipelineName, openlayerProjectName, openlayerServerUrl, }) {
24
- this.monitoringOn = false;
25
- this.openlayerDefaultDataConfig = {
26
- groundTruthColumnName: null,
27
- inferenceIdColumnName: 'id',
28
- inputVariableNames: ['input'],
29
- latencyColumnName: 'latency',
30
- numOfTokenColumnName: 'tokens',
31
- outputColumnName: 'output',
32
- timestampColumnName: 'timestamp',
33
- };
23
+ class OpenlayerClient {
24
+ constructor({ openlayerApiKey, openlayerInferencePipelineName, openlayerProjectName, openlayerServerUrl, }) {
34
25
  this.openlayerServerUrl = 'https://api.openlayer.com/v1';
35
26
  this.version = '0.1.0a16';
36
- this.formatChatCompletionInput = (messages) => messages
37
- .filter(({ role }) => role === 'user')
38
- .map(({ content }) => content)
39
- .join('\n')
40
- .trim();
41
27
  this.resolvedQuery = (endpoint, args = {}) => (0, request_1.resolvedQuery)(this.openlayerServerUrl, endpoint, args);
42
28
  this.uploadToInferencePipeline = (inferencePipelineId, data, config) => __awaiter(this, void 0, void 0, function* () {
43
29
  var _a;
@@ -84,138 +70,6 @@ class OpenAIMonitor {
84
70
  throw error;
85
71
  }
86
72
  });
87
- /**
88
- * Creates a new ChatCompletion instance. If monitoring is not active, an error is thrown.
89
- * This function also measures latency and uploads data to Openlayer.
90
- * @param body The parameters for creating a chat completion.
91
- * @param options Optional request options.
92
- * @throws Throws an error if monitoring is not active or if there is no output received from OpenAI.
93
- * @returns A promise that resolves to a ChatCompletion or a Stream of ChatCompletionChunks.
94
- */
95
- this.createChatCompletion = (body, options) => __awaiter(this, void 0, void 0, function* () {
96
- var _b, e_1, _c, _d;
97
- var _e, _f;
98
- if (!this.monitoringOn) {
99
- throw new Error('Monitoring is not active.');
100
- }
101
- // Start a timer to measure latency
102
- const startTime = Date.now();
103
- // Accumulate output for streamed responses
104
- let outputData = '';
105
- const response = yield this.OpenAIClient.chat.completions.create(body, options);
106
- if (body.stream) {
107
- const streamedResponse = response;
108
- try {
109
- for (var _g = true, streamedResponse_1 = __asyncValues(streamedResponse), streamedResponse_1_1; streamedResponse_1_1 = yield streamedResponse_1.next(), _b = streamedResponse_1_1.done, !_b; _g = true) {
110
- _d = streamedResponse_1_1.value;
111
- _g = false;
112
- const chunk = _d;
113
- // Process each chunk - for example, accumulate input data
114
- outputData += chunk.choices[0].delta.content;
115
- }
116
- }
117
- catch (e_1_1) { e_1 = { error: e_1_1 }; }
118
- finally {
119
- try {
120
- if (!_g && !_b && (_c = streamedResponse_1.return)) yield _c.call(streamedResponse_1);
121
- }
122
- finally { if (e_1) throw e_1.error; }
123
- }
124
- const endTime = Date.now();
125
- const latency = endTime - startTime;
126
- this.uploadDataToOpenlayer({
127
- input: this.formatChatCompletionInput(body.messages),
128
- latency,
129
- output: outputData,
130
- timestamp: startTime,
131
- }, this.openlayerDefaultDataConfig);
132
- }
133
- else {
134
- const nonStreamedResponse = response;
135
- // Handle regular (non-streamed) response
136
- const endTime = Date.now();
137
- const latency = endTime - startTime;
138
- const output = nonStreamedResponse.choices[0].message.content;
139
- if (typeof output !== 'string') {
140
- throw new Error('No output received from OpenAI.');
141
- }
142
- this.uploadDataToOpenlayer({
143
- input: this.formatChatCompletionInput(body.messages),
144
- latency,
145
- output,
146
- timestamp: startTime,
147
- tokens: (_f = (_e = nonStreamedResponse.usage) === null || _e === void 0 ? void 0 : _e.total_tokens) !== null && _f !== void 0 ? _f : 0,
148
- }, this.openlayerDefaultDataConfig);
149
- }
150
- return response;
151
- });
152
- /**
153
- * Creates a new Completion instance. If monitoring is not active, an error is thrown.
154
- * This function also measures latency and uploads data to Openlayer.
155
- * @param body The parameters for creating a completion.
156
- * @param options Optional request options.
157
- * @throws Throws an error if monitoring is not active or if no prompt is provided.
158
- * @returns A promise that resolves to a Completion or a Stream of Completions.
159
- */
160
- this.createCompletion = (body, options) => __awaiter(this, void 0, void 0, function* () {
161
- var _h, e_2, _j, _k;
162
- var _l, _m, _o, _p;
163
- if (!this.monitoringOn) {
164
- throw new Error('Monitoring is not active.');
165
- }
166
- if (!body.prompt) {
167
- throw new Error('No prompt provided.');
168
- }
169
- // Start a timer to measure latency
170
- const startTime = Date.now();
171
- // Accumulate output and tokens data for streamed responses
172
- let outputData = '';
173
- let tokensData = 0;
174
- const response = yield this.OpenAIClient.completions.create(body, options);
175
- if (body.stream) {
176
- const streamedResponse = response;
177
- try {
178
- for (var _q = true, streamedResponse_2 = __asyncValues(streamedResponse), streamedResponse_2_1; streamedResponse_2_1 = yield streamedResponse_2.next(), _h = streamedResponse_2_1.done, !_h; _q = true) {
179
- _k = streamedResponse_2_1.value;
180
- _q = false;
181
- const chunk = _k;
182
- // Process each chunk - for example, accumulate input data
183
- outputData += chunk.choices[0].text.trim();
184
- tokensData += (_m = (_l = chunk.usage) === null || _l === void 0 ? void 0 : _l.total_tokens) !== null && _m !== void 0 ? _m : 0;
185
- }
186
- }
187
- catch (e_2_1) { e_2 = { error: e_2_1 }; }
188
- finally {
189
- try {
190
- if (!_q && !_h && (_j = streamedResponse_2.return)) yield _j.call(streamedResponse_2);
191
- }
192
- finally { if (e_2) throw e_2.error; }
193
- }
194
- const endTime = Date.now();
195
- const latency = endTime - startTime;
196
- this.uploadDataToOpenlayer({
197
- input: body.prompt,
198
- latency,
199
- output: outputData,
200
- timestamp: startTime,
201
- tokens: tokensData,
202
- }, this.openlayerDefaultDataConfig);
203
- }
204
- else {
205
- const nonStreamedResponse = response;
206
- // Handle regular (non-streamed) response
207
- const endTime = Date.now();
208
- const latency = endTime - startTime;
209
- this.uploadDataToOpenlayer({
210
- input: body.prompt,
211
- latency,
212
- output: nonStreamedResponse.choices[0].text,
213
- timestamp: startTime,
214
- tokens: (_p = (_o = nonStreamedResponse.usage) === null || _o === void 0 ? void 0 : _o.total_tokens) !== null && _p !== void 0 ? _p : 0,
215
- }, this.openlayerDefaultDataConfig);
216
- }
217
- return response;
218
- });
219
73
  /**
220
74
  * Creates a new inference pipeline in Openlayer, or loads an existing one if it already exists.
221
75
  * @param name The name of the inference pipeline.
@@ -227,7 +81,7 @@ class OpenAIMonitor {
227
81
  try {
228
82
  return yield this.loadInferencePipeline(name, projectId);
229
83
  }
230
- catch (_r) {
84
+ catch (_b) {
231
85
  const createInferencePipelineEndpoint = `/projects/${projectId}/inference-pipelines`;
232
86
  const createInferencePipelineQuery = this.resolvedQuery(createInferencePipelineEndpoint, { version: this.version });
233
87
  const createInferencePipelineResponse = yield fetch(createInferencePipelineQuery, {
@@ -262,7 +116,7 @@ class OpenAIMonitor {
262
116
  try {
263
117
  return yield this.loadProject(name);
264
118
  }
265
- catch (_s) {
119
+ catch (_c) {
266
120
  const projectsEndpoint = '/projects';
267
121
  const projectsQuery = this.resolvedQuery(projectsEndpoint);
268
122
  const projectsResponse = yield fetch(projectsQuery, {
@@ -342,10 +196,6 @@ class OpenAIMonitor {
342
196
  }
343
197
  return project;
344
198
  });
345
- this.OpenAIClient = new openai_1.default({
346
- apiKey: openAiApiKey,
347
- dangerouslyAllowBrowser: true,
348
- });
349
199
  this.openlayerApiKey = openlayerApiKey;
350
200
  this.openlayerInferencePipelineName = openlayerInferencePipelineName;
351
201
  this.openlayerProjectName = openlayerProjectName;
@@ -356,6 +206,167 @@ class OpenAIMonitor {
356
206
  throw new Error('Openlayer API key and project name are required for publishing.');
357
207
  }
358
208
  }
209
+ }
210
+ class OpenAIMonitor {
211
+ constructor({ openAiApiKey, openlayerApiKey, openlayerInferencePipelineName, openlayerProjectName, openlayerServerUrl, }) {
212
+ this.openlayerDefaultDataConfig = {
213
+ groundTruthColumnName: null,
214
+ inferenceIdColumnName: 'id',
215
+ inputVariableNames: ['input'],
216
+ latencyColumnName: 'latency',
217
+ numOfTokenColumnName: 'tokens',
218
+ outputColumnName: 'output',
219
+ timestampColumnName: 'timestamp',
220
+ };
221
+ this.monitoringOn = false;
222
+ this.formatChatCompletionInput = (messages) => messages
223
+ .filter(({ role }) => role === 'user')
224
+ .map(({ content }) => content)
225
+ .join('\n')
226
+ .trim();
227
+ /**
228
+ * Creates a new ChatCompletion instance. If monitoring is not active, an error is thrown.
229
+ * This function also measures latency and uploads data to Openlayer.
230
+ * @param body The parameters for creating a chat completion.
231
+ * @param options Optional request options.
232
+ * @throws Throws an error if monitoring is not active or if there is no output received from OpenAI.
233
+ * @returns A promise that resolves to a ChatCompletion or a Stream of ChatCompletionChunks.
234
+ */
235
+ this.createChatCompletion = (body, options) => __awaiter(this, void 0, void 0, function* () {
236
+ var _a, e_1, _b, _c;
237
+ var _d, _e;
238
+ if (!this.monitoringOn) {
239
+ throw new Error('Monitoring is not active.');
240
+ }
241
+ // Start a timer to measure latency
242
+ const startTime = Date.now();
243
+ // Accumulate output for streamed responses
244
+ let outputData = '';
245
+ const response = yield this.openAIClient.chat.completions.create(body, options);
246
+ if (body.stream) {
247
+ const streamedResponse = response;
248
+ try {
249
+ for (var _f = true, streamedResponse_1 = __asyncValues(streamedResponse), streamedResponse_1_1; streamedResponse_1_1 = yield streamedResponse_1.next(), _a = streamedResponse_1_1.done, !_a; _f = true) {
250
+ _c = streamedResponse_1_1.value;
251
+ _f = false;
252
+ const chunk = _c;
253
+ // Process each chunk - for example, accumulate input data
254
+ outputData += chunk.choices[0].delta.content;
255
+ }
256
+ }
257
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
258
+ finally {
259
+ try {
260
+ if (!_f && !_a && (_b = streamedResponse_1.return)) yield _b.call(streamedResponse_1);
261
+ }
262
+ finally { if (e_1) throw e_1.error; }
263
+ }
264
+ const endTime = Date.now();
265
+ const latency = endTime - startTime;
266
+ this.openlayerClient.uploadDataToOpenlayer({
267
+ input: this.formatChatCompletionInput(body.messages),
268
+ latency,
269
+ output: outputData,
270
+ timestamp: startTime,
271
+ }, this.openlayerDefaultDataConfig);
272
+ }
273
+ else {
274
+ const nonStreamedResponse = response;
275
+ // Handle regular (non-streamed) response
276
+ const endTime = Date.now();
277
+ const latency = endTime - startTime;
278
+ const output = nonStreamedResponse.choices[0].message.content;
279
+ if (typeof output !== 'string') {
280
+ throw new Error('No output received from OpenAI.');
281
+ }
282
+ this.openlayerClient.uploadDataToOpenlayer({
283
+ input: this.formatChatCompletionInput(body.messages),
284
+ latency,
285
+ output,
286
+ timestamp: startTime,
287
+ tokens: (_e = (_d = nonStreamedResponse.usage) === null || _d === void 0 ? void 0 : _d.total_tokens) !== null && _e !== void 0 ? _e : 0,
288
+ }, this.openlayerDefaultDataConfig);
289
+ }
290
+ return response;
291
+ });
292
+ /**
293
+ * Creates a new Completion instance. If monitoring is not active, an error is thrown.
294
+ * This function also measures latency and uploads data to Openlayer.
295
+ * @param body The parameters for creating a completion.
296
+ * @param options Optional request options.
297
+ * @throws Throws an error if monitoring is not active or if no prompt is provided.
298
+ * @returns A promise that resolves to a Completion or a Stream of Completions.
299
+ */
300
+ this.createCompletion = (body, options) => __awaiter(this, void 0, void 0, function* () {
301
+ var _g, e_2, _h, _j;
302
+ var _k, _l, _m, _o;
303
+ if (!this.monitoringOn) {
304
+ throw new Error('Monitoring is not active.');
305
+ }
306
+ if (!body.prompt) {
307
+ throw new Error('No prompt provided.');
308
+ }
309
+ // Start a timer to measure latency
310
+ const startTime = Date.now();
311
+ // Accumulate output and tokens data for streamed responses
312
+ let outputData = '';
313
+ let tokensData = 0;
314
+ const response = yield this.openAIClient.completions.create(body, options);
315
+ if (body.stream) {
316
+ const streamedResponse = response;
317
+ try {
318
+ for (var _p = true, streamedResponse_2 = __asyncValues(streamedResponse), streamedResponse_2_1; streamedResponse_2_1 = yield streamedResponse_2.next(), _g = streamedResponse_2_1.done, !_g; _p = true) {
319
+ _j = streamedResponse_2_1.value;
320
+ _p = false;
321
+ const chunk = _j;
322
+ // Process each chunk - for example, accumulate input data
323
+ outputData += chunk.choices[0].text.trim();
324
+ tokensData += (_l = (_k = chunk.usage) === null || _k === void 0 ? void 0 : _k.total_tokens) !== null && _l !== void 0 ? _l : 0;
325
+ }
326
+ }
327
+ catch (e_2_1) { e_2 = { error: e_2_1 }; }
328
+ finally {
329
+ try {
330
+ if (!_p && !_g && (_h = streamedResponse_2.return)) yield _h.call(streamedResponse_2);
331
+ }
332
+ finally { if (e_2) throw e_2.error; }
333
+ }
334
+ const endTime = Date.now();
335
+ const latency = endTime - startTime;
336
+ this.openlayerClient.uploadDataToOpenlayer({
337
+ input: body.prompt,
338
+ latency,
339
+ output: outputData,
340
+ timestamp: startTime,
341
+ tokens: tokensData,
342
+ }, this.openlayerDefaultDataConfig);
343
+ }
344
+ else {
345
+ const nonStreamedResponse = response;
346
+ // Handle regular (non-streamed) response
347
+ const endTime = Date.now();
348
+ const latency = endTime - startTime;
349
+ this.openlayerClient.uploadDataToOpenlayer({
350
+ input: body.prompt,
351
+ latency,
352
+ output: nonStreamedResponse.choices[0].text,
353
+ timestamp: startTime,
354
+ tokens: (_o = (_m = nonStreamedResponse.usage) === null || _m === void 0 ? void 0 : _m.total_tokens) !== null && _o !== void 0 ? _o : 0,
355
+ }, this.openlayerDefaultDataConfig);
356
+ }
357
+ return response;
358
+ });
359
+ this.openlayerClient = new OpenlayerClient({
360
+ openlayerApiKey,
361
+ openlayerInferencePipelineName,
362
+ openlayerProjectName,
363
+ openlayerServerUrl,
364
+ });
365
+ this.openAIClient = new openai_1.default({
366
+ apiKey: openAiApiKey,
367
+ dangerouslyAllowBrowser: true,
368
+ });
369
+ }
359
370
  /**
360
371
  * Starts monitoring for the OpenAI Monitor instance. If monitoring is already active, a warning is logged.
361
372
  */
@@ -379,4 +390,5 @@ class OpenAIMonitor {
379
390
  console.info('Monitoring stopped.');
380
391
  }
381
392
  }
382
- exports.default = OpenAIMonitor;
393
+ exports.OpenAIMonitor = OpenAIMonitor;
394
+ exports.default = OpenlayerClient;
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "openlayer",
3
- "version": "0.1.5",
3
+ "version": "0.1.6",
4
4
  "description": "The Openlayer TypeScript client",
5
5
  "main": "dist/index.js",
6
6
  "types": "dist/index.d.ts",