openlayer 0.1.5 → 0.1.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/dist/index.d.ts +40 -32
  2. package/dist/index.js +182 -171
  3. package/package.json +1 -1
package/dist/index.d.ts CHANGED
@@ -61,13 +61,17 @@ export interface ChatCompletionConfig {
61
61
  */
62
62
  timestampColumnName: string | null;
63
63
  }
64
- type ConstructorProps = {
65
- openAiApiKey: string;
64
+ type OpenlayerClientConstructorProps = {
66
65
  openlayerApiKey?: string;
67
66
  openlayerInferencePipelineName?: string;
68
67
  openlayerProjectName?: string;
69
68
  openlayerServerUrl?: string;
70
69
  };
70
+ type OpenAIMonitorConstructorProps = OpenlayerClientConstructorProps & {
71
+ openAiApiKey: string;
72
+ openlayerInferencePipelineName?: string;
73
+ openlayerProjectName: string;
74
+ };
71
75
  type OpenlayerInferencePipeline = {
72
76
  dataVolumeGraphs?: OpenlayerSampleVolumeGraph;
73
77
  dateCreated: string;
@@ -123,17 +127,11 @@ type OpenlayerSampleVolumeGraph = {
123
127
  weekly: OpenlayerSampleVolumeGraphBucket;
124
128
  };
125
129
  type OpenlayerTaskType = 'llm-base' | 'tabular-classification' | 'tabular-regression' | 'text-classification';
126
- declare class OpenAIMonitor {
127
- private OpenAIClient;
128
- private monitoringOn;
130
+ declare class OpenlayerClient {
129
131
  private openlayerApiKey?;
130
- private openlayerDefaultDataConfig;
131
- private openlayerProjectName?;
132
- private openlayerInferencePipelineName?;
133
132
  private openlayerServerUrl;
134
133
  private version;
135
- constructor({ openAiApiKey, openlayerApiKey, openlayerInferencePipelineName, openlayerProjectName, openlayerServerUrl, }: ConstructorProps);
136
- private formatChatCompletionInput;
134
+ constructor({ openlayerApiKey, openlayerServerUrl, }: OpenlayerClientConstructorProps);
137
135
  private resolvedQuery;
138
136
  private uploadToInferencePipeline;
139
137
  /**
@@ -144,25 +142,7 @@ declare class OpenAIMonitor {
144
142
  * @throws Throws an error if Openlayer API key or project name are not set.
145
143
  * @returns A promise that resolves when the data has been successfully uploaded.
146
144
  */
147
- uploadDataToOpenlayer: (data: ChatCompletionData, config: ChatCompletionConfig) => Promise<void>;
148
- /**
149
- * Creates a new ChatCompletion instance. If monitoring is not active, an error is thrown.
150
- * This function also measures latency and uploads data to Openlayer.
151
- * @param body The parameters for creating a chat completion.
152
- * @param options Optional request options.
153
- * @throws Throws an error if monitoring is not active or if there is no output received from OpenAI.
154
- * @returns A promise that resolves to a ChatCompletion or a Stream of ChatCompletionChunks.
155
- */
156
- createChatCompletion: (body: ChatCompletionCreateParams, options?: RequestOptions) => Promise<ChatCompletion | Stream<ChatCompletionChunk>>;
157
- /**
158
- * Creates a new Completion instance. If monitoring is not active, an error is thrown.
159
- * This function also measures latency and uploads data to Openlayer.
160
- * @param body The parameters for creating a completion.
161
- * @param options Optional request options.
162
- * @throws Throws an error if monitoring is not active or if no prompt is provided.
163
- * @returns A promise that resolves to a Completion or a Stream of Completions.
164
- */
165
- createCompletion: (body: CompletionCreateParams, options?: RequestOptions) => Promise<Completion | Stream<Completion>>;
145
+ uploadDataToOpenlayer: (data: ChatCompletionData, config: ChatCompletionConfig, projectName: string, inferencePipelineName?: string) => Promise<void>;
166
146
  /**
167
147
  * Creates a new inference pipeline in Openlayer, or loads an existing one if it already exists.
168
148
  * @param name The name of the inference pipeline.
@@ -170,7 +150,7 @@ declare class OpenAIMonitor {
170
150
  * @throws Throws an error if the inference pipeline cannot be created or found.
171
151
  * @returns A promise that resolves to an OpenlayerInferencePipeline object.
172
152
  */
173
- createInferencePipeline: (name: string, projectId: string) => Promise<OpenlayerInferencePipeline>;
153
+ createInferencePipeline: (projectId: string, name?: string) => Promise<OpenlayerInferencePipeline>;
174
154
  /**
175
155
  * Creates a new project in Openlayer, or loads an existing one if it already exists.
176
156
  * @param name The name of the project.
@@ -187,8 +167,36 @@ declare class OpenAIMonitor {
187
167
  * @throws Throws an error if the inference pipeline is not found.
188
168
  * @returns A promise that resolves to an OpenlayerInferencePipeline object.
189
169
  */
190
- loadInferencePipeline: (name: string, projectId: string) => Promise<OpenlayerInferencePipeline>;
170
+ loadInferencePipeline: (projectId: string, name?: string) => Promise<OpenlayerInferencePipeline>;
191
171
  loadProject: (name: string) => Promise<OpenlayerProject>;
172
+ }
173
+ export declare class OpenAIMonitor {
174
+ private openlayerClient;
175
+ private openAIClient;
176
+ private openlayerDefaultDataConfig;
177
+ private openlayerProjectName;
178
+ private openlayerInferencePipelineName;
179
+ private monitoringOn;
180
+ constructor({ openAiApiKey, openlayerApiKey, openlayerProjectName, openlayerInferencePipelineName, openlayerServerUrl, }: OpenAIMonitorConstructorProps);
181
+ private formatChatCompletionInput;
182
+ /**
183
+ * Creates a new ChatCompletion instance. If monitoring is not active, an error is thrown.
184
+ * This function also measures latency and uploads data to Openlayer.
185
+ * @param body The parameters for creating a chat completion.
186
+ * @param options Optional request options.
187
+ * @throws Throws an error if monitoring is not active or if there is no output received from OpenAI.
188
+ * @returns A promise that resolves to a ChatCompletion or a Stream of ChatCompletionChunks.
189
+ */
190
+ createChatCompletion: (body: ChatCompletionCreateParams, options?: RequestOptions) => Promise<ChatCompletion | Stream<ChatCompletionChunk>>;
191
+ /**
192
+ * Creates a new Completion instance. If monitoring is not active, an error is thrown.
193
+ * This function also measures latency and uploads data to Openlayer.
194
+ * @param body The parameters for creating a completion.
195
+ * @param options Optional request options.
196
+ * @throws Throws an error if monitoring is not active or if no prompt is provided.
197
+ * @returns A promise that resolves to a Completion or a Stream of Completions.
198
+ */
199
+ createCompletion: (body: CompletionCreateParams, options?: RequestOptions) => Promise<Completion | Stream<Completion>>;
192
200
  /**
193
201
  * Starts monitoring for the OpenAI Monitor instance. If monitoring is already active, a warning is logged.
194
202
  */
@@ -198,4 +206,4 @@ declare class OpenAIMonitor {
198
206
  */
199
207
  stopMonitoring(): void;
200
208
  }
201
- export default OpenAIMonitor;
209
+ export default OpenlayerClient;
package/dist/index.js CHANGED
@@ -16,28 +16,14 @@ var __asyncValues = (this && this.__asyncValues) || function (o) {
16
16
  function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }
17
17
  };
18
18
  Object.defineProperty(exports, "__esModule", { value: true });
19
+ exports.OpenAIMonitor = void 0;
19
20
  const openai_1 = require("openai");
20
21
  const uuid_1 = require("uuid");
21
22
  const request_1 = require("./utils/request");
22
- class OpenAIMonitor {
23
- constructor({ openAiApiKey, openlayerApiKey, openlayerInferencePipelineName, openlayerProjectName, openlayerServerUrl, }) {
24
- this.monitoringOn = false;
25
- this.openlayerDefaultDataConfig = {
26
- groundTruthColumnName: null,
27
- inferenceIdColumnName: 'id',
28
- inputVariableNames: ['input'],
29
- latencyColumnName: 'latency',
30
- numOfTokenColumnName: 'tokens',
31
- outputColumnName: 'output',
32
- timestampColumnName: 'timestamp',
33
- };
23
+ class OpenlayerClient {
24
+ constructor({ openlayerApiKey, openlayerServerUrl, }) {
34
25
  this.openlayerServerUrl = 'https://api.openlayer.com/v1';
35
26
  this.version = '0.1.0a16';
36
- this.formatChatCompletionInput = (messages) => messages
37
- .filter(({ role }) => role === 'user')
38
- .map(({ content }) => content)
39
- .join('\n')
40
- .trim();
41
27
  this.resolvedQuery = (endpoint, args = {}) => (0, request_1.resolvedQuery)(this.openlayerServerUrl, endpoint, args);
42
28
  this.uploadToInferencePipeline = (inferencePipelineId, data, config) => __awaiter(this, void 0, void 0, function* () {
43
29
  var _a;
@@ -70,13 +56,13 @@ class OpenAIMonitor {
70
56
  * @throws Throws an error if Openlayer API key or project name are not set.
71
57
  * @returns A promise that resolves when the data has been successfully uploaded.
72
58
  */
73
- this.uploadDataToOpenlayer = (data, config) => __awaiter(this, void 0, void 0, function* () {
74
- if (!this.openlayerApiKey || !this.openlayerProjectName) {
75
- throw new Error('Openlayer API key and project name are required for publishing.');
59
+ this.uploadDataToOpenlayer = (data, config, projectName, inferencePipelineName = 'production') => __awaiter(this, void 0, void 0, function* () {
60
+ if (!this.openlayerApiKey) {
61
+ throw new Error('Openlayer API key are required for publishing.');
76
62
  }
77
63
  try {
78
- const project = yield this.createProject(this.openlayerProjectName, 'llm-base');
79
- const inferencePipeline = yield this.createInferencePipeline(this.openlayerProjectName, project.id);
64
+ const project = yield this.createProject(projectName, 'llm-base');
65
+ const inferencePipeline = yield this.createInferencePipeline(inferencePipelineName, project.id);
80
66
  yield this.uploadToInferencePipeline(inferencePipeline.id, data, config);
81
67
  }
82
68
  catch (error) {
@@ -84,138 +70,6 @@ class OpenAIMonitor {
84
70
  throw error;
85
71
  }
86
72
  });
87
- /**
88
- * Creates a new ChatCompletion instance. If monitoring is not active, an error is thrown.
89
- * This function also measures latency and uploads data to Openlayer.
90
- * @param body The parameters for creating a chat completion.
91
- * @param options Optional request options.
92
- * @throws Throws an error if monitoring is not active or if there is no output received from OpenAI.
93
- * @returns A promise that resolves to a ChatCompletion or a Stream of ChatCompletionChunks.
94
- */
95
- this.createChatCompletion = (body, options) => __awaiter(this, void 0, void 0, function* () {
96
- var _b, e_1, _c, _d;
97
- var _e, _f;
98
- if (!this.monitoringOn) {
99
- throw new Error('Monitoring is not active.');
100
- }
101
- // Start a timer to measure latency
102
- const startTime = Date.now();
103
- // Accumulate output for streamed responses
104
- let outputData = '';
105
- const response = yield this.OpenAIClient.chat.completions.create(body, options);
106
- if (body.stream) {
107
- const streamedResponse = response;
108
- try {
109
- for (var _g = true, streamedResponse_1 = __asyncValues(streamedResponse), streamedResponse_1_1; streamedResponse_1_1 = yield streamedResponse_1.next(), _b = streamedResponse_1_1.done, !_b; _g = true) {
110
- _d = streamedResponse_1_1.value;
111
- _g = false;
112
- const chunk = _d;
113
- // Process each chunk - for example, accumulate input data
114
- outputData += chunk.choices[0].delta.content;
115
- }
116
- }
117
- catch (e_1_1) { e_1 = { error: e_1_1 }; }
118
- finally {
119
- try {
120
- if (!_g && !_b && (_c = streamedResponse_1.return)) yield _c.call(streamedResponse_1);
121
- }
122
- finally { if (e_1) throw e_1.error; }
123
- }
124
- const endTime = Date.now();
125
- const latency = endTime - startTime;
126
- this.uploadDataToOpenlayer({
127
- input: this.formatChatCompletionInput(body.messages),
128
- latency,
129
- output: outputData,
130
- timestamp: startTime,
131
- }, this.openlayerDefaultDataConfig);
132
- }
133
- else {
134
- const nonStreamedResponse = response;
135
- // Handle regular (non-streamed) response
136
- const endTime = Date.now();
137
- const latency = endTime - startTime;
138
- const output = nonStreamedResponse.choices[0].message.content;
139
- if (typeof output !== 'string') {
140
- throw new Error('No output received from OpenAI.');
141
- }
142
- this.uploadDataToOpenlayer({
143
- input: this.formatChatCompletionInput(body.messages),
144
- latency,
145
- output,
146
- timestamp: startTime,
147
- tokens: (_f = (_e = nonStreamedResponse.usage) === null || _e === void 0 ? void 0 : _e.total_tokens) !== null && _f !== void 0 ? _f : 0,
148
- }, this.openlayerDefaultDataConfig);
149
- }
150
- return response;
151
- });
152
- /**
153
- * Creates a new Completion instance. If monitoring is not active, an error is thrown.
154
- * This function also measures latency and uploads data to Openlayer.
155
- * @param body The parameters for creating a completion.
156
- * @param options Optional request options.
157
- * @throws Throws an error if monitoring is not active or if no prompt is provided.
158
- * @returns A promise that resolves to a Completion or a Stream of Completions.
159
- */
160
- this.createCompletion = (body, options) => __awaiter(this, void 0, void 0, function* () {
161
- var _h, e_2, _j, _k;
162
- var _l, _m, _o, _p;
163
- if (!this.monitoringOn) {
164
- throw new Error('Monitoring is not active.');
165
- }
166
- if (!body.prompt) {
167
- throw new Error('No prompt provided.');
168
- }
169
- // Start a timer to measure latency
170
- const startTime = Date.now();
171
- // Accumulate output and tokens data for streamed responses
172
- let outputData = '';
173
- let tokensData = 0;
174
- const response = yield this.OpenAIClient.completions.create(body, options);
175
- if (body.stream) {
176
- const streamedResponse = response;
177
- try {
178
- for (var _q = true, streamedResponse_2 = __asyncValues(streamedResponse), streamedResponse_2_1; streamedResponse_2_1 = yield streamedResponse_2.next(), _h = streamedResponse_2_1.done, !_h; _q = true) {
179
- _k = streamedResponse_2_1.value;
180
- _q = false;
181
- const chunk = _k;
182
- // Process each chunk - for example, accumulate input data
183
- outputData += chunk.choices[0].text.trim();
184
- tokensData += (_m = (_l = chunk.usage) === null || _l === void 0 ? void 0 : _l.total_tokens) !== null && _m !== void 0 ? _m : 0;
185
- }
186
- }
187
- catch (e_2_1) { e_2 = { error: e_2_1 }; }
188
- finally {
189
- try {
190
- if (!_q && !_h && (_j = streamedResponse_2.return)) yield _j.call(streamedResponse_2);
191
- }
192
- finally { if (e_2) throw e_2.error; }
193
- }
194
- const endTime = Date.now();
195
- const latency = endTime - startTime;
196
- this.uploadDataToOpenlayer({
197
- input: body.prompt,
198
- latency,
199
- output: outputData,
200
- timestamp: startTime,
201
- tokens: tokensData,
202
- }, this.openlayerDefaultDataConfig);
203
- }
204
- else {
205
- const nonStreamedResponse = response;
206
- // Handle regular (non-streamed) response
207
- const endTime = Date.now();
208
- const latency = endTime - startTime;
209
- this.uploadDataToOpenlayer({
210
- input: body.prompt,
211
- latency,
212
- output: nonStreamedResponse.choices[0].text,
213
- timestamp: startTime,
214
- tokens: (_p = (_o = nonStreamedResponse.usage) === null || _o === void 0 ? void 0 : _o.total_tokens) !== null && _p !== void 0 ? _p : 0,
215
- }, this.openlayerDefaultDataConfig);
216
- }
217
- return response;
218
- });
219
73
  /**
220
74
  * Creates a new inference pipeline in Openlayer, or loads an existing one if it already exists.
221
75
  * @param name The name of the inference pipeline.
@@ -223,19 +77,17 @@ class OpenAIMonitor {
223
77
  * @throws Throws an error if the inference pipeline cannot be created or found.
224
78
  * @returns A promise that resolves to an OpenlayerInferencePipeline object.
225
79
  */
226
- this.createInferencePipeline = (name, projectId) => __awaiter(this, void 0, void 0, function* () {
80
+ this.createInferencePipeline = (projectId, name = 'production') => __awaiter(this, void 0, void 0, function* () {
227
81
  try {
228
82
  return yield this.loadInferencePipeline(name, projectId);
229
83
  }
230
- catch (_r) {
84
+ catch (_b) {
231
85
  const createInferencePipelineEndpoint = `/projects/${projectId}/inference-pipelines`;
232
86
  const createInferencePipelineQuery = this.resolvedQuery(createInferencePipelineEndpoint, { version: this.version });
233
87
  const createInferencePipelineResponse = yield fetch(createInferencePipelineQuery, {
234
88
  body: JSON.stringify({
235
89
  description: '',
236
- name: typeof this.openlayerInferencePipelineName === 'undefined'
237
- ? 'production'
238
- : this.openlayerInferencePipelineName,
90
+ name,
239
91
  }),
240
92
  headers: {
241
93
  Authorization: `Bearer ${this.openlayerApiKey}`,
@@ -262,7 +114,7 @@ class OpenAIMonitor {
262
114
  try {
263
115
  return yield this.loadProject(name);
264
116
  }
265
- catch (_s) {
117
+ catch (_c) {
266
118
  const projectsEndpoint = '/projects';
267
119
  const projectsQuery = this.resolvedQuery(projectsEndpoint);
268
120
  const projectsResponse = yield fetch(projectsQuery, {
@@ -295,10 +147,10 @@ class OpenAIMonitor {
295
147
  * @throws Throws an error if the inference pipeline is not found.
296
148
  * @returns A promise that resolves to an OpenlayerInferencePipeline object.
297
149
  */
298
- this.loadInferencePipeline = (name, projectId) => __awaiter(this, void 0, void 0, function* () {
150
+ this.loadInferencePipeline = (projectId, name = 'production') => __awaiter(this, void 0, void 0, function* () {
299
151
  const inferencePipelineEndpoint = `/projects/${projectId}/inference-pipelines`;
300
152
  const inferencePipelineQueryParameters = {
301
- name: this.openlayerInferencePipelineName,
153
+ name,
302
154
  version: this.version,
303
155
  };
304
156
  const inferencePipelineQuery = this.resolvedQuery(inferencePipelineEndpoint, inferencePipelineQueryParameters);
@@ -342,19 +194,177 @@ class OpenAIMonitor {
342
194
  }
343
195
  return project;
344
196
  });
345
- this.OpenAIClient = new openai_1.default({
346
- apiKey: openAiApiKey,
347
- dangerouslyAllowBrowser: true,
348
- });
349
197
  this.openlayerApiKey = openlayerApiKey;
350
- this.openlayerInferencePipelineName = openlayerInferencePipelineName;
351
- this.openlayerProjectName = openlayerProjectName;
352
198
  if (openlayerServerUrl) {
353
199
  this.openlayerServerUrl = openlayerServerUrl;
354
200
  }
355
- if (!this.openlayerApiKey || !this.openlayerProjectName) {
356
- throw new Error('Openlayer API key and project name are required for publishing.');
201
+ if (!this.openlayerApiKey) {
202
+ throw new Error('Openlayer API key are required for publishing.');
203
+ }
204
+ }
205
+ }
206
+ class OpenAIMonitor {
207
+ constructor({ openAiApiKey, openlayerApiKey, openlayerProjectName, openlayerInferencePipelineName, openlayerServerUrl, }) {
208
+ this.openlayerDefaultDataConfig = {
209
+ groundTruthColumnName: null,
210
+ inferenceIdColumnName: 'id',
211
+ inputVariableNames: ['input'],
212
+ latencyColumnName: 'latency',
213
+ numOfTokenColumnName: 'tokens',
214
+ outputColumnName: 'output',
215
+ timestampColumnName: 'timestamp',
216
+ };
217
+ this.openlayerInferencePipelineName = 'production';
218
+ this.monitoringOn = false;
219
+ this.formatChatCompletionInput = (messages) => messages
220
+ .filter(({ role }) => role === 'user')
221
+ .map(({ content }) => content)
222
+ .join('\n')
223
+ .trim();
224
+ /**
225
+ * Creates a new ChatCompletion instance. If monitoring is not active, an error is thrown.
226
+ * This function also measures latency and uploads data to Openlayer.
227
+ * @param body The parameters for creating a chat completion.
228
+ * @param options Optional request options.
229
+ * @throws Throws an error if monitoring is not active or if there is no output received from OpenAI.
230
+ * @returns A promise that resolves to a ChatCompletion or a Stream of ChatCompletionChunks.
231
+ */
232
+ this.createChatCompletion = (body, options) => __awaiter(this, void 0, void 0, function* () {
233
+ var _a, e_1, _b, _c;
234
+ var _d, _e;
235
+ if (!this.monitoringOn) {
236
+ throw new Error('Monitoring is not active.');
237
+ }
238
+ // Start a timer to measure latency
239
+ const startTime = Date.now();
240
+ // Accumulate output for streamed responses
241
+ let outputData = '';
242
+ const response = yield this.openAIClient.chat.completions.create(body, options);
243
+ if (body.stream) {
244
+ const streamedResponse = response;
245
+ try {
246
+ for (var _f = true, streamedResponse_1 = __asyncValues(streamedResponse), streamedResponse_1_1; streamedResponse_1_1 = yield streamedResponse_1.next(), _a = streamedResponse_1_1.done, !_a; _f = true) {
247
+ _c = streamedResponse_1_1.value;
248
+ _f = false;
249
+ const chunk = _c;
250
+ // Process each chunk - for example, accumulate input data
251
+ outputData += chunk.choices[0].delta.content;
252
+ }
253
+ }
254
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
255
+ finally {
256
+ try {
257
+ if (!_f && !_a && (_b = streamedResponse_1.return)) yield _b.call(streamedResponse_1);
258
+ }
259
+ finally { if (e_1) throw e_1.error; }
260
+ }
261
+ const endTime = Date.now();
262
+ const latency = endTime - startTime;
263
+ this.openlayerClient.uploadDataToOpenlayer({
264
+ input: this.formatChatCompletionInput(body.messages),
265
+ latency,
266
+ output: outputData,
267
+ timestamp: startTime,
268
+ }, this.openlayerDefaultDataConfig, this.openlayerProjectName, this.openlayerInferencePipelineName);
269
+ }
270
+ else {
271
+ const nonStreamedResponse = response;
272
+ // Handle regular (non-streamed) response
273
+ const endTime = Date.now();
274
+ const latency = endTime - startTime;
275
+ const output = nonStreamedResponse.choices[0].message.content;
276
+ if (typeof output !== 'string') {
277
+ throw new Error('No output received from OpenAI.');
278
+ }
279
+ this.openlayerClient.uploadDataToOpenlayer({
280
+ input: this.formatChatCompletionInput(body.messages),
281
+ latency,
282
+ output,
283
+ timestamp: startTime,
284
+ tokens: (_e = (_d = nonStreamedResponse.usage) === null || _d === void 0 ? void 0 : _d.total_tokens) !== null && _e !== void 0 ? _e : 0,
285
+ }, this.openlayerDefaultDataConfig, this.openlayerProjectName, this.openlayerInferencePipelineName);
286
+ }
287
+ return response;
288
+ });
289
+ /**
290
+ * Creates a new Completion instance. If monitoring is not active, an error is thrown.
291
+ * This function also measures latency and uploads data to Openlayer.
292
+ * @param body The parameters for creating a completion.
293
+ * @param options Optional request options.
294
+ * @throws Throws an error if monitoring is not active or if no prompt is provided.
295
+ * @returns A promise that resolves to a Completion or a Stream of Completions.
296
+ */
297
+ this.createCompletion = (body, options) => __awaiter(this, void 0, void 0, function* () {
298
+ var _g, e_2, _h, _j;
299
+ var _k, _l, _m, _o;
300
+ if (!this.monitoringOn) {
301
+ throw new Error('Monitoring is not active.');
302
+ }
303
+ if (!body.prompt) {
304
+ throw new Error('No prompt provided.');
305
+ }
306
+ // Start a timer to measure latency
307
+ const startTime = Date.now();
308
+ // Accumulate output and tokens data for streamed responses
309
+ let outputData = '';
310
+ let tokensData = 0;
311
+ const response = yield this.openAIClient.completions.create(body, options);
312
+ if (body.stream) {
313
+ const streamedResponse = response;
314
+ try {
315
+ for (var _p = true, streamedResponse_2 = __asyncValues(streamedResponse), streamedResponse_2_1; streamedResponse_2_1 = yield streamedResponse_2.next(), _g = streamedResponse_2_1.done, !_g; _p = true) {
316
+ _j = streamedResponse_2_1.value;
317
+ _p = false;
318
+ const chunk = _j;
319
+ // Process each chunk - for example, accumulate input data
320
+ outputData += chunk.choices[0].text.trim();
321
+ tokensData += (_l = (_k = chunk.usage) === null || _k === void 0 ? void 0 : _k.total_tokens) !== null && _l !== void 0 ? _l : 0;
322
+ }
323
+ }
324
+ catch (e_2_1) { e_2 = { error: e_2_1 }; }
325
+ finally {
326
+ try {
327
+ if (!_p && !_g && (_h = streamedResponse_2.return)) yield _h.call(streamedResponse_2);
328
+ }
329
+ finally { if (e_2) throw e_2.error; }
330
+ }
331
+ const endTime = Date.now();
332
+ const latency = endTime - startTime;
333
+ this.openlayerClient.uploadDataToOpenlayer({
334
+ input: body.prompt,
335
+ latency,
336
+ output: outputData,
337
+ timestamp: startTime,
338
+ tokens: tokensData,
339
+ }, this.openlayerDefaultDataConfig, this.openlayerProjectName, this.openlayerInferencePipelineName);
340
+ }
341
+ else {
342
+ const nonStreamedResponse = response;
343
+ // Handle regular (non-streamed) response
344
+ const endTime = Date.now();
345
+ const latency = endTime - startTime;
346
+ this.openlayerClient.uploadDataToOpenlayer({
347
+ input: body.prompt,
348
+ latency,
349
+ output: nonStreamedResponse.choices[0].text,
350
+ timestamp: startTime,
351
+ tokens: (_o = (_m = nonStreamedResponse.usage) === null || _m === void 0 ? void 0 : _m.total_tokens) !== null && _o !== void 0 ? _o : 0,
352
+ }, this.openlayerDefaultDataConfig, this.openlayerProjectName, this.openlayerInferencePipelineName);
353
+ }
354
+ return response;
355
+ });
356
+ this.openlayerProjectName = openlayerProjectName;
357
+ if (openlayerInferencePipelineName) {
358
+ this.openlayerInferencePipelineName = openlayerInferencePipelineName;
357
359
  }
360
+ this.openlayerClient = new OpenlayerClient({
361
+ openlayerApiKey,
362
+ openlayerServerUrl,
363
+ });
364
+ this.openAIClient = new openai_1.default({
365
+ apiKey: openAiApiKey,
366
+ dangerouslyAllowBrowser: true,
367
+ });
358
368
  }
359
369
  /**
360
370
  * Starts monitoring for the OpenAI Monitor instance. If monitoring is already active, a warning is logged.
@@ -379,4 +389,5 @@ class OpenAIMonitor {
379
389
  console.info('Monitoring stopped.');
380
390
  }
381
391
  }
382
- exports.default = OpenAIMonitor;
392
+ exports.OpenAIMonitor = OpenAIMonitor;
393
+ exports.default = OpenlayerClient;
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "openlayer",
3
- "version": "0.1.5",
3
+ "version": "0.1.7",
4
4
  "description": "The Openlayer TypeScript client",
5
5
  "main": "dist/index.js",
6
6
  "types": "dist/index.d.ts",