openlayer 0.0.4 → 0.0.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/index.js +18 -31
  2. package/package.json +2 -1
package/dist/index.js CHANGED
@@ -8,17 +8,6 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, ge
         step((generator = generator.apply(thisArg, _arguments || [])).next());
     });
 };
-var __rest = (this && this.__rest) || function (s, e) {
-    var t = {};
-    for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)
-        t[p] = s[p];
-    if (s != null && typeof Object.getOwnPropertySymbols === "function")
-        for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {
-            if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))
-                t[p[i]] = s[p[i]];
-        }
-    return t;
-};
 var __asyncValues = (this && this.__asyncValues) || function (o) {
     if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined.");
     var m = o[Symbol.asyncIterator], i;
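Note: the removed `__rest` function is the standard helper tsc emits when a file uses an object rest pattern; once the last rest pattern was dropped (see the `createInferencePipelineResponse` change below), the compiler stopped emitting it. A minimal sketch of the source-level pattern that triggers the helper; the names here are illustrative, not from the package's source:

```ts
// Hypothetical object; only the shape matters.
const payload = { id: 'abc123', name: 'my-pipeline', dateCreated: '2023-11-20' };

// An object rest pattern like this makes tsc emit __rest when
// targeting ES2017 or lower:
const { id, ...rest } = payload; // rest === { name: ..., dateCreated: ... }
```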
@@ -62,7 +51,6 @@ class OpenAIMonitor {
             },
             method: 'POST',
         });
-        console.log('bruh', response, dataStreamQuery);
         if (!response.ok) {
             console.error('Error making POST request:', response.status);
             throw new Error(`Error: ${response.status}`);
@@ -130,8 +118,7 @@ class OpenAIMonitor {
             },
             method: 'POST',
         });
-        const _a = yield createInferencePipelineResponse.json(), { id: inferencePipelineId } = _a, response = __rest(_a, ["id"]);
-        console.log(createInferencePipelineResponse, response, inferencePipelineId);
+        const { id: inferencePipelineId } = yield createInferencePipelineResponse.json();
         if (!inferencePipelineId) {
             throw new Error('Error creating inference pipeline');
         }
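At the source level this change amounts to dropping the rest pattern (and the debug logging), since only the pipeline id is ever used. A sketch of the after-state, assuming the compiled output maps back to straightforward TypeScript; the URL and wrapper function are illustrative, not the package's actual API surface:

```ts
// Sketch only; assumes a global fetch (Node 18+).
async function createPipeline(): Promise<string> {
  const createInferencePipelineResponse = await fetch(
    'https://example.com/inference-pipelines', // placeholder endpoint
    { method: 'POST' },
  );
  // Before: const { id: inferencePipelineId, ...response } = await ...json();
  // (the rest pattern is what required the __rest helper above)
  const { id: inferencePipelineId } =
    (await createInferencePipelineResponse.json()) as { id?: string };
  if (!inferencePipelineId) {
    throw new Error('Error creating inference pipeline');
  }
  return inferencePipelineId;
}
```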
@@ -144,8 +131,8 @@ class OpenAIMonitor {
             }
         });
         this.createChatCompletion = (body, options) => __awaiter(this, void 0, void 0, function* () {
-            var _b, e_1, _c, _d;
-            var _e, _f;
+            var _a, e_1, _b, _c;
+            var _d, _e;
             if (!this.monitoringOn) {
                 throw new Error('Monitoring is not active.');
             }
@@ -157,10 +144,10 @@ class OpenAIMonitor {
             if (body.stream) {
                 const streamedResponse = response;
                 try {
-                    for (var _g = true, streamedResponse_1 = __asyncValues(streamedResponse), streamedResponse_1_1; streamedResponse_1_1 = yield streamedResponse_1.next(), _b = streamedResponse_1_1.done, !_b; _g = true) {
-                        _d = streamedResponse_1_1.value;
-                        _g = false;
-                        const chunk = _d;
+                    for (var _f = true, streamedResponse_1 = __asyncValues(streamedResponse), streamedResponse_1_1; streamedResponse_1_1 = yield streamedResponse_1.next(), _a = streamedResponse_1_1.done, !_a; _f = true) {
+                        _c = streamedResponse_1_1.value;
+                        _f = false;
+                        const chunk = _c;
                         // Process each chunk - for example, accumulate input data
                         outputData += chunk.choices[0].delta.content;
                     }
@@ -168,7 +155,7 @@ class OpenAIMonitor {
                 catch (e_1_1) { e_1 = { error: e_1_1 }; }
                 finally {
                     try {
-                        if (!_g && !_b && (_c = streamedResponse_1.return)) yield _c.call(streamedResponse_1);
+                        if (!_f && !_a && (_b = streamedResponse_1.return)) yield _b.call(streamedResponse_1);
                     }
                     finally { if (e_1) throw e_1.error; }
                 }
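The temp-variable renames in these two hunks (`_g`→`_f`, `_b`→`_a`, and so on) are a mechanical consequence of removing `__rest`: tsc reassigns helper names in order, so every later temporary shifts down one letter. The loop itself is the ES2017 down-level of a `for await...of`; a self-contained sketch of the pattern it compiles from, with illustrative types and a stand-in stream rather than the OpenAI SDK's:

```ts
// Minimal chunk shape; the real SDK type is richer.
type Chunk = { choices: { delta: { content?: string } }[] };

// Stand-in for the streamed chat completion response.
async function* fakeStream(): AsyncGenerator<Chunk> {
  yield { choices: [{ delta: { content: 'Hello, ' } }] };
  yield { choices: [{ delta: { content: 'world!' } }] };
}

async function accumulate(): Promise<string> {
  let outputData = '';
  // This `for await...of` is what tsc turns into the
  // __asyncValues/for(;;) machinery seen in the diff.
  for await (const chunk of fakeStream()) {
    outputData += chunk.choices[0].delta.content ?? '';
  }
  return outputData;
}
```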
@@ -189,14 +176,14 @@ class OpenAIMonitor {
                     input: this.formatChatCompletionInput(body.messages),
                     latency,
                     output: nonStreamedResponse.choices[0].message.content,
-                    tokens: (_f = (_e = nonStreamedResponse.usage) === null || _e === void 0 ? void 0 : _e.total_tokens) !== null && _f !== void 0 ? _f : 0,
+                    tokens: (_e = (_d = nonStreamedResponse.usage) === null || _d === void 0 ? void 0 : _d.total_tokens) !== null && _e !== void 0 ? _e : 0,
                 });
             }
             return response;
         });
         this.createCompletion = (body, options) => __awaiter(this, void 0, void 0, function* () {
-            var _h, e_2, _j, _k;
-            var _l, _m, _o, _p;
+            var _g, e_2, _h, _j;
+            var _k, _l, _m, _o;
             if (!this.monitoringOn) {
                 throw new Error('Monitoring is not active.');
             }
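The dense `tokens:` expression is likewise just a down-level: optional chaining plus nullish coalescing compiled for ES2017. In source form it is presumably something like the following; the type alias is illustrative:

```ts
type Usage = { total_tokens?: number };

function countTokens(usage?: Usage): number {
  // tsc compiles this into the
  // (_e = (_d = usage) === null || _d === void 0 ? void 0 : _d.total_tokens) ...
  // chain seen in the diff when targeting ES2017 or lower.
  return usage?.total_tokens ?? 0;
}
```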
@@ -209,19 +196,19 @@ class OpenAIMonitor {
             if (body.stream) {
                 const streamedResponse = response;
                 try {
-                    for (var _q = true, streamedResponse_2 = __asyncValues(streamedResponse), streamedResponse_2_1; streamedResponse_2_1 = yield streamedResponse_2.next(), _h = streamedResponse_2_1.done, !_h; _q = true) {
-                        _k = streamedResponse_2_1.value;
-                        _q = false;
-                        const chunk = _k;
+                    for (var _p = true, streamedResponse_2 = __asyncValues(streamedResponse), streamedResponse_2_1; streamedResponse_2_1 = yield streamedResponse_2.next(), _g = streamedResponse_2_1.done, !_g; _p = true) {
+                        _j = streamedResponse_2_1.value;
+                        _p = false;
+                        const chunk = _j;
                         // Process each chunk - for example, accumulate input data
                         outputData += chunk.choices[0].text.trim();
-                        tokensData += (_m = (_l = chunk.usage) === null || _l === void 0 ? void 0 : _l.total_tokens) !== null && _m !== void 0 ? _m : 0;
+                        tokensData += (_l = (_k = chunk.usage) === null || _k === void 0 ? void 0 : _k.total_tokens) !== null && _l !== void 0 ? _l : 0;
                     }
                 }
                 catch (e_2_1) { e_2 = { error: e_2_1 }; }
                 finally {
                     try {
-                        if (!_q && !_h && (_j = streamedResponse_2.return)) yield _j.call(streamedResponse_2);
+                        if (!_p && !_g && (_h = streamedResponse_2.return)) yield _h.call(streamedResponse_2);
                     }
                     finally { if (e_2) throw e_2.error; }
                 }
@@ -243,7 +230,7 @@ class OpenAIMonitor {
                     input: body.prompt,
                     latency,
                     output: nonStreamedResponse.choices[0].text,
-                    tokens: (_p = (_o = nonStreamedResponse.usage) === null || _o === void 0 ? void 0 : _o.total_tokens) !== null && _p !== void 0 ? _p : 0,
+                    tokens: (_o = (_m = nonStreamedResponse.usage) === null || _m === void 0 ? void 0 : _m.total_tokens) !== null && _o !== void 0 ? _o : 0,
                 });
             }
             return response;
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "openlayer",
-  "version": "0.0.4",
+  "version": "0.0.6",
   "description": "The Openlayer TypeScript client",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
@@ -18,6 +18,7 @@
     "eslint-config-prettier": "^9.0.0",
     "eslint-plugin-prettier": "^5.0.1",
     "eslint-plugin-typescript-sort-keys": "^3.1.0",
+    "node-fetch": "^3.3.2",
     "openai": "^4.19.0"
   },
   "devDependencies": {