openlayer 0.0.0 → 0.0.1-alpha.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (262)
  1. package/LICENSE +201 -0
  2. package/README.md +408 -0
  3. package/_shims/MultipartBody.d.ts +9 -0
  4. package/_shims/MultipartBody.d.ts.map +1 -0
  5. package/_shims/MultipartBody.js +16 -0
  6. package/_shims/MultipartBody.js.map +1 -0
  7. package/_shims/MultipartBody.mjs +12 -0
  8. package/_shims/MultipartBody.mjs.map +1 -0
  9. package/_shims/README.md +46 -0
  10. package/_shims/auto/runtime-bun.d.ts +5 -0
  11. package/_shims/auto/runtime-bun.d.ts.map +1 -0
  12. package/_shims/auto/runtime-bun.js +21 -0
  13. package/_shims/auto/runtime-bun.js.map +1 -0
  14. package/_shims/auto/runtime-bun.mjs +2 -0
  15. package/_shims/auto/runtime-bun.mjs.map +1 -0
  16. package/_shims/auto/runtime-node.d.ts +5 -0
  17. package/_shims/auto/runtime-node.d.ts.map +1 -0
  18. package/_shims/auto/runtime-node.js +21 -0
  19. package/_shims/auto/runtime-node.js.map +1 -0
  20. package/_shims/auto/runtime-node.mjs +2 -0
  21. package/_shims/auto/runtime-node.mjs.map +1 -0
  22. package/_shims/auto/runtime.d.ts +5 -0
  23. package/_shims/auto/runtime.d.ts.map +1 -0
  24. package/_shims/auto/runtime.js +21 -0
  25. package/_shims/auto/runtime.js.map +1 -0
  26. package/_shims/auto/runtime.mjs +2 -0
  27. package/_shims/auto/runtime.mjs.map +1 -0
  28. package/_shims/auto/types-node.d.ts +5 -0
  29. package/_shims/auto/types-node.d.ts.map +1 -0
  30. package/_shims/auto/types-node.js +21 -0
  31. package/_shims/auto/types-node.js.map +1 -0
  32. package/_shims/auto/types-node.mjs +2 -0
  33. package/_shims/auto/types-node.mjs.map +1 -0
  34. package/_shims/auto/types.d.ts +101 -0
  35. package/_shims/auto/types.js +3 -0
  36. package/_shims/auto/types.mjs +3 -0
  37. package/_shims/bun-runtime.d.ts +6 -0
  38. package/_shims/bun-runtime.d.ts.map +1 -0
  39. package/_shims/bun-runtime.js +14 -0
  40. package/_shims/bun-runtime.js.map +1 -0
  41. package/_shims/bun-runtime.mjs +10 -0
  42. package/_shims/bun-runtime.mjs.map +1 -0
  43. package/_shims/index.d.ts +81 -0
  44. package/_shims/index.js +13 -0
  45. package/_shims/index.mjs +7 -0
  46. package/_shims/manual-types.d.ts +12 -0
  47. package/_shims/manual-types.js +3 -0
  48. package/_shims/manual-types.mjs +3 -0
  49. package/_shims/node-runtime.d.ts +3 -0
  50. package/_shims/node-runtime.d.ts.map +1 -0
  51. package/_shims/node-runtime.js +90 -0
  52. package/_shims/node-runtime.js.map +1 -0
  53. package/_shims/node-runtime.mjs +56 -0
  54. package/_shims/node-runtime.mjs.map +1 -0
  55. package/_shims/node-types.d.ts +42 -0
  56. package/_shims/node-types.js +3 -0
  57. package/_shims/node-types.mjs +3 -0
  58. package/_shims/registry.d.ts +37 -0
  59. package/_shims/registry.d.ts.map +1 -0
  60. package/_shims/registry.js +41 -0
  61. package/_shims/registry.js.map +1 -0
  62. package/_shims/registry.mjs +37 -0
  63. package/_shims/registry.mjs.map +1 -0
  64. package/_shims/web-runtime.d.ts +5 -0
  65. package/_shims/web-runtime.d.ts.map +1 -0
  66. package/_shims/web-runtime.js +78 -0
  67. package/_shims/web-runtime.js.map +1 -0
  68. package/_shims/web-runtime.mjs +71 -0
  69. package/_shims/web-runtime.mjs.map +1 -0
  70. package/_shims/web-types.d.ts +83 -0
  71. package/_shims/web-types.js +3 -0
  72. package/_shims/web-types.mjs +3 -0
  73. package/core.d.ts +237 -0
  74. package/core.d.ts.map +1 -0
  75. package/core.js +868 -0
  76. package/core.js.map +1 -0
  77. package/core.mjs +837 -0
  78. package/core.mjs.map +1 -0
  79. package/error.d.ts +53 -0
  80. package/error.d.ts.map +1 -0
  81. package/error.js +143 -0
  82. package/error.js.map +1 -0
  83. package/error.mjs +127 -0
  84. package/error.mjs.map +1 -0
  85. package/index.d.mts +114 -0
  86. package/index.d.ts +114 -0
  87. package/index.d.ts.map +1 -0
  88. package/index.js +119 -0
  89. package/index.js.map +1 -0
  90. package/index.mjs +91 -0
  91. package/index.mjs.map +1 -0
  92. package/lib/core/cli.d.ts +7 -0
  93. package/lib/core/cli.d.ts.map +1 -0
  94. package/lib/core/cli.js +91 -0
  95. package/lib/core/cli.js.map +1 -0
  96. package/lib/core/cli.mjs +64 -0
  97. package/lib/core/cli.mjs.map +1 -0
  98. package/lib/core/index.d.ts +237 -0
  99. package/lib/core/index.d.ts.map +1 -0
  100. package/lib/core/index.js +635 -0
  101. package/lib/core/index.js.map +1 -0
  102. package/lib/core/index.mjs +627 -0
  103. package/lib/core/index.mjs.map +1 -0
  104. package/lib/core/run.d.ts +14 -0
  105. package/lib/core/run.d.ts.map +1 -0
  106. package/lib/core/run.js +3 -0
  107. package/lib/core/run.js.map +1 -0
  108. package/lib/core/run.mjs +2 -0
  109. package/lib/core/run.mjs.map +1 -0
  110. package/package.json +102 -6
  111. package/resource.d.ts +6 -0
  112. package/resource.d.ts.map +1 -0
  113. package/resource.js +11 -0
  114. package/resource.js.map +1 -0
  115. package/resource.mjs +7 -0
  116. package/resource.mjs.map +1 -0
  117. package/resources/commits/commits.d.ts +11 -0
  118. package/resources/commits/commits.d.ts.map +1 -0
  119. package/resources/commits/commits.js +40 -0
  120. package/resources/commits/commits.js.map +1 -0
  121. package/resources/commits/commits.mjs +13 -0
  122. package/resources/commits/commits.mjs.map +1 -0
  123. package/resources/commits/index.d.ts +3 -0
  124. package/resources/commits/index.d.ts.map +1 -0
  125. package/resources/commits/index.js +9 -0
  126. package/resources/commits/index.js.map +1 -0
  127. package/resources/commits/index.mjs +4 -0
  128. package/resources/commits/index.mjs.map +1 -0
  129. package/resources/commits/test-results.d.ts +217 -0
  130. package/resources/commits/test-results.d.ts.map +1 -0
  131. package/resources/commits/test-results.js +18 -0
  132. package/resources/commits/test-results.js.map +1 -0
  133. package/resources/commits/test-results.mjs +14 -0
  134. package/resources/commits/test-results.mjs.map +1 -0
  135. package/resources/index.d.ts +4 -0
  136. package/resources/index.d.ts.map +1 -0
  137. package/resources/index.js +11 -0
  138. package/resources/index.js.map +1 -0
  139. package/resources/index.mjs +5 -0
  140. package/resources/index.mjs.map +1 -0
  141. package/resources/inference-pipelines/data.d.ts +228 -0
  142. package/resources/inference-pipelines/data.d.ts.map +1 -0
  143. package/resources/inference-pipelines/data.js +17 -0
  144. package/resources/inference-pipelines/data.js.map +1 -0
  145. package/resources/inference-pipelines/data.mjs +13 -0
  146. package/resources/inference-pipelines/data.mjs.map +1 -0
  147. package/resources/inference-pipelines/index.d.ts +4 -0
  148. package/resources/inference-pipelines/index.d.ts.map +1 -0
  149. package/resources/inference-pipelines/index.js +11 -0
  150. package/resources/inference-pipelines/index.js.map +1 -0
  151. package/resources/inference-pipelines/index.mjs +5 -0
  152. package/resources/inference-pipelines/index.mjs.map +1 -0
  153. package/resources/inference-pipelines/inference-pipelines.d.ts +16 -0
  154. package/resources/inference-pipelines/inference-pipelines.d.ts.map +1 -0
  155. package/resources/inference-pipelines/inference-pipelines.js +43 -0
  156. package/resources/inference-pipelines/inference-pipelines.js.map +1 -0
  157. package/resources/inference-pipelines/inference-pipelines.mjs +16 -0
  158. package/resources/inference-pipelines/inference-pipelines.mjs.map +1 -0
  159. package/resources/inference-pipelines/test-results.d.ts +217 -0
  160. package/resources/inference-pipelines/test-results.d.ts.map +1 -0
  161. package/resources/inference-pipelines/test-results.js +18 -0
  162. package/resources/inference-pipelines/test-results.js.map +1 -0
  163. package/resources/inference-pipelines/test-results.mjs +14 -0
  164. package/resources/inference-pipelines/test-results.mjs.map +1 -0
  165. package/resources/projects/commits.d.ts +171 -0
  166. package/resources/projects/commits.d.ts.map +1 -0
  167. package/resources/projects/commits.js +18 -0
  168. package/resources/projects/commits.js.map +1 -0
  169. package/resources/projects/commits.mjs +14 -0
  170. package/resources/projects/commits.mjs.map +1 -0
  171. package/resources/projects/index.d.ts +4 -0
  172. package/resources/projects/index.d.ts.map +1 -0
  173. package/resources/projects/index.js +11 -0
  174. package/resources/projects/index.js.map +1 -0
  175. package/resources/projects/index.mjs +5 -0
  176. package/resources/projects/index.mjs.map +1 -0
  177. package/resources/projects/inference-pipelines.d.ts +121 -0
  178. package/resources/projects/inference-pipelines.d.ts.map +1 -0
  179. package/resources/projects/inference-pipelines.js +18 -0
  180. package/resources/projects/inference-pipelines.js.map +1 -0
  181. package/resources/projects/inference-pipelines.mjs +14 -0
  182. package/resources/projects/inference-pipelines.mjs.map +1 -0
  183. package/resources/projects/projects.d.ts +172 -0
  184. package/resources/projects/projects.d.ts.map +1 -0
  185. package/resources/projects/projects.js +50 -0
  186. package/resources/projects/projects.js.map +1 -0
  187. package/resources/projects/projects.mjs +23 -0
  188. package/resources/projects/projects.mjs.map +1 -0
  189. package/shims/node.d.ts +29 -0
  190. package/shims/node.d.ts.map +1 -0
  191. package/shims/node.js +31 -0
  192. package/shims/node.js.map +1 -0
  193. package/shims/node.mjs +5 -0
  194. package/shims/node.mjs.map +1 -0
  195. package/shims/web.d.ts +26 -0
  196. package/shims/web.d.ts.map +1 -0
  197. package/shims/web.js +31 -0
  198. package/shims/web.js.map +1 -0
  199. package/shims/web.mjs +5 -0
  200. package/shims/web.mjs.map +1 -0
  201. package/src/_shims/MultipartBody.ts +9 -0
  202. package/src/_shims/README.md +46 -0
  203. package/src/_shims/auto/runtime-bun.ts +4 -0
  204. package/src/_shims/auto/runtime-node.ts +4 -0
  205. package/src/_shims/auto/runtime.ts +4 -0
  206. package/src/_shims/auto/types-node.ts +4 -0
  207. package/src/_shims/auto/types.d.ts +101 -0
  208. package/src/_shims/auto/types.js +3 -0
  209. package/src/_shims/auto/types.mjs +3 -0
  210. package/src/_shims/bun-runtime.ts +14 -0
  211. package/src/_shims/index.d.ts +81 -0
  212. package/src/_shims/index.js +13 -0
  213. package/src/_shims/index.mjs +7 -0
  214. package/src/_shims/manual-types.d.ts +12 -0
  215. package/src/_shims/manual-types.js +3 -0
  216. package/src/_shims/manual-types.mjs +3 -0
  217. package/src/_shims/node-runtime.ts +83 -0
  218. package/src/_shims/node-types.d.ts +42 -0
  219. package/src/_shims/node-types.js +3 -0
  220. package/src/_shims/node-types.mjs +3 -0
  221. package/src/_shims/registry.ts +67 -0
  222. package/src/_shims/web-runtime.ts +103 -0
  223. package/src/_shims/web-types.d.ts +83 -0
  224. package/src/_shims/web-types.js +3 -0
  225. package/src/_shims/web-types.mjs +3 -0
  226. package/src/core.ts +1146 -0
  227. package/src/error.ts +146 -0
  228. package/src/index.ts +199 -0
  229. package/src/lib/.keep +4 -0
  230. package/src/lib/core/cli.ts +80 -0
  231. package/src/lib/core/index.ts +1067 -0
  232. package/src/lib/core/run.ts +14 -0
  233. package/src/resource.ts +11 -0
  234. package/src/resources/commits/commits.ts +14 -0
  235. package/src/resources/commits/index.ts +4 -0
  236. package/src/resources/commits/test-results.ts +284 -0
  237. package/src/resources/index.ts +5 -0
  238. package/src/resources/inference-pipelines/data.ts +285 -0
  239. package/src/resources/inference-pipelines/index.ts +5 -0
  240. package/src/resources/inference-pipelines/inference-pipelines.ts +19 -0
  241. package/src/resources/inference-pipelines/test-results.ts +284 -0
  242. package/src/resources/projects/commits.ts +226 -0
  243. package/src/resources/projects/index.ts +9 -0
  244. package/src/resources/projects/inference-pipelines.ts +165 -0
  245. package/src/resources/projects/projects.ts +232 -0
  246. package/src/shims/node.ts +50 -0
  247. package/src/shims/web.ts +50 -0
  248. package/src/tsconfig.json +11 -0
  249. package/src/uploads.ts +248 -0
  250. package/src/version.ts +1 -0
  251. package/uploads.d.ts +75 -0
  252. package/uploads.d.ts.map +1 -0
  253. package/uploads.js +165 -0
  254. package/uploads.js.map +1 -0
  255. package/uploads.mjs +152 -0
  256. package/uploads.mjs.map +1 -0
  257. package/version.d.ts +2 -0
  258. package/version.d.ts.map +1 -0
  259. package/version.js +5 -0
  260. package/version.js.map +1 -0
  261. package/version.mjs +2 -0
  262. package/version.mjs.map +1 -0
@@ -0,0 +1,1067 @@
1
+ import fetch from 'node-fetch';
2
+ import OpenAI from 'openai';
3
+ import { RequestOptions } from 'openai/core';
4
+ import {
5
+ ChatCompletion,
6
+ ChatCompletionChunk,
7
+ ChatCompletionCreateParams,
8
+ ChatCompletionMessageParam,
9
+ Completion,
10
+ CompletionCreateParams,
11
+ } from 'openai/resources';
12
+ import { Run } from 'openai/resources/beta/threads/runs/runs';
13
+ import { Threads } from 'openai/resources/beta/threads/threads';
14
+ import { Stream } from 'openai/streaming';
15
+ import { v4 as uuid } from 'uuid';
16
+
17
/* eslint-disable camelcase */

/** Arbitrary keyword arguments that get serialized into a URL query string. */
type RequestParameters = { [key: string]: any };
20
+
21
+ const resolvedQuery = (
22
+ baseUrl: string,
23
+ endpoint: string,
24
+ args: RequestParameters = {}
25
+ ): string => `${baseUrl}${endpoint}${queryParameters(args)}`;
26
+
27
+ const queryParameters = (args: RequestParameters): string => {
28
+ const filteredArgs: RequestParameters = Object.keys(args)
29
+ .filter((key) => typeof args[key] !== 'undefined')
30
+ .reduce((acc: RequestParameters, arg) => {
31
+ if (Array.isArray(args[arg])) {
32
+ if (args[arg].length === 0) {
33
+ return acc;
34
+ }
35
+
36
+ acc[arg] = args[arg].join(`&${arg}=`);
37
+ } else {
38
+ if (
39
+ (typeof args[arg] === 'string' && args[arg].length === 0) ||
40
+ (typeof args[arg] === 'object' &&
41
+ Object.values(args[arg]).length === 0)
42
+ ) {
43
+ return acc;
44
+ }
45
+
46
+ acc[arg] = args[arg];
47
+ }
48
+
49
+ return acc;
50
+ }, {});
51
+
52
+ if (Object.keys(filteredArgs).length === 0) {
53
+ return '';
54
+ }
55
+
56
+ const resolvedArgs = Object.keys(filteredArgs)
57
+ .map((key) => `${key}=${filteredArgs[key]}`)
58
+ .join('&');
59
+ return `?${resolvedArgs}`;
60
+ };
61
+
62
/**
 * Represents the data structure for a chat completion.
 * Object keys represent a column name and the values represent the column value.
 */
export interface StreamingData {
  [columnName: string]: any;

  /**
   * The total estimated cost of the chat completion in USD. Optional.
   */
  cost?: number | undefined;

  /**
   * The latency of the chat completion in milliseconds. Optional.
   */
  latency?: number | undefined;

  /**
   * The output string generated by the chat completion.
   */
  output: string;

  /**
   * A timestamp representing when the chat completion occurred, in
   * milliseconds since the Unix epoch (converted to epoch seconds before
   * upload by `OpenlayerClient.streamData`). Optional.
   */
  timestamp?: number | undefined;

  /**
   * The number of tokens used in the chat completion. Optional.
   */
  tokens?: number | undefined;
}
94
+
95
/**
 * Configuration settings for uploading chat completion data to Openlayer.
 * Maps `StreamingData` keys to Openlayer column names; see
 * `OpenlayerClient.defaultConfig` for the default mapping.
 */
interface StreamingDataConfig {
  /**
   * The name of the column that stores the request cost data. Can be null.
   */
  costColumnName: string | null;

  /**
   * The name of the column that stores the ground truth data. Can be null.
   */
  groundTruthColumnName: string | null;

  /**
   * The name of the column that stores inference IDs. Can be null.
   */
  inferenceIdColumnName: string | null;

  /**
   * An array of names for input variable columns. Can be null.
   */
  inputVariableNames?: string[] | null;

  /**
   * The name of the column that stores latency data. Can be null.
   */
  latencyColumnName: string | null;

  /**
   * The name of the column that stores the number of tokens. Can be null.
   */
  numOfTokenColumnName: string | null;

  /**
   * The name of the column that stores output data. Can be null.
   */
  outputColumnName: string | null;

  /**
   * The full prompt history for the chat completion.
   */
  prompt?: ChatCompletionMessageParam[];

  /**
   * The name of the column that stores timestamp data. Can be null.
   */
  timestampColumnName: string | null;
}
144
+
145
/**
 * Constructor options for `OpenlayerClient`. The API key is effectively
 * required for publishing; the server URL falls back to the public API.
 */
type OpenlayerClientConstructorProps = {
  openlayerApiKey?: string | undefined;
  openlayerServerUrl?: string | undefined;
};
149
+
150
/**
 * Constructor options for `OpenAIMonitor`: the Openlayer client options plus
 * the OpenAI API key and the target project/pipeline identifiers.
 */
type OpenAIMonitorConstructorProps = OpenlayerClientConstructorProps & {
  openAiApiKey: string;
  openlayerInferencePipelineId?: string;
  openlayerInferencePipelineName?: string;
  openlayerProjectName?: string;
};
156
+
157
/**
 * An inference pipeline record as returned by the Openlayer API.
 * Date fields are strings — presumably ISO-8601; verify against the API.
 */
type OpenlayerInferencePipeline = {
  dataVolumeGraphs?: OpenlayerSampleVolumeGraph;
  dateCreated: string;
  dateLastEvaluated?: string;
  dateLastSampleReceived?: string;
  dateOfNextEvaluation?: string;
  dateUpdated: string;
  description?: string;
  failingGoalCount: number;
  id: string;
  name: string;
  passingGoalCount: number;
  projectId: string;
  status: OpenlayerInferencePipelineStatus;
  statusMessage?: string;
  totalGoalCount: number;
};
174
+
175
/** Lifecycle states reported for an Openlayer inference pipeline. */
type OpenlayerInferencePipelineStatus =
  | 'completed'
  | 'failed'
  | 'paused'
  | 'queued'
  | 'running'
  | 'unknown';
182
+
183
/**
 * A project record as returned by the Openlayer API.
 * Date fields are strings — presumably ISO-8601; verify against the API.
 */
type OpenlayerProject = {
  dateCreated: string;
  dateUpdated: string;
  description?: string;
  developmentGoalCount: number;
  goalCount: number;
  id: string;
  inferencePipelineCount: number;
  memberIds: string[];
  monitoringGoalCount: number;
  name: string;
  sample?: boolean;
  slackChannelId?: string;
  slackChannelName?: string;
  slackChannelNotificationsEnabled: boolean;
  taskType: OpenlayerTaskType;
  unreadNotificationCount: number;
  versionCount: number;
};
202
+
203
/**
 * A single time-bucketed data-volume graph: x-axis labels with matching
 * y-axis sample counts (`xAxis.data[i]` pairs with `yAxis.data[i]`).
 */
type OpenlayerSampleVolumeGraphBucket = {
  title: string;
  xAxis: {
    data: string[];
    title: string;
  };
  yAxis: {
    data: number[];
    title: string;
  };
};
214
+
215
/** Data-volume graphs for a pipeline, bucketed by time granularity. */
type OpenlayerSampleVolumeGraph = {
  daily: OpenlayerSampleVolumeGraphBucket;
  hourly: OpenlayerSampleVolumeGraphBucket;
  monthly: OpenlayerSampleVolumeGraphBucket;
  weekly: OpenlayerSampleVolumeGraphBucket;
};
221
+
222
/** Task types an Openlayer project can be created with. */
type OpenlayerTaskType =
  | 'llm-base'
  | 'tabular-classification'
  | 'tabular-regression'
  | 'text-classification';
227
+
228
/**
 * Per-model token pricing in USD per 1,000 tokens (the `cost` calculation
 * in `OpenAIMonitor` divides token counts by 1000 before multiplying).
 */
type Pricing = {
  input: number;
  output: number;
};
232
+
233
+ const OpenAIPricing: { [key: string]: Pricing } = {
234
+ 'babbage-002': {
235
+ input: 0.0004,
236
+ output: 0.0004,
237
+ },
238
+ 'davinci-002': {
239
+ input: 0.002,
240
+ output: 0.002,
241
+ },
242
+ 'gpt-3.5-turbo': {
243
+ input: 0.0005,
244
+ output: 0.0015,
245
+ },
246
+ 'gpt-3.5-turbo-0125': {
247
+ input: 0.0005,
248
+ output: 0.0015,
249
+ },
250
+ 'gpt-3.5-turbo-0301': {
251
+ input: 0.0015,
252
+ output: 0.002,
253
+ },
254
+ 'gpt-3.5-turbo-0613': {
255
+ input: 0.0015,
256
+ output: 0.002,
257
+ },
258
+ 'gpt-3.5-turbo-1106': {
259
+ input: 0.001,
260
+ output: 0.002,
261
+ },
262
+ 'gpt-3.5-turbo-16k-0613': {
263
+ input: 0.003,
264
+ output: 0.004,
265
+ },
266
+ 'gpt-3.5-turbo-instruct': {
267
+ input: 0.0015,
268
+ output: 0.002,
269
+ },
270
+ 'gpt-4': {
271
+ input: 0.03,
272
+ output: 0.06,
273
+ },
274
+ 'gpt-4-0125-preview': {
275
+ input: 0.01,
276
+ output: 0.03,
277
+ },
278
+ 'gpt-4-0314': {
279
+ input: 0.03,
280
+ output: 0.06,
281
+ },
282
+ 'gpt-4-0613': {
283
+ input: 0.03,
284
+ output: 0.06,
285
+ },
286
+ 'gpt-4-1106-preview': {
287
+ input: 0.01,
288
+ output: 0.03,
289
+ },
290
+ 'gpt-4-1106-vision-preview': {
291
+ input: 0.01,
292
+ output: 0.03,
293
+ },
294
+ 'gpt-4-32k': {
295
+ input: 0.06,
296
+ output: 0.12,
297
+ },
298
+ 'gpt-4-32k-0314': {
299
+ input: 0.06,
300
+ output: 0.12,
301
+ },
302
+ 'gpt-4-32k-0613': {
303
+ input: 0.03,
304
+ output: 0.06,
305
+ },
306
+ };
307
+
308
+ export class OpenlayerClient {
309
+ private openlayerApiKey?: string | undefined;
310
+
311
+ public defaultConfig: StreamingDataConfig = {
312
+ costColumnName: 'cost',
313
+ groundTruthColumnName: null,
314
+ inferenceIdColumnName: 'id',
315
+ latencyColumnName: 'latency',
316
+ numOfTokenColumnName: 'tokens',
317
+ outputColumnName: 'output',
318
+ timestampColumnName: 'timestamp',
319
+ };
320
+
321
+ private openlayerServerUrl: string = 'https://api.openlayer.com/v1';
322
+
323
+ private version = '0.1.0a21';
324
+
325
+ /**
326
+ * Constructs an OpenlayerClient instance.
327
+ * @param {OpenlayerClientConstructorProps} props - The config for the Openlayer client. The API key is required.
328
+ */
329
+ constructor({
330
+ openlayerApiKey,
331
+ openlayerServerUrl,
332
+ }: OpenlayerClientConstructorProps) {
333
+ this.openlayerApiKey = openlayerApiKey;
334
+
335
+ if (openlayerServerUrl) {
336
+ this.openlayerServerUrl = openlayerServerUrl;
337
+ }
338
+
339
+ if (!this.openlayerApiKey) {
340
+ console.error('Openlayer API key are required for publishing.');
341
+ }
342
+ }
343
+
344
+ private resolvedQuery = (endpoint: string, args: RequestParameters = {}) =>
345
+ resolvedQuery(this.openlayerServerUrl, endpoint, args);
346
+
347
+ /**
348
+ * Creates a new inference pipeline in Openlayer or loads an existing one.
349
+ * @param {string} projectId - The ID of the project containing the inference pipeline.
350
+ * @param {string} [name='production'] - The name of the inference pipeline, defaults to 'production'.
351
+ * @returns {Promise<OpenlayerInferencePipeline>} A promise that resolves to an OpenlayerInferencePipeline object.
352
+ * @throws {Error} Throws an error if the inference pipeline cannot be created or found.
353
+ */
354
+ public createInferencePipeline = async (
355
+ projectId: string,
356
+ name: string = 'production'
357
+ ): Promise<OpenlayerInferencePipeline> => {
358
+ try {
359
+ return await this.loadInferencePipeline(projectId, name);
360
+ } catch {}
361
+
362
+ const createInferencePipelineEndpoint = `/projects/${projectId}/inference-pipelines`;
363
+ const createInferencePipelineQuery = this.resolvedQuery(
364
+ createInferencePipelineEndpoint,
365
+ { version: this.version }
366
+ );
367
+
368
+ const createInferencePipelineResponse = await fetch(
369
+ createInferencePipelineQuery,
370
+ {
371
+ body: JSON.stringify({
372
+ description: '',
373
+ name,
374
+ }),
375
+ headers: {
376
+ Authorization: `Bearer ${this.openlayerApiKey}`,
377
+ 'Content-Type': 'application/json',
378
+ },
379
+ method: 'POST',
380
+ }
381
+ );
382
+
383
+ const inferencePipeline = (await createInferencePipelineResponse.json()) as
384
+ | OpenlayerInferencePipeline
385
+ | undefined;
386
+
387
+ if (!inferencePipeline?.id) {
388
+ throw new Error('Error creating inference pipeline');
389
+ }
390
+
391
+ return inferencePipeline;
392
+ };
393
+
394
+ /**
395
+ * Creates a new project in Openlayer or loads an existing one.
396
+ * @param {string} name - The name of the project.
397
+ * @param {OpenlayerTaskType} taskType - The type of task associated with the project.
398
+ * @param {string} [description] - Optional description of the project.
399
+ * @returns {Promise<OpenlayerProject>} A promise that resolves to an OpenlayerProject object.
400
+ * @throws {Error} Throws an error if the project cannot be created or found.
401
+ */
402
+ public createProject = async (
403
+ name: string,
404
+ taskType: OpenlayerTaskType,
405
+ description?: string
406
+ ): Promise<OpenlayerProject> => {
407
+ try {
408
+ return await this.loadProject(name);
409
+ } catch {}
410
+
411
+ const projectsEndpoint = '/projects';
412
+ const projectsQuery = this.resolvedQuery(projectsEndpoint);
413
+
414
+ const response = await fetch(projectsQuery, {
415
+ body: JSON.stringify({
416
+ description,
417
+ name,
418
+ taskType,
419
+ }),
420
+ headers: {
421
+ Authorization: `Bearer ${this.openlayerApiKey}`,
422
+ 'Content-Type': 'application/json',
423
+ },
424
+ method: 'POST',
425
+ });
426
+
427
+ const data = (await response.json()) as {
428
+ error?: string;
429
+ items?: OpenlayerProject[];
430
+ };
431
+
432
+ const { items: projects, error } = data;
433
+
434
+ if (!Array.isArray(projects)) {
435
+ throw new Error(
436
+ typeof error === 'string' ? error : 'Invalid response from Openlayer'
437
+ );
438
+ }
439
+
440
+ const project = projects.find((p) => p.name === name);
441
+
442
+ if (!project?.id) {
443
+ throw new Error('Project not found');
444
+ }
445
+
446
+ return project;
447
+ };
448
+
449
+ /**
450
+ * Loads an existing inference pipeline from Openlayer based on its name and project ID.
451
+ * @param {string} projectId - The ID of the project containing the inference pipeline.
452
+ * @param {string} [name='production'] - The name of the inference pipeline, defaults to 'production'.
453
+ * @returns {Promise<OpenlayerInferencePipeline>} A promise that resolves to an OpenlayerInferencePipeline object.
454
+ * @throws {Error} Throws an error if the inference pipeline is not found.
455
+ */
456
+ public loadInferencePipeline = async (
457
+ projectId: string,
458
+ name: string = 'production'
459
+ ): Promise<OpenlayerInferencePipeline> => {
460
+ const inferencePipelineEndpoint = `/projects/${projectId}/inference-pipelines`;
461
+ const inferencePipelineQueryParameters = {
462
+ name,
463
+ version: this.version,
464
+ };
465
+
466
+ const inferencePipelineQuery = this.resolvedQuery(
467
+ inferencePipelineEndpoint,
468
+ inferencePipelineQueryParameters
469
+ );
470
+
471
+ const inferencePipelineResponse = await fetch(inferencePipelineQuery, {
472
+ headers: {
473
+ Authorization: `Bearer ${this.openlayerApiKey}`,
474
+ 'Content-Type': 'application/json',
475
+ },
476
+ method: 'GET',
477
+ });
478
+
479
+ const { items: inferencePipelines, error } =
480
+ (await inferencePipelineResponse.json()) as {
481
+ error?: string;
482
+ items?: OpenlayerInferencePipeline[];
483
+ };
484
+
485
+ const inferencePipeline = Array.isArray(inferencePipelines)
486
+ ? inferencePipelines.find((p) => p.name === name)
487
+ : undefined;
488
+
489
+ if (!inferencePipeline?.id) {
490
+ throw new Error(
491
+ typeof error === 'string' ? error : 'Inference pipeline not found'
492
+ );
493
+ }
494
+
495
+ return inferencePipeline;
496
+ };
497
+
498
+ /**
499
+ * Loads an existing project from Openlayer based on its name.
500
+ * @param {string} name - The name of the project.
501
+ * @returns {Promise<OpenlayerProject>} A promise that resolves to an OpenlayerProject object.
502
+ * @throws {Error} Throws an error if the project is not found.
503
+ */
504
+ public loadProject = async (name: string): Promise<OpenlayerProject> => {
505
+ const projectsEndpoint = '/projects';
506
+ const projectsQueryParameters = {
507
+ name,
508
+ version: this.version,
509
+ };
510
+
511
+ const projectsQuery = this.resolvedQuery(
512
+ projectsEndpoint,
513
+ projectsQueryParameters
514
+ );
515
+
516
+ const response = await fetch(projectsQuery, {
517
+ headers: {
518
+ Authorization: `Bearer ${this.openlayerApiKey}`,
519
+ 'Content-Type': 'application/json',
520
+ },
521
+ method: 'GET',
522
+ });
523
+
524
+ const data = await response.json();
525
+ const { items: projects, error } = data as {
526
+ error?: string;
527
+ items?: OpenlayerProject[];
528
+ };
529
+
530
+ if (!Array.isArray(projects)) {
531
+ throw new Error(
532
+ typeof error === 'string' ? error : 'Invalid response from Openlayer'
533
+ );
534
+ }
535
+
536
+ const project = projects.find((p) => p.name === name);
537
+
538
+ if (!project?.id) {
539
+ throw new Error('Project not found');
540
+ }
541
+
542
+ return project;
543
+ };
544
+
545
+ /**
546
+ * Streams data to the Openlayer inference pipeline.
547
+ * @param {StreamingData} data - The chat completion data to be streamed.
548
+ * @param {string} inferencePipelineId - The ID of the Openlayer inference pipeline to which data is streamed.
549
+ * @returns {Promise<void>} A promise that resolves when the data has been successfully streamed.
550
+ */
551
+ public streamData = async (
552
+ data: StreamingData,
553
+ config: StreamingDataConfig,
554
+ inferencePipelineId: string
555
+ ): Promise<void> => {
556
+ if (!this.openlayerApiKey) {
557
+ console.error('Openlayer API key are required for streaming data.');
558
+ return;
559
+ }
560
+
561
+ try {
562
+ const dataStreamEndpoint = `/inference-pipelines/${inferencePipelineId}/data-stream`;
563
+ const dataStreamQuery = this.resolvedQuery(dataStreamEndpoint);
564
+
565
+ const response = await fetch(dataStreamQuery, {
566
+ body: JSON.stringify({
567
+ config,
568
+ rows: [
569
+ {
570
+ ...data,
571
+ id: uuid(),
572
+ timestamp: Math.round((data.timestamp ?? Date.now()) / 1000),
573
+ },
574
+ ],
575
+ }),
576
+ headers: {
577
+ Authorization: `Bearer ${this.openlayerApiKey}`,
578
+ 'Content-Type': 'application/json',
579
+ },
580
+ method: 'POST',
581
+ });
582
+
583
+ if (!response.ok) {
584
+ console.error('Error making POST request:', response.status);
585
+ console.error(`Error: ${response.status}`);
586
+ }
587
+
588
+ await response.json();
589
+ } catch (error) {
590
+ console.error('Error streaming data to Openlayer:', error);
591
+ }
592
+ };
593
+ }
594
+
595
/**
 * Wraps an OpenAI client and publishes completion data to Openlayer through
 * an `OpenlayerClient`.
 */
export class OpenAIMonitor {
  // Client used to create/load Openlayer projects/pipelines and stream rows.
  private openlayerClient: OpenlayerClient;

  // Underlying OpenAI SDK client used to issue completion requests.
  private openAIClient: OpenAI;

  // Name of the Openlayer project to publish to, when provided.
  private openlayerProjectName?: string | undefined;

  // Explicit pipeline ID — presumably takes priority over resolving a
  // pipeline by name when set; TODO confirm against the resolution logic.
  private openlayerInferencePipelineId?: string | undefined;

  // Pipeline name used when no explicit ID is given; defaults to 'production'.
  private openlayerInferencePipelineName: string = 'production';
605
+
606
  /**
   * Constructs an OpenAIMonitor instance.
   * @param {OpenAIMonitorConstructorProps} props - The configuration properties for the OpenAI and Openlayer clients.
   */
  constructor({
    openAiApiKey,
    openlayerApiKey,
    openlayerProjectName,
    openlayerInferencePipelineId,
    openlayerInferencePipelineName,
    openlayerServerUrl,
  }: OpenAIMonitorConstructorProps) {
    this.openlayerProjectName = openlayerProjectName;
    this.openlayerInferencePipelineId = openlayerInferencePipelineId;

    // Only override the 'production' default when a name was provided.
    if (openlayerInferencePipelineName) {
      this.openlayerInferencePipelineName = openlayerInferencePipelineName;
    }

    this.openlayerClient = new OpenlayerClient({
      openlayerApiKey,
      openlayerServerUrl,
    });

    // NOTE(review): dangerouslyAllowBrowser exposes the OpenAI key in browser
    // environments — confirm this is intentional.
    this.openAIClient = new OpenAI({
      apiKey: openAiApiKey,
      dangerouslyAllowBrowser: true,
    });
  }
635
+
636
+ private cost = (model: string, inputTokens: number, outputTokens: number) => {
637
+ const pricing: Pricing | undefined = OpenAIPricing[model];
638
+ const inputCost =
639
+ typeof pricing === 'undefined'
640
+ ? undefined
641
+ : (inputTokens / 1000) * pricing.input;
642
+ const outputCost =
643
+ typeof pricing === 'undefined'
644
+ ? undefined
645
+ : (outputTokens / 1000) * pricing.output;
646
+ return typeof pricing === 'undefined'
647
+ ? undefined
648
+ : (inputCost ?? 0) + (outputCost ?? 0);
649
+ };
650
+
651
+ private chatCompletionPrompt = (
652
+ fromMessages: ChatCompletionMessageParam[]
653
+ ): ChatCompletionMessageParam[] =>
654
+ fromMessages.map(
655
+ ({ content, role }, i) =>
656
+ ({
657
+ content: role === 'user' ? `{{ message_${i} }}` : content,
658
+ role,
659
+ }) as unknown as ChatCompletionMessageParam
660
+ );
661
+
662
+ private threadPrompt = async (
663
+ fromMessages: Threads.MessagesPage
664
+ ): Promise<ChatCompletionMessageParam[]> => {
665
+ const messages: Threads.Messages.Message[] = [];
666
+ for await (const page of fromMessages.iterPages()) {
667
+ messages.push(...page.getPaginatedItems());
668
+ }
669
+
670
+ return messages
671
+ .map(({ content, role }) =>
672
+ content.map((item) => ({
673
+ content: (() => {
674
+ switch (item.type) {
675
+ case 'image_file':
676
+ return item.image_file.file_id;
677
+ case 'text':
678
+ return item.text.value;
679
+ default:
680
+ return '';
681
+ }
682
+ })(),
683
+ role,
684
+ }))
685
+ )
686
+ .flat();
687
+ };
688
+
689
+ private inputVariables = (
690
+ fromPrompt: ChatCompletionMessageParam[],
691
+ andMessages: ChatCompletionMessageParam[]
692
+ ) => {
693
+ const inputVariableNames = fromPrompt
694
+ .filter(({ role }) => role === 'user')
695
+ .map(({ content }) =>
696
+ String(content).replace(/{{\s*|\s*}}/g, '')
697
+ ) as string[];
698
+ const inputVariables = andMessages
699
+ .filter(({ role }) => role === 'user')
700
+ .map(({ content }) => content) as string[];
701
+ const inputVariablesMap = inputVariableNames.reduce(
702
+ (acc, name, i) => ({ ...acc, [name]: inputVariables[i] }),
703
+ {}
704
+ );
705
+
706
+ return { inputVariableNames, inputVariables, inputVariablesMap };
707
+ };
708
+
709
+ /**
710
+ * Creates a chat completion using the OpenAI client and streams the result to Openlayer.
711
+ * @param {ChatCompletionCreateParams} body - The parameters for creating a chat completion.
712
+ * @param {RequestOptions} [options] - Optional request options.
713
+ * @param {StreamingData} [additionalLogs] - Optional metadata logs to include with the request sent to Openlayer.
714
+ * @returns {Promise<ChatCompletion | Stream<ChatCompletionChunk>>} Promise of a ChatCompletion or a Stream
715
+ * @throws {Error} Throws errors from the OpenAI client.
716
+ */
717
+ public createChatCompletion = async (
718
+ body: ChatCompletionCreateParams,
719
+ options?: RequestOptions,
720
+ additionalLogs?: StreamingData
721
+ ): Promise<ChatCompletion | Stream<ChatCompletionChunk>> => {
722
+ if (typeof this.openlayerInferencePipelineId === 'undefined') {
723
+ console.error('No inference pipeline found.');
724
+ }
725
+
726
+ // Start a timer to measure latency
727
+ const startTime = Date.now();
728
+ // Accumulate output for streamed responses
729
+ let streamedOutput = '';
730
+
731
+ const response = await this.openAIClient.chat.completions.create(
732
+ body,
733
+ options
734
+ );
735
+
736
+ try {
737
+ if (typeof this.openlayerInferencePipelineId !== 'undefined') {
738
+ const prompt = this.chatCompletionPrompt(body.messages);
739
+ const { inputVariableNames, inputVariablesMap } = this.inputVariables(
740
+ prompt,
741
+ body.messages
742
+ );
743
+
744
+ const config = {
745
+ ...this.openlayerClient.defaultConfig,
746
+ inputVariableNames,
747
+ prompt,
748
+ };
749
+
750
+ if (body.stream) {
751
+ const streamedResponse = response as Stream<ChatCompletionChunk>;
752
+
753
+ for await (const chunk of streamedResponse) {
754
+ const [choice] = chunk.choices;
755
+ // Process each chunk - for example, accumulate input data
756
+ const chunkOutput = choice?.delta.content ?? '';
757
+ streamedOutput += chunkOutput;
758
+ }
759
+
760
+ const endTime = Date.now();
761
+ const latency = endTime - startTime;
762
+
763
+ this.openlayerClient.streamData(
764
+ {
765
+ latency,
766
+ output: streamedOutput,
767
+ timestamp: startTime,
768
+ ...inputVariablesMap,
769
+ ...additionalLogs,
770
+ },
771
+ config,
772
+ this.openlayerInferencePipelineId
773
+ );
774
+ } else {
775
+ const nonStreamedResponse = response as ChatCompletion;
776
+ // Handle regular (non-streamed) response
777
+ const endTime = Date.now();
778
+ const latency = endTime - startTime;
779
+ const [choice] = nonStreamedResponse.choices;
780
+ const output = choice?.message.content;
781
+ const tokens = nonStreamedResponse.usage?.total_tokens ?? 0;
782
+ const inputTokens = nonStreamedResponse.usage?.prompt_tokens ?? 0;
783
+ const outputTokens =
784
+ nonStreamedResponse.usage?.completion_tokens ?? 0;
785
+ const cost = this.cost(
786
+ nonStreamedResponse.model,
787
+ inputTokens,
788
+ outputTokens
789
+ );
790
+
791
+ if (typeof output === 'string') {
792
+ this.openlayerClient.streamData(
793
+ {
794
+ cost,
795
+ latency,
796
+ model: nonStreamedResponse.model,
797
+ output,
798
+ timestamp: startTime,
799
+ tokens,
800
+ ...inputVariablesMap,
801
+ ...additionalLogs,
802
+ },
803
+ config,
804
+ this.openlayerInferencePipelineId
805
+ );
806
+ } else {
807
+ console.error('No output received from OpenAI.');
808
+ }
809
+ }
810
+ }
811
+ } catch (error) {
812
+ console.error(error);
813
+ }
814
+
815
+ return response;
816
+ };
817
+
818
+ /**
819
+ * Creates a completion using the OpenAI client and streams the result to Openlayer.
820
+ * @param {CompletionCreateParams} body - The parameters for creating a completion.
821
+ * @param {RequestOptions} [options] - Optional request options.
822
+ * @param {StreamingData} [additionalLogs] - Optional metadata logs to include with the request sent to Openlayer.
823
+ * @returns {Promise<Completion | Stream<Completion>>} Promise that resolves to a Completion or a Stream.
824
+ * @throws {Error} Throws errors from the OpenAI client.
825
+ */
826
+ public createCompletion = async (
827
+ body: CompletionCreateParams,
828
+ options?: RequestOptions,
829
+ additionalLogs?: StreamingData
830
+ ): Promise<Completion | Stream<Completion>> => {
831
+ if (!body.prompt) {
832
+ console.error('No prompt provided.');
833
+ }
834
+
835
+ if (typeof this.openlayerInferencePipelineId === 'undefined') {
836
+ console.error('No inference pipeline found.');
837
+ }
838
+
839
+ // Start a timer to measure latency
840
+ const startTime = Date.now();
841
+
842
+ // Accumulate output and tokens data for streamed responses
843
+ let streamedModel = body.model;
844
+ let streamedOutput = '';
845
+ let streamedTokens = 0;
846
+ let streamedInputTokens = 0;
847
+ let streamedOutputTokens = 0;
848
+
849
+ const response = await this.openAIClient.completions.create(body, options);
850
+
851
+ try {
852
+ if (typeof this.openlayerInferencePipelineId !== 'undefined') {
853
+ const config = {
854
+ ...this.openlayerClient.defaultConfig,
855
+ inputVariableNames: ['input'],
856
+ };
857
+
858
+ if (body.stream) {
859
+ const streamedResponse = response as Stream<Completion>;
860
+
861
+ for await (const chunk of streamedResponse) {
862
+ const [choice] = chunk.choices;
863
+ // Process each chunk - for example, accumulate input data
864
+ streamedModel = chunk.model;
865
+ streamedOutput += choice?.text.trim();
866
+ streamedTokens += chunk.usage?.total_tokens ?? 0;
867
+ streamedInputTokens += chunk.usage?.prompt_tokens ?? 0;
868
+ streamedOutputTokens += chunk.usage?.completion_tokens ?? 0;
869
+ }
870
+
871
+ const endTime = Date.now();
872
+ const latency = endTime - startTime;
873
+ const cost = this.cost(
874
+ streamedModel,
875
+ streamedInputTokens,
876
+ streamedOutputTokens
877
+ );
878
+
879
+ this.openlayerClient.streamData(
880
+ {
881
+ cost,
882
+ input: body.prompt,
883
+ latency,
884
+ output: streamedOutput,
885
+ timestamp: startTime,
886
+ tokens: streamedTokens,
887
+ ...additionalLogs,
888
+ },
889
+ config,
890
+ this.openlayerInferencePipelineId
891
+ );
892
+ } else {
893
+ const nonStreamedResponse = response as Completion;
894
+ const [choice] = nonStreamedResponse.choices;
895
+ // Handle regular (non-streamed) response
896
+ const endTime = Date.now();
897
+ const latency = endTime - startTime;
898
+ const tokens = nonStreamedResponse.usage?.total_tokens ?? 0;
899
+ const inputTokens = nonStreamedResponse.usage?.prompt_tokens ?? 0;
900
+ const outputTokens =
901
+ nonStreamedResponse.usage?.completion_tokens ?? 0;
902
+ const cost = this.cost(
903
+ nonStreamedResponse.model,
904
+ inputTokens,
905
+ outputTokens
906
+ );
907
+
908
+ this.openlayerClient.streamData(
909
+ {
910
+ cost,
911
+ input: body.prompt,
912
+ latency,
913
+ output: choice?.text ?? '',
914
+ timestamp: startTime,
915
+ tokens,
916
+ ...additionalLogs,
917
+ },
918
+ config,
919
+ this.openlayerInferencePipelineId
920
+ );
921
+ }
922
+ }
923
+ } catch (error) {
924
+ console.error(error);
925
+ }
926
+
927
+ return response;
928
+ };
929
+
930
+ /**
931
+ * Monitor a run from an OpenAI assistant.
932
+ * Once the run is completed, the thread data is published to Openlayer,
933
+ * along with the latency, cost, and number of tokens used.
934
+ * @param {Run} run - The run created by the OpenAI assistant.
935
+ * @param {StreamingData} [additionalLogs] - Optional metadata logs to include with the request sent to Openlayer.
936
+ * @returns {Promise<void>} A promise that resolves when the run data has been successfully published to Openlayer.
937
+ */
938
+ public async monitorThreadRun(run: Run, additionalLogs?: StreamingData) {
939
+ if (run.status !== 'completed') {
940
+ return;
941
+ }
942
+
943
+ if (typeof this.openlayerInferencePipelineId === 'undefined') {
944
+ console.error('No inference pipeline found.');
945
+ return;
946
+ }
947
+
948
+ try {
949
+ const {
950
+ assistant_id,
951
+ completed_at,
952
+ created_at,
953
+ model,
954
+ thread_id,
955
+ // @ts-ignore
956
+ usage,
957
+ } = run;
958
+
959
+ // @ts-ignore
960
+ const { completion_tokens, prompt_tokens, total_tokens } =
961
+ typeof usage === 'undefined' ||
962
+ typeof usage !== 'object' ||
963
+ usage === null
964
+ ? {}
965
+ : usage;
966
+
967
+ const cost = this.cost(model, prompt_tokens, completion_tokens);
968
+ const latency =
969
+ completed_at === null ||
970
+ created_at === null ||
971
+ isNaN(completed_at) ||
972
+ isNaN(created_at)
973
+ ? undefined
974
+ : (completed_at - created_at) * 1000;
975
+
976
+ const messages = await this.openAIClient.beta.threads.messages.list(
977
+ thread_id,
978
+ { order: 'asc' }
979
+ );
980
+
981
+ const populatedPrompt = await this.threadPrompt(messages);
982
+ const prompt = this.chatCompletionPrompt(populatedPrompt);
983
+ const { inputVariableNames, inputVariablesMap } = this.inputVariables(
984
+ prompt,
985
+ populatedPrompt
986
+ );
987
+
988
+ const config = {
989
+ ...this.openlayerClient.defaultConfig,
990
+ inputVariableNames,
991
+ prompt: prompt.slice(0, prompt.length - 1),
992
+ };
993
+
994
+ const output = prompt[prompt.length - 1]?.content;
995
+ const resolvedOutput =
996
+ typeof output === 'string'
997
+ ? output
998
+ : typeof output === 'undefined' || output === null
999
+ ? ''
1000
+ : `${output}`;
1001
+
1002
+ this.openlayerClient.streamData(
1003
+ {
1004
+ cost,
1005
+ latency,
1006
+ openai_assistant_id: assistant_id,
1007
+ openai_thread_id: thread_id,
1008
+ output: resolvedOutput,
1009
+ timestamp: run.created_at,
1010
+ tokens: total_tokens,
1011
+ ...inputVariablesMap,
1012
+ ...additionalLogs,
1013
+ },
1014
+ config,
1015
+ this.openlayerInferencePipelineId
1016
+ );
1017
+ } catch (error) {
1018
+ console.error('Error logging thread run:', error);
1019
+ }
1020
+ }
1021
+
1022
+ /**
1023
+ * Starts monitoring for the OpenAI Monitor instance. If monitoring is already active, a warning is logged.
1024
+ */
1025
+ public async initialize() {
1026
+ console.info(
1027
+ 'Initializing monitor: creating or loading an Openlayer project and inference pipeline...'
1028
+ );
1029
+
1030
+ if (typeof this.openlayerInferencePipelineId !== 'undefined') {
1031
+ console.info(
1032
+ 'Monitor initialized: using inference pipeline ID provided.'
1033
+ );
1034
+ return;
1035
+ }
1036
+
1037
+ try {
1038
+ if (typeof this.openlayerProjectName === 'undefined') {
1039
+ console.error('No project name provided.');
1040
+ return;
1041
+ }
1042
+
1043
+ const project = await this.openlayerClient.createProject(
1044
+ this.openlayerProjectName,
1045
+ 'llm-base'
1046
+ );
1047
+
1048
+ if (typeof project !== 'undefined') {
1049
+ const inferencePipeline =
1050
+ await this.openlayerClient.createInferencePipeline(
1051
+ project.id,
1052
+ this.openlayerInferencePipelineName
1053
+ );
1054
+
1055
+ if (typeof inferencePipeline?.id === 'undefined') {
1056
+ console.error('Unable to locate inference pipeline.');
1057
+ } else {
1058
+ this.openlayerInferencePipelineId = inferencePipeline.id;
1059
+ }
1060
+ }
1061
+
1062
+ console.info('Monitor started');
1063
+ } catch (error) {
1064
+ console.error('An error occurred while starting the monitor:', error);
1065
+ }
1066
+ }
1067
+ }