@elasticdash/openai 0.0.1 → 0.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE ADDED
@@ -0,0 +1,24 @@
1
+ MIT License
2
+
3
+ This repository is licensed under the MIT License. Portions of this codebase are derived from posthog/posthog-js-lite by PostHog, which is also licensed under the MIT License.
4
+
5
+ Copyright (c) 2023-2026 Langfuse GmbH
6
+ Copyright (c) 2022 PostHog (part of Hiberly Inc)
7
+
8
+ Permission is hereby granted, free of charge, to any person obtaining a copy
9
+ of this software and associated documentation files (the "Software"), to deal
10
+ in the Software without restriction, including without limitation the rights
11
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12
+ copies of the Software, and to permit persons to whom the Software is
13
+ furnished to do so, subject to the following conditions:
14
+
15
+ The above copyright notice and this permission notice shall be included in all
16
+ copies or substantial portions of the Software.
17
+
18
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24
+ SOFTWARE.
package/README.md CHANGED
@@ -6,8 +6,8 @@ This is the OpenAI integration package of the Langfuse JS SDK containing the `ob
6
6
 
7
7
  ## Packages
8
8
 
9
- | Package | NPM | Description | Environments |
10
- | ------------------------------------------- | ----------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------- | ------------ |
9
+ | Package | NPM | Description | Environments |
10
+ | ---------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------- | ------------ |
11
11
  | [@elasticdash/client](./packages/client) | [![NPM](https://img.shields.io/npm/v/@elasticdash/client.svg)](https://www.npmjs.com/package/@elasticdash/client) | Langfuse API client for universal JavaScript environments | Universal JS |
12
12
  | [@elasticdash/tracing](./packages/tracing) | [![NPM](https://img.shields.io/npm/v/@elasticdash/tracing.svg)](https://www.npmjs.com/package/@elasticdash/tracing) | Langfuse instrumentation methods based on OpenTelemetry | Node.js 20+ |
13
13
  | [@elasticdash/otel](./packages/otel) | [![NPM](https://img.shields.io/npm/v/@elasticdash/otel.svg)](https://www.npmjs.com/package/@elasticdash/otel) | Langfuse OpenTelemetry export helpers | Node.js 20+ |
package/dist/index.cjs ADDED
@@ -0,0 +1,428 @@
1
+ "use strict";
2
+ var __defProp = Object.defineProperty;
3
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
+ var __getOwnPropNames = Object.getOwnPropertyNames;
5
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
6
+ var __export = (target, all) => {
7
+ for (var name in all)
8
+ __defProp(target, name, { get: all[name], enumerable: true });
9
+ };
10
+ var __copyProps = (to, from, except, desc) => {
11
+ if (from && typeof from === "object" || typeof from === "function") {
12
+ for (let key of __getOwnPropNames(from))
13
+ if (!__hasOwnProp.call(to, key) && key !== except)
14
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
15
+ }
16
+ return to;
17
+ };
18
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
+
20
+ // src/index.ts
21
+ var index_exports = {};
22
+ __export(index_exports, {
23
+ observeOpenAI: () => observeOpenAI
24
+ });
25
+ module.exports = __toCommonJS(index_exports);
26
+
27
+ // src/traceMethod.ts
28
+ var import_tracing = require("@elasticdash/tracing");
29
+
30
+ // src/parseOpenAI.ts
31
+ var parseInputArgs = (args) => {
32
+ let params = {};
33
+ params = {
34
+ frequency_penalty: args.frequency_penalty,
35
+ logit_bias: args.logit_bias,
36
+ logprobs: args.logprobs,
37
+ max_tokens: args.max_tokens,
38
+ n: args.n,
39
+ presence_penalty: args.presence_penalty,
40
+ seed: args.seed,
41
+ stop: args.stop,
42
+ stream: args.stream,
43
+ temperature: args.temperature,
44
+ top_p: args.top_p,
45
+ user: args.user,
46
+ response_format: args.response_format,
47
+ top_logprobs: args.top_logprobs
48
+ };
49
+ let input = args.input;
50
+ if (args && typeof args === "object" && !Array.isArray(args) && "messages" in args) {
51
+ input = {};
52
+ input.messages = args.messages;
53
+ if ("function_call" in args) {
54
+ input.function_call = args.function_call;
55
+ }
56
+ if ("functions" in args) {
57
+ input.functions = args.functions;
58
+ }
59
+ if ("tools" in args) {
60
+ input.tools = args.tools;
61
+ }
62
+ if ("tool_choice" in args) {
63
+ input.tool_choice = args.tool_choice;
64
+ }
65
+ } else if (!input) {
66
+ input = args.prompt;
67
+ }
68
+ return {
69
+ model: args.model,
70
+ input,
71
+ modelParameters: params
72
+ };
73
+ };
74
+ var parseCompletionOutput = (res) => {
75
+ var _a;
76
+ if (res instanceof Object && "output_text" in res && res["output_text"] !== "") {
77
+ return res["output_text"];
78
+ }
79
+ if (typeof res === "object" && res && "output" in res && Array.isArray(res["output"])) {
80
+ const output = res["output"];
81
+ if (output.length > 1) {
82
+ return output;
83
+ }
84
+ if (output.length === 1) {
85
+ return output[0];
86
+ }
87
+ return null;
88
+ }
89
+ if (!(res instanceof Object && "choices" in res && Array.isArray(res.choices))) {
90
+ return "";
91
+ }
92
+ return "message" in res.choices[0] ? res.choices[0].message : (_a = res.choices[0].text) != null ? _a : "";
93
+ };
94
+ var parseUsageDetails = (completionUsage) => {
95
+ if ("prompt_tokens" in completionUsage) {
96
+ const {
97
+ prompt_tokens,
98
+ completion_tokens,
99
+ total_tokens,
100
+ completion_tokens_details,
101
+ prompt_tokens_details
102
+ } = completionUsage;
103
+ const flatPromptTokensDetails = Object.fromEntries(
104
+ Object.entries(prompt_tokens_details != null ? prompt_tokens_details : {}).map(([key, value]) => [
105
+ `input_${key}`,
106
+ value
107
+ ])
108
+ );
109
+ const flatCompletionTokensDetails = Object.fromEntries(
110
+ Object.entries(completion_tokens_details != null ? completion_tokens_details : {}).map(([key, value]) => [
111
+ `output_${key}`,
112
+ value
113
+ ])
114
+ );
115
+ let finalInputTokens = prompt_tokens;
116
+ Object.values(flatPromptTokensDetails).forEach((value) => {
117
+ finalInputTokens = Math.max(finalInputTokens - value, 0);
118
+ });
119
+ let finalOutputTokens = completion_tokens;
120
+ Object.values(flatCompletionTokensDetails).forEach((value) => {
121
+ finalOutputTokens = Math.max(finalOutputTokens - value, 0);
122
+ });
123
+ return {
124
+ input: finalInputTokens,
125
+ output: finalOutputTokens,
126
+ total: total_tokens,
127
+ ...flatPromptTokensDetails,
128
+ ...flatCompletionTokensDetails
129
+ };
130
+ } else if ("input_tokens" in completionUsage) {
131
+ const {
132
+ input_tokens,
133
+ output_tokens,
134
+ total_tokens,
135
+ input_tokens_details,
136
+ output_tokens_details
137
+ } = completionUsage;
138
+ let finalInputTokens = input_tokens;
139
+ Object.keys(input_tokens_details != null ? input_tokens_details : {}).forEach((key) => {
140
+ finalInputTokens = Math.max(
141
+ finalInputTokens - input_tokens_details[key],
142
+ 0
143
+ );
144
+ });
145
+ let finalOutputTokens = output_tokens;
146
+ Object.keys(output_tokens_details != null ? output_tokens_details : {}).forEach((key) => {
147
+ finalOutputTokens = Math.max(
148
+ finalOutputTokens - output_tokens_details[key],
149
+ 0
150
+ );
151
+ });
152
+ return {
153
+ input: finalInputTokens,
154
+ output: finalOutputTokens,
155
+ total: total_tokens,
156
+ ...Object.fromEntries(
157
+ Object.entries(input_tokens_details != null ? input_tokens_details : {}).map(([key, value]) => [
158
+ `input_${key}`,
159
+ value
160
+ ])
161
+ ),
162
+ ...Object.fromEntries(
163
+ Object.entries(output_tokens_details != null ? output_tokens_details : {}).map(([key, value]) => [
164
+ `output_${key}`,
165
+ value
166
+ ])
167
+ )
168
+ };
169
+ }
170
+ };
171
+ var parseUsageDetailsFromResponse = (res) => {
172
+ if (hasCompletionUsage(res)) {
173
+ return parseUsageDetails(res.usage);
174
+ }
175
+ };
176
+ var parseChunk = (rawChunk) => {
177
+ var _a, _b;
178
+ let isToolCall = false;
179
+ const _chunk = rawChunk;
180
+ const chunkData = (_a = _chunk == null ? void 0 : _chunk.choices) == null ? void 0 : _a[0];
181
+ try {
182
+ if ("delta" in chunkData && "tool_calls" in chunkData.delta && Array.isArray(chunkData.delta.tool_calls)) {
183
+ isToolCall = true;
184
+ return { isToolCall, data: chunkData.delta.tool_calls[0] };
185
+ }
186
+ if ("delta" in chunkData) {
187
+ return { isToolCall, data: ((_b = chunkData.delta) == null ? void 0 : _b.content) || "" };
188
+ }
189
+ if ("text" in chunkData) {
190
+ return { isToolCall, data: chunkData.text || "" };
191
+ }
192
+ } catch {
193
+ }
194
+ return { isToolCall: false, data: "" };
195
+ };
196
+ function hasCompletionUsage(obj) {
197
+ return obj instanceof Object && "usage" in obj && obj.usage instanceof Object && // Completion API Usage format
198
+ (typeof obj.usage.prompt_tokens === "number" && typeof obj.usage.completion_tokens === "number" && typeof obj.usage.total_tokens === "number" || // Response API Usage format
199
+ typeof obj.usage.input_tokens === "number" && typeof obj.usage.output_tokens === "number" && typeof obj.usage.total_tokens === "number");
200
+ }
201
+ var getToolCallOutput = (toolCallChunks) => {
202
+ var _a, _b;
203
+ let name = "";
204
+ let toolArguments = "";
205
+ for (const toolCall of toolCallChunks) {
206
+ name = ((_a = toolCall.function) == null ? void 0 : _a.name) || name;
207
+ toolArguments += ((_b = toolCall.function) == null ? void 0 : _b.arguments) || "";
208
+ }
209
+ return {
210
+ tool_calls: [
211
+ {
212
+ function: {
213
+ name,
214
+ arguments: toolArguments
215
+ }
216
+ }
217
+ ]
218
+ };
219
+ };
220
+ var parseModelDataFromResponse = (res) => {
221
+ if (typeof res !== "object" || res === null) {
222
+ return {
223
+ model: void 0,
224
+ modelParameters: void 0,
225
+ metadata: void 0
226
+ };
227
+ }
228
+ const model = "model" in res ? res["model"] : void 0;
229
+ const modelParameters = {};
230
+ const modelParamKeys = [
231
+ "max_output_tokens",
232
+ "parallel_tool_calls",
233
+ "store",
234
+ "temperature",
235
+ "tool_choice",
236
+ "top_p",
237
+ "truncation",
238
+ "user"
239
+ ];
240
+ const metadata = {};
241
+ const metadataKeys = [
242
+ "reasoning",
243
+ "incomplete_details",
244
+ "instructions",
245
+ "previous_response_id",
246
+ "tools",
247
+ "metadata",
248
+ "status",
249
+ "error"
250
+ ];
251
+ for (const key of modelParamKeys) {
252
+ const val = key in res ? res[key] : null;
253
+ if (val !== null && val !== void 0) {
254
+ modelParameters[key] = val;
255
+ }
256
+ }
257
+ for (const key of metadataKeys) {
258
+ const val = key in res ? res[key] : null;
259
+ if (val) {
260
+ metadata[key] = val;
261
+ }
262
+ }
263
+ return {
264
+ model,
265
+ modelParameters: Object.keys(modelParameters).length > 0 ? modelParameters : void 0,
266
+ metadata: Object.keys(metadata).length > 0 ? metadata : void 0
267
+ };
268
+ };
269
+
270
+ // src/utils.ts
271
+ var isAsyncIterable = (x) => x != null && typeof x === "object" && typeof x[Symbol.asyncIterator] === "function";
272
+
273
+ // src/traceMethod.ts
274
+ var withTracing = (tracedMethod, config) => {
275
+ return (...args) => wrapMethod(tracedMethod, config, ...args);
276
+ };
277
+ var wrapMethod = (tracedMethod, config, ...args) => {
278
+ var _a, _b;
279
+ const { model, input, modelParameters } = parseInputArgs((_a = args[0]) != null ? _a : {});
280
+ const finalModelParams = { ...modelParameters, response_format: "" };
281
+ const finalMetadata = {
282
+ ...config == null ? void 0 : config.generationMetadata,
283
+ response_format: "response_format" in modelParameters ? modelParameters.response_format : void 0
284
+ };
285
+ const generation = (0, import_tracing.startObservation)(
286
+ (_b = config == null ? void 0 : config.generationName) != null ? _b : "OpenAI-completion",
287
+ {
288
+ model,
289
+ input,
290
+ modelParameters: finalModelParams,
291
+ prompt: config == null ? void 0 : config.langfusePrompt,
292
+ metadata: finalMetadata
293
+ },
294
+ {
295
+ asType: "generation",
296
+ parentSpanContext: config == null ? void 0 : config.parentSpanContext
297
+ }
298
+ ).updateTrace({
299
+ userId: config == null ? void 0 : config.userId,
300
+ sessionId: config == null ? void 0 : config.sessionId,
301
+ tags: config == null ? void 0 : config.tags,
302
+ name: config == null ? void 0 : config.traceName
303
+ });
304
+ try {
305
+ const res = tracedMethod(...args);
306
+ if (isAsyncIterable(res)) {
307
+ return wrapAsyncIterable(res, generation);
308
+ }
309
+ if (res instanceof Promise) {
310
+ const wrappedPromise = res.then((result) => {
311
+ if (isAsyncIterable(result)) {
312
+ return wrapAsyncIterable(result, generation);
313
+ }
314
+ const output = parseCompletionOutput(result);
315
+ const usageDetails = parseUsageDetailsFromResponse(result);
316
+ const {
317
+ model: modelFromResponse,
318
+ modelParameters: modelParametersFromResponse,
319
+ metadata: metadataFromResponse
320
+ } = parseModelDataFromResponse(result);
321
+ generation.update({
322
+ output,
323
+ usageDetails,
324
+ model: modelFromResponse,
325
+ modelParameters: modelParametersFromResponse,
326
+ metadata: metadataFromResponse
327
+ }).end();
328
+ return result;
329
+ }).catch((err) => {
330
+ generation.update({
331
+ statusMessage: String(err),
332
+ level: "ERROR",
333
+ costDetails: {
334
+ input: 0,
335
+ output: 0,
336
+ total: 0
337
+ }
338
+ }).end();
339
+ throw err;
340
+ });
341
+ return wrappedPromise;
342
+ }
343
+ return res;
344
+ } catch (error) {
345
+ generation.update({
346
+ statusMessage: String(error),
347
+ level: "ERROR",
348
+ costDetails: {
349
+ input: 0,
350
+ output: 0,
351
+ total: 0
352
+ }
353
+ }).end();
354
+ throw error;
355
+ }
356
+ };
357
+ function wrapAsyncIterable(iterable, generation) {
358
+ async function* tracedOutputGenerator() {
359
+ const response = iterable;
360
+ const textChunks = [];
361
+ const toolCallChunks = [];
362
+ let usage = null;
363
+ let completionStartTime = void 0;
364
+ let usageDetails = void 0;
365
+ let output = null;
366
+ for await (const rawChunk of response) {
367
+ completionStartTime = completionStartTime != null ? completionStartTime : /* @__PURE__ */ new Date();
368
+ if (typeof rawChunk === "object" && rawChunk && "response" in rawChunk) {
369
+ const result = rawChunk["response"];
370
+ output = parseCompletionOutput(result);
371
+ usageDetails = parseUsageDetailsFromResponse(result);
372
+ const {
373
+ model: modelFromResponse,
374
+ modelParameters: modelParametersFromResponse,
375
+ metadata: metadataFromResponse
376
+ } = parseModelDataFromResponse(result);
377
+ generation.update({
378
+ model: modelFromResponse,
379
+ modelParameters: modelParametersFromResponse,
380
+ metadata: metadataFromResponse
381
+ });
382
+ }
383
+ if (typeof rawChunk === "object" && rawChunk != null && "usage" in rawChunk) {
384
+ usage = rawChunk.usage;
385
+ }
386
+ const processedChunk = parseChunk(rawChunk);
387
+ if (!processedChunk.isToolCall) {
388
+ textChunks.push(processedChunk.data);
389
+ } else {
390
+ toolCallChunks.push(processedChunk.data);
391
+ }
392
+ yield rawChunk;
393
+ }
394
+ output = output != null ? output : toolCallChunks.length > 0 ? getToolCallOutput(toolCallChunks) : textChunks.join("");
395
+ generation.update({
396
+ output,
397
+ completionStartTime,
398
+ usageDetails: usageDetails != null ? usageDetails : usage ? parseUsageDetails(usage) : void 0
399
+ }).end();
400
+ }
401
+ return tracedOutputGenerator();
402
+ }
403
+
404
+ // src/observeOpenAI.ts
405
+ var observeOpenAI = (sdk, langfuseConfig) => {
406
+ return new Proxy(sdk, {
407
+ get(wrappedSdk, propKey, proxy) {
408
+ var _a, _b;
409
+ const originalProperty = wrappedSdk[propKey];
410
+ const defaultGenerationName = `${(_a = sdk.constructor) == null ? void 0 : _a.name}.${propKey.toString()}`;
411
+ const generationName = (_b = langfuseConfig == null ? void 0 : langfuseConfig.generationName) != null ? _b : defaultGenerationName;
412
+ const config = { ...langfuseConfig, generationName };
413
+ if (typeof originalProperty === "function") {
414
+ return withTracing(originalProperty.bind(wrappedSdk), config);
415
+ }
416
+ const isNestedOpenAIObject = originalProperty && !Array.isArray(originalProperty) && !(originalProperty instanceof Date) && typeof originalProperty === "object";
417
+ if (isNestedOpenAIObject) {
418
+ return observeOpenAI(originalProperty, config);
419
+ }
420
+ return Reflect.get(wrappedSdk, propKey, proxy);
421
+ }
422
+ });
423
+ };
424
+ // Annotate the CommonJS export names for ESM import in node:
425
+ 0 && (module.exports = {
426
+ observeOpenAI
427
+ });
428
+ //# sourceMappingURL=index.cjs.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../src/index.ts","../src/traceMethod.ts","../src/parseOpenAI.ts","../src/utils.ts","../src/observeOpenAI.ts"],"sourcesContent":["export { observeOpenAI } from \"./observeOpenAI.js\";\nexport * from \"./types.js\";\n","import { LangfuseGeneration, startObservation } from \"@elasticdash/tracing\";\nimport type OpenAI from \"openai\";\n\nimport {\n getToolCallOutput,\n parseChunk,\n parseCompletionOutput,\n parseInputArgs,\n parseUsageDetails,\n parseModelDataFromResponse,\n parseUsageDetailsFromResponse,\n} from \"./parseOpenAI.js\";\nimport type { LangfuseConfig } from \"./types.js\";\nimport { isAsyncIterable } from \"./utils.js\";\n\n/**\n * Generic method type for any function that can be traced.\n * @internal\n */\ntype GenericMethod = (...args: unknown[]) => unknown;\n\n/**\n * Wraps a method with Langfuse tracing functionality.\n *\n * This function creates a wrapper around OpenAI SDK methods that automatically\n * creates Langfuse generations, captures input/output data, handles streaming\n * responses, and records usage metrics and errors.\n *\n * @param tracedMethod - The OpenAI SDK method to wrap with tracing\n * @param config - Configuration for the trace and generation\n * @returns A wrapped version of the method that creates Langfuse traces\n *\n * @internal\n */\nexport const withTracing = <T extends GenericMethod>(\n tracedMethod: T,\n config?: LangfuseConfig & Required<{ generationName: string }>,\n): ((...args: Parameters<T>) => Promise<ReturnType<T>>) => {\n return (...args) => wrapMethod(tracedMethod, config, ...args);\n};\n\n/**\n * Internal method that handles the actual tracing logic for OpenAI SDK methods.\n *\n * This function creates a Langfuse generation, executes the original method,\n * and captures all relevant data including input, output, usage, and errors.\n * It handles both streaming and non-streaming responses appropriately.\n *\n * @param tracedMethod - The original OpenAI SDK method to execute\n * 
@param config - Langfuse configuration options\n * @param args - Arguments to pass to the original method\n * @returns The result from the original method, potentially wrapped for streaming\n *\n * @internal\n */\nconst wrapMethod = <T extends GenericMethod>(\n tracedMethod: T,\n config?: LangfuseConfig,\n ...args: Parameters<T>\n): ReturnType<T> | any => {\n const { model, input, modelParameters } = parseInputArgs(args[0] ?? {});\n\n const finalModelParams = { ...modelParameters, response_format: \"\" };\n const finalMetadata = {\n ...config?.generationMetadata,\n response_format:\n \"response_format\" in modelParameters\n ? modelParameters.response_format\n : undefined,\n };\n\n const generation = startObservation(\n config?.generationName ?? \"OpenAI-completion\",\n {\n model,\n input,\n modelParameters: finalModelParams,\n prompt: config?.langfusePrompt,\n metadata: finalMetadata,\n },\n {\n asType: \"generation\",\n parentSpanContext: config?.parentSpanContext,\n },\n ).updateTrace({\n userId: config?.userId,\n sessionId: config?.sessionId,\n tags: config?.tags,\n name: config?.traceName,\n });\n\n try {\n const res = tracedMethod(...args);\n\n // Handle stream responses\n if (isAsyncIterable(res)) {\n return wrapAsyncIterable(res, generation);\n }\n\n if (res instanceof Promise) {\n const wrappedPromise = res\n .then((result) => {\n if (isAsyncIterable(result)) {\n return wrapAsyncIterable(result, generation);\n }\n\n const output = parseCompletionOutput(result);\n const usageDetails = parseUsageDetailsFromResponse(result);\n const {\n model: modelFromResponse,\n modelParameters: modelParametersFromResponse,\n metadata: metadataFromResponse,\n } = parseModelDataFromResponse(result);\n\n generation\n .update({\n output,\n usageDetails,\n model: modelFromResponse,\n modelParameters: modelParametersFromResponse,\n metadata: metadataFromResponse,\n })\n .end();\n\n return result;\n })\n .catch((err) => {\n generation\n .update({\n statusMessage: String(err),\n 
level: \"ERROR\",\n costDetails: {\n input: 0,\n output: 0,\n total: 0,\n },\n })\n .end();\n\n throw err;\n });\n\n return wrappedPromise;\n }\n\n return res;\n } catch (error) {\n generation\n .update({\n statusMessage: String(error),\n level: \"ERROR\",\n costDetails: {\n input: 0,\n output: 0,\n total: 0,\n },\n })\n .end();\n\n throw error;\n }\n};\n\n/**\n * Wraps an async iterable (streaming response) with Langfuse tracing.\n *\n * This function handles streaming OpenAI responses by collecting chunks,\n * parsing usage information, and updating the Langfuse generation with\n * the complete output and usage details once the stream is consumed.\n *\n * @param iterable - The async iterable from OpenAI (streaming response)\n * @param generation - The Langfuse generation to update with stream data\n * @returns An async generator that yields original chunks while collecting data\n *\n * @internal\n */\nfunction wrapAsyncIterable<R>(\n iterable: AsyncIterable<unknown>,\n generation: LangfuseGeneration,\n): R {\n async function* tracedOutputGenerator(): AsyncGenerator<\n unknown,\n void,\n unknown\n > {\n const response = iterable;\n const textChunks: string[] = [];\n const toolCallChunks: OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta.ToolCall[] =\n [];\n let usage: OpenAI.CompletionUsage | null = null;\n let completionStartTime: Date | undefined = undefined;\n let usageDetails: Record<string, number> | undefined = undefined;\n let output: unknown = null;\n\n for await (const rawChunk of response as AsyncIterable<unknown>) {\n completionStartTime = completionStartTime ?? 
new Date();\n\n // Handle Response API chunks\n if (typeof rawChunk === \"object\" && rawChunk && \"response\" in rawChunk) {\n const result = rawChunk[\"response\"];\n output = parseCompletionOutput(result);\n usageDetails = parseUsageDetailsFromResponse(result);\n\n const {\n model: modelFromResponse,\n modelParameters: modelParametersFromResponse,\n metadata: metadataFromResponse,\n } = parseModelDataFromResponse(result);\n\n generation.update({\n model: modelFromResponse,\n modelParameters: modelParametersFromResponse,\n metadata: metadataFromResponse,\n });\n }\n\n if (\n typeof rawChunk === \"object\" &&\n rawChunk != null &&\n \"usage\" in rawChunk\n ) {\n usage = rawChunk.usage as OpenAI.CompletionUsage | null;\n }\n\n const processedChunk = parseChunk(rawChunk);\n\n if (!processedChunk.isToolCall) {\n textChunks.push(processedChunk.data);\n } else {\n toolCallChunks.push(processedChunk.data);\n }\n\n yield rawChunk;\n }\n\n output =\n output ??\n (toolCallChunks.length > 0\n ? getToolCallOutput(toolCallChunks)\n : textChunks.join(\"\"));\n\n generation\n .update({\n output,\n completionStartTime,\n usageDetails:\n usageDetails ?? (usage ? 
parseUsageDetails(usage) : undefined),\n })\n .end();\n }\n\n return tracedOutputGenerator() as R;\n}\n","import type OpenAI from \"openai\";\n\ntype ParsedOpenAIArguments = {\n model: string;\n input: Record<string, any> | string;\n modelParameters: Record<string, any>;\n};\n\nexport const parseInputArgs = (\n args: Record<string, any>,\n): ParsedOpenAIArguments => {\n let params: Record<string, any> = {};\n params = {\n frequency_penalty: args.frequency_penalty,\n logit_bias: args.logit_bias,\n logprobs: args.logprobs,\n max_tokens: args.max_tokens,\n n: args.n,\n presence_penalty: args.presence_penalty,\n seed: args.seed,\n stop: args.stop,\n stream: args.stream,\n temperature: args.temperature,\n top_p: args.top_p,\n user: args.user,\n response_format: args.response_format,\n top_logprobs: args.top_logprobs,\n };\n\n let input: Record<string, any> | string = args.input;\n\n if (\n args &&\n typeof args === \"object\" &&\n !Array.isArray(args) &&\n \"messages\" in args\n ) {\n input = {};\n input.messages = args.messages;\n if (\"function_call\" in args) {\n input.function_call = args.function_call;\n }\n if (\"functions\" in args) {\n input.functions = args.functions;\n }\n if (\"tools\" in args) {\n input.tools = args.tools;\n }\n\n if (\"tool_choice\" in args) {\n input.tool_choice = args.tool_choice;\n }\n } else if (!input) {\n input = args.prompt;\n }\n\n return {\n model: args.model,\n input: input,\n modelParameters: params,\n };\n};\n\nexport const parseCompletionOutput = (res: unknown): unknown => {\n if (\n res instanceof Object &&\n \"output_text\" in res &&\n res[\"output_text\"] !== \"\"\n ) {\n return res[\"output_text\"] as string;\n }\n\n if (\n typeof res === \"object\" &&\n res &&\n \"output\" in res &&\n Array.isArray(res[\"output\"])\n ) {\n const output = res[\"output\"];\n\n if (output.length > 1) {\n return output;\n }\n if (output.length === 1) {\n return output[0] as Record<string, unknown>;\n }\n\n return null;\n }\n\n if (\n !(res 
instanceof Object && \"choices\" in res && Array.isArray(res.choices))\n ) {\n return \"\";\n }\n\n return \"message\" in res.choices[0]\n ? res.choices[0].message\n : (res.choices[0].text ?? \"\");\n};\n\nexport const parseUsageDetails = (\n completionUsage: OpenAI.CompletionUsage,\n): Record<string, number> | undefined => {\n if (\"prompt_tokens\" in completionUsage) {\n const {\n prompt_tokens,\n completion_tokens,\n total_tokens,\n completion_tokens_details,\n prompt_tokens_details,\n } = completionUsage;\n\n const flatPromptTokensDetails = Object.fromEntries(\n Object.entries(prompt_tokens_details ?? {}).map(([key, value]) => [\n `input_${key}`,\n value as number,\n ]),\n );\n\n const flatCompletionTokensDetails = Object.fromEntries(\n Object.entries(completion_tokens_details ?? {}).map(([key, value]) => [\n `output_${key}`,\n value as number,\n ]),\n );\n\n let finalInputTokens = prompt_tokens as number;\n Object.values(flatPromptTokensDetails).forEach((value) => {\n finalInputTokens = Math.max(finalInputTokens - value, 0);\n });\n\n let finalOutputTokens = completion_tokens as number;\n Object.values(flatCompletionTokensDetails).forEach((value) => {\n finalOutputTokens = Math.max(finalOutputTokens - value, 0);\n });\n\n return {\n input: finalInputTokens,\n output: finalOutputTokens,\n total: total_tokens,\n ...flatPromptTokensDetails,\n ...flatCompletionTokensDetails,\n };\n } else if (\"input_tokens\" in completionUsage) {\n const {\n input_tokens,\n output_tokens,\n total_tokens,\n input_tokens_details,\n output_tokens_details,\n } = completionUsage;\n\n let finalInputTokens = input_tokens as number;\n Object.keys(input_tokens_details ?? {}).forEach((key) => {\n finalInputTokens = Math.max(\n finalInputTokens -\n input_tokens_details[key as keyof typeof input_tokens_details],\n 0,\n );\n });\n\n let finalOutputTokens = output_tokens as number;\n Object.keys(output_tokens_details ?? 
{}).forEach((key) => {\n finalOutputTokens = Math.max(\n finalOutputTokens -\n output_tokens_details[key as keyof typeof output_tokens_details],\n 0,\n );\n });\n\n return {\n input: finalInputTokens,\n output: finalOutputTokens,\n total: total_tokens,\n ...Object.fromEntries(\n Object.entries(input_tokens_details ?? {}).map(([key, value]) => [\n `input_${key}`,\n value as number,\n ]),\n ),\n ...Object.fromEntries(\n Object.entries(output_tokens_details ?? {}).map(([key, value]) => [\n `output_${key}`,\n value as number,\n ]),\n ),\n };\n }\n};\n\nexport const parseUsageDetailsFromResponse = (\n res: unknown,\n): Record<string, number> | undefined => {\n if (hasCompletionUsage(res)) {\n return parseUsageDetails(res.usage);\n }\n};\n\nexport const parseChunk = (\n rawChunk: unknown,\n):\n | { isToolCall: false; data: string }\n | {\n isToolCall: true;\n data: OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta.ToolCall;\n } => {\n let isToolCall = false;\n const _chunk = rawChunk as\n | OpenAI.ChatCompletionChunk\n | OpenAI.Completions.Completion;\n const chunkData = _chunk?.choices?.[0];\n\n try {\n if (\n \"delta\" in chunkData &&\n \"tool_calls\" in chunkData.delta &&\n Array.isArray(chunkData.delta.tool_calls)\n ) {\n isToolCall = true;\n\n return { isToolCall, data: chunkData.delta.tool_calls[0] };\n }\n if (\"delta\" in chunkData) {\n return { isToolCall, data: chunkData.delta?.content || \"\" };\n }\n\n if (\"text\" in chunkData) {\n return { isToolCall, data: chunkData.text || \"\" };\n }\n } catch {}\n\n return { isToolCall: false, data: \"\" };\n};\n\n// Type guard to check if an unknown object is a UsageResponse\nfunction hasCompletionUsage(\n obj: any,\n): obj is { usage: OpenAI.CompletionUsage } {\n return (\n obj instanceof Object &&\n \"usage\" in obj &&\n obj.usage instanceof Object &&\n // Completion API Usage format\n ((typeof obj.usage.prompt_tokens === \"number\" &&\n typeof obj.usage.completion_tokens === \"number\" &&\n typeof 
obj.usage.total_tokens === \"number\") ||\n // Response API Usage format\n (typeof obj.usage.input_tokens === \"number\" &&\n typeof obj.usage.output_tokens === \"number\" &&\n typeof obj.usage.total_tokens === \"number\"))\n );\n}\n\nexport const getToolCallOutput = (\n toolCallChunks: OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta.ToolCall[],\n): {\n tool_calls: {\n function: {\n name: string;\n arguments: string;\n };\n }[];\n} => {\n let name = \"\";\n let toolArguments = \"\";\n\n for (const toolCall of toolCallChunks) {\n name = toolCall.function?.name || name;\n toolArguments += toolCall.function?.arguments || \"\";\n }\n\n return {\n tool_calls: [\n {\n function: {\n name,\n arguments: toolArguments,\n },\n },\n ],\n };\n};\n\nexport const parseModelDataFromResponse = (\n res: unknown,\n): {\n model: string | undefined;\n modelParameters: Record<string, string | number> | undefined;\n metadata: Record<string, unknown> | undefined;\n} => {\n if (typeof res !== \"object\" || res === null) {\n return {\n model: undefined,\n modelParameters: undefined,\n metadata: undefined,\n };\n }\n\n const model = \"model\" in res ? (res[\"model\"] as string) : undefined;\n const modelParameters: Record<string, string | number> = {};\n const modelParamKeys = [\n \"max_output_tokens\",\n \"parallel_tool_calls\",\n \"store\",\n \"temperature\",\n \"tool_choice\",\n \"top_p\",\n \"truncation\",\n \"user\",\n ];\n\n const metadata: Record<string, unknown> = {};\n const metadataKeys = [\n \"reasoning\",\n \"incomplete_details\",\n \"instructions\",\n \"previous_response_id\",\n \"tools\",\n \"metadata\",\n \"status\",\n \"error\",\n ];\n\n for (const key of modelParamKeys) {\n const val =\n key in res ? (res[key as keyof typeof res] as string | number) : null;\n if (val !== null && val !== undefined) {\n modelParameters[key as keyof typeof modelParameters] = val;\n }\n }\n\n for (const key of metadataKeys) {\n const val =\n key in res ? 
(res[key as keyof typeof res] as string | number) : null;\n if (val) {\n metadata[key as keyof typeof metadata] = val;\n }\n }\n\n return {\n model,\n modelParameters:\n Object.keys(modelParameters).length > 0 ? modelParameters : undefined,\n metadata: Object.keys(metadata).length > 0 ? metadata : undefined,\n };\n};\n","/**\n * Type guard to check if a value is an async iterable.\n *\n * This utility function determines whether a given value implements the\n * AsyncIterable interface, which is used to identify streaming responses\n * from the OpenAI SDK.\n *\n * @param x - The value to check\n * @returns True if the value is an async iterable, false otherwise\n *\n * @example\n * ```typescript\n * import { isAsyncIterable } from './utils.js';\n *\n * const response = await openai.chat.completions.create({\n * model: 'gpt-4',\n * messages: [...],\n * stream: true\n * });\n *\n * if (isAsyncIterable(response)) {\n * // Handle streaming response\n * for await (const chunk of response) {\n * console.log(chunk);\n * }\n * } else {\n * // Handle regular response\n * console.log(response);\n * }\n * ```\n *\n * @public\n */\nexport const isAsyncIterable = (x: unknown): x is AsyncIterable<unknown> =>\n x != null &&\n typeof x === \"object\" &&\n typeof (x as any)[Symbol.asyncIterator] === \"function\";\n","import { withTracing } from \"./traceMethod.js\";\nimport type { LangfuseConfig } from \"./types.js\";\n\n/**\n * Wraps an OpenAI SDK client with automatic Langfuse tracing.\n *\n * This function creates a proxy around the OpenAI SDK that automatically\n * traces all method calls, capturing detailed information about requests,\n * responses, token usage, costs, and performance metrics. It works with\n * both streaming and non-streaming OpenAI API calls.\n *\n * The wrapper recursively traces nested objects in the OpenAI SDK, ensuring\n * that all API calls (chat completions, embeddings, fine-tuning, etc.) 
are\n * automatically captured as Langfuse generations.\n *\n * @param sdk - The OpenAI SDK client instance to wrap with tracing\n * @param langfuseConfig - Optional configuration for tracing behavior\n * @returns A proxied version of the OpenAI SDK with automatic tracing\n *\n * @example\n * ```typescript\n * import OpenAI from 'openai';\n * import { observeOpenAI } from '@elasticdash/openai';\n *\n * const openai = observeOpenAI(new OpenAI({\n * apiKey: process.env.OPENAI_API_KEY,\n * }));\n *\n * // All OpenAI calls are now automatically traced\n * const response = await openai.chat.completions.create({\n * model: 'gpt-4',\n * messages: [{ role: 'user', content: 'Hello!' }],\n * max_tokens: 100,\n * temperature: 0.7\n * });\n * ```\n *\n * @example\n * ```typescript\n * // With custom tracing configuration\n * const openai = observeOpenAI(new OpenAI({\n * apiKey: process.env.OPENAI_API_KEY\n * }), {\n * traceName: 'AI-Assistant-Chat',\n * userId: 'user-123',\n * sessionId: 'session-456',\n * tags: ['production', 'chat-feature'],\n * generationName: 'gpt-4-chat-completion'\n * });\n *\n * const completion = await openai.chat.completions.create({\n * model: 'gpt-4',\n * messages: [{ role: 'user', content: 'Explain quantum computing' }]\n * });\n * ```\n *\n * @example\n * ```typescript\n * // Streaming responses are also automatically traced\n * const stream = await openai.chat.completions.create({\n * model: 'gpt-4',\n * messages: [{ role: 'user', content: 'Write a story' }],\n * stream: true\n * });\n *\n * for await (const chunk of stream) {\n * process.stdout.write(chunk.choices[0]?.delta?.content || '');\n * }\n * // Final usage details and complete output are captured automatically\n * ```\n *\n * @example\n * ```typescript\n * // Using with Langfuse prompt management\n * const openai = observeOpenAI(new OpenAI({\n * apiKey: process.env.OPENAI_API_KEY\n * }), {\n * langfusePrompt: {\n * name: 'chat-assistant-v2',\n * version: 3,\n * isFallback: false\n * 
},\n * generationMetadata: {\n * environment: 'production',\n * feature: 'chat-assistant'\n * }\n * });\n * ```\n *\n * @public\n */\nexport const observeOpenAI = <SDKType extends object>(\n sdk: SDKType,\n langfuseConfig?: LangfuseConfig,\n): SDKType => {\n return new Proxy(sdk, {\n get(wrappedSdk, propKey, proxy) {\n const originalProperty = wrappedSdk[propKey as keyof SDKType];\n\n const defaultGenerationName = `${sdk.constructor?.name}.${propKey.toString()}`;\n const generationName =\n langfuseConfig?.generationName ?? defaultGenerationName;\n const config = { ...langfuseConfig, generationName };\n\n // Trace methods of the OpenAI SDK\n if (typeof originalProperty === \"function\") {\n return withTracing(originalProperty.bind(wrappedSdk), config);\n }\n\n const isNestedOpenAIObject =\n originalProperty &&\n !Array.isArray(originalProperty) &&\n !(originalProperty instanceof Date) &&\n typeof originalProperty === \"object\";\n\n // Recursively wrap nested objects to ensure all nested properties or methods are also traced\n if (isNestedOpenAIObject) {\n return observeOpenAI(originalProperty, config);\n }\n\n // Fallback to returning the original value\n return Reflect.get(wrappedSdk, propKey, proxy);\n },\n 
});\n};\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACAA,qBAAqD;;;ACQ9C,IAAM,iBAAiB,CAC5B,SAC0B;AAC1B,MAAI,SAA8B,CAAC;AACnC,WAAS;AAAA,IACP,mBAAmB,KAAK;AAAA,IACxB,YAAY,KAAK;AAAA,IACjB,UAAU,KAAK;AAAA,IACf,YAAY,KAAK;AAAA,IACjB,GAAG,KAAK;AAAA,IACR,kBAAkB,KAAK;AAAA,IACvB,MAAM,KAAK;AAAA,IACX,MAAM,KAAK;AAAA,IACX,QAAQ,KAAK;AAAA,IACb,aAAa,KAAK;AAAA,IAClB,OAAO,KAAK;AAAA,IACZ,MAAM,KAAK;AAAA,IACX,iBAAiB,KAAK;AAAA,IACtB,cAAc,KAAK;AAAA,EACrB;AAEA,MAAI,QAAsC,KAAK;AAE/C,MACE,QACA,OAAO,SAAS,YAChB,CAAC,MAAM,QAAQ,IAAI,KACnB,cAAc,MACd;AACA,YAAQ,CAAC;AACT,UAAM,WAAW,KAAK;AACtB,QAAI,mBAAmB,MAAM;AAC3B,YAAM,gBAAgB,KAAK;AAAA,IAC7B;AACA,QAAI,eAAe,MAAM;AACvB,YAAM,YAAY,KAAK;AAAA,IACzB;AACA,QAAI,WAAW,MAAM;AACnB,YAAM,QAAQ,KAAK;AAAA,IACrB;AAEA,QAAI,iBAAiB,MAAM;AACzB,YAAM,cAAc,KAAK;AAAA,IAC3B;AAAA,EACF,WAAW,CAAC,OAAO;AACjB,YAAQ,KAAK;AAAA,EACf;AAEA,SAAO;AAAA,IACL,OAAO,KAAK;AAAA,IACZ;AAAA,IACA,iBAAiB;AAAA,EACnB;AACF;AAEO,IAAM,wBAAwB,CAAC,QAA0B;AA/DhE;AAgEE,MACE,eAAe,UACf,iBAAiB,OACjB,IAAI,aAAa,MAAM,IACvB;AACA,WAAO,IAAI,aAAa;AAAA,EAC1B;AAEA,MACE,OAAO,QAAQ,YACf,OACA,YAAY,OACZ,MAAM,QAAQ,IAAI,QAAQ,CAAC,GAC3B;AACA,UAAM,SAAS,IAAI,QAAQ;AAE3B,QAAI,OAAO,SAAS,GAAG;AACrB,aAAO;AAAA,IACT;AACA,QAAI,OAAO,WAAW,GAAG;AACvB,aAAO,OAAO,CAAC;AAAA,IACjB;AAEA,WAAO;AAAA,EACT;AAEA,MACE,EAAE,eAAe,UAAU,aAAa,OAAO,MAAM,QAAQ,IAAI,OAAO,IACxE;AACA,WAAO;AAAA,EACT;AAEA,SAAO,aAAa,IAAI,QAAQ,CAAC,IAC7B,IAAI,QAAQ,CAAC,EAAE,WACd,SAAI,QAAQ,CAAC,EAAE,SAAf,YAAuB;AAC9B;AAEO,IAAM,oBAAoB,CAC/B,oBACuC;AACvC,MAAI,mBAAmB,iBAAiB;AACtC,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF,IAAI;AAEJ,UAAM,0BAA0B,OAAO;AAAA,MACrC,OAAO,QAAQ,wDAAyB,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,KAAK,KAAK,MAAM;AAAA,QAChE,SAAS,GAAG;AAAA,QACZ;AAAA,MACF,CAAC;AAAA,IACH;AAEA,UAAM,8BAA8B,OAAO;AAAA,MACzC,OAAO,QAAQ,gEAA6B,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,KAAK,KAAK,MAAM;AAAA,QACpE,UAAU,GAAG;AAAA,QACb;AAAA,MACF,CAAC;AAAA,IACH;AAEA,QAAI,mBAAmB;AACvB,WAAO,OAAO,uBAAuB,EAAE,QAAQ,CAAC,UAAU;AACxD,yBAAmB,KAAK,IAAI,mBAAmB,OAAO,CAAC;AAAA,IACzD,CAAC;AAED,QAAI,oBAAoB;
AACxB,WAAO,OAAO,2BAA2B,EAAE,QAAQ,CAAC,UAAU;AAC5D,0BAAoB,KAAK,IAAI,oBAAoB,OAAO,CAAC;AAAA,IAC3D,CAAC;AAED,WAAO;AAAA,MACL,OAAO;AAAA,MACP,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,GAAG;AAAA,MACH,GAAG;AAAA,IACL;AAAA,EACF,WAAW,kBAAkB,iBAAiB;AAC5C,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF,IAAI;AAEJ,QAAI,mBAAmB;AACvB,WAAO,KAAK,sDAAwB,CAAC,CAAC,EAAE,QAAQ,CAAC,QAAQ;AACvD,yBAAmB,KAAK;AAAA,QACtB,mBACE,qBAAqB,GAAwC;AAAA,QAC/D;AAAA,MACF;AAAA,IACF,CAAC;AAED,QAAI,oBAAoB;AACxB,WAAO,KAAK,wDAAyB,CAAC,CAAC,EAAE,QAAQ,CAAC,QAAQ;AACxD,0BAAoB,KAAK;AAAA,QACvB,oBACE,sBAAsB,GAAyC;AAAA,QACjE;AAAA,MACF;AAAA,IACF,CAAC;AAED,WAAO;AAAA,MACL,OAAO;AAAA,MACP,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,GAAG,OAAO;AAAA,QACR,OAAO,QAAQ,sDAAwB,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,KAAK,KAAK,MAAM;AAAA,UAC/D,SAAS,GAAG;AAAA,UACZ;AAAA,QACF,CAAC;AAAA,MACH;AAAA,MACA,GAAG,OAAO;AAAA,QACR,OAAO,QAAQ,wDAAyB,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,KAAK,KAAK,MAAM;AAAA,UAChE,UAAU,GAAG;AAAA,UACb;AAAA,QACF,CAAC;AAAA,MACH;AAAA,IACF;AAAA,EACF;AACF;AAEO,IAAM,gCAAgC,CAC3C,QACuC;AACvC,MAAI,mBAAmB,GAAG,GAAG;AAC3B,WAAO,kBAAkB,IAAI,KAAK;AAAA,EACpC;AACF;AAEO,IAAM,aAAa,CACxB,aAMO;AA9MT;AA+ME,MAAI,aAAa;AACjB,QAAM,SAAS;AAGf,QAAM,aAAY,sCAAQ,YAAR,mBAAkB;AAEpC,MAAI;AACF,QACE,WAAW,aACX,gBAAgB,UAAU,SAC1B,MAAM,QAAQ,UAAU,MAAM,UAAU,GACxC;AACA,mBAAa;AAEb,aAAO,EAAE,YAAY,MAAM,UAAU,MAAM,WAAW,CAAC,EAAE;AAAA,IAC3D;AACA,QAAI,WAAW,WAAW;AACxB,aAAO,EAAE,YAAY,QAAM,eAAU,UAAV,mBAAiB,YAAW,GAAG;AAAA,IAC5D;AAEA,QAAI,UAAU,WAAW;AACvB,aAAO,EAAE,YAAY,MAAM,UAAU,QAAQ,GAAG;AAAA,IAClD;AAAA,EACF,QAAQ;AAAA,EAAC;AAET,SAAO,EAAE,YAAY,OAAO,MAAM,GAAG;AACvC;AAGA,SAAS,mBACP,KAC0C;AAC1C,SACE,eAAe,UACf,WAAW,OACX,IAAI,iBAAiB;AAAA,GAEnB,OAAO,IAAI,MAAM,kBAAkB,YACnC,OAAO,IAAI,MAAM,sBAAsB,YACvC,OAAO,IAAI,MAAM,iBAAiB;AAAA,EAEjC,OAAO,IAAI,MAAM,iBAAiB,YACjC,OAAO,IAAI,MAAM,kBAAkB,YACnC,OAAO,IAAI,MAAM,iBAAiB;AAE1C;AAEO,IAAM,oBAAoB,CAC/B,mBAQG;AAvQL;AAwQE,MAAI,OAAO;AACX,MAAI,gBAAgB;AAEpB,aAAW,YAAY,gBAAgB;AACrC,aAAO,cAAS,aAAT,mBAAmB,SAAQ;AAClC,uBAAiB,cAAS,aAAT,mBAAmB,cAAa;AAAA,EACnD;AAEA,SAAO;AAAA,IACL,YAAY;A
AAA,MACV;AAAA,QACE,UAAU;AAAA,UACR;AAAA,UACA,WAAW;AAAA,QACb;AAAA,MACF;AAAA,IACF;AAAA,EACF;AACF;AAEO,IAAM,6BAA6B,CACxC,QAKG;AACH,MAAI,OAAO,QAAQ,YAAY,QAAQ,MAAM;AAC3C,WAAO;AAAA,MACL,OAAO;AAAA,MACP,iBAAiB;AAAA,MACjB,UAAU;AAAA,IACZ;AAAA,EACF;AAEA,QAAM,QAAQ,WAAW,MAAO,IAAI,OAAO,IAAe;AAC1D,QAAM,kBAAmD,CAAC;AAC1D,QAAM,iBAAiB;AAAA,IACrB;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AAEA,QAAM,WAAoC,CAAC;AAC3C,QAAM,eAAe;AAAA,IACnB;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AAEA,aAAW,OAAO,gBAAgB;AAChC,UAAM,MACJ,OAAO,MAAO,IAAI,GAAuB,IAAwB;AACnE,QAAI,QAAQ,QAAQ,QAAQ,QAAW;AACrC,sBAAgB,GAAmC,IAAI;AAAA,IACzD;AAAA,EACF;AAEA,aAAW,OAAO,cAAc;AAC9B,UAAM,MACJ,OAAO,MAAO,IAAI,GAAuB,IAAwB;AACnE,QAAI,KAAK;AACP,eAAS,GAA4B,IAAI;AAAA,IAC3C;AAAA,EACF;AAEA,SAAO;AAAA,IACL;AAAA,IACA,iBACE,OAAO,KAAK,eAAe,EAAE,SAAS,IAAI,kBAAkB;AAAA,IAC9D,UAAU,OAAO,KAAK,QAAQ,EAAE,SAAS,IAAI,WAAW;AAAA,EAC1D;AACF;;;ACzTO,IAAM,kBAAkB,CAAC,MAC9B,KAAK,QACL,OAAO,MAAM,YACb,OAAQ,EAAU,OAAO,aAAa,MAAM;;;AFFvC,IAAM,cAAc,CACzB,cACA,WACyD;AACzD,SAAO,IAAI,SAAS,WAAW,cAAc,QAAQ,GAAG,IAAI;AAC9D;AAgBA,IAAM,aAAa,CACjB,cACA,WACG,SACqB;AA3D1B;AA4DE,QAAM,EAAE,OAAO,OAAO,gBAAgB,IAAI,gBAAe,UAAK,CAAC,MAAN,YAAW,CAAC,CAAC;AAEtE,QAAM,mBAAmB,EAAE,GAAG,iBAAiB,iBAAiB,GAAG;AACnE,QAAM,gBAAgB;AAAA,IACpB,GAAG,iCAAQ;AAAA,IACX,iBACE,qBAAqB,kBACjB,gBAAgB,kBAChB;AAAA,EACR;AAEA,QAAM,iBAAa;AAAA,KACjB,sCAAQ,mBAAR,YAA0B;AAAA,IAC1B;AAAA,MACE;AAAA,MACA;AAAA,MACA,iBAAiB;AAAA,MACjB,QAAQ,iCAAQ;AAAA,MAChB,UAAU;AAAA,IACZ;AAAA,IACA;AAAA,MACE,QAAQ;AAAA,MACR,mBAAmB,iCAAQ;AAAA,IAC7B;AAAA,EACF,EAAE,YAAY;AAAA,IACZ,QAAQ,iCAAQ;AAAA,IAChB,WAAW,iCAAQ;AAAA,IACnB,MAAM,iCAAQ;AAAA,IACd,MAAM,iCAAQ;AAAA,EAChB,CAAC;AAED,MAAI;AACF,UAAM,MAAM,aAAa,GAAG,IAAI;AAGhC,QAAI,gBAAgB,GAAG,GAAG;AACxB,aAAO,kBAAkB,KAAK,UAAU;AAAA,IAC1C;AAEA,QAAI,eAAe,SAAS;AAC1B,YAAM,iBAAiB,IACpB,KAAK,CAAC,WAAW;AAChB,YAAI,gBAAgB,MAAM,GAAG;AAC3B,iBAAO,kBAAkB,QAAQ,UAAU;AAAA,QAC7C;AAEA,cAAM,SAAS,sBAAsB,MAAM;AAC3C,cAAM,eAAe,8BAA8B,MAAM;AACzD,cAAM;AAAA,UACJ,OA
AO;AAAA,UACP,iBAAiB;AAAA,UACjB,UAAU;AAAA,QACZ,IAAI,2BAA2B,MAAM;AAErC,mBACG,OAAO;AAAA,UACN;AAAA,UACA;AAAA,UACA,OAAO;AAAA,UACP,iBAAiB;AAAA,UACjB,UAAU;AAAA,QACZ,CAAC,EACA,IAAI;AAEP,eAAO;AAAA,MACT,CAAC,EACA,MAAM,CAAC,QAAQ;AACd,mBACG,OAAO;AAAA,UACN,eAAe,OAAO,GAAG;AAAA,UACzB,OAAO;AAAA,UACP,aAAa;AAAA,YACX,OAAO;AAAA,YACP,QAAQ;AAAA,YACR,OAAO;AAAA,UACT;AAAA,QACF,CAAC,EACA,IAAI;AAEP,cAAM;AAAA,MACR,CAAC;AAEH,aAAO;AAAA,IACT;AAEA,WAAO;AAAA,EACT,SAAS,OAAO;AACd,eACG,OAAO;AAAA,MACN,eAAe,OAAO,KAAK;AAAA,MAC3B,OAAO;AAAA,MACP,aAAa;AAAA,QACX,OAAO;AAAA,QACP,QAAQ;AAAA,QACR,OAAO;AAAA,MACT;AAAA,IACF,CAAC,EACA,IAAI;AAEP,UAAM;AAAA,EACR;AACF;AAeA,SAAS,kBACP,UACA,YACG;AACH,kBAAgB,wBAId;AACA,UAAM,WAAW;AACjB,UAAM,aAAuB,CAAC;AAC9B,UAAM,iBACJ,CAAC;AACH,QAAI,QAAuC;AAC3C,QAAI,sBAAwC;AAC5C,QAAI,eAAmD;AACvD,QAAI,SAAkB;AAEtB,qBAAiB,YAAY,UAAoC;AAC/D,4BAAsB,oDAAuB,oBAAI,KAAK;AAGtD,UAAI,OAAO,aAAa,YAAY,YAAY,cAAc,UAAU;AACtE,cAAM,SAAS,SAAS,UAAU;AAClC,iBAAS,sBAAsB,MAAM;AACrC,uBAAe,8BAA8B,MAAM;AAEnD,cAAM;AAAA,UACJ,OAAO;AAAA,UACP,iBAAiB;AAAA,UACjB,UAAU;AAAA,QACZ,IAAI,2BAA2B,MAAM;AAErC,mBAAW,OAAO;AAAA,UAChB,OAAO;AAAA,UACP,iBAAiB;AAAA,UACjB,UAAU;AAAA,QACZ,CAAC;AAAA,MACH;AAEA,UACE,OAAO,aAAa,YACpB,YAAY,QACZ,WAAW,UACX;AACA,gBAAQ,SAAS;AAAA,MACnB;AAEA,YAAM,iBAAiB,WAAW,QAAQ;AAE1C,UAAI,CAAC,eAAe,YAAY;AAC9B,mBAAW,KAAK,eAAe,IAAI;AAAA,MACrC,OAAO;AACL,uBAAe,KAAK,eAAe,IAAI;AAAA,MACzC;AAEA,YAAM;AAAA,IACR;AAEA,aACE,0BACC,eAAe,SAAS,IACrB,kBAAkB,cAAc,IAChC,WAAW,KAAK,EAAE;AAExB,eACG,OAAO;AAAA,MACN;AAAA,MACA;AAAA,MACA,cACE,sCAAiB,QAAQ,kBAAkB,KAAK,IAAI;AAAA,IACxD,CAAC,EACA,IAAI;AAAA,EACT;AAEA,SAAO,sBAAsB;AAC/B;;;AGjKO,IAAM,gBAAgB,CAC3B,KACA,mBACY;AACZ,SAAO,IAAI,MAAM,KAAK;AAAA,IACpB,IAAI,YAAY,SAAS,OAAO;AAhGpC;AAiGM,YAAM,mBAAmB,WAAW,OAAwB;AAE5D,YAAM,wBAAwB,IAAG,SAAI,gBAAJ,mBAAiB,IAAI,IAAI,QAAQ,SAAS,CAAC;AAC5E,YAAM,kBACJ,sDAAgB,mBAAhB,YAAkC;AACpC,YAAM,SAAS,EAAE,GAAG,gBAAgB,eAAe;AAGnD,UAAI,OAAO,qBAAqB,YAAY;AAC1C,eAAO,YAAY,iBAAiB,KAAK,UAAU,GAAG,MAAM;AAAA,MAC9D;AAEA,YAAM,uBACJ,oBACA,CAAC,MAAM,QAAQ,gBAAgB,KAC/B,EAAE,4BAA4B,SAC9B,OAAO,qBAAqB
;AAG9B,UAAI,sBAAsB;AACxB,eAAO,cAAc,kBAAkB,MAAM;AAAA,MAC/C;AAGA,aAAO,QAAQ,IAAI,YAAY,SAAS,KAAK;AAAA,IAC/C;AAAA,EACF,CAAC;AACH;","names":[]}
@@ -0,0 +1,128 @@
1
+ import { SpanContext } from '@opentelemetry/api';
2
+
3
+ /**
4
+ * Configuration options for Langfuse OpenAI tracing.
5
+ *
6
+ * This interface defines all available options for customizing how OpenAI
7
+ * SDK calls are traced and stored in Langfuse. It includes both trace-level
8
+ * metadata and generation-specific configuration.
9
+ *
10
+ * @public
11
+ */
12
+ type LangfuseConfig = {
13
+ /** OpenTelemetry span context to use as parent for the generated span */
14
+ parentSpanContext?: SpanContext;
15
+ /** Name for the trace that will contain this generation */
16
+ traceName?: string;
17
+ /** Session identifier to group related interactions */
18
+ sessionId?: string;
19
+ /** User identifier for associating the trace with a specific user */
20
+ userId?: string;
21
+ /** Tags for categorizing and filtering traces */
22
+ tags?: string[];
23
+ /** Custom name for the generation observation (defaults to SDK method name) */
24
+ generationName?: string;
25
+ /** Additional metadata to attach to the generation */
26
+ generationMetadata?: Record<string, unknown>;
27
+ /** Information about the Langfuse prompt used for this generation */
28
+ langfusePrompt?: {
29
+ /** Name of the prompt template in Langfuse */
30
+ name: string;
31
+ /** Version number of the prompt template */
32
+ version: number;
33
+ /** Whether this is a fallback prompt due to retrieval failure */
34
+ isFallback: boolean;
35
+ };
36
+ };
37
+
38
+ /**
39
+ * Wraps an OpenAI SDK client with automatic Langfuse tracing.
40
+ *
41
+ * This function creates a proxy around the OpenAI SDK that automatically
42
+ * traces all method calls, capturing detailed information about requests,
43
+ * responses, token usage, costs, and performance metrics. It works with
44
+ * both streaming and non-streaming OpenAI API calls.
45
+ *
46
+ * The wrapper recursively traces nested objects in the OpenAI SDK, ensuring
47
+ * that all API calls (chat completions, embeddings, fine-tuning, etc.) are
48
+ * automatically captured as Langfuse generations.
49
+ *
50
+ * @param sdk - The OpenAI SDK client instance to wrap with tracing
51
+ * @param langfuseConfig - Optional configuration for tracing behavior
52
+ * @returns A proxied version of the OpenAI SDK with automatic tracing
53
+ *
54
+ * @example
55
+ * ```typescript
56
+ * import OpenAI from 'openai';
57
+ * import { observeOpenAI } from '@elasticdash/openai';
58
+ *
59
+ * const openai = observeOpenAI(new OpenAI({
60
+ * apiKey: process.env.OPENAI_API_KEY,
61
+ * }));
62
+ *
63
+ * // All OpenAI calls are now automatically traced
64
+ * const response = await openai.chat.completions.create({
65
+ * model: 'gpt-4',
66
+ * messages: [{ role: 'user', content: 'Hello!' }],
67
+ * max_tokens: 100,
68
+ * temperature: 0.7
69
+ * });
70
+ * ```
71
+ *
72
+ * @example
73
+ * ```typescript
74
+ * // With custom tracing configuration
75
+ * const openai = observeOpenAI(new OpenAI({
76
+ * apiKey: process.env.OPENAI_API_KEY
77
+ * }), {
78
+ * traceName: 'AI-Assistant-Chat',
79
+ * userId: 'user-123',
80
+ * sessionId: 'session-456',
81
+ * tags: ['production', 'chat-feature'],
82
+ * generationName: 'gpt-4-chat-completion'
83
+ * });
84
+ *
85
+ * const completion = await openai.chat.completions.create({
86
+ * model: 'gpt-4',
87
+ * messages: [{ role: 'user', content: 'Explain quantum computing' }]
88
+ * });
89
+ * ```
90
+ *
91
+ * @example
92
+ * ```typescript
93
+ * // Streaming responses are also automatically traced
94
+ * const stream = await openai.chat.completions.create({
95
+ * model: 'gpt-4',
96
+ * messages: [{ role: 'user', content: 'Write a story' }],
97
+ * stream: true
98
+ * });
99
+ *
100
+ * for await (const chunk of stream) {
101
+ * process.stdout.write(chunk.choices[0]?.delta?.content || '');
102
+ * }
103
+ * // Final usage details and complete output are captured automatically
104
+ * ```
105
+ *
106
+ * @example
107
+ * ```typescript
108
+ * // Using with Langfuse prompt management
109
+ * const openai = observeOpenAI(new OpenAI({
110
+ * apiKey: process.env.OPENAI_API_KEY
111
+ * }), {
112
+ * langfusePrompt: {
113
+ * name: 'chat-assistant-v2',
114
+ * version: 3,
115
+ * isFallback: false
116
+ * },
117
+ * generationMetadata: {
118
+ * environment: 'production',
119
+ * feature: 'chat-assistant'
120
+ * }
121
+ * });
122
+ * ```
123
+ *
124
+ * @public
125
+ */
126
+ declare const observeOpenAI: <SDKType extends object>(sdk: SDKType, langfuseConfig?: LangfuseConfig) => SDKType;
127
+
128
+ export { type LangfuseConfig, observeOpenAI };
@@ -0,0 +1,128 @@
1
+ import { SpanContext } from '@opentelemetry/api';
2
+
3
+ /**
4
+ * Configuration options for Langfuse OpenAI tracing.
5
+ *
6
+ * This interface defines all available options for customizing how OpenAI
7
+ * SDK calls are traced and stored in Langfuse. It includes both trace-level
8
+ * metadata and generation-specific configuration.
9
+ *
10
+ * @public
11
+ */
12
type LangfuseConfig = {
  /** OpenTelemetry span context to use as parent for the generated span */
  parentSpanContext?: SpanContext;
  /** Name for the trace that will contain this generation */
  traceName?: string;
  /** Session identifier to group related interactions under one session */
  sessionId?: string;
  /** User identifier for associating the trace with a specific user */
  userId?: string;
  /** Tags for categorizing and filtering traces in the Langfuse UI */
  tags?: string[];
  /** Custom name for the generation observation (defaults to `<SdkClass>.<method>`) */
  generationName?: string;
  /** Additional metadata to attach to the generation */
  generationMetadata?: Record<string, unknown>;
  /** Information about the Langfuse prompt used for this generation (links the generation to prompt management) */
  langfusePrompt?: {
    /** Name of the prompt template in Langfuse */
    name: string;
    /** Version number of the prompt template */
    version: number;
    /** Whether this is a fallback prompt due to retrieval failure */
    isFallback: boolean;
  };
};
37
+
38
+ /**
39
+ * Wraps an OpenAI SDK client with automatic Langfuse tracing.
40
+ *
41
+ * This function creates a proxy around the OpenAI SDK that automatically
42
+ * traces all method calls, capturing detailed information about requests,
43
+ * responses, token usage, costs, and performance metrics. It works with
44
+ * both streaming and non-streaming OpenAI API calls.
45
+ *
46
+ * The wrapper recursively traces nested objects in the OpenAI SDK, ensuring
47
+ * that all API calls (chat completions, embeddings, fine-tuning, etc.) are
48
+ * automatically captured as Langfuse generations.
49
+ *
50
+ * @param sdk - The OpenAI SDK client instance to wrap with tracing
51
+ * @param langfuseConfig - Optional configuration for tracing behavior
52
+ * @returns A proxied version of the OpenAI SDK with automatic tracing
53
+ *
54
+ * @example
55
+ * ```typescript
56
+ * import OpenAI from 'openai';
57
+ * import { observeOpenAI } from '@elasticdash/openai';
58
+ *
59
+ * const openai = observeOpenAI(new OpenAI({
60
+ * apiKey: process.env.OPENAI_API_KEY,
61
+ * }));
62
+ *
63
+ * // All OpenAI calls are now automatically traced
64
+ * const response = await openai.chat.completions.create({
65
+ * model: 'gpt-4',
66
+ * messages: [{ role: 'user', content: 'Hello!' }],
67
+ * max_tokens: 100,
68
+ * temperature: 0.7
69
+ * });
70
+ * ```
71
+ *
72
+ * @example
73
+ * ```typescript
74
+ * // With custom tracing configuration
75
+ * const openai = observeOpenAI(new OpenAI({
76
+ * apiKey: process.env.OPENAI_API_KEY
77
+ * }), {
78
+ * traceName: 'AI-Assistant-Chat',
79
+ * userId: 'user-123',
80
+ * sessionId: 'session-456',
81
+ * tags: ['production', 'chat-feature'],
82
+ * generationName: 'gpt-4-chat-completion'
83
+ * });
84
+ *
85
+ * const completion = await openai.chat.completions.create({
86
+ * model: 'gpt-4',
87
+ * messages: [{ role: 'user', content: 'Explain quantum computing' }]
88
+ * });
89
+ * ```
90
+ *
91
+ * @example
92
+ * ```typescript
93
+ * // Streaming responses are also automatically traced
94
+ * const stream = await openai.chat.completions.create({
95
+ * model: 'gpt-4',
96
+ * messages: [{ role: 'user', content: 'Write a story' }],
97
+ * stream: true
98
+ * });
99
+ *
100
+ * for await (const chunk of stream) {
101
+ * process.stdout.write(chunk.choices[0]?.delta?.content || '');
102
+ * }
103
+ * // Final usage details and complete output are captured automatically
104
+ * ```
105
+ *
106
+ * @example
107
+ * ```typescript
108
+ * // Using with Langfuse prompt management
109
+ * const openai = observeOpenAI(new OpenAI({
110
+ * apiKey: process.env.OPENAI_API_KEY
111
+ * }), {
112
+ * langfusePrompt: {
113
+ * name: 'chat-assistant-v2',
114
+ * version: 3,
115
+ * isFallback: false
116
+ * },
117
+ * generationMetadata: {
118
+ * environment: 'production',
119
+ * feature: 'chat-assistant'
120
+ * }
121
+ * });
122
+ * ```
123
+ *
124
+ * @public
125
+ */
126
+ declare const observeOpenAI: <SDKType extends object>(sdk: SDKType, langfuseConfig?: LangfuseConfig) => SDKType;
127
+
128
+ export { type LangfuseConfig, observeOpenAI };
package/dist/index.mjs ADDED
@@ -0,0 +1,401 @@
1
+ // src/traceMethod.ts
2
+ import { startObservation } from "@elasticdash/tracing";
3
+
4
+ // src/parseOpenAI.ts
5
// Normalizes the first argument of an OpenAI SDK call into the shape used
// for tracing: the model name, the traced input payload, and the
// sampling/model parameters. Parameter keys are always present (possibly
// with undefined values) so callers can use `in` checks.
const parseInputArgs = (args) => {
  const modelParameters = {
    frequency_penalty: args.frequency_penalty,
    logit_bias: args.logit_bias,
    logprobs: args.logprobs,
    max_tokens: args.max_tokens,
    n: args.n,
    presence_penalty: args.presence_penalty,
    seed: args.seed,
    stop: args.stop,
    stream: args.stream,
    temperature: args.temperature,
    top_p: args.top_p,
    user: args.user,
    response_format: args.response_format,
    top_logprobs: args.top_logprobs
  };

  // Chat-style calls carry a `messages` array; capture it together with any
  // function/tool-calling fields. Otherwise fall back to `input` (Responses
  // API) or, when that is falsy, `prompt` (legacy Completions API).
  let input;
  const isChatCall = args && typeof args === "object" && !Array.isArray(args) && "messages" in args;
  if (isChatCall) {
    input = { messages: args.messages };
    for (const key of ["function_call", "functions", "tools", "tool_choice"]) {
      if (key in args) {
        input[key] = args[key];
      }
    }
  } else {
    input = args.input || args.prompt;
  }

  return {
    model: args.model,
    input,
    modelParameters
  };
};
48
// Extracts the generation output from an OpenAI response object. Supports
// the Responses API (`output_text` / `output` array), the Chat Completions
// API (`choices[0].message`), and the legacy Completions API
// (`choices[0].text`). Unrecognized shapes yield an empty string.
const parseCompletionOutput = (res) => {
  // Responses API convenience field: prefer aggregated text when non-empty.
  if (res instanceof Object && "output_text" in res && res["output_text"] !== "") {
    return res["output_text"];
  }
  // Responses API structured output: unwrap single-item arrays, null when empty.
  if (typeof res === "object" && res && "output" in res && Array.isArray(res["output"])) {
    const output = res["output"];
    if (output.length === 0) {
      return null;
    }
    return output.length === 1 ? output[0] : output;
  }
  const hasChoices = res instanceof Object && "choices" in res && Array.isArray(res.choices);
  if (!hasChoices) {
    return "";
  }
  const firstChoice = res.choices[0];
  if ("message" in firstChoice) {
    return firstChoice.message;
  }
  return firstChoice.text != null ? firstChoice.text : "";
};
68
// Converts an OpenAI usage object (Chat Completions or Responses API
// shape) into Langfuse usage details. Detailed token categories are
// flattened with `input_` / `output_` prefixes, and each category count is
// subtracted from the base input/output totals (clamped at zero) so the
// categories are not double counted. Unknown shapes yield undefined.
const parseUsageDetails = (completionUsage) => {
  // Flatten a details object by prefixing every key.
  const prefixEntries = (details, prefix) =>
    Object.fromEntries(
      Object.entries(details != null ? details : {}).map(([key, value]) => [`${prefix}${key}`, value])
    );
  // Subtract each category count from the base total, never going below zero.
  const subtractAll = (base, values) => {
    let remaining = base;
    for (const value of values) {
      remaining = Math.max(remaining - value, 0);
    }
    return remaining;
  };

  if ("prompt_tokens" in completionUsage) {
    // Chat Completions API usage shape.
    const {
      prompt_tokens,
      completion_tokens,
      total_tokens,
      completion_tokens_details,
      prompt_tokens_details
    } = completionUsage;
    const inputDetails = prefixEntries(prompt_tokens_details, "input_");
    const outputDetails = prefixEntries(completion_tokens_details, "output_");
    return {
      input: subtractAll(prompt_tokens, Object.values(inputDetails)),
      output: subtractAll(completion_tokens, Object.values(outputDetails)),
      total: total_tokens,
      ...inputDetails,
      ...outputDetails
    };
  }

  if ("input_tokens" in completionUsage) {
    // Responses API usage shape.
    const {
      input_tokens,
      output_tokens,
      total_tokens,
      input_tokens_details,
      output_tokens_details
    } = completionUsage;
    const inputDetails = prefixEntries(input_tokens_details, "input_");
    const outputDetails = prefixEntries(output_tokens_details, "output_");
    return {
      input: subtractAll(input_tokens, Object.values(inputDetails)),
      output: subtractAll(output_tokens, Object.values(outputDetails)),
      total: total_tokens,
      ...inputDetails,
      ...outputDetails
    };
  }
  // Neither shape matched: implicit undefined, as callers treat it as optional.
};
145
// Reads usage details off a full response object, if it carries a
// recognizable usage payload; yields undefined otherwise.
const parseUsageDetailsFromResponse = (res) => {
  if (!hasCompletionUsage(res)) {
    return undefined;
  }
  return parseUsageDetails(res.usage);
};
150
// Classifies a single streaming chunk as either tool-call data or text
// content. Malformed chunks (no choices) are reported as empty text.
const parseChunk = (rawChunk) => {
  const firstChoice = rawChunk?.choices?.[0];
  try {
    // Tool-call delta: forward the first tool-call fragment.
    if ("delta" in firstChoice && "tool_calls" in firstChoice.delta && Array.isArray(firstChoice.delta.tool_calls)) {
      return { isToolCall: true, data: firstChoice.delta.tool_calls[0] };
    }
    // Chat streaming delta: plain text content.
    if ("delta" in firstChoice) {
      return { isToolCall: false, data: firstChoice.delta?.content || "" };
    }
    // Legacy completions streaming: `text` field.
    if ("text" in firstChoice) {
      return { isToolCall: false, data: firstChoice.text || "" };
    }
  } catch {
    // `in` on a missing choice throws; fall through to the empty chunk.
  }
  return { isToolCall: false, data: "" };
};
170
// Type guard: true when `obj.usage` matches either the Chat Completions
// usage shape (prompt/completion/total tokens) or the Responses API usage
// shape (input/output/total tokens).
function hasCompletionUsage(obj) {
  if (!(obj instanceof Object) || !("usage" in obj) || !(obj.usage instanceof Object)) {
    return false;
  }
  const usage = obj.usage;
  const isChatShape =
    typeof usage.prompt_tokens === "number" &&
    typeof usage.completion_tokens === "number" &&
    typeof usage.total_tokens === "number";
  const isResponsesShape =
    typeof usage.input_tokens === "number" &&
    typeof usage.output_tokens === "number" &&
    typeof usage.total_tokens === "number";
  return isChatShape || isResponsesShape;
}
175
// Reassembles a streamed tool call: the function name comes from whichever
// chunk carried it (last truthy wins), and argument fragments are
// concatenated in arrival order.
const getToolCallOutput = (toolCallChunks) => {
  let name = "";
  const argumentParts = [];
  for (const chunk of toolCallChunks) {
    const fn = chunk.function;
    if (fn?.name) {
      name = fn.name;
    }
    argumentParts.push(fn?.arguments || "");
  }
  return {
    tool_calls: [
      {
        function: {
          name,
          arguments: argumentParts.join("")
        }
      }
    ]
  };
};
194
// Pulls model name, model parameters, and metadata fields off a full
// response object (Responses API shape). Model parameters keep any
// non-nullish value; metadata keeps only truthy values. Empty collections
// collapse to undefined so they don't overwrite earlier data on update.
const parseModelDataFromResponse = (res) => {
  if (typeof res !== "object" || res === null) {
    return { model: undefined, modelParameters: undefined, metadata: undefined };
  }

  const MODEL_PARAM_KEYS = [
    "max_output_tokens",
    "parallel_tool_calls",
    "store",
    "temperature",
    "tool_choice",
    "top_p",
    "truncation",
    "user"
  ];
  const METADATA_KEYS = [
    "reasoning",
    "incomplete_details",
    "instructions",
    "previous_response_id",
    "tools",
    "metadata",
    "status",
    "error"
  ];

  const modelParameters = {};
  for (const key of MODEL_PARAM_KEYS) {
    // Keep falsy-but-meaningful values such as 0 or false.
    if (key in res && res[key] !== null && res[key] !== undefined) {
      modelParameters[key] = res[key];
    }
  }

  const metadata = {};
  for (const key of METADATA_KEYS) {
    if (key in res && res[key]) {
      metadata[key] = res[key];
    }
  }

  return {
    model: "model" in res ? res["model"] : undefined,
    modelParameters: Object.keys(modelParameters).length > 0 ? modelParameters : undefined,
    metadata: Object.keys(metadata).length > 0 ? metadata : undefined
  };
};
243
+
244
+ // src/utils.ts
245
// True when `x` implements the async-iteration protocol (e.g. OpenAI streams).
const isAsyncIterable = (x) =>
  x != null && typeof x === "object" && typeof x[Symbol.asyncIterator] === "function";
246
+
247
+ // src/traceMethod.ts
248
// Returns a wrapper around `tracedMethod` that routes every invocation
// through wrapMethod, which creates and finalizes a Langfuse generation.
const withTracing = (tracedMethod, config) => {
  return function (...args) {
    return wrapMethod(tracedMethod, config, ...args);
  };
};
251
// Core tracing logic for a single OpenAI SDK invocation.
//
// Creates a Langfuse "generation" observation before calling the wrapped
// method, then finalizes it with output/usage data on success, or with
// level ERROR and zeroed cost details on failure. Three result shapes are
// handled:
//   1. a direct async iterable (stream)        -> delegated to wrapAsyncIterable
//   2. a Promise, possibly resolving to stream -> finalized in then/catch
//   3. any other synchronous return            -> returned as-is
var wrapMethod = (tracedMethod, config, ...args) => {
  var _a, _b;
  // The first call argument carries the request payload (model, messages, params).
  const { model, input, modelParameters } = parseInputArgs((_a = args[0]) != null ? _a : {});
  // NOTE(review): response_format is deliberately blanked in the model
  // parameters and recorded under metadata instead — presumably because it
  // can be a non-scalar value (e.g. a JSON schema); confirm intent.
  const finalModelParams = { ...modelParameters, response_format: "" };
  const finalMetadata = {
    ...config == null ? void 0 : config.generationMetadata,
    response_format: "response_format" in modelParameters ? modelParameters.response_format : void 0
  };
  // Start the generation and attach trace-level attributes in one chain.
  const generation = startObservation(
    (_b = config == null ? void 0 : config.generationName) != null ? _b : "OpenAI-completion",
    {
      model,
      input,
      modelParameters: finalModelParams,
      prompt: config == null ? void 0 : config.langfusePrompt,
      metadata: finalMetadata
    },
    {
      asType: "generation",
      parentSpanContext: config == null ? void 0 : config.parentSpanContext
    }
  ).updateTrace({
    userId: config == null ? void 0 : config.userId,
    sessionId: config == null ? void 0 : config.sessionId,
    tags: config == null ? void 0 : config.tags,
    name: config == null ? void 0 : config.traceName
  });
  try {
    const res = tracedMethod(...args);
    // Stream returned synchronously: the stream wrapper ends the generation.
    if (isAsyncIterable(res)) {
      return wrapAsyncIterable(res, generation);
    }
    if (res instanceof Promise) {
      const wrappedPromise = res.then((result) => {
        // A resolved Promise may still be a stream (e.g. `stream: true`).
        if (isAsyncIterable(result)) {
          return wrapAsyncIterable(result, generation);
        }
        // Non-streaming response: record output, usage, and any model data
        // the response itself reports, then close the observation.
        const output = parseCompletionOutput(result);
        const usageDetails = parseUsageDetailsFromResponse(result);
        const {
          model: modelFromResponse,
          modelParameters: modelParametersFromResponse,
          metadata: metadataFromResponse
        } = parseModelDataFromResponse(result);
        generation.update({
          output,
          usageDetails,
          model: modelFromResponse,
          modelParameters: modelParametersFromResponse,
          metadata: metadataFromResponse
        }).end();
        return result;
      }).catch((err) => {
        // Record the failure and zero out costs, then rethrow so the
        // caller still observes the original rejection.
        generation.update({
          statusMessage: String(err),
          level: "ERROR",
          costDetails: {
            input: 0,
            output: 0,
            total: 0
          }
        }).end();
        throw err;
      });
      return wrappedPromise;
    }
    return res;
  } catch (error) {
    // Synchronous throw from the wrapped method: same error handling as above.
    generation.update({
      statusMessage: String(error),
      level: "ERROR",
      costDetails: {
        input: 0,
        output: 0,
        total: 0
      }
    }).end();
    throw error;
  }
};
331
// Wraps a streaming response so chunks pass through to the caller unchanged
// while being accumulated for tracing. When the stream is exhausted, the
// generation is updated with the assembled output, the timestamp of the
// first chunk (completionStartTime), and usage details, then ended.
function wrapAsyncIterable(iterable, generation) {
  async function* tracedOutputGenerator() {
    const response = iterable;
    const textChunks = [];
    const toolCallChunks = [];
    let usage = null;
    let completionStartTime = void 0;
    let usageDetails = void 0;
    let output = null;
    for await (const rawChunk of response) {
      // Capture time-to-first-token: only the first chunk sets this.
      completionStartTime = completionStartTime != null ? completionStartTime : /* @__PURE__ */ new Date();
      // Responses API streaming events may embed a full `response` object;
      // when present it is authoritative for output, usage, and model data.
      if (typeof rawChunk === "object" && rawChunk && "response" in rawChunk) {
        const result = rawChunk["response"];
        output = parseCompletionOutput(result);
        usageDetails = parseUsageDetailsFromResponse(result);
        const {
          model: modelFromResponse,
          modelParameters: modelParametersFromResponse,
          metadata: metadataFromResponse
        } = parseModelDataFromResponse(result);
        generation.update({
          model: modelFromResponse,
          modelParameters: modelParametersFromResponse,
          metadata: metadataFromResponse
        });
      }
      // Chat streams report usage on the final chunk; keep the latest seen.
      if (typeof rawChunk === "object" && rawChunk != null && "usage" in rawChunk) {
        usage = rawChunk.usage;
      }
      // Accumulate either text content or tool-call fragments.
      const processedChunk = parseChunk(rawChunk);
      if (!processedChunk.isToolCall) {
        textChunks.push(processedChunk.data);
      } else {
        toolCallChunks.push(processedChunk.data);
      }
      // Forward the original chunk untouched to the consumer.
      yield rawChunk;
    }
    // Prefer the embedded-response output; otherwise reassemble tool calls,
    // falling back to the concatenated text.
    output = output != null ? output : toolCallChunks.length > 0 ? getToolCallOutput(toolCallChunks) : textChunks.join("");
    generation.update({
      output,
      completionStartTime,
      usageDetails: usageDetails != null ? usageDetails : usage ? parseUsageDetails(usage) : void 0
    }).end();
  }
  return tracedOutputGenerator();
}
377
+
378
+ // src/observeOpenAI.ts
379
// Wraps an OpenAI SDK object in a Proxy so that every function it exposes
// is traced via withTracing, and every nested plain object is wrapped
// recursively (so e.g. `openai.chat.completions.create` is traced too).
const observeOpenAI = (sdk, langfuseConfig) => {
  const handler = {
    get(target, propKey, receiver) {
      const value = target[propKey];
      // Default generation name: "<SdkClass>.<property>", overridable via config.
      const fallbackName = `${sdk.constructor?.name}.${propKey.toString()}`;
      const config = {
        ...langfuseConfig,
        generationName: langfuseConfig?.generationName ?? fallbackName
      };
      if (typeof value === "function") {
        // Bind to the unproxied target so internal `this` usage still works.
        return withTracing(value.bind(target), config);
      }
      // Recurse into nested plain objects (skip arrays and Dates).
      const isNestedOpenAIObject =
        value && typeof value === "object" && !Array.isArray(value) && !(value instanceof Date);
      if (isNestedOpenAIObject) {
        return observeOpenAI(value, config);
      }
      return Reflect.get(target, propKey, receiver);
    }
  };
  return new Proxy(sdk, handler);
};
398
+ export {
399
+ observeOpenAI
400
+ };
401
+ //# sourceMappingURL=index.mjs.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../src/traceMethod.ts","../src/parseOpenAI.ts","../src/utils.ts","../src/observeOpenAI.ts"],"sourcesContent":["import { LangfuseGeneration, startObservation } from \"@elasticdash/tracing\";\nimport type OpenAI from \"openai\";\n\nimport {\n getToolCallOutput,\n parseChunk,\n parseCompletionOutput,\n parseInputArgs,\n parseUsageDetails,\n parseModelDataFromResponse,\n parseUsageDetailsFromResponse,\n} from \"./parseOpenAI.js\";\nimport type { LangfuseConfig } from \"./types.js\";\nimport { isAsyncIterable } from \"./utils.js\";\n\n/**\n * Generic method type for any function that can be traced.\n * @internal\n */\ntype GenericMethod = (...args: unknown[]) => unknown;\n\n/**\n * Wraps a method with Langfuse tracing functionality.\n *\n * This function creates a wrapper around OpenAI SDK methods that automatically\n * creates Langfuse generations, captures input/output data, handles streaming\n * responses, and records usage metrics and errors.\n *\n * @param tracedMethod - The OpenAI SDK method to wrap with tracing\n * @param config - Configuration for the trace and generation\n * @returns A wrapped version of the method that creates Langfuse traces\n *\n * @internal\n */\nexport const withTracing = <T extends GenericMethod>(\n tracedMethod: T,\n config?: LangfuseConfig & Required<{ generationName: string }>,\n): ((...args: Parameters<T>) => Promise<ReturnType<T>>) => {\n return (...args) => wrapMethod(tracedMethod, config, ...args);\n};\n\n/**\n * Internal method that handles the actual tracing logic for OpenAI SDK methods.\n *\n * This function creates a Langfuse generation, executes the original method,\n * and captures all relevant data including input, output, usage, and errors.\n * It handles both streaming and non-streaming responses appropriately.\n *\n * @param tracedMethod - The original OpenAI SDK method to execute\n * @param config - Langfuse configuration options\n * @param args - Arguments to pass to the original method\n * 
@returns The result from the original method, potentially wrapped for streaming\n *\n * @internal\n */\nconst wrapMethod = <T extends GenericMethod>(\n tracedMethod: T,\n config?: LangfuseConfig,\n ...args: Parameters<T>\n): ReturnType<T> | any => {\n const { model, input, modelParameters } = parseInputArgs(args[0] ?? {});\n\n const finalModelParams = { ...modelParameters, response_format: \"\" };\n const finalMetadata = {\n ...config?.generationMetadata,\n response_format:\n \"response_format\" in modelParameters\n ? modelParameters.response_format\n : undefined,\n };\n\n const generation = startObservation(\n config?.generationName ?? \"OpenAI-completion\",\n {\n model,\n input,\n modelParameters: finalModelParams,\n prompt: config?.langfusePrompt,\n metadata: finalMetadata,\n },\n {\n asType: \"generation\",\n parentSpanContext: config?.parentSpanContext,\n },\n ).updateTrace({\n userId: config?.userId,\n sessionId: config?.sessionId,\n tags: config?.tags,\n name: config?.traceName,\n });\n\n try {\n const res = tracedMethod(...args);\n\n // Handle stream responses\n if (isAsyncIterable(res)) {\n return wrapAsyncIterable(res, generation);\n }\n\n if (res instanceof Promise) {\n const wrappedPromise = res\n .then((result) => {\n if (isAsyncIterable(result)) {\n return wrapAsyncIterable(result, generation);\n }\n\n const output = parseCompletionOutput(result);\n const usageDetails = parseUsageDetailsFromResponse(result);\n const {\n model: modelFromResponse,\n modelParameters: modelParametersFromResponse,\n metadata: metadataFromResponse,\n } = parseModelDataFromResponse(result);\n\n generation\n .update({\n output,\n usageDetails,\n model: modelFromResponse,\n modelParameters: modelParametersFromResponse,\n metadata: metadataFromResponse,\n })\n .end();\n\n return result;\n })\n .catch((err) => {\n generation\n .update({\n statusMessage: String(err),\n level: \"ERROR\",\n costDetails: {\n input: 0,\n output: 0,\n total: 0,\n },\n })\n .end();\n\n throw err;\n 
});\n\n return wrappedPromise;\n }\n\n return res;\n } catch (error) {\n generation\n .update({\n statusMessage: String(error),\n level: \"ERROR\",\n costDetails: {\n input: 0,\n output: 0,\n total: 0,\n },\n })\n .end();\n\n throw error;\n }\n};\n\n/**\n * Wraps an async iterable (streaming response) with Langfuse tracing.\n *\n * This function handles streaming OpenAI responses by collecting chunks,\n * parsing usage information, and updating the Langfuse generation with\n * the complete output and usage details once the stream is consumed.\n *\n * @param iterable - The async iterable from OpenAI (streaming response)\n * @param generation - The Langfuse generation to update with stream data\n * @returns An async generator that yields original chunks while collecting data\n *\n * @internal\n */\nfunction wrapAsyncIterable<R>(\n iterable: AsyncIterable<unknown>,\n generation: LangfuseGeneration,\n): R {\n async function* tracedOutputGenerator(): AsyncGenerator<\n unknown,\n void,\n unknown\n > {\n const response = iterable;\n const textChunks: string[] = [];\n const toolCallChunks: OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta.ToolCall[] =\n [];\n let usage: OpenAI.CompletionUsage | null = null;\n let completionStartTime: Date | undefined = undefined;\n let usageDetails: Record<string, number> | undefined = undefined;\n let output: unknown = null;\n\n for await (const rawChunk of response as AsyncIterable<unknown>) {\n completionStartTime = completionStartTime ?? 
new Date();\n\n // Handle Response API chunks\n if (typeof rawChunk === \"object\" && rawChunk && \"response\" in rawChunk) {\n const result = rawChunk[\"response\"];\n output = parseCompletionOutput(result);\n usageDetails = parseUsageDetailsFromResponse(result);\n\n const {\n model: modelFromResponse,\n modelParameters: modelParametersFromResponse,\n metadata: metadataFromResponse,\n } = parseModelDataFromResponse(result);\n\n generation.update({\n model: modelFromResponse,\n modelParameters: modelParametersFromResponse,\n metadata: metadataFromResponse,\n });\n }\n\n if (\n typeof rawChunk === \"object\" &&\n rawChunk != null &&\n \"usage\" in rawChunk\n ) {\n usage = rawChunk.usage as OpenAI.CompletionUsage | null;\n }\n\n const processedChunk = parseChunk(rawChunk);\n\n if (!processedChunk.isToolCall) {\n textChunks.push(processedChunk.data);\n } else {\n toolCallChunks.push(processedChunk.data);\n }\n\n yield rawChunk;\n }\n\n output =\n output ??\n (toolCallChunks.length > 0\n ? getToolCallOutput(toolCallChunks)\n : textChunks.join(\"\"));\n\n generation\n .update({\n output,\n completionStartTime,\n usageDetails:\n usageDetails ?? (usage ? 
parseUsageDetails(usage) : undefined),\n })\n .end();\n }\n\n return tracedOutputGenerator() as R;\n}\n","import type OpenAI from \"openai\";\n\ntype ParsedOpenAIArguments = {\n model: string;\n input: Record<string, any> | string;\n modelParameters: Record<string, any>;\n};\n\nexport const parseInputArgs = (\n args: Record<string, any>,\n): ParsedOpenAIArguments => {\n let params: Record<string, any> = {};\n params = {\n frequency_penalty: args.frequency_penalty,\n logit_bias: args.logit_bias,\n logprobs: args.logprobs,\n max_tokens: args.max_tokens,\n n: args.n,\n presence_penalty: args.presence_penalty,\n seed: args.seed,\n stop: args.stop,\n stream: args.stream,\n temperature: args.temperature,\n top_p: args.top_p,\n user: args.user,\n response_format: args.response_format,\n top_logprobs: args.top_logprobs,\n };\n\n let input: Record<string, any> | string = args.input;\n\n if (\n args &&\n typeof args === \"object\" &&\n !Array.isArray(args) &&\n \"messages\" in args\n ) {\n input = {};\n input.messages = args.messages;\n if (\"function_call\" in args) {\n input.function_call = args.function_call;\n }\n if (\"functions\" in args) {\n input.functions = args.functions;\n }\n if (\"tools\" in args) {\n input.tools = args.tools;\n }\n\n if (\"tool_choice\" in args) {\n input.tool_choice = args.tool_choice;\n }\n } else if (!input) {\n input = args.prompt;\n }\n\n return {\n model: args.model,\n input: input,\n modelParameters: params,\n };\n};\n\nexport const parseCompletionOutput = (res: unknown): unknown => {\n if (\n res instanceof Object &&\n \"output_text\" in res &&\n res[\"output_text\"] !== \"\"\n ) {\n return res[\"output_text\"] as string;\n }\n\n if (\n typeof res === \"object\" &&\n res &&\n \"output\" in res &&\n Array.isArray(res[\"output\"])\n ) {\n const output = res[\"output\"];\n\n if (output.length > 1) {\n return output;\n }\n if (output.length === 1) {\n return output[0] as Record<string, unknown>;\n }\n\n return null;\n }\n\n if (\n !(res 
instanceof Object && \"choices\" in res && Array.isArray(res.choices))\n ) {\n return \"\";\n }\n\n return \"message\" in res.choices[0]\n ? res.choices[0].message\n : (res.choices[0].text ?? \"\");\n};\n\nexport const parseUsageDetails = (\n completionUsage: OpenAI.CompletionUsage,\n): Record<string, number> | undefined => {\n if (\"prompt_tokens\" in completionUsage) {\n const {\n prompt_tokens,\n completion_tokens,\n total_tokens,\n completion_tokens_details,\n prompt_tokens_details,\n } = completionUsage;\n\n const flatPromptTokensDetails = Object.fromEntries(\n Object.entries(prompt_tokens_details ?? {}).map(([key, value]) => [\n `input_${key}`,\n value as number,\n ]),\n );\n\n const flatCompletionTokensDetails = Object.fromEntries(\n Object.entries(completion_tokens_details ?? {}).map(([key, value]) => [\n `output_${key}`,\n value as number,\n ]),\n );\n\n let finalInputTokens = prompt_tokens as number;\n Object.values(flatPromptTokensDetails).forEach((value) => {\n finalInputTokens = Math.max(finalInputTokens - value, 0);\n });\n\n let finalOutputTokens = completion_tokens as number;\n Object.values(flatCompletionTokensDetails).forEach((value) => {\n finalOutputTokens = Math.max(finalOutputTokens - value, 0);\n });\n\n return {\n input: finalInputTokens,\n output: finalOutputTokens,\n total: total_tokens,\n ...flatPromptTokensDetails,\n ...flatCompletionTokensDetails,\n };\n } else if (\"input_tokens\" in completionUsage) {\n const {\n input_tokens,\n output_tokens,\n total_tokens,\n input_tokens_details,\n output_tokens_details,\n } = completionUsage;\n\n let finalInputTokens = input_tokens as number;\n Object.keys(input_tokens_details ?? {}).forEach((key) => {\n finalInputTokens = Math.max(\n finalInputTokens -\n input_tokens_details[key as keyof typeof input_tokens_details],\n 0,\n );\n });\n\n let finalOutputTokens = output_tokens as number;\n Object.keys(output_tokens_details ?? 
{}).forEach((key) => {\n finalOutputTokens = Math.max(\n finalOutputTokens -\n output_tokens_details[key as keyof typeof output_tokens_details],\n 0,\n );\n });\n\n return {\n input: finalInputTokens,\n output: finalOutputTokens,\n total: total_tokens,\n ...Object.fromEntries(\n Object.entries(input_tokens_details ?? {}).map(([key, value]) => [\n `input_${key}`,\n value as number,\n ]),\n ),\n ...Object.fromEntries(\n Object.entries(output_tokens_details ?? {}).map(([key, value]) => [\n `output_${key}`,\n value as number,\n ]),\n ),\n };\n }\n};\n\nexport const parseUsageDetailsFromResponse = (\n res: unknown,\n): Record<string, number> | undefined => {\n if (hasCompletionUsage(res)) {\n return parseUsageDetails(res.usage);\n }\n};\n\nexport const parseChunk = (\n rawChunk: unknown,\n):\n | { isToolCall: false; data: string }\n | {\n isToolCall: true;\n data: OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta.ToolCall;\n } => {\n let isToolCall = false;\n const _chunk = rawChunk as\n | OpenAI.ChatCompletionChunk\n | OpenAI.Completions.Completion;\n const chunkData = _chunk?.choices?.[0];\n\n try {\n if (\n \"delta\" in chunkData &&\n \"tool_calls\" in chunkData.delta &&\n Array.isArray(chunkData.delta.tool_calls)\n ) {\n isToolCall = true;\n\n return { isToolCall, data: chunkData.delta.tool_calls[0] };\n }\n if (\"delta\" in chunkData) {\n return { isToolCall, data: chunkData.delta?.content || \"\" };\n }\n\n if (\"text\" in chunkData) {\n return { isToolCall, data: chunkData.text || \"\" };\n }\n } catch {}\n\n return { isToolCall: false, data: \"\" };\n};\n\n// Type guard to check if an unknown object is a UsageResponse\nfunction hasCompletionUsage(\n obj: any,\n): obj is { usage: OpenAI.CompletionUsage } {\n return (\n obj instanceof Object &&\n \"usage\" in obj &&\n obj.usage instanceof Object &&\n // Completion API Usage format\n ((typeof obj.usage.prompt_tokens === \"number\" &&\n typeof obj.usage.completion_tokens === \"number\" &&\n typeof 
obj.usage.total_tokens === \"number\") ||\n // Response API Usage format\n (typeof obj.usage.input_tokens === \"number\" &&\n typeof obj.usage.output_tokens === \"number\" &&\n typeof obj.usage.total_tokens === \"number\"))\n );\n}\n\nexport const getToolCallOutput = (\n toolCallChunks: OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta.ToolCall[],\n): {\n tool_calls: {\n function: {\n name: string;\n arguments: string;\n };\n }[];\n} => {\n let name = \"\";\n let toolArguments = \"\";\n\n for (const toolCall of toolCallChunks) {\n name = toolCall.function?.name || name;\n toolArguments += toolCall.function?.arguments || \"\";\n }\n\n return {\n tool_calls: [\n {\n function: {\n name,\n arguments: toolArguments,\n },\n },\n ],\n };\n};\n\nexport const parseModelDataFromResponse = (\n res: unknown,\n): {\n model: string | undefined;\n modelParameters: Record<string, string | number> | undefined;\n metadata: Record<string, unknown> | undefined;\n} => {\n if (typeof res !== \"object\" || res === null) {\n return {\n model: undefined,\n modelParameters: undefined,\n metadata: undefined,\n };\n }\n\n const model = \"model\" in res ? (res[\"model\"] as string) : undefined;\n const modelParameters: Record<string, string | number> = {};\n const modelParamKeys = [\n \"max_output_tokens\",\n \"parallel_tool_calls\",\n \"store\",\n \"temperature\",\n \"tool_choice\",\n \"top_p\",\n \"truncation\",\n \"user\",\n ];\n\n const metadata: Record<string, unknown> = {};\n const metadataKeys = [\n \"reasoning\",\n \"incomplete_details\",\n \"instructions\",\n \"previous_response_id\",\n \"tools\",\n \"metadata\",\n \"status\",\n \"error\",\n ];\n\n for (const key of modelParamKeys) {\n const val =\n key in res ? (res[key as keyof typeof res] as string | number) : null;\n if (val !== null && val !== undefined) {\n modelParameters[key as keyof typeof modelParameters] = val;\n }\n }\n\n for (const key of metadataKeys) {\n const val =\n key in res ? 
(res[key as keyof typeof res] as string | number) : null;\n if (val) {\n metadata[key as keyof typeof metadata] = val;\n }\n }\n\n return {\n model,\n modelParameters:\n Object.keys(modelParameters).length > 0 ? modelParameters : undefined,\n metadata: Object.keys(metadata).length > 0 ? metadata : undefined,\n };\n};\n","/**\n * Type guard to check if a value is an async iterable.\n *\n * This utility function determines whether a given value implements the\n * AsyncIterable interface, which is used to identify streaming responses\n * from the OpenAI SDK.\n *\n * @param x - The value to check\n * @returns True if the value is an async iterable, false otherwise\n *\n * @example\n * ```typescript\n * import { isAsyncIterable } from './utils.js';\n *\n * const response = await openai.chat.completions.create({\n * model: 'gpt-4',\n * messages: [...],\n * stream: true\n * });\n *\n * if (isAsyncIterable(response)) {\n * // Handle streaming response\n * for await (const chunk of response) {\n * console.log(chunk);\n * }\n * } else {\n * // Handle regular response\n * console.log(response);\n * }\n * ```\n *\n * @public\n */\nexport const isAsyncIterable = (x: unknown): x is AsyncIterable<unknown> =>\n x != null &&\n typeof x === \"object\" &&\n typeof (x as any)[Symbol.asyncIterator] === \"function\";\n","import { withTracing } from \"./traceMethod.js\";\nimport type { LangfuseConfig } from \"./types.js\";\n\n/**\n * Wraps an OpenAI SDK client with automatic Langfuse tracing.\n *\n * This function creates a proxy around the OpenAI SDK that automatically\n * traces all method calls, capturing detailed information about requests,\n * responses, token usage, costs, and performance metrics. It works with\n * both streaming and non-streaming OpenAI API calls.\n *\n * The wrapper recursively traces nested objects in the OpenAI SDK, ensuring\n * that all API calls (chat completions, embeddings, fine-tuning, etc.) 
are\n * automatically captured as Langfuse generations.\n *\n * @param sdk - The OpenAI SDK client instance to wrap with tracing\n * @param langfuseConfig - Optional configuration for tracing behavior\n * @returns A proxied version of the OpenAI SDK with automatic tracing\n *\n * @example\n * ```typescript\n * import OpenAI from 'openai';\n * import { observeOpenAI } from '@elasticdash/openai';\n *\n * const openai = observeOpenAI(new OpenAI({\n * apiKey: process.env.OPENAI_API_KEY,\n * }));\n *\n * // All OpenAI calls are now automatically traced\n * const response = await openai.chat.completions.create({\n * model: 'gpt-4',\n * messages: [{ role: 'user', content: 'Hello!' }],\n * max_tokens: 100,\n * temperature: 0.7\n * });\n * ```\n *\n * @example\n * ```typescript\n * // With custom tracing configuration\n * const openai = observeOpenAI(new OpenAI({\n * apiKey: process.env.OPENAI_API_KEY\n * }), {\n * traceName: 'AI-Assistant-Chat',\n * userId: 'user-123',\n * sessionId: 'session-456',\n * tags: ['production', 'chat-feature'],\n * generationName: 'gpt-4-chat-completion'\n * });\n *\n * const completion = await openai.chat.completions.create({\n * model: 'gpt-4',\n * messages: [{ role: 'user', content: 'Explain quantum computing' }]\n * });\n * ```\n *\n * @example\n * ```typescript\n * // Streaming responses are also automatically traced\n * const stream = await openai.chat.completions.create({\n * model: 'gpt-4',\n * messages: [{ role: 'user', content: 'Write a story' }],\n * stream: true\n * });\n *\n * for await (const chunk of stream) {\n * process.stdout.write(chunk.choices[0]?.delta?.content || '');\n * }\n * // Final usage details and complete output are captured automatically\n * ```\n *\n * @example\n * ```typescript\n * // Using with Langfuse prompt management\n * const openai = observeOpenAI(new OpenAI({\n * apiKey: process.env.OPENAI_API_KEY\n * }), {\n * langfusePrompt: {\n * name: 'chat-assistant-v2',\n * version: 3,\n * isFallback: false\n * 
},\n * generationMetadata: {\n * environment: 'production',\n * feature: 'chat-assistant'\n * }\n * });\n * ```\n *\n * @public\n */\nexport const observeOpenAI = <SDKType extends object>(\n sdk: SDKType,\n langfuseConfig?: LangfuseConfig,\n): SDKType => {\n return new Proxy(sdk, {\n get(wrappedSdk, propKey, proxy) {\n const originalProperty = wrappedSdk[propKey as keyof SDKType];\n\n const defaultGenerationName = `${sdk.constructor?.name}.${propKey.toString()}`;\n const generationName =\n langfuseConfig?.generationName ?? defaultGenerationName;\n const config = { ...langfuseConfig, generationName };\n\n // Trace methods of the OpenAI SDK\n if (typeof originalProperty === \"function\") {\n return withTracing(originalProperty.bind(wrappedSdk), config);\n }\n\n const isNestedOpenAIObject =\n originalProperty &&\n !Array.isArray(originalProperty) &&\n !(originalProperty instanceof Date) &&\n typeof originalProperty === \"object\";\n\n // Recursively wrap nested objects to ensure all nested properties or methods are also traced\n if (isNestedOpenAIObject) {\n return observeOpenAI(originalProperty, config);\n }\n\n // Fallback to returning the original value\n return Reflect.get(wrappedSdk, propKey, proxy);\n },\n 
});\n};\n"],"mappings":";AAAA,SAA6B,wBAAwB;;;ACQ9C,IAAM,iBAAiB,CAC5B,SAC0B;AAC1B,MAAI,SAA8B,CAAC;AACnC,WAAS;AAAA,IACP,mBAAmB,KAAK;AAAA,IACxB,YAAY,KAAK;AAAA,IACjB,UAAU,KAAK;AAAA,IACf,YAAY,KAAK;AAAA,IACjB,GAAG,KAAK;AAAA,IACR,kBAAkB,KAAK;AAAA,IACvB,MAAM,KAAK;AAAA,IACX,MAAM,KAAK;AAAA,IACX,QAAQ,KAAK;AAAA,IACb,aAAa,KAAK;AAAA,IAClB,OAAO,KAAK;AAAA,IACZ,MAAM,KAAK;AAAA,IACX,iBAAiB,KAAK;AAAA,IACtB,cAAc,KAAK;AAAA,EACrB;AAEA,MAAI,QAAsC,KAAK;AAE/C,MACE,QACA,OAAO,SAAS,YAChB,CAAC,MAAM,QAAQ,IAAI,KACnB,cAAc,MACd;AACA,YAAQ,CAAC;AACT,UAAM,WAAW,KAAK;AACtB,QAAI,mBAAmB,MAAM;AAC3B,YAAM,gBAAgB,KAAK;AAAA,IAC7B;AACA,QAAI,eAAe,MAAM;AACvB,YAAM,YAAY,KAAK;AAAA,IACzB;AACA,QAAI,WAAW,MAAM;AACnB,YAAM,QAAQ,KAAK;AAAA,IACrB;AAEA,QAAI,iBAAiB,MAAM;AACzB,YAAM,cAAc,KAAK;AAAA,IAC3B;AAAA,EACF,WAAW,CAAC,OAAO;AACjB,YAAQ,KAAK;AAAA,EACf;AAEA,SAAO;AAAA,IACL,OAAO,KAAK;AAAA,IACZ;AAAA,IACA,iBAAiB;AAAA,EACnB;AACF;AAEO,IAAM,wBAAwB,CAAC,QAA0B;AA/DhE;AAgEE,MACE,eAAe,UACf,iBAAiB,OACjB,IAAI,aAAa,MAAM,IACvB;AACA,WAAO,IAAI,aAAa;AAAA,EAC1B;AAEA,MACE,OAAO,QAAQ,YACf,OACA,YAAY,OACZ,MAAM,QAAQ,IAAI,QAAQ,CAAC,GAC3B;AACA,UAAM,SAAS,IAAI,QAAQ;AAE3B,QAAI,OAAO,SAAS,GAAG;AACrB,aAAO;AAAA,IACT;AACA,QAAI,OAAO,WAAW,GAAG;AACvB,aAAO,OAAO,CAAC;AAAA,IACjB;AAEA,WAAO;AAAA,EACT;AAEA,MACE,EAAE,eAAe,UAAU,aAAa,OAAO,MAAM,QAAQ,IAAI,OAAO,IACxE;AACA,WAAO;AAAA,EACT;AAEA,SAAO,aAAa,IAAI,QAAQ,CAAC,IAC7B,IAAI,QAAQ,CAAC,EAAE,WACd,SAAI,QAAQ,CAAC,EAAE,SAAf,YAAuB;AAC9B;AAEO,IAAM,oBAAoB,CAC/B,oBACuC;AACvC,MAAI,mBAAmB,iBAAiB;AACtC,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF,IAAI;AAEJ,UAAM,0BAA0B,OAAO;AAAA,MACrC,OAAO,QAAQ,wDAAyB,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,KAAK,KAAK,MAAM;AAAA,QAChE,SAAS,GAAG;AAAA,QACZ;AAAA,MACF,CAAC;AAAA,IACH;AAEA,UAAM,8BAA8B,OAAO;AAAA,MACzC,OAAO,QAAQ,gEAA6B,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,KAAK,KAAK,MAAM;AAAA,QACpE,UAAU,GAAG;AAAA,QACb;AAAA,MACF,CAAC;AAAA,IACH;AAEA,QAAI,mBAAmB;AACvB,WAAO,OAAO,uBAAuB,EAAE,QAAQ,CAAC,UAAU;AACxD,yBAAmB,KAAK,IAAI,mBAAmB,OAAO,CAAC;AAAA,IACzD,CAAC;AAED,QAAI,oBAAoB;AACxB,WAAO,OAAO,2BAA2B,EAAE,QAAQ,CAAC,UA
AU;AAC5D,0BAAoB,KAAK,IAAI,oBAAoB,OAAO,CAAC;AAAA,IAC3D,CAAC;AAED,WAAO;AAAA,MACL,OAAO;AAAA,MACP,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,GAAG;AAAA,MACH,GAAG;AAAA,IACL;AAAA,EACF,WAAW,kBAAkB,iBAAiB;AAC5C,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF,IAAI;AAEJ,QAAI,mBAAmB;AACvB,WAAO,KAAK,sDAAwB,CAAC,CAAC,EAAE,QAAQ,CAAC,QAAQ;AACvD,yBAAmB,KAAK;AAAA,QACtB,mBACE,qBAAqB,GAAwC;AAAA,QAC/D;AAAA,MACF;AAAA,IACF,CAAC;AAED,QAAI,oBAAoB;AACxB,WAAO,KAAK,wDAAyB,CAAC,CAAC,EAAE,QAAQ,CAAC,QAAQ;AACxD,0BAAoB,KAAK;AAAA,QACvB,oBACE,sBAAsB,GAAyC;AAAA,QACjE;AAAA,MACF;AAAA,IACF,CAAC;AAED,WAAO;AAAA,MACL,OAAO;AAAA,MACP,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,GAAG,OAAO;AAAA,QACR,OAAO,QAAQ,sDAAwB,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,KAAK,KAAK,MAAM;AAAA,UAC/D,SAAS,GAAG;AAAA,UACZ;AAAA,QACF,CAAC;AAAA,MACH;AAAA,MACA,GAAG,OAAO;AAAA,QACR,OAAO,QAAQ,wDAAyB,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,KAAK,KAAK,MAAM;AAAA,UAChE,UAAU,GAAG;AAAA,UACb;AAAA,QACF,CAAC;AAAA,MACH;AAAA,IACF;AAAA,EACF;AACF;AAEO,IAAM,gCAAgC,CAC3C,QACuC;AACvC,MAAI,mBAAmB,GAAG,GAAG;AAC3B,WAAO,kBAAkB,IAAI,KAAK;AAAA,EACpC;AACF;AAEO,IAAM,aAAa,CACxB,aAMO;AA9MT;AA+ME,MAAI,aAAa;AACjB,QAAM,SAAS;AAGf,QAAM,aAAY,sCAAQ,YAAR,mBAAkB;AAEpC,MAAI;AACF,QACE,WAAW,aACX,gBAAgB,UAAU,SAC1B,MAAM,QAAQ,UAAU,MAAM,UAAU,GACxC;AACA,mBAAa;AAEb,aAAO,EAAE,YAAY,MAAM,UAAU,MAAM,WAAW,CAAC,EAAE;AAAA,IAC3D;AACA,QAAI,WAAW,WAAW;AACxB,aAAO,EAAE,YAAY,QAAM,eAAU,UAAV,mBAAiB,YAAW,GAAG;AAAA,IAC5D;AAEA,QAAI,UAAU,WAAW;AACvB,aAAO,EAAE,YAAY,MAAM,UAAU,QAAQ,GAAG;AAAA,IAClD;AAAA,EACF,QAAQ;AAAA,EAAC;AAET,SAAO,EAAE,YAAY,OAAO,MAAM,GAAG;AACvC;AAGA,SAAS,mBACP,KAC0C;AAC1C,SACE,eAAe,UACf,WAAW,OACX,IAAI,iBAAiB;AAAA,GAEnB,OAAO,IAAI,MAAM,kBAAkB,YACnC,OAAO,IAAI,MAAM,sBAAsB,YACvC,OAAO,IAAI,MAAM,iBAAiB;AAAA,EAEjC,OAAO,IAAI,MAAM,iBAAiB,YACjC,OAAO,IAAI,MAAM,kBAAkB,YACnC,OAAO,IAAI,MAAM,iBAAiB;AAE1C;AAEO,IAAM,oBAAoB,CAC/B,mBAQG;AAvQL;AAwQE,MAAI,OAAO;AACX,MAAI,gBAAgB;AAEpB,aAAW,YAAY,gBAAgB;AACrC,aAAO,cAAS,aAAT,mBAAmB,SAAQ;AAClC,uBAAiB,cAAS,aAAT,mBAAmB,cAAa;AAAA,EACnD;AAEA,SAAO;AAAA,IACL,YAAY;AAAA,MACV;AAAA,QACE,UAAU;AAAA,UACR;AAAA,U
ACA,WAAW;AAAA,QACb;AAAA,MACF;AAAA,IACF;AAAA,EACF;AACF;AAEO,IAAM,6BAA6B,CACxC,QAKG;AACH,MAAI,OAAO,QAAQ,YAAY,QAAQ,MAAM;AAC3C,WAAO;AAAA,MACL,OAAO;AAAA,MACP,iBAAiB;AAAA,MACjB,UAAU;AAAA,IACZ;AAAA,EACF;AAEA,QAAM,QAAQ,WAAW,MAAO,IAAI,OAAO,IAAe;AAC1D,QAAM,kBAAmD,CAAC;AAC1D,QAAM,iBAAiB;AAAA,IACrB;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AAEA,QAAM,WAAoC,CAAC;AAC3C,QAAM,eAAe;AAAA,IACnB;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AAEA,aAAW,OAAO,gBAAgB;AAChC,UAAM,MACJ,OAAO,MAAO,IAAI,GAAuB,IAAwB;AACnE,QAAI,QAAQ,QAAQ,QAAQ,QAAW;AACrC,sBAAgB,GAAmC,IAAI;AAAA,IACzD;AAAA,EACF;AAEA,aAAW,OAAO,cAAc;AAC9B,UAAM,MACJ,OAAO,MAAO,IAAI,GAAuB,IAAwB;AACnE,QAAI,KAAK;AACP,eAAS,GAA4B,IAAI;AAAA,IAC3C;AAAA,EACF;AAEA,SAAO;AAAA,IACL;AAAA,IACA,iBACE,OAAO,KAAK,eAAe,EAAE,SAAS,IAAI,kBAAkB;AAAA,IAC9D,UAAU,OAAO,KAAK,QAAQ,EAAE,SAAS,IAAI,WAAW;AAAA,EAC1D;AACF;;;ACzTO,IAAM,kBAAkB,CAAC,MAC9B,KAAK,QACL,OAAO,MAAM,YACb,OAAQ,EAAU,OAAO,aAAa,MAAM;;;AFFvC,IAAM,cAAc,CACzB,cACA,WACyD;AACzD,SAAO,IAAI,SAAS,WAAW,cAAc,QAAQ,GAAG,IAAI;AAC9D;AAgBA,IAAM,aAAa,CACjB,cACA,WACG,SACqB;AA3D1B;AA4DE,QAAM,EAAE,OAAO,OAAO,gBAAgB,IAAI,gBAAe,UAAK,CAAC,MAAN,YAAW,CAAC,CAAC;AAEtE,QAAM,mBAAmB,EAAE,GAAG,iBAAiB,iBAAiB,GAAG;AACnE,QAAM,gBAAgB;AAAA,IACpB,GAAG,iCAAQ;AAAA,IACX,iBACE,qBAAqB,kBACjB,gBAAgB,kBAChB;AAAA,EACR;AAEA,QAAM,aAAa;AAAA,KACjB,sCAAQ,mBAAR,YAA0B;AAAA,IAC1B;AAAA,MACE;AAAA,MACA;AAAA,MACA,iBAAiB;AAAA,MACjB,QAAQ,iCAAQ;AAAA,MAChB,UAAU;AAAA,IACZ;AAAA,IACA;AAAA,MACE,QAAQ;AAAA,MACR,mBAAmB,iCAAQ;AAAA,IAC7B;AAAA,EACF,EAAE,YAAY;AAAA,IACZ,QAAQ,iCAAQ;AAAA,IAChB,WAAW,iCAAQ;AAAA,IACnB,MAAM,iCAAQ;AAAA,IACd,MAAM,iCAAQ;AAAA,EAChB,CAAC;AAED,MAAI;AACF,UAAM,MAAM,aAAa,GAAG,IAAI;AAGhC,QAAI,gBAAgB,GAAG,GAAG;AACxB,aAAO,kBAAkB,KAAK,UAAU;AAAA,IAC1C;AAEA,QAAI,eAAe,SAAS;AAC1B,YAAM,iBAAiB,IACpB,KAAK,CAAC,WAAW;AAChB,YAAI,gBAAgB,MAAM,GAAG;AAC3B,iBAAO,kBAAkB,QAAQ,UAAU;AAAA,QAC7C;AAEA,cAAM,SAAS,sBAAsB,MAAM;AAC3C,cAAM,eAAe,8BAA8B,MAAM;AACzD,cAAM;AAAA,UACJ,OAAO;AAAA,UACP,iBAAiB;AAAA,UACjB,UAAU;AAAA,
QACZ,IAAI,2BAA2B,MAAM;AAErC,mBACG,OAAO;AAAA,UACN;AAAA,UACA;AAAA,UACA,OAAO;AAAA,UACP,iBAAiB;AAAA,UACjB,UAAU;AAAA,QACZ,CAAC,EACA,IAAI;AAEP,eAAO;AAAA,MACT,CAAC,EACA,MAAM,CAAC,QAAQ;AACd,mBACG,OAAO;AAAA,UACN,eAAe,OAAO,GAAG;AAAA,UACzB,OAAO;AAAA,UACP,aAAa;AAAA,YACX,OAAO;AAAA,YACP,QAAQ;AAAA,YACR,OAAO;AAAA,UACT;AAAA,QACF,CAAC,EACA,IAAI;AAEP,cAAM;AAAA,MACR,CAAC;AAEH,aAAO;AAAA,IACT;AAEA,WAAO;AAAA,EACT,SAAS,OAAO;AACd,eACG,OAAO;AAAA,MACN,eAAe,OAAO,KAAK;AAAA,MAC3B,OAAO;AAAA,MACP,aAAa;AAAA,QACX,OAAO;AAAA,QACP,QAAQ;AAAA,QACR,OAAO;AAAA,MACT;AAAA,IACF,CAAC,EACA,IAAI;AAEP,UAAM;AAAA,EACR;AACF;AAeA,SAAS,kBACP,UACA,YACG;AACH,kBAAgB,wBAId;AACA,UAAM,WAAW;AACjB,UAAM,aAAuB,CAAC;AAC9B,UAAM,iBACJ,CAAC;AACH,QAAI,QAAuC;AAC3C,QAAI,sBAAwC;AAC5C,QAAI,eAAmD;AACvD,QAAI,SAAkB;AAEtB,qBAAiB,YAAY,UAAoC;AAC/D,4BAAsB,oDAAuB,oBAAI,KAAK;AAGtD,UAAI,OAAO,aAAa,YAAY,YAAY,cAAc,UAAU;AACtE,cAAM,SAAS,SAAS,UAAU;AAClC,iBAAS,sBAAsB,MAAM;AACrC,uBAAe,8BAA8B,MAAM;AAEnD,cAAM;AAAA,UACJ,OAAO;AAAA,UACP,iBAAiB;AAAA,UACjB,UAAU;AAAA,QACZ,IAAI,2BAA2B,MAAM;AAErC,mBAAW,OAAO;AAAA,UAChB,OAAO;AAAA,UACP,iBAAiB;AAAA,UACjB,UAAU;AAAA,QACZ,CAAC;AAAA,MACH;AAEA,UACE,OAAO,aAAa,YACpB,YAAY,QACZ,WAAW,UACX;AACA,gBAAQ,SAAS;AAAA,MACnB;AAEA,YAAM,iBAAiB,WAAW,QAAQ;AAE1C,UAAI,CAAC,eAAe,YAAY;AAC9B,mBAAW,KAAK,eAAe,IAAI;AAAA,MACrC,OAAO;AACL,uBAAe,KAAK,eAAe,IAAI;AAAA,MACzC;AAEA,YAAM;AAAA,IACR;AAEA,aACE,0BACC,eAAe,SAAS,IACrB,kBAAkB,cAAc,IAChC,WAAW,KAAK,EAAE;AAExB,eACG,OAAO;AAAA,MACN;AAAA,MACA;AAAA,MACA,cACE,sCAAiB,QAAQ,kBAAkB,KAAK,IAAI;AAAA,IACxD,CAAC,EACA,IAAI;AAAA,EACT;AAEA,SAAO,sBAAsB;AAC/B;;;AGjKO,IAAM,gBAAgB,CAC3B,KACA,mBACY;AACZ,SAAO,IAAI,MAAM,KAAK;AAAA,IACpB,IAAI,YAAY,SAAS,OAAO;AAhGpC;AAiGM,YAAM,mBAAmB,WAAW,OAAwB;AAE5D,YAAM,wBAAwB,IAAG,SAAI,gBAAJ,mBAAiB,IAAI,IAAI,QAAQ,SAAS,CAAC;AAC5E,YAAM,kBACJ,sDAAgB,mBAAhB,YAAkC;AACpC,YAAM,SAAS,EAAE,GAAG,gBAAgB,eAAe;AAGnD,UAAI,OAAO,qBAAqB,YAAY;AAC1C,eAAO,YAAY,iBAAiB,KAAK,UAAU,GAAG,MAAM;AAAA,MAC9D;AAEA,YAAM,uBACJ,oBACA,CAAC,MAAM,QAAQ,gBAAgB,KAC/B,EAAE,4BAA4B,SAC9B,OAAO,qBAAqB;AAG9B,UAAI,sBAAsB;AACxB,eAAO,cAAc,kBAAkB
,MAAM;AAAA,MAC/C;AAGA,aAAO,QAAQ,IAAI,YAAY,SAAS,KAAK;AAAA,IAC/C;AAAA,EACF,CAAC;AACH;","names":[]}
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@elasticdash/openai",
3
- "version": "0.0.1",
3
+ "version": "0.0.3",
4
4
  "description": "Langfuse integration for OpenAI SDK",
5
5
  "type": "module",
6
6
  "sideEffects": false,
@@ -14,14 +14,6 @@
14
14
  "require": "./dist/index.cjs"
15
15
  }
16
16
  },
17
- "scripts": {
18
- "build": "tsup",
19
- "test": "vitest run",
20
- "test:watch": "vitest",
21
- "format": "prettier --write \"src/**/*.ts\"",
22
- "format:check": "prettier --check \"src/**/*.ts\"",
23
- "clean": "rm -rf dist"
24
- },
25
17
  "author": "Langfuse",
26
18
  "license": "MIT",
27
19
  "repository": {
@@ -33,10 +25,18 @@
33
25
  "dist"
34
26
  ],
35
27
  "dependencies": {
36
- "@elasticdash/core": "workspace:^",
37
- "@elasticdash/tracing": "workspace:^"
28
+ "@elasticdash/core": "^0.0.3",
29
+ "@elasticdash/tracing": "^0.0.3"
38
30
  },
39
31
  "devDependencies": {
40
32
  "openai": "^5.0.0"
33
+ },
34
+ "scripts": {
35
+ "build": "tsup",
36
+ "test": "vitest run",
37
+ "test:watch": "vitest",
38
+ "format": "prettier --write \"src/**/*.ts\"",
39
+ "format:check": "prettier --check \"src/**/*.ts\"",
40
+ "clean": "rm -rf dist"
41
41
  }
42
- }
42
+ }