langsmith 0.5.21 → 0.5.23

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42) hide show
  1. package/dist/client.cjs +327 -10
  2. package/dist/client.d.ts +90 -1
  3. package/dist/client.js +330 -13
  4. package/dist/evaluation/_runner.cjs +1 -4
  5. package/dist/evaluation/_runner.js +1 -4
  6. package/dist/experimental/sandbox/client.cjs +102 -427
  7. package/dist/experimental/sandbox/client.d.ts +68 -159
  8. package/dist/experimental/sandbox/client.js +104 -429
  9. package/dist/experimental/sandbox/errors.cjs +1 -2
  10. package/dist/experimental/sandbox/errors.d.ts +1 -2
  11. package/dist/experimental/sandbox/errors.js +1 -2
  12. package/dist/experimental/sandbox/helpers.cjs +8 -98
  13. package/dist/experimental/sandbox/helpers.d.ts +0 -29
  14. package/dist/experimental/sandbox/helpers.js +9 -95
  15. package/dist/experimental/sandbox/index.cjs +6 -1
  16. package/dist/experimental/sandbox/index.d.ts +7 -2
  17. package/dist/experimental/sandbox/index.js +6 -1
  18. package/dist/experimental/sandbox/sandbox.cjs +3 -11
  19. package/dist/experimental/sandbox/sandbox.d.ts +3 -5
  20. package/dist/experimental/sandbox/sandbox.js +3 -11
  21. package/dist/experimental/sandbox/types.d.ts +32 -149
  22. package/dist/index.cjs +1 -1
  23. package/dist/index.d.ts +1 -1
  24. package/dist/index.js +1 -1
  25. package/dist/schemas.d.ts +54 -0
  26. package/dist/utils/error.cjs +7 -0
  27. package/dist/utils/error.d.ts +1 -0
  28. package/dist/utils/error.js +6 -0
  29. package/dist/utils/fast-safe-stringify/index.cjs +228 -0
  30. package/dist/utils/fast-safe-stringify/index.d.ts +33 -0
  31. package/dist/utils/fast-safe-stringify/index.js +227 -0
  32. package/dist/utils/prompts.cjs +7 -2
  33. package/dist/utils/prompts.d.ts +6 -1
  34. package/dist/utils/prompts.js +6 -1
  35. package/dist/wrappers/openai_agents.cjs +849 -0
  36. package/dist/wrappers/openai_agents.d.ts +92 -0
  37. package/dist/wrappers/openai_agents.js +845 -0
  38. package/package.json +22 -6
  39. package/wrappers/openai_agents.cjs +1 -0
  40. package/wrappers/openai_agents.d.cts +1 -0
  41. package/wrappers/openai_agents.d.ts +1 -0
  42. package/wrappers/openai_agents.js +1 -0
@@ -0,0 +1,849 @@
1
+ "use strict";
2
+ /**
3
+ * LangSmith integration for OpenAI Agents SDK.
4
+ *
5
+ * This module provides tracing support for the OpenAI Agents SDK.
6
+ */
7
+ Object.defineProperty(exports, "__esModule", { value: true });
8
+ exports.OpenAIAgentsTracingProcessor = void 0;
9
+ const node_async_hooks_1 = require("node:async_hooks");
10
+ const run_trees_js_1 = require("../run_trees.cjs");
11
+ const client_js_1 = require("../client.cjs");
12
+ const traceable_js_1 = require("../singletons/traceable.cjs");
13
+ traceable_js_1.AsyncLocalStorageProviderSingleton.initializeGlobalInstance(new node_async_hooks_1.AsyncLocalStorage());
14
+ /**
15
+ * Set the current AsyncLocalStorage store to the given RunTree without a
16
+ * callback. Uses `AsyncLocalStorage.enterWith` if available on the underlying
17
+ * instance (it is on Node's built-in ALS). This is required because the
18
+ * OpenAI Agents tracing processor receives `onSpanStart`/`onSpanEnd` callbacks
19
+ * at different points with no single function to wrap via `withRunTree`.
20
+ *
21
+ * Returns the previous store so callers can restore it on exit.
22
+ *
23
+ * Caveats of `enterWith` (inherent, not avoidable with this API shape):
24
+ * - Replaces the ALS store for the current async task and all its
25
+ * descendants. Concurrent async tasks spawned from the caller's scope
26
+ * during the trace will see the installed store.
27
+ * - `onTraceEnd`/`onSpanEnd` restoration only works when it runs on the
28
+ * same async task as the matching start. This is guaranteed by the
29
+ * OpenAI Agents SDK's span lifecycle (span.start / fn / span.end are
30
+ * invoked on one task via `_withSpanFactory`).
31
+ */
32
/**
 * Install `runTree` as the current AsyncLocalStorage store without a callback
 * wrapper, returning the store that was active before so the caller can
 * restore it later. `enterWith` is used when the underlying storage exposes
 * it (Node's built-in AsyncLocalStorage does); otherwise this is a no-op
 * apart from reading the previous store.
 */
function enterRunTreeContext(runTree) {
    const storage = traceable_js_1.AsyncLocalStorageProviderSingleton.getInstance();
    const previousStore = storage.getStore();
    // `enterWith` is not part of the generic storage interface, so probe
    // for it before calling.
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    if (typeof storage.enterWith === "function") {
        storage.enterWith(runTree);
    }
    return previousStore;
}
42
+ /**
43
+ * Parse inputs or outputs into a dictionary format.
44
+ */
45
/**
 * Whether `data` is a non-empty array of Agents SDK items, i.e. objects
 * that each carry a string `type` discriminator.
 */
function isOpenAIAgentsItemArray(data) {
    if (!Array.isArray(data) || data.length === 0) {
        return false;
    }
    return data.every((item) => item !== null &&
        typeof item === "object" &&
        "type" in item &&
        typeof item.type === "string");
}
53
/**
 * Convert Agents SDK item objects into the snake_case shapes the OpenAI
 * Responses API expects when items are replayed as input (e.g. `callId`
 * becomes `call_id`, `function_call_result` becomes `function_call_output`).
 * Items with an unrecognized `type` pass through unchanged.
 */
function normalizeResponseInputItemsForReplay(items) {
    const normalizeOne = (item) => {
        switch (item.type) {
            case "message":
                return {
                    type: "message",
                    role: item.role,
                    content: item.content,
                };
            case "reasoning": {
                const normalized = { type: "reasoning" };
                if (item.id) {
                    normalized.id = item.id;
                }
                // Reasoning content must be an array for replay.
                normalized.content = Array.isArray(item.content) ? item.content : [];
                return normalized;
            }
            case "function_call": {
                const normalized = { type: "function_call" };
                if (item.id) {
                    normalized.id = item.id;
                }
                normalized.call_id = item.callId;
                normalized.name = item.name;
                normalized.arguments = item.arguments;
                return normalized;
            }
            case "function_call_result": {
                const rawOutput = item.output;
                // Unwrap `{ text: ... }` wrappers down to the plain text.
                const isTextWrapper = typeof rawOutput === "object" &&
                    rawOutput !== null &&
                    "text" in rawOutput;
                return {
                    type: "function_call_output",
                    call_id: item.callId,
                    output: isTextWrapper ? rawOutput.text : rawOutput,
                };
            }
            default:
                return item;
        }
    };
    return items.map(normalizeOne);
}
92
/**
 * Parse inputs or outputs into a dictionary format suitable for a LangSmith
 * run.
 *
 * @param {*} data - Raw span input/output: may be null/undefined, an array
 *   of typed output blocks, a plain object, a (possibly JSON-encoded)
 *   string, or a scalar.
 * @param {string} [defaultKey="output"] - Key used to wrap values that are
 *   not already object-shaped.
 * @returns {Object} A plain-object representation of `data`.
 */
function parseIO(data, defaultKey = "output") {
    if (data === null || data === undefined) {
        return {};
    }
    if (Array.isArray(data)) {
        if (data.length === 0) {
            return {};
        }
        // The array is non-empty here; the original re-check of
        // `data.length > 0` was redundant and has been removed.
        if (typeof data[0] === "object" && data[0] !== null) {
            if ("type" in data[0]) {
                // A list of typed output blocks (reasoning, message, etc.).
                return { [defaultKey]: data };
            }
            else if (data.length === 1) {
                // A single plain object: use it directly.
                return data[0];
            }
        }
        return { [defaultKey]: data };
    }
    if (typeof data === "object") {
        return data;
    }
    if (typeof data === "string") {
        // Strings may carry JSON-encoded payloads; prefer the decoded
        // object when one is present.
        try {
            const parsed = JSON.parse(data);
            if (typeof parsed === "object" && parsed !== null) {
                return parsed;
            }
            return { [defaultKey]: data };
        }
        catch {
            return { [defaultKey]: data };
        }
    }
    return { [defaultKey]: data };
}
128
+ /**
129
+ * Get the LangSmith run type for a span.
130
+ */
131
/**
 * Map an OpenAI Agents span type onto a LangSmith run type.
 * Unknown or missing span types default to "chain".
 */
function getRunType(span) {
    switch (span.spanData?.type) {
        case "function":
        case "guardrail":
            return "tool";
        case "generation":
        case "response":
            return "llm";
        case "agent":
        case "handoff":
        case "custom":
        default:
            return "chain";
    }
}
144
+ /**
145
+ * Get the run name for a span.
146
+ */
147
/**
 * Get the run name for a span: the span's own `name` when present,
 * otherwise a label derived from the span type, falling back to "Span".
 *
 * Fix: guard against a nullish `spanData` before using the `in` operator —
 * the original `"name" in spanData` threw a TypeError when `spanData` was
 * missing, which was inconsistent with the `spanData?.type` optional
 * access immediately below.
 */
function getRunName(span) {
    const spanData = span.spanData;
    if (spanData && "name" in spanData && spanData.name) {
        return spanData.name;
    }
    const spanType = spanData?.type;
    if (spanType === "generation") {
        return "Generation";
    }
    else if (spanType === "response") {
        return "Response";
    }
    else if (spanType === "handoff") {
        return "Handoff";
    }
    return "Span";
}
164
/**
 * Derive inputs/outputs for an agent run from its children: the inputs of
 * the first child that has any, and the outputs of the last child that has
 * any. Keys are omitted from the result when no matching child exists.
 */
function deriveAgentInputsOutputs(run) {
    const hasEntries = (value) => value != null && Object.keys(value).length > 0;
    const children = run.child_runs;
    const result = {};
    const firstWithInputs = children.find((child) => hasEntries(child.inputs));
    if (firstWithInputs) {
        result.inputs = firstWithInputs.inputs;
    }
    // Scan backwards for the most recent child that produced outputs.
    for (let i = children.length - 1; i >= 0; i -= 1) {
        if (hasEntries(children[i].outputs)) {
            result.outputs = children[i].outputs;
            break;
        }
    }
    return result;
}
175
+ /**
176
+ * Extract span data into a format suitable for LangSmith runs.
177
+ */
178
/**
 * Extract span data into a format suitable for LangSmith runs: a partial
 * record with optional `inputs`, `outputs`, `invocation_params`, and
 * `metadata` keys, depending on the span type.
 */
function extractSpanData(span) {
    const spanData = span.spanData;
    const result = {};
    switch (spanData.type) {
        case "function": {
            result.inputs = parseIO(spanData.input, "input");
            result.outputs = parseIO(spanData.output, "output");
            break;
        }
        case "generation": {
            result.inputs = parseIO(spanData.input, "input");
            result.outputs = parseIO(spanData.output, "output");
            result.invocation_params = {
                model: spanData.model,
                model_config: spanData.model_config,
            };
            if (spanData.usage) {
                result.metadata = {
                    usage_metadata: createUsageMetadata(spanData.usage),
                };
            }
            break;
        }
        case "response": {
            if (spanData._input !== undefined) {
                result.inputs = {
                    input: isOpenAIAgentsItemArray(spanData._input)
                        ? normalizeResponseInputItemsForReplay(spanData._input)
                        : spanData._input,
                    instructions: typeof spanData._response?.instructions === "string"
                        ? spanData._response.instructions
                        : "",
                };
            }
            const response = spanData._response;
            if (response) {
                result.outputs = parseIO(response.output ?? [], "output");
                // Model-invocation parameters are copied from a fixed allowlist.
                const invocationKeys = [
                    "max_output_tokens",
                    "model",
                    "parallel_tool_calls",
                    "reasoning",
                    "temperature",
                    "text",
                    "tool_choice",
                    "tools",
                    "top_p",
                    "truncation",
                ];
                const invocationParams = {};
                for (const key of invocationKeys) {
                    if (key in response) {
                        invocationParams[key] = response[key];
                    }
                }
                result.invocation_params = invocationParams;
                // Everything else on the response (minus output/usage/
                // instructions and the invocation params) becomes metadata.
                const metadata = {};
                for (const key of Object.keys(response)) {
                    if (key === "output" ||
                        key === "usage" ||
                        key === "instructions" ||
                        invocationKeys.includes(key)) {
                        continue;
                    }
                    metadata[key] = response[key];
                }
                metadata.ls_model_name = invocationParams.model;
                metadata.ls_max_tokens = invocationParams.max_output_tokens;
                metadata.ls_temperature = invocationParams.temperature;
                metadata.ls_model_type = "chat";
                metadata.ls_provider = "openai";
                if (response.usage) {
                    metadata.usage_metadata = createResponsesUsageMetadata(response.usage);
                }
                result.metadata = metadata;
            }
            break;
        }
        case "agent": {
            result.invocation_params = {
                tools: spanData.tools,
                handoffs: spanData.handoffs,
            };
            result.metadata = {
                output_type: spanData.output_type,
            };
            break;
        }
        case "handoff": {
            result.inputs = {
                from_agent: spanData.from_agent,
            };
            result.outputs = {
                to_agent: spanData.to_agent,
            };
            break;
        }
        case "guardrail": {
            result.metadata = {
                triggered: spanData.triggered,
            };
            break;
        }
        case "custom": {
            result.metadata = spanData.data;
            break;
        }
        default:
            break;
    }
    return result;
}
287
+ /**
288
+ * Create usage metadata from a `generation` span's `GenerationUsageData`.
289
+ *
290
+ * The Agents SDK's generation-span usage shape is intentionally flexible and
291
+ * puts token breakdowns under `usage.details` (e.g. `cached_tokens`,
292
+ * `reasoning_tokens`, `audio_tokens`). This is distinct from the OpenAI
293
+ * Responses API shape used by `response` spans (see
294
+ * {@link createResponsesUsageMetadata}).
295
+ */
296
/**
 * Create usage metadata from a `generation` span's `GenerationUsageData`.
 *
 * This shape keeps token breakdowns under `usage.details` (e.g.
 * `cached_tokens`, `reasoning_tokens`, `audio_tokens`) and is distinct
 * from the OpenAI Responses API shape handled by
 * {@link createResponsesUsageMetadata}.
 */
function createUsageMetadata(usage) {
    const input_tokens = usage.input_tokens ?? 0;
    const output_tokens = usage.output_tokens ?? 0;
    const result = {
        input_tokens,
        output_tokens,
        total_tokens: input_tokens + output_tokens,
    };
    const details = usage.details;
    if (details) {
        const inputDetails = {};
        const outputDetails = {};
        // Map the common breakdown fields when present and numeric.
        if (typeof details.cached_tokens === "number") {
            inputDetails.cache_read = details.cached_tokens;
        }
        if (typeof details.reasoning_tokens === "number") {
            outputDetails.reasoning = details.reasoning_tokens;
        }
        if (typeof details.audio_tokens === "number") {
            inputDetails.audio = details.audio_tokens;
        }
        if (Object.keys(inputDetails).length > 0) {
            result.input_token_details = inputDetails;
        }
        if (Object.keys(outputDetails).length > 0) {
            result.output_token_details = outputDetails;
        }
    }
    return result;
}
328
+ /**
329
+ * Create usage metadata from a `response` span's embedded OpenAI Responses API
330
+ * usage object.
331
+ *
332
+ * Shape:
333
+ * ```
334
+ * {
335
+ * input_tokens, output_tokens, total_tokens,
336
+ * input_tokens_details: { cached_tokens },
337
+ * output_tokens_details: { reasoning_tokens },
338
+ * }
339
+ * ```
340
+ *
341
+ * This is distinct from {@link createUsageMetadata}, which handles the
342
+ * Agents SDK `GenerationUsageData` shape (with breakdowns under `details`).
343
+ */
344
/**
 * Create usage metadata from a `response` span's embedded OpenAI Responses
 * API usage object, i.e.:
 * `{ input_tokens, output_tokens, total_tokens,
 *    input_tokens_details: { cached_tokens },
 *    output_tokens_details: { reasoning_tokens } }`.
 *
 * Distinct from {@link createUsageMetadata}, which handles the Agents SDK
 * `GenerationUsageData` shape (breakdowns under `details`).
 */
function createResponsesUsageMetadata(usage) {
    const input_tokens = usage.input_tokens ?? 0;
    const output_tokens = usage.output_tokens ?? 0;
    const result = {
        input_tokens,
        output_tokens,
        // Prefer the reported total; fall back to the sum.
        total_tokens: usage.total_tokens ?? input_tokens + output_tokens,
    };
    const inputDetails = {};
    if (typeof usage.input_tokens_details?.cached_tokens === "number") {
        inputDetails.cache_read = usage.input_tokens_details.cached_tokens;
    }
    const outputDetails = {};
    if (typeof usage.output_tokens_details?.reasoning_tokens === "number") {
        outputDetails.reasoning = usage.output_tokens_details.reasoning_tokens;
    }
    if (Object.keys(inputDetails).length > 0) {
        result.input_token_details = inputDetails;
    }
    if (Object.keys(outputDetails).length > 0) {
        result.output_token_details = outputDetails;
    }
    return result;
}
371
+ /**
372
+ * Tracing processor for the [OpenAI Agents SDK](https://openai.github.io/openai-agents-js/).
373
+ *
374
+ * Traces all intermediate steps of your OpenAI Agent to LangSmith.
375
+ *
376
+ * Requirements: Make sure to install `npm install @openai/agents`.
377
+ *
378
+ * Installing this processor is itself an explicit opt-in to tracing,
379
+ * so traces will be posted regardless of the `LANGSMITH_TRACING` env
380
+ * variable. Any nested `traceable()` calls made from within an agent
381
+ * run (e.g. inside a tool handler) will inherit this and also post,
382
+ * even if `LANGSMITH_TRACING` is not set.
383
+ *
384
+ * @param client - An instance of `langsmith.Client`. If not provided, a default client is created.
385
+ * @param metadata - Metadata to associate with all traces.
386
+ * @param tags - Tags to associate with all traces.
387
+ * @param projectName - LangSmith project to trace to.
388
+ * @param name - Name of the root trace.
389
+ *
390
+ * @example
391
+ * ```typescript
392
+ * import { Agent, Runner, function_tool, setTraceProcessors } from "@openai/agents";
393
+ * import { OpenAIAgentsTracingProcessor } from "langsmith/wrappers/openai_agents";
394
+ *
395
+ * setTraceProcessors([new OpenAIAgentsTracingProcessor()]);
396
+ *
397
+ * const getWeather = function_tool({
398
+ * name: "get_weather",
399
+ * description: "Get the weather for a city",
400
+ * parameters: { type: "object", properties: { city: { type: "string" } } },
401
+ * run: async ({ city }: { city: string }) => `The weather in ${city} is sunny`,
402
+ * });
403
+ *
404
+ * const agent = new Agent({
405
+ * name: "Assistant",
406
+ * instructions: "You are a helpful assistant",
407
+ * model: "gpt-4.1-mini",
408
+ * tools: [getWeather],
409
+ * });
410
+ *
411
+ * const result = await Runner.run(agent, "What's the weather in New York?");
412
+ * console.log(result.finalOutput);
413
+ * ```
414
+ */
415
class OpenAIAgentsTracingProcessor {
    constructor(options) {
        // The Object.defineProperty calls below are compiled class-field
        // initializers: each declares an instance property up front, before
        // the option-derived assignments at the end of the constructor.
        // LangSmith client used to post/patch runs.
        Object.defineProperty(this, "client", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        // Metadata merged into every trace's run extra.
        Object.defineProperty(this, "_metadata", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        // Tags attached to each root run.
        Object.defineProperty(this, "_tags", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        // LangSmith project that new root traces are written to.
        Object.defineProperty(this, "_projectName", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        // Overrides the root run's name when provided.
        Object.defineProperty(this, "_name", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        // traceId -> inputs captured from the first response/generation
        // span to end, used as the root run's inputs.
        Object.defineProperty(this, "_firstResponseInputs", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: {}
        });
        // traceId -> outputs of the most recently ended response/generation
        // span, used as the root run's outputs in onTraceEnd.
        Object.defineProperty(this, "_lastResponseOutputs", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: {}
        });
        // traceId/spanId -> RunTree for runs that are still open.
        Object.defineProperty(this, "_runs", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: new Map()
        });
        // spanId -> spanData.type, kept so onSpanStart can tell whether an
        // agent span's parent was a function span (agent-as-tool).
        Object.defineProperty(this, "_spanDataTypes", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: new Map()
        });
        // Trace ids whose root run has not been POSTed yet (posting is
        // delayed until inputs are known).
        Object.defineProperty(this, "_unpostedTraces", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: new Set()
        });
        // Span ids whose creation POST is delayed until onSpanEnd, when
        // full inputs/outputs are available.
        Object.defineProperty(this, "_unpostedSpans", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: new Set()
        });
        // Previous AsyncLocalStorage store for each trace/span, so nested
        // traceable() calls inside Agents tools correctly nest under the
        // enclosing span and context can be restored when the span/trace ends.
        Object.defineProperty(this, "_previousStoreByTrace", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: new Map()
        });
        Object.defineProperty(this, "_previousStoreBySpan", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: new Map()
        });
        this.client = options?.client ?? new client_js_1.Client();
        this._metadata = options?.metadata;
        this._tags = options?.tags;
        this._projectName = options?.projectName;
        this._name = options?.name;
    }
    /**
     * Called by the Agents SDK when a trace begins. Creates the root
     * RunTree (nested under any ambient traceable() run) and installs it
     * into AsyncLocalStorage; the POST is deferred until inputs are known.
     */
    async onTraceStart(trace) {
        let currentRunTree;
        try {
            currentRunTree = (0, traceable_js_1.getCurrentRunTree)();
        }
        catch {
            // Not in a traceable context
            currentRunTree = undefined;
        }
        // Determine run name
        let runName;
        if (this._name) {
            runName = this._name;
        }
        else if (trace.name) {
            runName = trace.name;
        }
        else {
            runName = "Agent workflow";
        }
        // Build metadata
        const runExtra = {
            metadata: {
                ...this._metadata,
                ls_integration: "openai-agents-sdk",
                ls_agent_type: "root",
            },
        };
        const traceDict = trace.toJSON() ?? {};
        // Group id may be exposed under either naming convention depending
        // on SDK version; check the live property first, then the JSON dump.
        const groupId = trace.groupId ??
            traceDict.groupId ??
            traceDict.group_id;
        if (groupId !== undefined && groupId !== null) {
            runExtra.metadata.thread_id = groupId;
        }
        try {
            let newRun;
            if (currentRunTree !== undefined) {
                // Nest under existing trace
                newRun = currentRunTree.createChild({
                    name: runName,
                    run_type: "chain",
                    inputs: {},
                    extra: runExtra,
                    tags: this._tags,
                });
            }
            else {
                // Create new root trace. Force `tracingEnabled: true` because
                // installing this processor is itself an explicit opt-in to
                // tracing; this ensures nested `traceable()` calls inside tools
                // (which otherwise gate on LANGSMITH_TRACING) also post their
                // runs. The setting propagates to children via createChild.
                const runTreeConfig = {
                    name: runName,
                    run_type: "chain",
                    inputs: {},
                    extra: runExtra,
                    tags: this._tags,
                    client: this.client,
                    tracingEnabled: true,
                };
                if (this._projectName !== undefined) {
                    runTreeConfig.project_name = this._projectName;
                }
                newRun = new run_trees_js_1.RunTree(runTreeConfig);
            }
            // Delay posting until first response/generation span ends
            // so inputs can be included in the POST.
            this._unpostedTraces.add(trace.traceId);
            this._runs.set(trace.traceId, newRun);
            // Set this run as the current context so nested traceable() calls
            // invoked from inside Agents tools nest under it. Remember the previous
            // store so we can restore it in onTraceEnd.
            const previousStore = enterRunTreeContext(newRun);
            this._previousStoreByTrace.set(trace.traceId, previousStore);
        }
        catch (e) {
            console.error("Error creating trace run:", e);
        }
    }
    /**
     * Called when a trace ends. Finalizes the root run with the recorded
     * outputs/metadata, posts or patches it, and restores the previous
     * AsyncLocalStorage store.
     */
    async onTraceEnd(trace) {
        const run = this._runs.get(trace.traceId);
        if (!run) {
            return;
        }
        this._runs.delete(trace.traceId);
        const traceDict = trace.toJSON() ?? {};
        // Constructor-supplied metadata wins over trace-level metadata.
        const metadata = {
            ...traceDict.metadata,
            ...this._metadata,
        };
        try {
            // Update run with final inputs/outputs
            run.outputs = this._lastResponseOutputs[trace.traceId] ?? {};
            // Update metadata
            if (!run.extra) {
                run.extra = {};
            }
            if (!run.extra.metadata) {
                run.extra.metadata = {};
            }
            run.extra.metadata = {
                ...run.extra.metadata,
                ...metadata,
            };
            // End and patch
            await run.end();
            if (this._unpostedTraces.has(trace.traceId)) {
                // No response/generation spans ended, post now
                run.inputs = this._firstResponseInputs[trace.traceId] ?? {};
                this._unpostedTraces.delete(trace.traceId);
                await run.postRun();
            }
            else {
                await run.patchRun({ excludeInputs: true });
            }
            // Drop per-trace bookkeeping now that the run is finalized.
            delete this._firstResponseInputs[trace.traceId];
            delete this._lastResponseOutputs[trace.traceId];
        }
        catch (e) {
            console.error("Error updating trace run:", e);
        }
        finally {
            // Restore the previous AsyncLocalStorage store so contexts outside
            // this trace are not polluted.
            if (this._previousStoreByTrace.has(trace.traceId)) {
                const previousStore = this._previousStoreByTrace.get(trace.traceId);
                this._previousStoreByTrace.delete(trace.traceId);
                enterRunTreeContext(previousStore);
            }
        }
    }
    /**
     * Called when a span starts. Creates a child run under the parent span
     * (or the trace root), installs it into AsyncLocalStorage synchronously,
     * and posts it immediately unless its inputs/outputs only become
     * available at span end.
     */
    async onSpanStart(span) {
        // Find parent run
        const parentId = span.parentId;
        const parentRun = parentId
            ? this._runs.get(parentId)
            : this._runs.get(span.traceId);
        if (!parentRun) {
            console.warn(`No trace info found for span, skipping: ${span.spanId}`);
            return;
        }
        // Extract span data
        let runName = getRunName(span);
        const spanData = span.spanData;
        if (spanData.type === "response") {
            // Prefix response spans with the parent's name for readability,
            // e.g. "Assistant Response".
            const parentName = parentRun.name;
            const rawSpanName = runName;
            if (parentName) {
                runName = `${parentName} ${rawSpanName}`.trim();
            }
            else {
                runName = rawSpanName;
            }
        }
        const runType = getRunType(span);
        const extracted = extractSpanData(span);
        // Create child run and install it into AsyncLocalStorage SYNCHRONOUSLY,
        // before any `await`. The OpenAI Agents runtime invokes `span.start()`
        // (which calls this method without awaiting) right before it executes
        // the tool/agent body in the same async task. Setting ALS via
        // `enterWith` here ensures nested `traceable()` calls inside tool
        // `execute` functions see this span's RunTree as their parent.
        let childRun;
        try {
            childRun = parentRun.createChild({
                name: runName,
                run_type: runType,
                inputs: extracted.inputs ?? {},
                extra: extracted,
                start_time: span.startedAt
                    ? new Date(span.startedAt).getTime()
                    : undefined,
            });
        }
        catch (e) {
            console.error("Error creating span run:", e);
            return;
        }
        // Add ls_agent_type metadata for agent spans that are children of
        // function spans (i.e., agents used as tools).
        // Handoff agents are not considered subagents.
        if (spanData.type === "agent") {
            const parentSpanType = parentId
                ? this._spanDataTypes.get(parentId)
                : undefined;
            if (parentSpanType === "function") {
                if (!childRun.extra) {
                    childRun.extra = {};
                }
                if (!childRun.extra.metadata) {
                    childRun.extra.metadata = {};
                }
                childRun.extra.metadata = {
                    ...childRun.extra.metadata,
                    ls_agent_type: "subagent",
                };
            }
        }
        // Track span data type for parent lookups
        this._spanDataTypes.set(span.spanId, spanData.type);
        this._runs.set(span.spanId, childRun);
        // Enter AsyncLocalStorage context synchronously so nested traceable()
        // calls inside the span's body nest under this run. Remember the
        // previous store so we can restore it in onSpanEnd.
        const previousStore = enterRunTreeContext(childRun);
        this._previousStoreBySpan.set(span.spanId, previousStore);
        try {
            // Delay posting for spans whose complete inputs/outputs aren't
            // available at start.
            if (spanData.type === "generation" ||
                spanData.type === "response" ||
                spanData.type === "function" ||
                spanData.type === "handoff") {
                this._unpostedSpans.add(span.spanId);
            }
            else {
                await childRun.postRun();
            }
        }
        catch (e) {
            console.error("Error posting span run:", e);
        }
    }
    /**
     * Called when a span ends. Restores the ALS store, fills in the run's
     * final inputs/outputs/metadata, records trace-level response IO, and
     * posts or patches the run.
     */
    async onSpanEnd(span) {
        // Restore the previous AsyncLocalStorage store synchronously so any
        // further async work in the enclosing scope doesn't see this span's
        // run as its parent. Done before any await to match span.end()
        // which fires onSpanEnd without awaiting.
        if (this._previousStoreBySpan.has(span.spanId)) {
            const previousStore = this._previousStoreBySpan.get(span.spanId);
            this._previousStoreBySpan.delete(span.spanId);
            enterRunTreeContext(previousStore);
        }
        const run = this._runs.get(span.spanId);
        this._spanDataTypes.delete(span.spanId);
        if (!run) {
            return;
        }
        this._runs.delete(span.spanId);
        try {
            // Extract outputs and metadata
            const extracted = extractSpanData(span);
            const outputs = extracted.outputs ?? {};
            const inputs = extracted.inputs ?? {};
            // Update run
            run.outputs = outputs;
            if (Object.keys(inputs).length > 0) {
                run.inputs = inputs;
            }
            if (span.error) {
                run.error = span.error.message;
            }
            // Agent spans often carry no IO of their own; borrow it from
            // their children when still empty.
            if (span.spanData.type === "agent") {
                const derived = deriveAgentInputsOutputs(run);
                if (Object.keys(run.inputs ?? {}).length === 0 &&
                    derived.inputs &&
                    Object.keys(derived.inputs).length > 0) {
                    run.inputs = derived.inputs;
                }
                if (Object.keys(run.outputs ?? {}).length === 0 &&
                    derived.outputs &&
                    Object.keys(derived.outputs).length > 0) {
                    run.outputs = derived.outputs;
                }
            }
            // Add OpenAI metadata
            if (!run.extra) {
                run.extra = {};
            }
            if (!run.extra.metadata) {
                run.extra.metadata = {};
            }
            run.extra.metadata = {
                ...run.extra.metadata,
                openai_parent_id: span.parentId ?? undefined,
                openai_trace_id: span.traceId,
                openai_span_id: span.spanId,
            };
            if (extracted.metadata) {
                run.extra.metadata = {
                    ...run.extra.metadata,
                    ...extracted.metadata,
                };
            }
            if (extracted.invocation_params) {
                run.extra.invocation_params = extracted.invocation_params;
            }
            const spanData = span.spanData;
            // Record response/generation IO at the trace level and post the
            // root run once inputs are available.
            if (spanData.type === "response") {
                this._firstResponseInputs[span.traceId] =
                    this._firstResponseInputs[span.traceId] ?? inputs;
                this._lastResponseOutputs[span.traceId] = outputs;
                await this._maybePostTrace(span.traceId, inputs);
            }
            else if (spanData.type === "generation") {
                this._firstResponseInputs[span.traceId] =
                    this._firstResponseInputs[span.traceId] ?? inputs;
                this._lastResponseOutputs[span.traceId] = outputs;
                await this._maybePostTrace(span.traceId, inputs);
            }
            // End the run
            if (span.endedAt) {
                await run.end(undefined, undefined, new Date(span.endedAt).getTime());
            }
            else {
                await run.end();
            }
            if (this._unpostedSpans.has(span.spanId)) {
                this._unpostedSpans.delete(span.spanId);
                await run.postRun();
            }
            else {
                // Agent runs may have had their inputs derived above, so they
                // must not exclude inputs from the PATCH.
                await run.patchRun(span.spanData.type === "agent" ? undefined : { excludeInputs: true });
            }
        }
        catch (e) {
            console.error("Error updating span run:", e);
        }
    }
    /**
     * Post the (still-unposted) root run for `traceId` with the given
     * inputs. No-op if the trace was already posted or is unknown.
     */
    async _maybePostTrace(traceId, inputs) {
        if (this._unpostedTraces.has(traceId)) {
            const traceRun = this._runs.get(traceId);
            if (traceRun) {
                traceRun.inputs = inputs;
                try {
                    await traceRun.postRun();
                }
                catch (e) {
                    console.error("Error posting trace:", e);
                }
                this._unpostedTraces.delete(traceId);
            }
        }
    }
    /** Flush all pending run uploads before shutdown. */
    async shutdown() {
        await this.client.flush();
        await this.client.awaitPendingTraceBatches();
    }
    /** Flush all pending run uploads on demand. */
    async forceFlush() {
        await this.client.flush();
        await this.client.awaitPendingTraceBatches();
    }
}
849
+ exports.OpenAIAgentsTracingProcessor = OpenAIAgentsTracingProcessor;