langsmith 0.5.21 → 0.5.22
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/client.cjs +60 -0
- package/dist/client.d.ts +12 -0
- package/dist/client.js +60 -0
- package/dist/experimental/sandbox/client.cjs +102 -427
- package/dist/experimental/sandbox/client.d.ts +68 -159
- package/dist/experimental/sandbox/client.js +104 -429
- package/dist/experimental/sandbox/errors.cjs +1 -2
- package/dist/experimental/sandbox/errors.d.ts +1 -2
- package/dist/experimental/sandbox/errors.js +1 -2
- package/dist/experimental/sandbox/helpers.cjs +8 -98
- package/dist/experimental/sandbox/helpers.d.ts +0 -29
- package/dist/experimental/sandbox/helpers.js +9 -95
- package/dist/experimental/sandbox/index.cjs +6 -1
- package/dist/experimental/sandbox/index.d.ts +7 -2
- package/dist/experimental/sandbox/index.js +6 -1
- package/dist/experimental/sandbox/sandbox.cjs +3 -11
- package/dist/experimental/sandbox/sandbox.d.ts +3 -5
- package/dist/experimental/sandbox/sandbox.js +3 -11
- package/dist/experimental/sandbox/types.d.ts +32 -149
- package/dist/index.cjs +1 -1
- package/dist/index.d.ts +1 -1
- package/dist/index.js +1 -1
- package/dist/wrappers/openai_agents.cjs +849 -0
- package/dist/wrappers/openai_agents.d.ts +92 -0
- package/dist/wrappers/openai_agents.js +845 -0
- package/package.json +19 -5
- package/wrappers/openai_agents.cjs +1 -0
- package/wrappers/openai_agents.d.cts +1 -0
- package/wrappers/openai_agents.d.ts +1 -0
- package/wrappers/openai_agents.js +1 -0
|
@@ -0,0 +1,845 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* LangSmith integration for OpenAI Agents SDK.
|
|
3
|
+
*
|
|
4
|
+
* This module provides tracing support for the OpenAI Agents SDK.
|
|
5
|
+
*/
|
|
6
|
+
import { AsyncLocalStorage } from "node:async_hooks";
|
|
7
|
+
import { RunTree } from "../run_trees.js";
|
|
8
|
+
import { Client } from "../client.js";
|
|
9
|
+
import { AsyncLocalStorageProviderSingleton, getCurrentRunTree, } from "../singletons/traceable.js";
|
|
10
|
+
AsyncLocalStorageProviderSingleton.initializeGlobalInstance(new AsyncLocalStorage());
|
|
11
|
+
/**
 * Install `runTree` as the current AsyncLocalStorage store without wrapping a
 * callback. Uses `AsyncLocalStorage.enterWith` when the underlying instance
 * provides it (Node's built-in ALS does). This is required because the
 * OpenAI Agents tracing processor receives `onSpanStart`/`onSpanEnd`
 * callbacks at different points, with no single function to wrap via
 * `withRunTree`.
 *
 * Returns the previously installed store so the caller can restore it later.
 *
 * Caveats of `enterWith` (inherent to this API shape):
 * - It replaces the ALS store for the current async task and every
 *   descendant task; concurrent tasks spawned from the caller's scope during
 *   the trace will observe the installed store.
 * - Restoration in `onTraceEnd`/`onSpanEnd` only works when it runs on the
 *   same async task as the matching start — guaranteed by the OpenAI Agents
 *   SDK's span lifecycle (span.start / fn / span.end run on one task via
 *   `_withSpanFactory`).
 */
function enterRunTreeContext(runTree) {
    const als = AsyncLocalStorageProviderSingleton.getInstance();
    const priorStore = als.getStore();
    // Feature-detect: `enterWith` exists on Node's AsyncLocalStorage but is
    // not part of the minimal store interface other providers may implement.
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const enterWithFn = als.enterWith;
    if (typeof enterWithFn === "function") {
        enterWithFn.call(als, runTree);
    }
    return priorStore;
}
|
|
39
|
+
/**
 * Type guard: returns true when `data` is a non-empty array whose every
 * element is a non-null object carrying a string `type` field — the shape of
 * OpenAI Agents SDK conversation items.
 */
function isOpenAIAgentsItemArray(data) {
    if (!Array.isArray(data) || data.length === 0) {
        return false;
    }
    for (const entry of data) {
        if (typeof entry !== "object" || entry === null) {
            return false;
        }
        if (!("type" in entry) || typeof entry.type !== "string") {
            return false;
        }
    }
    return true;
}
|
|
50
|
+
/**
 * Convert OpenAI Agents SDK input items into the plain Responses-API replay
 * shape: snake_case call ids (`callId` -> `call_id`), the
 * `function_call_output` type for tool results, and `{ text }` wrappers
 * unwrapped to their string. Items of any other type pass through unchanged.
 */
function normalizeResponseInputItemsForReplay(items) {
    const normalizeOne = (item) => {
        switch (item.type) {
            case "message":
                return {
                    type: "message",
                    role: item.role,
                    content: item.content,
                };
            case "reasoning": {
                const reasoning = { type: "reasoning" };
                if (item.id) {
                    reasoning.id = item.id;
                }
                reasoning.content = Array.isArray(item.content) ? item.content : [];
                return reasoning;
            }
            case "function_call": {
                const call = { type: "function_call" };
                if (item.id) {
                    call.id = item.id;
                }
                call.call_id = item.callId;
                call.name = item.name;
                call.arguments = item.arguments;
                return call;
            }
            case "function_call_result": {
                const rawOutput = item.output;
                const isTextWrapper = typeof rawOutput === "object" &&
                    rawOutput !== null &&
                    "text" in rawOutput;
                return {
                    type: "function_call_output",
                    call_id: item.callId,
                    output: isTextWrapper ? rawOutput.text : rawOutput,
                };
            }
            default:
                return item;
        }
    };
    return items.map(normalizeOne);
}
|
|
89
|
+
/**
 * Coerce an arbitrary span input/output payload into a plain dictionary
 * suitable for a LangSmith run.
 *
 * Rules (in order):
 * - null/undefined -> `{}`.
 * - Empty array -> `{}`.
 * - Single-element array holding a plain object without a `type` marker ->
 *   that object itself; any other array (including lists of typed output
 *   blocks like reasoning/message) is kept whole under `defaultKey`.
 * - Plain object -> returned as-is.
 * - String -> JSON-parsed when it decodes to a non-null object; otherwise
 *   wrapped under `defaultKey`.
 * - Anything else -> wrapped under `defaultKey`.
 */
function parseIO(data, defaultKey = "output") {
    if (data == null) {
        return {};
    }
    if (Array.isArray(data)) {
        if (data.length === 0) {
            return {};
        }
        const head = data[0];
        const headIsObject = typeof head === "object" && head !== null;
        if (headIsObject && !("type" in head) && data.length === 1) {
            return head;
        }
        return { [defaultKey]: data };
    }
    if (typeof data === "object") {
        return data;
    }
    if (typeof data === "string") {
        try {
            const decoded = JSON.parse(data);
            return typeof decoded === "object" && decoded !== null
                ? decoded
                : { [defaultKey]: data };
        }
        catch {
            // Not JSON — treat as an opaque string payload.
            return { [defaultKey]: data };
        }
    }
    return { [defaultKey]: data };
}
|
|
125
|
+
/**
 * Map an OpenAI Agents span type onto the LangSmith run type:
 * agent/handoff/custom -> "chain", function/guardrail -> "tool",
 * generation/response -> "llm". Unknown or missing types default to "chain".
 */
function getRunType(span) {
    switch (span.spanData?.type) {
        case "agent":
        case "handoff":
        case "custom":
            return "chain";
        case "function":
        case "guardrail":
            return "tool";
        case "generation":
        case "response":
            return "llm";
        default:
            return "chain";
    }
}
|
|
141
|
+
/**
 * Pick a display name for a span: the span's own `name` when present and
 * truthy, otherwise a label derived from its type ("Generation", "Response",
 * "Handoff"), falling back to "Span".
 */
function getRunName(span) {
    const data = span.spanData;
    if ("name" in data && data.name) {
        return data.name;
    }
    switch (data?.type) {
        case "generation":
            return "Generation";
        case "response":
            return "Response";
        case "handoff":
            return "Handoff";
        default:
            return "Span";
    }
}
|
|
161
|
+
/**
 * Derive an agent run's inputs/outputs from its children: inputs come from
 * the first child with non-empty inputs, outputs from the last child with
 * non-empty outputs. Keys are omitted entirely when no such child exists.
 */
function deriveAgentInputsOutputs(run) {
    const hasEntries = (obj) => Boolean(obj) && Object.keys(obj).length > 0;
    const children = run.child_runs;
    const derived = {};
    for (const child of children) {
        if (hasEntries(child.inputs)) {
            derived.inputs = child.inputs;
            break;
        }
    }
    for (let i = children.length - 1; i >= 0; i -= 1) {
        if (hasEntries(children[i].outputs)) {
            derived.outputs = children[i].outputs;
            break;
        }
    }
    return derived;
}
|
|
172
|
+
/** Response fields copied verbatim into a response span's invocation params. */
const RESPONSE_INVOCATION_KEYS = [
    "max_output_tokens",
    "model",
    "parallel_tool_calls",
    "reasoning",
    "temperature",
    "text",
    "tool_choice",
    "tools",
    "top_p",
    "truncation",
];
/**
 * Extract data for a `generation` span: parsed inputs/outputs, the model
 * invocation params, and (when usage is present) usage metadata.
 */
function extractGenerationSpanData(generationData) {
    const data = {
        inputs: parseIO(generationData.input, "input"),
        outputs: parseIO(generationData.output, "output"),
        invocation_params: {
            model: generationData.model,
            model_config: generationData.model_config,
        },
    };
    if (generationData.usage) {
        data.metadata = {
            usage_metadata: createUsageMetadata(generationData.usage),
        };
    }
    return data;
}
/**
 * Extract data for a `response` span (Responses API call): replay-normalized
 * inputs, parsed outputs, invocation params, and LangSmith `ls_*` metadata.
 */
function extractResponseSpanData(responseData) {
    const data = {};
    if (responseData._input !== undefined) {
        data.inputs = {
            input: isOpenAIAgentsItemArray(responseData._input)
                ? normalizeResponseInputItemsForReplay(responseData._input)
                : responseData._input,
            // Instructions live on the response object, not the input list.
            instructions: typeof responseData._response?.instructions === "string"
                ? responseData._response.instructions
                : "",
        };
    }
    const response = responseData._response;
    if (response) {
        data.outputs = parseIO(response.output ?? [], "output");
        // Invocation params: the whitelisted keys present on the response.
        const invocationParams = {};
        for (const key of RESPONSE_INVOCATION_KEYS) {
            if (key in response) {
                invocationParams[key] = response[key];
            }
        }
        data.invocation_params = invocationParams;
        // Metadata: every remaining response field, plus LangSmith `ls_*`
        // annotations used by the UI for model/provider display.
        const metadata = {};
        for (const key of Object.keys(response)) {
            if (key === "output" || key === "usage" || key === "instructions") {
                continue;
            }
            if (RESPONSE_INVOCATION_KEYS.includes(key)) {
                continue;
            }
            metadata[key] = response[key];
        }
        metadata.ls_model_name = invocationParams.model;
        metadata.ls_max_tokens = invocationParams.max_output_tokens;
        metadata.ls_temperature = invocationParams.temperature;
        metadata.ls_model_type = "chat";
        metadata.ls_provider = "openai";
        if (response.usage) {
            metadata.usage_metadata = createResponsesUsageMetadata(response.usage);
        }
        data.metadata = metadata;
    }
    return data;
}
/**
 * Extract span data into a format suitable for LangSmith runs: a plain
 * object with optional `inputs`, `outputs`, `invocation_params`, and
 * `metadata` keys, dispatched on `span.spanData.type`. Unknown span types
 * yield an empty object.
 */
function extractSpanData(span) {
    const spanData = span.spanData;
    switch (spanData.type) {
        case "function":
            return {
                inputs: parseIO(spanData.input, "input"),
                outputs: parseIO(spanData.output, "output"),
            };
        case "generation":
            return extractGenerationSpanData(spanData);
        case "response":
            return extractResponseSpanData(spanData);
        case "agent":
            return {
                invocation_params: {
                    tools: spanData.tools,
                    handoffs: spanData.handoffs,
                },
                metadata: {
                    output_type: spanData.output_type,
                },
            };
        case "handoff":
            return {
                inputs: { from_agent: spanData.from_agent },
                outputs: { to_agent: spanData.to_agent },
            };
        case "guardrail":
            return {
                metadata: { triggered: spanData.triggered },
            };
        case "custom":
            return { metadata: spanData.data };
        default:
            return {};
    }
}
|
|
284
|
+
/**
 * Create usage metadata from a `generation` span's `GenerationUsageData`.
 *
 * The Agents SDK's generation-span usage shape is intentionally flexible and
 * nests token breakdowns under `usage.details` (e.g. `cached_tokens`,
 * `reasoning_tokens`, `audio_tokens`). This differs from the OpenAI
 * Responses API shape used by `response` spans — see
 * {@link createResponsesUsageMetadata}.
 */
function createUsageMetadata(usage) {
    const promptTokens = usage.input_tokens ?? 0;
    const completionTokens = usage.output_tokens ?? 0;
    const metadata = {
        input_tokens: promptTokens,
        output_tokens: completionTokens,
        total_tokens: promptTokens + completionTokens,
    };
    const details = usage.details;
    if (details) {
        const inputBreakdown = {};
        const outputBreakdown = {};
        if (typeof details.cached_tokens === "number") {
            inputBreakdown.cache_read = details.cached_tokens;
        }
        if (typeof details.reasoning_tokens === "number") {
            outputBreakdown.reasoning = details.reasoning_tokens;
        }
        // NOTE(review): audio tokens are recorded on the input side here; the
        // generation usage shape does not say whether they are input or
        // output audio — confirm against the Agents SDK.
        if (typeof details.audio_tokens === "number") {
            inputBreakdown.audio = details.audio_tokens;
        }
        if (Object.keys(inputBreakdown).length > 0) {
            metadata.input_token_details = inputBreakdown;
        }
        if (Object.keys(outputBreakdown).length > 0) {
            metadata.output_token_details = outputBreakdown;
        }
    }
    return metadata;
}
|
|
325
|
+
/**
 * Create usage metadata from a `response` span's embedded OpenAI Responses
 * API usage object.
 *
 * Expected shape:
 * ```
 * {
 *   input_tokens, output_tokens, total_tokens,
 *   input_tokens_details: { cached_tokens },
 *   output_tokens_details: { reasoning_tokens },
 * }
 * ```
 *
 * This is distinct from {@link createUsageMetadata}, which handles the
 * Agents SDK `GenerationUsageData` shape (breakdowns under `details`).
 */
function createResponsesUsageMetadata(usage) {
    const promptTokens = usage.input_tokens ?? 0;
    const completionTokens = usage.output_tokens ?? 0;
    const metadata = {
        input_tokens: promptTokens,
        output_tokens: completionTokens,
        // Prefer the API-reported total; fall back to the sum.
        total_tokens: usage.total_tokens ?? promptTokens + completionTokens,
    };
    const cachedTokens = usage.input_tokens_details?.cached_tokens;
    if (typeof cachedTokens === "number") {
        metadata.input_token_details = { cache_read: cachedTokens };
    }
    const reasoningTokens = usage.output_tokens_details?.reasoning_tokens;
    if (typeof reasoningTokens === "number") {
        metadata.output_token_details = { reasoning: reasoningTokens };
    }
    return metadata;
}
|
|
368
|
+
/**
 * Tracing processor for the [OpenAI Agents SDK](https://openai.github.io/openai-agents-js/).
 *
 * Traces all intermediate steps of your OpenAI Agent to LangSmith.
 *
 * Requirements: Make sure to install `npm install @openai/agents`.
 *
 * Installing this processor is itself an explicit opt-in to tracing,
 * so traces will be posted regardless of the `LANGSMITH_TRACING` env
 * variable. Any nested `traceable()` calls made from within an agent
 * run (e.g. inside a tool handler) will inherit this and also post,
 * even if `LANGSMITH_TRACING` is not set.
 *
 * @param client - An instance of `langsmith.Client`. If not provided, a default client is created.
 * @param metadata - Metadata to associate with all traces.
 * @param tags - Tags to associate with all traces.
 * @param projectName - LangSmith project to trace to.
 * @param name - Name of the root trace.
 *
 * @example
 * ```typescript
 * import { Agent, Runner, function_tool, setTraceProcessors } from "@openai/agents";
 * import { OpenAIAgentsTracingProcessor } from "langsmith/wrappers/openai_agents";
 *
 * setTraceProcessors([new OpenAIAgentsTracingProcessor()]);
 *
 * const getWeather = function_tool({
 *   name: "get_weather",
 *   description: "Get the weather for a city",
 *   parameters: { type: "object", properties: { city: { type: "string" } } },
 *   run: async ({ city }: { city: string }) => `The weather in ${city} is sunny`,
 * });
 *
 * const agent = new Agent({
 *   name: "Assistant",
 *   instructions: "You are a helpful assistant",
 *   model: "gpt-4.1-mini",
 *   tools: [getWeather],
 * });
 *
 * const result = await Runner.run(agent, "What's the weather in New York?");
 * console.log(result.finalOutput);
 * ```
 */
export class OpenAIAgentsTracingProcessor {
    constructor(options) {
        // NOTE: the Object.defineProperty calls below are compiled class-field
        // declarations; each simply declares an own, writable instance
        // property before the constructor options are applied.
        Object.defineProperty(this, "client", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        Object.defineProperty(this, "_metadata", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        Object.defineProperty(this, "_tags", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        Object.defineProperty(this, "_projectName", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        Object.defineProperty(this, "_name", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        // First response/generation span inputs per traceId, used as the root
        // run's inputs when the root is finally posted.
        Object.defineProperty(this, "_firstResponseInputs", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: {}
        });
        // Last response/generation span outputs per traceId, used as the root
        // run's outputs in onTraceEnd.
        Object.defineProperty(this, "_lastResponseOutputs", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: {}
        });
        // Active RunTree per traceId (root runs) and per spanId (child runs).
        Object.defineProperty(this, "_runs", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: new Map()
        });
        // spanId -> spanData.type, so a child span can inspect its parent's
        // type (e.g. agent-under-function detection in onSpanStart).
        Object.defineProperty(this, "_spanDataTypes", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: new Map()
        });
        // Trace/span ids whose runs were created but not yet POSTed; posting
        // is delayed until inputs are known.
        Object.defineProperty(this, "_unpostedTraces", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: new Set()
        });
        Object.defineProperty(this, "_unpostedSpans", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: new Set()
        });
        // Previous AsyncLocalStorage store for each trace/span, so nested
        // traceable() calls inside Agents tools correctly nest under the
        // enclosing span and context can be restored when the span/trace ends.
        Object.defineProperty(this, "_previousStoreByTrace", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: new Map()
        });
        Object.defineProperty(this, "_previousStoreBySpan", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: new Map()
        });
        this.client = options?.client ?? new Client();
        this._metadata = options?.metadata;
        this._tags = options?.tags;
        this._projectName = options?.projectName;
        this._name = options?.name;
    }
    /**
     * Called when an Agents trace begins. Creates the root LangSmith run
     * (nested under any surrounding traceable() context), installs it as the
     * current ALS store, and defers posting until inputs are available.
     */
    async onTraceStart(trace) {
        let currentRunTree;
        try {
            currentRunTree = getCurrentRunTree();
        }
        catch {
            // Not in a traceable context
            currentRunTree = undefined;
        }
        // Determine run name
        let runName;
        if (this._name) {
            runName = this._name;
        }
        else if (trace.name) {
            runName = trace.name;
        }
        else {
            runName = "Agent workflow";
        }
        // Build metadata
        const runExtra = {
            metadata: {
                ...this._metadata,
                ls_integration: "openai-agents-sdk",
                ls_agent_type: "root",
            },
        };
        const traceDict = trace.toJSON() ?? {};
        // Group id may be camelCase on the trace object or either casing in
        // its serialized form, depending on the Agents SDK version.
        const groupId = trace.groupId ??
            traceDict.groupId ??
            traceDict.group_id;
        if (groupId !== undefined && groupId !== null) {
            runExtra.metadata.thread_id = groupId;
        }
        try {
            let newRun;
            if (currentRunTree !== undefined) {
                // Nest under existing trace
                newRun = currentRunTree.createChild({
                    name: runName,
                    run_type: "chain",
                    inputs: {},
                    extra: runExtra,
                    tags: this._tags,
                });
            }
            else {
                // Create new root trace. Force `tracingEnabled: true` because
                // installing this processor is itself an explicit opt-in to
                // tracing; this ensures nested `traceable()` calls inside tools
                // (which otherwise gate on LANGSMITH_TRACING) also post their
                // runs. The setting propagates to children via createChild.
                const runTreeConfig = {
                    name: runName,
                    run_type: "chain",
                    inputs: {},
                    extra: runExtra,
                    tags: this._tags,
                    client: this.client,
                    tracingEnabled: true,
                };
                if (this._projectName !== undefined) {
                    runTreeConfig.project_name = this._projectName;
                }
                newRun = new RunTree(runTreeConfig);
            }
            // Delay posting until first response/generation span ends
            // so inputs can be included in the POST.
            this._unpostedTraces.add(trace.traceId);
            this._runs.set(trace.traceId, newRun);
            // Set this run as the current context so nested traceable() calls
            // invoked from inside Agents tools nest under it. Remember the previous
            // store so we can restore it in onTraceEnd.
            const previousStore = enterRunTreeContext(newRun);
            this._previousStoreByTrace.set(trace.traceId, previousStore);
        }
        catch (e) {
            console.error("Error creating trace run:", e);
        }
    }
    /**
     * Called when an Agents trace ends. Finalizes the root run with the
     * accumulated outputs/metadata, POSTs or PATCHes it depending on whether
     * it was already posted, and restores the prior ALS store.
     */
    async onTraceEnd(trace) {
        const run = this._runs.get(trace.traceId);
        if (!run) {
            return;
        }
        this._runs.delete(trace.traceId);
        const traceDict = trace.toJSON() ?? {};
        const metadata = {
            ...traceDict.metadata,
            ...this._metadata,
        };
        try {
            // Update run with final inputs/outputs
            run.outputs = this._lastResponseOutputs[trace.traceId] ?? {};
            // Update metadata
            if (!run.extra) {
                run.extra = {};
            }
            if (!run.extra.metadata) {
                run.extra.metadata = {};
            }
            run.extra.metadata = {
                ...run.extra.metadata,
                ...metadata,
            };
            // End and patch
            await run.end();
            if (this._unpostedTraces.has(trace.traceId)) {
                // No response/generation spans ended, post now
                run.inputs = this._firstResponseInputs[trace.traceId] ?? {};
                this._unpostedTraces.delete(trace.traceId);
                await run.postRun();
            }
            else {
                await run.patchRun({ excludeInputs: true });
            }
            delete this._firstResponseInputs[trace.traceId];
            delete this._lastResponseOutputs[trace.traceId];
        }
        catch (e) {
            console.error("Error updating trace run:", e);
        }
        finally {
            // Restore the previous AsyncLocalStorage store so contexts outside
            // this trace are not polluted.
            if (this._previousStoreByTrace.has(trace.traceId)) {
                const previousStore = this._previousStoreByTrace.get(trace.traceId);
                this._previousStoreByTrace.delete(trace.traceId);
                enterRunTreeContext(previousStore);
            }
        }
    }
    /**
     * Called when a span starts. Creates a child run under the parent span's
     * run (or the trace's root run), installs it into ALS synchronously, and
     * posts it immediately unless its inputs/outputs only become known at end.
     */
    async onSpanStart(span) {
        // Find parent run
        const parentId = span.parentId;
        const parentRun = parentId
            ? this._runs.get(parentId)
            : this._runs.get(span.traceId);
        if (!parentRun) {
            console.warn(`No trace info found for span, skipping: ${span.spanId}`);
            return;
        }
        // Extract span data
        let runName = getRunName(span);
        const spanData = span.spanData;
        if (spanData.type === "response") {
            // Prefix response spans with the parent run's name for readability.
            const parentName = parentRun.name;
            const rawSpanName = runName;
            if (parentName) {
                runName = `${parentName} ${rawSpanName}`.trim();
            }
            else {
                runName = rawSpanName;
            }
        }
        const runType = getRunType(span);
        const extracted = extractSpanData(span);
        // Create child run and install it into AsyncLocalStorage SYNCHRONOUSLY,
        // before any `await`. The OpenAI Agents runtime invokes `span.start()`
        // (which calls this method without awaiting) right before it executes
        // the tool/agent body in the same async task. Setting ALS via
        // `enterWith` here ensures nested `traceable()` calls inside tool
        // `execute` functions see this span's RunTree as their parent.
        let childRun;
        try {
            childRun = parentRun.createChild({
                name: runName,
                run_type: runType,
                inputs: extracted.inputs ?? {},
                extra: extracted,
                start_time: span.startedAt
                    ? new Date(span.startedAt).getTime()
                    : undefined,
            });
        }
        catch (e) {
            console.error("Error creating span run:", e);
            return;
        }
        // Add ls_agent_type metadata for agent spans that are children of
        // function spans (i.e., agents used as tools).
        // Handoff agents are not considered subagents.
        if (spanData.type === "agent") {
            const parentSpanType = parentId
                ? this._spanDataTypes.get(parentId)
                : undefined;
            if (parentSpanType === "function") {
                if (!childRun.extra) {
                    childRun.extra = {};
                }
                if (!childRun.extra.metadata) {
                    childRun.extra.metadata = {};
                }
                childRun.extra.metadata = {
                    ...childRun.extra.metadata,
                    ls_agent_type: "subagent",
                };
            }
        }
        // Track span data type for parent lookups
        this._spanDataTypes.set(span.spanId, spanData.type);
        this._runs.set(span.spanId, childRun);
        // Enter AsyncLocalStorage context synchronously so nested traceable()
        // calls inside the span's body nest under this run. Remember the
        // previous store so we can restore it in onSpanEnd.
        const previousStore = enterRunTreeContext(childRun);
        this._previousStoreBySpan.set(span.spanId, previousStore);
        try {
            // Delay posting for spans whose complete inputs/outputs aren't
            // available at start.
            if (spanData.type === "generation" ||
                spanData.type === "response" ||
                spanData.type === "function" ||
                spanData.type === "handoff") {
                this._unpostedSpans.add(span.spanId);
            }
            else {
                await childRun.postRun();
            }
        }
        catch (e) {
            console.error("Error posting span run:", e);
        }
    }
    /**
     * Called when a span ends. Restores the prior ALS store first (before any
     * await), finalizes the span's run with extracted inputs/outputs/metadata,
     * records response/generation IO for the root run, and POSTs or PATCHes.
     */
    async onSpanEnd(span) {
        // Restore the previous AsyncLocalStorage store synchronously so any
        // further async work in the enclosing scope doesn't see this span's
        // run as its parent. Done before any await to match span.end()
        // which fires onSpanEnd without awaiting.
        if (this._previousStoreBySpan.has(span.spanId)) {
            const previousStore = this._previousStoreBySpan.get(span.spanId);
            this._previousStoreBySpan.delete(span.spanId);
            enterRunTreeContext(previousStore);
        }
        const run = this._runs.get(span.spanId);
        this._spanDataTypes.delete(span.spanId);
        if (!run) {
            return;
        }
        this._runs.delete(span.spanId);
        try {
            // Extract outputs and metadata
            const extracted = extractSpanData(span);
            const outputs = extracted.outputs ?? {};
            const inputs = extracted.inputs ?? {};
            // Update run
            run.outputs = outputs;
            if (Object.keys(inputs).length > 0) {
                run.inputs = inputs;
            }
            if (span.error) {
                run.error = span.error.message;
            }
            // Agent spans often lack their own IO; borrow from children.
            if (span.spanData.type === "agent") {
                const derived = deriveAgentInputsOutputs(run);
                if (Object.keys(run.inputs ?? {}).length === 0 &&
                    derived.inputs &&
                    Object.keys(derived.inputs).length > 0) {
                    run.inputs = derived.inputs;
                }
                if (Object.keys(run.outputs ?? {}).length === 0 &&
                    derived.outputs &&
                    Object.keys(derived.outputs).length > 0) {
                    run.outputs = derived.outputs;
                }
            }
            // Add OpenAI metadata
            if (!run.extra) {
                run.extra = {};
            }
            if (!run.extra.metadata) {
                run.extra.metadata = {};
            }
            run.extra.metadata = {
                ...run.extra.metadata,
                openai_parent_id: span.parentId ?? undefined,
                openai_trace_id: span.traceId,
                openai_span_id: span.spanId,
            };
            if (extracted.metadata) {
                run.extra.metadata = {
                    ...run.extra.metadata,
                    ...extracted.metadata,
                };
            }
            if (extracted.invocation_params) {
                run.extra.invocation_params = extracted.invocation_params;
            }
            const spanData = span.spanData;
            if (spanData.type === "response") {
                // Record trace-level IO and post the root run now that its
                // inputs are known.
                this._firstResponseInputs[span.traceId] =
                    this._firstResponseInputs[span.traceId] ?? inputs;
                this._lastResponseOutputs[span.traceId] = outputs;
                await this._maybePostTrace(span.traceId, inputs);
            }
            else if (spanData.type === "generation") {
                this._firstResponseInputs[span.traceId] =
                    this._firstResponseInputs[span.traceId] ?? inputs;
                this._lastResponseOutputs[span.traceId] = outputs;
                await this._maybePostTrace(span.traceId, inputs);
            }
            // End the run
            if (span.endedAt) {
                await run.end(undefined, undefined, new Date(span.endedAt).getTime());
            }
            else {
                await run.end();
            }
            if (this._unpostedSpans.has(span.spanId)) {
                this._unpostedSpans.delete(span.spanId);
                await run.postRun();
            }
            else {
                await run.patchRun(span.spanData.type === "agent" ? undefined : { excludeInputs: true });
            }
        }
        catch (e) {
            console.error("Error updating span run:", e);
        }
    }
    /**
     * Post the trace's root run (with the given inputs) if it has not been
     * posted yet; no-op otherwise.
     */
    async _maybePostTrace(traceId, inputs) {
        if (this._unpostedTraces.has(traceId)) {
            const traceRun = this._runs.get(traceId);
            if (traceRun) {
                traceRun.inputs = inputs;
                try {
                    await traceRun.postRun();
                }
                catch (e) {
                    console.error("Error posting trace:", e);
                }
                this._unpostedTraces.delete(traceId);
            }
        }
    }
    /** Flush all buffered runs and wait for in-flight batches to settle. */
    async shutdown() {
        await this.client.flush();
        await this.client.awaitPendingTraceBatches();
    }
    /** Same as {@link shutdown}: flush buffered runs and pending batches. */
    async forceFlush() {
        await this.client.flush();
        await this.client.awaitPendingTraceBatches();
    }
}
|