langsmith 0.0.71-rc.5 → 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -71,21 +71,261 @@ console.log(response);
 
  ### Logging Traces Outside LangChain
 
- _Note: this API is experimental and may change in the future_
-
  You can still use the LangSmith development platform without depending on any
  LangChain code. You can connect either by setting the appropriate environment variables,
  or by directly specifying the connection information in the RunTree.
 
  1. **Copy the environment variables from the Settings Page and add them to your application.**
 
- ```typescript
- process.env["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"; // or your own server
- process.env["LANGCHAIN_API_KEY"] = "<YOUR-LANGSMITH-API-KEY>";
- // process.env["LANGCHAIN_PROJECT"] = "My Project Name"; // Optional: "default" is used if not set
+ ```shell
+ export LANGCHAIN_API_KEY=<YOUR-LANGSMITH-API-KEY>
+ # export LANGCHAIN_PROJECT="My Project Name" # Optional: "default" is used if not set
+ # export LANGCHAIN_ENDPOINT=https://api.smith.langchain.com # or your own server
+ ```
+
+ ## Integrations
+
+ LangSmith's `traceable` wrapper function makes it easy to trace any function or LLM call in your favorite framework. Below are some examples.
+
+ ### OpenAI SDK
+
+ <!-- markdown-link-check-disable -->
+ The easiest way to trace calls from the [OpenAI SDK](https://platform.openai.com/docs/api-reference) with LangSmith
+ is to use the `traceable` wrapper function, available in LangSmith 0.1.0 and up.
+
+ To use it, you first need to set your LangSmith API key:
+
+ ```shell
+ export LANGCHAIN_API_KEY=<your-api-key>
+ ```
+
+ Next, you will need to install the LangSmith SDK and the OpenAI SDK:
+
+ ```shell
+ npm install langsmith openai
+ ```
+
+ After that, initialize your OpenAI client:
+
+ ```ts
+ import { OpenAI } from "openai";
+
+ const client = new OpenAI();
+ ```
+
+ Then, you can wrap the client methods you want to use by passing them to the `traceable` function like this:
+
+ ```ts
+ import { traceable } from "langsmith/traceable";
+
+ const createCompletion = traceable(
+   client.chat.completions.create.bind(client.chat.completions),
+   { name: "OpenAI Chat Completion", run_type: "llm" }
+ );
+ ```
+
+ Note the use of `.bind` to preserve the function's context. The `run_type` field in the extra config object
+ marks the function as an LLM call, and enables token usage tracking for OpenAI.
+
+ This new method takes the same exact arguments and has the same return type as the original method,
+ but will log everything to LangSmith!
+
+ ```ts
+ await createCompletion({
+   model: "gpt-3.5-turbo",
+   messages: [{ content: "Hi there!", role: "user" }],
+ });
+ ```
+
+ ```
+ {
+   id: 'chatcmpl-8sOWEOYVyehDlyPcBiaDtTxWvr9v6',
+   object: 'chat.completion',
+   created: 1707974654,
+   model: 'gpt-3.5-turbo-0613',
+   choices: [
+     {
+       index: 0,
+       message: { role: 'assistant', content: 'Hello! How can I help you today?' },
+       logprobs: null,
+       finish_reason: 'stop'
+     }
+   ],
+   usage: { prompt_tokens: 10, completion_tokens: 9, total_tokens: 19 },
+   system_fingerprint: null
+ }
+ ```
+
+ This also works for streaming:
+
+ ```ts
+ const stream = await createCompletion({
+   model: "gpt-3.5-turbo",
+   stream: true,
+   messages: [{ content: "Hi there!", role: "user" }],
+ });
+ ```
+
+ ```ts
+ for await (const chunk of stream) {
+   console.log(chunk);
+ }
  ```
 
- 2. **Log traces using a RunTree.**
+ Oftentimes, you use the OpenAI client inside of other functions or as part of a longer
+ sequence. You can automatically get nested traces by using this wrapped method
+ within other functions wrapped with `traceable`.
+
+ ```ts
+ const nestedTrace = traceable(async (text: string) => {
+   const completion = await createCompletion({
+     model: "gpt-3.5-turbo",
+     messages: [{ content: text, role: "user" }],
+   });
+   return completion;
+ });
+
+ await nestedTrace("Why is the sky blue?");
+ ```
+
+ ```
+ {
+   "id": "chatcmpl-8sPToJQLLVepJvyeTfzZMOMVIKjMo",
+   "object": "chat.completion",
+   "created": 1707978348,
+   "model": "gpt-3.5-turbo-0613",
+   "choices": [
+     {
+       "index": 0,
+       "message": {
+         "role": "assistant",
+         "content": "The sky appears blue because of a phenomenon known as Rayleigh scattering. The Earth's atmosphere is composed of tiny molecules, such as nitrogen and oxygen, which are much smaller than the wavelength of visible light. When sunlight interacts with these molecules, it gets scattered in all directions. However, shorter wavelengths of light (blue and violet) are scattered more compared to longer wavelengths (red, orange, and yellow). \n\nAs a result, when sunlight passes through the Earth's atmosphere, the blue and violet wavelengths are scattered in all directions, making the sky appear blue. This scattering of shorter wavelengths is also responsible for the vibrant colors observed during sunrise and sunset, when the sunlight has to pass through a thicker portion of the atmosphere, causing the longer wavelengths to dominate the scattered light."
+       },
+       "logprobs": null,
+       "finish_reason": "stop"
+     }
+   ],
+   "usage": {
+     "prompt_tokens": 13,
+     "completion_tokens": 154,
+     "total_tokens": 167
+   },
+   "system_fingerprint": null
+ }
+ ```
+
+ :::tip
+ [Click here](https://smith.langchain.com/public/4af46ef6-b065-46dc-9cf0-70f1274edb01/r) to see an example LangSmith trace of the above.
+ :::
+
+ ### Next.js
+
+ You can use the `traceable` wrapper function in Next.js apps to wrap arbitrary functions much like in the example above.
+
+ One neat trick you can use for Next.js and other similar server frameworks is to wrap the entire exported handler for a route
+ to group traces for any sub-runs. Here's an example:
+
+ ```ts
+ import { NextRequest, NextResponse } from "next/server";
+
+ import { OpenAI } from "openai";
+ import { traceable } from "langsmith/traceable";
+
+ export const runtime = "edge";
+
+ const handler = traceable(
+   async function () {
+     const openai = new OpenAI();
+     const createCompletion = traceable(
+       openai.chat.completions.create.bind(openai.chat.completions),
+       { name: "OpenAI Chat Completion", run_type: "llm" }
+     );
+
+     const completion = await createCompletion({
+       model: "gpt-3.5-turbo",
+       messages: [{ content: "Why is the sky blue?", role: "user" }],
+     });
+
+     const response1 = completion.choices[0].message.content;
+
+     const completion2 = await createCompletion({
+       model: "gpt-3.5-turbo",
+       messages: [
+         { content: "Why is the sky blue?", role: "user" },
+         { content: response1, role: "assistant" },
+         { content: "Cool thank you!", role: "user" },
+       ],
+     });
+
+     const response2 = completion2.choices[0].message.content;
+
+     return {
+       text: response2,
+     };
+   },
+   {
+     name: "Simple Next.js handler",
+   }
+ );
+
+ export async function POST(req: NextRequest) {
+   const result = await handler();
+   return NextResponse.json(result);
+ }
+ ```
+
+ The two OpenAI calls within the handler will be traced with appropriate inputs, outputs,
+ and token usage information.
+
+ :::tip
+ [Click here](https://smith.langchain.com/public/faaf26ad-8c59-4622-bcfe-b7d896733ca6/r) to see an example LangSmith trace of the above.
+ :::
+
+ ### Vercel AI SDK
+
+ The [Vercel AI SDK](https://sdk.vercel.ai/docs) contains integrations with a variety of model providers.
+ Here's an example of how you can trace outputs in a Next.js handler:
+
+ ```ts
+ import { traceable } from 'langsmith/traceable';
+ import { OpenAIStream, StreamingTextResponse } from 'ai';
+
+ // Note: There are no types for the Mistral API client yet.
+ import MistralClient from '@mistralai/mistralai';
+
+ const client = new MistralClient(process.env.MISTRAL_API_KEY || '');
+
+ export async function POST(req: Request) {
+   // Extract the `messages` from the body of the request
+   const { messages } = await req.json();
+
+   const mistralChatStream = traceable(
+     client.chatStream.bind(client),
+     {
+       name: "Mistral Stream",
+       run_type: "llm",
+     }
+   );
+
+   const response = await mistralChatStream({
+     model: 'mistral-tiny',
+     maxTokens: 1000,
+     messages,
+   });
+
+   // Convert the response into a friendly text-stream. The Mistral client responses are
+   // compatible with the Vercel AI SDK OpenAIStream adapter.
+   const stream = OpenAIStream(response as any);
+
+   // Respond with the stream
+   return new StreamingTextResponse(stream);
+ }
+ ```
+
+ See the [AI SDK docs](https://sdk.vercel.ai/docs) for more examples.
+
+
+ #### Alternative: Log traces using a RunTree
 
  A RunTree tracks your application. Each RunTree object is required to have a name and run_type. These and other important attributes are as follows:
 
@@ -189,7 +429,9 @@ await parentRun.end({
  await parentRun.patchRun();
  ```
 
- ### Create a Dataset from Existing Runs
+ ## Evaluation
+
+ #### Create a Dataset from Existing Runs
 
  Once your runs are stored in LangSmith, you can convert them into a dataset.
  For this example, we will do so using the Client, but you can also do this using
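
The hunk is truncated here, but the workflow it describes can be sketched roughly as follows. This is a hedged sketch, not the README's own example: the `listRuns`, `createDataset`, and `createExample` calls follow the 0.1.x `Client` API as declared in `client.d.ts`, while the project name and the `executionOrder: 1` root-run filter are illustrative assumptions.

```ts
import { Client } from "langsmith";

const client = new Client();

// Hypothetical project name; substitute your own.
const projectName = "My Project Name";

// Create an empty dataset to hold the converted runs.
const dataset = await client.createDataset("Example Dataset", {
  description: "Root runs converted into examples",
});

// Iterate over root runs in the project and turn each one into an example.
for await (const run of client.listRuns({ projectName, executionOrder: 1 })) {
  await client.createExample(run.inputs, run.outputs ?? {}, {
    datasetId: dataset.id,
  });
}
```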
package/dist/client.cjs CHANGED
@@ -23,7 +23,7 @@ var __importStar = (this && this.__importStar) || function (mod) {
  return result;
  };
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.Client = void 0;
+ exports.Client = exports.Queue = void 0;
  const uuid = __importStar(require("uuid"));
  const async_caller_js_1 = require("./utils/async_caller.cjs");
  const messages_js_1 = require("./utils/messages.cjs");
@@ -98,6 +98,43 @@ function assertUuid(str) {
  throw new Error(`Invalid UUID: ${str}`);
  }
  }
+ class Queue {
+ constructor() {
+ Object.defineProperty(this, "items", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: []
+ });
+ }
+ get size() {
+ return this.items.length;
+ }
+ push(item) {
+ // this.items.push is synchronous with promise creation:
+ // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise/Promise
+ return new Promise((resolve) => {
+ this.items.push([item, resolve]);
+ });
+ }
+ pop(upToN) {
+ if (upToN < 1) {
+ throw new Error("Number of items to pop off may not be less than 1.");
+ }
+ const popped = [];
+ while (popped.length < upToN && this.items.length) {
+ const item = this.items.shift();
+ if (item) {
+ popped.push(item);
+ }
+ else {
+ break;
+ }
+ }
+ return [popped.map((it) => it[0]), () => popped.forEach((it) => it[1]())];
+ }
+ }
+ exports.Queue = Queue;
  class Client {
  constructor(config = {}) {
  Object.defineProperty(this, "apiKey", {
@@ -164,7 +201,7 @@ class Client {
  enumerable: true,
  configurable: true,
  writable: true,
- value: false
+ value: true
  });
  Object.defineProperty(this, "batchEndpointSupported", {
  enumerable: true,
@@ -172,11 +209,11 @@ class Client {
  writable: true,
  value: void 0
  });
- Object.defineProperty(this, "pendingAutoBatchedRuns", {
+ Object.defineProperty(this, "autoBatchQueue", {
  enumerable: true,
  configurable: true,
  writable: true,
- value: []
+ value: new Queue()
  });
  Object.defineProperty(this, "pendingAutoBatchedRunLimit", {
  enumerable: true,
@@ -384,45 +421,46 @@ class Client {
  return sampled;
  }
  }
- async triggerAutoBatchSend(runs) {
- let batch = runs;
- if (batch === undefined) {
- batch = this.pendingAutoBatchedRuns.slice(0, this.pendingAutoBatchedRunLimit);
- this.pendingAutoBatchedRuns = this.pendingAutoBatchedRuns.slice(this.pendingAutoBatchedRunLimit);
+ async drainAutoBatchQueue() {
+ while (this.autoBatchQueue.size >= 0) {
+ const [batch, done] = this.autoBatchQueue.pop(this.pendingAutoBatchedRunLimit);
+ if (!batch.length) {
+ done();
+ return;
+ }
+ try {
+ await this.batchIngestRuns({
+ runCreates: batch
+ .filter((item) => item.action === "create")
+ .map((item) => item.item),
+ runUpdates: batch
+ .filter((item) => item.action === "update")
+ .map((item) => item.item),
+ });
+ }
+ finally {
+ done();
+ }
  }
- await this.batchIngestRuns({
- runCreates: batch
- .filter((item) => item.action === "create")
- .map((item) => item.item),
- runUpdates: batch
- .filter((item) => item.action === "update")
- .map((item) => item.item),
- });
  }
- appendRunCreateToAutoBatchQueue(item) {
+ async processRunOperation(item, immediatelyTriggerBatch) {
  const oldTimeout = this.autoBatchTimeout;
  clearTimeout(this.autoBatchTimeout);
  this.autoBatchTimeout = undefined;
- this.pendingAutoBatchedRuns.push(item);
- while (this.pendingAutoBatchedRuns.length >= this.pendingAutoBatchedRunLimit) {
- const batch = this.pendingAutoBatchedRuns.slice(0, this.pendingAutoBatchedRunLimit);
- this.pendingAutoBatchedRuns = this.pendingAutoBatchedRuns.slice(this.pendingAutoBatchedRunLimit);
- void this.triggerAutoBatchSend(batch);
- }
- if (this.pendingAutoBatchedRuns.length > 0) {
- if (!oldTimeout) {
- this.autoBatchTimeout = setTimeout(() => {
- this.autoBatchTimeout = undefined;
- void this.triggerAutoBatchSend();
- }, this.autoBatchInitialDelayMs);
- }
- else {
- this.autoBatchTimeout = setTimeout(() => {
- this.autoBatchTimeout = undefined;
- void this.triggerAutoBatchSend();
- }, this.autoBatchAggregationDelayMs);
- }
+ const itemPromise = this.autoBatchQueue.push(item);
+ if (immediatelyTriggerBatch ||
+ this.autoBatchQueue.size > this.pendingAutoBatchedRunLimit) {
+ await this.drainAutoBatchQueue();
+ }
+ if (this.autoBatchQueue.size > 0) {
+ this.autoBatchTimeout = setTimeout(() => {
+ this.autoBatchTimeout = undefined;
+ void this.drainAutoBatchQueue();
+ }, oldTimeout
+ ? this.autoBatchAggregationDelayMs
+ : this.autoBatchInitialDelayMs);
  }
+ return itemPromise;
  }
  async batchEndpointIsSupported() {
  const response = await fetch(`${this.apiUrl}/info`, {
@@ -453,7 +491,7 @@ class Client {
  if (this.autoBatchTracing &&
  runCreate.trace_id !== undefined &&
  runCreate.dotted_order !== undefined) {
- this.appendRunCreateToAutoBatchQueue({
+ void this.processRunOperation({
  action: "create",
  item: runCreate,
  });
@@ -555,7 +593,15 @@ class Client {
  if (this.autoBatchTracing &&
  data.trace_id !== undefined &&
  data.dotted_order !== undefined) {
- this.appendRunCreateToAutoBatchQueue({ action: "update", item: data });
+ if (run.end_time !== undefined && data.parent_run_id === undefined) {
+ // Trigger a batch as soon as a root trace ends and block to ensure trace finishes
+ // in serverless environments.
+ await this.processRunOperation({ action: "update", item: data }, true);
+ return;
+ }
+ else {
+ void this.processRunOperation({ action: "update", item: data });
+ }
  return;
  }
  const headers = { ...this.headers, "Content-Type": "application/json" };
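
Taken together with the constructor change above (`autoBatchTracing` now defaults to `true`), this means updating a root run with an end time flushes the batch queue before the call resolves, per the inline comment about serverless environments. A rough sketch of the effect for callers, using the `traceable` wrapper from the README (the handler name and body are illustrative, not part of the SDK):

```ts
import { traceable } from "langsmith/traceable";

// Hypothetical root-level function; the name and body are illustrative only.
const handler = traceable(
  async (question: string) => {
    // ... call models or other traceable-wrapped functions here ...
    return `You asked: ${question}`;
  },
  { name: "My Serverless Handler" }
);

// With autoBatchTracing on by default, run creates/updates are queued and sent
// through the batch ingest endpoint. When the root run ends, the client drains
// the queue before the update resolves (see the hunk above), so awaiting the
// wrapped handler should be enough for the trace to leave a short-lived
// serverless invocation.
await handler("Why is the sky blue?");
```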
package/dist/client.d.ts CHANGED
@@ -66,6 +66,12 @@ export type CreateExampleOptions = {
  createdAt?: Date;
  exampleId?: string;
  };
+ export declare class Queue<T> {
+ items: [T, () => void][];
+ get size(): number;
+ push(item: T): Promise<void>;
+ pop(upToN: number): [T[], () => void];
+ }
  export declare class Client {
  private apiKey?;
  private apiUrl;
@@ -79,7 +85,7 @@ export declare class Client {
  private sampledPostUuids;
  private autoBatchTracing;
  private batchEndpointSupported?;
- private pendingAutoBatchedRuns;
+ private autoBatchQueue;
  private pendingAutoBatchedRunLimit;
  private autoBatchTimeout;
  private autoBatchInitialDelayMs;
@@ -103,8 +109,8 @@ export declare class Client {
  private _getPaginated;
  private _getCursorPaginatedList;
  private _filterForSampling;
- private triggerAutoBatchSend;
- private appendRunCreateToAutoBatchQueue;
+ private drainAutoBatchQueue;
+ private processRunOperation;
  protected batchEndpointIsSupported(): Promise<boolean>;
  createRun(run: CreateRunParams): Promise<void>;
  /**
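
The `Queue<T>` declaration above pairs each pushed item with a resolver: `push` returns a promise that settles only after a later `pop` has handed the item off and the returned `done` callback is invoked. A simplified standalone sketch of that contract (not the package's actual implementation, which appears in the `client.js` hunks below):

```ts
// Standalone sketch of the declared contract; error handling and the
// upToN < 1 guard from the real implementation are omitted.
type Resolver = () => void;

class MiniQueue<T> {
  private items: [T, Resolver][] = [];

  get size(): number {
    return this.items.length;
  }

  // Resolves once the item's batch has been popped and acknowledged via done().
  push(item: T): Promise<void> {
    return new Promise((resolve) => this.items.push([item, resolve]));
  }

  // Returns up to `upToN` items plus a callback that resolves their push promises.
  pop(upToN: number): [T[], () => void] {
    const popped = this.items.splice(0, upToN);
    return [popped.map(([item]) => item), () => popped.forEach(([, done]) => done())];
  }
}

// Usage: the promise from push() settles when the batch containing the item
// has been processed and acknowledged.
const queue = new MiniQueue<string>();
const delivered = queue.push("run-create");
const [batch, done] = queue.pop(10);
// ... send `batch` somewhere ...
done();
await delivered;
```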
package/dist/client.js CHANGED
@@ -72,6 +72,42 @@ function assertUuid(str) {
  throw new Error(`Invalid UUID: ${str}`);
  }
  }
+ export class Queue {
+ constructor() {
+ Object.defineProperty(this, "items", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: []
+ });
+ }
+ get size() {
+ return this.items.length;
+ }
+ push(item) {
+ // this.items.push is synchronous with promise creation:
+ // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise/Promise
+ return new Promise((resolve) => {
+ this.items.push([item, resolve]);
+ });
+ }
+ pop(upToN) {
+ if (upToN < 1) {
+ throw new Error("Number of items to pop off may not be less than 1.");
+ }
+ const popped = [];
+ while (popped.length < upToN && this.items.length) {
+ const item = this.items.shift();
+ if (item) {
+ popped.push(item);
+ }
+ else {
+ break;
+ }
+ }
+ return [popped.map((it) => it[0]), () => popped.forEach((it) => it[1]())];
+ }
+ }
  export class Client {
  constructor(config = {}) {
  Object.defineProperty(this, "apiKey", {
@@ -138,7 +174,7 @@ export class Client {
  enumerable: true,
  configurable: true,
  writable: true,
- value: false
+ value: true
  });
  Object.defineProperty(this, "batchEndpointSupported", {
  enumerable: true,
@@ -146,11 +182,11 @@ export class Client {
  writable: true,
  value: void 0
  });
- Object.defineProperty(this, "pendingAutoBatchedRuns", {
+ Object.defineProperty(this, "autoBatchQueue", {
  enumerable: true,
  configurable: true,
  writable: true,
- value: []
+ value: new Queue()
  });
  Object.defineProperty(this, "pendingAutoBatchedRunLimit", {
  enumerable: true,
@@ -358,45 +394,46 @@ export class Client {
  return sampled;
  }
  }
- async triggerAutoBatchSend(runs) {
- let batch = runs;
- if (batch === undefined) {
- batch = this.pendingAutoBatchedRuns.slice(0, this.pendingAutoBatchedRunLimit);
- this.pendingAutoBatchedRuns = this.pendingAutoBatchedRuns.slice(this.pendingAutoBatchedRunLimit);
+ async drainAutoBatchQueue() {
+ while (this.autoBatchQueue.size >= 0) {
+ const [batch, done] = this.autoBatchQueue.pop(this.pendingAutoBatchedRunLimit);
+ if (!batch.length) {
+ done();
+ return;
+ }
+ try {
+ await this.batchIngestRuns({
+ runCreates: batch
+ .filter((item) => item.action === "create")
+ .map((item) => item.item),
+ runUpdates: batch
+ .filter((item) => item.action === "update")
+ .map((item) => item.item),
+ });
+ }
+ finally {
+ done();
+ }
  }
- await this.batchIngestRuns({
- runCreates: batch
- .filter((item) => item.action === "create")
- .map((item) => item.item),
- runUpdates: batch
- .filter((item) => item.action === "update")
- .map((item) => item.item),
- });
  }
- appendRunCreateToAutoBatchQueue(item) {
+ async processRunOperation(item, immediatelyTriggerBatch) {
  const oldTimeout = this.autoBatchTimeout;
  clearTimeout(this.autoBatchTimeout);
  this.autoBatchTimeout = undefined;
- this.pendingAutoBatchedRuns.push(item);
- while (this.pendingAutoBatchedRuns.length >= this.pendingAutoBatchedRunLimit) {
- const batch = this.pendingAutoBatchedRuns.slice(0, this.pendingAutoBatchedRunLimit);
- this.pendingAutoBatchedRuns = this.pendingAutoBatchedRuns.slice(this.pendingAutoBatchedRunLimit);
- void this.triggerAutoBatchSend(batch);
- }
- if (this.pendingAutoBatchedRuns.length > 0) {
- if (!oldTimeout) {
- this.autoBatchTimeout = setTimeout(() => {
- this.autoBatchTimeout = undefined;
- void this.triggerAutoBatchSend();
- }, this.autoBatchInitialDelayMs);
- }
- else {
- this.autoBatchTimeout = setTimeout(() => {
- this.autoBatchTimeout = undefined;
- void this.triggerAutoBatchSend();
- }, this.autoBatchAggregationDelayMs);
- }
+ const itemPromise = this.autoBatchQueue.push(item);
+ if (immediatelyTriggerBatch ||
+ this.autoBatchQueue.size > this.pendingAutoBatchedRunLimit) {
+ await this.drainAutoBatchQueue();
+ }
+ if (this.autoBatchQueue.size > 0) {
+ this.autoBatchTimeout = setTimeout(() => {
+ this.autoBatchTimeout = undefined;
+ void this.drainAutoBatchQueue();
+ }, oldTimeout
+ ? this.autoBatchAggregationDelayMs
+ : this.autoBatchInitialDelayMs);
  }
+ return itemPromise;
  }
  async batchEndpointIsSupported() {
  const response = await fetch(`${this.apiUrl}/info`, {
@@ -427,7 +464,7 @@ export class Client {
  if (this.autoBatchTracing &&
  runCreate.trace_id !== undefined &&
  runCreate.dotted_order !== undefined) {
- this.appendRunCreateToAutoBatchQueue({
+ void this.processRunOperation({
  action: "create",
  item: runCreate,
  });
@@ -529,7 +566,15 @@ export class Client {
  if (this.autoBatchTracing &&
  data.trace_id !== undefined &&
  data.dotted_order !== undefined) {
- this.appendRunCreateToAutoBatchQueue({ action: "update", item: data });
+ if (run.end_time !== undefined && data.parent_run_id === undefined) {
+ // Trigger a batch as soon as a root trace ends and block to ensure trace finishes
+ // in serverless environments.
+ await this.processRunOperation({ action: "update", item: data }, true);
+ return;
+ }
+ else {
+ void this.processRunOperation({ action: "update", item: data });
+ }
  return;
  }
  const headers = { ...this.headers, "Content-Type": "application/json" };
package/dist/index.cjs CHANGED
@@ -6,4 +6,4 @@ Object.defineProperty(exports, "Client", { enumerable: true, get: function () {
  var run_trees_js_1 = require("./run_trees.cjs");
  Object.defineProperty(exports, "RunTree", { enumerable: true, get: function () { return run_trees_js_1.RunTree; } });
  // Update using yarn bump-version
- exports.__version__ = "0.0.71-rc.5";
+ exports.__version__ = "0.1.0";
package/dist/index.d.ts CHANGED
@@ -1,4 +1,4 @@
  export { Client } from "./client.js";
  export type { Dataset, Example, TracerSession, Run, Feedback, } from "./schemas.js";
  export { RunTree, type RunTreeConfig } from "./run_trees.js";
- export declare const __version__ = "0.0.71-rc.5";
+ export declare const __version__ = "0.1.0";
package/dist/index.js CHANGED
@@ -1,4 +1,4 @@
  export { Client } from "./client.js";
  export { RunTree } from "./run_trees.js";
  // Update using yarn bump-version
- export const __version__ = "0.0.71-rc.5";
+ export const __version__ = "0.1.0";
@@ -1,6 +1,34 @@
  import { RunTree, RunTreeConfig } from "./run_trees.js";
  export type RunTreeLike = RunTree;
- export type TraceableFunction<Inputs extends any[], Output> = (...rawInputs: Inputs | [RunTreeLike, ...Inputs]) => Promise<Output>;
+ type WrapArgReturnPair<Pair> = Pair extends [
+ infer Args extends any[],
+ infer Return
+ ] ? {
+ (...args: Args): Promise<Return>;
+ (...args: [runTree: RunTreeLike, ...rest: Args]): Promise<Return>;
+ } : never;
+ type UnionToIntersection<U> = (U extends any ? (x: U) => void : never) extends (x: infer I) => void ? I : never;
+ export type TraceableFunction<Func extends (...args: any[]) => any> = Func extends {
+ (...args: infer A1): infer R1;
+ (...args: infer A2): infer R2;
+ (...args: infer A3): infer R3;
+ (...args: infer A4): infer R4;
+ (...args: infer A5): infer R5;
+ } ? UnionToIntersection<WrapArgReturnPair<[A1, R1] | [A2, R2] | [A3, R3] | [A4, R4] | [A5, R5]>> : Func extends {
+ (...args: infer A1): infer R1;
+ (...args: infer A2): infer R2;
+ (...args: infer A3): infer R3;
+ (...args: infer A4): infer R4;
+ } ? UnionToIntersection<WrapArgReturnPair<[A1, R1] | [A2, R2] | [A3, R3] | [A4, R4]>> : Func extends {
+ (...args: infer A1): infer R1;
+ (...args: infer A2): infer R2;
+ (...args: infer A3): infer R3;
+ } ? UnionToIntersection<WrapArgReturnPair<[A1, R1] | [A2, R2] | [A3, R3]>> : Func extends {
+ (...args: infer A1): infer R1;
+ (...args: infer A2): infer R2;
+ } ? UnionToIntersection<WrapArgReturnPair<[A1, R1] | [A2, R2]>> : Func extends {
+ (...args: infer A1): infer R1;
+ } ? UnionToIntersection<WrapArgReturnPair<[A1, R1]>> : never;
  /**
  * Higher-order function that takes function as input and returns a
  * "TraceableFunction" - a wrapped version of the input that
@@ -15,5 +43,6 @@ export type TraceableFunction<Inputs extends any[], Output> = (...rawInputs: Inp
  * @param config Additional metadata such as name, tags or providing
  * a custom LangSmith client instance
  */
- export declare function traceable<Inputs extends any[], Output>(wrappedFunc: (...args: Inputs) => Output, config?: RunTreeConfig): TraceableFunction<Inputs, Output>;
- export declare function isTraceableFunction(x: unknown): x is TraceableFunction<any, any>;
+ export declare function traceable<Func extends (...args: any[]) => any>(wrappedFunc: Func, config?: Partial<RunTreeConfig>): TraceableFunction<Func>;
+ export declare function isTraceableFunction(x: unknown): x is TraceableFunction<any>;
+ export {};
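
The reworked `TraceableFunction` type preserves the wrapped function's own call signatures (up to five overloads) instead of collapsing them into a single `Inputs`/`Output` pair, and each signature also accepts an optional leading `RunTree`. A minimal sketch of what this looks like at a call site; `formatPrompt` and `parentRunTree` are hypothetical names, not part of the SDK:

```ts
import { traceable } from "langsmith/traceable";

// Hypothetical helper, used only to show the inferred types.
const formatPrompt = traceable(
  async (topic: string) => `Why is the ${topic} blue?`,
  { name: "Format Prompt" }
);

// The wrapper keeps the original signature, so the argument and the awaited
// return value stay fully typed.
const prompt: string = await formatPrompt("sky");

// Per the WrapArgReturnPair type above, an existing RunTree may also be passed
// as an optional first argument to attach the call to a parent trace:
// const prompt2 = await formatPrompt(parentRunTree, "sky");
```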
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "langsmith",
- "version": "0.0.71-rc.5",
+ "version": "0.1.0",
  "description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.",
  "packageManager": "yarn@1.22.19",
  "files": [