langsmith 0.1.1 → 0.1.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +31 -68
- package/dist/index.cjs +1 -1
- package/dist/index.d.ts +1 -1
- package/dist/index.js +1 -1
- package/dist/wrappers.cjs +12 -0
- package/dist/wrappers.d.ts +5 -0
- package/dist/wrappers.js +8 -0
- package/package.json +10 -1
- package/wrappers.cjs +1 -0
- package/wrappers.d.ts +1 -0
- package/wrappers.js +1 -0
package/README.md
CHANGED
|
@@ -90,6 +90,7 @@ Langsmith's `traceable` wrapper function makes it easy to trace any function or
|
|
|
90
90
|
### OpenAI SDK
|
|
91
91
|
|
|
92
92
|
<!-- markdown-link-check-disable -->
|
|
93
|
+
|
|
93
94
|
The easiest ways to trace calls from the [OpenAI SDK](https://platform.openai.com/docs/api-reference) with LangSmith
|
|
94
95
|
is using the `traceable` wrapper function available in LangSmith 0.1.0 and up.
|
|
95
96
|
|
|
@@ -105,72 +106,41 @@ Next, you will need to install the LangSmith SDK and the OpenAI SDK:
|
|
|
105
106
|
npm install langsmith openai
|
|
106
107
|
```
|
|
107
108
|
|
|
108
|
-
After that, initialize your OpenAI client:
|
|
109
|
+
After that, initialize your OpenAI client and wrap the client with `wrapOpenAI` method to enable tracing for Completion and Chat completion API:
|
|
109
110
|
|
|
110
111
|
```ts
|
|
111
112
|
import { OpenAI } from "openai";
|
|
113
|
+
import { wrapOpenAI } from "langsmith/wrappers";
|
|
114
|
+
|
|
115
|
+
const openai = wrapOpenAI(new OpenAI());
|
|
112
116
|
|
|
113
|
-
|
|
117
|
+
await openai.chat.completions.create({
|
|
118
|
+
model: "gpt-3.5-turbo",
|
|
119
|
+
messages: [{ content: "Hi there!", role: "user" }],
|
|
120
|
+
});
|
|
114
121
|
```
|
|
115
122
|
|
|
116
|
-
|
|
123
|
+
Alternatively, you can use the `traceable` function to wrap the client methods you want to use:
|
|
117
124
|
|
|
118
125
|
```ts
|
|
119
126
|
import { traceable } from "langsmith/traceable";
|
|
120
127
|
|
|
128
|
+
const openai = new OpenAI();
|
|
129
|
+
|
|
121
130
|
const createCompletion = traceable(
|
|
122
131
|
openai.chat.completions.create.bind(openai.chat.completions),
|
|
123
132
|
{ name: "OpenAI Chat Completion", run_type: "llm" }
|
|
124
133
|
);
|
|
125
|
-
```
|
|
126
|
-
|
|
127
|
-
Note the use of `.bind` to preserve the function's context. The `run_type` field in the extra config object
|
|
128
|
-
marks the function as an LLM call, and enables token usage tracking for OpenAI.
|
|
129
|
-
|
|
130
|
-
This new method takes the same exact arguments and has the same return type as the original method,
|
|
131
|
-
but will log everything to LangSmith!
|
|
132
134
|
|
|
133
|
-
```ts
|
|
134
135
|
await createCompletion({
|
|
135
136
|
model: "gpt-3.5-turbo",
|
|
136
137
|
messages: [{ content: "Hi there!", role: "user" }],
|
|
137
138
|
});
|
|
138
139
|
```
|
|
139
140
|
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
object: 'chat.completion',
|
|
144
|
-
created: 1707974654,
|
|
145
|
-
model: 'gpt-3.5-turbo-0613',
|
|
146
|
-
choices: [
|
|
147
|
-
{
|
|
148
|
-
index: 0,
|
|
149
|
-
message: { role: 'assistant', content: 'Hello! How can I help you today?' },
|
|
150
|
-
logprobs: null,
|
|
151
|
-
finish_reason: 'stop'
|
|
152
|
-
}
|
|
153
|
-
],
|
|
154
|
-
usage: { prompt_tokens: 10, completion_tokens: 9, total_tokens: 19 },
|
|
155
|
-
system_fingerprint: null
|
|
156
|
-
}
|
|
157
|
-
```
|
|
158
|
-
|
|
159
|
-
This also works for streaming:
|
|
160
|
-
|
|
161
|
-
```ts
|
|
162
|
-
const stream = await createCompletion({
|
|
163
|
-
model: "gpt-3.5-turbo",
|
|
164
|
-
stream: true,
|
|
165
|
-
messages: [{ content: "Hi there!", role: "user" }],
|
|
166
|
-
});
|
|
167
|
-
```
|
|
168
|
-
|
|
169
|
-
```ts
|
|
170
|
-
for await (const chunk of stream) {
|
|
171
|
-
console.log(chunk);
|
|
172
|
-
}
|
|
173
|
-
```
|
|
141
|
+
Note the use of `.bind` to preserve the function's context. The `run_type` field in the
|
|
142
|
+
extra config object marks the function as an LLM call, and enables token usage tracking
|
|
143
|
+
for OpenAI.
|
|
174
144
|
|
|
175
145
|
Oftentimes, you use the OpenAI client inside of other functions or as part of a longer
|
|
176
146
|
sequence. You can automatically get nested traces by using this wrapped method
|
|
@@ -178,7 +148,7 @@ within other functions wrapped with `traceable`.
|
|
|
178
148
|
|
|
179
149
|
```ts
|
|
180
150
|
const nestedTrace = traceable(async (text: string) => {
|
|
181
|
-
const completion = await createCompletion({
|
|
151
|
+
const completion = await openai.chat.completions.create({
|
|
182
152
|
model: "gpt-3.5-turbo",
|
|
183
153
|
messages: [{ content: text, role: "user" }],
|
|
184
154
|
});
|
|
@@ -230,25 +200,22 @@ import { NextRequest, NextResponse } from "next/server";
|
|
|
230
200
|
|
|
231
201
|
import { OpenAI } from "openai";
|
|
232
202
|
import { traceable } from "langsmith/traceable";
|
|
203
|
+
import { wrapOpenAI } from "langsmith/wrappers";
|
|
233
204
|
|
|
234
205
|
export const runtime = "edge";
|
|
235
206
|
|
|
236
207
|
const handler = traceable(
|
|
237
208
|
async function () {
|
|
238
|
-
const openai = new OpenAI();
|
|
239
|
-
const createCompletion = traceable(
|
|
240
|
-
openai.chat.completions.create.bind(openai.chat.completions),
|
|
241
|
-
{ name: "OpenAI Chat Completion", run_type: "llm" }
|
|
242
|
-
);
|
|
209
|
+
const openai = wrapOpenAI(new OpenAI());
|
|
243
210
|
|
|
244
|
-
const completion = await createCompletion({
|
|
211
|
+
const completion = await openai.chat.completions.create({
|
|
245
212
|
model: "gpt-3.5-turbo",
|
|
246
213
|
messages: [{ content: "Why is the sky blue?", role: "user" }],
|
|
247
214
|
});
|
|
248
215
|
|
|
249
216
|
const response1 = completion.choices[0].message.content;
|
|
250
217
|
|
|
251
|
-
const completion2 = await createCompletion({
|
|
218
|
+
const completion2 = await openai.chat.completions.create({
|
|
252
219
|
model: "gpt-3.5-turbo",
|
|
253
220
|
messages: [
|
|
254
221
|
{ content: "Why is the sky blue?", role: "user" },
|
|
@@ -287,28 +254,25 @@ The [Vercel AI SDK](https://sdk.vercel.ai/docs) contains integrations with a var
|
|
|
287
254
|
Here's an example of how you can trace outputs in a Next.js handler:
|
|
288
255
|
|
|
289
256
|
```ts
|
|
290
|
-
import { traceable } from "langsmith/traceable";
|
|
291
|
-
import { OpenAIStream, StreamingTextResponse } from "ai";
|
|
257
|
+
import { traceable } from "langsmith/traceable";
|
|
258
|
+
import { OpenAIStream, StreamingTextResponse } from "ai";
|
|
292
259
|
|
|
293
260
|
// Note: There are no types for the Mistral API client yet.
|
|
294
|
-
import MistralClient from "@mistralai/mistralai";
|
|
261
|
+
import MistralClient from "@mistralai/mistralai";
|
|
295
262
|
|
|
296
|
-
const client = new MistralClient(process.env.MISTRAL_API_KEY || "");
|
|
263
|
+
const client = new MistralClient(process.env.MISTRAL_API_KEY || "");
|
|
297
264
|
|
|
298
265
|
export async function POST(req: Request) {
|
|
299
266
|
// Extract the `messages` from the body of the request
|
|
300
267
|
const { messages } = await req.json();
|
|
301
268
|
|
|
302
|
-
const mistralChatStream = traceable(
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
306
|
-
run_type: "llm",
|
|
307
|
-
}
|
|
308
|
-
);
|
|
269
|
+
const mistralChatStream = traceable(client.chatStream.bind(client), {
|
|
270
|
+
name: "Mistral Stream",
|
|
271
|
+
run_type: "llm",
|
|
272
|
+
});
|
|
309
273
|
|
|
310
274
|
const response = await mistralChatStream({
|
|
311
|
-
model: "mistral-tiny",
|
|
275
|
+
model: "mistral-tiny",
|
|
312
276
|
maxTokens: 1000,
|
|
313
277
|
messages,
|
|
314
278
|
});
|
|
@@ -324,7 +288,6 @@ export async function POST(req: Request) {
|
|
|
324
288
|
|
|
325
289
|
See the [AI SDK docs](https://sdk.vercel.ai/docs) for more examples.
|
|
326
290
|
|
|
327
|
-
|
|
328
291
|
#### Alternatives: **Log traces using a RunTree.**
|
|
329
292
|
|
|
330
293
|
A RunTree tracks your application. Each RunTree object is required to have a name and run_type. These and other important attributes are as follows:
|
|
@@ -413,7 +376,7 @@ try {
|
|
|
413
376
|
await childChainRun.end({
|
|
414
377
|
error: `I errored again ${e.message}`,
|
|
415
378
|
});
|
|
416
|
-
await childChainRun.patchRun();
|
|
379
|
+
await childChainRun.patchRun();
|
|
417
380
|
throw e;
|
|
418
381
|
}
|
|
419
382
|
|
|
@@ -431,7 +394,7 @@ await parentRun.patchRun();
|
|
|
431
394
|
|
|
432
395
|
## Evaluation
|
|
433
396
|
|
|
434
|
-
####
|
|
397
|
+
#### Create a Dataset from Existing Runs
|
|
435
398
|
|
|
436
399
|
Once your runs are stored in LangSmith, you can convert them into a dataset.
|
|
437
400
|
For this example, we will do so using the Client, but you can also do this using
|
package/dist/index.cjs
CHANGED
|
@@ -6,4 +6,4 @@ Object.defineProperty(exports, "Client", { enumerable: true, get: function () {
|
|
|
6
6
|
var run_trees_js_1 = require("./run_trees.cjs");
|
|
7
7
|
Object.defineProperty(exports, "RunTree", { enumerable: true, get: function () { return run_trees_js_1.RunTree; } });
|
|
8
8
|
// Update using yarn bump-version
|
|
9
|
-
exports.__version__ = "0.1.1";
|
|
9
|
+
exports.__version__ = "0.1.3";
|
package/dist/index.d.ts
CHANGED
|
@@ -1,4 +1,4 @@
|
|
|
1
1
|
export { Client } from "./client.js";
|
|
2
2
|
export type { Dataset, Example, TracerSession, Run, Feedback, } from "./schemas.js";
|
|
3
3
|
export { RunTree, type RunTreeConfig } from "./run_trees.js";
|
|
4
|
-
export declare const __version__ = "0.1.1";
|
|
4
|
+
export declare const __version__ = "0.1.3";
|
package/dist/wrappers.cjs
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.wrapOpenAI = void 0;
|
|
4
|
+
const traceable_js_1 = require("./traceable.cjs");
|
|
5
|
+
const wrapOpenAI = (openai, options) => {
|
|
6
|
+
// @ts-expect-error Promise<APIPromise<...>> != APIPromise<...>
|
|
7
|
+
openai.chat.completions.create = (0, traceable_js_1.traceable)(openai.chat.completions.create.bind(openai.chat.completions), Object.assign({ name: "ChatOpenAI", run_type: "llm" }, options?.client));
|
|
8
|
+
// @ts-expect-error Promise<APIPromise<...>> != APIPromise<...>
|
|
9
|
+
openai.completions.create = (0, traceable_js_1.traceable)(openai.completions.create.bind(openai.completions), Object.assign({ name: "OpenAI", run_type: "llm" }, options?.client));
|
|
10
|
+
return openai;
|
|
11
|
+
};
|
|
12
|
+
exports.wrapOpenAI = wrapOpenAI;
|
package/dist/wrappers.js
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
import { traceable } from "./traceable.js";
|
|
2
|
+
export const wrapOpenAI = (openai, options) => {
|
|
3
|
+
// @ts-expect-error Promise<APIPromise<...>> != APIPromise<...>
|
|
4
|
+
openai.chat.completions.create = traceable(openai.chat.completions.create.bind(openai.chat.completions), Object.assign({ name: "ChatOpenAI", run_type: "llm" }, options?.client));
|
|
5
|
+
// @ts-expect-error Promise<APIPromise<...>> != APIPromise<...>
|
|
6
|
+
openai.completions.create = traceable(openai.completions.create.bind(openai.completions), Object.assign({ name: "OpenAI", run_type: "llm" }, options?.client));
|
|
7
|
+
return openai;
|
|
8
|
+
};
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "langsmith",
|
|
3
|
-
"version": "0.1.1",
|
|
3
|
+
"version": "0.1.3",
|
|
4
4
|
"description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.",
|
|
5
5
|
"packageManager": "yarn@1.22.19",
|
|
6
6
|
"files": [
|
|
@@ -20,6 +20,9 @@
|
|
|
20
20
|
"schemas.cjs",
|
|
21
21
|
"schemas.js",
|
|
22
22
|
"schemas.d.ts",
|
|
23
|
+
"wrappers.cjs",
|
|
24
|
+
"wrappers.js",
|
|
25
|
+
"wrappers.d.ts",
|
|
23
26
|
"index.cjs",
|
|
24
27
|
"index.js",
|
|
25
28
|
"index.d.ts"
|
|
@@ -80,6 +83,7 @@
|
|
|
80
83
|
"eslint-plugin-no-instanceof": "^1.0.1",
|
|
81
84
|
"eslint-plugin-prettier": "^4.2.1",
|
|
82
85
|
"jest": "^29.5.0",
|
|
86
|
+
"openai": "^4.28.0",
|
|
83
87
|
"prettier": "^2.8.8",
|
|
84
88
|
"ts-jest": "^29.1.0",
|
|
85
89
|
"ts-node": "^10.9.1",
|
|
@@ -129,6 +133,11 @@
|
|
|
129
133
|
"import": "./schemas.js",
|
|
130
134
|
"require": "./schemas.cjs"
|
|
131
135
|
},
|
|
136
|
+
"./wrappers": {
|
|
137
|
+
"types": "./wrappers.d.ts",
|
|
138
|
+
"import": "./wrappers.js",
|
|
139
|
+
"require": "./wrappers.cjs"
|
|
140
|
+
},
|
|
132
141
|
"./package.json": "./package.json"
|
|
133
142
|
}
|
|
134
143
|
}
|
package/wrappers.cjs
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
module.exports = require('./dist/wrappers.cjs');
|
package/wrappers.d.ts
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export * from './dist/wrappers.js'
|
package/wrappers.js
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export * from './dist/wrappers.js'
|