@rollyjoely/mlflow-langchain 0.1.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +42 -0
- package/dist/index.d.ts +23 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +313 -0
- package/package.json +52 -0
package/README.md
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
# MLflow LangChain Integration
|
|
2
|
+
|
|
3
|
+
Auto-instrumentation for [LangChain](https://js.langchain.com/) chat models with MLflow Tracing.
|
|
4
|
+
|
|
5
|
+
## Installation
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
npm install @rollyjoely/mlflow-langchain @rollyjoely/mlflow-tracing @langchain/core
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
## Usage
|
|
12
|
+
|
|
13
|
+
```typescript
|
|
14
|
+
import * as mlflow from '@rollyjoely/mlflow-tracing';
|
|
15
|
+
import { tracedModel } from '@rollyjoely/mlflow-langchain';
|
|
16
|
+
import { ChatAnthropic } from '@langchain/anthropic';
|
|
17
|
+
|
|
18
|
+
mlflow.init({
|
|
19
|
+
trackingUri: 'http://localhost:5000',
|
|
20
|
+
experimentId: '<experiment-id>',
|
|
21
|
+
});
|
|
22
|
+
|
|
23
|
+
const model = tracedModel(new ChatAnthropic({ model: 'claude-sonnet-4-5-20250514' }));
|
|
24
|
+
|
|
25
|
+
// Both invoke() and stream() are automatically traced
|
|
26
|
+
const result = await model.invoke([{ role: 'user', content: 'Hello!' }]);
|
|
27
|
+
```
|
|
28
|
+
|
|
29
|
+
## Supported Models
|
|
30
|
+
|
|
31
|
+
- `ChatAnthropic` (`@langchain/anthropic`)
|
|
32
|
+
- `ChatOpenAI` (`@langchain/openai`)
|
|
33
|
+
- `ChatXAI` (`@langchain/xai`)
|
|
34
|
+
- Any `BaseChatModel` subclass with `invoke()` / `stream()` methods
|
|
35
|
+
|
|
36
|
+
## Features
|
|
37
|
+
|
|
38
|
+
- Traces `invoke()` and `stream()` calls as LLM spans
|
|
39
|
+
- Captures input messages and output content
|
|
40
|
+
- Extracts token usage from `usage_metadata`
|
|
41
|
+
- Auto-detects message format from model class name
|
|
42
|
+
- Preserves tracing through `bindTools()` calls
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
/**
 * MLflow Tracing wrapper for LangChain BaseChatModel.
 *
 * Wraps LangChain chat models to produce well-formatted MLflow spans
 * for invoke() and stream() calls. Works with all LangChain providers:
 * ChatAnthropic, ChatOpenAI, ChatXAI, and any BaseChatModel subclass.
 */
/**
 * Create a traced version of a LangChain BaseChatModel with MLflow tracing.
 *
 * Wraps `invoke()` and `stream()` to produce LLM spans with:
 * - Input messages
 * - Output content
 * - Token usage from usage_metadata
 * - Message format (auto-detected from model class name)
 *
 * Also wraps `bindTools()` so that models with bound tools remain traced.
 *
 * @typeParam T - Concrete chat model type; preserved so the traced value
 *                keeps the caller's typing. Defaults to `any`.
 * @param model - The LangChain BaseChatModel instance to trace
 * @returns Traced model with the same interface
 */
export declare function tracedModel<T = any>(model: T): T;
//# sourceMappingURL=index.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA;;;;;;GAMG;AAsCH;;;;;;;;;;;;;GAaG;AACH,wBAAgB,WAAW,CAAC,CAAC,GAAG,GAAG,EAAE,KAAK,EAAE,CAAC,GAAG,CAAC,CAoChD"}
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,313 @@
|
|
|
1
|
+
"use strict";
/**
 * MLflow Tracing wrapper for LangChain BaseChatModel.
 *
 * Wraps LangChain chat models to produce well-formatted MLflow spans
 * for invoke() and stream() calls. Works with all LangChain providers:
 * ChatAnthropic, ChatOpenAI, ChatXAI, and any BaseChatModel subclass.
 */
Object.defineProperty(exports, "__esModule", { value: true });
// `tracedModel` is hoisted, so exporting before its declaration is safe.
exports.tracedModel = tracedModel;
const mlflow_tracing_1 = require("@rollyjoely/mlflow-tracing");
/**
 * Known LangChain model class names mapped to MLflow message format strings.
 * Used to set the MESSAGE_FORMAT span attribute for proper UI rendering.
 */
const MODEL_CLASS_TO_FORMAT = {
    ChatAnthropic: 'langchain-anthropic',
    ChatOpenAI: 'langchain-openai',
    ChatXAI: 'langchain-openai',
    ChatGoogleGenerativeAI: 'langchain-gemini',
};
|
|
23
|
+
/**
 * Detect the message format from the model's class name.
 * Falls back to 'langchain' for unknown model types.
 *
 * Uses an own-property check rather than `in`, so inherited
 * Object.prototype keys (e.g. a class named 'toString' or 'constructor')
 * can never resolve to a non-format value from the prototype chain.
 */
function detectMessageFormat(model) {
    const className = model?.constructor?.name;
    if (className && Object.prototype.hasOwnProperty.call(MODEL_CLASS_TO_FORMAT, className)) {
        return MODEL_CLASS_TO_FORMAT[className];
    }
    return 'langchain';
}
|
|
34
|
+
/**
 * Create a traced version of a LangChain BaseChatModel with MLflow tracing.
 *
 * Wraps `invoke()` and `stream()` to produce LLM spans with:
 * - Input messages
 * - Output content
 * - Token usage from usage_metadata
 * - Message format (auto-detected from model class name)
 *
 * Also wraps `bindTools()` so that models with bound tools remain traced.
 *
 * @param model - The LangChain BaseChatModel instance to trace
 * @returns Traced model with the same interface
 */
function tracedModel(model) {
    // Primitives and nullish values cannot be proxied; hand them back as-is.
    if (!model || typeof model !== 'object') {
        return model;
    }
    const format = detectMessageFormat(model);
    return new Proxy(model, {
        get(inner, prop, receiver) {
            const value = Reflect.get(inner, prop, receiver);
            if (typeof value !== 'function') {
                return value;
            }
            switch (prop) {
                case 'invoke':
                    // eslint-disable-next-line @typescript-eslint/ban-types
                    return wrapInvoke(value, inner, format);
                case 'stream':
                    // eslint-disable-next-line @typescript-eslint/ban-types
                    return wrapStream(value, inner, format);
                case 'bindTools':
                    // Re-wrap the bound model so tool-augmented copies stay traced.
                    return (...toolArgs) => {
                        // eslint-disable-next-line @typescript-eslint/no-unsafe-return
                        return tracedModel(value.apply(inner, toolArgs));
                    };
                default:
                    // Bind all other methods to the underlying model.
                    // eslint-disable-next-line @typescript-eslint/no-unsafe-return, @typescript-eslint/no-unsafe-member-access, @typescript-eslint/no-unsafe-call
                    return value.bind(inner);
            }
        },
    });
}
|
|
81
|
+
/**
 * Wrap the invoke() method with MLflow tracing.
 * Creates an LLM span that captures inputs, outputs, and token usage.
 */
// eslint-disable-next-line @typescript-eslint/ban-types
function wrapInvoke(fn, target, messageFormat) {
    return function (...args) {
        const traced = async (span) => {
            span.setInputs(serializeInput(args[0]));
            const result = await fn.apply(target, args);
            span.setOutputs(serializeOutput(result));
            try {
                const usage = extractTokenUsage(result);
                if (usage) {
                    span.setAttribute(mlflow_tracing_1.SpanAttributeKey.TOKEN_USAGE, usage);
                }
            }
            catch (error) {
                // Usage extraction is best-effort; never fail the call over it.
                console.debug('Error extracting token usage from LangChain response', error);
            }
            span.setAttribute(mlflow_tracing_1.SpanAttributeKey.MESSAGE_FORMAT, messageFormat);
            // eslint-disable-next-line @typescript-eslint/no-unsafe-return
            return result;
        };
        // eslint-disable-next-line @typescript-eslint/no-unsafe-return
        return (0, mlflow_tracing_1.withSpan)(traced, { name: 'ChatModel', spanType: mlflow_tracing_1.SpanType.LLM });
    };
}
|
|
108
|
+
/**
 * Wrap the stream() method with MLflow tracing.
 * Creates an LLM span that wraps the async iterator, collecting chunks
 * and recording the aggregated result when iteration completes.
 */
// eslint-disable-next-line @typescript-eslint/ban-types
function wrapStream(fn, target, messageFormat) {
    return function (...args) {
        const inputs = serializeInput(args[0]);
        // Depending on the LangChain version, stream() hands back either an
        // IterableReadableStream directly or a promise resolving to one.
        const result = fn.apply(target, args);
        const isThenable = result && typeof result.then === 'function';
        if (isThenable) {
            // Async variant: wrap once the stream resolves.
            // eslint-disable-next-line @typescript-eslint/no-unsafe-return
            return result.then((stream) => wrapAsyncIterable(stream, inputs, messageFormat));
        }
        // Sync variant: the iterable is already available.
        return wrapAsyncIterable(result, inputs, messageFormat);
    };
}
|
|
133
|
+
/**
 * Wrap an async iterable (stream result) with MLflow tracing.
 * Returns a proxy that intercepts Symbol.asyncIterator to add span tracking.
 */
function wrapAsyncIterable(stream, inputs, messageFormat) {
    // Only the first iterator carries the tracing wrapper; any re-iteration
    // falls through to the raw stream so we never open duplicate spans.
    let claimed = false;
    // eslint-disable-next-line @typescript-eslint/no-unsafe-return
    return new Proxy(stream, {
        get(inner, prop, receiver) {
            const value = Reflect.get(inner, prop, receiver);
            if (prop === Symbol.asyncIterator) {
                return function () {
                    if (claimed) {
                        // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access, @typescript-eslint/no-unsafe-return, @typescript-eslint/no-unsafe-call
                        return inner[Symbol.asyncIterator]();
                    }
                    claimed = true;
                    // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access, @typescript-eslint/no-unsafe-call, @typescript-eslint/no-unsafe-argument
                    return wrapStreamIterator(inner[Symbol.asyncIterator](), inputs, messageFormat);
                };
            }
            if (typeof value !== 'function') {
                // eslint-disable-next-line @typescript-eslint/no-unsafe-return
                return value;
            }
            // eslint-disable-next-line @typescript-eslint/no-unsafe-return, @typescript-eslint/no-unsafe-member-access, @typescript-eslint/no-unsafe-call
            return value.bind(inner);
        },
    });
}
|
|
163
|
+
/**
 * Wrap an async iterator with MLflow span tracking.
 * Collects all chunks during iteration and records aggregated outputs on completion.
 *
 * Span lifecycle: the LLM span opens before the first chunk is pulled and is
 * closed in the `finally` block, so it ends whether the stream completes,
 * throws, or is abandoned early (a consumer `break` drives the generator's
 * return(), which still runs `finally` with no iterationError set).
 */
async function* wrapStreamIterator(iterator, inputs, messageFormat) {
    // Nest under the currently active span, if one exists.
    const parentSpan = (0, mlflow_tracing_1.getCurrentActiveSpan)();
    const span = (0, mlflow_tracing_1.startSpan)({ name: 'ChatModel', spanType: mlflow_tracing_1.SpanType.LLM, parent: parentSpan ?? undefined });
    span.setInputs(inputs);
    const chunks = [];
    let iterationError;
    try {
        while (true) {
            const { value, done } = await iterator.next();
            if (done) {
                break;
            }
            // Buffer every chunk so the aggregate output can be recorded
            // once the stream finishes.
            chunks.push(value);
            yield value;
        }
    }
    catch (error) {
        // Remember the failure for the finally block, then surface it.
        iterationError = error;
        throw error;
    }
    finally {
        if (iterationError) {
            span.setAttribute(mlflow_tracing_1.SpanAttributeKey.MESSAGE_FORMAT, messageFormat);
            span.setStatus(mlflow_tracing_1.SpanStatusCode.ERROR, iterationError.message);
            span.end();
        }
        else {
            // Success (or early consumer exit): record whatever chunks were seen.
            try {
                // Aggregate chunks using LangChain's concat pattern
                const aggregated = aggregateChunks(chunks);
                if (aggregated) {
                    span.setOutputs(serializeOutput(aggregated));
                    const usage = extractTokenUsage(aggregated);
                    if (usage) {
                        span.setAttribute(mlflow_tracing_1.SpanAttributeKey.TOKEN_USAGE, usage);
                    }
                }
            }
            catch (e) {
                // Best-effort: aggregation failure must not break the consumer.
                console.debug('Could not aggregate stream chunks', e);
            }
            span.setAttribute(mlflow_tracing_1.SpanAttributeKey.MESSAGE_FORMAT, messageFormat);
            span.end();
        }
    }
}
|
|
213
|
+
/**
 * Aggregate LangChain AIMessageChunks using their concat() method.
 * This is the same pattern used by LangChain internally for combining streamed chunks.
 *
 * Returns undefined for an empty list, the sole element for a singleton,
 * and the last raw chunk when the elements do not support concat().
 */
function aggregateChunks(chunks) {
    if (chunks.length === 0) {
        return undefined;
    }
    try {
        // Fold left-to-right via AIMessageChunk.concat(); a singleton list
        // skips the loop and returns its only element unchanged.
        let merged = chunks[0];
        for (const next of chunks.slice(1)) {
            // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access, @typescript-eslint/no-unsafe-call, @typescript-eslint/no-unsafe-assignment
            merged = merged.concat(next);
        }
        // eslint-disable-next-line @typescript-eslint/no-unsafe-return
        return merged;
    }
    catch {
        // Fallback: return last chunk if concat is not available.
        return chunks[chunks.length - 1];
    }
}
|
|
232
|
+
/**
 * Serialize LangChain message input for span recording.
 * Handles both array of BaseMessage objects and raw input formats.
 *
 * Message-like array elements (objects carrying a `content` field) are
 * flattened into plain `{ role, content, name?, tool_calls? }` records;
 * anything else passes through unchanged.
 */
function serializeInput(input) {
    // Nullish inputs and non-array shapes are recorded verbatim.
    if (!input || !Array.isArray(input)) {
        // eslint-disable-next-line @typescript-eslint/no-unsafe-return
        return input;
    }
    const toRecord = (msg) => {
        // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access
        const looksLikeMessage = msg && typeof msg === 'object' && msg.content !== undefined;
        if (!looksLikeMessage) {
            // eslint-disable-next-line @typescript-eslint/no-unsafe-return
            return msg;
        }
        // role: prefer LangChain's _getType(), fall back to the class name.
        // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access, @typescript-eslint/no-unsafe-assignment, @typescript-eslint/no-unsafe-call
        const role = msg._getType?.() ?? msg.constructor?.name ?? 'unknown';
        // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access
        return Object.assign({ role, content: msg.content }, msg.name ? { name: msg.name } : null, msg.tool_calls ? { tool_calls: msg.tool_calls } : null);
    };
    return input.map(toRecord);
}
|
|
262
|
+
/**
 * Serialize LangChain message output for span recording.
 * Extracts the key fields from AIMessage/AIMessageChunk.
 *
 * Objects without a `content` field (and all non-objects) pass through
 * unchanged; `tool_calls` is included only when non-empty, and
 * `usage_metadata` only when present.
 */
function serializeOutput(output) {
    // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access
    const isMessageLike = output && typeof output === 'object' && output.content !== undefined;
    if (!isMessageLike) {
        // eslint-disable-next-line @typescript-eslint/no-unsafe-return
        return output;
    }
    // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access
    const hasToolCalls = output.tool_calls && output.tool_calls.length > 0;
    // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access
    return Object.assign({ content: output.content, response_metadata: output.response_metadata }, hasToolCalls ? { tool_calls: output.tool_calls } : null, output.usage_metadata ? { usage_metadata: output.usage_metadata } : null);
}
|
|
292
|
+
/**
 * Extract token usage from a LangChain response.
 * LangChain provides usage_metadata with input_tokens/output_tokens fields.
 *
 * Accepts either snake_case or camelCase counters and normalizes them into
 * MLflow's token-usage shape; returns undefined when no usage is attached.
 */
function extractTokenUsage(response) {
    // LangChain standardized usage_metadata
    // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access
    const usage = response?.usage_metadata;
    if (!usage) {
        return undefined;
    }
    // Missing counters default to 0 so total_tokens is always a number.
    // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access
    const promptTokens = usage.input_tokens ?? usage.inputTokens ?? 0;
    // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access
    const completionTokens = usage.output_tokens ?? usage.outputTokens ?? 0;
    return {
        input_tokens: promptTokens,
        output_tokens: completionTokens,
        total_tokens: promptTokens + completionTokens,
    };
}
|
package/package.json
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@rollyjoely/mlflow-langchain",
|
|
3
|
+
"version": "0.1.3",
|
|
4
|
+
"description": "LangChain integration package for MLflow Tracing",
|
|
5
|
+
"repository": {
|
|
6
|
+
"type": "git",
|
|
7
|
+
"url": "https://github.com/mlflow/mlflow.git"
|
|
8
|
+
},
|
|
9
|
+
"homepage": "https://mlflow.org/",
|
|
10
|
+
"author": {
|
|
11
|
+
"name": "MLflow",
|
|
12
|
+
"url": "https://mlflow.org/"
|
|
13
|
+
},
|
|
14
|
+
"bugs": {
|
|
15
|
+
"url": "https://github.com/mlflow/mlflow/issues"
|
|
16
|
+
},
|
|
17
|
+
"license": "Apache-2.0",
|
|
18
|
+
"keywords": [
|
|
19
|
+
"mlflow",
|
|
20
|
+
"tracing",
|
|
21
|
+
"observability",
|
|
22
|
+
"opentelemetry",
|
|
23
|
+
"llm",
|
|
24
|
+
"langchain",
|
|
25
|
+
"javascript",
|
|
26
|
+
"typescript"
|
|
27
|
+
],
|
|
28
|
+
"main": "dist/index.js",
|
|
29
|
+
"types": "dist/index.d.ts",
|
|
30
|
+
"scripts": {
|
|
31
|
+
"build": "tsc",
|
|
32
|
+
"test": "jest",
|
|
33
|
+
"lint": "eslint . --ext .ts --max-warnings 0",
|
|
34
|
+
"lint:fix": "eslint . --ext .ts --fix",
|
|
35
|
+
"format": "prettier --write .",
|
|
36
|
+
"format:check": "prettier --check ."
|
|
37
|
+
},
|
|
38
|
+
"peerDependencies": {
|
|
39
|
+
"@langchain/core": ">=0.3.0",
|
|
40
|
+
"@rollyjoely/mlflow-tracing": "^0.1.3"
|
|
41
|
+
},
|
|
42
|
+
"devDependencies": {
|
|
43
|
+
"jest": "^29.6.2",
|
|
44
|
+
"typescript": "^5.8.3"
|
|
45
|
+
},
|
|
46
|
+
"engines": {
|
|
47
|
+
"node": ">=18"
|
|
48
|
+
},
|
|
49
|
+
"files": [
|
|
50
|
+
"dist/"
|
|
51
|
+
]
|
|
52
|
+
}
|