@artemiskit/adapter-langchain 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +133 -0
- package/README.md +151 -0
- package/dist/client.d.ts +73 -0
- package/dist/client.d.ts.map +1 -0
- package/dist/index.d.ts +21 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +170 -0
- package/dist/types.d.ts +87 -0
- package/dist/types.d.ts.map +1 -0
- package/package.json +54 -0
- package/src/client.test.ts +309 -0
- package/src/client.ts +264 -0
- package/src/index.ts +29 -0
- package/src/types.ts +100 -0
- package/tsconfig.json +13 -0
package/package.json
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@artemiskit/adapter-langchain",
|
|
3
|
+
"version": "0.2.0",
|
|
4
|
+
"description": "LangChain.js adapter for ArtemisKit - Test LangChain chains and agents",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"license": "Apache-2.0",
|
|
7
|
+
"author": "code-sensei",
|
|
8
|
+
"repository": {
|
|
9
|
+
"type": "git",
|
|
10
|
+
"url": "https://github.com/code-sensei/artemiskit.git",
|
|
11
|
+
"directory": "packages/adapters/langchain"
|
|
12
|
+
},
|
|
13
|
+
"bugs": {
|
|
14
|
+
"url": "https://github.com/code-sensei/artemiskit/issues"
|
|
15
|
+
},
|
|
16
|
+
"homepage": "https://artemiskit.vercel.app",
|
|
17
|
+
"keywords": ["llm", "langchain", "agents", "chains", "adapter", "artemiskit", "testing"],
|
|
18
|
+
"main": "./dist/index.js",
|
|
19
|
+
"types": "./dist/index.d.ts",
|
|
20
|
+
"exports": {
|
|
21
|
+
".": {
|
|
22
|
+
"import": "./dist/index.js",
|
|
23
|
+
"types": "./dist/index.d.ts"
|
|
24
|
+
}
|
|
25
|
+
},
|
|
26
|
+
"scripts": {
|
|
27
|
+
"build": "tsc && bun build ./src/index.ts --outdir ./dist --target bun",
|
|
28
|
+
"typecheck": "tsc --noEmit",
|
|
29
|
+
"clean": "rm -rf dist",
|
|
30
|
+
"test": "bun test"
|
|
31
|
+
},
|
|
32
|
+
"dependencies": {
|
|
33
|
+
"@artemiskit/core": "workspace:*",
|
|
34
|
+
"nanoid": "^5.0.0"
|
|
35
|
+
},
|
|
36
|
+
"peerDependencies": {
|
|
37
|
+
"@langchain/core": ">=0.1.0",
|
|
38
|
+
"langchain": ">=0.1.0"
|
|
39
|
+
},
|
|
40
|
+
"peerDependenciesMeta": {
|
|
41
|
+
"@langchain/core": {
|
|
42
|
+
"optional": true
|
|
43
|
+
},
|
|
44
|
+
"langchain": {
|
|
45
|
+
"optional": true
|
|
46
|
+
}
|
|
47
|
+
},
|
|
48
|
+
"devDependencies": {
|
|
49
|
+
"@langchain/core": "^0.3.0",
|
|
50
|
+
"@types/bun": "^1.1.0",
|
|
51
|
+
"langchain": "^0.3.0",
|
|
52
|
+
"typescript": "^5.3.0"
|
|
53
|
+
}
|
|
54
|
+
}
|
|
@@ -0,0 +1,309 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Tests for LangChain adapter
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
import { describe, expect, it, mock, spyOn } from 'bun:test';
|
|
6
|
+
import { LangChainAdapter, createLangChainAdapter } from './client';
|
|
7
|
+
import type { LangChainRunnable, LangChainRunnableOutput } from './types';
|
|
8
|
+
|
|
9
|
+
// Mock runnable factory for testing
|
|
10
|
+
function createMockRunnable(
|
|
11
|
+
outputOrFn:
|
|
12
|
+
| string
|
|
13
|
+
| LangChainRunnableOutput
|
|
14
|
+
| ((input: unknown) => Promise<LangChainRunnableOutput>),
|
|
15
|
+
options?: { supportsStreaming?: boolean }
|
|
16
|
+
): LangChainRunnable {
|
|
17
|
+
const runnable: LangChainRunnable = {
|
|
18
|
+
invoke: mock(async (input) => {
|
|
19
|
+
if (typeof outputOrFn === 'function') {
|
|
20
|
+
return outputOrFn(input);
|
|
21
|
+
}
|
|
22
|
+
if (typeof outputOrFn === 'string') {
|
|
23
|
+
return outputOrFn as unknown as LangChainRunnableOutput;
|
|
24
|
+
}
|
|
25
|
+
return outputOrFn;
|
|
26
|
+
}),
|
|
27
|
+
};
|
|
28
|
+
|
|
29
|
+
if (options?.supportsStreaming) {
|
|
30
|
+
runnable.stream = mock(async function* (input) {
|
|
31
|
+
const result = typeof outputOrFn === 'string' ? outputOrFn : 'streamed content';
|
|
32
|
+
for (const char of result) {
|
|
33
|
+
yield { content: char };
|
|
34
|
+
}
|
|
35
|
+
});
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
return runnable;
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
// Test suite for LangChainAdapter: construction/type detection, generate()
// output extraction across the response shapes LangChain produces, streaming
// (native and fallback), capability reporting, and close().
describe('LangChainAdapter', () => {
  describe('constructor', () => {
    it('should create adapter with runnable', () => {
      const runnable = createMockRunnable('test output');
      const adapter = new LangChainAdapter({ provider: 'langchain' }, runnable);

      expect(adapter.provider).toBe('langchain');
    });

    it('should detect agent type from properties', () => {
      // An `agent` property should trigger the adapter's agent auto-detection.
      // Only `provider` is asserted here; runnableType is private, so detection
      // is exercised indirectly (see the capabilities tests for observable effects).
      const agentRunnable = {
        ...createMockRunnable('test'),
        agent: {},
      } as LangChainRunnable;

      const adapter = new LangChainAdapter({ provider: 'langchain' }, agentRunnable);
      expect(adapter.provider).toBe('langchain');
    });

    it('should use configured runnable type', () => {
      // Explicit runnableType in the config bypasses auto-detection.
      const runnable = createMockRunnable('test');
      const adapter = new LangChainAdapter(
        { provider: 'langchain', runnableType: 'chain' },
        runnable
      );

      expect(adapter.provider).toBe('langchain');
    });
  });

  describe('generate', () => {
    it('should call runnable.invoke with string prompt', async () => {
      const runnable = createMockRunnable({ output: 'Hello, world!' });
      const adapter = new LangChainAdapter({ provider: 'langchain' }, runnable);

      const result = await adapter.generate({ prompt: 'Say hello' });

      // String prompts are wrapped under the default 'input' key.
      expect(runnable.invoke).toHaveBeenCalledWith({ input: 'Say hello' });
      expect(result.text).toBe('Hello, world!');
      expect(result.finishReason).toBe('stop');
    });

    it('should handle chat message array prompts', async () => {
      const runnable = createMockRunnable({ output: 'Response' });
      const adapter = new LangChainAdapter({ provider: 'langchain' }, runnable);

      const result = await adapter.generate({
        prompt: [
          { role: 'system', content: 'You are helpful' },
          { role: 'user', content: 'Hello' },
        ],
      });

      // Message arrays collapse to the last user message plus a `system` field.
      expect(runnable.invoke).toHaveBeenCalledWith(
        expect.objectContaining({
          input: 'Hello',
          system: 'You are helpful',
        })
      );
      expect(result.text).toBe('Response');
    });

    it('should handle direct string responses (StringOutputParser)', async () => {
      const runnable = createMockRunnable('Direct string response');
      const adapter = new LangChainAdapter({ provider: 'langchain' }, runnable);

      const result = await adapter.generate({ prompt: 'Test' });

      expect(result.text).toBe('Direct string response');
    });

    it('should handle content property responses', async () => {
      // AIMessage-style responses expose their text under `content`.
      const runnable = createMockRunnable({ content: 'Content response' });
      const adapter = new LangChainAdapter({ provider: 'langchain' }, runnable);

      const result = await adapter.generate({ prompt: 'Test' });

      expect(result.text).toBe('Content response');
    });

    it('should handle text property responses', async () => {
      const runnable = createMockRunnable({ text: 'Text response' });
      const adapter = new LangChainAdapter({ provider: 'langchain' }, runnable);

      const result = await adapter.generate({ prompt: 'Test' });

      expect(result.text).toBe('Text response');
    });

    it('should handle custom input/output keys', async () => {
      // inputKey/outputKey remap both the invoke payload and the result lookup.
      const runnable = createMockRunnable({ answer: 'Custom output' });
      const adapter = new LangChainAdapter(
        { provider: 'langchain', inputKey: 'question', outputKey: 'answer' },
        runnable
      );

      const result = await adapter.generate({ prompt: 'What is 2+2?' });

      expect(runnable.invoke).toHaveBeenCalledWith({ question: 'What is 2+2?' });
      expect(result.text).toBe('Custom output');
    });

    it('should capture intermediate steps from agent execution', async () => {
      // Agent executors report tool calls via `intermediateSteps`; the adapter
      // surfaces them in raw.metadata (toolsUsed is deduplicated tool names).
      const agentOutput: LangChainRunnableOutput = {
        output: 'Final answer',
        intermediateSteps: [
          {
            action: { tool: 'calculator', toolInput: { query: '2+2' } },
            observation: '4',
          },
          {
            action: { tool: 'search', toolInput: { query: 'meaning of life' } },
            observation: '42',
          },
        ],
      };

      const runnable = createMockRunnable(agentOutput);
      const adapter = new LangChainAdapter(
        { provider: 'langchain', runnableType: 'agent' },
        runnable
      );

      const result = await adapter.generate({ prompt: 'Complex question' });

      expect(result.text).toBe('Final answer');
      expect(result.raw).toMatchObject({
        metadata: {
          runnableType: 'agent',
          toolsUsed: ['calculator', 'search'],
          totalToolCalls: 2,
        },
      });
    });

    it('should track latency', async () => {
      // A 10ms artificial delay must be reflected in latencyMs.
      const runnable = createMockRunnable(async () => {
        await new Promise((resolve) => setTimeout(resolve, 10));
        return { output: 'Delayed response' };
      });

      const adapter = new LangChainAdapter({ provider: 'langchain' }, runnable);
      const result = await adapter.generate({ prompt: 'Test' });

      expect(result.latencyMs).toBeGreaterThanOrEqual(10);
    });

    it('should include model name in result', async () => {
      // A configured `name` overrides the default "langchain:<type>" label.
      const runnable = createMockRunnable({ output: 'Test' });
      const adapter = new LangChainAdapter(
        { provider: 'langchain', name: 'my-rag-chain' },
        runnable
      );

      const result = await adapter.generate({ prompt: 'Test' });

      expect(result.model).toBe('my-rag-chain');
    });

    it('should fallback to JSON for complex outputs', async () => {
      // Responses matching none of the known output keys are JSON-stringified.
      const complexOutput = { data: { nested: 'value' }, count: 42 };
      const runnable = createMockRunnable(complexOutput as unknown as LangChainRunnableOutput);
      const adapter = new LangChainAdapter({ provider: 'langchain' }, runnable);

      const result = await adapter.generate({ prompt: 'Test' });

      expect(result.text).toBe(JSON.stringify(complexOutput));
    });
  });

  describe('stream', () => {
    it('should stream chunks when supported', async () => {
      // The mock yields one character per chunk; both the onChunk callback and
      // the async-iterator path should observe every chunk.
      const runnable = createMockRunnable('streaming', { supportsStreaming: true });
      const adapter = new LangChainAdapter({ provider: 'langchain' }, runnable);

      const chunks: string[] = [];
      const onChunk = (chunk: string) => chunks.push(chunk);

      for await (const chunk of adapter.stream({ prompt: 'Test' }, onChunk)) {
        // Collect
      }

      expect(chunks.length).toBeGreaterThan(0);
      expect(chunks.join('')).toBe('streaming');
    });

    it('should fallback to generate when streaming not supported', async () => {
      // Without runnable.stream, the adapter emits the full generate() text
      // as a single chunk.
      const runnable = createMockRunnable({ output: 'non-streaming' });
      const adapter = new LangChainAdapter({ provider: 'langchain' }, runnable);

      const chunks: string[] = [];
      const onChunk = (chunk: string) => chunks.push(chunk);

      for await (const chunk of adapter.stream({ prompt: 'Test' }, onChunk)) {
        // Collect
      }

      expect(chunks).toEqual(['non-streaming']);
    });
  });

  describe('capabilities', () => {
    it('should report streaming capability based on runnable', async () => {
      const streamingRunnable = createMockRunnable('test', { supportsStreaming: true });
      const adapter = new LangChainAdapter({ provider: 'langchain' }, streamingRunnable);

      const caps = await adapter.capabilities();
      expect(caps.streaming).toBe(true);
    });

    it('should report no streaming when not supported', async () => {
      const runnable = createMockRunnable('test');
      const adapter = new LangChainAdapter({ provider: 'langchain' }, runnable);

      const caps = await adapter.capabilities();
      expect(caps.streaming).toBe(false);
    });

    it('should report tool capabilities for agents', async () => {
      // Agents advertise functionCalling/toolUse; plain chains do not.
      const runnable = createMockRunnable('test');
      const adapter = new LangChainAdapter(
        { provider: 'langchain', runnableType: 'agent' },
        runnable
      );

      const caps = await adapter.capabilities();
      expect(caps.functionCalling).toBe(true);
      expect(caps.toolUse).toBe(true);
    });

    it('should report no tool capabilities for chains', async () => {
      const runnable = createMockRunnable('test');
      const adapter = new LangChainAdapter(
        { provider: 'langchain', runnableType: 'chain' },
        runnable
      );

      const caps = await adapter.capabilities();
      expect(caps.functionCalling).toBe(false);
      expect(caps.toolUse).toBe(false);
    });
  });

  describe('close', () => {
    it('should complete without error', async () => {
      const runnable = createMockRunnable('test');
      const adapter = new LangChainAdapter({ provider: 'langchain' }, runnable);

      await expect(adapter.close()).resolves.toBeUndefined();
    });
  });
});
|
|
293
|
+
|
|
294
|
+
// Tests for the createLangChainAdapter factory: it should build a
// LangChainAdapter with or without an options object.
describe('createLangChainAdapter', () => {
  it('should create adapter with factory function', () => {
    const runnable = createMockRunnable('test');
    const adapter = createLangChainAdapter(runnable, { name: 'test-chain' });

    expect(adapter).toBeInstanceOf(LangChainAdapter);
    expect(adapter.provider).toBe('langchain');
  });

  it('should work with minimal options', () => {
    // The options argument is optional; the factory supplies the provider.
    const runnable = createMockRunnable('test');
    const adapter = createLangChainAdapter(runnable);

    expect(adapter).toBeInstanceOf(LangChainAdapter);
  });
});
|
package/src/client.ts
ADDED
|
@@ -0,0 +1,264 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* LangChain Adapter
|
|
3
|
+
* Wraps LangChain chains and agents for ArtemisKit testing
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
import type {
|
|
7
|
+
AdapterConfig,
|
|
8
|
+
GenerateOptions,
|
|
9
|
+
GenerateResult,
|
|
10
|
+
ModelCapabilities,
|
|
11
|
+
ModelClient,
|
|
12
|
+
} from '@artemiskit/core';
|
|
13
|
+
import { nanoid } from 'nanoid';
|
|
14
|
+
import type {
|
|
15
|
+
LangChainAdapterConfig,
|
|
16
|
+
LangChainExecutionMetadata,
|
|
17
|
+
LangChainRunnable,
|
|
18
|
+
LangChainRunnableOutput,
|
|
19
|
+
LangChainRunnableType,
|
|
20
|
+
} from './types';
|
|
21
|
+
|
|
22
|
+
/**
 * Adapter for testing LangChain chains and agents with ArtemisKit
 *
 * @example
 * ```typescript
 * import { LangChainAdapter } from '@artemiskit/adapter-langchain';
 * import { ChatOpenAI } from '@langchain/openai';
 * import { StringOutputParser } from '@langchain/core/output_parsers';
 * import { ChatPromptTemplate } from '@langchain/core/prompts';
 *
 * // Create a LangChain chain
 * const model = new ChatOpenAI({ model: 'gpt-4' });
 * const prompt = ChatPromptTemplate.fromTemplate('Answer: {input}');
 * const chain = prompt.pipe(model).pipe(new StringOutputParser());
 *
 * // Wrap with ArtemisKit adapter — the runnable is the second constructor
 * // argument, not a config field
 * const adapter = new LangChainAdapter(
 *   { provider: 'langchain', runnableType: 'chain' },
 *   chain
 * );
 *
 * // Use in ArtemisKit tests
 * const result = await adapter.generate({ prompt: 'What is 2+2?' });
 * ```
 */
export class LangChainAdapter implements ModelClient {
  // The wrapped LangChain runnable (chain, agent executor, or bare LLM).
  private runnable: LangChainRunnable;
  // Adapter options (inputKey/outputKey, name, captureIntermediateSteps, …).
  private config: LangChainAdapterConfig;
  // Resolved runnable kind; drives capability reporting and metadata.
  private runnableType: LangChainRunnableType;
  readonly provider = 'langchain';

  /**
   * @param config   Adapter configuration; cast internally to the LangChain-specific
   *                 config shape so ModelClient-style construction still works.
   * @param runnable The LangChain runnable to wrap.
   */
  constructor(config: AdapterConfig, runnable: LangChainRunnable) {
    this.config = config as LangChainAdapterConfig;
    this.runnable = runnable;
    // Explicit config wins; otherwise sniff the runnable's shape.
    this.runnableType = this.config.runnableType ?? this.detectRunnableType(runnable);
  }

  /**
   * Attempt to detect the type of runnable based on its properties.
   * Heuristic only — checks for agent-executor and LLM marker fields;
   * anything unrecognized is treated as a generic LCEL runnable.
   */
  private detectRunnableType(runnable: LangChainRunnable): LangChainRunnableType {
    // Check for agent-specific properties
    // Cast through unknown first since LangChainRunnable is a specific interface
    const runnableAny = runnable as unknown as Record<string, unknown>;
    if (
      runnableAny.agent ||
      runnableAny.agentExecutor ||
      typeof runnableAny.runAgent === 'function'
    ) {
      return 'agent';
    }

    // Check for LLM-specific properties
    if (runnableAny.modelName || runnableAny.model || runnableAny._llmType) {
      return 'llm';
    }

    // Default to 'runnable' for generic LCEL chains
    return 'runnable';
  }

  /**
   * Invoke the wrapped runnable once and normalize the response into a
   * GenerateResult. Token counts are always zero (not exposed by invoke()),
   * and finishReason is always 'stop' since the runnable gives no signal.
   */
  async generate(options: GenerateOptions): Promise<GenerateResult> {
    const startTime = Date.now();

    // Prepare input based on options
    const input = this.prepareInput(options);

    // Execute the runnable
    const response = await this.runnable.invoke(input);

    // Wall-clock latency for the whole invoke round trip.
    const latencyMs = Date.now() - startTime;

    // Extract text output from various possible response shapes
    const text = this.extractOutput(response);

    // Extract metadata from execution
    const metadata = this.extractMetadata(response);

    return {
      id: nanoid(),
      model: this.config.name || `langchain:${this.runnableType}`,
      text,
      tokens: {
        prompt: 0, // LangChain doesn't expose token counts directly
        completion: 0,
        total: 0,
      },
      latencyMs,
      finishReason: 'stop',
      raw: {
        response,
        metadata,
      },
    };
  }

  /**
   * Stream text chunks from the runnable. Each chunk is both yielded and
   * passed to onChunk. Runnables without a stream() method fall back to a
   * single generate() call emitted as one chunk.
   */
  async *stream(options: GenerateOptions, onChunk: (chunk: string) => void): AsyncIterable<string> {
    if (!this.runnable.stream) {
      // Fallback to non-streaming if not supported
      const result = await this.generate(options);
      onChunk(result.text);
      yield result.text;
      return;
    }

    const input = this.prepareInput(options);
    const stream = this.runnable.stream(input);

    for await (const chunk of stream) {
      // NOTE(review): non-string `content` (e.g. multi-part message content)
      // would be flattened via toString() — confirm the LangChainStreamChunk
      // type guarantees string content here.
      const text = chunk.content?.toString() || chunk.text || '';
      if (text) {
        onChunk(text);
        yield text;
      }
    }
  }

  /**
   * Report static capabilities. Streaming reflects whether the wrapped
   * runnable has stream(); tool-related flags are derived from runnableType.
   * maxContext/vision are fixed placeholders — the real limits depend on the
   * underlying model, which the adapter cannot see.
   */
  async capabilities(): Promise<ModelCapabilities> {
    return {
      streaming: typeof this.runnable.stream === 'function',
      functionCalling: this.runnableType === 'agent',
      toolUse: this.runnableType === 'agent',
      maxContext: 128000, // Varies by underlying model
      vision: false, // Depends on underlying model
      jsonMode: false,
    };
  }

  /** No-op: LangChain runnables hold no resources the adapter must release. */
  async close(): Promise<void> {
    // No cleanup needed for most LangChain runnables
  }

  /**
   * Prepare input for the LangChain runnable.
   * String prompts become `{ [inputKey]: prompt }`. Message arrays collapse
   * to the LAST user message (earlier user turns are dropped from the input
   * key), with the system message and the full message list passed alongside
   * when a system message exists.
   */
  private prepareInput(options: GenerateOptions): Record<string, unknown> | string {
    const inputKey = this.config.inputKey ?? 'input';

    // Handle string prompts
    if (typeof options.prompt === 'string') {
      // Some runnables accept just a string, others need an object
      return { [inputKey]: options.prompt };
    }

    // Handle chat message array - convert to single prompt string
    const messages = options.prompt;
    const lastUserMessage = messages.findLast((m) => m.role === 'user');
    const systemMessage = messages.find((m) => m.role === 'system');

    // Build input with system context if available
    if (systemMessage) {
      return {
        [inputKey]: lastUserMessage?.content || '',
        system: systemMessage.content,
        messages: messages.map((m) => ({
          role: m.role,
          content: m.content,
        })),
      };
    }

    return { [inputKey]: lastUserMessage?.content || '' };
  }

  /**
   * Extract the text output from a LangChain response.
   * Handles various response shapes from different runnable types:
   * bare strings first, then well-known keys (configured outputKey has
   * priority), then the last agent observation, then JSON as a last resort.
   */
  private extractOutput(response: LangChainRunnableOutput): string {
    const outputKey = this.config.outputKey ?? 'output';

    // Direct string response (from StringOutputParser)
    if (typeof response === 'string') {
      return response;
    }

    // Check common output keys in order of preference
    const possibleKeys = [outputKey, 'output', 'content', 'text', 'result', 'answer'];

    for (const key of possibleKeys) {
      const value = response[key];
      if (typeof value === 'string') {
        return value;
      }
      // Message-like values (e.g. AIMessage) carry their text under .content.
      if (value && typeof value === 'object' && 'content' in value) {
        return String((value as { content: unknown }).content);
      }
    }

    // For agent responses, extract from intermediate steps
    if (response.intermediateSteps?.length) {
      const steps = response.intermediateSteps;
      const lastStep = steps[steps.length - 1];
      return lastStep?.observation || JSON.stringify(response);
    }

    // Fallback to JSON stringification
    return JSON.stringify(response);
  }

  /**
   * Extract execution metadata from the response: runnable type, configured
   * name, and — unless captureIntermediateSteps is explicitly false — the
   * agent's intermediate steps with a deduplicated tool list and call count.
   */
  private extractMetadata(response: LangChainRunnableOutput): LangChainExecutionMetadata {
    const metadata: LangChainExecutionMetadata = {
      runnableType: this.runnableType,
      name: this.config.name,
    };

    // Capture intermediate steps if available and enabled
    if (this.config.captureIntermediateSteps !== false && response.intermediateSteps) {
      metadata.intermediateSteps = response.intermediateSteps;
      metadata.toolsUsed = [...new Set(response.intermediateSteps.map((s) => s.action.tool))];
      metadata.totalToolCalls = response.intermediateSteps.length;
    }

    return metadata;
  }
}
|
|
242
|
+
|
|
243
|
+
/**
|
|
244
|
+
* Factory function to create a LangChain adapter
|
|
245
|
+
*
|
|
246
|
+
* @example
|
|
247
|
+
* ```typescript
|
|
248
|
+
* const adapter = createLangChainAdapter(myChain, {
|
|
249
|
+
* name: 'my-rag-chain',
|
|
250
|
+
* runnableType: 'chain',
|
|
251
|
+
* });
|
|
252
|
+
* ```
|
|
253
|
+
*/
|
|
254
|
+
export function createLangChainAdapter(
|
|
255
|
+
runnable: LangChainRunnable,
|
|
256
|
+
options?: Partial<LangChainAdapterConfig>
|
|
257
|
+
): LangChainAdapter {
|
|
258
|
+
const config: LangChainAdapterConfig = {
|
|
259
|
+
provider: 'langchain',
|
|
260
|
+
...options,
|
|
261
|
+
};
|
|
262
|
+
|
|
263
|
+
return new LangChainAdapter(config, runnable);
|
|
264
|
+
}
|
package/src/index.ts
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @artemiskit/adapter-langchain
|
|
3
|
+
*
|
|
4
|
+
* LangChain.js adapter for ArtemisKit LLM evaluation toolkit.
|
|
5
|
+
* Enables testing of LangChain chains, agents, and runnables.
|
|
6
|
+
*
|
|
7
|
+
* @example
|
|
8
|
+
* ```typescript
|
|
9
|
+
* import { createLangChainAdapter } from '@artemiskit/adapter-langchain';
|
|
10
|
+
* import { ChatOpenAI } from '@langchain/openai';
|
|
11
|
+
*
|
|
12
|
+
* const model = new ChatOpenAI({ model: 'gpt-4' });
|
|
13
|
+
* const adapter = createLangChainAdapter(model, { name: 'gpt4-direct' });
|
|
14
|
+
*
|
|
15
|
+
* // Use with ArtemisKit
|
|
16
|
+
* const result = await adapter.generate({ prompt: 'Hello!' });
|
|
17
|
+
* ```
|
|
18
|
+
*/
|
|
19
|
+
|
|
20
|
+
export { LangChainAdapter, createLangChainAdapter } from './client';
|
|
21
|
+
export type {
|
|
22
|
+
LangChainAdapterConfig,
|
|
23
|
+
LangChainRunnable,
|
|
24
|
+
LangChainRunnableOutput,
|
|
25
|
+
LangChainRunnableType,
|
|
26
|
+
LangChainIntermediateStep,
|
|
27
|
+
LangChainStreamChunk,
|
|
28
|
+
LangChainExecutionMetadata,
|
|
29
|
+
} from './types';
|