@artemiskit/adapter-langchain 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +133 -0
- package/README.md +151 -0
- package/dist/client.d.ts +73 -0
- package/dist/client.d.ts.map +1 -0
- package/dist/index.d.ts +21 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +170 -0
- package/dist/types.d.ts +87 -0
- package/dist/types.d.ts.map +1 -0
- package/package.json +54 -0
- package/src/client.test.ts +309 -0
- package/src/client.ts +264 -0
- package/src/index.ts +29 -0
- package/src/types.ts +100 -0
- package/tsconfig.json +13 -0
package/CHANGELOG.md
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
1
|
+
# @artemiskit/adapter-langchain
|
|
2
|
+
|
|
3
|
+
## 0.2.0
|
|
4
|
+
|
|
5
|
+
### Minor Changes
|
|
6
|
+
|
|
7
|
+
- ## v0.3.0 - SDK, Guardian Mode & OWASP Compliance
|
|
8
|
+
|
|
9
|
+
This major release delivers the full programmatic SDK, runtime protection with Guardian Mode, OWASP LLM Top 10 2025 attack vectors, and agentic framework adapters.
|
|
10
|
+
|
|
11
|
+
### Programmatic SDK (`@artemiskit/sdk`)
|
|
12
|
+
|
|
13
|
+
The new SDK package provides a complete programmatic API for LLM evaluation:
|
|
14
|
+
|
|
15
|
+
- **ArtemisKit class** with `run()`, `redteam()`, and `stress()` methods
|
|
16
|
+
- **Jest integration** with custom matchers (`toPassAllCases`, `toHaveSuccessRate`, etc.)
|
|
17
|
+
- **Vitest integration** with identical matchers
|
|
18
|
+
- **Event handling** for real-time progress updates
|
|
19
|
+
- **13 custom matchers** for run, red team, and stress test assertions
|
|
20
|
+
|
|
21
|
+
```typescript
|
|
22
|
+
import { ArtemisKit } from "@artemiskit/sdk";
|
|
23
|
+
import { jestMatchers } from "@artemiskit/sdk/jest";
|
|
24
|
+
|
|
25
|
+
expect.extend(jestMatchers);
|
|
26
|
+
|
|
27
|
+
const kit = new ArtemisKit({ provider: "openai", model: "gpt-4o" });
|
|
28
|
+
const results = await kit.run({ scenario: "./tests.yaml" });
|
|
29
|
+
expect(results).toPassAllCases();
|
|
30
|
+
```
|
|
31
|
+
|
|
32
|
+
### Guardian Mode (Runtime Protection)
|
|
33
|
+
|
|
34
|
+
New Guardian Mode provides runtime protection for AI/LLM applications:
|
|
35
|
+
|
|
36
|
+
- **Three operating modes**: `testing`, `guardian`, `hybrid`
|
|
37
|
+
- **Prompt injection detection** and blocking
|
|
38
|
+
- **PII detection & redaction** (email, SSN, phone, API keys)
|
|
39
|
+
- **Action validation** for agent tool/function calls
|
|
40
|
+
- **Intent classification** with risk assessment
|
|
41
|
+
- **Circuit breaker** for automatic blocking on repeated violations
|
|
42
|
+
- **Rate limiting** and **cost limiting**
|
|
43
|
+
- **Custom policies** via TypeScript or YAML
|
|
44
|
+
|
|
45
|
+
```typescript
|
|
46
|
+
import { createGuardian } from "@artemiskit/sdk/guardian";
|
|
47
|
+
|
|
48
|
+
const guardian = createGuardian({ mode: "guardian", blockOnFailure: true });
|
|
49
|
+
const protectedClient = guardian.protect(myLLMClient);
|
|
50
|
+
```
|
|
51
|
+
|
|
52
|
+
### OWASP LLM Top 10 2025 Attack Vectors
|
|
53
|
+
|
|
54
|
+
New red team mutations aligned with OWASP LLM Top 10 2025:
|
|
55
|
+
|
|
56
|
+
| Mutation | OWASP | Description |
|
|
57
|
+
| -------------------- | ----- | ------------------------------ |
|
|
58
|
+
| `bad-likert-judge` | LLM01 | Exploit evaluation capability |
|
|
59
|
+
| `crescendo` | LLM01 | Multi-turn gradual escalation |
|
|
60
|
+
| `deceptive-delight` | LLM01 | Positive framing bypass |
|
|
61
|
+
| `system-extraction` | LLM07 | System prompt leakage |
|
|
62
|
+
| `output-injection` | LLM05 | XSS, SQLi in output |
|
|
63
|
+
| `excessive-agency` | LLM06 | Unauthorized action claims |
|
|
64
|
+
| `hallucination-trap` | LLM09 | Confident fabrication triggers |
|
|
65
|
+
|
|
66
|
+
```bash
|
|
67
|
+
akit redteam scenario.yaml --owasp LLM01,LLM05
|
|
68
|
+
akit redteam scenario.yaml --owasp-full
|
|
69
|
+
```
|
|
70
|
+
|
|
71
|
+
### Agentic Framework Adapters
|
|
72
|
+
|
|
73
|
+
New adapters for testing agentic AI systems:
|
|
74
|
+
|
|
75
|
+
**LangChain Adapter** (`@artemiskit/adapter-langchain`)
|
|
76
|
+
|
|
77
|
+
- Test chains, agents, and runnables
|
|
78
|
+
- Capture intermediate steps and tool usage
|
|
79
|
+
- Support for LCEL, ReAct agents, RAG chains
|
|
80
|
+
|
|
81
|
+
**DeepAgents Adapter** (`@artemiskit/adapter-deepagents`)
|
|
82
|
+
|
|
83
|
+
- Test multi-agent systems and workflows
|
|
84
|
+
- Capture agent traces and inter-agent messages
|
|
85
|
+
- Support for sequential, parallel, and hierarchical workflows
|
|
86
|
+
|
|
87
|
+
```typescript
|
|
88
|
+
import { createLangChainAdapter } from "@artemiskit/adapter-langchain";
|
|
89
|
+
import { createDeepAgentsAdapter } from "@artemiskit/adapter-deepagents";
|
|
90
|
+
|
|
91
|
+
const adapter = createLangChainAdapter(myChain, {
|
|
92
|
+
captureIntermediateSteps: true,
|
|
93
|
+
});
|
|
94
|
+
const result = await adapter.generate({ prompt: "Test query" });
|
|
95
|
+
```
|
|
96
|
+
|
|
97
|
+
### Supabase Storage Enhancements
|
|
98
|
+
|
|
99
|
+
Enhanced cloud storage capabilities:
|
|
100
|
+
|
|
101
|
+
- **Analytics tables** for metrics tracking
|
|
102
|
+
- **Case results table** for granular analysis
|
|
103
|
+
- **Baseline management** for regression detection
|
|
104
|
+
- **Trend analysis** queries
|
|
105
|
+
|
|
106
|
+
### Bug Fixes
|
|
107
|
+
|
|
108
|
+
- **adapter-openai**: Use `max_completion_tokens` for newer OpenAI models (o1, o3, gpt-4.5)
|
|
109
|
+
- **redteam**: Resolve TypeScript and flaky test issues in OWASP mutations
|
|
110
|
+
- **adapters**: Fix TypeScript build errors for agentic adapters
|
|
111
|
+
- **core**: Add `langchain` and `deepagents` to ProviderType union
|
|
112
|
+
|
|
113
|
+
### Examples
|
|
114
|
+
|
|
115
|
+
New comprehensive examples organized by feature:
|
|
116
|
+
|
|
117
|
+
- `examples/guardian/` - Guardian Mode examples (testing, guardian, hybrid modes)
|
|
118
|
+
- `examples/sdk/` - SDK usage examples (Jest, Vitest, events)
|
|
119
|
+
- `examples/adapters/` - Agentic adapter examples
|
|
120
|
+
- `examples/owasp/` - OWASP LLM Top 10 test scenarios
|
|
121
|
+
|
|
122
|
+
### Documentation
|
|
123
|
+
|
|
124
|
+
- Complete SDK documentation with API reference
|
|
125
|
+
- Guardian Mode guide with all three modes explained
|
|
126
|
+
- Agentic adapters documentation (LangChain, DeepAgents)
|
|
127
|
+
- Test matchers reference for Jest/Vitest
|
|
128
|
+
- OWASP LLM Top 10 testing scenarios
|
|
129
|
+
|
|
130
|
+
### Patch Changes
|
|
131
|
+
|
|
132
|
+
- Updated dependencies
|
|
133
|
+
- @artemiskit/core@0.3.0
|
package/README.md
ADDED
|
@@ -0,0 +1,151 @@
|
|
|
1
|
+
# @artemiskit/adapter-langchain
|
|
2
|
+
|
|
3
|
+
LangChain.js adapter for ArtemisKit - Test and evaluate LangChain chains, agents, and runnables.
|
|
4
|
+
|
|
5
|
+
## Installation
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
bun add @artemiskit/adapter-langchain
|
|
9
|
+
# or
|
|
10
|
+
npm install @artemiskit/adapter-langchain
|
|
11
|
+
```
|
|
12
|
+
|
|
13
|
+
## Quick Start
|
|
14
|
+
|
|
15
|
+
### Testing a Simple Chain
|
|
16
|
+
|
|
17
|
+
```typescript
|
|
18
|
+
import { createLangChainAdapter } from '@artemiskit/adapter-langchain';
|
|
19
|
+
import { ChatOpenAI } from '@langchain/openai';
|
|
20
|
+
import { StringOutputParser } from '@langchain/core/output_parsers';
|
|
21
|
+
import { ChatPromptTemplate } from '@langchain/core/prompts';
|
|
22
|
+
|
|
23
|
+
// Create your LangChain chain
|
|
24
|
+
const model = new ChatOpenAI({ model: 'gpt-4' });
|
|
25
|
+
const prompt = ChatPromptTemplate.fromTemplate('Answer concisely: {input}');
|
|
26
|
+
const chain = prompt.pipe(model).pipe(new StringOutputParser());
|
|
27
|
+
|
|
28
|
+
// Wrap with ArtemisKit adapter
|
|
29
|
+
const adapter = createLangChainAdapter(chain, {
|
|
30
|
+
name: 'qa-chain',
|
|
31
|
+
runnableType: 'chain',
|
|
32
|
+
});
|
|
33
|
+
|
|
34
|
+
// Use in ArtemisKit tests
|
|
35
|
+
const result = await adapter.generate({ prompt: 'What is 2+2?' });
|
|
36
|
+
console.log(result.text); // "4"
|
|
37
|
+
```
|
|
38
|
+
|
|
39
|
+
### Testing an Agent
|
|
40
|
+
|
|
41
|
+
```typescript
|
|
42
|
+
import { createLangChainAdapter } from '@artemiskit/adapter-langchain';
|
|
43
|
+
import { AgentExecutor, createReactAgent } from 'langchain/agents';
|
|
44
|
+
import { ChatOpenAI } from '@langchain/openai';
|
|
45
|
+
import { Calculator } from '@langchain/community/tools/calculator';
|
|
46
|
+
|
|
47
|
+
// Create agent
|
|
48
|
+
const model = new ChatOpenAI({ model: 'gpt-4' });
|
|
49
|
+
const tools = [new Calculator()];
|
|
50
|
+
const agent = createReactAgent({ llm: model, tools, prompt: agentPrompt });
|
|
51
|
+
const agentExecutor = new AgentExecutor({ agent, tools });
|
|
52
|
+
|
|
53
|
+
// Wrap with ArtemisKit adapter
|
|
54
|
+
const adapter = createLangChainAdapter(agentExecutor, {
|
|
55
|
+
name: 'calculator-agent',
|
|
56
|
+
runnableType: 'agent',
|
|
57
|
+
captureIntermediateSteps: true,
|
|
58
|
+
});
|
|
59
|
+
|
|
60
|
+
// Use in ArtemisKit tests
|
|
61
|
+
const result = await adapter.generate({ prompt: 'Calculate 25 * 4' });
|
|
62
|
+
console.log(result.text); // "100"
|
|
63
|
+
|
|
64
|
+
// Access agent execution metadata
|
|
65
|
+
console.log(result.raw.metadata.toolsUsed); // ['calculator']
|
|
66
|
+
console.log(result.raw.metadata.totalToolCalls); // 1
|
|
67
|
+
```
|
|
68
|
+
|
|
69
|
+
### Testing RAG Chains
|
|
70
|
+
|
|
71
|
+
```typescript
|
|
72
|
+
import { createLangChainAdapter } from '@artemiskit/adapter-langchain';
|
|
73
|
+
import { ChatOpenAI } from '@langchain/openai';
|
|
74
|
+
import { RetrievalQAChain } from 'langchain/chains';
|
|
75
|
+
|
|
76
|
+
// Assume vectorstore is already set up
|
|
77
|
+
const retriever = vectorstore.asRetriever();
|
|
78
|
+
const chain = RetrievalQAChain.fromLLM(
|
|
79
|
+
new ChatOpenAI({ model: 'gpt-4' }),
|
|
80
|
+
retriever
|
|
81
|
+
);
|
|
82
|
+
|
|
83
|
+
const adapter = createLangChainAdapter(chain, {
|
|
84
|
+
name: 'rag-qa',
|
|
85
|
+
inputKey: 'query',
|
|
86
|
+
outputKey: 'result',
|
|
87
|
+
});
|
|
88
|
+
|
|
89
|
+
const result = await adapter.generate({
|
|
90
|
+
prompt: 'What does the document say about X?',
|
|
91
|
+
});
|
|
92
|
+
```
|
|
93
|
+
|
|
94
|
+
## Configuration Options
|
|
95
|
+
|
|
96
|
+
| Option | Type | Default | Description |
|
|
97
|
+
| -------------------------- | ----------------------------------------------- | ----------- | ---------------------------------------- |
|
|
98
|
+
| `name` | `string` | - | Identifier for the chain/agent |
|
|
99
|
+
| `runnableType` | `'chain' \| 'agent' \| 'llm' \| 'runnable'` | auto-detect | Type of LangChain runnable |
|
|
100
|
+
| `captureIntermediateSteps` | `boolean` | `true` | Capture agent intermediate steps |
|
|
101
|
+
| `inputKey` | `string` | `'input'` | Custom input key for the runnable |
|
|
102
|
+
| `outputKey` | `string` | `'output'` | Custom output key for the runnable |
|
|
103
|
+
|
|
104
|
+
## Supported Runnable Types
|
|
105
|
+
|
|
106
|
+
The adapter supports all LangChain runnables that implement `invoke()`:
|
|
107
|
+
|
|
108
|
+
- **Chains**: LCEL chains, RetrievalQA, ConversationalRetrievalQA, etc.
|
|
109
|
+
- **Agents**: ReAct agents, OpenAI Functions agents, Tool-calling agents
|
|
110
|
+
- **LLMs**: Direct ChatOpenAI, ChatAnthropic, etc.
|
|
111
|
+
- **Custom Runnables**: Any object with an `invoke()` method
|
|
112
|
+
|
|
113
|
+
## Streaming Support
|
|
114
|
+
|
|
115
|
+
If your runnable supports streaming via `stream()`, the adapter will use it:
|
|
116
|
+
|
|
117
|
+
```typescript
|
|
118
|
+
for await (const chunk of adapter.stream({ prompt: 'Tell me a story' }, () => {})) {
|
|
119
|
+
// Process streaming chunks
|
|
120
|
+
}
|
|
121
|
+
```
|
|
122
|
+
|
|
123
|
+
## ArtemisKit Integration
|
|
124
|
+
|
|
125
|
+
Use with ArtemisKit scenarios:
|
|
126
|
+
|
|
127
|
+
```yaml
|
|
128
|
+
# scenario.yaml
|
|
129
|
+
name: langchain-qa-test
|
|
130
|
+
provider: langchain
|
|
131
|
+
scenarios:
|
|
132
|
+
- name: Basic QA
|
|
133
|
+
input: 'What is the capital of France?'
|
|
134
|
+
expected:
|
|
135
|
+
contains: 'Paris'
|
|
136
|
+
```
|
|
137
|
+
|
|
138
|
+
```typescript
|
|
139
|
+
// Register adapter in your test setup
|
|
140
|
+
import { adapterRegistry } from '@artemiskit/core';
|
|
141
|
+
import { LangChainAdapter } from '@artemiskit/adapter-langchain';
|
|
142
|
+
|
|
143
|
+
adapterRegistry.register('langchain', async (config) => {
|
|
144
|
+
// Your chain/agent setup
|
|
145
|
+
return new LangChainAdapter(config, myChain);
|
|
146
|
+
});
|
|
147
|
+
```
|
|
148
|
+
|
|
149
|
+
## License
|
|
150
|
+
|
|
151
|
+
Apache-2.0
|
package/dist/client.d.ts
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* LangChain Adapter
|
|
3
|
+
* Wraps LangChain chains and agents for ArtemisKit testing
|
|
4
|
+
*/
|
|
5
|
+
import type { AdapterConfig, GenerateOptions, GenerateResult, ModelCapabilities, ModelClient } from '@artemiskit/core';
|
|
6
|
+
import type { LangChainAdapterConfig, LangChainRunnable } from './types';
|
|
7
|
+
/**
|
|
8
|
+
* Adapter for testing LangChain chains and agents with ArtemisKit
|
|
9
|
+
*
|
|
10
|
+
* @example
|
|
11
|
+
* ```typescript
|
|
12
|
+
* import { LangChainAdapter } from '@artemiskit/adapter-langchain';
|
|
13
|
+
* import { ChatOpenAI } from '@langchain/openai';
|
|
14
|
+
* import { StringOutputParser } from '@langchain/core/output_parsers';
|
|
15
|
+
* import { ChatPromptTemplate } from '@langchain/core/prompts';
|
|
16
|
+
*
|
|
17
|
+
* // Create a LangChain chain
|
|
18
|
+
* const model = new ChatOpenAI({ model: 'gpt-4' });
|
|
19
|
+
* const prompt = ChatPromptTemplate.fromTemplate('Answer: {input}');
|
|
20
|
+
* const chain = prompt.pipe(model).pipe(new StringOutputParser());
|
|
21
|
+
*
|
|
22
|
+
* // Wrap with ArtemisKit adapter
|
|
23
|
+
* const adapter = new LangChainAdapter({
|
|
24
|
+
* provider: 'langchain',
|
|
25
|
+
* runnable: chain,
|
|
26
|
+
* runnableType: 'chain',
|
|
27
|
+
* });
|
|
28
|
+
*
|
|
29
|
+
* // Use in ArtemisKit tests
|
|
30
|
+
* const result = await adapter.generate({ prompt: 'What is 2+2?' });
|
|
31
|
+
* ```
|
|
32
|
+
*/
|
|
33
|
+
export declare class LangChainAdapter implements ModelClient {
|
|
34
|
+
private runnable;
|
|
35
|
+
private config;
|
|
36
|
+
private runnableType;
|
|
37
|
+
readonly provider = "langchain";
|
|
38
|
+
constructor(config: AdapterConfig, runnable: LangChainRunnable);
|
|
39
|
+
/**
|
|
40
|
+
* Attempt to detect the type of runnable based on its properties
|
|
41
|
+
*/
|
|
42
|
+
private detectRunnableType;
|
|
43
|
+
generate(options: GenerateOptions): Promise<GenerateResult>;
|
|
44
|
+
stream(options: GenerateOptions, onChunk: (chunk: string) => void): AsyncIterable<string>;
|
|
45
|
+
capabilities(): Promise<ModelCapabilities>;
|
|
46
|
+
close(): Promise<void>;
|
|
47
|
+
/**
|
|
48
|
+
* Prepare input for the LangChain runnable
|
|
49
|
+
*/
|
|
50
|
+
private prepareInput;
|
|
51
|
+
/**
|
|
52
|
+
* Extract the text output from a LangChain response
|
|
53
|
+
* Handles various response shapes from different runnable types
|
|
54
|
+
*/
|
|
55
|
+
private extractOutput;
|
|
56
|
+
/**
|
|
57
|
+
* Extract execution metadata from the response
|
|
58
|
+
*/
|
|
59
|
+
private extractMetadata;
|
|
60
|
+
}
|
|
61
|
+
/**
|
|
62
|
+
* Factory function to create a LangChain adapter
|
|
63
|
+
*
|
|
64
|
+
* @example
|
|
65
|
+
* ```typescript
|
|
66
|
+
* const adapter = createLangChainAdapter(myChain, {
|
|
67
|
+
* name: 'my-rag-chain',
|
|
68
|
+
* runnableType: 'chain',
|
|
69
|
+
* });
|
|
70
|
+
* ```
|
|
71
|
+
*/
|
|
72
|
+
export declare function createLangChainAdapter(runnable: LangChainRunnable, options?: Partial<LangChainAdapterConfig>): LangChainAdapter;
|
|
73
|
+
//# sourceMappingURL=client.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"client.d.ts","sourceRoot":"","sources":["../src/client.ts"],"names":[],"mappings":"AAAA;;;GAGG;AAEH,OAAO,KAAK,EACV,aAAa,EACb,eAAe,EACf,cAAc,EACd,iBAAiB,EACjB,WAAW,EACZ,MAAM,kBAAkB,CAAC;AAE1B,OAAO,KAAK,EACV,sBAAsB,EAEtB,iBAAiB,EAGlB,MAAM,SAAS,CAAC;AAEjB;;;;;;;;;;;;;;;;;;;;;;;;;GAyBG;AACH,qBAAa,gBAAiB,YAAW,WAAW;IAClD,OAAO,CAAC,QAAQ,CAAoB;IACpC,OAAO,CAAC,MAAM,CAAyB;IACvC,OAAO,CAAC,YAAY,CAAwB;IAC5C,QAAQ,CAAC,QAAQ,eAAe;gBAEpB,MAAM,EAAE,aAAa,EAAE,QAAQ,EAAE,iBAAiB;IAM9D;;OAEG;IACH,OAAO,CAAC,kBAAkB;IAqBpB,QAAQ,CAAC,OAAO,EAAE,eAAe,GAAG,OAAO,CAAC,cAAc,CAAC;IAmC1D,MAAM,CAAC,OAAO,EAAE,eAAe,EAAE,OAAO,EAAE,CAAC,KAAK,EAAE,MAAM,KAAK,IAAI,GAAG,aAAa,CAAC,MAAM,CAAC;IAqB1F,YAAY,IAAI,OAAO,CAAC,iBAAiB,CAAC;IAW1C,KAAK,IAAI,OAAO,CAAC,IAAI,CAAC;IAI5B;;OAEG;IACH,OAAO,CAAC,YAAY;IA6BpB;;;OAGG;IACH,OAAO,CAAC,aAAa;IAgCrB;;OAEG;IACH,OAAO,CAAC,eAAe;CAexB;AAED;;;;;;;;;;GAUG;AACH,wBAAgB,sBAAsB,CACpC,QAAQ,EAAE,iBAAiB,EAC3B,OAAO,CAAC,EAAE,OAAO,CAAC,sBAAsB,CAAC,GACxC,gBAAgB,CAOlB"}
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @artemiskit/adapter-langchain
|
|
3
|
+
*
|
|
4
|
+
* LangChain.js adapter for ArtemisKit LLM evaluation toolkit.
|
|
5
|
+
* Enables testing of LangChain chains, agents, and runnables.
|
|
6
|
+
*
|
|
7
|
+
* @example
|
|
8
|
+
* ```typescript
|
|
9
|
+
* import { createLangChainAdapter } from '@artemiskit/adapter-langchain';
|
|
10
|
+
* import { ChatOpenAI } from '@langchain/openai';
|
|
11
|
+
*
|
|
12
|
+
* const model = new ChatOpenAI({ model: 'gpt-4' });
|
|
13
|
+
* const adapter = createLangChainAdapter(model, { name: 'gpt4-direct' });
|
|
14
|
+
*
|
|
15
|
+
* // Use with ArtemisKit
|
|
16
|
+
* const result = await adapter.generate({ prompt: 'Hello!' });
|
|
17
|
+
* ```
|
|
18
|
+
*/
|
|
19
|
+
export { LangChainAdapter, createLangChainAdapter } from './client';
|
|
20
|
+
export type { LangChainAdapterConfig, LangChainRunnable, LangChainRunnableOutput, LangChainRunnableType, LangChainIntermediateStep, LangChainStreamChunk, LangChainExecutionMetadata, } from './types';
|
|
21
|
+
//# sourceMappingURL=index.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;;;GAiBG;AAEH,OAAO,EAAE,gBAAgB,EAAE,sBAAsB,EAAE,MAAM,UAAU,CAAC;AACpE,YAAY,EACV,sBAAsB,EACtB,iBAAiB,EACjB,uBAAuB,EACvB,qBAAqB,EACrB,yBAAyB,EACzB,oBAAoB,EACpB,0BAA0B,GAC3B,MAAM,SAAS,CAAC"}
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,170 @@
|
|
|
1
|
+
// @bun
|
|
2
|
+
// ../../../node_modules/nanoid/index.js
|
|
3
|
+
import { webcrypto as crypto } from "crypto";
|
|
4
|
+
|
|
5
|
+
// ../../../node_modules/nanoid/url-alphabet/index.js
|
|
6
|
+
// Vendored nanoid (URL-safe unique-ID generator), inlined at bundle time.
// 64-character alphabet; one random byte maps onto it via `& 63`.
var urlAlphabet = "useandom-26T198340PX75pxJACKVERYMINDBUSHWOLF_GQZbfghjklqvwyzrict";

// Random-byte pool: refilled in bulk so each id does not pay a crypto call.
var POOL_SIZE_MULTIPLIER = 128;
var pool;
var poolOffset;

/**
 * Ensure `pool` holds at least `byteCount` fresh random bytes ending at
 * `poolOffset`. Reallocates when the pool is missing or too small, and
 * refills in place when the remaining bytes are exhausted.
 */
function fillPool(byteCount) {
  const needsRealloc = !pool || pool.length < byteCount;
  if (needsRealloc) {
    pool = Buffer.allocUnsafe(byteCount * POOL_SIZE_MULTIPLIER);
    crypto.getRandomValues(pool);
    poolOffset = 0;
  } else if (poolOffset + byteCount > pool.length) {
    crypto.getRandomValues(pool);
    poolOffset = 0;
  }
  poolOffset += byteCount;
}

/**
 * Generate a URL-safe random id of `size` characters (default 21).
 * `size |= 0` truncates any fractional argument to an integer.
 */
function nanoid(size = 21) {
  fillPool(size |= 0);
  const chars = [];
  for (let cursor = poolOffset - size; cursor < poolOffset; cursor++) {
    chars.push(urlAlphabet[pool[cursor] & 63]);
  }
  return chars.join("");
}
|
|
31
|
+
|
|
32
|
+
// src/client.ts
|
|
33
|
+
class LangChainAdapter {
|
|
34
|
+
runnable;
|
|
35
|
+
config;
|
|
36
|
+
runnableType;
|
|
37
|
+
provider = "langchain";
|
|
38
|
+
constructor(config, runnable) {
|
|
39
|
+
this.config = config;
|
|
40
|
+
this.runnable = runnable;
|
|
41
|
+
this.runnableType = this.config.runnableType ?? this.detectRunnableType(runnable);
|
|
42
|
+
}
|
|
43
|
+
detectRunnableType(runnable) {
|
|
44
|
+
const runnableAny = runnable;
|
|
45
|
+
if (runnableAny.agent || runnableAny.agentExecutor || typeof runnableAny.runAgent === "function") {
|
|
46
|
+
return "agent";
|
|
47
|
+
}
|
|
48
|
+
if (runnableAny.modelName || runnableAny.model || runnableAny._llmType) {
|
|
49
|
+
return "llm";
|
|
50
|
+
}
|
|
51
|
+
return "runnable";
|
|
52
|
+
}
|
|
53
|
+
async generate(options) {
|
|
54
|
+
const startTime = Date.now();
|
|
55
|
+
const input = this.prepareInput(options);
|
|
56
|
+
const response = await this.runnable.invoke(input);
|
|
57
|
+
const latencyMs = Date.now() - startTime;
|
|
58
|
+
const text = this.extractOutput(response);
|
|
59
|
+
const metadata = this.extractMetadata(response);
|
|
60
|
+
return {
|
|
61
|
+
id: nanoid(),
|
|
62
|
+
model: this.config.name || `langchain:${this.runnableType}`,
|
|
63
|
+
text,
|
|
64
|
+
tokens: {
|
|
65
|
+
prompt: 0,
|
|
66
|
+
completion: 0,
|
|
67
|
+
total: 0
|
|
68
|
+
},
|
|
69
|
+
latencyMs,
|
|
70
|
+
finishReason: "stop",
|
|
71
|
+
raw: {
|
|
72
|
+
response,
|
|
73
|
+
metadata
|
|
74
|
+
}
|
|
75
|
+
};
|
|
76
|
+
}
|
|
77
|
+
async* stream(options, onChunk) {
|
|
78
|
+
if (!this.runnable.stream) {
|
|
79
|
+
const result = await this.generate(options);
|
|
80
|
+
onChunk(result.text);
|
|
81
|
+
yield result.text;
|
|
82
|
+
return;
|
|
83
|
+
}
|
|
84
|
+
const input = this.prepareInput(options);
|
|
85
|
+
const stream = this.runnable.stream(input);
|
|
86
|
+
for await (const chunk of stream) {
|
|
87
|
+
const text = chunk.content?.toString() || chunk.text || "";
|
|
88
|
+
if (text) {
|
|
89
|
+
onChunk(text);
|
|
90
|
+
yield text;
|
|
91
|
+
}
|
|
92
|
+
}
|
|
93
|
+
}
|
|
94
|
+
async capabilities() {
|
|
95
|
+
return {
|
|
96
|
+
streaming: typeof this.runnable.stream === "function",
|
|
97
|
+
functionCalling: this.runnableType === "agent",
|
|
98
|
+
toolUse: this.runnableType === "agent",
|
|
99
|
+
maxContext: 128000,
|
|
100
|
+
vision: false,
|
|
101
|
+
jsonMode: false
|
|
102
|
+
};
|
|
103
|
+
}
|
|
104
|
+
async close() {}
|
|
105
|
+
prepareInput(options) {
|
|
106
|
+
const inputKey = this.config.inputKey ?? "input";
|
|
107
|
+
if (typeof options.prompt === "string") {
|
|
108
|
+
return { [inputKey]: options.prompt };
|
|
109
|
+
}
|
|
110
|
+
const messages = options.prompt;
|
|
111
|
+
const lastUserMessage = messages.findLast((m) => m.role === "user");
|
|
112
|
+
const systemMessage = messages.find((m) => m.role === "system");
|
|
113
|
+
if (systemMessage) {
|
|
114
|
+
return {
|
|
115
|
+
[inputKey]: lastUserMessage?.content || "",
|
|
116
|
+
system: systemMessage.content,
|
|
117
|
+
messages: messages.map((m) => ({
|
|
118
|
+
role: m.role,
|
|
119
|
+
content: m.content
|
|
120
|
+
}))
|
|
121
|
+
};
|
|
122
|
+
}
|
|
123
|
+
return { [inputKey]: lastUserMessage?.content || "" };
|
|
124
|
+
}
|
|
125
|
+
extractOutput(response) {
|
|
126
|
+
const outputKey = this.config.outputKey ?? "output";
|
|
127
|
+
if (typeof response === "string") {
|
|
128
|
+
return response;
|
|
129
|
+
}
|
|
130
|
+
const possibleKeys = [outputKey, "output", "content", "text", "result", "answer"];
|
|
131
|
+
for (const key of possibleKeys) {
|
|
132
|
+
const value = response[key];
|
|
133
|
+
if (typeof value === "string") {
|
|
134
|
+
return value;
|
|
135
|
+
}
|
|
136
|
+
if (value && typeof value === "object" && "content" in value) {
|
|
137
|
+
return String(value.content);
|
|
138
|
+
}
|
|
139
|
+
}
|
|
140
|
+
if (response.intermediateSteps?.length) {
|
|
141
|
+
const steps = response.intermediateSteps;
|
|
142
|
+
const lastStep = steps[steps.length - 1];
|
|
143
|
+
return lastStep?.observation || JSON.stringify(response);
|
|
144
|
+
}
|
|
145
|
+
return JSON.stringify(response);
|
|
146
|
+
}
|
|
147
|
+
extractMetadata(response) {
|
|
148
|
+
const metadata = {
|
|
149
|
+
runnableType: this.runnableType,
|
|
150
|
+
name: this.config.name
|
|
151
|
+
};
|
|
152
|
+
if (this.config.captureIntermediateSteps !== false && response.intermediateSteps) {
|
|
153
|
+
metadata.intermediateSteps = response.intermediateSteps;
|
|
154
|
+
metadata.toolsUsed = [...new Set(response.intermediateSteps.map((s) => s.action.tool))];
|
|
155
|
+
metadata.totalToolCalls = response.intermediateSteps.length;
|
|
156
|
+
}
|
|
157
|
+
return metadata;
|
|
158
|
+
}
|
|
159
|
+
}
|
|
160
|
+
function createLangChainAdapter(runnable, options) {
|
|
161
|
+
const config = {
|
|
162
|
+
provider: "langchain",
|
|
163
|
+
...options
|
|
164
|
+
};
|
|
165
|
+
return new LangChainAdapter(config, runnable);
|
|
166
|
+
}
|
|
167
|
+
export {
|
|
168
|
+
createLangChainAdapter,
|
|
169
|
+
LangChainAdapter
|
|
170
|
+
};
|
package/dist/types.d.ts
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Types for LangChain adapter
|
|
3
|
+
*/
|
|
4
|
+
import type { BaseAdapterConfig } from '@artemiskit/core';
|
|
5
|
+
/**
|
|
6
|
+
* Supported LangChain runnable types
|
|
7
|
+
*/
|
|
8
|
+
export type LangChainRunnableType = 'chain' | 'agent' | 'llm' | 'runnable';
|
|
9
|
+
/**
|
|
10
|
+
* Configuration for LangChain adapter
|
|
11
|
+
*/
|
|
12
|
+
export interface LangChainAdapterConfig extends BaseAdapterConfig {
|
|
13
|
+
provider: 'langchain';
|
|
14
|
+
/**
|
|
15
|
+
* The type of LangChain runnable being wrapped
|
|
16
|
+
*/
|
|
17
|
+
runnableType?: LangChainRunnableType;
|
|
18
|
+
/**
|
|
19
|
+
* Name identifier for the chain/agent (for logging/tracking)
|
|
20
|
+
*/
|
|
21
|
+
name?: string;
|
|
22
|
+
/**
|
|
23
|
+
* Whether to capture intermediate steps from agents
|
|
24
|
+
* @default true
|
|
25
|
+
*/
|
|
26
|
+
captureIntermediateSteps?: boolean;
|
|
27
|
+
/**
|
|
28
|
+
* Custom input key for the runnable (default: 'input')
|
|
29
|
+
*/
|
|
30
|
+
inputKey?: string;
|
|
31
|
+
/**
|
|
32
|
+
* Custom output key for the runnable (default: 'output')
|
|
33
|
+
*/
|
|
34
|
+
outputKey?: string;
|
|
35
|
+
}
|
|
36
|
+
/**
|
|
37
|
+
* Generic interface for LangChain-like runnables
|
|
38
|
+
* Supports both chains and agents with invoke() method
|
|
39
|
+
*/
|
|
40
|
+
export interface LangChainRunnable {
|
|
41
|
+
invoke(input: Record<string, unknown> | string, config?: Record<string, unknown>): Promise<LangChainRunnableOutput>;
|
|
42
|
+
stream?(input: Record<string, unknown> | string, config?: Record<string, unknown>): AsyncIterable<LangChainStreamChunk>;
|
|
43
|
+
}
|
|
44
|
+
/**
|
|
45
|
+
* Output from a LangChain runnable
|
|
46
|
+
*/
|
|
47
|
+
export interface LangChainRunnableOutput {
|
|
48
|
+
/** Main text output - can be in various forms */
|
|
49
|
+
output?: string;
|
|
50
|
+
content?: string;
|
|
51
|
+
text?: string;
|
|
52
|
+
result?: string;
|
|
53
|
+
/** For agent outputs with intermediate steps */
|
|
54
|
+
intermediateSteps?: LangChainIntermediateStep[];
|
|
55
|
+
/** Raw response for other properties */
|
|
56
|
+
[key: string]: unknown;
|
|
57
|
+
}
|
|
58
|
+
/**
|
|
59
|
+
* Intermediate step from agent execution
|
|
60
|
+
*/
|
|
61
|
+
export interface LangChainIntermediateStep {
|
|
62
|
+
action: {
|
|
63
|
+
tool: string;
|
|
64
|
+
toolInput: unknown;
|
|
65
|
+
log?: string;
|
|
66
|
+
};
|
|
67
|
+
observation: string;
|
|
68
|
+
}
|
|
69
|
+
/**
|
|
70
|
+
* Streaming chunk from LangChain
|
|
71
|
+
*/
|
|
72
|
+
export interface LangChainStreamChunk {
|
|
73
|
+
content?: string;
|
|
74
|
+
text?: string;
|
|
75
|
+
[key: string]: unknown;
|
|
76
|
+
}
|
|
77
|
+
/**
|
|
78
|
+
* Metadata extracted from LangChain execution
|
|
79
|
+
*/
|
|
80
|
+
export interface LangChainExecutionMetadata {
|
|
81
|
+
runnableType: LangChainRunnableType;
|
|
82
|
+
name?: string;
|
|
83
|
+
intermediateSteps?: LangChainIntermediateStep[];
|
|
84
|
+
toolsUsed?: string[];
|
|
85
|
+
totalToolCalls?: number;
|
|
86
|
+
}
|
|
87
|
+
//# sourceMappingURL=types.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"AAAA;;GAEG;AAEH,OAAO,KAAK,EAAE,iBAAiB,EAAE,MAAM,kBAAkB,CAAC;AAE1D;;GAEG;AACH,MAAM,MAAM,qBAAqB,GAAG,OAAO,GAAG,OAAO,GAAG,KAAK,GAAG,UAAU,CAAC;AAE3E;;GAEG;AACH,MAAM,WAAW,sBAAuB,SAAQ,iBAAiB;IAC/D,QAAQ,EAAE,WAAW,CAAC;IACtB;;OAEG;IACH,YAAY,CAAC,EAAE,qBAAqB,CAAC;IACrC;;OAEG;IACH,IAAI,CAAC,EAAE,MAAM,CAAC;IACd;;;OAGG;IACH,wBAAwB,CAAC,EAAE,OAAO,CAAC;IACnC;;OAEG;IACH,QAAQ,CAAC,EAAE,MAAM,CAAC;IAClB;;OAEG;IACH,SAAS,CAAC,EAAE,MAAM,CAAC;CACpB;AAED;;;GAGG;AACH,MAAM,WAAW,iBAAiB;IAChC,MAAM,CACJ,KAAK,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAAG,MAAM,EACvC,MAAM,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC/B,OAAO,CAAC,uBAAuB,CAAC,CAAC;IACpC,MAAM,CAAC,CACL,KAAK,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAAG,MAAM,EACvC,MAAM,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,GAC/B,aAAa,CAAC,oBAAoB,CAAC,CAAC;CACxC;AAED;;GAEG;AACH,MAAM,WAAW,uBAAuB;IACtC,iDAAiD;IACjD,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,gDAAgD;IAChD,iBAAiB,CAAC,EAAE,yBAAyB,EAAE,CAAC;IAChD,wCAAwC;IACxC,CAAC,GAAG,EAAE,MAAM,GAAG,OAAO,CAAC;CACxB;AAED;;GAEG;AACH,MAAM,WAAW,yBAAyB;IACxC,MAAM,EAAE;QACN,IAAI,EAAE,MAAM,CAAC;QACb,SAAS,EAAE,OAAO,CAAC;QACnB,GAAG,CAAC,EAAE,MAAM,CAAC;KACd,CAAC;IACF,WAAW,EAAE,MAAM,CAAC;CACrB;AAED;;GAEG;AACH,MAAM,WAAW,oBAAoB;IACnC,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,CAAC,GAAG,EAAE,MAAM,GAAG,OAAO,CAAC;CACxB;AAED;;GAEG;AACH,MAAM,WAAW,0BAA0B;IACzC,YAAY,EAAE,qBAAqB,CAAC;IACpC,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,iBAAiB,CAAC,EAAE,yBAAyB,EAAE,CAAC;IAChD,SAAS,CAAC,EAAE,MAAM,EAAE,CAAC;IACrB,cAAc,CAAC,EAAE,MAAM,CAAC;CACzB"}
|