agentloop-langchain 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +102 -0
- package/dist/index.d.mts +99 -0
- package/dist/index.d.ts +99 -0
- package/dist/index.js +146 -0
- package/dist/index.mjs +118 -0
- package/package.json +41 -0
- package/src/callback-handler.ts +127 -0
- package/src/index.ts +3 -0
- package/src/tool.ts +70 -0
- package/src/types.ts +19 -0
- package/tsconfig.json +14 -0
package/README.md
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
1
|
+
# agentloop-langchain
|
|
2
|
+
|
|
3
|
+
AgentLoop integration for LangChain. Earn up to 70% revenue share when your agent naturally recommends relevant products — with zero changes to your existing chains.
|
|
4
|
+
|
|
5
|
+
## Install
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
npm install agentloop-langchain
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
## Option 1 — Callback Handler (recommended)
|
|
12
|
+
|
|
13
|
+
Drop into any chain. Runs automatically after every LLM response.
|
|
14
|
+
|
|
15
|
+
```typescript
|
|
16
|
+
import { AgentLoopCallbackHandler } from 'agentloop-langchain'
|
|
17
|
+
import { ChatOpenAI } from '@langchain/openai'
|
|
18
|
+
import { HumanMessage } from '@langchain/core/messages'
|
|
19
|
+
|
|
20
|
+
const agentloop = new AgentLoopCallbackHandler({
|
|
21
|
+
apiKey: process.env.AGENTLOOP_API_KEY!,
|
|
22
|
+
getUserId: () => hashUserId(session.userId), // hash it — never raw PII
|
|
23
|
+
})
|
|
24
|
+
|
|
25
|
+
// Add to any model, chain, or agent
|
|
26
|
+
const model = new ChatOpenAI({
|
|
27
|
+
callbacks: [agentloop],
|
|
28
|
+
})
|
|
29
|
+
|
|
30
|
+
const response = await model.invoke([
|
|
31
|
+
new HumanMessage('what tool should I use to monitor my LLM in production?')
|
|
32
|
+
])
|
|
33
|
+
|
|
34
|
+
// response.content already has the sponsored mention appended if relevant
|
|
35
|
+
console.log(response.content)
|
|
36
|
+
```
|
|
37
|
+
|
|
38
|
+
Works with any LangChain chain type:
|
|
39
|
+
|
|
40
|
+
```typescript
|
|
41
|
+
// With LCEL chains
|
|
42
|
+
const chain = prompt.pipe(model).pipe(outputParser)
|
|
43
|
+
const result = await chain.invoke({ question }, { callbacks: [agentloop] })
|
|
44
|
+
|
|
45
|
+
// With ConversationChain
|
|
46
|
+
const conversation = new ConversationChain({ llm: model, callbacks: [agentloop] })
|
|
47
|
+
|
|
48
|
+
// With agents
|
|
49
|
+
const agent = createReactAgent({ llm: model, tools, prompt })
|
|
50
|
+
const executor = AgentExecutor.fromAgentAndTools({ agent, tools, callbacks: [agentloop] })
|
|
51
|
+
```
|
|
52
|
+
|
|
53
|
+
## Option 2 — Tool (agent decides when to check)
|
|
54
|
+
|
|
55
|
+
Let the agent decide when to invoke AgentLoop, rather than automatically after every response.
|
|
56
|
+
|
|
57
|
+
```typescript
|
|
58
|
+
import { AgentLoopTool } from 'agentloop-langchain'
|
|
59
|
+
import { createReactAgent, AgentExecutor } from 'langchain/agents'
|
|
60
|
+
|
|
61
|
+
const tools = [
|
|
62
|
+
new AgentLoopTool({
|
|
63
|
+
apiKey: process.env.AGENTLOOP_API_KEY!,
|
|
64
|
+
getUserId: () => hashUserId(session.userId),
|
|
65
|
+
}),
|
|
66
|
+
// ...your other tools
|
|
67
|
+
]
|
|
68
|
+
|
|
69
|
+
const agent = createReactAgent({ llm, tools, prompt })
|
|
70
|
+
const executor = AgentExecutor.fromAgentAndTools({ agent, tools })
|
|
71
|
+
```
|
|
72
|
+
|
|
73
|
+
## Configuration
|
|
74
|
+
|
|
75
|
+
```typescript
|
|
76
|
+
new AgentLoopCallbackHandler({
|
|
77
|
+
apiKey: string // required — your AgentLoop API key
|
|
78
|
+
getUserId?: () => string // recommended — return a hashed user ID per session
|
|
79
|
+
contextWindow?: number // how many messages to use as context (default: 5)
|
|
80
|
+
debug?: boolean // verbose logging (default: true in development)
|
|
81
|
+
})
|
|
82
|
+
```
|
|
83
|
+
|
|
84
|
+
## How it works
|
|
85
|
+
|
|
86
|
+
1. User sends a message to your agent
|
|
87
|
+
2. Your agent generates a response via LangChain
|
|
88
|
+
3. AgentLoop intercepts the response and scores it against active campaigns
|
|
89
|
+
4. If relevance score > 70/100, a natural sponsored mention is appended
|
|
90
|
+
5. You earn up to 70% of the CPC/CPL when the user clicks or converts
|
|
91
|
+
|
|
92
|
+
**Guardrails built in:**
|
|
93
|
+
- Max 1 mention per conversation
|
|
94
|
+
- Crisis/emergency conversations are never monetised
|
|
95
|
+
- FTC-compliant "Sponsored mention via AgentLoop" disclosure on every mention
|
|
96
|
+
- Fails silently — never breaks your chain
|
|
97
|
+
|
|
98
|
+
## Get your API key
|
|
99
|
+
|
|
100
|
+
Sign up at [agentloop.life](https://agentloop.life) → Dashboard → API Keys.
|
|
101
|
+
|
|
102
|
+
Free to join as an agent owner.
|
package/dist/index.d.mts
ADDED
|
@@ -0,0 +1,99 @@
|
|
|
1
|
+
import { BaseCallbackHandler } from '@langchain/core/callbacks/base';
|
|
2
|
+
import { LLMResult } from '@langchain/core/outputs';
|
|
3
|
+
import { Serialized } from '@langchain/core/load/serializable';
|
|
4
|
+
import { Tool } from '@langchain/core/tools';
|
|
5
|
+
|
|
6
|
+
interface AgentLoopLangChainConfig {
|
|
7
|
+
/** Your AgentLoop API key (starts with al_live_) */
|
|
8
|
+
apiKey: string;
|
|
9
|
+
/**
|
|
10
|
+
* A function that returns a stable hashed user ID for the current session.
|
|
11
|
+
* Never pass raw user identifiers — hash them first.
|
|
12
|
+
* If not provided, a random session ID is used.
|
|
13
|
+
*/
|
|
14
|
+
getUserId?: () => string;
|
|
15
|
+
/**
|
|
16
|
+
* Number of recent messages to include as conversation context.
|
|
17
|
+
* Defaults to 5.
|
|
18
|
+
*/
|
|
19
|
+
contextWindow?: number;
|
|
20
|
+
/**
|
|
21
|
+
* Enable debug logging. Defaults to true in development.
|
|
22
|
+
*/
|
|
23
|
+
debug?: boolean;
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
/**
|
|
27
|
+
* AgentLoop LangChain Callback Handler
|
|
28
|
+
*
|
|
29
|
+
* Drop into any LangChain chain or agent to automatically earn revenue
|
|
30
|
+
* when your agent recommends relevant products.
|
|
31
|
+
*
|
|
32
|
+
* @example
|
|
33
|
+
* ```typescript
|
|
34
|
+
* import { AgentLoopCallbackHandler } from 'agentloop-langchain'
|
|
35
|
+
* import { ChatOpenAI } from '@langchain/openai'
|
|
36
|
+
* import { HumanMessage } from '@langchain/core/messages'
|
|
37
|
+
*
|
|
38
|
+
* const agentloop = new AgentLoopCallbackHandler({
|
|
39
|
+
* apiKey: process.env.AGENTLOOP_API_KEY!,
|
|
40
|
+
* getUserId: () => hashUserId(session.userId),
|
|
41
|
+
* })
|
|
42
|
+
*
|
|
43
|
+
* const model = new ChatOpenAI({ callbacks: [agentloop] })
|
|
44
|
+
* const response = await model.invoke([new HumanMessage('How do I monitor my LLM?')])
|
|
45
|
+
* // If relevant, response.content already has the sponsored mention appended
|
|
46
|
+
* ```
|
|
47
|
+
*/
|
|
48
|
+
declare class AgentLoopCallbackHandler extends BaseCallbackHandler {
|
|
49
|
+
name: string;
|
|
50
|
+
private readonly client;
|
|
51
|
+
private readonly getUserId;
|
|
52
|
+
private readonly contextWindow;
|
|
53
|
+
private readonly debug;
|
|
54
|
+
private conversationHistory;
|
|
55
|
+
constructor(config: AgentLoopLangChainConfig);
|
|
56
|
+
/** Capture human input as it enters the chain */
|
|
57
|
+
handleChainStart(_chain: Serialized, inputs: Record<string, unknown>): Promise<void>;
|
|
58
|
+
/**
|
|
59
|
+
* Intercept LLM output — check AgentLoop and append mention if relevant.
|
|
60
|
+
* This is the core hook: it runs after every LLM generation.
|
|
61
|
+
*/
|
|
62
|
+
handleLLMEnd(output: LLMResult): Promise<void>;
|
|
63
|
+
/** Reset conversation history (call between separate user sessions) */
|
|
64
|
+
resetHistory(): void;
|
|
65
|
+
private extractText;
|
|
66
|
+
}
|
|
67
|
+
|
|
68
|
+
/**
|
|
69
|
+
* AgentLoop LangChain Tool
|
|
70
|
+
*
|
|
71
|
+
* Use this when you want the agent to explicitly decide when to check AgentLoop,
|
|
72
|
+
* rather than it happening automatically via the callback handler.
|
|
73
|
+
*
|
|
74
|
+
* @example
|
|
75
|
+
* ```typescript
|
|
76
|
+
* import { AgentLoopTool } from 'agentloop-langchain'
|
|
77
|
+
* import { createReactAgent } from 'langchain/agents'
|
|
78
|
+
*
|
|
79
|
+
* const tools = [
|
|
80
|
+
* new AgentLoopTool({
|
|
81
|
+
* apiKey: process.env.AGENTLOOP_API_KEY!,
|
|
82
|
+
* getUserId: () => hashUserId(session.userId),
|
|
83
|
+
* }),
|
|
84
|
+
* // ...your other tools
|
|
85
|
+
* ]
|
|
86
|
+
*
|
|
87
|
+
* const agent = createReactAgent({ llm, tools, prompt })
|
|
88
|
+
* ```
|
|
89
|
+
*/
|
|
90
|
+
declare class AgentLoopTool extends Tool {
|
|
91
|
+
name: string;
|
|
92
|
+
description: string;
|
|
93
|
+
private readonly client;
|
|
94
|
+
private readonly getUserId;
|
|
95
|
+
constructor(config: AgentLoopLangChainConfig);
|
|
96
|
+
_call(input: string): Promise<string>;
|
|
97
|
+
}
|
|
98
|
+
|
|
99
|
+
export { AgentLoopCallbackHandler, type AgentLoopLangChainConfig, AgentLoopTool };
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,99 @@
|
|
|
1
|
+
import { BaseCallbackHandler } from '@langchain/core/callbacks/base';
|
|
2
|
+
import { LLMResult } from '@langchain/core/outputs';
|
|
3
|
+
import { Serialized } from '@langchain/core/load/serializable';
|
|
4
|
+
import { Tool } from '@langchain/core/tools';
|
|
5
|
+
|
|
6
|
+
interface AgentLoopLangChainConfig {
|
|
7
|
+
/** Your AgentLoop API key (starts with al_live_) */
|
|
8
|
+
apiKey: string;
|
|
9
|
+
/**
|
|
10
|
+
* A function that returns a stable hashed user ID for the current session.
|
|
11
|
+
* Never pass raw user identifiers — hash them first.
|
|
12
|
+
* If not provided, a random session ID is used.
|
|
13
|
+
*/
|
|
14
|
+
getUserId?: () => string;
|
|
15
|
+
/**
|
|
16
|
+
* Number of recent messages to include as conversation context.
|
|
17
|
+
* Defaults to 5.
|
|
18
|
+
*/
|
|
19
|
+
contextWindow?: number;
|
|
20
|
+
/**
|
|
21
|
+
* Enable debug logging. Defaults to true in development.
|
|
22
|
+
*/
|
|
23
|
+
debug?: boolean;
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
/**
|
|
27
|
+
* AgentLoop LangChain Callback Handler
|
|
28
|
+
*
|
|
29
|
+
* Drop into any LangChain chain or agent to automatically earn revenue
|
|
30
|
+
* when your agent recommends relevant products.
|
|
31
|
+
*
|
|
32
|
+
* @example
|
|
33
|
+
* ```typescript
|
|
34
|
+
* import { AgentLoopCallbackHandler } from 'agentloop-langchain'
|
|
35
|
+
* import { ChatOpenAI } from '@langchain/openai'
|
|
36
|
+
* import { HumanMessage } from '@langchain/core/messages'
|
|
37
|
+
*
|
|
38
|
+
* const agentloop = new AgentLoopCallbackHandler({
|
|
39
|
+
* apiKey: process.env.AGENTLOOP_API_KEY!,
|
|
40
|
+
* getUserId: () => hashUserId(session.userId),
|
|
41
|
+
* })
|
|
42
|
+
*
|
|
43
|
+
* const model = new ChatOpenAI({ callbacks: [agentloop] })
|
|
44
|
+
* const response = await model.invoke([new HumanMessage('How do I monitor my LLM?')])
|
|
45
|
+
* // If relevant, response.content already has the sponsored mention appended
|
|
46
|
+
* ```
|
|
47
|
+
*/
|
|
48
|
+
declare class AgentLoopCallbackHandler extends BaseCallbackHandler {
|
|
49
|
+
name: string;
|
|
50
|
+
private readonly client;
|
|
51
|
+
private readonly getUserId;
|
|
52
|
+
private readonly contextWindow;
|
|
53
|
+
private readonly debug;
|
|
54
|
+
private conversationHistory;
|
|
55
|
+
constructor(config: AgentLoopLangChainConfig);
|
|
56
|
+
/** Capture human input as it enters the chain */
|
|
57
|
+
handleChainStart(_chain: Serialized, inputs: Record<string, unknown>): Promise<void>;
|
|
58
|
+
/**
|
|
59
|
+
* Intercept LLM output — check AgentLoop and append mention if relevant.
|
|
60
|
+
* This is the core hook: it runs after every LLM generation.
|
|
61
|
+
*/
|
|
62
|
+
handleLLMEnd(output: LLMResult): Promise<void>;
|
|
63
|
+
/** Reset conversation history (call between separate user sessions) */
|
|
64
|
+
resetHistory(): void;
|
|
65
|
+
private extractText;
|
|
66
|
+
}
|
|
67
|
+
|
|
68
|
+
/**
|
|
69
|
+
* AgentLoop LangChain Tool
|
|
70
|
+
*
|
|
71
|
+
* Use this when you want the agent to explicitly decide when to check AgentLoop,
|
|
72
|
+
* rather than it happening automatically via the callback handler.
|
|
73
|
+
*
|
|
74
|
+
* @example
|
|
75
|
+
* ```typescript
|
|
76
|
+
* import { AgentLoopTool } from 'agentloop-langchain'
|
|
77
|
+
* import { createReactAgent } from 'langchain/agents'
|
|
78
|
+
*
|
|
79
|
+
* const tools = [
|
|
80
|
+
* new AgentLoopTool({
|
|
81
|
+
* apiKey: process.env.AGENTLOOP_API_KEY!,
|
|
82
|
+
* getUserId: () => hashUserId(session.userId),
|
|
83
|
+
* }),
|
|
84
|
+
* // ...your other tools
|
|
85
|
+
* ]
|
|
86
|
+
*
|
|
87
|
+
* const agent = createReactAgent({ llm, tools, prompt })
|
|
88
|
+
* ```
|
|
89
|
+
*/
|
|
90
|
+
declare class AgentLoopTool extends Tool {
|
|
91
|
+
name: string;
|
|
92
|
+
description: string;
|
|
93
|
+
private readonly client;
|
|
94
|
+
private readonly getUserId;
|
|
95
|
+
constructor(config: AgentLoopLangChainConfig);
|
|
96
|
+
_call(input: string): Promise<string>;
|
|
97
|
+
}
|
|
98
|
+
|
|
99
|
+
export { AgentLoopCallbackHandler, type AgentLoopLangChainConfig, AgentLoopTool };
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,146 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __defProp = Object.defineProperty;
|
|
3
|
+
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
|
|
4
|
+
var __getOwnPropNames = Object.getOwnPropertyNames;
|
|
5
|
+
var __hasOwnProp = Object.prototype.hasOwnProperty;
|
|
6
|
+
var __export = (target, all) => {
|
|
7
|
+
for (var name in all)
|
|
8
|
+
__defProp(target, name, { get: all[name], enumerable: true });
|
|
9
|
+
};
|
|
10
|
+
var __copyProps = (to, from, except, desc) => {
|
|
11
|
+
if (from && typeof from === "object" || typeof from === "function") {
|
|
12
|
+
for (let key of __getOwnPropNames(from))
|
|
13
|
+
if (!__hasOwnProp.call(to, key) && key !== except)
|
|
14
|
+
__defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
|
|
15
|
+
}
|
|
16
|
+
return to;
|
|
17
|
+
};
|
|
18
|
+
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
19
|
+
|
|
20
|
+
// src/index.ts
|
|
21
|
+
var index_exports = {};
|
|
22
|
+
__export(index_exports, {
|
|
23
|
+
AgentLoopCallbackHandler: () => AgentLoopCallbackHandler,
|
|
24
|
+
AgentLoopTool: () => AgentLoopTool
|
|
25
|
+
});
|
|
26
|
+
module.exports = __toCommonJS(index_exports);
|
|
27
|
+
|
|
28
|
+
// src/callback-handler.ts
|
|
29
|
+
var import_base = require("@langchain/core/callbacks/base");
|
|
30
|
+
var import_agentloop_sdk = require("agentloop-sdk");
|
|
31
|
+
var AgentLoopCallbackHandler = class extends import_base.BaseCallbackHandler {
  /**
   * @param {object} config
   * @param {string} config.apiKey - AgentLoop API key.
   * @param {() => string} [config.getUserId] - Returns a stable hashed user ID per session.
   * @param {number} [config.contextWindow=5] - Recent messages kept as context.
   * @param {boolean} [config.debug] - Verbose logging; defaults to true when NODE_ENV === "development".
   */
  constructor(config) {
    super();
    this.name = "AgentLoopCallbackHandler";
    // Rolling "Human:" / "Agent:" transcript used as conversation context.
    this.conversationHistory = [];
    this.client = new import_agentloop_sdk.AgentLoop({
      apiKey: config.apiKey,
      debug: config.debug
    });
    // BUG FIX: the previous fallback minted a NEW random ID on every call,
    // so each check() looked like a different user and per-conversation
    // guardrails (e.g. "max 1 mention per conversation") could not be
    // enforced server-side. Generate the fallback session ID once and
    // return the same value for the lifetime of this handler.
    if (config.getUserId) {
      this.getUserId = config.getUserId;
    } else {
      const sessionId = `session-${Math.random().toString(36).slice(2)}`;
      this.getUserId = () => sessionId;
    }
    this.contextWindow = config.contextWindow ?? 5;
    this.debug = config.debug ?? process.env.NODE_ENV === "development";
  }
  /** Capture human input as it enters the chain. */
  async handleChainStart(_chain, inputs) {
    const input = this.extractText(inputs);
    if (input) {
      this.conversationHistory.push(`Human: ${input}`);
      // Keep a rolling window: contextWindow human turns + agent turns.
      if (this.conversationHistory.length > this.contextWindow * 2) {
        this.conversationHistory = this.conversationHistory.slice(-this.contextWindow * 2);
      }
    }
  }
  /**
   * Intercept LLM output — check AgentLoop and append a mention if relevant.
   * This is the core hook: it runs after every LLM generation and mutates
   * the generation in place so downstream consumers see the appended text.
   */
  async handleLLMEnd(output) {
    try {
      const generation = output.generations?.[0]?.[0];
      if (!generation) return;
      // Chat models expose the text on generation.message.content; plain
      // LLMs on generation.text.
      const agentResponse = typeof generation.text === "string" ? generation.text : generation.message?.content ?? "";
      if (!agentResponse) return;
      const context = this.conversationHistory.slice(-this.contextWindow).join("\n");
      const result = await this.client.check({
        conversationContext: context || agentResponse,
        agentResponse,
        userId: this.getUserId()
      });
      if (result.shouldMention && result.suggestedMention) {
        generation.text = `${agentResponse}\n\n${result.suggestedMention}`;
        if (generation.message?.content) {
          generation.message.content = generation.text;
        }
        if (this.debug) {
          console.log(`[AgentLoop] Mention appended for: ${result.product?.name}`);
        }
      }
      this.conversationHistory.push(`Agent: ${agentResponse}`);
    } catch {
      // Fail silently by design — never break the host chain.
    }
  }
  /** Reset conversation history (call between separate user sessions). */
  resetHistory() {
    this.conversationHistory = [];
  }
  /**
   * Best-effort extraction of the human message from the various input
   * shapes LangChain chains use ({input}, {question}, {human_input},
   * {messages: [...]}). Returns "" when no text can be found.
   */
  extractText(inputs) {
    if (typeof inputs.input === "string") return inputs.input;
    if (typeof inputs.question === "string") return inputs.question;
    if (typeof inputs.human_input === "string") return inputs.human_input;
    if (Array.isArray(inputs.messages)) {
      const last = inputs.messages[inputs.messages.length - 1];
      return last?.content ?? last?.text ?? "";
    }
    return "";
  }
};
|
|
101
|
+
|
|
102
|
+
// src/tool.ts
|
|
103
|
+
var import_tools = require("@langchain/core/tools");
|
|
104
|
+
var import_agentloop_sdk2 = require("agentloop-sdk");
|
|
105
|
+
var AgentLoopTool = class extends import_tools.Tool {
  /**
   * @param {object} config
   * @param {string} config.apiKey - AgentLoop API key.
   * @param {() => string} [config.getUserId] - Returns a stable hashed user ID per session.
   * @param {boolean} [config.debug] - Verbose SDK logging.
   */
  constructor(config) {
    super();
    this.name = "agentloop_check";
    this.description = [
      "Check if a relevant product should be mentioned to the user.",
      "Use this before sending a response when the user is asking for a tool, product, or solution recommendation.",
      'Input: JSON string with "conversationContext" (last few messages) and "agentResponse" (your planned reply).',
      "Output: The agentResponse with a sponsored mention appended if relevant, or the original response if not.",
      "Do NOT use this during crisis, grief, or emergency conversations."
    ].join(" ");
    this.client = new import_agentloop_sdk2.AgentLoop({ apiKey: config.apiKey, debug: config.debug });
    // BUG FIX: the previous fallback minted a NEW random ID on every call,
    // defeating per-conversation tracking. Generate it once per tool instance.
    if (config.getUserId) {
      this.getUserId = config.getUserId;
    } else {
      const sessionId = `session-${Math.random().toString(36).slice(2)}`;
      this.getUserId = () => sessionId;
    }
  }
  /**
   * Parse the agent-provided JSON, ask AgentLoop whether a mention applies,
   * and return the planned reply (with the mention appended when relevant).
   * Always returns a string; on any parse or API failure the planned reply
   * (or the raw input) is returned unmodified so the agent is never broken.
   */
  async _call(input) {
    // Returned when anything goes wrong; refined once JSON parses.
    let planned = input;
    try {
      const parsed = JSON.parse(input);
      // BUG FIX: previously a missing agentResponse produced `undefined`
      // (a non-string tool result) and could append the mention to the
      // literal string "undefined". Fall back to the raw input instead.
      planned = typeof parsed.agentResponse === "string" ? parsed.agentResponse : input;
      const result = await this.client.check({
        conversationContext: parsed.conversationContext,
        agentResponse: parsed.agentResponse,
        userId: this.getUserId()
      });
      if (result.shouldMention && result.suggestedMention) {
        return `${planned}\n\n${result.suggestedMention}`;
      }
      return planned;
    } catch {
      // Malformed JSON or API failure: return the planned reply unmodified.
      // (No need to re-parse — `planned` already holds the right fallback.)
      return planned;
    }
  }
};
|
|
142
|
+
// Annotate the CommonJS export names for ESM import in node:
|
|
143
|
+
0 && (module.exports = {
|
|
144
|
+
AgentLoopCallbackHandler,
|
|
145
|
+
AgentLoopTool
|
|
146
|
+
});
|
package/dist/index.mjs
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
1
|
+
// src/callback-handler.ts
|
|
2
|
+
import { BaseCallbackHandler } from "@langchain/core/callbacks/base";
|
|
3
|
+
import { AgentLoop } from "agentloop-sdk";
|
|
4
|
+
var AgentLoopCallbackHandler = class extends BaseCallbackHandler {
  /**
   * @param {object} config
   * @param {string} config.apiKey - AgentLoop API key.
   * @param {() => string} [config.getUserId] - Returns a stable hashed user ID per session.
   * @param {number} [config.contextWindow=5] - Recent messages kept as context.
   * @param {boolean} [config.debug] - Verbose logging; defaults to true when NODE_ENV === "development".
   */
  constructor(config) {
    super();
    this.name = "AgentLoopCallbackHandler";
    // Rolling "Human:" / "Agent:" transcript used as conversation context.
    this.conversationHistory = [];
    this.client = new AgentLoop({
      apiKey: config.apiKey,
      debug: config.debug
    });
    // BUG FIX: the previous fallback minted a NEW random ID on every call,
    // so each check() looked like a different user and per-conversation
    // guardrails (e.g. "max 1 mention per conversation") could not be
    // enforced server-side. Generate the fallback session ID once and
    // return the same value for the lifetime of this handler.
    if (config.getUserId) {
      this.getUserId = config.getUserId;
    } else {
      const sessionId = `session-${Math.random().toString(36).slice(2)}`;
      this.getUserId = () => sessionId;
    }
    this.contextWindow = config.contextWindow ?? 5;
    this.debug = config.debug ?? process.env.NODE_ENV === "development";
  }
  /** Capture human input as it enters the chain. */
  async handleChainStart(_chain, inputs) {
    const input = this.extractText(inputs);
    if (input) {
      this.conversationHistory.push(`Human: ${input}`);
      // Keep a rolling window: contextWindow human turns + agent turns.
      if (this.conversationHistory.length > this.contextWindow * 2) {
        this.conversationHistory = this.conversationHistory.slice(-this.contextWindow * 2);
      }
    }
  }
  /**
   * Intercept LLM output — check AgentLoop and append a mention if relevant.
   * This is the core hook: it runs after every LLM generation and mutates
   * the generation in place so downstream consumers see the appended text.
   */
  async handleLLMEnd(output) {
    try {
      const generation = output.generations?.[0]?.[0];
      if (!generation) return;
      // Chat models expose the text on generation.message.content; plain
      // LLMs on generation.text.
      const agentResponse = typeof generation.text === "string" ? generation.text : generation.message?.content ?? "";
      if (!agentResponse) return;
      const context = this.conversationHistory.slice(-this.contextWindow).join("\n");
      const result = await this.client.check({
        conversationContext: context || agentResponse,
        agentResponse,
        userId: this.getUserId()
      });
      if (result.shouldMention && result.suggestedMention) {
        generation.text = `${agentResponse}\n\n${result.suggestedMention}`;
        if (generation.message?.content) {
          generation.message.content = generation.text;
        }
        if (this.debug) {
          console.log(`[AgentLoop] Mention appended for: ${result.product?.name}`);
        }
      }
      this.conversationHistory.push(`Agent: ${agentResponse}`);
    } catch {
      // Fail silently by design — never break the host chain.
    }
  }
  /** Reset conversation history (call between separate user sessions). */
  resetHistory() {
    this.conversationHistory = [];
  }
  /**
   * Best-effort extraction of the human message from the various input
   * shapes LangChain chains use ({input}, {question}, {human_input},
   * {messages: [...]}). Returns "" when no text can be found.
   */
  extractText(inputs) {
    if (typeof inputs.input === "string") return inputs.input;
    if (typeof inputs.question === "string") return inputs.question;
    if (typeof inputs.human_input === "string") return inputs.human_input;
    if (Array.isArray(inputs.messages)) {
      const last = inputs.messages[inputs.messages.length - 1];
      return last?.content ?? last?.text ?? "";
    }
    return "";
  }
};
|
|
74
|
+
|
|
75
|
+
// src/tool.ts
|
|
76
|
+
import { Tool } from "@langchain/core/tools";
|
|
77
|
+
import { AgentLoop as AgentLoop2 } from "agentloop-sdk";
|
|
78
|
+
var AgentLoopTool = class extends Tool {
  /**
   * @param {object} config
   * @param {string} config.apiKey - AgentLoop API key.
   * @param {() => string} [config.getUserId] - Returns a stable hashed user ID per session.
   * @param {boolean} [config.debug] - Verbose SDK logging.
   */
  constructor(config) {
    super();
    this.name = "agentloop_check";
    this.description = [
      "Check if a relevant product should be mentioned to the user.",
      "Use this before sending a response when the user is asking for a tool, product, or solution recommendation.",
      'Input: JSON string with "conversationContext" (last few messages) and "agentResponse" (your planned reply).',
      "Output: The agentResponse with a sponsored mention appended if relevant, or the original response if not.",
      "Do NOT use this during crisis, grief, or emergency conversations."
    ].join(" ");
    this.client = new AgentLoop2({ apiKey: config.apiKey, debug: config.debug });
    // BUG FIX: the previous fallback minted a NEW random ID on every call,
    // defeating per-conversation tracking. Generate it once per tool instance.
    if (config.getUserId) {
      this.getUserId = config.getUserId;
    } else {
      const sessionId = `session-${Math.random().toString(36).slice(2)}`;
      this.getUserId = () => sessionId;
    }
  }
  /**
   * Parse the agent-provided JSON, ask AgentLoop whether a mention applies,
   * and return the planned reply (with the mention appended when relevant).
   * Always returns a string; on any parse or API failure the planned reply
   * (or the raw input) is returned unmodified so the agent is never broken.
   */
  async _call(input) {
    // Returned when anything goes wrong; refined once JSON parses.
    let planned = input;
    try {
      const parsed = JSON.parse(input);
      // BUG FIX: previously a missing agentResponse produced `undefined`
      // (a non-string tool result) and could append the mention to the
      // literal string "undefined". Fall back to the raw input instead.
      planned = typeof parsed.agentResponse === "string" ? parsed.agentResponse : input;
      const result = await this.client.check({
        conversationContext: parsed.conversationContext,
        agentResponse: parsed.agentResponse,
        userId: this.getUserId()
      });
      if (result.shouldMention && result.suggestedMention) {
        return `${planned}\n\n${result.suggestedMention}`;
      }
      return planned;
    } catch {
      // Malformed JSON or API failure: return the planned reply unmodified.
      // (No need to re-parse — `planned` already holds the right fallback.)
      return planned;
    }
  }
};
|
|
115
|
+
export {
|
|
116
|
+
AgentLoopCallbackHandler,
|
|
117
|
+
AgentLoopTool
|
|
118
|
+
};
|
package/package.json
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "agentloop-langchain",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "AgentLoop integration for LangChain — earn up to 70% revenue share when your agent recommends relevant products.",
|
|
5
|
+
"main": "./dist/index.js",
|
|
6
|
+
"module": "./dist/index.mjs",
|
|
7
|
+
"types": "./dist/index.d.ts",
|
|
8
|
+
"exports": {
|
|
9
|
+
".": {
|
|
10
|
+
"import": "./dist/index.mjs",
|
|
11
|
+
"require": "./dist/index.js",
|
|
12
|
+
"types": "./dist/index.d.ts"
|
|
13
|
+
}
|
|
14
|
+
},
|
|
15
|
+
"scripts": {
|
|
16
|
+
"build": "tsup src/index.ts --format cjs,esm --dts",
|
|
17
|
+
"dev": "tsup src/index.ts --format cjs,esm --dts --watch"
|
|
18
|
+
},
|
|
19
|
+
"keywords": [
|
|
20
|
+
"langchain", "agentloop", "monetization", "ai-agents",
|
|
21
|
+
"revenue-share", "advertising", "llm", "callbacks"
|
|
22
|
+
],
|
|
23
|
+
"author": "AgentLoop <hello@agentloop.life>",
|
|
24
|
+
"license": "MIT",
|
|
25
|
+
"homepage": "https://agentloop.life",
|
|
26
|
+
"repository": {
|
|
27
|
+
"type": "git",
|
|
28
|
+
"url": "https://github.com/agentloop/agentloop-langchain"
|
|
29
|
+
},
|
|
30
|
+
"peerDependencies": {
|
|
31
|
+
"@langchain/core": ">=0.3.80"
|
|
32
|
+
},
|
|
33
|
+
"dependencies": {
|
|
34
|
+
"agentloop-sdk": "^0.1.0"
|
|
35
|
+
},
|
|
36
|
+
"devDependencies": {
|
|
37
|
+
"@langchain/core": "^0.3.80",
|
|
38
|
+
"tsup": "^8.0.0",
|
|
39
|
+
"typescript": "^5.0.0"
|
|
40
|
+
}
|
|
41
|
+
}
|
|
@@ -0,0 +1,127 @@
|
|
|
1
|
+
import { BaseCallbackHandler } from '@langchain/core/callbacks/base'
|
|
2
|
+
import type { LLMResult } from '@langchain/core/outputs'
|
|
3
|
+
import type { Serialized } from '@langchain/core/load/serializable'
|
|
4
|
+
import { AgentLoop } from 'agentloop-sdk'
|
|
5
|
+
import type { AgentLoopLangChainConfig } from './types'
|
|
6
|
+
|
|
7
|
+
/**
|
|
8
|
+
* AgentLoop LangChain Callback Handler
|
|
9
|
+
*
|
|
10
|
+
* Drop into any LangChain chain or agent to automatically earn revenue
|
|
11
|
+
* when your agent recommends relevant products.
|
|
12
|
+
*
|
|
13
|
+
* @example
|
|
14
|
+
* ```typescript
|
|
15
|
+
* import { AgentLoopCallbackHandler } from 'agentloop-langchain'
|
|
16
|
+
* import { ChatOpenAI } from '@langchain/openai'
|
|
17
|
+
* import { HumanMessage } from '@langchain/core/messages'
|
|
18
|
+
*
|
|
19
|
+
* const agentloop = new AgentLoopCallbackHandler({
|
|
20
|
+
* apiKey: process.env.AGENTLOOP_API_KEY!,
|
|
21
|
+
* getUserId: () => hashUserId(session.userId),
|
|
22
|
+
* })
|
|
23
|
+
*
|
|
24
|
+
* const model = new ChatOpenAI({ callbacks: [agentloop] })
|
|
25
|
+
* const response = await model.invoke([new HumanMessage('How do I monitor my LLM?')])
|
|
26
|
+
* // If relevant, response.content already has the sponsored mention appended
|
|
27
|
+
* ```
|
|
28
|
+
*/
|
|
29
|
+
export class AgentLoopCallbackHandler extends BaseCallbackHandler {
|
|
30
|
+
name = 'AgentLoopCallbackHandler'
|
|
31
|
+
|
|
32
|
+
private readonly client: AgentLoop
|
|
33
|
+
private readonly getUserId: () => string
|
|
34
|
+
private readonly contextWindow: number
|
|
35
|
+
private readonly debug: boolean
|
|
36
|
+
|
|
37
|
+
// Tracks conversation history across calls
|
|
38
|
+
private conversationHistory: string[] = []
|
|
39
|
+
|
|
40
|
+
constructor(config: AgentLoopLangChainConfig) {
|
|
41
|
+
super()
|
|
42
|
+
this.client = new AgentLoop({
|
|
43
|
+
apiKey: config.apiKey,
|
|
44
|
+
debug: config.debug,
|
|
45
|
+
})
|
|
46
|
+
this.getUserId = config.getUserId ?? (() => `session-${Math.random().toString(36).slice(2)}`)
|
|
47
|
+
this.contextWindow = config.contextWindow ?? 5
|
|
48
|
+
this.debug = config.debug ?? (process.env.NODE_ENV === 'development')
|
|
49
|
+
}
|
|
50
|
+
|
|
51
|
+
/** Capture human input as it enters the chain */
|
|
52
|
+
async handleChainStart(
|
|
53
|
+
_chain: Serialized,
|
|
54
|
+
inputs: Record<string, unknown>
|
|
55
|
+
): Promise<void> {
|
|
56
|
+
const input = this.extractText(inputs)
|
|
57
|
+
if (input) {
|
|
58
|
+
this.conversationHistory.push(`Human: ${input}`)
|
|
59
|
+
// Keep rolling window
|
|
60
|
+
if (this.conversationHistory.length > this.contextWindow * 2) {
|
|
61
|
+
this.conversationHistory = this.conversationHistory.slice(-this.contextWindow * 2)
|
|
62
|
+
}
|
|
63
|
+
}
|
|
64
|
+
}
|
|
65
|
+
|
|
66
|
+
/**
|
|
67
|
+
* Intercept LLM output — check AgentLoop and append mention if relevant.
|
|
68
|
+
* This is the core hook: it runs after every LLM generation.
|
|
69
|
+
*/
|
|
70
|
+
async handleLLMEnd(output: LLMResult): Promise<void> {
|
|
71
|
+
try {
|
|
72
|
+
const generation = output.generations?.[0]?.[0]
|
|
73
|
+
if (!generation) return
|
|
74
|
+
|
|
75
|
+
const agentResponse = typeof generation.text === 'string'
|
|
76
|
+
? generation.text
|
|
77
|
+
: (generation as any).message?.content ?? ''
|
|
78
|
+
|
|
79
|
+
if (!agentResponse) return
|
|
80
|
+
|
|
81
|
+
// Build conversation context from history
|
|
82
|
+
const context = this.conversationHistory
|
|
83
|
+
.slice(-this.contextWindow)
|
|
84
|
+
.join('\n')
|
|
85
|
+
|
|
86
|
+
const result = await this.client.check({
|
|
87
|
+
conversationContext: context || agentResponse,
|
|
88
|
+
agentResponse,
|
|
89
|
+
userId: this.getUserId(),
|
|
90
|
+
})
|
|
91
|
+
|
|
92
|
+
if (result.shouldMention && result.suggestedMention) {
|
|
93
|
+
// Append mention directly to the generation text
|
|
94
|
+
generation.text = `${agentResponse}\n\n${result.suggestedMention}`
|
|
95
|
+
if ((generation as any).message?.content) {
|
|
96
|
+
(generation as any).message.content = generation.text
|
|
97
|
+
}
|
|
98
|
+
|
|
99
|
+
if (this.debug) {
|
|
100
|
+
console.log(`[AgentLoop] Mention appended for: ${result.product?.name}`)
|
|
101
|
+
}
|
|
102
|
+
}
|
|
103
|
+
|
|
104
|
+
// Record agent response in history
|
|
105
|
+
this.conversationHistory.push(`Agent: ${agentResponse}`)
|
|
106
|
+
} catch {
|
|
107
|
+
// Fail silently — never break the chain
|
|
108
|
+
}
|
|
109
|
+
}
|
|
110
|
+
|
|
111
|
+
/** Reset conversation history (call between separate user sessions) */
|
|
112
|
+
resetHistory(): void {
|
|
113
|
+
this.conversationHistory = []
|
|
114
|
+
}
|
|
115
|
+
|
|
116
|
+
/**
 * Best-effort extraction of the user's message text from chain inputs.
 *
 * LangChain passes input in various shapes depending on chain type:
 * plain string-valued keys (`input`, `question`, `human_input`) or a
 * chat-style `messages` array whose last entry holds the user turn.
 *
 * @param inputs - Raw chain inputs as passed to the callback.
 * @returns The extracted text, or `''` when no recognizable field is found.
 */
private extractText(inputs: Record<string, unknown>): string {
  // Common string-valued keys used by different chain types.
  for (const key of ['input', 'question', 'human_input'] as const) {
    const value = inputs[key]
    if (typeof value === 'string') return value
  }

  // Chat-style input: take the last message's content or text.
  // Guard with typeof — chat `content` may be a non-string content-part
  // array; returning it unchecked would violate the `string` return type.
  if (Array.isArray(inputs.messages) && inputs.messages.length > 0) {
    const last = inputs.messages[inputs.messages.length - 1] as {
      content?: unknown
      text?: unknown
    }
    if (typeof last?.content === 'string') return last.content
    if (typeof last?.text === 'string') return last.text
  }

  return ''
}
|
|
127
|
+
}
|
package/src/index.ts
ADDED
package/src/tool.ts
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
import { Tool } from '@langchain/core/tools'
|
|
2
|
+
import { AgentLoop } from 'agentloop-sdk'
|
|
3
|
+
import type { AgentLoopLangChainConfig } from './types'
|
|
4
|
+
|
|
5
|
+
/**
|
|
6
|
+
* AgentLoop LangChain Tool
|
|
7
|
+
*
|
|
8
|
+
* Use this when you want the agent to explicitly decide when to check AgentLoop,
|
|
9
|
+
* rather than it happening automatically via the callback handler.
|
|
10
|
+
*
|
|
11
|
+
* @example
|
|
12
|
+
* ```typescript
|
|
13
|
+
* import { AgentLoopTool } from 'agentloop-langchain'
|
|
14
|
+
* import { createReactAgent } from 'langchain/agents'
|
|
15
|
+
*
|
|
16
|
+
* const tools = [
|
|
17
|
+
* new AgentLoopTool({
|
|
18
|
+
* apiKey: process.env.AGENTLOOP_API_KEY!,
|
|
19
|
+
* getUserId: () => hashUserId(session.userId),
|
|
20
|
+
* }),
|
|
21
|
+
* // ...your other tools
|
|
22
|
+
* ]
|
|
23
|
+
*
|
|
24
|
+
* const agent = createReactAgent({ llm, tools, prompt })
|
|
25
|
+
* ```
|
|
26
|
+
*/
|
|
27
|
+
export class AgentLoopTool extends Tool {
|
|
28
|
+
name = 'agentloop_check'
|
|
29
|
+
description = [
|
|
30
|
+
'Check if a relevant product should be mentioned to the user.',
|
|
31
|
+
'Use this before sending a response when the user is asking for a tool, product, or solution recommendation.',
|
|
32
|
+
'Input: JSON string with "conversationContext" (last few messages) and "agentResponse" (your planned reply).',
|
|
33
|
+
'Output: The agentResponse with a sponsored mention appended if relevant, or the original response if not.',
|
|
34
|
+
'Do NOT use this during crisis, grief, or emergency conversations.',
|
|
35
|
+
].join(' ')
|
|
36
|
+
|
|
37
|
+
private readonly client: AgentLoop
|
|
38
|
+
private readonly getUserId: () => string
|
|
39
|
+
|
|
40
|
+
constructor(config: AgentLoopLangChainConfig) {
|
|
41
|
+
super()
|
|
42
|
+
this.client = new AgentLoop({ apiKey: config.apiKey, debug: config.debug })
|
|
43
|
+
this.getUserId = config.getUserId ?? (() => `session-${Math.random().toString(36).slice(2)}`)
|
|
44
|
+
}
|
|
45
|
+
|
|
46
|
+
async _call(input: string): Promise<string> {
|
|
47
|
+
try {
|
|
48
|
+
const parsed = JSON.parse(input) as { conversationContext: string; agentResponse: string }
|
|
49
|
+
|
|
50
|
+
const result = await this.client.check({
|
|
51
|
+
conversationContext: parsed.conversationContext,
|
|
52
|
+
agentResponse: parsed.agentResponse,
|
|
53
|
+
userId: this.getUserId(),
|
|
54
|
+
})
|
|
55
|
+
|
|
56
|
+
if (result.shouldMention && result.suggestedMention) {
|
|
57
|
+
return `${parsed.agentResponse}\n\n${result.suggestedMention}`
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
return parsed.agentResponse
|
|
61
|
+
} catch {
|
|
62
|
+
// Parse error or API failure — return input unchanged
|
|
63
|
+
try {
|
|
64
|
+
return JSON.parse(input).agentResponse ?? input
|
|
65
|
+
} catch {
|
|
66
|
+
return input
|
|
67
|
+
}
|
|
68
|
+
}
|
|
69
|
+
}
|
|
70
|
+
}
|
package/src/types.ts
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
/**
 * Shared configuration accepted by the AgentLoop callback handler and tool.
 */
export interface AgentLoopLangChainConfig {
  /** Your AgentLoop API key (starts with al_live_). Passed to the AgentLoop client. */
  apiKey: string
  /**
   * A function that returns a stable hashed user ID for the current session.
   * Never pass raw user identifiers — hash them first.
   * If not provided, a random session ID is used.
   */
  getUserId?: () => string
  /**
   * Number of recent messages to include as conversation context
   * when checking whether a mention is relevant.
   * Defaults to 5.
   */
  contextWindow?: number
  /**
   * Enable debug logging. Defaults to true in development.
   */
  debug?: boolean
}
|
package/tsconfig.json
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
{
|
|
2
|
+
"compilerOptions": {
|
|
3
|
+
"target": "ES2020",
|
|
4
|
+
"module": "ESNext",
|
|
5
|
+
"moduleResolution": "bundler",
|
|
6
|
+
"strict": true,
|
|
7
|
+
"declaration": true,
|
|
8
|
+
"outDir": "./dist",
|
|
9
|
+
"esModuleInterop": true,
|
|
10
|
+
"skipLibCheck": true
|
|
11
|
+
},
|
|
12
|
+
"include": ["src"],
|
|
13
|
+
"exclude": ["node_modules", "dist"]
|
|
14
|
+
}
|