@agentvoy/core 0.1.0 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/adapters/anthropic.d.ts +8 -0
- package/dist/adapters/anthropic.d.ts.map +1 -0
- package/dist/adapters/anthropic.js +247 -0
- package/dist/adapters/anthropic.js.map +1 -0
- package/dist/adapters/index.js +4 -0
- package/dist/adapters/index.js.map +1 -1
- package/dist/adapters/langgraph.d.ts +8 -0
- package/dist/adapters/langgraph.d.ts.map +1 -0
- package/dist/adapters/langgraph.js +309 -0
- package/dist/adapters/langgraph.js.map +1 -0
- package/package.json +1 -1
- package/src/adapters/anthropic.ts +268 -0
- package/src/adapters/index.ts +4 -0
- package/src/adapters/langgraph.ts +336 -0
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"anthropic.d.ts","sourceRoot":"","sources":["../../src/adapters/anthropic.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH,OAAO,KAAK,EACV,gBAAgB,EAMjB,MAAM,aAAa,CAAC;AAGrB,eAAO,MAAM,gBAAgB,EAAE,gBAyE9B,CAAC"}
|
|
@@ -0,0 +1,247 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
/**
|
|
3
|
+
* Anthropic SDK Adapter
|
|
4
|
+
*
|
|
5
|
+
* Scaffolds projects using the Anthropic Agent SDK (Python).
|
|
6
|
+
*/
|
|
7
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
8
|
+
exports.anthropicAdapter = void 0;
|
|
9
|
+
const config_js_1 = require("../config.js");
|
|
10
|
+
exports.anthropicAdapter = {
|
|
11
|
+
name: "anthropic",
|
|
12
|
+
displayName: "Anthropic SDK",
|
|
13
|
+
language: "python",
|
|
14
|
+
async scaffold(config) {
|
|
15
|
+
const files = [
|
|
16
|
+
{
|
|
17
|
+
path: "agent.py",
|
|
18
|
+
content: generateAgentFile(config),
|
|
19
|
+
},
|
|
20
|
+
{
|
|
21
|
+
path: "tools.py",
|
|
22
|
+
content: generateToolsFile(config),
|
|
23
|
+
},
|
|
24
|
+
{
|
|
25
|
+
path: "run.py",
|
|
26
|
+
content: generateRunFile(config),
|
|
27
|
+
},
|
|
28
|
+
{
|
|
29
|
+
path: "requirements.txt",
|
|
30
|
+
content: generateRequirements(),
|
|
31
|
+
},
|
|
32
|
+
{
|
|
33
|
+
path: ".env.example",
|
|
34
|
+
content: "ANTHROPIC_API_KEY=your-api-key-here\n",
|
|
35
|
+
},
|
|
36
|
+
{
|
|
37
|
+
path: "agent.guard.yml",
|
|
38
|
+
content: (0, config_js_1.generateDefaultConfig)(config.projectName, "anthropic", config.model.model || "claude-sonnet-4-20250514"),
|
|
39
|
+
},
|
|
40
|
+
];
|
|
41
|
+
return {
|
|
42
|
+
files,
|
|
43
|
+
dependencies: {},
|
|
44
|
+
devDependencies: {},
|
|
45
|
+
scripts: {
|
|
46
|
+
start: "python run.py",
|
|
47
|
+
},
|
|
48
|
+
postInstallInstructions: [
|
|
49
|
+
"pip install -r requirements.txt",
|
|
50
|
+
"cp .env.example .env",
|
|
51
|
+
"Add your ANTHROPIC_API_KEY to .env",
|
|
52
|
+
"python run.py",
|
|
53
|
+
],
|
|
54
|
+
};
|
|
55
|
+
},
|
|
56
|
+
validateConfig(config) {
|
|
57
|
+
const errors = [];
|
|
58
|
+
const warnings = [];
|
|
59
|
+
if (config.model.provider !== "anthropic") {
|
|
60
|
+
warnings.push({
|
|
61
|
+
field: "model.provider",
|
|
62
|
+
message: `Anthropic adapter works best with provider "anthropic", got "${config.model.provider}"`,
|
|
63
|
+
});
|
|
64
|
+
}
|
|
65
|
+
return { valid: errors.length === 0, errors, warnings };
|
|
66
|
+
},
|
|
67
|
+
getDependencies() {
|
|
68
|
+
return {
|
|
69
|
+
anthropic: ">=0.40.0",
|
|
70
|
+
"python-dotenv": ">=1.0.0",
|
|
71
|
+
};
|
|
72
|
+
},
|
|
73
|
+
};
|
|
74
|
+
function generateAgentFile(config) {
|
|
75
|
+
const model = config.model.model || "claude-sonnet-4-20250514";
|
|
76
|
+
const maxIterations = config.guardrails?.behavior?.max_iterations || 20;
|
|
77
|
+
return `"""
|
|
78
|
+
${config.projectName} — Built with AgentVoy
|
|
79
|
+
https://github.com/agentvoy
|
|
80
|
+
"""
|
|
81
|
+
|
|
82
|
+
import anthropic
|
|
83
|
+
from tools import get_tools, process_tool_call
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
def create_client() -> anthropic.Anthropic:
|
|
87
|
+
"""Create the Anthropic client."""
|
|
88
|
+
return anthropic.Anthropic()
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
def run_agent(prompt: str) -> str:
|
|
92
|
+
"""Run the agent with an agentic loop."""
|
|
93
|
+
client = create_client()
|
|
94
|
+
tools = get_tools()
|
|
95
|
+
messages = [{"role": "user", "content": prompt}]
|
|
96
|
+
|
|
97
|
+
iteration = 0
|
|
98
|
+
max_iterations = ${maxIterations}
|
|
99
|
+
|
|
100
|
+
while iteration < max_iterations:
|
|
101
|
+
iteration += 1
|
|
102
|
+
|
|
103
|
+
response = client.messages.create(
|
|
104
|
+
model="${model}",
|
|
105
|
+
max_tokens=8096,
|
|
106
|
+
tools=tools,
|
|
107
|
+
messages=messages,
|
|
108
|
+
)
|
|
109
|
+
|
|
110
|
+
# Add assistant response to messages
|
|
111
|
+
messages.append({"role": "assistant", "content": response.content})
|
|
112
|
+
|
|
113
|
+
# If no tool calls, we're done
|
|
114
|
+
if response.stop_reason == "end_turn":
|
|
115
|
+
# Extract final text response
|
|
116
|
+
for block in response.content:
|
|
117
|
+
if hasattr(block, "text"):
|
|
118
|
+
return block.text
|
|
119
|
+
return "Done."
|
|
120
|
+
|
|
121
|
+
# Process tool calls
|
|
122
|
+
if response.stop_reason == "tool_use":
|
|
123
|
+
tool_results = []
|
|
124
|
+
for block in response.content:
|
|
125
|
+
if block.type == "tool_use":
|
|
126
|
+
result = process_tool_call(block.name, block.input)
|
|
127
|
+
tool_results.append({
|
|
128
|
+
"type": "tool_result",
|
|
129
|
+
"tool_use_id": block.id,
|
|
130
|
+
"content": str(result),
|
|
131
|
+
})
|
|
132
|
+
|
|
133
|
+
messages.append({"role": "user", "content": tool_results})
|
|
134
|
+
|
|
135
|
+
return "Max iterations reached."
|
|
136
|
+
`;
|
|
137
|
+
}
|
|
138
|
+
function generateToolsFile(_config) {
|
|
139
|
+
return `"""
|
|
140
|
+
Agent tools — add your custom tools here.
|
|
141
|
+
"""
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
def get_tools() -> list:
|
|
145
|
+
"""Return tool definitions for the Anthropic API."""
|
|
146
|
+
return [
|
|
147
|
+
{
|
|
148
|
+
"name": "search_web",
|
|
149
|
+
"description": "Search the web for information on a given topic.",
|
|
150
|
+
"input_schema": {
|
|
151
|
+
"type": "object",
|
|
152
|
+
"properties": {
|
|
153
|
+
"query": {
|
|
154
|
+
"type": "string",
|
|
155
|
+
"description": "The search query.",
|
|
156
|
+
}
|
|
157
|
+
},
|
|
158
|
+
"required": ["query"],
|
|
159
|
+
},
|
|
160
|
+
},
|
|
161
|
+
{
|
|
162
|
+
"name": "read_file",
|
|
163
|
+
"description": "Read the contents of a file.",
|
|
164
|
+
"input_schema": {
|
|
165
|
+
"type": "object",
|
|
166
|
+
"properties": {
|
|
167
|
+
"path": {
|
|
168
|
+
"type": "string",
|
|
169
|
+
"description": "Path to the file to read.",
|
|
170
|
+
}
|
|
171
|
+
},
|
|
172
|
+
"required": ["path"],
|
|
173
|
+
},
|
|
174
|
+
},
|
|
175
|
+
]
|
|
176
|
+
|
|
177
|
+
|
|
178
|
+
def process_tool_call(tool_name: str, tool_input: dict) -> str:
|
|
179
|
+
"""Execute a tool call and return the result."""
|
|
180
|
+
if tool_name == "search_web":
|
|
181
|
+
return _search_web(tool_input["query"])
|
|
182
|
+
elif tool_name == "read_file":
|
|
183
|
+
return _read_file(tool_input["path"])
|
|
184
|
+
else:
|
|
185
|
+
return f"Unknown tool: {tool_name}"
|
|
186
|
+
|
|
187
|
+
|
|
188
|
+
def _search_web(query: str) -> str:
|
|
189
|
+
"""Search the web for information."""
|
|
190
|
+
# TODO: Implement your search logic (e.g., Tavily, Serper, Brave Search)
|
|
191
|
+
return f"Search results for: {query}"
|
|
192
|
+
|
|
193
|
+
|
|
194
|
+
def _read_file(path: str) -> str:
|
|
195
|
+
"""Read a file's contents."""
|
|
196
|
+
try:
|
|
197
|
+
with open(path, "r") as f:
|
|
198
|
+
return f.read()
|
|
199
|
+
except FileNotFoundError:
|
|
200
|
+
return f"File not found: {path}"
|
|
201
|
+
except PermissionError:
|
|
202
|
+
return f"Permission denied: {path}"
|
|
203
|
+
`;
|
|
204
|
+
}
|
|
205
|
+
function generateRunFile(config) {
|
|
206
|
+
return `"""
|
|
207
|
+
Run the ${config.projectName} agent.
|
|
208
|
+
"""
|
|
209
|
+
|
|
210
|
+
from dotenv import load_dotenv
|
|
211
|
+
from agent import run_agent
|
|
212
|
+
|
|
213
|
+
load_dotenv()
|
|
214
|
+
|
|
215
|
+
|
|
216
|
+
def main():
|
|
217
|
+
print("\\n🚀 ${config.projectName} — Powered by AgentVoy")
|
|
218
|
+
print("=" * 50)
|
|
219
|
+
print("Type your prompt (or 'quit' to exit):\\n")
|
|
220
|
+
|
|
221
|
+
while True:
|
|
222
|
+
try:
|
|
223
|
+
prompt = input("> ")
|
|
224
|
+
if prompt.lower() in ("quit", "exit", "q"):
|
|
225
|
+
print("\\nGoodbye!")
|
|
226
|
+
break
|
|
227
|
+
if not prompt.strip():
|
|
228
|
+
continue
|
|
229
|
+
|
|
230
|
+
print("\\nThinking...\\n")
|
|
231
|
+
result = run_agent(prompt)
|
|
232
|
+
print(f"\\n{result}\\n")
|
|
233
|
+
except KeyboardInterrupt:
|
|
234
|
+
print("\\n\\nGoodbye!")
|
|
235
|
+
break
|
|
236
|
+
|
|
237
|
+
|
|
238
|
+
if __name__ == "__main__":
|
|
239
|
+
main()
|
|
240
|
+
`;
|
|
241
|
+
}
|
|
242
|
+
function generateRequirements() {
|
|
243
|
+
return `anthropic>=0.40.0
|
|
244
|
+
python-dotenv>=1.0.0
|
|
245
|
+
`;
|
|
246
|
+
}
|
|
247
|
+
//# sourceMappingURL=anthropic.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"anthropic.js","sourceRoot":"","sources":["../../src/adapters/anthropic.ts"],"names":[],"mappings":";AAAA;;;;GAIG;;;AAUH,4CAAqD;AAExC,QAAA,gBAAgB,GAAqB;IAChD,IAAI,EAAE,WAAW;IACjB,WAAW,EAAE,eAAe;IAC5B,QAAQ,EAAE,QAAQ;IAElB,KAAK,CAAC,QAAQ,CAAC,MAAsB;QACnC,MAAM,KAAK,GAAoB;YAC7B;gBACE,IAAI,EAAE,UAAU;gBAChB,OAAO,EAAE,iBAAiB,CAAC,MAAM,CAAC;aACnC;YACD;gBACE,IAAI,EAAE,UAAU;gBAChB,OAAO,EAAE,iBAAiB,CAAC,MAAM,CAAC;aACnC;YACD;gBACE,IAAI,EAAE,QAAQ;gBACd,OAAO,EAAE,eAAe,CAAC,MAAM,CAAC;aACjC;YACD;gBACE,IAAI,EAAE,kBAAkB;gBACxB,OAAO,EAAE,oBAAoB,EAAE;aAChC;YACD;gBACE,IAAI,EAAE,cAAc;gBACpB,OAAO,EAAE,uCAAuC;aACjD;YACD;gBACE,IAAI,EAAE,iBAAiB;gBACvB,OAAO,EAAE,IAAA,iCAAqB,EAC5B,MAAM,CAAC,WAAW,EAClB,WAAW,EACX,MAAM,CAAC,KAAK,CAAC,KAAK,IAAI,0BAA0B,CACjD;aACF;SACF,CAAC;QAEF,OAAO;YACL,KAAK;YACL,YAAY,EAAE,EAAE;YAChB,eAAe,EAAE,EAAE;YACnB,OAAO,EAAE;gBACP,KAAK,EAAE,eAAe;aACvB;YACD,uBAAuB,EAAE;gBACvB,iCAAiC;gBACjC,sBAAsB;gBACtB,oCAAoC;gBACpC,eAAe;aAChB;SACF,CAAC;IACJ,CAAC;IAED,cAAc,CAAC,MAAwB;QACrC,MAAM,MAAM,GAAyC,EAAE,CAAC;QACxD,MAAM,QAAQ,GAAyC,EAAE,CAAC;QAE1D,IAAI,MAAM,CAAC,KAAK,CAAC,QAAQ,KAAK,WAAW,EAAE,CAAC;YAC1C,QAAQ,CAAC,IAAI,CAAC;gBACZ,KAAK,EAAE,gBAAgB;gBACvB,OAAO,EAAE,gEAAgE,MAAM,CAAC,KAAK,CAAC,QAAQ,GAAG;aAClG,CAAC,CAAC;QACL,CAAC;QAED,OAAO,EAAE,KAAK,EAAE,MAAM,CAAC,MAAM,KAAK,CAAC,EAAE,MAAM,EAAE,QAAQ,EAAE,CAAC;IAC1D,CAAC;IAED,eAAe;QACb,OAAO;YACL,SAAS,EAAE,UAAU;YACrB,eAAe,EAAE,SAAS;SAC3B,CAAC;IACJ,CAAC;CACF,CAAC;AAEF,SAAS,iBAAiB,CAAC,MAAsB;IAC/C,MAAM,KAAK,GAAG,MAAM,CAAC,KAAK,CAAC,KAAK,IAAI,0BAA0B,CAAC;IAC/D,MAAM,aAAa,GAAG,MAAM,CAAC,UAAU,EAAE,QAAQ,EAAE,cAAc,IAAI,EAAE,CAAC;IAExE,OAAO;EACP,MAAM,CAAC,WAAW;;;;;;;;;;;;;;;;;;;;uBAoBG,aAAa;;;;;;qBAMf,KAAK;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAgCzB,CAAC;AACF,CAAC;AAED,SAAS,iBAAiB,CAAC,OAAuB;IAChD,OAAO;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAgER,CAAC;AACF,CAAC;AAED,SAAS,eAAe,CAAC,MAAsB;IAC7C,OAAO;UACC,MAAM,CAAC,WAAW;;;;;;;;;;mBAUT,MAAM,CAAC,WAAW;;;;;;;;;;;;;;;;;;;;;;;CAuBpC,CAAC;AACF,CAAC;AAED,SAAS,oBAAoB;IAC3B,OAAO;
;CAER,CAAC;AACF,CAAC"}
|
package/dist/adapters/index.js
CHANGED
|
@@ -16,8 +16,12 @@ const registry_js_2 = require("./registry.js");
|
|
|
16
16
|
const openai_js_1 = require("./openai.js");
|
|
17
17
|
const google_adk_js_1 = require("./google-adk.js");
|
|
18
18
|
const crewai_js_1 = require("./crewai.js");
|
|
19
|
+
const anthropic_js_1 = require("./anthropic.js");
|
|
20
|
+
const langgraph_js_1 = require("./langgraph.js");
|
|
19
21
|
// Register built-in adapters
|
|
20
22
|
(0, registry_js_2.registerAdapter)(openai_js_1.openaiAdapter);
|
|
21
23
|
(0, registry_js_2.registerAdapter)(google_adk_js_1.googleAdkAdapter);
|
|
22
24
|
(0, registry_js_2.registerAdapter)(crewai_js_1.crewaiAdapter);
|
|
25
|
+
(0, registry_js_2.registerAdapter)(anthropic_js_1.anthropicAdapter);
|
|
26
|
+
(0, registry_js_2.registerAdapter)(langgraph_js_1.langgraphAdapter);
|
|
23
27
|
//# sourceMappingURL=index.js.map
|
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"index.js","sourceRoot":"","sources":["../../src/adapters/index.ts"],"names":[],"mappings":";AAAA;;;;GAIG;;;AAEH,6CAAsG;AAA7F,8GAAA,eAAe,OAAA;AAAE,yGAAA,UAAU,OAAA;AAAE,2GAAA,YAAY,OAAA;AAAE,6GAAA,cAAc,OAAA;AAAE,yGAAA,UAAU,OAAA;AAE9E,+CAAgD;AAChD,2CAA4C;AAC5C,mDAAmD;AACnD,2CAA4C;
|
|
1
|
+
{"version":3,"file":"index.js","sourceRoot":"","sources":["../../src/adapters/index.ts"],"names":[],"mappings":";AAAA;;;;GAIG;;;AAEH,6CAAsG;AAA7F,8GAAA,eAAe,OAAA;AAAE,yGAAA,UAAU,OAAA;AAAE,2GAAA,YAAY,OAAA;AAAE,6GAAA,cAAc,OAAA;AAAE,yGAAA,UAAU,OAAA;AAE9E,+CAAgD;AAChD,2CAA4C;AAC5C,mDAAmD;AACnD,2CAA4C;AAC5C,iDAAkD;AAClD,iDAAkD;AAElD,6BAA6B;AAC7B,IAAA,6BAAe,EAAC,yBAAa,CAAC,CAAC;AAC/B,IAAA,6BAAe,EAAC,gCAAgB,CAAC,CAAC;AAClC,IAAA,6BAAe,EAAC,yBAAa,CAAC,CAAC;AAC/B,IAAA,6BAAe,EAAC,+BAAgB,CAAC,CAAC;AAClC,IAAA,6BAAe,EAAC,+BAAgB,CAAC,CAAC"}
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* LangGraph Adapter
|
|
3
|
+
*
|
|
4
|
+
* Scaffolds projects using LangGraph (Python) — stateful, graph-based agent workflows.
|
|
5
|
+
*/
|
|
6
|
+
import type { FrameworkAdapter } from "../types.js";
|
|
7
|
+
export declare const langgraphAdapter: FrameworkAdapter;
|
|
8
|
+
//# sourceMappingURL=langgraph.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"langgraph.d.ts","sourceRoot":"","sources":["../../src/adapters/langgraph.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH,OAAO,KAAK,EACV,gBAAgB,EAMjB,MAAM,aAAa,CAAC;AAGrB,eAAO,MAAM,gBAAgB,EAAE,gBAgF9B,CAAC"}
|
|
@@ -0,0 +1,309 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
/**
|
|
3
|
+
* LangGraph Adapter
|
|
4
|
+
*
|
|
5
|
+
* Scaffolds projects using LangGraph (Python) — stateful, graph-based agent workflows.
|
|
6
|
+
*/
|
|
7
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
8
|
+
exports.langgraphAdapter = void 0;
|
|
9
|
+
const config_js_1 = require("../config.js");
|
|
10
|
+
exports.langgraphAdapter = {
|
|
11
|
+
name: "langgraph",
|
|
12
|
+
displayName: "LangGraph",
|
|
13
|
+
language: "python",
|
|
14
|
+
async scaffold(config) {
|
|
15
|
+
const files = [
|
|
16
|
+
{
|
|
17
|
+
path: "agent.py",
|
|
18
|
+
content: generateAgentFile(config),
|
|
19
|
+
},
|
|
20
|
+
{
|
|
21
|
+
path: "tools.py",
|
|
22
|
+
content: generateToolsFile(),
|
|
23
|
+
},
|
|
24
|
+
{
|
|
25
|
+
path: "state.py",
|
|
26
|
+
content: generateStateFile(config),
|
|
27
|
+
},
|
|
28
|
+
{
|
|
29
|
+
path: "run.py",
|
|
30
|
+
content: generateRunFile(config),
|
|
31
|
+
},
|
|
32
|
+
{
|
|
33
|
+
path: "requirements.txt",
|
|
34
|
+
content: generateRequirements(config),
|
|
35
|
+
},
|
|
36
|
+
{
|
|
37
|
+
path: ".env.example",
|
|
38
|
+
content: generateEnvExample(config),
|
|
39
|
+
},
|
|
40
|
+
{
|
|
41
|
+
path: "agent.guard.yml",
|
|
42
|
+
content: (0, config_js_1.generateDefaultConfig)(config.projectName, "langgraph", config.model.model || "gpt-4o"),
|
|
43
|
+
},
|
|
44
|
+
];
|
|
45
|
+
return {
|
|
46
|
+
files,
|
|
47
|
+
dependencies: {},
|
|
48
|
+
devDependencies: {},
|
|
49
|
+
scripts: {
|
|
50
|
+
start: "python run.py",
|
|
51
|
+
},
|
|
52
|
+
postInstallInstructions: [
|
|
53
|
+
"pip install -r requirements.txt",
|
|
54
|
+
"cp .env.example .env",
|
|
55
|
+
`Add your ${getApiKeyEnv(config)} to .env`,
|
|
56
|
+
"python run.py",
|
|
57
|
+
],
|
|
58
|
+
};
|
|
59
|
+
},
|
|
60
|
+
validateConfig(config) {
|
|
61
|
+
const errors = [];
|
|
62
|
+
const warnings = [];
|
|
63
|
+
const supported = ["openai", "anthropic", "google"];
|
|
64
|
+
if (!supported.includes(config.model.provider)) {
|
|
65
|
+
warnings.push({
|
|
66
|
+
field: "model.provider",
|
|
67
|
+
message: `LangGraph works best with providers: ${supported.join(", ")}. Got "${config.model.provider}"`,
|
|
68
|
+
});
|
|
69
|
+
}
|
|
70
|
+
return { valid: errors.length === 0, errors, warnings };
|
|
71
|
+
},
|
|
72
|
+
getDependencies() {
|
|
73
|
+
return {
|
|
74
|
+
langgraph: ">=0.2.0",
|
|
75
|
+
langchain: ">=0.3.0",
|
|
76
|
+
"langchain-core": ">=0.3.0",
|
|
77
|
+
"python-dotenv": ">=1.0.0",
|
|
78
|
+
};
|
|
79
|
+
},
|
|
80
|
+
};
|
|
81
|
+
function getApiKeyEnv(config) {
|
|
82
|
+
const envMap = {
|
|
83
|
+
openai: "OPENAI_API_KEY",
|
|
84
|
+
anthropic: "ANTHROPIC_API_KEY",
|
|
85
|
+
google: "GOOGLE_API_KEY",
|
|
86
|
+
};
|
|
87
|
+
return envMap[config.model.provider] || "API_KEY";
|
|
88
|
+
}
|
|
89
|
+
function getLangChainPackage(config) {
|
|
90
|
+
const pkgMap = {
|
|
91
|
+
openai: "langchain-openai",
|
|
92
|
+
anthropic: "langchain-anthropic",
|
|
93
|
+
google: "langchain-google-genai",
|
|
94
|
+
};
|
|
95
|
+
return pkgMap[config.model.provider] || "langchain-openai";
|
|
96
|
+
}
|
|
97
|
+
function getLLMImport(config) {
|
|
98
|
+
const importMap = {
|
|
99
|
+
openai: "from langchain_openai import ChatOpenAI",
|
|
100
|
+
anthropic: "from langchain_anthropic import ChatAnthropic",
|
|
101
|
+
google: "from langchain_google_genai import ChatGoogleGenerativeAI",
|
|
102
|
+
};
|
|
103
|
+
return importMap[config.model.provider] || "from langchain_openai import ChatOpenAI";
|
|
104
|
+
}
|
|
105
|
+
function getLLMClass(config) {
|
|
106
|
+
const classMap = {
|
|
107
|
+
openai: "ChatOpenAI",
|
|
108
|
+
anthropic: "ChatAnthropic",
|
|
109
|
+
google: "ChatGoogleGenerativeAI",
|
|
110
|
+
};
|
|
111
|
+
return classMap[config.model.provider] || "ChatOpenAI";
|
|
112
|
+
}
|
|
113
|
+
function generateAgentFile(config) {
|
|
114
|
+
const model = config.model.model || "gpt-4o";
|
|
115
|
+
const maxIterations = config.guardrails?.behavior?.max_iterations || 20;
|
|
116
|
+
const llmImport = getLLMImport(config);
|
|
117
|
+
const llmClass = getLLMClass(config);
|
|
118
|
+
return `"""
|
|
119
|
+
${config.projectName} — Built with AgentVoy
|
|
120
|
+
https://github.com/agentvoy
|
|
121
|
+
|
|
122
|
+
LangGraph agent with a stateful agentic loop.
|
|
123
|
+
"""
|
|
124
|
+
|
|
125
|
+
from langchain_core.messages import HumanMessage, SystemMessage
|
|
126
|
+
from langgraph.graph import StateGraph, END
|
|
127
|
+
from langgraph.prebuilt import ToolNode
|
|
128
|
+
${llmImport}
|
|
129
|
+
|
|
130
|
+
from state import AgentState
|
|
131
|
+
from tools import get_tools
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
def create_graph():
|
|
135
|
+
"""Build the agent state graph."""
|
|
136
|
+
tools = get_tools()
|
|
137
|
+
llm = ${llmClass}(model="${model}").bind_tools(tools)
|
|
138
|
+
tool_node = ToolNode(tools)
|
|
139
|
+
|
|
140
|
+
def should_continue(state: AgentState) -> str:
|
|
141
|
+
"""Route: call tools or finish."""
|
|
142
|
+
messages = state["messages"]
|
|
143
|
+
last = messages[-1]
|
|
144
|
+
if last.tool_calls:
|
|
145
|
+
return "tools"
|
|
146
|
+
return END
|
|
147
|
+
|
|
148
|
+
def call_model(state: AgentState) -> dict:
|
|
149
|
+
"""Call the LLM with current messages."""
|
|
150
|
+
messages = state["messages"]
|
|
151
|
+
iteration = state.get("iteration", 0)
|
|
152
|
+
|
|
153
|
+
if iteration >= ${maxIterations}:
|
|
154
|
+
from langchain_core.messages import AIMessage
|
|
155
|
+
return {
|
|
156
|
+
"messages": [AIMessage(content="Max iterations reached.")],
|
|
157
|
+
"iteration": iteration,
|
|
158
|
+
}
|
|
159
|
+
|
|
160
|
+
response = llm.invoke(messages)
|
|
161
|
+
return {
|
|
162
|
+
"messages": [response],
|
|
163
|
+
"iteration": iteration + 1,
|
|
164
|
+
}
|
|
165
|
+
|
|
166
|
+
# Build the graph
|
|
167
|
+
graph = StateGraph(AgentState)
|
|
168
|
+
graph.add_node("agent", call_model)
|
|
169
|
+
graph.add_node("tools", tool_node)
|
|
170
|
+
|
|
171
|
+
graph.set_entry_point("agent")
|
|
172
|
+
graph.add_conditional_edges("agent", should_continue)
|
|
173
|
+
graph.add_edge("tools", "agent")
|
|
174
|
+
|
|
175
|
+
return graph.compile()
|
|
176
|
+
|
|
177
|
+
|
|
178
|
+
def run_agent(prompt: str) -> str:
|
|
179
|
+
"""Run the agent graph with a user prompt."""
|
|
180
|
+
app = create_graph()
|
|
181
|
+
|
|
182
|
+
initial_state = {
|
|
183
|
+
"messages": [HumanMessage(content=prompt)],
|
|
184
|
+
"iteration": 0,
|
|
185
|
+
}
|
|
186
|
+
|
|
187
|
+
final_state = app.invoke(initial_state)
|
|
188
|
+
messages = final_state["messages"]
|
|
189
|
+
|
|
190
|
+
# Return the last AI message text
|
|
191
|
+
for msg in reversed(messages):
|
|
192
|
+
if hasattr(msg, "content") and isinstance(msg.content, str):
|
|
193
|
+
return msg.content
|
|
194
|
+
|
|
195
|
+
return "Done."
|
|
196
|
+
`;
|
|
197
|
+
}
|
|
198
|
+
function generateStateFile(_config) {
|
|
199
|
+
return `"""
|
|
200
|
+
Agent state definition for LangGraph.
|
|
201
|
+
"""
|
|
202
|
+
|
|
203
|
+
from typing import TypedDict, Annotated, Sequence
|
|
204
|
+
from langchain_core.messages import BaseMessage
|
|
205
|
+
import operator
|
|
206
|
+
|
|
207
|
+
|
|
208
|
+
class AgentState(TypedDict):
|
|
209
|
+
"""State passed between nodes in the graph."""
|
|
210
|
+
messages: Annotated[Sequence[BaseMessage], operator.add]
|
|
211
|
+
iteration: int
|
|
212
|
+
`;
|
|
213
|
+
}
|
|
214
|
+
function generateToolsFile() {
|
|
215
|
+
return `"""
|
|
216
|
+
Agent tools — add your custom tools here.
|
|
217
|
+
LangGraph uses @tool decorated functions from langchain_core.
|
|
218
|
+
"""
|
|
219
|
+
|
|
220
|
+
from langchain_core.tools import tool
|
|
221
|
+
|
|
222
|
+
|
|
223
|
+
@tool
|
|
224
|
+
def search_web(query: str) -> str:
|
|
225
|
+
"""Search the web for information on a given topic.
|
|
226
|
+
|
|
227
|
+
Args:
|
|
228
|
+
query: The search query.
|
|
229
|
+
"""
|
|
230
|
+
# TODO: Implement your search logic (e.g., Tavily, Serper, Brave Search)
|
|
231
|
+
# Example with Tavily:
|
|
232
|
+
# from tavily import TavilyClient
|
|
233
|
+
# client = TavilyClient(api_key=os.environ["TAVILY_API_KEY"])
|
|
234
|
+
# return client.search(query)["results"][0]["content"]
|
|
235
|
+
return f"Search results for: {query}"
|
|
236
|
+
|
|
237
|
+
|
|
238
|
+
@tool
|
|
239
|
+
def read_file(path: str) -> str:
|
|
240
|
+
"""Read the contents of a file.
|
|
241
|
+
|
|
242
|
+
Args:
|
|
243
|
+
path: Path to the file to read.
|
|
244
|
+
"""
|
|
245
|
+
try:
|
|
246
|
+
with open(path, "r") as f:
|
|
247
|
+
return f.read()
|
|
248
|
+
except FileNotFoundError:
|
|
249
|
+
return f"File not found: {path}"
|
|
250
|
+
except PermissionError:
|
|
251
|
+
return f"Permission denied: {path}"
|
|
252
|
+
|
|
253
|
+
|
|
254
|
+
def get_tools() -> list:
|
|
255
|
+
"""Return all available tools."""
|
|
256
|
+
return [search_web, read_file]
|
|
257
|
+
`;
|
|
258
|
+
}
|
|
259
|
+
function generateRunFile(config) {
|
|
260
|
+
return `"""
|
|
261
|
+
Run the ${config.projectName} agent.
|
|
262
|
+
"""
|
|
263
|
+
|
|
264
|
+
from dotenv import load_dotenv
|
|
265
|
+
from agent import run_agent
|
|
266
|
+
|
|
267
|
+
load_dotenv()
|
|
268
|
+
|
|
269
|
+
|
|
270
|
+
def main():
|
|
271
|
+
print("\\n🚀 ${config.projectName} — Powered by AgentVoy")
|
|
272
|
+
print("=" * 50)
|
|
273
|
+
print("Type your prompt (or 'quit' to exit):\\n")
|
|
274
|
+
|
|
275
|
+
while True:
|
|
276
|
+
try:
|
|
277
|
+
prompt = input("> ")
|
|
278
|
+
if prompt.lower() in ("quit", "exit", "q"):
|
|
279
|
+
print("\\nGoodbye!")
|
|
280
|
+
break
|
|
281
|
+
if not prompt.strip():
|
|
282
|
+
continue
|
|
283
|
+
|
|
284
|
+
print("\\nThinking...\\n")
|
|
285
|
+
result = run_agent(prompt)
|
|
286
|
+
print(f"\\n{result}\\n")
|
|
287
|
+
except KeyboardInterrupt:
|
|
288
|
+
print("\\n\\nGoodbye!")
|
|
289
|
+
break
|
|
290
|
+
|
|
291
|
+
|
|
292
|
+
if __name__ == "__main__":
|
|
293
|
+
main()
|
|
294
|
+
`;
|
|
295
|
+
}
|
|
296
|
+
function generateRequirements(config) {
|
|
297
|
+
const langchainPkg = getLangChainPackage(config);
|
|
298
|
+
return `langgraph>=0.2.0
|
|
299
|
+
langchain>=0.3.0
|
|
300
|
+
langchain-core>=0.3.0
|
|
301
|
+
${langchainPkg}>=0.2.0
|
|
302
|
+
python-dotenv>=1.0.0
|
|
303
|
+
`;
|
|
304
|
+
}
|
|
305
|
+
function generateEnvExample(config) {
|
|
306
|
+
const envKey = getApiKeyEnv(config);
|
|
307
|
+
return `${envKey}=your-api-key-here\n`;
|
|
308
|
+
}
|
|
309
|
+
//# sourceMappingURL=langgraph.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"langgraph.js","sourceRoot":"","sources":["../../src/adapters/langgraph.ts"],"names":[],"mappings":";AAAA;;;;GAIG;;;AAUH,4CAAqD;AAExC,QAAA,gBAAgB,GAAqB;IAChD,IAAI,EAAE,WAAW;IACjB,WAAW,EAAE,WAAW;IACxB,QAAQ,EAAE,QAAQ;IAElB,KAAK,CAAC,QAAQ,CAAC,MAAsB;QACnC,MAAM,KAAK,GAAoB;YAC7B;gBACE,IAAI,EAAE,UAAU;gBAChB,OAAO,EAAE,iBAAiB,CAAC,MAAM,CAAC;aACnC;YACD;gBACE,IAAI,EAAE,UAAU;gBAChB,OAAO,EAAE,iBAAiB,EAAE;aAC7B;YACD;gBACE,IAAI,EAAE,UAAU;gBAChB,OAAO,EAAE,iBAAiB,CAAC,MAAM,CAAC;aACnC;YACD;gBACE,IAAI,EAAE,QAAQ;gBACd,OAAO,EAAE,eAAe,CAAC,MAAM,CAAC;aACjC;YACD;gBACE,IAAI,EAAE,kBAAkB;gBACxB,OAAO,EAAE,oBAAoB,CAAC,MAAM,CAAC;aACtC;YACD;gBACE,IAAI,EAAE,cAAc;gBACpB,OAAO,EAAE,kBAAkB,CAAC,MAAM,CAAC;aACpC;YACD;gBACE,IAAI,EAAE,iBAAiB;gBACvB,OAAO,EAAE,IAAA,iCAAqB,EAC5B,MAAM,CAAC,WAAW,EAClB,WAAW,EACX,MAAM,CAAC,KAAK,CAAC,KAAK,IAAI,QAAQ,CAC/B;aACF;SACF,CAAC;QAEF,OAAO;YACL,KAAK;YACL,YAAY,EAAE,EAAE;YAChB,eAAe,EAAE,EAAE;YACnB,OAAO,EAAE;gBACP,KAAK,EAAE,eAAe;aACvB;YACD,uBAAuB,EAAE;gBACvB,iCAAiC;gBACjC,sBAAsB;gBACtB,YAAY,YAAY,CAAC,MAAM,CAAC,UAAU;gBAC1C,eAAe;aAChB;SACF,CAAC;IACJ,CAAC;IAED,cAAc,CAAC,MAAwB;QACrC,MAAM,MAAM,GAAyC,EAAE,CAAC;QACxD,MAAM,QAAQ,GAAyC,EAAE,CAAC;QAE1D,MAAM,SAAS,GAAG,CAAC,QAAQ,EAAE,WAAW,EAAE,QAAQ,CAAC,CAAC;QACpD,IAAI,CAAC,SAAS,CAAC,QAAQ,CAAC,MAAM,CAAC,KAAK,CAAC,QAAQ,CAAC,EAAE,CAAC;YAC/C,QAAQ,CAAC,IAAI,CAAC;gBACZ,KAAK,EAAE,gBAAgB;gBACvB,OAAO,EAAE,wCAAwC,SAAS,CAAC,IAAI,CAAC,IAAI,CAAC,UAAU,MAAM,CAAC,KAAK,CAAC,QAAQ,GAAG;aACxG,CAAC,CAAC;QACL,CAAC;QAED,OAAO,EAAE,KAAK,EAAE,MAAM,CAAC,MAAM,KAAK,CAAC,EAAE,MAAM,EAAE,QAAQ,EAAE,CAAC;IAC1D,CAAC;IAED,eAAe;QACb,OAAO;YACL,SAAS,EAAE,SAAS;YACpB,SAAS,EAAE,SAAS;YACpB,gBAAgB,EAAE,SAAS;YAC3B,eAAe,EAAE,SAAS;SAC3B,CAAC;IACJ,CAAC;CACF,CAAC;AAEF,SAAS,YAAY,CAAC,MAAsB;IAC1C,MAAM,MAAM,GAA2B;QACrC,MAAM,EAAE,gBAAgB;QACxB,SAAS,EAAE,mBAAmB;QAC9B,MAAM,EAAE,gBAAgB;KACzB,CAAC;IACF,OAAO,MAAM,CAAC,MAAM,CAAC,KAAK,CAAC,QAAQ,CAAC,IAAI,SAAS,CAAC;AACpD,CAAC;AAED,SAAS,mBAAmB,CAAC,MAAsB;IACjD,MAAM,MAAM,GAA2B;QACrC,MAAM,EAAE,kBAAkB;QAC1B,SAAS,EAAE,qBAAqB;QAChC,MA
AM,EAAE,wBAAwB;KACjC,CAAC;IACF,OAAO,MAAM,CAAC,MAAM,CAAC,KAAK,CAAC,QAAQ,CAAC,IAAI,kBAAkB,CAAC;AAC7D,CAAC;AAED,SAAS,YAAY,CAAC,MAAsB;IAC1C,MAAM,SAAS,GAA2B;QACxC,MAAM,EAAE,yCAAyC;QACjD,SAAS,EAAE,+CAA+C;QAC1D,MAAM,EAAE,2DAA2D;KACpE,CAAC;IACF,OAAO,SAAS,CAAC,MAAM,CAAC,KAAK,CAAC,QAAQ,CAAC,IAAI,yCAAyC,CAAC;AACvF,CAAC;AAED,SAAS,WAAW,CAAC,MAAsB;IACzC,MAAM,QAAQ,GAA2B;QACvC,MAAM,EAAE,YAAY;QACpB,SAAS,EAAE,eAAe;QAC1B,MAAM,EAAE,wBAAwB;KACjC,CAAC;IACF,OAAO,QAAQ,CAAC,MAAM,CAAC,KAAK,CAAC,QAAQ,CAAC,IAAI,YAAY,CAAC;AACzD,CAAC;AAED,SAAS,iBAAiB,CAAC,MAAsB;IAC/C,MAAM,KAAK,GAAG,MAAM,CAAC,KAAK,CAAC,KAAK,IAAI,QAAQ,CAAC;IAC7C,MAAM,aAAa,GAAG,MAAM,CAAC,UAAU,EAAE,QAAQ,EAAE,cAAc,IAAI,EAAE,CAAC;IACxE,MAAM,SAAS,GAAG,YAAY,CAAC,MAAM,CAAC,CAAC;IACvC,MAAM,QAAQ,GAAG,WAAW,CAAC,MAAM,CAAC,CAAC;IAErC,OAAO;EACP,MAAM,CAAC,WAAW;;;;;;;;;EASlB,SAAS;;;;;;;;;YASC,QAAQ,WAAW,KAAK;;;;;;;;;;;;;;;;0BAgBV,aAAa;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CA2CtC,CAAC;AACF,CAAC;AAED,SAAS,iBAAiB,CAAC,OAAuB;IAChD,OAAO;;;;;;;;;;;;;CAaR,CAAC;AACF,CAAC;AAED,SAAS,iBAAiB;IACxB,OAAO;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CA0CR,CAAC;AACF,CAAC;AAED,SAAS,eAAe,CAAC,MAAsB;IAC7C,OAAO;UACC,MAAM,CAAC,WAAW;;;;;;;;;;mBAUT,MAAM,CAAC,WAAW;;;;;;;;;;;;;;;;;;;;;;;CAuBpC,CAAC;AACF,CAAC;AAED,SAAS,oBAAoB,CAAC,MAAsB;IAClD,MAAM,YAAY,GAAG,mBAAmB,CAAC,MAAM,CAAC,CAAC;IACjD,OAAO;;;EAGP,YAAY;;CAEb,CAAC;AACF,CAAC;AAED,SAAS,kBAAkB,CAAC,MAAsB;IAChD,MAAM,MAAM,GAAG,YAAY,CAAC,MAAM,CAAC,CAAC;IACpC,OAAO,GAAG,MAAM,sBAAsB,CAAC;AACzC,CAAC"}
|
package/package.json
CHANGED
|
@@ -0,0 +1,268 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Anthropic SDK Adapter
|
|
3
|
+
*
|
|
4
|
+
* Scaffolds projects using the Anthropic Agent SDK (Python).
|
|
5
|
+
*/
|
|
6
|
+
|
|
7
|
+
import type {
|
|
8
|
+
FrameworkAdapter,
|
|
9
|
+
ScaffoldConfig,
|
|
10
|
+
ScaffoldResult,
|
|
11
|
+
AgentGuardConfig,
|
|
12
|
+
ValidationResult,
|
|
13
|
+
GeneratedFile,
|
|
14
|
+
} from "../types.js";
|
|
15
|
+
import { generateDefaultConfig } from "../config.js";
|
|
16
|
+
|
|
17
|
+
export const anthropicAdapter: FrameworkAdapter = {
|
|
18
|
+
name: "anthropic",
|
|
19
|
+
displayName: "Anthropic SDK",
|
|
20
|
+
language: "python",
|
|
21
|
+
|
|
22
|
+
async scaffold(config: ScaffoldConfig): Promise<ScaffoldResult> {
|
|
23
|
+
const files: GeneratedFile[] = [
|
|
24
|
+
{
|
|
25
|
+
path: "agent.py",
|
|
26
|
+
content: generateAgentFile(config),
|
|
27
|
+
},
|
|
28
|
+
{
|
|
29
|
+
path: "tools.py",
|
|
30
|
+
content: generateToolsFile(config),
|
|
31
|
+
},
|
|
32
|
+
{
|
|
33
|
+
path: "run.py",
|
|
34
|
+
content: generateRunFile(config),
|
|
35
|
+
},
|
|
36
|
+
{
|
|
37
|
+
path: "requirements.txt",
|
|
38
|
+
content: generateRequirements(),
|
|
39
|
+
},
|
|
40
|
+
{
|
|
41
|
+
path: ".env.example",
|
|
42
|
+
content: "ANTHROPIC_API_KEY=your-api-key-here\n",
|
|
43
|
+
},
|
|
44
|
+
{
|
|
45
|
+
path: "agent.guard.yml",
|
|
46
|
+
content: generateDefaultConfig(
|
|
47
|
+
config.projectName,
|
|
48
|
+
"anthropic",
|
|
49
|
+
config.model.model || "claude-sonnet-4-20250514"
|
|
50
|
+
),
|
|
51
|
+
},
|
|
52
|
+
];
|
|
53
|
+
|
|
54
|
+
return {
|
|
55
|
+
files,
|
|
56
|
+
dependencies: {},
|
|
57
|
+
devDependencies: {},
|
|
58
|
+
scripts: {
|
|
59
|
+
start: "python run.py",
|
|
60
|
+
},
|
|
61
|
+
postInstallInstructions: [
|
|
62
|
+
"pip install -r requirements.txt",
|
|
63
|
+
"cp .env.example .env",
|
|
64
|
+
"Add your ANTHROPIC_API_KEY to .env",
|
|
65
|
+
"python run.py",
|
|
66
|
+
],
|
|
67
|
+
};
|
|
68
|
+
},
|
|
69
|
+
|
|
70
|
+
validateConfig(config: AgentGuardConfig): ValidationResult {
|
|
71
|
+
const errors: { field: string; message: string }[] = [];
|
|
72
|
+
const warnings: { field: string; message: string }[] = [];
|
|
73
|
+
|
|
74
|
+
if (config.model.provider !== "anthropic") {
|
|
75
|
+
warnings.push({
|
|
76
|
+
field: "model.provider",
|
|
77
|
+
message: `Anthropic adapter works best with provider "anthropic", got "${config.model.provider}"`,
|
|
78
|
+
});
|
|
79
|
+
}
|
|
80
|
+
|
|
81
|
+
return { valid: errors.length === 0, errors, warnings };
|
|
82
|
+
},
|
|
83
|
+
|
|
84
|
+
getDependencies() {
|
|
85
|
+
return {
|
|
86
|
+
anthropic: ">=0.40.0",
|
|
87
|
+
"python-dotenv": ">=1.0.0",
|
|
88
|
+
};
|
|
89
|
+
},
|
|
90
|
+
};
|
|
91
|
+
|
|
92
|
+
function generateAgentFile(config: ScaffoldConfig): string {
|
|
93
|
+
const model = config.model.model || "claude-sonnet-4-20250514";
|
|
94
|
+
const maxIterations = config.guardrails?.behavior?.max_iterations || 20;
|
|
95
|
+
|
|
96
|
+
return `"""
|
|
97
|
+
${config.projectName} — Built with AgentVoy
|
|
98
|
+
https://github.com/agentvoy
|
|
99
|
+
"""
|
|
100
|
+
|
|
101
|
+
import anthropic
|
|
102
|
+
from tools import get_tools, process_tool_call
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
def create_client() -> anthropic.Anthropic:
|
|
106
|
+
"""Create the Anthropic client."""
|
|
107
|
+
return anthropic.Anthropic()
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
def run_agent(prompt: str) -> str:
|
|
111
|
+
"""Run the agent with an agentic loop."""
|
|
112
|
+
client = create_client()
|
|
113
|
+
tools = get_tools()
|
|
114
|
+
messages = [{"role": "user", "content": prompt}]
|
|
115
|
+
|
|
116
|
+
iteration = 0
|
|
117
|
+
max_iterations = ${maxIterations}
|
|
118
|
+
|
|
119
|
+
while iteration < max_iterations:
|
|
120
|
+
iteration += 1
|
|
121
|
+
|
|
122
|
+
response = client.messages.create(
|
|
123
|
+
model="${model}",
|
|
124
|
+
max_tokens=8096,
|
|
125
|
+
tools=tools,
|
|
126
|
+
messages=messages,
|
|
127
|
+
)
|
|
128
|
+
|
|
129
|
+
# Add assistant response to messages
|
|
130
|
+
messages.append({"role": "assistant", "content": response.content})
|
|
131
|
+
|
|
132
|
+
# If no tool calls, we're done
|
|
133
|
+
if response.stop_reason == "end_turn":
|
|
134
|
+
# Extract final text response
|
|
135
|
+
for block in response.content:
|
|
136
|
+
if hasattr(block, "text"):
|
|
137
|
+
return block.text
|
|
138
|
+
return "Done."
|
|
139
|
+
|
|
140
|
+
# Process tool calls
|
|
141
|
+
if response.stop_reason == "tool_use":
|
|
142
|
+
tool_results = []
|
|
143
|
+
for block in response.content:
|
|
144
|
+
if block.type == "tool_use":
|
|
145
|
+
result = process_tool_call(block.name, block.input)
|
|
146
|
+
tool_results.append({
|
|
147
|
+
"type": "tool_result",
|
|
148
|
+
"tool_use_id": block.id,
|
|
149
|
+
"content": str(result),
|
|
150
|
+
})
|
|
151
|
+
|
|
152
|
+
messages.append({"role": "user", "content": tool_results})
|
|
153
|
+
|
|
154
|
+
return "Max iterations reached."
|
|
155
|
+
`;
|
|
156
|
+
}
|
|
157
|
+
|
|
158
|
+
/**
 * Generate the scaffolded `tools.py` for an Anthropic SDK project.
 *
 * The emitted Python module defines the JSON tool schemas passed to the
 * Anthropic Messages API (`get_tools`), a dispatcher that routes tool calls
 * by name to local implementations (`process_tool_call`), and two stub
 * implementations (`_search_web`, `_read_file`). `_search_web` is a
 * placeholder the user is expected to implement.
 *
 * @param _config - Scaffold configuration; unused here, kept so all
 *   generator functions share a uniform signature.
 * @returns The complete `tools.py` file contents as a string.
 */
function generateToolsFile(_config: ScaffoldConfig): string {
  return `"""
Agent tools — add your custom tools here.
"""


def get_tools() -> list:
    """Return tool definitions for the Anthropic API."""
    return [
        {
            "name": "search_web",
            "description": "Search the web for information on a given topic.",
            "input_schema": {
                "type": "object",
                "properties": {
                    "query": {
                        "type": "string",
                        "description": "The search query.",
                    }
                },
                "required": ["query"],
            },
        },
        {
            "name": "read_file",
            "description": "Read the contents of a file.",
            "input_schema": {
                "type": "object",
                "properties": {
                    "path": {
                        "type": "string",
                        "description": "Path to the file to read.",
                    }
                },
                "required": ["path"],
            },
        },
    ]


def process_tool_call(tool_name: str, tool_input: dict) -> str:
    """Execute a tool call and return the result."""
    if tool_name == "search_web":
        return _search_web(tool_input["query"])
    elif tool_name == "read_file":
        return _read_file(tool_input["path"])
    else:
        return f"Unknown tool: {tool_name}"


def _search_web(query: str) -> str:
    """Search the web for information."""
    # TODO: Implement your search logic (e.g., Tavily, Serper, Brave Search)
    return f"Search results for: {query}"


def _read_file(path: str) -> str:
    """Read a file's contents."""
    try:
        with open(path, "r") as f:
            return f.read()
    except FileNotFoundError:
        return f"File not found: {path}"
    except PermissionError:
        return f"Permission denied: {path}"
`;
}
|
|
225
|
+
|
|
226
|
+
/**
 * Generate the scaffolded `run.py` CLI entry point for an Anthropic project.
 *
 * The emitted script loads `.env` via python-dotenv, then runs a simple
 * REPL: read a prompt, call `run_agent`, print the result. Typing
 * 'quit'/'exit'/'q' or pressing Ctrl-C exits; blank prompts are skipped.
 *
 * @param config - Only `projectName` is used (interpolated into the
 *   docstring and the startup banner).
 * @returns The complete `run.py` file contents as a string.
 */
function generateRunFile(config: ScaffoldConfig): string {
  return `"""
Run the ${config.projectName} agent.
"""

from dotenv import load_dotenv
from agent import run_agent

load_dotenv()


def main():
    print("\\n🚀 ${config.projectName} — Powered by AgentVoy")
    print("=" * 50)
    print("Type your prompt (or 'quit' to exit):\\n")

    while True:
        try:
            prompt = input("> ")
            if prompt.lower() in ("quit", "exit", "q"):
                print("\\nGoodbye!")
                break
            if not prompt.strip():
                continue

            print("\\nThinking...\\n")
            result = run_agent(prompt)
            print(f"\\n{result}\\n")
        except KeyboardInterrupt:
            print("\\n\\nGoodbye!")
            break


if __name__ == "__main__":
    main()
`;
}
|
|
263
|
+
|
|
264
|
+
function generateRequirements(): string {
|
|
265
|
+
return `anthropic>=0.40.0
|
|
266
|
+
python-dotenv>=1.0.0
|
|
267
|
+
`;
|
|
268
|
+
}
|
package/src/adapters/index.ts
CHANGED
|
@@ -10,8 +10,12 @@ import { registerAdapter } from "./registry.js";
|
|
|
10
10
|
import { openaiAdapter } from "./openai.js";
|
|
11
11
|
import { googleAdkAdapter } from "./google-adk.js";
|
|
12
12
|
import { crewaiAdapter } from "./crewai.js";
|
|
13
|
+
import { anthropicAdapter } from "./anthropic.js";
|
|
14
|
+
import { langgraphAdapter } from "./langgraph.js";
|
|
13
15
|
|
|
14
16
|
// Register built-in adapters
|
|
15
17
|
registerAdapter(openaiAdapter);
|
|
16
18
|
registerAdapter(googleAdkAdapter);
|
|
17
19
|
registerAdapter(crewaiAdapter);
|
|
20
|
+
registerAdapter(anthropicAdapter);
|
|
21
|
+
registerAdapter(langgraphAdapter);
|
|
@@ -0,0 +1,336 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* LangGraph Adapter
|
|
3
|
+
*
|
|
4
|
+
* Scaffolds projects using LangGraph (Python) — stateful, graph-based agent workflows.
|
|
5
|
+
*/
|
|
6
|
+
|
|
7
|
+
import type {
|
|
8
|
+
FrameworkAdapter,
|
|
9
|
+
ScaffoldConfig,
|
|
10
|
+
ScaffoldResult,
|
|
11
|
+
AgentGuardConfig,
|
|
12
|
+
ValidationResult,
|
|
13
|
+
GeneratedFile,
|
|
14
|
+
} from "../types.js";
|
|
15
|
+
import { generateDefaultConfig } from "../config.js";
|
|
16
|
+
|
|
17
|
+
/**
 * Framework adapter that scaffolds a LangGraph (Python) agent project.
 *
 * Produces every project file in memory — agent graph, state schema, tools,
 * CLI runner, requirements, env template, and the agent.guard.yml config —
 * and leaves writing them to disk to the caller.
 */
export const langgraphAdapter: FrameworkAdapter = {
  name: "langgraph",
  displayName: "LangGraph",
  language: "python",

  /**
   * Build the list of generated files plus install/run instructions.
   *
   * @param config - Project name, model selection, and guardrail settings.
   * @returns Scaffold result. The npm dependency maps are intentionally
   *   empty: the generated project is Python, so its dependencies live in
   *   the generated requirements.txt instead.
   */
  async scaffold(config: ScaffoldConfig): Promise<ScaffoldResult> {
    const files: GeneratedFile[] = [
      {
        path: "agent.py",
        content: generateAgentFile(config),
      },
      {
        path: "tools.py",
        content: generateToolsFile(),
      },
      {
        path: "state.py",
        content: generateStateFile(config),
      },
      {
        path: "run.py",
        content: generateRunFile(config),
      },
      {
        path: "requirements.txt",
        content: generateRequirements(config),
      },
      {
        path: ".env.example",
        content: generateEnvExample(config),
      },
      {
        // AgentVoy guardrail config; falls back to "gpt-4o" when no model id is set.
        path: "agent.guard.yml",
        content: generateDefaultConfig(
          config.projectName,
          "langgraph",
          config.model.model || "gpt-4o"
        ),
      },
    ];

    return {
      files,
      dependencies: {}, // no npm runtime deps — generated project is Python
      devDependencies: {}, // no npm dev deps either
      scripts: {
        start: "python run.py",
      },
      postInstallInstructions: [
        "pip install -r requirements.txt",
        "cp .env.example .env",
        `Add your ${getApiKeyEnv(config)} to .env`,
        "python run.py",
      ],
    };
  },

  /**
   * Validate an AgentVoy config for this adapter.
   *
   * As written this only emits warnings (never errors) for providers
   * outside the known-good set, so `valid` is always true.
   *
   * @param config - Parsed agent.guard.yml configuration.
   */
  validateConfig(config: AgentGuardConfig): ValidationResult {
    const errors: { field: string; message: string }[] = [];
    const warnings: { field: string; message: string }[] = [];

    // Providers with dedicated langchain chat-model integrations.
    const supported = ["openai", "anthropic", "google"];
    if (!supported.includes(config.model.provider)) {
      warnings.push({
        field: "model.provider",
        message: `LangGraph works best with providers: ${supported.join(", ")}. Got "${config.model.provider}"`,
      });
    }

    return { valid: errors.length === 0, errors, warnings };
  },

  // Python package version floors; the same set is written to requirements.txt.
  getDependencies() {
    return {
      langgraph: ">=0.2.0",
      langchain: ">=0.3.0",
      "langchain-core": ">=0.3.0",
      "python-dotenv": ">=1.0.0",
    };
  },
};
|
|
98
|
+
|
|
99
|
+
function getApiKeyEnv(config: ScaffoldConfig): string {
|
|
100
|
+
const envMap: Record<string, string> = {
|
|
101
|
+
openai: "OPENAI_API_KEY",
|
|
102
|
+
anthropic: "ANTHROPIC_API_KEY",
|
|
103
|
+
google: "GOOGLE_API_KEY",
|
|
104
|
+
};
|
|
105
|
+
return envMap[config.model.provider] || "API_KEY";
|
|
106
|
+
}
|
|
107
|
+
|
|
108
|
+
function getLangChainPackage(config: ScaffoldConfig): string {
|
|
109
|
+
const pkgMap: Record<string, string> = {
|
|
110
|
+
openai: "langchain-openai",
|
|
111
|
+
anthropic: "langchain-anthropic",
|
|
112
|
+
google: "langchain-google-genai",
|
|
113
|
+
};
|
|
114
|
+
return pkgMap[config.model.provider] || "langchain-openai";
|
|
115
|
+
}
|
|
116
|
+
|
|
117
|
+
function getLLMImport(config: ScaffoldConfig): string {
|
|
118
|
+
const importMap: Record<string, string> = {
|
|
119
|
+
openai: "from langchain_openai import ChatOpenAI",
|
|
120
|
+
anthropic: "from langchain_anthropic import ChatAnthropic",
|
|
121
|
+
google: "from langchain_google_genai import ChatGoogleGenerativeAI",
|
|
122
|
+
};
|
|
123
|
+
return importMap[config.model.provider] || "from langchain_openai import ChatOpenAI";
|
|
124
|
+
}
|
|
125
|
+
|
|
126
|
+
function getLLMClass(config: ScaffoldConfig): string {
|
|
127
|
+
const classMap: Record<string, string> = {
|
|
128
|
+
openai: "ChatOpenAI",
|
|
129
|
+
anthropic: "ChatAnthropic",
|
|
130
|
+
google: "ChatGoogleGenerativeAI",
|
|
131
|
+
};
|
|
132
|
+
return classMap[config.model.provider] || "ChatOpenAI";
|
|
133
|
+
}
|
|
134
|
+
|
|
135
|
+
/**
 * Generate the scaffolded `agent.py` for a LangGraph project.
 *
 * The emitted Python module builds a two-node StateGraph (an LLM node plus
 * a prebuilt ToolNode) with a conditional edge that loops back through the
 * tools until the model stops emitting tool calls or the iteration cap is
 * reached, and exposes `run_agent(prompt)` as the entry point.
 *
 * @param config - Supplies the project name, model id, provider (which
 *   selects the langchain chat-model import/class via getLLMImport and
 *   getLLMClass), and the optional guardrail iteration cap.
 * @returns The complete `agent.py` file contents as a string.
 */
function generateAgentFile(config: ScaffoldConfig): string {
  // Model id and iteration cap are baked into the generated source.
  const model = config.model.model || "gpt-4o";
  // `||` treats an explicit max_iterations of 0 as unset — presumably intended.
  const maxIterations = config.guardrails?.behavior?.max_iterations || 20;
  // Provider-specific import line and chat-model class name.
  const llmImport = getLLMImport(config);
  const llmClass = getLLMClass(config);

  // NOTE(review): the generated file imports SystemMessage but never uses
  // it — presumably left for users adding a system prompt; confirm.
  return `"""
${config.projectName} — Built with AgentVoy
https://github.com/agentvoy

LangGraph agent with a stateful agentic loop.
"""

from langchain_core.messages import HumanMessage, SystemMessage
from langgraph.graph import StateGraph, END
from langgraph.prebuilt import ToolNode
${llmImport}

from state import AgentState
from tools import get_tools


def create_graph():
    """Build the agent state graph."""
    tools = get_tools()
    llm = ${llmClass}(model="${model}").bind_tools(tools)
    tool_node = ToolNode(tools)

    def should_continue(state: AgentState) -> str:
        """Route: call tools or finish."""
        messages = state["messages"]
        last = messages[-1]
        if last.tool_calls:
            return "tools"
        return END

    def call_model(state: AgentState) -> dict:
        """Call the LLM with current messages."""
        messages = state["messages"]
        iteration = state.get("iteration", 0)

        if iteration >= ${maxIterations}:
            from langchain_core.messages import AIMessage
            return {
                "messages": [AIMessage(content="Max iterations reached.")],
                "iteration": iteration,
            }

        response = llm.invoke(messages)
        return {
            "messages": [response],
            "iteration": iteration + 1,
        }

    # Build the graph
    graph = StateGraph(AgentState)
    graph.add_node("agent", call_model)
    graph.add_node("tools", tool_node)

    graph.set_entry_point("agent")
    graph.add_conditional_edges("agent", should_continue)
    graph.add_edge("tools", "agent")

    return graph.compile()


def run_agent(prompt: str) -> str:
    """Run the agent graph with a user prompt."""
    app = create_graph()

    initial_state = {
        "messages": [HumanMessage(content=prompt)],
        "iteration": 0,
    }

    final_state = app.invoke(initial_state)
    messages = final_state["messages"]

    # Return the last AI message text
    for msg in reversed(messages):
        if hasattr(msg, "content") and isinstance(msg.content, str):
            return msg.content

    return "Done."
`;
}
|
|
221
|
+
|
|
222
|
+
/**
 * Generate the scaffolded `state.py` for a LangGraph project.
 *
 * The emitted module defines the TypedDict state shared across graph
 * nodes: `messages` is annotated with `operator.add` so node return values
 * are appended rather than overwritten, while `iteration` has no reducer
 * and is simply replaced.
 *
 * @param _config - Scaffold configuration; unused here, kept for a uniform
 *   generator signature.
 * @returns The complete `state.py` file contents as a string.
 */
function generateStateFile(_config: ScaffoldConfig): string {
  return `"""
Agent state definition for LangGraph.
"""

from typing import TypedDict, Annotated, Sequence
from langchain_core.messages import BaseMessage
import operator


class AgentState(TypedDict):
    """State passed between nodes in the graph."""
    messages: Annotated[Sequence[BaseMessage], operator.add]
    iteration: int
`;
}
|
|
238
|
+
|
|
239
|
+
/**
 * Generate the scaffolded `tools.py` for a LangGraph project.
 *
 * The emitted Python module defines two `@tool`-decorated functions
 * (`search_web` is a stub the user must implement; `read_file` works as
 * written) and a `get_tools()` aggregator consumed by the agent graph.
 *
 * @returns The complete `tools.py` file contents as a string.
 */
function generateToolsFile(): string {
  return `"""
Agent tools — add your custom tools here.
LangGraph uses @tool decorated functions from langchain_core.
"""

from langchain_core.tools import tool


@tool
def search_web(query: str) -> str:
    """Search the web for information on a given topic.

    Args:
        query: The search query.
    """
    # TODO: Implement your search logic (e.g., Tavily, Serper, Brave Search)
    # Example with Tavily:
    # from tavily import TavilyClient
    # client = TavilyClient(api_key=os.environ["TAVILY_API_KEY"])
    # return client.search(query)["results"][0]["content"]
    return f"Search results for: {query}"


@tool
def read_file(path: str) -> str:
    """Read the contents of a file.

    Args:
        path: Path to the file to read.
    """
    try:
        with open(path, "r") as f:
            return f.read()
    except FileNotFoundError:
        return f"File not found: {path}"
    except PermissionError:
        return f"Permission denied: {path}"


def get_tools() -> list:
    """Return all available tools."""
    return [search_web, read_file]
`;
}
|
|
284
|
+
|
|
285
|
+
/**
 * Generate the scaffolded `run.py` CLI entry point for a LangGraph project.
 *
 * The emitted script loads `.env` via python-dotenv, then runs a simple
 * REPL: read a prompt, call `run_agent`, print the result. Typing
 * 'quit'/'exit'/'q' or pressing Ctrl-C exits; blank prompts are skipped.
 *
 * @param config - Only `projectName` is used (interpolated into the
 *   docstring and the startup banner).
 * @returns The complete `run.py` file contents as a string.
 */
function generateRunFile(config: ScaffoldConfig): string {
  return `"""
Run the ${config.projectName} agent.
"""

from dotenv import load_dotenv
from agent import run_agent

load_dotenv()


def main():
    print("\\n🚀 ${config.projectName} — Powered by AgentVoy")
    print("=" * 50)
    print("Type your prompt (or 'quit' to exit):\\n")

    while True:
        try:
            prompt = input("> ")
            if prompt.lower() in ("quit", "exit", "q"):
                print("\\nGoodbye!")
                break
            if not prompt.strip():
                continue

            print("\\nThinking...\\n")
            result = run_agent(prompt)
            print(f"\\n{result}\\n")
        except KeyboardInterrupt:
            print("\\n\\nGoodbye!")
            break


if __name__ == "__main__":
    main()
`;
}
|
|
322
|
+
|
|
323
|
+
function generateRequirements(config: ScaffoldConfig): string {
|
|
324
|
+
const langchainPkg = getLangChainPackage(config);
|
|
325
|
+
return `langgraph>=0.2.0
|
|
326
|
+
langchain>=0.3.0
|
|
327
|
+
langchain-core>=0.3.0
|
|
328
|
+
${langchainPkg}>=0.2.0
|
|
329
|
+
python-dotenv>=1.0.0
|
|
330
|
+
`;
|
|
331
|
+
}
|
|
332
|
+
|
|
333
|
+
function generateEnvExample(config: ScaffoldConfig): string {
|
|
334
|
+
const envKey = getApiKeyEnv(config);
|
|
335
|
+
return `${envKey}=your-api-key-here\n`;
|
|
336
|
+
}
|