bedrock-agentcore-starter-toolkit 0.1.3__py3-none-any.whl → 0.1.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of bedrock-agentcore-starter-toolkit might be problematic.
- bedrock_agentcore_starter_toolkit/cli/__init__.py +1 -0
- bedrock_agentcore_starter_toolkit/cli/cli.py +2 -0
- bedrock_agentcore_starter_toolkit/cli/import_agent/README.md +35 -0
- bedrock_agentcore_starter_toolkit/cli/import_agent/__init__.py +1 -0
- bedrock_agentcore_starter_toolkit/cli/import_agent/agent_info.py +230 -0
- bedrock_agentcore_starter_toolkit/cli/import_agent/commands.py +518 -0
- bedrock_agentcore_starter_toolkit/operations/gateway/client.py +2 -2
- bedrock_agentcore_starter_toolkit/services/__init__.py +1 -0
- bedrock_agentcore_starter_toolkit/services/import_agent/__init__.py +1 -0
- bedrock_agentcore_starter_toolkit/services/import_agent/assets/memory_manager_template.py +207 -0
- bedrock_agentcore_starter_toolkit/services/import_agent/assets/requirements_langchain.j2 +9 -0
- bedrock_agentcore_starter_toolkit/services/import_agent/assets/requirements_strands.j2 +5 -0
- bedrock_agentcore_starter_toolkit/services/import_agent/assets/template_fixtures_merged.json +1102 -0
- bedrock_agentcore_starter_toolkit/services/import_agent/scripts/__init__.py +1 -0
- bedrock_agentcore_starter_toolkit/services/import_agent/scripts/base_bedrock_translate.py +1668 -0
- bedrock_agentcore_starter_toolkit/services/import_agent/scripts/bedrock_to_langchain.py +382 -0
- bedrock_agentcore_starter_toolkit/services/import_agent/scripts/bedrock_to_strands.py +374 -0
- bedrock_agentcore_starter_toolkit/services/import_agent/utils.py +417 -0
- bedrock_agentcore_starter_toolkit/utils/runtime/templates/execution_role_policy.json.j2 +2 -1
- {bedrock_agentcore_starter_toolkit-0.1.3.dist-info → bedrock_agentcore_starter_toolkit-0.1.4.dist-info}/METADATA +22 -2
- {bedrock_agentcore_starter_toolkit-0.1.3.dist-info → bedrock_agentcore_starter_toolkit-0.1.4.dist-info}/RECORD +25 -9
- {bedrock_agentcore_starter_toolkit-0.1.3.dist-info → bedrock_agentcore_starter_toolkit-0.1.4.dist-info}/WHEEL +0 -0
- {bedrock_agentcore_starter_toolkit-0.1.3.dist-info → bedrock_agentcore_starter_toolkit-0.1.4.dist-info}/entry_points.txt +0 -0
- {bedrock_agentcore_starter_toolkit-0.1.3.dist-info → bedrock_agentcore_starter_toolkit-0.1.4.dist-info}/licenses/LICENSE.txt +0 -0
- {bedrock_agentcore_starter_toolkit-0.1.3.dist-info → bedrock_agentcore_starter_toolkit-0.1.4.dist-info}/licenses/NOTICE.txt +0 -0
bedrock_agentcore_starter_toolkit/services/import_agent/scripts/bedrock_to_langchain.py
@@ -0,0 +1,382 @@
# pylint: disable=consider-using-f-string, line-too-long
# ruff: noqa: E501
"""Bedrock Agent to LangChain Translator.

This script translates AWS Bedrock Agent configurations into equivalent LangChain code.
"""

import os
import textwrap

from .base_bedrock_translate import BaseBedrockTranslator


class BedrockLangchainTranslation(BaseBedrockTranslator):
    """Class to translate Bedrock Agent configurations to LangChain code."""

    def __init__(self, agent_config, debug: bool, output_dir: str, enabled_primitives: dict):
        """Initialize the BedrockLangchainTranslation class."""
        super().__init__(agent_config, debug, output_dir, enabled_primitives)

        self.imports_code += self.generate_imports()
        self.tools_code = self.generate_action_groups_code(platform="langchain")
        self.memory_code = self.generate_memory_configuration(memory_saver="InMemorySaver")
        self.collaboration_code = self.generate_collaboration_code()
        self.kb_code = self.generate_knowledge_base_code()
        self.models_code = self.generate_model_configurations()
        self.agent_setup_code = self.generate_agent_setup()
        self.usage_code = self.generate_example_usage()

        # Observability
        if self.observability_enabled:
            self.imports_code += """
from opentelemetry.instrumentation.langchain import LangchainInstrumentor
LangchainInstrumentor().instrument()
"""

        # Format prompts code
        self.prompts_code = textwrap.fill(
            self.prompts_code, width=150, break_long_words=False, replace_whitespace=False
        )

        self.code_sections = [
            self.imports_code,
            self.models_code,
            self.prompts_code,
            self.collaboration_code,
            self.tools_code,
            self.memory_code,
            self.kb_code,
            self.agent_setup_code,
            self.usage_code,
        ]

    def generate_imports(self) -> str:
        """Generate import statements for LangChain components."""
        return """
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

from langchain_aws import ChatBedrock
from langchain_aws.retrievers import AmazonKnowledgeBasesRetriever

from langchain_core.messages import HumanMessage, SystemMessage, AIMessage, ToolMessage
from langchain_core.globals import set_verbose, set_debug

from langchain.tools import tool

from langgraph.prebuilt import create_react_agent, InjectedState
from langgraph.checkpoint.memory import InMemorySaver
"""

    def generate_model_configurations(self) -> str:
        """Generate LangChain model configurations from Bedrock agent config."""
        model_configs = []

        for i, config in enumerate(self.prompt_configs):
            prompt_type = config.get("promptType", f"CUSTOM_{i}")
            inference_config = config.get("inferenceConfiguration", {})

            # Skip KB generation if no knowledge bases are defined
            if prompt_type == "KNOWLEDGE_BASE_RESPONSE_GENERATION" and not self.knowledge_bases:
                continue

            # Build model configuration string
            model_config = f"""
# {prompt_type} LLM configuration
llm_{prompt_type} = ChatBedrock(
    model_id="{self.model_id}",
    region_name="{self.agent_region}",
    provider="{self.agent_info["model"]["providerName"].lower()}",
    model_kwargs={{
        {f'"top_k": {inference_config.get("topK", 250)},' if self.agent_info["model"]["providerName"].lower() in ["anthropic", "amazon"] else ""}
        "top_p": {inference_config.get("topP", 1.0)},
        "temperature": {inference_config.get("temperature", 0)},
        "max_tokens": {inference_config.get("maximumLength", 2048)},
        {f'"stop_sequences": {repr(inference_config.get("stopSequences", []))},'.strip() if self.agent_info["model"]["providerName"].lower() in ["anthropic", "amazon"] else ""}
    }}"""

            # Add guardrails if available
            if self.guardrail_config:
                model_config += f""",
    guardrails={self.guardrail_config}"""

            model_config += "\n)"
            model_configs.append(model_config)

            self.generate_prompt(config)

        return "\n".join(model_configs)

    def generate_knowledge_base_code(self) -> str:
        """Generate code for knowledge base retrievers."""
        if not self.knowledge_bases:
            return ""

        kb_code = ""

        for kb in self.knowledge_bases:
            kb_name = kb.get("name", "")
            kb_description = kb.get("description", "")
            kb_id = kb.get("knowledgeBaseId", "")
            kb_region_name = kb.get("knowledgeBaseArn", "").split(":")[3]

            kb_code += f"""retriever_{kb_name} = AmazonKnowledgeBasesRetriever(
    knowledge_base_id="{kb_id}",
    retrieval_config={{"vectorSearchConfiguration": {{"numberOfResults": 5}}}},
    region_name="{kb_region_name}"
)

retriever_tool_{kb_name} = retriever_{kb_name}.as_tool(name="kb_{kb_name}", description="{kb_description}")

"""
            self.tools.append(f"retriever_tool_{kb_name}")

        return kb_code

    def generate_collaboration_code(self) -> str:
        """Generate code for multi-agent collaboration."""
        if not self.multi_agent_enabled or not self.collaborators:
            return ""

        collaborator_code = ""

        # Create the collaborators
        for i, collaborator in enumerate(self.collaborators):
            collaborator_name = collaborator.get("collaboratorName", "")
            collaborator_file_name = f"langchain_collaborator_{collaborator_name}"
            collaborator_path = os.path.join(self.output_dir, f"{collaborator_file_name}.py")

            # Recursively translate the collaborator agent to LangChain
            BedrockLangchainTranslation(
                collaborator, debug=self.debug, output_dir=self.output_dir, enabled_primitives=self.enabled_primitives
            ).translate_bedrock_to_langchain(collaborator_path)

            self.imports_code += (
                f"\nfrom {collaborator_file_name} import invoke_agent as invoke_{collaborator_name}_collaborator"
            )

            # conversation relay
            relay_conversation_history = collaborator.get("relayConversationHistory", "DISABLED") == "TO_COLLABORATOR"

            # Create tool to invoke the collaborator
            collaborator_code += """
@tool
def invoke_{0}(query: str, state: Annotated[dict, InjectedState]) -> str:
    \"\"\"Invoke the collaborator agent/specialist with the following description: {1}\"\"\"
    {2}
    invoke_agent_response = invoke_{0}_collaborator(query{3})
    tools_used.update([msg.name for msg in invoke_agent_response if isinstance(msg, ToolMessage)])
    return invoke_agent_response
""".format(
                collaborator_name,
                self.collaborator_descriptions[i],
                "relay_history = state.get('messages', [])[:-1]" if relay_conversation_history else "",
                ", relay_history" if relay_conversation_history else "",
            )

            # Add the tool to the list of tools
            self.tools.append(f"invoke_{collaborator_name}")

        return collaborator_code

    def generate_agent_setup(self) -> str:
        """Generate agent setup code."""
        agent_code = f"tools = [{','.join(self.tools)}]\ntools_used = set()"

        if self.action_groups and self.tools_code:
            agent_code += """\ntools += action_group_tools"""

        if self.gateway_enabled:
            agent_code += """\ntools += mcp_tools"""

        memory_retrieve_code = (
            ""
            if not self.memory_enabled
            else (
                "memory_synopsis = memory_manager.get_memory_synopsis()"
                if not self.agentcore_memory_enabled
                else """
        memories = memory_client.retrieve_memories(memory_id=memory_id, namespace=f'/summaries/{user_id}', query="Retrieve the most recent session summaries.", actor_id=user_id, top_k=20)
        memory_synopsis = "\\n".join([m.get("content", {}).get("text", "") for m in memories])
        """
            )
        )

        # Create agent based on available components
        agent_code += """
config = {{"configurable": {{"thread_id": "1"}}}}
set_verbose({})
set_debug({})

_agent = None
first_turn = True
last_input = ""
user_id = ""
{}

# agent update loop
def get_agent():

    global _agent, user_id, memory_id

    {}
        {}
        system_prompt = ORCHESTRATION_TEMPLATE
        {}
        _agent = create_react_agent(
            model=llm_ORCHESTRATION,
            prompt=system_prompt,
            tools=tools,
            checkpointer=checkpointer_STM,
            debug={}
        )

    return _agent
""".format(
            self.debug,
            self.debug,
            'last_agent = ""' if self.multi_agent_enabled and self.supervision_type == "SUPERVISOR_ROUTER" else "",
            (
                "if _agent is None or memory_manager.has_memory_changed():"
                if self.memory_enabled and not self.agentcore_memory_enabled
                else "if _agent is None:"
            ),
            memory_retrieve_code,
            (
                "system_prompt = system_prompt.replace('$memory_synopsis$', memory_synopsis)"
                if self.memory_enabled
                else ""
            ),
            self.debug,
        )

        # Generate routing code if needed
        routing_code = self.generate_routing_code()

        # Set up relay parameter definition based on whether we're accepting relays
        relay_param_def = ", relayed_messages = []" if self.is_accepting_relays else ""

        # Add relay handling code if needed
        relay_code = (
            """if relayed_messages:
        agent.update_state(config, {"messages": relayed_messages})"""
            if self.is_accepting_relays
            else ""
        )

        # Set up preprocessing code if enabled
        preprocess_code = ""
        if "PRE_PROCESSING" in self.enabled_prompts:
            preprocess_code = """
    pre_process_output = llm_PRE_PROCESSING.invoke([SystemMessage(PRE_PROCESSING_TEMPLATE), HumanMessage(question)])
    question += "\\n<PRE_PROCESSING>{}</PRE_PROCESSING>".format(pre_process_output.content)
"""
            if self.debug:
                preprocess_code += '    print(f"PREPROCESSING_OUTPUT: {pre_process_output}")'

        # Memory recording code
        memory_add_user = (
            """
    memory_manager.add_message({'role': 'user', 'content': question})"""
            if self.memory_enabled and not self.agentcore_memory_enabled
            else ""
        )

        memory_add_assistant = (
            """
    memory_manager.add_message({'role': 'assistant', 'content': str(response)})"""
            if self.memory_enabled and not self.agentcore_memory_enabled
            else ""
        )

        # KB optimization code if enabled
        kb_code = ""
        if self.single_kb_optimization_enabled:
            kb_name = self.knowledge_bases[0]["name"]
            kb_code = f"""
    if first_turn:
        search_results = retriever_{kb_name}.invoke(question)
        response = llm_KNOWLEDGE_BASE_RESPONSE_GENERATION.invoke([SystemMessage(KB_GENERATION_TEMPLATE.replace("$search_results$", search_results)), HumanMessage(question)])
        first_turn = False
"""

        # Post-processing code
        post_process_code = (
            """
    post_process_prompt = POST_PROCESSING_TEMPLATE.replace("$question$", question).replace("$latest_response$", response["messages"][-1].content).replace("$responses$", str(response["messages"]))
    post_process_output = llm_POST_PROCESSING.invoke([HumanMessage(post_process_prompt)])
    return [AIMessage(post_process_output.content)]"""
            if "POST_PROCESSING" in self.enabled_prompts
            else "return response['messages']"
        )

        # Combine it all into the invoke_agent function
        agent_code += f"""
def invoke_agent(question: str{relay_param_def}):
    {"global last_agent" if self.supervision_type == "SUPERVISOR_ROUTER" else ""}
    {"global first_turn" if self.single_kb_optimization_enabled else ""}
    global last_input, memory_id
    last_input = question
    agent = get_agent()
    {relay_code}
    {routing_code}
    {preprocess_code}
    {memory_add_user}

    response = asyncio.run(agent.ainvoke({{"messages": [{{"role": "user", "content": question}}]}}, config))
    {memory_add_assistant}
    {kb_code}
    {post_process_code}
"""

        agent_code += self.generate_entrypoint_code("langchain")

        return agent_code

    def generate_routing_code(self):
        """Generate routing code for supervisor router."""
        if not self.multi_agent_enabled or self.supervision_type != "SUPERVISOR_ROUTER":
            return ""

        code = """
    conversation = agent.checkpointer.get(config)
    if not conversation:
        conversation = {}
    messages = str(conversation.get("channel_values", {}).get("messages", []))

    routing_template = ROUTING_TEMPLATE
    routing_template = routing_template.replace("$last_user_request$", question).replace("$conversation$", messages).replace("$last_most_specialized_agent$", last_agent)
    routing_choice = llm_ROUTING_CLASSIFIER.invoke([SystemMessage(routing_template), HumanMessage(question)]).content

    choice = str(re.findall(r'<a.*?>(.*?)</a>', routing_choice)[0])"""

        if self.debug:
            code += """
    print("Routing to agent: {}. Last used agent was {}.".format(choice, last_agent))"""

        code += """
    if choice == "undecidable":
        pass"""

        for agent in self.collaborators:
            agent_name = agent.get("collaboratorName", "")
            relay_param = (
                ", messages"
                if self.collaborator_map.get(agent_name, {}).get("relayConversationHistory", "DISABLED")
                == "TO_COLLABORATOR"
                else ""
            )
            code += f"""
    elif choice == "{agent_name}":
        last_agent = "{agent_name}"
        return invoke_{agent_name}_collaborator(question{relay_param})"""

        code += """
    elif choice == "keep_previous_agent":
        return eval(f"invoke_{last_agent}_collaborator")(question, messages)"""

        return code

    def translate_bedrock_to_langchain(self, output_path: str) -> dict:
        """Translate Bedrock agent config to LangChain code."""
        return self.translate(output_path, self.code_sections, "langchain")
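For orientation, a minimal sketch of driving the new translator directly. The import path and constructor signature come from the diff above; the concrete argument values (the empty agent_config, the enabled_primitives dict, and the output paths) are illustrative assumptions rather than documented usage — the supported entry point for this feature is the new import_agent CLI added in cli/import_agent/commands.py.

    # Hypothetical driver for the translator added in 0.1.4 (argument values are assumptions).
    from bedrock_agentcore_starter_toolkit.services.import_agent.scripts.bedrock_to_langchain import (
        BedrockLangchainTranslation,
    )

    agent_config = {}  # populate with the exported Bedrock agent configuration

    translator = BedrockLangchainTranslation(
        agent_config,
        debug=False,
        output_dir="./generated",
        enabled_primitives={},  # assumed shape; see cli/import_agent for the real wiring
    )

    # Writes the generated LangChain agent module to the given path,
    # via translate_bedrock_to_langchain as defined above.
    translator.translate_bedrock_to_langchain("./generated/langchain_agent.py")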