bedrock-agentcore-starter-toolkit 0.1.2__py3-none-any.whl → 0.1.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of bedrock-agentcore-starter-toolkit might be problematic.
- bedrock_agentcore_starter_toolkit/cli/__init__.py +1 -0
- bedrock_agentcore_starter_toolkit/cli/cli.py +3 -1
- bedrock_agentcore_starter_toolkit/cli/common.py +1 -1
- bedrock_agentcore_starter_toolkit/cli/import_agent/README.md +35 -0
- bedrock_agentcore_starter_toolkit/cli/import_agent/__init__.py +1 -0
- bedrock_agentcore_starter_toolkit/cli/import_agent/agent_info.py +230 -0
- bedrock_agentcore_starter_toolkit/cli/import_agent/commands.py +518 -0
- bedrock_agentcore_starter_toolkit/cli/runtime/commands.py +132 -42
- bedrock_agentcore_starter_toolkit/notebook/runtime/bedrock_agentcore.py +120 -22
- bedrock_agentcore_starter_toolkit/operations/gateway/client.py +2 -2
- bedrock_agentcore_starter_toolkit/operations/runtime/configure.py +5 -2
- bedrock_agentcore_starter_toolkit/operations/runtime/invoke.py +1 -1
- bedrock_agentcore_starter_toolkit/operations/runtime/launch.py +108 -30
- bedrock_agentcore_starter_toolkit/operations/runtime/models.py +1 -1
- bedrock_agentcore_starter_toolkit/services/__init__.py +1 -0
- bedrock_agentcore_starter_toolkit/services/import_agent/__init__.py +1 -0
- bedrock_agentcore_starter_toolkit/services/import_agent/assets/memory_manager_template.py +207 -0
- bedrock_agentcore_starter_toolkit/services/import_agent/assets/requirements_langchain.j2 +9 -0
- bedrock_agentcore_starter_toolkit/services/import_agent/assets/requirements_strands.j2 +5 -0
- bedrock_agentcore_starter_toolkit/services/import_agent/assets/template_fixtures_merged.json +1102 -0
- bedrock_agentcore_starter_toolkit/services/import_agent/scripts/__init__.py +1 -0
- bedrock_agentcore_starter_toolkit/services/import_agent/scripts/base_bedrock_translate.py +1668 -0
- bedrock_agentcore_starter_toolkit/services/import_agent/scripts/bedrock_to_langchain.py +382 -0
- bedrock_agentcore_starter_toolkit/services/import_agent/scripts/bedrock_to_strands.py +374 -0
- bedrock_agentcore_starter_toolkit/services/import_agent/utils.py +417 -0
- bedrock_agentcore_starter_toolkit/services/runtime.py +35 -12
- bedrock_agentcore_starter_toolkit/utils/runtime/container.py +54 -3
- bedrock_agentcore_starter_toolkit/utils/runtime/entrypoint.py +11 -5
- bedrock_agentcore_starter_toolkit/utils/runtime/templates/execution_role_policy.json.j2 +2 -1
- {bedrock_agentcore_starter_toolkit-0.1.2.dist-info → bedrock_agentcore_starter_toolkit-0.1.4.dist-info}/METADATA +22 -2
- {bedrock_agentcore_starter_toolkit-0.1.2.dist-info → bedrock_agentcore_starter_toolkit-0.1.4.dist-info}/RECORD +35 -19
- {bedrock_agentcore_starter_toolkit-0.1.2.dist-info → bedrock_agentcore_starter_toolkit-0.1.4.dist-info}/WHEEL +0 -0
- {bedrock_agentcore_starter_toolkit-0.1.2.dist-info → bedrock_agentcore_starter_toolkit-0.1.4.dist-info}/entry_points.txt +0 -0
- {bedrock_agentcore_starter_toolkit-0.1.2.dist-info → bedrock_agentcore_starter_toolkit-0.1.4.dist-info}/licenses/LICENSE.txt +0 -0
- {bedrock_agentcore_starter_toolkit-0.1.2.dist-info → bedrock_agentcore_starter_toolkit-0.1.4.dist-info}/licenses/NOTICE.txt +0 -0
bedrock_agentcore_starter_toolkit/services/import_agent/scripts/bedrock_to_strands.py
@@ -0,0 +1,374 @@
+# pylint: disable=consider-using-f-string, line-too-long
+# ruff: noqa: E501
+"""Bedrock Agent to Strands Translator.
+
+This script translates AWS Bedrock Agent configurations into equivalent Strands code.
+"""
+
+import os
+import textwrap
+
+from .base_bedrock_translate import BaseBedrockTranslator
+
+
+class BedrockStrandsTranslation(BaseBedrockTranslator):
+    """Class to translate Bedrock Agent configurations to Strands code."""
+
+    def __init__(self, agent_config, debug: bool, output_dir: str, enabled_primitives: dict):
+        """Initialize the BedrockStrandsTranslation class."""
+        super().__init__(agent_config, debug, output_dir, enabled_primitives)
+
+        self.imports_code += self.generate_imports()
+        self.tools_code = self.generate_action_groups_code(platform="strands")
+        self.memory_code = self.generate_memory_configuration(memory_saver="SlidingWindowConversationManager")
+        self.collaboration_code = self.generate_collaboration_code()
+        self.kb_code = self.generate_knowledge_base_code()
+        self.models_code = self.generate_model_configurations()
+        self.agent_setup_code = self.generate_agent_setup()
+        self.usage_code = self.generate_example_usage()
+
+        # make prompts more readable
+        self.prompts_code = textwrap.fill(
+            self.prompts_code, width=150, break_long_words=False, replace_whitespace=False
+        )
+        self.code_sections = [
+            self.imports_code,
+            self.models_code,
+            self.prompts_code,
+            self.collaboration_code,
+            self.tools_code,
+            self.memory_code,
+            self.kb_code,
+            self.agent_setup_code,
+            self.usage_code,
+        ]
+
+    def generate_imports(self) -> str:
+        """Generate import statements for Strands components."""
+        return """
+sys.path.append(os.path.dirname(os.path.abspath(__file__)))
+
+from strands import Agent, tool
+from strands.agent.conversation_manager import SlidingWindowConversationManager
+from strands.models import BedrockModel
+from strands.types.content import Message
+"""
+
+    def generate_model_configurations(self) -> str:
+        """Generate Strands model configurations from Bedrock agent config."""
+        model_configs = []
+
+        for i, config in enumerate(self.prompt_configs):
+            prompt_type = config.get("promptType", "CUSTOM_{}".format(i))
+            if prompt_type == "KNOWLEDGE_BASE_RESPONSE_GENERATION" and not self.knowledge_bases:
+                continue
+            inference_config = config.get("inferenceConfiguration", {})
+
+            # Build model config string using string formatting
+            model_config = f"""
+llm_{prompt_type} = BedrockModel(
+    model_id="{self.model_id}",
+    region_name="{self.agent_region}",
+    temperature={inference_config.get("temperature", 0)},
+    max_tokens={inference_config.get("maximumLength", 2048)},
+    stop_sequences={repr(inference_config.get("stopSequences", []))},
+    top_p={inference_config.get("topP", 1.0)},
+    top_k={inference_config.get("topK", 250)}"""
+
+            # NOTE: Converse Models support guardrails, but they are applied too eagerly on 2nd invocations.
+            # Disabling guardrail support for Strands for now.
+
+            # Add guardrails if available
+            # if self.guardrail_config and prompt_type != "MEMORY_SUMMARIZATION":
+            #     model_config += f""",
+            # guardrail_id="{self.guardrail_config["guardrailIdentifier"]}",
+            # guardrail_version="{self.guardrail_config["guardrailVersion"]}\""""
+
+            model_config += "\n)"
+            model_configs.append(model_config)
+
+            self.generate_prompt(config)
+
+        return "\n".join(model_configs)
+
+    def generate_knowledge_base_code(self) -> str:
+        """Generate code for knowledge base retrievers."""
+        if not self.knowledge_bases:
+            return ""
+
+        kb_code = ""
+
+        for kb in self.knowledge_bases:
+            kb_name = kb.get("name", "").replace(" ", "_")
+            kb_description = kb.get("description", "")
+            kb_id = kb.get("knowledgeBaseId", "")
+            kb_region_name = kb.get("knowledgeBaseArn", "").split(":")[3]
+
+            kb_code += f"""
+@tool
+def retrieve_{kb_name}(query: str):
+    \"""This is a knowledge base with the following description: {kb_description}. Invoke it with a query to get relevant results.\"""
+    client = boto3.client("bedrock-agent-runtime", region_name="{kb_region_name}")
+    return client.retrieve(
+        retrievalQuery={{"text": query}},
+        knowledgeBaseId="{kb_id}",
+        retrievalConfiguration={{
+            "vectorSearchConfiguration": {{"numberOfResults": 10}},
+        }},
+    ).get('retrievalResults', [])
+"""
+            self.tools.append(f"retrieve_{kb_name}")
+
+        return kb_code
+
+    def generate_collaboration_code(self) -> str:
+        """Generate code for multi-agent collaboration."""
+        if not self.multi_agent_enabled or not self.collaborators:
+            return ""
+
+        collaborator_code = ""
+
+        # create the collaborators
+        for i, collaborator in enumerate(self.collaborators):
+            collaborator_file_name = f"strands_collaborator_{collaborator.get('collaboratorName', '')}"
+            collaborator_path = os.path.join(self.output_dir, f"{collaborator_file_name}.py")
+            BedrockStrandsTranslation(
+                collaborator, debug=self.debug, output_dir=self.output_dir, enabled_primitives=self.enabled_primitives
+            ).translate_bedrock_to_strands(collaborator_path)
+
+            self.imports_code += f"\nfrom {collaborator_file_name} import invoke_agent as invoke_{collaborator.get('collaboratorName', '')}_collaborator"
+
+            # conversation relay
+            relay_conversation_history = collaborator.get("relayConversationHistory", "DISABLED") == "TO_COLLABORATOR"
+
+            # create the collaboration code
+            collaborator_code += f"""
+@tool
+def invoke_{collaborator.get("collaboratorName", "")}(query: str) -> str:
+    \"""Invoke the collaborator agent/specialist with the following description: {self.collaborator_descriptions[i]}\"""
+    {"relay_history = get_agent().messages[:-2]" if relay_conversation_history else ""}
+    invoke_agent_response = invoke_{collaborator.get("collaboratorName", "")}_collaborator(query{", relay_history" if relay_conversation_history else ""})
+    return invoke_agent_response
+"""
+
+            self.tools.append("invoke_" + collaborator.get("collaboratorName", ""))
+
+        return collaborator_code
+
+    def generate_agent_setup(self) -> str:
+        """Generate agent setup code."""
+        agent_code = f"tools = [{','.join(self.tools)}]\ntools_used = set()"
+
+        if self.gateway_enabled:
+            agent_code += """\ntools += mcp_tools"""
+
+        if self.debug:
+            self.imports_code += "\nfrom strands.telemetry import StrandsTelemetry"
+            agent_code += """
+strands_telemetry = StrandsTelemetry()
+strands_telemetry.setup_meter(enable_console_exporter=True)
+strands_telemetry.setup_console_exporter()
+"""
+
+        if self.action_groups and self.tools_code:
+            agent_code += """\ntools += action_group_tools"""
+
+        memory_retrieve_code = (
+            ""
+            if not self.memory_enabled
+            else (
+                "memory_synopsis = memory_manager.get_memory_synopsis()"
+                if not self.agentcore_memory_enabled
+                else """
+        memories = memory_client.retrieve_memories(memory_id=memory_id, namespace=f'/summaries/{user_id}', query="Retrieve the most recent session sumamries.", top_k=20)
+        memory_synopsis = "\\n".join([m.get("content", {}).get("text", "") for m in memories])
+        """
+            )
+        )
+
+        # Create agent based on available components
+        agent_code += """
+
+def make_msg(role, text):
+    return {{
+        "role": role,
+        "content": [{{"text": text}}]
+    }}
+
+def inference(model, messages, system_prompt=""):
+    async def run_inference():
+        results = []
+        async for event in model.stream(messages=messages, system_prompt=system_prompt):
+            results.append(event)
+        return results
+
+    response = asyncio.run(run_inference())
+
+    text = ""
+    for chunk in response:
+        if not "contentBlockDelta" in chunk:
+            continue
+        text += chunk["contentBlockDelta"].get("delta", {{}}).get("text", "")
+
+    return text
+
+_agent = None
+first_turn = True
+last_input = ""
+user_id = ""
+{}
+
+# agent update loop
+def get_agent():
+    global _agent
+    {}
+        {}
+        system_prompt = ORCHESTRATION_TEMPLATE
+        {}
+        _agent = Agent(
+            model=llm_ORCHESTRATION,
+            system_prompt=system_prompt,
+            tools=tools,
+            conversation_manager=checkpointer_STM
+        )
+    return _agent
+""".format(
+            'last_agent = ""' if self.multi_agent_enabled and self.supervision_type == "SUPERVISOR_ROUTER" else "",
+            (
+                "if _agent is None or memory_manager.has_memory_changed():"
+                if self.memory_enabled and not self.agentcore_memory_enabled
+                else "if _agent is None:"
+            ),
+            memory_retrieve_code,
+            (
+                "system_prompt = system_prompt.replace('$memory_synopsis$', memory_synopsis)"
+                if self.memory_enabled
+                else ""
+            ),
+        )
+
+        # Generate routing code if needed
+        routing_code = self.generate_routing_code()
+
+        # Set up relay parameter definition based on whether we're accepting relays
+        relay_param_def = ", relayed_messages = []" if self.is_accepting_relays else ""
+
+        # Add relay handling code if needed
+        relay_code = (
+            """if relayed_messages:
+        agent.messages = relayed_messages"""
+            if self.is_accepting_relays
+            else ""
+        )
+
+        # Set up preprocessing code if enabled
+        preprocess_code = ""
+        if "PRE_PROCESSING" in self.enabled_prompts:
+            preprocess_code = """
+    pre_process_output = inference(llm_PRE_PROCESSING, [make_msg("user", question)], system_prompt=PRE_PROCESSING_TEMPLATE)
+    question += "\\n<PRE_PROCESSING>{}</PRE_PROCESSING>".format(pre_process_output)
+    """
+            if self.debug:
+                preprocess_code += '    print("PREPROCESSING_OUTPUT: {pre_process_output}")'
+
+        # Memory recording code
+        memory_add_user = (
+            """
+    memory_manager.add_message({'role': 'user', 'content': question})"""
+            if self.memory_enabled and not self.agentcore_memory_enabled
+            else ""
+        )
+
+        memory_add_assistant = (
+            """
+    memory_manager.add_message({'role': 'assistant', 'content': str(response)})"""
+            if self.memory_enabled and not self.agentcore_memory_enabled
+            else ""
+        )
+
+        # KB optimization code if enabled
+        kb_code = ""
+        if self.single_kb_optimization_enabled:
+            kb_name = self.knowledge_bases[0]["name"]
+            kb_code = f"""
+    if first_turn:
+        search_results = retrieve_{kb_name}(question)
+        kb_prompt_templated = KB_GENERATION_TEMPLATE.replace("$search_results$", search_results)
+        response = inference(llm_KNOWLEDGE_BASE_RESPONSE_GENERATION, [make_msg("user", question)], system_prompt=kb_prompt_templated)
+        first_turn = False
+    """
+
+        # Post-processing code
+        post_process_code = (
+            """
+    post_process_prompt = POST_PROCESSING_TEMPLATE.replace("$question$", question).replace("$latest_response$", str(response)).replace("$responses$", str(agent.messages))
+    post_process_output = inference(llm_POST_PROCESSING, [make_msg("user", post_process_prompt)])
+    return post_process_output"""
+            if "POST_PROCESSING" in self.enabled_prompts
+            else "return response"
+        )
+
+        # Combine it all into the invoke_agent function
+        agent_code += f"""
+def invoke_agent(question: str{relay_param_def}):
+    {"global last_agent" if self.supervision_type == "SUPERVISOR_ROUTER" else ""}
+    {"global first_turn" if self.single_kb_optimization_enabled else ""}
+    global last_input
+    last_input = question
+    agent = get_agent()
+    {relay_code}
+    {routing_code}
+    {preprocess_code}
+    {memory_add_user}
+
+    original_stdout = sys.stdout
+    sys.stdout = io.StringIO()
+    response = agent(question)
+    sys.stdout = original_stdout
+    {memory_add_assistant}
+    {kb_code}
+    {post_process_code}
+"""
+
+        agent_code += self.generate_entrypoint_code("strands")
+
+        return agent_code
+
+    def generate_routing_code(self):
+        """Generate routing code for supervisor router."""
+        if not self.multi_agent_enabled or self.supervision_type != "SUPERVISOR_ROUTER":
+            return ""
+
+        code = """
+    messages = str(agent.messages)
+
+    routing_template = ROUTING_TEMPLATE
+    routing_template = routing_template.replace("$last_user_request$", question).replace("$conversation$", messages).replace("$last_most_specialized_agent$", last_agent)
+    routing_choice = inference(llm_ROUTING_CLASSIFIER, [make_msg("user", question)], system_prompt=ROUTING_TEMPLATE)
+
+    choice = str(re.findall(r'<a.*?>(.*?)</a>', routing_choice)[0])"""

+        if self.debug:
+            code += """
+    print("Routing to agent: {}. Last used agent was {}.".format(choice, last_agent))"""
+
+        code += """
+    if choice == "undecidable":
+        pass"""
+
+        for agent in self.collaborators:
+            agent_name = agent.get("collaboratorName", "")
+            code += f"""
+    elif choice == "{agent_name}":
+        last_agent = "{agent_name}"
+        return invoke_{agent_name}_collaborator(question)"""
+
+        code += """
+    elif choice == "keep_previous_agent":
+        return eval(f"invoke_{last_agent}_collaborator")(question)"""
+
+        return code
+
+    def translate_bedrock_to_strands(self, output_path) -> dict:
+        """Translate Bedrock agent configuration to Strands code."""
+        return self.translate(output_path, self.code_sections, "strands")
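
For orientation, a minimal driver sketch for the new translator follows. Only the import path, the constructor signature, and translate_bedrock_to_strands are taken from the diff above; the shapes of agent_config and enabled_primitives, the output paths, and the driver itself are assumptions (the real wiring lives in cli/import_agent/commands.py, not shown here).

# Hypothetical usage sketch -- not part of the package.
from bedrock_agentcore_starter_toolkit.services.import_agent.scripts.bedrock_to_strands import (
    BedrockStrandsTranslation,
)

agent_config = {}  # assumption: a Bedrock agent export in the schema BaseBedrockTranslator expects

translator = BedrockStrandsTranslation(
    agent_config,
    debug=False,            # per the diff, True also emits StrandsTelemetry console exporters
    output_dir="out",       # collaborators are recursively written here as strands_collaborator_<name>.py
    enabled_primitives={},  # assumption about shape: toggles for gateway/memory primitives
)

# Writes the generated Strands agent (imports, models, prompts, collaboration,
# tools, memory, knowledge bases, agent setup, example usage) to one file.
translator.translate_bedrock_to_strands("out/strands_agent.py")

Note that the constructor does the heavy lifting: it eagerly renders every code section and recursively translates collaborators as a side effect, so translate_bedrock_to_strands appears to do little more than hand the pre-built code_sections to the base class's translate for writing.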