bedrock-agentcore-starter-toolkit 0.1.2__py3-none-any.whl → 0.1.4__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of bedrock-agentcore-starter-toolkit might be problematic.
- bedrock_agentcore_starter_toolkit/cli/__init__.py +1 -0
- bedrock_agentcore_starter_toolkit/cli/cli.py +3 -1
- bedrock_agentcore_starter_toolkit/cli/common.py +1 -1
- bedrock_agentcore_starter_toolkit/cli/import_agent/README.md +35 -0
- bedrock_agentcore_starter_toolkit/cli/import_agent/__init__.py +1 -0
- bedrock_agentcore_starter_toolkit/cli/import_agent/agent_info.py +230 -0
- bedrock_agentcore_starter_toolkit/cli/import_agent/commands.py +518 -0
- bedrock_agentcore_starter_toolkit/cli/runtime/commands.py +132 -42
- bedrock_agentcore_starter_toolkit/notebook/runtime/bedrock_agentcore.py +120 -22
- bedrock_agentcore_starter_toolkit/operations/gateway/client.py +2 -2
- bedrock_agentcore_starter_toolkit/operations/runtime/configure.py +5 -2
- bedrock_agentcore_starter_toolkit/operations/runtime/invoke.py +1 -1
- bedrock_agentcore_starter_toolkit/operations/runtime/launch.py +108 -30
- bedrock_agentcore_starter_toolkit/operations/runtime/models.py +1 -1
- bedrock_agentcore_starter_toolkit/services/__init__.py +1 -0
- bedrock_agentcore_starter_toolkit/services/import_agent/__init__.py +1 -0
- bedrock_agentcore_starter_toolkit/services/import_agent/assets/memory_manager_template.py +207 -0
- bedrock_agentcore_starter_toolkit/services/import_agent/assets/requirements_langchain.j2 +9 -0
- bedrock_agentcore_starter_toolkit/services/import_agent/assets/requirements_strands.j2 +5 -0
- bedrock_agentcore_starter_toolkit/services/import_agent/assets/template_fixtures_merged.json +1102 -0
- bedrock_agentcore_starter_toolkit/services/import_agent/scripts/__init__.py +1 -0
- bedrock_agentcore_starter_toolkit/services/import_agent/scripts/base_bedrock_translate.py +1668 -0
- bedrock_agentcore_starter_toolkit/services/import_agent/scripts/bedrock_to_langchain.py +382 -0
- bedrock_agentcore_starter_toolkit/services/import_agent/scripts/bedrock_to_strands.py +374 -0
- bedrock_agentcore_starter_toolkit/services/import_agent/utils.py +417 -0
- bedrock_agentcore_starter_toolkit/services/runtime.py +35 -12
- bedrock_agentcore_starter_toolkit/utils/runtime/container.py +54 -3
- bedrock_agentcore_starter_toolkit/utils/runtime/entrypoint.py +11 -5
- bedrock_agentcore_starter_toolkit/utils/runtime/templates/execution_role_policy.json.j2 +2 -1
- {bedrock_agentcore_starter_toolkit-0.1.2.dist-info → bedrock_agentcore_starter_toolkit-0.1.4.dist-info}/METADATA +22 -2
- {bedrock_agentcore_starter_toolkit-0.1.2.dist-info → bedrock_agentcore_starter_toolkit-0.1.4.dist-info}/RECORD +35 -19
- {bedrock_agentcore_starter_toolkit-0.1.2.dist-info → bedrock_agentcore_starter_toolkit-0.1.4.dist-info}/WHEEL +0 -0
- {bedrock_agentcore_starter_toolkit-0.1.2.dist-info → bedrock_agentcore_starter_toolkit-0.1.4.dist-info}/entry_points.txt +0 -0
- {bedrock_agentcore_starter_toolkit-0.1.2.dist-info → bedrock_agentcore_starter_toolkit-0.1.4.dist-info}/licenses/LICENSE.txt +0 -0
- {bedrock_agentcore_starter_toolkit-0.1.2.dist-info → bedrock_agentcore_starter_toolkit-0.1.4.dist-info}/licenses/NOTICE.txt +0 -0
bedrock_agentcore_starter_toolkit/services/import_agent/utils.py (new file)
@@ -0,0 +1,417 @@
+"""Utility functions for Bedrock Agent import service."""
+
+import json
+import os
+import re
+import secrets
+import textwrap
+from typing import Any, Dict, List, Union
+
+
+def json_to_obj_fixed(json_string: str):
+    """Convert a JSON string to a Python object, handling common formatting issues."""
+    json_string = json_string.strip()
+    json_string = " ".join(json_string.split())
+
+    try:
+        output = json.loads(json_string)
+    except json.JSONDecodeError:
+        output = json_string
+
+    return output
+
+
+def fix_field(obj, field=None):
+    """Fixes the field in the object by converting it to a JSON object if it's a string."""
+    if field is None:
+        return json_to_obj_fixed(obj)
+    else:
+        # Create a new dict to avoid modifying the original
+        new_obj = obj.copy()
+        new_obj[field] = json_to_obj_fixed(obj[field])
+
+        return new_obj
+
+
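These two helpers normalize Bedrock Agent fields that may arrive as JSON embedded in strings. A minimal behavioral sketch (inputs invented for illustration):

```python
# json_to_obj_fixed parses a JSON string, falling back to the raw string on failure.
assert json_to_obj_fixed('{"a": 1}') == {"a": 1}
assert json_to_obj_fixed("not json") == "not json"

# fix_field returns a copy with one field parsed, leaving the input dict untouched.
record = {"payload": '{"k": "v"}', "name": "demo"}
assert fix_field(record, "payload")["payload"] == {"k": "v"}
assert record["payload"] == '{"k": "v"}'
```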
+def clean_variable_name(text):
+    """Clean a string to create a valid Python variable name. Useful for cleaning Bedrock Agents fields."""
+    text = str(text)
+    cleaned = re.sub(r"[^a-zA-Z0-9\s]", " ", text)
+    cleaned = cleaned.lower()
+    cleaned = re.sub(r"\s+", " ", cleaned)
+    cleaned = cleaned.strip()
+    cleaned = cleaned.replace(" ", "_")
+    if cleaned and cleaned[0].isdigit():
+        cleaned = f"_{cleaned}"
+
+    if not cleaned:
+        cleaned = "variable"
+
+    return cleaned
+
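Tracing the steps above: punctuation becomes whitespace, whitespace runs collapse to single underscores, a leading digit gets a guard underscore, and an empty result falls back to `"variable"`. Illustrative inputs:

```python
assert clean_variable_name("My Agent: v2!") == "my_agent_v2"
assert clean_variable_name("123 go") == "_123_go"
assert clean_variable_name("!!!") == "variable"
```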
+
+def unindent_by_one(input_code, spaces_per_indent=4):
+    """Unindents the input code by one level of indentation.
+
+    Note: text dedent does not work as expected in this context, so we implement our own logic.
+
+    Args:
+        input_code (str): The code to unindent.
+        spaces_per_indent (int): The number of spaces per indentation level (default is 4).
+
+    Returns:
+        str: The unindented code.
+    """
+    lines = input_code.splitlines(True)  # Keep the line endings
+    # Process each line
+    unindented = []
+    for line in lines:
+        if line.strip():  # If line is not empty
+            current_indent = len(line) - len(line.lstrip())
+            # Remove one level of indentation if possible
+            if current_indent >= spaces_per_indent:
+                line = line[spaces_per_indent:]
+        unindented.append(line)
+
+    return "".join(unindented)
+
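A quick sketch of the behavior: only lines carrying at least one full indent level lose it, so column-0 lines pass through unchanged:

```python
snippet = "    if x:\n        y()\nz()\n"
assert unindent_by_one(snippet) == "if x:\n    y()\nz()\n"
```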
+
+def generate_pydantic_models(
+    schema_input: Union[Dict[str, Any], List[Dict[str, Any]], str],
+    root_model_name: str = "RequestModel",
+    content_type_annotation: str = "",
+) -> str:
+    """Generate Pydantic models from OpenAPI schema objects. Works recursively for nested objects.
+
+    Args:
+        schema_input: The OpenAPI schema, parameter object or parameter array as dictionary/list or JSON string
+        root_model_name: Name for the root model
+        content_type_annotation: Optional content type annotation for the root model
+
+    Returns:
+        String containing Python code for the Pydantic models
+    """
+    # Convert JSON string to dictionary/list if needed
+    if isinstance(schema_input, str):
+        try:
+            schema_input = json.loads(schema_input)
+        except json.JSONDecodeError as e:
+            raise ValueError(f"Invalid JSON input: {e}") from e
+
+    # Start with the imports
+    code = "\n"
+
+    # Dictionary to keep track of models we've created
+    models = {}
+
+    def clean_class_name(name: str) -> str:
+        """Create a valid Python class name."""
+        # Replace non-alphanumeric characters with underscores
+        cleaned = re.sub(r"[^a-zA-Z0-9]", "_", name)
+        # Ensure it starts with a letter
+        if cleaned and not cleaned[0].isalpha():
+            cleaned = "Model_" + cleaned
+        # Convert to CamelCase
+        return "".join(word.capitalize() for word in cleaned.split("_"))
+
+    def process_schema(schema_obj: Dict[str, Any], name: str) -> str:
+        """Process a schema object and return the model class name."""
+        # Handle schema wrapper
+        if "schema" in schema_obj:
+            schema_obj = schema_obj["schema"]
+
+        # Handle $ref
+        if "$ref" in schema_obj:
+            ref_name = schema_obj["$ref"].split("/")[-1]
+            return clean_class_name(ref_name)
+
+        obj_type = schema_obj.get("type")
+
+        # Default to object type if not specified
+        if obj_type is None:
+            obj_type = "object"
+
+        if obj_type == "object":
+            # Generate a valid Python class name
+            class_name = clean_class_name(name)
+
+            # Avoid duplicate model names
+            if class_name in models:
+                return class_name
+
+            properties = schema_obj.get("properties", {})
+            required = schema_obj.get("required", [])
+
+            class_def = f"class {class_name}(BaseModel):\n"
+
+            # Add content type annotation if provided
+            if content_type_annotation:
+                class_def += f'    content_type_annotation: Literal["{content_type_annotation}"]\n'
+
+            if "description" in schema_obj:
+                class_def += f'    """{schema_obj["description"]}"""\n'
+
+            if not properties:
+                class_def += "    pass\n"
+                models[class_name] = class_def
+                return class_name
+
+            for prop_name, prop_schema in properties.items():
+                field_type = get_type_hint(prop_schema, f"{name}_{prop_name}")
+
+                # Check if required
+                is_required = prop_name in required
+
+                # Build the field definition
+                if is_required:
+                    if "description" in prop_schema:
+                        field_def = f' = Field(description="{prop_schema["description"]}")'
+                    else:
+                        field_def = ""
+                else:
+                    field_type = f"Optional[{field_type}]"
+                    if "description" in prop_schema:
+                        field_def = f' = Field(None, description="{prop_schema["description"]}")'
+                    else:
+                        field_def = " = None"
+
+                class_def += f"    {prop_name}: {field_type}{field_def}\n"
+
+            models[class_name] = class_def
+            return class_name
+        elif obj_type == "array":
+            items = schema_obj.get("items", {})
+            item_type = get_type_hint(items, f"{name}_item")
+            return f"List[{item_type}]"
+        else:
+            return get_python_type(obj_type)
+
+    def get_type_hint(prop_schema: Dict[str, Any], name: str) -> str:
+        """Get the Python type hint for a property schema."""
+        if "$ref" in prop_schema:
+            ref_name = prop_schema["$ref"].split("/")[-1]
+            return clean_class_name(ref_name)
+
+        prop_type = prop_schema.get("type")
+
+        # Default to Any if type is not specified
+        if prop_type is None:
+            return "Any"
+
+        if prop_type == "object":
+            # This is a nested object, create a new model for it
+            return process_schema(prop_schema, name)
+        elif prop_type == "array":
+            items = prop_schema.get("items", {})
+            item_type = get_type_hint(items, name)
+            return f"List[{item_type}]"
+        else:
+            return get_python_type(prop_type)
+
+    def get_python_type(openapi_type: str) -> str:
+        """Convert OpenAPI type to Python type."""
+        type_mapping = {
+            "string": "str",
+            "integer": "int",
+            "number": "float",
+            "boolean": "bool",
+            "null": "None",
+            "object": "Dict[str, Any]",
+        }
+        return type_mapping.get(openapi_type, "Any")
+
+    def process_parameter_list(params: List[Dict[str, Any]], name: str) -> str:
+        """Process OpenAPI parameter array and create a model."""
+        class_name = clean_class_name(name)
+        if class_name in models:
+            return class_name
+
+        class_def = f"class {class_name}(BaseModel):\n"
+
+        if not params:
+            class_def += "    pass\n"
+            models[class_name] = class_def
+            return class_name
+
+        # Group parameters by 'in' value to potentially create separate models
+        param_groups = {}
+        for param in params:
+            param_in = param.get("in", "query")  # Default to query if not specified
+            if param_in not in param_groups:
+                param_groups[param_in] = []
+            param_groups[param_in].append(param)
+
+        # If only one type or specifically requested, create a single model
+        if len(param_groups) == 1 or name != "RequestModel":
+            for param in params:
+                param_name = param.get("name", "")
+                if not param_name:
+                    continue
+
+                # Get the parameter type
+                if "schema" in param:
+                    # OpenAPI 3.0 style
+                    field_type = get_type_hint(param["schema"], f"{name}_{param_name}")
+                else:
+                    # OpenAPI 2.0 style
+                    field_type = get_python_type(param.get("type", "string"))
+
+                # Check if required
+                is_required = param.get("required", False)
+
+                # Build the field definition
+                if is_required:
+                    if "description" in param:
+                        field_def = f' = Field(description="{param["description"]}")'
+                    else:
+                        field_def = ""
+                else:
+                    field_type = f"Optional[{field_type}]"
+                    if "description" in param:
+                        field_def = f' = Field(None, description="{param["description"]}")'
+                    else:
+                        field_def = " = None"
+
+                class_def += f"    {param_name}: {field_type}{field_def}\n"
+        else:
+            # Create separate models for each parameter type
+            for param_in, param_list in param_groups.items():
+                in_type_name = f"{name}_{param_in.capitalize()}Params"
+                in_class_name = process_parameter_list(param_list, in_type_name)
+                class_def += f"    {param_in}_params: {in_class_name}\n"
+
+        models[class_name] = class_def
+        return class_name
+
+    def process_parameter_dict(params: Dict[str, Dict[str, Any]], name: str) -> str:
+        """Process a dictionary of named parameters."""
+        class_name = clean_class_name(name)
+        if class_name in models:
+            return class_name
+
+        class_def = f"class {class_name}(BaseModel):\n"
+
+        if not params:
+            class_def += "    pass\n"
+            models[class_name] = class_def
+            return class_name
+
+        for param_name, param_def in params.items():
+            # Get the parameter type
+            if "schema" in param_def:
+                # OpenAPI 3.0 style
+                field_type = get_type_hint(param_def["schema"], f"{name}_{param_name}")
+            else:
+                # OpenAPI 2.0 style or simplified parameter
+                field_type = get_python_type(param_def.get("type", "string"))
+
+            # Check if required
+            is_required = param_def.get("required", False)
+
+            # Build the field definition
+            if is_required:
+                if "description" in param_def:
+                    field_def = f' = Field(description="{param_def["description"]}")'
+                else:
+                    field_def = ""
+            else:
+                field_type = f"Optional[{field_type}]"
+                if "description" in param_def:
+                    field_def = f' = Field(None, description="{param_def["description"]}")'
+                else:
+                    field_def = " = None"
+
+            class_def += f"    {param_name}: {field_type}{field_def}\n"
+
+        models[class_name] = class_def
+        return class_name
+
+    # Determine the type of input and process accordingly
+    if isinstance(schema_input, list):
+        # This is likely a parameter array
+        process_parameter_list(schema_input, root_model_name)
+    elif isinstance(schema_input, dict):
+        if "schema" in schema_input:
+            # This is likely a request body schema
+            process_schema(schema_input, root_model_name)
+        elif "parameters" in schema_input:
+            # This is an operation object with parameters
+            process_parameter_list(schema_input["parameters"], root_model_name)
+        elif all(isinstance(value, dict) and ("name" in value and "in" in value) for value in schema_input.values()):
+            # This appears to be a parameter dict with name/in properties
+            process_parameter_list(list(schema_input.values()), root_model_name)
+        elif all(isinstance(value, dict) for value in schema_input.values()):
+            # This appears to be a dictionary of named parameters
+            process_parameter_dict(schema_input, root_model_name)
+        else:
+            # Try to process as a schema object
+            process_schema({"type": "object", "properties": schema_input}, root_model_name)
+
+    # Add all models to the code
+    for model_code in models.values():
+        code += model_code + "\n\n"
+
+    code = code.rstrip() + "\n"
+    return textwrap.indent(code, "    "), clean_class_name(root_model_name)
+
+
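This function is the heart of the importer's code generation: it walks an OpenAPI schema (or parameter list) and emits Pydantic class source, indented one level for embedding in a generated tool module. Note that despite the `-> str` annotation it actually returns a `(code, root_class_name)` tuple, and the emitted code evidently expects `BaseModel`, `Field`, `Optional`, and `Literal` to be imported by the surrounding template. A hedged usage sketch with an invented schema:

```python
schema = {
    "schema": {
        "type": "object",
        "required": ["city"],
        "properties": {
            "city": {"type": "string", "description": "City to look up"},
            "days": {"type": "integer"},
        },
    }
}
model_source, root_class = generate_pydantic_models(schema, root_model_name="WeatherRequest")
# root_class == "Weatherrequest" (clean_class_name capitalizes each underscore-split
# word, lowercasing the rest); model_source contains an indented
# "class Weatherrequest(BaseModel):" with a required `city: str` field and an
# optional `days: Optional[int] = None` field.
```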
+def prune_tool_name(tool_name: str, length=50) -> str:
+    """Prune tool name to avoid maxiumum of 64 characters. If it exceeds, truncate and append a random suffix."""
+    if len(tool_name) > length:
+        tool_name = tool_name[:length]
+        tool_name += f"_{secrets.token_hex(3)}"
+    return tool_name
+
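With the default `length=50`, a pruned name is at most 57 characters (50 kept, plus an underscore and six hex digits from `secrets.token_hex(3)`), safely under the 64-character limit named in the docstring:

```python
pruned = prune_tool_name("x" * 80)
assert len(pruned) == 57 and pruned.startswith("x" * 50)
```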
+
+def get_template_fixtures(field: str = "orchestrationBasePrompts", group: str = "REACT_MULTI_ACTION") -> dict:
+    """Extract all templateFixtures from a specified field in template_fixtures_merged.json.
+
+    For orchestrationBasePrompts, uses the specified group (defaults to REACT_MULTI_ACTION).
+
+    Args:
+        field: The field to extract templateFixtures from (defaults to "orchestrationBasePrompts")
+        group: For orchestrationBasePrompts, which group to use (defaults to "REACT_MULTI_ACTION")
+
+    Returns:
+        Dict mapping fixture names to their template strings
+    """
+    project_root = os.path.dirname(os.path.abspath(__file__))
+    file_path = os.path.join(project_root, "assets", "template_fixtures_merged.json")
+    with open(file_path, "r", encoding="utf-8") as f:
+        data = json.load(f)
+
+    if field not in data:
+        raise ValueError(f"Field '{field}' not found in template_fixtures_merged.json")
+
+    field_data = data[field]
+
+    # For orchestrationBasePrompts, get the specified group's templateFixtures
+    if field == "orchestrationBasePrompts":
+        if group not in field_data:
+            raise ValueError(f"Group '{group}' not found in orchestrationBasePrompts")
+        fixtures = field_data[group].get("templateFixtures", {})
+    else:
+        # For other fields, get templateFixtures directly
+        fixtures = field_data.get("templateFixtures", {})
+
+    result = {}
+    for name, fixture in fixtures.items():
+        if isinstance(fixture, dict) and "template" in fixture:
+            result[name] = fixture["template"]
+
+    return result
+
+
+def safe_substitute_placeholders(template_str, substitutions):
+    """Safely substitute placeholders in a string, leaving non-matching placeholders unchanged."""
+    result = template_str
+    for key, value in substitutions.items():
+        # Only replace if the key exists in the substitutions dict
+        if key in template_str:
+            result = result.replace(key, value)
+    return result
+
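A sketch with invented `$...$`-style placeholder keys of the kind Bedrock prompt templates use; keys absent from the substitution map survive untouched:

```python
template = "Answer $question$ using $tools$."
out = safe_substitute_placeholders(template, {"$question$": "the query"})
assert out == "Answer the query using $tools$."
```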
+
+def get_base_dir(file):
+    """Get the base directory of the project."""
+    return os.path.dirname(os.path.dirname(os.path.abspath(file)))
bedrock_agentcore_starter_toolkit/services/runtime.py
@@ -10,9 +10,13 @@ from typing import Any, Dict, Optional
 import boto3
 import requests
 from botocore.exceptions import ClientError
+from rich.console import Console
 
 from ..utils.endpoints import get_control_plane_endpoint, get_data_plane_endpoint
 
+logger = logging.getLogger(__name__)
+console = Console()
+
 
 def generate_session_id() -> str:
     """Generate session ID."""
@@ -47,19 +51,26 @@ def _handle_aws_response(response) -> dict:
 
 
 def _handle_streaming_response(response) -> Dict[str, Any]:
-
-    logger.setLevel(logging.INFO)
-
-    content = []
+    complete_text = ""
     for line in response.iter_lines(chunk_size=1):
         if line:
             line = line.decode("utf-8")
             if line.startswith("data: "):
-
-
-
-
-
+                json_chunk = line[6:]
+                try:
+                    parsed_chunk = json.loads(json_chunk)
+                    if isinstance(parsed_chunk, str):
+                        text_chunk = parsed_chunk
+                    else:
+                        text_chunk = json.dumps(parsed_chunk, ensure_ascii=False)
+                    text_chunk += "\n"
+                    console.print(text_chunk, end="", style="bold cyan")
+                    complete_text += text_chunk
+                except json.JSONDecodeError:
+                    console.print(json_chunk, style="bold cyan")
+                    continue
+    console.print()
+    return {}
 
 
 class BedrockAgentCoreClient:
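The rewritten handler consumes Server-Sent-Events-style `data:` lines, pretty-printing each payload to the console as it arrives instead of accumulating a `content` list: string payloads print verbatim, other JSON payloads are re-serialized via `json.dumps(..., ensure_ascii=False)`, and unparseable chunks print raw. A stream like the following (payloads invented for illustration) would print both chunks in cyan and return `{}`:

```
data: "Thinking about your request..."
data: {"type": "tool_use", "name": "get_weather"}
```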
@@ -128,7 +139,19 @@ class BedrockAgentCoreClient:
             if error_code == "ConflictException":
                 if not auto_update_on_conflict:
                     self.logger.error("Agent '%s' already exists and auto_update_on_conflict is disabled", agent_name)
-
+                    # Create a more helpful error message
+                    raise ClientError(
+                        {
+                            "Error": {
+                                "Code": "ConflictException",
+                                "Message": (
+                                    f"Agent '{agent_name}' already exists. To update the existing agent, "
+                                    "use the --auto-update-on-conflict flag with the launch command."
+                                ),
+                            }
+                        },
+                        "CreateAgentRuntime",
+                    ) from e
 
                 self.logger.info("Agent '%s' already exists, searching for existing agent...", agent_name)
 
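Callers that hit the conflict can now read an actionable message from the standard botocore error shape. A hedged sketch (the creation call below is hypothetical; only the error payload shape comes from the diff):

```python
from botocore.exceptions import ClientError

try:
    client.create_agent(agent_name="my_agent")  # hypothetical caller into this code path
except ClientError as e:
    if e.response["Error"]["Code"] == "ConflictException":
        # Message suggests rerunning: agentcore launch --auto-update-on-conflict
        print(e.response["Error"]["Message"])
```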
@@ -428,7 +451,7 @@ class HttpBedrockAgentCoreClient:
             params={"qualifier": endpoint_name},
             headers=headers,
             json=body,
-            timeout=
+            timeout=900,
             stream=True,
         )
         return _handle_http_response(response)
@@ -466,7 +489,7 @@ class LocalBedrockAgentCoreClient:
 
         try:
             # Make request with timeout
-            response = requests.post(url, headers=headers, json=body, timeout=
+            response = requests.post(url, headers=headers, json=body, timeout=900, stream=True)
             return _handle_http_response(response)
         except requests.exceptions.RequestException as e:
             self.logger.error("Failed to invoke agent endpoint: %s", str(e))
bedrock_agentcore_starter_toolkit/utils/runtime/container.py
@@ -29,21 +29,56 @@ class ContainerRuntime:
         """
         runtime_type = runtime_type or self.DEFAULT_RUNTIME
         self.available_runtimes = ["finch", "docker", "podman"]
+        self.runtime = None
+        self.has_local_runtime = False
 
         if runtime_type == "auto":
             for runtime in self.available_runtimes:
                 if self._is_runtime_installed(runtime):
                     self.runtime = runtime
+                    self.has_local_runtime = True
                     break
             else:
-
+                # Informational message - default CodeBuild deployment works fine
+                _handle_warn(
+                    "ℹ️ No container engine found (Docker/Finch/Podman not installed)\n"
+                    "✅ Default deployment uses CodeBuild (no container engine needed)\n"
+                    "💡 Run 'agentcore launch' for cloud-based building and deployment\n"
+                    "💡 For local builds, install Docker, Finch, or Podman"
+                )
+                self.runtime = "none"
+                self.has_local_runtime = False
         elif runtime_type in self.available_runtimes:
             if self._is_runtime_installed(runtime_type):
                 self.runtime = runtime_type
+                self.has_local_runtime = True
             else:
-
+                # Convert hard error to warning - suggest CodeBuild instead
+                _handle_warn(
+                    f"⚠️ {runtime_type.capitalize()} is not installed\n"
+                    "💡 Recommendation: Use CodeBuild for building containers in the cloud\n"
+                    "💡 Run 'agentcore launch' (default) for CodeBuild deployment\n"
+                    f"💡 For local builds, please install {runtime_type.capitalize()}"
+                )
+                self.runtime = "none"
+                self.has_local_runtime = False
         else:
-
+            if runtime_type == "none":
+                raise ValueError(
+                    "No supported container engine found.\n\n"
+                    "AgentCore requires one of the following container engines for local builds:\n"
+                    "• Docker (any recent version, including Docker Desktop)\n"
+                    "• Finch (Amazon's open-source container engine)\n"
+                    "• Podman (compatible alternative to Docker)\n\n"
+                    "To install:\n"
+                    "• Docker: https://docs.docker.com/get-docker/\n"
+                    "• Finch: https://github.com/runfinch/finch\n"
+                    "• Podman: https://podman.io/getting-started/installation\n\n"
+                    "Alternative: Use CodeBuild for cloud-based building (no container engine needed):\n"
+                    "  agentcore launch  # Uses CodeBuild (default)"
+                )
+            else:
+                raise ValueError(f"Unsupported runtime: {runtime_type}")
 
     def _is_runtime_installed(self, runtime: str) -> bool:
         """Check if runtime is installed."""
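Net effect: constructing a runtime no longer hard-fails when no engine is installed; the constructor warns, sets `runtime = "none"`, and records the state in `has_local_runtime` so callers can branch. A sketch assuming the constructor takes the runtime type shown above:

```python
runtime = ContainerRuntime("auto")  # warns instead of raising if nothing is installed
if not runtime.has_local_runtime:
    print("No local engine; use the default CodeBuild path: agentcore launch")
```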
@@ -190,6 +225,14 @@ class ContainerRuntime:
 
     def build(self, dockerfile_dir: Path, tag: str, platform: Optional[str] = None) -> Tuple[bool, List[str]]:
         """Build container image."""
+        if not self.has_local_runtime:
+            return False, [
+                "No container runtime available for local build",
+                "💡 Recommendation: Use CodeBuild for building containers in the cloud",
+                "💡 Run 'agentcore launch' (default) for CodeBuild deployment",
+                "💡 For local builds, please install Docker, Finch, or Podman",
+            ]
+
         if not dockerfile_dir.exists():
             return False, [f"Directory not found: {dockerfile_dir}"]
 
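The guard surfaces through `build()`'s existing `(success, output_lines)` contract rather than raising, so existing callers keep working. Continuing the sketch above:

```python
from pathlib import Path

ok, output = runtime.build(Path("my_agent"), "my-agent:latest")
if not ok:
    for line in output:
        print(line)  # first line states the failure; the 💡 lines point to CodeBuild
```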
@@ -212,6 +255,14 @@ class ContainerRuntime:
             port: Port to expose (default: 8080)
             env_vars: Additional environment variables to pass to container
         """
+        if not self.has_local_runtime:
+            raise RuntimeError(
+                "No container runtime available for local run\n"
+                "💡 Recommendation: Use CodeBuild for building containers in the cloud\n"
+                "💡 Run 'agentcore launch' (default) for CodeBuild deployment\n"
+                "💡 For local runs, please install Docker, Finch, or Podman"
+            )
+
         container_name = f"{tag.split(':')[0]}-{int(time.time())}"
         cmd = [self.runtime, "run", "-it", "--rm", "-p", f"{port}:8080", "--name", container_name]
 
bedrock_agentcore_starter_toolkit/utils/runtime/entrypoint.py
@@ -144,7 +144,6 @@ def _handle_explicit_file(package_dir: Path, explicit_file: str) -> DependencyInfo:
     # Ensure file is within project directory for Docker context
     try:
         relative_path = explicit_path.relative_to(package_dir.resolve())
-        file_path = str(relative_path)
     except ValueError:
         raise ValueError(
             f"Requirements file must be within project directory. File: {explicit_path}, Project: {package_dir}"
@@ -155,14 +154,21 @@ def _handle_explicit_file(package_dir: Path, explicit_file: str) -> DependencyInfo:
     install_path = None
 
     if file_type == "pyproject":
-        if
+        if len(relative_path.parts) > 1:
             # pyproject.toml in subdirectory - install from that directory
-            install_path =
+            install_path = Path(relative_path).parent
         else:
             # pyproject.toml in root - install from current directory
-            install_path = "."
+            install_path = Path(".")
 
-
+    # Get POSIX strings for file and install path
+    file_path = relative_path.as_posix()
+    install_path = install_path and install_path.as_posix()
+
+    # Maintain local format for explicit path
+    explicit_path = str(explicit_path)
+
+    return DependencyInfo(file=file_path, type=file_type, resolved_path=explicit_path, install_path=install_path)
 
 
 def validate_requirements_file(build_dir: Path, requirements_file: str) -> DependencyInfo:
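The move from `str(relative_path)` to `relative_path.as_posix()` matters on Windows, where relative paths stringify with backslashes that would corrupt paths rendered into the Dockerfile; `as_posix()` guarantees forward slashes, and the `install_path and install_path.as_posix()` idiom leaves the `None` used for non-pyproject files untouched. For illustration:

```python
from pathlib import PureWindowsPath

rel = PureWindowsPath("deps", "pyproject.toml")
assert str(rel) == "deps\\pyproject.toml"   # backslashes break a Dockerfile COPY
assert rel.as_posix() == "deps/pyproject.toml"
```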