universal-mcp-agents 0.1.19rc1__py3-none-any.whl → 0.1.24rc3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57) hide show
  1. universal_mcp/agents/__init__.py +15 -16
  2. universal_mcp/agents/base.py +46 -35
  3. universal_mcp/agents/bigtool/state.py +1 -1
  4. universal_mcp/agents/cli.py +2 -5
  5. universal_mcp/agents/codeact0/__init__.py +2 -3
  6. universal_mcp/agents/codeact0/__main__.py +4 -7
  7. universal_mcp/agents/codeact0/agent.py +444 -96
  8. universal_mcp/agents/codeact0/langgraph_agent.py +1 -1
  9. universal_mcp/agents/codeact0/llm_tool.py +2 -254
  10. universal_mcp/agents/codeact0/prompts.py +247 -137
  11. universal_mcp/agents/codeact0/sandbox.py +52 -18
  12. universal_mcp/agents/codeact0/state.py +26 -6
  13. universal_mcp/agents/codeact0/tools.py +400 -74
  14. universal_mcp/agents/codeact0/utils.py +175 -11
  15. universal_mcp/agents/codeact00/__init__.py +3 -0
  16. universal_mcp/agents/{unified → codeact00}/__main__.py +4 -6
  17. universal_mcp/agents/codeact00/agent.py +578 -0
  18. universal_mcp/agents/codeact00/config.py +77 -0
  19. universal_mcp/agents/{unified → codeact00}/langgraph_agent.py +2 -2
  20. universal_mcp/agents/{unified → codeact00}/llm_tool.py +1 -1
  21. universal_mcp/agents/codeact00/prompts.py +364 -0
  22. universal_mcp/agents/{unified → codeact00}/sandbox.py +52 -18
  23. universal_mcp/agents/codeact00/state.py +66 -0
  24. universal_mcp/agents/codeact00/tools.py +525 -0
  25. universal_mcp/agents/codeact00/utils.py +678 -0
  26. universal_mcp/agents/codeact01/__init__.py +3 -0
  27. universal_mcp/agents/{codeact → codeact01}/__main__.py +4 -11
  28. universal_mcp/agents/codeact01/agent.py +413 -0
  29. universal_mcp/agents/codeact01/config.py +77 -0
  30. universal_mcp/agents/codeact01/langgraph_agent.py +14 -0
  31. universal_mcp/agents/codeact01/llm_tool.py +25 -0
  32. universal_mcp/agents/codeact01/prompts.py +246 -0
  33. universal_mcp/agents/codeact01/sandbox.py +162 -0
  34. universal_mcp/agents/{unified → codeact01}/state.py +26 -10
  35. universal_mcp/agents/codeact01/tools.py +648 -0
  36. universal_mcp/agents/{unified → codeact01}/utils.py +175 -11
  37. universal_mcp/agents/llm.py +14 -4
  38. universal_mcp/agents/react.py +3 -3
  39. universal_mcp/agents/sandbox.py +124 -69
  40. universal_mcp/applications/llm/app.py +76 -24
  41. {universal_mcp_agents-0.1.19rc1.dist-info → universal_mcp_agents-0.1.24rc3.dist-info}/METADATA +6 -5
  42. universal_mcp_agents-0.1.24rc3.dist-info/RECORD +66 -0
  43. universal_mcp/agents/codeact/__init__.py +0 -3
  44. universal_mcp/agents/codeact/agent.py +0 -240
  45. universal_mcp/agents/codeact/models.py +0 -11
  46. universal_mcp/agents/codeact/prompts.py +0 -82
  47. universal_mcp/agents/codeact/sandbox.py +0 -85
  48. universal_mcp/agents/codeact/state.py +0 -11
  49. universal_mcp/agents/codeact/utils.py +0 -68
  50. universal_mcp/agents/codeact0/playbook_agent.py +0 -355
  51. universal_mcp/agents/unified/README.md +0 -45
  52. universal_mcp/agents/unified/__init__.py +0 -3
  53. universal_mcp/agents/unified/agent.py +0 -289
  54. universal_mcp/agents/unified/prompts.py +0 -192
  55. universal_mcp/agents/unified/tools.py +0 -188
  56. universal_mcp_agents-0.1.19rc1.dist-info/RECORD +0 -64
  57. {universal_mcp_agents-0.1.19rc1.dist-info → universal_mcp_agents-0.1.24rc3.dist-info}/WHEEL +0 -0
@@ -0,0 +1,678 @@
1
+ import ast
2
+ import importlib
3
+ import re
4
+ from collections.abc import Sequence
5
+ from typing import Any
6
+
7
+ from langchain_core.messages import AIMessage, BaseMessage
8
+ from universal_mcp.types import ToolConfig
9
+
10
+ MAX_CHARS = 5000
11
+
12
+
13
def build_anthropic_cache_message(text: str, role: str = "system", ttl: str = "1h") -> list[dict[str, Any]]:
    """Wrap *text* in a single Anthropic-style cache message.

    Returns a one-element messages array whose content block carries an
    ephemeral cache-control marker with the requested TTL.
    """
    cached_block = {
        "type": "text",
        "text": text,
        "cache_control": {"type": "ephemeral", "ttl": ttl},
    }
    return [{"role": role, "content": [cached_block]}]
31
+
32
+
33
def strip_thinking(messages: list[BaseMessage]):
    """Drop Anthropic 'thinking' blocks from the newest AIMessage, in place.

    Walks backwards to locate the most recent AIMessage. Plain-string content
    is left untouched; block-array content has its thinking entries removed.
    """
    if not messages:
        return messages

    # Locate the last AIMessage by scanning from the tail.
    target_idx = next(
        (i for i in range(len(messages) - 1, -1, -1) if isinstance(messages[i], AIMessage)),
        None,
    )
    if target_idx is None:
        return messages

    message = messages[target_idx]
    blocks = message.content

    # Plain text carries no thinking segments.
    if isinstance(blocks, str):
        return messages

    if isinstance(blocks, list):
        kept: list[object] = []
        dropped = False
        for block in blocks:
            # A block counts as "thinking" if typed so, or if it carries a
            # string-valued 'thinking' key.
            if isinstance(block, dict) and (
                block.get("type") == "thinking" or isinstance(block.get("thinking"), str)
            ):
                dropped = True
                continue
            kept.append(block)

        if dropped:
            message.content = kept
            messages[target_idx] = message

    return messages
82
+
83
+
84
def add_tools(tool_config: ToolConfig, tools_to_add: ToolConfig):
    """Merge *tools_to_add* into *tool_config* in place, deduplicating per app.

    Note: deduplication goes through set(), so tool ordering is not preserved.
    """
    for app_id, extra in tools_to_add.items():
        merged = set(tool_config.get(app_id, []))
        merged.update(extra)
        tool_config[app_id] = list(merged)
    return tool_config
89
+
90
+
91
def light_copy(data):
    """
    Produce a lightweight deep copy of a dict or sequence for display purposes.

    Strings longer than MAX_CHARS are cut down and suffixed with "...", and
    every sequence is limited to its first 20 items.

    Args:
        data: A dict with string keys, a sequence of such values, or a scalar.

    Returns:
        A copy with the same nesting, long strings truncated.
    """

    def shorten(value):
        # Only long strings are altered; everything else passes through as-is.
        if isinstance(value, str) and len(value) > MAX_CHARS:
            return value[:MAX_CHARS] + "..."
        return value

    def clone_mapping(mapping):
        # Recursively copy a dict, shortening strings and capping sequences.
        cloned = {}
        for key, value in mapping.items():
            if isinstance(value, dict):
                cloned[key] = clone_mapping(value)
            elif isinstance(value, Sequence) and not isinstance(value, str):
                cloned[key] = [
                    clone_mapping(v) if isinstance(v, dict) else shorten(v)
                    for v in value[:20]  # keep only the first 20 items
                ]
            else:
                cloned[key] = shorten(value)
        return cloned

    if isinstance(data, dict):
        return clone_mapping(data)
    if isinstance(data, Sequence) and not isinstance(data, str):
        return [
            clone_mapping(item) if isinstance(item, dict) else shorten(item)
            for item in data[:20]  # keep only the first 20 items
        ]
    # Scalars (including long strings) fall through here.
    return shorten(data)
132
+
133
+
134
def get_message_text(msg: BaseMessage) -> str:
    """Extract the plain-text payload from a message's content."""
    content = msg.content
    if isinstance(content, str):
        return content
    if isinstance(content, dict):
        return content.get("text", "")
    # Otherwise: an iterable of strings and/or content blocks.
    parts = []
    for chunk in content:
        if isinstance(chunk, str):
            parts.append(chunk)
        else:
            parts.append(chunk.get("text") or "")
    return "".join(parts).strip()
144
+
145
+
146
def make_safe_function_name(name: str) -> str:
    """Sanitize *name* into a legal Python identifier for a function."""
    # Anything outside [a-zA-Z0-9_] becomes an underscore.
    cleaned = re.sub(r"[^a-zA-Z0-9_]", "_", name)
    # Empty input gets a stable fallback name.
    if not cleaned:
        return "unnamed_tool"
    # Identifiers cannot begin with a digit; prefix instead of dropping it.
    return f"tool_{cleaned}" if cleaned[0].isdigit() else cleaned
157
+
158
+
159
def derive_context(code: str, context: dict[str, Any]) -> dict[str, Any]:
    """
    Derive context from code by extracting classes, functions, and import statements.

    Args:
        code: Python code as a string
        context: Existing context dictionary to append to (mutated in place)

    Returns:
        Updated context dictionary with extracted entities under the keys
        'imports', 'classes', and 'functions' (each a de-duplicated list of
        source snippets).
    """

    # Initialize context keys if they don't exist
    if "imports" not in context:
        context["imports"] = []
    if "classes" not in context:
        context["classes"] = []
    if "functions" not in context:
        context["functions"] = []

    try:
        # Parse the code into an AST
        tree = ast.parse(code)

        # Extract imports. Each statement is re-rendered as canonical source
        # text and appended only if not already present (order-preserving dedup).
        for node in ast.walk(tree):
            if isinstance(node, ast.Import):
                for alias in node.names:
                    if alias.asname:
                        import_stmt = f"import {alias.name} as {alias.asname}"
                    else:
                        import_stmt = f"import {alias.name}"
                    if import_stmt not in context["imports"]:
                        context["imports"].append(import_stmt)

            elif isinstance(node, ast.ImportFrom):
                module = node.module or ""
                # Handle multiple imports in a single from statement
                import_names = []
                for alias in node.names:
                    if alias.asname:
                        import_names.append(f"{alias.name} as {alias.asname}")
                    else:
                        import_names.append(alias.name)

                import_stmt = f"from {module} import {', '.join(import_names)}"
                if import_stmt not in context["imports"]:
                    context["imports"].append(import_stmt)

        # Extract class definitions. NOTE: ast.walk visits nested nodes too, so
        # classes defined inside other scopes are also collected (unlike
        # functions below, which are filtered to top level).
        for node in ast.walk(tree):
            if isinstance(node, ast.ClassDef):
                # Slice the original source by the node's line span.
                class_lines = code.split("\n")[node.lineno - 1 : node.end_lineno]
                class_def = "\n".join(class_lines)

                # Clean up the class definition (remove leading/trailing whitespace)
                class_def = class_def.strip()

                if class_def not in context["classes"]:
                    context["classes"].append(class_def)

        # Extract function definitions (including async)
        for node in ast.walk(tree):
            if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
                func_lines = code.split("\n")[node.lineno - 1 : node.end_lineno]
                func_def = "\n".join(func_lines)

                # Only top-level functions (col_offset == 0); methods and
                # nested functions ship inside their class/function snippet.
                if node.col_offset == 0:
                    func_def = func_def.strip()
                    if func_def not in context["functions"]:
                        context["functions"].append(func_def)

    except SyntaxError:
        # If the code has syntax errors, try a simpler regex-based approach.
        # This best-effort path is less precise: it cannot see nesting and may
        # mis-split multi-name imports or adjacent definitions.

        # Extract import statements using regex
        import_patterns = [
            r"import\s+(\w+(?:\.\w+)*)(?:\s+as\s+(\w+))?",
            r"from\s+(\w+(?:\.\w+)*)\s+import\s+(\w+(?:\s+as\s+\w+)?)",
        ]

        for pattern in import_patterns:
            matches = re.finditer(pattern, code)
            for match in matches:
                if "from" in pattern:
                    module = match.group(1)
                    imports = match.group(2).split(",")
                    for import_name in imports:
                        imp = import_name.strip()
                        if " as " in imp:
                            name, alias = imp.split(" as ")
                            import_stmt = f"from {module} import {name.strip()} as {alias.strip()}"
                        else:
                            import_stmt = f"from {module} import {imp}"
                        if import_stmt not in context["imports"]:
                            context["imports"].append(import_stmt)
                else:
                    module = match.group(1)
                    alias = match.group(2)
                    if alias:
                        import_stmt = f"import {module} as {alias}"
                    else:
                        import_stmt = f"import {module}"
                    if import_stmt not in context["imports"]:
                        context["imports"].append(import_stmt)

        # Extract class definitions using regex: grab everything up to the
        # next class/def keyword (or end of string).
        class_pattern = r"class\s+(\w+).*?(?=class\s+\w+|def\s+\w+|$)"
        class_matches = re.finditer(class_pattern, code, re.DOTALL)
        for match in class_matches:
            class_def = match.group(0).strip()
            if class_def not in context["classes"]:
                context["classes"].append(class_def)

        # Extract function definitions using regex (same lookahead strategy).
        func_pattern = r"def\s+(\w+).*?(?=class\s+\w+|def\s+\w+|$)"
        func_matches = re.finditer(func_pattern, code, re.DOTALL)
        for match in func_matches:
            func_def = match.group(0).strip()
            if func_def not in context["functions"]:
                context["functions"].append(func_def)

    return context
284
+
285
+
286
+ def inject_context(
287
+ context_dict: dict[str, list[str]], existing_namespace: dict[str, Any] | None = None
288
+ ) -> dict[str, Any]:
289
+ """
290
+ Inject Python entities from a dictionary into a namespace.
291
+
292
+ This function takes a dictionary where keys represent entity types (imports, classes, functions, etc.)
293
+ and values are lists of entity definitions. It attempts to import or create these entities and returns
294
+ them in a namespace dictionary. Can optionally build upon an existing namespace and apply additional aliases.
295
+
296
+ Args:
297
+ context_dict: Dictionary with entity types as keys and lists of entity definitions as values.
298
+ Supported keys: 'imports', 'classes', 'functions'
299
+ - 'imports': List of import statements as strings (e.g., ['import pandas', 'import numpy as np'])
300
+ - 'classes': List of class definitions as strings
301
+ - 'functions': List of function definitions as strings
302
+ existing_namespace: Optional existing namespace to build upon. If provided, new entities
303
+ will be added to this namespace rather than creating a new one.
304
+
305
+ Returns:
306
+ Dictionary containing the injected entities as key-value pairs
307
+
308
+ Example:
309
+ context = {
310
+ 'imports': ['import pandas as pd', 'import numpy as np'],
311
+ 'classes': ['class MyClass:\n def __init__(self, x):\n self.x = x'],
312
+ 'functions': ['def my_function(x):\n return x * 2']
313
+ }
314
+ existing_ns = {'math': <math module>, 'data': [1, 2, 3]}
315
+ namespace = inject_context(context, existing_ns)
316
+ # namespace will contain: {'math': <math module>, 'data': [1, 2, 3], 'pandas': <module>, 'pd': <module>, 'numpy': <module>, 'np': <module>, 'MyClass': <class>, 'MC': <class>, 'my_function': <function>, ...}
317
+ """
318
+
319
+ # Start with existing namespace or create new one
320
+ namespace: dict[str, Any] = existing_namespace.copy() if existing_namespace is not None else {}
321
+
322
+ # Handle imports (execute import statements as strings)
323
+ if "imports" in context_dict:
324
+ for import_statement in context_dict["imports"]:
325
+ try:
326
+ # Execute the import statement in the current namespace
327
+ exec(import_statement, namespace)
328
+ except Exception as e:
329
+ # If execution fails, try to extract module name and create placeholder
330
+
331
+ # Handle different import patterns
332
+ import_match = re.search(r"import\s+(\w+)(?:\s+as\s+(\w+))?", import_statement)
333
+ if import_match:
334
+ module_name = import_match.group(1)
335
+ alias_name = import_match.group(2)
336
+
337
+ try:
338
+ # Try to import the module manually
339
+ module = importlib.import_module(module_name)
340
+ namespace[module_name] = module
341
+ if alias_name:
342
+ namespace[alias_name] = module
343
+ except ImportError:
344
+ # Create placeholders for missing imports
345
+ namespace[module_name] = f"<import '{module_name}' not available>"
346
+ if alias_name:
347
+ namespace[alias_name] = f"<import '{module_name}' as '{alias_name}' not available>"
348
+ else:
349
+ # If we can't parse the import statement, create a generic placeholder
350
+ namespace[f"import_{len(namespace)}"] = f"<import statement failed: {str(e)}>"
351
+
352
+ # Handle classes - execute class definitions as strings
353
+ if "classes" in context_dict:
354
+ for class_definition in context_dict["classes"]:
355
+ try:
356
+ # Execute the class definition in the current namespace
357
+ exec(class_definition, namespace)
358
+ except Exception:
359
+ # If execution fails, try to extract class name and create placeholder
360
+
361
+ class_match = re.search(r"class\s+(\w+)", class_definition)
362
+ if class_match:
363
+ class_name = class_match.group(1)
364
+
365
+ # Create a placeholder class
366
+ class PlaceholderClass:
367
+ def __init__(self, *args, **kwargs):
368
+ raise NotImplementedError("Class '{class_name}' failed to load")
369
+
370
+ namespace[class_name] = PlaceholderClass
371
+ else:
372
+ # If we can't extract class name, create a generic placeholder
373
+ class GenericPlaceholderClass:
374
+ def __init__(self, *args, **kwargs):
375
+ raise NotImplementedError("Class definition failed to load")
376
+
377
+ namespace[f"class_{len(namespace)}"] = GenericPlaceholderClass
378
+
379
+ # Handle functions - execute function definitions as strings
380
+ if "functions" in context_dict:
381
+ for function_definition in context_dict["functions"]:
382
+ try:
383
+ # Execute the function definition in the current namespace
384
+ exec(function_definition, namespace)
385
+ except Exception:
386
+ # If execution fails, try to extract function name and create placeholder
387
+ func_match = re.search(r"(async\s+)?def\s+(\w+)", function_definition)
388
+ if func_match:
389
+ func_name = func_match.group(2)
390
+ is_async = bool(func_match.group(1))
391
+
392
+ if is_async:
393
+
394
+ async def placeholder_func(*args, **kwargs):
395
+ raise NotImplementedError(f"Async function '{func_name}' failed to load")
396
+ else:
397
+
398
+ def placeholder_func(*args, **kwargs):
399
+ raise NotImplementedError(f"Function '{func_name}' failed to load")
400
+
401
+ placeholder_func.__name__ = func_name
402
+ namespace[func_name] = placeholder_func
403
+
404
+ return namespace
405
+
406
+
407
def schema_to_signature(schema: dict, func_name: str = "my_function") -> str:
    """
    Convert a JSON-schema property map into a Python-style function signature string.

    Handles fields with `type`, `anyOf`, defaults, and missing/odd metadata
    safely: a non-dict entry is rendered as `Any` with no default (previously
    this path crashed on `meta.get`).
    """
    type_map = {
        "integer": "int",
        "string": "str",
        "boolean": "bool",
        "null": "None",
        "number": "float",
        "array": "list",
        "object": "dict",
    }

    params = []
    for name, meta in schema.items():
        if not isinstance(meta, dict):
            # Bug fix: the original fell through to meta.get("default") below
            # and raised AttributeError for non-dict metadata.
            params.append(f"{name}: Any")
            continue

        if "type" in meta:
            typ = type_map.get(meta["type"], "Any")
        elif "anyOf" in meta:
            # Union field: map each member type, dedupe, sort for stability.
            types = []
            for t in meta["anyOf"]:
                if not isinstance(t, dict):
                    continue
                t_type = t.get("type")
                types.append(type_map.get(t_type, "Any") if t_type else "Any")
            typ = " | ".join(sorted(set(types))) if types else "Any"
        else:
            typ = "Any"

        # Handle defaults gracefully (an explicit default of None is treated
        # the same as "no default", preserving existing output).
        default = meta.get("default")
        if default is None:
            params.append(f"{name}: {typ}")
        else:
            params.append(f"{name}: {typ} = {repr(default)}")

    param_str = ",\n    ".join(params)
    return f"def {func_name}(\n    {param_str},\n):"
448
+
449
+
450
def smart_truncate(
    output: str, max_chars_full: int = 2000, max_lines_headtail: int = 20, summary_threshold: int = 10000
) -> str:
    """
    Shrink execution output intelligently so it does not flood the context.

    Small outputs pass through untouched. Long, many-line outputs keep only
    the first and last `max_lines_headtail` lines with an elision marker in
    between, and the result is hard-capped at `summary_threshold` characters.

    Args:
        output (str): The string output from code execution.
        max_chars_full (int): Outputs at or below this length are returned as-is.
        max_lines_headtail (int): Lines to keep from each end of long outputs.
        summary_threshold (int): Hard cap on the truncated result's length.

    Returns:
        str: The original or truncated output.
    """
    if len(output) <= max_chars_full:
        return output  # small enough to include fully

    lines = output.splitlines()
    hidden = len(lines) - 2 * max_lines_headtail
    if hidden <= 0:
        # Few enough lines: keep everything even though it is long.
        return output

    head = "\n".join(lines[:max_lines_headtail])
    tail = "\n".join(lines[-max_lines_headtail:])
    truncated = f"{head}\n... [truncated {hidden} lines] ...\n{tail}"

    # Even head+tail can be huge (very long lines): enforce the hard cap.
    if len(truncated) > summary_threshold:
        truncated = truncated[:summary_threshold] + "\n... [output truncated to fit context] ..."

    return truncated
482
+
483
+
484
async def get_connected_apps_string(registry) -> str:
    """Render the registry's connected applications as a bulleted string.

    Returns "" for a falsy registry; a friendly message when there are no
    connections or when the lookup fails.
    """
    if not registry:
        return ""

    try:
        connections = await registry.list_connected_apps()
        if not connections:
            return "No applications are currently connected."

        # Deduplicate by app_id, then format one bullet per app.
        app_ids = {entry["app_id"] for entry in connections}
        return "\n".join(f"- {app_id}" for app_id in app_ids)
    except Exception:
        # Best-effort: never propagate registry errors to the caller.
        return "Unable to retrieve connected applications."
506
+
507
def extract_plan_parameters(plan_steps: list[str]) -> list[dict[str, Any]]:
    """
    Pull backtick-quoted parameters out of plan steps as OpenAPI-like objects.

    `name`                     -> {"name": "name", "required": True}
    `name(default = 'value')`  -> {"name": "name", "required": False, "default": "value"}
    """
    found: dict[str, Any] = {}
    # Anything between backticks is a candidate parameter.
    backticked = re.compile(r"`([^`]+)`")
    # Recognizes the "name(default = <literal>)" form.
    with_default = re.compile(r"^\s*(\w+)\s*\(\s*default\s*=\s*(.+)\s*\)\s*$")

    for step in plan_steps:
        for raw in backticked.findall(step):
            token = raw.strip()
            parsed = with_default.match(token)
            if parsed:
                name, literal = parsed.groups()
                try:
                    # Interpret the default as a Python literal when possible.
                    value = ast.literal_eval(literal)
                except (ValueError, SyntaxError):
                    value = literal  # fall back to the raw string
                found[name] = {"required": False, "default": value}
            elif token not in found:
                # First sighting without a default marks it required; an
                # earlier entry carrying a default is never downgraded.
                found[token] = {"required": True}

    # Emit a stable, name-sorted list of parameter objects.
    return [{"name": name, **details} for name, details in sorted(found.items())]
553
+
554
def is_openai_style_patch(text: str) -> bool:
    """Heuristically detect an OpenAI/Codex-style patch.

    Minimal check: both the Begin and End Patch fences must be present.
    Non-string input is never a patch.
    """
    if not isinstance(text, str):
        return False
    return all(fence in text for fence in ("*** Begin Patch", "*** End Patch"))
562
+
563
+
564
+ def _parse_openai_patch_hunks(patch_text: str) -> list[tuple[list[str], list[str]]]:
565
+ """Parse a minimal subset of OpenAI patch format into (src_lines, dst_lines) hunks.
566
+
567
+ We ignore file-level headers and only process sections between @@ markers.
568
+ Each hunk collects context lines (prefix ' ') and deletions ('-') for src,
569
+ and context (' ') and additions ('+') for dst, preserving order.
570
+ """
571
+ in_patch = False
572
+ src_acc: list[str] = []
573
+ dst_acc: list[str] = []
574
+ hunks: list[tuple[list[str], list[str]]] = []
575
+
576
+ for raw in patch_text.splitlines():
577
+ line = raw.rstrip("\n")
578
+ if not in_patch:
579
+ if line.strip() == "*** Begin Patch":
580
+ in_patch = True
581
+ continue
582
+
583
+ # End of patch
584
+ if line.strip() == "*** End Patch":
585
+ if src_acc or dst_acc:
586
+ hunks.append((src_acc, dst_acc))
587
+ break
588
+
589
+ # Start of new hunk
590
+ if line.startswith("@@"):
591
+ if src_acc or dst_acc:
592
+ hunks.append((src_acc, dst_acc))
593
+ src_acc, dst_acc = [], []
594
+ continue
595
+
596
+ # Ignore file headers like '*** Update File:' etc.
597
+ if line.startswith("*** "):
598
+ continue
599
+
600
+ if line.startswith(" "):
601
+ src_acc.append(line[1:])
602
+ dst_acc.append(line[1:])
603
+ elif line.startswith("-"):
604
+ src_acc.append(line[1:])
605
+ elif line.startswith("+"):
606
+ dst_acc.append(line[1:])
607
+ else:
608
+ # Unknown/empty line inside hunk – treat as context
609
+ src_acc.append(line)
610
+ dst_acc.append(line)
611
+
612
+ return hunks
613
+
614
+
615
def apply_openai_style_patch(original: str, patch_text: str) -> str:
    """Apply a minimal OpenAI-style patch to a single text buffer.

    Strategy per hunk:
    - Build src_block from ' '/'-' lines and dst_block from ' '/'+' lines.
    - Replace the first exact occurrence of src_block with dst_block.
    - If exact matching fails, fall back to replacing the span between the
      hunk's first and last (trimmed) source lines.
    - A hunk with no source lines replaces the entire buffer (fresh content).
    Hunks that cannot be located are silently skipped.
    """
    if not is_openai_style_patch(patch_text):
        return original

    def _boundary_replace(text: str, src: list[str], repl: str) -> tuple[bool, str]:
        """Replace the region spanning src's first..last trimmed lines.

        Returns (replaced?, new_text). Hoisted out of the hunk loop — the
        original redefined this closure on every iteration.
        """
        if not src:
            return False, text
        start_token = src[0].strip()
        end_token = src[-1].strip()
        start_idx = text.find(start_token)
        if start_idx == -1:
            return False, text
        end_idx = text.find(end_token, start_idx + len(start_token))
        if end_idx == -1:
            return False, text
        end_idx += len(end_token)
        return True, text[:start_idx] + repl + text[end_idx:]

    result = original
    for src_lines, dst_lines in _parse_openai_patch_hunks(patch_text):
        src_block = "\n".join(src_lines)
        dst_block = "\n".join(dst_lines)

        # Fresh generation or insert-only hunk: dst becomes the full content.
        if not src_lines:
            result = dst_block
            continue

        # Preferred path: exact textual match, first occurrence only.
        if src_block in result:
            result = result.replace(src_block, dst_block, 1)
            continue

        # Lenient fallback on trimmed first/last source lines.
        replaced, candidate = _boundary_replace(result, src_lines, dst_block)
        if replaced:
            result = candidate
        # else: hunk not found — leave the buffer unchanged (no diagnostics;
        # a richer implementation would raise or collect these).

    return result
669
+
670
+
671
def apply_patch_or_use_proposed(original: str, proposed: str) -> str:
    """Unified entry point for content updates.

    If *proposed* looks like an OpenAI-style patch, apply it to *original*;
    otherwise *proposed* is taken as a full replacement.
    """
    return (
        apply_openai_style_patch(original, proposed)
        if is_openai_style_patch(proposed)
        else proposed
    )
@@ -0,0 +1,3 @@
1
+ from .agent import CodeActPlaybookAgent
2
+
3
+ __all__ = ["CodeActPlaybookAgent"]