universal-mcp-agents 0.1.10__py3-none-any.whl → 0.1.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65):
  1. universal_mcp/agents/__init__.py +17 -19
  2. universal_mcp/agents/base.py +10 -7
  3. universal_mcp/agents/{bigtoolcache → bigtool}/__init__.py +2 -2
  4. universal_mcp/agents/{bigtoolcache → bigtool}/__main__.py +0 -1
  5. universal_mcp/agents/{bigtoolcache → bigtool}/agent.py +0 -1
  6. universal_mcp/agents/{bigtoolcache → bigtool}/graph.py +6 -5
  7. universal_mcp/agents/builder/__main__.py +125 -0
  8. universal_mcp/agents/builder/builder.py +225 -0
  9. universal_mcp/agents/builder/prompts.py +173 -0
  10. universal_mcp/agents/builder/state.py +24 -0
  11. universal_mcp/agents/cli.py +3 -2
  12. universal_mcp/agents/codeact/__main__.py +2 -4
  13. universal_mcp/agents/codeact/agent.py +188 -108
  14. universal_mcp/agents/codeact/models.py +11 -0
  15. universal_mcp/agents/codeact/prompts.py +34 -43
  16. universal_mcp/agents/codeact/sandbox.py +78 -40
  17. universal_mcp/agents/codeact/state.py +5 -4
  18. universal_mcp/agents/codeact0/__init__.py +3 -0
  19. universal_mcp/agents/codeact0/__main__.py +35 -0
  20. universal_mcp/agents/codeact0/agent.py +136 -0
  21. universal_mcp/agents/codeact0/config.py +77 -0
  22. universal_mcp/agents/codeact0/langgraph_graph.py +17 -0
  23. universal_mcp/agents/codeact0/legacy_codeact.py +104 -0
  24. universal_mcp/agents/codeact0/llm_tool.py +379 -0
  25. universal_mcp/agents/codeact0/prompts.py +156 -0
  26. universal_mcp/agents/codeact0/sandbox.py +90 -0
  27. universal_mcp/agents/codeact0/state.py +12 -0
  28. universal_mcp/agents/codeact0/usecases/1-unsubscribe.yaml +4 -0
  29. universal_mcp/agents/codeact0/usecases/10-reddit2.yaml +10 -0
  30. universal_mcp/agents/codeact0/usecases/11-github.yaml +13 -0
  31. universal_mcp/agents/codeact0/usecases/2-reddit.yaml +27 -0
  32. universal_mcp/agents/codeact0/usecases/2.1-instructions.md +81 -0
  33. universal_mcp/agents/codeact0/usecases/2.2-instructions.md +71 -0
  34. universal_mcp/agents/codeact0/usecases/3-earnings.yaml +4 -0
  35. universal_mcp/agents/codeact0/usecases/4-maps.yaml +41 -0
  36. universal_mcp/agents/codeact0/usecases/5-gmailreply.yaml +8 -0
  37. universal_mcp/agents/codeact0/usecases/6-contract.yaml +6 -0
  38. universal_mcp/agents/codeact0/usecases/7-overnight.yaml +14 -0
  39. universal_mcp/agents/codeact0/usecases/8-sheets_chart.yaml +25 -0
  40. universal_mcp/agents/codeact0/usecases/9-learning.yaml +9 -0
  41. universal_mcp/agents/codeact0/utils.py +374 -0
  42. universal_mcp/agents/hil.py +4 -4
  43. universal_mcp/agents/planner/__init__.py +7 -1
  44. universal_mcp/agents/react.py +11 -3
  45. universal_mcp/agents/simple.py +12 -2
  46. universal_mcp/agents/utils.py +17 -0
  47. universal_mcp/applications/llm/__init__.py +3 -0
  48. universal_mcp/applications/llm/app.py +158 -0
  49. universal_mcp/applications/ui/app.py +118 -144
  50. {universal_mcp_agents-0.1.10.dist-info → universal_mcp_agents-0.1.12.dist-info}/METADATA +1 -1
  51. universal_mcp_agents-0.1.12.dist-info/RECORD +65 -0
  52. universal_mcp/agents/bigtool2/__init__.py +0 -67
  53. universal_mcp/agents/bigtool2/__main__.py +0 -23
  54. universal_mcp/agents/bigtool2/agent.py +0 -13
  55. universal_mcp/agents/bigtool2/graph.py +0 -155
  56. universal_mcp/agents/bigtool2/meta_tools.py +0 -120
  57. universal_mcp/agents/bigtool2/prompts.py +0 -15
  58. universal_mcp/agents/bigtoolcache/state.py +0 -27
  59. universal_mcp/agents/builder.py +0 -204
  60. universal_mcp_agents-0.1.10.dist-info/RECORD +0 -42
  61. /universal_mcp/agents/{bigtoolcache → bigtool}/context.py +0 -0
  62. /universal_mcp/agents/{bigtoolcache → bigtool}/prompts.py +0 -0
  63. /universal_mcp/agents/{bigtool2 → bigtool}/state.py +0 -0
  64. /universal_mcp/agents/{bigtoolcache → bigtool}/tools.py +0 -0
  65. {universal_mcp_agents-0.1.10.dist-info → universal_mcp_agents-0.1.12.dist-info}/WHEEL +0 -0
@@ -0,0 +1,379 @@
1
+ import json
2
+ from dataclasses import dataclass
3
+ from typing import Any, Literal, cast
4
+
5
+ from langchain.chat_models import init_chat_model
6
+ from langchain_openai import AzureChatOpenAI
7
+
8
+ from universal_mcp.agents.codeact0.utils import get_message_text, light_copy
9
+
10
+ MAX_RETRIES = 3
11
+
12
+
13
+ def get_context_str(source: Any | list[Any] | dict[str, Any]) -> str:
14
+ """Converts context to a string representation."""
15
+ if not isinstance(source, dict):
16
+ if isinstance(source, list):
17
+ source = {f"doc_{i + 1}": str(doc) for i, doc in enumerate(source)}
18
+ else:
19
+ source = {"content": str(source)}
20
+
21
+ return "\n".join(f"<{k}>\n{str(v)}\n</{k}>" for k, v in source.items())
22
+
23
+
24
def smart_print(data: Any) -> None:
    """Print a dictionary or list of dictionaries with long string values truncated.

    Args:
        data: Either a dictionary with string keys, or a list of such dictionaries.
    """
    # Bug fix: the original computed light_copy(data) and discarded the result,
    # so nothing was ever printed despite the function's name and docstring.
    print(light_copy(data))
31
+
32
+
33
def creative_writer(
    task: str,
    context: Any | list[Any] | dict[str, Any],
    tone: str = "normal",
    format: Literal["markdown", "html", "plain"] = "markdown",
    length: Literal["very-short", "concise", "normal", "long"] = "concise",
) -> str:
    """
    Given a high-level writing task and context, returns a well-written text
    that achieves the task, given the context.

    Example Call:
        creative_writer("Summarize this website with the goal of making it easy to understand.", web_content)
        creative_writer("Make a markdown table summarizing the key differences between doc_1 and doc_2.", {"doc_1": str(doc_1), "doc_2": str(doc_2)})
        creative_writer("Summarize all the provided documents.", [doc_1, doc_2, doc_3])

    Important:
        - Include specifics of the goal in the context verbatim.
        - Be precise and direct in the task, and include as much context as possible.
        - Include relevant high-level goals or intent in the task.
        - You can provide multiple documents as input, and reference them in the task.
        - You MUST provide the contents of any source documents to `creative_writer`.
        - NEVER use `creative_writer` to produce JSON for a Pydantic model.

    Args:
        task: The main writing task or directive.
        context: A single string, list of strings, or dict mapping labels to content.
        tone: The desired tone of the output (e.g., "normal", "flirty", "formal", "casual", "crisp", "poetic", "technical", "internet-chat", "smartass", etc.).
        format: Output format ('markdown', 'html', 'plain').
        length: Desired length of the output ('very-short', 'concise', 'normal', 'long').

    Returns:
        str: The generated text output.
    """

    # Flatten the context into labelled <tag> sections for the prompt.
    context = get_context_str(context)

    # Append format/tone/length directives to the task before prompting.
    task = task.strip() + "\n\n"
    if format == "markdown":
        task += "Please write in Markdown format.\n\n"
    elif format == "html":
        task += "Please write in HTML format.\n\n"
    else:
        task += "Please write in plain text format. Don't use markdown or HTML.\n\n"

    if tone not in ["normal", "default", ""]:
        task = f"{task} (Tone instructions: {tone})"

    if length not in ["normal", "default", ""]:
        task = f"{task} (Length instructions: {length})"

    prompt = f"{task}\n\nContext:\n{context}\n\n"

    # NOTE(review): unlike the other helpers in this module (which use Claude via
    # init_chat_model), this one is pinned to Azure OpenAI gpt-4o — confirm intended.
    model = AzureChatOpenAI(model="gpt-4o", temperature=0.7)

    response = model.with_retry(stop_after_attempt=MAX_RETRIES).invoke(prompt)
    return get_message_text(response)
90
+
91
+
92
def ai_classify(
    classification_task_and_requirements: str,
    context: Any | list[Any] | dict[str, Any],
    class_descriptions: dict[str, str],
) -> dict[str, Any]:
    """Classify the given context into one of several named classes via an LLM.

    Use `ai_classify` for tasks which need to classify data into one of many
    categories. If making multiple binary classifications, call it once per question.

    Guidance:
        - Prefer ai_classify over plain string operations for comparing strings.
        - Prefer to include an "Unsure" category for classification tasks.
        - `class_descriptions` MUST map each possible class name to a precise description.
        - Use precise, specific class names and concise descriptions.
        - Pass ALL relevant context, preferably as a dict mapping labels to content.

    Example:
        classification = ai_classify(
            "Does the document contain an address?",
            {"address": extracted_address},
            {
                "Is_Address": "Valid addresses usually have street names, city, and zip codes.",
                "Not_Address": "Not valid addresses.",
            },
        )
        if classification["probabilities"]["Is_Address"] > 0.5:
            ...

    Args:
        classification_task_and_requirements: The classification question and rules.
        context: The data to classify (string, list, or dict).
        class_descriptions: Mapping from class names to descriptions.

    Returns:
        dict with keys `probabilities` (dict[str, float] over the class names),
        `reason` (str) and `top_class` (str).
    """
    rendered_context = get_context_str(context)

    prompt = (
        f"{classification_task_and_requirements}\n\n"
        f"\nThis is classification task\nPossible classes and descriptions:\n"
        f"{json.dumps(class_descriptions, indent=2)}\n"
        f"\nContext:\n{rendered_context}\n\n"
        "Return ONLY a valid JSON object, no extra text."
    )

    # Local schema for the structured JSON response.
    @dataclass
    class ClassificationResult:
        probabilities: dict[str, float]
        reason: str
        top_class: str

    model = init_chat_model(model="claude-4-sonnet-20250514", temperature=0)
    structured = model.with_structured_output(schema=ClassificationResult, method="json_mode")
    result = structured.with_retry(stop_after_attempt=MAX_RETRIES).invoke(prompt)
    return cast(dict[str, Any], result)
162
+
163
+
164
def call_llm(
    task_instructions: str, context: Any | list[Any] | dict[str, Any], output_json_schema: dict[str, Any]
) -> dict[str, Any]:
    """
    Call a Large Language Model (LLM) with an instruction and contextual information,
    returning a dictionary matching the given output_json_schema.
    Can be used for tasks like creative writing, llm reasoning based content generation, etc.

    You MUST anticipate Exceptions in reasoning based tasks which will lead to some empty fields
    in the returned output; skip this item if applicable.

    General Guidelines:
        - Be comprehensive, specific, and precise on the task instructions.
        - Include as much context as possible.
        - You can provide multiple items in context, and reference them in the task.
        - Include relevant high-level goals or intent in the task.
        - In the output_json_schema, use required field wherever necessary.
        - The more specific your task instructions and output_json_schema are, the better the results.

    Guidelines for content generation tasks:
        - Feel free to add instructions for tone, length, and format (markdown, html, plain-text, xml)
        - Some examples of tone are: "normal", "flirty", "formal", "casual", "crisp", "poetic", "technical", "internet-chat", "smartass", etc.
        - Prefer length to be concise by default. Other examples are: "very-short", "concise", "normal", "long", "2-3 lines", etc.
        - In format prefer plain-text but you can also use markdown and html wherever useful.

    Args:
        task_instructions: The main directive for the LLM (e.g., "Summarize the article" or "Extract key entities").
        context:
            A dictionary containing named text elements that provide additional
            information for the LLM. Keys are labels (e.g., 'article', 'transcript'),
            values are strings of content.
        output_json_schema: must be a valid JSON schema with top-level 'title' and 'description' keys.

    Returns:
        dict: Parsed JSON object matching the desired output_json_schema.
    """
    # Flatten the context into labelled <tag> sections for the prompt.
    context = get_context_str(context)

    prompt = f"{task_instructions}\n\nContext:\n{context}\n\nReturn ONLY a valid JSON object, no extra text."

    model = init_chat_model(model="claude-4-sonnet-20250514", temperature=0)

    # json_mode forces the model to emit a JSON object matching the schema;
    # with_retry re-invokes on transient failures up to MAX_RETRIES times.
    response = (
        model.with_structured_output(schema=output_json_schema, method="json_mode")
        .with_retry(stop_after_attempt=MAX_RETRIES)
        .invoke(prompt)
    )
    return cast(dict[str, Any], response)
213
+
214
+
215
def data_extractor(
    extraction_task: str, source: Any | list[Any] | dict[str, Any], output_json_schema: dict[str, Any]
) -> dict[str, Any]:
    """Extract structured data from unstructured input via an LLM.

    Works on documents, webpages, images, and large bodies of text, returning a
    dictionary matching the given output_json_schema.

    You MUST anticipate an Exception raised for unextractable data; skip that
    item if applicable.

    Strongly prefer to:
        - Be comprehensive, specific, and precise on the data you want to extract.
        - Use optional fields everywhere.
        - Extract multiple items from each source unless otherwise specified.
        - Make the extraction task and output_json_schema as specific as possible.

    Args:
        extraction_task: The directive describing what to extract.
        source: The unstructured data to extract from.
        output_json_schema: must be a valid JSON schema with top-level 'title' and 'description' keys.

    Returns:
        dict: Parsed JSON object matching the desired output_json_schema.

    Example:
        news_articles_schema = {
            "title": "NewsArticleList",
            "description": "A list of news articles with headlines and URLs",
            "type": "object",
            "properties": {
                "articles": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {
                            "headline": {"type": "string"},
                            "url": {"type": "string"}
                        },
                        "required": ["headline", "url"]
                    }
                }
            },
            "required": ["articles"]
        }

        news_articles = data_extractor("Extract headlines and their corresponding URLs.", content, news_articles_schema)
    """
    rendered = get_context_str(source)

    prompt = f"{extraction_task}\n\nContext:\n{rendered}\n\nReturn ONLY a valid JSON object, no extra text."

    model = init_chat_model(model="claude-4-sonnet-20250514", temperature=0)
    structured = model.with_structured_output(schema=output_json_schema, method="json_mode")
    response = structured.with_retry(stop_after_attempt=MAX_RETRIES).invoke(prompt)
    return cast(dict[str, Any], response)
278
+
279
+
280
+ # news_articles_schema = {
281
+ # "type": "object",
282
+ # "properties": {
283
+ # "articles": {
284
+ # "type": "array",
285
+ # "title": "Articles",
286
+ # "description": "List of news articles",
287
+ # "items": {
288
+ # "type": "object",
289
+ # "properties": {
290
+ # "headline": {
291
+ # "type": "string",
292
+ # "title": "Headline",
293
+ # "description": "The headline of the news article"
294
+ # },
295
+ # "url": {
296
+ # "type": "string",
297
+ # "title": "URL",
298
+ # "description": "The URL of the news article"
299
+ # }
300
+ # },
301
+ # "required": ["headline", "url"],
302
+ # }
303
+ # }
304
+ # },
305
+ # "required": ["articles"],
306
+ # }
307
+
308
+
309
+ # news_articles_schema = {
310
+ # "title": "NewsArticleList",
311
+ # "description": "A list of news articles with headlines and URLs",
312
+ # "type": "object",
313
+ # "properties": {
314
+ # "articles": {
315
+ # "type": "array",
316
+ # "items": {
317
+ # "type": "object",
318
+ # "properties": {
319
+ # "headline": {
320
+ # "type": "string"
321
+ # },
322
+ # "url": {
323
+ # "type": "string"
324
+ # }
325
+ # },
326
+ # "required": ["headline", "url"]
327
+ # }
328
+ # }
329
+ # },
330
+ # "required": ["articles"]
331
+ # }
332
+ # model = init_chat_model(model="claude-4-sonnet-20250514", temperature=0)
333
+ # structured_model = model.with_structured_output(news_articles_schema)
334
+
335
+
336
+ # class TwitterComment(BaseModel):
337
+ # skip: bool
338
+ # reason: str
339
+ # comment: str
340
+
341
+ # twitter_comment_schema = {
342
+ # "title": "TwitterComment",
343
+ # "description": "A twitter comment to engage with followers",
344
+ # "type": "object",
345
+ # "properties": {
346
+ # "skip": {
347
+ # "type": "boolean"
348
+ # },
349
+ # "reason": {
350
+ # "type": "string"
351
+ # },
352
+ # "comment": {
353
+ # "type": "string"
354
+ # },
355
+ # "tagged_profiles": {
356
+ # "type": "array",
357
+ # "items": {
358
+ # "type": "string"
359
+ # }
360
+ # }
361
+ # },
362
+ # "required": ["skip", "reason"]
363
+ # }
364
+
365
+ # comment = {
366
+ # "tweet_id": "08109402",
367
+ # "handle": "@iamnishant",
368
+ # "text": "Hey really loved this tweet! Well said 💯"
369
+ # }
370
+
371
+ # comment_instructions = (
372
+ # "Goal is to engage with my twitter followers who have commented on my tweets."
373
+ # "Please generate a single line, context-aware, conversational reply for the given comment."
374
+ # "- Use social media language (can use hinglish)."
375
+ # "- Skip the reply, if the comment is too generic."
376
+ # "- Also tag relevant people in the reply."
377
+ # )
378
+
379
+ # my_reply = call_llm(comment_instructions, comment, twitter_comment_schema)
@@ -0,0 +1,156 @@
1
# Fixed system-prompt prefix for the codeact0 agent. create_default_prompt()
# appends the tool stubs, current time, and optional task to this text.
uneditable_prompt = """
You are Wingman, an AI Assistant created by AgentR. You are a creative, straight-forward and direct principal software engineer.

Your job is to answer the user's question or perform the task they ask for.
- Answer simple questions (which do not require you to write any code or access any external resources) directly. Note that any operation that involves using ONLY print functions should be answered directly.
- For task requiring operations or access to external resources, you should achieve the task by executing Python code snippets.
- You have access to `execute_ipython_cell` tool that allows you to execute Python code in an IPython notebook cell.
- In writing or natural language processing tasks DO NOT answer directly. Instead use `execute_ipython_cell` tool with the AI functions provided to you for tasks like summarizing, text generation, classification, data extraction from text or unstructured data, etc.
- The code you write will be executed in a sandbox environment, and you can use the output of previous executions in your code.
- Read and understand the output of the previous code snippet and use it to answer the user's request. Note that the code output is not visible to the user, so after the task is complete, you have to give the output to the user in a markdown format.
- If needed, feel free to ask for more information from the user (without using the execute_ipython_cell tool) to clarify the task.

GUIDELINES for writing code:
- Variables defined at the top level of previous code snippets can be referenced in your code.
- External functions which return a dict or list[dict] are ambiguous. Therefore, you MUST explore the structure of the returned data using `smart_print()` statements before using it, printing keys and values.
- Ensure to not print large amounts of data, use string truncation to limit the output to a few lines when checking the data structure.
- When an operation involves running a fixed set of steps on a list of items, run one run correctly and then use a for loop to run the steps on each item in the list.
- In a single code snippet, try to achieve as much as possible.
- You can only import libraries that come pre-installed with Python, and these have to be imported at the top of every code snippet where required.
- Wrap await calls in a function and call it using `asyncio.run` to use async functions.

NOTE: If any function throws an error requiring authentication, provide the user with a Markdown link to the authentication page and prompt them to authenticate.
"""
24
+ import inspect
25
+ import re
26
+ from collections.abc import Sequence
27
+ from datetime import datetime
28
+
29
+ from langchain_core.tools import StructuredTool
30
+
31
+
32
def make_safe_function_name(name: str) -> str:
    """Convert a tool name to a valid Python function name."""
    # Collapse every character outside [a-zA-Z0-9_] into an underscore.
    sanitized = re.sub(r"[^a-zA-Z0-9_]", "_", name)
    if not sanitized:
        # Nothing survived sanitization.
        return "unnamed_tool"
    if sanitized[0].isdigit():
        # Identifiers cannot start with a digit.
        return f"tool_{sanitized}"
    return sanitized
43
+
44
+
45
def dedent(text):
    """Remove any common leading whitespace from every line in `text`.

    This can be used to make triple-quoted strings line up with the left
    edge of the display, while still presenting them in the source code
    in indented form.

    Note that tabs and spaces are both treated as whitespace, but they
    are not equal: the lines "  hello" and "\\thello" are
    considered to have no common leading whitespace.

    Entirely blank lines are normalized to a newline character.
    """
    # The original body was a verbatim copy of textwrap.dedent's implementation
    # (and recompiled both regexes on every call). Delegate to the standard
    # library instead of maintaining a duplicate.
    import textwrap

    return textwrap.dedent(text)
95
+
96
+
97
def indent(text, prefix, predicate=None):
    """Adds 'prefix' to the beginning of selected lines in 'text'.

    If 'predicate' is provided, 'prefix' will only be added to the lines
    where 'predicate(line)' is True. If 'predicate' is not provided,
    it will default to adding 'prefix' to all non-empty lines that do not
    consist solely of whitespace characters.
    """
    # The original body was a verbatim copy of textwrap.indent's
    # implementation; delegate to the standard library instead of
    # maintaining a duplicate.
    import textwrap

    return textwrap.indent(text, prefix, predicate)
120
+
121
+
122
def create_default_prompt(
    tools: Sequence[StructuredTool],
    base_prompt: str | None = None,
):
    """Build the system prompt and an execution context for the given tools.

    Each tool is rendered into the prompt as a Python function stub
    (signature + docstring) so the model knows what it can call, and its
    underlying callable is collected into `tools_context` under a
    sanitized name.

    Args:
        tools: Tools to expose to the sandboxed code environment.
        base_prompt: Optional task description appended to the prompt.

    Returns:
        Tuple of (system_prompt, tools_context) where tools_context maps each
        sanitized function name to the tool's callable.

    Raises:
        ValueError: If a tool exposes neither a coroutine nor a sync function.
    """
    system_prompt = uneditable_prompt.strip() + (
        "\n\nIn addition to the Python Standard Library, you can use the following external functions:\n"
    )
    tools_context = {}
    for tool in tools:
        # Create a safe (identifier-valid) function name.
        safe_name = make_safe_function_name(tool.name)
        # Prefer the async entry point when present; fall back to the sync one.
        if getattr(tool, "coroutine", None) is not None:
            tool_callable = tool.coroutine
            is_async = True
        elif getattr(tool, "func", None) is not None:
            tool_callable = tool.func
            is_async = False
        else:
            raise ValueError(f"Tool {tool.name} does not have a callable coroutine or function.")
        signature = inspect.signature(tool_callable)
        tools_context[safe_name] = tool_callable
        # Render the tool as a stub definition with its docstring.
        system_prompt += f'''
{"async " if is_async else ""}def {safe_name}{str(signature)}:
    """
    {indent(dedent(tool.description), "    ")}
    """
    ...
'''
    system_prompt += f"\n\nThe current time is {datetime.now().strftime('%H:%M:%S')}"
    if base_prompt and base_prompt.strip():
        # Bug fix: the original appended this sentence directly after the time
        # string with no separator, producing "...HH:MM:SSYour goal is...".
        system_prompt += f"\n\nYour goal is to perform the following task:\n\n{base_prompt}"

    return system_prompt, tools_context
@@ -0,0 +1,90 @@
1
+ import contextlib
2
+ import inspect
3
+ import io
4
+ import queue
5
+ import re
6
+ import socket
7
+ import threading
8
+ import types
9
+ from typing import Any
10
+
11
+ from langchain_core.tools import tool
12
+
13
+ from universal_mcp.agents.codeact0.utils import derive_context
14
+
15
+
16
def eval_unsafe(
    code: str, _locals: dict[str, Any], add_context: dict[str, Any]
) -> tuple[str, dict[str, Any], dict[str, Any]]:
    """Execute `code` with exec(), capturing stdout and the resulting variables.

    Args:
        code: Python source to execute.
        _locals: Execution namespace; mutated in place by the executed code.
        add_context: Previously accumulated additional context (functions,
            classes, imports) to be extended from this snippet.

    Returns:
        Tuple of (captured stdout or an error string, the picklable variables
        remaining in the namespace, the updated additional context produced by
        derive_context).

    NOTE(review): exec() of model-generated code is inherently unsafe; this
    must only ever run inside a sandboxed environment.
    """
    # print(_locals)
    # Types that are unpicklable (or meaningless to persist across cells) and
    # are therefore excluded from the returned variable snapshot.
    EXCLUDE_TYPES = (
        types.ModuleType,  # modules
        type(re.match("", "")),  # regex Match objects (type not directly importable)
        type(threading.Lock()),  # instead of threading.Lock
        type(threading.RLock()),  # reentrant lock
        threading.Event,  # events
        threading.Condition,  # condition vars
        threading.Semaphore,  # semaphores
        queue.Queue,  # thread-safe queues
        socket.socket,  # network sockets
        io.IOBase,  # file handles (and StringIO/BytesIO)
    )
    try:
        with contextlib.redirect_stdout(io.StringIO()) as f:
            # Execute the code in the provided locals context
            # Using exec to allow dynamic code execution
            # This is a simplified version; in production, consider security implications
            exec(code, _locals, _locals)
        result = f.getvalue()
        if not result:
            result = "<code ran, no output printed to stdout>"
    except Exception as e:
        result = f"Error during execution: {repr(e)}"

    # Return all variables in locals except __builtins__ and unpicklable objects (including tools)
    all_vars = {}
    for key, value in _locals.items():
        if key == "__builtins__":
            continue

        # Skip coroutines, async generators, and coroutine functions
        if inspect.iscoroutine(value) or inspect.iscoroutinefunction(value):
            continue
        if inspect.isasyncgen(value) or inspect.isasyncgenfunction(value):
            continue

        # Skip "obviously unpicklable" types
        if isinstance(value, EXCLUDE_TYPES):
            continue

        # Keep if it's not a callable OR if it has no __name__ attribute
        # (drops named functions/classes such as injected tools, keeps data).
        if not callable(value) or not hasattr(value, "__name__"):
            all_vars[key] = value

    new_add_context = derive_context(code, add_context)
    return result, all_vars, new_add_context
66
+
67
+
68
@tool(parse_docstring=True)
def execute_ipython_cell(snippet: str) -> str:
    """
    Executes Python code in an IPython notebook cell:
    * The output generated by the notebook cell is returned by this tool
    * State is persistent across executions and discussions with the user
    * The input code may reference variables created in previous executions

    Args:
        snippet: The Python code to execute.

    Returns:
        String containing the execution output or error message.

    Raises:
        ValueError: If snippet is empty.
    """
    # NOTE(review): this is a placeholder tool body — it validates the input
    # but does NOT execute the snippet (actual execution is wired in by the
    # agent graph, presumably via eval_unsafe). The docstring above describes
    # the intended contract shown to the model, not this stub's behavior.
    # Validate required parameters
    if not snippet or not snippet.strip():
        raise ValueError("Parameter 'snippet' is required and cannot be empty or whitespace")

    # Your actual execution logic would go here
    return f"Successfully executed {len(snippet)} characters of Python code"
@@ -0,0 +1,12 @@
1
+ from typing import Any
2
+
3
+ from langgraph.graph import MessagesState
4
+
5
+
6
class CodeActState(MessagesState):
    """Graph state for the CodeAct agent (extends MessagesState with sandbox state)."""

    context: dict[str, Any]
    """Dictionary containing the execution context with available tools and variables."""
    add_context: dict[str, Any]
    """Dictionary containing the additional context (functions, classes, imports) to be added to the execution context."""
@@ -0,0 +1,4 @@
1
# Use case 1: collect unsubscribe links from recent Gmail messages.
base_prompt: 'Find and extract unsubscribe links from all emails in my inbox from the last 7 days. List all unsubscribe links found with the email subject and sender.'
tools:
  - google_mail__list_messages
  - google_mail__get_message_details
@@ -0,0 +1,10 @@
1
# Use case 10: triage Reddit post links tracked in a Google Sheet and write results back.
base_prompt: 'Process rows 2-5 from the Google Sheet (ID: 1nnnCp3_IWcdHv4UVgXtwYF5wedxbqF4RIeyjN6mCKD8). For each unprocessed row, extract Reddit post links, fetch post details and comments, analyze content relevance to AgentR/Wingmen products, classify into tiers 1-4, generate appropriate response drafts, and update the sheet with all findings.'
tools:
  - google_sheet__add_table
  - google_sheet__append_values
  - google_sheet__update_values
  - reddit__get_post_comments_details
  - google_mail__list_messages
  - google_sheet__format_cells
  - google_sheet__get_spreadsheet_metadata
  - google_sheet__batch_get_values_by_range
@@ -0,0 +1,13 @@
1
# Use case 11: mirror open GitHub issues into a sheet, then into ClickUp tasks.
base_prompt: 'Fetch all open issues from the GitHub repository "microsoft/vscode" and add them to a new Google Sheet. Then create corresponding tasks in ClickUp for each issue with descriptions, tags, and "In Progress" status. Delete processed rows from the sheet after creating ClickUp tasks.'
tools:
  - google_sheet__get_values
  - clickup__tasks_create_new_task
  - clickup__spaces_get_details
  - clickup__lists_get_list_details
  - clickup__tasks_get_list_tasks
  - google_sheet__delete_dimensions
  - google_sheet__update_values
  - google_sheet__get_spreadsheet_metadata
  - google_sheet__batch_get_values_by_range
  - github__list_issues
  - github__update_issue