universal-mcp-agents 0.1.14__py3-none-any.whl → 0.1.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of universal-mcp-agents might be problematic.
- universal_mcp/agents/__init__.py +1 -1
- universal_mcp/agents/base.py +2 -1
- universal_mcp/agents/bigtool/__main__.py +4 -3
- universal_mcp/agents/bigtool/agent.py +1 -0
- universal_mcp/agents/bigtool/graph.py +7 -4
- universal_mcp/agents/bigtool/tools.py +4 -5
- universal_mcp/agents/builder/__main__.py +49 -23
- universal_mcp/agents/builder/builder.py +101 -102
- universal_mcp/agents/builder/helper.py +4 -6
- universal_mcp/agents/builder/prompts.py +92 -39
- universal_mcp/agents/builder/state.py +1 -1
- universal_mcp/agents/codeact0/__init__.py +2 -1
- universal_mcp/agents/codeact0/agent.py +12 -5
- universal_mcp/agents/codeact0/langgraph_agent.py +11 -14
- universal_mcp/agents/codeact0/llm_tool.py +1 -2
- universal_mcp/agents/codeact0/playbook_agent.py +353 -0
- universal_mcp/agents/codeact0/prompts.py +113 -39
- universal_mcp/agents/codeact0/sandbox.py +43 -32
- universal_mcp/agents/codeact0/state.py +27 -3
- universal_mcp/agents/codeact0/tools.py +180 -0
- universal_mcp/agents/codeact0/utils.py +53 -18
- universal_mcp/agents/shared/__main__.py +3 -2
- universal_mcp/agents/shared/prompts.py +1 -1
- universal_mcp/agents/shared/tool_node.py +17 -12
- universal_mcp/agents/utils.py +18 -12
- {universal_mcp_agents-0.1.14.dist-info → universal_mcp_agents-0.1.15.dist-info}/METADATA +3 -3
- universal_mcp_agents-0.1.15.dist-info/RECORD +50 -0
- universal_mcp/agents/codeact0/usecases/1-unsubscribe.yaml +0 -4
- universal_mcp/agents/codeact0/usecases/10-reddit2.yaml +0 -10
- universal_mcp/agents/codeact0/usecases/11-github.yaml +0 -14
- universal_mcp/agents/codeact0/usecases/2-reddit.yaml +0 -27
- universal_mcp/agents/codeact0/usecases/2.1-instructions.md +0 -81
- universal_mcp/agents/codeact0/usecases/2.2-instructions.md +0 -71
- universal_mcp/agents/codeact0/usecases/3-earnings.yaml +0 -4
- universal_mcp/agents/codeact0/usecases/4-maps.yaml +0 -41
- universal_mcp/agents/codeact0/usecases/5-gmailreply.yaml +0 -8
- universal_mcp/agents/codeact0/usecases/6-contract.yaml +0 -6
- universal_mcp/agents/codeact0/usecases/7-overnight.yaml +0 -14
- universal_mcp/agents/codeact0/usecases/8-sheets_chart.yaml +0 -25
- universal_mcp/agents/codeact0/usecases/9-learning.yaml +0 -9
- universal_mcp/agents/planner/__init__.py +0 -51
- universal_mcp/agents/planner/__main__.py +0 -28
- universal_mcp/agents/planner/graph.py +0 -85
- universal_mcp/agents/planner/prompts.py +0 -14
- universal_mcp/agents/planner/state.py +0 -11
- universal_mcp_agents-0.1.14.dist-info/RECORD +0 -66
- {universal_mcp_agents-0.1.14.dist-info → universal_mcp_agents-0.1.15.dist-info}/WHEEL +0 -0
universal_mcp/agents/codeact0/utils.py CHANGED

@@ -1,9 +1,13 @@
+import ast
+import importlib
 import re
 from collections.abc import Sequence
 from typing import Any
 
 from langchain_core.messages import BaseMessage
 
+MAX_CHARS = 300
+
 
 def light_copy(data):
     """
@@ -13,13 +17,13 @@ def light_copy(data):
         data: Either a dictionary with string keys, or a sequence of such dictionaries
 
     Returns:
-        A deep copy where all string values are truncated to
+        A deep copy where all string values are truncated to MAX_CHARS characters
     """
 
     def truncate_string(value):
-        """Truncate string to
-        if isinstance(value, str) and len(value) >
-        return value[:
+        """Truncate string to MAX_CHARS chars, preserve other types"""
+        if isinstance(value, str) and len(value) > MAX_CHARS:
+            return value[:MAX_CHARS] + "..."
         return value
 
     def copy_dict(d):
@@ -72,6 +76,7 @@ def make_safe_function_name(name: str) -> str:
         safe_name = "unnamed_tool"
     return safe_name
 
+
 def derive_context(code: str, context: dict[str, Any]) -> dict[str, Any]:
     """
     Derive context from code by extracting classes, functions, and import statements.
@@ -83,8 +88,6 @@ def derive_context(code: str, context: dict[str, Any]) -> dict[str, Any]:
     Returns:
         Updated context dictionary with extracted entities
     """
-    import ast
-    import re
 
     # Initialize context keys if they don't exist
     if "imports" not in context:
@@ -135,7 +138,7 @@ def derive_context(code: str, context: dict[str, Any]) -> dict[str, Any]:
 
             if class_def not in context["classes"]:
                 context["classes"].append(class_def)
-
+
         # Extract function definitions (including async)
         for node in ast.walk(tree):
             if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
@@ -148,7 +151,6 @@ def derive_context(code: str, context: dict[str, Any]) -> dict[str, Any]:
                 if func_def not in context["functions"]:
                     context["functions"].append(func_def)
 
-
     except SyntaxError:
         # If the code has syntax errors, try a simpler regex-based approach
 
@@ -164,8 +166,8 @@ def derive_context(code: str, context: dict[str, Any]) -> dict[str, Any]:
             if "from" in pattern:
                 module = match.group(1)
                 imports = match.group(2).split(",")
-                for
-                    imp =
+                for import_name in imports:
+                    imp = import_name.strip()
                     if " as " in imp:
                         name, alias = imp.split(" as ")
                         import_stmt = f"from {module} import {name.strip()} as {alias.strip()}"
@@ -234,8 +236,6 @@ def inject_context(
         namespace = inject_context(context, existing_ns)
         # namespace will contain: {'math': <math module>, 'data': [1, 2, 3], 'pandas': <module>, 'pd': <module>, 'numpy': <module>, 'np': <module>, 'MyClass': <class>, 'MC': <class>, 'my_function': <function>, ...}
     """
-    import importlib
-    from typing import Any
 
     # Start with existing namespace or create new one
     namespace: dict[str, Any] = existing_namespace.copy() if existing_namespace is not None else {}
@@ -248,7 +248,6 @@ def inject_context(
             exec(import_statement, namespace)
         except Exception as e:
             # If execution fails, try to extract module name and create placeholder
-            import re
 
             # Handle different import patterns
             import_match = re.search(r"import\s+(\w+)(?:\s+as\s+(\w+))?", import_statement)
@@ -279,7 +278,6 @@ def inject_context(
             exec(class_definition, namespace)
         except Exception:
             # If execution fails, try to extract class name and create placeholder
-            import re
 
             class_match = re.search(r"class\s+(\w+)", class_definition)
             if class_match:
@@ -288,14 +286,14 @@ def inject_context(
                 # Create a placeholder class
                 class PlaceholderClass:
                     def __init__(self, *args, **kwargs):
-                        raise NotImplementedError(
+                        raise NotImplementedError("Class '{class_name}' failed to load")
 
                 namespace[class_name] = PlaceholderClass
             else:
                 # If we can't extract class name, create a generic placeholder
                 class GenericPlaceholderClass:
                     def __init__(self, *args, **kwargs):
-                        raise NotImplementedError(
+                        raise NotImplementedError("Class definition failed to load")
 
                 namespace[f"class_{len(namespace)}"] = GenericPlaceholderClass
 
@@ -313,17 +311,20 @@ def inject_context(
             is_async = bool(func_match.group(1))
 
             if is_async:
+
                 async def placeholder_func(*args, **kwargs):
-                    raise NotImplementedError(f"Async function '{func_name}' failed to load
+                    raise NotImplementedError(f"Async function '{func_name}' failed to load")
             else:
+
                 def placeholder_func(*args, **kwargs):
-                    raise NotImplementedError(f"Function '{func_name}' failed to load
+                    raise NotImplementedError(f"Function '{func_name}' failed to load")
 
             placeholder_func.__name__ = func_name
             namespace[func_name] = placeholder_func
 
     return namespace
 
+
 def schema_to_signature(schema: dict, func_name="my_function") -> str:
     type_map = {
         "integer": "int",
@@ -351,3 +352,37 @@ def schema_to_signature(schema: dict, func_name="my_function") -> str:
     # join into signature
     param_str = ",\n ".join(params)
     return f"def {func_name}(\n {param_str},\n):"
+
+
+def smart_truncate(
+    output: str, max_chars_full: int = 2000, max_lines_headtail: int = 20, summary_threshold: int = 10000
+) -> str:
+    """
+    Truncates or summarizes output intelligently to avoid filling the context too fast.
+
+    Args:
+        output (str): The string output from code execution.
+        max_chars_full (int): Max characters to keep full output.
+        max_lines_headtail (int): Number of lines to keep from head and tail for medium outputs.
+        summary_threshold (int): If truncated output exceeds this, hard-truncate.
+
+    Returns:
+        str: Truncated or summarized output.
+    """
+    if len(output) <= max_chars_full:
+        return output  # Small output, include fully
+
+    lines = output.splitlines()
+    if len(lines) <= 2 * max_lines_headtail:
+        return output  # Medium output, include fully
+
+    # Medium-large output: take head + tail
+    head = "\n".join(lines[:max_lines_headtail])
+    tail = "\n".join(lines[-max_lines_headtail:])
+    truncated = f"{head}\n... [truncated {len(lines) - 2 * max_lines_headtail} lines] ...\n{tail}"
+
+    # If still too big, cut to summary threshold
+    if len(truncated) > summary_threshold:
+        truncated = truncated[:summary_threshold] + "\n... [output truncated to fit context] ..."
+
+    return truncated
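For reference, a minimal usage sketch of the two truncation helpers touched above. The import path is assumed from the file shown here (codeact0/utils.py), and the numbers follow the defaults in the diff (MAX_CHARS = 300, 20 head/tail lines):

# Hedged sketch: assumes these helpers are importable from the module diffed above.
from universal_mcp.agents.codeact0.utils import light_copy, smart_truncate

# light_copy now truncates every string value to MAX_CHARS (300) characters.
payload = {"body": "x" * 1000, "status": 200}
print(len(light_copy(payload)["body"]))  # 303: the 300 kept characters plus "..."

# smart_truncate returns small outputs unchanged and keeps only the first and
# last 20 lines of large ones, capped overall at summary_threshold characters.
big_log = "\n".join(f"line {i}" for i in range(500))
print(smart_truncate(big_log).splitlines()[20])  # "... [truncated 460 lines] ..."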
universal_mcp/agents/shared/__main__.py CHANGED

@@ -2,9 +2,10 @@ import asyncio
 
 from rich import print
 from universal_mcp.agentr.registry import AgentrRegistry
+from universal_mcp.logger import setup_logger
+
 from universal_mcp.agents.llm import load_chat_model
 from universal_mcp.agents.shared.tool_node import build_tool_node_graph
-from universal_mcp.logger import setup_logger
 
 
 async def main():
@@ -40,4 +41,4 @@ async def main():
 
 
 if __name__ == "__main__":
-    asyncio.run(main())
+    asyncio.run(main())
universal_mcp/agents/shared/tool_node.py CHANGED

@@ -113,33 +113,35 @@ def build_tool_node_graph(llm: BaseChatModel, registry: ToolRegistry) -> StateGr
         """
         A retry node that performs a general tool search without app filters.
         """
-
+        state["original_task"]
         queries = state["queries"]
         retry_count = state.get("retry_count", 0)
 
         if retry_count >= MAX_RETRIES:
             logger.error("Max retries reached. Failing the planning process.")
             return Command(
-                update={
+                update={
+                    "messages": [AIMessage(content="I could not find any relevant tools after extensive searching.")]
+                },
                 goto="handle_failure",
             )
 
         logger.info(f"--- RETRY {retry_count + 1}/{MAX_RETRIES} ---")
         logger.info("Performing a general tool search without app filters.")
 
-        general_search_tasks = [
-            registry.search_tools(query, distance_threshold=0.85) for query in queries
-        ]
+        general_search_tasks = [registry.search_tools(query, distance_threshold=0.85) for query in queries]
         tool_results = await asyncio.gather(*general_search_tasks)
-
-        unique_tools = {tool[
+
+        unique_tools = {tool["id"]: tool for tool_list in tool_results for tool in tool_list}
         candidate_tools = list(unique_tools.values())
 
         if not candidate_tools:
             logger.error("General search (retry) also failed to find any tools.")
             return Command(
                 update={
-                    "messages": [
+                    "messages": [
+                        AIMessage(content="I could not find any tools for your request, even with a broader search.")
+                    ],
                     "retry_count": retry_count + 1,
                 },
                 goto="handle_failure",
@@ -167,14 +169,17 @@ def build_tool_node_graph(llm: BaseChatModel, registry: ToolRegistry) -> StateGr
         if retry_count >= MAX_RETRIES:
             logger.error("LLM did not select any tools, even after a retry. Failing.")
             return Command(
-                update={
+                update={
+                    "messages": [AIMessage(content="I found potential tools, but could not create a final plan.")]
+                },
                 goto="handle_failure",
             )
         else:
-            logger.warning(
+            logger.warning(
+                "LLM did not select any tools from the current candidate list. Triggering general search."
+            )
             return Command(goto="general_search_and_select")
 
-
         logger.success(f"Selected {len(selected_tool_ids)} tools for the final plan: {selected_tool_ids}")
 
         final_plan = defaultdict(list)
@@ -203,4 +208,4 @@ def build_tool_node_graph(llm: BaseChatModel, registry: ToolRegistry) -> StateGr
 
     workflow.set_entry_point("search_for_tools")
 
-    return workflow.compile()
+    return workflow.compile()
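The retry path above fans out one search_tools call per query and then deduplicates the combined results by tool id with a dict comprehension. A self-contained sketch of that dedup step, using hypothetical result data:

# Hypothetical search results: one list per query, entries keyed by tool id.
tool_results = [
    [{"id": "google_sheet__get_values", "score": 0.71}],
    [
        {"id": "google_sheet__get_values", "score": 0.74},
        {"id": "reddit__get_post_comments_details", "score": 0.80},
    ],
]
# Later occurrences of the same id overwrite earlier ones, leaving one entry per tool.
unique_tools = {tool["id"]: tool for tool_list in tool_results for tool in tool_list}
candidate_tools = list(unique_tools.values())
print([tool["id"] for tool in candidate_tools])
# ['google_sheet__get_values', 'reddit__get_post_comments_details']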
universal_mcp/agents/utils.py CHANGED

@@ -1,16 +1,19 @@
 import json
 from contextlib import contextmanager
+from http import HTTPStatus
 
+import httpx
+import requests
 from langchain_core.messages.base import BaseMessage
 from loguru import logger
+from pydantic import ValidationError
+from requests import JSONDecodeError
 from rich.console import Console
 from rich.live import Live
 from rich.markdown import Markdown
 from rich.panel import Panel
 from rich.prompt import Prompt
 from rich.table import Table
-from pydantic import ValidationError
-from requests import JSONDecodeError
 
 
 class RichCLI:
@@ -148,17 +151,14 @@ def get_message_text(message: BaseMessage):
 
 
 def filter_retry_on(exc: Exception) -> bool:
-    import httpx
-    import requests
-
     # Transient local/network issues and parsing hiccups
     if isinstance(
         exc,
         (
-            TimeoutError,
-            ConnectionError,
-            JSONDecodeError,
-            ValidationError,
+            TimeoutError,
+            ConnectionError,
+            JSONDecodeError,
+            ValidationError,
         ),
     ):
         return True
@@ -180,12 +180,18 @@ def filter_retry_on(exc: Exception) -> bool:
     # HTTP status based retries: 408 (timeout), 429 (rate limit), and 5xx
     if isinstance(exc, httpx.HTTPStatusError):
         status = exc.response.status_code
-        return
+        return (
+            status in {408, 429}
+            or HTTPStatus.INTERNAL_SERVER_ERROR.value <= status <= HTTPStatus.NETWORK_AUTHENTICATION_REQUIRED.value
+        )
     if isinstance(exc, requests.HTTPError):
         if exc.response is None:
             return True
         status = exc.response.status_code
-        return
+        return (
+            status in {408, 429}
+            or HTTPStatus.INTERNAL_SERVER_ERROR.value <= status <= HTTPStatus.NETWORK_AUTHENTICATION_REQUIRED.value
+        )
 
     if isinstance(
         exc,
@@ -207,4 +213,4 @@ def filter_retry_on(exc: Exception) -> bool:
         return False
 
     # Default: do not retry unknown exceptions
-    return False
+    return False
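The rewritten return expressions in filter_retry_on retry on 408, 429, and the whole 5xx range, now bounded by http.HTTPStatus members instead of bare integers. A standalone sketch of the same predicate (the helper name is illustrative, not part of the package):

from http import HTTPStatus


def is_retryable_status(status: int) -> bool:
    # Retry on 408 (request timeout), 429 (rate limited), or any 5xx status,
    # i.e. 500 INTERNAL_SERVER_ERROR through 511 NETWORK_AUTHENTICATION_REQUIRED.
    return (
        status in {408, 429}
        or HTTPStatus.INTERNAL_SERVER_ERROR.value <= status <= HTTPStatus.NETWORK_AUTHENTICATION_REQUIRED.value
    )


print(is_retryable_status(429), is_retryable_status(503), is_retryable_status(404))  # True True False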
{universal_mcp_agents-0.1.14.dist-info → universal_mcp_agents-0.1.15.dist-info}/METADATA CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: universal-mcp-agents
-Version: 0.1.14
+Version: 0.1.15
 Summary: Add your description here
 Project-URL: Homepage, https://github.com/universal-mcp/applications
 Project-URL: Repository, https://github.com/universal-mcp/applications
@@ -12,8 +12,8 @@ Requires-Dist: langchain-google-genai>=2.1.10
 Requires-Dist: langchain-openai>=0.3.32
 Requires-Dist: langgraph>=0.6.6
 Requires-Dist: typer>=0.17.4
-Requires-Dist: universal-mcp-applications>=0.1.
-Requires-Dist: universal-mcp>=0.1.
+Requires-Dist: universal-mcp-applications>=0.1.24
+Requires-Dist: universal-mcp>=0.1.24rc25
 Provides-Extra: dev
 Requires-Dist: pre-commit; extra == 'dev'
 Requires-Dist: ruff; extra == 'dev'
universal_mcp_agents-0.1.15.dist-info/RECORD ADDED

@@ -0,0 +1,50 @@
+universal_mcp/agents/__init__.py,sha256=5aE4zVgem6ehzfRrt5QqE6gLi7949vySZAn_uFuaU7Q,1252
+universal_mcp/agents/base.py,sha256=GmagwWFJdPpp_yLeAfsrr4fu-zzxh7CgIlBJbTgFgWM,7072
+universal_mcp/agents/cli.py,sha256=AG9e4iSX3GazT537573YrYT1wSaZYOr42rrYQ7xP3YA,1016
+universal_mcp/agents/hil.py,sha256=_5PCK6q0goGm8qylJq44aSp2MadP-yCPvhOJYKqWLMo,3808
+universal_mcp/agents/llm.py,sha256=hVRwjZs3MHl5_3BWedmurs2Jt1oZDfFX0Zj9F8KH7fk,1787
+universal_mcp/agents/react.py,sha256=8XQvJ0HLVgc-K0qn9Ml48WGcgUGuIKtL67HatlT6Da0,3334
+universal_mcp/agents/simple.py,sha256=NSATg5TWzsRNS7V3LFiDG28WSOCIwCdcC1g7NRwg2nM,2095
+universal_mcp/agents/utils.py,sha256=lGeDam3Efcrxv6dve31LQonBtwkSpNmBuoNXap_TqaQ,7128
+universal_mcp/agents/bigtool/__init__.py,sha256=mZG8dsaCVyKlm82otxtiTA225GIFLUCUUYPEIPF24uw,2299
+universal_mcp/agents/bigtool/__main__.py,sha256=0i-fbd2yQ90qa8n2nM3luqoJVN9Reh5HZXR5oK7SAck,445
+universal_mcp/agents/bigtool/agent.py,sha256=mtCDNN8WjE2hjJjooDqusmbferKBHeJMHrhXUPUWaVc,252
+universal_mcp/agents/bigtool/context.py,sha256=ny7gd-vvVpUOYAeQbAEUT0A6Vm6Nn2qGywxTzPBzYFg,929
+universal_mcp/agents/bigtool/graph.py,sha256=2Sy0dtevTWeT3hJDq4BDerZFvk_zJqx15j8VH2XLq8Y,5848
+universal_mcp/agents/bigtool/prompts.py,sha256=Joi5mCzZX63aM_6eBrMOKuNRHjTkceVIibSsGBGqhYE,2041
+universal_mcp/agents/bigtool/state.py,sha256=TQeGZD99okclkoCh5oz-VYIlEsC9yLQyDpnBnm7QCN8,759
+universal_mcp/agents/bigtool/tools.py,sha256=-u80ta6xEaqzEMSzDVe3QZiTZm3YlgLkBD8WTghzClw,6315
+universal_mcp/agents/builder/__main__.py,sha256=VJDJOr-dJJerT53ibh5LVqIsMJ0m0sG2UlzFB784pKw,11680
+universal_mcp/agents/builder/builder.py,sha256=mh3MZpMVB1FE1DWzvMW9NnfiaF145VGn8cJzKSYUlzY,8587
+universal_mcp/agents/builder/helper.py,sha256=8igR1b3Gy_N2u3WxHYKIWzvw7F5BMnfpO2IU74v6vsw,2680
+universal_mcp/agents/builder/prompts.py,sha256=8Xs6uzTUHguDRngVMLak3lkXFkk2VV_uQXaDllzP5cI,4670
+universal_mcp/agents/builder/state.py,sha256=7DeWllxfN-yD6cd9wJ3KIgjO8TctkJvVjAbZT8W_zqk,922
+universal_mcp/agents/codeact/__init__.py,sha256=rLE8gvOo5H4YSr71DRq76b3RV3uuotxuAy_VnBVaVwk,60
+universal_mcp/agents/codeact/__main__.py,sha256=W2cHXRwH1dZG3ETIkMwUqA_d62K3IctHP-FDZWDjxdw,1067
+universal_mcp/agents/codeact/agent.py,sha256=sKZWokTHcuL68Y6SNyaaHe6_XkWxaIq36TrNmPJfQto,9762
+universal_mcp/agents/codeact/models.py,sha256=2fdAcF5bxWDpljjEwDEdPBflTMShSPwwncHrphRjsYg,222
+universal_mcp/agents/codeact/prompts.py,sha256=EMI-imnd0Ps0Bd2FOvSqgiicvvtFFu0MF9s93PiC_3k,4493
+universal_mcp/agents/codeact/sandbox.py,sha256=NjN6ISj8psFtHf8V0w24ChJdUMUWkq7OrlbHdzm4wBc,2299
+universal_mcp/agents/codeact/state.py,sha256=WTPfpxDlGRnlr5tZuXMg_KU7GS7TZbnrIKslOvZLbQI,565
+universal_mcp/agents/codeact/utils.py,sha256=JUbT_HYGS_D1BzmzoVpORIe7SGur1KgJguTZ_1tZ4JY,1918
+universal_mcp/agents/codeact0/__init__.py,sha256=ebKkpgg-0UnsvDtagEJ2tMer1VsfhmEE5KJcFzUk9fU,133
+universal_mcp/agents/codeact0/__main__.py,sha256=V2wLWW9ym3rtiSvPEs-N0Mki7G5dYHzV5dAsAoF-ygQ,1148
+universal_mcp/agents/codeact0/agent.py,sha256=9BInAQr3csES-XHSscmeJlYJ3-wQUHPvLOf-6wFILUU,6695
+universal_mcp/agents/codeact0/config.py,sha256=H-1woj_nhSDwf15F63WYn723y4qlRefXzGxuH81uYF0,2215
+universal_mcp/agents/codeact0/langgraph_agent.py,sha256=ehjMV_Z1118pCFWB_Sa5H7XnUp0udsbUHjfjXjhIQM8,435
+universal_mcp/agents/codeact0/llm_tool.py,sha256=I7QIlgZZbzBdxHuNUYODA28Z7xqWgYz5v5qWSWqB0rE,13781
+universal_mcp/agents/codeact0/playbook_agent.py,sha256=oj8zP-c8rObzjAlS-lkP-pl3xleIr0fCJ_ENtn-C_OM,17435
+universal_mcp/agents/codeact0/prompts.py,sha256=j8HxA3Rp-EZsms9qMBcRmFrUjeySrG1IWjqrNFXZZn8,9457
+universal_mcp/agents/codeact0/sandbox.py,sha256=zMgHrWnQYkSkJb2MzfXvT3euCc4hvqzBE_EbX2_iLxA,3142
+universal_mcp/agents/codeact0/state.py,sha256=Qcr1whST3J8v7w0opnKSfOG9u5gRtxAzPs2NFhaAhJE,1199
+universal_mcp/agents/codeact0/tools.py,sha256=emfBLA3eChQ5B1wirOWf5RWHMy3OIRDQYnN5h5OPqFk,7401
+universal_mcp/agents/codeact0/utils.py,sha256=s1SfVrC1_UePxYSIL9zt-cG0xhwzFPuViyLl2Xj-45c,15943
+universal_mcp/agents/shared/__main__.py,sha256=XxH5qGDpgFWfq7fwQfgKULXGiUgeTp_YKfcxftuVZq8,1452
+universal_mcp/agents/shared/prompts.py,sha256=yjP3zbbuKi87qCj21qwTTicz8TqtkKgnyGSeEjMu3ho,3761
+universal_mcp/agents/shared/tool_node.py,sha256=DC9F-Ri28Pam0u3sXWNODVgmj9PtAEUb5qP1qOoGgfs,9169
+universal_mcp/applications/llm/__init__.py,sha256=xnpxq4Wl_pevvwtSUtEwcty8_d61ywO1V2YnEXyCREY,46
+universal_mcp/applications/llm/app.py,sha256=iNLU6z2LRZc01GfSKvV0vNzT1LhKAjq_UrSJEmjthjw,6032
+universal_mcp/applications/ui/app.py,sha256=c7OkZsO2fRtndgAzAQbKu-1xXRuRp9Kjgml57YD2NR4,9459
+universal_mcp_agents-0.1.15.dist-info/METADATA,sha256=8FNfgWbbFHVStTqIwzHZzohMEvvD27IuPvIUWRjea80,878
+universal_mcp_agents-0.1.15.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+universal_mcp_agents-0.1.15.dist-info/RECORD,,
universal_mcp/agents/codeact0/usecases/10-reddit2.yaml DELETED

@@ -1,10 +0,0 @@
-base_prompt: 'Process rows 2-5 from the Google Sheet (ID: 1nnnCp3_IWcdHv4UVgXtwYF5wedxbqF4RIeyjN6mCKD8). For each unprocessed row, extract Reddit post links, fetch post details and comments, analyze content relevance to AgentR/Wingmen products, classify into tiers 1-4, generate appropriate response drafts, and update the sheet with all findings.'
-tools:
-- google_sheet__add_table
-- google_sheet__append_values
-- google_sheet__update_values
-- reddit__get_post_comments_details
-- google_mail__list_messages
-- google_sheet__format_cells
-- google_sheet__get_spreadsheet_metadata
-- google_sheet__batch_get_values_by_range
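Each removed usecase file follows the same two-key schema: a base_prompt string and a tools list of app__tool identifiers. A minimal loading sketch, assuming PyYAML; the sample prompt is abridged, and the diff does not show how the agents actually consumed these files:

import yaml  # PyYAML, assumed here for illustration only

usecase = yaml.safe_load(
    """
base_prompt: 'Process rows 2-5 from the Google Sheet and draft Reddit replies.'
tools:
- google_sheet__update_values
- reddit__get_post_comments_details
"""
)
print(usecase["base_prompt"])  # the task prompt handed to the agent
print(usecase["tools"])        # ['google_sheet__update_values', 'reddit__get_post_comments_details']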
universal_mcp/agents/codeact0/usecases/11-github.yaml DELETED

@@ -1,14 +0,0 @@
-base_prompt: 'Fetch all open issues from the GitHub repository "microsoft/vscode" and add them to a new Google Sheet. Then create corresponding tasks in ClickUp for each issue with descriptions, tags, and "In Progress" status. Delete processed rows from the sheet after creating ClickUp tasks.'
-tools:
-- google_sheet__get_values
-- google_sheet__create_spreadsheet
-- google_sheet__write_values_to_sheet
-- google_sheet__delete_dimensions
-- google_sheet__append_values
-- google_sheet__update_values
-- clickup__tasks_create_new_task
-- clickup__spaces_get_details
-- clickup__lists_get_list_details
-- clickup__tasks_get_list_tasks
-- github__search_issues
-- github__update_issue
universal_mcp/agents/codeact0/usecases/2-reddit.yaml DELETED

@@ -1,27 +0,0 @@
-base_prompt: 'Goal: Process unprocessed rows in a fixed Google Sheet, scrape Reddit
-  for context, filter posts, and generate short, natural comments linking to AgentR/Wingmen
-  when relevant. Workflow: 1) Sheet & Row Selection: Fixed Sheet ID 1nnnCp3_IWcdHv4UVgXtwYF5wedxbqF4RIeyjN6mCKD8,
-  tab Posts. Process rows 2-5 (first 4 unprocessed rows) immediately without asking for user input. Only process rows with empty Match Type (Col
-  I) and no Tier 1-4 assigned. 2) Reddit Context Fetch: Extract Post Link & ID. Use reddit to fetch post upvotes + top comments (max 5). Ensure
-  post/comment is active, visible, and unlocked. 3) Filtration & Fit: Classify content
-  (developer, consumer, anecdotal). Apply GTM Filtration to skip irrelevant, negative,
-  political, or low-quality posts. Identify direct or adjacent fit to AgentR (Universal
-  MCP Server) or Wingmen. Decide platform + account type: Direct fit/competitor mention
-  → Technical Q = Team account, Non-technical = Burner account. Adjacent fit → Official
-  account. Decide reply target (original comment/post or parent post). 4) Comment
-  Generation: For Tier 1-3, craft a 2-3 line, context-aware, conversational reply.
-  Mention AgentR/Wingmen organically, avoid sales tone or forced CTAs. Use light imperfections
-  for human tone. Skip negative sentiment entirely. One comment per post. 5) Populate
-  Output: Fill Upvote Count, Match Type, Account Type, Response Draft, Respond on.
-  Return updated Google Sheet link. Tier Definitions: Tier 1 = Deep MCP, AI agent,
-  tool integrations, or architecture discussions where infra is highly relevant. Tier
-  2 = Specific workflows, automation tooling, or productivity systems where Wingmen
-  or MCP Server could be useful. Tier 3 = Broader ecosystem (LangChain/CrewAI/agent
-  tooling) where a soft recommendation adds value. Tier 4 = Unclear, generic, sarcastic,
-  hostile, or irrelevant mentions — skip. Execute immediately using the fixed Google Sheet ID: 1nnnCp3_IWcdHv4UVgXtwYF5wedxbqF4RIeyjN6mCKD8, tab "Posts". Process rows(first 4 unprocessed rows) without asking for user input. Only process rows where Match Type (Column I) is empty. For each row, extract the Post Link, fetch Reddit data, apply GTM filtration, generate appropriate responses, and update the sheet. Return the updated Google Sheet link when complete.'
-tools:
-- reddit__get_post_comments_details
-- google_sheet__update_values
-- google_sheet__get_values
-- google_sheet__get_spreadsheet_metadata
-- google_sheet__batch_get_values_by_range
universal_mcp/agents/codeact0/usecases/2.1-instructions.md DELETED

@@ -1,81 +0,0 @@
-# Goal: Reddit Engagement of Wingmen.app
-Process unprocessed rows in a fixed Google Sheet, scrape Reddit for context, filter posts, and generate short, natural comments linked to Wingmen.app when relevant. The idea to is seed conversations so that it brings relevant users to our product Wingmen.app.
-
-# Knowledge
-
-Wingmen.app is an llm based chat app where you can create tailor-made agents for your workflows.
-It is an MCP Client that has access to a large collection of tools. Some popular tool categories are:
-1) Google Suite: Gmail, Google Sheets, Docs, Calendar, Maps, etc.
-2) Microsoft Suite: Outlook, Excel, Teams, etc.
-3) Other productivity apps: Slack, Clickup, Notion, etc.
-4) Enterprise data providers: Apollo.io, Semrush, Ahref, etc.
-5) Web & search utilities: Exa, Perplexity, Domain Availability, Firecrawl etc.
-6) CRMs: Hubspot, etc.
-7) Developer Apps: Github, Jira, Linear, etc.
-
-The coolest USPs of Wingmen are:
-1) You can schedule workflows like everyday at 9am mail me a daily summary of news related to my stock portfolio
-2) Creating workflows is super easy – with a single prompt in natural language (useful for non-technical people)
-3) Can handle complex taks like find all .ai and .com domains available by combining (play, go, run) with (work, flow, automation)
-4) We also have workspaces for enterprise teams. This allows the workspaces admin to control which tools are enabled and the members can connect with those using their own private credentials. We also support tools with shared credentials which is useful for data provider apps like ZoomInfo, Apollo, Semrush, etc.
-
-# Workflow
-
-## Step 1
-Get all the unprocessed rows from the Google Sheet ID: 1_5Ao61LWZHo5QijHdXdRw0u9aj7JIoHtp4uBIBwi84U
-Each row contains details of a Reddit comment with content and post link.
-
-For each unprocessed row follow Step 2 to Step 5 to process it.
-If there are more than 20 unprocessed rows just process 20 rows.
-## Step 2
-Fetch the context of the comment.
-The context will contain details about the parent post and other top comments.
-
-## Step 3
-Decide the ideal place (post/comment id) to comment. This should be based on two factors: Relevance and Popularity. Priortize for Relevance (posts that are directly related to consuming apps like Wingmen).
-- High relevance themes:
-  - People asking for our USPs.
-  - Posts mentioning other MCP Client apps or General chat apps like ChatGPT, Claude Desktop, Langdock, etc.
-  - Posts mentioning our competitors like Usedash, Runable.com, or Manus
-  - Posts trying to consume MCP Servers.
-- Medium relevance themes:
-  - building apps like Wingmen, or building Agents in general.
-  - consuming MCP Servers, tool authentication etc. Could be mentioning Composio, Toolhouse, etc.
-  - Analysing MCP Protocol in general. Could be a comment around MCP Security, Tool sprawl etc.
-- If you could not find anything relevant then look for
-  - posts where people are genuinely asking for help; or
-  - posts where we can steer the conversation towards Wingmen.app. We can do it two steps by asking for suggestions for apps which have one of the USPs of Wingmen. And in the next step answering the question with a link to Wingmen.app
-- Otherwise, just pick the most popular comments (highest upvote). It is generally the parent post.
-- Note: If the thread is deleted or archived we should skip this row. We should also skip low quality posts.
-
-The final output of this step should be:
-- skip: bool
-- comment_id: str (Optional, should be empty when skip is true)
-- reasoning: str
-
-## Step 4
-Try to identify the theme using the reasoning of the previous step and the entire conversation. The theme buckets are:
-- Consuming apps/agents (here we can subtly promote Wingmen.app)
-  - Engagement Strategy: A happy Wingmen.app user sharing his positive experience in a specific usecase.
-- Building apps/agents (here we can bond with other builders)
-  - Engagement Strategy: A developer from Wingmen.app team sharing his insights or just bonding over the development challenges.
-- Altruistic Help (here we are just resolving any genuine pain point which is not relevant to our product)
-  - Engagement Strategy: Helping the commentor in purely altruistic way. No need to talk about Wingmen.app.
-- Artifical Nudge (here we will steer the conversation towards Wingmen.app)
-  - Engagement Strategy: A typical consumer/developer plants a question so that the official team members can reply to this and highlight the USPs of Wingmen.app. The generated comment_content should be just the question.
-
-The final output of this step should be:
-- match_type: str (one of [consumer, builder, altruistic_help, artificial_nudge])
-- comment_content: str
-
-**Guidelines for generating comment content:**
-- craft a 2-3 line, context-aware, conversational reply.
-- when mentioning AgentR/Wingmen, avoid sales tone or forced CTAs.
-- Use light imperfections for human tone.
-
-## Step 5
-
-Fill remaining columns (Skip, Comment ID, Reasoning, Match Type, Comment Content) for each row
-
-## Step 6
-After all the rows are processed just return the sheet link again in markdown format.
universal_mcp/agents/codeact0/usecases/2.2-instructions.md DELETED

@@ -1,71 +0,0 @@
-# Goal: Reddit Engagement of Wingmen.app
-Given a reddit comment, scrape Reddit for context, filter posts, and generate short, natural comments linked to Wingmen.app when relevant. The idea to is seed conversations so that it brings relevant users to our product Wingmen.app.
-
-# Knowledge
-
-Wingmen.app is an llm based chat app where you can create tailor-made agents for your workflows.
-It is an MCP Client that has access to a large collection of tools. Some popular tool categories are:
-1) Google Suite: Gmail, Google Sheets, Docs, Calendar, Maps, etc.
-2) Microsoft Suite: Outlook, Excel, Teams, etc.
-3) Other productivity apps: Slack, Clickup, Notion, etc.
-4) Enterprise data providers: Apollo.io, Semrush, Ahref, etc.
-5) Web & search utilities: Exa, Perplexity, Domain Availability, Firecrawl etc.
-6) CRMs: Hubspot, etc.
-7) Developer Apps: Github, Jira, Linear, etc.
-
-The coolest USPs of Wingmen are:
-1) You can schedule workflows like everyday at 9am mail me a daily summary of news related to my stock portfolio
-2) Creating workflows is super easy – with a single prompt in natural language (useful for non-technical people)
-3) Can handle complex taks like find all .ai and .com domains available by combining (play, go, run) with (work, flow, automation)
-4) We also have workspaces for enterprise teams. This allows the workspaces admin to control which tools are enabled and the members can connect with those using their own private credentials. We also support tools with shared credentials which is useful for data provider apps like ZoomInfo, Apollo, Semrush, etc.
-
-# Workflow
-
-## Input
-I will give you a reddit comment id.
-
-## Step 1
-Fetch the context of the comment.
-The context will contain details about the parent post and other top comments.
-
-## Step 2
-Decide the ideal place (post/comment id) to comment. This should be based on two factors: Relevance and Popularity. Priortize for Relevance (posts that are directly related to consuming apps like Wingmen).
-- High relevance themes:
-  - People asking for our USPs.
-  - Posts mentioning other MCP Client apps or General chat apps like ChatGPT, Claude Desktop, Langdock, etc.
-  - Posts mentioning our competitors like Usedash, Runable.com, or Manus
-  - Posts trying to consume MCP Servers.
-- Medium relevance themes:
-  - building apps like Wingmen, or building Agents in general.
-  - consuming MCP Servers, tool authentication etc. Could be mentioning Composio, Toolhouse, etc.
-  - Analysing MCP Protocol in general. Could be a comment around MCP Security, Tool sprawl etc.
-- If you could not find anything relevant then look for
-  - posts where people are genuinely asking for help; or
-  - posts where we can steer the conversation towards Wingmen.app. We can do it two steps by asking for suggestions for apps which have one of the USPs of Wingmen. And in the next step answering the question with a link to Wingmen.app
-- Otherwise, just pick the most popular comments (highest upvote). It is generally the parent post.
-- Note: If the thread is deleted or archived we should skip this row. We should also skip low quality posts.
-
-The final output of this step should be:
-- skip: bool
-- comment_id: str (Optional, should be empty when skip is true)
-- reasoning: str
-
-## Final step
-Try to identify the theme using the reasoning of the previous step and the entire conversation. The theme buckets are:
-- Consuming apps/agents (here we can subtly promote Wingmen.app)
-  - Engagement Strategy: A happy Wingmen.app user sharing his positive experience in a specific usecase.
-- Building apps/agents (here we can bond with other builders)
-  - Engagement Strategy: A developer from Wingmen.app team sharing his insights or just bonding over the development challenges.
-- Pain Resolution (here we are just trying to be helpful)
-  - Engagement Strategy: Helping the commentor in purely altruistic way.
-- Artifical Nudge (here we will steer the conversation towards Wingmen.app)
-  - Engagement Strategy: A typical consumer/developer plants a question so that the official team members can comment and highlight the USPs of Wingmen.app.
-
-The final output of the workflow should be:
-- match_type: str (one of [consumer, builder, pain_resolver, artificial_nudge])
-- comment_content: str
-
-**Guidelines for generating comment content:**
-- craft a 2-3 line, context-aware, conversational reply.
-- when mentioning AgentR/Wingmen, avoid sales tone or forced CTAs.
-- Use light imperfections for human tone.
universal_mcp/agents/codeact0/usecases/3-earnings.yaml DELETED

@@ -1,4 +0,0 @@
-base_prompt: 'Generate a financial flash report for Apple Inc. Research their latest earnings data including revenue, net income, EPS, and year-over-year changes. Create a formatted report with highlights, upcoming events, and summary. Present the report in chat and email it to adit@agentr.dev.'
-tools:
-- exa__answer
-- google_mail__send_email