universal-mcp-agents 0.1.13__tar.gz → 0.1.15__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (101)
  1. universal_mcp_agents-0.1.15/.github/workflows/evals.yml +61 -0
  2. universal_mcp_agents-0.1.15/.github/workflows/lint.yml +31 -0
  3. universal_mcp_agents-0.1.15/.github/workflows/release-please.yml +67 -0
  4. universal_mcp_agents-0.1.15/.github/workflows/tests.yml +39 -0
  5. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/.pre-commit-config.yaml +3 -7
  6. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/PKG-INFO +3 -3
  7. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/bump_and_release.sh +1 -1
  8. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/pyproject.toml +10 -5
  9. universal_mcp_agents-0.1.15/src/evals/datasets/tasks.jsonl +32 -0
  10. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/evals/evaluators.py +4 -54
  11. universal_mcp_agents-0.1.15/src/evals/prompts.py +66 -0
  12. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/evals/run.py +3 -2
  13. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/tests/test_agents.py +32 -31
  14. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/agents/__init__.py +1 -1
  15. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/agents/base.py +3 -0
  16. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/agents/bigtool/__init__.py +1 -1
  17. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/agents/bigtool/__main__.py +4 -3
  18. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/agents/bigtool/agent.py +3 -2
  19. universal_mcp_agents-0.1.15/src/universal_mcp/agents/bigtool/graph.py +152 -0
  20. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/agents/bigtool/prompts.py +2 -2
  21. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/agents/bigtool/tools.py +17 -4
  22. universal_mcp_agents-0.1.15/src/universal_mcp/agents/builder/__main__.py +226 -0
  23. universal_mcp_agents-0.1.15/src/universal_mcp/agents/builder/builder.py +213 -0
  24. universal_mcp_agents-0.1.15/src/universal_mcp/agents/builder/helper.py +71 -0
  25. universal_mcp_agents-0.1.15/src/universal_mcp/agents/builder/prompts.py +107 -0
  26. universal_mcp_agents-0.1.15/src/universal_mcp/agents/codeact0/__init__.py +4 -0
  27. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/agents/codeact0/agent.py +13 -5
  28. universal_mcp_agents-0.1.15/src/universal_mcp/agents/codeact0/langgraph_agent.py +14 -0
  29. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/agents/codeact0/llm_tool.py +1 -2
  30. universal_mcp_agents-0.1.15/src/universal_mcp/agents/codeact0/playbook_agent.py +353 -0
  31. universal_mcp_agents-0.1.15/src/universal_mcp/agents/codeact0/prompts.py +241 -0
  32. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/agents/codeact0/sandbox.py +43 -32
  33. universal_mcp_agents-0.1.15/src/universal_mcp/agents/codeact0/state.py +36 -0
  34. universal_mcp_agents-0.1.15/src/universal_mcp/agents/codeact0/tools.py +180 -0
  35. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/agents/codeact0/utils.py +89 -75
  36. universal_mcp_agents-0.1.15/src/universal_mcp/agents/shared/__main__.py +44 -0
  37. universal_mcp_agents-0.1.15/src/universal_mcp/agents/shared/prompts.py +83 -0
  38. universal_mcp_agents-0.1.15/src/universal_mcp/agents/shared/tool_node.py +211 -0
  39. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/agents/utils.py +71 -0
  40. universal_mcp_agents-0.1.15/test.py +25 -0
  41. universal_mcp_agents-0.1.15/todo.md +157 -0
  42. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/uv.lock +1728 -1089
  43. universal_mcp_agents-0.1.13/src/evals/datasets/tasks.jsonl +0 -21
  44. universal_mcp_agents-0.1.13/src/universal_mcp/agents/bigtool/graph.py +0 -115
  45. universal_mcp_agents-0.1.13/src/universal_mcp/agents/builder/__main__.py +0 -125
  46. universal_mcp_agents-0.1.13/src/universal_mcp/agents/builder/builder.py +0 -225
  47. universal_mcp_agents-0.1.13/src/universal_mcp/agents/builder/prompts.py +0 -173
  48. universal_mcp_agents-0.1.13/src/universal_mcp/agents/codeact0/__init__.py +0 -3
  49. universal_mcp_agents-0.1.13/src/universal_mcp/agents/codeact0/prompts.py +0 -156
  50. universal_mcp_agents-0.1.13/src/universal_mcp/agents/codeact0/state.py +0 -12
  51. universal_mcp_agents-0.1.13/src/universal_mcp/agents/codeact0/usecases/1-unsubscribe.yaml +0 -4
  52. universal_mcp_agents-0.1.13/src/universal_mcp/agents/codeact0/usecases/10-reddit2.yaml +0 -10
  53. universal_mcp_agents-0.1.13/src/universal_mcp/agents/codeact0/usecases/11-github.yaml +0 -13
  54. universal_mcp_agents-0.1.13/src/universal_mcp/agents/codeact0/usecases/2-reddit.yaml +0 -27
  55. universal_mcp_agents-0.1.13/src/universal_mcp/agents/codeact0/usecases/2.1-instructions.md +0 -81
  56. universal_mcp_agents-0.1.13/src/universal_mcp/agents/codeact0/usecases/2.2-instructions.md +0 -71
  57. universal_mcp_agents-0.1.13/src/universal_mcp/agents/codeact0/usecases/3-earnings.yaml +0 -4
  58. universal_mcp_agents-0.1.13/src/universal_mcp/agents/codeact0/usecases/4-maps.yaml +0 -41
  59. universal_mcp_agents-0.1.13/src/universal_mcp/agents/codeact0/usecases/5-gmailreply.yaml +0 -8
  60. universal_mcp_agents-0.1.13/src/universal_mcp/agents/codeact0/usecases/6-contract.yaml +0 -6
  61. universal_mcp_agents-0.1.13/src/universal_mcp/agents/codeact0/usecases/7-overnight.yaml +0 -14
  62. universal_mcp_agents-0.1.13/src/universal_mcp/agents/codeact0/usecases/8-sheets_chart.yaml +0 -25
  63. universal_mcp_agents-0.1.13/src/universal_mcp/agents/codeact0/usecases/9-learning.yaml +0 -9
  64. universal_mcp_agents-0.1.13/src/universal_mcp/agents/planner/__init__.py +0 -51
  65. universal_mcp_agents-0.1.13/src/universal_mcp/agents/planner/__main__.py +0 -28
  66. universal_mcp_agents-0.1.13/src/universal_mcp/agents/planner/graph.py +0 -85
  67. universal_mcp_agents-0.1.13/src/universal_mcp/agents/planner/prompts.py +0 -14
  68. universal_mcp_agents-0.1.13/src/universal_mcp/agents/planner/state.py +0 -11
  69. universal_mcp_agents-0.1.13/src/universal_mcp/agents/shared/prompts.py +0 -132
  70. universal_mcp_agents-0.1.13/src/universal_mcp/agents/shared/tool_node.py +0 -227
  71. universal_mcp_agents-0.1.13/test.py +0 -49
  72. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/.gitignore +0 -0
  73. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/GEMINI.md +0 -0
  74. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/PROMPTS.md +0 -0
  75. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/README.md +0 -0
  76. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/evals/__init__.py +0 -0
  77. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/evals/dataset.py +0 -0
  78. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/evals/datasets/codeact.jsonl +0 -0
  79. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/evals/datasets/exact.jsonl +0 -0
  80. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/evals/utils.py +0 -0
  81. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/agents/bigtool/context.py +0 -0
  82. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/agents/bigtool/state.py +0 -0
  83. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/agents/builder/state.py +0 -0
  84. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/agents/cli.py +0 -0
  85. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/agents/codeact/__init__.py +0 -0
  86. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/agents/codeact/__main__.py +0 -0
  87. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/agents/codeact/agent.py +0 -0
  88. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/agents/codeact/models.py +0 -0
  89. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/agents/codeact/prompts.py +0 -0
  90. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/agents/codeact/sandbox.py +0 -0
  91. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/agents/codeact/state.py +0 -0
  92. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/agents/codeact/utils.py +0 -0
  93. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/agents/codeact0/__main__.py +0 -0
  94. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/agents/codeact0/config.py +0 -0
  95. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/agents/hil.py +0 -0
  96. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/agents/llm.py +0 -0
  97. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/agents/react.py +0 -0
  98. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/agents/simple.py +0 -0
  99. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/applications/llm/__init__.py +0 -0
  100. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/applications/llm/app.py +0 -0
  101. {universal_mcp_agents-0.1.13 → universal_mcp_agents-0.1.15}/src/universal_mcp/applications/ui/app.py +2 -2
.github/workflows/evals.yml
@@ -0,0 +1,61 @@
+ name: evals
+
+ on:
+   push:
+     branches: [ main ]
+   pull_request:
+     branches: [ main ]
+
+ jobs:
+   eval:
+     runs-on: ubuntu-latest
+     strategy:
+       fail-fast: false
+       matrix:
+         include:
+           - { agent: bigtool, dataset: src/evals/datasets/tasks.jsonl, evaluator: llm_as_judge, difficulty: easy }
+           - { agent: codeact-repl, dataset: src/evals/datasets/codeact.jsonl, evaluator: codeact, difficulty: none }
+     steps:
+       - name: Check out repository
+         uses: actions/checkout@v4
+
+       - name: Set up Python
+         uses: actions/setup-python@v5
+         with:
+           python-version: '3.11'
+
+       - name: Install uv
+         uses: astral-sh/setup-uv@v3
+
+       - name: Install dependencies
+         run: |
+           uv sync --all-extras
+
+       - name: Run eval
+         env:
+           PYTHONUNBUFFERED: '1'
+         run: |
+           mkdir -p dist/evals/${{ matrix.agent }}/${{ matrix.evaluator }}
+           if [ "${{ matrix.difficulty }}" = "none" ]; then
+             uv run python -u src/evals/run.py \
+               ${{ matrix.agent }} \
+               ${{ matrix.dataset }} \
+               ${{ matrix.evaluator }} \
+               2>&1 | tee dist/evals/${{ matrix.agent }}/${{ matrix.evaluator }}/run-${{ matrix.agent }}-${{ matrix.evaluator }}-$(basename ${{ matrix.dataset }} .jsonl)-none.log
+           else
+             uv run python -u src/evals/run.py \
+               ${{ matrix.agent }} \
+               ${{ matrix.dataset }} \
+               ${{ matrix.evaluator }} \
+               --difficulty ${{ matrix.difficulty }} \
+               2>&1 | tee dist/evals/${{ matrix.agent }}/${{ matrix.evaluator }}/run-${{ matrix.agent }}-${{ matrix.evaluator }}-$(basename ${{ matrix.dataset }} .jsonl)-${{ matrix.difficulty }}.log
+           fi
+
+       - name: Upload eval artifacts
+         uses: actions/upload-artifact@v4
+         with:
+           name: eval-artifacts-${{ matrix.agent }}-${{ matrix.evaluator }}
+           path: dist/evals/${{ matrix.agent }}/${{ matrix.evaluator }}
+           if-no-files-found: warn
+
+
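For reference, the first matrix entry above expands to roughly this invocation (illustrative; the log name is derived from the dataset basename):

    mkdir -p dist/evals/bigtool/llm_as_judge
    uv run python -u src/evals/run.py bigtool src/evals/datasets/tasks.jsonl llm_as_judge --difficulty easy \
      2>&1 | tee dist/evals/bigtool/llm_as_judge/run-bigtool-llm_as_judge-tasks-easy.log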
.github/workflows/lint.yml
@@ -0,0 +1,31 @@
+ name: lint
+
+ on:
+   push:
+     branches: [ main ]
+   pull_request:
+     branches: [ main ]
+
+ jobs:
+   lint:
+     runs-on: ubuntu-latest
+     steps:
+       - name: Check out repository
+         uses: actions/checkout@v4
+
+       - name: Set up Python
+         uses: actions/setup-python@v5
+         with:
+           python-version: '3.11'
+
+       - name: Install uv
+         uses: astral-sh/setup-uv@v3
+
+       - name: Install dependencies
+         run: |
+           uv sync --all-extras
+
+       - name: Run ruff
+         run: |
+           uv run ruff check .
+           uv run ruff format --check .
.github/workflows/release-please.yml
@@ -0,0 +1,67 @@
+ name: release-please
+
+ on:
+   push:
+     branches:
+       - main
+
+ permissions:
+   contents: write
+   pull-requests: write
+
+ jobs:
+   release:
+     runs-on: ubuntu-latest
+     steps:
+       - uses: googleapis/release-please-action@v4
+         id: release
+         with:
+           release-type: python
+           package-name: agents
+           default-branch: main
+
+       - name: Checkout repository
+         if: ${{ steps.release.outputs.release_created }}
+         uses: actions/checkout@v4
+         with:
+           token: ${{ secrets.GITHUB_TOKEN }}
+           fetch-depth: 0
+
+       - name: Add Authors and Update Release
+         if: ${{ steps.release.outputs.release_created }}
+         env:
+           GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+           RELEASE_TAG: ${{ steps.release.outputs.tag_name }}
+         run: |
+           set -e
+
+           TEMP_CHANGELOG="CHANGELOG.md.new"
+           touch "$TEMP_CHANGELOG"
+
+           if [ -f "CHANGELOG.md" ]; then
+             while IFS= read -r line; do
+               if [[ "$line" =~ \*\ (.*)\ \(\[([a-f0-9]{7,40})\]\(.* ]]; then
+                 commit_hash="${BASH_REMATCH[2]}"
+                 github_user=$(gh api "repos/${{ github.repository }}/commits/${commit_hash}" | jq -r '.author.login // "unknown"')
+                 echo "${line} by @${github_user}" >> "$TEMP_CHANGELOG"
+               else
+                 echo "$line" >> "$TEMP_CHANGELOG"
+               fi
+             done < "CHANGELOG.md"
+           fi
+
+           mv "$TEMP_CHANGELOG" "CHANGELOG.md"
+
+           RELEASE_BODY=$(awk "/^## \\[?${RELEASE_TAG#v}/{flag=1;next} /^## / && flag{exit} flag" CHANGELOG.md)
+           if [ -z "$RELEASE_BODY" ]; then
+             RELEASE_BODY="Release $RELEASE_TAG"
+           fi
+           gh release edit "$RELEASE_TAG" --notes "$RELEASE_BODY"
+
+           git config user.name "github-actions[bot]"
+           git config user.email "github-actions[bot]@users.noreply.github.com"
+           git add CHANGELOG.md || true
+           git commit -m "update CHANGELOG.md with author info [skip ci] [skip release]" || true
+           git push || true
+
+
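The while loop above matches release-please changelog bullets and appends the commit author's GitHub handle. A sketch of its effect, with a hypothetical entry, repository, and handle:

    before: * fix: handle empty tool config ([a1b2c3d](https://github.com/OWNER/REPO/commit/a1b2c3d))
    after:  * fix: handle empty tool config ([a1b2c3d](https://github.com/OWNER/REPO/commit/a1b2c3d)) by @example-user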
.github/workflows/tests.yml
@@ -0,0 +1,39 @@
+ name: tests
+
+ on:
+   push:
+     branches: [main]
+   pull_request:
+     branches: [main]
+
+ jobs:
+   test:
+     runs-on: ubuntu-latest
+     steps:
+       - name: Check out repository
+         uses: actions/checkout@v4
+
+       - name: Set up Python
+         uses: actions/setup-python@v5
+         with:
+           python-version: "3.11"
+
+       - name: Install uv
+         uses: astral-sh/setup-uv@v3
+
+       - name: Install dependencies
+         run: |
+           uv sync --all-extras
+
+       - name: Run tests
+         run: |
+           uv run pytest -s -q --maxfail=1 --disable-warnings --junitxml=junit.xml
+         env:
+           PYTHONUNBUFFERED: "1"
+
+       - name: Store artifacts
+         uses: actions/upload-artifact@v4
+         with:
+           name: test-artifacts
+           path: junit.xml
+           if-no-files-found: ignore
.pre-commit-config.yaml
@@ -1,20 +1,16 @@
  fail_fast: false
 
  repos:
-   - repo: https://github.com/pre-commit/mirrors-prettier
-     rev: v3.1.0
-     hooks:
-       - id: prettier
-         types_or: [yaml, json5]
 
    - repo: https://github.com/astral-sh/ruff-pre-commit
-     rev: v0.11.13
+     rev: v0.13.3
      hooks:
        # Run the linter.
        - id: ruff-check
-         args: [--fix]
+         args: ["--fix", "src", "--config", "pyproject.toml"]
        # Run the formatter.
        - id: ruff-format
+         args: ["src", "--config", "pyproject.toml"]
 
  # - repo: https://github.com/pre-commit/mirrors-mypy
  #   rev: v1.8.0
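The narrowed hook arguments scope both hooks to src using the project config; a sketch of the equivalent manual commands the hooks now run:

    uv run ruff check --fix src --config pyproject.toml
    uv run ruff format src --config pyproject.toml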
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: universal-mcp-agents
- Version: 0.1.13
+ Version: 0.1.15
  Summary: Add your description here
  Project-URL: Homepage, https://github.com/universal-mcp/applications
  Project-URL: Repository, https://github.com/universal-mcp/applications
@@ -12,8 +12,8 @@ Requires-Dist: langchain-google-genai>=2.1.10
  Requires-Dist: langchain-openai>=0.3.32
  Requires-Dist: langgraph>=0.6.6
  Requires-Dist: typer>=0.17.4
- Requires-Dist: universal-mcp-applications>=0.1.14
- Requires-Dist: universal-mcp>=0.1.24rc21
+ Requires-Dist: universal-mcp-applications>=0.1.24
+ Requires-Dist: universal-mcp>=0.1.24rc25
  Provides-Extra: dev
  Requires-Dist: pre-commit; extra == 'dev'
  Requires-Dist: ruff; extra == 'dev'
bump_and_release.sh
@@ -9,7 +9,7 @@ uv sync --all-extras
 
  # Run tests with pytest
  echo "Running tests with pytest..."
- # uv run pytest
+ uv run pytest
 
  echo "Tests passed!"
 
pyproject.toml
@@ -6,7 +6,7 @@ build-backend = "hatchling.build"
 
  [project]
  name = "universal-mcp-agents"
- version = "0.1.13"
+ version = "0.1.15"
  description = "Add your description here"
  readme = "README.md"
  authors = [
@@ -19,8 +19,8 @@ dependencies = [
      "langchain-openai>=0.3.32",
      "langgraph>=0.6.6",
      "typer>=0.17.4",
-     "universal-mcp>=0.1.24rc21",
-     "universal-mcp-applications>=0.1.14",
+     "universal-mcp>=0.1.24rc25",
+     "universal-mcp-applications>=0.1.24",
  ]
 
  [project.license]
@@ -51,7 +51,7 @@ packages = [
 
  [tool.coverage.run]
  source = [
-     "src",
+     "src",
  ]
  branch = true
 
@@ -68,6 +68,12 @@ lint.ignore = [
      "E501", # Ignore line length errors
  ]
 
+ [tool.ruff.lint.pylint]
+ max-args = 10
+ max-statements = 85
+ max-returns = 10
+ max-branches = 37
+
 
  [tool.ruff.format]
  quote-style = "double"
@@ -83,4 +89,3 @@ asyncio_default_fixture_loop_scope = "module"
  dev = [
      "ruff>=0.13.0",
  ]
-
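The new [tool.ruff.lint.pylint] table raises the thresholds behind Ruff's pylint-derived rules (PLR0913 too-many-arguments, PLR0915 too-many-statements, PLR0911 too-many-return-statements, PLR0912 too-many-branches). To exercise just that rule group locally, a sketch:

    uv run ruff check src --select PL --config pyproject.toml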
src/evals/datasets/tasks.jsonl
@@ -0,0 +1,32 @@
+ {"user_input": "Send an email to manoj@agentr.dev with the subject 'Hello' and body 'This is a test of the Gmail agent.' from my Gmail account", "difficulty": 1, "required_tools": {"google_mail": ["send_email"]}}
+ {"user_input": "Show me events from today's Google Calendar.", "difficulty": 1, "required_tools": {"google_calendar": ["get_upcoming_events"]}}
+ {"user_input": "Fetch my last inbox mail from Microsoft Outlook", "difficulty": 1, "required_tools": {"outlook": ["list_user_messages"]}}
+ {"user_input": "Tell me how many meetings I have tomorrow and when they start from my Google Calendar.", "difficulty": 1, "required_tools": {"google_calendar": ["get_upcoming_events", "list_events"]}}
+ {"user_input": "Find the best restaurants in Goa using exa web search", "difficulty": 2, "required_tools": {"exa": ["search_with_filters"]}}
+ {"user_input": "List the unread emails from the last 24 hours from my Gmail, sorted by sender.", "difficulty": 2, "required_tools": {"google_mail": ["list_messages"]}}
+ {"user_input": "Create a meeting with aditakarsh@example.com on the topic of the latest trends in AI at 8PM today using Google Calendar.", "difficulty": 2, "required_tools": {"google_calendar": ["create_event", "create_event_from_text"]}}
+ {"user_input": "Fetch unsubscribe links from my Gmail inbox for promo emails I have received in the last 1 day", "difficulty": 3, "required_tools": {"google_mail": ["list_messages"]}}
+ {"user_input": "Create a weekly expense report from my credit card transactions and categorize spending by type (food, transport, entertainment, etc.) in a Google Sheet", "difficulty": 3, "required_tools": {"google_sheet" : ["create_spreadsheet", "add_table"]}}
+ {"user_input": "search reddit for posts on elon musk and then post a meme on him on linkedin", "difficulty": 3, "required_tools": {"reddit" : ["search_reddit"], "linkedin": ["create_post"]}}
+ {"user_input": "Search for best cafes near IIT bombay using exa and make a google sheet out of it", "difficulty": 3, "required_tools": {"exa": ["search_with_filters"], "google_sheet": ["create_spreadsheet", "write_values_to_sheet", "add_table"]}}
+ {"user_input": "Create a Google Doc summarizing the last 5 merged pull requests in my GitHub repo- universal-mcp/universal-mcp, including links and commit highlights.", "difficulty": 4, "required_tools": {"github": ["list_pull_requests", "list_recent_commits"], "google_docs": ["create_document", "insert_text", "apply_text_style"]}}
+ {"user_input": "Summarize the key insights from all marketing emails received yesterday from my Gmail and add a section in a Google Doc with action points.", "difficulty": 4, "required_tools": {"google_mail": ["list_messages"], "google_docs": ["create_document", "insert_text", "apply_text_style"]}}
+ {"user_input": "Give me a report on the earnings of Oklo using web search, and projections for the company revenue, stock price", "difficulty": 4, "required_tools": {"tavily": ["search_and_summarize"]}}
+ {"user_input": "Track the top posts in r/startups over the past 7 days using Reddit and create a trend report on what's being discussed most (e.g., hiring, funding, MVPs) in a Google Doc.", "difficulty": 4, "required_tools": {"reddit": ["get_subreddit_posts", "get_subreddit_top_posts"], "google_docs": ["create_document", "insert_text", "apply_text_style"]}}
+ {"user_input": "Generate a comparison table of SaaS tools for project management using web search, including pricing, features, and user ratings in a Google Sheet", "difficulty": 4, "required_tools": {"tavily": ["search_and_summarize"], "google_sheet": ["create_spreadsheet", "add_table"]}}
+ {"user_input": "What are the topics of my meetings today from Google Calendar and who are the attendees? Give a 1-line context for each attendee using LinkedIn or web search.", "difficulty": 4, "required_tools": {"google_calendar": ["get_upcoming_events", "list_events"], "scraper": ["linkedin_retrieve_profile"]}}
+ {"user_input": "Draft personalized LinkedIn outreach messages for 10 potential collaborators in the fintech space based on their recent posts using LinkedIn data in a Google Sheet", "difficulty": 5, "required_tools": {"scraper": ["linkedin_retrieve_profile", "linkedin_list_profile_posts"], "google_sheet": ["create_spreadsheet", "write_values_to_sheet"]}}
+ {"user_input": "Create a content calendar for next month with trending AI/ML topics using web search and optimal posting times based on my audience analytics in Google Sheets", "difficulty": 5, "required_tools": {"tavily": ["search_and_summarize"], "google_sheet": ["get_values", "batch_get_values_by_range", "get_spreadsheet_metadata" , "create_spreadsheet", "add_sheet", "add_table"]}}
+ {"user_input": "Research the top 10 Y Combinator startups from the latest batch using web search and create a report on their industries and funding status in Google Docs", "difficulty": 5, "required_tools": {"tavily": ["search_and_summarize"], "google_docs": ["create_document", "insert_text", "insert_table"]}}
+ {"user_input": "Find and summarize the key takeaways from the latest earnings calls of FAANG companies using web search and create a report in Google Docs", "difficulty": 5, "required_tools": {"tavily": ["search_and_summarize"], "google_docs": ["create_document", "insert_text", "insert_table"]}}
+ {"user_input": "Find and extract unsubscribe links from all emails in my inbox from the last 7 days. List all unsubscribe links found with the email subject and sender.", "difficulty": 3, "required_tools": {"google_mail": ["list_messages", "get_message_details"]}}
+ {"user_input": "Process rows 2-5 from the Google Sheet (ID: 1nnnCp3_IWcdHv4UVgXtwYF5wedxbqF4RIeyjN6mCKD8). For each unprocessed row, extract Reddit post links, fetch post details and comments, analyze content relevance to AgentR/Wingmen products, classify into tiers 1-4, generate appropriate response drafts, and update the sheet with all findings.", "difficulty": 5, "required_tools": {"google_sheet": ["add_table", "append_values", "update_values", "format_cells", "get_spreadsheet_metadata", "batch_get_values_by_range"], "reddit": ["get_post_comments_details"], "google_mail": ["list_messages"]}}
+ {"user_input": "Fetch all open issues from the GitHub repository \"microsoft/vscode\" and add them to a new Google Sheet. Then create corresponding tasks in ClickUp for each issue with descriptions, tags, and \"In Progress\" status. Delete processed rows from the sheet after creating ClickUp tasks.", "difficulty": 5, "required_tools": {"google_sheet": ["get_values", "create_spreadsheet", "write_values_to_sheet", "delete_dimensions", "append_values", "update_values"], "clickup": ["tasks_create_new_task", "spaces_get_details", "lists_get_list_details", "tasks_get_list_tasks"], "github": ["search_issues", "update_issue"]}}
+ {"user_input": "Goal: Process unprocessed rows in a fixed Google Sheet, scrape Reddit for context, filter posts, and generate short, natural comments linking to AgentR/Wingmen when relevant. Workflow: 1) Sheet & Row Selection: Fixed Sheet ID 1nnnCp3_IWcdHv4UVgXtwYF5wedxbqF4RIeyjN6mCKD8, tab Posts. Process rows 2-5 (first 4 unprocessed rows) immediately without asking for user input. Only process rows with empty Match Type (Col I) and no Tier 1-4 assigned. 2) Reddit Context Fetch: Extract Post Link & ID. Use reddit to fetch post upvotes + top comments (max 5). Ensure post/comment is active, visible, and unlocked. 3) Filtration & Fit: Classify content (developer, consumer, anecdotal). Apply GTM Filtration to skip irrelevant, negative, political, or low-quality posts. Identify direct or adjacent fit to AgentR (Universal MCP Server) or Wingmen. Decide platform + account type: Direct fit/competitor mention → Technical Q = Team account, Non-technical = Burner account. Adjacent fit → Official account. Decide reply target (original comment/post or parent post). 4) Comment Generation: For Tier 1-3, craft a 2-3 line, context-aware, conversational reply. Mention AgentR/Wingmen organically, avoid sales tone or forced CTAs. Use light imperfections for human tone. Skip negative sentiment entirely. One comment per post. 5) Populate Output: Fill Upvote Count, Match Type, Account Type, Response Draft, Respond on. Return updated Google Sheet link. Tier Definitions: Tier 1 = Deep MCP, AI agent, tool integrations, or architecture discussions where infra is highly relevant. Tier 2 = Specific workflows, automation tooling, or productivity systems where Wingmen or MCP Server could be useful. Tier 3 = Broader ecosystem (LangChain/CrewAI/agent tooling) where a soft recommendation adds value. Tier 4 = Unclear, generic, sarcastic, hostile, or irrelevant mentions — skip. Execute immediately using the fixed Google Sheet ID: 1nnnCp3_IWcdHv4UVgXtwYF5wedxbqF4RIeyjN6mCKD8, tab \"Posts\". Process rows(first 4 unprocessed rows) without asking for user input. Only process rows where Match Type (Column I) is empty. For each row, extract the Post Link, fetch Reddit data, apply GTM filtration, generate appropriate responses, and update the sheet. Return the updated Google Sheet link when complete.", "difficulty": 5, "required_tools": {"reddit": ["get_post_comments_details"], "google_sheet": ["update_values", "get_values", "get_spreadsheet_metadata", "batch_get_values_by_range"]}}
+ {"user_input": "Generate a financial flash report for Apple Inc. Research their latest earnings data including revenue, net income, EPS, and year-over-year changes. Create a formatted report with highlights, upcoming events, and summary. Present the report in chat and email it to adit@agentr.dev.", "difficulty": 4, "required_tools": {"exa": ["answer"], "google_mail": ["send_email"]}}
+ {"user_input": "Objective: Find businesses from Google Maps for a given category & location, store them in a Google Sheet, then process unprocessed leads to scrape emails and sync with HubSpot CRM. Stage 1 - Lead Discovery Get coordinates of Area + City. Search on Google Maps with category & coordinates. Extract: Name, Google Maps URL, Address, Phone, Website; leave Email & CRM Status blank. Sheet: Name: {Area}, {City} Leads - {Category} - {dd-mmm} If exists → append non-duplicate rows; else create in folder \"Leads from Google Maps\" (ID: 142QBejJX0jAqzDz_NHdwVTkcmagoog__). Add headers: Name | Google Maps URL | Address | Phone | Website | Email | CRM Status. Populate with businesses found. Edge Cases: No results → return message, skip sheet creation. Missing data → leave blank. Stage 2 - Lead Processing & CRM Sync Locate sheet in Google Drive, ensure headers match. Parse category from sheet name. Identify unprocessed rows (CRM Status blank) — by default process the first, or a specified row/range/count. Scrape Website for Email: If website exists → scrape homepage/contact page; fallback to firecrawl_scrape_url. Save found email in sheet. HubSpot Handling: Search contact by email/website/phone. If not found → create with available details, Lead Status = New, add note {Area, City} — {Category} — {Google Maps URL}. If exists → append note; keep other fields unchanged. Save HubSpot Contact URL/ID in sheet. Update CRM Status: Lead Created, Lead Creation Failed, Website not found, Email not found, etc. Edge Cases: No Website → create with phone; mark Website not found. No Email → create; mark Email not found. Email already in sheet → skip row. Execute immediately for \"Cafes\" near \"IIT Bombay\" in \"Mumbai\" without asking for confirmation.", "difficulty": 5, "required_tools": {"serpapi": ["google_maps_search"], "firecrawl": ["scrape_url"], "google_drive": ["get_file_details", "create_folder", "find_folder_id_by_name", "search_files"], "google_sheet": ["update_values", "get_values", "get_spreadsheet_metadata", "batch_get_values_by_range", "create_spreadsheet", "clear_values"], "hubspot": ["search_contacts_post", "batch_read_contacts_post", "get_contacts", "get_contact_by_id", "update_contact_by_id", "batch_update_contacts", "create_contacts_batch", "create_contact"]}}
+ {"user_input": "Process emails from the last 24 hours. Fetch primary inbox emails excluding replied threads, classify with LLM as Reply Required, No Reply Needed, or Ambiguous. For Reply Required/Ambiguous, draft human, on-brand replies for user review. Follow greeting, acknowledge, address concern, invite further questions, and friendly sign-off. Provide end summary of drafts, skipped, and ambiguous emails. Execute immediately without asking for confirmation. Do not send any emails. Just provide me a report.", "difficulty": 4, "required_tools": {"google_mail": ["list_messages", "get_message_details"]}}
+ {"user_input": "Analyze a contract from my google drive from the perspective of the Service Provider. Use the search to find it, do not ask me any questions, and assume details that I have not provided. Identify potentially unfavorable clauses such as vague terms, one-sided obligations, IP transfer issues, indemnity clauses, termination conditions, and payment problems. Provide a structured analysis with clause numbers, full text, and explanations of concerns.", "difficulty": 4, "required_tools": {"google_drive": ["get_file_details", "search_files"], "google_docs": ["get_document"], "exa": ["answer"]}}
+ {"user_input": "Create a summary of overnight updates from 8:00 PM yesterday to 8:00 AM today in IST. Check Gmail for important emails and ClickUp for mentions and assigned tasks. Organize findings into high priority and other items, then provide a comprehensive summary of all overnight activity.", "difficulty": 4, "required_tools": {"google_mail": ["list_messages"], "clickup": ["comments_get_task_comments", "comments_get_list_comments", "comments_get_view_comments", "tasks_get_list_tasks", "tasks_filter_team_tasks", "time_tracking_get_time_entries_within_date_range", "time_tracking_get_time_entry_history", "authorization_get_workspace_list", "spaces_get_details", "lists_get_list_details"]}}
+ {"user_input": "Analyze the data in Google Sheet (ID: 1nnnCp3_IWcdHv4UVgXtwYF5wedxbqF4RIeyjN6mCKD8) and create 3-5 relevant charts and visualizations. Add pie charts, bar graphs, and other appropriate visualizations based on the data structure. Embed all charts directly into the sheet and provide the updated sheet link.", "difficulty": 4, "required_tools": {"google_sheet": ["create_spreadsheet", "get_spreadsheet_metadata", "batch_get_values_by_range", "append_dimensions", "insert_dimensions", "delete_sheet", "add_sheet", "delete_dimensions", "add_basic_chart", "add_table", "add_pie_chart", "clear_values", "update_values", "clear_basic_filter", "get_values", "discover_tables", "set_basic_filter", "analyze_table_schema", "copy_sheet_to_spreadsheet", "append_values", "batch_get_values_by_data_filter", "batch_clear_values", "format_cells"]}}
+ {"user_input": "Create a 7-day learning plan for Python Programming. Research essential concepts and skills, create a detailed day-by-day plan with topics, goals, resources, and exercises. Compile the plan into a Google Doc and schedule daily emails at 8 AM starting today. Send Day 1 immediately to adit@agentr.dev and provide the Google Doc link.", "difficulty": 5, "required_tools": {"google_docs": ["get_document", "create_document", "insert_text"], "google_mail": ["send_email", "send_draft", "create_draft"], "exa": ["answer"]}}
src/evals/evaluators.py
@@ -7,6 +7,8 @@ from langsmith.evaluation import EvaluationResult, run_evaluator
  from langsmith.schemas import Example, Run
  from openevals.llm import create_llm_as_judge
 
+ from evals.prompts import CODEACT_EVALUATOR_PROMPT, CORRECTNESS_PROMPT
+
 
  @run_evaluator
  def exact_match_evaluator(run: Run, example: Example | None = None) -> EvaluationResult:
@@ -38,58 +40,6 @@ def exact_match_evaluator(run: Run, example: Example | None = None) -> EvaluationResult:
      return EvaluationResult(key="exact_match", score=score, comment=comment)
 
 
- CORRECTNESS_PROMPT = """You are an expert data labeler evaluating model outputs for correctness. Your task is to assign a score based on the following rubric:
-
- <Rubric>
- A correct answer:
- - Provides accurate and complete information
- - Contains no factual errors
- - Addresses all parts of the question
- - Is logically consistent
- - Uses precise and accurate terminology
-
- When scoring, you should penalize:
- - Factual errors or inaccuracies
- - Incomplete or partial answers
- - Misleading or ambiguous statements
- - Incorrect terminology
- - Logical inconsistencies
- - Missing key information
-
- Ignore the following:
- - If the answer is not in the same language as the question.
- - use the specifically requested tool, as the tool name can be different
- - Do not penalize for incorrect third party data coming from the tool.
- </Rubric>
-
- <Instructions>
- - Carefully read the input and output
- - Check for factual accuracy and completeness
- - Focus on correctness of information rather than style or verbosity
- - If the user tool is not authorized, give a partial credit of `0.5`
- - Give partial credit if tools and called correctly, but the data is incorrect from tools.
- </Instructions>
-
- <Reminder>
- The goal is to evaluate factual correctness and completeness of the response.
- </Reminder>
-
- <input>
- {inputs}
- </input>
-
- <output>
- {outputs}
- </output>
-
- Use the reference outputs below to help you evaluate the correctness of the response:
-
- <reference_outputs>
- {reference_outputs}
- </reference_outputs>
- """
-
-
  correctness_evaluator = create_llm_as_judge(
      prompt=CORRECTNESS_PROMPT,
      feedback_key="correctness",
@@ -103,8 +53,8 @@ trajectory_evaluator = create_trajectory_llm_as_judge(
  )
 
 
- codeact_evaluator = create_trajectory_llm_as_judge(
-     prompt=TRAJECTORY_ACCURACY_PROMPT,
+ codeact_evaluator = create_llm_as_judge(
+     prompt=CODEACT_EVALUATOR_PROMPT,
      feedback_key="codeact_accuracy",
      model="anthropic:claude-4-sonnet-20250514",
  )
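openevals' create_llm_as_judge returns a plain callable, so the evaluators above can be invoked directly. A minimal sketch with placeholder values (the exact result shape is approximate):

    result = correctness_evaluator(
        inputs={"user_input": "Send an email to manoj@agentr.dev"},
        outputs={"output": "Email sent."},
        reference_outputs={"output": "Email sent successfully."},
    )
    # -> roughly {"key": "correctness", "score": ..., "comment": "..."}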
src/evals/prompts.py
@@ -0,0 +1,66 @@
+ CORRECTNESS_PROMPT = """You are an expert data labeler evaluating model outputs for correctness. Your task is to assign a score based on the following rubric:
+
+ <Rubric>
+ A correct answer:
+ - Provides accurate and complete information
+ - Contains no factual errors
+ - Addresses all parts of the question
+ - Is logically consistent
+ - Uses precise and accurate terminology
+
+ When scoring, you should penalize:
+ - Factual errors or inaccuracies
+ - Incomplete or partial answers
+ - Misleading or ambiguous statements
+ - Incorrect terminology
+ - Logical inconsistencies
+ - Missing key information
+
+ Ignore the following:
+ - If the answer is not in the same language as the question.
+ - use the specifically requested tool, as the tool name can be different
+ - Do not penalize for incorrect third party data coming from the tool.
+ </Rubric>
+
+ <Instructions>
+ - Carefully read the input and output
+ - Check for factual accuracy and completeness
+ - Focus on correctness of information rather than style or verbosity
+ - If the user tool is not authorized, give a partial credit of `0.5`
+ - Give partial credit if tools and called correctly, but the data is incorrect from tools.
+ </Instructions>
+
+ <Reminder>
+ The goal is to evaluate factual correctness and completeness of the response.
+ </Reminder>
+
+ <input>
+ {inputs}
+ </input>
+
+ <output>
+ {outputs}
+ </output>
+
+ Use the reference outputs below to help you evaluate the correctness of the response:
+
+ <reference_outputs>
+ {reference_outputs}
+ </reference_outputs>
+ """
+
+ CODEACT_EVALUATOR_PROMPT = """
+ You are a code execution evaluator. You will be given the entire run of an agent, starting with a human input task, the intermediate steps taken, and the final output of the agent given to the user. These steps will contain code written by the agent to solve the problem as well as its outputs. Your job is to check ONLY if the code executes correctly.
+ Keep in mind that the agent has access to tools like- ai_classify, call_llm, creative_writer, data_extractor. These calls are to be treated as valid if they run without errors.
+ These are the only criteria you should evaluate-
+
+ <Rubric>
+ - The code written by the agent in tool calls should be syntactically correct and use existing objects.
+ - The code outputs should not have an error or empty/unexpected outputs
+ </Rubric>
+ If either of the above are not satisfied, you should give 0.
+
+ <Reminder>
+ You must not judge whether the code is helpful to the task or not, only if the code itself is correct or not.
+ </Reminder>
+ """
src/evals/run.py
@@ -11,6 +11,7 @@ from universal_mcp.agentr.registry import AgentrRegistry
 
  from evals.dataset import load_dataset
  from evals.evaluators import (
+     codeact_evaluator,
      correctness_evaluator,
      exact_match_evaluator,
      tool_node_evaluator,
@@ -62,12 +63,12 @@ async def agent_runner(agent_name: str, inputs: dict) -> dict:
      registry = AgentrRegistry(client=client) if agent_name != "simple" else None
      common_params = {
          "instructions": f"You are a helpful assistant. Keep your responses short and concise. Do not provide with any explanation. The current date and time is {current_date_time}",
-         "model": "anthropic/claude-4-sonnet-20250514",
+         "model": "azure/gpt-4.1",
          "registry": registry,
          "tools": inputs.get("tools", {}),
      }
      agent = get_agent(agent_name)(name=agent_name, **common_params)
-     result = await agent.invoke(user_input=inputs["user_input"])
+     result = await agent.invoke(user_input=inputs["user_input"], thread_id="evals")
      messages = messages_to_list(result["messages"])
      return_result = {"output": messages}
      if "tool_config" in result:
src/tests/test_agents.py
@@ -145,6 +145,7 @@ class MockToolRegistry(ToolRegistry):
          self,
          query: str,
          limit: int = 10,
+         distance_threshold: float = 0.7,
      ) -> list[dict[str, Any]]:
          """
          Search for apps by a query.
@@ -167,6 +168,7 @@ class MockToolRegistry(ToolRegistry):
          query: str,
          limit: int = 10,
          app_id: str | None = None,
+         distance_threshold: float = 0.8,
      ) -> list[dict[str, Any]]:
          """
          Search for tools by a query.
@@ -175,11 +177,23 @@ class MockToolRegistry(ToolRegistry):
          brittle keyword search.
          """
          if not app_id:
-             return []
-
-         # Return all tools for the given app, letting the LLM choose.
+             # General search
+             all_tools = []
+             for current_app_id, tools in self._tools.items():
+                 for tool in tools:
+                     tool_with_app_id = tool.copy()
+                     tool_with_app_id["id"] = f"{current_app_id}__{tool['name']}"
+                     all_tools.append(tool_with_app_id)
+             return all_tools[:limit]
+
+         # App-specific search
          all_app_tools = self._tools.get(app_id, [])
-         return all_app_tools[:limit]
+         tools_with_app_id = []
+         for tool in all_app_tools:
+             tool_with_app_id = tool.copy()
+             tool_with_app_id["id"] = f"{app_id}__{tool['name']}"
+             tools_with_app_id.append(tool_with_app_id)
+         return tools_with_app_id[:limit]
 
      async def export_tools(
          self,
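Illustrative behavior of the updated mock (a sketch; it would run inside an async test, and results depend on the tools the mock is seeded with): omitting app_id now performs a cross-app search instead of returning an empty list, and every result carries a namespaced id.

    registry = MockToolRegistry()
    all_matches = await registry.search_tools("send an email")              # no app_id: searches every app
    gmail_only = await registry.search_tools("send", app_id="google_mail")  # scoped to one app
    # Returned dicts now include "id" = "<app_id>__<tool_name>", e.g. "google_mail__send_email".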
@@ -217,17 +231,6 @@ class TestToolFinderGraph:
      def registry(self):
          return MockToolRegistry()
 
-     def _get_tool_config_from_plan(self, plan: list[dict[str, Any]]) -> dict[str, list[str]]:
-         """
-         Helper function to convert a consolidated execution plan to a tool_config dict.
-         MODIFIED: This now correctly handles the already-consolidated plan from the graph.
-         """
-         if not plan:
-             return {}
-
-         config = {step["app_id"]: step["tool_ids"] for step in plan if step.get("app_id") and step.get("tool_ids")}
-         return config
-
      @pytest.mark.asyncio
      async def test_simple_case(self, llm, registry):
          """Test Case 1: Simple task requiring a single app and tool."""
@@ -237,9 +240,7 @@ class TestToolFinderGraph:
              {"original_task": task, "messages": [HumanMessage(content=task)], "decomposition_attempts": 0}
          )
 
-         plan = final_state.get("execution_plan")
-
-         tool_config = self._get_tool_config_from_plan(plan)
+         tool_config = final_state.get("execution_plan")
 
          # FIX: Assert against the correct, hyphenated app ID.
          assert "google_mail" in tool_config
@@ -254,10 +255,8 @@ class TestToolFinderGraph:
              {"original_task": task, "messages": [HumanMessage(content=task)], "decomposition_attempts": 0}
          )
 
-         plan = final_state.get("execution_plan")
-         assert plan, "Execution plan should not be empty"
-
-         tool_config = self._get_tool_config_from_plan(plan)
+         tool_config = final_state.get("execution_plan")
+         assert tool_config, "Execution plan should not be empty"
 
          assert "github" in tool_config
          assert "create_issue" in tool_config["github"]
@@ -275,7 +274,7 @@ class TestToolFinderGraph:
          plan = final_state.get("execution_plan")
          assert not plan
          last_message = final_state.get("messages", [])[-1].content
-         assert "unable to create a complete plan" in last_message.lower()
+         assert "could not create a final plan" in last_message.lower()
 
 
  @pytest.mark.parametrize(
@@ -312,19 +311,20 @@ class TestAgents:
          await agent.ainit()
          # Invoke the agent graph to get the final state
          final_state = await agent.invoke(
-             user_input=task,
+             user_input={"userInput": task} if agent.name == "Test builder" else task,
              thread_id=thread_id,
          )
 
          # Extract the content of the last message
-         final_messages = final_state.get("messages", [])
-         assert final_messages, "The agent should have produced at least one message."
-         last_message = final_messages[-1]
+         if agent.name != "Test builder":
+             final_messages = final_state.get("messages", [])
+             assert final_messages, "The agent should have produced at least one message."
+             last_message = final_messages[-1]
 
-         final_response = last_message.content if hasattr(last_message, "content") else str(last_message)
+             final_response = last_message.content if hasattr(last_message, "content") else str(last_message)
 
-         assert final_response is not None, "The final response should not be None."
-         assert final_response != "", "The final response should not be an empty string."
+             assert final_response is not None, "The final response should not be None."
+             assert final_response != "", "The final response should not be an empty string."
 
 
  class TestAgentBuilder:
@@ -344,8 +344,9 @@ class TestAgentBuilder:
      async def test_create_agent(self, agent_builder: BuilderAgent):
          """Test case for creating an agent with the builder."""
          task = "Send a daily email to manoj@agentr.dev with daily agenda of the day"
+         thread_id = "test-thread-create-agent"
 
-         result = await agent_builder.invoke(task)
+         result = await agent_builder.invoke(thread_id=thread_id, user_input={"userInput": task})
 
          assert "generated_agent" in result
          generated_agent = result["generated_agent"]