fleet-python 0.2.80__tar.gz → 0.2.81__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fleet_python-0.2.80/fleet_python.egg-info → fleet_python-0.2.81}/PKG-INFO +4 -1
- fleet_python-0.2.81/fleet/agent/__init__.py +32 -0
- fleet_python-0.2.81/fleet/agent/gemini_cua/__init__.py +10 -0
- fleet_python-0.2.81/fleet/agent/gemini_cua/agent.py +405 -0
- fleet_python-0.2.81/fleet/agent/gemini_cua/mcp_server.py +520 -0
- fleet_python-0.2.81/fleet/agent/orchestrator.py +481 -0
- fleet_python-0.2.81/fleet/agent/types.py +48 -0
- fleet_python-0.2.81/fleet/agent/utils.py +34 -0
- fleet_python-0.2.81/fleet/cli.py +893 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81/fleet_python.egg-info}/PKG-INFO +4 -1
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet_python.egg-info/SOURCES.txt +7 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet_python.egg-info/requires.txt +4 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/pyproject.toml +5 -1
- fleet_python-0.2.80/fleet/cli.py +0 -354
- {fleet_python-0.2.80 → fleet_python-0.2.81}/LICENSE +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/README.md +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/examples/diff_example.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/examples/dsl_example.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/examples/example.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/examples/exampleResume.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/examples/example_account.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/examples/example_action_log.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/examples/example_client.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/examples/example_mcp_anthropic.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/examples/example_mcp_openai.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/examples/example_sync.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/examples/example_task.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/examples/example_tasks.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/examples/example_verifier.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/examples/export_tasks.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/examples/fetch_tasks.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/examples/gemini_example.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/examples/import_tasks.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/examples/iterate_verifiers.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/examples/json_tasks_example.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/examples/nova_act_example.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/examples/openai_example.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/examples/openai_simple_example.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/examples/query_builder_example.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/examples/quickstart.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/examples/test_cdp_logging.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/__init__.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/_async/__init__.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/_async/base.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/_async/client.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/_async/env/__init__.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/_async/env/client.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/_async/exceptions.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/_async/global_client.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/_async/instance/__init__.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/_async/instance/base.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/_async/instance/client.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/_async/models.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/_async/resources/__init__.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/_async/resources/base.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/_async/resources/browser.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/_async/resources/mcp.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/_async/resources/sqlite.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/_async/tasks.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/_async/verifiers/__init__.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/_async/verifiers/bundler.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/_async/verifiers/verifier.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/base.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/client.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/config.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/env/__init__.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/env/client.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/exceptions.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/global_client.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/instance/__init__.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/instance/base.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/instance/client.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/instance/models.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/models.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/resources/__init__.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/resources/base.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/resources/browser.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/resources/mcp.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/resources/sqlite.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/tasks.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/types.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/verifiers/__init__.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/verifiers/bundler.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/verifiers/code.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/verifiers/db.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/verifiers/decorator.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/verifiers/parse.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/verifiers/sql_differ.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet/verifiers/verifier.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet_python.egg-info/dependency_links.txt +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet_python.egg-info/entry_points.txt +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/fleet_python.egg-info/top_level.txt +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/scripts/fix_sync_imports.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/scripts/unasync.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/setup.cfg +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/tests/__init__.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/tests/test_app_method.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/tests/test_expect_only.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/tests/test_instance_dispatch.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/tests/test_sqlite_resource_dual_mode.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/tests/test_sqlite_shared_memory_behavior.py +0 -0
- {fleet_python-0.2.80 → fleet_python-0.2.81}/tests/test_verifier_from_string.py +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: fleet-python
|
|
3
|
-
Version: 0.2.80
|
|
3
|
+
Version: 0.2.81
|
|
4
4
|
Summary: Python SDK for Fleet environments
|
|
5
5
|
Author-email: Fleet AI <nic@fleet.so>
|
|
6
6
|
License: Apache-2.0
|
|
@@ -42,6 +42,9 @@ Requires-Dist: typer>=0.9.0; extra == "dev"
|
|
|
42
42
|
Requires-Dist: rich>=10.0.0; extra == "dev"
|
|
43
43
|
Provides-Extra: playwright
|
|
44
44
|
Requires-Dist: playwright>=1.40.0; extra == "playwright"
|
|
45
|
+
Provides-Extra: eval
|
|
46
|
+
Requires-Dist: aiohttp>=3.9.0; extra == "eval"
|
|
47
|
+
Requires-Dist: google-genai>=1.0.0; extra == "eval"
|
|
45
48
|
Dynamic: license-file
|
|
46
49
|
|
|
47
50
|
# Fleet SDK
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
"""Fleet Agent - Run agents locally with Docker-based browser control.
|
|
2
|
+
|
|
3
|
+
Usage:
|
|
4
|
+
# Via CLI
|
|
5
|
+
flt eval run -p my-project -m google/gemini-2.5-pro --local gemini_cua
|
|
6
|
+
|
|
7
|
+
# Via Python
|
|
8
|
+
from fleet.agent import run_agent
|
|
9
|
+
|
|
10
|
+
results = await run_agent(
|
|
11
|
+
project_key="my-project",
|
|
12
|
+
agent="gemini_cua",
|
|
13
|
+
api_keys={"GEMINI_API_KEY": "xxx"},
|
|
14
|
+
)
|
|
15
|
+
"""
|
|
16
|
+
|
|
17
|
+
from .types import AgentConfig, AgentResult, TaskResult
|
|
18
|
+
from .utils import get_agent_path, AGENT_DIR
|
|
19
|
+
|
|
20
|
+
# Import these last to avoid circular imports
|
|
21
|
+
from .orchestrator import run_agent, AgentOrchestrator
|
|
22
|
+
|
|
23
|
+
__all__ = [
|
|
24
|
+
"AgentConfig",
|
|
25
|
+
"AgentResult",
|
|
26
|
+
"TaskResult",
|
|
27
|
+
"run_agent",
|
|
28
|
+
"AgentOrchestrator",
|
|
29
|
+
"get_agent_path",
|
|
30
|
+
"AGENT_DIR",
|
|
31
|
+
]
|
|
32
|
+
|
|
@@ -0,0 +1,405 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Gemini CUA Agent
|
|
4
|
+
|
|
5
|
+
Env vars:
|
|
6
|
+
GEMINI_API_KEY: API key
|
|
7
|
+
FLEET_MCP_URL: CUA server URL (http://localhost:PORT)
|
|
8
|
+
FLEET_TASK_PROMPT: Task prompt
|
|
9
|
+
FLEET_TASK_KEY: Task key
|
|
10
|
+
FLEET_MODEL: Model (default: gemini-2.5-pro)
|
|
11
|
+
FLEET_MAX_STEPS: Max steps (default: 50)
|
|
12
|
+
FLEET_VERBOSE: Enable verbose logging (default: false)
|
|
13
|
+
USE_OAUTH: Use gcloud OAuth instead of API key (default: false)
|
|
14
|
+
GOOG_PROJECT: Google Cloud project for OAuth (default: gemini-agents-area)
|
|
15
|
+
"""
|
|
16
|
+
|
|
17
|
+
import asyncio
|
|
18
|
+
import json
|
|
19
|
+
import os
|
|
20
|
+
import subprocess
|
|
21
|
+
import sys
|
|
22
|
+
import time
|
|
23
|
+
from typing import Any, Dict, List, Optional
|
|
24
|
+
|
|
25
|
+
# Verbose logging flag
|
|
26
|
+
VERBOSE = os.environ.get("FLEET_VERBOSE", "false").lower() in ("true", "1", "yes")
|
|
27
|
+
|
|
28
|
+
def log_verbose(*args, **kwargs):
    """Forward the arguments to print(), but only when FLEET_VERBOSE is on."""
    if not VERBOSE:
        return
    print(*args, **kwargs)
|
|
32
|
+
|
|
33
|
+
try:
|
|
34
|
+
from mcp import ClientSession
|
|
35
|
+
from mcp.client.streamable_http import streamablehttp_client
|
|
36
|
+
except ImportError:
|
|
37
|
+
print(json.dumps({"completed": False, "error": "Missing mcp. Run: pip install mcp"}))
|
|
38
|
+
sys.exit(1)
|
|
39
|
+
|
|
40
|
+
try:
|
|
41
|
+
from google import genai
|
|
42
|
+
from google.genai import types
|
|
43
|
+
except ImportError:
|
|
44
|
+
print(json.dumps({"completed": False, "error": "Missing google-genai. Run: pip install google-genai"}))
|
|
45
|
+
sys.exit(1)
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
# OAuth configuration
|
|
49
|
+
GOOG_PROJECT = os.environ.get("GOOG_PROJECT", "gemini-agents-area")
|
|
50
|
+
USE_OAUTH = os.environ.get("USE_OAUTH", "false").lower() in ("true", "1", "yes")
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
def get_oauth_token() -> str:
    """Fetch an application-default access token from the gcloud CLI.

    Raises:
        subprocess.CalledProcessError: if the gcloud command exits non-zero.
    """
    proc = subprocess.run(
        ["gcloud", "auth", "application-default", "print-access-token"],
        capture_output=True,
        check=True,
    )
    token = proc.stdout.decode().strip()
    return token
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
def get_gemini_client() -> genai.Client:
    """Create a Gemini client, authenticating via gcloud OAuth or an API key.

    Selection is driven by the module-level USE_OAUTH flag; the API key (if
    any) is read from the GEMINI_API_KEY environment variable either way.
    """
    api_key = os.environ.get("GEMINI_API_KEY")

    if not USE_OAUTH:
        log_verbose("Using API key authentication")
        return genai.Client(api_key=api_key)

    log_verbose(f"Using OAuth authentication (project: {GOOG_PROJECT})")
    auth_headers = {
        "Authorization": "Bearer " + get_oauth_token(),
        "X-Goog-User-Project": GOOG_PROJECT,
    }
    return genai.Client(
        api_key=api_key,
        http_options=types.HttpOptions(
            headers=auth_headers,
            api_version="v1alpha",
        ),
    )
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
class MCP:
    """MCP client speaking the streamable-http transport.

    Intended for use as an async context manager: entering connects,
    initializes the session, and caches the server's tool list; exiting
    tears the session and transport down in reverse order.
    """

    def __init__(self, url: str):
        # streamable-http endpoints are served under a trailing /mcp/ path.
        self.url = url.rstrip("/") + "/mcp/"
        self._session: Optional[ClientSession] = None
        self._client = None
        self._tools: List[Dict] = []

    async def __aenter__(self):
        # Bring up the transport, then layer the MCP session on top.
        self._client = streamablehttp_client(self.url)
        read_stream, write_stream, _ = await self._client.__aenter__()
        self._session = ClientSession(read_stream, write_stream)
        await self._session.__aenter__()
        await self._session.initialize()

        # Cache the server's advertised tools as plain dicts.
        listing = await self._session.list_tools()
        self._tools = []
        for tool in listing.tools:
            self._tools.append({
                "name": tool.name,
                "description": tool.description or "",
                "inputSchema": tool.inputSchema,
            })
        return self

    async def __aexit__(self, *args):
        # Release in reverse acquisition order: session first, transport last.
        if self._session:
            await self._session.__aexit__(*args)
        if self._client:
            await self._client.__aexit__(*args)

    async def call(self, name: str, args: Dict = None) -> Dict:
        """Invoke a server tool and normalize its output to plain dicts."""
        raw = await self._session.call_tool(name, args or {})
        # Convert MCP content items into the dict shape the agent expects.
        content: List[Dict] = []
        for item in raw.content:
            if not hasattr(item, "type"):
                continue
            if item.type == "image":
                content.append({
                    "type": "image",
                    "data": item.data,
                    "mimeType": getattr(item, "mimeType", "image/png"),
                })
            elif item.type == "text":
                content.append({"type": "text", "text": item.text})
        return {"content": content, "isError": raw.isError if hasattr(raw, "isError") else False}

    def get_tools(self) -> List[Dict]:
        """Return the tool list cached during __aenter__."""
        return self._tools
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
def mcp_tools_to_gemini(mcp_tools: List[Dict]) -> List[types.FunctionDeclaration]:
    """Translate MCP tool specs into Gemini FunctionDeclaration objects.

    Missing descriptions become "", and a missing input schema becomes an
    empty object schema so the declaration is always well-formed.
    """
    return [
        types.FunctionDeclaration(
            name=spec["name"],
            description=spec.get("description", ""),
            parameters=spec.get("inputSchema", {"type": "object", "properties": {}}),
        )
        for spec in mcp_tools
    ]
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
def get_image_data(result: Dict) -> Optional[str]:
    """Return the first base64 image payload in an MCP result, or None."""
    return next(
        (
            part.get("data")
            for part in result.get("content", [])
            if part.get("type") == "image"
        ),
        None,
    )
|
|
161
|
+
|
|
162
|
+
|
|
163
|
+
class GeminiAgent:
    """Gemini Computer Use Agent.

    Drives a browser through MCP tools: each loop iteration sends the
    conversation history to Gemini, executes any returned function calls
    against the MCP server, and feeds the results (screenshots included)
    back into the history.
    """

    def __init__(self, mcp: MCP, model: str):
        # Connected MCP client providing the browser-control tools.
        self.mcp = mcp
        # Strip provider prefix if present (e.g. "google/gemini-2.5-pro").
        self.model = model.split("/")[-1] if "/" in model else model
        self.client = get_gemini_client()
        # Flat record of the run: user / assistant / tool_call entries.
        self.transcript: List[Dict] = []

    async def _execute_tool(self, name: str, args: Dict) -> Dict:
        # Delegate to the MCP client; result is the normalized dict from MCP.call.
        return await self.mcp.call(name, args)

    async def run(self, prompt: str, max_steps: int) -> Dict[str, Any]:
        """Run the agent on a task.

        Args:
            prompt: Natural-language task instruction for the model.
            max_steps: Hard cap on model round-trips before giving up.

        Returns:
            Result dict from _result(): completed flag, error, final answer,
            steps taken, wall-clock time in ms, and the transcript.
        """
        start_time = time.time()

        system_prompt = f"""You control a browser via tools.

STRICT RULES:
- Text output with no tool calls means task complete. Only output text when fully done.
- When finished: output only "DONE: [what you did]"
"""

        # Get tools from MCP server and convert to Gemini format
        mcp_tools = self.mcp.get_tools()
        gemini_tools = mcp_tools_to_gemini(mcp_tools)

        # Log system prompt and tools
        log_verbose("\n" + "="*60)
        log_verbose("SYSTEM PROMPT:")
        log_verbose("="*60)
        log_verbose(system_prompt)

        log_verbose("\n" + "="*60)
        log_verbose(f"TOOLS ({len(mcp_tools)} total):")
        log_verbose("="*60)
        for tool in mcp_tools:
            log_verbose(f"\n {tool['name']}:")
            log_verbose(f" Description: {tool.get('description', '')[:200]}")
            schema = tool.get('inputSchema', {})
            props = schema.get('properties', {})
            if props:
                log_verbose(f" Parameters:")
                for pname, pinfo in props.items():
                    ptype = pinfo.get('type', 'any')
                    pdesc = pinfo.get('description', '')[:80]
                    log_verbose(f" - {pname} ({ptype}): {pdesc}")

        config = types.GenerateContentConfig(
            max_output_tokens=4096,
            system_instruction=system_prompt,
            tools=[types.Tool(function_declarations=gemini_tools)],
        )

        history: List[types.Content] = []

        user_prompt = f"""###User instruction: {prompt}"""
        history.append(types.Content(role="user", parts=[types.Part(text=user_prompt)]))
        self.transcript.append({"role": "user", "content": prompt})

        log_verbose("\n" + "="*60)
        log_verbose("USER PROMPT:")
        log_verbose("="*60)
        log_verbose(user_prompt)

        for step in range(1, max_steps + 1):
            print(f"\n{'='*50}")
            print(f"Step {step}/{max_steps}")

            # Log history size
            log_verbose(f" History: {len(history)} messages")

            try:
                response = self.client.models.generate_content(
                    model=self.model,
                    contents=history,
                    config=config,
                )
            except Exception as e:
                # API failure ends the run; the error surfaces in the result.
                print(f"API error: {e}")
                return self._result(False, str(e), step, start_time)

            # NOTE(review): both `continue`s below retry with unchanged
            # history, so the step counter still advances toward max_steps.
            if not response.candidates:
                print("[WARN] No candidates, retrying...")
                log_verbose(f" Response: {response}")
                continue

            candidate = response.candidates[0]
            if not candidate.content or not candidate.content.parts:
                print("[WARN] Empty response, retrying...")
                log_verbose(f" Candidate: {candidate}")
                continue

            # Log all parts for debugging
            log_verbose(f"\n Response parts ({len(candidate.content.parts)}):")
            for i, part in enumerate(candidate.content.parts):
                if part.text:
                    log_verbose(f" [{i}] TEXT: {part.text[:300]}{'...' if len(part.text) > 300 else ''}")
                elif part.function_call:
                    fc = part.function_call
                    args_str = json.dumps(dict(fc.args) if fc.args else {})
                    log_verbose(f" [{i}] FUNCTION_CALL: {fc.name}({args_str})")
                elif hasattr(part, 'thought') and part.thought:
                    log_verbose(f" [{i}] THOUGHT: {part.thought[:300]}{'...' if len(part.thought) > 300 else ''}")
                else:
                    log_verbose(f" [{i}] OTHER: {type(part).__name__}")

            # Extract function calls and text
            function_calls = [p.function_call for p in candidate.content.parts if p.function_call]
            text_parts = [p.text for p in candidate.content.parts if p.text]

            # Print model output
            if text_parts:
                for text in text_parts:
                    display = text[:200] + "..." if len(text) > 200 else text
                    print(f"Model: {display}")

            # Check for completion: per the system prompt, text with no tool
            # calls signals that the task is finished.
            if text_parts and not function_calls:
                final_text = " ".join(text_parts)
                self.transcript.append({"role": "assistant", "content": final_text})

                if final_text.strip().upper().startswith("DONE:"):
                    answer = final_text.strip()[5:].strip()  # drop "DONE:" prefix
                    print(f"\n✓ Agent completed: {answer[:100]}")
                    return self._result(True, None, step, start_time, answer)
                elif final_text.strip().upper().startswith("FAILED:"):
                    error = final_text.strip()[7:].strip()  # drop "FAILED:" prefix
                    print(f"\n✗ Agent failed: {error[:100]}")
                    return self._result(False, error, step, start_time)
                else:
                    # Text without DONE/FAILED - treat as completion
                    print(f"\n✓ Agent finished with response")
                    return self._result(True, None, step, start_time, final_text)

            if function_calls:
                # Add model's response to history
                history.append(candidate.content)

                log_verbose(f"\n Executing {len(function_calls)} function call(s):")

                # Execute each function call in series with delays
                response_parts = []
                for i, fc in enumerate(function_calls):
                    name = fc.name
                    args = dict(fc.args) if fc.args else {}
                    print(f" Tool {i+1}/{len(function_calls)}: {name}({json.dumps(args)})")
                    self.transcript.append({"role": "tool_call", "name": name, "args": args})

                    try:
                        result = await self._execute_tool(name, args)
                        log_verbose(f" Result: isError={result.get('isError', False)}, content_types={[c.get('type') for c in result.get('content', [])]}")
                    except Exception as e:
                        # Tool failure is reported back to the model, not fatal.
                        print(f" Error: {e}")
                        log_verbose(f" Exception: {type(e).__name__}: {e}")
                        result = {"content": [{"type": "text", "text": str(e)}], "isError": True}

                    # Build function response with image embedded (per reference format)
                    img_data = get_image_data(result)  # Base64 string

                    if img_data:
                        log_verbose(f" Response: image (base64 len={len(img_data)})")
                        # Function response with image in parts
                        fr_part = types.Part(
                            function_response=types.FunctionResponse(
                                name=name,
                                response={"status": "success" if not result.get("isError") else "error"},
                                parts=[
                                    types.FunctionResponsePart(
                                        inline_data=types.FunctionResponseBlob(
                                            mime_type="image/png",
                                            data=img_data,  # Base64 string
                                        )
                                    )
                                ],
                            )
                        )
                    else:
                        log_verbose(f" Response: no image (status only)")
                        # Function response without image
                        fr_part = types.Part(
                            function_response=types.FunctionResponse(
                                name=name,
                                response={"status": "error" if result.get("isError") else "success"},
                            )
                        )
                    response_parts.append(fr_part)

                    # Small delay between tool calls to let page settle
                    if i < len(function_calls) - 1:
                        await asyncio.sleep(0.1)

                # Add function responses with role="model" (per reference)
                history.append(types.Content(role="model", parts=response_parts))
                log_verbose(f" Added {len(response_parts)} function response(s) to history")

        return self._result(False, "Max steps reached", max_steps, start_time)

    def _result(self, completed: bool, error: Optional[str], steps: int, start_time: float, answer: Optional[str] = None) -> Dict:
        """Build result dict.

        Args:
            completed: Whether the task finished successfully.
            error: Error description when not completed (None on success).
            steps: Number of model round-trips consumed.
            start_time: time.time() captured at the start of run().
            answer: Final answer text, when the model produced one.
        """
        return {
            "completed": completed,
            "error": error,
            "final_answer": answer,
            "steps_taken": steps,
            "execution_time_ms": int((time.time() - start_time) * 1000),
            "transcript": self.transcript,
        }
|
|
372
|
+
|
|
373
|
+
|
|
374
|
+
async def main():
    """Entry point: read env config, run the agent over MCP, print a JSON result."""
    cfg = {
        "url": os.environ.get("FLEET_MCP_URL", "http://localhost:8765"),
        "prompt": os.environ.get("FLEET_TASK_PROMPT", ""),
        "task_key": os.environ.get("FLEET_TASK_KEY", ""),
        "model": os.environ.get("FLEET_MODEL", "gemini-2.5-pro"),
        "max_steps": int(os.environ.get("FLEET_MAX_STEPS", "50")),
    }

    # Banner with the effective configuration.
    print(f"Gemini CUA Agent")
    print(f" Model: {cfg['model']}")
    print(f" MCP: {cfg['url']}")
    print(f" Verbose: {VERBOSE}")
    print(f" Task: {cfg['prompt'][:80]}...")

    # Bail out early (still emitting a JSON result) when no key is configured.
    if not os.environ.get("GEMINI_API_KEY"):
        result = {"task_key": cfg["task_key"], "completed": False, "error": "No GEMINI_API_KEY"}
        print(json.dumps(result))
        return result

    async with MCP(cfg["url"]) as mcp:
        agent = GeminiAgent(mcp, cfg["model"])
        result = await agent.run(cfg["prompt"], cfg["max_steps"])
        result["task_key"] = cfg["task_key"]
        print(json.dumps(result))
        return result
|
|
401
|
+
|
|
402
|
+
|
|
403
|
+
if __name__ == "__main__":
|
|
404
|
+
result = asyncio.run(main())
|
|
405
|
+
sys.exit(0 if result.get("completed") else 1)
|