reasoning-deployment-service 0.2.8 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of reasoning-deployment-service might be problematic.
- examples/programmatic_usage.py +154 -0
- reasoning_deployment_service/__init__.py +25 -0
- reasoning_deployment_service/cli_editor/__init__.py +5 -0
- reasoning_deployment_service/cli_editor/api_client.py +666 -0
- reasoning_deployment_service/cli_editor/cli_runner.py +343 -0
- reasoning_deployment_service/cli_editor/config.py +82 -0
- reasoning_deployment_service/cli_editor/google_deps.py +29 -0
- reasoning_deployment_service/cli_editor/reasoning_engine_creator.py +448 -0
- reasoning_deployment_service/gui_editor/__init__.py +5 -0
- reasoning_deployment_service/gui_editor/main.py +280 -0
- reasoning_deployment_service/gui_editor/requirements_minimal.txt +54 -0
- reasoning_deployment_service/gui_editor/run_program.sh +55 -0
- reasoning_deployment_service/gui_editor/src/__init__.py +1 -0
- reasoning_deployment_service/gui_editor/src/core/__init__.py +1 -0
- reasoning_deployment_service/gui_editor/src/core/api_client.py +647 -0
- reasoning_deployment_service/gui_editor/src/core/config.py +43 -0
- reasoning_deployment_service/gui_editor/src/core/google_deps.py +22 -0
- reasoning_deployment_service/gui_editor/src/core/reasoning_engine_creator.py +448 -0
- reasoning_deployment_service/gui_editor/src/ui/__init__.py +1 -0
- reasoning_deployment_service/gui_editor/src/ui/agent_space_view.py +312 -0
- reasoning_deployment_service/gui_editor/src/ui/authorization_view.py +280 -0
- reasoning_deployment_service/gui_editor/src/ui/reasoning_engine_view.py +354 -0
- reasoning_deployment_service/gui_editor/src/ui/reasoning_engines_view.py +204 -0
- reasoning_deployment_service/gui_editor/src/ui/ui_components.py +1221 -0
- reasoning_deployment_service/reasoning_deployment_service.py +687 -0
- reasoning_deployment_service-0.2.8.dist-info/METADATA +177 -0
- reasoning_deployment_service-0.2.8.dist-info/RECORD +29 -0
- reasoning_deployment_service-0.2.8.dist-info/WHEEL +5 -0
- reasoning_deployment_service-0.2.8.dist-info/top_level.txt +2 -0
@@ -0,0 +1,647 @@
+"""API client for Google Cloud Agent Space and Reasoning Engine operations."""
+import json
+import uuid
+import time
+import subprocess
+import sys, importlib, importlib.util
+from typing import Optional, Dict, Any, List, Tuple
+from pprint import pprint
+from pathlib import Path
+
+from .google_deps import (
+    HAS_GOOGLE, google, GoogleAuthRequest,
+    vertexai, agent_engines
+)
+from .reasoning_engine_creator import ReasoningEngineCreator
+
+BASE_URL = "https://discoveryengine.googleapis.com/v1alpha"
+
+
+# --- helpers for clean packaging ---
+EXCLUDES = [
+    ".env", ".env.*", ".git", "__pycache__", ".pytest_cache", ".mypy_cache",
+    ".DS_Store", "*.pyc", "*.pyo", "*.pyd", ".venv", "venv", "tests", "docs"
+]
+
+
+class ApiClient:
+    """
+    Single responsibility: hold configuration & credentials and expose API calls.
+    This class has both 'live' and 'mock' modes; the public surface is identical.
+    """
+
+    def __init__(
+        self,
+        project_id: str,
+        project_number: str,
+        location: str,
+        engine_name: str,
+        staging_bucket: str = "",
+        oauth_client_id: str = "",
+        oauth_client_secret: str = "",
+        agent_import: Optional[str] = None,
+        mode: str = "mock",  # "live" or "mock"
+        profile_path: str = "agent_profile.json",
+    ):
+        self.project_id = project_id
+        self.project_number = project_number
+        self.location = location
+        self.engine_name = engine_name
+        self.staging_bucket = staging_bucket
+        self.oauth_client_id = oauth_client_id
+        self.oauth_client_secret = oauth_client_secret
+        self.agent_import = agent_import
+        self.mode = mode
+        self.profile_path = profile_path
+
+        # Local state persisted between runs
+        self._profile: Dict[str, Any] = {
+            "display_name": "Demo Agent" if mode == "mock" else "Your Agent",
+            "description": "Prototype only" if mode == "mock" else "Live Agent",
+            "name": None,  # reasoning engine resource
+            "agent_space_agent_id": None,
+            "requirements": [],
+            "extra_packages": [],
+            "tool_description": "Tooling",
+        }
+        self._agents_cache: List[Dict[str, str]] = []  # mock-only
+        self._loaded_agent = None  # live-only
+
+        # Authentication caching for performance ("live enough")
+        self._auth_cache = None
+        self._auth_cache_time = 0
+        self._auth_cache_duration = 30  # Cache for 30 seconds
+
+        # Performance optimizations
+        self._vertex_inited = False  # Cache Vertex AI initialization
+        self.debug = False  # Set True only when debugging needed
+
+        # Reuse HTTP session to avoid repeated TLS handshakes
+        import requests as _requests
+        self._http = _requests.Session()
+        self._http.headers.update({"Content-Type": "application/json"})
+
+        self._load_profile()
+
+        if self.is_live:
+            if not HAS_GOOGLE:
+                raise RuntimeError("Live mode requested but Google libs not installed.")
+            # Lazy-load actual agent code if provided
+            self._loaded_agent = self._maybe_import_agent(self.agent_import)
+
+    # ---------------- Properties ----------------
+    @property
+    def is_live(self) -> bool:
+        return self.mode == "live"
+
+    @property
+    def profile(self) -> Dict[str, Any]:
+        return self._profile
+
+    @property
+    def is_authenticated(self) -> bool:
+        """Check if we have valid authentication (cached for performance)."""
+        if not self.is_live:
+            return True
+
+        # Use cached result if still fresh (30 seconds)
+        now = time.time()
+        if (self._auth_cache is not None and
+                (now - self._auth_cache_time) < self._auth_cache_duration):
+            return self._auth_cache
+
+        # Check authentication and cache result
+        try:
+            _ = self._access_token()
+            self._auth_cache = True
+        except Exception:
+            self._auth_cache = False
+
+        self._auth_cache_time = now
+        return self._auth_cache
+
+    def refresh_auth_cache(self):
+        """Force refresh of authentication cache."""
+        self._auth_cache = None
+        self._auth_cache_time = 0
+
+    def _ensure_vertex_inited(self):
+        """Initialize Vertex AI once and reuse to avoid repeated heavy init calls."""
+        if not self._vertex_inited:
+            vertexai.init(project=self.project_id, location=self.location, staging_bucket=self.staging_bucket)
+            self._vertex_inited = True
+
+    @property
+    def has_engine(self) -> bool:
+        """Check if we have a reasoning engine."""
+        return bool(self._profile.get("name"))
+
+    @property
+    def has_deployed_agent(self) -> bool:
+        """Check if we have a deployed agent."""
+        return bool(self._profile.get("agent_space_agent_id"))
+
+    # ---------------- Profile Management ----------------
+    def set_auth_name(self, name: str):
+        self._profile["working_auth_name"] = name
+        self._save_profile()
+
+    def _save_profile(self):
+        try:
+            with open(self.profile_path, "w") as f:
+                json.dump(self._profile, f, indent=2)
+        except Exception:
+            pass
+
+    def _load_profile(self):
+        try:
+            with open(self.profile_path, "r") as f:
+                saved = json.load(f)
+            # shallow update only known keys
+            for k in self._profile.keys():
+                if k in saved:
+                    self._profile[k] = saved[k]
+            # also pick up working_auth_name if present
+            if "working_auth_name" in saved:
+                self._profile["working_auth_name"] = saved["working_auth_name"]
+        except Exception:
+            pass
+
+    def _maybe_import_agent(self, mod_attr: Optional[str]):
+        if not mod_attr:
+            return None
+        parts = mod_attr.split(":")
+        if len(parts) != 2:
+            return None
+        mod, attr = parts
+        try:
+            imported = __import__(mod, fromlist=[attr])
+            return getattr(imported, attr, None)
+        except Exception:
+            return None
+
+    # ---------------- Authentication ----------------
+    def _access_token(self) -> str:
+        """Live: fetch ADC access token; raises if not available."""
+        creds, _ = google.auth.default(scopes=["https://www.googleapis.com/auth/cloud-platform"])
+        # Only refresh if needed - avoid network hit on every call
+        if not creds.valid or (creds.expired and creds.refresh_token):
+            creds.refresh(GoogleAuthRequest())
+        return creds.token
+
+    def authenticate(self) -> bool:
+        """
+        For Live: ensure ADC is configured (runs gcloud flow if needed).
+        For Mock: fast True.
+        """
+        if not self.is_live:
+            # Minimal delay to simulate network without causing UI lag
+            time.sleep(0.02)
+            return True
+
+        # If token works, we're good
+        try:
+            _ = self._access_token()
+            return True
+        except Exception:
+            pass
+
+        # Launch gcloud browser flow
+        try:
+            subprocess.run(["gcloud", "--version"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)
+        except Exception:
+            raise RuntimeError("'gcloud' not found on PATH. Install Google Cloud SDK.")
+
+        proc = subprocess.run(
+            ["gcloud", "auth", "application-default", "login"],
+            stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True
+        )
+        if proc.returncode != 0:
+            raise RuntimeError(f"ADC auth failed:\n{proc.stdout}")
+
+        # Validate we can fetch a token now
+        _ = self._access_token()
+        return True
+
+    # ---------------- Agent Space APIs ----------------
+    def list_agent_space_agents(self) -> List[Dict[str, str]]:
+        if not self.is_live:
+            time.sleep(0.02)
+            return list(self._agents_cache)  # Return empty list initially
+
+        headers = {
+            "Authorization": f"Bearer {self._access_token()}",
+            "Content-Type": "application/json",
+            "X-Goog-User-Project": self.project_id,
+        }
+        url = (f"{BASE_URL}/projects/{self.project_id}/locations/global/collections/default_collection/"
+               f"engines/{self.engine_name}/assistants/default_assistant/agents")
+        r = self._http.get(url, headers=headers, timeout=60)
+        r.raise_for_status()
+        data = r.json()
+        out = []
+
+        for a in data.get("agents", []):
+            try:
+                authorization_full = a.get('adkAgentDefinition', {}).get('authorizations', [])
+                authorization_id = "N/A"
+                authorization_path = "N/A"
+
+                if authorization_full and len(authorization_full) > 0:
+                    authorization_path = authorization_full[0]
+                    # Extract just the authorization ID from the full path
+                    if "/" in authorization_path:
+                        authorization_id = authorization_path.split("/")[-1]
+                    else:
+                        authorization_id = authorization_path
+
+                # Extract engine ID from reasoning engine path
+                engine_full = a.get('adkAgentDefinition', {}).get('provisionedReasoningEngine', {}).get('reasoningEngine', '')
+                engine_id = "N/A"
+
+                if engine_full and "/" in engine_full:
+                    engine_id = engine_full.split("/")[-1]
+                elif engine_full:
+                    engine_id = engine_full
+
+            except Exception:
+                authorization_id = "N/A"
+                authorization_path = "N/A"
+                engine_id = "N/A"
+                engine_full = "N/A"
+
+            full = a.get("name", "")
+            out.append({
+                "id": full.split("/")[-1] if full else "",
+                "display_name": a.get("displayName", "N/A"),
+                "authorization_id": authorization_id,
+                "engine_id": engine_id,
+                "full_name": full,
+                # Store full paths for popup
+                "authorization_full": authorization_path,
+                "engine_full": engine_full,
+            })
+        return out
+
+    def delete_agent_from_space(self, full_name: str) -> Tuple[str, str]:
+        if not self.is_live:
+            time.sleep(0.02)
+            before = len(self._agents_cache)
+            self._agents_cache = [a for a in self._agents_cache if a["full_name"] != full_name]
+            if before != len(self._agents_cache) and self._profile.get("agent_space_agent_id") == full_name:
+                self._profile["agent_space_agent_id"] = None
+            return ("deleted", "Removed (mock)")
+
+        headers = {
+            "Authorization": f"Bearer {self._access_token()}",
+            "Content-Type": "application/json",
+            "X-Goog-User-Project": self.project_id,
+        }
+        url = f"{BASE_URL}/{full_name}"
+        r = self._http.delete(url, headers=headers, timeout=60)
+        # Treat any 2xx status as success (many APIs return 204 No Content)
+        if 200 <= r.status_code < 300:
+            if self._profile.get("agent_space_agent_id") == full_name:
+                self._profile["agent_space_agent_id"] = None
+                self._save_profile()
+            return ("deleted", "Deleted")
+        elif r.status_code == 404:
+            return ("not_found", "Not found")
+        else:
+            return ("failed", f"{r.status_code} {r.text}")
+
+    # ---------------- Reasoning Engine APIs ----------------
+    def list_reasoning_engines(self) -> List[Dict[str, str]]:
+        """List all reasoning engines in the project."""
+        if not self.is_live:
+            time.sleep(0.02)
+            # Return empty list initially - no mock data by default
+            return []
+
+        # Use the Vertex AI SDK to list reasoning engines
+        try:
+            self._ensure_vertex_inited()  # Use cached initialization
+            engines = agent_engines.list()
+
+            out = []
+            for engine in engines:
+                try:
+                    resource_name = str(engine.resource_name) if engine.resource_name else ""
+                    engine_id = resource_name.split("/")[-1] if resource_name else ""
+
+                    # Handle datetime objects safely
+                    create_time = "Unknown"
+                    if hasattr(engine, 'create_time') and engine.create_time:
+                        try:
+                            # Convert datetime to string
+                            create_time = str(engine.create_time)
+                        except Exception:
+                            create_time = "Unknown"
+
+                    # Safely get display name
+                    display_name = str(engine.display_name) if hasattr(engine, 'display_name') and engine.display_name else "Unnamed Engine"
+
+                    out.append({
+                        "id": engine_id,
+                        "display_name": display_name,
+                        "resource_name": resource_name,
+                        "create_time": create_time
+                    })
+                except Exception as e:
+                    # Skip engines that cause issues but continue processing
+                    print(f"⚠️ Skipped engine due to error: {str(e)}")
+                    continue
+
+            return out
+        except Exception as e:
+            # Handle API registration and other Vertex AI errors gracefully
+            error_msg = str(e)
+            if "api_mode" in error_msg:
+                print(f"⚠️ Vertex AI API registration warning: {error_msg}")
+                # Return empty list but don't crash the app
+                return []
+            else:
+                raise RuntimeError(f"Failed to list reasoning engines: {error_msg}")
+
+    def delete_reasoning_engine_by_id(self, resource_name: str) -> Tuple[str, str]:
+        """Delete a reasoning engine by resource name."""
+        if not self.is_live:
+            time.sleep(0.02)
+            return ("deleted", f"Engine {resource_name} deleted (mock)")
+
+        try:
+            engine = agent_engines.get(resource_name)
+            engine.delete(force=True)
+            return ("deleted", "Reasoning engine deleted")
+        except Exception as e:
+            return ("failed", f"Delete failed: {str(e)}")
+
+    def _load_agent_from_file(self, agent_file_path: str):
+        """Load root_agent from a Python file, handling relative imports properly."""
+        agent_file = Path(agent_file_path).resolve()
+        if not agent_file.exists():
+            raise RuntimeError(f"Agent file not found: {agent_file}")
+
+        agent_dir = agent_file.parent
+        package_name = agent_dir.name
+        module_name = f"{package_name}.{agent_file.stem}"
+
+        # Define paths before using them in print statements
+        parent_dir = str(agent_dir.parent)
+        agent_dir_str = str(agent_dir)
+
+        print(f"🤖 Loading {agent_file.stem} from: {agent_file}")
+        print(f"📁 Agent directory: {agent_dir}")
+        print(f"📦 Package name: {package_name}")
+        print(f"🔧 Module name: {module_name}")
+        if self.debug:
+            print(f"🛤️ Adding to sys.path: {parent_dir} (for package imports)")
+            print(f"🛤️ Adding to sys.path: {agent_dir_str} (for absolute imports like 'tools')")
+
+        # Add both the parent directory (for package imports) and the agent directory (for absolute imports like 'tools')
+        paths_added = []
+
+        # Add parent directory for package imports (e.g., 'executive_summary_builder.agent')
+        if parent_dir not in sys.path:
+            sys.path.insert(0, parent_dir)
+            paths_added.append(parent_dir)
+
+        # Add agent directory for absolute imports within the package (e.g., 'tools.gmail_search_supporter')
+        if agent_dir_str not in sys.path:
+            sys.path.insert(0, agent_dir_str)
+            paths_added.append(agent_dir_str)
+
+        try:
+            # Create package spec for proper relative import handling
+            package_spec = importlib.util.spec_from_file_location(
+                package_name,
+                agent_dir / "__init__.py" if (agent_dir / "__init__.py").exists() else None
+            )
+
+            if package_spec:
+                # Load the package first
+                package_module = importlib.util.module_from_spec(package_spec)
+                sys.modules[package_name] = package_module
+                if package_spec.loader and (agent_dir / "__init__.py").exists():
+                    package_spec.loader.exec_module(package_module)
+
+            # Now load the agent module as part of the package
+            spec = importlib.util.spec_from_file_location(module_name, agent_file)
+            if spec is None or spec.loader is None:
+                raise RuntimeError(f"Could not load module spec from {agent_file}")
+
+            module = importlib.util.module_from_spec(spec)
+            # Set the package for relative imports
+            module.__package__ = package_name
+
+            # Add to sys.modules for relative imports to work
+            sys.modules[module_name] = module
+
+            spec.loader.exec_module(module)
+
+            if not hasattr(module, "root_agent"):
+                raise RuntimeError(f"Module '{agent_file}' does not define `root_agent`.")
+
+            print(f"✅ Successfully loaded root_agent from {agent_file}")
+            return getattr(module, "root_agent")
+
+        except Exception as e:
+            print(f"❌ Failed to load agent: {e}")
+            raise RuntimeError(f"Failed to execute agent module {agent_file}: {e}") from e
+        finally:
+            # Clean up sys.path - remove all paths we added
+            for path in reversed(paths_added):  # Remove in reverse order
+                while path in sys.path:
+                    sys.path.remove(path)
+
+            # Clean up sys.modules
+            modules_to_remove = [name for name in sys.modules.keys()
+                                 if name.startswith(package_name)]
+            for name in modules_to_remove:
+                if name in sys.modules:
+                    del sys.modules[name]
+
+    def create_reasoning_engine_advanced(self, config: Dict[str, Any]) -> Tuple[str, str, Optional[str]]:
+        """Create a reasoning engine with advanced configuration options."""
+        creator = ReasoningEngineCreator(
+            project_id=self.project_id,
+            location=self.location,
+            staging_bucket=self.staging_bucket,
+            debug=self.debug,
+        )
+
+        return creator.create_advanced_engine(config)
+
+    def delete_reasoning_engine(self) -> Tuple[str, str]:
+        if not self._profile.get("name"):
+            return ("not_found", "No engine")
+        if not self.is_live:
+            time.sleep(0.02)
+            self._profile["name"] = None
+            self._save_profile()
+            return ("deleted", "Engine deleted (mock)")
+        try:
+            agent_engines.delete(self._profile["name"])
+            self._profile["name"] = None
+            self._save_profile()
+            return ("deleted", "Engine deleted")
+        except Exception as e:
+            return ("failed", str(e))
+
+    # ---------------- Deploy APIs ----------------
+    def list_authorizations(self) -> List[Dict[str, str]]:
+        """List all authorizations in the project."""
+        if not self.is_live:
+            time.sleep(0.02)
+            # Mock some authorizations for testing
+            return [
+                {"id": "demo-auth", "name": f"projects/{self.project_id}/locations/global/authorizations/demo-auth"},
+                {"id": "google-drive-auth", "name": f"projects/{self.project_id}/locations/global/authorizations/google-drive-auth"}
+            ]
+
+        headers = {
+            "Authorization": f"Bearer {self._access_token()}",
+            "Content-Type": "application/json",
+            "X-Goog-User-Project": self.project_id,
+        }
+        url = f"{BASE_URL}/projects/{self.project_id}/locations/global/authorizations"
+        r = self._http.get(url, headers=headers, timeout=60)
+        r.raise_for_status()
+        data = r.json()
+
+        out = []
+        for auth in data.get("authorizations", []):
+            full_name = auth.get("name", "")
+            out.append({
+                "id": full_name.split("/")[-1] if full_name else "",
+                "name": full_name,
+            })
+        return out
+
+    def delete_authorization(self, auth_id: str) -> Tuple[str, str]:
+        """Delete an authorization by ID."""
+        if not self.is_live:
+            time.sleep(0.02)
+            return ("deleted", f"Authorization {auth_id} deleted (mock)")
+
+        headers = {
+            "Authorization": f"Bearer {self._access_token()}",
+            "Content-Type": "application/json",
+            "X-Goog-User-Project": self.project_id,
+        }
+        url = f"{BASE_URL}/projects/{self.project_id}/locations/global/authorizations/{auth_id}"
+        r = self._http.delete(url, headers=headers, timeout=60)
+
+        # Treat any 2xx status as success (many APIs return 204 No Content)
+        if 200 <= r.status_code < 300:
+            return ("deleted", "Authorization deleted")
+        elif r.status_code == 404:
+            return ("not_found", "Authorization not found")
+        else:
+            return ("failed", f"{r.status_code} {r.text}")
+
+    def _ensure_authorization(self, auth_name: str) -> Tuple[bool, str]:
+        if not self.is_live:
+            time.sleep(0.02)
+            return True, "mock"
+
+        headers = {
+            "Authorization": f"Bearer {self._access_token()}",
+            "Content-Type": "application/json",
+            "X-Goog-User-Project": self.project_number,
+        }
+        payload = {
+            "name": f"projects/{self.project_number}/locations/global/authorizations/{auth_name}",
+            "serverSideOauth2": {
+                "clientId": self.oauth_client_id or "your-client-id",
+                "clientSecret": self.oauth_client_secret or "your-client-secret",
+                "authorizationUri": (
+                    "https://accounts.google.com/o/oauth2/auth"
+                    "?response_type=code"
+                    f"&client_id={(self.oauth_client_id or 'your-client-id')}"
+                    "&scope=openid"
+                    "%20https://www.googleapis.com/auth/userinfo.email"
+                    "%20https://www.googleapis.com/auth/calendar"
+                    "%20https://www.googleapis.com/auth/gmail.send"
+                    "%20https://www.googleapis.com/auth/gmail.compose"
+                    "%20https://www.googleapis.com/auth/drive"
+                    "%20https://www.googleapis.com/auth/presentations"
+                    "%20https://www.googleapis.com/auth/cloud-platform"
+                    "%20https://mail.google.com/"
+                    "&access_type=offline&prompt=consent"
+                ),
+                "tokenUri": "https://oauth2.googleapis.com/token"
+            }
+        }
+        url = f"{BASE_URL}/projects/{self.project_id}/locations/global/authorizations?authorizationId={auth_name}"
+        r = self._http.post(url, headers=headers, json=payload, timeout=60)
+
+        if self.debug:
+            from pprint import pprint
+            pprint(r.json())  # Debugging output
+        if r.status_code < 400:
+            return True, "created"
+        if r.status_code == 409:
+            return True, "exists"
+        return False, f"{r.status_code} {r.text}"
+
+    def deploy_to_agent_space(self, with_authorization: bool, auth_name: str) -> Tuple[str, str, Optional[Dict[str, str]]]:
+        if not self._profile.get("name"):
+            return ("failed", "Reasoning engine required before deploy", None)
+
+        if not self.is_live:
+            time.sleep(0.02)
+            aid = f"agent_{uuid.uuid4().hex[:6]}"
+            full = (f"projects/{self.project_id}/locations/global/collections/default_collection/"
+                    f"engines/{self.engine_name}/assistants/default_assistant/agents/{aid}")
+            item = {"id": aid, "display_name": self._profile.get("display_name", "Demo Agent"), "full_name": full}
+            self._agents_cache.append(item)
+            self._profile["agent_space_agent_id"] = full
+            self._save_profile()
+            return ("created", f"Deployed (mock, oauth={with_authorization})", item)
+
+        if with_authorization:
+            ok, msg = self._ensure_authorization(auth_name)
+            if not ok:
+                return ("failed", f"Authorization failed: {msg}", None)
+
+        headers = {
+            "Authorization": f"Bearer {self._access_token()}",
+            "Content-Type": "application/json",
+            "X-Goog-User-Project": self.project_number,
+        }
+        payload = {
+            "displayName": self._profile.get("display_name", "Live Agent"),
+            "description": self._profile.get("description", "Live Agent"),
+            "adk_agent_definition": {
+                "tool_settings": {"tool_description": self._profile.get("tool_description", "Tooling")},
+                "provisioned_reasoning_engine": {"reasoning_engine": self._profile["name"]},
+            },
+        }
+        if with_authorization:
+            payload["adk_agent_definition"]["authorizations"] = [
+                f"projects/{self.project_number}/locations/global/authorizations/{auth_name}"
+            ]
+
+        url = (f"{BASE_URL}/projects/{self.project_number}/locations/global/collections/default_collection/"
+               f"engines/{self.engine_name}/assistants/default_assistant/agents")
+        if self.debug:
+            pprint(payload)
+            pprint(url)
+        r = self._http.post(url, headers=headers, json=payload, timeout=90)
+
+        if self.debug:
+            pprint(r.json())  # Debugging output
+        if not r.ok:
+            return ("failed", f"Deploy failed: {r.status_code} {r.text}", None)
+
+        info = r.json()
+        full = info.get("name", "")
+        item = {
+            "id": full.split("/")[-1] if full else "",
+            "display_name": info.get("displayName", self._profile.get("display_name", "")),
+            "full_name": full,
+        }
+        self._profile["agent_space_agent_id"] = full
+        self._save_profile()
+        return ("created", "Deployed", item)
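For orientation, the sketch below exercises the ApiClient added in this file in its "mock" mode. It is a hypothetical example, not code shipped in or documented by the package: the import path is inferred from the file list above (gui_editor/src/core/api_client.py), all project values are placeholders, and it assumes the requests dependency is installed; mock mode appears designed to run without Google Cloud libraries or credentials (see the HAS_GOOGLE guard).

# Hypothetical usage sketch; import path and values are assumptions, not package documentation.
from reasoning_deployment_service.gui_editor.src.core.api_client import ApiClient

client = ApiClient(
    project_id="my-project",               # placeholder
    project_number="123456789012",         # placeholder
    location="us-central1",                # placeholder
    engine_name="my-agent-space-engine",   # placeholder
    mode="mock",                           # "live" would require gcloud ADC and Google libs
)

print(client.authenticate())             # mock mode returns True without network auth
print(client.list_reasoning_engines())   # [] in mock mode (no mock data by default)
print(client.list_authorizations())      # two canned mock authorization entries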
@@ -0,0 +1,43 @@
+"""Configuration management for Agent Space Deployment Service."""
+import os
+from typing import Dict, Any
+import dotenv
+
+
+class Config:
+    """Configuration class to manage environment variables and defaults."""
+
+    def __init__(self):
+        # Use new environment variable names
+        self.project_id = os.getenv("DEV_PROJECT_ID") if os.getenv("DEV_PROJECT_ID") else os.getenv("PROD_PROJECT_ID")
+        self.project_number = os.getenv("DEV_PROJECT_NUMBER") if os.getenv("DEV_PROJECT_NUMBER") else os.getenv("PROD_PROJECT_NUMBER")
+        self.location = os.getenv("DEV_PROJECT_LOCATION") if os.getenv("DEV_PROJECT_LOCATION") else os.getenv("PROD_PROJECT_LOCATION")
+        self.agent_space = os.getenv("DEV_AGENT_SPACE_ENGINE") if os.getenv("DEV_AGENT_SPACE_ENGINE") else os.getenv("PROD_AGENT_SPACE_ENGINE")
+        self.engine_name = self.agent_space  # Alias for compatibility
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Convert config to dictionary for API client initialization."""
+        return {
+            "project_id": self.project_id,
+            "project_number": self.project_number,
+            "location": self.location,
+            "agent_space": self.agent_space,
+            "engine_name": self.engine_name,
+        }
+
+    @property
+    def is_configured(self) -> bool:
+        """Check if minimum required configuration is available."""
+        return all([
+            self.project_id,
+            self.project_number,
+            self.location,
+            self.agent_space
+        ])
+
+
+def loadenv(env_path: str):
+    """Load environment variables from the specified file."""
+    if not os.path.exists(env_path):
+        raise FileNotFoundError(f"Environment file not found: {env_path}")
+    dotenv.load_dotenv(env_path)
+    print(f"Environment variables loaded from {env_path}")
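A minimal sketch of how this Config module could be used, assuming loadenv is a module-level helper as reconstructed above and that the import path matches gui_editor/src/core/config.py from the file list. The ".env" path and the DEV_* variable names it expects are taken from the code; nothing here is official package documentation, and python-dotenv must be installed.

# Hypothetical usage sketch; path and import location are assumptions.
from reasoning_deployment_service.gui_editor.src.core.config import Config, loadenv

loadenv(".env")                  # raises FileNotFoundError if the file is missing
cfg = Config()                   # reads DEV_* first, falling back to PROD_* variables
if not cfg.is_configured:
    raise SystemExit("Set DEV_* or PROD_* project variables in your .env file")
print(cfg.to_dict())             # kwargs-style dict for ApiClient initialization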