amd-gaia 0.15.0__py3-none-any.whl → 0.15.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.2.dist-info}/METADATA +222 -223
- amd_gaia-0.15.2.dist-info/RECORD +182 -0
- {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.2.dist-info}/WHEEL +1 -1
- {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.2.dist-info}/entry_points.txt +1 -0
- {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.2.dist-info}/licenses/LICENSE.md +20 -20
- gaia/__init__.py +29 -29
- gaia/agents/__init__.py +19 -19
- gaia/agents/base/__init__.py +9 -9
- gaia/agents/base/agent.py +2132 -2177
- gaia/agents/base/api_agent.py +119 -120
- gaia/agents/base/console.py +1967 -1841
- gaia/agents/base/errors.py +237 -237
- gaia/agents/base/mcp_agent.py +86 -86
- gaia/agents/base/tools.py +88 -83
- gaia/agents/blender/__init__.py +7 -0
- gaia/agents/blender/agent.py +553 -556
- gaia/agents/blender/agent_simple.py +133 -135
- gaia/agents/blender/app.py +211 -211
- gaia/agents/blender/app_simple.py +41 -41
- gaia/agents/blender/core/__init__.py +16 -16
- gaia/agents/blender/core/materials.py +506 -506
- gaia/agents/blender/core/objects.py +316 -316
- gaia/agents/blender/core/rendering.py +225 -225
- gaia/agents/blender/core/scene.py +220 -220
- gaia/agents/blender/core/view.py +146 -146
- gaia/agents/chat/__init__.py +9 -9
- gaia/agents/chat/agent.py +809 -835
- gaia/agents/chat/app.py +1065 -1058
- gaia/agents/chat/session.py +508 -508
- gaia/agents/chat/tools/__init__.py +15 -15
- gaia/agents/chat/tools/file_tools.py +96 -96
- gaia/agents/chat/tools/rag_tools.py +1744 -1729
- gaia/agents/chat/tools/shell_tools.py +437 -436
- gaia/agents/code/__init__.py +7 -7
- gaia/agents/code/agent.py +549 -549
- gaia/agents/code/cli.py +377 -0
- gaia/agents/code/models.py +135 -135
- gaia/agents/code/orchestration/__init__.py +24 -24
- gaia/agents/code/orchestration/checklist_executor.py +1763 -1763
- gaia/agents/code/orchestration/checklist_generator.py +713 -713
- gaia/agents/code/orchestration/factories/__init__.py +9 -9
- gaia/agents/code/orchestration/factories/base.py +63 -63
- gaia/agents/code/orchestration/factories/nextjs_factory.py +118 -118
- gaia/agents/code/orchestration/factories/python_factory.py +106 -106
- gaia/agents/code/orchestration/orchestrator.py +841 -841
- gaia/agents/code/orchestration/project_analyzer.py +391 -391
- gaia/agents/code/orchestration/steps/__init__.py +67 -67
- gaia/agents/code/orchestration/steps/base.py +188 -188
- gaia/agents/code/orchestration/steps/error_handler.py +314 -314
- gaia/agents/code/orchestration/steps/nextjs.py +828 -828
- gaia/agents/code/orchestration/steps/python.py +307 -307
- gaia/agents/code/orchestration/template_catalog.py +469 -469
- gaia/agents/code/orchestration/workflows/__init__.py +14 -14
- gaia/agents/code/orchestration/workflows/base.py +80 -80
- gaia/agents/code/orchestration/workflows/nextjs.py +186 -186
- gaia/agents/code/orchestration/workflows/python.py +94 -94
- gaia/agents/code/prompts/__init__.py +11 -11
- gaia/agents/code/prompts/base_prompt.py +77 -77
- gaia/agents/code/prompts/code_patterns.py +2034 -2036
- gaia/agents/code/prompts/nextjs_prompt.py +40 -40
- gaia/agents/code/prompts/python_prompt.py +109 -109
- gaia/agents/code/schema_inference.py +365 -365
- gaia/agents/code/system_prompt.py +41 -41
- gaia/agents/code/tools/__init__.py +42 -42
- gaia/agents/code/tools/cli_tools.py +1138 -1138
- gaia/agents/code/tools/code_formatting.py +319 -319
- gaia/agents/code/tools/code_tools.py +769 -769
- gaia/agents/code/tools/error_fixing.py +1347 -1347
- gaia/agents/code/tools/external_tools.py +180 -180
- gaia/agents/code/tools/file_io.py +845 -845
- gaia/agents/code/tools/prisma_tools.py +190 -190
- gaia/agents/code/tools/project_management.py +1016 -1016
- gaia/agents/code/tools/testing.py +321 -321
- gaia/agents/code/tools/typescript_tools.py +122 -122
- gaia/agents/code/tools/validation_parsing.py +461 -461
- gaia/agents/code/tools/validation_tools.py +806 -806
- gaia/agents/code/tools/web_dev_tools.py +1758 -1758
- gaia/agents/code/validators/__init__.py +16 -16
- gaia/agents/code/validators/antipattern_checker.py +241 -241
- gaia/agents/code/validators/ast_analyzer.py +197 -197
- gaia/agents/code/validators/requirements_validator.py +145 -145
- gaia/agents/code/validators/syntax_validator.py +171 -171
- gaia/agents/docker/__init__.py +7 -7
- gaia/agents/docker/agent.py +643 -642
- gaia/agents/emr/__init__.py +8 -8
- gaia/agents/emr/agent.py +1504 -1506
- gaia/agents/emr/cli.py +1322 -1322
- gaia/agents/emr/constants.py +475 -475
- gaia/agents/emr/dashboard/__init__.py +4 -4
- gaia/agents/emr/dashboard/server.py +1972 -1974
- gaia/agents/jira/__init__.py +11 -11
- gaia/agents/jira/agent.py +894 -894
- gaia/agents/jira/jql_templates.py +299 -299
- gaia/agents/routing/__init__.py +7 -7
- gaia/agents/routing/agent.py +567 -570
- gaia/agents/routing/system_prompt.py +75 -75
- gaia/agents/summarize/__init__.py +11 -0
- gaia/agents/summarize/agent.py +885 -0
- gaia/agents/summarize/prompts.py +129 -0
- gaia/api/__init__.py +23 -23
- gaia/api/agent_registry.py +238 -238
- gaia/api/app.py +305 -305
- gaia/api/openai_server.py +575 -575
- gaia/api/schemas.py +186 -186
- gaia/api/sse_handler.py +373 -373
- gaia/apps/__init__.py +4 -4
- gaia/apps/llm/__init__.py +6 -6
- gaia/apps/llm/app.py +184 -169
- gaia/apps/summarize/app.py +116 -633
- gaia/apps/summarize/html_viewer.py +133 -133
- gaia/apps/summarize/pdf_formatter.py +284 -284
- gaia/audio/__init__.py +2 -2
- gaia/audio/audio_client.py +439 -439
- gaia/audio/audio_recorder.py +269 -269
- gaia/audio/kokoro_tts.py +599 -599
- gaia/audio/whisper_asr.py +432 -432
- gaia/chat/__init__.py +16 -16
- gaia/chat/app.py +428 -430
- gaia/chat/prompts.py +522 -522
- gaia/chat/sdk.py +1228 -1225
- gaia/cli.py +5659 -5632
- gaia/database/__init__.py +10 -10
- gaia/database/agent.py +176 -176
- gaia/database/mixin.py +290 -290
- gaia/database/testing.py +64 -64
- gaia/eval/batch_experiment.py +2332 -2332
- gaia/eval/claude.py +542 -542
- gaia/eval/config.py +37 -37
- gaia/eval/email_generator.py +512 -512
- gaia/eval/eval.py +3179 -3179
- gaia/eval/groundtruth.py +1130 -1130
- gaia/eval/transcript_generator.py +582 -582
- gaia/eval/webapp/README.md +167 -167
- gaia/eval/webapp/package-lock.json +875 -875
- gaia/eval/webapp/package.json +20 -20
- gaia/eval/webapp/public/app.js +3402 -3402
- gaia/eval/webapp/public/index.html +87 -87
- gaia/eval/webapp/public/styles.css +3661 -3661
- gaia/eval/webapp/server.js +415 -415
- gaia/eval/webapp/test-setup.js +72 -72
- gaia/installer/__init__.py +23 -0
- gaia/installer/init_command.py +1275 -0
- gaia/installer/lemonade_installer.py +619 -0
- gaia/llm/__init__.py +10 -2
- gaia/llm/base_client.py +60 -0
- gaia/llm/exceptions.py +12 -0
- gaia/llm/factory.py +70 -0
- gaia/llm/lemonade_client.py +3421 -3221
- gaia/llm/lemonade_manager.py +294 -294
- gaia/llm/providers/__init__.py +9 -0
- gaia/llm/providers/claude.py +108 -0
- gaia/llm/providers/lemonade.py +118 -0
- gaia/llm/providers/openai_provider.py +79 -0
- gaia/llm/vlm_client.py +382 -382
- gaia/logger.py +189 -189
- gaia/mcp/agent_mcp_server.py +245 -245
- gaia/mcp/blender_mcp_client.py +138 -138
- gaia/mcp/blender_mcp_server.py +648 -648
- gaia/mcp/context7_cache.py +332 -332
- gaia/mcp/external_services.py +518 -518
- gaia/mcp/mcp_bridge.py +811 -550
- gaia/mcp/servers/__init__.py +6 -6
- gaia/mcp/servers/docker_mcp.py +83 -83
- gaia/perf_analysis.py +361 -0
- gaia/rag/__init__.py +10 -10
- gaia/rag/app.py +293 -293
- gaia/rag/demo.py +304 -304
- gaia/rag/pdf_utils.py +235 -235
- gaia/rag/sdk.py +2194 -2194
- gaia/security.py +183 -163
- gaia/talk/app.py +287 -289
- gaia/talk/sdk.py +538 -538
- gaia/testing/__init__.py +87 -87
- gaia/testing/assertions.py +330 -330
- gaia/testing/fixtures.py +333 -333
- gaia/testing/mocks.py +493 -493
- gaia/util.py +46 -46
- gaia/utils/__init__.py +33 -33
- gaia/utils/file_watcher.py +675 -675
- gaia/utils/parsing.py +223 -223
- gaia/version.py +100 -100
- amd_gaia-0.15.0.dist-info/RECORD +0 -168
- gaia/agents/code/app.py +0 -266
- gaia/llm/llm_client.py +0 -723
- {amd_gaia-0.15.0.dist-info → amd_gaia-0.15.2.dist-info}/top_level.txt +0 -0
gaia/api/app.py
CHANGED
|
@@ -1,305 +1,305 @@
|
|
|
1
|
-
# Copyright(C) 2025-2026 Advanced Micro Devices, Inc. All rights reserved.
|
|
2
|
-
# SPDX-License-Identifier: MIT
|
|
3
|
-
"""
|
|
4
|
-
CLI entry point for GAIA OpenAI-compatible API server
|
|
5
|
-
|
|
6
|
-
This module provides command-line interface for managing the API server.
|
|
7
|
-
|
|
8
|
-
Usage:
|
|
9
|
-
gaia api start [--host HOST] [--port PORT] [--background]
|
|
10
|
-
gaia api status
|
|
11
|
-
gaia api stop
|
|
12
|
-
"""
|
|
13
|
-
|
|
14
|
-
import argparse
|
|
15
|
-
import logging
|
|
16
|
-
import os
|
|
17
|
-
import subprocess
|
|
18
|
-
import sys
|
|
19
|
-
from pathlib import Path
|
|
20
|
-
|
|
21
|
-
logger = logging.getLogger(__name__)
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
def start_server(
|
|
25
|
-
host: str = "localhost",
|
|
26
|
-
port: int = 8080,
|
|
27
|
-
background: bool = False,
|
|
28
|
-
debug: bool = False,
|
|
29
|
-
show_prompts: bool = False,
|
|
30
|
-
streaming: bool = False,
|
|
31
|
-
step_through: bool = False,
|
|
32
|
-
) -> None:
|
|
33
|
-
"""
|
|
34
|
-
Start the API server.
|
|
35
|
-
|
|
36
|
-
Args:
|
|
37
|
-
host: Host to bind to (default: localhost)
|
|
38
|
-
port: Port to bind to (default: 8080)
|
|
39
|
-
background: Run in background if True, foreground otherwise
|
|
40
|
-
debug: Enable debug logging
|
|
41
|
-
show_prompts: Display prompts sent to LLM
|
|
42
|
-
streaming: Enable real-time streaming of LLM responses
|
|
43
|
-
step_through: Enable step-through debugging mode
|
|
44
|
-
|
|
45
|
-
Example:
|
|
46
|
-
>>> start_server("localhost", 8080, background=True)
|
|
47
|
-
✅ GAIA API server started in background (PID: 12345)
|
|
48
|
-
>>> start_server("localhost", 8080, debug=True, show_prompts=True)
|
|
49
|
-
✅ GAIA API server started with debug mode enabled
|
|
50
|
-
"""
|
|
51
|
-
# Set environment variables for agent configuration
|
|
52
|
-
# These will be read by agent_registry.py when agents are instantiated
|
|
53
|
-
if debug:
|
|
54
|
-
os.environ["GAIA_API_DEBUG"] = "1"
|
|
55
|
-
if show_prompts:
|
|
56
|
-
os.environ["GAIA_API_SHOW_PROMPTS"] = "1"
|
|
57
|
-
if streaming:
|
|
58
|
-
os.environ["GAIA_API_STREAMING"] = "1"
|
|
59
|
-
if step_through:
|
|
60
|
-
os.environ["GAIA_API_STEP_THROUGH"] = "1"
|
|
61
|
-
|
|
62
|
-
if background:
|
|
63
|
-
# Start in background
|
|
64
|
-
log_file = Path("gaia_api.log")
|
|
65
|
-
with open(log_file, "w", encoding="utf-8") as f:
|
|
66
|
-
proc = subprocess.Popen(
|
|
67
|
-
[
|
|
68
|
-
sys.executable,
|
|
69
|
-
"-m",
|
|
70
|
-
"uvicorn",
|
|
71
|
-
"gaia.api.openai_server:app",
|
|
72
|
-
"--host",
|
|
73
|
-
host,
|
|
74
|
-
"--port",
|
|
75
|
-
str(port),
|
|
76
|
-
],
|
|
77
|
-
stdout=f,
|
|
78
|
-
stderr=f,
|
|
79
|
-
)
|
|
80
|
-
print(f"✅ GAIA API server started in background (PID: {proc.pid})")
|
|
81
|
-
print(f"📝 Logs: {log_file}")
|
|
82
|
-
print(f"🌐 URL: http://{host}:{port}")
|
|
83
|
-
if debug or show_prompts or streaming or step_through:
|
|
84
|
-
print("\n🐛 Debug features enabled:")
|
|
85
|
-
if debug:
|
|
86
|
-
print(" • Debug logging")
|
|
87
|
-
if show_prompts:
|
|
88
|
-
print(" • Show prompts")
|
|
89
|
-
if streaming:
|
|
90
|
-
print(" • LLM streaming")
|
|
91
|
-
if step_through:
|
|
92
|
-
print(" • Step-through mode")
|
|
93
|
-
print("\nAvailable endpoints:")
|
|
94
|
-
print(f" • POST http://{host}:{port}/v1/chat/completions")
|
|
95
|
-
print(f" • GET http://{host}:{port}/v1/models")
|
|
96
|
-
print(f" • GET http://{host}:{port}/health")
|
|
97
|
-
else:
|
|
98
|
-
# Start in foreground
|
|
99
|
-
import uvicorn
|
|
100
|
-
|
|
101
|
-
print(f"🚀 Starting GAIA API server on http://{host}:{port}")
|
|
102
|
-
if debug or show_prompts or streaming or step_through:
|
|
103
|
-
print("\n🐛 Debug features enabled:")
|
|
104
|
-
if debug:
|
|
105
|
-
print(" • Debug logging")
|
|
106
|
-
if show_prompts:
|
|
107
|
-
print(" • Show prompts")
|
|
108
|
-
if streaming:
|
|
109
|
-
print(" • LLM streaming")
|
|
110
|
-
if step_through:
|
|
111
|
-
print(" • Step-through mode")
|
|
112
|
-
print("\nAvailable endpoints:")
|
|
113
|
-
print(f" • POST http://{host}:{port}/v1/chat/completions")
|
|
114
|
-
print(f" • GET http://{host}:{port}/v1/models")
|
|
115
|
-
print(f" • GET http://{host}:{port}/health")
|
|
116
|
-
print("\nPress Ctrl+C to stop\n")
|
|
117
|
-
|
|
118
|
-
# Set uvicorn log level based on debug flag
|
|
119
|
-
log_level = "debug" if debug else "info"
|
|
120
|
-
uvicorn.run(
|
|
121
|
-
"gaia.api.openai_server:app", host=host, port=port, log_level=log_level
|
|
122
|
-
)
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
def check_status() -> None:
|
|
126
|
-
"""
|
|
127
|
-
Check if API server is running.
|
|
128
|
-
|
|
129
|
-
This will be implemented in a future version.
|
|
130
|
-
For now, it just prints a message.
|
|
131
|
-
"""
|
|
132
|
-
print("Status check not yet implemented")
|
|
133
|
-
print("Try: curl http://localhost:8080/health")
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
def stop_server(port: int = 8080) -> None:
|
|
137
|
-
"""
|
|
138
|
-
Stop the API server by finding and killing processes on the port.
|
|
139
|
-
|
|
140
|
-
Args:
|
|
141
|
-
port: Port number to stop server on (default: 8080)
|
|
142
|
-
|
|
143
|
-
Returns:
|
|
144
|
-
None
|
|
145
|
-
"""
|
|
146
|
-
import platform
|
|
147
|
-
import signal
|
|
148
|
-
|
|
149
|
-
system = platform.system()
|
|
150
|
-
|
|
151
|
-
try:
|
|
152
|
-
if system == "Windows":
|
|
153
|
-
# Windows: Use netstat to find PID, then taskkill to stop it
|
|
154
|
-
result = subprocess.run(
|
|
155
|
-
["netstat", "-ano"],
|
|
156
|
-
capture_output=True,
|
|
157
|
-
text=True,
|
|
158
|
-
timeout=5,
|
|
159
|
-
check=False,
|
|
160
|
-
)
|
|
161
|
-
|
|
162
|
-
# Parse netstat output to find PIDs listening on the port
|
|
163
|
-
pids = set()
|
|
164
|
-
for line in result.stdout.splitlines():
|
|
165
|
-
if f":{port}" in line and "LISTENING" in line:
|
|
166
|
-
# Line format: " TCP 0.0.0.0:8080 0.0.0.0:0 LISTENING 12345"
|
|
167
|
-
parts = line.split()
|
|
168
|
-
if parts and parts[-1].isdigit():
|
|
169
|
-
pids.add(parts[-1])
|
|
170
|
-
|
|
171
|
-
if pids:
|
|
172
|
-
for pid in pids:
|
|
173
|
-
try:
|
|
174
|
-
subprocess.run(
|
|
175
|
-
["taskkill", "/F", "/PID", pid],
|
|
176
|
-
capture_output=True,
|
|
177
|
-
timeout=5,
|
|
178
|
-
check=False,
|
|
179
|
-
)
|
|
180
|
-
print(f"🛑 Stopped API server process (PID: {pid})")
|
|
181
|
-
except (
|
|
182
|
-
subprocess.TimeoutExpired,
|
|
183
|
-
subprocess.CalledProcessError,
|
|
184
|
-
) as e:
|
|
185
|
-
print(f"⚠️ Failed to stop PID {pid}: {e}")
|
|
186
|
-
print("✅ API server stopped")
|
|
187
|
-
else:
|
|
188
|
-
print("ℹ️ No API server found running on port {port}")
|
|
189
|
-
|
|
190
|
-
else:
|
|
191
|
-
# Linux/Mac: Use lsof to find PID, then kill it
|
|
192
|
-
result = subprocess.run(
|
|
193
|
-
["lsof", "-ti", f":{port}"],
|
|
194
|
-
capture_output=True,
|
|
195
|
-
text=True,
|
|
196
|
-
timeout=5,
|
|
197
|
-
check=False,
|
|
198
|
-
)
|
|
199
|
-
|
|
200
|
-
pids = result.stdout.strip().split("\n")
|
|
201
|
-
pids = [pid for pid in pids if pid] # Filter empty strings
|
|
202
|
-
|
|
203
|
-
if pids:
|
|
204
|
-
for pid in pids:
|
|
205
|
-
try:
|
|
206
|
-
os.kill(int(pid), signal.SIGTERM)
|
|
207
|
-
print(f"🛑 Stopped API server process (PID: {pid})")
|
|
208
|
-
except (ProcessLookupError, ValueError) as e:
|
|
209
|
-
print(f"⚠️ Failed to stop PID {pid}: {e}")
|
|
210
|
-
print("✅ API server stopped")
|
|
211
|
-
else:
|
|
212
|
-
print(f"ℹ️ No API server found running on port {port}")
|
|
213
|
-
|
|
214
|
-
except FileNotFoundError as e:
|
|
215
|
-
print(f"❌ Required command not found: {e}")
|
|
216
|
-
print("To stop manually, find the process using the port:")
|
|
217
|
-
if system == "Windows":
|
|
218
|
-
print(f" netstat -ano | findstr :{port}")
|
|
219
|
-
print(" taskkill /F /PID <PID>")
|
|
220
|
-
else:
|
|
221
|
-
print(f" lsof -ti :{port}")
|
|
222
|
-
print(" kill -9 <PID>")
|
|
223
|
-
except subprocess.TimeoutExpired:
|
|
224
|
-
print(f"❌ Timeout while trying to stop server on port {port}")
|
|
225
|
-
except Exception as e:
|
|
226
|
-
print(f"❌ Error stopping server: {e}")
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
def main() -> None:
|
|
230
|
-
"""
|
|
231
|
-
CLI entry point for API server commands.
|
|
232
|
-
|
|
233
|
-
Example:
|
|
234
|
-
$ gaia api start
|
|
235
|
-
$ gaia api start --host 0.0.0.0 --port 8000 --background
|
|
236
|
-
$ gaia api status
|
|
237
|
-
$ gaia api stop
|
|
238
|
-
"""
|
|
239
|
-
parser = argparse.ArgumentParser(description="GAIA OpenAI-compatible API server")
|
|
240
|
-
|
|
241
|
-
subparsers = parser.add_subparsers(dest="command", help="Command to execute")
|
|
242
|
-
|
|
243
|
-
# Start command
|
|
244
|
-
start_parser = subparsers.add_parser("start", help="Start API server")
|
|
245
|
-
start_parser.add_argument(
|
|
246
|
-
"--host", default="localhost", help="Host to bind to (default: localhost)"
|
|
247
|
-
)
|
|
248
|
-
start_parser.add_argument(
|
|
249
|
-
"--port", type=int, default=8080, help="Port to bind to (default: 8080)"
|
|
250
|
-
)
|
|
251
|
-
start_parser.add_argument(
|
|
252
|
-
"--background", action="store_true", help="Run in background"
|
|
253
|
-
)
|
|
254
|
-
start_parser.add_argument(
|
|
255
|
-
"--debug",
|
|
256
|
-
action="store_true",
|
|
257
|
-
help="Enable debug logging",
|
|
258
|
-
)
|
|
259
|
-
start_parser.add_argument(
|
|
260
|
-
"--show-prompts",
|
|
261
|
-
action="store_true",
|
|
262
|
-
help="Display prompts sent to LLM",
|
|
263
|
-
)
|
|
264
|
-
start_parser.add_argument(
|
|
265
|
-
"--streaming",
|
|
266
|
-
action="store_true",
|
|
267
|
-
help="Enable real-time streaming of LLM responses",
|
|
268
|
-
)
|
|
269
|
-
start_parser.add_argument(
|
|
270
|
-
"--step-through",
|
|
271
|
-
action="store_true",
|
|
272
|
-
help="Enable step-through debugging mode (pause at each agent step)",
|
|
273
|
-
)
|
|
274
|
-
|
|
275
|
-
# Status command
|
|
276
|
-
subparsers.add_parser("status", help="Check server status")
|
|
277
|
-
|
|
278
|
-
# Stop command
|
|
279
|
-
stop_parser = subparsers.add_parser("stop", help="Stop server")
|
|
280
|
-
stop_parser.add_argument(
|
|
281
|
-
"--port", type=int, default=8080, help="Port number (default: 8080)"
|
|
282
|
-
)
|
|
283
|
-
|
|
284
|
-
args = parser.parse_args()
|
|
285
|
-
|
|
286
|
-
if args.command == "start":
|
|
287
|
-
start_server(
|
|
288
|
-
args.host,
|
|
289
|
-
args.port,
|
|
290
|
-
args.background,
|
|
291
|
-
getattr(args, "debug", False),
|
|
292
|
-
getattr(args, "show_prompts", False),
|
|
293
|
-
getattr(args, "streaming", False),
|
|
294
|
-
getattr(args, "step_through", False),
|
|
295
|
-
)
|
|
296
|
-
elif args.command == "status":
|
|
297
|
-
check_status()
|
|
298
|
-
elif args.command == "stop":
|
|
299
|
-
stop_server(getattr(args, "port", 8080))
|
|
300
|
-
else:
|
|
301
|
-
parser.print_help()
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
if __name__ == "__main__":
|
|
305
|
-
main()
|
|
1
|
+
# Copyright(C) 2025-2026 Advanced Micro Devices, Inc. All rights reserved.
|
|
2
|
+
# SPDX-License-Identifier: MIT
|
|
3
|
+
"""
|
|
4
|
+
CLI entry point for GAIA OpenAI-compatible API server
|
|
5
|
+
|
|
6
|
+
This module provides command-line interface for managing the API server.
|
|
7
|
+
|
|
8
|
+
Usage:
|
|
9
|
+
gaia api start [--host HOST] [--port PORT] [--background]
|
|
10
|
+
gaia api status
|
|
11
|
+
gaia api stop
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
import argparse
|
|
15
|
+
import logging
|
|
16
|
+
import os
|
|
17
|
+
import subprocess
|
|
18
|
+
import sys
|
|
19
|
+
from pathlib import Path
|
|
20
|
+
|
|
21
|
+
logger = logging.getLogger(__name__)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def _print_debug_features(
    debug: bool, show_prompts: bool, streaming: bool, step_through: bool
) -> None:
    """Print the list of enabled debug features; no-op when none are enabled."""
    if not (debug or show_prompts or streaming or step_through):
        return
    print("\n🐛 Debug features enabled:")
    if debug:
        print(" • Debug logging")
    if show_prompts:
        print(" • Show prompts")
    if streaming:
        print(" • LLM streaming")
    if step_through:
        print(" • Step-through mode")


def _print_endpoints(host: str, port: int) -> None:
    """Print the HTTP endpoints exposed by the running API server."""
    print("\nAvailable endpoints:")
    print(f" • POST http://{host}:{port}/v1/chat/completions")
    print(f" • GET http://{host}:{port}/v1/models")
    print(f" • GET http://{host}:{port}/health")


def start_server(
    host: str = "localhost",
    port: int = 8080,
    background: bool = False,
    debug: bool = False,
    show_prompts: bool = False,
    streaming: bool = False,
    step_through: bool = False,
) -> None:
    """
    Start the API server.

    Args:
        host: Host to bind to (default: localhost)
        port: Port to bind to (default: 8080)
        background: Run in background if True, foreground otherwise
        debug: Enable debug logging
        show_prompts: Display prompts sent to LLM
        streaming: Enable real-time streaming of LLM responses
        step_through: Enable step-through debugging mode

    Example:
        >>> start_server("localhost", 8080, background=True)
        ✅ GAIA API server started in background (PID: 12345)
        >>> start_server("localhost", 8080, debug=True, show_prompts=True)
        ✅ GAIA API server started with debug mode enabled
    """
    # Set environment variables for agent configuration.
    # These will be read by agent_registry.py when agents are instantiated.
    if debug:
        os.environ["GAIA_API_DEBUG"] = "1"
    if show_prompts:
        os.environ["GAIA_API_SHOW_PROMPTS"] = "1"
    if streaming:
        os.environ["GAIA_API_STREAMING"] = "1"
    if step_through:
        os.environ["GAIA_API_STEP_THROUGH"] = "1"

    if background:
        # Detach by launching uvicorn as a child process; stdout/stderr go
        # to a log file so the parent terminal stays clean.
        log_file = Path("gaia_api.log")
        with open(log_file, "w", encoding="utf-8") as f:
            proc = subprocess.Popen(
                [
                    sys.executable,
                    "-m",
                    "uvicorn",
                    "gaia.api.openai_server:app",
                    "--host",
                    host,
                    "--port",
                    str(port),
                ],
                stdout=f,
                stderr=f,
            )
        print(f"✅ GAIA API server started in background (PID: {proc.pid})")
        print(f"📝 Logs: {log_file}")
        print(f"🌐 URL: http://{host}:{port}")
        _print_debug_features(debug, show_prompts, streaming, step_through)
        _print_endpoints(host, port)
    else:
        # Start in foreground (blocks until Ctrl+C). Imported lazily so the
        # background path never needs uvicorn importable in this process.
        import uvicorn

        print(f"🚀 Starting GAIA API server on http://{host}:{port}")
        _print_debug_features(debug, show_prompts, streaming, step_through)
        _print_endpoints(host, port)
        print("\nPress Ctrl+C to stop\n")

        # Set uvicorn log level based on debug flag
        log_level = "debug" if debug else "info"
        uvicorn.run(
            "gaia.api.openai_server:app", host=host, port=port, log_level=log_level
        )
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
def check_status() -> None:
    """Report API server status.

    Placeholder: a real health probe will be implemented in a future
    version; for now this only prints guidance for a manual check.
    """
    for message in (
        "Status check not yet implemented",
        "Try: curl http://localhost:8080/health",
    ):
        print(message)
|
|
134
|
+
|
|
135
|
+
|
|
136
|
+
def stop_server(port: int = 8080) -> None:
    """
    Stop the API server by finding and killing processes on the port.

    On Windows, listener PIDs are discovered via ``netstat -ano`` and
    terminated with ``taskkill``; on Linux/macOS, via ``lsof -ti`` and
    ``SIGTERM``.

    Args:
        port: Port number to stop server on (default: 8080)

    Returns:
        None
    """
    import platform
    import signal

    system = platform.system()

    try:
        if system == "Windows":
            # Windows: Use netstat to find PID, then taskkill to stop it
            result = subprocess.run(
                ["netstat", "-ano"],
                capture_output=True,
                text=True,
                timeout=5,
                check=False,
            )

            # Parse netstat output to find PIDs listening on the port
            pids = set()
            for line in result.stdout.splitlines():
                if f":{port}" in line and "LISTENING" in line:
                    # Line format: " TCP 0.0.0.0:8080 0.0.0.0:0 LISTENING 12345"
                    parts = line.split()
                    if parts and parts[-1].isdigit():
                        pids.add(parts[-1])

            if pids:
                for pid in pids:
                    try:
                        subprocess.run(
                            ["taskkill", "/F", "/PID", pid],
                            capture_output=True,
                            timeout=5,
                            check=False,
                        )
                        print(f"🛑 Stopped API server process (PID: {pid})")
                    except (
                        subprocess.TimeoutExpired,
                        subprocess.CalledProcessError,
                    ) as e:
                        print(f"⚠️ Failed to stop PID {pid}: {e}")
                print("✅ API server stopped")
            else:
                # BUG FIX: the original string was missing the f-prefix, so
                # the literal text "{port}" was printed instead of the number.
                print(f"ℹ️ No API server found running on port {port}")

        else:
            # Linux/Mac: Use lsof to find PID, then kill it
            result = subprocess.run(
                ["lsof", "-ti", f":{port}"],
                capture_output=True,
                text=True,
                timeout=5,
                check=False,
            )

            pids = result.stdout.strip().split("\n")
            pids = [pid for pid in pids if pid]  # Filter empty strings

            if pids:
                for pid in pids:
                    try:
                        os.kill(int(pid), signal.SIGTERM)
                        print(f"🛑 Stopped API server process (PID: {pid})")
                    except (ProcessLookupError, ValueError) as e:
                        print(f"⚠️ Failed to stop PID {pid}: {e}")
                print("✅ API server stopped")
            else:
                print(f"ℹ️ No API server found running on port {port}")

    except FileNotFoundError as e:
        # netstat/lsof/taskkill not on PATH — tell the user how to do it by hand.
        print(f"❌ Required command not found: {e}")
        print("To stop manually, find the process using the port:")
        if system == "Windows":
            print(f" netstat -ano | findstr :{port}")
            print(" taskkill /F /PID <PID>")
        else:
            print(f" lsof -ti :{port}")
            print(" kill -9 <PID>")
    except subprocess.TimeoutExpired:
        print(f"❌ Timeout while trying to stop server on port {port}")
    except Exception as e:
        # Broad catch is intentional: stopping is best-effort and should
        # never crash the CLI.
        print(f"❌ Error stopping server: {e}")
|
|
227
|
+
|
|
228
|
+
|
|
229
|
+
def main() -> None:
    """
    CLI entry point for API server commands.

    Example:
        $ gaia api start
        $ gaia api start --host 0.0.0.0 --port 8000 --background
        $ gaia api status
        $ gaia api stop
    """
    parser = argparse.ArgumentParser(description="GAIA OpenAI-compatible API server")
    subparsers = parser.add_subparsers(dest="command", help="Command to execute")

    # --- start ---
    start_parser = subparsers.add_parser("start", help="Start API server")
    start_parser.add_argument(
        "--host", default="localhost", help="Host to bind to (default: localhost)"
    )
    start_parser.add_argument(
        "--port", type=int, default=8080, help="Port to bind to (default: 8080)"
    )
    start_parser.add_argument(
        "--background", action="store_true", help="Run in background"
    )
    # The debug-oriented options are all plain boolean switches.
    for flag, description in (
        ("--debug", "Enable debug logging"),
        ("--show-prompts", "Display prompts sent to LLM"),
        ("--streaming", "Enable real-time streaming of LLM responses"),
        (
            "--step-through",
            "Enable step-through debugging mode (pause at each agent step)",
        ),
    ):
        start_parser.add_argument(flag, action="store_true", help=description)

    # --- status ---
    subparsers.add_parser("status", help="Check server status")

    # --- stop ---
    stop_parser = subparsers.add_parser("stop", help="Stop server")
    stop_parser.add_argument(
        "--port", type=int, default=8080, help="Port number (default: 8080)"
    )

    args = parser.parse_args()

    if args.command == "start":
        start_server(
            args.host,
            args.port,
            args.background,
            getattr(args, "debug", False),
            getattr(args, "show_prompts", False),
            getattr(args, "streaming", False),
            getattr(args, "step_through", False),
        )
    elif args.command == "status":
        check_status()
    elif args.command == "stop":
        stop_server(getattr(args, "port", 8080))
    else:
        # No subcommand given — show usage instead of failing silently.
        parser.print_help()
|
|
302
|
+
|
|
303
|
+
|
|
304
|
+
# Allow running this module directly (e.g. `python -m gaia.api.app`).
if __name__ == "__main__":
    main()
|