amd-gaia 0.15.1__py3-none-any.whl → 0.15.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {amd_gaia-0.15.1.dist-info → amd_gaia-0.15.3.dist-info}/METADATA +2 -2
- {amd_gaia-0.15.1.dist-info → amd_gaia-0.15.3.dist-info}/RECORD +38 -32
- {amd_gaia-0.15.1.dist-info → amd_gaia-0.15.3.dist-info}/WHEEL +1 -1
- gaia/agents/base/agent.py +317 -113
- gaia/agents/base/api_agent.py +0 -1
- gaia/agents/base/console.py +334 -9
- gaia/agents/base/tools.py +7 -2
- gaia/agents/blender/__init__.py +7 -0
- gaia/agents/blender/agent.py +7 -10
- gaia/agents/blender/core/view.py +2 -2
- gaia/agents/chat/agent.py +22 -48
- gaia/agents/chat/app.py +7 -0
- gaia/agents/chat/tools/rag_tools.py +23 -8
- gaia/agents/chat/tools/shell_tools.py +1 -0
- gaia/agents/code/prompts/code_patterns.py +2 -4
- gaia/agents/docker/agent.py +1 -0
- gaia/agents/emr/agent.py +3 -5
- gaia/agents/emr/cli.py +1 -1
- gaia/agents/emr/dashboard/server.py +2 -4
- gaia/agents/tools/__init__.py +11 -0
- gaia/agents/tools/file_tools.py +715 -0
- gaia/apps/llm/app.py +14 -3
- gaia/chat/app.py +2 -4
- gaia/cli.py +751 -333
- gaia/installer/__init__.py +23 -0
- gaia/installer/init_command.py +1605 -0
- gaia/installer/lemonade_installer.py +678 -0
- gaia/llm/__init__.py +2 -1
- gaia/llm/lemonade_client.py +427 -99
- gaia/llm/lemonade_manager.py +55 -11
- gaia/llm/providers/lemonade.py +21 -14
- gaia/rag/sdk.py +1 -1
- gaia/security.py +24 -4
- gaia/talk/app.py +2 -4
- gaia/version.py +2 -2
- {amd_gaia-0.15.1.dist-info → amd_gaia-0.15.3.dist-info}/entry_points.txt +0 -0
- {amd_gaia-0.15.1.dist-info → amd_gaia-0.15.3.dist-info}/licenses/LICENSE.md +0 -0
- {amd_gaia-0.15.1.dist-info → amd_gaia-0.15.3.dist-info}/top_level.txt +0 -0
gaia/cli.py
CHANGED
|
@@ -11,6 +11,7 @@ from pathlib import Path
|
|
|
11
11
|
|
|
12
12
|
from dotenv import load_dotenv
|
|
13
13
|
|
|
14
|
+
from gaia.agents.base.console import AgentConsole
|
|
14
15
|
from gaia.llm import create_client
|
|
15
16
|
from gaia.llm.lemonade_client import (
|
|
16
17
|
DEFAULT_HOST,
|
|
@@ -55,25 +56,6 @@ DEFAULT_EXPERIMENTS_DIR = "output/experiments"
|
|
|
55
56
|
DEFAULT_EVALUATIONS_DIR = "output/evaluations"
|
|
56
57
|
|
|
57
58
|
|
|
58
|
-
# Helper functions for download progress display
|
|
59
|
-
def _format_bytes(b: int) -> str:
|
|
60
|
-
"""Format bytes to human readable string."""
|
|
61
|
-
if b >= 1024 * 1024 * 1024:
|
|
62
|
-
return f"{b / (1024 * 1024 * 1024):.1f} GB"
|
|
63
|
-
elif b >= 1024 * 1024:
|
|
64
|
-
return f"{b / (1024 * 1024):.1f} MB"
|
|
65
|
-
elif b >= 1024:
|
|
66
|
-
return f"{b / 1024:.1f} KB"
|
|
67
|
-
return f"{b} B"
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
def _make_progress_bar(percent: int, width: int = 20) -> str:
|
|
71
|
-
"""Create a progress bar string."""
|
|
72
|
-
filled = int(width * percent / 100)
|
|
73
|
-
empty = width - filled
|
|
74
|
-
return f"[{'█' * filled}{'░' * empty}]"
|
|
75
|
-
|
|
76
|
-
|
|
77
59
|
def check_lemonade_health(host=None, port=None):
|
|
78
60
|
"""Check if Lemonade server is running and healthy using LemonadeClient."""
|
|
79
61
|
log = get_logger(__name__)
|
|
@@ -162,6 +144,7 @@ def initialize_lemonade_for_agent(
|
|
|
162
144
|
"docker": 32768,
|
|
163
145
|
"talk": 32768,
|
|
164
146
|
"rag": 32768,
|
|
147
|
+
"sd": 8192, # SD agent needs 8K for image + story workflow
|
|
165
148
|
"mcp": 4096,
|
|
166
149
|
"minimal": 4096,
|
|
167
150
|
"vlm": 8192,
|
|
@@ -189,7 +172,7 @@ def ensure_agent_models(
|
|
|
189
172
|
host: str = DEFAULT_HOST,
|
|
190
173
|
port: int = DEFAULT_PORT,
|
|
191
174
|
quiet: bool = False,
|
|
192
|
-
|
|
175
|
+
_timeout: int = 1800, # Reserved for future use
|
|
193
176
|
) -> bool:
|
|
194
177
|
"""
|
|
195
178
|
Ensure all models required for an agent are downloaded.
|
|
@@ -229,86 +212,72 @@ def ensure_agent_models(
|
|
|
229
212
|
log.debug(f"All models for {agent} agent already available")
|
|
230
213
|
return True
|
|
231
214
|
|
|
215
|
+
# Use AgentConsole for nicely formatted progress display
|
|
216
|
+
console = AgentConsole()
|
|
217
|
+
|
|
232
218
|
if not quiet:
|
|
233
|
-
|
|
234
|
-
f"
|
|
219
|
+
console.print_info(
|
|
220
|
+
f"Downloading {len(models_to_download)} model(s) for {agent} agent"
|
|
235
221
|
)
|
|
236
|
-
print()
|
|
237
|
-
|
|
238
|
-
# Progress tracking
|
|
239
|
-
last_percent = [-1]
|
|
240
|
-
last_file_index = [0]
|
|
241
222
|
|
|
242
|
-
|
|
243
|
-
|
|
244
|
-
if quiet:
|
|
245
|
-
|
|
223
|
+
# Download each model with progress display
|
|
224
|
+
for model_id in models_to_download:
|
|
225
|
+
if not quiet:
|
|
226
|
+
console.print_download_start(model_id)
|
|
246
227
|
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
|
|
250
|
-
|
|
251
|
-
total_files = data.get("total_files", 1)
|
|
252
|
-
|
|
253
|
-
# Print newline when moving to a new file
|
|
254
|
-
if file_index != last_file_index[0] and last_file_index[0] > 0:
|
|
255
|
-
print() # Newline for previous file
|
|
256
|
-
last_file_index[0] = file_index
|
|
257
|
-
|
|
258
|
-
# Update every 2% for smooth progress
|
|
259
|
-
if percent >= last_percent[0] + 2 or percent == 0 or percent == 100:
|
|
260
|
-
bytes_downloaded = data.get("bytes_downloaded", 0)
|
|
261
|
-
bytes_total = data.get("bytes_total", 0)
|
|
262
|
-
|
|
263
|
-
# Create progress bar
|
|
264
|
-
bar = _make_progress_bar(percent)
|
|
265
|
-
progress_line = (
|
|
266
|
-
f" {bar} {percent:3d}% "
|
|
267
|
-
f"[{file_index}/{total_files}] {file_name}: "
|
|
268
|
-
f"{_format_bytes(bytes_downloaded)}/{_format_bytes(bytes_total)}"
|
|
269
|
-
)
|
|
270
|
-
print(f"\r{progress_line:<100}", end="", flush=True)
|
|
271
|
-
last_percent[0] = percent
|
|
228
|
+
try:
|
|
229
|
+
event_count = 0
|
|
230
|
+
last_bytes = 0
|
|
231
|
+
last_time = time.time()
|
|
272
232
|
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
last_percent[0] = -1
|
|
277
|
-
last_file_index[0] = 0
|
|
233
|
+
for event in client.pull_model_stream(model_name=model_id):
|
|
234
|
+
event_count += 1
|
|
235
|
+
event_type = event.get("event")
|
|
278
236
|
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
237
|
+
if event_type == "progress":
|
|
238
|
+
# Skip first 2 spurious events from Lemonade
|
|
239
|
+
if event_count <= 2 or quiet:
|
|
240
|
+
continue
|
|
283
241
|
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
242
|
+
# Calculate download speed
|
|
243
|
+
current_bytes = event.get("bytes_downloaded", 0)
|
|
244
|
+
current_time = time.time()
|
|
245
|
+
time_delta = current_time - last_time
|
|
246
|
+
|
|
247
|
+
speed_mbps = 0.0
|
|
248
|
+
if time_delta > 0.1 and current_bytes > last_bytes:
|
|
249
|
+
bytes_delta = current_bytes - last_bytes
|
|
250
|
+
speed_mbps = (bytes_delta / time_delta) / (1024 * 1024)
|
|
251
|
+
last_bytes = current_bytes
|
|
252
|
+
last_time = current_time
|
|
253
|
+
|
|
254
|
+
console.print_download_progress(
|
|
255
|
+
percent=event.get("percent", 0),
|
|
256
|
+
bytes_downloaded=current_bytes,
|
|
257
|
+
bytes_total=event.get("bytes_total", 0),
|
|
258
|
+
speed_mbps=speed_mbps,
|
|
259
|
+
)
|
|
287
260
|
|
|
288
|
-
|
|
289
|
-
|
|
261
|
+
elif event_type == "complete":
|
|
262
|
+
if not quiet:
|
|
263
|
+
console.print_download_complete(model_id)
|
|
290
264
|
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
):
|
|
297
|
-
if event.get("event") == "error":
|
|
265
|
+
elif event_type == "error":
|
|
266
|
+
if not quiet:
|
|
267
|
+
console.print_download_error(
|
|
268
|
+
event.get("error", "Unknown error"), model_id
|
|
269
|
+
)
|
|
298
270
|
log.error(f"Failed to download {model_id}")
|
|
299
271
|
return False
|
|
272
|
+
|
|
300
273
|
except LemonadeClientError as e:
|
|
301
274
|
log.error(f"Failed to download {model_id}: {e}")
|
|
302
275
|
if not quiet:
|
|
303
|
-
|
|
276
|
+
console.print_download_error(str(e), model_id)
|
|
304
277
|
return False
|
|
305
278
|
|
|
306
|
-
if not quiet:
|
|
307
|
-
print()
|
|
308
|
-
|
|
309
279
|
if not quiet:
|
|
310
|
-
|
|
311
|
-
print()
|
|
280
|
+
console.print_success(f"All models ready for {agent} agent")
|
|
312
281
|
|
|
313
282
|
return True
|
|
314
283
|
|
|
@@ -523,14 +492,30 @@ async def async_main(action, **kwargs):
|
|
|
523
492
|
# Create client for actions that use GaiaCliClient (not chat - it uses ChatAgent)
|
|
524
493
|
client = None
|
|
525
494
|
if action in ["prompt", "stats"]:
|
|
526
|
-
# Filter out
|
|
495
|
+
# Filter out parameters that are not accepted by GaiaCliClient
|
|
496
|
+
# GaiaCliClient only accepts: model, max_tokens, show_stats, logging_level
|
|
527
497
|
audio_params = {
|
|
528
498
|
"whisper_model_size",
|
|
529
499
|
"audio_device_index",
|
|
530
500
|
"silence_threshold",
|
|
531
501
|
"no_tts",
|
|
532
502
|
}
|
|
533
|
-
|
|
503
|
+
llm_provider_params = {
|
|
504
|
+
"use_claude",
|
|
505
|
+
"use_chatgpt",
|
|
506
|
+
"claude_model",
|
|
507
|
+
"base_url",
|
|
508
|
+
}
|
|
509
|
+
cli_params = {
|
|
510
|
+
"action",
|
|
511
|
+
"message",
|
|
512
|
+
"stats",
|
|
513
|
+
"assistant_name",
|
|
514
|
+
"stream",
|
|
515
|
+
"no_lemonade_check",
|
|
516
|
+
"list_tools",
|
|
517
|
+
}
|
|
518
|
+
excluded_params = cli_params | audio_params | llm_provider_params
|
|
534
519
|
client_params = {k: v for k, v in kwargs.items() if k not in excluded_params}
|
|
535
520
|
client = GaiaCliClient(**client_params)
|
|
536
521
|
|
|
@@ -1003,6 +988,62 @@ def main():
|
|
|
1003
988
|
help="Port for the Blender MCP server (default: 9876)",
|
|
1004
989
|
)
|
|
1005
990
|
|
|
991
|
+
# Add SD (Stable Diffusion) image generation command
|
|
992
|
+
sd_parser = subparsers.add_parser(
|
|
993
|
+
"sd",
|
|
994
|
+
help="Generate images using Stable Diffusion",
|
|
995
|
+
parents=[parent_parser],
|
|
996
|
+
)
|
|
997
|
+
sd_parser.add_argument(
|
|
998
|
+
"prompt",
|
|
999
|
+
nargs="?",
|
|
1000
|
+
help="Text description of the image to generate",
|
|
1001
|
+
)
|
|
1002
|
+
sd_parser.add_argument(
|
|
1003
|
+
"-i",
|
|
1004
|
+
"--interactive",
|
|
1005
|
+
action="store_true",
|
|
1006
|
+
help="Run in interactive mode",
|
|
1007
|
+
)
|
|
1008
|
+
sd_parser.add_argument(
|
|
1009
|
+
"--sd-model",
|
|
1010
|
+
dest="sd_model",
|
|
1011
|
+
choices=["SD-1.5", "SD-Turbo", "SDXL-Base-1.0", "SDXL-Turbo"],
|
|
1012
|
+
default="SDXL-Turbo",
|
|
1013
|
+
help="SD model: SDXL-Turbo (fast, good quality, default), SD-Turbo (faster but lower quality), SDXL-Base-1.0 (photorealistic, slow)",
|
|
1014
|
+
)
|
|
1015
|
+
sd_parser.add_argument(
|
|
1016
|
+
"--size",
|
|
1017
|
+
choices=["512x512", "768x768", "1024x1024"],
|
|
1018
|
+
help="Image size (auto-selected if not specified: 512px for SD-1.5/Turbo, 1024px for SDXL)",
|
|
1019
|
+
)
|
|
1020
|
+
sd_parser.add_argument(
|
|
1021
|
+
"--steps",
|
|
1022
|
+
type=int,
|
|
1023
|
+
help="Inference steps (auto-selected if not specified: 4 for Turbo, 20 for Base)",
|
|
1024
|
+
)
|
|
1025
|
+
sd_parser.add_argument(
|
|
1026
|
+
"--cfg-scale",
|
|
1027
|
+
dest="cfg_scale",
|
|
1028
|
+
type=float,
|
|
1029
|
+
help="CFG scale (auto-selected if not specified: 1.0 for Turbo, 7.5 for Base)",
|
|
1030
|
+
)
|
|
1031
|
+
sd_parser.add_argument(
|
|
1032
|
+
"--output-dir",
|
|
1033
|
+
default=".gaia/cache/sd/images",
|
|
1034
|
+
help="Directory to save generated images",
|
|
1035
|
+
)
|
|
1036
|
+
sd_parser.add_argument(
|
|
1037
|
+
"--seed",
|
|
1038
|
+
type=int,
|
|
1039
|
+
help="Random seed for reproducibility",
|
|
1040
|
+
)
|
|
1041
|
+
sd_parser.add_argument(
|
|
1042
|
+
"--no-open",
|
|
1043
|
+
action="store_true",
|
|
1044
|
+
help="Skip prompt to open image in viewer (for automation/scripting)",
|
|
1045
|
+
)
|
|
1046
|
+
|
|
1006
1047
|
# Add Jira app command
|
|
1007
1048
|
jira_parser = subparsers.add_parser(
|
|
1008
1049
|
"jira",
|
|
@@ -1117,82 +1158,6 @@ def main():
|
|
|
1117
1158
|
)
|
|
1118
1159
|
api_parser.set_defaults(action="api")
|
|
1119
1160
|
|
|
1120
|
-
# Add model pull command
|
|
1121
|
-
pull_parser = subparsers.add_parser(
|
|
1122
|
-
"pull",
|
|
1123
|
-
help="Download/install a model from the Lemonade Server registry",
|
|
1124
|
-
parents=[parent_parser],
|
|
1125
|
-
formatter_class=argparse.RawDescriptionHelpFormatter,
|
|
1126
|
-
epilog="""
|
|
1127
|
-
Examples:
|
|
1128
|
-
# Pull a registered model
|
|
1129
|
-
gaia pull Qwen3-0.6B-GGUF
|
|
1130
|
-
|
|
1131
|
-
# Pull and register a custom model from HuggingFace
|
|
1132
|
-
gaia pull user.Custom-Model-GGUF --checkpoint unsloth/Custom-Model-GGUF:Q4_K_M --recipe llamacpp
|
|
1133
|
-
|
|
1134
|
-
# Pull a reasoning model
|
|
1135
|
-
gaia pull user.DeepSeek-GGUF --checkpoint unsloth/DeepSeek-R1-GGUF --recipe llamacpp --reasoning
|
|
1136
|
-
|
|
1137
|
-
# Pull a vision model with mmproj
|
|
1138
|
-
gaia pull user.Vision-Model --checkpoint model/vision:Q4 --recipe llamacpp --vision --mmproj mmproj.gguf
|
|
1139
|
-
""",
|
|
1140
|
-
)
|
|
1141
|
-
pull_parser.add_argument(
|
|
1142
|
-
"model_name",
|
|
1143
|
-
help="Name of the model to pull (use 'user.' prefix for custom models)",
|
|
1144
|
-
)
|
|
1145
|
-
pull_parser.add_argument(
|
|
1146
|
-
"--checkpoint",
|
|
1147
|
-
help="HuggingFace checkpoint for custom models (e.g., unsloth/Model-GGUF:Q4_K_M)",
|
|
1148
|
-
)
|
|
1149
|
-
pull_parser.add_argument(
|
|
1150
|
-
"--recipe",
|
|
1151
|
-
help="Lemonade recipe for custom models (e.g., llamacpp, oga-cpu)",
|
|
1152
|
-
)
|
|
1153
|
-
pull_parser.add_argument(
|
|
1154
|
-
"--reasoning",
|
|
1155
|
-
action="store_true",
|
|
1156
|
-
help="Mark model as a reasoning model (like DeepSeek)",
|
|
1157
|
-
)
|
|
1158
|
-
pull_parser.add_argument(
|
|
1159
|
-
"--vision",
|
|
1160
|
-
action="store_true",
|
|
1161
|
-
help="Mark model as having vision capabilities",
|
|
1162
|
-
)
|
|
1163
|
-
pull_parser.add_argument(
|
|
1164
|
-
"--embedding",
|
|
1165
|
-
action="store_true",
|
|
1166
|
-
help="Mark model as an embedding model",
|
|
1167
|
-
)
|
|
1168
|
-
pull_parser.add_argument(
|
|
1169
|
-
"--reranking",
|
|
1170
|
-
action="store_true",
|
|
1171
|
-
help="Mark model as a reranking model",
|
|
1172
|
-
)
|
|
1173
|
-
pull_parser.add_argument(
|
|
1174
|
-
"--mmproj",
|
|
1175
|
-
help="Multimodal projector file for vision models",
|
|
1176
|
-
)
|
|
1177
|
-
pull_parser.add_argument(
|
|
1178
|
-
"--timeout",
|
|
1179
|
-
type=int,
|
|
1180
|
-
default=1200,
|
|
1181
|
-
help="Timeout in seconds for model download (default: 1200)",
|
|
1182
|
-
)
|
|
1183
|
-
pull_parser.add_argument(
|
|
1184
|
-
"--host",
|
|
1185
|
-
default="localhost",
|
|
1186
|
-
help="Lemonade server host (default: localhost)",
|
|
1187
|
-
)
|
|
1188
|
-
pull_parser.add_argument(
|
|
1189
|
-
"--port",
|
|
1190
|
-
type=int,
|
|
1191
|
-
default=8000,
|
|
1192
|
-
help="Lemonade server port (default: 8000)",
|
|
1193
|
-
)
|
|
1194
|
-
pull_parser.set_defaults(action="pull")
|
|
1195
|
-
|
|
1196
1161
|
# Add model download command
|
|
1197
1162
|
download_parser = subparsers.add_parser(
|
|
1198
1163
|
"download",
|
|
@@ -1330,7 +1295,12 @@ Available agents: chat, code, talk, rag, blender, jira, docker, vlm, minimal, mc
|
|
|
1330
1295
|
"kill", help="Kill process running on specific port", parents=[parent_parser]
|
|
1331
1296
|
)
|
|
1332
1297
|
kill_parser.add_argument(
|
|
1333
|
-
"--port", type=int,
|
|
1298
|
+
"--port", type=int, default=None, help="Port number to kill process on"
|
|
1299
|
+
)
|
|
1300
|
+
kill_parser.add_argument(
|
|
1301
|
+
"--lemonade",
|
|
1302
|
+
action="store_true",
|
|
1303
|
+
help="Kill Lemonade server (port 8000)",
|
|
1334
1304
|
)
|
|
1335
1305
|
|
|
1336
1306
|
# Add LLM app command
|
|
@@ -2111,6 +2081,107 @@ Examples:
|
|
|
2111
2081
|
"--all", action="store_true", help="Clear all caches"
|
|
2112
2082
|
)
|
|
2113
2083
|
|
|
2084
|
+
# Init command (one-stop GAIA setup)
|
|
2085
|
+
init_parser = subparsers.add_parser(
|
|
2086
|
+
"init",
|
|
2087
|
+
help="Initialize GAIA: install Lemonade and download models",
|
|
2088
|
+
parents=[parent_parser],
|
|
2089
|
+
)
|
|
2090
|
+
init_parser.add_argument(
|
|
2091
|
+
"--profile",
|
|
2092
|
+
"-p",
|
|
2093
|
+
default="chat",
|
|
2094
|
+
choices=["minimal", "sd", "chat", "code", "rag", "all"],
|
|
2095
|
+
help="Profile to initialize: minimal, sd (image gen), chat, code, rag, all (default: chat)",
|
|
2096
|
+
)
|
|
2097
|
+
init_parser.add_argument(
|
|
2098
|
+
"--minimal",
|
|
2099
|
+
action="store_true",
|
|
2100
|
+
help="Use minimal profile (~2.5 GB) - shortcut for --profile minimal",
|
|
2101
|
+
)
|
|
2102
|
+
init_parser.add_argument(
|
|
2103
|
+
"--skip-models",
|
|
2104
|
+
action="store_true",
|
|
2105
|
+
help="Skip model downloads (only install Lemonade)",
|
|
2106
|
+
)
|
|
2107
|
+
init_parser.add_argument(
|
|
2108
|
+
"--skip-lemonade",
|
|
2109
|
+
action="store_true",
|
|
2110
|
+
help="Skip Lemonade installation check (for CI with pre-installed Lemonade)",
|
|
2111
|
+
)
|
|
2112
|
+
init_parser.add_argument(
|
|
2113
|
+
"--force-reinstall",
|
|
2114
|
+
action="store_true",
|
|
2115
|
+
help="Force reinstall even if compatible version exists",
|
|
2116
|
+
)
|
|
2117
|
+
init_parser.add_argument(
|
|
2118
|
+
"--force-models",
|
|
2119
|
+
action="store_true",
|
|
2120
|
+
help="Force re-download models (deletes then re-downloads each model)",
|
|
2121
|
+
)
|
|
2122
|
+
init_parser.add_argument(
|
|
2123
|
+
"--yes",
|
|
2124
|
+
"-y",
|
|
2125
|
+
action="store_true",
|
|
2126
|
+
help="Skip confirmation prompts (non-interactive)",
|
|
2127
|
+
)
|
|
2128
|
+
init_parser.add_argument(
|
|
2129
|
+
"--verbose",
|
|
2130
|
+
action="store_true",
|
|
2131
|
+
help="Enable verbose output",
|
|
2132
|
+
)
|
|
2133
|
+
init_parser.add_argument(
|
|
2134
|
+
"--remote",
|
|
2135
|
+
action="store_true",
|
|
2136
|
+
help="Lemonade is hosted on a remote machine (skip local server start, still check version)",
|
|
2137
|
+
)
|
|
2138
|
+
|
|
2139
|
+
# Install command (install specific components)
|
|
2140
|
+
install_parser = subparsers.add_parser(
|
|
2141
|
+
"install",
|
|
2142
|
+
help="Install GAIA components",
|
|
2143
|
+
parents=[parent_parser],
|
|
2144
|
+
)
|
|
2145
|
+
install_parser.add_argument(
|
|
2146
|
+
"--lemonade",
|
|
2147
|
+
action="store_true",
|
|
2148
|
+
help="Install Lemonade Server",
|
|
2149
|
+
)
|
|
2150
|
+
install_parser.add_argument(
|
|
2151
|
+
"--yes",
|
|
2152
|
+
"-y",
|
|
2153
|
+
action="store_true",
|
|
2154
|
+
help="Skip confirmation prompts",
|
|
2155
|
+
)
|
|
2156
|
+
install_parser.add_argument(
|
|
2157
|
+
"--silent",
|
|
2158
|
+
action="store_true",
|
|
2159
|
+
help="Silent installation (no UI, no desktop shortcuts)",
|
|
2160
|
+
)
|
|
2161
|
+
|
|
2162
|
+
# Uninstall command (uninstall specific components)
|
|
2163
|
+
uninstall_parser = subparsers.add_parser(
|
|
2164
|
+
"uninstall",
|
|
2165
|
+
help="Uninstall GAIA components",
|
|
2166
|
+
parents=[parent_parser],
|
|
2167
|
+
)
|
|
2168
|
+
uninstall_parser.add_argument(
|
|
2169
|
+
"--lemonade",
|
|
2170
|
+
action="store_true",
|
|
2171
|
+
help="Uninstall Lemonade Server",
|
|
2172
|
+
)
|
|
2173
|
+
uninstall_parser.add_argument(
|
|
2174
|
+
"--models",
|
|
2175
|
+
action="store_true",
|
|
2176
|
+
help="Clear all downloaded models (frees disk space)",
|
|
2177
|
+
)
|
|
2178
|
+
uninstall_parser.add_argument(
|
|
2179
|
+
"--yes",
|
|
2180
|
+
"-y",
|
|
2181
|
+
action="store_true",
|
|
2182
|
+
help="Skip confirmation prompts",
|
|
2183
|
+
)
|
|
2184
|
+
|
|
2114
2185
|
args = parser.parse_args()
|
|
2115
2186
|
|
|
2116
2187
|
# Check if action is specified
|
|
@@ -2664,9 +2735,7 @@ Examples:
|
|
|
2664
2735
|
print(f"❌ Error: Failed to initialize TTS: {e}")
|
|
2665
2736
|
return
|
|
2666
2737
|
|
|
2667
|
-
test_text =
|
|
2668
|
-
args.test_text
|
|
2669
|
-
or """
|
|
2738
|
+
test_text = args.test_text or """
|
|
2670
2739
|
Let's play a game of trivia. I'll ask you a series of questions on a particular topic,
|
|
2671
2740
|
and you try to answer them to the best of your ability.
|
|
2672
2741
|
|
|
@@ -2682,7 +2751,6 @@ E) Edgar Allan Poe
|
|
|
2682
2751
|
|
|
2683
2752
|
Let me know your answer!
|
|
2684
2753
|
"""
|
|
2685
|
-
)
|
|
2686
2754
|
|
|
2687
2755
|
if args.test_type == "tts-preprocessing":
|
|
2688
2756
|
tts.test_preprocessing(test_text)
|
|
@@ -2796,106 +2864,48 @@ Let me know your answer!
|
|
|
2796
2864
|
|
|
2797
2865
|
# Handle kill command
|
|
2798
2866
|
if args.action == "kill":
|
|
2799
|
-
|
|
2800
|
-
|
|
2801
|
-
|
|
2802
|
-
|
|
2867
|
+
if args.lemonade:
|
|
2868
|
+
# Use lemonade-server stop for graceful shutdown
|
|
2869
|
+
try:
|
|
2870
|
+
result = subprocess.run(
|
|
2871
|
+
["lemonade-server", "stop"],
|
|
2872
|
+
capture_output=True,
|
|
2873
|
+
text=True,
|
|
2874
|
+
check=False,
|
|
2875
|
+
)
|
|
2876
|
+
if result.returncode == 0:
|
|
2877
|
+
print("✅ Lemonade server stopped")
|
|
2878
|
+
else:
|
|
2879
|
+
# Fallback to port kill if stop command fails
|
|
2880
|
+
log.warning(f"lemonade-server stop failed: {result.stderr}")
|
|
2881
|
+
port_result = kill_process_by_port(8000)
|
|
2882
|
+
if port_result["success"]:
|
|
2883
|
+
print(f"✅ {port_result['message']}")
|
|
2884
|
+
else:
|
|
2885
|
+
print(f"❌ {port_result['message']}")
|
|
2886
|
+
except FileNotFoundError:
|
|
2887
|
+
# lemonade-server not in PATH, fallback to port kill
|
|
2888
|
+
log.warning("lemonade-server not found, falling back to port kill")
|
|
2889
|
+
port_result = kill_process_by_port(8000)
|
|
2890
|
+
if port_result["success"]:
|
|
2891
|
+
print(f"✅ {port_result['message']}")
|
|
2892
|
+
else:
|
|
2893
|
+
print(f"❌ {port_result['message']}")
|
|
2894
|
+
elif args.port:
|
|
2895
|
+
port = args.port
|
|
2896
|
+
log.info(f"Attempting to kill process on port {port}")
|
|
2897
|
+
result = kill_process_by_port(port)
|
|
2898
|
+
if result["success"]:
|
|
2899
|
+
print(f"✅ {result['message']}")
|
|
2900
|
+
else:
|
|
2901
|
+
print(f"❌ {result['message']}")
|
|
2803
2902
|
else:
|
|
2804
|
-
print(
|
|
2903
|
+
print("❌ Specify --lemonade or --port <number>")
|
|
2805
2904
|
return
|
|
2806
2905
|
|
|
2807
2906
|
# Import LemonadeManager for model commands error handling
|
|
2808
2907
|
from gaia.llm.lemonade_manager import LemonadeManager
|
|
2809
2908
|
|
|
2810
|
-
# Handle model pull command
|
|
2811
|
-
if args.action == "pull":
|
|
2812
|
-
log.info(f"Pulling model: {args.model_name}")
|
|
2813
|
-
verbose = getattr(args, "verbose", False)
|
|
2814
|
-
try:
|
|
2815
|
-
client = LemonadeClient(host=args.host, port=args.port, verbose=verbose)
|
|
2816
|
-
|
|
2817
|
-
# Check if Lemonade server is running
|
|
2818
|
-
if not check_lemonade_health(args.host, args.port):
|
|
2819
|
-
LemonadeManager.print_server_error()
|
|
2820
|
-
return
|
|
2821
|
-
|
|
2822
|
-
print(f"📥 Pulling model: {args.model_name}")
|
|
2823
|
-
|
|
2824
|
-
# Define a CLI progress callback for real-time updates
|
|
2825
|
-
last_percent = [-1] # Use list to allow mutation in closure
|
|
2826
|
-
last_file_index = [0]
|
|
2827
|
-
|
|
2828
|
-
def cli_progress_callback(event_type: str, data: dict) -> None:
|
|
2829
|
-
"""Display download progress in CLI."""
|
|
2830
|
-
if event_type == "progress":
|
|
2831
|
-
percent = data.get("percent", 0)
|
|
2832
|
-
file_name = data.get("file", "unknown")
|
|
2833
|
-
file_index = data.get("file_index", 1)
|
|
2834
|
-
total_files = data.get("total_files", 1)
|
|
2835
|
-
|
|
2836
|
-
# Print newline when moving to a new file
|
|
2837
|
-
if file_index != last_file_index[0] and last_file_index[0] > 0:
|
|
2838
|
-
print() # Newline for previous file
|
|
2839
|
-
last_file_index[0] = file_index
|
|
2840
|
-
|
|
2841
|
-
# Update every 2% for smooth progress
|
|
2842
|
-
if percent >= last_percent[0] + 2 or percent == 0 or percent == 100:
|
|
2843
|
-
bytes_downloaded = data.get("bytes_downloaded", 0)
|
|
2844
|
-
bytes_total = data.get("bytes_total", 0)
|
|
2845
|
-
|
|
2846
|
-
# Create progress bar
|
|
2847
|
-
bar = _make_progress_bar(percent)
|
|
2848
|
-
progress_line = (
|
|
2849
|
-
f" {bar} {percent:3d}% "
|
|
2850
|
-
f"[{file_index}/{total_files}] {file_name}: "
|
|
2851
|
-
f"{_format_bytes(bytes_downloaded)}/{_format_bytes(bytes_total)}"
|
|
2852
|
-
)
|
|
2853
|
-
print(f"\r{progress_line:<100}", end="", flush=True)
|
|
2854
|
-
last_percent[0] = percent
|
|
2855
|
-
|
|
2856
|
-
elif event_type == "complete":
|
|
2857
|
-
print() # Newline after progress
|
|
2858
|
-
print(f"✅ Model downloaded successfully: {args.model_name}")
|
|
2859
|
-
|
|
2860
|
-
elif event_type == "error":
|
|
2861
|
-
print() # Newline after progress
|
|
2862
|
-
error_msg = data.get("error", "Unknown error")
|
|
2863
|
-
print(f"❌ Download failed: {error_msg}")
|
|
2864
|
-
|
|
2865
|
-
# Use streaming pull with progress callback
|
|
2866
|
-
completed = False
|
|
2867
|
-
for event in client.pull_model_stream(
|
|
2868
|
-
model_name=args.model_name,
|
|
2869
|
-
checkpoint=getattr(args, "checkpoint", None),
|
|
2870
|
-
recipe=getattr(args, "recipe", None),
|
|
2871
|
-
reasoning=getattr(args, "reasoning", False) or None,
|
|
2872
|
-
vision=getattr(args, "vision", False) or None,
|
|
2873
|
-
embedding=getattr(args, "embedding", False) or None,
|
|
2874
|
-
reranking=getattr(args, "reranking", False) or None,
|
|
2875
|
-
mmproj=getattr(args, "mmproj", None),
|
|
2876
|
-
timeout=args.timeout,
|
|
2877
|
-
progress_callback=cli_progress_callback,
|
|
2878
|
-
):
|
|
2879
|
-
if event.get("event") == "complete":
|
|
2880
|
-
completed = True
|
|
2881
|
-
elif event.get("event") == "error":
|
|
2882
|
-
sys.exit(1)
|
|
2883
|
-
|
|
2884
|
-
if not completed:
|
|
2885
|
-
print("⚠️ Model pull completed without explicit complete event")
|
|
2886
|
-
|
|
2887
|
-
except LemonadeClientError as e:
|
|
2888
|
-
print(f"❌ Error pulling model: {e}")
|
|
2889
|
-
sys.exit(1)
|
|
2890
|
-
except Exception as e:
|
|
2891
|
-
error_msg = str(e).lower()
|
|
2892
|
-
if "connection" in error_msg or "refused" in error_msg:
|
|
2893
|
-
LemonadeManager.print_server_error()
|
|
2894
|
-
else:
|
|
2895
|
-
print(f"❌ Error: {e}")
|
|
2896
|
-
sys.exit(1)
|
|
2897
|
-
return
|
|
2898
|
-
|
|
2899
2909
|
# Handle model download command
|
|
2900
2910
|
if args.action == "download":
|
|
2901
2911
|
from gaia.llm.lemonade_client import AGENT_PROFILES, MODELS
|
|
@@ -3020,122 +3030,110 @@ Let me know your answer!
|
|
|
3020
3030
|
agent_name = args.agent.lower()
|
|
3021
3031
|
model_ids = client.get_required_models(agent_name)
|
|
3022
3032
|
|
|
3033
|
+
console = AgentConsole()
|
|
3034
|
+
|
|
3023
3035
|
if not model_ids:
|
|
3024
3036
|
if agent_name != "all":
|
|
3025
3037
|
profile = client.get_agent_profile(agent_name)
|
|
3026
3038
|
if not profile:
|
|
3027
|
-
|
|
3028
|
-
|
|
3039
|
+
console.print_error(f"Unknown agent: {agent_name}")
|
|
3040
|
+
console.print_info(
|
|
3041
|
+
f"Available: {', '.join(client.list_agents())}"
|
|
3042
|
+
)
|
|
3029
3043
|
sys.exit(1)
|
|
3030
|
-
|
|
3044
|
+
console.print_info(f"No models to download for '{agent_name}'")
|
|
3031
3045
|
return
|
|
3032
3046
|
|
|
3033
|
-
|
|
3034
|
-
|
|
3035
|
-
|
|
3036
|
-
# Track progress per model
|
|
3037
|
-
current_model = [None]
|
|
3038
|
-
last_percent = [-1]
|
|
3039
|
-
last_file_index = [0]
|
|
3040
|
-
|
|
3041
|
-
def download_progress_callback(event_type: str, data: dict) -> None:
|
|
3042
|
-
"""Display download progress in CLI."""
|
|
3043
|
-
if event_type == "progress":
|
|
3044
|
-
percent = data.get("percent", 0)
|
|
3045
|
-
file_name = data.get("file", "unknown")
|
|
3046
|
-
file_index = data.get("file_index", 1)
|
|
3047
|
-
total_files = data.get("total_files", 1)
|
|
3048
|
-
|
|
3049
|
-
# Print newline when moving to a new file
|
|
3050
|
-
if file_index != last_file_index[0] and last_file_index[0] > 0:
|
|
3051
|
-
print() # Newline for previous file
|
|
3052
|
-
last_file_index[0] = file_index
|
|
3053
|
-
|
|
3054
|
-
# Update every 2% for smooth progress
|
|
3055
|
-
if percent >= last_percent[0] + 2 or percent == 0 or percent == 100:
|
|
3056
|
-
bytes_downloaded = data.get("bytes_downloaded", 0)
|
|
3057
|
-
bytes_total = data.get("bytes_total", 0)
|
|
3058
|
-
|
|
3059
|
-
# Create progress bar
|
|
3060
|
-
bar = _make_progress_bar(percent)
|
|
3061
|
-
progress_line = (
|
|
3062
|
-
f" {bar} {percent:3d}% "
|
|
3063
|
-
f"[{file_index}/{total_files}] {file_name}: "
|
|
3064
|
-
f"{_format_bytes(bytes_downloaded)}/{_format_bytes(bytes_total)}"
|
|
3065
|
-
)
|
|
3066
|
-
print(f"\r{progress_line:<100}", end="", flush=True)
|
|
3067
|
-
last_percent[0] = percent
|
|
3068
|
-
|
|
3069
|
-
elif event_type == "complete":
|
|
3070
|
-
print() # Newline after progress
|
|
3071
|
-
print(" ✅ Download complete")
|
|
3072
|
-
last_percent[0] = -1 # Reset for next model
|
|
3073
|
-
last_file_index[0] = 0
|
|
3074
|
-
|
|
3075
|
-
elif event_type == "error":
|
|
3076
|
-
print() # Newline after progress
|
|
3077
|
-
error_msg = data.get("error", "Unknown error")
|
|
3078
|
-
print(f" ❌ Error: {error_msg}")
|
|
3047
|
+
console.print_info(
|
|
3048
|
+
f"Downloading {len(model_ids)} model(s) for '{agent_name}'"
|
|
3049
|
+
)
|
|
3079
3050
|
|
|
3080
|
-
# Download each model
|
|
3051
|
+
# Download each model with progress display
|
|
3081
3052
|
success_count = 0
|
|
3082
3053
|
skip_count = 0
|
|
3083
3054
|
fail_count = 0
|
|
3084
3055
|
|
|
3085
3056
|
for model_id in model_ids:
|
|
3086
|
-
current_model[0] = model_id
|
|
3087
|
-
last_percent[0] = -1
|
|
3088
|
-
|
|
3089
3057
|
# Check if already available
|
|
3090
3058
|
if client.check_model_available(model_id):
|
|
3091
|
-
|
|
3059
|
+
console.print_download_skipped(model_id)
|
|
3092
3060
|
skip_count += 1
|
|
3093
3061
|
continue
|
|
3094
3062
|
|
|
3095
|
-
|
|
3063
|
+
console.print_download_start(model_id)
|
|
3096
3064
|
|
|
3097
3065
|
try:
|
|
3098
3066
|
completed = False
|
|
3099
|
-
|
|
3100
|
-
|
|
3101
|
-
|
|
3102
|
-
|
|
3103
|
-
):
|
|
3104
|
-
|
|
3067
|
+
event_count = 0
|
|
3068
|
+
last_bytes = 0
|
|
3069
|
+
last_time = time.time()
|
|
3070
|
+
|
|
3071
|
+
for event in client.pull_model_stream(model_name=model_id):
|
|
3072
|
+
event_count += 1
|
|
3073
|
+
event_type = event.get("event")
|
|
3074
|
+
|
|
3075
|
+
if event_type == "progress":
|
|
3076
|
+
# Skip first 2 spurious events from Lemonade
|
|
3077
|
+
if event_count <= 2:
|
|
3078
|
+
continue
|
|
3079
|
+
|
|
3080
|
+
# Calculate download speed
|
|
3081
|
+
current_bytes = event.get("bytes_downloaded", 0)
|
|
3082
|
+
current_time = time.time()
|
|
3083
|
+
time_delta = current_time - last_time
|
|
3084
|
+
|
|
3085
|
+
speed_mbps = 0.0
|
|
3086
|
+
if time_delta > 0.1 and current_bytes > last_bytes:
|
|
3087
|
+
bytes_delta = current_bytes - last_bytes
|
|
3088
|
+
speed_mbps = (bytes_delta / time_delta) / (1024 * 1024)
|
|
3089
|
+
last_bytes = current_bytes
|
|
3090
|
+
last_time = current_time
|
|
3091
|
+
|
|
3092
|
+
console.print_download_progress(
|
|
3093
|
+
percent=event.get("percent", 0),
|
|
3094
|
+
bytes_downloaded=current_bytes,
|
|
3095
|
+
bytes_total=event.get("bytes_total", 0),
|
|
3096
|
+
speed_mbps=speed_mbps,
|
|
3097
|
+
)
|
|
3098
|
+
|
|
3099
|
+
elif event_type == "complete":
|
|
3100
|
+
console.print_download_complete(model_id)
|
|
3105
3101
|
completed = True
|
|
3106
|
-
|
|
3102
|
+
|
|
3103
|
+
elif event_type == "error":
|
|
3104
|
+
console.print_download_error(
|
|
3105
|
+
event.get("error", "Unknown error"), model_id
|
|
3106
|
+
)
|
|
3107
3107
|
fail_count += 1
|
|
3108
3108
|
break
|
|
3109
3109
|
|
|
3110
3110
|
if completed:
|
|
3111
3111
|
success_count += 1
|
|
3112
3112
|
except LemonadeClientError as e:
|
|
3113
|
-
|
|
3113
|
+
console.print_download_error(str(e), model_id)
|
|
3114
3114
|
fail_count += 1
|
|
3115
3115
|
|
|
3116
|
-
print()
|
|
3117
|
-
|
|
3118
3116
|
# Summary
|
|
3119
|
-
|
|
3120
|
-
|
|
3121
|
-
|
|
3122
|
-
|
|
3117
|
+
console.print_info("=" * 50)
|
|
3118
|
+
console.print_info("Download Summary:")
|
|
3119
|
+
console.print_success(f"Downloaded: {success_count}")
|
|
3120
|
+
console.print_info(f"Skipped (already available): {skip_count}")
|
|
3123
3121
|
if fail_count > 0:
|
|
3124
|
-
|
|
3125
|
-
|
|
3122
|
+
console.print_error(f"Failed: {fail_count}")
|
|
3123
|
+
console.print_info("=" * 50)
|
|
3126
3124
|
|
|
3127
3125
|
if fail_count > 0:
|
|
3128
3126
|
sys.exit(1)
|
|
3129
3127
|
|
|
3130
3128
|
except LemonadeClientError as e:
|
|
3131
|
-
|
|
3129
|
+
console.print_error(str(e))
|
|
3132
3130
|
sys.exit(1)
|
|
3133
3131
|
except Exception as e:
|
|
3134
3132
|
error_msg = str(e).lower()
|
|
3135
3133
|
if "connection" in error_msg or "refused" in error_msg:
|
|
3136
3134
|
LemonadeManager.print_server_error()
|
|
3137
3135
|
else:
|
|
3138
|
-
|
|
3136
|
+
console.print_error(str(e))
|
|
3139
3137
|
sys.exit(1)
|
|
3140
3138
|
return
|
|
3141
3139
|
|
|
@@ -4067,6 +4065,11 @@ Let me know your answer!
|
|
|
4067
4065
|
handle_blender_command(args)
|
|
4068
4066
|
return
|
|
4069
4067
|
|
|
4068
|
+
# Handle SD (image generation) command
|
|
4069
|
+
if args.action == "sd":
|
|
4070
|
+
handle_sd_command(args)
|
|
4071
|
+
return
|
|
4072
|
+
|
|
4070
4073
|
# Handle Jira command
|
|
4071
4074
|
if args.action == "jira":
|
|
4072
4075
|
handle_jira_command(args)
|
|
@@ -4091,6 +4094,249 @@ Let me know your answer!
|
|
|
4091
4094
|
handle_visualize_command(args)
|
|
4092
4095
|
return
|
|
4093
4096
|
|
|
4097
|
+
# Handle init command
|
|
4098
|
+
if args.action == "init":
|
|
4099
|
+
from gaia.installer.init_command import run_init
|
|
4100
|
+
|
|
4101
|
+
# --minimal flag overrides --profile
|
|
4102
|
+
profile = "minimal" if args.minimal else args.profile
|
|
4103
|
+
|
|
4104
|
+
exit_code = run_init(
|
|
4105
|
+
profile=profile,
|
|
4106
|
+
skip_models=args.skip_models,
|
|
4107
|
+
skip_lemonade=getattr(args, "skip_lemonade", False),
|
|
4108
|
+
force_reinstall=args.force_reinstall,
|
|
4109
|
+
force_models=args.force_models,
|
|
4110
|
+
yes=args.yes,
|
|
4111
|
+
verbose=getattr(args, "verbose", False),
|
|
4112
|
+
remote=getattr(args, "remote", False),
|
|
4113
|
+
)
|
|
4114
|
+
sys.exit(exit_code)
|
|
4115
|
+
|
|
4116
|
+
# Handle install command
|
|
4117
|
+
if args.action == "install":
|
|
4118
|
+
if args.lemonade:
|
|
4119
|
+
from gaia.installer.lemonade_installer import LemonadeInstaller
|
|
4120
|
+
from gaia.version import LEMONADE_VERSION
|
|
4121
|
+
|
|
4122
|
+
installer = LemonadeInstaller()
|
|
4123
|
+
|
|
4124
|
+
# Check if already installed
|
|
4125
|
+
info = installer.check_installation()
|
|
4126
|
+
if info.installed and info.version:
|
|
4127
|
+
installed_ver = info.version.lstrip("v")
|
|
4128
|
+
target_ver = LEMONADE_VERSION.lstrip("v")
|
|
4129
|
+
|
|
4130
|
+
if installed_ver == target_ver:
|
|
4131
|
+
print(f"✅ Lemonade Server v{info.version} is already installed")
|
|
4132
|
+
sys.exit(0)
|
|
4133
|
+
else:
|
|
4134
|
+
print(f"Lemonade Server v{info.version} is installed")
|
|
4135
|
+
print(f"GAIA requires v{LEMONADE_VERSION}")
|
|
4136
|
+
print("")
|
|
4137
|
+
print("To update, run:")
|
|
4138
|
+
print(" gaia uninstall --lemonade")
|
|
4139
|
+
print(" gaia install --lemonade")
|
|
4140
|
+
sys.exit(1)
|
|
4141
|
+
|
|
4142
|
+
# Confirm installation
|
|
4143
|
+
if not args.yes:
|
|
4144
|
+
response = input(f"Install Lemonade v{LEMONADE_VERSION}? [Y/n]: ")
|
|
4145
|
+
if response.lower() == "n":
|
|
4146
|
+
print("Installation cancelled")
|
|
4147
|
+
sys.exit(0)
|
|
4148
|
+
|
|
4149
|
+
# Download and install
|
|
4150
|
+
print("Downloading Lemonade Server...")
|
|
4151
|
+
try:
|
|
4152
|
+
installer_path = installer.download_installer()
|
|
4153
|
+
print("Installing...")
|
|
4154
|
+
result = installer.install(installer_path, silent=args.silent)
|
|
4155
|
+
|
|
4156
|
+
if result.success:
|
|
4157
|
+
# Verify installation
|
|
4158
|
+
verify_info = installer.check_installation()
|
|
4159
|
+
if verify_info.installed:
|
|
4160
|
+
print(f"✅ Installed Lemonade Server v{verify_info.version}")
|
|
4161
|
+
else:
|
|
4162
|
+
print(f"✅ Installed Lemonade Server v{result.version}")
|
|
4163
|
+
sys.exit(0)
|
|
4164
|
+
else:
|
|
4165
|
+
print(f"❌ Installation failed: {result.error}")
|
|
4166
|
+
sys.exit(1)
|
|
4167
|
+
except Exception as e:
|
|
4168
|
+
print(f"❌ Installation failed: {e}")
|
|
4169
|
+
sys.exit(1)
|
|
4170
|
+
else:
|
|
4171
|
+
print("Specify what to install: --lemonade")
|
|
4172
|
+
sys.exit(1)
|
|
4173
|
+
|
|
4174
|
+
# Handle uninstall command
|
|
4175
|
+
if args.action == "uninstall":
|
|
4176
|
+
from rich.console import Console
|
|
4177
|
+
|
|
4178
|
+
console = Console()
|
|
4179
|
+
|
|
4180
|
+
# Handle model cache clearing
|
|
4181
|
+
if args.models:
|
|
4182
|
+
import shutil
|
|
4183
|
+
|
|
4184
|
+
try:
|
|
4185
|
+
# Find HuggingFace cache directory
|
|
4186
|
+
hf_cache = Path.home() / ".cache" / "huggingface" / "hub"
|
|
4187
|
+
if sys.platform == "win32":
|
|
4188
|
+
hf_cache = (
|
|
4189
|
+
Path(os.path.expanduser("~")) / ".cache" / "huggingface" / "hub"
|
|
4190
|
+
)
|
|
4191
|
+
|
|
4192
|
+
if not hf_cache.exists():
|
|
4193
|
+
console.print("[yellow]📦 No model cache found[/yellow]")
|
|
4194
|
+
console.print(f" [dim]Checked: {hf_cache}[/dim]")
|
|
4195
|
+
sys.exit(0)
|
|
4196
|
+
|
|
4197
|
+
# Find all model directories
|
|
4198
|
+
model_dirs = list(hf_cache.glob("models--*"))
|
|
4199
|
+
if not model_dirs:
|
|
4200
|
+
console.print("[green]✅ Model cache is already empty[/green]")
|
|
4201
|
+
console.print(f" [dim]Location: {hf_cache}[/dim]")
|
|
4202
|
+
sys.exit(0)
|
|
4203
|
+
|
|
4204
|
+
# Calculate total size
|
|
4205
|
+
total_size = 0
|
|
4206
|
+
for model_dir in model_dirs:
|
|
4207
|
+
try:
|
|
4208
|
+
total_size += sum(
|
|
4209
|
+
f.stat().st_size
|
|
4210
|
+
for f in model_dir.rglob("*")
|
|
4211
|
+
if f.is_file()
|
|
4212
|
+
)
|
|
4213
|
+
except Exception:
|
|
4214
|
+
pass
|
|
4215
|
+
|
|
4216
|
+
size_gb = total_size / (1024**3)
|
|
4217
|
+
|
|
4218
|
+
# Show what will be deleted
|
|
4219
|
+
console.print()
|
|
4220
|
+
console.print(f"[bold]Found {len(model_dirs)} model(s) in cache[/bold]")
|
|
4221
|
+
console.print(f" [dim]Location: {hf_cache}[/dim]")
|
|
4222
|
+
console.print(f" [dim]Total size: ~{size_gb:.1f} GB[/dim]")
|
|
4223
|
+
console.print()
|
|
4224
|
+
|
|
4225
|
+
# Confirm deletion
|
|
4226
|
+
if not args.yes:
|
|
4227
|
+
console.print(
|
|
4228
|
+
f"[bold]Delete all {len(model_dirs)} model(s)?[/bold] [dim](frees ~{size_gb:.1f} GB)[/dim]"
|
|
4229
|
+
)
|
|
4230
|
+
console.print()
|
|
4231
|
+
console.print(" [y/N]: ", end="")
|
|
4232
|
+
response = input()
|
|
4233
|
+
if response.lower() != "y":
|
|
4234
|
+
console.print("[dim]Model deletion cancelled[/dim]")
|
|
4235
|
+
sys.exit(0)
|
|
4236
|
+
|
|
4237
|
+
console.print()
|
|
4238
|
+
console.print(
|
|
4239
|
+
f"[bold blue]Deleting {len(model_dirs)} model(s)...[/bold blue]"
|
|
4240
|
+
)
|
|
4241
|
+
console.print()
|
|
4242
|
+
|
|
4243
|
+
success_count = 0
|
|
4244
|
+
fail_count = 0
|
|
4245
|
+
|
|
4246
|
+
for model_dir in model_dirs:
|
|
4247
|
+
# Extract model name from directory
|
|
4248
|
+
model_name = model_dir.name.replace("models--", "").replace(
|
|
4249
|
+
"--", "/"
|
|
4250
|
+
)
|
|
4251
|
+
console.print(f" [cyan]{model_name}[/cyan]... ", end="")
|
|
4252
|
+
try:
|
|
4253
|
+
shutil.rmtree(model_dir)
|
|
4254
|
+
console.print("[green]✅[/green]")
|
|
4255
|
+
success_count += 1
|
|
4256
|
+
except PermissionError:
|
|
4257
|
+
console.print("[red]❌ (locked)[/red]")
|
|
4258
|
+
fail_count += 1
|
|
4259
|
+
except Exception as e:
|
|
4260
|
+
console.print(f"[red]❌ ({e})[/red]")
|
|
4261
|
+
fail_count += 1
|
|
4262
|
+
|
|
4263
|
+
# Summary
|
|
4264
|
+
console.print()
|
|
4265
|
+
if success_count > 0:
|
|
4266
|
+
console.print(f"[green]✅ Deleted {success_count} model(s)[/green]")
|
|
4267
|
+
if fail_count > 0:
|
|
4268
|
+
console.print(
|
|
4269
|
+
f"[red]❌ Failed to delete {fail_count} model(s)[/red]"
|
|
4270
|
+
)
|
|
4271
|
+
console.print()
|
|
4272
|
+
console.print(" [bold]If files are locked:[/bold]")
|
|
4273
|
+
console.print(
|
|
4274
|
+
" [dim]1. Close all apps using models (gaia chat, etc.)[/dim]"
|
|
4275
|
+
)
|
|
4276
|
+
console.print(
|
|
4277
|
+
" [dim]2. Stop Lemonade:[/dim] [cyan]gaia kill --lemonade[/cyan]"
|
|
4278
|
+
)
|
|
4279
|
+
console.print(
|
|
4280
|
+
" [dim]3. Re-run:[/dim] [cyan]gaia uninstall --models[/cyan]"
|
|
4281
|
+
)
|
|
4282
|
+
|
|
4283
|
+
sys.exit(0 if fail_count == 0 else 1)
|
|
4284
|
+
|
|
4285
|
+
except Exception as e:
|
|
4286
|
+
console.print(f"[red]❌ Error: {e}[/red]")
|
|
4287
|
+
sys.exit(1)
|
|
4288
|
+
|
|
4289
|
+
# Handle Lemonade Server uninstallation
|
|
4290
|
+
elif args.lemonade:
|
|
4291
|
+
from gaia.installer.lemonade_installer import LemonadeInstaller
|
|
4292
|
+
|
|
4293
|
+
installer = LemonadeInstaller(console=console)
|
|
4294
|
+
|
|
4295
|
+
# Check if installed
|
|
4296
|
+
info = installer.check_installation()
|
|
4297
|
+
if not info.installed:
|
|
4298
|
+
console.print("[green]✅ Lemonade Server is not installed[/green]")
|
|
4299
|
+
sys.exit(0)
|
|
4300
|
+
|
|
4301
|
+
# Show installation details
|
|
4302
|
+
console.print()
|
|
4303
|
+
console.print(f"[bold]Found Lemonade Server v{info.version}[/bold]")
|
|
4304
|
+
if info.path:
|
|
4305
|
+
console.print(f" [dim]Location: {info.path}[/dim]")
|
|
4306
|
+
console.print()
|
|
4307
|
+
|
|
4308
|
+
# Confirm uninstallation
|
|
4309
|
+
if not args.yes:
|
|
4310
|
+
console.print(
|
|
4311
|
+
f"[bold]Uninstall Lemonade Server v{info.version}?[/bold] \\[y/N]: ",
|
|
4312
|
+
end="",
|
|
4313
|
+
)
|
|
4314
|
+
response = input()
|
|
4315
|
+
if response.lower() != "y":
|
|
4316
|
+
console.print("[dim]Uninstall cancelled[/dim]")
|
|
4317
|
+
sys.exit(0)
|
|
4318
|
+
|
|
4319
|
+
# Uninstall
|
|
4320
|
+
console.print()
|
|
4321
|
+
console.print("[bold blue]Uninstalling Lemonade Server...[/bold blue]")
|
|
4322
|
+
result = installer.uninstall(silent=True)
|
|
4323
|
+
|
|
4324
|
+
if result.success:
|
|
4325
|
+
console.print()
|
|
4326
|
+
console.print(
|
|
4327
|
+
"[green]✅ Lemonade Server uninstalled successfully[/green]"
|
|
4328
|
+
)
|
|
4329
|
+
sys.exit(0)
|
|
4330
|
+
else:
|
|
4331
|
+
console.print()
|
|
4332
|
+
console.print(f"[red]❌ Uninstall failed: {result.error}[/red]")
|
|
4333
|
+
sys.exit(1)
|
|
4334
|
+
else:
|
|
4335
|
+
console.print("[yellow]Specify what to uninstall:[/yellow]")
|
|
4336
|
+
console.print(" [cyan]--lemonade[/cyan] Uninstall Lemonade Server")
|
|
4337
|
+
console.print(" [cyan]--models[/cyan] Clear all downloaded models")
|
|
4338
|
+
sys.exit(1)
|
|
4339
|
+
|
|
4094
4340
|
# Log error for unknown action
|
|
4095
4341
|
log.error(f"Unknown action specified: {args.action}")
|
|
4096
4342
|
parser.print_help()
|
|
@@ -4714,6 +4960,178 @@ def handle_visualize_command(args):
|
|
|
4714
4960
|
print(f"⚠️ Error stopping server: {e}")
|
|
4715
4961
|
|
|
4716
4962
|
|
|
4963
|
+
def handle_sd_command(args):
    """
    Handle the SD (Stable Diffusion) image generation command.

    Shows usage help when no prompt is given and interactive mode is off,
    otherwise initializes the Lemonade server, builds an SDAgent, and runs
    either an interactive prompt loop or a single generation pass. After a
    successful generation the user may be offered to open the new images in
    the system's default viewer.

    Args:
        args: Parsed command line arguments for the sd command
    """
    # No prompt and not interactive - show help (no server needed)
    if not args.prompt and not args.interactive:
        print("Usage: gaia sd <prompt> [options]")
        print(" gaia sd -i (interactive mode)")
        print()
        print("Examples:")
        print(' gaia sd "a sunset over mountains"')
        print(' gaia sd "cyberpunk city" --sd-model SDXL-Turbo --size 1024x1024')
        print(" gaia sd -i")
        return

    from gaia.agents.sd import SDAgent, SDAgentConfig

    # Ensure Lemonade is ready with proper context size for SD agent
    # SD agent needs 8K context for image + story workflow
    success, _ = initialize_lemonade_for_agent(
        agent="sd",
        use_claude=getattr(args, "use_claude", False),
        use_chatgpt=getattr(args, "use_chatgpt", False),
        quiet=False,
    )

    # Cloud backends (Claude/ChatGPT) do not need the local server, so only
    # abort on failure when neither cloud flag is set.
    if not success and not (
        getattr(args, "use_claude", False) or getattr(args, "use_chatgpt", False)
    ):
        print("Failed to initialize Lemonade Server with required 8K context.")
        print("Try: lemonade-server serve --ctx-size 8192")
        sys.exit(1)

    # Create config - ensure LLM model is set
    llm_model = getattr(args, "model", None)
    if not llm_model:
        llm_model = "Qwen3-8B-GGUF"  # Default LLM for prompt enhancement

    config = SDAgentConfig(
        sd_model=args.sd_model,
        output_dir=args.output_dir,
        prompt_to_open=not args.no_open,
        show_stats=getattr(args, "stats", False),
        use_claude=getattr(args, "use_claude", False),
        use_chatgpt=getattr(args, "use_chatgpt", False),
        base_url=getattr(args, "base_url", "http://localhost:8000/api/v1"),
        model_id=llm_model,
    )

    # Create agent with LLM prompt enhancement
    agent = SDAgent(config)

    # Check health before accepting any prompts
    health = agent.sd_health_check()
    if health["status"] != "healthy":
        print(f"Error: {health.get('error', 'SD endpoint unavailable')}")
        print("Make sure Lemonade Server is running and SD model is available:")
        print(" lemonade-server serve")
        print(" lemonade-server pull SD-Turbo")
        sys.exit(1)

    print()
    print("=" * 80)
    print(f"🖼️ SD Image Generator - {args.sd_model}")
    print("=" * 80)
    print("LLM-powered prompt enhancement for better image quality")
    print(f"Output: {args.output_dir}")
    if not args.no_open:
        print("You'll be prompted to open images after generation")
    print("=" * 80)
    print()

    # Interactive mode
    if args.interactive:
        print("Type 'quit' to exit.")
        print()

        while True:
            try:
                user_prompt = input("You: ").strip()
                if not user_prompt:
                    continue
                if user_prompt.lower() in ("quit", "exit", "q"):
                    print("Goodbye!")
                    break

                # Track images before this query so we can offer to open
                # only the newly generated ones afterwards.
                initial_count = len(agent.sd_generations)

                # Use agent.process_query() for LLM enhancement
                result = agent.process_query(user_prompt)
                if result.get("final_answer"):
                    print(f"\nAgent: {result['final_answer']}\n")
                else:
                    print("\nAgent: Generation complete\n")

                # Prompt to open image(s) after agent completes
                if not args.no_open and result.get("status") != "error":
                    _prompt_open_new_images(agent, initial_count)

            except KeyboardInterrupt:
                print("\nGoodbye!")
                break

    # Single prompt mode
    else:
        # Track images before this command
        initial_count = len(agent.sd_generations)

        # Use agent.process_query() for LLM enhancement
        result = agent.process_query(args.prompt)
        if result.get("final_answer"):
            print(f"\n{result['final_answer']}\n")

        # Prompt to open image(s) after agent completes
        if not args.no_open and result.get("status") != "error":
            _prompt_open_new_images(agent, initial_count)


def _prompt_open_new_images(agent, initial_count):
    """
    Offer to open images generated since *initial_count* in the default viewer.

    Shared by interactive and single-prompt modes of handle_sd_command (the
    logic was previously duplicated in both branches). Reads the agent's
    ``sd_generations`` list, asks the user for confirmation (default: yes),
    and opens each new image with the platform-appropriate launcher.
    Ctrl+C / EOF at the confirmation prompt is treated as "no".

    Args:
        agent: SDAgent whose ``sd_generations`` records generated images
            as dicts containing an ``image_path`` key.
        initial_count: Length of ``agent.sd_generations`` before the query;
            entries at or beyond this index are the new images.
    """
    try:
        # Get all newly generated images from this query
        new_images = agent.sd_generations[initial_count:]

        if new_images:
            num_images = len(new_images)
            prompt_text = (
                f"Open {num_images} images in default viewer? [Y/n]: "
                if num_images > 1
                else "Open image in default viewer? [Y/n]: "
            )
            response = input(prompt_text).strip().lower()

            if response in ("", "y", "yes"):
                for img in new_images:
                    path = img["image_path"]
                    if sys.platform == "win32":
                        os.startfile(path)  # pylint: disable=no-member
                    elif sys.platform == "darwin":
                        subprocess.run(["open", path], check=False)
                    else:
                        subprocess.run(["xdg-open", path], check=False)
                plural = "s" if num_images > 1 else ""
                print(f"[{num_images} image{plural} opened]\n")
    except (KeyboardInterrupt, EOFError):
        pass
4717
5135
|
def handle_blender_command(args):
|
|
4718
5136
|
"""
|
|
4719
5137
|
Handle the Blender agent command.
|