cortex-llm 1.0.10__py3-none-any.whl → 1.0.11__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
cortex/ui/model_ui.py ADDED
@@ -0,0 +1,408 @@
+ """Model download/login/management UI helpers for CLI."""
+
+ from __future__ import annotations
+
+ import getpass
+ import sys
+ from pathlib import Path
+ from typing import Any
+
+
+ def download_model(*, cli: Any, args: str = "") -> None:
+     """Download a model from HuggingFace."""
+     if args:
+         parts = args.split()
+         repo_id = parts[0]
+         filename = parts[1] if len(parts) > 1 else None
+     else:
+         width = min(cli.get_terminal_width() - 2, 70)
+
+         print()
+         cli.print_box_header("Model Manager", width)
+         cli.print_empty_line(width)
+
+         option_num = 1
+         available = cli.model_manager.discover_available_models()
+
+         if available:
+             cli.print_box_line(" \033[96mLoad Existing Model:\033[0m", width)
+             cli.print_empty_line(width)
+
+             for model in available[:5]:
+                 name = model["name"][: width - 15]
+                 size = f"{model['size_gb']:.1f}GB"
+                 line = f" \033[93m[{option_num}]\033[0m {name} \033[2m({size})\033[0m"
+                 cli.print_box_line(line, width)
+                 option_num += 1
+
+             if len(available) > 5:
+                 line = f" \033[93m[{option_num}]\033[0m \033[2mShow all {len(available)} models...\033[0m"
+                 cli.print_box_line(line, width)
+                 option_num += 1
+
+             cli.print_empty_line(width)
+             cli.print_box_separator(width)
+             cli.print_empty_line(width)
+
+         cli.print_box_line(" \033[96mDownload New Model:\033[0m", width)
+         cli.print_empty_line(width)
+
+         line = " \033[2mEnter repository ID (e.g., meta-llama/Llama-3.2-3B)\033[0m"
+         cli.print_box_line(line, width)
+
+         cli.print_empty_line(width)
+         cli.print_box_footer(width)
+
+         choice = cli.get_input_with_escape("Choice or repo ID")
+
+         if choice is None:
+             return
+
+         try:
+             choice_num = int(choice)
+
+             if available and choice_num <= len(available[:5]):
+                 model = available[choice_num - 1]
+                 print(f"\n\033[96m⚡\033[0m Loading {model['name']}...")
+                 success, msg = cli.model_manager.load_model(model["path"])
+                 if success:
+                     print("\033[32m✓\033[0m Model loaded successfully!")
+
+                     model_info = cli.model_manager.get_current_model()
+                     if model_info:
+                         tokenizer = cli.model_manager.tokenizers.get(model_info.name)
+                         profile = cli.template_registry.setup_model(
+                             model_info.name,
+                             tokenizer=tokenizer,
+                             interactive=False,
+                         )
+                         if profile:
+                             template_name = profile.config.name
+                             print(f" \033[2m• Template: {template_name}\033[0m")
+                 else:
+                     print(f"\033[31m✗\033[0m Failed to load: {msg}")
+                 return
+
+             if available and choice_num == len(available[:5]) + 1 and len(available) > 5:
+                 print()
+                 cli.manage_models()
+                 return
+
+             print("\033[31m✗ Invalid choice\033[0m")
+             return
+
+         except ValueError:
+             repo_id = choice
+             parts = repo_id.split()
+             repo_id = parts[0]
+             filename = parts[1] if len(parts) > 1 else None
+
+     if "/" not in repo_id:
+         print("\n\033[31m✗ Invalid format. Expected: username/model-name\033[0m")
+         return
+
+     print(f"\n\033[96m⬇\033[0m Downloading: \033[93m{repo_id}\033[0m")
+     if filename:
+         print(f" File: \033[93m{filename}\033[0m")
+     print()
+
+     success, message, path = cli.model_downloader.download_model(repo_id, filename)
+
+     if success:
+         width = min(cli.get_terminal_width() - 2, 70)
+         print()
+         title_with_color = " \033[32mDownload Complete\033[0m "
+         visible_len = cli.get_visible_length(title_with_color)
+         padding = width - visible_len - 3
+         print(f"╭─{title_with_color}" + "─" * padding + "╮")
+         cli.print_box_line(" \033[32m✓\033[0m Model downloaded successfully!", width)
+
+         location_str = str(path)[: width - 13]
+         cli.print_box_line(f" \033[2mLocation: {location_str}\033[0m", width)
+         cli.print_empty_line(width)
+         cli.print_box_line(" \033[96mLoad this model now?\033[0m", width)
+         cli.print_box_line(" \033[93m[Y]es\033[0m \033[2m[N]o\033[0m", width)
+         cli.print_box_footer(width)
+
+         try:
+             choice = input("\n\033[96m▶\033[0m Choice (\033[93my\033[0m/\033[2mn\033[0m): ").strip().lower()
+             if choice in ["y", "yes"]:
+                 print("\n\033[96m⚡\033[0m Loading model...")
+                 load_success, load_msg = cli.model_manager.load_model(str(path))
+                 if load_success:
+                     print("\033[32m✓\033[0m Model loaded successfully!")
+                 else:
+                     print(f"\033[31m✗\033[0m Failed to load: {load_msg}")
+         except KeyboardInterrupt:
+             print("\n\033[2mCancelled\033[0m")
+     else:
+         print(f"\n\033[31m✗\033[0m {message}")
+
+
+ def hf_login(*, cli: Any) -> None:
+     """Login to HuggingFace for accessing gated models."""
+     try:
+         from huggingface_hub import login, HfApi
+         from huggingface_hub.utils import HfHubHTTPError
+     except ImportError:
+         print("\n\033[31m✗\033[0m huggingface-hub not installed. Install with: pip install huggingface-hub")
+         return
+
+     width = min(cli.get_terminal_width() - 2, 70)
+
+     print()
+     cli.print_box_header("HuggingFace Login", width)
+     cli.print_empty_line(width)
+
+     try:
+         api = HfApi()
+         user_info = api.whoami()
+         if user_info:
+             username = user_info.get("name", "Unknown")
+             cli.print_box_line(
+                 f" \033[32m✓\033[0m Already logged in as: \033[93m{username}\033[0m",
+                 width,
+             )
+             cli.print_empty_line(width)
+             cli.print_box_line(" \033[96mOptions:\033[0m", width)
+             cli.print_box_line(" \033[93m[1]\033[0m Login with new token", width)
+             cli.print_box_line(" \033[93m[2]\033[0m Logout", width)
+             cli.print_box_line(" \033[93m[3]\033[0m Cancel", width)
+             cli.print_box_footer(width)
+
+             choice = cli.get_input_with_escape("Select option (1-3)")
+             if choice == "1":
+                 pass
+             elif choice == "2":
+                 from huggingface_hub import logout
+
+                 logout()
+                 print("\n\033[32m✓\033[0m Successfully logged out from HuggingFace")
+                 return
+             else:
+                 return
+     except Exception:
+         pass
+
+     print()
+     cli.print_box_header("HuggingFace Login", width)
+     cli.print_empty_line(width)
+     cli.print_box_line(" To access gated models, you need a HuggingFace token.", width)
+     cli.print_empty_line(width)
+     cli.print_box_line(" \033[96m1.\033[0m Get your token from:", width)
+     cli.print_box_line(" \033[93mhttps://huggingface.co/settings/tokens\033[0m", width)
+     cli.print_empty_line(width)
+     cli.print_box_line(" \033[96m2.\033[0m Create a token with \033[93mread\033[0m permissions", width)
+     cli.print_empty_line(width)
+     cli.print_box_line(" \033[96m3.\033[0m Paste the token below (input hidden)", width)
+     cli.print_box_footer(width)
+
+     print()
+     token = getpass.getpass("\033[96m▶\033[0m Enter token \033[2m(or press Enter to cancel)\033[0m: ")
+
+     if not token:
+         print("\033[2mCancelled\033[0m")
+         return
+
+     print("\n\033[96m⚡\033[0m Authenticating with HuggingFace...")
+     try:
+         login(token=token, add_to_git_credential=True)
+
+         api = HfApi()
+         user_info = api.whoami()
+         username = user_info.get("name", "Unknown")
+
+         print(f"\033[32m✓\033[0m Successfully logged in as: \033[93m{username}\033[0m")
+         print("\033[2m Token saved for future use\033[0m")
+         print("\033[2m You can now download gated models\033[0m")
+
+     except HfHubHTTPError as e:
+         if "Invalid token" in str(e):
+             print("\033[31m✗\033[0m Invalid token. Please check your token and try again.")
+         else:
+             print(f"\033[31m✗\033[0m Login failed: {str(e)}")
+     except Exception as e:
+         print(f"\033[31m✗\033[0m Login failed: {str(e)}")
+
+
+ def manage_models(*, cli: Any, args: str = "") -> None:
+     """Interactive model manager - simplified for better UX."""
+     if args:
+         print(f"\033[96m⚡\033[0m Loading model: \033[93m{args}\033[0m...")
+         success, message = cli.model_manager.load_model(args)
+         if success:
+             print("\033[32m✓\033[0m Model loaded successfully")
+         else:
+             print(f"\033[31m✗\033[0m Failed: {message}", file=sys.stderr)
+         return
+
+     available = cli.model_manager.discover_available_models()
+
+     if not available:
+         print(f"\n\033[31m✗\033[0m No models found in \033[2m{cli.config.model.model_path}\033[0m")
+         print("Use \033[93m/download\033[0m to download models from HuggingFace")
+         return
+
+     width = min(cli.get_terminal_width() - 2, 70)
+
+     print()
+     cli.print_box_header("Select Model", width)
+     cli.print_empty_line(width)
+
+     for i, model in enumerate(available, 1):
+         name = model["name"][: width - 30]
+         size = f"{model['size_gb']:.1f}GB"
+
+         current_model = cli.model_manager.current_model or ""
+         is_current = (
+             model["name"] == current_model
+             or model.get("mlx_name") == current_model
+             or current_model.endswith(model["name"])
+         )
+
+         status_parts = []
+         if model.get("mlx_optimized"):
+             status_parts.append("\033[36m⚡ MLX\033[0m")
+         elif model.get("mlx_available"):
+             status_parts.append("\033[2m○ MLX ready\033[0m")
+
+         if is_current:
+             status_parts.append("\033[32m● loaded\033[0m")
+
+         status = " ".join(status_parts) if status_parts else ""
+
+         line = f" \033[93m[{i}]\033[0m {name} \033[2m({size})\033[0m {status}"
+         cli.print_box_line(line, width)
+
+     cli.print_empty_line(width)
+     cli.print_box_separator(width)
+     cli.print_empty_line(width)
+
+     cli.print_box_line(" \033[93m[D]\033[0m Delete a model", width)
+     cli.print_box_line(" \033[93m[N]\033[0m Download new model", width)
+
+     cli.print_empty_line(width)
+     cli.print_box_footer(width)
+
+     choice = cli.get_input_with_escape(f"Select model to load (1-{len(available)}) or option")
+
+     if choice is None:
+         return
+
+     choice = choice.lower()
+
+     if choice == "n":
+         cli.download_model()
+         return
+     if choice == "d":
+         del_choice = cli.get_input_with_escape(f"Select model to delete (1-{len(available)})")
+         if del_choice is None:
+             return
+         try:
+             model_idx = int(del_choice) - 1
+             if 0 <= model_idx < len(available):
+                 selected_model = available[model_idx]
+                 print(f"\n\033[31m⚠\033[0m Delete \033[93m{selected_model['name']}\033[0m?")
+                 print(f" This will free \033[93m{selected_model['size_gb']:.1f}GB\033[0m of disk space.")
+                 confirm = cli.get_input_with_escape("Confirm deletion (\033[93my\033[0m/\033[2mN\033[0m)")
+                 if confirm is None:
+                     return
+                 confirm = confirm.lower()
+
+                 if confirm == "y":
+                     model_path = Path(selected_model["path"])
+                     try:
+                         if model_path.is_file():
+                             model_path.unlink()
+                         elif model_path.is_dir():
+                             import shutil
+
+                             shutil.rmtree(model_path)
+
+                         print(
+                             "\033[32m✓\033[0m Model deleted successfully. "
+                             f"Freed \033[93m{selected_model['size_gb']:.1f}GB\033[0m."
+                         )
+
+                         if selected_model["name"] == cli.model_manager.current_model:
+                             cli.model_manager.current_model = None
+                             print(
+                                 "\033[2mNote: Deleted model was currently loaded. "
+                                 "Load another model to continue.\033[0m"
+                             )
+                     except Exception as e:
+                         print(f"\033[31m✗\033[0m Failed to delete: {str(e)}")
+                 else:
+                     print("\033[2mDeletion cancelled.\033[0m")
+         except (ValueError, IndexError):
+             print("\033[31m✗\033[0m Invalid selection")
+         return
+
+     try:
+         model_idx = int(choice) - 1
+         if 0 <= model_idx < len(available):
+             selected_model = available[model_idx]
+
+             if selected_model["name"] == cli.model_manager.current_model:
+                 print(f"\033[2mModel already loaded: {selected_model['name']}\033[0m")
+                 return
+
+             print(f"\n\033[96m⚡\033[0m Loading \033[93m{selected_model['name']}\033[0m...")
+             success, message = cli.model_manager.load_model(selected_model["path"])
+             if success:
+                 model_info = cli.model_manager.get_current_model()
+                 if model_info:
+                     model_name = model_info.name
+                     if "_4bit" in model_name or "4bit" in str(model_info.quantization):
+                         quant_type = "4-bit"
+                     elif "_5bit" in model_name or "5bit" in str(model_info.quantization):
+                         quant_type = "5-bit"
+                     elif "_8bit" in model_name or "8bit" in str(model_info.quantization):
+                         quant_type = "8-bit"
+                     else:
+                         quant_type = ""
+
+                     clean_name = selected_model["name"]
+                     if clean_name.startswith("_Users_"):
+                         parts = clean_name.split("_")
+                         for i, part in enumerate(parts):
+                             if "models" in part:
+                                 clean_name = "_".join(parts[i + 1 :])
+                                 break
+                     clean_name = clean_name.replace("_4bit", "").replace("_5bit", "").replace("_8bit", "")
+
+                     format_display = model_info.format.value
+                     if format_display.lower() == "mlx":
+                         format_display = "MLX (Apple Silicon optimized)"
+                     elif format_display.lower() == "gguf":
+                         format_display = "GGUF"
+                     elif format_display.lower() == "safetensors":
+                         format_display = "SafeTensors"
+                     elif format_display.lower() == "pytorch":
+                         format_display = "PyTorch"
+
+                     print(f" \033[32m✓\033[0m Model ready: \033[93m{clean_name}\033[0m")
+                     if quant_type:
+                         print(f" \033[2m• Size: {model_info.size_gb:.1f}GB ({quant_type} quantized)\033[0m")
+                     else:
+                         print(f" \033[2m• Size: {model_info.size_gb:.1f}GB (quantized)\033[0m")
+                     print(" \033[2m• Optimizations: AMX acceleration, operation fusion\033[0m")
+                     print(f" \033[2m• Format: {format_display}\033[0m")
+
+                     tokenizer = cli.model_manager.tokenizers.get(model_info.name)
+                     profile = cli.template_registry.setup_model(
+                         model_info.name,
+                         tokenizer=tokenizer,
+                         interactive=False,
+                     )
+                     if profile:
+                         template_name = profile.config.name
+                         print(f" \033[2m• Template: {template_name}\033[0m")
+                 else:
+                     print("\033[32m✓\033[0m Model loaded successfully!")
+             else:
+                 print(f"\033[31m✗\033[0m Failed to load: {message}")
+         else:
+             print("\033[31m✗\033[0m Invalid selection")
+     except ValueError:
+         print("\033[31m✗\033[0m Invalid choice")
cortex/ui/status_ui.py ADDED
@@ -0,0 +1,78 @@
+ """Status and benchmark UI helpers for CLI."""
+
+ from __future__ import annotations
+
+ from typing import Any
+
+
+ def show_status(*, cli: Any) -> None:
+     """Show current setup status."""
+     is_valid, gpu_info, errors = cli.gpu_validator.validate()
+
+     width = min(cli.get_terminal_width() - 2, 70)
+
+     print()
+     cli.print_box_header("Current Setup", width)
+     cli.print_empty_line(width)
+
+     if gpu_info:
+         cli.print_box_line(f" \033[2mGPU:\033[0m \033[93m{gpu_info.chip_name}\033[0m", width)
+         cli.print_box_line(f" \033[2mCores:\033[0m \033[93m{gpu_info.gpu_cores}\033[0m", width)
+
+         mem_gb = gpu_info.total_memory / (1024**3)
+         mem_str = f"{mem_gb:.1f} GB"
+         cli.print_box_line(f" \033[2mMemory:\033[0m \033[93m{mem_str}\033[0m", width)
+
+     if cli.model_manager.current_model:
+         model_info = cli.model_manager.get_current_model()
+         if model_info:
+             cli.print_box_line(f" \033[2mModel:\033[0m \033[93m{model_info.name[:43]}\033[0m", width)
+
+             tokenizer = cli.model_manager.tokenizers.get(model_info.name)
+             profile = cli.template_registry.get_template(model_info.name)
+             if profile:
+                 template_name = profile.config.name
+                 cli.print_box_line(f" \033[2mTemplate:\033[0m \033[93m{template_name}\033[0m", width)
+     else:
+         cli.print_box_line(" \033[2mModel:\033[0m \033[31mNone loaded\033[0m", width)
+
+     cli.print_empty_line(width)
+     cli.print_box_footer(width)
+
+
+ def show_gpu_status(*, cli: Any) -> None:
+     """Show GPU status."""
+     is_valid, gpu_info, errors = cli.gpu_validator.validate()
+     if gpu_info:
+         print("\n\033[96mGPU Information:\033[0m")
+         print(f" Chip: \033[93m{gpu_info.chip_name}\033[0m")
+         print(f" GPU Cores: \033[93m{gpu_info.gpu_cores}\033[0m")
+         print(f" Total Memory: \033[93m{gpu_info.total_memory / (1024**3):.1f} GB\033[0m")
+         print(f" Available Memory: \033[93m{gpu_info.available_memory / (1024**3):.1f} GB\033[0m")
+         print(f" Metal Support: {'\033[32mYes\033[0m' if gpu_info.has_metal else '\033[31mNo\033[0m'}")
+         print(f" MPS Support: {'\033[32mYes\033[0m' if gpu_info.has_mps else '\033[31mNo\033[0m'}")
+
+     memory_status = cli.model_manager.get_memory_status()
+     print("\n\033[96mMemory Status:\033[0m")
+     print(f" Available: \033[93m{memory_status['available_gb']:.1f} GB\033[0m")
+     print(f" Models Loaded: \033[93m{memory_status['models_loaded']}\033[0m")
+     print(f" Model Memory: \033[93m{memory_status['model_memory_gb']:.1f} GB\033[0m")
+
+
+ def run_benchmark(*, cli: Any) -> None:
+     """Run performance benchmark."""
+     if not cli.model_manager.current_model:
+         print("\033[31m✗\033[0m No model loaded.")
+         return
+
+     print("\033[96m⚡\033[0m Running benchmark (100 tokens)...")
+     metrics = cli.inference_engine.benchmark()
+
+     if metrics:
+         print("\n\033[96mBenchmark Results:\033[0m")
+         print(f" Tokens Generated: \033[93m{metrics.tokens_generated}\033[0m")
+         print(f" Time: \033[93m{metrics.time_elapsed:.2f}s\033[0m")
+         print(f" Tokens/Second: \033[93m{metrics.tokens_per_second:.1f}\033[0m")
+         print(f" First Token: \033[93m{metrics.first_token_latency:.3f}s\033[0m")
+         print(f" GPU Usage: \033[93m{metrics.gpu_utilization:.1f}%\033[0m")
+         print(f" Memory: \033[93m{metrics.memory_used_gb:.1f}GB\033[0m")
cortex/ui/tool_activity.py ADDED
@@ -0,0 +1,82 @@
+ """Tool activity rendering helpers for the CLI."""
+
+ from __future__ import annotations
+
+ from typing import List
+
+ from rich.console import Console
+ from rich.style import Style
+ from rich.text import Text
+
+ from cortex.ui.markdown_render import PrefixedRenderable
+
+
+ def summarize_tool_call(call: dict) -> str:
+     name = str(call.get("name", "tool"))
+     args = call.get("arguments") or {}
+     parts: List[str] = []
+     preferred = ("path", "query", "anchor", "start_line", "end_line", "recursive", "max_results")
+     for key in preferred:
+         if key in args:
+             value = args[key]
+             if isinstance(value, str) and len(value) > 60:
+                 value = value[:57] + "..."
+             parts.append(f"{key}={value!r}")
+     if not parts and args:
+         for key in list(args.keys())[:3]:
+             value = args[key]
+             if isinstance(value, str) and len(value) > 60:
+                 value = value[:57] + "..."
+             parts.append(f"{key}={value!r}")
+     arg_str = ", ".join(parts)
+     return f"{name}({arg_str})" if arg_str else f"{name}()"
+
+
+ def summarize_tool_result(result: dict) -> str:
+     name = str(result.get("name", "tool"))
+     if not result.get("ok", False):
+         error = result.get("error") or "unknown error"
+         return f"{name} -> error: {error}"
+     payload = result.get("result") or {}
+     if name == "list_dir":
+         entries = payload.get("entries") or []
+         return f"{name} -> entries={len(entries)}"
+     if name == "search":
+         matches = payload.get("results") or []
+         return f"{name} -> results={len(matches)}"
+     if name == "read_file":
+         path = payload.get("path") or ""
+         start = payload.get("start_line")
+         end = payload.get("end_line")
+         if start and end:
+             return f"{name} -> {path} lines {start}-{end}"
+         if start:
+             return f"{name} -> {path} from line {start}"
+         return f"{name} -> {path}"
+     if name in {"write_file", "create_file", "delete_file", "replace_in_file", "insert_after", "insert_before"}:
+         path = payload.get("path") or ""
+         return f"{name} -> {path}"
+     return f"{name} -> ok"
+
+
+ def print_tool_activity(
+     console: Console,
+     tool_calls: list,
+     tool_results: list,
+     terminal_width: int,
+ ) -> None:
+     lines = []
+     for call, result in zip(tool_calls, tool_results):
+         lines.append(f"tool {summarize_tool_call(call)} -> {summarize_tool_result(result)}")
+     if not lines:
+         return
+     text = Text("\n".join(lines), style=Style(color="bright_black", italic=True))
+     renderable = PrefixedRenderable(text, prefix=" ", prefix_style=Style(dim=True), indent=" ", auto_space=False)
+     original_console_width = console._width
+     target_width = max(40, int(terminal_width * 0.75))
+     console.width = target_width
+     try:
+         console.print(renderable, highlight=False, soft_wrap=True)
+         console.print()
+     finally:
+         console._width = original_console_width
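
The renderer pairs each call with its result positionally via `zip`. A usage sketch follows; the dict shapes mirror what `summarize_tool_call` and `summarize_tool_result` read, while the concrete values are invented for illustration:

```python
# Hypothetical invocation of print_tool_activity. Dict keys mirror
# what the summarizers above read; values are illustrative.
from rich.console import Console

from cortex.ui.tool_activity import print_tool_activity

console = Console()
calls = [
    {"name": "read_file", "arguments": {"path": "src/app.py", "start_line": 1}},
]
results = [
    {
        "name": "read_file",
        "ok": True,
        "result": {"path": "src/app.py", "start_line": 1, "end_line": 40},
    },
]
# Renders a dim, indented line like:
#   tool read_file(path='src/app.py', start_line=1) -> read_file -> src/app.py lines 1-40
print_tool_activity(console, calls, results, terminal_width=100)
```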
cortex_llm-1.0.10.dist-info/METADATA → cortex_llm-1.0.11.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: cortex-llm
- Version: 1.0.10
+ Version: 1.0.11
  Summary: GPU-Accelerated LLM Terminal for Apple Silicon
  Home-page: https://github.com/faisalmumtaz/Cortex
  Author: Cortex Development Team
@@ -60,6 +60,8 @@ Dynamic: requires-python

  GPU-accelerated local LLMs on Apple Silicon, built for the terminal.

+ ![Cortex preview](docs/assets/cortex-llm.png)
+
  Cortex is a fast, native CLI for running and fine-tuning LLMs on Apple Silicon using MLX and Metal. It automatically detects chat templates, supports multiple model formats, and keeps your workflow inside the terminal.

  ## Highlights
cortex_llm-1.0.10.dist-info/RECORD → cortex_llm-1.0.11.dist-info/RECORD RENAMED
@@ -1,9 +1,9 @@
- cortex/__init__.py,sha256=xl3alSv0U-9AkcwCu66nZJNgNP7Hwfs1_tjAJUXfEQI,2203
+ cortex/__init__.py,sha256=aAlejgacHykWBbAVAA-JevF9MwqsZvhArSn5Ljw-Ctg,2203
  cortex/__main__.py,sha256=I7Njt7BjGoHtPhftDoA44OyOYbwWNNaPwP_qlJSn0J4,2857
- cortex/config.py,sha256=IQnMaXznTflTSvr91aybtPMnNW088r-BYeVMhxny63w,13444
+ cortex/config.py,sha256=sWvudNkFAK0JDIS60H1cX76z6zZThdFcMSckZ6RfwOs,14922
  cortex/conversation_manager.py,sha256=aSTdGjVttsMKIiRPzztP0tOXlqZBkWtgZDNCZGyaR-c,17177
  cortex/gpu_validator.py,sha256=un6vMQ78MWMnKWIz8n-92v9Fb4g_YXqU_E1pUPinncY,16582
- cortex/inference_engine.py,sha256=4ayAjw5aRVOFfRffKmKkwCMMYBnrrGqT7xbIz1rGbIE,30907
+ cortex/inference_engine.py,sha256=2XAOlQUwQWLjHz3TQqSSNDxmLOVnYLo0u93zCfh23NE,32323
  cortex/model_downloader.py,sha256=VuPhvxq_66qKjsPjEWcLW-VmUHzOHik6LBMiGDk-cX8,4977
  cortex/model_manager.py,sha256=Ra21TjhtFS-7_hRzDMh9m0BUazIGWoKr7Gye3GiVRJM,102671
  cortex/fine_tuning/__init__.py,sha256=IXKQqNqN1C3mha3na35i7KI-hMnsqqrmUgV4NrPKHy0,269
@@ -39,17 +39,25 @@ cortex/template_registry/template_profiles/standard/llama.py,sha256=jz4MyvmISSPt
  cortex/template_registry/template_profiles/standard/simple.py,sha256=dGOOcL6HRoJFxkixLrYC4w7c63h-QmOOWC2TsOihYog,2422
  cortex/tools/__init__.py,sha256=gwOg8T6UTg8nDefV6lr6FpEEq_9-kK59Var47PiNZGw,113
  cortex/tools/errors.py,sha256=BAGznHVle7HqBLsy_Z4xF9uhbL8ZYtZC2d1z9nS4M4Q,203
- cortex/tools/fs_ops.py,sha256=8VeIURphURLwBZworIw1fD72wliX_jf8qfJt75zMAzc,5993
+ cortex/tools/fs_ops.py,sha256=MNqB8EEVk1VThv145j3bziN1Ype2QsXN-E7euoWMW6s,8202
  cortex/tools/protocol.py,sha256=_dKiHJe0-RuW6lwVmL1XXmVPL6ms4bCF4fJMLl06e0c,2760
- cortex/tools/search.py,sha256=cc0-xCixvOlbDw6tewpGLwvqXmdKEC1d5I7fyDJx79M,2972
- cortex/tools/tool_runner.py,sha256=lbp-E02jNayvA5NUszWNTunskGClj-CHt5wU5SEyZE8,6995
+ cortex/tools/search.py,sha256=Exw3M6rs07OGd3G5wqYQwrGnHzSBJSJ6SamV9-lOhaY,4939
+ cortex/tools/tool_runner.py,sha256=PdLhgBAqjPHSqgycN3Pi_Dgq1i79DVJpJkVF3JFerZg,10139
  cortex/ui/__init__.py,sha256=t3GrHJMHTVgBEKh2_qt4B9mS594V5jriTDqc3eZKMGc,3409
- cortex/ui/cli.py,sha256=ZHdlB7vTckBLTZfTPpjSjFobA6rjVN_4ZIuyKTOkeUg,81317
+ cortex/ui/box_rendering.py,sha256=1suhU7ujyiD9RWODrPeEvK6GUDtGtQNFMJl1iZO1Das,2729
+ cortex/ui/cli.py,sha256=jhJV944d4bsmkmjVEUgKOMsKjmvwShVpRoaZ1JrqTbs,34455
+ cortex/ui/cli_commands.py,sha256=byBztFkQmiX1HoKe60sr9ksBCXE1KClUrtYmIuxPiTU,1840
+ cortex/ui/cli_prompt.py,sha256=ClQuZuwRNegPtMg4DEgVHMg6z4ViTYwRL1jaTtVRaTE,3308
+ cortex/ui/help_ui.py,sha256=h8KzuAl3SgU-iyASq1lfHblb2c0jq2awbC10V2-uNLA,2035
+ cortex/ui/input_box.py,sha256=t87vf9CCk5NRDvyMLH-Zmb2CYaAw7HQOIPao3V5y5JE,6679
  cortex/ui/markdown_render.py,sha256=KAFBF5XUnhw1G7ZB9wMnLQyvJ4GCIW8uGK7auoKkrr4,8096
+ cortex/ui/model_ui.py,sha256=AKJKqfUJvn2vKtQXHEb1wD0Td43WKa_fRXE3y5acXvk,16362
+ cortex/ui/status_ui.py,sha256=JeyzELy9TPihSj53DrYfhZ0h9TP4ud2yUFU5Fi9riEc,3412
  cortex/ui/terminal_app.py,sha256=SF3KqcGFyZ4hpTmgX21idPzOTJLdKGkt4QdA-wwUBNE,18317
- cortex_llm-1.0.10.dist-info/licenses/LICENSE,sha256=_frJ3VsZWQGhMznZw2Tgjk7xwfAfDZRcBl43uZh8_4E,1070
- cortex_llm-1.0.10.dist-info/METADATA,sha256=-z8lVLdrQFFLxDzMIwhTwRQwPzvRznRSI9ftTAFqjbE,5578
- cortex_llm-1.0.10.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
- cortex_llm-1.0.10.dist-info/entry_points.txt,sha256=g83Nuz3iFrNdMLHxGLR2LnscdM7rdQRchuL3WGobQC8,48
- cortex_llm-1.0.10.dist-info/top_level.txt,sha256=79LAeTJJ_pMIBy3mkF7uNaN0mdBRt5tGrnne5N_iAio,7
- cortex_llm-1.0.10.dist-info/RECORD,,
+ cortex/ui/tool_activity.py,sha256=N6T8g_Es3_Tnzpw_jqzn-t5S5P9sWBXvwcWqyu8tvdE,2968
+ cortex_llm-1.0.11.dist-info/licenses/LICENSE,sha256=_frJ3VsZWQGhMznZw2Tgjk7xwfAfDZRcBl43uZh8_4E,1070
+ cortex_llm-1.0.11.dist-info/METADATA,sha256=t9DilQAJBZhdDjC776Esdt22JviSACEIgzsfHjG_UQ4,5625
+ cortex_llm-1.0.11.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+ cortex_llm-1.0.11.dist-info/entry_points.txt,sha256=g83Nuz3iFrNdMLHxGLR2LnscdM7rdQRchuL3WGobQC8,48
+ cortex_llm-1.0.11.dist-info/top_level.txt,sha256=79LAeTJJ_pMIBy3mkF7uNaN0mdBRt5tGrnne5N_iAio,7
+ cortex_llm-1.0.11.dist-info/RECORD,,