hud-python 0.4.35__py3-none-any.whl → 0.4.37__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of hud-python might be problematic; see the registry advisory for more details.

Files changed (63)
  1. hud/agents/__init__.py +2 -0
  2. hud/agents/lite_llm.py +72 -0
  3. hud/agents/openai_chat_generic.py +21 -7
  4. hud/agents/tests/test_claude.py +32 -7
  5. hud/agents/tests/test_openai.py +29 -6
  6. hud/cli/__init__.py +228 -79
  7. hud/cli/build.py +26 -6
  8. hud/cli/dev.py +21 -40
  9. hud/cli/eval.py +96 -15
  10. hud/cli/flows/tasks.py +198 -65
  11. hud/cli/init.py +222 -629
  12. hud/cli/pull.py +6 -0
  13. hud/cli/push.py +11 -1
  14. hud/cli/rl/__init__.py +14 -4
  15. hud/cli/rl/celebrate.py +187 -0
  16. hud/cli/rl/config.py +15 -8
  17. hud/cli/rl/local_runner.py +44 -20
  18. hud/cli/rl/remote_runner.py +166 -87
  19. hud/cli/rl/viewer.py +141 -0
  20. hud/cli/rl/wait_utils.py +89 -0
  21. hud/cli/tests/test_build.py +3 -27
  22. hud/cli/tests/test_mcp_server.py +1 -12
  23. hud/cli/utils/config.py +85 -0
  24. hud/cli/utils/docker.py +21 -39
  25. hud/cli/utils/env_check.py +196 -0
  26. hud/cli/utils/environment.py +4 -3
  27. hud/cli/utils/interactive.py +2 -1
  28. hud/cli/utils/local_runner.py +204 -0
  29. hud/cli/utils/metadata.py +3 -1
  30. hud/cli/utils/package_runner.py +292 -0
  31. hud/cli/utils/remote_runner.py +4 -1
  32. hud/cli/utils/source_hash.py +108 -0
  33. hud/clients/base.py +1 -1
  34. hud/clients/fastmcp.py +1 -1
  35. hud/clients/mcp_use.py +30 -7
  36. hud/datasets/parallel.py +3 -1
  37. hud/datasets/runner.py +4 -1
  38. hud/otel/config.py +1 -1
  39. hud/otel/context.py +40 -6
  40. hud/rl/buffer.py +3 -0
  41. hud/rl/tests/test_learner.py +1 -1
  42. hud/rl/vllm_adapter.py +1 -1
  43. hud/server/server.py +234 -7
  44. hud/server/tests/test_add_tool.py +60 -0
  45. hud/server/tests/test_context.py +128 -0
  46. hud/server/tests/test_mcp_server_handlers.py +44 -0
  47. hud/server/tests/test_mcp_server_integration.py +405 -0
  48. hud/server/tests/test_mcp_server_more.py +247 -0
  49. hud/server/tests/test_run_wrapper.py +53 -0
  50. hud/server/tests/test_server_extra.py +166 -0
  51. hud/server/tests/test_sigterm_runner.py +78 -0
  52. hud/settings.py +38 -0
  53. hud/shared/hints.py +2 -2
  54. hud/telemetry/job.py +2 -2
  55. hud/types.py +9 -2
  56. hud/utils/tasks.py +32 -24
  57. hud/utils/tests/test_version.py +1 -1
  58. hud/version.py +1 -1
  59. {hud_python-0.4.35.dist-info → hud_python-0.4.37.dist-info}/METADATA +43 -23
  60. {hud_python-0.4.35.dist-info → hud_python-0.4.37.dist-info}/RECORD +63 -46
  61. {hud_python-0.4.35.dist-info → hud_python-0.4.37.dist-info}/WHEEL +0 -0
  62. {hud_python-0.4.35.dist-info → hud_python-0.4.37.dist-info}/entry_points.txt +0 -0
  63. {hud_python-0.4.35.dist-info → hud_python-0.4.37.dist-info}/licenses/LICENSE +0 -0
@@ -6,14 +6,15 @@ This module implements the new interactive flow for RL training.
6
6
 
7
7
  from __future__ import annotations
8
8
 
9
- import os
10
- import subprocess
11
9
  import time
12
10
  import uuid
13
11
  from pathlib import Path
14
12
 
15
13
  from rich.console import Console
16
14
 
15
+ from hud.cli.rl.celebrate import show_confetti_async
16
+ from hud.cli.rl.viewer import show_json_interactive
17
+ from hud.cli.rl.wait_utils import wait_for_enter_cancel_or_change
17
18
  from hud.utils.hud_console import hud_console
18
19
  from hud.utils.tasks import load_tasks
19
20
 
@@ -57,7 +58,7 @@ def ensure_vllm_deployed(model_name: str, gpu_type: str = "A100", timeout: int =
57
58
  hud_console.error("Timeout waiting for vLLM deployment")
58
59
  raise ValueError("vLLM deployment timeout")
59
60
  info = rl_api.get_model(model_name)
60
- if info.vllm_url or info.status == "ready":
61
+ if info.status == "ready":
61
62
  hud_console.success(
62
63
  f"vLLM server ready at http://rl.hud.so/v1/models/{model_name}/vllm"
63
64
  )
@@ -70,48 +71,71 @@ def run_remote_training(
70
71
  model: str | None,
71
72
  config_file: Path | None,
72
73
  output_dir: str,
74
+ yes: bool = False,
73
75
  ) -> None:
74
76
  """Run RL training remotely via the API server following the new interactive flow."""
75
77
  from hud.settings import settings
76
78
 
77
79
  if not settings.api_key:
78
80
  hud_console.error("API key not found")
79
- console.print("[yellow]Please set HUD_API_KEY environment variable[/yellow]")
81
+ console.print(
82
+ "[yellow]Set it in your environment or run: hud set HUD_API_KEY=your-key-here[/yellow]"
83
+ )
80
84
  raise ValueError("API key not found")
81
85
 
82
- # Step 1: CONFIRMATION - Load tasks and show example
86
+ # Step 1: CONFIRMATION - Load tasks
83
87
  if tasks_file:
84
- tasks = load_tasks(tasks_file)
88
+ tasks: list[Task] = load_tasks(tasks_file) # type: ignore[assignment]
89
+ # Resolve tasks immediately after loading (validate + fill defaults)
90
+ from hud.types import Task
91
+
92
+ resolved_tasks: list[dict] = []
93
+ for t in tasks:
94
+ try:
95
+ resolved = Task(**t.model_dump()).model_dump()
96
+ except Exception:
97
+ resolved = t.model_dump()
98
+ resolved_tasks.append(resolved)
99
+
100
+ # Preview resolved task
101
+ if resolved_tasks and not yes:
102
+ try:
103
+ show_json_interactive(resolved_tasks[0], title="Task Preview")
104
+ except Exception as e:
105
+ hud_console.warning(f"Interactive viewer failed: {e}")
85
106
  else:
86
107
  raise ValueError("Tasks file not found")
87
108
 
88
109
  # Show example task for confirmation
89
- hud_console.section_title("Example Task from Dataset")
90
-
91
- if tasks:
92
- # Display task with truncated values
93
- task_data = tasks[0].model_dump()
94
- truncated_data = {}
95
- max_value_length = 120 # Maximum characters to show per line
96
-
97
- for key, value in task_data.items():
98
- value_str = str(value)
99
- if len(value_str) > max_value_length:
100
- truncated_data[key] = value_str[:max_value_length] + "..."
101
- else:
102
- truncated_data[key] = value_str
103
-
104
- hud_console.key_value_table(truncated_data)
105
-
106
- if not hud_console.confirm("Proceed with training on this dataset?", default=True):
107
- hud_console.error("Training cancelled")
108
- return
110
+ # hud_console.section_title("Example Task from Dataset")
111
+
112
+ # if tasks:
113
+ # # Display task with truncated values
114
+ # try:
115
+ # task_data = resolved_tasks[0]
116
+ # except Exception:
117
+ # task_data = tasks[0].model_dump()
118
+ # truncated_data = {}
119
+ # max_value_length = 120 # Maximum characters to show per line
120
+
121
+ # for key, value in task_data.items():
122
+ # value_str = str(value)
123
+ # if len(value_str) > max_value_length:
124
+ # truncated_data[key] = value_str[:max_value_length] + "..."
125
+ # else:
126
+ # truncated_data[key] = value_str
127
+
128
+ # hud_console.key_value_table(truncated_data)
129
+
130
+ # if not hud_console.confirm("Proceed with training on this dataset?", default=True):
131
+ # hud_console.error("Training cancelled")
132
+ # return
109
133
 
110
134
  # Step 2: MODEL SELECTION
111
135
  hud_console.section_title("Model Selection")
112
136
 
113
137
  # Fetch existing models
114
- hud_console.info("Fetching your models from https://app.hud.so/models")
138
+ hud_console.info("Fetching your models from https://hud.so/models")
115
139
 
116
140
  try:
117
141
  models = rl_api.list_models()
@@ -135,7 +159,11 @@ def run_remote_training(
135
159
  choices.append({"name": "Create new model", "value": "__new__"})
136
160
 
137
161
  if not model:
138
- if choices:
162
+ if yes:
163
+ # In yes mode, always create a new model to avoid conflicts
164
+ selected = "__new__"
165
+ hud_console.info("Auto-creating new model (--yes mode)")
166
+ elif choices:
139
167
  selected = hud_console.select("Select a model:", choices=choices)
140
168
  else:
141
169
  selected = "__new__"
@@ -153,14 +181,18 @@ def run_remote_training(
153
181
  hud_console.info("Creating new model...")
154
182
 
155
183
  # Ask for model type
156
- model_type = hud_console.select(
157
- "Select base model type:",
158
- choices=[
159
- {"name": "Qwen2.5-VL-3B-Instruct", "value": "Qwen/Qwen2.5-VL-3B-Instruct"},
160
- # {"name": "Qwen2.5-VL-7B-Instruct", "value": "Qwen/Qwen2.5-VL-7B-Instruct"},
161
- ],
162
- default=0,
163
- )
184
+ if yes:
185
+ model_type = "Qwen/Qwen2.5-VL-3B-Instruct" # Default model in yes mode
186
+ hud_console.info(f"Auto-selecting base model: {model_type} (--yes mode)")
187
+ else:
188
+ model_type = hud_console.select(
189
+ "Select base model type:",
190
+ choices=[
191
+ {"name": "Qwen2.5-VL-3B-Instruct", "value": "Qwen/Qwen2.5-VL-3B-Instruct"},
192
+ # {"name": "Qwen2.5-VL-7B-Instruct", "value": "Qwen/Qwen2.5-VL-7B-Instruct"}, # noqa: E501
193
+ ],
194
+ default=0,
195
+ )
164
196
  from rich.prompt import Prompt
165
197
 
166
198
  # Ask for model name
@@ -172,9 +204,13 @@ def run_remote_training(
172
204
  default_name = f"{base_default}-{suffix}"
173
205
  suffix += 1
174
206
 
175
- hud_console.info(f"Enter model name (default: {default_name}):")
176
- model_name = Prompt.ask("Model name", default=default_name)
177
- model_name = model_name.replace("/", "-").lower()
207
+ if yes:
208
+ model_name = default_name
209
+ hud_console.info(f"Auto-using model name: {model_name} (--yes mode)")
210
+ else:
211
+ hud_console.info(f"Enter model name (default: {default_name}):")
212
+ model_name = Prompt.ask("Model name", default=default_name)
213
+ model_name = model_name.replace("/", "-").lower()
178
214
 
179
215
  # Create the model with retry on name conflict
180
216
  hud_console.info(f"Creating model: {model_name}")
@@ -201,7 +237,11 @@ def run_remote_training(
201
237
  try:
202
238
  from rich.prompt import Prompt as _Prompt
203
239
 
204
- chosen = _Prompt.ask("Use different name", default=alt_name)
240
+ if yes:
241
+ chosen = alt_name
242
+ hud_console.info(f"Auto-using suggested name: {chosen} (--yes mode)")
243
+ else:
244
+ chosen = _Prompt.ask("Use different name", default=alt_name)
205
245
  chosen = chosen.replace("/", "-").lower()
206
246
  rl_api.create_model(chosen, model_type)
207
247
  hud_console.success(f"Created model: {chosen}")
@@ -221,7 +261,11 @@ def run_remote_training(
221
261
 
222
262
  # Check if model is in training
223
263
  if model_info.status == "training":
224
- if hud_console.confirm(
264
+ if yes:
265
+ # In yes mode, skip training if model is already training
266
+ hud_console.warning(f"{model_name} is already training, skipping (--yes mode)")
267
+ return
268
+ elif hud_console.confirm(
225
269
  f"{model_name} is currently training. Stop current training?", default=False
226
270
  ):
227
271
  hud_console.info(f"Stopping training for {model_name}...")
@@ -264,25 +308,33 @@ def run_remote_training(
264
308
 
265
309
  # console.print(gpu_table)
266
310
 
267
- gpu_choice = hud_console.select(
268
- "Select GPU type:",
269
- choices=[
270
- {"name": "A100 80GB", "value": "A100"},
271
- {"name": "H100 80GB", "value": "H100"},
272
- ],
273
- default=0,
274
- )
311
+ if yes:
312
+ gpu_choice = "A100" # Default GPU in yes mode
313
+ hud_console.info(f"Auto-selecting GPU: {gpu_choice} 80GB (--yes mode)")
314
+ else:
315
+ gpu_choice = hud_console.select(
316
+ "Select GPU type:",
317
+ choices=[
318
+ {"name": "A100 80GB", "value": "A100"},
319
+ {"name": "H100 80GB", "value": "H100"},
320
+ ],
321
+ default=0,
322
+ )
275
323
 
276
- num_gpus = hud_console.select(
277
- "Number of GPUs:",
278
- choices=[
279
- {"name": "1 GPU", "value": 1},
280
- {"name": "2 GPUs", "value": 2},
281
- {"name": "4 GPUs", "value": 4},
282
- {"name": "8 GPUs", "value": 8},
283
- ],
284
- default=1,
285
- )
324
+ if yes:
325
+ num_gpus = 1 # Default to 1 GPU in yes mode
326
+ hud_console.info(f"Auto-selecting {num_gpus} GPU(s) (--yes mode)")
327
+ else:
328
+ num_gpus = hud_console.select(
329
+ "Number of GPUs:",
330
+ choices=[
331
+ {"name": "1 GPU", "value": 1},
332
+ {"name": "2 GPUs", "value": 2},
333
+ {"name": "4 GPUs", "value": 4},
334
+ {"name": "8 GPUs", "value": 8},
335
+ ],
336
+ default=1,
337
+ )
286
338
 
287
339
  # Generate config with presets
288
340
  hud_console.info("Generating training configuration...")
@@ -292,6 +344,7 @@ def run_remote_training(
292
344
  config, _ = generate_config_interactive(
293
345
  model_name=model_info.base_model,
294
346
  presets=presets,
347
+ yes=yes,
295
348
  )
296
349
 
297
350
  # Use a short label for tasks (avoid full absolute paths)
@@ -306,39 +359,61 @@ def run_remote_training(
306
359
 
307
360
  config.job_name = f"RL {model_name} on {tasks_label}"
308
361
 
309
- # Save config for editing
362
+ # Save config so user can review/edit externally
310
363
  temp_config_path = Path(f".rl_config_temp_{model_name}.json")
311
364
  save_config(config, temp_config_path)
312
365
 
313
- # Ask to edit config
366
+ # Interactive review loop: show preview, allow external edits, press Enter to start
314
367
  hud_console.info(
315
368
  f"Using training configuration from [underline cyan]{temp_config_path.absolute()}[/underline cyan]" # noqa: E501
316
369
  )
317
- edit_choice = hud_console.select(
318
- "Would you like to start training?",
319
- choices=[
320
- {"name": "🚀 Start training!", "value": "start"},
321
- {"name": "✏️ Review configuration", "value": "edit"},
322
- {"name": "❌ Cancel", "value": "cancel"},
323
- ],
324
- )
325
-
326
- if edit_choice == "cancel":
327
- hud_console.error("Training cancelled")
328
- return
329
- elif edit_choice == "edit":
330
- # Open editor
331
- editor = os.environ.get("EDITOR", "nano")
332
- hud_console.info(f"Opening {editor} to edit configuration...")
333
370
 
371
+ if yes:
372
+ # In yes mode, skip the interactive review loop
373
+ hud_console.info("Auto-accepting config (--yes mode)")
374
+ # Still show the config briefly
334
375
  try:
335
- subprocess.run([editor, str(temp_config_path)], check=True) # noqa: S603
336
- # Reload config
337
- config = load_config(temp_config_path)
338
- hud_console.success("Configuration updated")
376
+ show_json_interactive(
377
+ config.to_dict() if hasattr(config, "to_dict") else {},
378
+ title="RL Config Preview",
379
+ prompt=False,
380
+ )
339
381
  except Exception as e:
340
- hud_console.error(f"Failed to edit config: {e}")
341
- return
382
+ hud_console.warning(f"Interactive viewer failed: {e}")
383
+ else:
384
+ while True:
385
+ # Reload latest config from file each cycle
386
+ try:
387
+ config = load_config(temp_config_path)
388
+ except Exception as e:
389
+ hud_console.warning(f"Failed to load config from disk, using in-memory: {e}")
390
+
391
+ # Preview current config (no extra prompt here; main loop handles start/cancel)
392
+ try:
393
+ show_json_interactive(
394
+ config.to_dict() if hasattr(config, "to_dict") else {},
395
+ title="RL Config Preview",
396
+ prompt=False,
397
+ )
398
+ except Exception as e:
399
+ hud_console.warning(f"Interactive viewer failed: {e}")
400
+
401
+ console.print(
402
+ "\n[dim]Edit the config file above if needed, then save.[/dim]\n"
403
+ "[bold]Press Enter to start training[/bold], or press 'q' to cancel."
404
+ )
405
+
406
+ start_training, cancelled, changed = wait_for_enter_cancel_or_change(
407
+ temp_config_path
408
+ )
409
+
410
+ if cancelled:
411
+ hud_console.error("Training cancelled")
412
+ return
413
+ if start_training:
414
+ break # proceed
415
+ if changed:
416
+ hud_console.info("Detected configuration changes. Reloading preview...")
342
417
 
343
418
  config_dict = config.to_dict()
344
419
  else:
@@ -351,17 +426,21 @@ def run_remote_training(
351
426
 
352
427
  # Launch training
353
428
  try:
429
+ # Little celebration before launching
430
+ try:
431
+ show_confetti_async(console)
432
+ except Exception:
433
+ hud_console.info("Launching training...")
434
+
354
435
  rl_api.launch_training(
355
436
  model_name=model_name,
356
437
  config=config_dict,
357
- tasks=[task.model_dump() for task in tasks],
438
+ tasks=resolved_tasks,
358
439
  gpu_type=gpu_choice,
359
440
  gpu_count=int(num_gpus),
360
441
  )
361
442
 
362
- hud_console.success("Training Started Successfully!")
363
-
364
- hud_console.info(f"See your model {model_name} training on https://app.hud.so/models")
443
+ hud_console.info(f"Your model {model_name} has started training")
365
444
  hud_console.hint("Launch another training run via: hud rl <tasks_file>")
366
445
  hud_console.hint("Or evaluate the model via: hud eval <tasks_file>")
367
446
 
hud/cli/rl/viewer.py ADDED
@@ -0,0 +1,141 @@
1
+ """Inline JSON preview with expandable view for RL flow.
2
+
3
+ Uses minimal terminal interaction for inline display.
4
+ """
5
+
6
+ from __future__ import annotations
7
+
8
+ import json
9
+ from typing import Any
10
+
11
+ from blessed import Terminal
12
+ from rich.console import Console
13
+ from rich.json import JSON as RichJSON
14
+ from rich.panel import Panel
15
+ from rich.table import Table
16
+
17
+
18
+ def _mask_secrets(value: Any) -> Any:
19
+ """Recursively mask common secret-looking values."""
20
+ secret_keys = {"authorization", "api-key", "apikey", "token", "secret", "password"}
21
+
22
+ def _is_secret_key(k: str) -> bool:
23
+ lowered = k.lower()
24
+ if lowered in secret_keys:
25
+ return True
26
+ return any(s in lowered for s in ["api", "key", "token", "secret", "password"])
27
+
28
+ if isinstance(value, dict):
29
+ result: dict[str, Any] = {}
30
+ for k, v in value.items():
31
+ if _is_secret_key(str(k)) and isinstance(v, str) and v:
32
+ prefix = v[:4]
33
+ suffix = v[-4:] if len(v) > 8 else ""
34
+ result[k] = f"{prefix}…{suffix}"
35
+ else:
36
+ result[k] = _mask_secrets(v)
37
+ return result
38
+ if isinstance(value, list):
39
+ return [_mask_secrets(v) for v in value]
40
+ return value
41
+
42
+
43
+ def _truncate_value(value: Any, max_len: int = 60) -> str:
44
+ """Truncate a value for preview display."""
45
+ if isinstance(value, str):
46
+ if len(value) > max_len:
47
+ return value[:max_len] + "…"
48
+ return value
49
+ elif isinstance(value, (dict, list)):
50
+ s = json.dumps(value, separators=(",", ":"))
51
+ if len(s) > max_len:
52
+ return s[:max_len] + "…"
53
+ return s
54
+ else:
55
+ return str(value)
56
+
57
+
58
def _print_expanded(console: Console, data: Any, title: str | None) -> None:
    """Render *data* as full pretty-printed JSON, preceded by a title rule."""
    if title:
        console.rule(f"[bold cyan]{title} (expanded)[/bold cyan]")
    try:
        console.print(RichJSON.from_data(data))
    except Exception:
        # Rich may reject non-JSON-native data; fall back to plain dumps.
        console.print(json.dumps(data, indent=2))


def show_json_interactive(
    data: Any,
    *,
    title: str | None = None,
    max_string_len: int = 60,
    prompt: bool = True,
    initial_expanded: bool = False,
) -> None:
    """Display JSON inline with keyboard-based expand/collapse.

    Args:
        data: Arbitrary JSON-like data; secret-looking values are masked
            before display.
        title: Optional heading printed above the preview.
        max_string_len: Maximum characters shown per value in the preview.
        prompt: When True, wait for 'e' (expand) or Enter (continue).
        initial_expanded: When True, show the fully expanded JSON directly
            instead of the truncated preview panel.
    """
    console = Console()
    safe_data = _mask_secrets(data)

    # Preview table: up to five key/value rows, values truncated.
    table = Table(show_header=False, box=None, padding=(0, 1))
    table.add_column("Key", style="cyan", no_wrap=True)
    table.add_column("Value", style="green")

    if title:
        console.print(f"\n[bold cyan]{title}[/bold cyan]")

    if isinstance(safe_data, dict):
        items = list(safe_data.items())
        for key, value in items[:5]:
            truncated = _truncate_value(value, max_string_len)
            table.add_row(key + ":", truncated)

        if len(items) > 5:
            table.add_row("", f"[dim]... and {len(items) - 5} more items[/dim]")
    else:
        table.add_row("", _truncate_value(safe_data, max_string_len))

    # Display with border, or go straight to the expanded view.
    if not initial_expanded:
        console.print(Panel(table, expand=False, border_style="dim"))
    else:
        _print_expanded(console, safe_data, title)

    if not prompt:
        console.print()
        return

    # Prompt for expansion (interactive mode)
    console.print("[dim]Press 'e' to expand, Enter to continue[/dim] ", end="")

    try:
        # Preferred path: raw single-key input via blessed.
        term = Terminal()
        with term.cbreak():
            key = term.inkey(timeout=30)  # 30 second timeout
            if key and key.lower() == "e":
                console.print()  # New line
                _print_expanded(console, safe_data, title)
                console.print("\n[dim]Press Enter to continue...[/dim]")
                term.inkey()
    except Exception:
        # blessed unavailable or stdin is not a tty: fall back to line input.
        console.print()  # Ensure we're on a new line
        choice = input().strip().lower()

        if choice == "e":
            _print_expanded(console, safe_data, title)
            console.print("\n[dim]Press Enter to continue...[/dim]")
            input()

    console.print()
@@ -0,0 +1,89 @@
1
+ from __future__ import annotations
2
+
3
+ import contextlib
4
+ import os
5
+ import select
6
+ import sys
7
+ import threading
8
+ import time as _time
9
+ from typing import TYPE_CHECKING
10
+
11
+ from watchfiles import watch
12
+
13
+ if TYPE_CHECKING:
14
+ from pathlib import Path
15
+
16
+
17
def wait_for_enter_cancel_or_change(file_path: Path) -> tuple[bool, bool, bool]:
    """Block until Enter (start), 'q' (cancel), or file change.

    Returns (start_training, cancelled, changed).
    - start_training: True if Enter (or any non-'q' line on POSIX) was received
    - cancelled: True if 'q' was received or Ctrl-C
    - changed: True if the file changed on disk
    """
    start_training = False
    cancelled = False
    changed = False

    # Shared with the watcher thread: stop_evt asks the watcher to exit,
    # changed_evt is set once the watched file changes on disk.
    stop_evt: threading.Event = threading.Event()
    changed_evt: threading.Event = threading.Event()

    def _watcher() -> None:
        # Flag the first change only, then exit. Any watchfiles error is
        # deliberately swallowed so the watcher can never crash the prompt.
        with contextlib.suppress(Exception):
            for _ in watch(file_path, stop_event=stop_evt, debounce=200):
                changed_evt.set()
                break

    t = threading.Thread(target=_watcher, daemon=True)
    t.start()

    try:
        if os.name == "nt":
            # Windows: select() does not work on console handles, so poll
            # individual keypresses via msvcrt instead.
            import msvcrt  # type: ignore[attr-defined]

            while True:
                if changed_evt.is_set():
                    changed = True
                    break

                if msvcrt.kbhit():
                    ch = msvcrt.getwch()
                    if ch in ("\r", "\n"):
                        start_training = True
                        break
                    if ch.lower() == "q":
                        cancelled = True
                        break
                # Sleep between polls to avoid a busy loop.
                _time.sleep(0.15)
        else:
            # POSIX: poll line-buffered stdin with a select() timeout so the
            # loop can also notice file changes between reads.
            while True:
                if changed_evt.is_set():
                    changed = True
                    break

                rlist, _, _ = select.select([sys.stdin], [], [], 0.25)
                if rlist:
                    line = sys.stdin.readline()
                    # NOTE(review): readline() returns "" (not None) at EOF,
                    # so this guard looks unreachable — confirm intent.
                    if line is None:
                        continue
                    stripped = line.strip().lower()
                    if stripped == "q":
                        cancelled = True
                        break
                    # Any other (including empty) => start
                    start_training = True
                    break
                _time.sleep(0.05)

    except KeyboardInterrupt:
        # Ctrl-C is treated the same as an explicit cancel.
        cancelled = True
    finally:
        # Always shut the watcher thread down before returning; join errors
        # are ignored since the thread is a daemon anyway.
        stop_evt.set()
        with contextlib.suppress(Exception):
            t.join(timeout=1)

    return start_training, cancelled, changed
87
+
88
+
89
+ __all__ = ["wait_for_enter_cancel_or_change"]
@@ -235,14 +235,10 @@ class TestAnalyzeMcpEnvironment:
235
235
  # Setup mock client to fail
236
236
  mock_client = mock.AsyncMock()
237
237
  mock_client_class.return_value = mock_client
238
- mock_client.initialize.side_effect = Exception("Connection failed")
238
+ mock_client.initialize.side_effect = ConnectionError("Connection failed")
239
239
 
240
- result = await analyze_mcp_environment("test:latest")
241
-
242
- assert result["success"] is False
243
- assert result["toolCount"] == 0
244
- assert "error" in result
245
- assert "Connection failed" in result["error"]
240
+ with pytest.raises(ConnectionError):
241
+ await analyze_mcp_environment("test:latest")
246
242
 
247
243
  @mock.patch("hud.cli.build.MCPClient")
248
244
  async def test_analyze_verbose_mode(self, mock_client_class):
@@ -404,23 +400,3 @@ ENV API_KEY
404
400
 
405
401
  with pytest.raises(typer.Exit):
406
402
  build_environment(str(env_dir))
407
-
408
- @mock.patch("hud.cli.build.build_docker_image")
409
- @mock.patch("hud.cli.build.analyze_mcp_environment")
410
- def test_build_environment_analysis_failure(self, mock_analyze, mock_build, tmp_path):
411
- """Test when MCP analysis fails."""
412
- env_dir = tmp_path / "test-env"
413
- env_dir.mkdir()
414
- (env_dir / "pyproject.toml").write_text("[tool.hud]")
415
- (env_dir / "Dockerfile").write_text("FROM python:3.11")
416
-
417
- mock_build.return_value = True
418
- mock_analyze.return_value = {
419
- "success": False,
420
- "error": "Connection failed",
421
- "toolCount": 0,
422
- "tools": [],
423
- }
424
-
425
- with pytest.raises(typer.Exit):
426
- build_environment(str(env_dir))
@@ -11,7 +11,6 @@ from hud.cli.dev import (
11
11
  create_proxy_server,
12
12
  get_docker_cmd,
13
13
  get_image_name,
14
- inject_supervisor,
15
14
  run_mcp_dev_server,
16
15
  update_pyproject_toml,
17
16
  )
@@ -52,16 +51,6 @@ class TestDockerUtils:
52
51
  cmd = get_docker_cmd("test-image:latest")
53
52
  assert cmd is None
54
53
 
55
- def test_inject_supervisor(self) -> None:
56
- """Test supervisor injection into Docker CMD."""
57
- original_cmd = ["python", "-m", "server"]
58
- modified = inject_supervisor(original_cmd)
59
-
60
- assert modified[0] == "sh"
61
- assert modified[1] == "-c"
62
- assert "watchfiles" in modified[2]
63
- assert "python -m server" in modified[2]
64
-
65
54
 
66
55
  class TestImageResolution:
67
56
  """Test image name resolution."""
@@ -90,7 +79,7 @@ image = "my-project:latest"
90
79
  test_dir.mkdir()
91
80
 
92
81
  name, source = get_image_name(str(test_dir))
93
- assert name == "hud-my-test-project:dev"
82
+ assert name == "my-test-project:dev"
94
83
  assert source == "auto"
95
84
 
96
85
  def test_update_pyproject_toml(self, tmp_path: Path) -> None: