weco 0.2.20__py3-none-any.whl → 0.2.23__py3-none-any.whl

This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
weco/chatbot.py ADDED
@@ -0,0 +1,797 @@
+import pathlib
+import shlex
+import argparse
+from typing import List, Optional, Dict, Any, Tuple
+
+from rich.console import Console
+from rich.prompt import Prompt
+from rich.live import Live
+from gitingest import ingest
+
+from .api import (
+    get_optimization_suggestions_from_codebase,
+    generate_evaluation_script_and_metrics,
+    analyze_script_execution_requirements,
+    analyze_evaluation_environment,
+)
+from .panels import OptimizationOptionsPanel, EvaluationScriptPanel
+
+
+class UserInteractionHelper:
+    """Helper class for standardized user interactions."""
+
+    def __init__(self, console: Console):
+        self.console = console
+
+    def get_choice(
+        self, prompt: str, choices: List[str], default: str = None, show_choices: bool = True, max_retries: int = 5
+    ) -> str:
+        """Standardized choice prompt with error handling."""
+        return self._get_choice_with_retry(prompt, choices, default, show_choices, max_retries)
+
+    def _get_choice_with_retry(
+        self, prompt: str, choices: List[str], default: str = None, show_choices: bool = True, max_retries: int = 5
+    ) -> str:
+        """Get user choice with retry logic and error handling."""
+        attempts = 0
+
+        while attempts < max_retries:
+            try:
+                # Use Rich's Prompt.ask which handles basic validation
+                response = Prompt.ask(prompt, choices=choices, default=default, show_choices=show_choices)
+                return response
+            except (KeyboardInterrupt, EOFError):
+                # Handle Ctrl+C or Ctrl+D gracefully
+                self.console.print("\n[yellow]Operation cancelled by user.[/]")
+                raise
+            except Exception:
+                attempts += 1
+                self.console.print("\n[red]Invalid option.[/]")
+
+                if attempts >= max_retries:
+                    self.console.print(f"[red]Maximum retry attempts ({max_retries}) reached. Exiting.[/]")
+                    raise Exception("Maximum retry attempts exceeded")
+
+                # Show available options without the full prompt
+                if choices:
+                    if len(choices) <= 10:  # Show all options if not too many
+                        options_str = " / ".join([f"[bold]{choice}[/]" for choice in choices])
+                        self.console.print(f"Valid options: {options_str}")
+                    else:
+                        self.console.print(f"Please enter a valid option from the {len(choices)} available choices.")
+
+                if default:
+                    self.console.print(f"Press Enter for default: [bold]{default}[/]")
+
+                continue
+
+        # This should never be reached due to the exception above, but just in case
+        raise Exception("Unexpected error in choice selection")
+
+    def get_choice_numeric(self, prompt: str, max_number: int, default: int = None, max_retries: int = 5) -> int:
+        """Get numeric choice with validation and error handling."""
+        choices = [str(i + 1) for i in range(max_number)]
+        default_str = str(default) if default is not None else None
+
+        attempts = 0
+        while attempts < max_retries:
+            try:
+                response = Prompt.ask(prompt, choices=choices, default=default_str, show_choices=False)
+                return int(response)
+            except (KeyboardInterrupt, EOFError):
+                self.console.print("\n[yellow]Operation cancelled by user.[/]")
+                raise
+            except (ValueError, Exception):
+                attempts += 1
+                self.console.print("\n[red]Invalid option.[/]")
+
+                if attempts >= max_retries:
+                    self.console.print(f"[red]Maximum retry attempts ({max_retries}) reached. Exiting.[/]")
+                    raise Exception("Maximum retry attempts exceeded")
+
+                # Show valid range
+                self.console.print(f"Please enter a number between [bold]1[/] and [bold]{max_number}[/]")
+                if default_str:
+                    self.console.print(f"Press Enter for default: [bold]{default_str}[/]")
+
+                continue
+
+        raise Exception("Unexpected error in numeric choice selection")
+
+    def get_yes_no(self, prompt: str, default: str = "y", max_retries: int = 5) -> bool:
+        """Standardized yes/no prompt with error handling."""
+        attempts = 0
+
+        while attempts < max_retries:
+            try:
+                response = Prompt.ask(prompt, choices=["y", "n"], default=default).lower()
+                return response in ["y", "yes"]
+            except (KeyboardInterrupt, EOFError):
+                self.console.print("\n[yellow]Operation cancelled by user.[/]")
+                raise
+            except Exception:
+                attempts += 1
+                self.console.print("\n[red]Invalid option.[/]")
+
+                if attempts >= max_retries:
+                    self.console.print(f"[red]Maximum retry attempts ({max_retries}) reached. Exiting.[/]")
+                    raise Exception("Maximum retry attempts exceeded")
+
+                self.console.print("Valid options: [bold]y[/] / [bold]n[/]")
+                if default:
+                    self.console.print(f"Press Enter for default: [bold]{default}[/]")
+
+                continue
+
+        raise Exception("Unexpected error in yes/no selection")
+
+    def display_optimization_options_table(self, options: List[Dict[str, str]]) -> None:
+        """Display optimization options in a formatted table."""
+        options_panel = OptimizationOptionsPanel()
+        table = options_panel.get_display(options)
+        self.console.print(table)
+
+    def display_selection_confirmation(self, item_text: str) -> None:
+        """Display a confirmation message for user selection."""
+        self.console.print(f"\n[bold blue]Selected:[/] [bold cyan]{item_text}[/]")
+        self.console.print()
+
+    def get_multiline_input(self, intro_message: str) -> str:
+        """Handle multiline input with proper instructions."""
+        self.console.print(intro_message)
+        self.console.print("[dim]Current script content will be replaced[/dim]\n")
+        edited_lines = []
+        try:
+            while True:
+                line = input()
+                edited_lines.append(line + "\n")
+        except EOFError:
+            pass
+        except KeyboardInterrupt:
+            self.console.print("\n[yellow]Edit cancelled.[/]")
+            return ""
+        return "".join(edited_lines)
+
+
+class Chatbot:
+    def __init__(
+        self, project_path: pathlib.Path, console: Console, run_parser: argparse.ArgumentParser, model: Optional[str] = None
+    ):
+        self.project_path = project_path
+        self.console = console
+        self.ui_helper = UserInteractionHelper(console)
+        self.weco_run_parser = run_parser
+        self.user_specified_model = model  # Store user's model choice
+        self.resolved_model = None  # Will be determined during workflow
+        # State tracking (replacing conversation manager)
+        self.current_step: str = "eval_analysis"
+        self.evaluation_analysis: Optional[Dict[str, Any]] = None
+        self.selected_eval_config: Optional[Dict[str, Any]] = None
+
+        # GitIngest data
+        self.gitingest_summary: Optional[str] = None
+        self.gitingest_tree: Optional[str] = None
+        self.gitingest_content: Optional[Dict[str, str]] = None
+        self.gitingest_content_str: Optional[str] = None
+
+        # Chat UI components (removed unused chat layout)
+        self.active_live_display: Optional[Live] = None
+        self.use_chat_ui = False
+
+    def analyze_codebase_and_get_optimization_options(self) -> Optional[List[Dict[str, str]]]:
+        """Analyze the codebase using gitingest and get optimization suggestions from Gemini."""
+        try:
+            with self.console.status("[bold green]Parsing codebase...[/]"):
+                result = ingest(
+                    str(self.project_path),
+                    exclude_patterns=set(
+                        [
+                            "*.git",
+                            "*.gitignore",
+                            "LICENSE*",
+                            "CONTRIBUTING*",
+                            "CODE_OF_CONDUCT*",
+                            "CHANGELOG*",
+                            "*.repomixignore",
+                            "*.dockerignore",
+                            "*.pyc",
+                            "*.pyo",
+                            "*.csv",
+                            "*.json",
+                            "*.jsonl",
+                            "*.txt",
+                            "*.md",
+                            "*.rst",
+                            "*.yml",
+                            "*.yaml",
+                        ]
+                    ),
+                )
+                self.gitingest_summary, self.gitingest_tree, self.gitingest_content_str = result
+
+            if not self.gitingest_content_str:
+                self.console.print("[yellow]Warning: gitingest found no content to analyze.[/]")
+                return None
+
+            with self.console.status("[bold green]Generating optimization suggestions...[/]"):
+                result = get_optimization_suggestions_from_codebase(
+                    self.gitingest_summary, self.gitingest_tree, self.gitingest_content_str, self.console
+                )
+
+            if result and isinstance(result, list):
+                options = result  # Use the dictionaries directly from API
+            else:
+                options = None
+
+            if not options or not isinstance(options, list):
+                self.console.print("[red]Failed to get valid optimization options.[/]")
+                return None
+
+            if not options:
+                self.console.print("[yellow]No optimizations suggested for this codebase.[/]")
+                return None
+
+            self.ui_helper.display_optimization_options_table(options)
+            return options
+
+        except Exception as e:
+            self.console.print(f"[bold red]An error occurred during analysis: {e}[/]")
+            import traceback
+
+            traceback.print_exc()
+            return None
+
+    def get_user_option_selection(self, options: List[Dict[str, str]]) -> Optional[Dict[str, str]]:
+        """Get user's selection from the optimization options."""
+        if not options:
+            return None
+
+        try:
+            choice_num = self.ui_helper.get_choice_numeric(
+                "\n[bold]Which optimization would you like to pursue?[/bold] (Enter number)", len(options)
+            )
+            selected_option = options[choice_num - 1]
+            self.ui_helper.display_selection_confirmation(selected_option["description"])
+            return selected_option
+        except Exception as e:
+            self.console.print(f"[red]Error selecting optimization option: {e}[/]")
+            return None
+
+    def handle_script_generation_workflow(self, selected_option: Dict[str, str]) -> Optional[Dict[str, str]]:
+        """Handle script generation, editing, and custom path workflows.
+
+        This implements a state machine for evaluation script creation with these states:
+        1. No script: User can Generate or Provide path
+        2. Script exists: User can Use, Edit, Regenerate, or Provide different path
+
+        The workflow continues until user chooses to Use a script or exits.
+        """
+        eval_script_content = None
+        eval_script_path_str = None
+        metric_name = None
+        goal = None
+
+        # Main workflow loop - continues until user accepts a script
+        while True:
+            if eval_script_content:
+                # State: Script exists - show it and offer actions
+                self.console.print("\n[bold]Current evaluation script:[/]")
+                script_panel = EvaluationScriptPanel()
+                panel = script_panel.get_display(eval_script_content, eval_script_path_str or "evaluate.py")
+                self.console.print(panel)
+
+                if metric_name and goal:
+                    self.console.print(f"\n[green]Suggested metric:[/] {metric_name} (goal: {goal})")
+
+                action = self.ui_helper.get_choice(
+                    "Choose an action: [bold]U[/]se this script / [bold]E[/]dit content / [bold]R[/]egenerate / [bold]P[/]rovide different path",
+                    ["u", "U", "e", "E", "r", "R", "p", "P"],
+                    default="u",
+                ).lower()
+            else:
+                # State: No script - offer initial options
+                action = self.ui_helper.get_choice(
+                    "How to proceed? ([bold]G[/]enerate / Provide [bold]P[/]ath)", ["g", "G", "p", "P"], default="g"
+                ).lower()
+
+            # Action: Use current script (exit workflow)
+            if action == "u" and eval_script_content:
+                # Save generated script to file if it hasn't been saved yet
+                if not eval_script_path_str:
+                    eval_script_path_obj = self.project_path / "evaluate.py"
+                    eval_script_path_obj.write_text(eval_script_content)
+                    eval_script_path_str = "evaluate.py"
+                    self.console.print(f"Generated script saved as [cyan]{eval_script_path_str}[/]")
+                break
+
+            # Action: Edit current script
+            elif action == "e" and eval_script_content:
+                eval_script_content = self.ui_helper.get_multiline_input(
+                    "\nPlease paste your edited script below. Press Ctrl+D (Unix) or Ctrl+Z then Enter (Windows) when done:"
+                )
+                if not eval_script_content:
+                    continue  # Edit was cancelled, stay in loop
+
+                # After editing, we need new metric info since script changed
+                eval_script_path_str = None  # Clear path since content changed
+                metric_name = Prompt.ask("Please specify the metric name this script will print")
+                goal = self.ui_helper.get_choice(
+                    "Should we maximize or minimize this metric?", choices=["maximize", "minimize"], default="maximize"
+                )
+                continue  # Show the edited script for review
+
+            # Action: Generate new script (or regenerate)
+            elif action == "g" or action == "r":
+                with self.console.status("[bold green]Generating evaluation script and determining metrics...[/]"):
+                    result = generate_evaluation_script_and_metrics(
+                        selected_option["target_file"],
+                        selected_option["description"],
+                        self.gitingest_content_str,
+                        self.console,
+                    )
+                if result and result[0]:
+                    eval_script_content, metric_name, goal, reasoning = result
+                    if reasoning:
+                        self.console.print(f"[dim]Reasoning: {reasoning}[/]")
+                else:
+                    self.console.print("[red]Failed to generate an evaluation script.[/]")
+                    eval_script_content = None
+                    metric_name = None
+                    goal = None
+                eval_script_path_str = None  # Generated content not saved yet
+                continue  # Show the generated script for review
+
+            # Action: Provide path to existing script
+            elif action == "p":
+                user_script_path_str = Prompt.ask("Enter the path to your evaluation script (relative to project root)")
+                user_script_path = self.project_path / user_script_path_str
+                if user_script_path.is_file():
+                    try:
+                        eval_script_content = user_script_path.read_text()
+                        eval_script_path_str = user_script_path_str
+                        self.console.print(f"Using script from [cyan]{eval_script_path_str}[/]")
+
+                        # For user-provided scripts, we need manual metric specification
+                        metric_name = Prompt.ask("Please specify the metric name this script will print")
+                        goal = self.ui_helper.get_choice(
+                            "Should we maximize or minimize this metric?", choices=["maximize", "minimize"], default="maximize"
+                        )
+                        break  # User provided script is ready to use
+                    except Exception as e:
+                        self.console.print(f"[red]Error reading script {user_script_path_str}: {e}[/]")
+                        eval_script_content = None
+                else:
+                    self.console.print(f"[red]File not found: {user_script_path}[/]")
+                continue  # Stay in loop to try again
+
+        # Validate we have all required components
+        if not eval_script_content or not eval_script_path_str or not metric_name or not goal:
+            return None
+
+        # Analyze the script to determine the proper execution command
+        with self.console.status("[bold green]Analyzing script execution requirements...[/]"):
+            eval_command = analyze_script_execution_requirements(
+                eval_script_content, eval_script_path_str, selected_option["target_file"], self.console
+            )
+
+        return {
+            "script_path": eval_script_path_str,
+            "script_content": eval_script_content,
+            "metric_name": metric_name,
+            "goal": goal,
+            "eval_command": eval_command or f"python {eval_script_path_str}",
+        }
+
+    def get_evaluation_configuration(self, selected_option: Dict[str, str]) -> Optional[Dict[str, str]]:
+        """Get or create evaluation script configuration using intelligent conversation-guided approach."""
+        with self.console.status("[bold green]Analyzing evaluation environment...[/]"):
+            analysis = analyze_evaluation_environment(
+                selected_option["target_file"],
+                selected_option["description"],
+                self.gitingest_summary,
+                self.gitingest_tree,
+                self.gitingest_content_str,
+                self.console,
+            )
+
+        if not analysis:
+            self.console.print("[yellow]Failed to analyze evaluation environment. Falling back to generation.[/]")
+            return self.handle_script_generation_workflow(selected_option)
+
+        self.evaluation_analysis = analysis
+        self.current_step = "script_selection"
+
+        return self.handle_evaluation_decision(selected_option, analysis)
+
+    def handle_evaluation_decision(
+        self, selected_option: Dict[str, str], analysis: Dict[str, Any]
+    ) -> Optional[Dict[str, str]]:
+        """Handle user decision based on intelligent evaluation analysis.
+
+        This method implements a recommendation system that:
+        1. Shows existing evaluation scripts found in the codebase
+        2. Provides AI-generated recommendations (use_existing vs generate_new)
+        3. Handles user choice with different flows based on recommendation
+
+        The logic adapts the default choice based on AI recommendation to guide users
+        toward the most suitable option while still allowing them to override.
+        """
+        existing_evals = analysis.get("existing_evaluations", [])
+        recommendation = analysis.get("recommendation", "generate_new")
+        reasoning = analysis.get("reasoning", "")
+
+        # Display existing evaluation scripts if any were found
+        if existing_evals:
+            from rich.table import Table
+            from rich import box
+
+            table = Table(
+                title="Existing Evaluation Scripts", show_lines=True, box=box.ROUNDED, border_style="cyan", padding=(1, 1)
+            )
+            table.add_column("No.", style="bold white", width=5, header_style="bold white", justify="center")
+            table.add_column("Script Path", style="cyan", width=20, header_style="bold white")
+            table.add_column("Suitability", style="magenta", width=40, header_style="bold white")
+            table.add_column("Metrics", style="yellow", width=20, header_style="bold white")
+            table.add_column("Confidence", style="green", width=10, header_style="bold white")
+
+            for i, eval_script in enumerate(existing_evals):
+                metrics_str = ", ".join([f"{m['name']} ({m['goal']})" for m in eval_script.get("metrics", [])])
+                suitability_str = eval_script.get("suitability", "")
+                table.add_row(str(i + 1), eval_script["script_path"], suitability_str, metrics_str, eval_script["confidence"])
+            self.console.print(table)
+        else:
+            self.console.print("\n[yellow]No existing evaluation scripts found.[/]")
+
+        # Show AI recommendation with reasoning
+        self.console.print(f"\n💡 [bold green]Recommended:[/] [cyan]{recommendation.replace('_', ' ').title()}[/]")
+        self.console.print()
+        self.console.print(f"[yellow]🧠 Reasoning:[/] {reasoning}")
+
+        # Decision flow 1: AI recommends using existing script
+        if existing_evals and recommendation == "use_existing":
+            choices = [str(i + 1) for i in range(len(existing_evals))] + ["g"]
+            choice = self.ui_helper.get_choice(
+                "\n[bold]Choose an option:[/] (Enter number to use existing script, 'g' to generate new)",
+                choices,
+                default="1" if existing_evals else "g",  # Default to first existing script
+                show_choices=False,
+            )
+
+            if choice == "g":
+                return self.handle_script_generation_workflow(selected_option)
+            else:
+                selected_eval = existing_evals[int(choice) - 1]
+                return self.handle_existing_evaluation_selection(selected_option, selected_eval)
+
+        # Decision flow 2: Scripts exist but AI recommends generating new
+        elif existing_evals:
+            choices = [str(i + 1) for i in range(len(existing_evals))] + ["g"]
+            choice = self.ui_helper.get_choice(
+                "\n[bold]Choose an option:[/] (Enter number to use existing script, 'g' to generate new as recommended)",
+                choices,
+                default="g",  # Default to generate new (following recommendation)
+                show_choices=False,
+            )
+
+            if choice == "g":
+                return self.handle_script_generation_workflow(selected_option)
+            else:
+                selected_eval = existing_evals[int(choice) - 1]
+                return self.handle_existing_evaluation_selection(selected_option, selected_eval)
+
+        # Decision flow 3: No existing scripts found - must generate new
+        else:
+            self.console.print("\n[cyan]Proceeding to generate a new evaluation script...[/]")
+            return self.handle_script_generation_workflow(selected_option)
+
+    def handle_existing_evaluation_selection(
+        self, selected_option: Dict[str, str], selected_eval: Dict[str, Any]
+    ) -> Optional[Dict[str, str]]:
+        """Handle selection of an existing evaluation script."""
+        script_path = selected_eval["script_path"]
+
+        try:
+            script_file = self.project_path / script_path
+            if not script_file.exists():
+                self.console.print(f"[red]Error: Script file {script_path} not found.[/]")
+                return None
+
+            script_content = script_file.read_text()
+            self.console.print(f"\n[green]Using existing script:[/] [cyan]{script_path}[/]")
+            self.console.print()
+
+            metrics = selected_eval.get("metrics", [])
+
+            if not metrics:
+                self.console.print("[yellow]No metrics detected. Please specify manually.[/]")
+                metric_name = Prompt.ask("Please specify the metric name this script will print")
+                goal = self.ui_helper.get_choice(
+                    "Should we maximize or minimize this metric?", choices=["maximize", "minimize"], default="maximize"
+                )
+            elif len(metrics) == 1:
+                metric_name = metrics[0]["name"]
+                goal = metrics[0]["goal"]
+                self.console.print(f"[green]Using detected metric:[/] [yellow]{metric_name}[/] (goal: {goal})")
+            else:
+                self.console.print("[green]Multiple metrics detected:[/]")
+                for i, m in enumerate(metrics):
+                    self.console.print(f" {i + 1}. {m['name']} (goal: {m['goal']})")
+                try:
+                    choice_num = self.ui_helper.get_choice_numeric("Which metric to use?", len(metrics))
+                    selected_metric = metrics[choice_num - 1]
+                    metric_name = selected_metric["name"]
+                    goal = selected_metric["goal"]
+                except Exception as e:
+                    self.console.print(f"[red]Error selecting metric: {e}[/]")
+                    return None
+
+            eval_command = selected_eval.get("run_command", "")
+            if not eval_command or eval_command == f"python {script_path}":
+                with self.console.status("[bold green]Analyzing script execution requirements...[/]"):
+                    eval_command = analyze_script_execution_requirements(
+                        script_content, script_path, selected_option["target_file"], self.console
+                    )
+
+            self.current_step = "confirmation"
+            eval_config = {
+                "script_path": script_path,
+                "script_content": script_content,
+                "metric_name": metric_name,
+                "goal": goal,
+                "eval_command": eval_command or f"python {script_path}",
+            }
+
+            self.selected_eval_config = eval_config
+            return eval_config
+
+        except Exception as e:
+            self.console.print(f"[red]Error processing script {script_path}: {e}[/]")
+            return None
+
+    def confirm_and_finalize_evaluation_config(self, eval_config: Dict[str, str]) -> Optional[Dict[str, str]]:
+        """Allow user to modify the evaluation command if needed."""
+        self.console.print(f"\n[green]Analyzed evaluation command:[/] [cyan]{eval_config['eval_command']}[/]")
+
+        modify_command = self.ui_helper.get_yes_no("Is this the right command to run the evaluation?", default="y")
+
+        if not modify_command:
+            self.console.print(f"\n[dim]Current command:[/] {eval_config['eval_command']}")
+            new_command = Prompt.ask("Enter the corrected evaluation command", default=eval_config["eval_command"])
+            self.console.print(f"[green]Updated command:[/] {new_command}")
+            eval_config = {
+                "script_path": eval_config["script_path"],
+                "script_content": eval_config["script_content"],
+                "metric_name": eval_config["metric_name"],
+                "goal": eval_config["goal"],
+                "eval_command": new_command,
+            }
+
+        return eval_config
+
+    def build_weco_command(
+        self, target_file: str, steps: int, eval_config: Dict[str, str], model: str, additional_instructions: str = None
+    ) -> str:
+        """Build the weco command from the optimization and evaluation configs.
+
+        Constructs a properly quoted shell command that can be executed directly.
+        Uses shlex.quote() to handle special characters and spaces in arguments safely.
+        """
+        command_parts = [
+            "weco",
+            "run",
+            "--source",
+            shlex.quote(target_file),  # Quote file paths for shell safety
+            "--eval-command",
+            shlex.quote(eval_config["eval_command"]),  # Quote complex commands with spaces/args
+            "--metric",
+            shlex.quote(eval_config["metric_name"]),  # Quote metric names that might have spaces
+            "--goal",
+            eval_config["goal"],  # Goal is always "maximize" or "minimize" (no quoting needed)
+            "--steps",
+            str(steps),  # Convert int to string
+            "--model",
+            shlex.quote(model),  # Always include resolved model
+        ]
+
+        # Add optional parameters if they're specified
+        if additional_instructions:
+            command_parts.extend(["--additional-instructions", shlex.quote(additional_instructions)])
+
+        return " ".join(command_parts)
+
+    def execute_optimization(
+        self,
+        eval_config: Dict[str, str],
+        target_file: str,
+        steps: int,
+        model: str,
+        additional_instructions: str,
+        weco_run_cmd: str,
+    ) -> None:
+        """Execute the optimization with the given configuration.
+
+        This method handles two execution paths:
+        1. Direct execution: Run with the provided configuration
+        2. User adjustment: Allow user to modify the command before execution
+
+        If user chooses to adjust, we parse their command to validate it and extract
+        the new configuration parameters.
+        """
+        self.console.print("\n[bold green]🚀 Starting optimization...[/]")
+        self.console.print(f"[dim]Command: {weco_run_cmd}[/]\n")
+
+        # Give user option to adjust parameters before execution
+        adjust_command = self.ui_helper.get_yes_no(
+            f"\n[bold]Current command:[/] [dim]{weco_run_cmd}[/]\n"
+            "[dim]You can modify the evaluation command, steps, model, or other parameters.[/]\n"
+            "Would you like to adjust any parameters?",
+            default="no",
+        )
+
+        if adjust_command:
+            # User wants to modify the command - get their input
+            new_weco_run_cmd_str = Prompt.ask("Enter the full weco run command", default=weco_run_cmd)
+
+            # Parse the user's command safely using shlex
+            try:
+                command_tokens = shlex.split(new_weco_run_cmd_str)
+            except ValueError as e:
+                self.console.print(f"[bold red]Error parsing command: {e}. Please check quotes.[/]")
+                return
+
+            # Validate command structure (must start with "weco run")
+            if not command_tokens or command_tokens[0] != "weco" or (len(command_tokens) > 1 and command_tokens[1] != "run"):
+                self.console.print("[bold red]Invalid command. Must start with 'weco run'.[/]")
+                return
+
+            # Extract arguments for the 'run' subcommand (skip "weco run")
+            run_args_list = command_tokens[2:]
+
+            try:
+                # Parse the arguments using the same parser that CLI uses
+                parsed_ns = self.weco_run_parser.parse_args(run_args_list)
+
+                # Update configuration from parsed arguments
+                eval_config = {
+                    "script_path": "",  # Not needed for execution
+                    "script_content": "",  # Not needed for execution
+                    "metric_name": parsed_ns.metric,
+                    "goal": parsed_ns.goal,
+                    "eval_command": parsed_ns.eval_command,
+                }

+                target_file = parsed_ns.source
+                steps = parsed_ns.steps
+                model = parsed_ns.model
+                additional_instructions = parsed_ns.additional_instructions
+
+            except Exception as e:
+                self.console.print(f"[bold red]Error parsing adjusted command: {e}. Optimization not started.[/]")
+                return
+
+        # Import and execute the actual optimization function
+        # (Import here to avoid circular imports)
+        from .optimizer import execute_optimization as actual_execute_optimization
+
+        success = actual_execute_optimization(
+            source=target_file,
+            eval_command=eval_config["eval_command"],
+            metric=eval_config["metric_name"],
+            goal=eval_config["goal"],
+            steps=steps,
+            model=model,
+            log_dir=".runs",  # Standard log directory
+            additional_instructions=additional_instructions,
+            console=self.console,
+        )
+
+        # Report final result to user
+        if success:
+            self.console.print("\n[bold green]✅ Optimization completed successfully![/]")
+        else:
+            self.console.print("\n[bold yellow]⚠️ Optimization ended early or encountered issues.[/]")
+
+    def show_and_copy_command(self, command: str) -> None:
+        """Show the command and copy it to clipboard."""
+        import subprocess
+        import platform
+
+        try:
+            if platform.system() == "Darwin":
+                subprocess.run(["pbcopy"], input=command.encode(), check=True)
+            elif platform.system() == "Linux":
+                subprocess.run(["xclip", "-selection", "clipboard"], input=command.encode(), check=True)
+            elif platform.system() == "Windows":
+                subprocess.run(["clip"], input=command.encode(), check=True)
+            self.console.print("\n[green]✅ Command copied to clipboard! Exiting...[/]")
+        except Exception:
+            self.console.print("\n[yellow]Could not copy to clipboard automatically.[/]")
+
+    def setup_evaluation(self, selected_option: Dict[str, str]) -> Optional[Tuple[str, Dict[str, str], str, int, str, str]]:
+        """Setup evaluation environment for the selected optimization."""
+        eval_config = self.get_evaluation_configuration(selected_option)
+        if not eval_config:
+            self.console.print("[red]Evaluation script setup failed.[/]")
+            return None
+
+        eval_config = self.confirm_and_finalize_evaluation_config(eval_config)
+        if not eval_config:
+            self.console.print("[red]Evaluation configuration failed.[/]")
+            return None
+
+        steps = 20
+        steps_input = Prompt.ask(f"Number of optimization steps (or press Enter to use {steps})", default=str(steps))
+        try:
+            steps = int(steps_input)
+            steps = max(1, min(1000, steps))
+            if steps != int(steps_input):
+                self.console.print(f"[yellow]Adjusted to valid range: {steps}[/]")
+        except ValueError:
+            self.console.print(f"[yellow]Invalid input, using default value: {steps}[/]")
+
+        # Resolve the model to use
+        if self.user_specified_model:
+            self.resolved_model = self.user_specified_model
+        else:
+            # Use same default model selection as weco run
+            from .utils import determine_default_model, read_api_keys_from_env
+
+            llm_api_keys = read_api_keys_from_env()
+            self.resolved_model = determine_default_model(llm_api_keys)
+
+        target_file = selected_option["target_file"]
+        additional_instructions = selected_option["description"]
+
+        weco_run_cmd_str = self.build_weco_command(
+            target_file, steps, eval_config, self.resolved_model, additional_instructions
+        )
+        return weco_run_cmd_str, eval_config, target_file, steps, self.resolved_model, additional_instructions
+
+    def start(self):
+        self.console.print("[bold cyan]Welcome to Weco![/]")
+        self.console.print(f"Let's optimize your codebase in: [cyan]{self.project_path}[/]\n")
+
+        options = self.analyze_codebase_and_get_optimization_options()
+        if not options:
+            return
+
+        selected_option = self.get_user_option_selection(options)
+        if not selected_option:
+            return
+
+        result = self.setup_evaluation(selected_option)
+        if not result:
+            return
+
+        weco_command, eval_config, target_file, steps, model, additional_instructions = result
+
+        self.console.print("\n[bold green]Command:[/]")
+        self.console.print(f"[on black white]{weco_command}[/]\n")
+
+        self.console.print(f"[yellow]ℹ️ File paths are relative to: {self.project_path}[/]")
+
+        self.console.print("\n[bold green]🎯 What would you like to do?[/]")
+        self.console.print(" [cyan]1.[/] [bold]Run now[/] - Start the optimization immediately")
+        self.console.print(" [cyan]2.[/] [bold]Show and copy[/] - Display the command and copy to clipboard")
+
+        execution_choice = self.ui_helper.get_choice(
+            "\nEnter your choice", choices=["1", "2"], default="1", show_choices=False
+        )
+
+        if execution_choice == "1":
+            self.execute_optimization(eval_config, target_file, steps, model, additional_instructions, weco_command)
+        else:
+            self.show_and_copy_command(weco_command)
+
+
+def run_onboarding_chatbot(
+    project_path: pathlib.Path, console: Console, run_parser: argparse.ArgumentParser, model: Optional[str] = None
+):
+    try:
+        chatbot = Chatbot(project_path, console, run_parser, model)
+        chatbot.start()
+    except Exception as e:
+        console.print(f"[bold red]An unexpected error occurred in the chatbot: {e}[/]")
+        import traceback
+
+        traceback.print_exc()
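
Note: the following is a minimal, hypothetical sketch (not part of this diff or the published package) of how the new `run_onboarding_chatbot` entry point could be driven, assuming a stand-in `weco run` argument parser. The flag names are inferred from how `execute_optimization` reads the parsed namespace (source, eval_command, metric, goal, steps, model, additional_instructions); the real parser is built by the weco CLI and is not shown here.

# Hypothetical usage sketch: drive the onboarding chatbot with a stand-in
# "weco run" parser. Flag names are inferred from chatbot.py, not taken
# from the actual weco CLI module.
import argparse
import pathlib

from rich.console import Console

from weco.chatbot import run_onboarding_chatbot

run_parser = argparse.ArgumentParser(prog="weco run")
run_parser.add_argument("--source", required=True)
run_parser.add_argument("--eval-command", required=True)
run_parser.add_argument("--metric", required=True)
run_parser.add_argument("--goal", choices=["maximize", "minimize"], required=True)
run_parser.add_argument("--steps", type=int, default=20)
run_parser.add_argument("--model", default=None)
run_parser.add_argument("--additional-instructions", default=None)

# Launch the interactive onboarding flow against the current directory.
run_onboarding_chatbot(pathlib.Path("."), Console(), run_parser, model=None)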