weco-0.3.5-py3-none-any.whl → weco-0.3.7-py3-none-any.whl

This diff shows the changes between publicly released package versions as they appear in their public registry. It is provided for informational purposes only.
weco/chatbot.py DELETED
@@ -1,827 +0,0 @@
- import pathlib
- import shlex
- import argparse
- from typing import List, Optional, Dict, Any, Tuple
- import sys
-
- from rich.console import Console
- from rich.prompt import Prompt
- from rich.live import Live
- from gitingest import ingest
-
- from .api import (
-     get_optimization_suggestions_from_codebase,
-     generate_evaluation_script_and_metrics,
-     analyze_script_execution_requirements,
-     analyze_evaluation_environment,
- )
- from .panels import OptimizationOptionsPanel, EvaluationScriptPanel
-
-
- class UserInteractionHelper:
-     """Helper class for standardized user interactions."""
-
-     def __init__(self, console: Console):
-         self.console = console
-
-     def get_choice(
-         self, prompt: str, choices: List[str], default: str = None, show_choices: bool = True, max_retries: int = 5
-     ) -> str:
-         """Standardized choice prompt with error handling."""
-         return self._get_choice_with_retry(prompt, choices, default, show_choices, max_retries)
-
-     def _get_choice_with_retry(
-         self, prompt: str, choices: List[str], default: str = None, show_choices: bool = True, max_retries: int = 5
-     ) -> str:
-         """Get user choice with retry logic and error handling."""
-         attempts = 0
-
-         while attempts < max_retries:
-             try:
-                 # Use Rich's Prompt.ask which handles basic validation
-                 response = Prompt.ask(prompt, choices=choices, default=default, show_choices=show_choices)
-                 return response
-             except (KeyboardInterrupt, EOFError):
-                 # Handle Ctrl+C or Ctrl+D gracefully
-                 self.console.print("\n[yellow]Operation cancelled by user.[/]")
-                 raise
-             except Exception:
-                 attempts += 1
-                 self.console.print("\n[red]Invalid option.[/]")
-
-                 if attempts >= max_retries:
-                     self.console.print(f"[red]Maximum retry attempts ({max_retries}) reached. Exiting.[/]")
-                     raise Exception("Maximum retry attempts exceeded. Please try again.")
-
-                 # Show available options without the full prompt
-                 if choices:
-                     if len(choices) <= 10:  # Show all options if not too many
-                         options_str = " / ".join([f"[bold]{choice}[/]" for choice in choices])
-                         self.console.print(f"Valid options: {options_str}")
-                     else:
-                         self.console.print(f"Please enter a valid option from the {len(choices)} available choices.")
-
-                 if default:
-                     self.console.print(f"Press Enter for default: [bold]{default}[/]")
-
-                 continue
-
-         # This should never be reached due to the exception above, but just in case
-         raise Exception("Unexpected error while selecting a choice")
-
-     def get_choice_numeric(self, prompt: str, max_number: int, default: int = None, max_retries: int = 5) -> int:
-         """Get numeric choice with validation and error handling."""
-         choices = [str(i + 1) for i in range(max_number)]
-         default_str = str(default) if default is not None else None
-
-         attempts = 0
-         while attempts < max_retries:
-             try:
-                 response = Prompt.ask(prompt, choices=choices, default=default_str, show_choices=False)
-                 return int(response)
-             except (KeyboardInterrupt, EOFError):
-                 self.console.print("\n[yellow]Operation cancelled by user.[/]")
-                 raise
-             except (ValueError, Exception):
-                 attempts += 1
-                 self.console.print("\n[red]Invalid option.[/]")
-
-                 if attempts >= max_retries:
-                     self.console.print(f"[red]Maximum retry attempts ({max_retries}) reached. Exiting.[/]")
-                     raise Exception("Maximum retry attempts exceeded. Please try again.")
-
-                 # Show valid range
-                 self.console.print(f"Please enter a number between [bold]1[/] and [bold]{max_number}[/]")
-                 if default_str:
-                     self.console.print(f"Press Enter for default: [bold]{default_str}[/]")
-
-                 continue
-
-         raise Exception("Unexpected error in numeric choice selection")
-
-     def get_yes_no(self, prompt: str, default: str = "y", max_retries: int = 5) -> bool:
-         """Standardized yes/no prompt with error handling."""
-         attempts = 0
-
-         while attempts < max_retries:
-             try:
-                 response = Prompt.ask(prompt, choices=["y", "n"], default=default).lower()
-                 return response in ["y", "yes"]
-             except (KeyboardInterrupt, EOFError):
-                 self.console.print("\n[yellow]Operation cancelled by user.[/]")
-                 raise
-             except Exception:
-                 attempts += 1
-                 self.console.print("\n[red]Invalid option.[/]")
-
-                 if attempts >= max_retries:
-                     self.console.print(f"[red]Maximum retry attempts ({max_retries}) reached. Exiting.[/]")
-                     raise Exception("Maximum retry attempts exceeded. Please try again.")
-
-                 self.console.print("Valid options: [bold]y[/] / [bold]n[/]")
-                 if default:
-                     self.console.print(f"Press Enter for default: [bold]{default}[/]")
-
-                 continue
-
-         raise Exception("Unexpected error while selecting an option")
-
-     def display_optimization_options_table(self, options: List[Dict[str, str]]) -> None:
-         """Display optimization options in a formatted table."""
-         options_panel = OptimizationOptionsPanel()
-         table = options_panel.get_display(options)
-         self.console.print(table)
-
-     def display_selection_confirmation(self, item_text: str) -> None:
-         """Display a confirmation message for user selection."""
-         self.console.print(f"\n[bold blue]Selected:[/] [bold cyan]{item_text}[/]")
-         self.console.print()
-
-     def get_multiline_input(self, intro_message: str) -> str:
-         """Handle multiline input with proper instructions."""
-         self.console.print(intro_message)
-         self.console.print("[dim]Current script content will be replaced[/dim]\n")
-         edited_lines = []
-         try:
-             while True:
-                 line = input()
-                 edited_lines.append(line + "\n")
-         except EOFError:
-             pass
-         except KeyboardInterrupt:
-             self.console.print("\n[yellow]Edit cancelled.[/]")
-             return ""
-         return "".join(edited_lines)
-
-
- class Chatbot:
-     def __init__(
-         self, project_path: pathlib.Path, console: Console, run_parser: argparse.ArgumentParser, model: Optional[str] = None
-     ):
-         self.project_path = project_path
-         self.console = console
-         self.ui_helper = UserInteractionHelper(console)
-         self.weco_run_parser = run_parser
-         self.user_specified_model = model  # Store user's model choice
-         self.resolved_model = None  # Will be determined during workflow
-         # State tracking (replacing conversation manager)
-         self.current_step: str = "eval_analysis"
-         self.evaluation_analysis: Optional[Dict[str, Any]] = None
-         self.selected_eval_config: Optional[Dict[str, Any]] = None
-
-         # GitIngest data
-         self.gitingest_summary: Optional[str] = None
-         self.gitingest_tree: Optional[str] = None
-         self.gitingest_content: Optional[Dict[str, str]] = None
-         self.gitingest_content_str: Optional[str] = None
-
-         # Chat UI components (removed unused chat layout)
-         self.active_live_display: Optional[Live] = None
-         self.use_chat_ui = False
-
-     def analyze_codebase_and_get_optimization_options(self) -> Optional[List[Dict[str, str]]]:
-         """Analyze the codebase using gitingest and get optimization suggestions from Gemini."""
-         try:
-             with self.console.status("[bold green]Parsing codebase...[/]"):
-                 result = ingest(
-                     str(self.project_path),
-                     exclude_patterns=set(
-                         [
-                             "*.git",
-                             "*.gitignore",
-                             "LICENSE*",
-                             "CONTRIBUTING*",
-                             "CODE_OF_CONDUCT*",
-                             "CHANGELOG*",
-                             "*.repomixignore",
-                             "*.dockerignore",
-                             "*.pyc",
-                             "*.pyo",
-                             "*.csv",
-                             "*.json",
-                             "*.jsonl",
-                             "*.txt",
-                             "*.md",
-                             "*.rst",
-                             "*.yml",
-                             "*.yaml",
-                         ]
-                     ),
-                 )
-                 self.gitingest_summary, self.gitingest_tree, self.gitingest_content_str = result
-
-             if not self.gitingest_content_str:
-                 self.console.print("[yellow]Warning: gitingest found no content to analyze.[/]")
-                 return None
-
-             with self.console.status("[bold green]Generating optimization suggestions...[/]"):
-                 result = get_optimization_suggestions_from_codebase(
-                     console=self.console,
-                     gitingest_summary=self.gitingest_summary,
-                     gitingest_tree=self.gitingest_tree,
-                     gitingest_content_str=self.gitingest_content_str,
-                     auth_headers=getattr(self, "auth_headers", {}),
-                 )
-
-             if result and isinstance(result, list):
-                 options = result  # Use the dictionaries directly from API
-             else:
-                 options = None
-
-             if not options or not isinstance(options, list):
-                 self.console.print("[red]Unable to retrieve valid optimization options from the backend.[/]")
-                 return None
-
-             if not options:
-                 self.console.print("[yellow]No optimizations suggested for this codebase.[/]")
-                 return None
-
-             self.ui_helper.display_optimization_options_table(options)
-             return options
-
-         except Exception as e:
-             self.console.print(f"[bold red]An error occurred during analysis: {e}[/]")
-             import traceback
-
-             traceback.print_exc()
-             return None
-
-     def get_user_option_selection(self, options: List[Dict[str, str]]) -> Optional[Dict[str, str]]:
-         """Get user's selection from the optimization options."""
-         if not options:
-             return None
-
-         try:
-             choice_num = self.ui_helper.get_choice_numeric(
-                 "\n[bold]Which optimization would you like to pursue?[/bold] (Enter number)", len(options)
-             )
-             selected_option = options[choice_num - 1]
-             self.ui_helper.display_selection_confirmation(selected_option["description"])
-             return selected_option
-         except Exception as e:
-             self.console.print(f"[red]Error selecting optimization option: {e}[/]")
-             return None
-
-     def handle_script_generation_workflow(self, selected_option: Dict[str, str]) -> Optional[Dict[str, str]]:
-         """Handle script generation, editing, and custom path workflows.
-
-         This implements a state machine for evaluation script creation with these states:
-         1. No script: User can Generate or Provide path
-         2. Script exists: User can Use, Edit, Regenerate, or Provide different path
-
-         The workflow continues until user chooses to Use a script or exits.
-         """
-         eval_script_content = None
-         eval_script_path_str = None
-         metric_name = None
-         goal = None
-
-         # Main workflow loop - continues until user accepts a script
-         while True:
-             if eval_script_content:
-                 # State: Script exists - show it and offer actions
-                 self.console.print("\n[bold]Current evaluation script:[/]")
-                 script_panel = EvaluationScriptPanel()
-                 panel = script_panel.get_display(eval_script_content, eval_script_path_str or "evaluate.py")
-                 self.console.print(panel)
-
-                 if metric_name and goal:
-                     self.console.print(f"\n[green]Suggested metric:[/] {metric_name} (goal: {goal})")
-
-                 action = self.ui_helper.get_choice(
-                     "Choose an action: [bold]U[/]se this script / [bold]E[/]dit content / [bold]R[/]egenerate / [bold]P[/]rovide different path",
-                     ["u", "U", "e", "E", "r", "R", "p", "P"],
-                     default="u",
-                 ).lower()
-             else:
-                 # State: No script - offer initial options
-                 action = self.ui_helper.get_choice(
-                     "How to proceed? ([bold]G[/]enerate / Provide [bold]P[/]ath)", ["g", "G", "p", "P"], default="g"
-                 ).lower()
-
-             # Action: Use current script (exit workflow)
-             if action == "u" and eval_script_content:
-                 # Save generated script to file if it hasn't been saved yet
-                 if not eval_script_path_str:
-                     eval_script_path_obj = self.project_path / "evaluate.py"
-                     eval_script_path_obj.write_text(eval_script_content)
-                     eval_script_path_str = "evaluate.py"
-                     self.console.print(f"Generated script saved as [cyan]{eval_script_path_str}[/]")
-                 break
-
-             # Action: Edit current script
-             elif action == "e" and eval_script_content:
-                 eval_script_content = self.ui_helper.get_multiline_input(
-                     "\nPlease paste your edited script below. Press Ctrl+D (Unix) or Ctrl+Z then Enter (Windows) when done:"
-                 )
-                 if not eval_script_content:
-                     continue  # Edit was cancelled, stay in loop
-
-                 # After editing, we need new metric info since script changed
-                 eval_script_path_str = None  # Clear path since content changed
-                 metric_name = Prompt.ask("Please specify the metric name this script will print")
-                 goal = self.ui_helper.get_choice(
-                     "Should we maximize or minimize this metric?", choices=["maximize", "minimize"], default="maximize"
-                 )
-                 continue  # Show the edited script for review
-
-             # Action: Generate new script (or regenerate)
-             elif action == "g" or action == "r":
-                 with self.console.status("[bold green]Generating evaluation script and determining metrics...[/]"):
-                     result = generate_evaluation_script_and_metrics(
-                         console=self.console,
-                         target_file=selected_option["target_file"],
-                         description=selected_option["description"],
-                         gitingest_content_str=self.gitingest_content_str,
-                         auth_headers=getattr(self, "auth_headers", {}),
-                     )
-                 if result and result[0]:
-                     eval_script_content, metric_name, goal, reasoning = result
-                     if reasoning:
-                         self.console.print(f"[dim]Reasoning: {reasoning}[/]")
-                 else:
-                     self.console.print(
-                         "[red]Unable to generate an evaluation script. Please try providing a custom script path instead.[/]"
-                     )
-                     eval_script_content = None
-                     metric_name = None
-                     goal = None
-                 eval_script_path_str = None  # Generated content not saved yet
-                 continue  # Show the generated script for review
-
-             # Action: Provide path to existing script
-             elif action == "p":
-                 user_script_path_str = Prompt.ask("Enter the path to your evaluation script (relative to project root)")
-                 user_script_path = self.project_path / user_script_path_str
-                 if user_script_path.is_file():
-                     try:
-                         eval_script_content = user_script_path.read_text()
-                         eval_script_path_str = user_script_path_str
-                         self.console.print(f"Using script from [cyan]{eval_script_path_str}[/]")
-
-                         # For user-provided scripts, we need manual metric specification
-                         metric_name = Prompt.ask("Please specify the metric name this script will print")
-                         goal = self.ui_helper.get_choice(
-                             "Should we maximize or minimize this metric?", choices=["maximize", "minimize"], default="maximize"
-                         )
-                         break  # User provided script is ready to use
-                     except Exception as e:
-                         self.console.print(f"[red]Error reading script {user_script_path_str}: {e}[/]")
-                         eval_script_content = None
-                 else:
-                     self.console.print(f"[red]File not found: {user_script_path}[/]")
-                 continue  # Stay in loop to try again
-
-         # Validate we have all required components
-         if not eval_script_content or not eval_script_path_str or not metric_name or not goal:
-             return None
-
-         # Analyze the script to determine the proper execution command
-         with self.console.status("[bold green]Analyzing script execution requirements...[/]"):
-             eval_command = analyze_script_execution_requirements(
-                 console=self.console,
-                 script_content=eval_script_content,
-                 script_path=eval_script_path_str,
-                 target_file=selected_option["target_file"],
-                 auth_headers=getattr(self, "auth_headers", {}),
-             )
-
-         return {
-             "script_path": eval_script_path_str,
-             "script_content": eval_script_content,
-             "metric_name": metric_name,
-             "goal": goal,
-             "eval_command": eval_command or f"python {eval_script_path_str}",
-         }
-
-     def get_evaluation_configuration(self, selected_option: Dict[str, str]) -> Optional[Dict[str, str]]:
-         """Get or create evaluation script configuration using intelligent conversation-guided approach."""
-         with self.console.status("[bold green]Analyzing evaluation environment...[/]"):
-             analysis = analyze_evaluation_environment(
-                 console=self.console,
-                 target_file=selected_option["target_file"],
-                 description=selected_option["description"],
-                 gitingest_summary=self.gitingest_summary,
-                 gitingest_tree=self.gitingest_tree,
-                 gitingest_content_str=self.gitingest_content_str,
-                 auth_headers=getattr(self, "auth_headers", {}),
-             )
-
-         if not analysis:
-             self.console.print("[yellow]Unable to analyze evaluation environment. Falling back to script generation.[/]")
-             return self.handle_script_generation_workflow(selected_option)
-
-         self.evaluation_analysis = analysis
-         self.current_step = "script_selection"
-
-         return self.handle_evaluation_decision(selected_option, analysis)
-
-     def handle_evaluation_decision(
-         self, selected_option: Dict[str, str], analysis: Dict[str, Any]
-     ) -> Optional[Dict[str, str]]:
-         """Handle user decision based on intelligent evaluation analysis.
-
-         This method implements a recommendation system that:
-         1. Shows existing evaluation scripts found in the codebase
-         2. Provides AI-generated recommendations (use_existing vs generate_new)
-         3. Handles user choice with different flows based on recommendation
-
-         The logic adapts the default choice based on AI recommendation to guide users
-         toward the most suitable option while still allowing them to override.
-         """
-         existing_evals = analysis.get("existing_evaluations", [])
-         recommendation = analysis.get("recommendation", "generate_new")
-         reasoning = analysis.get("reasoning", "")
-
-         # Display existing evaluation scripts if any were found
-         if existing_evals:
-             from rich.table import Table
-             from rich import box
-
-             table = Table(
-                 title="Existing Evaluation Scripts", show_lines=True, box=box.ROUNDED, border_style="cyan", padding=(1, 1)
-             )
-             table.add_column("No.", style="bold white", width=5, header_style="bold white", justify="center")
-             table.add_column("Script Path", style="cyan", width=20, header_style="bold white")
-             table.add_column("Suitability", style="magenta", width=40, header_style="bold white")
-             table.add_column("Metrics", style="yellow", width=20, header_style="bold white")
-             table.add_column("Confidence", style="green", width=10, header_style="bold white")
-
-             for i, eval_script in enumerate(existing_evals):
-                 metrics_str = ", ".join([f"{m['name']} ({m['goal']})" for m in eval_script.get("metrics", [])])
-                 suitability_str = eval_script.get("suitability", "")
-                 table.add_row(str(i + 1), eval_script["script_path"], suitability_str, metrics_str, eval_script["confidence"])
-             self.console.print(table)
-         else:
-             self.console.print("\n[yellow]No existing evaluation scripts found.[/]")
-
-         # Show AI recommendation with reasoning
-         self.console.print(f"\n💡 [bold green]Recommended:[/] [cyan]{recommendation.replace('_', ' ').title()}[/]")
-         self.console.print()
-         self.console.print(f"[yellow]🧠 Reasoning:[/] {reasoning}")
-
-         # Decision flow 1: AI recommends using existing script
-         if existing_evals and recommendation == "use_existing":
-             choices = [str(i + 1) for i in range(len(existing_evals))] + ["g"]
-             choice = self.ui_helper.get_choice(
-                 "\n[bold]Choose an option:[/] (Enter number to use existing script, 'g' to generate new)",
-                 choices,
-                 default="1" if existing_evals else "g",  # Default to first existing script
-                 show_choices=False,
-             )
-
-             if choice == "g":
-                 return self.handle_script_generation_workflow(selected_option)
-             else:
-                 selected_eval = existing_evals[int(choice) - 1]
-                 return self.handle_existing_evaluation_selection(selected_option, selected_eval)
-
-         # Decision flow 2: Scripts exist but AI recommends generating new
-         elif existing_evals:
-             choices = [str(i + 1) for i in range(len(existing_evals))] + ["g"]
-             choice = self.ui_helper.get_choice(
-                 "\n[bold]Choose an option:[/] (Enter number to use existing script, 'g' to generate new as recommended)",
-                 choices,
-                 default="g",  # Default to generate new (following recommendation)
-                 show_choices=False,
-             )
-
-             if choice == "g":
-                 return self.handle_script_generation_workflow(selected_option)
-             else:
-                 selected_eval = existing_evals[int(choice) - 1]
-                 return self.handle_existing_evaluation_selection(selected_option, selected_eval)
-
-         # Decision flow 3: No existing scripts found - must generate new
-         else:
-             self.console.print("\n[cyan]Proceeding to generate a new evaluation script...[/]")
-             return self.handle_script_generation_workflow(selected_option)
-
-     def handle_existing_evaluation_selection(
-         self, selected_option: Dict[str, str], selected_eval: Dict[str, Any]
-     ) -> Optional[Dict[str, str]]:
-         """Handle selection of an existing evaluation script."""
-         script_path = selected_eval["script_path"]
-
-         try:
-             script_file = self.project_path / script_path
-             if not script_file.exists():
-                 self.console.print(f"[red]Error: Script file {script_path} not found.[/]")
-                 return None
-
-             script_content = script_file.read_text()
-             self.console.print(f"\n[green]Using existing script:[/] [cyan]{script_path}[/]")
-             self.console.print()
-
-             metrics = selected_eval.get("metrics", [])
-
-             if not metrics:
-                 self.console.print("[yellow]No metrics detected. Please specify manually.[/]")
-                 metric_name = Prompt.ask("Please specify the metric name this script will print")
-                 goal = self.ui_helper.get_choice(
-                     "Should we maximize or minimize this metric?", choices=["maximize", "minimize"], default="maximize"
-                 )
-             elif len(metrics) == 1:
-                 metric_name = metrics[0]["name"]
-                 goal = metrics[0]["goal"]
-                 self.console.print(f"[green]Using detected metric:[/] [yellow]{metric_name}[/] (goal: {goal})")
-             else:
-                 self.console.print("[green]Multiple metrics detected:[/]")
-                 for i, m in enumerate(metrics):
-                     self.console.print(f" {i + 1}. {m['name']} (goal: {m['goal']})")
-                 try:
-                     choice_num = self.ui_helper.get_choice_numeric("Which metric to use?", len(metrics))
-                     selected_metric = metrics[choice_num - 1]
-                     metric_name = selected_metric["name"]
-                     goal = selected_metric["goal"]
-                 except Exception as e:
-                     self.console.print(f"[red]Error selecting metric: {e}[/]")
-                     return None
-
-             eval_command = selected_eval.get("run_command", "")
-             if not eval_command or eval_command == f"python {script_path}":
-                 with self.console.status("[bold green]Analyzing script execution requirements...[/]"):
-                     eval_command = analyze_script_execution_requirements(
-                         console=self.console,
-                         script_content=script_content,
-                         script_path=script_path,
-                         target_file=selected_option["target_file"],
-                         auth_headers=getattr(self, "auth_headers", {}),
-                     )
-
-             self.current_step = "confirmation"
-             eval_config = {
-                 "script_path": script_path,
-                 "script_content": script_content,
-                 "metric_name": metric_name,
-                 "goal": goal,
-                 "eval_command": eval_command or f"python {script_path}",
-             }
-
-             self.selected_eval_config = eval_config
-             return eval_config
-
-         except Exception as e:
-             self.console.print(f"[red]Error processing script {script_path}: {e}[/]")
-             return None
-
-     def confirm_and_finalize_evaluation_config(self, eval_config: Dict[str, str]) -> Optional[Dict[str, str]]:
-         """Allow user to modify the evaluation command if needed."""
-         self.console.print(f"\n[green]Analyzed evaluation command:[/] [cyan]{eval_config['eval_command']}[/]")
-
-         modify_command = self.ui_helper.get_yes_no("Is this the right command to run the evaluation?", default="y")
-
-         if not modify_command:
-             self.console.print(f"\n[dim]Current command:[/] {eval_config['eval_command']}")
-             new_command = Prompt.ask("Enter the corrected evaluation command", default=eval_config["eval_command"])
-             self.console.print(f"[green]Updated command:[/] {new_command}")
-             eval_config = {
-                 "script_path": eval_config["script_path"],
-                 "script_content": eval_config["script_content"],
-                 "metric_name": eval_config["metric_name"],
-                 "goal": eval_config["goal"],
-                 "eval_command": new_command,
-             }
-
-         return eval_config
-
-     def build_weco_command(
-         self, target_file: str, steps: int, eval_config: Dict[str, str], model: str, additional_instructions: str = None
-     ) -> str:
-         """Build the weco command from the optimization and evaluation configs.
-
-         Constructs a properly quoted shell command that can be executed directly.
-         Uses shlex.quote() to handle special characters and spaces in arguments safely.
-         """
-         command_parts = [
-             "weco",
-             "run",
-             "--source",
-             shlex.quote(target_file),  # Quote file paths for shell safety
-             "--eval-command",
-             shlex.quote(eval_config["eval_command"]),  # Quote complex commands with spaces/args
-             "--metric",
-             shlex.quote(eval_config["metric_name"]),  # Quote metric names that might have spaces
-             "--goal",
-             eval_config["goal"],  # Goal is always "maximize" or "minimize" (no quoting needed)
-             "--steps",
-             str(steps),  # Convert int to string
-             "--model",
-             shlex.quote(model),  # Always include resolved model
-         ]
-
-         # Add optional parameters if they're specified
-         if additional_instructions:
-             command_parts.extend(["--additional-instructions", shlex.quote(additional_instructions)])
-
-         return " ".join(command_parts)
-
-     def execute_optimization(
-         self,
-         eval_config: Dict[str, str],
-         target_file: str,
-         steps: int,
-         model: str,
-         additional_instructions: str,
-         weco_run_cmd: str,
-     ) -> None:
-         """Execute the optimization with the given configuration.
-
-         This method handles two execution paths:
-         1. Direct execution: Run with the provided configuration
-         2. User adjustment: Allow user to modify the command before execution
-
-         If user chooses to adjust, we parse their command to validate it and extract
-         the new configuration parameters.
-         """
-         self.console.print("\n[bold green]🚀 Starting optimization...[/]")
-         self.console.print(f"[dim]Command: {weco_run_cmd}[/]\n")
-
-         # Give user option to adjust parameters before execution
-         adjust_command = self.ui_helper.get_yes_no(
-             f"\n[bold]Current command:[/] [dim]{weco_run_cmd}[/]\n"
-             "[dim]You can modify the evaluation command, steps, model, or other parameters.[/]\n"
-             "Would you like to adjust any parameters?",
-             default="no",
-         )
-
-         if adjust_command:
-             # User wants to modify the command - get their input
-             new_weco_run_cmd_str = Prompt.ask("Enter the full weco run command", default=weco_run_cmd)
-
-             # Parse the user's command safely using shlex
-             try:
-                 command_tokens = shlex.split(new_weco_run_cmd_str)
-             except ValueError as e:
-                 self.console.print(f"[bold red]Error parsing command: {e}. Please check quotes.[/]")
-                 return
-
-             # Validate command structure (must start with "weco run")
-             if not command_tokens or command_tokens[0] != "weco" or (len(command_tokens) > 1 and command_tokens[1] != "run"):
-                 self.console.print("[bold red]Invalid command. Must start with 'weco run'.[/]")
-                 return
-
-             # Extract arguments for the 'run' subcommand (skip "weco run")
-             run_args_list = command_tokens[2:]
-
-             try:
-                 # Parse the arguments using the same parser that CLI uses
-                 parsed_ns = self.weco_run_parser.parse_args(run_args_list)
-
-                 # Update configuration from parsed arguments
-                 eval_config = {
-                     "script_path": "",  # Not needed for execution
-                     "script_content": "",  # Not needed for execution
-                     "metric_name": parsed_ns.metric,
-                     "goal": parsed_ns.goal,
-                     "eval_command": parsed_ns.eval_command,
-                 }
-
-                 target_file = parsed_ns.source
-                 steps = parsed_ns.steps
-                 model = parsed_ns.model
-                 additional_instructions = parsed_ns.additional_instructions
-
-             except Exception as e:
-                 self.console.print(f"[bold red]Error parsing adjusted command: {e}. Optimization not started.[/]")
-                 return
-
-         # Import and execute the actual optimization function
-         # (Import here to avoid circular imports)
-         from .optimizer import execute_optimization as execute_optimization_run
-
-         success = execute_optimization_run(
-             source=target_file,
-             eval_command=eval_config["eval_command"],
-             metric=eval_config["metric_name"],
-             goal=eval_config["goal"],
-             steps=steps,
-             model=model,
-             log_dir=".runs",  # Standard log directory
-             additional_instructions=additional_instructions,
-             console=self.console,
-         )
-
-         # Report final result to user
-         if success:
-             self.console.print("\n[bold green]✅ Optimization completed successfully![/]")
-         else:
-             self.console.print("\n[bold yellow]⚠️ Optimization ended early or encountered issues.[/]")
-
-         exit_code = 0 if success else 1
-         sys.exit(exit_code)
-
-     def show_and_copy_command(self, command: str) -> None:
-         """Show the command and copy it to clipboard."""
-         import subprocess
-         import platform
-
-         try:
-             if platform.system() == "Darwin":
-                 subprocess.run(["pbcopy"], input=command.encode(), check=True)
-             elif platform.system() == "Linux":
-                 subprocess.run(["xclip", "-selection", "clipboard"], input=command.encode(), check=True)
-             elif platform.system() == "Windows":
-                 subprocess.run(["clip"], input=command.encode(), check=True)
-             self.console.print("\n[green]✅ Command copied to clipboard! Exiting...[/]")
-         except Exception:
-             self.console.print("\n[yellow]Could not copy to clipboard automatically.[/]")
-
-     def setup_evaluation(self, selected_option: Dict[str, str]) -> Optional[Tuple[str, Dict[str, str], str, int, str, str]]:
-         """Setup evaluation environment for the selected optimization."""
-         eval_config = self.get_evaluation_configuration(selected_option)
-         if not eval_config:
-             self.console.print("[red]Evaluation script setup failed. Please check your script configuration and try again.[/]")
-             return None
-
-         eval_config = self.confirm_and_finalize_evaluation_config(eval_config)
-         if not eval_config:
-             self.console.print("[red]Evaluation configuration failed.[/]")
-             return None
-
-         steps = 20
-         steps_input = Prompt.ask(f"Number of optimization steps (or press Enter to use {steps})", default=str(steps))
-         try:
-             steps = int(steps_input)
-             steps = max(1, min(1000, steps))
-             if steps != int(steps_input):
-                 self.console.print(f"[yellow]Adjusted to valid range: {steps}[/]")
-         except ValueError:
-             self.console.print(f"[yellow]Invalid input, using default value: {steps}[/]")
-
-         # Resolve the model to use
-         if self.user_specified_model:
-             self.resolved_model = self.user_specified_model
-         else:
-             # Use same default model selection as weco run
-             from .utils import determine_model_for_onboarding
-
-             self.resolved_model = determine_model_for_onboarding()
-
-         target_file = selected_option["target_file"]
-         additional_instructions = selected_option["description"]
-
-         weco_run_cmd_str = self.build_weco_command(
-             target_file, steps, eval_config, self.resolved_model, additional_instructions
-         )
-         return weco_run_cmd_str, eval_config, target_file, steps, self.resolved_model, additional_instructions
-
-     def start(self):
-         self.console.print("[bold cyan]Welcome to Weco![/]")
-         self.console.print(f"Let's optimize your codebase in: [cyan]{self.project_path}[/]\n")
-
-         # Mandatory authentication as per PLAN.md
-         from .auth import handle_authentication
-
-         weco_api_key, auth_headers = handle_authentication(self.console)
-         if not weco_api_key:
-             self.console.print("[yellow]Authentication is required to use Weco. Exiting...[/]")
-             return
-
-         # Store auth headers for API calls
-         self.auth_headers = auth_headers
-
-         options = self.analyze_codebase_and_get_optimization_options()
-         if not options:
-             return
-
-         selected_option = self.get_user_option_selection(options)
-         if not selected_option:
-             return
-
-         result = self.setup_evaluation(selected_option)
-         if not result:
-             return
-
-         weco_command, eval_config, target_file, steps, model, additional_instructions = result
-
-         self.console.print("\n[bold green]Command:[/]")
-         self.console.print(f"[on black white]{weco_command}[/]\n")
-
-         self.console.print(f"[yellow]ℹ️ File paths are relative to: {self.project_path}[/]")
-
-         self.console.print("\n[bold green]🎯 What would you like to do?[/]")
-         self.console.print(" [cyan]1.[/] [bold]Run now[/] - Start the optimization immediately")
-         self.console.print(" [cyan]2.[/] [bold]Show and copy[/] - Display the command and copy to clipboard")
-
-         execution_choice = self.ui_helper.get_choice(
-             "\nEnter your choice", choices=["1", "2"], default="1", show_choices=False
-         )
-
-         if execution_choice == "1":
-             self.execute_optimization(eval_config, target_file, steps, model, additional_instructions, weco_command)
-         else:
-             self.show_and_copy_command(weco_command)
-
-
- def run_onboarding_chatbot(
-     project_path: pathlib.Path, console: Console, run_parser: argparse.ArgumentParser, model: Optional[str] = None
- ):
-     try:
-         chatbot = Chatbot(project_path, console, run_parser, model)
-         chatbot.start()
-     except Exception as e:
-         console.print(f"[bold red]An unexpected error occurred: {e}[/]")
-         import traceback
-
-         traceback.print_exc()