copex 0.8.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of copex has been flagged as potentially problematic; review the advisory details before installing.

copex/cli.py ADDED
@@ -0,0 +1,1106 @@
1
+ """CLI interface for Copex."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import asyncio
6
+ import sys
7
+ from pathlib import Path
8
+ from typing import Annotated, Optional
9
+
10
+ import typer
11
+ from prompt_toolkit import PromptSession
12
+ from prompt_toolkit.completion import Completer, Completion
13
+ from prompt_toolkit.history import FileHistory
14
+ from prompt_toolkit.key_binding import KeyBindings
15
+ from rich.console import Console
16
+ from rich.live import Live
17
+ from rich.markdown import Markdown
18
+ from rich.panel import Panel
19
+
20
+ from copex.client import Copex, StreamChunk
21
+ from copex.config import CopexConfig, load_last_model, save_last_model
22
+ from copex.models import Model, ReasoningEffort
23
+
24
# Effective default: last used model or claude-opus-4.5.
# NOTE: resolved once at import time, so a model saved by a running session
# only takes effect for processes launched afterwards.
_DEFAULT_MODEL = load_last_model() or Model.CLAUDE_OPUS_4_5
26
+ from copex.plan import Plan, PlanExecutor, PlanStep, StepStatus
27
+ from copex.ralph import RalphState, RalphWiggum
28
+ from copex.ui import (
29
+ ActivityType,
30
+ CopexUI,
31
+ Icons,
32
+ Theme,
33
+ ToolCallInfo,
34
+ print_error,
35
+ print_retry,
36
+ print_user_prompt,
37
+ print_welcome,
38
+ )
39
+
40
# Root Typer application. invoke_without_command=True lets a bare `copex`
# invocation fall through to the callback, which launches interactive mode.
app = typer.Typer(
    name="copex",
    help="Copilot Extended - Resilient wrapper with auto-retry and Ralph Wiggum loops.",
    no_args_is_help=False,
    invoke_without_command=True,
)
# Shared Rich console used by every command for styled terminal output.
console = Console()
47
+
48
+
49
def model_callback(value: str | None) -> Model | None:
    """Validate a ``--model`` option value.

    Args:
        value: Raw model name from the command line, or None when unset.

    Returns:
        The matching :class:`Model` member, or None when no value was given.

    Raises:
        typer.BadParameter: If the name matches no known model.
    """
    if value is None:
        return None
    try:
        return Model(value)
    except ValueError as exc:
        valid = ", ".join(m.value for m in Model)
        # Chain the original lookup failure so tracebacks stay informative
        # (raising inside `except` without `from` garbles the context).
        raise typer.BadParameter(f"Invalid model. Valid: {valid}") from exc
58
+
59
+
60
def reasoning_callback(value: str | None) -> ReasoningEffort | None:
    """Validate a ``--reasoning`` option value.

    Args:
        value: Raw reasoning-effort name from the command line, or None.

    Returns:
        The matching :class:`ReasoningEffort` member, or None when unset.

    Raises:
        typer.BadParameter: If the name matches no known effort level.
    """
    if value is None:
        return None
    try:
        return ReasoningEffort(value)
    except ValueError as exc:
        valid = ", ".join(r.value for r in ReasoningEffort)
        # Chain the cause for clean tracebacks, mirroring model_callback.
        raise typer.BadParameter(f"Invalid reasoning effort. Valid: {valid}") from exc
69
+
70
+
71
@app.callback(invoke_without_command=True)
def main(
    ctx: typer.Context,
    model: Annotated[
        str | None, typer.Option("--model", "-m", help="Model to use")
    ] = None,
    reasoning: Annotated[
        str, typer.Option("--reasoning", "-r", help="Reasoning effort level")
    ] = ReasoningEffort.XHIGH.value,
) -> None:
    """Copilot Extended - Resilient wrapper with auto-retry and Ralph Wiggum loops."""
    if ctx.invoked_subcommand is not None:
        return
    # A bare `copex` (no subcommand) drops straight into the interactive REPL.
    interactive(model=model or _DEFAULT_MODEL.value, reasoning=reasoning)
86
+
87
+
88
class SlashCompleter(Completer):
    """Completion provider that activates only for '/'-prefixed commands."""

    def __init__(self, commands: list[str]) -> None:
        # Known slash commands, matched case-insensitively by prefix.
        self.commands = commands

    def get_completions(self, document, complete_event):
        typed = document.text_before_cursor.lstrip()
        if not typed.startswith("/"):
            return
        lowered = typed.lower()
        for candidate in self.commands:
            if candidate.lower().startswith(lowered):
                yield Completion(candidate, start_position=-len(typed))
101
+
102
+
103
def _build_prompt_session() -> PromptSession:
    """Build the interactive PromptSession: file-backed history, slash-command
    completion, Enter-submits / Alt+Enter-newline key bindings."""
    slash_commands = ["/model", "/reasoning", "/models", "/new", "/status", "/tools", "/help"]
    hist_file = Path.home() / ".copex" / "history"
    hist_file.parent.mkdir(parents=True, exist_ok=True)

    bindings = KeyBindings()

    @bindings.add("enter")
    def _submit(event) -> None:
        # Enter submits only when the buffer contains non-whitespace text;
        # otherwise just clear it.
        buf = event.app.current_buffer
        if not buf.document.text.strip():
            buf.reset()
        else:
            buf.validate_and_handle()

    @bindings.add("escape", "enter")
    def _newline(event) -> None:
        # Esc+Enter (Alt+Enter) inserts a literal newline for multiline input.
        event.app.current_buffer.insert_text("\n")

    return PromptSession(
        message="copilot> ",
        history=FileHistory(str(hist_file)),
        key_bindings=bindings,
        completer=SlashCompleter(slash_commands),
        complete_while_typing=True,
        multiline=True,
        prompt_continuation=lambda width, line_number, is_soft_wrap: "... ",
    )
131
+
132
+
133
async def _model_picker(current: Model) -> Model | None:
    """Interactive model picker using arrow keys."""
    from prompt_toolkit.application import Application
    from prompt_toolkit.key_binding import KeyBindings
    from prompt_toolkit.layout import Layout
    from prompt_toolkit.layout.containers import Window
    from prompt_toolkit.layout.controls import FormattedTextControl
    from prompt_toolkit.styles import Style

    models = list(Model)
    selected_idx = models.index(current) if current in models else 0

    kb = KeyBindings()

    @kb.add("up")
    def _up(event) -> None:
        nonlocal selected_idx
        selected_idx = (selected_idx - 1) % len(models)

    @kb.add("down")
    def _down(event) -> None:
        nonlocal selected_idx
        selected_idx = (selected_idx + 1) % len(models)

    @kb.add("enter")
    def _accept(event) -> None:
        event.app.exit(result=models[selected_idx])

    @kb.add("escape")
    @kb.add("c-c")
    def _cancel(event) -> None:
        event.app.exit(result=None)

    def get_text():
        # Re-rendered on every keypress: highlight the cursor row and tag the
        # currently-active model.
        lines = [("bold", "Select a model (↑/↓ to navigate, Enter to select, Esc to cancel):\n\n")]
        for i, m in enumerate(models):
            if i == selected_idx:
                lines.append(("class:selected", f" ▸ {m.value}"))
            else:
                lines.append(("", f"   {m.value}"))
            if m == current:
                lines.append(("class:current", " ← current"))
            lines.append(("", "\n"))
        return lines

    style = Style.from_dict({
        "selected": "fg:ansicyan bold",
        "current": "fg:ansiyellow italic",
    })

    # Named `picker` (not `app`) to avoid shadowing the module-level Typer app.
    picker: Application[Model | None] = Application(
        layout=Layout(Window(FormattedTextControl(get_text))),
        key_bindings=kb,
        style=style,
        full_screen=False,
    )
    return await picker.run_async()
191
+
192
+
193
async def _reasoning_picker(current: ReasoningEffort) -> ReasoningEffort | None:
    """Interactive reasoning effort picker."""
    from prompt_toolkit.application import Application
    from prompt_toolkit.key_binding import KeyBindings
    from prompt_toolkit.layout import Layout
    from prompt_toolkit.layout.containers import Window
    from prompt_toolkit.layout.controls import FormattedTextControl
    from prompt_toolkit.styles import Style

    efforts = list(ReasoningEffort)
    selected_idx = efforts.index(current) if current in efforts else 0

    kb = KeyBindings()

    @kb.add("up")
    def _up(event) -> None:
        nonlocal selected_idx
        selected_idx = (selected_idx - 1) % len(efforts)

    @kb.add("down")
    def _down(event) -> None:
        nonlocal selected_idx
        selected_idx = (selected_idx + 1) % len(efforts)

    @kb.add("enter")
    def _accept(event) -> None:
        event.app.exit(result=efforts[selected_idx])

    @kb.add("escape")
    @kb.add("c-c")
    def _cancel(event) -> None:
        event.app.exit(result=None)

    def get_text():
        # Re-rendered on every keypress: highlight the cursor row and tag the
        # currently-active effort level.
        lines = [("bold", "Select reasoning effort (↑/↓ to navigate, Enter to select, Esc to cancel):\n\n")]
        for i, r in enumerate(efforts):
            if i == selected_idx:
                lines.append(("class:selected", f" ▸ {r.value}"))
            else:
                lines.append(("", f"   {r.value}"))
            if r == current:
                lines.append(("class:current", " ← current"))
            lines.append(("", "\n"))
        return lines

    style = Style.from_dict({
        "selected": "fg:ansicyan bold",
        "current": "fg:ansiyellow italic",
    })

    # Named `picker` (not `app`) to avoid shadowing the module-level Typer app.
    picker: Application[ReasoningEffort | None] = Application(
        layout=Layout(Window(FormattedTextControl(get_text))),
        key_bindings=kb,
        style=style,
        full_screen=False,
    )
    return await picker.run_async()
251
+
252
+
253
@app.command()
def chat(
    prompt: Annotated[Optional[str], typer.Argument(help="Prompt to send (or read from stdin)")] = None,
    model: Annotated[
        str | None, typer.Option("--model", "-m", help="Model to use")
    ] = None,
    reasoning: Annotated[
        str, typer.Option("--reasoning", "-r", help="Reasoning effort level")
    ] = ReasoningEffort.XHIGH.value,
    max_retries: Annotated[
        int, typer.Option("--max-retries", help="Maximum retry attempts")
    ] = 5,
    no_stream: Annotated[
        bool, typer.Option("--no-stream", help="Disable streaming output")
    ] = False,
    show_reasoning: Annotated[
        bool, typer.Option("--show-reasoning/--no-reasoning", help="Show model reasoning")
    ] = True,
    config_file: Annotated[
        Optional[Path], typer.Option("--config", "-c", help="Config file path")
    ] = None,
    raw: Annotated[
        bool, typer.Option("--raw", help="Output raw text without formatting")
    ] = False,
    ui_theme: Annotated[
        Optional[str], typer.Option("--ui-theme", help="UI theme (default, midnight, mono, sunset)")
    ] = None,
    ui_density: Annotated[
        Optional[str], typer.Option("--ui-density", help="UI density (compact or extended)")
    ] = None,
) -> None:
    """Send a prompt to Copilot with automatic retry on errors."""
    # Config file (when given and present) seeds the settings; CLI flags win.
    if config_file and config_file.exists():
        config = CopexConfig.from_file(config_file)
    else:
        config = CopexConfig()

    chosen_model = model or _DEFAULT_MODEL.value
    try:
        config.model = Model(chosen_model)
    except ValueError:
        console.print(f"[red]Invalid model: {chosen_model}[/red]")
        raise typer.Exit(1)

    try:
        config.reasoning_effort = ReasoningEffort(reasoning)
    except ValueError:
        console.print(f"[red]Invalid reasoning effort: {reasoning}[/red]")
        raise typer.Exit(1)

    config.retry.max_retries = max_retries
    config.streaming = not no_stream
    if ui_theme:
        config.ui_theme = ui_theme
    if ui_density:
        config.ui_density = ui_density

    # No positional prompt: read it from stdin (pipe or interactive paste).
    if prompt is None:
        if sys.stdin.isatty():
            console.print("[yellow]Enter prompt (Ctrl+D to submit):[/yellow]")
        prompt = sys.stdin.read().strip()
        if not prompt:
            console.print("[red]No prompt provided[/red]")
            raise typer.Exit(1)

    asyncio.run(_run_chat(config, prompt, show_reasoning, raw))
322
+
323
+
324
async def _run_chat(
    config: CopexConfig, prompt: str, show_reasoning: bool, raw: bool
) -> None:
    """Run the chat command: start the client, deliver the prompt, render output."""
    client = Copex(config)

    try:
        await client.start()

        if config.streaming and not raw:
            # Rich live-updating stream path.
            await _stream_response(client, prompt, show_reasoning)
        else:
            response = await client.send(prompt)
            if raw:
                print(response.content)
            else:
                if show_reasoning and response.reasoning:
                    reasoning_panel = Panel(
                        Markdown(response.reasoning),
                        title="[dim]Reasoning[/dim]",
                        border_style="dim",
                    )
                    console.print(reasoning_panel)
                console.print(Markdown(response.content))

            if response.retries > 0:
                console.print(
                    f"\n[dim]Completed with {response.retries} retries[/dim]"
                )

    except KeyboardInterrupt:
        console.print("\n[yellow]Interrupted[/yellow]")
    except Exception as e:
        console.print(f"[red]Error: {e}[/red]")
        raise typer.Exit(1)
    finally:
        # Always tear the client down, even on error/interrupt.
        await client.stop()
360
+
361
+
362
async def _stream_response(
    client: Copex, prompt: str, show_reasoning: bool
) -> None:
    """Stream a one-shot response with the live-updating rich UI."""
    cfg = client.config
    ui = CopexUI(console, theme=cfg.ui_theme, density=cfg.ui_density, show_all_tools=True)
    ui.reset(model=cfg.model.value)
    ui.set_activity(ActivityType.THINKING)
    await _stream_with_ui(
        client,
        prompt,
        ui,
        show_reasoning=show_reasoning,
        show_retry_notifications=True,
    )
376
+
377
+
378
async def _stream_response_plain(client: Copex, prompt: str) -> None:
    """Stream a response as plain text to stdout.

    Deltas are echoed as they arrive; after completion any tail of the final
    content that was not streamed is flushed so the printed output is whole.

    Args:
        client: Started Copex client.
        prompt: Prompt text to send.
    """
    content = ""

    def on_chunk(chunk: StreamChunk) -> None:
        nonlocal content
        if chunk.type == "message":
            if chunk.is_final:
                if chunk.content:
                    content = chunk.content
                return
            if chunk.delta:
                content += chunk.delta
                sys.stdout.write(chunk.delta)
                sys.stdout.flush()
        elif chunk.type == "system":
            # BUG FIX: guard against a missing delta before calling .strip()
            # (the message branch above guards the same way); a None delta
            # would previously crash the stream callback with AttributeError.
            if chunk.delta:
                console.print(f"[yellow]{chunk.delta.strip()}[/yellow]")

    response = await client.send(prompt, on_chunk=on_chunk)
    # Emit any final content the stream didn't already print.
    if response.content and response.content != content:
        if response.content.startswith(content):
            sys.stdout.write(response.content[len(content):])
        else:
            sys.stdout.write(response.content)
    sys.stdout.write("\n")
    sys.stdout.flush()

    if response.retries > 0:
        console.print(f"[dim]Completed with {response.retries} retries[/dim]")
409
+
410
+
411
@app.command()
def models() -> None:
    """List available models."""
    console.print("[bold]Available Models:[/bold]\n")
    # One bulleted line per Model enum member.
    for entry in Model:
        console.print(f"  • {entry.value}")
417
+
418
+
419
@app.command()
def init(
    path: Annotated[
        Path, typer.Option("--path", "-p", help="Config file path")
    ] = CopexConfig.default_path(),
) -> None:
    """Create a default config file."""
    import tomli_w

    path.parent.mkdir(parents=True, exist_ok=True)

    # Retry policy defaults, kept separate for readability.
    retry_defaults = {
        "max_retries": 5,
        "base_delay": 1.0,
        "max_delay": 30.0,
        "exponential_base": 2.0,
        "retry_on_errors": ["500", "502", "503", "504", "Internal Server Error", "rate limit"],
    }
    defaults = {
        "model": Model.CLAUDE_OPUS_4_5.value,
        "reasoning_effort": ReasoningEffort.XHIGH.value,
        "streaming": True,
        "timeout": 300.0,
        "auto_continue": True,
        "continue_prompt": "Keep going",
        "retry": retry_defaults,
        "ui_theme": "default",
        "ui_density": "extended",
    }

    with path.open("wb") as fh:
        tomli_w.dump(defaults, fh)

    console.print(f"[green]Created config at:[/green] {path}")
452
+
453
+
454
@app.command()
def interactive(
    model: Annotated[
        str | None, typer.Option("--model", "-m", help="Model to use")
    ] = None,
    reasoning: Annotated[
        str, typer.Option("--reasoning", "-r", help="Reasoning effort level")
    ] = ReasoningEffort.XHIGH.value,
    ui_theme: Annotated[
        Optional[str], typer.Option("--ui-theme", help="UI theme (default, midnight, mono, sunset)")
    ] = None,
    ui_density: Annotated[
        Optional[str], typer.Option("--ui-density", help="UI density (compact or extended)")
    ] = None,
) -> None:
    """Start an interactive chat session."""
    effective_model = model or _DEFAULT_MODEL.value
    try:
        config = CopexConfig(
            model=Model(effective_model),
            reasoning_effort=ReasoningEffort(reasoning),
        )
    except ValueError as e:
        console.print(f"[red]Error: {e}[/red]")
        raise typer.Exit(1)

    # BUG FIX: the --ui-theme/--ui-density options were declared but never
    # applied, so they silently did nothing. Apply them exactly as the
    # `chat` command does.
    if ui_theme:
        config.ui_theme = ui_theme
    if ui_density:
        config.ui_density = ui_density

    print_welcome(
        console,
        config.model.value,
        config.reasoning_effort.value,
        theme=config.ui_theme,
        density=config.ui_density,
    )
    asyncio.run(_interactive_loop(config))
488
+
489
+
490
async def _interactive_loop(config: CopexConfig) -> None:
    """Run the interactive chat loop.

    Reads prompts with prompt_toolkit, handles slash commands
    (/model, /reasoning, /models, /new, /status, /tools, /help) locally,
    and streams everything else to the Copilot client. Returns when the
    user types exit/quit or sends EOF / Ctrl+C at the prompt.
    """
    client = Copex(config)
    await client.start()
    session = _build_prompt_session()
    # When False, the UI shows only recent tool calls; /tools toggles it.
    show_all_tools = False

    # Create persistent UI for conversation history
    ui = CopexUI(
        console,
        theme=config.ui_theme,
        density=config.ui_density,
        show_all_tools=show_all_tools,
    )

    def show_help() -> None:
        # Static command reference printed for /help.
        console.print(f"\n[{Theme.MUTED}]Commands:[/{Theme.MUTED}]")
        console.print(f"  [{Theme.PRIMARY}]/model <name>[/{Theme.PRIMARY}] - Change model (e.g., /model gpt-5.1-codex)")
        console.print(f"  [{Theme.PRIMARY}]/reasoning <level>[/{Theme.PRIMARY}] - Change reasoning (low, medium, high, xhigh)")
        console.print(f"  [{Theme.PRIMARY}]/models[/{Theme.PRIMARY}] - List available models")
        console.print(f"  [{Theme.PRIMARY}]/new[/{Theme.PRIMARY}] - Start new session")
        console.print(f"  [{Theme.PRIMARY}]/status[/{Theme.PRIMARY}] - Show current settings")
        console.print(f"  [{Theme.PRIMARY}]/tools[/{Theme.PRIMARY}] - Toggle full tool call list")
        console.print(f"  [{Theme.PRIMARY}]/help[/{Theme.PRIMARY}] - Show this help")
        console.print(f"  [{Theme.PRIMARY}]exit[/{Theme.PRIMARY}] - Exit\n")

    def show_status() -> None:
        # Current model/reasoning, printed for /status.
        console.print(f"\n[{Theme.MUTED}]Current settings:[/{Theme.MUTED}]")
        console.print(f"  Model: [{Theme.PRIMARY}]{client.config.model.value}[/{Theme.PRIMARY}]")
        console.print(f"  Reasoning: [{Theme.PRIMARY}]{client.config.reasoning_effort.value}[/{Theme.PRIMARY}]\n")

    try:
        while True:
            try:
                prompt = await session.prompt_async()
            except EOFError:
                break
            except KeyboardInterrupt:
                break

            prompt = prompt.strip()
            if not prompt:
                continue

            # Commands are matched case-insensitively; the original-cased
            # `prompt` is kept for argument parsing and for the model.
            command = prompt.lower()

            if command in {"exit", "quit"}:
                break

            if command in {"new", "/new"}:
                client.new_session()
                # Clear UI history for new session
                ui.state.history = []
                console.print(f"\n[{Theme.SUCCESS}]{Icons.DONE} Started new session[/{Theme.SUCCESS}]\n")
                continue

            if command in {"help", "/help"}:
                show_help()
                continue

            if command in {"status", "/status"}:
                show_status()
                continue

            if command in {"models", "/models"}:
                # Arrow-key picker; None means the user cancelled.
                selected = await _model_picker(client.config.model)
                if selected and selected != client.config.model:
                    client.config.model = selected
                    save_last_model(selected)  # Persist for next run
                    # Prompt for reasoning effort if GPT model
                    if selected.value.startswith("gpt-"):
                        new_reasoning = await _reasoning_picker(client.config.reasoning_effort)
                        if new_reasoning:
                            client.config.reasoning_effort = new_reasoning
                    client.new_session()
                    # Clear UI history for new session
                    ui.state.history = []
                    console.print(f"\n[{Theme.SUCCESS}]{Icons.DONE} Switched to {selected.value} (new session started)[/{Theme.SUCCESS}]\n")
                continue

            if command in {"tools", "/tools"}:
                show_all_tools = not show_all_tools
                ui.show_all_tools = show_all_tools
                mode = "all tools" if show_all_tools else "recent tools"
                console.print(f"\n[{Theme.SUCCESS}]{Icons.DONE} Showing {mode}[/{Theme.SUCCESS}]\n")
                continue

            if command.startswith("/model ") or command.startswith("model "):
                # Split the original prompt (not `command`) to preserve case.
                parts = prompt.split(maxsplit=1)
                if len(parts) < 2:
                    console.print(f"[{Theme.ERROR}]Usage: /model <model-name>[/{Theme.ERROR}]")
                    continue
                model_name = parts[1].strip()
                try:
                    new_model = Model(model_name)
                    client.config.model = new_model
                    save_last_model(new_model)  # Persist for next run
                    client.new_session()  # Need new session for model change
                    # Clear UI history for new session
                    ui.state.history = []
                    console.print(f"\n[{Theme.SUCCESS}]{Icons.DONE} Switched to {new_model.value} (new session started)[/{Theme.SUCCESS}]\n")
                except ValueError:
                    console.print(f"[{Theme.ERROR}]Unknown model: {model_name}[/{Theme.ERROR}]")
                    console.print(f"[{Theme.MUTED}]Use /models to see available models[/{Theme.MUTED}]")
                continue

            if command.startswith("/reasoning ") or command.startswith("reasoning "):
                parts = prompt.split(maxsplit=1)
                if len(parts) < 2:
                    console.print(f"[{Theme.ERROR}]Usage: /reasoning <level>[/{Theme.ERROR}]")
                    continue
                level = parts[1].strip()
                try:
                    new_reasoning = ReasoningEffort(level)
                    client.config.reasoning_effort = new_reasoning
                    client.new_session()  # Need new session for reasoning change
                    # Clear UI history for new session
                    ui.state.history = []
                    console.print(f"\n[{Theme.SUCCESS}]{Icons.DONE} Switched to {new_reasoning.value} reasoning (new session started)[/{Theme.SUCCESS}]\n")
                except ValueError:
                    valid = ", ".join(r.value for r in ReasoningEffort)
                    console.print(f"[{Theme.ERROR}]Invalid reasoning level. Valid: {valid}[/{Theme.ERROR}]")
                continue

            # Not a command: send the text to the model and stream the reply.
            try:
                print_user_prompt(console, prompt)
                await _stream_response_interactive(client, prompt, ui)
            except Exception as e:
                print_error(console, str(e))

    except KeyboardInterrupt:
        console.print(f"\n[{Theme.WARNING}]{Icons.INFO} Goodbye![/{Theme.WARNING}]")
    finally:
        await client.stop()
624
+
625
+
626
async def _stream_response_interactive(
    client: Copex,
    prompt: str,
    ui: CopexUI,
) -> None:
    """Stream one interactive turn using the shared rich UI."""
    # Record the user's message, then reset per-turn state while keeping the
    # conversation history visible.
    ui.add_user_message(prompt)
    ui.reset(model=client.config.model.value, preserve_history=True)
    ui.set_activity(ActivityType.THINKING)

    await _stream_with_ui(client, prompt, ui, show_reasoning=True, render_final=False)

    # Render the completed turn once, outside the transient Live region.
    ui.finalize_assistant_response()
    console.print(ui.build_final_display())
    console.print()
643
+
644
+
645
async def _stream_with_ui(
    client: Copex,
    prompt: str,
    ui: CopexUI,
    *,
    show_reasoning: bool = True,
    show_retry_notifications: bool = False,
    render_final: bool = True,
) -> None:
    """Stream a response using shared UI logic.

    Sends `prompt` through `client` while mirroring every stream chunk into
    `ui`, rendered inside a transient rich Live region. A background task
    repaints at ~10 Hz whenever the UI marks itself dirty. When
    `render_final` is True the finished display is printed after the Live
    region closes.
    """
    # Set once the Live context is entered; on_chunk checks it so early
    # chunks arriving before that point don't touch a missing display.
    live_display: Live | None = None
    refresh_stop = asyncio.Event()

    def on_chunk(chunk: StreamChunk) -> None:
        # Route each chunk type into the corresponding UI state update.
        if chunk.type == "message":
            if chunk.is_final:
                ui.set_final_content(chunk.content or ui.state.message, ui.state.reasoning)
            else:
                ui.add_message(chunk.delta)
        elif chunk.type == "reasoning":
            if show_reasoning:
                if chunk.is_final:
                    # Final reasoning is reconciled after send() returns.
                    pass
                else:
                    ui.add_reasoning(chunk.delta)
        elif chunk.type == "tool_call":
            tool = ToolCallInfo(
                name=chunk.tool_name or "unknown",
                arguments=chunk.tool_args or {},
                status="running",
            )
            ui.add_tool_call(tool)
        elif chunk.type == "tool_result":
            # tool_success None (unknown) is treated as success; only an
            # explicit False marks the call as failed.
            status = "success" if chunk.tool_success is not False else "error"
            ui.update_tool_call(
                chunk.tool_name or "unknown",
                status,
                result=chunk.tool_result,
                duration=chunk.tool_duration,
            )
        elif chunk.type == "system":
            # "system" chunks signal a retry; presumably emitted by the
            # client's retry machinery — confirm against copex.client.
            ui.increment_retries()
            if show_retry_notifications:
                print_retry(console, ui.state.retries, client.config.retry.max_retries, chunk.delta)

        if live_display:
            live_display.update(ui.build_live_display())

    async def refresh_loop() -> None:
        # Repaint only when the UI reports pending changes, at most every 0.1s.
        while not refresh_stop.is_set():
            if live_display and ui.consume_dirty():
                live_display.update(ui.build_live_display())
            await asyncio.sleep(0.1)

    with Live(console=console, refresh_per_second=10, transient=True) as live:
        live_display = live
        live.update(ui.build_live_display())
        refresh_task = asyncio.create_task(refresh_loop())
        try:
            response = await client.send(prompt, on_chunk=on_chunk)
            # Prefer streamed content; fall back to the response object when
            # nothing was accumulated during streaming.
            final_message = ui.state.message if ui.state.message else response.content
            final_reasoning = (ui.state.reasoning if ui.state.reasoning else response.reasoning) if show_reasoning else None
            ui.set_final_content(final_message, final_reasoning)
            ui.state.retries = response.retries
        finally:
            # Stop the repaint task before leaving the Live region.
            refresh_stop.set()
            try:
                await refresh_task
            except asyncio.CancelledError:
                pass

    if render_final:
        console.print(ui.build_final_display())
718
+
719
+
720
@app.command("ralph")
def ralph_command(
    prompt: Annotated[str, typer.Argument(help="Task prompt for the Ralph loop")],
    max_iterations: Annotated[
        int, typer.Option("--max-iterations", "-n", help="Maximum iterations")
    ] = 30,
    completion_promise: Annotated[
        Optional[str], typer.Option("--promise", "-p", help="Completion promise text")
    ] = None,
    model: Annotated[
        str | None, typer.Option("--model", "-m", help="Model to use")
    ] = None,
    reasoning: Annotated[
        str, typer.Option("--reasoning", "-r", help="Reasoning effort level")
    ] = ReasoningEffort.XHIGH.value,
) -> None:
    """
    Start a Ralph Wiggum loop - iterative AI development.

    The same prompt is fed to the AI repeatedly. The AI sees its previous
    work in conversation history and iteratively improves until complete.

    Example:
        copex ralph "Build a REST API with CRUD and tests" --promise "ALL TESTS PASSING" -n 20
    """
    chosen_model = model or _DEFAULT_MODEL.value
    try:
        config = CopexConfig(
            model=Model(chosen_model),
            reasoning_effort=ReasoningEffort(reasoning),
        )
    except ValueError as e:
        console.print(f"[red]Error: {e}[/red]")
        raise typer.Exit(1)

    summary = (
        f"[bold]Ralph Wiggum Loop[/bold]\n"
        f"Model: {config.model.value}\n"
        f"Reasoning: {config.reasoning_effort.value}\n"
        f"Max iterations: {max_iterations}\n"
        f"Completion promise: {completion_promise or '(none)'}"
    )
    console.print(Panel(summary, title="🔄 Starting Loop", border_style="yellow"))

    if completion_promise:
        # Remind the user what sentinel the AI must emit to end the loop.
        console.print(
            f"\n[dim]To complete, the AI must output: "
            f"[yellow]<promise>{completion_promise}</promise>[/yellow][/dim]\n"
        )

    asyncio.run(_run_ralph(config, prompt, max_iterations, completion_promise))
772
+
773
+
774
async def _run_ralph(
    config: CopexConfig,
    prompt: str,
    max_iterations: int,
    completion_promise: str | None,
) -> None:
    """Drive the Ralph loop: start the client, iterate, report, clean up."""
    client = Copex(config)
    await client.start()

    def on_iteration(iteration: int, response: str) -> None:
        # Show at most the first 200 characters of each iteration's output.
        truncated = response if len(response) <= 200 else response[:200] + "..."
        console.print(Panel(
            truncated,
            title=f"[bold]Iteration {iteration}[/bold]",
            border_style="blue",
        ))

    def on_complete(state: RalphState) -> None:
        body = (
            f"Iterations: {state.iteration}\n"
            f"Reason: {state.completion_reason}"
        )
        console.print(Panel(
            body,
            title="[bold green]Loop Complete[/bold green]",
            border_style="green",
        ))

    try:
        ralph = RalphWiggum(client)
        await ralph.loop(
            prompt,
            max_iterations=max_iterations,
            completion_promise=completion_promise,
            on_iteration=on_iteration,
            on_complete=on_complete,
        )
    except KeyboardInterrupt:
        console.print("\n[yellow]Loop cancelled[/yellow]")
    except Exception as e:
        console.print(f"[red]Error: {e}[/red]")
        raise typer.Exit(1)
    finally:
        await client.stop()
816
+
817
+
818
@app.command("login")
def login() -> None:
    """Login to GitHub (uses GitHub CLI for authentication)."""
    import shutil
    import subprocess

    # Authentication is delegated entirely to the GitHub CLI; bail out with
    # installation hints when it is not on PATH.
    gh_path = shutil.which("gh")
    if gh_path is None:
        console.print("[red]Error: GitHub CLI (gh) not found.[/red]")
        console.print("Install it from: [bold]https://cli.github.com/[/bold]")
        console.print("\nOr with:")
        console.print("  Windows: [bold]winget install GitHub.cli[/bold]")
        console.print("  macOS: [bold]brew install gh[/bold]")
        console.print("  Linux: [bold]sudo apt install gh[/bold]")
        raise typer.Exit(1)

    console.print("[blue]Opening browser for GitHub authentication...[/blue]\n")

    try:
        proc = subprocess.run([gh_path, "auth", "login"], check=False)
        if proc.returncode == 0:
            console.print("\n[green]✓ Successfully logged in![/green]")
            console.print("You can now use [bold]copex chat[/bold]")
        else:
            console.print("\n[yellow]Login may have failed. Check status with:[/yellow]")
            console.print("  [bold]copex status[/bold]")
    except Exception as e:
        console.print(f"[red]Error: {e}[/red]")
        raise typer.Exit(1)
848
+
849
+
850
@app.command("logout")
def logout() -> None:
    """Logout from GitHub (delegates to the GitHub CLI)."""
    import shutil
    import subprocess

    gh_path = shutil.which("gh")
    if not gh_path:
        console.print("[red]Error: GitHub CLI (gh) not found.[/red]")
        raise typer.Exit(1)

    try:
        result = subprocess.run([gh_path, "auth", "logout"], check=False)
        if result.returncode == 0:
            console.print("[green]✓ Logged out[/green]")
        else:
            # Mirror `login`: surface a hint instead of failing silently when
            # the gh subcommand reports a non-zero exit status.
            console.print("[yellow]Logout may have failed. Check status with:[/yellow]")
            console.print("  [bold]copex status[/bold]")
    except Exception as e:
        console.print(f"[red]Error: {e}[/red]")
        raise typer.Exit(1)
868
+
869
+
870
@app.command("status")
def status() -> None:
    """Check Copilot CLI and GitHub authentication status."""
    import shutil
    import subprocess

    from copex.config import find_copilot_cli

    # BUG FIX: `__version__` was referenced in the panel below but never
    # imported or defined anywhere in this module, so `copex status` crashed
    # with NameError. Resolve the installed package version at call time.
    try:
        from importlib.metadata import PackageNotFoundError, version
        copex_version = version("copex")
    except PackageNotFoundError:
        copex_version = "unknown"

    cli_path = find_copilot_cli()
    gh_path = shutil.which("gh")

    # Get copilot version (best effort; the CLI may hang or be missing).
    copilot_version = "N/A"
    if cli_path:
        try:
            result = subprocess.run(
                [cli_path, "--version"],
                capture_output=True, text=True, timeout=5
            )
            copilot_version = result.stdout.strip() or result.stderr.strip()
        except Exception:
            pass

    console.print(Panel(
        f"[bold]Copex Version:[/bold] {copex_version}\n"
        f"[bold]Copilot CLI:[/bold] {cli_path or '[red]Not found[/red]'}\n"
        f"[bold]Copilot Version:[/bold] {copilot_version}\n"
        f"[bold]GitHub CLI:[/bold] {gh_path or '[red]Not found[/red]'}",
        title="Copex Status",
        border_style="blue",
    ))

    if not cli_path:
        console.print("\n[red]Copilot CLI not found.[/red]")
        console.print("Install: [bold]npm install -g @github/copilot[/bold]")

    if gh_path:
        # `gh auth status` prints its own report directly to the terminal.
        console.print("\n[bold]GitHub Auth Status:[/bold]")
        try:
            subprocess.run([gh_path, "auth", "status"], check=False)
        except Exception as e:
            console.print(f"[red]Error checking status: {e}[/red]")
    else:
        console.print("\n[yellow]GitHub CLI not found - cannot check auth status[/yellow]")
        console.print("Install: [bold]https://cli.github.com/[/bold]")
915
+
916
+
917
@app.command("plan")
def plan_command(
    task: Annotated[str, typer.Argument(help="Task to plan")],
    execute: Annotated[
        bool, typer.Option("--execute", "-e", help="Execute the plan after generating")
    ] = False,
    review: Annotated[
        bool, typer.Option("--review", "-R", help="Show plan and confirm before executing")
    ] = False,
    output: Annotated[
        Optional[Path], typer.Option("--output", "-o", help="Save plan to file")
    ] = None,
    from_step: Annotated[
        int, typer.Option("--from-step", "-f", help="Resume execution from step number")
    ] = 1,
    load_plan: Annotated[
        Optional[Path], typer.Option("--load", "-l", help="Load plan from file instead of generating")
    ] = None,
    model: Annotated[
        str | None, typer.Option("--model", "-m", help="Model to use")
    ] = None,
    reasoning: Annotated[
        str, typer.Option("--reasoning", "-r", help="Reasoning effort level")
    ] = ReasoningEffort.XHIGH.value,
) -> None:
    """
    Generate and optionally execute a step-by-step plan.

    Examples:
        copex plan "Build a REST API"              # Generate plan only
        copex plan "Build a REST API" --execute    # Generate and execute
        copex plan "Build a REST API" --review     # Generate, review, then execute
        copex plan "Continue" --load plan.json -f3 # Resume from step 3
    """
    chosen_model = model or _DEFAULT_MODEL.value
    try:
        cfg = CopexConfig(
            model=Model(chosen_model),
            reasoning_effort=ReasoningEffort(reasoning),
        )
    except ValueError as e:
        console.print(f"[red]Error: {e}[/red]")
        raise typer.Exit(1)

    # --review implies execution (confirmation happens inside the runner).
    asyncio.run(
        _run_plan(
            config=cfg,
            task=task,
            execute=execute or review,
            review=review,
            output=output,
            from_step=from_step,
            load_plan=load_plan,
        )
    )
970
+
971
+
972
async def _run_plan(
    config: CopexConfig,
    task: str,
    execute: bool,
    review: bool,
    output: Path | None,
    from_step: int,
    load_plan: Path | None,
) -> None:
    """Run plan generation and optional execution.

    Args:
        config: Client configuration (model and reasoning effort).
        task: Task description used to generate the plan.
        execute: Execute the plan after generating/loading it.
        review: Ask for confirmation before executing.
        output: If given, save the plan here (and save again after execution).
        from_step: 1-based step number to resume execution from.
        load_plan: If given, load an existing plan file instead of generating.

    Raises:
        typer.Exit: On a missing plan file or any unexpected error.
    """
    client = Copex(config)
    await client.start()

    try:
        executor = PlanExecutor(client)

        # Load or generate plan
        if load_plan:
            if not load_plan.exists():
                console.print(f"[red]Plan file not found: {load_plan}[/red]")
                raise typer.Exit(1)
            plan = Plan.load(load_plan)
            console.print(f"[green]✓ Loaded plan from {load_plan}[/green]\n")
        else:
            console.print(Panel(
                f"[bold]Generating plan for:[/bold]\n{task}",
                title="📋 Plan Mode",
                border_style="blue",
            ))

            plan = await executor.generate_plan(task)
            console.print(f"\n[green]✓ Generated {len(plan.steps)} steps[/green]\n")

        # Display plan
        _display_plan(plan)

        # Save plan if requested
        if output:
            plan.save(output)
            console.print(f"\n[green]✓ Saved plan to {output}[/green]")

        # Execute if requested
        if execute:
            if review:
                if not typer.confirm("\nProceed with execution?"):
                    console.print("[yellow]Execution cancelled[/yellow]")
                    return

            console.print(f"\n[bold blue]Executing from step {from_step}...[/bold blue]\n")

            def on_step_start(step: PlanStep) -> None:
                # Announce each step as the executor picks it up.
                console.print(f"[blue]▶ Step {step.number}:[/blue] {step.description}")

            def on_step_complete(step: PlanStep) -> None:
                # Show a short (<=150 chars) preview of the step result.
                preview = (step.result or "")[:150]
                if len(step.result or "") > 150:
                    preview += "..."
                console.print(f"[green]✓ Step {step.number} complete[/green]")
                if preview:
                    console.print(f"  [dim]{preview}[/dim]")
                console.print()

            def on_error(step: PlanStep, error: Exception) -> bool:
                # Return True to continue with the next step, False to abort.
                console.print(f"[red]✗ Step {step.number} failed: {error}[/red]")
                return typer.confirm("Continue with next step?", default=False)

            await executor.execute_plan(
                plan,
                from_step=from_step,
                on_step_start=on_step_start,
                on_step_complete=on_step_complete,
                on_error=on_error,
            )

            # Show summary
            _display_plan_summary(plan)

            # Save updated plan
            if output:
                plan.save(output)
                console.print(f"\n[green]✓ Updated plan saved to {output}[/green]")

    except typer.Exit:
        # typer.Exit subclasses RuntimeError (via click), so without this
        # clause the deliberate Exit raised above (missing plan file) would
        # be swallowed by the generic handler below and reported as a bogus
        # "Error:" line. Re-raise it untouched.
        raise
    except KeyboardInterrupt:
        console.print("\n[yellow]Cancelled[/yellow]")
    except Exception as e:
        console.print(f"[red]Error: {e}[/red]")
        raise typer.Exit(1) from e
    finally:
        # Always shut the client down, even on error or cancellation.
        await client.stop()
1061
+
1062
+
1063
def _display_plan(plan: Plan) -> None:
    """Print every step of *plan*, prefixed with an icon for its status."""
    # Map each status to its display icon; unknown statuses fall back to
    # the pending icon.
    icons = {
        StepStatus.PENDING: "⬜",
        StepStatus.RUNNING: "🔄",
        StepStatus.COMPLETED: "✅",
        StepStatus.FAILED: "❌",
        StepStatus.SKIPPED: "⏭️",
    }
    for item in plan.steps:
        icon = icons.get(item.status, "⬜")
        console.print(f"{icon} [bold]Step {item.number}:[/bold] {item.description}")
1074
+
1075
+
1076
def _display_plan_summary(plan: Plan) -> None:
    """Print a summary panel reflecting the plan's execution outcome."""
    total = len(plan.steps)
    done = plan.completed_count
    broken = plan.failed_count

    # Pick the panel contents based on outcome: full success, partial
    # failure, or still in progress.
    if plan.is_complete and broken == 0:
        body = f"[green]All {total} steps completed successfully![/green]"
        title = "✅ Plan Complete"
        style = "green"
    elif broken > 0:
        body = f"Completed: {done}/{total}\nFailed: {broken}"
        title = "⚠️ Plan Incomplete"
        style = "yellow"
    else:
        body = f"Completed: {done}/{total}"
        title = "📋 Progress"
        style = "blue"

    console.print(Panel(body, title=title, border_style=style))
1100
+
1101
+
1102
+ __version__ = "0.8.2"
1103
+
1104
+
1105
# Allow running the CLI directly (e.g. `python -m copex.cli`) in addition to
# the installed console-script entry point.
if __name__ == "__main__":
    app()