kimi_cli-0.35-py3-none-any.whl → kimi_cli-0.52-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kimi_cli/CHANGELOG.md +165 -0
- kimi_cli/__init__.py +0 -374
- kimi_cli/agents/{koder → default}/agent.yaml +1 -1
- kimi_cli/agents/{koder → default}/system.md +1 -1
- kimi_cli/agentspec.py +115 -0
- kimi_cli/app.py +208 -0
- kimi_cli/cli.py +321 -0
- kimi_cli/config.py +33 -16
- kimi_cli/constant.py +4 -0
- kimi_cli/exception.py +16 -0
- kimi_cli/llm.py +144 -3
- kimi_cli/metadata.py +6 -69
- kimi_cli/prompts/__init__.py +4 -0
- kimi_cli/session.py +103 -0
- kimi_cli/soul/__init__.py +130 -9
- kimi_cli/soul/agent.py +159 -0
- kimi_cli/soul/approval.py +5 -6
- kimi_cli/soul/compaction.py +106 -0
- kimi_cli/soul/context.py +1 -1
- kimi_cli/soul/kimisoul.py +180 -80
- kimi_cli/soul/message.py +6 -6
- kimi_cli/soul/runtime.py +96 -0
- kimi_cli/soul/toolset.py +3 -2
- kimi_cli/tools/__init__.py +35 -31
- kimi_cli/tools/bash/__init__.py +25 -9
- kimi_cli/tools/bash/cmd.md +31 -0
- kimi_cli/tools/dmail/__init__.py +5 -4
- kimi_cli/tools/file/__init__.py +8 -0
- kimi_cli/tools/file/glob.md +1 -1
- kimi_cli/tools/file/glob.py +4 -4
- kimi_cli/tools/file/grep.py +36 -19
- kimi_cli/tools/file/patch.py +52 -10
- kimi_cli/tools/file/read.py +6 -5
- kimi_cli/tools/file/replace.py +16 -4
- kimi_cli/tools/file/write.py +16 -4
- kimi_cli/tools/mcp.py +7 -4
- kimi_cli/tools/task/__init__.py +60 -41
- kimi_cli/tools/task/task.md +1 -1
- kimi_cli/tools/todo/__init__.py +4 -2
- kimi_cli/tools/utils.py +1 -1
- kimi_cli/tools/web/fetch.py +2 -1
- kimi_cli/tools/web/search.py +13 -12
- kimi_cli/ui/__init__.py +0 -68
- kimi_cli/ui/acp/__init__.py +67 -38
- kimi_cli/ui/print/__init__.py +46 -69
- kimi_cli/ui/shell/__init__.py +145 -154
- kimi_cli/ui/shell/console.py +27 -1
- kimi_cli/ui/shell/debug.py +187 -0
- kimi_cli/ui/shell/keyboard.py +183 -0
- kimi_cli/ui/shell/metacmd.py +34 -81
- kimi_cli/ui/shell/prompt.py +245 -28
- kimi_cli/ui/shell/replay.py +104 -0
- kimi_cli/ui/shell/setup.py +19 -19
- kimi_cli/ui/shell/update.py +11 -5
- kimi_cli/ui/shell/visualize.py +576 -0
- kimi_cli/ui/wire/README.md +109 -0
- kimi_cli/ui/wire/__init__.py +340 -0
- kimi_cli/ui/wire/jsonrpc.py +48 -0
- kimi_cli/utils/__init__.py +0 -0
- kimi_cli/utils/aiohttp.py +10 -0
- kimi_cli/utils/changelog.py +6 -2
- kimi_cli/utils/clipboard.py +10 -0
- kimi_cli/utils/message.py +15 -1
- kimi_cli/utils/rich/__init__.py +33 -0
- kimi_cli/utils/rich/markdown.py +959 -0
- kimi_cli/utils/rich/markdown_sample.md +108 -0
- kimi_cli/utils/rich/markdown_sample_short.md +2 -0
- kimi_cli/utils/signals.py +41 -0
- kimi_cli/utils/string.py +8 -0
- kimi_cli/utils/term.py +114 -0
- kimi_cli/wire/__init__.py +73 -0
- kimi_cli/wire/message.py +191 -0
- kimi_cli-0.52.dist-info/METADATA +186 -0
- kimi_cli-0.52.dist-info/RECORD +99 -0
- kimi_cli-0.52.dist-info/entry_points.txt +3 -0
- kimi_cli/agent.py +0 -261
- kimi_cli/agents/koder/README.md +0 -3
- kimi_cli/prompts/metacmds/__init__.py +0 -4
- kimi_cli/soul/wire.py +0 -101
- kimi_cli/ui/shell/liveview.py +0 -158
- kimi_cli/utils/provider.py +0 -64
- kimi_cli-0.35.dist-info/METADATA +0 -24
- kimi_cli-0.35.dist-info/RECORD +0 -76
- kimi_cli-0.35.dist-info/entry_points.txt +0 -3
- /kimi_cli/agents/{koder → default}/sub.yaml +0 -0
- /kimi_cli/prompts/{metacmds/compact.md → compact.md} +0 -0
- /kimi_cli/prompts/{metacmds/init.md → init.md} +0 -0
- {kimi_cli-0.35.dist-info → kimi_cli-0.52.dist-info}/WHEEL +0 -0
kimi_cli/ui/shell/prompt.py
CHANGED
@@ -1,19 +1,27 @@
 import asyncio
+import base64
 import contextlib
 import getpass
 import json
 import os
 import re
 import time
+from collections import deque
 from collections.abc import Callable
+from dataclasses import dataclass
 from datetime import datetime
 from enum import Enum
 from hashlib import md5
+from io import BytesIO
 from pathlib import Path
 from typing import override

+from kosong.message import ContentPart, ImageURLPart, TextPart
+from PIL import Image, ImageGrab
 from prompt_toolkit import PromptSession
 from prompt_toolkit.application.current import get_app_or_none
+from prompt_toolkit.buffer import Buffer
+from prompt_toolkit.clipboard.pyperclip import PyperclipClipboard
 from prompt_toolkit.completion import (
     Completer,
     Completion,
@@ -23,17 +31,25 @@ from prompt_toolkit.completion import (
     merge_completers,
 )
 from prompt_toolkit.document import Document
-from prompt_toolkit.filters import
+from prompt_toolkit.filters import Condition, has_completions
 from prompt_toolkit.formatted_text import FormattedText
 from prompt_toolkit.history import InMemoryHistory
 from prompt_toolkit.key_binding import KeyBindings, KeyPressEvent
 from prompt_toolkit.patch_stdout import patch_stdout
 from pydantic import BaseModel, ValidationError

+from kimi_cli.llm import ModelCapability
 from kimi_cli.share import get_share_dir
 from kimi_cli.soul import StatusSnapshot
+from kimi_cli.ui.shell.console import console
 from kimi_cli.ui.shell.metacmd import get_meta_commands
+from kimi_cli.utils.clipboard import is_clipboard_available
 from kimi_cli.utils.logging import logger
+from kimi_cli.utils.string import random_string
+
+PROMPT_SYMBOL = "✨"
+PROMPT_SYMBOL_SHELL = "$"
+PROMPT_SYMBOL_THINKING = "💫"


 class MetaCommandCompleter(Completer):
@@ -299,7 +315,26 @@ class FileMentionCompleter(Completer):
         mention_doc = Document(text=fragment, cursor_position=len(fragment))
         self._fragment_hint = fragment
         try:
-
+            # First, ask the fuzzy completer for candidates.
+            candidates = list(self._fuzzy.get_completions(mention_doc, complete_event))
+
+            # re-rank: prefer basename matches
+            frag_lower = fragment.lower()
+
+            def _rank(c: Completion) -> tuple:
+                path = c.text
+                base = path.rstrip("/").split("/")[-1].lower()
+                if base.startswith(frag_lower):
+                    cat = 0
+                elif frag_lower in base:
+                    cat = 1
+                else:
+                    cat = 2
+                # preserve original FuzzyCompleter's order in the same category
+                return (cat,)
+
+            candidates.sort(key=_rank)
+            yield from candidates
         finally:
             self._fragment_hint = None

@@ -359,7 +394,11 @@ class PromptMode(Enum):

 class UserInput(BaseModel):
     mode: PromptMode
+    thinking: bool
     command: str
+    """The plain text representation of the user input."""
+    content: list[ContentPart]
+    """The rich content parts."""

     def __str__(self) -> str:
         return self.command
@@ -369,23 +408,78 @@ class UserInput(BaseModel):


 _REFRESH_INTERVAL = 1.0
-_toast_queue: asyncio.Queue[tuple[str, float]] = asyncio.Queue()


-
+@dataclass(slots=True)
+class _ToastEntry:
+    topic: str | None
+    """There can be only one toast of each non-None topic in the queue."""
+    message: str
+    duration: float
+
+
+_toast_queue = deque[_ToastEntry]()
+"""The queue of toasts to show, including the one currently being shown (the first one)."""
+
+
+def toast(
+    message: str,
+    duration: float = 5.0,
+    topic: str | None = None,
+    immediate: bool = False,
+) -> None:
     duration = max(duration, _REFRESH_INTERVAL)
-
+    entry = _ToastEntry(topic=topic, message=message, duration=duration)
+    if topic is not None:
+        # Remove existing toasts with the same topic
+        for existing in list(_toast_queue):
+            if existing.topic == topic:
+                _toast_queue.remove(existing)
+    if immediate:
+        _toast_queue.appendleft(entry)
+    else:
+        _toast_queue.append(entry)
+
+
+def _current_toast() -> _ToastEntry | None:
+    if not _toast_queue:
+        return None
+    return _toast_queue[0]
+
+
+def _toast_thinking(thinking: bool) -> None:
+    toast(
+        f"thinking {'on' if thinking else 'off'}, tab to toggle",
+        duration=3.0,
+        topic="thinking",
+        immediate=True,
+    )
+
+
+_ATTACHMENT_PLACEHOLDER_RE = re.compile(
+    r"\[(?P<type>image):(?P<id>[a-zA-Z0-9_\-\.]+)(?:,(?P<width>\d+)x(?P<height>\d+))?\]"
+)


 class CustomPromptSession:
-    def __init__(
+    def __init__(
+        self,
+        *,
+        status_provider: Callable[[], StatusSnapshot],
+        model_capabilities: set[ModelCapability],
+        initial_thinking: bool,
+    ) -> None:
         history_dir = get_share_dir() / "user-history"
         history_dir.mkdir(parents=True, exist_ok=True)
-        work_dir_id = md5(str(Path.cwd()).encode()).hexdigest()
+        work_dir_id = md5(str(Path.cwd()).encode(encoding="utf-8")).hexdigest()
         self._history_file = (history_dir / work_dir_id).with_suffix(".jsonl")
         self._status_provider = status_provider
+        self._model_capabilities = model_capabilities
         self._last_history_content: str | None = None
         self._mode: PromptMode = PromptMode.AGENT
+        self._thinking = initial_thinking
+        self._attachment_parts: dict[str, ContentPart] = {}
+        """Mapping from attachment id to ContentPart."""

         history_entries = _load_history_entries(self._history_file)
         history = InMemoryHistory()
@@ -407,6 +501,7 @@ class CustomPromptSession:

         # Build key bindings
         _kb = KeyBindings()
+        shortcut_hints: list[str] = []

         @_kb.add("enter", filter=has_completions)
         def _accept_completion(event: KeyPressEvent) -> None:
@@ -419,30 +514,81 @@
             completion = buff.complete_state.completions[0]
             buff.apply_completion(completion)

-        @_kb.add("c-
-        def
+        @_kb.add("c-x", eager=True)
+        def _switch_mode(event: KeyPressEvent) -> None:
             self._mode = self._mode.toggle()
             # Apply mode-specific settings
             self._apply_mode(event)
             # Redraw UI
             event.app.invalidate()

+        shortcut_hints.append("ctrl-x: switch mode")
+
+        @_kb.add("escape", "enter", eager=True)
+        @_kb.add("c-j", eager=True)
+        def _insert_newline(event: KeyPressEvent) -> None:
+            """Insert a newline when Alt-Enter or Ctrl-J is pressed."""
+            event.current_buffer.insert_text("\n")
+
+        shortcut_hints.append("ctrl-j: newline")
+
+        if is_clipboard_available():
+
+            @_kb.add("c-v", eager=True)
+            def _paste(event: KeyPressEvent) -> None:
+                if self._try_paste_image(event):
+                    return
+                clipboard_data = event.app.clipboard.get_data()
+                event.current_buffer.paste_clipboard_data(clipboard_data)
+
+            shortcut_hints.append("ctrl-v: paste")
+            clipboard = PyperclipClipboard()
+        else:
+            clipboard = None
+
+        @Condition
+        def is_agent_mode() -> bool:
+            return self._mode == PromptMode.AGENT
+
+        _toast_thinking(self._thinking)
+
+        @_kb.add("tab", filter=~has_completions & is_agent_mode, eager=True)
+        def _switch_thinking(event: KeyPressEvent) -> None:
+            """Toggle thinking mode when Tab is pressed and no completions are shown."""
+            if "thinking" not in self._model_capabilities:
+                console.print(
+                    "[yellow]Thinking mode is not supported by the selected LLM model[/yellow]"
+                )
+                return
+            self._thinking = not self._thinking
+            _toast_thinking(self._thinking)
+            event.app.invalidate()
+
+        self._shortcut_hints = shortcut_hints
         self._session = PromptSession(
             message=self._render_message,
-            prompt_continuation=FormattedText([("fg:#4d4d4d", "... ")]),
+            # prompt_continuation=FormattedText([("fg:#4d4d4d", "... ")]),
             completer=self._agent_mode_completer,
-            complete_while_typing=
+            complete_while_typing=Condition(lambda: self._mode == PromptMode.AGENT),
             key_bindings=_kb,
+            clipboard=clipboard,
             history=history,
             bottom_toolbar=self._render_bottom_toolbar,
         )

+        # Allow completion to be triggered when the text is changed,
+        # such as when backspace is used to delete text.
+        @self._session.default_buffer.on_text_changed.add_handler
+        def trigger_complete(buffer: Buffer) -> None:
+            if buffer.complete_while_typing():
+                buffer.start_completion()
+
         self._status_refresh_task: asyncio.Task | None = None
-        self._current_toast: str | None = None
-        self._current_toast_duration: float = 0.0

     def _render_message(self) -> FormattedText:
-        symbol =
+        symbol = PROMPT_SYMBOL if self._mode == PromptMode.AGENT else PROMPT_SYMBOL_SHELL
+        if self._mode == PromptMode.AGENT and self._thinking:
+            symbol = PROMPT_SYMBOL_THINKING
         return FormattedText([("bold", f"{getpass.getuser()}{symbol} ")])

     def _apply_mode(self, event: KeyPressEvent | None = None) -> None:
@@ -459,11 +605,9 @@ class CustomPromptSession:
             buff.cancel_completion()
             if buff is not None:
                 buff.completer = DummyCompleter()
-                buff.complete_while_typing = Never()
         else:
             if buff is not None:
                 buff.completer = self._agent_mode_completer
-                buff.complete_while_typing = Always()

     def __enter__(self) -> "CustomPromptSession":
         if self._status_refresh_task is not None and not self._status_refresh_task.done():
@@ -495,12 +639,85 @@ class CustomPromptSession:
         if self._status_refresh_task is not None and not self._status_refresh_task.done():
             self._status_refresh_task.cancel()
             self._status_refresh_task = None
+        self._attachment_parts.clear()
+
+    def _try_paste_image(self, event: KeyPressEvent) -> bool:
+        """Try to paste an image from the clipboard. Return True if successful."""
+        # Try get image from clipboard
+        image = ImageGrab.grabclipboard()
+        if isinstance(image, list):
+            for item in image:
+                try:
+                    with Image.open(item) as img:
+                        image = img.copy()
+                    break
+                except Exception:
+                    continue
+            else:
+                image = None
+
+        if image is None:
+            return False
+
+        if "image_in" not in self._model_capabilities:
+            console.print("[yellow]Image input is not supported by the selected LLM model[/yellow]")
+            return False
+
+        attachment_id = f"{random_string(8)}.png"
+        png_bytes = BytesIO()
+        image.save(png_bytes, format="PNG")
+        png_base64 = base64.b64encode(png_bytes.getvalue()).decode("ascii")
+        image_part = ImageURLPart(
+            image_url=ImageURLPart.ImageURL(
+                url=f"data:image/png;base64,{png_base64}", id=attachment_id
+            )
+        )
+        self._attachment_parts[attachment_id] = image_part
+        logger.debug(
+            "Pasted image from clipboard: {attachment_id}, {image_size}",
+            attachment_id=attachment_id,
+            image_size=image.size,
+        )
+
+        placeholder = f"[image:{attachment_id},{image.width}x{image.height}]"
+        event.current_buffer.insert_text(placeholder)
+        event.app.invalidate()
+        return True

     async def prompt(self) -> UserInput:
-        with patch_stdout():
+        with patch_stdout(raw=True):
             command = str(await self._session.prompt_async()).strip()
+        command = command.replace("\x00", "")  # just in case null bytes are somehow inserted
         self._append_history_entry(command)
-
+
+        # Parse rich content parts
+        content: list[ContentPart] = []
+        remaining_command = command
+        while match := _ATTACHMENT_PLACEHOLDER_RE.search(remaining_command):
+            start, end = match.span()
+            if start > 0:
+                content.append(TextPart(text=remaining_command[:start]))
+            attachment_id = match.group("id")
+            part = self._attachment_parts.get(attachment_id)
+            if part is not None:
+                content.append(part)
+            else:
+                logger.warning(
+                    "Attachment placeholder found but no matching attachment part: {placeholder}",
+                    placeholder=match.group(0),
+                )
+                content.append(TextPart(text=match.group(0)))
+            remaining_command = remaining_command[end:]
+
+        if remaining_command.strip():
+            content.append(TextPart(text=remaining_command.strip()))
+
+        return UserInput(
+            mode=self._mode,
+            thinking=self._thinking,
+            content=content,
+            command=command,
+        )

     def _append_history_entry(self, text: str) -> None:
         entry = _HistoryEntry(content=text.strip())
@@ -535,21 +752,24 @@ class CustomPromptSession:
         columns -= len(now_text) + 2

         mode = str(self._mode).lower()
+        if self._mode == PromptMode.AGENT and self._thinking:
+            mode += " (thinking)"
         fragments.extend([("", f"{mode}"), ("", " " * 2)])
         columns -= len(mode) + 2

         status = self._status_provider()
         status_text = self._format_status(status)

-
-
-
-
-
-
+        current_toast = _current_toast()
+        if current_toast is not None:
+            fragments.extend([("", current_toast.message), ("", " " * 2)])
+            columns -= len(current_toast.message) + 2
+            current_toast.duration -= _REFRESH_INTERVAL
+            if current_toast.duration <= 0.0:
+                _toast_queue.popleft()
         else:
             shortcuts = [
-
+                *self._shortcut_hints,
                 "ctrl-d: exit",
            ]
             for shortcut in shortcuts:
@@ -559,9 +779,6 @@
                 else:
                     break

-        if self._current_toast is None and not _toast_queue.empty():
-            self._current_toast, self._current_toast_duration = _toast_queue.get_nowait()
-
         padding = max(1, columns - len(status_text))
         fragments.append(("", " " * padding))
         fragments.append(("", status_text))
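For context on the new prompt() flow above: pasted images are stored by id in _attachment_parts and referenced in the text buffer via placeholders matched by _ATTACHMENT_PLACEHOLDER_RE, which prompt() later splits back into text and image parts. Below is a minimal, self-contained sketch of just that splitting step; split_placeholders is a hypothetical helper name used only for illustration, and the real code maps each id back to a stored ContentPart instead of returning it as a string.

import re

# Same placeholder pattern as in the diff above, e.g. "[image:ab12cd34.png,800x600]".
_ATTACHMENT_PLACEHOLDER_RE = re.compile(
    r"\[(?P<type>image):(?P<id>[a-zA-Z0-9_\-\.]+)(?:,(?P<width>\d+)x(?P<height>\d+))?\]"
)


def split_placeholders(command: str) -> list[str]:
    """Split a prompt string into plain-text chunks and placeholder ids."""
    parts: list[str] = []
    remaining = command
    while match := _ATTACHMENT_PLACEHOLDER_RE.search(remaining):
        start, end = match.span()
        if start > 0:
            parts.append(remaining[:start])
        parts.append(match.group("id"))  # the real code looks this id up in _attachment_parts
        remaining = remaining[end:]
    if remaining.strip():
        parts.append(remaining.strip())
    return parts


print(split_placeholders("describe [image:ab12cd34.png,800x600] please"))
# ['describe ', 'ab12cd34.png', 'please']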
kimi_cli/ui/shell/replay.py
ADDED

@@ -0,0 +1,104 @@
+import asyncio
+import contextlib
+import getpass
+from collections.abc import Sequence
+from dataclasses import dataclass
+
+from kosong.message import Message, TextPart
+from kosong.tooling import ToolError, ToolOk
+
+from kimi_cli.soul import StatusSnapshot
+from kimi_cli.ui.shell.console import console
+from kimi_cli.ui.shell.prompt import PROMPT_SYMBOL
+from kimi_cli.ui.shell.visualize import visualize
+from kimi_cli.utils.message import message_extract_text, message_stringify
+from kimi_cli.wire import Wire
+from kimi_cli.wire.message import ContentPart, StepBegin, ToolCall, ToolResult
+
+MAX_REPLAY_RUNS = 5
+
+type _ReplayEvent = StepBegin | ToolCall | ContentPart | ToolResult
+
+
+@dataclass(slots=True)
+class _ReplayRun:
+    user_message: Message
+    events: list[_ReplayEvent]
+    n_steps: int = 0
+
+
+async def replay_recent_history(history: Sequence[Message]) -> None:
+    """
+    Replay the most recent user-initiated runs from the provided message history.
+    """
+    start_idx = _find_replay_start(history)
+    if start_idx is None:
+        return
+
+    runs = _build_replay_runs(history[start_idx:])
+    if not runs:
+        return
+
+    for run in runs:
+        wire = Wire()
+        console.print(f"{getpass.getuser()}{PROMPT_SYMBOL} {message_stringify(run.user_message)}")
+        ui_task = asyncio.create_task(
+            visualize(wire.ui_side, initial_status=StatusSnapshot(context_usage=0.0))
+        )
+        for event in run.events:
+            wire.soul_side.send(event)
+            await asyncio.sleep(0)  # yield to UI loop
+        wire.shutdown()
+        with contextlib.suppress(asyncio.QueueShutDown):
+            await ui_task
+
+
+def _is_user_message(message: Message) -> bool:
+    # FIXME: should consider non-text tool call results which are sent as user messages
+    if message.role != "user":
+        return False
+    return not message_extract_text(message).startswith("<system>CHECKPOINT")
+
+
+def _find_replay_start(history: Sequence[Message]) -> int | None:
+    indices = [idx for idx, message in enumerate(history) if _is_user_message(message)]
+    if not indices:
+        return None
+    # only replay last MAX_REPLAY_RUNS messages
+    return indices[max(0, len(indices) - MAX_REPLAY_RUNS)]
+
+
+def _build_replay_runs(history: Sequence[Message]) -> list[_ReplayRun]:
+    runs: list[_ReplayRun] = []
+    current_run: _ReplayRun | None = None
+    for message in history:
+        if _is_user_message(message):
+            # start a new run
+            if current_run is not None:
+                runs.append(current_run)
+            current_run = _ReplayRun(user_message=message, events=[])
+        elif message.role == "assistant":
+            if current_run is None:
+                continue
+            current_run.n_steps += 1
+            current_run.events.append(StepBegin(n=current_run.n_steps))
+            if isinstance(message.content, str):
+                current_run.events.append(TextPart(text=message.content))
+            else:
+                current_run.events.extend(message.content)
+            current_run.events.extend(message.tool_calls or [])
+        elif message.role == "tool":
+            if current_run is None:
+                continue
+            assert message.tool_call_id is not None
+            if isinstance(message.content, list) and any(
+                isinstance(part, TextPart) and part.text.startswith("<system>ERROR")
+                for part in message.content
+            ):
+                result = ToolError(message="", output="", brief="")
+            else:
+                result = ToolOk(output=message.content)
+            current_run.events.append(ToolResult(tool_call_id=message.tool_call_id, result=result))
+    if current_run is not None:
+        runs.append(current_run)
+    return runs
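The new replay module only re-renders the tail of a session. A small sketch of that selection rule under the same MAX_REPLAY_RUNS constant; the boolean list below stands in for _is_user_message() applied to each history entry, which is an assumption made purely for illustration.

from collections.abc import Sequence

MAX_REPLAY_RUNS = 5


def find_replay_start(is_user: Sequence[bool]) -> int | None:
    """Return the index of the first message to replay, keeping at most the
    last MAX_REPLAY_RUNS user-initiated runs (mirrors _find_replay_start)."""
    indices = [idx for idx, flag in enumerate(is_user) if flag]
    if not indices:
        return None
    return indices[max(0, len(indices) - MAX_REPLAY_RUNS)]


# Seven user messages at indices 0, 3, 5, 8, 10, 12, 15: replay starts at 5,
# so only the five most recent runs are re-rendered.
flags = [i in {0, 3, 5, 8, 10, 12, 15} for i in range(17)]
print(find_replay_start(flags))  # 5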
kimi_cli/ui/shell/setup.py
CHANGED
@@ -7,9 +7,9 @@ from prompt_toolkit.shortcuts.choice_input import ChoiceInput
 from pydantic import SecretStr

 from kimi_cli.config import LLMModel, LLMProvider, MoonshotSearchConfig, load_config, save_config
-from kimi_cli.soul.kimisoul import KimiSoul
 from kimi_cli.ui.shell.console import console
 from kimi_cli.ui.shell.metacmd import meta_command
+from kimi_cli.utils.aiohttp import new_client_session

 if TYPE_CHECKING:
     from kimi_cli.ui.shell import ShellApp
@@ -20,36 +20,34 @@ class _Platform(NamedTuple):
     name: str
     base_url: str
     search_url: str | None = None
-
+    allowed_prefixes: list[str] | None = None


 _PLATFORMS = [
     _Platform(
-        id="kimi-coding",
-        name="Kimi Coding
+        id="kimi-for-coding",
+        name="Kimi For Coding",
         base_url="https://api.kimi.com/coding/v1",
         search_url="https://api.kimi.com/coding/v1/search",
     ),
     _Platform(
         id="moonshot-cn",
-        name="Moonshot AI 开放平台",
+        name="Moonshot AI 开放平台 (moonshot.cn)",
         base_url="https://api.moonshot.cn/v1",
-
+        allowed_prefixes=["kimi-k2-"],
     ),
     _Platform(
         id="moonshot-ai",
-        name="Moonshot AI Open Platform",
+        name="Moonshot AI Open Platform (moonshot.ai)",
         base_url="https://api.moonshot.ai/v1",
-
+        allowed_prefixes=["kimi-k2-"],
     ),
 ]


-@meta_command
+@meta_command
 async def setup(app: "ShellApp", args: list[str]):
     """Setup Kimi CLI"""
-    assert isinstance(app.soul, KimiSoul)
-
     result = await _setup()
     if not result:
         # error message already printed
@@ -79,7 +77,7 @@ async def setup(app: "ShellApp", args: list[str]):
     await asyncio.sleep(1)
     console.clear()

-    from kimi_cli import Reload
+    from kimi_cli.cli import Reload

     raise Reload

@@ -112,7 +110,7 @@ async def _setup() -> _SetupResult | None:
     models_url = f"{platform.base_url}/models"
     try:
         async with (
-
+            new_client_session() as session,
             session.get(
                 models_url,
                 headers={
@@ -129,11 +127,13 @@ async def _setup() -> _SetupResult | None:
     model_dict = {model["id"]: model for model in resp_json["data"]}

     # select the model
-
-
-
-
-
+    model_ids: list[str] = [model["id"] for model in resp_json["data"]]
+    if platform.allowed_prefixes is not None:
+        model_ids = [
+            model_id
+            for model_id in model_ids
+            if model_id.startswith(tuple(platform.allowed_prefixes))
+        ]

     if not model_ids:
         console.print("[red]No models available for the selected platform[/red]")
@@ -187,6 +187,6 @@ async def _prompt_text(prompt: str, *, is_password: bool = False) -> str | None:
 @meta_command
 def reload(app: "ShellApp", args: list[str]):
     """Reload configuration"""
-    from kimi_cli import Reload
+    from kimi_cli.cli import Reload

     raise Reload
kimi_cli/ui/shell/update.py
CHANGED
@@ -11,7 +11,9 @@ from pathlib import Path

 import aiohttp

+from kimi_cli.share import get_share_dir
 from kimi_cli.ui.shell.console import console
+from kimi_cli.utils.aiohttp import new_client_session
 from kimi_cli.utils.logging import logger

 BASE_URL = "https://cdn.kimi.com/binaries/kimi-cli"
@@ -30,7 +32,7 @@ class UpdateResult(Enum):
 _UPDATE_LOCK = asyncio.Lock()


-def
+def semver_tuple(version: str) -> tuple[int, int, int]:
     v = version.strip()
     if v.startswith("v"):
         v = v[1:]
@@ -79,8 +81,11 @@ async def do_update(*, print: bool = True, check_only: bool = False) -> UpdateRe
         return await _do_update(print=print, check_only=check_only)


+LATEST_VERSION_FILE = get_share_dir() / "latest_version.txt"
+
+
 async def _do_update(*, print: bool, check_only: bool) -> UpdateResult:
-    from kimi_cli import
+    from kimi_cli.constant import VERSION as current_version

     def _print(message: str) -> None:
         if print:
@@ -91,7 +96,7 @@ async def _do_update(*, print: bool, check_only: bool) -> UpdateResult:
         _print("[red]Failed to detect target platform.[/red]")
         return UpdateResult.UNSUPPORTED

-    async with
+    async with new_client_session() as session:
         logger.info("Checking for updates...")
         _print("Checking for updates...")
         latest_version = await _get_latest_version(session)
@@ -100,9 +105,10 @@ async def _do_update(*, print: bool, check_only: bool) -> UpdateResult:
             return UpdateResult.FAILED

         logger.debug("Latest version: {latest_version}", latest_version=latest_version)
+        LATEST_VERSION_FILE.write_text(latest_version, encoding="utf-8")

-        cur_t =
-        lat_t =
+        cur_t = semver_tuple(current_version)
+        lat_t = semver_tuple(latest_version)

         if cur_t >= lat_t:
             logger.debug("Already up to date: {current_version}", current_version=current_version)