npcsh 1.1.17-py3-none-any.whl → 1.1.18-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- npcsh/_state.py +114 -91
- npcsh/alicanto.py +2 -2
- npcsh/benchmark/__init__.py +8 -2
- npcsh/benchmark/npcsh_agent.py +46 -12
- npcsh/benchmark/runner.py +85 -43
- npcsh/benchmark/templates/install-npcsh.sh.j2 +35 -0
- npcsh/build.py +2 -4
- npcsh/completion.py +2 -6
- npcsh/config.py +1 -3
- npcsh/conversation_viewer.py +389 -0
- npcsh/corca.py +0 -1
- npcsh/execution.py +0 -1
- npcsh/guac.py +0 -1
- npcsh/mcp_helpers.py +2 -3
- npcsh/mcp_server.py +5 -10
- npcsh/npc.py +10 -11
- npcsh/npc_team/jinxs/bin/benchmark.jinx +1 -1
- npcsh/npc_team/jinxs/lib/core/search/db_search.jinx +321 -17
- npcsh/npc_team/jinxs/lib/core/search/file_search.jinx +312 -67
- npcsh/npc_team/jinxs/lib/core/search/kg_search.jinx +366 -44
- npcsh/npc_team/jinxs/lib/core/search/mem_review.jinx +73 -0
- npcsh/npc_team/jinxs/lib/core/search/mem_search.jinx +328 -20
- npcsh/npc_team/jinxs/lib/core/search/web_search.jinx +242 -10
- npcsh/npc_team/jinxs/lib/core/sleep.jinx +22 -11
- npcsh/npc_team/jinxs/lib/core/sql.jinx +10 -6
- npcsh/npc_team/jinxs/lib/research/paper_search.jinx +387 -76
- npcsh/npc_team/jinxs/lib/research/semantic_scholar.jinx +372 -55
- npcsh/npc_team/jinxs/lib/utils/jinxs.jinx +299 -144
- npcsh/npc_team/jinxs/modes/alicanto.jinx +356 -0
- npcsh/npc_team/jinxs/modes/arxiv.jinx +720 -0
- npcsh/npc_team/jinxs/modes/corca.jinx +430 -0
- npcsh/npc_team/jinxs/modes/guac.jinx +544 -0
- npcsh/npc_team/jinxs/modes/plonk.jinx +379 -0
- npcsh/npc_team/jinxs/modes/pti.jinx +357 -0
- npcsh/npc_team/jinxs/modes/reattach.jinx +291 -0
- npcsh/npc_team/jinxs/modes/spool.jinx +350 -0
- npcsh/npc_team/jinxs/modes/wander.jinx +455 -0
- npcsh/npc_team/jinxs/{bin → modes}/yap.jinx +13 -7
- npcsh/npcsh.py +7 -4
- npcsh/plonk.py +0 -1
- npcsh/pti.py +0 -1
- npcsh/routes.py +1 -3
- npcsh/spool.py +0 -1
- npcsh/ui.py +0 -1
- npcsh/wander.py +0 -1
- npcsh/yap.py +0 -1
- npcsh-1.1.18.data/data/npcsh/npc_team/alicanto.jinx +356 -0
- npcsh-1.1.18.data/data/npcsh/npc_team/arxiv.jinx +720 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/benchmark.jinx +1 -1
- npcsh-1.1.18.data/data/npcsh/npc_team/corca.jinx +430 -0
- npcsh-1.1.18.data/data/npcsh/npc_team/db_search.jinx +348 -0
- npcsh-1.1.18.data/data/npcsh/npc_team/file_search.jinx +339 -0
- npcsh-1.1.18.data/data/npcsh/npc_team/guac.jinx +544 -0
- npcsh-1.1.18.data/data/npcsh/npc_team/jinxs.jinx +331 -0
- npcsh-1.1.18.data/data/npcsh/npc_team/kg_search.jinx +418 -0
- npcsh-1.1.18.data/data/npcsh/npc_team/mem_review.jinx +73 -0
- npcsh-1.1.18.data/data/npcsh/npc_team/mem_search.jinx +388 -0
- npcsh-1.1.18.data/data/npcsh/npc_team/paper_search.jinx +412 -0
- npcsh-1.1.18.data/data/npcsh/npc_team/plonk.jinx +379 -0
- npcsh-1.1.18.data/data/npcsh/npc_team/pti.jinx +357 -0
- npcsh-1.1.18.data/data/npcsh/npc_team/reattach.jinx +291 -0
- npcsh-1.1.18.data/data/npcsh/npc_team/semantic_scholar.jinx +386 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/sleep.jinx +22 -11
- npcsh-1.1.18.data/data/npcsh/npc_team/spool.jinx +350 -0
- npcsh-1.1.18.data/data/npcsh/npc_team/sql.jinx +20 -0
- npcsh-1.1.18.data/data/npcsh/npc_team/wander.jinx +455 -0
- npcsh-1.1.18.data/data/npcsh/npc_team/web_search.jinx +283 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/yap.jinx +13 -7
- {npcsh-1.1.17.dist-info → npcsh-1.1.18.dist-info}/METADATA +90 -1
- npcsh-1.1.18.dist-info/RECORD +235 -0
- {npcsh-1.1.17.dist-info → npcsh-1.1.18.dist-info}/WHEEL +1 -1
- {npcsh-1.1.17.dist-info → npcsh-1.1.18.dist-info}/entry_points.txt +0 -3
- npcsh/npc_team/jinxs/bin/spool.jinx +0 -161
- npcsh/npc_team/jinxs/bin/wander.jinx +0 -242
- npcsh/npc_team/jinxs/lib/research/arxiv.jinx +0 -76
- npcsh-1.1.17.data/data/npcsh/npc_team/arxiv.jinx +0 -76
- npcsh-1.1.17.data/data/npcsh/npc_team/db_search.jinx +0 -44
- npcsh-1.1.17.data/data/npcsh/npc_team/file_search.jinx +0 -94
- npcsh-1.1.17.data/data/npcsh/npc_team/jinxs.jinx +0 -176
- npcsh-1.1.17.data/data/npcsh/npc_team/kg_search.jinx +0 -96
- npcsh-1.1.17.data/data/npcsh/npc_team/mem_search.jinx +0 -80
- npcsh-1.1.17.data/data/npcsh/npc_team/paper_search.jinx +0 -101
- npcsh-1.1.17.data/data/npcsh/npc_team/semantic_scholar.jinx +0 -69
- npcsh-1.1.17.data/data/npcsh/npc_team/spool.jinx +0 -161
- npcsh-1.1.17.data/data/npcsh/npc_team/sql.jinx +0 -16
- npcsh-1.1.17.data/data/npcsh/npc_team/wander.jinx +0 -242
- npcsh-1.1.17.data/data/npcsh/npc_team/web_search.jinx +0 -51
- npcsh-1.1.17.dist-info/RECORD +0 -219
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/add_tab.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/alicanto.npc +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/alicanto.png +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/browser_action.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/browser_screenshot.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/build.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/chat.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/click.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/close_browser.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/close_pane.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/close_tab.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/cmd.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/compile.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/compress.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/confirm.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/convene.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/corca.npc +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/corca.png +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/corca_example.png +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/delegate.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/edit_file.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/focus_pane.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/frederic.npc +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/frederic4.png +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/guac.npc +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/guac.png +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/help.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/incognide.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/init.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/kadiefa.npc +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/kadiefa.png +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/key_press.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/launch_app.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/list_panes.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/load_file.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/navigate.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/notify.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/nql.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/open_browser.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/open_pane.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/ots.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/paste.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/plonk.npc +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/plonk.png +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/plonkjr.npc +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/plonkjr.png +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/python.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/read_pane.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/roll.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/run_terminal.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/sample.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/screenshot.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/search.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/send_message.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/serve.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/set.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/sh.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/shh.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/sibiji.npc +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/sibiji.png +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/split_pane.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/spool.png +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/switch.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/switch_npc.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/switch_tab.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/switches.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/sync.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/teamviz.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/trigger.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/type_text.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/usage.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/verbose.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/vixynt.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/wait.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/write_file.jinx +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/yap.png +0 -0
- {npcsh-1.1.17.data → npcsh-1.1.18.data}/data/npcsh/npc_team/zen_mode.jinx +0 -0
- {npcsh-1.1.17.dist-info → npcsh-1.1.18.dist-info}/licenses/LICENSE +0 -0
- {npcsh-1.1.17.dist-info → npcsh-1.1.18.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,357 @@
+jinx_name: pti
+description: Pardon-The-Interruption - human-in-the-loop reasoning with think tags and interruptible streaming
+npc: frederic
+inputs:
+  - model: null
+  - provider: null
+  - files: null
+  - reasoning_model: null
+
+steps:
+  - name: pti_repl
+    engine: python
+    code: |
+      import os
+      import sys
+      import re
+      import tty
+      import termios
+      from termcolor import colored
+
+      from npcpy.llm_funcs import get_llm_response
+      from npcpy.npc_sysenv import get_system_message, render_markdown
+      from npcpy.data.load import load_file_contents
+      from npcpy.data.text import rag_search
+
+      npc = context.get('npc')
+      team = context.get('team')
+      messages = context.get('messages', [])
+      files = context.get('files')
+
+      # Resolve npc if it's a string (npc name) rather than NPC object
+      if isinstance(npc, str) and team:
+          npc = team.get(npc) if hasattr(team, 'get') else None
+      elif isinstance(npc, str):
+          npc = None
+
+      # ========== TUI Helper Functions ==========
+      def get_terminal_size():
+          try:
+              size = os.get_terminal_size()
+              return size.columns, size.lines
+          except:
+              return 80, 24
+
+      def extract_thinking(messages):
+          """Extract <think> blocks from assistant messages"""
+          thinking_blocks = []
+          for i, msg in enumerate(messages):
+              if msg.get('role') == 'assistant':
+                  content = msg.get('content', '')
+                  # Find all <think>...</think> blocks
+                  pattern = r'<think>(.*?)</think>'
+                  matches = re.findall(pattern, content, re.DOTALL)
+                  for j, match in enumerate(matches):
+                      thinking_blocks.append({
+                          'msg_idx': i,
+                          'block_idx': j,
+                          'content': match.strip()
+                      })
+          return thinking_blocks
+
+      def thinking_tui_browser(messages):
+          """Interactive TUI browser for thinking steps"""
+          thinking = extract_thinking(messages)
+
+          if not thinking:
+              print(colored("No thinking blocks found. Use <think> tags to show reasoning.", "yellow"))
+              return
+
+          width, height = get_terminal_size()
+          selected = 0
+          scroll = 0
+          list_height = height - 5
+          mode = 'list'
+          preview_scroll = 0
+          preview_lines = []
+
+          fd = sys.stdin.fileno()
+          old_settings = termios.tcgetattr(fd)
+
+          try:
+              tty.setcbreak(fd)
+              sys.stdout.write('\033[?25l')
+              sys.stdout.write('\033[2J\033[H')
+
+              while True:
+                  width, height = get_terminal_size()
+                  list_height = height - 5
+
+                  if mode == 'list':
+                      if selected < scroll:
+                          scroll = selected
+                      elif selected >= scroll + list_height:
+                          scroll = selected - list_height + 1
+
+                  sys.stdout.write('\033[H')
+
+                  # Header
+                  if mode == 'list':
+                      header = f" PTI THINKING STEPS ({len(thinking)} blocks) "
+                  else:
+                      header = f" THINKING BLOCK {selected + 1} "
+                  sys.stdout.write(f'\033[43;30;1m{header.ljust(width)}\033[0m\n')
+
+                  if mode == 'list':
+                      col_header = f' {"#":<4} {"MSG":<5} {"PREVIEW":<65}'
+                      sys.stdout.write(f'\033[90m{col_header[:width]}\033[0m\n')
+                  else:
+                      sys.stdout.write(f'\033[90m{"─" * width}\033[0m\n')
+
+                  if mode == 'list':
+                      for i in range(list_height):
+                          idx = scroll + i
+                          sys.stdout.write(f'\033[{3+i};1H\033[K')
+                          if idx >= len(thinking):
+                              continue
+
+                          t = thinking[idx]
+                          msg_num = t['msg_idx'] + 1
+                          preview = t['content'][:65].replace('\n', ' ')
+
+                          line = f" {idx+1:<4} {msg_num:<5} {preview}"
+                          line = line[:width-1]
+
+                          if idx == selected:
+                              sys.stdout.write(f'\033[47;30;1m>{line.ljust(width-2)}\033[0m')
+                          else:
+                              sys.stdout.write(f'\033[33m{line}\033[0m')
+
+                      # Status bar
+                      sys.stdout.write(f'\033[{height-2};1H\033[K\033[90m{"─" * width}\033[0m')
+                      t = thinking[selected] if thinking else {}
+                      word_count = len(t.get('content', '').split())
+                      sys.stdout.write(f'\033[{height-1};1H\033[K Words: {word_count}'.ljust(width)[:width])
+                      sys.stdout.write(f'\033[{height};1H\033[K\033[43;30m j/k:Nav p:View Enter:Expand q:Quit [{selected+1}/{len(thinking)}] \033[0m')
+
+                  else: # preview mode
+                      for i in range(list_height):
+                          idx = preview_scroll + i
+                          sys.stdout.write(f'\033[{3+i};1H\033[K')
+                          if idx < len(preview_lines):
+                              sys.stdout.write(preview_lines[idx][:width-1])
+
+                      sys.stdout.write(f'\033[{height-2};1H\033[K\033[90m{"─" * width}\033[0m')
+                      sys.stdout.write(f'\033[{height-1};1H\033[K [{preview_scroll+1}/{len(preview_lines)} lines]')
+                      sys.stdout.write(f'\033[{height};1H\033[K\033[43;30m j/k:Scroll b:Back q:Quit \033[0m')
+
+                  sys.stdout.flush()
+
+                  c = sys.stdin.read(1)
+
+                  if c == '\x1b':
+                      c2 = sys.stdin.read(1)
+                      if c2 == '[':
+                          c3 = sys.stdin.read(1)
+                          if c3 == 'A': # Up
+                              if mode == 'list' and selected > 0:
+                                  selected -= 1
+                              elif mode == 'preview' and preview_scroll > 0:
+                                  preview_scroll -= 1
+                          elif c3 == 'B': # Down
+                              if mode == 'list' and selected < len(thinking) - 1:
+                                  selected += 1
+                              elif mode == 'preview' and preview_scroll < max(0, len(preview_lines) - list_height):
+                                  preview_scroll += 1
+                      else:
+                          if mode == 'preview':
+                              mode = 'list'
+                              sys.stdout.write('\033[2J\033[H')
+                          else:
+                              return
+                      continue
+
+                  if c == 'q' or c == '\x03':
+                      return
+                  elif c == 'k':
+                      if mode == 'list' and selected > 0:
+                          selected -= 1
+                      elif mode == 'preview' and preview_scroll > 0:
+                          preview_scroll -= 1
+                  elif c == 'j':
+                      if mode == 'list' and selected < len(thinking) - 1:
+                          selected += 1
+                      elif mode == 'preview' and preview_scroll < max(0, len(preview_lines) - list_height):
+                          preview_scroll += 1
+                  elif c in ('p', '\r', '\n') and mode == 'list':
+                      # Preview thinking block
+                      t = thinking[selected]
+                      preview_str = f"Thinking Block {selected + 1}\n"
+                      preview_str += f"From message {t['msg_idx'] + 1}\n"
+                      preview_str += f"{'=' * 40}\n\n"
+                      preview_str += t['content']
+                      preview_lines = preview_str.split('\n')
+                      mode = 'preview'
+                      preview_scroll = 0
+                      sys.stdout.write('\033[2J\033[H')
+                  elif c == 'b' and mode == 'preview':
+                      mode = 'list'
+                      sys.stdout.write('\033[2J\033[H')
+
+          finally:
+              termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
+              sys.stdout.write('\033[?25h')
+              sys.stdout.write('\033[2J\033[H')
+              sys.stdout.flush()
+
+      # PTI uses reasoning model for deeper thinking
+      # Handle case where npc might be a string (npc name) or NPC object
+      model = context.get('reasoning_model') or context.get('model') or (npc.model if npc and hasattr(npc, 'model') else None)
+      provider = context.get('provider') or (npc.provider if npc and hasattr(npc, 'provider') else None)
+
+      print("""
+      ██████╗ ████████╗██╗
+      ██╔══██╗╚══██╔══╝██║
+      ██████╔╝   ██║   ██║
+      ██╔═══╝    ██║   ██║
+      ██║        ██║   ██║
+      ╚═╝        ╚═╝   ╚═╝
+
+      Pardon-The-Interruption
+      Human-in-the-loop reasoning mode
+      """)
+
+      npc_name = npc.name if npc else "pti"
+      print(f"Entering PTI mode (NPC: {npc_name}). Type '/pq' to exit.")
+      print(" - AI will use <think> tags for step-by-step reasoning")
+      print(" - Use <request_for_input> to pause and ask questions")
+      print(" - Ctrl+C interrupts stream for immediate feedback")
+      print(" - /thinking: Browse thinking steps")
+
+      # Load files if provided
+      loaded_content = {}
+      if files:
+          if isinstance(files, str):
+              files = [f.strip() for f in files.split(',')]
+          for file_path in files:
+              file_path = os.path.expanduser(file_path)
+              if os.path.exists(file_path):
+                  try:
+                      chunks = load_file_contents(file_path)
+                      loaded_content[file_path] = "\n".join(chunks)
+                      print(colored(f"Loaded: {file_path}", "green"))
+                  except Exception as e:
+                      print(colored(f"Error loading {file_path}: {e}", "red"))
+
+      # System message for PTI mode
+      pti_system = """You are an AI assistant in PTI (Pardon-The-Interruption) mode.
+
+      IMPORTANT INSTRUCTIONS:
+      1. Think step-by-step using <think>...</think> tags to show your reasoning
+      2. When you need more information from the user, use <request_for_input>your question</request_for_input>
+      3. Be thorough but concise in your reasoning
+      4. The user can interrupt at any time to provide guidance
+
+      Example:
+      <think>
+      Let me break this down...
+      Step 1: First I need to understand X
+      Step 2: Then consider Y
+      </think>
+
+      <request_for_input>
+      I notice you mentioned Z. Could you clarify what you mean by that?
+      </request_for_input>"""
+
+      if not messages or messages[0].get("role") != "system":
+          messages.insert(0, {"role": "system", "content": pti_system})
+
+      # REPL loop
+      user_input = None
+      while True:
+          try:
+              if not user_input:
+                  prompt_str = f"{npc_name}:pti> "
+                  user_input = input(prompt_str).strip()
+
+                  if not user_input:
+                      user_input = None
+                      continue
+
+              if user_input.lower() == "/pq":
+                  print("Exiting PTI mode.")
+                  break
+
+              # Handle /thinking to browse thinking steps
+              if user_input.lower() == "/thinking":
+                  thinking_tui_browser(messages)
+                  user_input = None
+                  continue
+
+              # Build prompt with file context
+              prompt_for_llm = user_input
+              if loaded_content:
+                  context_str = "\n".join([f"--- {fname} ---\n{content}" for fname, content in loaded_content.items()])
+                  prompt_for_llm += f"\n\nContext:\n{context_str}"
+
+              prompt_for_llm += "\n\nThink step-by-step using <think> tags. Use <request_for_input> when you need clarification."
+
+              messages.append({"role": "user", "content": user_input})
+
+              try:
+                  resp = get_llm_response(
+                      prompt_for_llm,
+                      model=model,
+                      provider=provider,
+                      messages=messages[:-1], # Don't duplicate the user message
+                      stream=True,
+                      npc=npc
+                  )
+
+                  response_stream = resp.get('response')
+                  full_response = ""
+                  request_found = False
+
+                  # Stream the response
+                  for chunk in response_stream:
+                      chunk_content = ""
+                      if hasattr(chunk, 'choices') and chunk.choices:
+                          delta = chunk.choices[0].delta
+                          if hasattr(delta, 'content') and delta.content:
+                              chunk_content = delta.content
+                      elif isinstance(chunk, dict):
+                          chunk_content = chunk.get("message", {}).get("content", "")
+
+                      if chunk_content:
+                          print(chunk_content, end='', flush=True)
+                          full_response += chunk_content
+
+                          # Check for request_for_input
+                          if "</request_for_input>" in full_response:
+                              request_found = True
+                              break
+
+                  print() # newline after stream
+
+                  messages.append({"role": "assistant", "content": full_response})
+                  user_input = None # Reset for next iteration
+
+              except KeyboardInterrupt:
+                  print(colored("\n\n--- Interrupted ---", "yellow"))
+                  interrupt_input = input("Your feedback: ").strip()
+                  if interrupt_input:
+                      user_input = interrupt_input
+                  else:
+                      user_input = None
+                  continue
+
+          except KeyboardInterrupt:
+              print("\nUse '/pq' to exit or continue.")
+              user_input = None
+              continue
+          except EOFError:
+              print("\nExiting PTI mode.")
+              break
+
+      context['output'] = "Exited PTI mode."
+      context['messages'] = messages
@@ -0,0 +1,291 @@
+jinx_name: reattach
+description: Interactive viewer to browse and reattach to previous conversations
+inputs:
+  - path: ""
+  - all: "false"
+
+steps:
+  - name: launch_viewer
+    engine: python
+    code: |
+      import os
+      import sys
+      import tty
+      import termios
+      from datetime import datetime
+      from sqlalchemy import create_engine, text
+
+      def get_terminal_size():
+          try:
+              size = os.get_terminal_size()
+              return size.columns, size.lines
+          except:
+              return 80, 24
+
+      def format_ts(ts):
+          if not ts:
+              return 'unknown'
+          try:
+              if 'T' in str(ts):
+                  dt = datetime.fromisoformat(str(ts).replace('Z', '+00:00'))
+              else:
+                  dt = datetime.strptime(str(ts)[:19], '%Y-%m-%d %H:%M:%S')
+              now = datetime.now()
+              diff = now - dt.replace(tzinfo=None)
+              if diff.days == 0:
+                  return f"Today {dt.strftime('%H:%M')}"
+              elif diff.days == 1:
+                  return f"Yesterday {dt.strftime('%H:%M')}"
+              elif diff.days < 7:
+                  return dt.strftime('%a %H:%M')
+              else:
+                  return dt.strftime('%b %d')
+          except:
+              return str(ts)[:16]
+
+      filter_path = context.get('path', '').strip()
+      show_all = context.get('all', '').lower() in ('true', '1', 'yes')
+      db_path = os.getenv("NPCSH_DB_PATH", os.path.expanduser("~/npcsh_history.db"))
+
+      if not filter_path:
+          filter_path = os.getcwd()
+      filter_path = os.path.abspath(os.path.expanduser(filter_path))
+
+      engine = create_engine(f'sqlite:///{db_path}')
+
+      with engine.connect() as conn:
+          if show_all:
+              result = conn.execute(text("""
+                  SELECT conversation_id, directory_path,
+                         MIN(timestamp) as started,
+                         MAX(timestamp) as last_msg,
+                         COUNT(*) as msg_count,
+                         GROUP_CONCAT(DISTINCT npc) as npcs,
+                         GROUP_CONCAT(DISTINCT model) as models,
+                         GROUP_CONCAT(DISTINCT provider) as providers,
+                         COALESCE(SUM(input_tokens), 0) as total_input_tokens,
+                         COALESCE(SUM(output_tokens), 0) as total_output_tokens,
+                         COALESCE(SUM(CAST(cost AS REAL)), 0) as total_cost
+                  FROM conversation_history
+                  GROUP BY conversation_id
+                  ORDER BY last_msg DESC
+              """))
+              target_path = "ALL PATHS"
+          else:
+              result = conn.execute(text("""
+                  SELECT conversation_id, directory_path,
+                         MIN(timestamp) as started,
+                         MAX(timestamp) as last_msg,
+                         COUNT(*) as msg_count,
+                         GROUP_CONCAT(DISTINCT npc) as npcs,
+                         GROUP_CONCAT(DISTINCT model) as models,
+                         GROUP_CONCAT(DISTINCT provider) as providers,
+                         COALESCE(SUM(input_tokens), 0) as total_input_tokens,
+                         COALESCE(SUM(output_tokens), 0) as total_output_tokens,
+                         COALESCE(SUM(CAST(cost AS REAL)), 0) as total_cost
+                  FROM conversation_history
+                  WHERE directory_path = :path OR directory_path = :path_slash
+                  GROUP BY conversation_id
+                  ORDER BY last_msg DESC
+              """), {"path": filter_path, "path_slash": filter_path + "/"})
+              target_path = filter_path
+
+          convos = [dict(row._mapping) for row in result.fetchall()]
+
+      if not convos:
+          context['output'] = f"No conversations for: {target_path}"
+      else:
+          width, height = get_terminal_size()
+          selected = 0
+          scroll = 0
+          list_height = height - 5
+          mode = 'list'
+          preview_msgs = []
+          preview_scroll = 0
+
+          fd = sys.stdin.fileno()
+          old_settings = termios.tcgetattr(fd)
+
+          try:
+              tty.setcbreak(fd)
+              sys.stdout.write('\033[?25l')
+              sys.stdout.write('\033[2J\033[H')
+
+              while True:
+                  if mode == 'list':
+                      if selected < scroll:
+                          scroll = selected
+                      elif selected >= scroll + list_height:
+                          scroll = selected - list_height + 1
+
+                  sys.stdout.write('\033[H')
+                  if mode == 'list':
+                      header = f" REATTACH ({len(convos)} convos): {target_path[:width-30]} "
+                  else:
+                      header = f" PREVIEW: {convos[selected]['conversation_id'][:width-12]} "
+                  sys.stdout.write(f'\033[44;37;1m{header.ljust(width)}\033[0m\n')
+                  sys.stdout.write(f'\033[90m{"─" * width}\033[0m\n')
+
+                  if mode == 'list':
+                      for i in range(list_height):
+                          idx = scroll + i
+                          sys.stdout.write(f'\033[{3+i};1H\033[K')
+                          if idx >= len(convos):
+                              continue
+
+                          c = convos[idx]
+                          cid = c['conversation_id'][:12]
+                          msgs = c['msg_count']
+                          ts = format_ts(c['last_msg'])
+                          npcs = (c['npcs'] or 'default')[:10]
+                          models = (c.get('models') or '')
+                          # Shorten model names
+                          if models:
+                              short_models = []
+                              for m in models.split(',')[:2]:
+                                  m = m.strip()
+                                  if 'gpt-4' in m: short_models.append('gpt4')
+                                  elif 'gpt-3' in m: short_models.append('gpt3')
+                                  elif 'claude-3-5-sonnet' in m: short_models.append('sonnet')
+                                  elif 'claude-3-5-haiku' in m: short_models.append('haiku')
+                                  elif 'claude-3-opus' in m: short_models.append('opus')
+                                  elif 'claude' in m: short_models.append('claude')
+                                  elif 'gemini' in m: short_models.append('gemini')
+                                  elif m: short_models.append(m[:8])
+                              models = ','.join(short_models)[:12]
+                          else:
+                              models = '-'
+
+                          line = f" {cid:<14} {msgs:>3} msgs {ts:<12} {npcs:<10} {models:<12}"
+                          line = line[:width-2].ljust(width-1)
+
+                          if idx == selected:
+                              sys.stdout.write(f'\033[47;30;1m>{line}\033[0m')
+                          else:
+                              sys.stdout.write(f' {line}')
+
+                      sys.stdout.write(f'\033[{height-2};1H\033[K\033[90m{"─" * width}\033[0m')
+                      sel = convos[selected]
+                      sel_model = (sel.get('models') or '-')[:20]
+                      in_tok = sel.get('total_input_tokens', 0)
+                      out_tok = sel.get('total_output_tokens', 0)
+                      cost = sel.get('total_cost', 0)
+                      cost_str = f"${cost:.4f}" if cost else "-"
+                      tok_str = f"{in_tok}in/{out_tok}out" if (in_tok or out_tok) else "-"
+                      sys.stdout.write(f'\033[{height-1};1H\033[K {sel["conversation_id"][:16]} {sel_model} tokens:{tok_str} cost:{cost_str}'.ljust(width))
+                      sys.stdout.write(f'\033[{height};1H\033[K\033[44;37m j/k:Nav Enter:Select p:Preview q:Quit [{selected+1}/{len(convos)}] \033[0m')
+                  else:
+                      for i in range(list_height):
+                          idx = preview_scroll + i
+                          sys.stdout.write(f'\033[{3+i};1H\033[K')
+                          if idx >= len(preview_msgs):
+                              continue
+
+                          m = preview_msgs[idx]
+                          role = m.get('role', '?')
+                          content = (m.get('content') or '')[:200].replace('\n', ' ')
+                          model = m.get('model') or ''
+                          in_tok = m.get('input_tokens')
+                          out_tok = m.get('output_tokens')
+
+                          if role == 'user':
+                              prefix = '\033[32;1mYou:\033[0m '
+                          elif role == 'assistant':
+                              # Shorten model name for display
+                              short_model = ''
+                              if model:
+                                  if 'gpt-4' in model: short_model = 'gpt4'
+                                  elif 'gpt-3' in model: short_model = 'gpt3'
+                                  elif 'claude-3-5-sonnet' in model: short_model = 'sonnet'
+                                  elif 'claude-3-5-haiku' in model: short_model = 'haiku'
+                                  elif 'claude-3-opus' in model: short_model = 'opus'
+                                  elif 'claude' in model: short_model = 'claude'
+                                  elif 'gemini' in model: short_model = 'gemini'
+                                  else: short_model = model[:10]
+                              tok_info = ''
+                              if in_tok or out_tok:
+                                  tok_info = f' [{in_tok or 0}/{out_tok or 0}]'
+                              if short_model:
+                                  prefix = f'\033[34;1mAI({short_model}{tok_info}):\033[0m '
+                              else:
+                                  prefix = f'\033[34;1mAI{tok_info}:\033[0m '
+                          else:
+                              prefix = f'\033[90m{role}:\033[0m '
+
+                          sys.stdout.write(f'{prefix}{content[:width-len(prefix)+8]}')
+
+                      sys.stdout.write(f'\033[{height-2};1H\033[K\033[90m{"─" * width}\033[0m')
+                      sys.stdout.write(f'\033[{height-1};1H\033[K {len(preview_msgs)} messages')
+                      sys.stdout.write(f'\033[{height};1H\033[K\033[44;37m j/k:Scroll b:Back Enter:Select q:Quit \033[0m')
+
+                  sys.stdout.flush()
+
+                  c = sys.stdin.read(1)
+
+                  if c == '\x1b':
+                      c2 = sys.stdin.read(1)
+                      if c2 == '[':
+                          c3 = sys.stdin.read(1)
+                          if c3 == 'A': # Up
+                              if mode == 'list' and selected > 0:
+                                  selected -= 1
+                              elif mode == 'preview' and preview_scroll > 0:
+                                  preview_scroll -= 1
+                          elif c3 == 'B': # Down
+                              if mode == 'list' and selected < len(convos) - 1:
+                                  selected += 1
+                              elif mode == 'preview' and preview_scroll < max(0, len(preview_msgs) - list_height):
+                                  preview_scroll += 1
+                      else:
+                          if mode == 'preview':
+                              mode = 'list'
+                              sys.stdout.write('\033[2J\033[H')
+                          else:
+                              context['output'] = "Cancelled."
+                              break
+                      continue
+
+                  if c == 'q' or c == '\x03':
+                      context['output'] = "Cancelled."
+                      break
+                  elif c == 'k':
+                      if mode == 'list' and selected > 0:
+                          selected -= 1
+                      elif mode == 'preview' and preview_scroll > 0:
+                          preview_scroll -= 1
+                  elif c == 'j':
+                      if mode == 'list' and selected < len(convos) - 1:
+                          selected += 1
+                      elif mode == 'preview' and preview_scroll < max(0, len(preview_msgs) - list_height):
+                          preview_scroll += 1
+                  elif c == 'p' and mode == 'list':
+                      cid = convos[selected]['conversation_id']
+                      with engine.connect() as conn:
+                          result = conn.execute(text("""
+                              SELECT role, content, timestamp, npc, model, provider,
+                                     input_tokens, output_tokens, cost
+                              FROM conversation_history
+                              WHERE conversation_id = :cid
+                              ORDER BY timestamp ASC
+                          """), {"cid": cid})
+                          preview_msgs = [dict(row._mapping) for row in result.fetchall()]
+                      preview_scroll = 0
+                      mode = 'preview'
+                      sys.stdout.write('\033[2J\033[H')
+                  elif c == 'b' and mode == 'preview':
+                      mode = 'list'
+                      sys.stdout.write('\033[2J\033[H')
+                  elif c in ('\r', '\n'):
+                      cid = convos[selected]['conversation_id']
+                      if 'state' in dir() and state is not None:
+                          state.conversation_id = cid
+                          context['output'] = f"Reattached to: {cid}"
+                      else:
+                          context['output'] = f"Selected: {cid}\n\nRun: /set conversation_id={cid}"
+                      break
+
+          finally:
+              termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
+              sys.stdout.write('\033[?25h')
+              sys.stdout.write('\033[2J\033[H')
+              sys.stdout.flush()