npcsh-1.1.12-py3-none-any.whl → npcsh-1.1.14-py3-none-any.whl
This diff shows the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
- npcsh/_state.py +700 -377
- npcsh/alicanto.py +54 -1153
- npcsh/completion.py +206 -0
- npcsh/config.py +163 -0
- npcsh/corca.py +35 -1462
- npcsh/execution.py +185 -0
- npcsh/guac.py +31 -1986
- npcsh/npc_team/jinxs/code/sh.jinx +11 -15
- npcsh/npc_team/jinxs/modes/alicanto.jinx +186 -80
- npcsh/npc_team/jinxs/modes/corca.jinx +243 -22
- npcsh/npc_team/jinxs/modes/guac.jinx +313 -42
- npcsh/npc_team/jinxs/modes/plonk.jinx +209 -48
- npcsh/npc_team/jinxs/modes/pti.jinx +167 -25
- npcsh/npc_team/jinxs/modes/spool.jinx +158 -37
- npcsh/npc_team/jinxs/modes/wander.jinx +179 -74
- npcsh/npc_team/jinxs/modes/yap.jinx +258 -21
- npcsh/npc_team/jinxs/utils/chat.jinx +39 -12
- npcsh/npc_team/jinxs/utils/cmd.jinx +44 -0
- npcsh/npc_team/jinxs/utils/search.jinx +3 -3
- npcsh/npc_team/jinxs/utils/usage.jinx +33 -0
- npcsh/npcsh.py +76 -20
- npcsh/parsing.py +118 -0
- npcsh/plonk.py +41 -329
- npcsh/pti.py +41 -201
- npcsh/spool.py +34 -239
- npcsh/ui.py +199 -0
- npcsh/wander.py +54 -542
- npcsh/yap.py +38 -570
- npcsh-1.1.14.data/data/npcsh/npc_team/alicanto.jinx +194 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/chat.jinx +44 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/cmd.jinx +44 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/corca.jinx +249 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/guac.jinx +317 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/plonk.jinx +214 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/pti.jinx +170 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/search.jinx +3 -3
- npcsh-1.1.14.data/data/npcsh/npc_team/sh.jinx +34 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/spool.jinx +161 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/usage.jinx +33 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/wander.jinx +186 -0
- npcsh-1.1.14.data/data/npcsh/npc_team/yap.jinx +262 -0
- {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/METADATA +1 -1
- npcsh-1.1.14.dist-info/RECORD +135 -0
- npcsh-1.1.12.data/data/npcsh/npc_team/alicanto.jinx +0 -88
- npcsh-1.1.12.data/data/npcsh/npc_team/chat.jinx +0 -17
- npcsh-1.1.12.data/data/npcsh/npc_team/corca.jinx +0 -28
- npcsh-1.1.12.data/data/npcsh/npc_team/guac.jinx +0 -46
- npcsh-1.1.12.data/data/npcsh/npc_team/plonk.jinx +0 -53
- npcsh-1.1.12.data/data/npcsh/npc_team/pti.jinx +0 -28
- npcsh-1.1.12.data/data/npcsh/npc_team/sh.jinx +0 -38
- npcsh-1.1.12.data/data/npcsh/npc_team/spool.jinx +0 -40
- npcsh-1.1.12.data/data/npcsh/npc_team/wander.jinx +0 -81
- npcsh-1.1.12.data/data/npcsh/npc_team/yap.jinx +0 -25
- npcsh-1.1.12.dist-info/RECORD +0 -126
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/agent.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/alicanto.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/alicanto.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/build.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/compile.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/compress.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/corca.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/corca.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/corca_example.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/edit_file.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/foreman.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/frederic.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/frederic4.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/guac.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/help.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/init.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/jinxs.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/kadiefa.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/kadiefa.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/load_file.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/npc-studio.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/ots.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonk.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonk.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonkjr.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonkjr.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/python.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/roll.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sample.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/serve.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/set.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sibiji.npc +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sibiji.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sleep.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/spool.png +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sql.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/trigger.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/vixynt.jinx +0 -0
- {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/yap.png +0 -0
- {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/WHEEL +0 -0
- {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/entry_points.txt +0 -0
- {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/licenses/LICENSE +0 -0
- {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/top_level.txt +0 -0
npcsh/_state.py
CHANGED
@@ -1,22 +1,13 @@
-
-
+# Standard library imports
+import atexit
 from dataclasses import dataclass, field
+from datetime import datetime
 import filecmp
+import inspect
+import logging
 import os
 from pathlib import Path
 import platform
-try:
-    import pty
-    import tty
-
-    import termios
-
-    import readline
-except:
-    readline = None
-    pty = None
-    tty = None
-
 import re
 import select
 import shlex
@@ -26,128 +17,121 @@ import sqlite3
 import subprocess
 import sys
 import time
-from typing import Dict, List, Any, Tuple, Union, Optional, Callable
-import logging
 import textwrap
-from
-
-    start_new_conversation,
-)
-from npcpy.npc_compiler import NPC, Team
-
-
-from npcpy.memory.command_history import CommandHistory
+from typing import Dict, List, Any, Tuple, Union, Optional, Callable
+import yaml
 
+# Setup debug logging if NPCSH_DEBUG is set
+def _setup_debug_logging():
+    if os.environ.get("NPCSH_DEBUG", "0") == "1":
+        logging.basicConfig(
+            level=logging.DEBUG,
+            format='%(asctime)s [%(name)s] %(levelname)s: %(message)s',
+            datefmt='%H:%M:%S'
+        )
+        # Set specific loggers to DEBUG
+        logging.getLogger("npcsh.state").setLevel(logging.DEBUG)
+        logging.getLogger("npcpy.llm_funcs").setLevel(logging.DEBUG)
+        logging.getLogger("npcsh.state").debug("Debug logging enabled via NPCSH_DEBUG=1")
 
+_setup_debug_logging()
 
-
-import sys
-import atexit
-import subprocess
-import shlex
-import re
-from datetime import datetime
-import importlib.metadata
-import textwrap
-from typing import Optional, List, Dict, Any, Tuple, Union
-from dataclasses import dataclass, field
-import platform
+# Platform-specific imports
 try:
-
-
-
+    import pty
+    import tty
+    import termios
+    import readline
+except ImportError:
+    readline = None
+    pty = None
+    tty = None
+    termios = None
 
+# Optional dependencies
 try:
     import chromadb
 except ImportError:
     chromadb = None
-import shutil
-import sqlite3
-import yaml
-
 
-
-
-
-
-    get_locally_available_models,
-    lookup_provider
-)
+# Third-party imports
+from colorama import Fore, Back, Style
+from litellm import RateLimitError
+from termcolor import colored
 
-
-
-
-
-    save_kg_to_db,
-)
-from npcpy.npc_compiler import NPC, Team, load_jinxs_from_directory, build_jinx_tool_catalog
+# npcpy imports
+from npcpy.data.load import load_file_contents
+from npcpy.data.web import search_web
+from npcpy.gen.embeddings import get_embeddings
 from npcpy.llm_funcs import (
     check_llm_command,
     get_llm_response,
     execute_llm_command,
-    breathe,
-
+    breathe,
 )
-from npcpy.
-
-
-
-
+from npcpy.memory.command_history import (
+    CommandHistory,
+    start_new_conversation,
+    save_conversation_message,
+    load_kg_from_db,
+    save_kg_to_db,
 )
-from npcpy.
-
-import inspect
-import sys
+from npcpy.memory.knowledge_graph import kg_evolve_incremental
 from npcpy.memory.search import execute_rag_command, execute_brainblast_command
-from npcpy.
-from npcpy.
-
-
-
-
-
-try:
-    VERSION = importlib.metadata.version("npcsh")
-except importlib.metadata.PackageNotFoundError:
-    VERSION = "unknown"
-
-
-from litellm import RateLimitError
-
-
-NPCSH_CHAT_MODEL = os.environ.get("NPCSH_CHAT_MODEL", "gemma3:4b")
-
-NPCSH_CHAT_PROVIDER = os.environ.get("NPCSH_CHAT_PROVIDER", "ollama")
-
-NPCSH_DB_PATH = os.path.expanduser(
-    os.environ.get("NPCSH_DB_PATH", "~/npcsh_history.db")
-)
-NPCSH_VECTOR_DB_PATH = os.path.expanduser(
-    os.environ.get("NPCSH_VECTOR_DB_PATH", "~/npcsh_chroma.db")
+from npcpy.npc_compiler import NPC, Team, load_jinxs_from_directory, build_jinx_tool_catalog
+from npcpy.npc_sysenv import (
+    print_and_process_stream_with_markdown,
+    render_markdown,
+    get_model_and_provider,
+    get_locally_available_models,
+    lookup_provider
 )
+from npcpy.tools import auto_tools
 
-
-
-
-
-
-
+# Local module imports
+from .config import (
+    VERSION,
+    DEFAULT_NPC_TEAM_PATH,
+    PROJECT_NPC_TEAM_PATH,
+    HISTORY_DB_DEFAULT_PATH,
+    READLINE_HISTORY_FILE,
+    NPCSH_CHAT_MODEL,
+    NPCSH_CHAT_PROVIDER,
+    NPCSH_DB_PATH,
+    NPCSH_VECTOR_DB_PATH,
+    NPCSH_DEFAULT_MODE,
+    NPCSH_VISION_MODEL,
+    NPCSH_VISION_PROVIDER,
+    NPCSH_IMAGE_GEN_MODEL,
+    NPCSH_IMAGE_GEN_PROVIDER,
+    NPCSH_VIDEO_GEN_MODEL,
+    NPCSH_VIDEO_GEN_PROVIDER,
+    NPCSH_EMBEDDING_MODEL,
+    NPCSH_EMBEDDING_PROVIDER,
+    NPCSH_REASONING_MODEL,
+    NPCSH_REASONING_PROVIDER,
+    NPCSH_STREAM_OUTPUT,
+    NPCSH_API_URL,
+    NPCSH_SEARCH_PROVIDER,
+    NPCSH_BUILD_KG,
+    setup_npcsh_config,
+    is_npcsh_initialized,
+    set_npcsh_initialized,
+    set_npcsh_config_value,
 )
-
-
-
+from .ui import SpinnerContext, orange, get_file_color, format_file_listing, wrap_text
+from .parsing import split_by_pipes, parse_command_safely, parse_generic_command_flags
+from .execution import (
+    TERMINAL_EDITORS,
+    INTERACTIVE_COMMANDS as interactive_commands,
+    validate_bash_command,
+    handle_bash_command,
+    handle_cd_command,
+    handle_interactive_command,
+    open_terminal_editor,
+    list_directory,
 )
-
-
-NPCSH_EMBEDDING_MODEL = os.environ.get("NPCSH_EMBEDDING_MODEL", "nomic-embed-text")
-NPCSH_EMBEDDING_PROVIDER = os.environ.get("NPCSH_EMBEDDING_PROVIDER", "ollama")
-NPCSH_REASONING_MODEL = os.environ.get("NPCSH_REASONING_MODEL", "deepseek-r1")
-NPCSH_REASONING_PROVIDER = os.environ.get("NPCSH_REASONING_PROVIDER", "ollama")
-NPCSH_STREAM_OUTPUT = eval(os.environ.get("NPCSH_STREAM_OUTPUT", "0")) == 1
-NPCSH_API_URL = os.environ.get("NPCSH_API_URL", None)
-NPCSH_SEARCH_PROVIDER = os.environ.get("NPCSH_SEARCH_PROVIDER", "duckduckgo")
-NPCSH_BUILD_KG = os.environ.get("NPCSH_BUILD_KG") == "1"
-READLINE_HISTORY_FILE = os.path.expanduser("~/.npcsh_history")
+from .completion import setup_readline, save_readline_history, make_completer, get_slash_commands
 
 
 
@@ -178,7 +162,12 @@ class ShellState:
     current_path: str = field(default_factory=os.getcwd)
     stream_output: bool = NPCSH_STREAM_OUTPUT
     attachments: Optional[List[Any]] = None
-    turn_count: int =0
+    turn_count: int = 0
+    # Token usage tracking
+    session_input_tokens: int = 0
+    session_output_tokens: int = 0
+    session_cost_usd: float = 0.0
+
     def get_model_for_command(self, model_type: str = "chat"):
         if model_type == "chat":
             return self.chat_model, self.chat_provider
@@ -864,12 +853,7 @@ BASH_COMMANDS = [
 ]
 
 
-interactive_commands
-    "ipython": ["ipython"],
-    "python": ["python", "-i"],
-    "sqlite3": ["sqlite3"],
-    "r": ["R", "--interactive"],
-}
+# interactive_commands imported from .execution
 
 
 def start_interactive_session(command: str) -> int:
@@ -1213,13 +1197,8 @@ def save_readline_history():
 
 
 
-
-EMBEDDINGS_DB_PATH =
-HISTORY_DB_DEFAULT_PATH = os.path.expanduser("~/npcsh_history.db")
-READLINE_HISTORY_FILE = os.path.expanduser("~/.npcsh_readline_history")
-DEFAULT_NPC_TEAM_PATH = os.path.expanduser("~/.npcsh/npc_team/")
-PROJECT_NPC_TEAM_PATH = "./npc_team/"
-
+# ChromaDB client (lazy init)
+EMBEDDINGS_DB_PATH = NPCSH_VECTOR_DB_PATH
 
 try:
     chroma_client = chromadb.PersistentClient(path=EMBEDDINGS_DB_PATH) if chromadb else None
@@ -1467,15 +1446,17 @@ def open_terminal_editor(command: str) -> str:
     except Exception as e:
         return f"Error opening terminal editor: {e}"
 
-def get_multiline_input(prompt: str) -> str:
+def get_multiline_input(prompt: str, state=None, router=None, token_hint: str = "") -> str:
+    """Get input with hint line below prompt."""
    lines = []
    current_prompt = prompt
    while True:
        try:
-            line =
+            line = _input_with_hint_below(current_prompt, state, router, token_hint)
            if line.endswith("\\"):
                lines.append(line[:-1])
-                current_prompt =
+                current_prompt = "> "
+                token_hint = ""
            else:
                lines.append(line)
                break
@@ -1484,6 +1465,295 @@ def get_multiline_input(prompt: str) -> str:
            sys.exit(0)
    return "\n".join(lines)
 
+
+def _input_with_hint_below(prompt: str, state=None, router=None, token_hint: str = "") -> str:
+    """Custom input with hint displayed below. Arrow keys work for history."""
+    try:
+        import termios
+        import tty
+        import readline
+    except ImportError:
+        return input(prompt)
+
+    if not sys.stdin.isatty():
+        return input(prompt)
+
+    # Get history from readline
+    hist_len = readline.get_current_history_length()
+    history = [readline.get_history_item(i) for i in range(1, hist_len + 1)]
+    history_idx = len(history)
+    saved_line = ""
+
+    fd = sys.stdin.fileno()
+    old_settings = termios.tcgetattr(fd)
+
+    buf = ""
+    pos = 0  # cursor position in buf
+
+    # Calculate visible prompt length (strip ANSI codes)
+    import re
+    prompt_visible_len = len(re.sub(r'\x1b\[[0-9;]*m|\x01|\x02', '', prompt))
+
+    def current_hint():
+        if buf.startswith('/') and len(buf) >= 1:
+            h = _get_slash_hints(state, router, buf)
+            return h if h else token_hint
+        elif buf.startswith('@') and len(buf) >= 1:
+            h = _get_npc_hints(state, buf)
+            return h if h else token_hint
+        return token_hint
+
+    # Get terminal width
+    try:
+        import shutil
+        term_width = shutil.get_terminal_size().columns
+    except:
+        term_width = 80
+
+    def draw():
+        # Calculate how many lines the input takes
+        total_len = prompt_visible_len + len(buf)
+        num_lines = (total_len // term_width) + 1
+
+        # Move to start of input (may need to go up multiple lines)
+        # First go to column 0
+        sys.stdout.write('\r')
+        # Move up for each wrapped line we're on
+        cursor_total = prompt_visible_len + pos
+        cursor_line = cursor_total // term_width
+        # Go up to the first line of input
+        for _ in range(num_lines - 1):
+            sys.stdout.write('\033[A')
+
+        # Clear from cursor to end of screen (clears all wrapped lines + hint)
+        sys.stdout.write('\033[J')
+
+        # Print prompt and buffer
+        sys.stdout.write(prompt + buf)
+
+        # Print hint on next line
+        sys.stdout.write('\n\033[K' + current_hint())
+
+        # Now position cursor back to correct spot
+        # Go back up to the line where cursor should be
+        lines_after_cursor = (total_len // term_width) - (cursor_total // term_width) + 1  # +1 for hint line
+        for _ in range(lines_after_cursor):
+            sys.stdout.write('\033[A')
+
+        # Position cursor in correct column
+        cursor_col = cursor_total % term_width
+        sys.stdout.write('\r')
+        if cursor_col > 0:
+            sys.stdout.write('\033[' + str(cursor_col) + 'C')
+
+        sys.stdout.flush()
+
+    # Print prompt and reserve hint line
+    sys.stdout.write(prompt + '\n' + (token_hint or '') + '\033[A\r')
+    if prompt_visible_len > 0:
+        sys.stdout.write('\033[' + str(prompt_visible_len) + 'C')
+    sys.stdout.flush()
+
+    try:
+        tty.setcbreak(fd)
+        while True:
+            c = sys.stdin.read(1)
+
+            if c in ('\n', '\r'):
+                # Clear hint and newline
+                sys.stdout.write('\n\033[K')
+                sys.stdout.flush()
+                if buf.strip():
+                    readline.add_history(buf)
+                return buf
+
+            elif c == '\x1b':  # ESC - could be arrow key
+                c2 = sys.stdin.read(1)
+                if c2 == '[':
+                    c3 = sys.stdin.read(1)
+                    if c3 == 'A':  # Up
+                        if history_idx > 0:
+                            if history_idx == len(history):
+                                saved_line = buf
+                            history_idx -= 1
+                            buf = history[history_idx] or ''
+                            pos = len(buf)
+                            draw()
+                    elif c3 == 'B':  # Down
+                        if history_idx < len(history):
+                            history_idx += 1
+                            buf = saved_line if history_idx == len(history) else (history[history_idx] or '')
+                            pos = len(buf)
+                            draw()
+                    elif c3 == 'C':  # Right
+                        if pos < len(buf):
+                            pos += 1
+                            sys.stdout.write('\033[C')
+                            sys.stdout.flush()
+                    elif c3 == 'D':  # Left
+                        if pos > 0:
+                            pos -= 1
+                            sys.stdout.write('\033[D')
+                            sys.stdout.flush()
+                    elif c3 == '3':  # Del
+                        sys.stdin.read(1)  # ~
+                        if pos < len(buf):
+                            buf = buf[:pos] + buf[pos+1:]
+                            draw()
+                    elif c3 == 'H':  # Home
+                        pos = 0
+                        draw()
+                    elif c3 == 'F':  # End
+                        pos = len(buf)
+                        draw()
+                elif c2 == '\x1b':  # Double ESC
+                    sys.stdout.write('\n\033[K')
+                    sys.stdout.flush()
+                    return '\x1b'
+
+            elif c == '\x7f' or c == '\x08':  # Backspace
+                if pos > 0:
+                    buf = buf[:pos-1] + buf[pos:]
+                    pos -= 1
+                    draw()
+
+            elif c == '\x03':  # Ctrl-C
+                sys.stdout.write('\n\033[K')
+                sys.stdout.flush()
+                raise KeyboardInterrupt
+
+            elif c == '\x04':  # Ctrl-D
+                if not buf:
+                    sys.stdout.write('\n\033[K')
+                    sys.stdout.flush()
+                    raise EOFError
+
+            elif c == '\x01':  # Ctrl-A
+                pos = 0
+                draw()
+
+            elif c == '\x05':  # Ctrl-E
+                pos = len(buf)
+                draw()
+
+            elif c == '\x15':  # Ctrl-U
+                buf = buf[pos:]
+                pos = 0
+                draw()
+
+            elif c == '\x0b':  # Ctrl-K
+                buf = buf[:pos]
+                draw()
+
+            elif c == '\x17':  # Ctrl-W - delete word back
+                while pos > 0 and buf[pos-1] == ' ':
+                    buf = buf[:pos-1] + buf[pos:]
+                    pos -= 1
+                while pos > 0 and buf[pos-1] != ' ':
+                    buf = buf[:pos-1] + buf[pos:]
+                    pos -= 1
+                draw()
+
+            elif c == '\t':  # Tab - do nothing for now
+                pass
+
+            elif c == '\x0f':  # Ctrl-O - show last tool call args
+                try:
+                    import builtins
+                    last_call = getattr(builtins, '_npcsh_last_tool_call', None)
+                    if last_call:
+                        from termcolor import colored
+                        # Save cursor, move down past hint, show args, restore
+                        sys.stdout.write('\n\033[K')  # New line, clear
+                        sys.stdout.write(colored(f"─── {last_call['name']} ───\n", "cyan"))
+                        args = last_call.get('arguments', {})
+                        for k, v in args.items():
+                            v_str = str(v)
+                            # Show with syntax highlighting for code
+                            if '\n' in v_str:
+                                sys.stdout.write(colored(f"{k}:\n", "yellow"))
+                                for line in v_str.split('\n')[:30]:  # Limit lines
+                                    sys.stdout.write(f"  {line}\n")
+                                if v_str.count('\n') > 30:
+                                    sys.stdout.write(colored(f"  ... ({v_str.count(chr(10)) - 30} more lines)\n", "white", attrs=["dark"]))
+                            else:
+                                sys.stdout.write(colored(f"{k}: ", "yellow") + f"{v_str}\n")
+                        sys.stdout.write(colored("─" * 40 + "\n", "cyan"))
+                        # Redraw prompt
+                        sys.stdout.write(prompt)
+                        sys.stdout.write(buf)
+                        sys.stdout.write('\n' + (token_hint or ''))
+                        sys.stdout.write('\033[A\r')
+                        if prompt_visible_len > 0:
+                            sys.stdout.write('\033[' + str(prompt_visible_len + pos) + 'C')
+                        sys.stdout.flush()
+                    else:
+                        pass  # No tool call to show
+                except:
+                    pass
+
+            elif ord(c) >= 32:  # Printable
+                buf = buf[:pos] + c + buf[pos:]
+                pos += 1
+                draw()
+
+    finally:
+        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
+
+
+def _get_slash_hints(state, router, prefix='/') -> str:
+    """Slash command hints - fits terminal width."""
+    cmds = {'help', 'set', 'agent', 'chat', 'cmd', 'sq', 'quit', 'exit', 'clear', 'npc'}
+    if state and state.team and hasattr(state.team, 'jinxs_dict'):
+        cmds.update(state.team.jinxs_dict.keys())
+    if router and hasattr(router, 'jinx_routes'):
+        cmds.update(router.jinx_routes.keys())
+    if len(prefix) > 1:
+        f = prefix[1:].lower()
+        cmds = {c for c in cmds if c.lower().startswith(f)}
+    if cmds:
+        # Get terminal width, default 80
+        try:
+            import shutil
+            term_width = shutil.get_terminal_size().columns
+        except:
+            term_width = 80
+
+        # Build hint string that fits in terminal
+        sorted_cmds = sorted(cmds)
+        hint_parts = []
+        current_len = 2  # leading spaces
+        for c in sorted_cmds:
+            item = '/' + c
+            if current_len + len(item) + 2 > term_width - 5:  # leave margin
+                break
+            hint_parts.append(item)
+            current_len += len(item) + 2
+
+        if hint_parts:
+            return colored(' ' + ' '.join(hint_parts), 'white', attrs=['dark'])
+    return ""
+
+
+def _get_npc_hints(state, prefix='@') -> str:
+    """NPC hints."""
+    npcs = set()
+    if state and state.team:
+        if hasattr(state.team, 'npcs') and state.team.npcs:
+            npcs.update(state.team.npcs.keys())
+        if hasattr(state.team, 'forenpc') and state.team.forenpc:
+            npcs.add(state.team.forenpc.name)
+    if not npcs:
+        npcs = {'sibiji', 'guac', 'corca', 'kadiefa', 'plonk'}
+    if len(prefix) > 1:
+        f = prefix[1:].lower()
+        npcs = {n for n in npcs if n.lower().startswith(f)}
+    if npcs:
+        return colored(' ' + ' '.join('@' + n for n in sorted(npcs)), 'cyan')
+    return ""
+
+
+
 def split_by_pipes(command: str) -> List[str]:
     parts = []
     current = ""
@@ -1847,6 +2117,7 @@ def model_supports_tool_calls(model: Optional[str], provider: Optional[str]) ->
         "llama3.1",
         "llama-3.2",
         "llama3.2",
+        "gemini",
         "tool",
     ]
     return any(marker in model_lower for marker in toolish_markers)
@@ -1962,19 +2233,6 @@ def collect_llm_tools(state: ShellState) -> Tuple[List[Dict[str, Any]], Dict[str
     return list(deduped.values()), tool_map
 
 
-def normalize_llm_result(llm_result: Any, fallback_messages: List[Dict[str, Any]]) -> Tuple[Any, List[Dict[str, Any]]]:
-    """
-    Normalize varying LLM return shapes into (output, messages).
-    """
-    if isinstance(llm_result, dict):
-        messages = llm_result.get("messages", fallback_messages)
-        output = llm_result.get("output")
-        if output is None:
-            output = llm_result.get("response")
-        return output, messages
-    return llm_result, fallback_messages
-
-
 def should_skip_kg_processing(user_input: str, assistant_output: str) -> bool:
     """Determine if this interaction is too trivial for KG processing"""
 
@@ -2209,8 +2467,8 @@ def process_pipeline_command(
         f"Platform: {platform.system()} {platform.release()} "
         f"({platform.machine()})"
     )
-    info = path_cmd + '\n' + ls_files + '\n' + platform_info + '\n'
-
+    info = path_cmd + '\n' + ls_files + '\n' + platform_info + '\n'
+    # Note: Don't append user message here - get_llm_response/check_llm_command handle it
 
     tools_for_llm: List[Dict[str, Any]] = []
     tool_exec_map: Dict[str, Callable] = {}
@@ -2246,8 +2504,28 @@
         for name, func in inspect.getmembers(current_module, inspect.isfunction):
             application_globals_for_jinx[name] = func
 
+    # Log messages before LLM call
+    logger = logging.getLogger("npcsh.state")
+    logger.debug(f"[process_pipeline_command] Before LLM call: {len(state.messages)} messages, tool_capable={tool_capable}")
+    for i, msg in enumerate(state.messages[-3:]):
+        role = msg.get('role', 'unknown')
+        content = msg.get('content', '')
+        preview = content[:80] if isinstance(content, str) else str(type(content))
+        logger.debug(f"  msg[{len(state.messages)-3+i}] role={role}: {preview}...")
+
     try: # Added try-except for KeyboardInterrupt here
         if tool_capable:
+            # Build kwargs - don't pass tool_choice for gemini as it doesn't support it
+            llm_kwargs = {
+                "auto_process_tool_calls": True,
+                "tools": tools_for_llm,
+                "tool_map": tool_exec_map,
+            }
+            # Only add tool_choice for providers that support it (not gemini)
+            is_gemini = (exec_provider and "gemini" in exec_provider.lower()) or \
+                        (exec_model and "gemini" in exec_model.lower())
+            llm_kwargs["tool_choice"] = 'auto'
+
            llm_result = get_llm_response(
                full_llm_cmd,
                model=exec_model,
@@ -2258,16 +2536,13 @@
                stream=stream_final,
                attachments=state.attachments,
                context=info,
-
-                tools=tools_for_llm,
-                tool_map=tool_exec_map,
-                tool_choice={"type": "auto"},
+                **llm_kwargs,
            )
        else:
            llm_result = check_llm_command(
                full_llm_cmd,
-                model=exec_model,
-                provider=exec_provider,
+                model=exec_model,
+                provider=exec_provider,
                api_url=state.api_url,
                api_key=state.api_key,
                npc=state.npc,
@@ -2276,24 +2551,33 @@
                images=state.attachments,
                stream=stream_final,
                context=info,
-                extra_globals=application_globals_for_jinx
+                extra_globals=application_globals_for_jinx,
+                tool_capable=tool_capable,
            )
    except KeyboardInterrupt:
        print(colored("\nLLM processing interrupted by user.", "yellow"))
        return state, colored("LLM processing interrupted.", "red")
 
-
-
-
-
+    # Extract output and messages from llm_result
+    # get_llm_response uses 'response', check_llm_command uses 'output'
+    if isinstance(llm_result, dict):
+        new_messages = llm_result.get("messages", state.messages)
+        logger.debug(f"[process_pipeline_command] After LLM call: received {len(new_messages)} messages (was {len(state.messages)})")
+        state.messages = new_messages
+        output_text = llm_result.get("output") or llm_result.get("response")
+
+        # Preserve usage info for process_result to accumulate
+        output = {
+            'output': output_text,
+            'usage': llm_result.get('usage'),
+            'model': exec_model,
+            'provider': exec_provider,
+        }
+    else:
+        output = llm_result
 
-    if not review:
-
-        state.messages = llm_result.get("messages", state.messages)
-        output = llm_result.get("output")
-        return state, output
-    else:
-        return state, llm_result
+    if tool_capable or not review:
+        return state, output
    else:
        return review_and_iterate_command(
            original_command=full_llm_cmd,
@@ -2359,299 +2643,320 @@ Please review and improve this response if needed. Provide a better, more comple
    state.messages = current_messages
    return state, refined_result
 def check_mode_switch(command:str , state: ShellState):
-    if command in ['/cmd', '/agent', '/chat'
+    if command in ['/cmd', '/agent', '/chat']:
        state.current_mode = command[1:]
-        return True, state
+        return True, state
    return False, state
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+def _delegate_to_npc(state: ShellState, npc_name: str, command: str, delegation_depth: int = 0) -> Tuple[ShellState, Any]:
+    """
+    Delegate a command to a specific NPC.
+
+    Specialists just receive the task directly - no mention of delegation.
+    Only forenpc can delegate (depth 0), and we catch @mentions in forenpc responses.
+    """
+    import re
+
+    MAX_DELEGATION_DEPTH = 1  # Only allow one level of delegation
+
+    if delegation_depth > MAX_DELEGATION_DEPTH:
+        return state, {'output': f"⚠ Maximum delegation depth reached."}
+
+    if not state.team or not hasattr(state.team, 'npcs') or npc_name not in state.team.npcs:
+        return state, {'output': f"⚠ NPC '{npc_name}' not found in team"}
+
+    target_npc = state.team.npcs[npc_name]
+    model_name = target_npc.model if hasattr(target_npc, 'model') else 'unknown'
+
+    try:
+        # Build tools from the NPC's jinx catalog
+        tools_for_npc = None
+        tool_map_for_npc = None
+        if hasattr(target_npc, 'jinx_tool_catalog') and target_npc.jinx_tool_catalog:
+            tools_for_npc = list(target_npc.jinx_tool_catalog.values())
+            # Build tool_map that executes jinxs
+            tool_map_for_npc = {}
+            for jinx_name, jinx_obj in target_npc.jinxs_dict.items():
+                def make_executor(jname, jobj, npc):
+                    # Get expected input names from jinx
+                    expected_inputs = []
+                    for inp in (jobj.inputs or []):
+                        if isinstance(inp, str):
+                            expected_inputs.append(inp)
+                        elif isinstance(inp, dict):
+                            expected_inputs.append(list(inp.keys())[0])
+
+                    def executor(**received):
+                        # Map received args to expected jinx inputs
+                        mapped = {}
+                        if expected_inputs:
+                            # If we got unexpected keys, map first value to first expected input
+                            received_keys = list(received.keys())
+                            for i, expected in enumerate(expected_inputs):
+                                if expected in received:
+                                    mapped[expected] = received[expected]
+                                elif i < len(received_keys):
+                                    # Map positionally
+                                    mapped[expected] = received[received_keys[i]]
+                        else:
+                            mapped = received
+
+                        result = npc.execute_jinx(jname, mapped)
+                        return result.get('output', str(result))
+                    executor.__name__ = jname
+                    return executor
+                tool_map_for_npc[jinx_name] = make_executor(jinx_name, jinx_obj, target_npc)
+
+        with SpinnerContext(
+            f"{npc_name} processing with {model_name}",
+            style="dots_pulse"
+        ):
+            # Just send the command directly - don't pass team context so they don't know about other NPCs
+            result = target_npc.get_llm_response(
+                command,
+                messages=[],  # Fresh messages - don't leak conversation history
+                context={},  # No team context - they shouldn't know about teammates
+                tools=tools_for_npc,
+                tool_map=tool_map_for_npc,
+                auto_process_tool_calls=True
            )
-            sys.stdout.flush()
-            time.sleep(0.1)
-
-    def __enter__(self):
-        self.spinning = True
-        self.thread = threading.Thread(target=self._spin)
-        self.thread.start()
-        return self
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        self.spinning = False
-        if self.thread:
-            self.thread.join()
-        sys.stdout.write("\r" + " " * 80 + "\r")
-        sys.stdout.flush()
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        output = result.get("response") or result.get("output", "")
+        if result.get("messages"):
+            state.messages = result["messages"]
+
+        # Only forenpc/sibiji (depth 0) can have @mentions processed
+        if delegation_depth == 0 and output and isinstance(output, str):
+            # Look for @npc_name patterns in the response
+            at_mention_pattern = r'@(\w+)\s*,?\s*(?:could you|can you|please|would you)?[^.!?\n]*[.!?\n]?'
+            matches = re.findall(at_mention_pattern, output, re.IGNORECASE)
+
+            for mentioned_npc in matches:
+                mentioned_npc = mentioned_npc.lower()
+                if mentioned_npc in state.team.npcs and mentioned_npc != npc_name:
+                    # Extract what they're asking the other NPC to do
+                    delegation_match = re.search(
+                        rf'@{mentioned_npc}\s*,?\s*(.*?)(?:\n|$)',
+                        output,
+                        re.IGNORECASE
+                    )
+                    if delegation_match:
+                        sub_request = delegation_match.group(1).strip()
+                        if sub_request:
+                            # Recursive delegation will show its own spinner
+                            state, sub_output = _delegate_to_npc(
+                                state, mentioned_npc, sub_request, delegation_depth + 1
+                            )
+                            # Append the sub-NPC's response
+                            if isinstance(sub_output, dict):
+                                sub_text = sub_output.get('output', '')
+                            else:
+                                sub_text = str(sub_output)
+                            if sub_text:
+                                output += f"\n\n--- Response from {mentioned_npc} ---\n{sub_text}"
+
+        return state, {'output': output}
+
+    except KeyboardInterrupt:
+        print(colored(f"\n{npc_name} interrupted.", "yellow"))
+        return state, {'output': colored("Interrupted.", "red")}
+
 
 def execute_command(
     command: str,
     state: ShellState,
-    review = False,
+    review = False,
     router = None,
     command_history = None,
 ) -> Tuple[ShellState, Any]:
+    """
+    Execute a command in npcsh.
 
+    Routes commands based on:
+    1. Mode switch commands (/agent, /chat, /cmd, etc.)
+    2. Slash commands (/jinx_name) -> execute via router
+    3. Default mode behavior -> pipeline processing in agent mode, or jinx execution for other modes
+    """
    if not command.strip():
        return state, ""
-
+
+    # Check for mode switch commands
    mode_change, state = check_mode_switch(command, state)
    if mode_change:
        print(colored(f"⚡ Switched to {state.current_mode} mode", "green"))
        return state, 'Mode changed.'
 
-
-
-
-
-
-
-
+    # Check for @npc delegation syntax: @sibiji do something
+    if command.startswith('@') and ' ' in command:
+        npc_name = command.split()[0][1:]  # Remove @ prefix
+        delegated_command = command[len(npc_name) + 2:]  # Rest of command
+
+        # Check if NPC exists in team
+        if state.team and hasattr(state.team, 'npcs') and npc_name in state.team.npcs:
+            state, output = _delegate_to_npc(state, npc_name, delegated_command)
+            return state, output
+        else:
+            print(colored(f"⚠ NPC '{npc_name}' not found in team", "yellow"))
+            # Fall through to normal processing
+
    original_command_for_embedding = command
    commands = split_by_pipes(command)
 
    stdin_for_next = None
    final_output = None
-    current_state = state
-
-
-
-        else None
-    )
-    npc_provider = (
-        state.npc.provider
-        if isinstance(state.npc, NPC) and state.npc.provider
-        else None
-    )
-    active_model = npc_model or state.chat_model
-    active_provider = npc_provider or state.chat_provider
-
+    current_state = state
+
+    # Agent mode uses pipeline processing (the original behavior)
+    # Other modes route to their respective jinxs
    if state.current_mode == 'agent':
        total_stages = len(commands)
-
+
        for i, cmd_segment in enumerate(commands):
            stage_num = i + 1
            stage_emoji = ["🎯", "⚙️", "🔧", "✨", "🚀"][i % 5]
-
-
-
-
-
-
-
+
+            if total_stages > 1:
+                print(colored(
+                    f"\n{stage_emoji} Pipeline Stage {stage_num}/{total_stages}",
+                    "cyan",
+                    attrs=["bold"]
+                ))
+
            is_last_command = (i == len(commands) - 1)
-            stream_this_segment = state.stream_output and not is_last_command
-
+            stream_this_segment = state.stream_output and not is_last_command
+
            try:
                current_state, output = process_pipeline_command(
                    cmd_segment.strip(),
                    stdin_for_next,
-                    current_state,
-                    stream_final=stream_this_segment,
+                    current_state,
+                    stream_final=stream_this_segment,
                    review=review,
                    router=router
                )
-                if isinstance(output, dict) and 'output' in output:
-                    output = output['output']
 
+                # For last command, preserve full dict with usage info
                if is_last_command:
-
+                    if total_stages > 1:
+                        print(colored("✅ Pipeline complete", "green"))
                    return current_state, output
-
+
+                # For intermediate stages, extract output text for piping
+                if isinstance(output, dict) and 'output' in output:
+                    output = output['output']
+
                if isinstance(output, str):
                    stdin_for_next = output
-
+                else:
                    try:
                        if stream_this_segment:
                            full_stream_output = (
                                print_and_process_stream_with_markdown(
-                                    output,
-                                    state.npc.model,
-                                    state.npc.provider,
+                                    output,
+                                    state.npc.model if isinstance(state.npc, NPC) else state.chat_model,
+                                    state.npc.provider if isinstance(state.npc, NPC) else state.chat_provider,
                                    show=True
                                )
                            )
                            stdin_for_next = full_stream_output
-                            if is_last_command:
-                                final_output = full_stream_output
                    except:
-                        if output is not None:
-                            try:
+                        if output is not None:
+                            try:
                                stdin_for_next = str(output)
                            except Exception:
-                                print(
-                                    f"Warning: Cannot convert output to "
-                                    f"string for piping: {type(output)}",
-                                    file=sys.stderr
-                                )
                                stdin_for_next = None
-                        else:
+                        else:
                            stdin_for_next = None
-
-
-                    f" → Passing to stage {stage_num + 1}",
-
-                ))
+
+                if total_stages > 1:
+                    print(colored(f" → Passing to stage {stage_num + 1}", "blue"))
+
            except KeyboardInterrupt:
                print(colored("\nOperation interrupted by user.", "yellow"))
                return current_state, colored("Command interrupted.", "red")
            except RateLimitError:
-                print(colored('Rate Limit Exceeded'))
-                # wait 30 seconds then truncate messages/condense context with breathing mechanism
-                # for now just limit to first plus last 10
+                print(colored('Rate Limit Exceeded', 'yellow'))
                messages = current_state.messages[0:1] + current_state.messages[-2:]
                current_state.messages = messages
-
-
-                print('sleeping...')
-                print(current_state)
-                print(current_state.messages)
+                import time
+                print('Waiting 30s before retry...')
                time.sleep(30)
-
-
-                return execute_command(command, current_state, review=review, router=router,)
-
-
+                return execute_command(command, current_state, review=review, router=router)
            except Exception as pipeline_error:
                import traceback
                traceback.print_exc()
                error_msg = colored(
-                    f"❌ Error in stage {stage_num} "
-                    f"('{cmd_segment[:50]}...'): {pipeline_error}",
+                    f"❌ Error in stage {stage_num} ('{cmd_segment[:50]}...'): {pipeline_error}",
                    "red"
                )
                return current_state, error_msg
 
-    if final_output is not None and isinstance(final_output,str):
-        store_command_embeddings(
-            original_command_for_embedding,
-            final_output,
-            current_state
-        )
+        if final_output is not None and isinstance(final_output, str):
+            store_command_embeddings(original_command_for_embedding, final_output, current_state)
 
        return current_state, final_output
 
-
-
-
-
-
-
-
-
-
-
-    )
-
-    if is_probably_bash:
+    else:
+        # For non-agent modes (chat, cmd, or any custom mode), route through the jinx
+        mode_jinx_name = state.current_mode
+
+        # Check if mode jinx exists in team or router
+        mode_jinx = None
+        if state.team and hasattr(state.team, 'jinxs_dict') and mode_jinx_name in state.team.jinxs_dict:
+            mode_jinx = state.team.jinxs_dict[mode_jinx_name]
+        elif router and mode_jinx_name in router.jinx_routes:
+            # Execute via router
            try:
-
-
-
-
-            return
-        else
-
-
-
-                command,
-                None,
-                state
-            )
-            return state, bash_output
-        except Exception as bash_err:
-            return state, colored(
-                f"Bash execution failed: {bash_err}",
-                "red"
-            )
-    except Exception:
-        pass
+                result = router.execute(f"/{mode_jinx_name} {command}",
+                                        state=state, npc=state.npc, messages=state.messages)
+                if isinstance(result, dict):
+                    state.messages = result.get('messages', state.messages)
+                    return state, result.get('output', '')
+                return state, str(result) if result else ''
+            except KeyboardInterrupt:
+                print(colored(f"\n{mode_jinx_name} interrupted.", "yellow"))
+                return state, colored("Interrupted.", "red")
 
-
-
-
-
-
+        if mode_jinx:
+            # Execute the mode jinx directly
+            try:
+                result = mode_jinx.execute(
+                    input_values={'query': command, 'stream': state.stream_output},
+                    npc=state.npc,
+                    messages=state.messages,
+                    extra_globals={'state': state}
+                )
+                if isinstance(result, dict):
+                    state.messages = result.get('messages', state.messages)
+                    return state, result.get('output', '')
+                return state, str(result) if result else ''
+            except KeyboardInterrupt:
+                print(colored(f"\n{mode_jinx_name} interrupted.", "yellow"))
+                return state, colored("Interrupted.", "red")
+
+        # Fallback: if mode jinx not found, use basic LLM response
+        npc_model = state.npc.model if isinstance(state.npc, NPC) and state.npc.model else None
+        npc_provider = state.npc.provider if isinstance(state.npc, NPC) and state.npc.provider else None
+        active_model = npc_model or state.chat_model
+        active_provider = npc_provider or state.chat_provider
+
+        with SpinnerContext(f"Processing with {active_model}", style="brain"):
+            try:
                response = get_llm_response(
-            command,
-            model=active_model,
-            provider=active_provider,
+                    command,
+                    model=active_model,
+                    provider=active_provider,
                    npc=state.npc,
                    stream=state.stream_output,
                    messages=state.messages
                )
            except KeyboardInterrupt:
-                print(colored("\
-                return state, colored("
-
-        state.messages = response['messages']
-        return state, response['response']
+                print(colored("\nInterrupted.", "yellow"))
+                return state, colored("Interrupted.", "red")
 
-
-
-        f"Executing with {active_model}",
-        style="dots_pulse"
-    ):
-        try: # Added try-except for KeyboardInterrupt here
-            response = execute_llm_command(
-                command,
-                model=active_model,
-                provider=active_provider,
-                npc=state.npc,
-                stream=state.stream_output,
-                messages=state.messages
-            )
-        except KeyboardInterrupt:
-            print(colored("\nCommand execution interrupted by user.", "yellow"))
-            return state, colored("Command interrupted.", "red")
-
-        state.messages = response['messages']
-        return state, response['response']
+        state.messages = response.get('messages', state.messages)
+        return state, response.get('response', '')
 
 
 def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
@@ -2887,13 +3192,26 @@ def process_result(
    result_state.attachments = None
 
    final_output_str = None
-
+
    # FIX: Handle dict output properly
    if isinstance(output, dict):
        output_content = output.get('output')
        model_for_stream = output.get('model', active_npc.model)
        provider_for_stream = output.get('provider', active_npc.provider)
-
+
+        # Accumulate token usage if available
+        if 'usage' in output:
+            usage = output['usage']
+            result_state.session_input_tokens += usage.get('input_tokens', 0)
+            result_state.session_output_tokens += usage.get('output_tokens', 0)
+            # Calculate cost
+            from npcpy.gen.response import calculate_cost
+            result_state.session_cost_usd += calculate_cost(
+                model_for_stream,
+                usage.get('input_tokens', 0),
+                usage.get('output_tokens', 0)
+            )
+
        # If output_content is still a dict or None, convert to string
        if isinstance(output_content, dict):
            output_content = str(output_content)
@@ -2927,14 +3245,19 @@ def process_result(
            render_markdown(final_output_str)
 
 
+    # Log message state after processing
+    logger = logging.getLogger("npcsh.state")
+    logger.debug(f"[process_result] Before final append: {len(result_state.messages)} messages, final_output_str={'set' if final_output_str else 'None'}")
+
    if final_output_str:
        if result_state.messages:
            if not result_state.messages or result_state.messages[-1].get("role") != "assistant":
                result_state.messages.append({
-                    "role": "assistant",
+                    "role": "assistant",
                    "content": final_output_str
                })
-
+                logger.debug(f"[process_result] Appended assistant message, now {len(result_state.messages)} messages")
+
        save_conversation_message(
            command_history,
            result_state.conversation_id,