npcpy 1.0.26__py3-none-any.whl → 1.2.32__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- npcpy/__init__.py +0 -7
- npcpy/data/audio.py +16 -99
- npcpy/data/image.py +43 -42
- npcpy/data/load.py +83 -124
- npcpy/data/text.py +28 -28
- npcpy/data/video.py +8 -32
- npcpy/data/web.py +51 -23
- npcpy/ft/diff.py +110 -0
- npcpy/ft/ge.py +115 -0
- npcpy/ft/memory_trainer.py +171 -0
- npcpy/ft/model_ensembler.py +357 -0
- npcpy/ft/rl.py +360 -0
- npcpy/ft/sft.py +248 -0
- npcpy/ft/usft.py +128 -0
- npcpy/gen/audio_gen.py +24 -0
- npcpy/gen/embeddings.py +13 -13
- npcpy/gen/image_gen.py +262 -117
- npcpy/gen/response.py +615 -415
- npcpy/gen/video_gen.py +53 -7
- npcpy/llm_funcs.py +1869 -437
- npcpy/main.py +1 -1
- npcpy/memory/command_history.py +844 -510
- npcpy/memory/kg_vis.py +833 -0
- npcpy/memory/knowledge_graph.py +892 -1845
- npcpy/memory/memory_processor.py +81 -0
- npcpy/memory/search.py +188 -90
- npcpy/mix/debate.py +192 -3
- npcpy/npc_compiler.py +1672 -801
- npcpy/npc_sysenv.py +593 -1266
- npcpy/serve.py +3120 -0
- npcpy/sql/ai_function_tools.py +257 -0
- npcpy/sql/database_ai_adapters.py +186 -0
- npcpy/sql/database_ai_functions.py +163 -0
- npcpy/sql/model_runner.py +19 -19
- npcpy/sql/npcsql.py +706 -507
- npcpy/sql/sql_model_compiler.py +156 -0
- npcpy/tools.py +183 -0
- npcpy/work/plan.py +13 -279
- npcpy/work/trigger.py +3 -3
- npcpy-1.2.32.dist-info/METADATA +803 -0
- npcpy-1.2.32.dist-info/RECORD +54 -0
- npcpy/data/dataframes.py +0 -171
- npcpy/memory/deep_research.py +0 -125
- npcpy/memory/sleep.py +0 -557
- npcpy/modes/_state.py +0 -78
- npcpy/modes/alicanto.py +0 -1075
- npcpy/modes/guac.py +0 -785
- npcpy/modes/mcp_npcsh.py +0 -822
- npcpy/modes/npc.py +0 -213
- npcpy/modes/npcsh.py +0 -1158
- npcpy/modes/plonk.py +0 -409
- npcpy/modes/pti.py +0 -234
- npcpy/modes/serve.py +0 -1637
- npcpy/modes/spool.py +0 -312
- npcpy/modes/wander.py +0 -549
- npcpy/modes/yap.py +0 -572
- npcpy/npc_team/alicanto.npc +0 -2
- npcpy/npc_team/alicanto.png +0 -0
- npcpy/npc_team/assembly_lines/test_pipeline.py +0 -181
- npcpy/npc_team/corca.npc +0 -13
- npcpy/npc_team/foreman.npc +0 -7
- npcpy/npc_team/frederic.npc +0 -6
- npcpy/npc_team/frederic4.png +0 -0
- npcpy/npc_team/guac.png +0 -0
- npcpy/npc_team/jinxs/automator.jinx +0 -18
- npcpy/npc_team/jinxs/bash_executer.jinx +0 -31
- npcpy/npc_team/jinxs/calculator.jinx +0 -11
- npcpy/npc_team/jinxs/edit_file.jinx +0 -96
- npcpy/npc_team/jinxs/file_chat.jinx +0 -14
- npcpy/npc_team/jinxs/gui_controller.jinx +0 -28
- npcpy/npc_team/jinxs/image_generation.jinx +0 -29
- npcpy/npc_team/jinxs/internet_search.jinx +0 -30
- npcpy/npc_team/jinxs/local_search.jinx +0 -152
- npcpy/npc_team/jinxs/npcsh_executor.jinx +0 -31
- npcpy/npc_team/jinxs/python_executor.jinx +0 -8
- npcpy/npc_team/jinxs/screen_cap.jinx +0 -25
- npcpy/npc_team/jinxs/sql_executor.jinx +0 -33
- npcpy/npc_team/kadiefa.npc +0 -3
- npcpy/npc_team/kadiefa.png +0 -0
- npcpy/npc_team/npcsh.ctx +0 -9
- npcpy/npc_team/npcsh_sibiji.png +0 -0
- npcpy/npc_team/plonk.npc +0 -2
- npcpy/npc_team/plonk.png +0 -0
- npcpy/npc_team/plonkjr.npc +0 -2
- npcpy/npc_team/plonkjr.png +0 -0
- npcpy/npc_team/sibiji.npc +0 -5
- npcpy/npc_team/sibiji.png +0 -0
- npcpy/npc_team/spool.png +0 -0
- npcpy/npc_team/templates/analytics/celona.npc +0 -0
- npcpy/npc_team/templates/hr_support/raone.npc +0 -0
- npcpy/npc_team/templates/humanities/eriane.npc +0 -4
- npcpy/npc_team/templates/it_support/lineru.npc +0 -0
- npcpy/npc_team/templates/marketing/slean.npc +0 -4
- npcpy/npc_team/templates/philosophy/maurawa.npc +0 -0
- npcpy/npc_team/templates/sales/turnic.npc +0 -4
- npcpy/npc_team/templates/software/welxor.npc +0 -0
- npcpy/npc_team/yap.png +0 -0
- npcpy/routes.py +0 -958
- npcpy/work/mcp_helpers.py +0 -357
- npcpy/work/mcp_server.py +0 -194
- npcpy-1.0.26.data/data/npcpy/npc_team/alicanto.npc +0 -2
- npcpy-1.0.26.data/data/npcpy/npc_team/alicanto.png +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/automator.jinx +0 -18
- npcpy-1.0.26.data/data/npcpy/npc_team/bash_executer.jinx +0 -31
- npcpy-1.0.26.data/data/npcpy/npc_team/calculator.jinx +0 -11
- npcpy-1.0.26.data/data/npcpy/npc_team/celona.npc +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/corca.npc +0 -13
- npcpy-1.0.26.data/data/npcpy/npc_team/edit_file.jinx +0 -96
- npcpy-1.0.26.data/data/npcpy/npc_team/eriane.npc +0 -4
- npcpy-1.0.26.data/data/npcpy/npc_team/file_chat.jinx +0 -14
- npcpy-1.0.26.data/data/npcpy/npc_team/foreman.npc +0 -7
- npcpy-1.0.26.data/data/npcpy/npc_team/frederic.npc +0 -6
- npcpy-1.0.26.data/data/npcpy/npc_team/frederic4.png +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/guac.png +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/gui_controller.jinx +0 -28
- npcpy-1.0.26.data/data/npcpy/npc_team/image_generation.jinx +0 -29
- npcpy-1.0.26.data/data/npcpy/npc_team/internet_search.jinx +0 -30
- npcpy-1.0.26.data/data/npcpy/npc_team/kadiefa.npc +0 -3
- npcpy-1.0.26.data/data/npcpy/npc_team/kadiefa.png +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/lineru.npc +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/local_search.jinx +0 -152
- npcpy-1.0.26.data/data/npcpy/npc_team/maurawa.npc +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/npcsh.ctx +0 -9
- npcpy-1.0.26.data/data/npcpy/npc_team/npcsh_executor.jinx +0 -31
- npcpy-1.0.26.data/data/npcpy/npc_team/npcsh_sibiji.png +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/plonk.npc +0 -2
- npcpy-1.0.26.data/data/npcpy/npc_team/plonk.png +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/plonkjr.npc +0 -2
- npcpy-1.0.26.data/data/npcpy/npc_team/plonkjr.png +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/python_executor.jinx +0 -8
- npcpy-1.0.26.data/data/npcpy/npc_team/raone.npc +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/screen_cap.jinx +0 -25
- npcpy-1.0.26.data/data/npcpy/npc_team/sibiji.npc +0 -5
- npcpy-1.0.26.data/data/npcpy/npc_team/sibiji.png +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/slean.npc +0 -4
- npcpy-1.0.26.data/data/npcpy/npc_team/spool.png +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/sql_executor.jinx +0 -33
- npcpy-1.0.26.data/data/npcpy/npc_team/test_pipeline.py +0 -181
- npcpy-1.0.26.data/data/npcpy/npc_team/turnic.npc +0 -4
- npcpy-1.0.26.data/data/npcpy/npc_team/welxor.npc +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/yap.png +0 -0
- npcpy-1.0.26.dist-info/METADATA +0 -827
- npcpy-1.0.26.dist-info/RECORD +0 -139
- npcpy-1.0.26.dist-info/entry_points.txt +0 -11
- /npcpy/{modes → ft}/__init__.py +0 -0
- {npcpy-1.0.26.dist-info → npcpy-1.2.32.dist-info}/WHEEL +0 -0
- {npcpy-1.0.26.dist-info → npcpy-1.2.32.dist-info}/licenses/LICENSE +0 -0
- {npcpy-1.0.26.dist-info → npcpy-1.2.32.dist-info}/top_level.txt +0 -0
npcpy/modes/npcsh.py
DELETED
|
@@ -1,1158 +0,0 @@
|
|
|
1
|
-
# Standard Library Imports
|
|
2
|
-
import os
|
|
3
|
-
import sys
|
|
4
|
-
import atexit
|
|
5
|
-
import subprocess
|
|
6
|
-
import shlex
|
|
7
|
-
import re
|
|
8
|
-
from datetime import datetime
|
|
9
|
-
import argparse
|
|
10
|
-
import importlib.metadata
|
|
11
|
-
import textwrap
|
|
12
|
-
from typing import Optional, List, Dict, Any, Tuple, Union
|
|
13
|
-
from dataclasses import dataclass, field
|
|
14
|
-
try:
|
|
15
|
-
from inspect import isgenerator
|
|
16
|
-
except:
|
|
17
|
-
pass
|
|
18
|
-
import platform
|
|
19
|
-
try:
|
|
20
|
-
from termcolor import colored
|
|
21
|
-
except:
|
|
22
|
-
pass
|
|
23
|
-
|
|
24
|
-
try:
|
|
25
|
-
import chromadb
|
|
26
|
-
except ImportError:
|
|
27
|
-
chromadb = None
|
|
28
|
-
import shutil
|
|
29
|
-
|
|
30
|
-
import yaml
|
|
31
|
-
# Local Application Imports
|
|
32
|
-
from npcpy.npc_sysenv import (
|
|
33
|
-
print_and_process_stream_with_markdown,
|
|
34
|
-
setup_npcsh_config,
|
|
35
|
-
is_npcsh_initialized,
|
|
36
|
-
initialize_base_npcs_if_needed,
|
|
37
|
-
orange,
|
|
38
|
-
interactive_commands,
|
|
39
|
-
BASH_COMMANDS,
|
|
40
|
-
log_action,
|
|
41
|
-
render_markdown,
|
|
42
|
-
get_locally_available_models,
|
|
43
|
-
start_interactive_session,
|
|
44
|
-
get_model_and_provider,
|
|
45
|
-
)
|
|
46
|
-
from npcpy.routes import router
|
|
47
|
-
from npcpy.data.image import capture_screenshot
|
|
48
|
-
from npcpy.memory.command_history import (
|
|
49
|
-
CommandHistory,
|
|
50
|
-
save_conversation_message,
|
|
51
|
-
)
|
|
52
|
-
from npcpy.memory.knowledge_graph import breathe
|
|
53
|
-
from npcpy.memory.sleep import sleep, forget
|
|
54
|
-
from npcpy.npc_compiler import NPC, Team, load_jinxs_from_directory
|
|
55
|
-
from npcpy.llm_funcs import check_llm_command, get_llm_response, execute_llm_command
|
|
56
|
-
from npcpy.gen.embeddings import get_embeddings
|
|
57
|
-
try:
|
|
58
|
-
import readline
|
|
59
|
-
except:
|
|
60
|
-
print('no readline support, some features may not work as desired. ')
|
|
61
|
-
# --- Constants ---
|
|
62
|
-
try:
|
|
63
|
-
VERSION = importlib.metadata.version("npcpy")
|
|
64
|
-
except importlib.metadata.PackageNotFoundError:
|
|
65
|
-
VERSION = "unknown"
|
|
66
|
-
|
|
67
|
-
TERMINAL_EDITORS = ["vim", "emacs", "nano"]
|
|
68
|
-
EMBEDDINGS_DB_PATH = os.path.expanduser("~/npcsh_chroma.db")
|
|
69
|
-
HISTORY_DB_DEFAULT_PATH = os.path.expanduser("~/npcsh_history.db")
|
|
70
|
-
READLINE_HISTORY_FILE = os.path.expanduser("~/.npcsh_readline_history")
|
|
71
|
-
DEFAULT_NPC_TEAM_PATH = os.path.expanduser("~/.npcsh/npc_team/")
|
|
72
|
-
PROJECT_NPC_TEAM_PATH = "./npc_team/"
|
|
73
|
-
|
|
74
|
-
# --- Global Clients ---
|
|
75
|
-
try:
|
|
76
|
-
chroma_client = chromadb.PersistentClient(path=EMBEDDINGS_DB_PATH) if chromadb else None
|
|
77
|
-
except Exception as e:
|
|
78
|
-
print(f"Warning: Failed to initialize ChromaDB client at {EMBEDDINGS_DB_PATH}: {e}")
|
|
79
|
-
chroma_client = None
|
|
80
|
-
|
|
81
|
-
# --- Custom Exceptions ---
|
|
82
|
-
class CommandNotFoundError(Exception):
|
|
83
|
-
pass
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
from npcpy.modes._state import initial_state, ShellState
|
|
87
|
-
|
|
88
|
-
def readline_safe_prompt(prompt: str) -> str:
|
|
89
|
-
ansi_escape = re.compile(r"(\033\[[0-9;]*[a-zA-Z])")
|
|
90
|
-
return ansi_escape.sub(r"\001\1\002", prompt)
|
|
91
|
-
|
|
92
|
-
def print_jinxs(jinxs):
|
|
93
|
-
output = "Available jinxs:\n"
|
|
94
|
-
for jinx in jinxs:
|
|
95
|
-
output += f" {jinx.jinx_name}\n"
|
|
96
|
-
output += f" Description: {jinx.description}\n"
|
|
97
|
-
output += f" Inputs: {jinx.inputs}\n"
|
|
98
|
-
return output
|
|
99
|
-
|
|
100
|
-
def open_terminal_editor(command: str) -> str:
|
|
101
|
-
try:
|
|
102
|
-
os.system(command)
|
|
103
|
-
return 'Terminal editor closed.'
|
|
104
|
-
except Exception as e:
|
|
105
|
-
return f"Error opening terminal editor: {e}"
|
|
106
|
-
|
|
107
|
-
def get_multiline_input(prompt: str) -> str:
|
|
108
|
-
lines = []
|
|
109
|
-
current_prompt = prompt
|
|
110
|
-
while True:
|
|
111
|
-
try:
|
|
112
|
-
line = input(current_prompt)
|
|
113
|
-
if line.endswith("\\"):
|
|
114
|
-
lines.append(line[:-1])
|
|
115
|
-
current_prompt = readline_safe_prompt("> ")
|
|
116
|
-
else:
|
|
117
|
-
lines.append(line)
|
|
118
|
-
break
|
|
119
|
-
except EOFError:
|
|
120
|
-
print("Goodbye!")
|
|
121
|
-
sys.exit(0)
|
|
122
|
-
return "\n".join(lines)
|
|
123
|
-
|
|
124
|
-
def split_by_pipes(command: str) -> List[str]:
|
|
125
|
-
parts = []
|
|
126
|
-
current = ""
|
|
127
|
-
in_single_quote = False
|
|
128
|
-
in_double_quote = False
|
|
129
|
-
escape = False
|
|
130
|
-
|
|
131
|
-
for char in command:
|
|
132
|
-
if escape:
|
|
133
|
-
current += char
|
|
134
|
-
escape = False
|
|
135
|
-
elif char == '\\':
|
|
136
|
-
escape = True
|
|
137
|
-
current += char
|
|
138
|
-
elif char == "'" and not in_double_quote:
|
|
139
|
-
in_single_quote = not in_single_quote
|
|
140
|
-
current += char
|
|
141
|
-
elif char == '"' and not in_single_quote:
|
|
142
|
-
in_double_quote = not in_single_quote
|
|
143
|
-
current += char
|
|
144
|
-
elif char == '|' and not in_single_quote and not in_double_quote:
|
|
145
|
-
parts.append(current.strip())
|
|
146
|
-
current = ""
|
|
147
|
-
else:
|
|
148
|
-
current += char
|
|
149
|
-
|
|
150
|
-
if current:
|
|
151
|
-
parts.append(current.strip())
|
|
152
|
-
return parts
|
|
153
|
-
|
|
154
|
-
def parse_command_safely(cmd: str) -> List[str]:
|
|
155
|
-
try:
|
|
156
|
-
return shlex.split(cmd)
|
|
157
|
-
except ValueError as e:
|
|
158
|
-
if "No closing quotation" in str(e):
|
|
159
|
-
if cmd.count('"') % 2 == 1:
|
|
160
|
-
cmd += '"'
|
|
161
|
-
elif cmd.count("'") % 2 == 1:
|
|
162
|
-
cmd += "'"
|
|
163
|
-
try:
|
|
164
|
-
return shlex.split(cmd)
|
|
165
|
-
except ValueError:
|
|
166
|
-
return cmd.split()
|
|
167
|
-
else:
|
|
168
|
-
return cmd.split()
|
|
169
|
-
|
|
170
|
-
def get_file_color(filepath: str) -> tuple:
|
|
171
|
-
if not os.path.exists(filepath):
|
|
172
|
-
return "grey", []
|
|
173
|
-
if os.path.isdir(filepath):
|
|
174
|
-
return "blue", ["bold"]
|
|
175
|
-
elif os.access(filepath, os.X_OK) and not os.path.isdir(filepath):
|
|
176
|
-
return "green", ["bold"]
|
|
177
|
-
elif filepath.endswith((".zip", ".tar", ".gz", ".bz2", ".xz", ".7z")):
|
|
178
|
-
return "red", []
|
|
179
|
-
elif filepath.endswith((".jpg", ".jpeg", ".png", ".gif", ".bmp", ".tiff")):
|
|
180
|
-
return "magenta", []
|
|
181
|
-
elif filepath.endswith((".py", ".pyw")):
|
|
182
|
-
return "yellow", []
|
|
183
|
-
elif filepath.endswith((".sh", ".bash", ".zsh")):
|
|
184
|
-
return "green", []
|
|
185
|
-
elif filepath.endswith((".c", ".cpp", ".h", ".hpp")):
|
|
186
|
-
return "cyan", []
|
|
187
|
-
elif filepath.endswith((".js", ".ts", ".jsx", ".tsx")):
|
|
188
|
-
return "yellow", []
|
|
189
|
-
elif filepath.endswith((".html", ".css", ".scss", ".sass")):
|
|
190
|
-
return "magenta", []
|
|
191
|
-
elif filepath.endswith((".md", ".txt", ".log")):
|
|
192
|
-
return "white", []
|
|
193
|
-
elif os.path.basename(filepath).startswith("."):
|
|
194
|
-
return "cyan", []
|
|
195
|
-
else:
|
|
196
|
-
return "white", []
|
|
197
|
-
|
|
198
|
-
def format_file_listing(output: str) -> str:
|
|
199
|
-
colored_lines = []
|
|
200
|
-
current_dir = os.getcwd()
|
|
201
|
-
for line in output.strip().split("\n"):
|
|
202
|
-
parts = line.split()
|
|
203
|
-
if not parts:
|
|
204
|
-
colored_lines.append(line)
|
|
205
|
-
continue
|
|
206
|
-
|
|
207
|
-
filepath_guess = parts[-1]
|
|
208
|
-
potential_path = os.path.join(current_dir, filepath_guess)
|
|
209
|
-
|
|
210
|
-
color, attrs = get_file_color(potential_path)
|
|
211
|
-
colored_filepath = colored(filepath_guess, color, attrs=attrs)
|
|
212
|
-
|
|
213
|
-
if len(parts) > 1 :
|
|
214
|
-
# Handle cases like 'ls -l' where filename is last
|
|
215
|
-
colored_line = " ".join(parts[:-1] + [colored_filepath])
|
|
216
|
-
else:
|
|
217
|
-
# Handle cases where line is just the filename
|
|
218
|
-
colored_line = colored_filepath
|
|
219
|
-
|
|
220
|
-
colored_lines.append(colored_line)
|
|
221
|
-
|
|
222
|
-
return "\n".join(colored_lines)
|
|
223
|
-
|
|
224
|
-
def wrap_text(text: str, width: int = 80) -> str:
|
|
225
|
-
lines = []
|
|
226
|
-
for paragraph in text.split("\n"):
|
|
227
|
-
if len(paragraph) > width:
|
|
228
|
-
lines.extend(textwrap.wrap(paragraph, width=width, replace_whitespace=False, drop_whitespace=False))
|
|
229
|
-
else:
|
|
230
|
-
lines.append(paragraph)
|
|
231
|
-
return "\n".join(lines)
|
|
232
|
-
|
|
233
|
-
# --- Readline Setup and Completion ---
|
|
234
|
-
|
|
235
|
-
def setup_readline() -> str:
|
|
236
|
-
try:
|
|
237
|
-
readline.read_history_file(READLINE_HISTORY_FILE)
|
|
238
|
-
|
|
239
|
-
readline.set_history_length(1000)
|
|
240
|
-
readline.parse_and_bind("set enable-bracketed-paste on")
|
|
241
|
-
#readline.parse_and_bind('"\e[A": history-search-backward')
|
|
242
|
-
#readline.parse_and_bind('"\e[B": history-search-forward')
|
|
243
|
-
readline.parse_and_bind(r'"\C-r": reverse-search-history')
|
|
244
|
-
readline.parse_and_bind(r'"\C-e": end-of-line')
|
|
245
|
-
readline.parse_and_bind(r'"\C-a": beginning-of-line')
|
|
246
|
-
#if sys.platform == "darwin":
|
|
247
|
-
# readline.parse_and_bind("bind ^I rl_complete")
|
|
248
|
-
#else:
|
|
249
|
-
# readline.parse_and_bind("tab: complete")
|
|
250
|
-
|
|
251
|
-
return READLINE_HISTORY_FILE
|
|
252
|
-
|
|
253
|
-
|
|
254
|
-
except FileNotFoundError:
|
|
255
|
-
pass
|
|
256
|
-
except OSError as e:
|
|
257
|
-
print(f"Warning: Could not read readline history file {READLINE_HISTORY_FILE}: {e}")
|
|
258
|
-
|
|
259
|
-
def save_readline_history():
|
|
260
|
-
try:
|
|
261
|
-
readline.write_history_file(READLINE_HISTORY_FILE)
|
|
262
|
-
except OSError as e:
|
|
263
|
-
print(f"Warning: Could not write readline history file {READLINE_HISTORY_FILE}: {e}")
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
# --- Placeholder for actual valid commands ---
|
|
267
|
-
# This should be populated dynamically based on router, builtins, and maybe PATH executables
|
|
268
|
-
valid_commands_list = list(router.routes.keys()) + list(interactive_commands.keys()) + ["cd", "exit", "quit"] + BASH_COMMANDS
|
|
269
|
-
|
|
270
|
-
def complete(text: str, state: int) -> Optional[str]:
|
|
271
|
-
try:
|
|
272
|
-
buffer = readline.get_line_buffer()
|
|
273
|
-
except:
|
|
274
|
-
print('couldnt get readline buffer')
|
|
275
|
-
line_parts = parse_command_safely(buffer) # Use safer parsing
|
|
276
|
-
word_before_cursor = ""
|
|
277
|
-
if len(line_parts) > 0 and not buffer.endswith(' '):
|
|
278
|
-
current_word = line_parts[-1]
|
|
279
|
-
else:
|
|
280
|
-
current_word = "" # Completing after a space
|
|
281
|
-
|
|
282
|
-
try:
|
|
283
|
-
# Command completion (start of line or after pipe/semicolon)
|
|
284
|
-
# This needs refinement to detect context better
|
|
285
|
-
is_command_start = not line_parts or (len(line_parts) == 1 and not buffer.endswith(' ')) # Basic check
|
|
286
|
-
if is_command_start and not text.startswith('-'): # Don't complete options as commands
|
|
287
|
-
cmd_matches = [cmd + ' ' for cmd in valid_commands_list if cmd.startswith(text)]
|
|
288
|
-
# Add executables from PATH? (Can be slow)
|
|
289
|
-
# path_executables = [f + ' ' for f in shutil.get_exec_path() if os.path.basename(f).startswith(text)]
|
|
290
|
-
# cmd_matches.extend(path_executables)
|
|
291
|
-
return cmd_matches[state]
|
|
292
|
-
|
|
293
|
-
# File/Directory completion (basic)
|
|
294
|
-
# Improve context awareness (e.g., after 'cd', 'ls', 'cat', etc.)
|
|
295
|
-
if text and (not text.startswith('/') or os.path.exists(os.path.dirname(text))):
|
|
296
|
-
basedir = os.path.dirname(text)
|
|
297
|
-
prefix = os.path.basename(text)
|
|
298
|
-
search_dir = basedir if basedir else '.'
|
|
299
|
-
try:
|
|
300
|
-
matches = [os.path.join(basedir, f) + ('/' if os.path.isdir(os.path.join(search_dir, f)) else ' ')
|
|
301
|
-
for f in os.listdir(search_dir) if f.startswith(prefix)]
|
|
302
|
-
return matches[state]
|
|
303
|
-
except OSError: # Handle permission denied etc.
|
|
304
|
-
return None
|
|
305
|
-
|
|
306
|
-
except IndexError:
|
|
307
|
-
return None
|
|
308
|
-
except Exception: # Catch broad exceptions during completion
|
|
309
|
-
return None
|
|
310
|
-
|
|
311
|
-
return None
|
|
312
|
-
|
|
313
|
-
|
|
314
|
-
# --- Command Execution Logic ---
|
|
315
|
-
|
|
316
|
-
def store_command_embeddings(command: str, output: Any, state: ShellState):
|
|
317
|
-
if not chroma_client or not state.embedding_model or not state.embedding_provider:
|
|
318
|
-
if not chroma_client: print("Warning: ChromaDB client not available for embeddings.", file=sys.stderr)
|
|
319
|
-
return
|
|
320
|
-
if not command and not output:
|
|
321
|
-
return
|
|
322
|
-
|
|
323
|
-
try:
|
|
324
|
-
output_str = str(output) if output else ""
|
|
325
|
-
if not command and not output_str: return # Avoid empty embeddings
|
|
326
|
-
|
|
327
|
-
texts_to_embed = [command, output_str]
|
|
328
|
-
|
|
329
|
-
embeddings = get_embeddings(
|
|
330
|
-
texts_to_embed,
|
|
331
|
-
state.embedding_model,
|
|
332
|
-
state.embedding_provider,
|
|
333
|
-
)
|
|
334
|
-
|
|
335
|
-
if not embeddings or len(embeddings) != 2:
|
|
336
|
-
print(f"Warning: Failed to generate embeddings for command: {command[:50]}...", file=sys.stderr)
|
|
337
|
-
return
|
|
338
|
-
|
|
339
|
-
timestamp = datetime.now().isoformat()
|
|
340
|
-
npc_name = state.npc.name if isinstance(state.npc, NPC) else state.npc
|
|
341
|
-
|
|
342
|
-
metadata = [
|
|
343
|
-
{
|
|
344
|
-
"type": "command", "timestamp": timestamp, "path": state.current_path,
|
|
345
|
-
"npc": npc_name, "conversation_id": state.conversation_id,
|
|
346
|
-
},
|
|
347
|
-
{
|
|
348
|
-
"type": "response", "timestamp": timestamp, "path": state.current_path,
|
|
349
|
-
"npc": npc_name, "conversation_id": state.conversation_id,
|
|
350
|
-
},
|
|
351
|
-
]
|
|
352
|
-
|
|
353
|
-
collection_name = f"{state.embedding_provider}_{state.embedding_model}_embeddings"
|
|
354
|
-
try:
|
|
355
|
-
collection = chroma_client.get_or_create_collection(collection_name)
|
|
356
|
-
ids = [f"cmd_{timestamp}_{hash(command)}", f"resp_{timestamp}_{hash(output_str)}"]
|
|
357
|
-
|
|
358
|
-
collection.add(
|
|
359
|
-
embeddings=embeddings,
|
|
360
|
-
documents=texts_to_embed,
|
|
361
|
-
metadatas=metadata,
|
|
362
|
-
ids=ids,
|
|
363
|
-
)
|
|
364
|
-
except Exception as e:
|
|
365
|
-
print(f"Warning: Failed to add embeddings to collection '{collection_name}': {e}", file=sys.stderr)
|
|
366
|
-
|
|
367
|
-
except Exception as e:
|
|
368
|
-
print(f"Warning: Failed to store embeddings: {e}", file=sys.stderr)
|
|
369
|
-
|
|
370
|
-
|
|
371
|
-
def handle_interactive_command(cmd_parts: List[str], state: ShellState) -> Tuple[ShellState, str]:
|
|
372
|
-
command_name = cmd_parts[0]
|
|
373
|
-
print(f"Starting interactive {command_name} session...")
|
|
374
|
-
try:
|
|
375
|
-
return_code = start_interactive_session(
|
|
376
|
-
interactive_commands[command_name], cmd_parts[1:]
|
|
377
|
-
)
|
|
378
|
-
output = f"Interactive {command_name} session ended with return code {return_code}"
|
|
379
|
-
except Exception as e:
|
|
380
|
-
output = f"Error starting interactive session {command_name}: {e}"
|
|
381
|
-
return state, output
|
|
382
|
-
|
|
383
|
-
def handle_cd_command(cmd_parts: List[str], state: ShellState) -> Tuple[ShellState, str]:
|
|
384
|
-
original_path = os.getcwd()
|
|
385
|
-
target_path = cmd_parts[1] if len(cmd_parts) > 1 else os.path.expanduser("~")
|
|
386
|
-
try:
|
|
387
|
-
os.chdir(target_path)
|
|
388
|
-
state.current_path = os.getcwd()
|
|
389
|
-
output = f"Changed directory to {state.current_path}"
|
|
390
|
-
except FileNotFoundError:
|
|
391
|
-
output = colored(f"cd: no such file or directory: {target_path}", "red")
|
|
392
|
-
except Exception as e:
|
|
393
|
-
output = colored(f"cd: error changing directory: {e}", "red")
|
|
394
|
-
os.chdir(original_path) # Revert if error
|
|
395
|
-
|
|
396
|
-
return state, output
|
|
397
|
-
|
|
398
|
-
|
|
399
|
-
def handle_bash_command(
|
|
400
|
-
cmd_parts: List[str],
|
|
401
|
-
cmd_str: str,
|
|
402
|
-
stdin_input: Optional[str],
|
|
403
|
-
state: ShellState,
|
|
404
|
-
) -> Tuple[ShellState, str]:
|
|
405
|
-
|
|
406
|
-
command_name = cmd_parts[0]
|
|
407
|
-
|
|
408
|
-
if command_name in TERMINAL_EDITORS:
|
|
409
|
-
output = open_terminal_editor(cmd_str)
|
|
410
|
-
return state, output
|
|
411
|
-
|
|
412
|
-
try:
|
|
413
|
-
process = subprocess.Popen(
|
|
414
|
-
cmd_parts,
|
|
415
|
-
stdin=subprocess.PIPE if stdin_input is not None else None,
|
|
416
|
-
stdout=subprocess.PIPE,
|
|
417
|
-
stderr=subprocess.PIPE,
|
|
418
|
-
text=True,
|
|
419
|
-
cwd=state.current_path
|
|
420
|
-
)
|
|
421
|
-
|
|
422
|
-
stdout, stderr = process.communicate(input=stdin_input)
|
|
423
|
-
|
|
424
|
-
if process.returncode != 0:
|
|
425
|
-
err_msg = stderr.strip() if stderr else f"Command '{cmd_str}' failed with return code {process.returncode}."
|
|
426
|
-
# If it failed because command not found, raise specific error for fallback
|
|
427
|
-
if "No such file or directory" in err_msg or "command not found" in err_msg:
|
|
428
|
-
raise CommandNotFoundError(err_msg)
|
|
429
|
-
# Otherwise, return the error output
|
|
430
|
-
full_output = stdout.strip() + ("\n" + colored(f"stderr: {err_msg}", "red") if err_msg else "")
|
|
431
|
-
return state, full_output.strip()
|
|
432
|
-
|
|
433
|
-
|
|
434
|
-
output = stdout.strip() if stdout else ""
|
|
435
|
-
if stderr:
|
|
436
|
-
# Log stderr but don't necessarily include in piped output unless requested
|
|
437
|
-
print(colored(f"stderr: {stderr.strip()}", "yellow"), file=sys.stderr)
|
|
438
|
-
|
|
439
|
-
|
|
440
|
-
if command_name in ["ls", "find", "dir"]:
|
|
441
|
-
output = format_file_listing(output)
|
|
442
|
-
elif not output and process.returncode == 0 and not stderr:
|
|
443
|
-
output = "" # No output is valid, don't print success message if piping
|
|
444
|
-
|
|
445
|
-
return state, output
|
|
446
|
-
|
|
447
|
-
except FileNotFoundError:
|
|
448
|
-
raise CommandNotFoundError(f"Command not found: {command_name}")
|
|
449
|
-
except PermissionError as e:
|
|
450
|
-
return state, colored(f"Error executing '{cmd_str}': Permission denied. {e}", "red")
|
|
451
|
-
except Exception as e:
|
|
452
|
-
return state, colored(f"Error executing command '{cmd_str}': {e}", "red")
|
|
453
|
-
|
|
454
|
-
|
|
455
|
-
def execute_slash_command(command: str, stdin_input: Optional[str], state: ShellState, stream: bool) -> Tuple[ShellState, Any]:
|
|
456
|
-
"""Executes slash commands using the router or checking NPC/Team jinxs."""
|
|
457
|
-
command_parts = command.split()
|
|
458
|
-
command_name = command_parts[0].lstrip('/')
|
|
459
|
-
handler = router.get_route(command_name)
|
|
460
|
-
#print(handler)
|
|
461
|
-
if handler:
|
|
462
|
-
# Prepare kwargs for the handler
|
|
463
|
-
handler_kwargs = {
|
|
464
|
-
'stream': stream,
|
|
465
|
-
'npc': state.npc,
|
|
466
|
-
'team': state.team,
|
|
467
|
-
'messages': state.messages,
|
|
468
|
-
'model': state.chat_model,
|
|
469
|
-
'provider': state.chat_provider,
|
|
470
|
-
'api_url': state.api_url,
|
|
471
|
-
'api_key': state.api_key,
|
|
472
|
-
}
|
|
473
|
-
#print(handler_kwargs, command)
|
|
474
|
-
if stdin_input is not None:
|
|
475
|
-
handler_kwargs['stdin_input'] = stdin_input
|
|
476
|
-
|
|
477
|
-
try:
|
|
478
|
-
result_dict = handler(command, **handler_kwargs)
|
|
479
|
-
|
|
480
|
-
if isinstance(result_dict, dict):
|
|
481
|
-
#some respond with output, some with response, needs to be fixed upstream
|
|
482
|
-
output = result_dict.get("output") or result_dict.get("response")
|
|
483
|
-
state.messages = result_dict.get("messages", state.messages)
|
|
484
|
-
return state, output
|
|
485
|
-
else:
|
|
486
|
-
return state, result_dict
|
|
487
|
-
|
|
488
|
-
except Exception as e:
|
|
489
|
-
import traceback
|
|
490
|
-
print(f"Error executing slash command '{command_name}':", file=sys.stderr)
|
|
491
|
-
traceback.print_exc()
|
|
492
|
-
return state, colored(f"Error executing slash command '{command_name}': {e}", "red")
|
|
493
|
-
|
|
494
|
-
active_npc = state.npc if isinstance(state.npc, NPC) else None
|
|
495
|
-
jinx_to_execute = None
|
|
496
|
-
executor = None
|
|
497
|
-
if active_npc and command_name in active_npc.jinxs_dict:
|
|
498
|
-
jinx_to_execute = active_npc.jinxs_dict[command_name]
|
|
499
|
-
executor = active_npc
|
|
500
|
-
elif state.team and command_name in state.team.jinxs_dict:
|
|
501
|
-
jinx_to_execute = state.team.jinxs_dict[command_name]
|
|
502
|
-
executor = state.team
|
|
503
|
-
|
|
504
|
-
if jinx_to_execute:
|
|
505
|
-
args = command_parts[1:]
|
|
506
|
-
try:
|
|
507
|
-
jinx_output = jinx_to_execute.run(
|
|
508
|
-
*args,
|
|
509
|
-
state=state,
|
|
510
|
-
stdin_input=stdin_input,
|
|
511
|
-
messages=state.messages # Pass messages explicitly if needed
|
|
512
|
-
)
|
|
513
|
-
return state, jinx_output
|
|
514
|
-
except Exception as e:
|
|
515
|
-
import traceback
|
|
516
|
-
print(f"Error executing jinx '{command_name}':", file=sys.stderr)
|
|
517
|
-
traceback.print_exc()
|
|
518
|
-
return state, colored(f"Error executing jinx '{command_name}': {e}", "red")
|
|
519
|
-
|
|
520
|
-
if state.team and command_name in state.team.npcs:
|
|
521
|
-
new_npc = state.team.npcs[command_name]
|
|
522
|
-
state.npc = new_npc # Update state directly
|
|
523
|
-
return state, f"Switched to NPC: {new_npc.name}"
|
|
524
|
-
|
|
525
|
-
return state, colored(f"Unknown slash command or jinx: {command_name}", "red")
|
|
526
|
-
|
|
527
|
-
|
|
528
|
-
def process_pipeline_command(
|
|
529
|
-
cmd_segment: str,
|
|
530
|
-
stdin_input: Optional[str],
|
|
531
|
-
state: ShellState,
|
|
532
|
-
stream_final: bool
|
|
533
|
-
) -> Tuple[ShellState, Any]:
|
|
534
|
-
|
|
535
|
-
if not cmd_segment:
|
|
536
|
-
return state, stdin_input
|
|
537
|
-
|
|
538
|
-
available_models_all = get_locally_available_models(state.current_path)
|
|
539
|
-
available_models_all_list = [item for key, item in available_models_all.items()]
|
|
540
|
-
model_override, provider_override, cmd_cleaned = get_model_and_provider(
|
|
541
|
-
cmd_segment, available_models_all_list
|
|
542
|
-
)
|
|
543
|
-
cmd_to_process = cmd_cleaned.strip()
|
|
544
|
-
if not cmd_to_process:
|
|
545
|
-
return state, stdin_input
|
|
546
|
-
|
|
547
|
-
exec_model = model_override or state.chat_model
|
|
548
|
-
exec_provider = provider_override or state.chat_provider
|
|
549
|
-
|
|
550
|
-
if cmd_to_process.startswith("/"):
|
|
551
|
-
#print(cmd_to_process)
|
|
552
|
-
return execute_slash_command(cmd_to_process, stdin_input, state, stream_final)
|
|
553
|
-
else:
|
|
554
|
-
try:
|
|
555
|
-
cmd_parts = parse_command_safely(cmd_to_process)
|
|
556
|
-
if not cmd_parts:
|
|
557
|
-
return state, stdin_input
|
|
558
|
-
|
|
559
|
-
command_name = cmd_parts[0]
|
|
560
|
-
|
|
561
|
-
if command_name in interactive_commands:
|
|
562
|
-
return handle_interactive_command(cmd_parts, state)
|
|
563
|
-
elif command_name == "cd":
|
|
564
|
-
return handle_cd_command(cmd_parts, state)
|
|
565
|
-
else:
|
|
566
|
-
try:
|
|
567
|
-
bash_state, bash_output = handle_bash_command(cmd_parts, cmd_to_process, stdin_input, state)
|
|
568
|
-
return bash_state, bash_output
|
|
569
|
-
except CommandNotFoundError:
|
|
570
|
-
full_llm_cmd = f"{cmd_to_process} {stdin_input}" if stdin_input else cmd_to_process
|
|
571
|
-
|
|
572
|
-
llm_result = check_llm_command(
|
|
573
|
-
command = full_llm_cmd,
|
|
574
|
-
model = exec_model,
|
|
575
|
-
provider = exec_provider,
|
|
576
|
-
api_url = state.api_url,
|
|
577
|
-
api_key = state.api_key,
|
|
578
|
-
npc = state.npc,
|
|
579
|
-
team = state.team,
|
|
580
|
-
messages = state.messages,
|
|
581
|
-
images = state.attachments,
|
|
582
|
-
stream = stream_final,
|
|
583
|
-
context = None ,
|
|
584
|
-
shell = True,
|
|
585
|
-
|
|
586
|
-
)
|
|
587
|
-
if isinstance(llm_result, dict):
|
|
588
|
-
state.messages = llm_result.get("messages", state.messages)
|
|
589
|
-
output = llm_result.get("output")
|
|
590
|
-
return state, output
|
|
591
|
-
else:
|
|
592
|
-
return state, llm_result
|
|
593
|
-
|
|
594
|
-
except Exception as bash_err:
|
|
595
|
-
return state, colored(f"Bash execution failed: {bash_err}", "red")
|
|
596
|
-
|
|
597
|
-
except Exception as e:
|
|
598
|
-
import traceback
|
|
599
|
-
traceback.print_exc()
|
|
600
|
-
return state, colored(f"Error processing command '{cmd_segment[:50]}...': {e}", "red")
|
|
601
|
-
def check_mode_switch(command:str , state: ShellState):
|
|
602
|
-
if command in ['/cmd', '/agent', '/chat', '/ride']:
|
|
603
|
-
state.current_mode = command[1:]
|
|
604
|
-
return True, state
|
|
605
|
-
|
|
606
|
-
return False, state
|
|
607
|
-
def execute_command(
    command: str,
    state: ShellState,
) -> Tuple[ShellState, Any]:
    """Top-level dispatcher for a single shell input line.

    Routes *command* according to ``state.current_mode``:

    * ``agent`` — splits the line on pipes and runs each segment through
      ``process_pipeline_command``, threading each segment's output into the
      next segment's stdin, then stores embeddings for the final result.
    * ``chat`` — runs obvious bash commands directly, otherwise sends the
      line to ``get_llm_response``.
    * ``cmd`` — delegates to ``execute_llm_command``.
    * ``ride`` — runs obvious bash commands directly, otherwise enters
      ``agentic_ride_loop``.

    Returns a ``(state, output)`` tuple; ``output`` may be a string, a
    generator (when streaming), or ``None``.
    """

    if not command.strip():
        return state, ""
    # Mode slash-commands (/cmd, /agent, /chat, /ride) short-circuit here.
    mode_change, state = check_mode_switch(command, state)
    if mode_change:
        return state, 'Mode changed.'

    original_command_for_embedding = command
    commands = split_by_pipes(command)
    stdin_for_next = None
    final_output = None
    current_state = state
    if state.current_mode == 'agent':
        for i, cmd_segment in enumerate(commands):
            is_last_command = (i == len(commands) - 1)
            # Only the last pipeline stage may stream to the terminal.
            stream_this_segment = is_last_command and state.stream_output  # Use state's stream setting

            try:
                current_state, output = process_pipeline_command(
                    cmd_segment.strip(),
                    stdin_for_next,
                    current_state,
                    stream_final=stream_this_segment
                )

                if is_last_command:
                    final_output = output  # Capture the output of the last command

                # Decide what (if anything) gets piped into the next stage.
                if isinstance(output, str):
                    stdin_for_next = output
                elif isgenerator(output):
                    if not stream_this_segment:  # If intermediate output is a stream, consume for piping
                        full_stream_output = "".join(map(str, output))
                        stdin_for_next = full_stream_output
                        if is_last_command: final_output = full_stream_output
                    else:  # Final output is a stream, don't consume, can't pipe
                        stdin_for_next = None
                        final_output = output
                elif output is not None:  # Try converting other types to string
                    try: stdin_for_next = str(output)
                    except Exception:
                        print(f"Warning: Cannot convert output to string for piping: {type(output)}", file=sys.stderr)
                        stdin_for_next = None
                else:  # Output was None
                    stdin_for_next = None

            except Exception as pipeline_error:
                import traceback
                traceback.print_exc()
                error_msg = colored(f"Error in pipeline stage {i+1} ('{cmd_segment[:50]}...'): {pipeline_error}", "red")
                # Return the state as it was when the error occurred, and the error message
                return current_state, error_msg

        # Store embeddings using the final state; skip when the final output is
        # an unconsumed stream (it will be consumed by the caller).
        if final_output is not None and not (isgenerator(final_output) and current_state.stream_output):
            store_command_embeddings(original_command_for_embedding, final_output, current_state)

        # Return the final state and the final output
        return current_state, final_output

    elif state.current_mode == 'chat':
        # Only treat as bash if it looks like a shell command (starts with a
        # known command, './', or a slash command).
        cmd_parts = parse_command_safely(command)
        is_probably_bash = (
            cmd_parts
            and (
                cmd_parts[0] in interactive_commands
                or cmd_parts[0] in BASH_COMMANDS
                or command.strip().startswith("./")
                or command.strip().startswith("/")
            )
        )
        if is_probably_bash:
            try:
                command_name = cmd_parts[0]
                if command_name in interactive_commands:
                    return handle_interactive_command(cmd_parts, state)
                elif command_name == "cd":
                    return handle_cd_command(cmd_parts, state)
                else:
                    try:
                        bash_state, bash_output = handle_bash_command(cmd_parts, command, None, state)
                        return bash_state, bash_output
                    except CommandNotFoundError:
                        pass  # Fall through to LLM
                    except Exception as bash_err:
                        return state, colored(f"Bash execution failed: {bash_err}", "red")
            except Exception:
                pass  # Fall through to LLM

        # Otherwise, treat as chat (LLM)
        response = get_llm_response(
            command,
            model=state.chat_model,
            provider=state.chat_provider,
            npc=state.npc,
            stream=state.stream_output,
            messages=state.messages
        )
        state.messages = response['messages']
        return state, response['response']

    elif state.current_mode == 'cmd':
        # cmd mode: let the LLM generate and execute a command directly.
        response = execute_llm_command(command,
                                       model = state.chat_model,
                                       provider = state.chat_provider,
                                       npc = state.npc,
                                       stream = state.stream_output,
                                       messages = state.messages)
        state.messages = response['messages']
        return state, response['response']

    elif state.current_mode == 'ride':
        # Allow bash commands in /ride mode
        cmd_parts = parse_command_safely(command)
        is_probably_bash = (
            cmd_parts
            and (
                cmd_parts[0] in interactive_commands
                or cmd_parts[0] in BASH_COMMANDS
                or command.strip().startswith("./")
                or command.strip().startswith("/")
            )
        )
        if is_probably_bash:
            try:
                command_name = cmd_parts[0]
                if command_name in interactive_commands:
                    return handle_interactive_command(cmd_parts, state)
                elif command_name == "cd":
                    return handle_cd_command(cmd_parts, state)
                else:
                    try:
                        bash_state, bash_output = handle_bash_command(cmd_parts, command, None, state)
                        return bash_state, bash_output
                    except CommandNotFoundError:
                        # Unlike chat mode, /ride does not fall through to the LLM here.
                        return state, colored(f"Command not found: {command_name}", "red")
                    except Exception as bash_err:
                        return state, colored(f"Bash execution failed: {bash_err}", "red")
            except Exception:
                return state, colored("Failed to parse or execute bash command.", "red")

        # Otherwise, run the agentic ride loop
        return agentic_ride_loop(command, state)
|
|
758
|
-
|
|
759
|
-
|
|
760
|
-
def agentic_ride_loop(user_goal: str, state: ShellState) -> tuple:
    """
    /ride mode: orchestrate via team, then the LLM suggests three next steps
    and the user picks one (1/2/3), types a free-text alternative, or quits.

    Parameters:
        user_goal: the user's initial request for the team to orchestrate.
        state: shell state; ``state.team`` must be set.

    Returns:
        ``(state, all_results)`` where ``all_results`` is the list of
        orchestration result dicts accumulated across iterations.

    Raises:
        ValueError: if ``state.team`` is missing or None.

    Fix vs. previous version: entering free-text for the next step no longer
    prints a misleading "Invalid input" warning — the text was (and still is)
    accepted and used as the next orchestration request.
    """
    if not hasattr(state, "team") or state.team is None:
        raise ValueError("No team found in shell state for orchestration.")

    request = user_goal
    all_results = []

    while True:
        # 1. Orchestrate the current request through the team.
        result = state.team.orchestrate(request)
        all_results.append(result)
        render_markdown("# Orchestration Result")
        render_markdown(f"- Request: {request}")
        render_markdown(f"- Final response: {result.get('output')}")

        render_markdown('- Summary: '+result['debrief']['summary'])
        recommendations = result['debrief']['recommendations']
        render_markdown(f'- Recommendations: {recommendations}')

        # 2. Ask the LLM for three next possible steps.
        suggestion_prompt = f"""
    Given the following user goal and orchestration result, suggest three new
    avenues to go down that are related but distinct from the original goal and from each other.

    Be concise. Each step should be a single actionable instruction or question.

    User goal: {user_goal}
    Orchestration result: {result}

    Return a JSON object with a "steps" key, whose value is a list of three strings, each string being a next step.
    Return only the JSON object.
    """
        suggestions = get_llm_response(
            suggestion_prompt,
            model=state.chat_model,
            provider=state.chat_provider,
            api_url=state.api_url,
            api_key=state.api_key,
            npc=state.npc,
            format="json"
        )
        # No custom parsing: just use the parsed JSON output.
        steps = suggestions.get("response", {}).get("steps", [])
        if not steps:
            print("No further steps suggested by LLM. Exiting.")
            break

        print("\nNext possible steps:")
        for idx, step in enumerate(steps, 1):
            print(f"{idx}. {step}")

        user_input = input("\nChoose next step (1/2/3) or q to quit: ").strip().lower()
        if user_input in ("q", "quit", "exit"):
            print("Exiting /ride agentic loop.")
            break
        if user_input.isdigit():
            choice = int(user_input)
            if 1 <= choice <= len(steps):
                request = f"""
My initial goal was: {user_goal}
The orchestration result was: {result.get('output')}
I have chosen to pursue the next step: {steps[choice - 1]}
Now work on this next problem.
"""
            else:
                print("Invalid choice, please enter 1, 2, 3, or q.")
                continue
        else:
            # Free-text input: use it verbatim as the next orchestration
            # request (this was always the intent; the old code accepted it
            # but then printed a contradictory "Invalid input" warning).
            request = user_input

    return state, all_results
|
|
842
|
-
|
|
843
|
-
# --- Main Application Logic ---
|
|
844
|
-
|
|
845
|
-
def check_deprecation_warnings():
    """Emit a warning when the legacy NPCSH_MODEL env var is in use."""
    legacy_model = os.getenv("NPCSH_MODEL")
    if legacy_model:
        cprint(
            "Deprecation Warning: NPCSH_MODEL/PROVIDER deprecated. Use NPCSH_CHAT_MODEL/PROVIDER.",
            "yellow",
        )
|
|
851
|
-
|
|
852
|
-
def print_welcome_message():
    """Print the npcsh ASCII-art banner and a one-line usage hint.

    The banner mixes two ANSI color codes: \\033[1;94m (bold blue) for the
    'npc' part and \\033[1;38;5;202m (orange) for the 'sh' part.
    """
    print(
        """
Welcome to \033[1;94mnpc\033[0m\033[1;38;5;202msh\033[0m!
\033[1;94m \033[0m\033[1;38;5;202m \\\\
\033[1;94m _ __ _ __ ___ \033[0m\033[1;38;5;202m ___ | |___ \\\\
\033[1;94m| '_ \ | '_ \ / __|\033[0m\033[1;38;5;202m/ __/ | |_ _| \\\\
\033[1;94m| | | || |_) |( |__ \033[0m\033[1;38;5;202m\_ \ | | | | //
\033[1;94m|_| |_|| .__/ \___|\033[0m\033[1;38;5;202m|___/ |_| |_| //
\033[1;94m| | \033[0m\033[1;38;5;202m //
\033[1;94m| |
\033[1;94m|_|

Begin by asking a question, issuing a bash command, or typing '/help' for more information.

"""
    )
|
|
869
|
-
|
|
870
|
-
|
|
871
|
-
def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
    """Initialize the shell environment and return its core objects.

    Steps, in order:
      1. Emit deprecation warnings and write the npcsh config.
      2. Open (creating if needed) the command-history database.
      3. Wire up readline completion/history (best-effort).
      4. Locate or interactively create an NPC team directory, preferring a
         project-local ``npc_team`` over the global one.
      5. Load the team context, the "forenpc" coordinator NPC, and the jinxs.

    Returns:
        (command_history, team, forenpc_obj) — forenpc_obj may be None when
        no ``<forenpc_name>.npc`` file exists in the chosen team directory.
    """
    check_deprecation_warnings()
    setup_npcsh_config()

    # History DB path is overridable via NPCSH_DB_PATH.
    db_path = os.getenv("NPCSH_DB_PATH", HISTORY_DB_DEFAULT_PATH)
    db_path = os.path.expanduser(db_path)
    os.makedirs(os.path.dirname(db_path), exist_ok=True)
    command_history = CommandHistory(db_path)

    # Best-effort readline setup; some platforms (e.g. Windows without
    # pyreadline) have no readline, so failures here are ignored.
    try:
        readline.set_completer(complete)
        history_file = setup_readline()
        atexit.register(save_readline_history)
        atexit.register(command_history.close)
    except:
        pass

    project_team_path = os.path.abspath(PROJECT_NPC_TEAM_PATH)
    global_team_path = os.path.expanduser(DEFAULT_NPC_TEAM_PATH)
    team_dir = None
    forenpc_obj = None
    team_ctx = {}

    # --- Always prefer local/project team first ---
    if os.path.exists(project_team_path):
        team_dir = project_team_path
        forenpc_name = "forenpc"
    else:
        # No project team: offer to scaffold one interactively.
        resp = input(f"No npc_team found in {os.getcwd()}. Create a new team here? [Y/n]: ").strip().lower()
        if resp in ("", "y", "yes"):
            team_dir = project_team_path
            os.makedirs(team_dir, exist_ok=True)
            forenpc_name = "forenpc"
            forenpc_directive = input(
                f"Enter a primary directive for {forenpc_name} (default: 'You are the forenpc of the team, coordinating activities between NPCs on the team, verifying that results from NPCs are high quality and can help to adequately answer user requests.'): "
            ).strip() or "You are the forenpc of the team, coordinating activities between NPCs on the team, verifying that results from NPCs are high quality and can help to adequately answer user requests."
            forenpc_model = input("Enter a model for your forenpc (default: llama3.2): ").strip() or "llama3.2"
            forenpc_provider = input("Enter a provider for your forenpc (default: ollama): ").strip() or "ollama"
            forenpc_path = os.path.join(team_dir, f"{forenpc_name}.npc")
            if not os.path.exists(forenpc_path):
                with open(forenpc_path, "w") as f:
                    yaml.dump({
                        "name": forenpc_name,
                        "primary_directive": forenpc_directive,
                        "model": forenpc_model,
                        "provider": forenpc_provider
                    }, f)
            ctx_path = os.path.join(team_dir, "team.ctx")
            folder_context = input("Enter a short description or context for this project/team (optional): ").strip()
            team_ctx = {
                "forenpc": forenpc_name,
                "model": forenpc_model,
                "provider": forenpc_provider,
                "api_key": None,
                "api_url": None,
                "context": folder_context if folder_context else None
            }
            # Jinxs can be copied into the project or referenced globally.
            use_jinxs = input("Do you want to copy jinxs from the global folder to this project (c), or use them from the global folder (g)? [c/g, default: g]: ").strip().lower()
            global_jinxs_dir = os.path.expanduser("~/.npcsh/npc_team/jinxs")
            project_jinxs_dir = os.path.join(team_dir, "jinxs")
            if use_jinxs == "c":
                if os.path.exists(global_jinxs_dir):
                    shutil.copytree(global_jinxs_dir, project_jinxs_dir, dirs_exist_ok=True)
                    print(f"Copied jinxs from {global_jinxs_dir} to {project_jinxs_dir}")
                else:
                    print(f"No global jinxs found at {global_jinxs_dir}")
            else:
                team_ctx["use_global_jinxs"] = True

            with open(ctx_path, "w") as f:
                yaml.dump(team_ctx, f)
        # NOTE(review): branch structure reconstructed — these fallbacks are
        # taken when the user declines to create a project team; confirm
        # against the original indentation.
        elif os.path.exists(global_team_path):
            team_dir = global_team_path
            forenpc_name = "sibiji"
        else:
            print("No global npc_team found. Please run 'npcpy init' or create a team first.")
            sys.exit(1)

    # --- Load team context if it exists ---
    ctx_path = os.path.join(team_dir, "team.ctx")
    if os.path.exists(ctx_path):
        with open(ctx_path, "r") as f:
            team_ctx = yaml.safe_load(f) or team_ctx

    # --- Load the forenpc_obj ---
    forenpc_path = os.path.join(team_dir, f"{forenpc_name}.npc")
    if os.path.exists(forenpc_path):
        forenpc_obj = NPC(forenpc_path)
    else:
        forenpc_obj = None

    # --- Decide which jinxs directory to use ---
    if team_ctx.get("use_global_jinxs", False):
        jinxs_dir = os.path.expanduser("~/.npcsh/npc_team/jinxs")
    else:
        jinxs_dir = os.path.join(team_dir, "jinxs")
    from npcpy.npc_compiler import load_jinxs_from_directory
    jinxs_list = load_jinxs_from_directory(jinxs_dir)
    jinxs_dict = {jinx.jinx_name: jinx for jinx in jinxs_list}

    team = Team(team_path=team_dir, forenpc=forenpc_obj, jinxs=jinxs_dict)
    return command_history, team, forenpc_obj
|
|
973
|
-
|
|
974
|
-
def process_result(
    user_input: str,
    result_state: ShellState,
    output: Any,
    command_history: CommandHistory):
    """Render a command's output and persist the user/assistant exchange.

    Logs the user message, renders *output* (streamed or plain) as markdown,
    appends the assistant turn to ``result_state.messages`` when missing, and
    logs the assistant message. Mutates ``result_state`` (clears attachments,
    may append to messages); returns None.
    """

    # NPC/team may be objects or plain strings; normalize to names for logging.
    npc_name = result_state.npc.name if isinstance(result_state.npc, NPC) else result_state.npc
    team_name = result_state.team.name if isinstance(result_state.team, Team) else result_state.team
    save_conversation_message(
        command_history,
        result_state.conversation_id,
        "user",
        user_input,
        wd=result_state.current_path,
        model=result_state.chat_model,  # Log primary chat model? Or specific used one?
        provider=result_state.chat_provider,
        npc=npc_name,
        team=team_name,
        attachments=result_state.attachments,
    )

    result_state.attachments = None  # Clear attachments after logging user message

    final_output_str = None
    if user_input =='/help':
        render_markdown(output)
    elif result_state.stream_output:
        try:
            final_output_str = print_and_process_stream_with_markdown(output, result_state.chat_model, result_state.chat_provider)
        except AttributeError as e:
            # Output was not actually a stream; fall back to rendering it as
            # a plain string when non-empty.
            if isinstance(output, str):
                if len(output) > 0:
                    final_output_str = output
                    render_markdown(final_output_str)

    elif output is not None:
        final_output_str = str(output)
        render_markdown(final_output_str)
    # Keep the message history consistent: only append when the last message
    # is not already an assistant turn.
    if final_output_str and result_state.messages and result_state.messages[-1].get("role") != "assistant":
        result_state.messages.append({"role": "assistant", "content": final_output_str})

    print()  # Add spacing after output

    if final_output_str:
        save_conversation_message(
            command_history,
            result_state.conversation_id,
            "assistant",
            final_output_str,
            wd=result_state.current_path,
            model=result_state.chat_model,
            provider=result_state.chat_provider,
            npc=npc_name,
            team=team_name,
        )
|
|
1031
|
-
|
|
1032
|
-
def run_repl(command_history: CommandHistory, initial_state: ShellState):
    """Run the interactive npcsh read-eval-print loop.

    Builds a platform-appropriate prompt (plain text on Windows, ANSI-colored
    and readline-safe elsewhere), reads multiline input, dispatches it through
    ``execute_command``, and renders/logs results via ``process_result``.
    Exits via ``exit_shell`` on 'exit'/'quit', Ctrl+Z, Ctrl+D, or (on Unix)
    Ctrl+C.

    Fix vs. previous version: the prompt was computed by a Windows/Unix
    if/else and then immediately recomputed unconditionally by a duplicated
    copy of the Unix branch, which clobbered the plain Windows prompt and
    discarded the first computation. The duplicate has been removed.
    """
    state = initial_state
    print_welcome_message()
    print(f'Using {state.current_mode} mode. Use /agent, /cmd, /chat, or /ride to switch to other modes')
    print(f'To switch to a different NPC, type /<npc_name>')
    is_windows = platform.system().lower().startswith("win")

    def exit_shell(state):
        # Consolidate conversation knowledge before exiting; Ctrl+C during
        # consolidation skips it and exits immediately.
        print("\nGoodbye!")
        print('beginning knowledge consolidation')
        try:
            breathe_result = breathe(state.messages, state.chat_model, state.chat_provider, state.npc)
            print(breathe_result)
        except KeyboardInterrupt:
            print("Knowledge consolidation interrupted. Exiting immediately.")
        sys.exit(0)

    while True:
        try:
            # Build the prompt once: plain on Windows (no ANSI/readline),
            # colored and readline-escaped elsewhere.
            if is_windows:
                cwd_part = os.path.basename(state.current_path)
                if isinstance(state.npc, NPC):
                    prompt_end = f":{state.npc.name}> "
                else:
                    prompt_end = ":npcsh> "
                prompt = f"{cwd_part}{prompt_end}"
            else:
                cwd_colored = colored(os.path.basename(state.current_path), "blue")
                if isinstance(state.npc, NPC):
                    prompt_end = f":🤖{orange(state.npc.name)}> "
                else:
                    prompt_end = f":🤖{colored('npc', 'blue', attrs=['bold'])}{colored('sh', 'yellow')}> "
                prompt = readline_safe_prompt(f"{cwd_colored}{prompt_end}")

            user_input = get_multiline_input(prompt).strip()
            # Handle Ctrl+Z (ASCII SUB, '\x1a') as exit (Windows and Unix)
            if user_input == "\x1a":
                exit_shell(state)

            if not user_input:
                continue

            if user_input.lower() in ["exit", "quit"]:
                # Inside an NPC session, 'exit' leaves the NPC; otherwise it
                # exits the shell.
                if isinstance(state.npc, NPC):
                    print(f"Exiting {state.npc.name} mode.")
                    state.npc = None
                    continue
                else:
                    exit_shell(state)

            state.current_path = os.getcwd()
            state, output = execute_command(user_input, state)
            process_result(user_input, state, output, command_history)

        except KeyboardInterrupt:
            if is_windows:
                # On Windows, Ctrl+C cancels the current input line, show prompt again
                print("^C")
                continue
            else:
                # On Unix, Ctrl+C exits the shell as before
                exit_shell(state)
        except EOFError:
            # Ctrl+D: exit shell cleanly
            exit_shell(state)
|
|
1103
|
-
|
|
1104
|
-
def run_non_interactive(command_history: CommandHistory, initial_state: ShellState):
    """Process commands from stdin one line at a time (piped/scripted use).

    Blank lines are skipped; 'exit'/'quit' stops the loop. Each command runs
    through ``execute_command`` and its output is printed raw — streamed
    generators are drained chunk by chunk, anything else is printed directly.
    """
    state = initial_state

    for raw_line in sys.stdin:
        cmd = raw_line.strip()
        if not cmd:
            continue
        if cmd.lower() in ["exit", "quit"]:
            break

        state.current_path = os.getcwd()
        state, output = execute_command(cmd, state)
        # Non-interactive: print the raw output without markdown rendering.
        streaming = state.stream_output and isgenerator(output)
        if streaming:
            for piece in output:
                print(str(piece), end='')
            print()
        elif output is not None:
            print(output)
|
|
1125
|
-
|
|
1126
|
-
def main() -> None:
    """CLI entry point for npcsh.

    Parses flags (-v/--version, -c/--command), sets up the shell, then runs
    exactly one of: a single command (-c), non-interactive stdin mode (when
    stdin is not a TTY), or the interactive REPL.
    """
    parser = argparse.ArgumentParser(description="npcsh - An NPC-powered shell.")
    parser.add_argument(
        "-v", "--version", action="version", version=f"npcsh version {VERSION}"
    )
    parser.add_argument(
        "-c", "--command", type=str, help="Execute a single command and exit."
    )
    args = parser.parse_args()

    command_history, team, default_npc = setup_shell()

    # NOTE(review): `initial_state` appears to be a module-level ShellState
    # defined earlier in this file — confirm it is initialized before main().
    initial_state.npc = default_npc
    initial_state.team = team
    if args.command:
        # One-shot mode: run the command and print its output.
        state = initial_state
        state.current_path = os.getcwd()
        final_state, output = execute_command(args.command, state)
        if final_state.stream_output and isgenerator(output):
            for chunk in output: print(str(chunk), end='')
            print()
        elif output is not None:
            print(output)

    elif not sys.stdin.isatty():
        # Input is piped in: consume stdin line by line.
        run_non_interactive(command_history, initial_state)
    else:
        run_repl(command_history, initial_state)
|
|
1156
|
-
|
|
1157
|
-
# Script entry point: launch the npcsh CLI when executed directly.
if __name__ == "__main__":
    main()
|