npcsh 0.3.31__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff compares the content of publicly available package versions as released to their respective public registries. It is provided for informational purposes only.
- npcsh/_state.py +942 -0
- npcsh/alicanto.py +1074 -0
- npcsh/guac.py +785 -0
- npcsh/mcp_helpers.py +357 -0
- npcsh/mcp_npcsh.py +822 -0
- npcsh/mcp_server.py +184 -0
- npcsh/npc.py +218 -0
- npcsh/npcsh.py +1161 -0
- npcsh/plonk.py +387 -269
- npcsh/pti.py +234 -0
- npcsh/routes.py +958 -0
- npcsh/spool.py +315 -0
- npcsh/wander.py +550 -0
- npcsh/yap.py +573 -0
- npcsh-1.0.0.dist-info/METADATA +596 -0
- npcsh-1.0.0.dist-info/RECORD +21 -0
- {npcsh-0.3.31.dist-info → npcsh-1.0.0.dist-info}/WHEEL +1 -1
- npcsh-1.0.0.dist-info/entry_points.txt +9 -0
- {npcsh-0.3.31.dist-info → npcsh-1.0.0.dist-info}/licenses/LICENSE +1 -1
- npcsh/audio.py +0 -210
- npcsh/cli.py +0 -545
- npcsh/command_history.py +0 -566
- npcsh/conversation.py +0 -291
- npcsh/data_models.py +0 -46
- npcsh/dataframes.py +0 -163
- npcsh/embeddings.py +0 -168
- npcsh/helpers.py +0 -641
- npcsh/image.py +0 -298
- npcsh/image_gen.py +0 -79
- npcsh/knowledge_graph.py +0 -1006
- npcsh/llm_funcs.py +0 -2027
- npcsh/load_data.py +0 -83
- npcsh/main.py +0 -5
- npcsh/model_runner.py +0 -189
- npcsh/npc_compiler.py +0 -2870
- npcsh/npc_sysenv.py +0 -383
- npcsh/npc_team/assembly_lines/test_pipeline.py +0 -181
- npcsh/npc_team/corca.npc +0 -13
- npcsh/npc_team/foreman.npc +0 -7
- npcsh/npc_team/npcsh.ctx +0 -11
- npcsh/npc_team/sibiji.npc +0 -4
- npcsh/npc_team/templates/analytics/celona.npc +0 -0
- npcsh/npc_team/templates/hr_support/raone.npc +0 -0
- npcsh/npc_team/templates/humanities/eriane.npc +0 -4
- npcsh/npc_team/templates/it_support/lineru.npc +0 -0
- npcsh/npc_team/templates/marketing/slean.npc +0 -4
- npcsh/npc_team/templates/philosophy/maurawa.npc +0 -0
- npcsh/npc_team/templates/sales/turnic.npc +0 -4
- npcsh/npc_team/templates/software/welxor.npc +0 -0
- npcsh/npc_team/tools/bash_executer.tool +0 -32
- npcsh/npc_team/tools/calculator.tool +0 -8
- npcsh/npc_team/tools/code_executor.tool +0 -16
- npcsh/npc_team/tools/generic_search.tool +0 -27
- npcsh/npc_team/tools/image_generation.tool +0 -25
- npcsh/npc_team/tools/local_search.tool +0 -149
- npcsh/npc_team/tools/npcsh_executor.tool +0 -9
- npcsh/npc_team/tools/screen_cap.tool +0 -27
- npcsh/npc_team/tools/sql_executor.tool +0 -26
- npcsh/response.py +0 -623
- npcsh/search.py +0 -248
- npcsh/serve.py +0 -1460
- npcsh/shell.py +0 -538
- npcsh/shell_helpers.py +0 -3529
- npcsh/stream.py +0 -700
- npcsh/video.py +0 -49
- npcsh-0.3.31.data/data/npcsh/npc_team/bash_executer.tool +0 -32
- npcsh-0.3.31.data/data/npcsh/npc_team/calculator.tool +0 -8
- npcsh-0.3.31.data/data/npcsh/npc_team/celona.npc +0 -0
- npcsh-0.3.31.data/data/npcsh/npc_team/code_executor.tool +0 -16
- npcsh-0.3.31.data/data/npcsh/npc_team/corca.npc +0 -13
- npcsh-0.3.31.data/data/npcsh/npc_team/eriane.npc +0 -4
- npcsh-0.3.31.data/data/npcsh/npc_team/foreman.npc +0 -7
- npcsh-0.3.31.data/data/npcsh/npc_team/generic_search.tool +0 -27
- npcsh-0.3.31.data/data/npcsh/npc_team/image_generation.tool +0 -25
- npcsh-0.3.31.data/data/npcsh/npc_team/lineru.npc +0 -0
- npcsh-0.3.31.data/data/npcsh/npc_team/local_search.tool +0 -149
- npcsh-0.3.31.data/data/npcsh/npc_team/maurawa.npc +0 -0
- npcsh-0.3.31.data/data/npcsh/npc_team/npcsh.ctx +0 -11
- npcsh-0.3.31.data/data/npcsh/npc_team/npcsh_executor.tool +0 -9
- npcsh-0.3.31.data/data/npcsh/npc_team/raone.npc +0 -0
- npcsh-0.3.31.data/data/npcsh/npc_team/screen_cap.tool +0 -27
- npcsh-0.3.31.data/data/npcsh/npc_team/sibiji.npc +0 -4
- npcsh-0.3.31.data/data/npcsh/npc_team/slean.npc +0 -4
- npcsh-0.3.31.data/data/npcsh/npc_team/sql_executor.tool +0 -26
- npcsh-0.3.31.data/data/npcsh/npc_team/test_pipeline.py +0 -181
- npcsh-0.3.31.data/data/npcsh/npc_team/turnic.npc +0 -4
- npcsh-0.3.31.data/data/npcsh/npc_team/welxor.npc +0 -0
- npcsh-0.3.31.dist-info/METADATA +0 -1853
- npcsh-0.3.31.dist-info/RECORD +0 -76
- npcsh-0.3.31.dist-info/entry_points.txt +0 -3
- {npcsh-0.3.31.dist-info → npcsh-1.0.0.dist-info}/top_level.txt +0 -0
npcsh/pti.py
ADDED
@@ -0,0 +1,234 @@

# pti
import json
from typing import Dict, List, Optional, Any, Generator
import os
from npcpy.memory.command_history import (
    CommandHistory,
    save_attachment_to_message,
    start_new_conversation,
    save_conversation_message,
)
from npcpy.npc_sysenv import (
    NPCSH_REASONING_MODEL,
    NPCSH_REASONING_PROVIDER,
    NPCSH_CHAT_MODEL,
    NPCSH_CHAT_PROVIDER,
    NPCSH_API_URL,
    NPCSH_STREAM_OUTPUT,
    print_and_process_stream_with_markdown,
)
from npcpy.llm_funcs import get_llm_response, handle_request_input
from npcpy.npc_compiler import NPC
from npcpy.data.load import load_csv, load_pdf
from npcpy.data.text import rag_search


def enter_reasoning_human_in_the_loop(
    user_input=None,
    messages: List[Dict[str, str]] = None,
    reasoning_model: str = NPCSH_REASONING_MODEL,
    reasoning_provider: str = NPCSH_REASONING_PROVIDER,
    files: List = None,
    npc: Any = None,
    conversation_id: str = None,
    answer_only: bool = False,
    context=None,
):
    """
    Stream responses while checking for think tokens and handling human input when needed.

    Args:
        user_input: The user's query; prompted for interactively if None
        messages: List of conversation messages
        reasoning_model: LLM model to use
        reasoning_provider: Model provider
        files: Optional files to load into the RAG context
        npc: NPC instance if applicable
        conversation_id: Existing conversation ID; a new one is created if not provided
        answer_only: Answer directly, skipping the think/request-for-input protocol
    """
    loaded_content = {}  # maps file path -> loaded content

    # Create a conversation ID if not provided
    if not conversation_id:
        conversation_id = start_new_conversation()

    command_history = CommandHistory()

    # Load specified files, if any
    if files:
        for file in files:
            extension = os.path.splitext(file)[1].lower()
            try:
                if extension == ".pdf":
                    content = load_pdf(file)["texts"].iloc[0]
                elif extension == ".csv":
                    content = load_csv(file)
                else:
                    print(f"Unsupported file type: {file}")
                    continue
                loaded_content[file] = content
                print(f"Loaded content from: {file}")
            except Exception as e:
                print(f"Error loading {file}: {e}")

    try:
        while True:
            # Fold any loaded file content relevant to the query into the prompt
            if loaded_content:
                context_content = ""
                for filename, content in loaded_content.items():
                    retrieved_docs = rag_search(user_input, content)
                    if retrieved_docs:
                        context_content += (
                            f"\n\nLoaded content from: {filename}\n{content}\n\n"
                        )
                if len(context_content) > 0:
                    user_input += f"""
Here is the loaded content that may be relevant to your query:
{context_content}
Please reference it explicitly in your response and use it for answering.
"""

            if answer_only:
                # Plain streamed answer: no think tags, no request-for-input protocol
                response = get_llm_response(
                    user_input,
                    model=reasoning_model,
                    provider=reasoning_provider,
                    messages=messages,
                    stream=True,
                )
                assistant_reply, messages = response["response"], response["messages"]
                assistant_reply = print_and_process_stream_with_markdown(
                    assistant_reply, reasoning_model, reasoning_provider
                )
                messages.append({"role": "assistant", "content": assistant_reply})
                return enter_reasoning_human_in_the_loop(
                    user_input=None,
                    messages=messages,
                    reasoning_model=reasoning_model,
                    reasoning_provider=reasoning_provider,
                    answer_only=False,
                )
            else:
                message = (
                    "Think first though and use <think> tags in your chain of thought. "
                    "Once finished, either answer plainly or write a request for input "
                    "by beginning with the <request_for_input> tag and closing it with "
                    "a </request_for_input>."
                )

            if user_input is None:
                user_input = input("🐻‍❄️>")

            message_id = save_conversation_message(
                command_history,
                conversation_id,
                "user",
                user_input,
                wd=os.getcwd(),
                model=reasoning_model,
                provider=reasoning_provider,
                npc=npc.name if npc else None,
            )
            response = get_llm_response(
                user_input + message,
                model=reasoning_model,
                provider=reasoning_provider,
                messages=messages,
                stream=True,
            )

            assistant_reply, messages = response["response"], response["messages"]
            thoughts = []
            response_chunks = []
            in_think_block = False  # inside the thinking chain generated after reasoning
            thinking = False  # provider emitted native reasoning content

            for chunk in assistant_reply:
                if thinking:
                    if not in_think_block:
                        in_think_block = True
                try:
                    if reasoning_provider == "ollama":
                        chunk_content = chunk.get("message", {}).get("content", "")
                    else:
                        chunk_content = ""
                        reasoning_content = ""
                        for c in chunk.choices:
                            if hasattr(c.delta, "reasoning_content"):
                                reasoning_content += c.delta.reasoning_content
                        if reasoning_content:
                            thinking = True
                            chunk_content = reasoning_content
                        chunk_content += "".join(
                            choice.delta.content
                            for choice in chunk.choices
                            if choice.delta.content is not None
                        )
                    response_chunks.append(chunk_content)
                    print(chunk_content, end="")
                    combined_text = "".join(response_chunks)

                    if in_think_block:
                        if "</think>" in combined_text:
                            in_think_block = False
                        thoughts.append(chunk_content)

                    if "</request_for_input>" in combined_text:
                        # Process the LLM's input request
                        request_text = "".join(thoughts)

                        print("\nPlease provide the requested information: ")
                        user_input = input("🐻‍❄️>")

                        messages.append({"role": "assistant", "content": request_text})
                        print("\n[Continuing with provided information...]\n")
                        return enter_reasoning_human_in_the_loop(
                            user_input=user_input,
                            messages=messages,
                            reasoning_model=reasoning_model,
                            reasoning_provider=reasoning_provider,
                            npc=npc,
                            answer_only=True,
                        )

                except KeyboardInterrupt:
                    user_interrupt = input(
                        "\n[Stream interrupted by user]\n Enter your additional input: "
                    )
                    # Add the interruption to messages and restart the stream
                    messages.append(
                        {"role": "user", "content": f"[INTERRUPT] {user_interrupt}"}
                    )
                    print("\n[Continuing with added context...]\n")

    except KeyboardInterrupt:
        user_interrupt = input(
            "\n[Stream interrupted by user]\n 🔴🔴🔴🔴\nEnter your additional input: "
        )
        # Add the interruption to messages and restart the stream
        messages.append(
            {"role": "user", "content": f"[INTERRUPT] {user_interrupt}"}
        )
        print("\n[Continuing with added context...]\n")

    return {"messages": messages}


def main():
    import argparse

    parser = argparse.ArgumentParser(description="Enter PTI mode for chatting with an LLM")
    parser.add_argument("--npc", default="~/.npcsh/npc_team/frederic.npc", help="Path to NPC File")
    parser.add_argument("--model", default=NPCSH_REASONING_MODEL, help="Model to use")
    parser.add_argument("--provider", default=NPCSH_REASONING_PROVIDER, help="Provider to use")
    parser.add_argument("--files", nargs="*", help="Files to load into context")
    args = parser.parse_args()

    npc = NPC(file=args.npc)
    enter_reasoning_human_in_the_loop(
        messages=[],
        npc=npc,
        reasoning_model=args.model,
        reasoning_provider=args.provider,
        files=args.files,
    )


if __name__ == "__main__":
    main()
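
For orientation, `pti.py` is also runnable as a script, and `main()` shows the intended wiring. Below is a minimal usage sketch, not part of the package: it assumes npcsh 1.0.0 and its npcpy dependency are installed, and the filename `report.pdf` is hypothetical; the session still prompts interactively at the `🐻‍❄️>` prompt.

    from npcsh.pti import enter_reasoning_human_in_the_loop

    # Start a human-in-the-loop reasoning session with a PDF folded into
    # the RAG context; returns the accumulated conversation messages.
    result = enter_reasoning_human_in_the_loop(
        user_input="Summarize the key findings.",
        messages=[],
        files=["report.pdf"],
    )
    print(result["messages"])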