npcsh 0.3.25__py3-none-any.whl → 0.3.27__py3-none-any.whl
This diff compares publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- npcsh/conversation.py +2 -2
- npcsh/embeddings.py +2 -1
- npcsh/image_gen.py +2 -1
- npcsh/knowledge_graph.py +9 -2
- npcsh/llm_funcs.py +40 -43
- npcsh/npc_sysenv.py +6 -2
- npcsh/response.py +2 -2
- npcsh/search.py +6 -2
- npcsh/shell_helpers.py +3 -4
- npcsh/stream.py +4 -2
- {npcsh-0.3.25.dist-info → npcsh-0.3.27.dist-info}/METADATA +690 -686
- {npcsh-0.3.25.dist-info → npcsh-0.3.27.dist-info}/RECORD +35 -35
- {npcsh-0.3.25.dist-info → npcsh-0.3.27.dist-info}/WHEEL +1 -1
- {npcsh-0.3.25.data → npcsh-0.3.27.data}/data/npcsh/npc_team/calculator.tool +0 -0
- {npcsh-0.3.25.data → npcsh-0.3.27.data}/data/npcsh/npc_team/celona.npc +0 -0
- {npcsh-0.3.25.data → npcsh-0.3.27.data}/data/npcsh/npc_team/corca.npc +0 -0
- {npcsh-0.3.25.data → npcsh-0.3.27.data}/data/npcsh/npc_team/eriane.npc +0 -0
- {npcsh-0.3.25.data → npcsh-0.3.27.data}/data/npcsh/npc_team/foreman.npc +0 -0
- {npcsh-0.3.25.data → npcsh-0.3.27.data}/data/npcsh/npc_team/generic_search.tool +0 -0
- {npcsh-0.3.25.data → npcsh-0.3.27.data}/data/npcsh/npc_team/image_generation.tool +0 -0
- {npcsh-0.3.25.data → npcsh-0.3.27.data}/data/npcsh/npc_team/lineru.npc +0 -0
- {npcsh-0.3.25.data → npcsh-0.3.27.data}/data/npcsh/npc_team/local_search.tool +0 -0
- {npcsh-0.3.25.data → npcsh-0.3.27.data}/data/npcsh/npc_team/maurawa.npc +0 -0
- {npcsh-0.3.25.data → npcsh-0.3.27.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
- {npcsh-0.3.25.data → npcsh-0.3.27.data}/data/npcsh/npc_team/raone.npc +0 -0
- {npcsh-0.3.25.data → npcsh-0.3.27.data}/data/npcsh/npc_team/screen_cap.tool +0 -0
- {npcsh-0.3.25.data → npcsh-0.3.27.data}/data/npcsh/npc_team/sibiji.npc +0 -0
- {npcsh-0.3.25.data → npcsh-0.3.27.data}/data/npcsh/npc_team/slean.npc +0 -0
- {npcsh-0.3.25.data → npcsh-0.3.27.data}/data/npcsh/npc_team/sql_executor.tool +0 -0
- {npcsh-0.3.25.data → npcsh-0.3.27.data}/data/npcsh/npc_team/test_pipeline.py +0 -0
- {npcsh-0.3.25.data → npcsh-0.3.27.data}/data/npcsh/npc_team/turnic.npc +0 -0
- {npcsh-0.3.25.data → npcsh-0.3.27.data}/data/npcsh/npc_team/welxor.npc +0 -0
- {npcsh-0.3.25.dist-info → npcsh-0.3.27.dist-info}/entry_points.txt +0 -0
- {npcsh-0.3.25.dist-info → npcsh-0.3.27.dist-info}/licenses/LICENSE +0 -0
- {npcsh-0.3.25.dist-info → npcsh-0.3.27.dist-info}/top_level.txt +0 -0
npcsh/conversation.py
CHANGED
@@ -7,9 +7,8 @@
 from typing import Any, Dict, Generator, List
 import os
 import anthropic
-
+
 from openai import OpenAI
-from diffusers import StableDiffusionPipeline
 from google.generativeai import types
 import google.generativeai as genai
 from .npc_sysenv import get_system_message
@@ -34,6 +33,7 @@ def get_ollama_conversation(
     Returns:
         List[Dict[str, str]]: The list of messages in the conversation.
     """
+    import ollama

     messages_copy = messages.copy()
     if messages_copy[0]["role"] != "system":
npcsh/embeddings.py
CHANGED
@@ -12,7 +12,6 @@ from npcsh.npc_sysenv import (
     NPCSH_EMBEDDING_PROVIDER,
     chroma_client,
 )
-import ollama
 from openai import OpenAI
 import anthropic

@@ -21,6 +20,8 @@ def get_ollama_embeddings(
     texts: List[str], model: str = "nomic-embed-text"
 ) -> List[List[float]]:
     """Generate embeddings using Ollama."""
+    import ollama
+
     embeddings = []
     for text in texts:
         response = ollama.embeddings(model=model, prompt=text)
npcsh/image_gen.py
CHANGED
@@ -10,7 +10,6 @@
 import os

 from openai import OpenAI
-from diffusers import StableDiffusionPipeline


 def generate_image_openai(
@@ -66,6 +65,8 @@ def generate_image_hf_diffusion(
         PIL.Image: The generated image.
     """
     # Load the Stable Diffusion pipeline
+    from diffusers import StableDiffusionPipeline
+
     pipe = StableDiffusionPipeline.from_pretrained(model)
     pipe = pipe.to(device)

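The `diffusers` import likewise moves inside `generate_image_hf_diffusion`, so the heavy diffusers/torch stack loads only when Stable Diffusion generation is actually requested. A sketch of the resulting function (the default model id and device are assumptions, not the package's values):

```python
def generate_image_hf_diffusion(prompt: str, model: str = "runwayml/stable-diffusion-v1-5", device: str = "cpu"):
    """Generate one image with a Stable Diffusion pipeline."""
    from diffusers import StableDiffusionPipeline  # heavy import deferred to call time

    # Load the pipeline and move it to the requested device
    pipe = StableDiffusionPipeline.from_pretrained(model)
    pipe = pipe.to(device)
    return pipe(prompt).images[0]
```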
npcsh/knowledge_graph.py
CHANGED
@@ -3,7 +3,11 @@ import os
 import datetime

 import numpy as np
-
+
+try:
+    import kuzu
+except ModuleNotFoundError:
+    print("kuzu not installed")
 from typing import Optional, Dict, List, Union, Tuple


@@ -581,7 +585,10 @@ def visualize_graph(conn):
     plt.show()


-
+try:
+    import chromadb
+except ModuleNotFoundError:
+    print("chromadb not installed")
 import numpy as np
 import os
 import datetime
npcsh/llm_funcs.py
CHANGED
@@ -4,17 +4,10 @@ import requests
 import os
 import json
 import PIL
-from PIL import Image

 import sqlite3
 from datetime import datetime
 from typing import List, Dict, Any, Optional, Union, Generator
-import typing_extensions as typing
-from pydantic import BaseModel, Field
-
-import base64
-import re
-import io


 from jinja2 import Environment, FileSystemLoader, Template, Undefined
@@ -22,15 +15,6 @@ from jinja2 import Environment, FileSystemLoader, Template, Undefined
 import pandas as pd
 import numpy as np

-# chroma
-import chromadb
-from chromadb import Client
-
-# llm providers
-import anthropic
-import ollama  # Add to setup.py if missing
-from openai import OpenAI
-from diffusers import StableDiffusionPipeline
 from google.generativeai import types
 import google.generativeai as genai

@@ -54,7 +38,6 @@ from .npc_sysenv import (
     NPCSH_API_URL,
     NPCSH_VISION_MODEL,
     NPCSH_VISION_PROVIDER,
-    chroma_client,
     available_reasoning_models,
     available_chat_models,
 )
@@ -150,25 +133,30 @@ def generate_image(
         # image = generate_image_openai_like(prompt, model, npc.api_url, openai_api_key)
     elif provider == "diffusers":
         image = generate_image_hf_diffusion(prompt, model)
+    else:
+        image = None
     # save image
     # check if image is a PIL image
     if isinstance(image, PIL.Image.Image):
         image.save(filename)
         return filename

-
-
-
-
-
-
+    else:
+        try:
+            # image is at a private url
+            response = requests.get(image.data[0].url)
+            with open(filename, "wb") as file:
+                file.write(response.content)
+            from PIL import Image

-
-
-
-
+            img = Image.open(filename)
+            img.show()
+            # console = Console()
+            # console.print(Image.from_path(filename))
+            return filename

-
+        except AttributeError as e:
+            print(f"Error saving image: {e}")


 def get_embeddings(
@@ -511,7 +499,6 @@ def execute_llm_question(
         # messages.append({"role": "assistant", "content": output})

     else:
-
         response = get_conversation(
             messages,
             model=model,
@@ -829,14 +816,29 @@ ReAct choices then will enter reasoning flow

    Available tools:
    """
-
-    if npc.tools_dict is None or npc.tools_dict == {}:
+
+    if (npc.tools_dict is None or npc.tools_dict == {}) & (
+        npc.all_tools_dict is None or npc.all_tools_dict == {}
+    ):
         prompt += "No tools available. Do not invoke tools."
     else:
-
+        tools_set = {}
+
+        if npc.tools_dict is not None:
+            for tool_name, tool in npc.tools_dict.items():
+                if tool_name not in tools_set:
+                    tools_set[tool_name] = tool.description
+        if npc.all_tools_dict is not None:
+            for tool_name, tool in npc.all_tools_dict.items():
+                if tool_name not in tools_set:
+                    tools_set[tool_name] = tool.description
+
+        for tool_name, tool_description in tools_set.items():
             prompt += f"""
-
-
+
+            {tool_name} : {tool_description} \n
+            """
+
     prompt += f"""
     Available NPCs for alternative answers:

@@ -870,7 +872,7 @@ ReAct choices then will enter reasoning flow
     Relevant shared context for the npc:
     {npc.shared_context}
     """
-    print("shared_context: " + str(npc.shared_context))
+    # print("shared_context: " + str(npc.shared_context))
     # print(prompt)

     prompt += f"""
@@ -1015,7 +1017,6 @@ ReAct choices then will enter reasoning flow
         return {"messages": messages, "output": output}

     elif action == "answer_question":
-
         if ENTER_REASONING_FLOW:
             print("entering reasoning flow")
             result = enter_reasoning_human_in_the_loop(
@@ -1101,9 +1102,9 @@ ReAct choices then will enter reasoning flow
     elif action == "execute_sequence":
         tool_names = response_content_parsed.get("tool_name")
         npc_names = response_content_parsed.get("npc_name")
-        print(npc_names)
+        # print(npc_names)
         npcs = []
-        print(tool_names, npc_names)
+        # print(tool_names, npc_names)
         if isinstance(npc_names, list):
             for npc_name in npc_names:
                 for npc_obj in npc.resolved_npcs:
@@ -1192,9 +1193,7 @@ def handle_tool_call(
     """
     # print(npc)
     print("handling tool call")
-    if not npc:
-        print("not available")
-    available_tools = npc.all_tools_dict if npc else None
+    if not npc:
         print(
             f"No tools available for NPC '{npc.name}' or tools_dict is empty. Available tools: {available_tools}"
         )
@@ -1306,7 +1305,6 @@ def handle_tool_call(
     print("Executing tool with input values:", input_values)

     try:
-
         tool_output = tool.execute(
             input_values,
             npc.all_tools_dict,
@@ -1321,7 +1319,6 @@ def handle_tool_call(
         if "Error" in tool_output:
             raise Exception(tool_output)
     except Exception as e:
-
         # diagnose_problem = get_llm_response(
         ## f"""a problem has occurred.
         # Please provide a diagnosis of the problem and a suggested #fix.
npcsh/npc_sysenv.py
CHANGED
@@ -3,7 +3,6 @@ from datetime import datetime
 from typing import Any
 import os
 import io
-import chromadb
 import sqlite3
 from dotenv import load_dotenv
 from PIL import Image
@@ -222,7 +221,12 @@ available_chat_models, available_reasoning_models = get_available_models()

 EMBEDDINGS_DB_PATH = os.path.expanduser("~/npcsh_chroma.db")

-
+try:
+    import chromadb
+
+    chroma_client = chromadb.PersistentClient(path=EMBEDDINGS_DB_PATH)
+except:
+    chroma_client = None


 # Load environment variables from .env file
npcsh/response.py
CHANGED
@@ -2,9 +2,7 @@ from typing import Any, Dict, Generator, List, Union
 from pydantic import BaseModel
 import os
 import anthropic
-import ollama  # Add to setup.py if missing
 from openai import OpenAI
-from diffusers import StableDiffusionPipeline
 from google.generativeai import types
 from google import genai

@@ -143,6 +141,8 @@ def get_ollama_response(
     Returns:
         Dict[str, Any]: The response, optionally including updated messages.
     """
+    import ollama
+
     # try:
     # Prepare the message payload
     system_message = get_system_message(npc) if npc else "You are a helpful assistant."
npcsh/search.py
CHANGED
@@ -162,8 +162,12 @@ def rag_search(

     """
     if embedding_model is None:
-
-
+        try:
+            embedding_model = SentenceTransformer("all-MiniLM-L6-v2")
+        except:
+            raise Exception(
+                "Please install the sentence-transformers library to use this function or provide an embedding transformer model."
+            )
     results = []

     # Compute the embedding of the query
npcsh/shell_helpers.py
CHANGED
@@ -1433,7 +1433,6 @@ def execute_slash_command(
             command_parts, model=model, provider=provider, npc=npc, api_url=api_url
         )
     elif command_name == "help":  # New help command
-
         return {
             "messages": messages,
             "output": get_help(),
@@ -1763,7 +1762,7 @@ def execute_command(
     messages: list = None,
     conversation_id: str = None,
     stream: bool = False,
-    embedding_model
+    embedding_model=None,
 ):
     """
     Function Description:
@@ -1774,7 +1773,7 @@ def execute_command(
         db_path : str : Database path
         npc_compiler : NPCCompiler : NPC compiler
     Keyword Args:
-        embedding_model :
+        embedding_model : Embedding model
         current_npc : NPC : Current NPC
         messages : list : Messages
     Returns:
@@ -2086,7 +2085,7 @@ def execute_command_stream(
     command: str,
     db_path: str,
     npc_compiler: NPCCompiler,
-    embedding_model,
+    embedding_model=None,
     current_npc: NPC = None,
     model: str = None,
     provider: str = None,
npcsh/stream.py
CHANGED
@@ -10,9 +10,7 @@ from npcsh.npc_sysenv import get_system_message
 from typing import Any, Dict, Generator, List
 import os
 import anthropic
-import ollama  # Add to setup.py if missing
 from openai import OpenAI
-from diffusers import StableDiffusionPipeline
 from google import genai

 from google.generativeai import types
@@ -53,6 +51,8 @@ def get_anthropic_stream(
         messages = messages[1:]
     elif npc is not None:
         system_message = get_system_message(npc)
+    else:
+        system_message = "You are a helpful assistant."

     # Preprocess messages to ensure content is a list of dicts
     for message in messages:
@@ -274,6 +274,8 @@ def get_ollama_stream(
     **kwargs,
 ) -> Generator:
     """Streams responses from Ollama, supporting images and tools."""
+    import ollama
+
     messages_copy = messages.copy()

     # Handle images if provided