npcsh 0.3.30__py3-none-any.whl → 0.3.32__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- npcsh/audio.py +540 -181
- npcsh/audio_gen.py +1 -0
- npcsh/cli.py +37 -19
- npcsh/conversation.py +14 -251
- npcsh/dataframes.py +13 -5
- npcsh/helpers.py +5 -0
- npcsh/image.py +2 -4
- npcsh/image_gen.py +38 -38
- npcsh/knowledge_graph.py +4 -4
- npcsh/llm_funcs.py +517 -349
- npcsh/npc_compiler.py +44 -23
- npcsh/npc_sysenv.py +5 -0
- npcsh/npc_team/npcsh.ctx +8 -2
- npcsh/npc_team/tools/generic_search.tool +9 -1
- npcsh/plonk.py +2 -2
- npcsh/response.py +131 -482
- npcsh/search.py +20 -9
- npcsh/serve.py +210 -203
- npcsh/shell.py +78 -80
- npcsh/shell_helpers.py +513 -102
- npcsh/stream.py +87 -554
- npcsh/video.py +5 -2
- npcsh/video_gen.py +69 -0
- {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/generic_search.tool +9 -1
- {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/npcsh.ctx +8 -2
- npcsh-0.3.32.dist-info/METADATA +779 -0
- npcsh-0.3.32.dist-info/RECORD +78 -0
- npcsh-0.3.30.dist-info/METADATA +0 -1862
- npcsh-0.3.30.dist-info/RECORD +0 -76
- {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/bash_executer.tool +0 -0
- {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/calculator.tool +0 -0
- {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/celona.npc +0 -0
- {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/code_executor.tool +0 -0
- {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/corca.npc +0 -0
- {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/eriane.npc +0 -0
- {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/foreman.npc +0 -0
- {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/image_generation.tool +0 -0
- {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/lineru.npc +0 -0
- {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/local_search.tool +0 -0
- {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/maurawa.npc +0 -0
- {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/npcsh_executor.tool +0 -0
- {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/raone.npc +0 -0
- {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/screen_cap.tool +0 -0
- {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/sibiji.npc +0 -0
- {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/slean.npc +0 -0
- {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/sql_executor.tool +0 -0
- {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/test_pipeline.py +0 -0
- {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/turnic.npc +0 -0
- {npcsh-0.3.30.data → npcsh-0.3.32.data}/data/npcsh/npc_team/welxor.npc +0 -0
- {npcsh-0.3.30.dist-info → npcsh-0.3.32.dist-info}/WHEEL +0 -0
- {npcsh-0.3.30.dist-info → npcsh-0.3.32.dist-info}/entry_points.txt +0 -0
- {npcsh-0.3.30.dist-info → npcsh-0.3.32.dist-info}/licenses/LICENSE +0 -0
- {npcsh-0.3.30.dist-info → npcsh-0.3.32.dist-info}/top_level.txt +0 -0
npcsh/npc_compiler.py
CHANGED
@@ -19,7 +19,7 @@ from collections import defaultdict, deque
 import traceback
 
 # Importing functions
-from .llm_funcs import (
+from npcsh.llm_funcs import (
     get_llm_response,
     get_stream,
     process_data_output,
@@ -29,9 +29,9 @@ from .llm_funcs import (
     handle_tool_call,
     execute_llm_command,
 )
-from .helpers import get_npc_path
-from .search import search_web, rag_search
-from .image import capture_screenshot, analyze_image_base
+from npcsh.helpers import get_npc_path
+from npcsh.search import search_web, rag_search
+from npcsh.image import capture_screenshot, analyze_image_base
 
 
 def create_or_replace_table(db_path: str, table_name: str, data: pd.DataFrame):
@@ -501,6 +501,18 @@ class SilentUndefined(Undefined):
         return ""
 
 
+class Context:
+    def __init__(self, context=None, mcp_servers=None, databases=None, files=None):
+        self.context = context
+        self.mcp_servers = mcp_servers
+        self.databases = databases
+        self.files = files
+
+    def load_context_file(self, path):
+        with open(path, "r") as f:
+            self.context = yaml.safe_load(f)
+
+
 class Tool:
     def __init__(self, tool_data: dict):
         if not tool_data or not isinstance(tool_data, dict):
@@ -567,10 +579,14 @@ class Tool:
             if i == len(self.steps) - 1 and stream: # this was causing the big issue X:
                 print("tool successful, passing output to stream")
                 return context
-
+        print("CONTEXT AFTER TOOL CALLS, ", context)
         if context.get("output") is not None:
+            print("output from tool: ", context.get("output"))
+            if not isinstance(context.get("output"), str):
+                return str(context.get("output"))
             return context.get("output")
         elif context.get("llm_response") is not None:
+            print("output from tool: ", context.get("llm_response"))
             return context.get("llm_response")
 
     def execute_step(
@@ -601,6 +617,8 @@ class Tool:
         except:
             print("error rendering engine")
             rendered_engine = engine
+        print(f"proceeding with engine: {rendered_engine}")
+        print("rendered code: ", rendered_code)
         if rendered_engine == "natural":
             if len(rendered_code.strip()) > 0:
                 # print(f"Executing natural language step: {rendered_code}")
@@ -645,25 +663,27 @@ class Tool:
             }
             new_locals = {}
             exec_env = context.copy()
-            try:
-
-
-
-
-
-
-
-
-
-
-
-
-                if "output"
-
-
-
+            # try:
+            exec(rendered_code, exec_globals, new_locals)
+            exec_env.update(new_locals)
+
+            context.update(exec_env)
+
+            exec_env.update(new_locals)
+            context.update(exec_env)
+            # Add this line to explicitly copy the output
+            if "output" in new_locals:
+                context["output"] = new_locals["output"]
+
+            # Then your existing code
+            if "output" in exec_env:
+                if exec_env["output"] is not None:
+                    context["results"] = exec_env["output"]
+                    print("result from code execution: ", exec_env["output"])
+            # else:
+            #     context["output"] = str(exec_env)
 
+            """
             except NameError as e:
                 tb_lines = traceback.format_exc().splitlines()
                 limited_tb = (
@@ -706,6 +726,7 @@ class Tool:
                 return {
                     "output": f"Error executing Python code : {e} with traceback: {limited_tb}"
                 }
+            """
         return context
 
     def to_dict(self):
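The new Context class is a thin container for the same fields that npcsh.ctx now declares (context, mcp_servers, databases, files). A minimal usage sketch, assuming the class is importable from npcsh.npc_compiler and that a .ctx file exists at the given path (both assumptions, not shown in the diff):

# Hypothetical usage of the Context class added in this release.
from npcsh.npc_compiler import Context

ctx = Context(
    databases=["~/npcsh_history.db"],        # mirrors the `databases` key added to npcsh.ctx
    mcp_servers=["/path/to/mcp/server.py"],  # mirrors the `mcp_servers` key
)
# load_context_file replaces ctx.context with the parsed YAML of a .ctx file
ctx.load_context_file("npc_team/npcsh.ctx")
print(ctx.context)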
npcsh/npc_sysenv.py
CHANGED
@@ -374,6 +374,11 @@ NPCSH_IMAGE_GEN_MODEL = os.environ.get(
     "NPCSH_IMAGE_GEN_MODEL", "runwayml/stable-diffusion-v1-5"
 )
 NPCSH_IMAGE_GEN_PROVIDER = os.environ.get("NPCSH_IMAGE_GEN_PROVIDER", "diffusers")
+NPCSH_VIDEO_GEN_MODEL = os.environ.get(
+    "NPCSH_VIDEO_GEN_MODEL", "damo-vilab/text-to-video-ms-1.7b"
+)
+NPCSH_VIDEO_GEN_PROVIDER = os.environ.get("NPCSH_VIDEO_GEN_PROVIDER", "diffusers")
+
 NPCSH_EMBEDDING_MODEL = os.environ.get("NPCSH_EMBEDDING_MODEL", "nomic-embed-text")
 NPCSH_EMBEDDING_PROVIDER = os.environ.get("NPCSH_EMBEDDING_PROVIDER", "ollama")
 NPCSH_REASONING_MODEL = os.environ.get("NPCSH_REASONING_MODEL", "deepseek-r1")
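The two new video-generation settings follow the same os.environ.get pattern as the existing image-generation ones. A sketch of overriding them before import, using the default values from the diff:

# Set the variables before npc_sysenv is imported to override the defaults.
import os

os.environ.setdefault("NPCSH_VIDEO_GEN_MODEL", "damo-vilab/text-to-video-ms-1.7b")
os.environ.setdefault("NPCSH_VIDEO_GEN_PROVIDER", "diffusers")

from npcsh.npc_sysenv import NPCSH_VIDEO_GEN_MODEL, NPCSH_VIDEO_GEN_PROVIDER
print(NPCSH_VIDEO_GEN_MODEL, NPCSH_VIDEO_GEN_PROVIDER)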
npcsh/npc_team/npcsh.ctx
CHANGED
@@ -1,5 +1,11 @@
-
+context: |
   The npcsh NPC team is devoted to providing a safe and helpful
   environment for users where they can work and be as successful as possible.
   npcsh is a command-line tool that makes it easy for users to harness
-  the power of LLMs from a command line shell.
+  the power of LLMs from a command line shell.
+databases:
+  - ~/npcsh_history.db
+mcp_servers:
+  - /path/to/mcp/server.py
+  - @npm for server
+
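With this change npcsh.ctx becomes structured YAML rather than a bare paragraph, so parsing it (as Context.load_context_file does) yields a mapping with the new keys. A small sketch under that assumption; it reproduces a subset of the file and omits the "@npm for server" placeholder because a plain YAML scalar cannot start with "@":

# Parse a reduced copy of the updated npcsh.ctx to show the new structure.
import yaml

raw = """\
context: |
  The npcsh NPC team is devoted to providing a safe and helpful
  environment for users where they can work and be as successful as possible.
databases:
  - ~/npcsh_history.db
mcp_servers:
  - /path/to/mcp/server.py
"""
data = yaml.safe_load(raw)
print(list(data))          # ['context', 'databases', 'mcp_servers']
print(data["databases"])   # ['~/npcsh_history.db']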
npcsh/npc_team/tools/generic_search.tool
CHANGED
@@ -2,13 +2,21 @@ tool_name: "internet_search"
 description: Searches the web for information based on a query in order to verify timiely details (e.g. current events) or to corroborate information in uncertain situations. Should be mainly only used when users specifically request a search, otherwise an LLMs basic knowledge should be sufficient.
 inputs:
   - query
+  - provider: ''
 steps:
   - engine: "python"
     code: |
       from npcsh.search import search_web
+      from npcsh.npc_sysenv import NPCSH_SEARCH_PROVIDER
       query = "{{ query }}"
+      provider = '{{ provider }}'
+      if provider.strip() != '':
+          results = search_web(query, num_results=5, provider = provider)
+      else:
+          results = search_web(query, num_results=5, provider = NPCSH_SEARCH_PROVIDER)
+
       print('QUERY in tool', query)
-      results = search_web(query, num_results=5)
+      results = search_web(query, num_results=5, provider = NPCSH_SEARCH_PROVIDER)
       print('RESULTS in tool', results)
   - engine: "natural"
     code: |
CHANGED
|
@@ -6,8 +6,8 @@ try:
|
|
|
6
6
|
except KeyError as e:
|
|
7
7
|
print(f"Could not load pyautogui due to the following error: {e}")
|
|
8
8
|
|
|
9
|
-
from .image import capture_screenshot
|
|
10
|
-
from .llm_funcs import get_llm_response
|
|
9
|
+
from npcsh.image import capture_screenshot
|
|
10
|
+
from npcsh.llm_funcs import get_llm_response
|
|
11
11
|
|
|
12
12
|
import subprocess
|
|
13
13
|
import os
|