npcsh 0.3.28__py3-none-any.whl → 0.3.30__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. npcsh/llm_funcs.py +32 -23
  2. npcsh/npc_compiler.py +23 -4
  3. npcsh/npc_team/tools/bash_executer.tool +32 -0
  4. npcsh/npc_team/tools/code_executor.tool +16 -0
  5. npcsh/npc_team/tools/npcsh_executor.tool +9 -0
  6. npcsh/npc_team/tools/sql_executor.tool +2 -2
  7. npcsh/shell.py +19 -17
  8. npcsh/shell_helpers.py +576 -49
  9. npcsh-0.3.30.data/data/npcsh/npc_team/bash_executer.tool +32 -0
  10. npcsh-0.3.30.data/data/npcsh/npc_team/code_executor.tool +16 -0
  11. npcsh-0.3.30.data/data/npcsh/npc_team/npcsh_executor.tool +9 -0
  12. {npcsh-0.3.28.data → npcsh-0.3.30.data}/data/npcsh/npc_team/sql_executor.tool +2 -2
  13. {npcsh-0.3.28.dist-info → npcsh-0.3.30.dist-info}/METADATA +43 -3
  14. {npcsh-0.3.28.dist-info → npcsh-0.3.30.dist-info}/RECORD +36 -30
  15. {npcsh-0.3.28.dist-info → npcsh-0.3.30.dist-info}/WHEEL +1 -1
  16. {npcsh-0.3.28.data → npcsh-0.3.30.data}/data/npcsh/npc_team/calculator.tool +0 -0
  17. {npcsh-0.3.28.data → npcsh-0.3.30.data}/data/npcsh/npc_team/celona.npc +0 -0
  18. {npcsh-0.3.28.data → npcsh-0.3.30.data}/data/npcsh/npc_team/corca.npc +0 -0
  19. {npcsh-0.3.28.data → npcsh-0.3.30.data}/data/npcsh/npc_team/eriane.npc +0 -0
  20. {npcsh-0.3.28.data → npcsh-0.3.30.data}/data/npcsh/npc_team/foreman.npc +0 -0
  21. {npcsh-0.3.28.data → npcsh-0.3.30.data}/data/npcsh/npc_team/generic_search.tool +0 -0
  22. {npcsh-0.3.28.data → npcsh-0.3.30.data}/data/npcsh/npc_team/image_generation.tool +0 -0
  23. {npcsh-0.3.28.data → npcsh-0.3.30.data}/data/npcsh/npc_team/lineru.npc +0 -0
  24. {npcsh-0.3.28.data → npcsh-0.3.30.data}/data/npcsh/npc_team/local_search.tool +0 -0
  25. {npcsh-0.3.28.data → npcsh-0.3.30.data}/data/npcsh/npc_team/maurawa.npc +0 -0
  26. {npcsh-0.3.28.data → npcsh-0.3.30.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
  27. {npcsh-0.3.28.data → npcsh-0.3.30.data}/data/npcsh/npc_team/raone.npc +0 -0
  28. {npcsh-0.3.28.data → npcsh-0.3.30.data}/data/npcsh/npc_team/screen_cap.tool +0 -0
  29. {npcsh-0.3.28.data → npcsh-0.3.30.data}/data/npcsh/npc_team/sibiji.npc +0 -0
  30. {npcsh-0.3.28.data → npcsh-0.3.30.data}/data/npcsh/npc_team/slean.npc +0 -0
  31. {npcsh-0.3.28.data → npcsh-0.3.30.data}/data/npcsh/npc_team/test_pipeline.py +0 -0
  32. {npcsh-0.3.28.data → npcsh-0.3.30.data}/data/npcsh/npc_team/turnic.npc +0 -0
  33. {npcsh-0.3.28.data → npcsh-0.3.30.data}/data/npcsh/npc_team/welxor.npc +0 -0
  34. {npcsh-0.3.28.dist-info → npcsh-0.3.30.dist-info}/entry_points.txt +0 -0
  35. {npcsh-0.3.28.dist-info → npcsh-0.3.30.dist-info}/licenses/LICENSE +0 -0
  36. {npcsh-0.3.28.dist-info → npcsh-0.3.30.dist-info}/top_level.txt +0 -0
npcsh/llm_funcs.py CHANGED
@@ -802,15 +802,20 @@ ReAct choices then will enter reasoning flow
 
     prompt = f"""
     A user submitted this query: {command}
+
     Determine the nature of the user's request:
-    1. Is it a specific request for a task that could be accomplished via a bash command or a simple python script that could be executed in a single bash call?
-    2. Should a tool be invoked to fulfill the request?
-    3. Is it a general question that requires an informative answer or a highly specific question that
+
+    1. Should a tool be invoked to fulfill the request?
+
+    2. Is it a general question that requires an informative answer or a highly specific question that
         requires inforrmation on the web?
-    4. Would this question be best answered by an alternative NPC?
-    5. Is it a complex request that actually requires more than one
-    tool to be called, perhaps in a sequence?
-    6. is there a need for the user to provide additional input to fulfill the request?
+
+    3. Would this question be best answered by an alternative NPC?
+
+    4. Is it a complex request that actually requires more than one
+    tool to be called, perhaps in a sequence?
+
+    5. is there a need for the user to provide additional input to fulfill the request?
 
 
 
@@ -877,8 +882,12 @@ ReAct choices then will enter reasoning flow
 
     prompt += f"""
     In considering how to answer this, consider:
-    - Whether it can be answered via a bash command on the user's computer. e.g. if a user is curious about file sizes within a directory or about processes running on their computer, these are likely best handled by a bash command.
-    - Whether more context from the user is required to adequately answer the question. e.g. if a user asks for a joke about their favorite city but they don't include the city , it would be helpful to ask for that information. Similarly, if a user asks to open a browser and to check the weather in a city, it would be helpful to ask for the city and which website or source to use.
+
+    - Whether more context from the user is required to adequately answer the question.
+        e.g. if a user asks for a joke about their favorite city but they don't include the city ,
+        it would be helpful to ask for that information. Similarly, if a user asks to open a browser
+        and to check the weather in a city, it would be helpful to ask for the city and which website
+        or source to use.
     - Whether a tool should be used.
 
 
@@ -887,14 +896,17 @@ ReAct choices then will enter reasoning flow
     extra tools or agent passes.
     Only use tools or pass to other NPCs
     when it is obvious that the answer needs to be as up-to-date as possible. For example,
-     a question about where mount everest is does not necessarily need to be answered by a tool call or an agent pass.
+    a question about where mount everest is does not necessarily need to be answered by a tool call or an agent pass.
     Similarly, if a user asks to explain the plot of the aeneid, this can be answered without a tool call or agent pass.
-    If a user were to ask for the current weather in tokyo or the current price of bitcoin or who the mayor of a city is, then a tool call or agent pass may be appropriate. If a user asks about the process using the most ram or the biggest file in a directory, a bash command will be most appropriate.
+
+    If a user were to ask for the current weather in tokyo or the current price of bitcoin or who the mayor of a city is,
+    then a tool call or agent pass may be appropriate.
+
     Tools are valuable but their use should be limited and purposeful to
     ensure the best user experience.
 
     Respond with a JSON object containing:
-    - "action": one of ["execute_command", "invoke_tool", "answer_question", "pass_to_npc", "execute_sequence", "request_input"]
+    - "action": one of ["invoke_tool", "answer_question", "pass_to_npc", "execute_sequence", "request_input"]
    - "tool_name": : if action is "invoke_tool": the name of the tool to use.
                 else if action is "execute_sequence", a list of tool names to use.
    - "explanation": a brief explanation of why you chose this action.
@@ -907,7 +919,7 @@ ReAct choices then will enter reasoning flow
 
    The format of the JSON object is:
    {{
-        "action": "execute_command" | "invoke_tool" | "answer_question" | "pass_to_npc" | "execute_sequence" | "request_input",
+        "action": "invoke_tool" | "answer_question" | "pass_to_npc" | "execute_sequence" | "request_input",
        "tool_name": "<tool_name(s)_if_applicable>",
        "explanation": "<your_explanation>",
        "npc_name": "<npc_name(s)_if_applicable>"
@@ -915,7 +927,9 @@ ReAct choices then will enter reasoning flow
 
    If you execute a sequence, ensure that you have a specified NPC for each tool use.
 
-    Remember, do not include ANY ADDITIONAL MARKDOWN FORMATTING. There should be no prefix 'json'. Start straight with the opening curly brace.
+    Remember, do not include ANY ADDITIONAL MARKDOWN FORMATTING.
+    There should be no leading ```json.
+
    """
 
    if docs_context:
@@ -932,11 +946,6 @@ ReAct choices then will enter reasoning flow
    {context}
 
    """
-
-    # print(prompt)
-
-    # For action determination, we don't need to pass the conversation messages to avoid confusion
-    # print(npc, model, provider)
    action_response = get_llm_response(
        prompt,
        model=model,
@@ -965,12 +974,11 @@ ReAct choices then will enter reasoning flow
    else:
        response_content_parsed = response_content
 
-    # Proceed according to the action specified
    action = response_content_parsed.get("action")
    explanation = response_content["explanation"]
-    # Include the user's command in the conversation messages
    print(f"action chosen: {action}")
    print(f"explanation given: {explanation}")
+
    if response_content_parsed.get("tool_name"):
        print(f"tool name: {response_content_parsed.get('tool_name')}")
 
@@ -1316,8 +1324,9 @@ def handle_tool_call(
            stream=stream,
            messages=messages,
        )
-        if "Error" in tool_output:
-            raise Exception(tool_output)
+        if not stream:
+            if "Error" in tool_output:
+                raise Exception(tool_output)
    except Exception as e:
        # diagnose_problem = get_llm_response(
        ## f"""a problem has occurred.
npcsh/npc_compiler.py CHANGED
@@ -551,6 +551,7 @@ class Tool:
 
        # Process Steps
        for i, step in enumerate(self.steps):
+
            context = self.execute_step(
                step,
                context,
@@ -564,6 +565,7 @@ class Tool:
            # if i is the last step and the user has reuqested a streaming output
            # then we should return the stream
            if i == len(self.steps) - 1 and stream:  # this was causing the big issue X:
+                print("tool successful, passing output to stream")
                return context
        # Return the final output
        if context.get("output") is not None:
@@ -592,8 +594,14 @@ class Tool:
        except Exception as e:
            print(f"Error rendering template: {e}")
            rendered_code = code
-
-        if engine == "natural":
+        # render engine if necessary
+        try:
+            template = jinja_env.from_string(engine)
+            rendered_engine = template.render(**context)
+        except:
+            print("error rendering engine")
+            rendered_engine = engine
+        if rendered_engine == "natural":
            if len(rendered_code.strip()) > 0:
                # print(f"Executing natural language step: {rendered_code}")
                if stream:
@@ -610,7 +618,7 @@ class Tool:
                    context["llm_response"] = response_text
                    context["results"] = response_text
 
-        elif engine == "python":
+        elif rendered_engine == "python":
            exec_globals = {
                "__builtins__": __builtins__,
                "npc": npc,
@@ -639,12 +647,23 @@ class Tool:
            exec_env = context.copy()
            try:
                exec(rendered_code, exec_globals, new_locals)
+                exec_env.update(new_locals)
+
+                context.update(exec_env)
+
                exec_env.update(new_locals)
                context.update(exec_env)
-                # If output is set, also set it as results
+
+                # Add this line to explicitly copy the output
+                if "output" in new_locals:
+                    context["output"] = new_locals["output"]
+
+                # Then your existing code
                if "output" in exec_env:
                    if exec_env["output"] is not None:
                        context["results"] = exec_env["output"]
+                        print("result from code execution: ", exec_env["output"])
+
            except NameError as e:
                tb_lines = traceback.format_exc().splitlines()
                limited_tb = (
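The npc_compiler change above now renders the step's `engine` field through Jinja before dispatching, which is what lets a tool declare `engine: '{{ language }}'` (as the new code_executor tool below does). A minimal sketch of that rendering step, assuming a plain `jinja2.Environment` comparable to the module's `jinja_env`:

```python
from jinja2 import Environment

jinja_env = Environment()

def render_engine(engine: str, context: dict) -> str:
    # Render the engine name through Jinja so placeholders like
    # '{{ language }}' resolve from the tool's input context;
    # fall back to the literal engine string on any rendering error.
    try:
        template = jinja_env.from_string(engine)
        return template.render(**context)
    except Exception:
        print("error rendering engine")
        return engine

print(render_engine("{{ language }}", {"language": "python"}))  # -> "python"
print(render_engine("natural", {}))                             # -> "natural"
```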
npcsh/npc_team/tools/bash_executer.tool ADDED
@@ -0,0 +1,32 @@
+tool_name: bash_executor
+description: Execute bash queries.
+inputs:
+  - bash_command
+  - user_request
+steps:
+  - engine: python
+    code: |
+      import subprocess
+      import os
+      cmd = '{{bash_command}}'  # Properly quote the command input
+      def run_command(cmd):
+          process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+          stdout, stderr = process.communicate()
+          if stderr:
+              print(f"Error: {stderr.decode('utf-8')}")
+              return stderr
+          return stdout
+      result = run_command(cmd)
+      output = result.decode('utf-8')
+
+  - engine: natural
+    code: |
+
+      Here is the result of the bash command:
+      ```
+      {{ output }}
+      ```
+      This was the original user request: {{ user_request }}
+
+      Please provide a response accordingly.
+
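The python step above shells out with `subprocess.Popen`, decodes the result, and hands it to the natural-language step as `output`. Standalone, the same pattern looks roughly like this (a sketch with a hypothetical command; in the tool, `{{bash_command}}` is substituted by Jinja before execution):

```python
import subprocess

def run_command(cmd: str) -> bytes:
    # shell=True mirrors the tool step; stdout and stderr are captured as bytes.
    process = subprocess.Popen(
        cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    stdout, stderr = process.communicate()
    if stderr:
        print(f"Error: {stderr.decode('utf-8')}")
        return stderr
    return stdout

# Hypothetical input; in the tool this comes from the bash_command input.
output = run_command("ls -la").decode("utf-8")
print(output)
```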
npcsh/npc_team/tools/code_executor.tool ADDED
@@ -0,0 +1,16 @@
+tool_name: code_executor
+description: Execute scripts with a specified language. choose from python, bash, R, or javascript. Set the ultimate result as the "output" variable. It must be a string. Do not add unnecessary print statements.
+inputs:
+  - code
+  - language
+steps:
+  - engine: '{{ language }}'
+    code: |
+      {{code}}
+  - engine: natural
+    code: |
+      Here is the result of the code execution that an agent ran.
+      ```
+      {{ output }}
+      ```
+      please provide a response accordingly.
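Per the description, whatever script is passed in the `code` input is expected to end by assigning a string to `output`, which the follow-up natural step interpolates. A hypothetical python-language `code` value might look like:

```python
# Hypothetical `code` input when language is "python"; the step's exec()
# environment copies the `output` variable back into the tool context.
total = sum(range(10))
output = str(total)  # must be a string per the tool description
```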
npcsh/npc_team/tools/npcsh_executor.tool ADDED
@@ -0,0 +1,9 @@
+tool_name: npcsh_executor
+description: Execute npcsh commands. Use the macro commands.
+inputs:
+  - code
+  - language
+steps:
+  - engine: "{{language}}"
+    code: |
+      {{code}}
npcsh/npc_team/tools/sql_executor.tool CHANGED
@@ -1,5 +1,5 @@
-tool_name: sql_executor
-description: Execute SQL queries on the ~/npcsh_history.db and display the result. The database contains only information about conversations and other user-provided data. It does not store any information about individual files.
+tool_name: data_pull
+description: Execute queries on the ~/npcsh_history.db to pull data. The database contains only information about conversations and other user-provided data. It does not store any information about individual files.
 inputs:
   - sql_query
   - interpret: false  # Note that this is not a boolean, but a string
npcsh/shell.py CHANGED
@@ -378,6 +378,7 @@ def main() -> None:
 
        current_npc = result["current_npc"]
        output = result.get("output")
+
        conversation_id = result.get("conversation_id")
        model = result.get("model")
        provider = result.get("provider")
@@ -404,17 +405,16 @@ def main() -> None:
            npc=npc_name,
            attachments=attachments,
        )
-        if NPCSH_STREAM_OUTPUT and (
-            isgenerator(output)
-            or (hasattr(output, "__iter__") and hasattr(output, "__next__"))
-        ):
-            str_output = ""
+
+        str_output = ""
+        if NPCSH_STREAM_OUTPUT and hasattr(output, "__iter__"):
+
            buffer = ""
            in_code = False
            code_buffer = ""
 
            for chunk in output:
-                # Get chunk content based on provider
+
                if provider == "anthropic":
                    chunk_content = (
                        chunk.delta.text
@@ -434,7 +434,7 @@ def main() -> None:
                        continue
 
                str_output += chunk_content
-
+                # print(str_output, "str_output")
                # Process the content character by character
                for char in chunk_content:
                    buffer += char
@@ -490,16 +490,18 @@ def main() -> None:
            if str_output:
                output = str_output
            print("\n")
-            save_conversation_message(
-                command_history,
-                conversation_id,
-                "assistant",
-                output,
-                wd=current_path,
-                model=model,
-                provider=provider,
-                npc=npc_name,
-            )
+
+            if isinstance(output, str):
+                save_conversation_message(
+                    command_history,
+                    conversation_id,
+                    "assistant",
+                    output,
+                    wd=current_path,
+                    model=model,
+                    provider=provider,
+                    npc=npc_name,
+                )
 
        # if there are attachments in most recent user sent message, save them
        # save_attachment_to_message(command_history, message_id, # file_path, attachment_name, attachment_type)
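The shell.py changes loosen the streaming check to a bare `hasattr(output, "__iter__")` and only persist the assistant message when the accumulated `output` is a string. In isolation, the accumulation and save guard are roughly (a sketch; the real loop also tracks code fences and provider-specific chunk shapes):

```python
from typing import Iterable, Optional

def collect_stream(output: Iterable[str], stream_enabled: bool = True) -> Optional[str]:
    # Accumulate streamed chunks into one string, mirroring str_output in main().
    str_output = ""
    if stream_enabled and hasattr(output, "__iter__"):
        for chunk_content in output:
            if not chunk_content:
                continue
            str_output += chunk_content
            print(chunk_content, end="", flush=True)
    return str_output or None

final = collect_stream(iter(["Hello, ", "world"]))
if isinstance(final, str):
    # Only a plain string is saved as the assistant message (as in 0.3.30).
    print("\nwould save:", final)
```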