npcsh 1.0.9.tar.gz → 1.0.10.tar.gz

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: npcsh
- Version: 1.0.9
+ Version: 1.0.10
  Summary: npcsh is a command-line toolkit for using AI agents in novel ways.
  Home-page: https://github.com/NPC-Worldwide/npcsh
  Author: Christopher Agostino
@@ -14,11 +14,11 @@ import subprocess
  import networkx as nx
 
  from npcpy.npc_compiler import NPC
- from npcpy.llm_funcs import get_llm_response
+ from npcpy.llm_funcs import get_llm_response, extract_facts, identify_groups, assign_groups_to_fact
  from npcsh._state import NPCSH_CHAT_MODEL, NPCSH_CHAT_PROVIDER
  from npcpy.npc_sysenv import print_and_process_stream_with_markdown
- from npcpy.memory.deep_research import consolidate_research
- from npcpy.memory.knowledge_graph import extract_facts, identify_groups, assign_groups_to_fact
+
+
 
  def generate_random_npcs(num_npcs: int, model: str, provider: str, request: str) -> List[NPC]:
      """
@@ -54,8 +54,6 @@ from npcpy.memory.command_history import (
      CommandHistory,
      save_conversation_message,
  )
- from npcpy.memory.knowledge_graph import breathe
- from npcpy.memory.sleep import sleep, forget
  from npcpy.npc_compiler import NPC, Team, load_jinxs_from_directory
  from npcpy.llm_funcs import check_llm_command, get_llm_response, execute_llm_command
  from npcpy.gen.embeddings import get_embeddings
@@ -585,10 +583,8 @@ def execute_slash_command(command: str, stdin_input: Optional[str], state: Shell
      result_dict = handler(command, **handler_kwargs)
 
      if isinstance(result_dict, dict):
-         # some respond with output, some with response, needs to be fixed upstream
-         output = result_dict.get("output") or result_dict.get("response")
          state.messages = result_dict.get("messages", state.messages)
-         return state, output
+         return state, result_dict
      else:
          return state, result_dict
 
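execute_slash_command now returns the handler's dict as-is instead of pre-extracting a value (the removed comment conceded that some handlers respond with "output" and others with "response"). Unwrapping therefore moves to the caller; a hedged sketch of what that looks like, with illustrative variable names:

    # Caller-side unwrapping (sketch): `result` is whatever the handler
    # returned; dict payloads may use either an "output" or "response" key.
    if isinstance(result, dict):
        output = result.get("output") or result.get("response")
        messages = result.get("messages", [])
    else:
        output = result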
@@ -702,7 +698,7 @@ def process_pipeline_command(
          images=state.attachments,
          stream=stream_final,
          context=info,
-         shell=True,
+
      )
      if isinstance(llm_result, dict):
          state.messages = llm_result.get("messages", state.messages)
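This hunk and the execute_todo_item hunk further down both drop the shell=True keyword from their get_llm_response calls. The resulting call shape, sketched from the keyword arguments visible in this hunk; the positional prompt argument is an assumption for illustration:

    # get_llm_response call after the change (sketch); `prompt` is
    # hypothetical, the keyword arguments are the ones shown in the diff.
    llm_result = get_llm_response(
        prompt,
        images=state.attachments,
        stream=stream_final,
        context=info,
    )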
@@ -859,8 +855,6 @@ def execute_command(
      try:
          bash_state, bash_output = handle_bash_command(cmd_parts, command, None, state)
          return bash_state, bash_output
-     except CommandNotFoundError:
-         return state, colored(f"Command not found: {command_name}", "red")
      except Exception as bash_err:
          return state, colored(f"Bash execution failed: {bash_err}", "red")
  except Exception:
@@ -1137,12 +1131,13 @@ def execute_todo_item(todo: Dict[str, Any], ride_state: RideState, shell_state:
          team=shell_state.team,
          messages=[],
          stream=shell_state.stream_output,
-         shell=True,
+
      )
 
      output_payload = result.get("output", "")
      output_str = ""
 
+
      if isgenerator(output_payload):
          output_str = print_and_process_stream_with_markdown(output_payload, shell_state.chat_model, shell_state.chat_provider)
      elif isinstance(output_payload, dict):
@@ -1398,34 +1393,30 @@ def process_result(
      if user_input =='/help':
          render_markdown(output)
      elif result_state.stream_output:
-
-         try:
-             final_output_str = print_and_process_stream_with_markdown(output, result_state.chat_model, result_state.chat_provider)
-         except AttributeError as e:
-             if isinstance(output, str):
-                 if len(output) > 0:
-                     final_output_str = output
-                     render_markdown(final_output_str)
-         except TypeError as e:
-
-             if isinstance(output, str):
-                 if len(output) > 0:
-                     final_output_str = output
-                     render_markdown(final_output_str)
-             elif isinstance(output, dict):
-                 if 'output' in output:
-                     final_output_str = output['output']
-                     render_markdown(final_output_str)
+
+         if isinstance(output, dict):
+             output_gen = output.get('output')
+             model = output.get('model', result_state.chat_model)
+             provider = output.get('provider', result_state.chat_provider)
+         else:
+             output_gen = output
+             model = result_state.chat_model
+             provider = result_state.chat_provider
+         print('processing stream output with markdown...')
+
+         final_output_str = print_and_process_stream_with_markdown(output_gen,
+                                                                   model,
+                                                                   provider)
 
      elif output is not None:
          final_output_str = str(output)
-         render_markdown(final_output_str)
+         render_markdown('str not none: ', final_output_str)
      if final_output_str and result_state.messages and result_state.messages[-1].get("role") != "assistant":
          result_state.messages.append({"role": "assistant", "content": final_output_str})
 
      #print(result_state.messages)
 
-     print() # Add spacing after output
+
 
      if final_output_str:
          save_conversation_message(
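The refactor replaces exception-driven fallbacks (catching AttributeError and TypeError when a plain string or dict reached the stream printer) with an explicit type dispatch, and lets a dict payload override the model and provider used for rendering. The core pattern, restated as a self-contained sketch with hypothetical names:

    # Dispatch sketch: prefer model/provider carried in the payload, so a
    # handler that used, say, a vision model renders with that same model.
    def resolve_stream(output, default_model, default_provider):
        if isinstance(output, dict):
            return (output.get('output'),
                    output.get('model', default_model),
                    output.get('provider', default_provider))
        return output, default_model, default_provider

The ots_handler change below is the producer side of this contract.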
@@ -24,6 +24,7 @@ from npcpy.llm_funcs import (
      get_llm_response,
      gen_image,
      gen_video,
+     breathe,
  )
  from npcpy.npc_compiler import NPC, Team, Jinx
  from npcpy.npc_compiler import initialize_npc_project
@@ -38,8 +39,8 @@ from npcpy.memory.search import execute_rag_command, execute_search_command, exe
  from npcpy.memory.command_history import CommandHistory
 
 
- from npcpy.memory.knowledge_graph import breathe
- from npcpy.memory.sleep import sleep, forget
+
+
  from npcpy.serve import start_flask_server
 
@@ -336,7 +337,7 @@ def ots_handler(command: str, **kwargs):
          api_url=safe_get(kwargs, 'api_url'),
          api_key=safe_get(kwargs, 'api_key')
      )
-     return {"output": response_data.get('response'), "messages": response_data.get('messages')}
+     return {"output": response_data.get('response'), "messages": response_data.get('messages'), "model": vision_model, "provider": vision_provider}
 
  except Exception as e:
      traceback.print_exc()
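Here ots_handler starts tagging its payload with the vision model and provider, so process_result (above) streams the response against the backend that actually produced it instead of the shell's default chat model. A hedged consumption sketch; the fallback constants come from the npcsh._state import earlier in this diff, and the call shape is assumed:

    # Consuming the enriched payload (sketch): fall back to shell defaults
    # when a handler does not set model/provider.
    result = ots_handler("/ots")
    model = result.get("model", NPCSH_CHAT_MODEL)
    provider = result.get("provider", NPCSH_CHAT_PROVIDER)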
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: npcsh
- Version: 1.0.9
+ Version: 1.0.10
  Summary: npcsh is a command-line toolkit for using AI agents in novel ways.
  Home-page: https://github.com/NPC-Worldwide/npcsh
  Author: Christopher Agostino
@@ -84,7 +84,7 @@ extra_files = package_files("npcpy/npc_team/")
 
  setup(
      name="npcsh",
-     version="1.0.9",
+     version="1.0.10",
      packages=find_packages(exclude=["tests*"]),
      install_requires=base_requirements,  # Only install base requirements by default
      extras_require={
16 files without changes