npcsh 1.1.1.tar.gz → 1.1.2.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. {npcsh-1.1.1 → npcsh-1.1.2}/PKG-INFO +1 -1
  2. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/_state.py +160 -94
  3. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh.egg-info/PKG-INFO +1 -1
  4. {npcsh-1.1.1 → npcsh-1.1.2}/setup.py +1 -1
  5. {npcsh-1.1.1 → npcsh-1.1.2}/LICENSE +0 -0
  6. {npcsh-1.1.1 → npcsh-1.1.2}/README.md +0 -0
  7. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/__init__.py +0 -0
  8. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/alicanto.py +0 -0
  9. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/corca.py +0 -0
  10. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/guac.py +0 -0
  11. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/mcp_helpers.py +0 -0
  12. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/mcp_server.py +0 -0
  13. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/npc.py +0 -0
  14. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/npc_team/alicanto.npc +0 -0
  15. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/npc_team/alicanto.png +0 -0
  16. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/npc_team/corca.npc +0 -0
  17. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/npc_team/corca.png +0 -0
  18. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/npc_team/foreman.npc +0 -0
  19. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/npc_team/frederic.npc +0 -0
  20. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/npc_team/frederic4.png +0 -0
  21. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/npc_team/guac.png +0 -0
  22. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/npc_team/jinxs/bash_executer.jinx +0 -0
  23. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/npc_team/jinxs/edit_file.jinx +0 -0
  24. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/npc_team/jinxs/image_generation.jinx +0 -0
  25. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/npc_team/jinxs/internet_search.jinx +0 -0
  26. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/npc_team/jinxs/kg_search.jinx +0 -0
  27. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/npc_team/jinxs/memory_search.jinx +0 -0
  28. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/npc_team/jinxs/python_executor.jinx +0 -0
  29. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/npc_team/jinxs/screen_cap.jinx +0 -0
  30. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/npc_team/kadiefa.npc +0 -0
  31. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/npc_team/kadiefa.png +0 -0
  32. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/npc_team/npcsh.ctx +0 -0
  33. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/npc_team/npcsh_sibiji.png +0 -0
  34. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/npc_team/plonk.npc +0 -0
  35. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/npc_team/plonk.png +0 -0
  36. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/npc_team/plonkjr.npc +0 -0
  37. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/npc_team/plonkjr.png +0 -0
  38. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/npc_team/sibiji.npc +0 -0
  39. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/npc_team/sibiji.png +0 -0
  40. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/npc_team/spool.png +0 -0
  41. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/npc_team/yap.png +0 -0
  42. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/npcsh.py +0 -0
  43. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/plonk.py +0 -0
  44. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/pti.py +0 -0
  45. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/routes.py +0 -0
  46. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/spool.py +0 -0
  47. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/wander.py +0 -0
  48. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh/yap.py +0 -0
  49. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh.egg-info/SOURCES.txt +0 -0
  50. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh.egg-info/dependency_links.txt +0 -0
  51. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh.egg-info/entry_points.txt +0 -0
  52. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh.egg-info/requires.txt +0 -0
  53. {npcsh-1.1.1 → npcsh-1.1.2}/npcsh.egg-info/top_level.txt +0 -0
  54. {npcsh-1.1.1 → npcsh-1.1.2}/setup.cfg +0 -0
{npcsh-1.1.1 → npcsh-1.1.2}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: npcsh
- Version: 1.1.1
+ Version: 1.1.2
  Summary: npcsh is a command-line toolkit for using AI agents in novel ways.
  Home-page: https://github.com/NPC-Worldwide/npcsh
  Author: Christopher Agostino
{npcsh-1.1.1 → npcsh-1.1.2}/npcsh/_state.py
@@ -2755,106 +2755,155 @@ def process_result(
  team=team_name,
  )

- conversation_turn_text = f"User: {user_input}\nAssistant: {final_output_str}"
- engine = command_history.engine
+ result_state.turn_count += 1

- memory_examples = command_history.get_memory_examples_for_context(
- npc=npc_name,
- team=team_name,
- directory_path=result_state.current_path
- )
-
- memory_context = format_memory_context(memory_examples)
-
- approved_facts = []
- try:
- facts = get_facts(
- conversation_turn_text,
- model=active_npc.model,
- provider=active_npc.provider,
- npc=active_npc,
- context=memory_context + 'Memories should be fully self contained. They should not use vague pronouns or words like that or this or it. Do not generate more than 1-2 memories at a time.'
+ if result_state.turn_count % 10 == 0:
+ approved_facts = []
+
+ conversation_turn_text = f"User: {user_input}\nAssistant: {final_output_str}"
+ engine = command_history.engine
+
+ memory_examples = command_history.get_memory_examples_for_context(
+ npc=npc_name,
+ team=team_name,
+ directory_path=result_state.current_path
  )

- if facts:
- memories_for_approval = []
- for i, fact in enumerate(facts):
- memories_for_approval.append({
- "memory_id": f"temp_{i}",
- "content": fact['statement'],
- "context": f"Type: {fact.get('type', 'unknown')}, Source: {fact.get('source_text', '')}",
- "npc": npc_name,
- "fact_data": fact
- })
-
- approvals = memory_approval_ui(memories_for_approval)
+ memory_context = format_memory_context(memory_examples)
+
+ try:
+ facts = get_facts(
+ conversation_turn_text,
+ model=active_npc.model,
+ provider=active_npc.provider,
+ npc=active_npc,
+ context=memory_context + 'Memories should be fully self contained. They should not use vague pronouns or words like that or this or it. Do not generate more than 1-2 memories at a time.'
+ )

- for approval in approvals:
- fact_data = next(m['fact_data'] for m in memories_for_approval
- if m['memory_id'] == approval['memory_id'])
+ if facts:
+ num_memories = len(facts)
+ print(colored(
+ f"\nThere are {num_memories} potential memories. Do you want to review them now?",
+ "cyan"
+ ))
+ review_choice = input("[y/N]: ").strip().lower()

- command_history.add_memory_to_database(
- message_id=f"{result_state.conversation_id}_{len(result_state.messages)}",
- conversation_id=result_state.conversation_id,
- npc=npc_name,
- team=team_name,
- directory_path=result_state.current_path,
- initial_memory=fact_data['statement'],
- status=approval['decision'],
- model=active_npc.model,
- provider=active_npc.provider,
- final_memory=approval.get('final_memory')
- )
+ if review_choice == 'y':
+ memories_for_approval = []
+ for i, fact in enumerate(facts):
+ memories_for_approval.append({
+ "memory_id": f"temp_{i}",
+ "content": fact['statement'],
+ "context": f"Type: {fact.get('type', 'unknown')}, Source: {fact.get('source_text', '')}",
+ "npc": npc_name,
+ "fact_data": fact
+ })
+
+ approvals = memory_approval_ui(memories_for_approval)
+
+ for approval in approvals:
+ fact_data = next(
+ m['fact_data'] for m in memories_for_approval
+ if m['memory_id'] == approval['memory_id']
+ )
+
+ command_history.add_memory_to_database(
+ message_id=f"{result_state.conversation_id}_{len(result_state.messages)}",
+ conversation_id=result_state.conversation_id,
+ npc=npc_name,
+ team=team_name,
+ directory_path=result_state.current_path,
+ initial_memory=fact_data['statement'],
+ status=approval['decision'],
+ model=active_npc.model,
+ provider=active_npc.provider,
+ final_memory=approval.get('final_memory')
+ )
+
+ if approval['decision'] in ['human-approved', 'human-edited']:
+ approved_fact = {
+ 'statement': approval.get('final_memory') or fact_data['statement'],
+ 'source_text': fact_data.get('source_text', ''),
+ 'type': fact_data.get('type', 'explicit'),
+ 'generation': 0
+ }
+ approved_facts.append(approved_fact)
+ else:
+ for i, fact in enumerate(facts):
+ command_history.add_memory_to_database(
+ message_id=f"{result_state.conversation_id}_{len(result_state.messages)}",
+ conversation_id=result_state.conversation_id,
+ npc=npc_name,
+ team=team_name,
+ directory_path=result_state.current_path,
+ initial_memory=fact['statement'],
+ status='skipped',
+ model=active_npc.model,
+ provider=active_npc.provider,
+ final_memory=None
+ )
+
+ print(colored(
+ f"Marked {num_memories} memories as skipped.",
+ "yellow"
+ ))

- if approval['decision'] in ['human-approved', 'human-edited']:
- approved_fact = {
- 'statement': approval.get('final_memory') or fact_data['statement'],
- 'source_text': fact_data.get('source_text', ''),
- 'type': fact_data.get('type', 'explicit'),
- 'generation': 0
- }
- approved_facts.append(approved_fact)
-
- except Exception as e:
- print(colored(f"Memory generation error: {e}", "yellow"))
-
- if result_state.build_kg and approved_facts:
- try:
- if not should_skip_kg_processing(user_input, final_output_str):
- npc_kg = load_kg_from_db(engine, team_name, npc_name, result_state.current_path)
- evolved_npc_kg, _ = kg_evolve_incremental(
- existing_kg=npc_kg,
- new_facts=approved_facts,
- model=active_npc.model,
- provider=active_npc.provider,
- npc=active_npc,
- get_concepts=True,
- link_concepts_facts=False,
- link_concepts_concepts=False,
- link_facts_facts=False,
- )
- save_kg_to_db(
- engine,
- evolved_npc_kg,
- team_name,
- npc_name,
- result_state.current_path
- )
  except Exception as e:
- print(colored(f"Error during real-time KG evolution: {e}", "red"))
+ print(colored(f"Memory generation error: {e}", "yellow"))

- result_state.turn_count += 1
+ if result_state.build_kg and approved_facts:
+ try:
+ if not should_skip_kg_processing(user_input, final_output_str):
+ npc_kg = load_kg_from_db(
+ engine,
+ team_name,
+ npc_name,
+ result_state.current_path
+ )
+ evolved_npc_kg, _ = kg_evolve_incremental(
+ existing_kg=npc_kg,
+ new_facts=approved_facts,
+ model=active_npc.model,
+ provider=active_npc.provider,
+ npc=active_npc,
+ get_concepts=True,
+ link_concepts_facts=False,
+ link_concepts_concepts=False,
+ link_facts_facts=False,
+ )
+ save_kg_to_db(
+ engine,
+ evolved_npc_kg,
+ team_name,
+ npc_name,
+ result_state.current_path
+ )
+ except Exception as e:
+ print(colored(
+ f"Error during real-time KG evolution: {e}",
+ "red"
+ ))

- if result_state.turn_count % 10 == 0:
- print(colored("\nChecking for potential team improvements...", "cyan"))
+ print(colored(
+ "\nChecking for potential team improvements...",
+ "cyan"
+ ))
  try:
- summary = breathe(messages=result_state.messages[-20:], npc=active_npc)
+ summary = breathe(
+ messages=result_state.messages[-20:],
+ npc=active_npc
+ )
  characterization = summary.get('output')

  if characterization and result_state.team:
- team_ctx_path = get_team_ctx_path(result_state.team.team_path)
+ team_ctx_path = get_team_ctx_path(
+ result_state.team.team_path
+ )
  if not team_ctx_path:
- team_ctx_path = os.path.join(result_state.team.team_path, "team.ctx")
+ team_ctx_path = os.path.join(
+ result_state.team.team_path,
+ "team.ctx"
+ )

  ctx_data = {}
  if os.path.exists(team_ctx_path):
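
The substance of this hunk is a change in when memory extraction runs: result_state.turn_count is now incremented up front, and fact extraction, the approval prompt, and knowledge-graph evolution execute only every tenth turn instead of after every exchange. When candidate memories are found, the user is first asked whether to review them at all; declining records every candidate with status 'skipped' rather than opening the approval UI. Below is a minimal, hypothetical sketch of that control flow — maybe_review_memories, approve_ui, and save are illustrative stand-ins, not the real npcsh helpers (get_facts, memory_approval_ui, command_history.add_memory_to_database):

from typing import Callable, Dict, List

def maybe_review_memories(
    turn_count: int,
    facts: List[Dict],
    approve_ui: Callable[[List[Dict]], List[Dict]],
    save: Callable[..., None],
) -> List[Dict]:
    """Every 10th turn, offer to review candidate memories; otherwise do nothing."""
    approved: List[Dict] = []
    if turn_count % 10 != 0 or not facts:
        return approved

    choice = input(
        f"\nThere are {len(facts)} potential memories. Review them now? [y/N]: "
    ).strip().lower()

    if choice == 'y':
        # approve_ui plays the role of memory_approval_ui: one decision per candidate.
        for decision in approve_ui(facts):
            save(decision['fact'], status=decision['decision'],
                 final=decision.get('final_memory'))
            if decision['decision'] in ('human-approved', 'human-edited'):
                approved.append(decision['fact'])
    else:
        # Declining the review persists each candidate as 'skipped'
        # instead of discarding it silently.
        for fact in facts:
            save(fact, status='skipped', final=None)

    return approved

The practical effect mirrored here is that interactive sessions are no longer interrupted by a memory-approval prompt on every turn, while skipped candidates are still written to the history database for later review.
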
@@ -2879,11 +2928,20 @@ def process_result(
  suggestion = response.get("response", {}).get("suggestion")

  if suggestion:
- new_context = (current_context + " " + suggestion).strip()
- print(colored(f"{npc_name} suggests updating team context:", "yellow"))
- print(f" - OLD: {current_context}\n + NEW: {new_context}")
+ new_context = (
+ current_context + " " + suggestion
+ ).strip()
+ print(colored(
+ f"{npc_name} suggests updating team context:",
+ "yellow"
+ ))
+ print(
+ f" - OLD: {current_context}\n + NEW: {new_context}"
+ )

- choice = input("Apply? [y/N/e(dit)]: ").strip().lower()
+ choice = input(
+ "Apply? [y/N/e(dit)]: "
+ ).strip().lower()

  if choice == 'y':
  ctx_data['context'] = new_context
@@ -2891,21 +2949,29 @@ def process_result(
  yaml.dump(ctx_data, f)
  print(colored("Team context updated.", "green"))
  elif choice == 'e':
- edited_context = input(f"Edit context [{new_context}]: ").strip()
+ edited_context = input(
+ f"Edit context [{new_context}]: "
+ ).strip()
  if edited_context:
  ctx_data['context'] = edited_context
  else:
  ctx_data['context'] = new_context
  with open(team_ctx_path, 'w') as f:
  yaml.dump(ctx_data, f)
- print(colored("Team context updated with edits.", "green"))
+ print(colored(
+ "Team context updated with edits.",
+ "green"
+ ))
  else:
  print("Suggestion declined.")
  except Exception as e:
  import traceback
- print(colored(f"Could not generate team suggestions: {e}", "yellow"))
+ print(colored(
+ f"Could not generate team suggestions: {e}",
+ "yellow"
+ ))
  traceback.print_exc()
-
+
  initial_state = ShellState(
  conversation_id=start_new_conversation(),
  stream_output=NPCSH_STREAM_OUTPUT,
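
The remaining _state.py hunks mostly re-wrap the existing team-context suggestion flow for readability; the behavior — show OLD vs NEW context, prompt Apply? [y/N/e(dit)], and write the result back to team.ctx with yaml.dump — is unchanged. A rough, self-contained sketch of that apply step, assuming team.ctx is plain YAML with a top-level context key (the function name and prompt wiring here are illustrative, not the actual npcsh API):

import os
import yaml

def apply_context_suggestion(team_path: str, suggestion: str) -> None:
    """Merge a suggested sentence into the 'context' field of team.ctx (YAML)."""
    ctx_path = os.path.join(team_path, "team.ctx")
    ctx_data = {}
    if os.path.exists(ctx_path):
        with open(ctx_path) as f:
            ctx_data = yaml.safe_load(f) or {}

    current = ctx_data.get('context', '')
    new_context = (current + " " + suggestion).strip()
    print(f" - OLD: {current}\n + NEW: {new_context}")

    choice = input("Apply? [y/N/e(dit)]: ").strip().lower()
    if choice == 'y':
        ctx_data['context'] = new_context
    elif choice == 'e':
        edited = input(f"Edit context [{new_context}]: ").strip()
        ctx_data['context'] = edited or new_context
    else:
        print("Suggestion declined.")
        return

    with open(ctx_path, 'w') as f:
        yaml.dump(ctx_data, f)
    print("Team context updated.")

Using yaml.safe_load with an `or {}` fallback keeps the sketch tolerant of an empty or missing context file, matching the ctx_data = {} default in the diff.
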
{npcsh-1.1.1 → npcsh-1.1.2}/npcsh.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: npcsh
- Version: 1.1.1
+ Version: 1.1.2
  Summary: npcsh is a command-line toolkit for using AI agents in novel ways.
  Home-page: https://github.com/NPC-Worldwide/npcsh
  Author: Christopher Agostino
{npcsh-1.1.1 → npcsh-1.1.2}/setup.py
@@ -78,7 +78,7 @@ extra_files = package_files("npcsh/npc_team/")

  setup(
  name="npcsh",
- version="1.1.1",
+ version="1.1.2",
  packages=find_packages(exclude=["tests*"]),
  install_requires=base_requirements, # Only install base requirements by default
  extras_require={