npcsh 1.1.1__py3-none-any.whl → 1.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. npcsh/_state.py +160 -123
  2. npcsh/alicanto.py +10 -5
  3. npcsh/build.py +291 -0
  4. npcsh/corca.py +263 -154
  5. npcsh/npc.py +127 -46
  6. npcsh/routes.py +229 -21
  7. {npcsh-1.1.1.dist-info → npcsh-1.1.3.dist-info}/METADATA +10 -1
  8. {npcsh-1.1.1.dist-info → npcsh-1.1.3.dist-info}/RECORD +40 -39
  9. {npcsh-1.1.1.data → npcsh-1.1.3.data}/data/npcsh/npc_team/alicanto.npc +0 -0
  10. {npcsh-1.1.1.data → npcsh-1.1.3.data}/data/npcsh/npc_team/alicanto.png +0 -0
  11. {npcsh-1.1.1.data → npcsh-1.1.3.data}/data/npcsh/npc_team/bash_executer.jinx +0 -0
  12. {npcsh-1.1.1.data → npcsh-1.1.3.data}/data/npcsh/npc_team/corca.npc +0 -0
  13. {npcsh-1.1.1.data → npcsh-1.1.3.data}/data/npcsh/npc_team/corca.png +0 -0
  14. {npcsh-1.1.1.data → npcsh-1.1.3.data}/data/npcsh/npc_team/edit_file.jinx +0 -0
  15. {npcsh-1.1.1.data → npcsh-1.1.3.data}/data/npcsh/npc_team/foreman.npc +0 -0
  16. {npcsh-1.1.1.data → npcsh-1.1.3.data}/data/npcsh/npc_team/frederic.npc +0 -0
  17. {npcsh-1.1.1.data → npcsh-1.1.3.data}/data/npcsh/npc_team/frederic4.png +0 -0
  18. {npcsh-1.1.1.data → npcsh-1.1.3.data}/data/npcsh/npc_team/guac.png +0 -0
  19. {npcsh-1.1.1.data → npcsh-1.1.3.data}/data/npcsh/npc_team/image_generation.jinx +0 -0
  20. {npcsh-1.1.1.data → npcsh-1.1.3.data}/data/npcsh/npc_team/internet_search.jinx +0 -0
  21. {npcsh-1.1.1.data → npcsh-1.1.3.data}/data/npcsh/npc_team/kadiefa.npc +0 -0
  22. {npcsh-1.1.1.data → npcsh-1.1.3.data}/data/npcsh/npc_team/kadiefa.png +0 -0
  23. {npcsh-1.1.1.data → npcsh-1.1.3.data}/data/npcsh/npc_team/kg_search.jinx +0 -0
  24. {npcsh-1.1.1.data → npcsh-1.1.3.data}/data/npcsh/npc_team/memory_search.jinx +0 -0
  25. {npcsh-1.1.1.data → npcsh-1.1.3.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
  26. {npcsh-1.1.1.data → npcsh-1.1.3.data}/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
  27. {npcsh-1.1.1.data → npcsh-1.1.3.data}/data/npcsh/npc_team/plonk.npc +0 -0
  28. {npcsh-1.1.1.data → npcsh-1.1.3.data}/data/npcsh/npc_team/plonk.png +0 -0
  29. {npcsh-1.1.1.data → npcsh-1.1.3.data}/data/npcsh/npc_team/plonkjr.npc +0 -0
  30. {npcsh-1.1.1.data → npcsh-1.1.3.data}/data/npcsh/npc_team/plonkjr.png +0 -0
  31. {npcsh-1.1.1.data → npcsh-1.1.3.data}/data/npcsh/npc_team/python_executor.jinx +0 -0
  32. {npcsh-1.1.1.data → npcsh-1.1.3.data}/data/npcsh/npc_team/screen_cap.jinx +0 -0
  33. {npcsh-1.1.1.data → npcsh-1.1.3.data}/data/npcsh/npc_team/sibiji.npc +0 -0
  34. {npcsh-1.1.1.data → npcsh-1.1.3.data}/data/npcsh/npc_team/sibiji.png +0 -0
  35. {npcsh-1.1.1.data → npcsh-1.1.3.data}/data/npcsh/npc_team/spool.png +0 -0
  36. {npcsh-1.1.1.data → npcsh-1.1.3.data}/data/npcsh/npc_team/yap.png +0 -0
  37. {npcsh-1.1.1.dist-info → npcsh-1.1.3.dist-info}/WHEEL +0 -0
  38. {npcsh-1.1.1.dist-info → npcsh-1.1.3.dist-info}/entry_points.txt +0 -0
  39. {npcsh-1.1.1.dist-info → npcsh-1.1.3.dist-info}/licenses/LICENSE +0 -0
  40. {npcsh-1.1.1.dist-info → npcsh-1.1.3.dist-info}/top_level.txt +0 -0
npcsh/_state.py CHANGED
@@ -645,7 +645,6 @@ BASH_COMMANDS = [
645
645
  "fg",
646
646
  "getopts",
647
647
  "hash",
648
- "help",
649
648
  "history",
650
649
  "if",
651
650
  "jobs",
@@ -2206,34 +2205,6 @@ def execute_command(
2206
2205
  npc_name = state.npc.name if isinstance(state.npc, NPC) else "__none__"
2207
2206
  team_name = state.team.name if state.team else "__none__"
2208
2207
 
2209
- if command_history:
2210
- relevant_memories = get_relevant_memories(
2211
- command_history=command_history,
2212
- npc_name=npc_name,
2213
- team_name=team_name,
2214
- path=state.current_path,
2215
- query=command,
2216
- max_memories=5,
2217
- state=state
2218
- )
2219
- print('Memory jogged...')
2220
- print(relevant_memories)
2221
-
2222
- if relevant_memories:
2223
- memory_context = "\n".join([
2224
- f"- {m.get('final_memory', '')}"
2225
- for m in relevant_memories
2226
- ])
2227
- memory_msg = {
2228
- "role": "system",
2229
- "content": f"Relevant memories:\n{memory_context}"
2230
- }
2231
- if not state.messages or \
2232
- state.messages[0].get("role") != "system":
2233
- state.messages.insert(0, memory_msg)
2234
- else:
2235
- state.messages[0]["content"] += \
2236
- f"\n\n{memory_msg['content']}"
2237
2208
 
2238
2209
  original_command_for_embedding = command
2239
2210
  commands = split_by_pipes(command)
@@ -2755,106 +2726,155 @@ def process_result(
2755
2726
  team=team_name,
2756
2727
  )
2757
2728
 
2758
- conversation_turn_text = f"User: {user_input}\nAssistant: {final_output_str}"
2759
- engine = command_history.engine
2729
+ result_state.turn_count += 1
2760
2730
 
2761
- memory_examples = command_history.get_memory_examples_for_context(
2762
- npc=npc_name,
2763
- team=team_name,
2764
- directory_path=result_state.current_path
2765
- )
2766
-
2767
- memory_context = format_memory_context(memory_examples)
2768
-
2769
- approved_facts = []
2770
- try:
2771
- facts = get_facts(
2772
- conversation_turn_text,
2773
- model=active_npc.model,
2774
- provider=active_npc.provider,
2775
- npc=active_npc,
2776
- context=memory_context + 'Memories should be fully self contained. They should not use vague pronouns or words like that or this or it. Do not generate more than 1-2 memories at a time.'
2731
+ if result_state.turn_count % 10 == 0:
2732
+ approved_facts = []
2733
+
2734
+ conversation_turn_text = f"User: {user_input}\nAssistant: {final_output_str}"
2735
+ engine = command_history.engine
2736
+
2737
+ memory_examples = command_history.get_memory_examples_for_context(
2738
+ npc=npc_name,
2739
+ team=team_name,
2740
+ directory_path=result_state.current_path
2777
2741
  )
2778
2742
 
2779
- if facts:
2780
- memories_for_approval = []
2781
- for i, fact in enumerate(facts):
2782
- memories_for_approval.append({
2783
- "memory_id": f"temp_{i}",
2784
- "content": fact['statement'],
2785
- "context": f"Type: {fact.get('type', 'unknown')}, Source: {fact.get('source_text', '')}",
2786
- "npc": npc_name,
2787
- "fact_data": fact
2788
- })
2789
-
2790
- approvals = memory_approval_ui(memories_for_approval)
2743
+ memory_context = format_memory_context(memory_examples)
2744
+
2745
+ try:
2746
+ facts = get_facts(
2747
+ conversation_turn_text,
2748
+ model=active_npc.model,
2749
+ provider=active_npc.provider,
2750
+ npc=active_npc,
2751
+ context=memory_context + 'Memories should be fully self contained. They should not use vague pronouns or words like that or this or it. Do not generate more than 1-2 memories at a time.'
2752
+ )
2791
2753
 
2792
- for approval in approvals:
2793
- fact_data = next(m['fact_data'] for m in memories_for_approval
2794
- if m['memory_id'] == approval['memory_id'])
2754
+ if facts:
2755
+ num_memories = len(facts)
2756
+ print(colored(
2757
+ f"\nThere are {num_memories} potential memories. Do you want to review them now?",
2758
+ "cyan"
2759
+ ))
2760
+ review_choice = input("[y/N]: ").strip().lower()
2795
2761
 
2796
- command_history.add_memory_to_database(
2797
- message_id=f"{result_state.conversation_id}_{len(result_state.messages)}",
2798
- conversation_id=result_state.conversation_id,
2799
- npc=npc_name,
2800
- team=team_name,
2801
- directory_path=result_state.current_path,
2802
- initial_memory=fact_data['statement'],
2803
- status=approval['decision'],
2804
- model=active_npc.model,
2805
- provider=active_npc.provider,
2806
- final_memory=approval.get('final_memory')
2807
- )
2762
+ if review_choice == 'y':
2763
+ memories_for_approval = []
2764
+ for i, fact in enumerate(facts):
2765
+ memories_for_approval.append({
2766
+ "memory_id": f"temp_{i}",
2767
+ "content": fact['statement'],
2768
+ "context": f"Type: {fact.get('type', 'unknown')}, Source: {fact.get('source_text', '')}",
2769
+ "npc": npc_name,
2770
+ "fact_data": fact
2771
+ })
2772
+
2773
+ approvals = memory_approval_ui(memories_for_approval)
2774
+
2775
+ for approval in approvals:
2776
+ fact_data = next(
2777
+ m['fact_data'] for m in memories_for_approval
2778
+ if m['memory_id'] == approval['memory_id']
2779
+ )
2780
+
2781
+ command_history.add_memory_to_database(
2782
+ message_id=f"{result_state.conversation_id}_{len(result_state.messages)}",
2783
+ conversation_id=result_state.conversation_id,
2784
+ npc=npc_name,
2785
+ team=team_name,
2786
+ directory_path=result_state.current_path,
2787
+ initial_memory=fact_data['statement'],
2788
+ status=approval['decision'],
2789
+ model=active_npc.model,
2790
+ provider=active_npc.provider,
2791
+ final_memory=approval.get('final_memory')
2792
+ )
2793
+
2794
+ if approval['decision'] in ['human-approved', 'human-edited']:
2795
+ approved_fact = {
2796
+ 'statement': approval.get('final_memory') or fact_data['statement'],
2797
+ 'source_text': fact_data.get('source_text', ''),
2798
+ 'type': fact_data.get('type', 'explicit'),
2799
+ 'generation': 0
2800
+ }
2801
+ approved_facts.append(approved_fact)
2802
+ else:
2803
+ for i, fact in enumerate(facts):
2804
+ command_history.add_memory_to_database(
2805
+ message_id=f"{result_state.conversation_id}_{len(result_state.messages)}",
2806
+ conversation_id=result_state.conversation_id,
2807
+ npc=npc_name,
2808
+ team=team_name,
2809
+ directory_path=result_state.current_path,
2810
+ initial_memory=fact['statement'],
2811
+ status='skipped',
2812
+ model=active_npc.model,
2813
+ provider=active_npc.provider,
2814
+ final_memory=None
2815
+ )
2816
+
2817
+ print(colored(
2818
+ f"Marked {num_memories} memories as skipped.",
2819
+ "yellow"
2820
+ ))
2808
2821
 
2809
- if approval['decision'] in ['human-approved', 'human-edited']:
2810
- approved_fact = {
2811
- 'statement': approval.get('final_memory') or fact_data['statement'],
2812
- 'source_text': fact_data.get('source_text', ''),
2813
- 'type': fact_data.get('type', 'explicit'),
2814
- 'generation': 0
2815
- }
2816
- approved_facts.append(approved_fact)
2817
-
2818
- except Exception as e:
2819
- print(colored(f"Memory generation error: {e}", "yellow"))
2820
-
2821
- if result_state.build_kg and approved_facts:
2822
- try:
2823
- if not should_skip_kg_processing(user_input, final_output_str):
2824
- npc_kg = load_kg_from_db(engine, team_name, npc_name, result_state.current_path)
2825
- evolved_npc_kg, _ = kg_evolve_incremental(
2826
- existing_kg=npc_kg,
2827
- new_facts=approved_facts,
2828
- model=active_npc.model,
2829
- provider=active_npc.provider,
2830
- npc=active_npc,
2831
- get_concepts=True,
2832
- link_concepts_facts=False,
2833
- link_concepts_concepts=False,
2834
- link_facts_facts=False,
2835
- )
2836
- save_kg_to_db(
2837
- engine,
2838
- evolved_npc_kg,
2839
- team_name,
2840
- npc_name,
2841
- result_state.current_path
2842
- )
2843
2822
  except Exception as e:
2844
- print(colored(f"Error during real-time KG evolution: {e}", "red"))
2823
+ print(colored(f"Memory generation error: {e}", "yellow"))
2845
2824
 
2846
- result_state.turn_count += 1
2825
+ if result_state.build_kg and approved_facts:
2826
+ try:
2827
+ if not should_skip_kg_processing(user_input, final_output_str):
2828
+ npc_kg = load_kg_from_db(
2829
+ engine,
2830
+ team_name,
2831
+ npc_name,
2832
+ result_state.current_path
2833
+ )
2834
+ evolved_npc_kg, _ = kg_evolve_incremental(
2835
+ existing_kg=npc_kg,
2836
+ new_facts=approved_facts,
2837
+ model=active_npc.model,
2838
+ provider=active_npc.provider,
2839
+ npc=active_npc,
2840
+ get_concepts=True,
2841
+ link_concepts_facts=False,
2842
+ link_concepts_concepts=False,
2843
+ link_facts_facts=False,
2844
+ )
2845
+ save_kg_to_db(
2846
+ engine,
2847
+ evolved_npc_kg,
2848
+ team_name,
2849
+ npc_name,
2850
+ result_state.current_path
2851
+ )
2852
+ except Exception as e:
2853
+ print(colored(
2854
+ f"Error during real-time KG evolution: {e}",
2855
+ "red"
2856
+ ))
2847
2857
 
2848
- if result_state.turn_count % 10 == 0:
2849
- print(colored("\nChecking for potential team improvements...", "cyan"))
2858
+ print(colored(
2859
+ "\nChecking for potential team improvements...",
2860
+ "cyan"
2861
+ ))
2850
2862
  try:
2851
- summary = breathe(messages=result_state.messages[-20:], npc=active_npc)
2863
+ summary = breathe(
2864
+ messages=result_state.messages[-20:],
2865
+ npc=active_npc
2866
+ )
2852
2867
  characterization = summary.get('output')
2853
2868
 
2854
2869
  if characterization and result_state.team:
2855
- team_ctx_path = get_team_ctx_path(result_state.team.team_path)
2870
+ team_ctx_path = get_team_ctx_path(
2871
+ result_state.team.team_path
2872
+ )
2856
2873
  if not team_ctx_path:
2857
- team_ctx_path = os.path.join(result_state.team.team_path, "team.ctx")
2874
+ team_ctx_path = os.path.join(
2875
+ result_state.team.team_path,
2876
+ "team.ctx"
2877
+ )
2858
2878
 
2859
2879
  ctx_data = {}
2860
2880
  if os.path.exists(team_ctx_path):
@@ -2879,11 +2899,20 @@ def process_result(
2879
2899
  suggestion = response.get("response", {}).get("suggestion")
2880
2900
 
2881
2901
  if suggestion:
2882
- new_context = (current_context + " " + suggestion).strip()
2883
- print(colored(f"{npc_name} suggests updating team context:", "yellow"))
2884
- print(f" - OLD: {current_context}\n + NEW: {new_context}")
2902
+ new_context = (
2903
+ current_context + " " + suggestion
2904
+ ).strip()
2905
+ print(colored(
2906
+ f"{npc_name} suggests updating team context:",
2907
+ "yellow"
2908
+ ))
2909
+ print(
2910
+ f" - OLD: {current_context}\n + NEW: {new_context}"
2911
+ )
2885
2912
 
2886
- choice = input("Apply? [y/N/e(dit)]: ").strip().lower()
2913
+ choice = input(
2914
+ "Apply? [y/N/e(dit)]: "
2915
+ ).strip().lower()
2887
2916
 
2888
2917
  if choice == 'y':
2889
2918
  ctx_data['context'] = new_context
@@ -2891,21 +2920,29 @@ def process_result(
2891
2920
  yaml.dump(ctx_data, f)
2892
2921
  print(colored("Team context updated.", "green"))
2893
2922
  elif choice == 'e':
2894
- edited_context = input(f"Edit context [{new_context}]: ").strip()
2923
+ edited_context = input(
2924
+ f"Edit context [{new_context}]: "
2925
+ ).strip()
2895
2926
  if edited_context:
2896
2927
  ctx_data['context'] = edited_context
2897
2928
  else:
2898
2929
  ctx_data['context'] = new_context
2899
2930
  with open(team_ctx_path, 'w') as f:
2900
2931
  yaml.dump(ctx_data, f)
2901
- print(colored("Team context updated with edits.", "green"))
2932
+ print(colored(
2933
+ "Team context updated with edits.",
2934
+ "green"
2935
+ ))
2902
2936
  else:
2903
2937
  print("Suggestion declined.")
2904
2938
  except Exception as e:
2905
2939
  import traceback
2906
- print(colored(f"Could not generate team suggestions: {e}", "yellow"))
2940
+ print(colored(
2941
+ f"Could not generate team suggestions: {e}",
2942
+ "yellow"
2943
+ ))
2907
2944
  traceback.print_exc()
2908
-
2945
+
2909
2946
  initial_state = ShellState(
2910
2947
  conversation_id=start_new_conversation(),
2911
2948
  stream_output=NPCSH_STREAM_OUTPUT,
npcsh/alicanto.py CHANGED
@@ -12,6 +12,16 @@ from dataclasses import dataclass, asdict, field
12
12
  from pathlib import Path
13
13
  from concurrent.futures import ThreadPoolExecutor
14
14
 
15
+
16
+ try:
17
+ from datasets import load_dataset
18
+ except:
19
+ load_dataset = None
20
+ from sklearn.feature_extraction.text import TfidfVectorizer
21
+ from sklearn.metrics.pairwise import cosine_similarity
22
+
23
+
24
+
15
25
  from npcpy.tools import auto_tools
16
26
  from npcpy.llm_funcs import get_llm_response
17
27
  from npcpy.data.web import search_web
@@ -88,11 +98,6 @@ def list_files(directory: str = ".") -> List[str]:
88
98
  return os.listdir(directory)
89
99
 
90
100
 
91
-
92
- from datasets import load_dataset
93
- from sklearn.feature_extraction.text import TfidfVectorizer
94
- from sklearn.metrics.pairwise import cosine_similarity
95
-
96
101
  DATASET_CACHE = None
97
102
  SEARCH_INDEX = None
98
103
 
npcsh/build.py ADDED
@@ -0,0 +1,291 @@
1
+ import os
2
+ import shutil
3
+ import textwrap
4
+ from pathlib import Path
5
+
6
+
7
+ def build_flask_server(config, **kwargs):
8
+ output_dir = Path(config['output_dir'])
9
+ output_dir.mkdir(parents=True, exist_ok=True)
10
+
11
+ server_script = output_dir / 'npc_server.py'
12
+
13
+ server_code = textwrap.dedent(f'''
14
+ import os
15
+ from npcpy.serve import start_flask_server
16
+ from npcpy.npc_compiler import Team
17
+ from sqlalchemy import create_engine
18
+
19
+ if __name__ == "__main__":
20
+ team_path = os.path.join(
21
+ os.path.dirname(__file__),
22
+ "npc_team"
23
+ )
24
+ db_path = os.path.expanduser("~/npcsh_history.db")
25
+
26
+ db_conn = create_engine(f'sqlite:///{{db_path}}')
27
+ team = Team(team_path=team_path, db_conn=db_conn)
28
+
29
+ start_flask_server(
30
+ port={config['port']},
31
+ cors_origins={config.get('cors_origins')},
32
+ teams={{"main": team}},
33
+ npcs=team.npcs,
34
+ db_path=db_path,
35
+ user_npc_directory=os.path.expanduser(
36
+ "~/.npcsh/npc_team"
37
+ )
38
+ )
39
+ ''')
40
+
41
+ server_script.write_text(server_code)
42
+
43
+ shutil.copytree(
44
+ config['team_path'],
45
+ output_dir / 'npc_team',
46
+ dirs_exist_ok=True
47
+ )
48
+
49
+ requirements = output_dir / 'requirements.txt'
50
+ requirements.write_text(
51
+ 'npcsh\n'
52
+ 'flask\n'
53
+ 'flask-cors\n'
54
+ 'sqlalchemy\n'
55
+ )
56
+
57
+ readme = output_dir / 'README.md'
58
+ readme.write_text(textwrap.dedent(f'''
59
+ # NPC Team Server
60
+
61
+ Run: python npc_server.py
62
+
63
+ Server will be available at http://localhost:{config['port']}
64
+
65
+ For pyinstaller standalone:
66
+ pyinstaller --onefile npc_server.py
67
+ '''))
68
+
69
+ return {
70
+ "output": f"Flask server built in {output_dir}",
71
+ "messages": kwargs.get('messages', [])
72
+ }
73
+
74
+
75
+ def build_docker_compose(config, **kwargs):
76
+ output_dir = Path(config['output_dir'])
77
+ output_dir.mkdir(parents=True, exist_ok=True)
78
+
79
+ shutil.copytree(
80
+ config['team_path'],
81
+ output_dir / 'npc_team',
82
+ dirs_exist_ok=True
83
+ )
84
+
85
+ dockerfile = output_dir / 'Dockerfile'
86
+ dockerfile.write_text(textwrap.dedent('''
87
+ FROM python:3.11-slim
88
+
89
+ WORKDIR /app
90
+
91
+ COPY requirements.txt .
92
+ RUN pip install --no-cache-dir -r requirements.txt
93
+
94
+ COPY npc_team ./npc_team
95
+ COPY npc_server.py .
96
+
97
+ EXPOSE 5337
98
+
99
+ CMD ["python", "npc_server.py"]
100
+ '''))
101
+
102
+ compose = output_dir / 'docker-compose.yml'
103
+ compose.write_text(textwrap.dedent(f'''
104
+ version: '3.8'
105
+
106
+ services:
107
+ npc-server:
108
+ build: .
109
+ ports:
110
+ - "{config['port']}:{config['port']}"
111
+ volumes:
112
+ - npc-data:/root/.npcsh
113
+ environment:
114
+ - NPCSH_DB_PATH=/root/.npcsh/npcsh_history.db
115
+
116
+ volumes:
117
+ npc-data:
118
+ '''))
119
+
120
+ build_flask_server(config, **kwargs)
121
+
122
+ return {
123
+ "output": f"Docker compose built in {output_dir}. Run: docker-compose up",
124
+ "messages": kwargs.get('messages', [])
125
+ }
126
+
127
+
128
+ def build_cli_executable(config, **kwargs):
129
+ output_dir = Path(config['output_dir'])
130
+ output_dir.mkdir(parents=True, exist_ok=True)
131
+
132
+ cli_script = output_dir / 'npc_cli.py'
133
+
134
+ cli_code = textwrap.dedent('''
135
+ import sys
136
+ from npcsh._state import setup_shell, execute_command, initial_state
137
+ from npcsh.routes import router
138
+
139
+ def main():
140
+ if len(sys.argv) < 2:
141
+ print("Usage: npc_cli <command>")
142
+ sys.exit(1)
143
+
144
+ command = " ".join(sys.argv[1:])
145
+
146
+ command_history, team, npc = setup_shell()
147
+ initial_state.npc = npc
148
+ initial_state.team = team
149
+
150
+ state, result = execute_command(
151
+ command,
152
+ initial_state,
153
+ router=router
154
+ )
155
+
156
+ output = result.get('output') if isinstance(result, dict) else result
157
+ print(output)
158
+
159
+ if __name__ == "__main__":
160
+ main()
161
+ ''')
162
+
163
+ cli_script.write_text(cli_code)
164
+
165
+ shutil.copytree(
166
+ config['team_path'],
167
+ output_dir / 'npc_team',
168
+ dirs_exist_ok=True
169
+ )
170
+
171
+ spec_file = output_dir / 'npc_cli.spec'
172
+ spec_file.write_text(textwrap.dedent('''
173
+ a = Analysis(
174
+ ['npc_cli.py'],
175
+ pathex=[],
176
+ binaries=[],
177
+ datas=[('npc_team', 'npc_team')],
178
+ hiddenimports=[],
179
+ hookspath=[],
180
+ hooksconfig={},
181
+ runtime_hooks=[],
182
+ excludes=[],
183
+ win_no_prefer_redirects=False,
184
+ win_private_assemblies=False,
185
+ cipher=None,
186
+ noarchive=False,
187
+ )
188
+ pyz = PYZ(a.pure, a.zipped_data, cipher=None)
189
+
190
+ exe = EXE(
191
+ pyz,
192
+ a.scripts,
193
+ a.binaries,
194
+ a.zipfiles,
195
+ a.datas,
196
+ [],
197
+ name='npc',
198
+ debug=False,
199
+ bootloader_ignore_signals=False,
200
+ strip=False,
201
+ upx=True,
202
+ upx_exclude=[],
203
+ runtime_tmpdir=None,
204
+ console=True,
205
+ )
206
+ '''))
207
+
208
+ return {
209
+ "output": (
210
+ f"CLI executable built in {output_dir}. "
211
+ f"Run: pyinstaller npc_cli.spec"
212
+ ),
213
+ "messages": kwargs.get('messages', [])
214
+ }
215
+
216
+
217
+ def build_static_site(config, **kwargs):
218
+ output_dir = Path(config['output_dir'])
219
+ output_dir.mkdir(parents=True, exist_ok=True)
220
+
221
+ html = output_dir / 'index.html'
222
+ html.write_text(textwrap.dedent(f'''
223
+ <!DOCTYPE html>
224
+ <html>
225
+ <head>
226
+ <title>NPC Team Interface</title>
227
+ <style>
228
+ body {{
229
+ font-family: monospace;
230
+ max-width: 800px;
231
+ margin: 50px auto;
232
+ }}
233
+ #output {{
234
+ white-space: pre-wrap;
235
+ background: #f5f5f5;
236
+ padding: 20px;
237
+ min-height: 300px;
238
+ }}
239
+ </style>
240
+ </head>
241
+ <body>
242
+ <h1>NPC Team</h1>
243
+ <input id="input" type="text"
244
+ placeholder="Enter command..."
245
+ style="width: 100%; padding: 10px;">
246
+ <div id="output"></div>
247
+
248
+ <script>
249
+ const API_URL = '{config.get("api_url", "http://localhost:5337")}';
250
+
251
+ document.getElementById('input').addEventListener('keypress',
252
+ async (e) => {{
253
+ if (e.key === 'Enter') {{
254
+ const cmd = e.target.value;
255
+ e.target.value = '';
256
+
257
+ const resp = await fetch(`${{API_URL}}/api/stream`, {{
258
+ method: 'POST',
259
+ headers: {{'Content-Type': 'application/json'}},
260
+ body: JSON.stringify({{
261
+ commandstr: cmd,
262
+ conversationId: 'web-session',
263
+ model: 'llama3.2',
264
+ provider: 'ollama'
265
+ }})
266
+ }});
267
+
268
+ const reader = resp.body.getReader();
269
+ const decoder = new TextDecoder();
270
+
271
+ while (true) {{
272
+ const {{done, value}} = await reader.read();
273
+ if (done) break;
274
+
275
+ const text = decoder.decode(value);
276
+ document.getElementById('output').textContent += text;
277
+ }}
278
+ }}
279
+ }});
280
+ </script>
281
+ </body>
282
+ </html>
283
+ '''))
284
+
285
+ return {
286
+ "output": (
287
+ f"Static site built in {output_dir}. "
288
+ f"Serve with: python -m http.server 8000"
289
+ ),
290
+ "messages": kwargs.get('messages', [])
291
+ }