npcsh 1.1.4__py3-none-any.whl → 1.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (106)
  1. npcsh/_state.py +47 -63
  2. npcsh/npc_team/corca_example.png +0 -0
  3. npcsh/npc_team/jinxs/{python_executor.jinx → code/python.jinx} +1 -1
  4. npcsh/npc_team/jinxs/{bash_executer.jinx → code/sh.jinx} +1 -1
  5. npcsh/npc_team/jinxs/code/sql.jinx +18 -0
  6. npcsh/npc_team/jinxs/modes/alicanto.jinx +88 -0
  7. npcsh/npc_team/jinxs/modes/corca.jinx +28 -0
  8. npcsh/npc_team/jinxs/modes/guac.jinx +46 -0
  9. npcsh/npc_team/jinxs/modes/plonk.jinx +57 -0
  10. npcsh/npc_team/jinxs/modes/pti.jinx +28 -0
  11. npcsh/npc_team/jinxs/modes/spool.jinx +40 -0
  12. npcsh/npc_team/jinxs/modes/wander.jinx +81 -0
  13. npcsh/npc_team/jinxs/modes/yap.jinx +25 -0
  14. npcsh/npc_team/jinxs/utils/breathe.jinx +20 -0
  15. npcsh/npc_team/jinxs/utils/core/build.jinx +65 -0
  16. npcsh/npc_team/jinxs/utils/core/compile.jinx +50 -0
  17. npcsh/npc_team/jinxs/utils/core/help.jinx +52 -0
  18. npcsh/npc_team/jinxs/utils/core/init.jinx +41 -0
  19. npcsh/npc_team/jinxs/utils/core/jinxs.jinx +32 -0
  20. npcsh/npc_team/jinxs/utils/core/set.jinx +40 -0
  21. npcsh/npc_team/jinxs/{edit_file.jinx → utils/edit_file.jinx} +1 -1
  22. npcsh/npc_team/jinxs/utils/flush.jinx +39 -0
  23. npcsh/npc_team/jinxs/utils/npc-studio.jinx +82 -0
  24. npcsh/npc_team/jinxs/utils/ots.jinx +92 -0
  25. npcsh/npc_team/jinxs/utils/plan.jinx +33 -0
  26. npcsh/npc_team/jinxs/utils/roll.jinx +66 -0
  27. npcsh/npc_team/jinxs/utils/sample.jinx +56 -0
  28. npcsh/npc_team/jinxs/utils/search/brainblast.jinx +51 -0
  29. npcsh/npc_team/jinxs/utils/search/rag.jinx +70 -0
  30. npcsh/npc_team/jinxs/utils/search/search.jinx +192 -0
  31. npcsh/npc_team/jinxs/utils/serve.jinx +29 -0
  32. npcsh/npc_team/jinxs/utils/sleep.jinx +116 -0
  33. npcsh/npc_team/jinxs/utils/trigger.jinx +36 -0
  34. npcsh/npc_team/jinxs/utils/vixynt.jinx +129 -0
  35. npcsh/npcsh.py +13 -11
  36. npcsh/routes.py +80 -1420
  37. npcsh-1.1.5.data/data/npcsh/npc_team/alicanto.jinx +88 -0
  38. npcsh-1.1.5.data/data/npcsh/npc_team/brainblast.jinx +51 -0
  39. npcsh-1.1.5.data/data/npcsh/npc_team/breathe.jinx +20 -0
  40. npcsh-1.1.5.data/data/npcsh/npc_team/build.jinx +65 -0
  41. npcsh-1.1.5.data/data/npcsh/npc_team/compile.jinx +50 -0
  42. npcsh-1.1.5.data/data/npcsh/npc_team/corca.jinx +28 -0
  43. npcsh-1.1.5.data/data/npcsh/npc_team/corca_example.png +0 -0
  44. {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/edit_file.jinx +1 -1
  45. npcsh-1.1.5.data/data/npcsh/npc_team/flush.jinx +39 -0
  46. npcsh-1.1.5.data/data/npcsh/npc_team/guac.jinx +46 -0
  47. npcsh-1.1.5.data/data/npcsh/npc_team/help.jinx +52 -0
  48. npcsh-1.1.5.data/data/npcsh/npc_team/init.jinx +41 -0
  49. npcsh-1.1.5.data/data/npcsh/npc_team/jinxs.jinx +32 -0
  50. npcsh-1.1.5.data/data/npcsh/npc_team/npc-studio.jinx +82 -0
  51. npcsh-1.1.5.data/data/npcsh/npc_team/ots.jinx +92 -0
  52. npcsh-1.1.5.data/data/npcsh/npc_team/plan.jinx +33 -0
  53. npcsh-1.1.5.data/data/npcsh/npc_team/plonk.jinx +57 -0
  54. npcsh-1.1.5.data/data/npcsh/npc_team/pti.jinx +28 -0
  55. npcsh-1.1.4.data/data/npcsh/npc_team/python_executor.jinx → npcsh-1.1.5.data/data/npcsh/npc_team/python.jinx +1 -1
  56. npcsh-1.1.5.data/data/npcsh/npc_team/rag.jinx +70 -0
  57. npcsh-1.1.5.data/data/npcsh/npc_team/roll.jinx +66 -0
  58. npcsh-1.1.5.data/data/npcsh/npc_team/sample.jinx +56 -0
  59. npcsh-1.1.5.data/data/npcsh/npc_team/search.jinx +192 -0
  60. npcsh-1.1.5.data/data/npcsh/npc_team/serve.jinx +29 -0
  61. npcsh-1.1.5.data/data/npcsh/npc_team/set.jinx +40 -0
  62. npcsh-1.1.4.data/data/npcsh/npc_team/bash_executer.jinx → npcsh-1.1.5.data/data/npcsh/npc_team/sh.jinx +1 -1
  63. npcsh-1.1.5.data/data/npcsh/npc_team/sleep.jinx +116 -0
  64. npcsh-1.1.5.data/data/npcsh/npc_team/spool.jinx +40 -0
  65. npcsh-1.1.5.data/data/npcsh/npc_team/sql.jinx +18 -0
  66. npcsh-1.1.5.data/data/npcsh/npc_team/trigger.jinx +36 -0
  67. npcsh-1.1.5.data/data/npcsh/npc_team/vixynt.jinx +129 -0
  68. npcsh-1.1.5.data/data/npcsh/npc_team/wander.jinx +81 -0
  69. npcsh-1.1.5.data/data/npcsh/npc_team/yap.jinx +25 -0
  70. {npcsh-1.1.4.dist-info → npcsh-1.1.5.dist-info}/METADATA +1 -1
  71. npcsh-1.1.5.dist-info/RECORD +132 -0
  72. npcsh/npc_team/jinxs/image_generation.jinx +0 -29
  73. npcsh/npc_team/jinxs/internet_search.jinx +0 -31
  74. npcsh/npc_team/jinxs/screen_cap.jinx +0 -25
  75. npcsh-1.1.4.data/data/npcsh/npc_team/image_generation.jinx +0 -29
  76. npcsh-1.1.4.data/data/npcsh/npc_team/internet_search.jinx +0 -31
  77. npcsh-1.1.4.data/data/npcsh/npc_team/screen_cap.jinx +0 -25
  78. npcsh-1.1.4.dist-info/RECORD +0 -78
  79. /npcsh/npc_team/jinxs/{kg_search.jinx → utils/search/kg_search.jinx} +0 -0
  80. /npcsh/npc_team/jinxs/{memory_search.jinx → utils/search/memory_search.jinx} +0 -0
  81. {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/alicanto.npc +0 -0
  82. {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/alicanto.png +0 -0
  83. {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/corca.npc +0 -0
  84. {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/corca.png +0 -0
  85. {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/foreman.npc +0 -0
  86. {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/frederic.npc +0 -0
  87. {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/frederic4.png +0 -0
  88. {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/guac.png +0 -0
  89. {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/kadiefa.npc +0 -0
  90. {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/kadiefa.png +0 -0
  91. {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/kg_search.jinx +0 -0
  92. {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/memory_search.jinx +0 -0
  93. {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
  94. {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
  95. {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/plonk.npc +0 -0
  96. {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/plonk.png +0 -0
  97. {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/plonkjr.npc +0 -0
  98. {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/plonkjr.png +0 -0
  99. {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/sibiji.npc +0 -0
  100. {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/sibiji.png +0 -0
  101. {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/spool.png +0 -0
  102. {npcsh-1.1.4.data → npcsh-1.1.5.data}/data/npcsh/npc_team/yap.png +0 -0
  103. {npcsh-1.1.4.dist-info → npcsh-1.1.5.dist-info}/WHEEL +0 -0
  104. {npcsh-1.1.4.dist-info → npcsh-1.1.5.dist-info}/entry_points.txt +0 -0
  105. {npcsh-1.1.4.dist-info → npcsh-1.1.5.dist-info}/licenses/LICENSE +0 -0
  106. {npcsh-1.1.4.dist-info → npcsh-1.1.5.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,56 @@
1
+ jinx_name: "sample"
2
+ description: "Send a prompt directly to the LLM."
3
+ inputs:
4
+ - prompt: "" # Required text prompt to send to the LLM.
5
+ - model: "" # LLM model to use. Defaults to NPC's model.
6
+ - provider: "" # LLM provider to use. Defaults to NPC's provider.
7
+ steps:
8
+ - name: "send_prompt_to_llm"
9
+ engine: "python"
10
+ code: |
11
+ import traceback
12
+ from npcpy.llm_funcs import get_llm_response
13
+
14
+ prompt = context.get('prompt')
15
+ llm_model = context.get('model')
16
+ llm_provider = context.get('provider')
17
+ output_messages = context.get('messages', [])
18
+ current_npc = context.get('npc')
19
+
20
+ if not prompt or not prompt.strip():
21
+ context['output'] = "Usage: /sample <your prompt> [-m --model] model [-p --provider] provider"
22
+ context['messages'] = output_messages
23
+ exit()
24
+
25
+ # Fallback for model/provider if not explicitly set in Jinx inputs
26
+ if not llm_model and current_npc and current_npc.model:
27
+ llm_model = current_npc.model
28
+ if not llm_provider and current_npc and current_npc.provider:
29
+ llm_provider = current_npc.provider
30
+
31
+ # Final fallbacks (these would ideally come from npcsh._state config)
32
+ if not llm_model: llm_model = "gemini-1.5-pro" # Example default
33
+ if not llm_provider: llm_provider = "gemini" # Example default
34
+
35
+ try:
36
+ result = get_llm_response(
37
+ prompt=prompt,
38
+ model=llm_model,
39
+ provider=llm_provider,
40
+ npc=current_npc,
41
+ **{k:v for k,v in context.items() if k not in ['messages', 'prompt', 'model', 'provider']} # Pass other context
42
+ )
43
+
44
+ if isinstance(result, dict):
45
+ context['output'] = result.get('response')
46
+ context['messages'] = result.get('messages', output_messages)
47
+ context['model'] = llm_model
48
+ context['provider'] = llm_provider
49
+ else:
50
+ context['output'] = str(result)
51
+ context['messages'] = output_messages
52
+
53
+ except Exception as e:
54
+ traceback.print_exc()
55
+ context['output'] = f"Error sampling LLM: {e}"
56
+ context['messages'] = output_messages
@@ -0,0 +1,51 @@
jinx_name: "brainblast"
description: "Execute an advanced chunked search on command history"
inputs:
  - search_query: ""                       # Required search terms.
  - history_db_path: "~/npcsh_history.db"  # Path to the command history database.
steps:
  - name: "execute_brainblast"
    engine: "python"
    code: |
      import os
      import traceback
      from npcpy.memory.command_history import CommandHistory
      from npcpy.memory.search import execute_brainblast_command

      search_query = context.get('search_query')
      history_db_path = context.get('history_db_path')
      output_messages = context.get('messages', [])

      if not search_query or not search_query.strip():
          context['output'] = "Usage: /brainblast <search_terms>"
          context['messages'] = output_messages
          exit()

      command_history = context.get('command_history')
      if not command_history:
          try:
              command_history = CommandHistory(os.path.expanduser(history_db_path))
              context['command_history'] = command_history  # Cache for reuse
          except Exception as e:
              context['output'] = f"Error connecting to command history: {e}"
              context['messages'] = output_messages
              exit()

      try:
          # The original handler forwarded **kwargs, so forward the context.
          # BUG FIX: 'command_history' must be excluded from the forwarded
          # context — it is always present by this point (set above) and is
          # already passed explicitly, so forwarding it again raised
          # "TypeError: got multiple values for keyword argument 'command_history'".
          result = execute_brainblast_command(
              command=search_query,
              command_history=command_history,
              **{k: v for k, v in context.items()
                 if k not in ['messages', 'command_history']})

          if isinstance(result, dict):
              context['output'] = result.get('output', 'Brainblast search executed.')
              context['messages'] = result.get('messages', output_messages)
          else:
              context['output'] = str(result)
              context['messages'] = output_messages
      except Exception as e:
          traceback.print_exc()
          context['output'] = f"Error executing brainblast command: {e}"
          context['messages'] = output_messages
@@ -0,0 +1,70 @@
jinx_name: "rag"
description: "Execute a RAG command using ChromaDB embeddings with optional file input (-f/--file)"
inputs:
  - query: ""                            # Required search query for RAG.
  - file_paths: ""                       # Optional comma-separated file paths to include in RAG.
  - vector_db_path: "~/npcsh_chroma.db"  # Path to the ChromaDB vector database.
  - emodel: ""                           # Embedding model to use. Defaults to NPCSH_EMBEDDING_MODEL or NPC's model.
  - eprovider: ""                        # Embedding provider to use. Defaults to NPCSH_EMBEDDING_PROVIDER or NPC's provider.
steps:
  - name: "execute_rag"
    engine: "python"
    code: |
      import os
      import traceback
      from npcpy.data.load import load_file_contents
      from npcpy.memory.search import execute_rag_command

      query = context.get('query')
      file_paths_str = context.get('file_paths')
      vector_db_path = context.get('vector_db_path')
      embedding_model = context.get('emodel')
      embedding_provider = context.get('eprovider')
      output_messages = context.get('messages', [])
      current_npc = context.get('npc')

      file_paths = []
      if file_paths_str and file_paths_str.strip():
          file_paths = [os.path.abspath(os.path.expanduser(p.strip())) for p in file_paths_str.split(',')]

      if not query and not file_paths:
          context['output'] = "Usage: /rag [-f file_path] <query>"
          context['messages'] = output_messages
          exit()

      # Prefer the NPC's own model/provider when none was supplied via inputs.
      if not embedding_model and current_npc and current_npc.model:
          embedding_model = current_npc.model
      if not embedding_provider and current_npc and current_npc.provider:
          embedding_provider = current_npc.provider

      # Final fallbacks (these would ideally come from npcsh._state config)
      if not embedding_model: embedding_model = "nomic-ai/nomic-embed-text-v1.5" # Example default
      if not embedding_provider: embedding_provider = "ollama" # Example default

      try:
          file_contents = []
          for file_path in file_paths:
              try:
                  chunks = load_file_contents(file_path)
                  file_name = os.path.basename(file_path)
                  file_contents.extend([f"[{file_name}] {chunk}" for chunk in chunks])
              except Exception as file_err:
                  file_contents.append(f"Error processing file {file_path}: {str(file_err)}")

          # BUG FIX: 'vector_db_path' is a declared input and therefore always
          # present in context; it is also passed explicitly below, so it must
          # be excluded from the forwarded context, otherwise the call raised
          # "TypeError: got multiple values for keyword argument 'vector_db_path'".
          result = execute_rag_command(
              command=query,
              vector_db_path=os.path.expanduser(vector_db_path),
              embedding_model=embedding_model,
              embedding_provider=embedding_provider,
              file_contents=file_contents if file_paths else None,
              **{k: v for k, v in context.items()
                 if k not in ['messages', 'query', 'file_paths', 'vector_db_path']}
          )
          context['output'] = result.get('response')
          context['messages'] = result.get('messages', output_messages)

      except Exception as e:
          traceback.print_exc()
          context['output'] = f"Error executing RAG command: {e}"
          context['messages'] = output_messages
@@ -0,0 +1,192 @@
jinx_name: "search"
description: "Execute web search or memory/KG search. Usage: /search [-m/-mem | -kg] <query>"
inputs:
  - query: ""                              # Required search query.
  - search_type: "web"                     # Type of search: "web", "memory", or "kg".
  - sprovider: ""                          # Search provider for web search. Defaults to NPCSH_SEARCH_PROVIDER.
  - history_db_path: "~/npcsh_history.db"  # Path to the command history database for memory/KG search.
steps:
  - name: "execute_search"
    engine: "python"
    code: |
      import os
      import traceback
      from npcpy.data.web import search_web
      from npcpy.memory.command_history import CommandHistory, load_kg_from_db, save_kg_to_db  # For KG search
      from npcsh._state import get_relevant_memories  # For memory search

      query = context.get('query')
      search_type = context.get('search_type')
      search_provider = context.get('sprovider')
      history_db_path = context.get('history_db_path')
      output_messages = context.get('messages', [])
      current_npc = context.get('npc')
      current_team = context.get('team')

      if not query or not query.strip():
          context['output'] = (
              "Usage:\n"
              " /search <query> - Web search\n"
              " /search -m <query> - Memory search\n"
              " /search -kg <query> - Knowledge graph search"
          )
          context['messages'] = output_messages
          exit()

      def search_memories(query_str, current_context, output_msgs):
          # Lazily open the history DB if the caller did not provide one.
          command_history = current_context.get('command_history')
          if not command_history:
              db_path = os.path.expanduser(current_context.get("history_db_path", "~/npcsh_history.db"))
              try:
                  command_history = CommandHistory(db_path)
                  current_context['command_history'] = command_history
              except Exception as e:
                  return {"output": f"Error connecting to history: {e}", "messages": output_msgs}

          npc_name = current_context.get('npc_name', '__none__')
          team_name = current_context.get('team_name', '__none__')
          current_path = current_context.get('current_path', os.getcwd())
          state = current_context.get('state')

          try:
              memories = get_relevant_memories(
                  command_history=command_history,
                  npc_name=npc_name,
                  team_name=team_name,
                  path=current_path,
                  query=query_str,
                  max_memories=10,
                  state=state
              )

              if not memories:
                  output = f"No memories found for query: '{query_str}'"
              else:
                  output = f"Found {len(memories)} memories:\n\n"
                  for i, mem in enumerate(memories, 1):
                      final_mem = (
                          mem.get('final_memory') or
                          mem.get('initial_memory')
                      )
                      timestamp = mem.get('timestamp', 'unknown')
                      output += f"{i}. [{timestamp}] {final_mem}\n"

              return {"output": output, "messages": output_msgs}

          except Exception as e:
              traceback.print_exc()
              return {"output": f"Error searching memories: {e}", "messages": output_msgs}

      def search_knowledge_graph(query_str, current_context, output_msgs):
          command_history = current_context.get('command_history')
          if not command_history:
              db_path = os.path.expanduser(current_context.get("history_db_path", "~/npcsh_history.db"))
              try:
                  command_history = CommandHistory(db_path)
                  current_context['command_history'] = command_history
              except Exception as e:
                  return {"output": f"Error connecting to history: {e}", "messages": output_msgs}

          npc_name = current_context.get('npc_name', '__none__')
          team_name = current_context.get('team_name', '__none__')
          current_path = current_context.get('current_path', os.getcwd())

          try:
              engine = command_history.engine
              kg = load_kg_from_db(
                  engine,
                  team_name,
                  npc_name,
                  current_path
              )

              if not kg or not kg.get('facts'):
                  return {
                      "output": (
                          f"No knowledge graph found for current scope.\n"
                          f"Scope: Team='{team_name}', "
                          f"NPC='{npc_name}', Path='{current_path}'"
                      ),
                      "messages": output_msgs
                  }

              # Simple substring match over fact statements and concept
              # names/descriptions.
              query_lower = query_str.lower()
              matching_facts = []
              matching_concepts = []

              for fact in kg.get('facts', []):
                  statement = fact.get('statement', '').lower()
                  if query_lower in statement:
                      matching_facts.append(fact)

              for concept in kg.get('concepts', []):
                  name = concept.get('name', '').lower()
                  desc = concept.get('description', '').lower()
                  if query_lower in name or query_lower in desc:
                      matching_concepts.append(concept)

              output = f"Knowledge Graph Search Results for '{query_str}':\n\n"

              if matching_facts:
                  output += f"## Facts ({len(matching_facts)}):\n"
                  for i, fact in enumerate(matching_facts, 1):
                      output += f"{i}. {fact.get('statement')}\n"
                  output += "\n"

              if matching_concepts:
                  output += f"## Concepts ({len(matching_concepts)}):\n"
                  for i, concept in enumerate(matching_concepts, 1):
                      name = concept.get('name')
                      desc = concept.get('description', '')
                      output += f"{i}. {name}: {desc}\n"

              if not matching_facts and not matching_concepts:
                  output += "No matching facts or concepts found."

              return {"output": output, "messages": output_msgs}

          except Exception as e:
              traceback.print_exc()
              return {"output": f"Error searching KG: {e}", "messages": output_msgs}

      def search_web_default(query_str, current_context, output_msgs):
          # Fallback for search_provider if not explicitly set in Jinx inputs.
          # If NPCSH_SEARCH_PROVIDER were accessible we would use it here.
          current_search_provider = current_context.get('sprovider')
          if not current_search_provider:
              current_search_provider = "google" # Example default

          try:
              search_results = search_web(query_str, provider=current_search_provider)
              output = (
                  "\n".join([f"- {res}" for res in search_results])
                  if search_results
                  else "No results found."
              )
          except Exception as e:
              traceback.print_exc()
              output = f"Error during web search: {e}"

          return {"output": output, "messages": output_msgs}


      # Populate npc_name, team_name, current_path for the search helpers.
      # BUG FIX: the original guard was
      #   isinstance(current_npc, type(None).__class__)
      # which is isinstance(current_npc, type) — False for any NPC *instance* —
      # so npc_name was always "__none__" and memory/KG search never scoped to
      # the active NPC. A plain truthiness check (matching the team line below)
      # is the intended behavior.
      context['npc_name'] = current_npc.name if current_npc else "__none__"
      context['team_name'] = current_team.name if current_team else "__none__"
      context['current_path'] = os.getcwd()  # Or get from context if available

      final_result = None
      if search_type == 'memory':
          final_result = search_memories(query, context, output_messages)
      elif search_type == 'kg':
          final_result = search_knowledge_graph(query, context, output_messages)
      else:
          final_result = search_web_default(query, context, output_messages)

      context['output'] = final_result.get('output')
      context['messages'] = final_result.get('messages', output_messages)
@@ -0,0 +1,29 @@
jinx_name: "serve"
description: "Serve an NPC Team"
inputs:
  - port: 5337  # The port to run the Flask server on.
  - cors: ""    # Comma-separated CORS origins.
steps:
  - name: "start_flask_server"
    engine: "python"
    code: |
      from npcpy.serve import start_flask_server

      port = context.get('port')
      cors_str = context.get('cors')
      output_messages = context.get('messages', [])

      # Turn the comma-separated CORS string into a list, or None when unset.
      cors_origins = None
      if cors_str and cors_str.strip():
          cors_origins = [origin.strip() for origin in cors_str.split(",")]

      # start_flask_server blocks, so this holds the Jinx until the server is
      # stopped. A production setup might instead run it in a separate process
      # or use a non-blocking server start.
      start_flask_server(
          port=int(port),  # Ensure port is an integer
          cors_origins=cors_origins,
      )

      context['output'] = "NPC Team server started. Execution of this jinx will pause until the server is stopped."
      context['messages'] = output_messages
@@ -0,0 +1,116 @@
jinx_name: "sleep"
description: "Evolve knowledge graph. Use --dream to also run creative synthesis."
inputs:
  - dream: False  # Boolean flag to also run creative synthesis (dream process).
  - ops: ""       # Comma-separated list of operations to configure KG sleep process.
  - model: ""     # LLM model to use for KG evolution. Defaults to NPC's model.
  - provider: ""  # LLM provider to use for KG evolution. Defaults to NPC's provider.
steps:
  - name: "evolve_knowledge_graph"
    engine: "python"
    code: |
      import os
      import traceback
      from npcpy.memory.command_history import CommandHistory, load_kg_from_db, save_kg_to_db
      from npcpy.memory.knowledge_graph import kg_sleep_process, kg_dream_process

      is_dreaming = context.get('dream')
      operations_str = context.get('ops')
      llm_model = context.get('model')
      llm_provider = context.get('provider')
      output_messages = context.get('messages', [])
      current_npc = context.get('npc')
      current_team = context.get('team')

      operations_config = None
      if operations_str and isinstance(operations_str, str):
          operations_config = [op.strip() for op in operations_str.split(',')]

      # Prefer the NPC's own model/provider when none was supplied via inputs.
      if not llm_model and current_npc and current_npc.model:
          llm_model = current_npc.model
      if not llm_provider and current_npc and current_npc.provider:
          llm_provider = current_npc.provider

      # Final fallbacks (these would ideally come from npcsh._state config)
      if not llm_model: llm_model = "gemini-1.5-pro" # Example default
      if not llm_provider: llm_provider = "gemini" # Example default

      # BUG FIX: the original guard was
      #   isinstance(current_npc, type(None).__class__)
      # i.e. isinstance(current_npc, type) — False for any NPC *instance* —
      # so npc_name was always "__none__" and the NPC-scoped KG was never
      # loaded. Use a plain truthiness check like the team line above it.
      team_name = current_team.name if current_team else "__none__"
      npc_name = current_npc.name if current_npc else "__none__"
      current_path = os.getcwd()
      scope_str = f"Team: '{team_name}', NPC: '{npc_name}', Path: '{current_path}'"

      command_history = None
      try:
          db_path = os.getenv("NPCSH_DB_PATH", os.path.expanduser("~/npcsh_history.db"))
          command_history = CommandHistory(db_path)
          engine = command_history.engine
      except Exception as e:
          context['output'] = f"Error connecting to history database for KG access: {e}"
          context['messages'] = output_messages
          exit()

      output_result = ""
      try:
          current_kg = load_kg_from_db(engine, team_name, npc_name, current_path)

          if not current_kg or not current_kg.get('facts'):
              output_msg = f"Knowledge graph for the current scope is empty. Nothing to process.\n"
              output_msg += f" - Scope Checked: {scope_str}\n\n"
              output_msg += "**Hint:** Have a conversation or run some commands first to build up knowledge in this specific context. The KG is unique to each combination of Team, NPC, and directory."
              context['output'] = output_msg
              context['messages'] = output_messages
              exit()

          original_facts = len(current_kg.get('facts', []))
          original_concepts = len(current_kg.get('concepts', []))

          process_type = "Sleep"
          ops_display = f"with operations: {operations_config}" if operations_config else "with random operations"

          evolved_kg, _ = kg_sleep_process(
              existing_kg=current_kg,
              model=llm_model,
              provider=llm_provider,
              npc=current_npc,
              operations_config=operations_config
          )

          if is_dreaming:
              process_type += " & Dream"
              evolved_kg, _ = kg_dream_process(
                  existing_kg=evolved_kg,
                  model=llm_model,
                  provider=llm_provider,
                  npc=current_npc
              )

          save_kg_to_db(engine, evolved_kg, team_name, npc_name, current_path)

          new_facts = len(evolved_kg.get('facts', []))
          new_concepts = len(evolved_kg.get('concepts', []))

          output_result = f"{process_type} process complete.\n"
          output_result += f"- Facts: {original_facts} -> {new_facts} ({new_facts - original_facts:+})\n"
          output_result += f"- Concepts: {original_concepts} -> {new_concepts} ({new_concepts - original_concepts:+})"

          print('Evolved facts:', evolved_kg.get('facts'))
          print('Evolved concepts:', evolved_kg.get('concepts'))

          context['output'] = output_result
          context['messages'] = output_messages

      except Exception as e:
          traceback.print_exc()
          context['output'] = f"Error during KG evolution process: {e}"
          context['messages'] = output_messages
      finally:
          if command_history:  # Only close if it was successfully initialized
              command_history.close()
@@ -0,0 +1,36 @@
jinx_name: "trigger"
description: "Execute a trigger command"
inputs:
  - trigger_description: ""  # Required description of the trigger to execute.
steps:
  - name: "execute_trigger"
    engine: "python"
    code: |
      import traceback
      from npcpy.work.trigger import execute_trigger_command

      trigger_description = context.get('trigger_description')
      output_messages = context.get('messages', [])

      if not trigger_description or not trigger_description.strip():
          context['output'] = "Usage: /trigger <trigger_description>"
          context['messages'] = output_messages
          exit()

      try:
          # Forward the whole context as keyword args, mirroring the old handler.
          result = execute_trigger_command(command=trigger_description, **context)

          if isinstance(result, dict):
              context['output'] = result.get('output', 'Trigger executed.')
              context['messages'] = result.get('messages', output_messages)
          else:
              context['output'] = str(result)
              context['messages'] = output_messages
      except NameError:
          context['output'] = "Trigger function (execute_trigger_command) not available."
          context['messages'] = output_messages
      except Exception as e:
          traceback.print_exc()
          context['output'] = f"Error executing trigger: {e}"
          context['messages'] = output_messages