npcsh 1.1.12__py3-none-any.whl → 1.1.14__py3-none-any.whl

This diff compares publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
Files changed (99)
  1. npcsh/_state.py +700 -377
  2. npcsh/alicanto.py +54 -1153
  3. npcsh/completion.py +206 -0
  4. npcsh/config.py +163 -0
  5. npcsh/corca.py +35 -1462
  6. npcsh/execution.py +185 -0
  7. npcsh/guac.py +31 -1986
  8. npcsh/npc_team/jinxs/code/sh.jinx +11 -15
  9. npcsh/npc_team/jinxs/modes/alicanto.jinx +186 -80
  10. npcsh/npc_team/jinxs/modes/corca.jinx +243 -22
  11. npcsh/npc_team/jinxs/modes/guac.jinx +313 -42
  12. npcsh/npc_team/jinxs/modes/plonk.jinx +209 -48
  13. npcsh/npc_team/jinxs/modes/pti.jinx +167 -25
  14. npcsh/npc_team/jinxs/modes/spool.jinx +158 -37
  15. npcsh/npc_team/jinxs/modes/wander.jinx +179 -74
  16. npcsh/npc_team/jinxs/modes/yap.jinx +258 -21
  17. npcsh/npc_team/jinxs/utils/chat.jinx +39 -12
  18. npcsh/npc_team/jinxs/utils/cmd.jinx +44 -0
  19. npcsh/npc_team/jinxs/utils/search.jinx +3 -3
  20. npcsh/npc_team/jinxs/utils/usage.jinx +33 -0
  21. npcsh/npcsh.py +76 -20
  22. npcsh/parsing.py +118 -0
  23. npcsh/plonk.py +41 -329
  24. npcsh/pti.py +41 -201
  25. npcsh/spool.py +34 -239
  26. npcsh/ui.py +199 -0
  27. npcsh/wander.py +54 -542
  28. npcsh/yap.py +38 -570
  29. npcsh-1.1.14.data/data/npcsh/npc_team/alicanto.jinx +194 -0
  30. npcsh-1.1.14.data/data/npcsh/npc_team/chat.jinx +44 -0
  31. npcsh-1.1.14.data/data/npcsh/npc_team/cmd.jinx +44 -0
  32. npcsh-1.1.14.data/data/npcsh/npc_team/corca.jinx +249 -0
  33. npcsh-1.1.14.data/data/npcsh/npc_team/guac.jinx +317 -0
  34. npcsh-1.1.14.data/data/npcsh/npc_team/plonk.jinx +214 -0
  35. npcsh-1.1.14.data/data/npcsh/npc_team/pti.jinx +170 -0
  36. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/search.jinx +3 -3
  37. npcsh-1.1.14.data/data/npcsh/npc_team/sh.jinx +34 -0
  38. npcsh-1.1.14.data/data/npcsh/npc_team/spool.jinx +161 -0
  39. npcsh-1.1.14.data/data/npcsh/npc_team/usage.jinx +33 -0
  40. npcsh-1.1.14.data/data/npcsh/npc_team/wander.jinx +186 -0
  41. npcsh-1.1.14.data/data/npcsh/npc_team/yap.jinx +262 -0
  42. {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/METADATA +1 -1
  43. npcsh-1.1.14.dist-info/RECORD +135 -0
  44. npcsh-1.1.12.data/data/npcsh/npc_team/alicanto.jinx +0 -88
  45. npcsh-1.1.12.data/data/npcsh/npc_team/chat.jinx +0 -17
  46. npcsh-1.1.12.data/data/npcsh/npc_team/corca.jinx +0 -28
  47. npcsh-1.1.12.data/data/npcsh/npc_team/guac.jinx +0 -46
  48. npcsh-1.1.12.data/data/npcsh/npc_team/plonk.jinx +0 -53
  49. npcsh-1.1.12.data/data/npcsh/npc_team/pti.jinx +0 -28
  50. npcsh-1.1.12.data/data/npcsh/npc_team/sh.jinx +0 -38
  51. npcsh-1.1.12.data/data/npcsh/npc_team/spool.jinx +0 -40
  52. npcsh-1.1.12.data/data/npcsh/npc_team/wander.jinx +0 -81
  53. npcsh-1.1.12.data/data/npcsh/npc_team/yap.jinx +0 -25
  54. npcsh-1.1.12.dist-info/RECORD +0 -126
  55. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/agent.jinx +0 -0
  56. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/alicanto.npc +0 -0
  57. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/alicanto.png +0 -0
  58. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/build.jinx +0 -0
  59. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/compile.jinx +0 -0
  60. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/compress.jinx +0 -0
  61. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/corca.npc +0 -0
  62. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/corca.png +0 -0
  63. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/corca_example.png +0 -0
  64. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/edit_file.jinx +0 -0
  65. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/foreman.npc +0 -0
  66. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/frederic.npc +0 -0
  67. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/frederic4.png +0 -0
  68. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/guac.png +0 -0
  69. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/help.jinx +0 -0
  70. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/init.jinx +0 -0
  71. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/jinxs.jinx +0 -0
  72. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/kadiefa.npc +0 -0
  73. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/kadiefa.png +0 -0
  74. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/load_file.jinx +0 -0
  75. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/npc-studio.jinx +0 -0
  76. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
  77. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
  78. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/ots.jinx +0 -0
  79. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonk.npc +0 -0
  80. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonk.png +0 -0
  81. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonkjr.npc +0 -0
  82. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonkjr.png +0 -0
  83. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/python.jinx +0 -0
  84. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/roll.jinx +0 -0
  85. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sample.jinx +0 -0
  86. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/serve.jinx +0 -0
  87. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/set.jinx +0 -0
  88. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sibiji.npc +0 -0
  89. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sibiji.png +0 -0
  90. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sleep.jinx +0 -0
  91. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/spool.png +0 -0
  92. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sql.jinx +0 -0
  93. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/trigger.jinx +0 -0
  94. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/vixynt.jinx +0 -0
  95. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/yap.png +0 -0
  96. {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/WHEEL +0 -0
  97. {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/entry_points.txt +0 -0
  98. {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/licenses/LICENSE +0 -0
  99. {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/top_level.txt +0 -0
@@ -1,28 +1,170 @@
- jinx_name: "pti"
- description: "Enter Pardon-The-Interruption mode for human-in-the-loop reasoning."
+ jinx_name: pti
+ description: Pardon-The-Interruption - human-in-the-loop reasoning with think tags and interruptible streaming
+ npc: frederic
  inputs:
- - command_args: "" # The full command string or specific arguments for PTI mode.
+ - model: null
+ - provider: null
+ - files: null
+ - reasoning_model: null
+
  steps:
- - name: "enter_pti"
- engine: "python"
+ - name: pti_repl
+ engine: python
  code: |
- import traceback
- from npcsh.pti import enter_pti_mode
-
- command_args = context.get('command_args', '') # The full command string from router
- output_messages = context.get('messages', [])
-
- try:
- # enter_pti_mode likely expects the full command string for its own parsing
- result = enter_pti_mode(command=command_args, **context)
-
- if isinstance(result, dict):
- context['output'] = result.get('output', 'Entered PTI mode.')
- context['messages'] = result.get('messages', output_messages)
- else:
- context['output'] = str(result)
- context['messages'] = output_messages
- except Exception as e:
- traceback.print_exc()
- context['output'] = f"Error entering pti mode: {e}"
- context['messages'] = output_messages
+ import os
+ import sys
+ from termcolor import colored
+
+ from npcpy.llm_funcs import get_llm_response
+ from npcpy.npc_sysenv import get_system_message, render_markdown
+ from npcpy.data.load import load_file_contents
+ from npcpy.data.text import rag_search
+
+ npc = context.get('npc')
+ team = context.get('team')
+ messages = context.get('messages', [])
+ files = context.get('files')
+
+ # PTI uses reasoning model for deeper thinking
+ model = context.get('reasoning_model') or context.get('model') or (npc.model if npc else None)
+ provider = context.get('provider') or (npc.provider if npc else None)
+
+ print("""
+ ██████╗ ████████╗██╗
+ ██╔══██╗╚══██╔══╝██║
+ ██████╔╝ ██║ ██║
+ ██╔═══╝ ██║ ██║
+ ██║ ██║ ██║
+ ╚═╝ ╚═╝ ╚═╝
+
+ Pardon-The-Interruption
+ Human-in-the-loop reasoning mode
+ """)
+
+ npc_name = npc.name if npc else "pti"
+ print(f"Entering PTI mode (NPC: {npc_name}). Type '/pq' to exit.")
+ print(" - AI will use <think> tags for step-by-step reasoning")
+ print(" - Use <request_for_input> to pause and ask questions")
+ print(" - Ctrl+C interrupts stream for immediate feedback")
+
+ # Load files if provided
+ loaded_content = {}
+ if files:
+ if isinstance(files, str):
+ files = [f.strip() for f in files.split(',')]
+ for file_path in files:
+ file_path = os.path.expanduser(file_path)
+ if os.path.exists(file_path):
+ try:
+ chunks = load_file_contents(file_path)
+ loaded_content[file_path] = "\n".join(chunks)
+ print(colored(f"Loaded: {file_path}", "green"))
+ except Exception as e:
+ print(colored(f"Error loading {file_path}: {e}", "red"))
+
+ # System message for PTI mode
+ pti_system = """You are an AI assistant in PTI (Pardon-The-Interruption) mode.
+
+ IMPORTANT INSTRUCTIONS:
+ 1. Think step-by-step using <think>...</think> tags to show your reasoning
+ 2. When you need more information from the user, use <request_for_input>your question</request_for_input>
+ 3. Be thorough but concise in your reasoning
+ 4. The user can interrupt at any time to provide guidance
+
+ Example:
+ <think>
+ Let me break this down...
+ Step 1: First I need to understand X
+ Step 2: Then consider Y
+ </think>
+
+ <request_for_input>
+ I notice you mentioned Z. Could you clarify what you mean by that?
+ </request_for_input>"""
+
+ if not messages or messages[0].get("role") != "system":
+ messages.insert(0, {"role": "system", "content": pti_system})
+
+ # REPL loop
+ user_input = None
+ while True:
+ try:
+ if not user_input:
+ prompt_str = f"{npc_name}:pti> "
+ user_input = input(prompt_str).strip()
+
+ if not user_input:
+ user_input = None
+ continue
+
+ if user_input.lower() == "/pq":
+ print("Exiting PTI mode.")
+ break
+
+ # Build prompt with file context
+ prompt_for_llm = user_input
+ if loaded_content:
+ context_str = "\n".join([f"--- {fname} ---\n{content}" for fname, content in loaded_content.items()])
+ prompt_for_llm += f"\n\nContext:\n{context_str}"
+
+ prompt_for_llm += "\n\nThink step-by-step using <think> tags. Use <request_for_input> when you need clarification."
+
+ messages.append({"role": "user", "content": user_input})
+
+ try:
+ resp = get_llm_response(
+ prompt_for_llm,
+ model=model,
+ provider=provider,
+ messages=messages[:-1], # Don't duplicate the user message
+ stream=True,
+ npc=npc
+ )
+
+ response_stream = resp.get('response')
+ full_response = ""
+ request_found = False
+
+ # Stream the response
+ for chunk in response_stream:
+ chunk_content = ""
+ if hasattr(chunk, 'choices') and chunk.choices:
+ delta = chunk.choices[0].delta
+ if hasattr(delta, 'content') and delta.content:
+ chunk_content = delta.content
+ elif isinstance(chunk, dict):
+ chunk_content = chunk.get("message", {}).get("content", "")
+
+ if chunk_content:
+ print(chunk_content, end='', flush=True)
+ full_response += chunk_content
+
+ # Check for request_for_input
+ if "</request_for_input>" in full_response:
+ request_found = True
+ break
+
+ print() # newline after stream
+
+ messages.append({"role": "assistant", "content": full_response})
+ user_input = None # Reset for next iteration
+
+ except KeyboardInterrupt:
+ print(colored("\n\n--- Interrupted ---", "yellow"))
+ interrupt_input = input("Your feedback: ").strip()
+ if interrupt_input:
+ user_input = interrupt_input
+ else:
+ user_input = None
+ continue
+
+ except KeyboardInterrupt:
+ print("\nUse '/pq' to exit or continue.")
+ user_input = None
+ continue
+ except EOFError:
+ print("\nExiting PTI mode.")
+ break
+
+ context['output'] = "Exited PTI mode."
+ context['messages'] = messages
@@ -1,40 +1,161 @@
- jinx_name: "spool"
- description: "Enter interactive chat (spool) mode"
- inputs: [] # Spool mode typically takes its parameters directly from the environment/kwargs
+ jinx_name: spool
+ description: Interactive chat mode - simple conversational interface with an NPC
+ inputs:
+ - model: null
+ - provider: null
+ - attachments: null
+ - stream: true
+
  steps:
- - name: "enter_spool"
- engine: "python"
+ - name: spool_repl
+ engine: python
  code: |
- import traceback
- from npcpy.npc_compiler import NPC, Team
- from npcsh.spool import enter_spool_mode
-
- output_messages = context.get('messages', [])
- current_npc = context.get('npc')
- current_team = context.get('team')
-
- try:
- # Handle potential string NPC name if passed from CLI
- if isinstance(current_npc, str) and current_team:
- npc_name = current_npc
- if npc_name in current_team.npcs:
- current_npc = current_team.npcs[npc_name]
+ import os
+ import sys
+ from termcolor import colored
+
+ from npcpy.llm_funcs import get_llm_response
+ from npcpy.npc_sysenv import get_system_message, render_markdown
+ from npcpy.data.load import load_file_contents
+ from npcpy.data.text import rag_search
+
+ npc = context.get('npc')
+ team = context.get('team')
+ messages = context.get('messages', [])
+ stream = context.get('stream', True)
+ attachments = context.get('attachments')
+
+ # Use NPC's model/provider or fallback
+ model = context.get('model') or (npc.model if npc else None)
+ provider = context.get('provider') or (npc.provider if npc else None)
+
+ # ASCII art
+ print("""
+ _____ ____ ____ ____ _
+ / ___/| _ \ / __ \ / __ \| |
+ \___ \| |_) | | | | | | | |
+ ___) | __/| | | | | | | |___
+ |____/|_| \____/ \____/|_____|
+ """)
+
+ npc_name = npc.name if npc else "chat"
+ print(f"Entering spool mode (NPC: {npc_name}). Type '/sq' to exit.")
+
+ # Load attachments if provided
+ loaded_chunks = {}
+ if attachments:
+ if isinstance(attachments, str):
+ attachments = [f.strip() for f in attachments.split(',')]
+ for file_path in attachments:
+ file_path = os.path.expanduser(file_path)
+ if os.path.exists(file_path):
+ try:
+ chunks = load_file_contents(file_path)
+ loaded_chunks[file_path] = chunks
+ print(colored(f"Loaded {len(chunks)} chunks from: {file_path}", "green"))
+ except Exception as e:
+ print(colored(f"Error loading {file_path}: {e}", "red"))
+
+ # Ensure system message
+ if not messages or messages[0].get("role") != "system":
+ sys_msg = get_system_message(npc) if npc else "You are a helpful assistant."
+ messages.insert(0, {"role": "system", "content": sys_msg})
+
+ # REPL loop
+ while True:
+ try:
+ prompt_str = f"{npc_name}> "
+ user_input = input(prompt_str).strip()
+
+ if not user_input:
+ continue
+
+ if user_input.lower() == "/sq":
+ print("Exiting spool mode.")
+ break
+
+ # Handle /ots for screenshots inline
+ if user_input.startswith("/ots"):
+ from npcpy.data.image import capture_screenshot
+ parts = user_input.split()
+ image_paths = []
+ if len(parts) > 1:
+ for p in parts[1:]:
+ fp = os.path.expanduser(p)
+ if os.path.exists(fp):
+ image_paths.append(fp)
+ else:
+ ss = capture_screenshot()
+ if ss and "file_path" in ss:
+ image_paths.append(ss["file_path"])
+ print(colored(f"Screenshot: {ss['filename']}", "green"))
+
+ if image_paths:
+ vision_prompt = input("Prompt for image(s): ").strip() or "Describe these images."
+ resp = get_llm_response(
+ vision_prompt,
+ model=npc.vision_model if hasattr(npc, 'vision_model') else model,
+ provider=npc.vision_provider if hasattr(npc, 'vision_provider') else provider,
+ messages=messages,
+ images=image_paths,
+ stream=stream,
+ npc=npc
+ )
+ messages = resp.get('messages', messages)
+ render_markdown(str(resp.get('response', '')))
+ continue
+
+ # Add RAG context if files loaded
+ current_prompt = user_input
+ if loaded_chunks:
+ context_content = ""
+ for filename, chunks in loaded_chunks.items():
+ full_text = "\n".join(chunks)
+ retrieved = rag_search(user_input, full_text, similarity_threshold=0.3)
+ if retrieved:
+ context_content += f"\n\nContext from {filename}:\n{retrieved}\n"
+ if context_content:
+ current_prompt += f"\n\n--- Relevant context ---{context_content}"
+
+ # Get response
+ resp = get_llm_response(
+ current_prompt,
+ model=model,
+ provider=provider,
+ messages=messages,
+ stream=stream,
+ npc=npc
+ )
+
+ messages = resp.get('messages', messages)
+ response_text = resp.get('response', '')
+
+ # Handle streaming vs non-streaming
+ if hasattr(response_text, '__iter__') and not isinstance(response_text, str):
+ full_response = ""
+ for chunk in response_text:
+ if hasattr(chunk, 'choices') and chunk.choices:
+ delta = chunk.choices[0].delta
+ if hasattr(delta, 'content') and delta.content:
+ print(delta.content, end='', flush=True)
+ full_response += delta.content
+ print()
  else:
- context['output'] = f"Error: NPC '{npc_name}' not found in team. Available NPCs: {', '.join(current_team.npcs.keys())}"
- context['messages'] = output_messages
- exit()
- context['npc'] = current_npc # Ensure the NPC object is updated in context
-
- result = enter_spool_mode(**context) # Pass all context as kwargs
-
- if isinstance(result, dict):
- context['output'] = result.get('output', 'Exited Spool Mode.')
- context['messages'] = result.get('messages', output_messages)
- else:
- context['output'] = str(result)
- context['messages'] = output_messages
-
- except Exception as e:
- traceback.print_exc()
- context['output'] = f"Error entering spool mode: {e}"
- context['messages'] = output_messages
+ render_markdown(str(response_text))
+
+ # Track usage if available
+ if 'usage' in resp and npc and hasattr(npc, 'shared_context'):
+ usage = resp['usage']
+ npc.shared_context['session_input_tokens'] += usage.get('input_tokens', 0)
+ npc.shared_context['session_output_tokens'] += usage.get('output_tokens', 0)
+ npc.shared_context['turn_count'] += 1
+
+ except KeyboardInterrupt:
+ print("\nUse '/sq' to exit or continue.")
+ continue
+ except EOFError:
+ print("\nExiting spool mode.")
+ break
+
+ context['output'] = "Exited spool mode."
+ context['messages'] = messages
@@ -1,81 +1,186 @@
- jinx_name: "wander"
- description: "Enter wander mode (experimental)"
+ jinx_name: wander
+ description: Experimental wandering mode - creative exploration with varied temperatures and random events
  inputs:
- - problem: "" # The problem to wander about.
- - environment: "" # Optional environment for wander mode.
- - low_temp: 0.5 # Low temperature setting for LLM.
- - high_temp: 1.9 # High temperature setting for LLM.
- - interruption_likelihood: 1.0 # Likelihood of interruption.
- - sample_rate: 0.4 # Sample rate.
- - n_high_temp_streams: 5 # Number of high temperature streams.
- - include_events: False # Whether to include events.
- - num_events: 3 # Number of events to include.
+ - problem: null
+ - environment: null
+ - low_temp: 0.5
+ - high_temp: 1.9
+ - interruption_likelihood: 1.0
+ - sample_rate: 0.4
+ - n_streams: 5
+ - include_events: false
+ - num_events: 3
+ - model: null
+ - provider: null
+
  steps:
- - name: "enter_wander"
- engine: "python"
+ - name: wander_explore
+ engine: python
  code: |
- import traceback
- from npcsh.wander import enter_wander_mode
-
+ import os
+ import random
+ from termcolor import colored
+
+ from npcpy.llm_funcs import get_llm_response
+
+ npc = context.get('npc')
+ messages = context.get('messages', [])
+
  problem = context.get('problem')
  environment = context.get('environment')
- low_temp = float(context.get('low_temp', 0.5)) # Ensure float type
- high_temp = float(context.get('high_temp', 1.9)) # Ensure float type
- interruption_likelihood = float(context.get('interruption_likelihood', 1.0)) # Ensure float type
- sample_rate = float(context.get('sample_rate', 0.4)) # Ensure float type
- n_high_temp_streams = int(context.get('n_high_temp_streams', 5)) # Ensure int type
- include_events = context.get('include_events', False) # Boolean type
- num_events = int(context.get('num_events', 3)) # Ensure int type
-
- current_npc = context.get('npc')
- llm_model = context.get('model')
- llm_provider = context.get('provider')
- output_messages = context.get('messages', [])
-
- if not problem or not problem.strip():
- context['output'] = "Usage: /wander <problem> [key=value...]"
- context['messages'] = output_messages
+ low_temp = float(context.get('low_temp', 0.5))
+ high_temp = float(context.get('high_temp', 1.9))
+ interruption_likelihood = float(context.get('interruption_likelihood', 1.0))
+ sample_rate = float(context.get('sample_rate', 0.4))
+ n_streams = int(context.get('n_streams', 5))
+ include_events = context.get('include_events', False)
+ num_events = int(context.get('num_events', 3))
+
+ model = context.get('model') or (npc.model if npc else 'gemini-1.5-pro')
+ provider = context.get('provider') or (npc.provider if npc else 'gemini')
+
+ if not problem:
+ context['output'] = """Usage: /wander <problem to explore>
+
+ Options:
+ --environment DESC Metaphorical environment for wandering
+ --low-temp F Low temperature (default: 0.5)
+ --high-temp F High temperature (default: 1.9)
+ --n-streams N Number of exploration streams (default: 5)
+ --include-events Add random events during wandering
+
+ Example: /wander How might we reimagine urban transportation?"""
+ context['messages'] = messages
  exit()
-
- # Fallback for model/provider if not explicitly set in Jinx inputs
- if not llm_model and current_npc and current_npc.model:
- llm_model = current_npc.model
- if not llm_provider and current_npc and current_npc.provider:
- llm_provider = current_npc.provider
-
- # Final fallbacks (these would ideally come from npcsh._state config)
- if not llm_model: llm_model = "gemini-1.5-pro" # Example default
- if not llm_provider: llm_provider = "gemini" # Example default
-
- try:
- mode_args = {
- 'problem': problem,
- 'npc': current_npc,
- 'model': llm_model,
- 'provider': llm_provider,
- 'environment': environment,
- 'low_temp': low_temp,
- 'high_temp': high_temp,
- 'interruption_likelihood': interruption_likelihood,
- 'sample_rate': sample_rate,
- 'n_high_temp_streams': n_high_temp_streams,
- 'include_events': include_events,
- 'num_events': num_events
- }
-
- result = enter_wander_mode(**mode_args)
-
- output_result = ""
- if isinstance(result, list) and result:
- output_result = result[-1].get("insight", "Wander mode session complete.")
+
+ print(f"""
+ ██╗ ██╗ █████╗ ███╗ ██╗██████╗ ███████╗██████╗
+ ██║ ██║██╔══██╗████╗ ██║██╔══██╗██╔════╝██╔══██╗
+ ██║ █╗ ██║███████║██╔██╗ ██║██║ ██║█████╗ ██████╔╝
+ ██║███╗██║██╔══██║██║╚██╗██║██║ ██║██╔══╝ ██╔══██╗
+ ╚███╔███╔╝██║ ██║██║ ╚████║██████╔╝███████╗██║ ██║
+ ╚══╝╚══╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚═════╝ ╚══════╝╚═╝ ╚═╝
+
+ Experimental Wandering Mode
+ Problem: {problem}
+ Temperature range: {low_temp} - {high_temp}
+ Streams: {n_streams}
+ """)
+
+ # Generate environment if not provided
+ if not environment:
+ env_prompt = f"""Create a rich, metaphorical environment for wandering through while thinking about:
+ "{problem}"
+
+ The environment should:
+ 1. Have distinct regions or areas
+ 2. Include various elements and features
+ 3. Be metaphorically related to the problem
+ 4. Be described in 3-5 sentences
+
+ Provide only the description, no framing."""
+
+ print(colored("Generating wandering environment...", "cyan"))
+ resp = get_llm_response(env_prompt, model=model, provider=provider, temperature=0.7, npc=npc)
+ environment = str(resp.get('response', 'A vast conceptual landscape stretches before you.'))
+ print(f"\n{environment}\n")
+
+ # Event types for random encounters
+ event_types = ["encounter", "discovery", "obstacle", "insight", "shift", "memory"]
+
+ all_insights = []
+ wandering_log = []
+
+ for stream_idx in range(n_streams):
+ # Alternate between low and high temperature
+ if stream_idx % 2 == 0:
+ temp = low_temp
+ mode = "focused"
  else:
- output_result = str(result) if result else "Wander mode session complete."
-
- output_messages.append({"role": "assistant", "content": output_result})
- context['output'] = output_result
- context['messages'] = output_messages
-
- except Exception as e:
- traceback.print_exc()
- context['output'] = f"Error during wander mode: {e}"
- context['messages'] = output_messages
+ temp = high_temp
+ mode = "creative"
+
+ print(colored(f"\n--- Stream {stream_idx + 1}/{n_streams} ({mode}, temp={temp}) ---", "cyan"))
+
+ # Generate random event if enabled
+ event_context = ""
+ if include_events and random.random() < sample_rate:
+ event_type = random.choice(event_types)
+ event_prompt = f"""In the environment: {environment}
+
+ While exploring the problem "{problem}", generate a {event_type} event.
+ The event should be metaphorical and relate to the problem.
+ Describe it in 2-3 sentences."""
+
+ event_resp = get_llm_response(event_prompt, model=model, provider=provider, temperature=0.9, npc=npc)
+ event = str(event_resp.get('response', ''))
+ event_context = f"\n\nEvent ({event_type}): {event}"
+ print(colored(f"[{event_type.upper()}] {event[:100]}...", "yellow"))
+
+ # Main wandering exploration
+ wander_prompt = f"""You are wandering through: {environment}
+
+ Problem being explored: "{problem}"
+ {event_context}
+
+ Previous insights: {all_insights[-3:] if all_insights else 'Starting fresh'}
+
+ In this {mode} exploration (temperature {temp}):
+ 1. Let your mind wander through the conceptual space
+ 2. Make unexpected connections
+ 3. Notice what emerges from the wandering
+ 4. Share any insights, questions, or realizations
+
+ Think freely and explore."""
+
+ resp = get_llm_response(wander_prompt, model=model, provider=provider, temperature=temp, npc=npc)
+ stream_output = str(resp.get('response', ''))
+ print(stream_output)
+
+ all_insights.append(stream_output)
+ wandering_log.append({
+ "stream": stream_idx + 1,
+ "mode": mode,
+ "temperature": temp,
+ "event": event_context if include_events else None,
+ "insight": stream_output
+ })
+
+ # Random interruption
+ if random.random() < interruption_likelihood * 0.2:
+ print(colored("\n[Pause for reflection...]", "magenta"))
+ reflect_prompt = f"Briefly reflect on what's emerged so far about: {problem}"
+ reflect_resp = get_llm_response(reflect_prompt, model=model, provider=provider, temperature=0.4, npc=npc)
+ print(colored(str(reflect_resp.get('response', ''))[:200], "magenta"))
+
+ # Synthesis
+ print(colored("\n--- Synthesizing Wanderings ---", "cyan"))
+
+ synthesis_prompt = f"""After wandering through "{environment}" exploring "{problem}":
+
+ All insights gathered:
+ {chr(10).join(all_insights)}
+
+ Synthesize what emerged from this wandering:
+ 1. Key themes that appeared
+ 2. Unexpected connections made
+ 3. New questions raised
+ 4. Potential directions to explore further"""
+
+ resp = get_llm_response(synthesis_prompt, model=model, provider=provider, temperature=0.5, npc=npc)
+ synthesis = str(resp.get('response', ''))
+
+ print("\n" + "="*50)
+ print(colored("WANDERING SYNTHESIS", "green", attrs=['bold']))
+ print("="*50)
+ print(synthesis)
+
+ context['output'] = synthesis
+ context['messages'] = messages
+ context['wander_result'] = {
+ 'problem': problem,
+ 'environment': environment,
+ 'log': wandering_log,
+ 'insights': all_insights,
+ 'synthesis': synthesis
+ }