npcsh-1.1.12-py3-none-any.whl → npcsh-1.1.14-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (99)
  1. npcsh/_state.py +700 -377
  2. npcsh/alicanto.py +54 -1153
  3. npcsh/completion.py +206 -0
  4. npcsh/config.py +163 -0
  5. npcsh/corca.py +35 -1462
  6. npcsh/execution.py +185 -0
  7. npcsh/guac.py +31 -1986
  8. npcsh/npc_team/jinxs/code/sh.jinx +11 -15
  9. npcsh/npc_team/jinxs/modes/alicanto.jinx +186 -80
  10. npcsh/npc_team/jinxs/modes/corca.jinx +243 -22
  11. npcsh/npc_team/jinxs/modes/guac.jinx +313 -42
  12. npcsh/npc_team/jinxs/modes/plonk.jinx +209 -48
  13. npcsh/npc_team/jinxs/modes/pti.jinx +167 -25
  14. npcsh/npc_team/jinxs/modes/spool.jinx +158 -37
  15. npcsh/npc_team/jinxs/modes/wander.jinx +179 -74
  16. npcsh/npc_team/jinxs/modes/yap.jinx +258 -21
  17. npcsh/npc_team/jinxs/utils/chat.jinx +39 -12
  18. npcsh/npc_team/jinxs/utils/cmd.jinx +44 -0
  19. npcsh/npc_team/jinxs/utils/search.jinx +3 -3
  20. npcsh/npc_team/jinxs/utils/usage.jinx +33 -0
  21. npcsh/npcsh.py +76 -20
  22. npcsh/parsing.py +118 -0
  23. npcsh/plonk.py +41 -329
  24. npcsh/pti.py +41 -201
  25. npcsh/spool.py +34 -239
  26. npcsh/ui.py +199 -0
  27. npcsh/wander.py +54 -542
  28. npcsh/yap.py +38 -570
  29. npcsh-1.1.14.data/data/npcsh/npc_team/alicanto.jinx +194 -0
  30. npcsh-1.1.14.data/data/npcsh/npc_team/chat.jinx +44 -0
  31. npcsh-1.1.14.data/data/npcsh/npc_team/cmd.jinx +44 -0
  32. npcsh-1.1.14.data/data/npcsh/npc_team/corca.jinx +249 -0
  33. npcsh-1.1.14.data/data/npcsh/npc_team/guac.jinx +317 -0
  34. npcsh-1.1.14.data/data/npcsh/npc_team/plonk.jinx +214 -0
  35. npcsh-1.1.14.data/data/npcsh/npc_team/pti.jinx +170 -0
  36. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/search.jinx +3 -3
  37. npcsh-1.1.14.data/data/npcsh/npc_team/sh.jinx +34 -0
  38. npcsh-1.1.14.data/data/npcsh/npc_team/spool.jinx +161 -0
  39. npcsh-1.1.14.data/data/npcsh/npc_team/usage.jinx +33 -0
  40. npcsh-1.1.14.data/data/npcsh/npc_team/wander.jinx +186 -0
  41. npcsh-1.1.14.data/data/npcsh/npc_team/yap.jinx +262 -0
  42. {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/METADATA +1 -1
  43. npcsh-1.1.14.dist-info/RECORD +135 -0
  44. npcsh-1.1.12.data/data/npcsh/npc_team/alicanto.jinx +0 -88
  45. npcsh-1.1.12.data/data/npcsh/npc_team/chat.jinx +0 -17
  46. npcsh-1.1.12.data/data/npcsh/npc_team/corca.jinx +0 -28
  47. npcsh-1.1.12.data/data/npcsh/npc_team/guac.jinx +0 -46
  48. npcsh-1.1.12.data/data/npcsh/npc_team/plonk.jinx +0 -53
  49. npcsh-1.1.12.data/data/npcsh/npc_team/pti.jinx +0 -28
  50. npcsh-1.1.12.data/data/npcsh/npc_team/sh.jinx +0 -38
  51. npcsh-1.1.12.data/data/npcsh/npc_team/spool.jinx +0 -40
  52. npcsh-1.1.12.data/data/npcsh/npc_team/wander.jinx +0 -81
  53. npcsh-1.1.12.data/data/npcsh/npc_team/yap.jinx +0 -25
  54. npcsh-1.1.12.dist-info/RECORD +0 -126
  55. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/agent.jinx +0 -0
  56. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/alicanto.npc +0 -0
  57. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/alicanto.png +0 -0
  58. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/build.jinx +0 -0
  59. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/compile.jinx +0 -0
  60. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/compress.jinx +0 -0
  61. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/corca.npc +0 -0
  62. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/corca.png +0 -0
  63. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/corca_example.png +0 -0
  64. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/edit_file.jinx +0 -0
  65. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/foreman.npc +0 -0
  66. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/frederic.npc +0 -0
  67. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/frederic4.png +0 -0
  68. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/guac.png +0 -0
  69. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/help.jinx +0 -0
  70. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/init.jinx +0 -0
  71. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/jinxs.jinx +0 -0
  72. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/kadiefa.npc +0 -0
  73. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/kadiefa.png +0 -0
  74. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/load_file.jinx +0 -0
  75. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/npc-studio.jinx +0 -0
  76. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
  77. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
  78. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/ots.jinx +0 -0
  79. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonk.npc +0 -0
  80. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonk.png +0 -0
  81. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonkjr.npc +0 -0
  82. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/plonkjr.png +0 -0
  83. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/python.jinx +0 -0
  84. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/roll.jinx +0 -0
  85. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sample.jinx +0 -0
  86. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/serve.jinx +0 -0
  87. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/set.jinx +0 -0
  88. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sibiji.npc +0 -0
  89. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sibiji.png +0 -0
  90. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sleep.jinx +0 -0
  91. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/spool.png +0 -0
  92. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/sql.jinx +0 -0
  93. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/trigger.jinx +0 -0
  94. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/vixynt.jinx +0 -0
  95. {npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/yap.png +0 -0
  96. {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/WHEEL +0 -0
  97. {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/entry_points.txt +0 -0
  98. {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/licenses/LICENSE +0 -0
  99. {npcsh-1.1.12.dist-info → npcsh-1.1.14.dist-info}/top_level.txt +0 -0
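Reading the stats as a whole, this release appears to be a restructuring: each interactive mode's Python module shrinks to a thin entry point (guac.py -1986 lines, corca.py -1462, alicanto.py -1153) while the mode logic moves into declarative .jinx files, and new shared modules (completion.py, config.py, execution.py, parsing.py, ui.py) are split out of _state.py. The hunks below cover the new npc_team data files and the one modified jinx.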
npcsh-1.1.14.data/data/npcsh/npc_team/pti.jinx
@@ -0,0 +1,170 @@
+jinx_name: pti
+description: Pardon-The-Interruption - human-in-the-loop reasoning with think tags and interruptible streaming
+npc: frederic
+inputs:
+  - model: null
+  - provider: null
+  - files: null
+  - reasoning_model: null
+
+steps:
+  - name: pti_repl
+    engine: python
+    code: |
+      import os
+      import sys
+      from termcolor import colored
+
+      from npcpy.llm_funcs import get_llm_response
+      from npcpy.npc_sysenv import get_system_message, render_markdown
+      from npcpy.data.load import load_file_contents
+      from npcpy.data.text import rag_search
+
+      npc = context.get('npc')
+      team = context.get('team')
+      messages = context.get('messages', [])
+      files = context.get('files')
+
+      # PTI uses reasoning model for deeper thinking
+      model = context.get('reasoning_model') or context.get('model') or (npc.model if npc else None)
+      provider = context.get('provider') or (npc.provider if npc else None)
+
+      print("""
+      ██████╗ ████████╗██╗
+      ██╔══██╗╚══██╔══╝██║
+      ██████╔╝   ██║   ██║
+      ██╔═══╝    ██║   ██║
+      ██║        ██║   ██║
+      ╚═╝        ╚═╝   ╚═╝
+
+      Pardon-The-Interruption
+      Human-in-the-loop reasoning mode
+      """)
+
+      npc_name = npc.name if npc else "pti"
+      print(f"Entering PTI mode (NPC: {npc_name}). Type '/pq' to exit.")
+      print(" - AI will use <think> tags for step-by-step reasoning")
+      print(" - Use <request_for_input> to pause and ask questions")
+      print(" - Ctrl+C interrupts stream for immediate feedback")
+
+      # Load files if provided
+      loaded_content = {}
+      if files:
+          if isinstance(files, str):
+              files = [f.strip() for f in files.split(',')]
+          for file_path in files:
+              file_path = os.path.expanduser(file_path)
+              if os.path.exists(file_path):
+                  try:
+                      chunks = load_file_contents(file_path)
+                      loaded_content[file_path] = "\n".join(chunks)
+                      print(colored(f"Loaded: {file_path}", "green"))
+                  except Exception as e:
+                      print(colored(f"Error loading {file_path}: {e}", "red"))
+
+      # System message for PTI mode
+      pti_system = """You are an AI assistant in PTI (Pardon-The-Interruption) mode.
+
+      IMPORTANT INSTRUCTIONS:
+      1. Think step-by-step using <think>...</think> tags to show your reasoning
+      2. When you need more information from the user, use <request_for_input>your question</request_for_input>
+      3. Be thorough but concise in your reasoning
+      4. The user can interrupt at any time to provide guidance
+
+      Example:
+      <think>
+      Let me break this down...
+      Step 1: First I need to understand X
+      Step 2: Then consider Y
+      </think>
+
+      <request_for_input>
+      I notice you mentioned Z. Could you clarify what you mean by that?
+      </request_for_input>"""
+
+      if not messages or messages[0].get("role") != "system":
+          messages.insert(0, {"role": "system", "content": pti_system})
+
+      # REPL loop
+      user_input = None
+      while True:
+          try:
+              if not user_input:
+                  prompt_str = f"{npc_name}:pti> "
+                  user_input = input(prompt_str).strip()
+
+              if not user_input:
+                  user_input = None
+                  continue
+
+              if user_input.lower() == "/pq":
+                  print("Exiting PTI mode.")
+                  break
+
+              # Build prompt with file context
+              prompt_for_llm = user_input
+              if loaded_content:
+                  context_str = "\n".join([f"--- {fname} ---\n{content}" for fname, content in loaded_content.items()])
+                  prompt_for_llm += f"\n\nContext:\n{context_str}"
+
+              prompt_for_llm += "\n\nThink step-by-step using <think> tags. Use <request_for_input> when you need clarification."
+
+              messages.append({"role": "user", "content": user_input})
+
+              try:
+                  resp = get_llm_response(
+                      prompt_for_llm,
+                      model=model,
+                      provider=provider,
+                      messages=messages[:-1],  # Don't duplicate the user message
+                      stream=True,
+                      npc=npc
+                  )
+
+                  response_stream = resp.get('response')
+                  full_response = ""
+                  request_found = False
+
+                  # Stream the response
+                  for chunk in response_stream:
+                      chunk_content = ""
+                      if hasattr(chunk, 'choices') and chunk.choices:
+                          delta = chunk.choices[0].delta
+                          if hasattr(delta, 'content') and delta.content:
+                              chunk_content = delta.content
+                      elif isinstance(chunk, dict):
+                          chunk_content = chunk.get("message", {}).get("content", "")
+
+                      if chunk_content:
+                          print(chunk_content, end='', flush=True)
+                          full_response += chunk_content
+
+                      # Check for request_for_input
+                      if "</request_for_input>" in full_response:
+                          request_found = True
+                          break
+
+                  print()  # newline after stream
+
+                  messages.append({"role": "assistant", "content": full_response})
+                  user_input = None  # Reset for next iteration
+
+              except KeyboardInterrupt:
+                  print(colored("\n\n--- Interrupted ---", "yellow"))
+                  interrupt_input = input("Your feedback: ").strip()
+                  if interrupt_input:
+                      user_input = interrupt_input
+                  else:
+                      user_input = None
+                  continue
+
+          except KeyboardInterrupt:
+              print("\nUse '/pq' to exit or continue.")
+              user_input = None
+              continue
+          except EOFError:
+              print("\nExiting PTI mode.")
+              break
+
+      context['output'] = "Exited PTI mode."
+      context['messages'] = messages
{npcsh-1.1.12.data → npcsh-1.1.14.data}/data/npcsh/npc_team/search.jinx
@@ -41,11 +41,11 @@ steps:
 
       current_npc = current_state.npc
      current_team = current_state.team
-
+
      npc_name = getattr(current_npc, 'name', '__none__') if current_npc else '__none__'
      team_name = getattr(current_team, 'name', '__none__') if current_team else '__none__'
      current_path = os.getcwd()
-      db_path = os.path.expanduser(context.get("history_db_path"))
+      db_path = os.path.expanduser(context.get("history_db_path") or "~/.npcsh/npcsh_history.db")
 
      try:
          cmd_history = CommandHistory(db_path)
@@ -101,7 +101,7 @@
 
      result = execute_rag_command(
          command=query,
-         vector_db_path=os.path.expanduser(context.get('vector_db_path')),
+         vector_db_path=os.path.expanduser(context.get('vector_db_path') or "~/.npcsh/npcsh_chroma.db"),
          embedding_model=emodel,
          embedding_provider=eprovider,
          file_contents=file_contents or None
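Both changes follow the same pattern: fall back to a literal default when the context key is unset. The likely motivation is that os.path.expanduser(None) raises a TypeError, so a bare context.get(...) would crash the jinx whenever no path was configured. A minimal sketch of the before/after behavior (the empty dict stands in for a jinx context with no configured paths; the defaults are the ones added in this diff):

    import os

    ctx = {}  # hypothetical jinx context with no configured paths

    # 1.1.12 behavior: the missing key reaches expanduser as None and raises.
    try:
        os.path.expanduser(ctx.get("history_db_path"))
    except TypeError as e:
        print(f"without the fallback: {e}")

    # 1.1.14 behavior: the `or` default keeps the jinx usable out of the box.
    db_path = os.path.expanduser(ctx.get("history_db_path") or "~/.npcsh/npcsh_history.db")
    vector_db_path = os.path.expanduser(ctx.get("vector_db_path") or "~/.npcsh/npcsh_chroma.db")
    print(db_path, vector_db_path)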
npcsh-1.1.14.data/data/npcsh/npc_team/sh.jinx
@@ -0,0 +1,34 @@
+jinx_name: sh
+description: Execute bash queries. Should be used to grep for file contents, list directories, explore information to answer user questions more practically.
+inputs:
+  - bash_command
+steps:
+  - name: execute_bash
+    engine: python
+    code: |
+      import subprocess
+      import os
+
+      cmd = '{{ bash_command }}'
+      output = ""
+
+      process = subprocess.Popen(
+          cmd,
+          shell=True,
+          stdout=subprocess.PIPE,
+          stderr=subprocess.PIPE
+      )
+      stdout, stderr = process.communicate()
+
+      # Only show debug output if NPCSH_DEBUG is set
+      if os.environ.get("NPCSH_DEBUG") == "1":
+          import sys
+          print(f"[sh] cmd: {cmd}", file=sys.stderr)
+          print(f"[sh] stdout: {stdout.decode('utf-8', errors='ignore')[:200]}", file=sys.stderr)
+
+      if stderr:
+          output = f"Error: {stderr.decode('utf-8')}"
+      else:
+          output = stdout.decode('utf-8')
+
+
npcsh-1.1.14.data/data/npcsh/npc_team/spool.jinx
@@ -0,0 +1,161 @@
+jinx_name: spool
+description: Interactive chat mode - simple conversational interface with an NPC
+inputs:
+  - model: null
+  - provider: null
+  - attachments: null
+  - stream: true
+
+steps:
+  - name: spool_repl
+    engine: python
+    code: |
+      import os
+      import sys
+      from termcolor import colored
+
+      from npcpy.llm_funcs import get_llm_response
+      from npcpy.npc_sysenv import get_system_message, render_markdown
+      from npcpy.data.load import load_file_contents
+      from npcpy.data.text import rag_search
+
+      npc = context.get('npc')
+      team = context.get('team')
+      messages = context.get('messages', [])
+      stream = context.get('stream', True)
+      attachments = context.get('attachments')
+
+      # Use NPC's model/provider or fallback
+      model = context.get('model') or (npc.model if npc else None)
+      provider = context.get('provider') or (npc.provider if npc else None)
+
+      # ASCII art
+      print("""
+       _____ ____ ____ ____ _
+      / ___/| _ \ / __ \ / __ \| |
+      \___ \| |_) | | | | | | | |
+       ___) | __/| | | | | | | |___
+      |____/|_| \____/ \____/|_____|
+      """)
+
+      npc_name = npc.name if npc else "chat"
+      print(f"Entering spool mode (NPC: {npc_name}). Type '/sq' to exit.")
+
+      # Load attachments if provided
+      loaded_chunks = {}
+      if attachments:
+          if isinstance(attachments, str):
+              attachments = [f.strip() for f in attachments.split(',')]
+          for file_path in attachments:
+              file_path = os.path.expanduser(file_path)
+              if os.path.exists(file_path):
+                  try:
+                      chunks = load_file_contents(file_path)
+                      loaded_chunks[file_path] = chunks
+                      print(colored(f"Loaded {len(chunks)} chunks from: {file_path}", "green"))
+                  except Exception as e:
+                      print(colored(f"Error loading {file_path}: {e}", "red"))
+
+      # Ensure system message
+      if not messages or messages[0].get("role") != "system":
+          sys_msg = get_system_message(npc) if npc else "You are a helpful assistant."
+          messages.insert(0, {"role": "system", "content": sys_msg})
+
+      # REPL loop
+      while True:
+          try:
+              prompt_str = f"{npc_name}> "
+              user_input = input(prompt_str).strip()
+
+              if not user_input:
+                  continue
+
+              if user_input.lower() == "/sq":
+                  print("Exiting spool mode.")
+                  break
+
+              # Handle /ots for screenshots inline
+              if user_input.startswith("/ots"):
+                  from npcpy.data.image import capture_screenshot
+                  parts = user_input.split()
+                  image_paths = []
+                  if len(parts) > 1:
+                      for p in parts[1:]:
+                          fp = os.path.expanduser(p)
+                          if os.path.exists(fp):
+                              image_paths.append(fp)
+                  else:
+                      ss = capture_screenshot()
+                      if ss and "file_path" in ss:
+                          image_paths.append(ss["file_path"])
+                          print(colored(f"Screenshot: {ss['filename']}", "green"))
+
+                  if image_paths:
+                      vision_prompt = input("Prompt for image(s): ").strip() or "Describe these images."
+                      resp = get_llm_response(
+                          vision_prompt,
+                          model=npc.vision_model if hasattr(npc, 'vision_model') else model,
+                          provider=npc.vision_provider if hasattr(npc, 'vision_provider') else provider,
+                          messages=messages,
+                          images=image_paths,
+                          stream=stream,
+                          npc=npc
+                      )
+                      messages = resp.get('messages', messages)
+                      render_markdown(str(resp.get('response', '')))
+                  continue
+
+              # Add RAG context if files loaded
+              current_prompt = user_input
+              if loaded_chunks:
+                  context_content = ""
+                  for filename, chunks in loaded_chunks.items():
+                      full_text = "\n".join(chunks)
+                      retrieved = rag_search(user_input, full_text, similarity_threshold=0.3)
+                      if retrieved:
+                          context_content += f"\n\nContext from {filename}:\n{retrieved}\n"
+                  if context_content:
+                      current_prompt += f"\n\n--- Relevant context ---{context_content}"
+
+              # Get response
+              resp = get_llm_response(
+                  current_prompt,
+                  model=model,
+                  provider=provider,
+                  messages=messages,
+                  stream=stream,
+                  npc=npc
+              )
+
+              messages = resp.get('messages', messages)
+              response_text = resp.get('response', '')
+
+              # Handle streaming vs non-streaming
+              if hasattr(response_text, '__iter__') and not isinstance(response_text, str):
+                  full_response = ""
+                  for chunk in response_text:
+                      if hasattr(chunk, 'choices') and chunk.choices:
+                          delta = chunk.choices[0].delta
+                          if hasattr(delta, 'content') and delta.content:
+                              print(delta.content, end='', flush=True)
+                              full_response += delta.content
+                  print()
+              else:
+                  render_markdown(str(response_text))
+
+              # Track usage if available
+              if 'usage' in resp and npc and hasattr(npc, 'shared_context'):
+                  usage = resp['usage']
+                  npc.shared_context['session_input_tokens'] += usage.get('input_tokens', 0)
+                  npc.shared_context['session_output_tokens'] += usage.get('output_tokens', 0)
+                  npc.shared_context['turn_count'] += 1
+
+          except KeyboardInterrupt:
+              print("\nUse '/sq' to exit or continue.")
+              continue
+          except EOFError:
+              print("\nExiting spool mode.")
+              break
+
+      context['output'] = "Exited spool mode."
+      context['messages'] = messages
npcsh-1.1.14.data/data/npcsh/npc_team/usage.jinx
@@ -0,0 +1,33 @@
+jinx_name: usage
+description: Display current session token usage and cost
+inputs: []
+steps:
+  - name: show_usage
+    engine: python
+    code: |
+      state = context.get('state')
+      if not state:
+          output = "No state available"
+      else:
+          inp = getattr(state, 'session_input_tokens', 0)
+          out = getattr(state, 'session_output_tokens', 0)
+          cost = getattr(state, 'session_cost_usd', 0.0)
+          turns = getattr(state, 'turn_count', 0)
+          total = inp + out
+
+          def fmt(n):
+              return f"{n/1000:.1f}k" if n >= 1000 else str(n)
+
+          def fmt_cost(c):
+              if c == 0:
+                  return "free (local)"
+              elif c < 0.01:
+                  return f"${c:.4f}"
+              else:
+                  return f"${c:.2f}"
+
+          output = f"Session Usage\n"
+          output += f"Tokens: {fmt(inp)} in / {fmt(out)} out ({fmt(total)} total)\n"
+          output += f"Cost: {fmt_cost(cost)}\n"
+          output += f"Turns: {turns}"
+      context['output'] = output
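For a quick sense of what this jinx prints, here are the two helpers applied to a hypothetical session (token and cost figures invented for illustration):

    def fmt(n):
        # same rule as the jinx: thousands get a one-decimal "k" suffix
        return f"{n/1000:.1f}k" if n >= 1000 else str(n)

    def fmt_cost(c):
        # same rule as the jinx: zero cost is treated as local/free
        if c == 0:
            return "free (local)"
        elif c < 0.01:
            return f"${c:.4f}"
        else:
            return f"${c:.2f}"

    # hypothetical session: 12,500 tokens in, 980 out, $0.0042 accrued
    print(f"Tokens: {fmt(12500)} in / {fmt(980)} out ({fmt(12500 + 980)} total)")
    print(f"Cost: {fmt_cost(0.0042)}")
    # -> Tokens: 12.5k in / 980 out (13.5k total)
    # -> Cost: $0.0042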
npcsh-1.1.14.data/data/npcsh/npc_team/wander.jinx
@@ -0,0 +1,186 @@
+jinx_name: wander
+description: Experimental wandering mode - creative exploration with varied temperatures and random events
+inputs:
+  - problem: null
+  - environment: null
+  - low_temp: 0.5
+  - high_temp: 1.9
+  - interruption_likelihood: 1.0
+  - sample_rate: 0.4
+  - n_streams: 5
+  - include_events: false
+  - num_events: 3
+  - model: null
+  - provider: null
+
+steps:
+  - name: wander_explore
+    engine: python
+    code: |
+      import os
+      import random
+      from termcolor import colored
+
+      from npcpy.llm_funcs import get_llm_response
+
+      npc = context.get('npc')
+      messages = context.get('messages', [])
+
+      problem = context.get('problem')
+      environment = context.get('environment')
+      low_temp = float(context.get('low_temp', 0.5))
+      high_temp = float(context.get('high_temp', 1.9))
+      interruption_likelihood = float(context.get('interruption_likelihood', 1.0))
+      sample_rate = float(context.get('sample_rate', 0.4))
+      n_streams = int(context.get('n_streams', 5))
+      include_events = context.get('include_events', False)
+      num_events = int(context.get('num_events', 3))
+
+      model = context.get('model') or (npc.model if npc else 'gemini-1.5-pro')
+      provider = context.get('provider') or (npc.provider if npc else 'gemini')
+
+      if not problem:
+          context['output'] = """Usage: /wander <problem to explore>
+
+      Options:
+        --environment DESC   Metaphorical environment for wandering
+        --low-temp F         Low temperature (default: 0.5)
+        --high-temp F        High temperature (default: 1.9)
+        --n-streams N        Number of exploration streams (default: 5)
+        --include-events     Add random events during wandering
+
+      Example: /wander How might we reimagine urban transportation?"""
+          context['messages'] = messages
+          exit()
+
+      print(f"""
+      ██╗    ██╗ █████╗ ███╗   ██╗██████╗ ███████╗██████╗
+      ██║    ██║██╔══██╗████╗  ██║██╔══██╗██╔════╝██╔══██╗
+      ██║ █╗ ██║███████║██╔██╗ ██║██║  ██║█████╗  ██████╔╝
+      ██║███╗██║██╔══██║██║╚██╗██║██║  ██║██╔══╝  ██╔══██╗
+      ╚███╔███╔╝██║  ██║██║ ╚████║██████╔╝███████╗██║  ██║
+       ╚══╝╚══╝  ╚═╝  ╚═╝╚═╝  ╚═══╝╚═════╝ ╚══════╝╚═╝  ╚═╝
+
+      Experimental Wandering Mode
+      Problem: {problem}
+      Temperature range: {low_temp} - {high_temp}
+      Streams: {n_streams}
+      """)
+
+      # Generate environment if not provided
+      if not environment:
+          env_prompt = f"""Create a rich, metaphorical environment for wandering through while thinking about:
+          "{problem}"
+
+          The environment should:
+          1. Have distinct regions or areas
+          2. Include various elements and features
+          3. Be metaphorically related to the problem
+          4. Be described in 3-5 sentences
+
+          Provide only the description, no framing."""
+
+          print(colored("Generating wandering environment...", "cyan"))
+          resp = get_llm_response(env_prompt, model=model, provider=provider, temperature=0.7, npc=npc)
+          environment = str(resp.get('response', 'A vast conceptual landscape stretches before you.'))
+          print(f"\n{environment}\n")
+
+      # Event types for random encounters
+      event_types = ["encounter", "discovery", "obstacle", "insight", "shift", "memory"]
+
+      all_insights = []
+      wandering_log = []
+
+      for stream_idx in range(n_streams):
+          # Alternate between low and high temperature
+          if stream_idx % 2 == 0:
+              temp = low_temp
+              mode = "focused"
+          else:
+              temp = high_temp
+              mode = "creative"
+
+          print(colored(f"\n--- Stream {stream_idx + 1}/{n_streams} ({mode}, temp={temp}) ---", "cyan"))
+
+          # Generate random event if enabled
+          event_context = ""
+          if include_events and random.random() < sample_rate:
+              event_type = random.choice(event_types)
+              event_prompt = f"""In the environment: {environment}
+
+              While exploring the problem "{problem}", generate a {event_type} event.
+              The event should be metaphorical and relate to the problem.
+              Describe it in 2-3 sentences."""
+
+              event_resp = get_llm_response(event_prompt, model=model, provider=provider, temperature=0.9, npc=npc)
+              event = str(event_resp.get('response', ''))
+              event_context = f"\n\nEvent ({event_type}): {event}"
+              print(colored(f"[{event_type.upper()}] {event[:100]}...", "yellow"))
+
+          # Main wandering exploration
+          wander_prompt = f"""You are wandering through: {environment}
+
+          Problem being explored: "{problem}"
+          {event_context}
+
+          Previous insights: {all_insights[-3:] if all_insights else 'Starting fresh'}
+
+          In this {mode} exploration (temperature {temp}):
+          1. Let your mind wander through the conceptual space
+          2. Make unexpected connections
+          3. Notice what emerges from the wandering
+          4. Share any insights, questions, or realizations
+
+          Think freely and explore."""
+
+          resp = get_llm_response(wander_prompt, model=model, provider=provider, temperature=temp, npc=npc)
+          stream_output = str(resp.get('response', ''))
+          print(stream_output)
+
+          all_insights.append(stream_output)
+          wandering_log.append({
+              "stream": stream_idx + 1,
+              "mode": mode,
+              "temperature": temp,
+              "event": event_context if include_events else None,
+              "insight": stream_output
+          })
+
+          # Random interruption
+          if random.random() < interruption_likelihood * 0.2:
+              print(colored("\n[Pause for reflection...]", "magenta"))
+              reflect_prompt = f"Briefly reflect on what's emerged so far about: {problem}"
+              reflect_resp = get_llm_response(reflect_prompt, model=model, provider=provider, temperature=0.4, npc=npc)
+              print(colored(str(reflect_resp.get('response', ''))[:200], "magenta"))
+
+      # Synthesis
+      print(colored("\n--- Synthesizing Wanderings ---", "cyan"))
+
+      synthesis_prompt = f"""After wandering through "{environment}" exploring "{problem}":
+
+      All insights gathered:
+      {chr(10).join(all_insights)}
+
+      Synthesize what emerged from this wandering:
+      1. Key themes that appeared
+      2. Unexpected connections made
+      3. New questions raised
+      4. Potential directions to explore further"""
+
+      resp = get_llm_response(synthesis_prompt, model=model, provider=provider, temperature=0.5, npc=npc)
+      synthesis = str(resp.get('response', ''))
+
+      print("\n" + "="*50)
+      print(colored("WANDERING SYNTHESIS", "green", attrs=['bold']))
+      print("="*50)
+      print(synthesis)
+
+      context['output'] = synthesis
+      context['messages'] = messages
+      context['wander_result'] = {
+          'problem': problem,
+          'environment': environment,
+          'log': wandering_log,
+          'insights': all_insights,
+          'synthesis': synthesis
+      }
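A design note on the loop above: streams alternate deterministically between the two temperatures rather than sampling, and the reflection pause fires with probability interruption_likelihood * 0.2 per stream (20% at the default of 1.0). A sketch of the schedule produced by the default inputs:

    # defaults taken from the jinx inputs above
    low_temp, high_temp, n_streams = 0.5, 1.9, 5

    schedule = [
        (low_temp, "focused") if i % 2 == 0 else (high_temp, "creative")
        for i in range(n_streams)
    ]
    print(schedule)
    # -> [(0.5, 'focused'), (1.9, 'creative'), (0.5, 'focused'),
    #     (1.9, 'creative'), (0.5, 'focused')]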