npcsh-0.1.2-py3-none-any.whl → npcsh-1.1.13-py3-none-any.whl

This diff compares the contents of publicly released package versions as they appear in their public registries and is provided for informational purposes only.
Files changed (143)
  1. npcsh/_state.py +3508 -0
  2. npcsh/alicanto.py +65 -0
  3. npcsh/build.py +291 -0
  4. npcsh/completion.py +206 -0
  5. npcsh/config.py +163 -0
  6. npcsh/corca.py +50 -0
  7. npcsh/execution.py +185 -0
  8. npcsh/guac.py +46 -0
  9. npcsh/mcp_helpers.py +357 -0
  10. npcsh/mcp_server.py +299 -0
  11. npcsh/npc.py +323 -0
  12. npcsh/npc_team/alicanto.npc +2 -0
  13. npcsh/npc_team/alicanto.png +0 -0
  14. npcsh/npc_team/corca.npc +12 -0
  15. npcsh/npc_team/corca.png +0 -0
  16. npcsh/npc_team/corca_example.png +0 -0
  17. npcsh/npc_team/foreman.npc +7 -0
  18. npcsh/npc_team/frederic.npc +6 -0
  19. npcsh/npc_team/frederic4.png +0 -0
  20. npcsh/npc_team/guac.png +0 -0
  21. npcsh/npc_team/jinxs/code/python.jinx +11 -0
  22. npcsh/npc_team/jinxs/code/sh.jinx +34 -0
  23. npcsh/npc_team/jinxs/code/sql.jinx +16 -0
  24. npcsh/npc_team/jinxs/modes/alicanto.jinx +194 -0
  25. npcsh/npc_team/jinxs/modes/corca.jinx +249 -0
  26. npcsh/npc_team/jinxs/modes/guac.jinx +317 -0
  27. npcsh/npc_team/jinxs/modes/plonk.jinx +214 -0
  28. npcsh/npc_team/jinxs/modes/pti.jinx +170 -0
  29. npcsh/npc_team/jinxs/modes/spool.jinx +161 -0
  30. npcsh/npc_team/jinxs/modes/wander.jinx +186 -0
  31. npcsh/npc_team/jinxs/modes/yap.jinx +262 -0
  32. npcsh/npc_team/jinxs/npc_studio/npc-studio.jinx +77 -0
  33. npcsh/npc_team/jinxs/utils/agent.jinx +17 -0
  34. npcsh/npc_team/jinxs/utils/chat.jinx +44 -0
  35. npcsh/npc_team/jinxs/utils/cmd.jinx +44 -0
  36. npcsh/npc_team/jinxs/utils/compress.jinx +140 -0
  37. npcsh/npc_team/jinxs/utils/core/build.jinx +65 -0
  38. npcsh/npc_team/jinxs/utils/core/compile.jinx +50 -0
  39. npcsh/npc_team/jinxs/utils/core/help.jinx +52 -0
  40. npcsh/npc_team/jinxs/utils/core/init.jinx +41 -0
  41. npcsh/npc_team/jinxs/utils/core/jinxs.jinx +32 -0
  42. npcsh/npc_team/jinxs/utils/core/set.jinx +40 -0
  43. npcsh/npc_team/jinxs/utils/edit_file.jinx +94 -0
  44. npcsh/npc_team/jinxs/utils/load_file.jinx +35 -0
  45. npcsh/npc_team/jinxs/utils/ots.jinx +61 -0
  46. npcsh/npc_team/jinxs/utils/roll.jinx +68 -0
  47. npcsh/npc_team/jinxs/utils/sample.jinx +56 -0
  48. npcsh/npc_team/jinxs/utils/search.jinx +130 -0
  49. npcsh/npc_team/jinxs/utils/serve.jinx +26 -0
  50. npcsh/npc_team/jinxs/utils/sleep.jinx +116 -0
  51. npcsh/npc_team/jinxs/utils/trigger.jinx +61 -0
  52. npcsh/npc_team/jinxs/utils/usage.jinx +33 -0
  53. npcsh/npc_team/jinxs/utils/vixynt.jinx +144 -0
  54. npcsh/npc_team/kadiefa.npc +3 -0
  55. npcsh/npc_team/kadiefa.png +0 -0
  56. npcsh/npc_team/npcsh.ctx +18 -0
  57. npcsh/npc_team/npcsh_sibiji.png +0 -0
  58. npcsh/npc_team/plonk.npc +2 -0
  59. npcsh/npc_team/plonk.png +0 -0
  60. npcsh/npc_team/plonkjr.npc +2 -0
  61. npcsh/npc_team/plonkjr.png +0 -0
  62. npcsh/npc_team/sibiji.npc +3 -0
  63. npcsh/npc_team/sibiji.png +0 -0
  64. npcsh/npc_team/spool.png +0 -0
  65. npcsh/npc_team/yap.png +0 -0
  66. npcsh/npcsh.py +296 -112
  67. npcsh/parsing.py +118 -0
  68. npcsh/plonk.py +54 -0
  69. npcsh/pti.py +54 -0
  70. npcsh/routes.py +139 -0
  71. npcsh/spool.py +48 -0
  72. npcsh/ui.py +199 -0
  73. npcsh/wander.py +62 -0
  74. npcsh/yap.py +50 -0
  75. npcsh-1.1.13.data/data/npcsh/npc_team/agent.jinx +17 -0
  76. npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.jinx +194 -0
  77. npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.npc +2 -0
  78. npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.png +0 -0
  79. npcsh-1.1.13.data/data/npcsh/npc_team/build.jinx +65 -0
  80. npcsh-1.1.13.data/data/npcsh/npc_team/chat.jinx +44 -0
  81. npcsh-1.1.13.data/data/npcsh/npc_team/cmd.jinx +44 -0
  82. npcsh-1.1.13.data/data/npcsh/npc_team/compile.jinx +50 -0
  83. npcsh-1.1.13.data/data/npcsh/npc_team/compress.jinx +140 -0
  84. npcsh-1.1.13.data/data/npcsh/npc_team/corca.jinx +249 -0
  85. npcsh-1.1.13.data/data/npcsh/npc_team/corca.npc +12 -0
  86. npcsh-1.1.13.data/data/npcsh/npc_team/corca.png +0 -0
  87. npcsh-1.1.13.data/data/npcsh/npc_team/corca_example.png +0 -0
  88. npcsh-1.1.13.data/data/npcsh/npc_team/edit_file.jinx +94 -0
  89. npcsh-1.1.13.data/data/npcsh/npc_team/foreman.npc +7 -0
  90. npcsh-1.1.13.data/data/npcsh/npc_team/frederic.npc +6 -0
  91. npcsh-1.1.13.data/data/npcsh/npc_team/frederic4.png +0 -0
  92. npcsh-1.1.13.data/data/npcsh/npc_team/guac.jinx +317 -0
  93. npcsh-1.1.13.data/data/npcsh/npc_team/guac.png +0 -0
  94. npcsh-1.1.13.data/data/npcsh/npc_team/help.jinx +52 -0
  95. npcsh-1.1.13.data/data/npcsh/npc_team/init.jinx +41 -0
  96. npcsh-1.1.13.data/data/npcsh/npc_team/jinxs.jinx +32 -0
  97. npcsh-1.1.13.data/data/npcsh/npc_team/kadiefa.npc +3 -0
  98. npcsh-1.1.13.data/data/npcsh/npc_team/kadiefa.png +0 -0
  99. npcsh-1.1.13.data/data/npcsh/npc_team/load_file.jinx +35 -0
  100. npcsh-1.1.13.data/data/npcsh/npc_team/npc-studio.jinx +77 -0
  101. npcsh-1.1.13.data/data/npcsh/npc_team/npcsh.ctx +18 -0
  102. npcsh-1.1.13.data/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
  103. npcsh-1.1.13.data/data/npcsh/npc_team/ots.jinx +61 -0
  104. npcsh-1.1.13.data/data/npcsh/npc_team/plonk.jinx +214 -0
  105. npcsh-1.1.13.data/data/npcsh/npc_team/plonk.npc +2 -0
  106. npcsh-1.1.13.data/data/npcsh/npc_team/plonk.png +0 -0
  107. npcsh-1.1.13.data/data/npcsh/npc_team/plonkjr.npc +2 -0
  108. npcsh-1.1.13.data/data/npcsh/npc_team/plonkjr.png +0 -0
  109. npcsh-1.1.13.data/data/npcsh/npc_team/pti.jinx +170 -0
  110. npcsh-1.1.13.data/data/npcsh/npc_team/python.jinx +11 -0
  111. npcsh-1.1.13.data/data/npcsh/npc_team/roll.jinx +68 -0
  112. npcsh-1.1.13.data/data/npcsh/npc_team/sample.jinx +56 -0
  113. npcsh-1.1.13.data/data/npcsh/npc_team/search.jinx +130 -0
  114. npcsh-1.1.13.data/data/npcsh/npc_team/serve.jinx +26 -0
  115. npcsh-1.1.13.data/data/npcsh/npc_team/set.jinx +40 -0
  116. npcsh-1.1.13.data/data/npcsh/npc_team/sh.jinx +34 -0
  117. npcsh-1.1.13.data/data/npcsh/npc_team/sibiji.npc +3 -0
  118. npcsh-1.1.13.data/data/npcsh/npc_team/sibiji.png +0 -0
  119. npcsh-1.1.13.data/data/npcsh/npc_team/sleep.jinx +116 -0
  120. npcsh-1.1.13.data/data/npcsh/npc_team/spool.jinx +161 -0
  121. npcsh-1.1.13.data/data/npcsh/npc_team/spool.png +0 -0
  122. npcsh-1.1.13.data/data/npcsh/npc_team/sql.jinx +16 -0
  123. npcsh-1.1.13.data/data/npcsh/npc_team/trigger.jinx +61 -0
  124. npcsh-1.1.13.data/data/npcsh/npc_team/usage.jinx +33 -0
  125. npcsh-1.1.13.data/data/npcsh/npc_team/vixynt.jinx +144 -0
  126. npcsh-1.1.13.data/data/npcsh/npc_team/wander.jinx +186 -0
  127. npcsh-1.1.13.data/data/npcsh/npc_team/yap.jinx +262 -0
  128. npcsh-1.1.13.data/data/npcsh/npc_team/yap.png +0 -0
  129. npcsh-1.1.13.dist-info/METADATA +522 -0
  130. npcsh-1.1.13.dist-info/RECORD +135 -0
  131. {npcsh-0.1.2.dist-info → npcsh-1.1.13.dist-info}/WHEEL +1 -1
  132. npcsh-1.1.13.dist-info/entry_points.txt +9 -0
  133. {npcsh-0.1.2.dist-info → npcsh-1.1.13.dist-info/licenses}/LICENSE +1 -1
  134. npcsh/command_history.py +0 -81
  135. npcsh/helpers.py +0 -36
  136. npcsh/llm_funcs.py +0 -295
  137. npcsh/main.py +0 -5
  138. npcsh/modes.py +0 -343
  139. npcsh/npc_compiler.py +0 -124
  140. npcsh-0.1.2.dist-info/METADATA +0 -99
  141. npcsh-0.1.2.dist-info/RECORD +0 -14
  142. npcsh-0.1.2.dist-info/entry_points.txt +0 -2
  143. {npcsh-0.1.2.dist-info → npcsh-1.1.13.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,214 @@
+ jinx_name: plonk
+ description: Vision-based GUI automation - use vision model to interact with screen elements
+ inputs:
+ - task: null
+ - vmodel: null
+ - vprovider: null
+ - max_iterations: 10
+ - debug: true
+
+ steps:
+ - name: plonk_execute
+ engine: python
+ code: |
+ import os
+ import time
+ import platform
+ from termcolor import colored
+
+ from npcpy.llm_funcs import get_llm_response
+ from npcpy.data.image import capture_screenshot
+ from npcpy.work.desktop import perform_action
+
+ npc = context.get('npc')
+ messages = context.get('messages', [])
+
+ task = context.get('task')
+ vision_model = context.get('vmodel') or (npc.model if npc else 'gpt-4o')
+ vision_provider = context.get('vprovider') or (npc.provider if npc else 'openai')
+ max_iterations = int(context.get('max_iterations', 10))
+ debug = context.get('debug', True)
+
+ if not task:
+ context['output'] = """Usage: /plonk <task description>
+
+ Options:
+ --vmodel MODEL Vision model to use (default: gpt-4o)
+ --vprovider PROV Vision provider (default: openai)
+ --max-iterations N Max steps (default: 10)
+
+ Example: /plonk Open Firefox and navigate to google.com"""
+ context['messages'] = messages
+ exit()
+
+ print(f"""
+ ██████╗ ██╗ ██████╗ ███╗ ██╗██╗ ██╗
+ ██╔══██╗██║ ██╔═══██╗████╗ ██║██║ ██╔╝
+ ██████╔╝██║ ██║ ██║██╔██╗ ██║█████╔╝
+ ██╔═══╝ ██║ ██║ ██║██║╚██╗██║██╔═██╗
+ ██║ ███████╗╚██████╔╝██║ ╚████║██║ ██╗
+ ╚═╝ ╚══════╝ ╚═════╝ ╚═╝ ╚═══╝╚═╝ ╚═╝
+
+ Vision GUI Automation
+ Task: {task}
+ Model: {vision_model} | Max iterations: {max_iterations}
+ """)
+
+ # System-specific examples
+ system = platform.system()
+ if system == "Windows":
+ app_examples = "start firefox, notepad, calc"
+ elif system == "Darwin":
+ app_examples = "open -a Firefox, open -a TextEdit"
+ else:
+ app_examples = "firefox &, gedit &, gnome-calculator &"
+
+ # Action types
+ ACTION_SCHEMA = {
+ "type": "object",
+ "properties": {
+ "action": {
+ "type": "string",
+ "enum": ["click", "type", "key", "launch", "wait", "done", "fail"],
+ "description": "Action to perform"
+ },
+ "x": {"type": "number", "description": "X coordinate (0-100 percentage)"},
+ "y": {"type": "number", "description": "Y coordinate (0-100 percentage)"},
+ "text": {"type": "string", "description": "Text to type or key to press"},
+ "command": {"type": "string", "description": "Command to launch"},
+ "duration": {"type": "number", "description": "Wait duration in seconds"},
+ "reason": {"type": "string", "description": "Explanation of action"}
+ },
+ "required": ["action", "reason"]
+ }
+
+ click_history = []
+ summary = []
+
+ for iteration in range(max_iterations):
+ print(colored(f"\n--- Iteration {iteration + 1}/{max_iterations} ---", "cyan"))
+
+ # Capture screenshot
+ ss = capture_screenshot()
+ if not ss or 'file_path' not in ss:
+ print(colored("Failed to capture screenshot", "red"))
+ break
+
+ screenshot_path = ss['file_path']
+ if debug:
+ print(colored(f"Screenshot: {screenshot_path}", "gray"))
+
+ # Build context from history
+ history_context = ""
+ if click_history:
+ history_context = f"\nPrevious actions ({len(click_history)}):\n"
+ for i, click in enumerate(click_history[-5:], 1):
+ history_context += f" {i}. {click.get('action', 'unknown')} at ({click.get('x', '?')}, {click.get('y', '?')}) - {click.get('reason', '')}\n"
+
+ prompt = f"""You are a GUI automation assistant. Analyze this screenshot and determine the next action to complete the task.
+
+ TASK: {task}
+
+ {history_context}
+
+ Available actions:
+ - click: Click at x,y coordinates (0-100 percentage of screen)
+ - type: Type text
+ - key: Press key (enter, tab, escape, etc.)
+ - launch: Launch application ({app_examples})
+ - wait: Wait for duration seconds
+ - done: Task completed successfully
+ - fail: Task cannot be completed
+
+ Respond with JSON: {{"action": "...", "x": N, "y": N, "text": "...", "command": "...", "duration": N, "reason": "..."}}"""
+
+ try:
+ resp = get_llm_response(
+ prompt,
+ model=vision_model,
+ provider=vision_provider,
+ images=[screenshot_path],
+ format="json",
+ npc=npc
+ )
+
+ action_response = resp.get('response', {})
+ if isinstance(action_response, str):
+ import json
+ try:
+ action_response = json.loads(action_response)
+ except:
+ print(colored(f"Invalid JSON response: {action_response[:100]}", "red"))
+ continue
+
+ action = action_response.get('action', 'fail')
+ reason = action_response.get('reason', 'No reason provided')
+
+ print(colored(f"Action: {action} - {reason}", "yellow"))
+
+ if action == 'done':
+ print(colored("Task completed successfully!", "green"))
+ summary.append({"iteration": iteration + 1, "action": "done", "reason": reason})
+ break
+
+ if action == 'fail':
+ print(colored(f"Task failed: {reason}", "red"))
+ summary.append({"iteration": iteration + 1, "action": "fail", "reason": reason})
+ break
+
+ # Execute action
+ if action == 'click':
+ x, y = action_response.get('x', 50), action_response.get('y', 50)
+ perform_action('click', x=x, y=y)
+ click_history.append({"action": "click", "x": x, "y": y, "reason": reason})
+ print(colored(f"Clicked at ({x}, {y})", "green"))
+
+ elif action == 'type':
+ text = action_response.get('text', '')
+ perform_action('type', text=text)
+ click_history.append({"action": "type", "text": text[:20], "reason": reason})
+ print(colored(f"Typed: {text[:30]}...", "green"))
+
+ elif action == 'key':
+ key = action_response.get('text', 'enter')
+ perform_action('key', key=key)
+ click_history.append({"action": "key", "key": key, "reason": reason})
+ print(colored(f"Pressed key: {key}", "green"))
+
+ elif action == 'launch':
+ cmd = action_response.get('command', '')
+ perform_action('launch', command=cmd)
+ click_history.append({"action": "launch", "command": cmd, "reason": reason})
+ print(colored(f"Launched: {cmd}", "green"))
+ time.sleep(2) # Wait for app to open
+
+ elif action == 'wait':
+ duration = action_response.get('duration', 1)
+ time.sleep(duration)
+ click_history.append({"action": "wait", "duration": duration, "reason": reason})
+ print(colored(f"Waited {duration}s", "green"))
+
+ summary.append({
+ "iteration": iteration + 1,
+ "action": action,
+ "last_click_coords": f"({click_history[-1].get('x', 'N/A')}, {click_history[-1].get('y', 'N/A')})" if click_history else "N/A",
+ "reason": reason
+ })
+
+ time.sleep(0.5) # Brief pause between actions
+
+ except Exception as e:
+ print(colored(f"Error in iteration {iteration + 1}: {e}", "red"))
+ summary.append({"iteration": iteration + 1, "error": str(e)})
+
+ # Generate summary
+ print("\n" + "="*50)
+ print(colored("PLONK SESSION SUMMARY", "cyan", attrs=['bold']))
+ print("="*50)
+ for s in summary:
+ print(f" Step {s.get('iteration', '?')}: {s.get('action', 'unknown')} - {s.get('reason', s.get('error', ''))[:60]}")
+
+ context['output'] = f"Plonk completed with {len(summary)} actions"
+ context['messages'] = messages
+ context['plonk_summary'] = summary
+ context['click_history'] = click_history
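
The plonk jinx above runs a screenshot → vision-model → perform_action loop, expecting the model to answer in the JSON shape described by ACTION_SCHEMA (coordinates as 0-100 percentages of the screen). As a minimal sketch of how such a response could be checked before dispatch, assuming the third-party jsonschema package (an illustration only, not a dependency declared anywhere in this diff):

    # Illustrative sketch: validate a model-produced action against the
    # ACTION_SCHEMA shape used by the plonk jinx. jsonschema is an assumed
    # extra dependency; the jinx itself does not import it.
    import json
    from jsonschema import validate, ValidationError

    ACTION_SCHEMA = {
        "type": "object",
        "properties": {
            "action": {"type": "string",
                       "enum": ["click", "type", "key", "launch", "wait", "done", "fail"]},
            "x": {"type": "number"},
            "y": {"type": "number"},
            "text": {"type": "string"},
            "command": {"type": "string"},
            "duration": {"type": "number"},
            "reason": {"type": "string"},
        },
        "required": ["action", "reason"],
    }

    raw = '{"action": "click", "x": 42.5, "y": 17.0, "reason": "open the address bar"}'
    try:
        action = json.loads(raw)
        validate(instance=action, schema=ACTION_SCHEMA)  # raises on malformed actions
        print(f"valid action: {action['action']} at ({action.get('x')}, {action.get('y')})")
    except (json.JSONDecodeError, ValidationError) as e:
        print(f"rejecting model output: {e}")
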
@@ -0,0 +1,170 @@
+ jinx_name: pti
+ description: Pardon-The-Interruption - human-in-the-loop reasoning with think tags and interruptible streaming
+ npc: frederic
+ inputs:
+ - model: null
+ - provider: null
+ - files: null
+ - reasoning_model: null
+
+ steps:
+ - name: pti_repl
+ engine: python
+ code: |
+ import os
+ import sys
+ from termcolor import colored
+
+ from npcpy.llm_funcs import get_llm_response
+ from npcpy.npc_sysenv import get_system_message, render_markdown
+ from npcpy.data.load import load_file_contents
+ from npcpy.data.text import rag_search
+
+ npc = context.get('npc')
+ team = context.get('team')
+ messages = context.get('messages', [])
+ files = context.get('files')
+
+ # PTI uses reasoning model for deeper thinking
+ model = context.get('reasoning_model') or context.get('model') or (npc.model if npc else None)
+ provider = context.get('provider') or (npc.provider if npc else None)
+
+ print("""
+ ██████╗ ████████╗██╗
+ ██╔══██╗╚══██╔══╝██║
+ ██████╔╝ ██║ ██║
+ ██╔═══╝ ██║ ██║
+ ██║ ██║ ██║
+ ╚═╝ ╚═╝ ╚═╝
+
+ Pardon-The-Interruption
+ Human-in-the-loop reasoning mode
+ """)
+
+ npc_name = npc.name if npc else "pti"
+ print(f"Entering PTI mode (NPC: {npc_name}). Type '/pq' to exit.")
+ print(" - AI will use <think> tags for step-by-step reasoning")
+ print(" - Use <request_for_input> to pause and ask questions")
+ print(" - Ctrl+C interrupts stream for immediate feedback")
+
+ # Load files if provided
+ loaded_content = {}
+ if files:
+ if isinstance(files, str):
+ files = [f.strip() for f in files.split(',')]
+ for file_path in files:
+ file_path = os.path.expanduser(file_path)
+ if os.path.exists(file_path):
+ try:
+ chunks = load_file_contents(file_path)
+ loaded_content[file_path] = "\n".join(chunks)
+ print(colored(f"Loaded: {file_path}", "green"))
+ except Exception as e:
+ print(colored(f"Error loading {file_path}: {e}", "red"))
+
+ # System message for PTI mode
+ pti_system = """You are an AI assistant in PTI (Pardon-The-Interruption) mode.
+
+ IMPORTANT INSTRUCTIONS:
+ 1. Think step-by-step using <think>...</think> tags to show your reasoning
+ 2. When you need more information from the user, use <request_for_input>your question</request_for_input>
+ 3. Be thorough but concise in your reasoning
+ 4. The user can interrupt at any time to provide guidance
+
+ Example:
+ <think>
+ Let me break this down...
+ Step 1: First I need to understand X
+ Step 2: Then consider Y
+ </think>
+
+ <request_for_input>
+ I notice you mentioned Z. Could you clarify what you mean by that?
+ </request_for_input>"""
+
+ if not messages or messages[0].get("role") != "system":
+ messages.insert(0, {"role": "system", "content": pti_system})
+
+ # REPL loop
+ user_input = None
+ while True:
+ try:
+ if not user_input:
+ prompt_str = f"{npc_name}:pti> "
+ user_input = input(prompt_str).strip()
+
+ if not user_input:
+ user_input = None
+ continue
+
+ if user_input.lower() == "/pq":
+ print("Exiting PTI mode.")
+ break
+
+ # Build prompt with file context
+ prompt_for_llm = user_input
+ if loaded_content:
+ context_str = "\n".join([f"--- {fname} ---\n{content}" for fname, content in loaded_content.items()])
+ prompt_for_llm += f"\n\nContext:\n{context_str}"
+
+ prompt_for_llm += "\n\nThink step-by-step using <think> tags. Use <request_for_input> when you need clarification."
+
+ messages.append({"role": "user", "content": user_input})
+
+ try:
+ resp = get_llm_response(
+ prompt_for_llm,
+ model=model,
+ provider=provider,
+ messages=messages[:-1], # Don't duplicate the user message
+ stream=True,
+ npc=npc
+ )
+
+ response_stream = resp.get('response')
+ full_response = ""
+ request_found = False
+
+ # Stream the response
+ for chunk in response_stream:
+ chunk_content = ""
+ if hasattr(chunk, 'choices') and chunk.choices:
+ delta = chunk.choices[0].delta
+ if hasattr(delta, 'content') and delta.content:
+ chunk_content = delta.content
+ elif isinstance(chunk, dict):
+ chunk_content = chunk.get("message", {}).get("content", "")
+
+ if chunk_content:
+ print(chunk_content, end='', flush=True)
+ full_response += chunk_content
+
+ # Check for request_for_input
+ if "</request_for_input>" in full_response:
+ request_found = True
+ break
+
+ print() # newline after stream
+
+ messages.append({"role": "assistant", "content": full_response})
+ user_input = None # Reset for next iteration
+
+ except KeyboardInterrupt:
+ print(colored("\n\n--- Interrupted ---", "yellow"))
+ interrupt_input = input("Your feedback: ").strip()
+ if interrupt_input:
+ user_input = interrupt_input
+ else:
+ user_input = None
+ continue
+
+ except KeyboardInterrupt:
+ print("\nUse '/pq' to exit or continue.")
+ user_input = None
+ continue
+ except EOFError:
+ print("\nExiting PTI mode.")
+ break
+
+ context['output'] = "Exited PTI mode."
+ context['messages'] = messages
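
The pti jinx streams the model's reply and watches the accumulated text for a closing </request_for_input> tag so it can stop the stream and hand control back to the user. A small stdlib-only sketch of extracting those tagged spans once text has been accumulated (illustrative only, not code from the package):

    # Illustrative sketch: pull <think> and <request_for_input> spans out of
    # text accumulated from a stream, mirroring the tag convention the pti
    # system prompt asks the model to follow.
    import re

    full_response = (
        "<think>Step 1: read the file. Step 2: check the schema.</think>\n"
        "<request_for_input>Which schema version should I target?</request_for_input>"
    )

    thoughts = re.findall(r"<think>(.*?)</think>", full_response, flags=re.DOTALL)
    requests = re.findall(r"<request_for_input>(.*?)</request_for_input>",
                          full_response, flags=re.DOTALL)

    for t in thoughts:
        print(f"[reasoning] {t.strip()}")
    if requests:
        # In the jinx itself the stream stops as soon as the closing tag
        # appears and the question is surfaced at the REPL prompt.
        print(f"[question for user] {requests[0].strip()}")
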
@@ -0,0 +1,161 @@
+ jinx_name: spool
+ description: Interactive chat mode - simple conversational interface with an NPC
+ inputs:
+ - model: null
+ - provider: null
+ - attachments: null
+ - stream: true
+
+ steps:
+ - name: spool_repl
+ engine: python
+ code: |
+ import os
+ import sys
+ from termcolor import colored
+
+ from npcpy.llm_funcs import get_llm_response
+ from npcpy.npc_sysenv import get_system_message, render_markdown
+ from npcpy.data.load import load_file_contents
+ from npcpy.data.text import rag_search
+
+ npc = context.get('npc')
+ team = context.get('team')
+ messages = context.get('messages', [])
+ stream = context.get('stream', True)
+ attachments = context.get('attachments')
+
+ # Use NPC's model/provider or fallback
+ model = context.get('model') or (npc.model if npc else None)
+ provider = context.get('provider') or (npc.provider if npc else None)
+
+ # ASCII art
+ print("""
+ _____ ____ ____ ____ _
+ / ___/| _ \ / __ \ / __ \| |
+ \___ \| |_) | | | | | | | |
+ ___) | __/| | | | | | | |___
+ |____/|_| \____/ \____/|_____|
+ """)
+
+ npc_name = npc.name if npc else "chat"
+ print(f"Entering spool mode (NPC: {npc_name}). Type '/sq' to exit.")
+
+ # Load attachments if provided
+ loaded_chunks = {}
+ if attachments:
+ if isinstance(attachments, str):
+ attachments = [f.strip() for f in attachments.split(',')]
+ for file_path in attachments:
+ file_path = os.path.expanduser(file_path)
+ if os.path.exists(file_path):
+ try:
+ chunks = load_file_contents(file_path)
+ loaded_chunks[file_path] = chunks
+ print(colored(f"Loaded {len(chunks)} chunks from: {file_path}", "green"))
+ except Exception as e:
+ print(colored(f"Error loading {file_path}: {e}", "red"))
+
+ # Ensure system message
+ if not messages or messages[0].get("role") != "system":
+ sys_msg = get_system_message(npc) if npc else "You are a helpful assistant."
+ messages.insert(0, {"role": "system", "content": sys_msg})
+
+ # REPL loop
+ while True:
+ try:
+ prompt_str = f"{npc_name}> "
+ user_input = input(prompt_str).strip()
+
+ if not user_input:
+ continue
+
+ if user_input.lower() == "/sq":
+ print("Exiting spool mode.")
+ break
+
+ # Handle /ots for screenshots inline
+ if user_input.startswith("/ots"):
+ from npcpy.data.image import capture_screenshot
+ parts = user_input.split()
+ image_paths = []
+ if len(parts) > 1:
+ for p in parts[1:]:
+ fp = os.path.expanduser(p)
+ if os.path.exists(fp):
+ image_paths.append(fp)
+ else:
+ ss = capture_screenshot()
+ if ss and "file_path" in ss:
+ image_paths.append(ss["file_path"])
+ print(colored(f"Screenshot: {ss['filename']}", "green"))
+
+ if image_paths:
+ vision_prompt = input("Prompt for image(s): ").strip() or "Describe these images."
+ resp = get_llm_response(
+ vision_prompt,
+ model=npc.vision_model if hasattr(npc, 'vision_model') else model,
+ provider=npc.vision_provider if hasattr(npc, 'vision_provider') else provider,
+ messages=messages,
+ images=image_paths,
+ stream=stream,
+ npc=npc
+ )
+ messages = resp.get('messages', messages)
+ render_markdown(str(resp.get('response', '')))
+ continue
+
+ # Add RAG context if files loaded
+ current_prompt = user_input
+ if loaded_chunks:
+ context_content = ""
+ for filename, chunks in loaded_chunks.items():
+ full_text = "\n".join(chunks)
+ retrieved = rag_search(user_input, full_text, similarity_threshold=0.3)
+ if retrieved:
+ context_content += f"\n\nContext from {filename}:\n{retrieved}\n"
+ if context_content:
+ current_prompt += f"\n\n--- Relevant context ---{context_content}"
+
+ # Get response
+ resp = get_llm_response(
+ current_prompt,
+ model=model,
+ provider=provider,
+ messages=messages,
+ stream=stream,
+ npc=npc
+ )
+
+ messages = resp.get('messages', messages)
+ response_text = resp.get('response', '')
+
+ # Handle streaming vs non-streaming
+ if hasattr(response_text, '__iter__') and not isinstance(response_text, str):
+ full_response = ""
+ for chunk in response_text:
+ if hasattr(chunk, 'choices') and chunk.choices:
+ delta = chunk.choices[0].delta
+ if hasattr(delta, 'content') and delta.content:
+ print(delta.content, end='', flush=True)
+ full_response += delta.content
+ print()
+ else:
+ render_markdown(str(response_text))
+
+ # Track usage if available
+ if 'usage' in resp and npc and hasattr(npc, 'shared_context'):
+ usage = resp['usage']
+ npc.shared_context['session_input_tokens'] += usage.get('input_tokens', 0)
+ npc.shared_context['session_output_tokens'] += usage.get('output_tokens', 0)
+ npc.shared_context['turn_count'] += 1
+
+ except KeyboardInterrupt:
+ print("\nUse '/sq' to exit or continue.")
+ continue
+ except EOFError:
+ print("\nExiting spool mode.")
+ break
+
+ context['output'] = "Exited spool mode."
+ context['messages'] = messages
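
The spool jinx handles both streamed and non-streamed replies from get_llm_response by duck-typing the response object: a non-string iterable is treated as a stream of OpenAI-style chunks, anything else is rendered in one shot. A self-contained sketch of that branch, with hypothetical stand-in chunk objects in place of the provider client's real ones (illustrative only):

    # Illustrative sketch of spool's stream-vs-string branch. The SimpleNamespace
    # chunks below are stand-ins for provider chunk objects, not npcsh code.
    from types import SimpleNamespace

    def fake_stream():
        for piece in ["Hello", ", ", "world!"]:
            delta = SimpleNamespace(content=piece)
            yield SimpleNamespace(choices=[SimpleNamespace(delta=delta)])

    def render(response_text):
        # Iterable but not a str -> treat as a stream of chunks
        if hasattr(response_text, '__iter__') and not isinstance(response_text, str):
            full_response = ""
            for chunk in response_text:
                if hasattr(chunk, 'choices') and chunk.choices:
                    delta = chunk.choices[0].delta
                    if getattr(delta, 'content', None):
                        print(delta.content, end='', flush=True)
                        full_response += delta.content
            print()
            return full_response
        # Non-streaming: render the whole reply at once
        print(str(response_text))
        return str(response_text)

    render(fake_stream())                    # streamed path
    render("A plain, non-streamed reply.")   # non-streamed path
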