npcsh 0.1.2__py3-none-any.whl → 1.1.13__py3-none-any.whl

This diff shows the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (143)
  1. npcsh/_state.py +3508 -0
  2. npcsh/alicanto.py +65 -0
  3. npcsh/build.py +291 -0
  4. npcsh/completion.py +206 -0
  5. npcsh/config.py +163 -0
  6. npcsh/corca.py +50 -0
  7. npcsh/execution.py +185 -0
  8. npcsh/guac.py +46 -0
  9. npcsh/mcp_helpers.py +357 -0
  10. npcsh/mcp_server.py +299 -0
  11. npcsh/npc.py +323 -0
  12. npcsh/npc_team/alicanto.npc +2 -0
  13. npcsh/npc_team/alicanto.png +0 -0
  14. npcsh/npc_team/corca.npc +12 -0
  15. npcsh/npc_team/corca.png +0 -0
  16. npcsh/npc_team/corca_example.png +0 -0
  17. npcsh/npc_team/foreman.npc +7 -0
  18. npcsh/npc_team/frederic.npc +6 -0
  19. npcsh/npc_team/frederic4.png +0 -0
  20. npcsh/npc_team/guac.png +0 -0
  21. npcsh/npc_team/jinxs/code/python.jinx +11 -0
  22. npcsh/npc_team/jinxs/code/sh.jinx +34 -0
  23. npcsh/npc_team/jinxs/code/sql.jinx +16 -0
  24. npcsh/npc_team/jinxs/modes/alicanto.jinx +194 -0
  25. npcsh/npc_team/jinxs/modes/corca.jinx +249 -0
  26. npcsh/npc_team/jinxs/modes/guac.jinx +317 -0
  27. npcsh/npc_team/jinxs/modes/plonk.jinx +214 -0
  28. npcsh/npc_team/jinxs/modes/pti.jinx +170 -0
  29. npcsh/npc_team/jinxs/modes/spool.jinx +161 -0
  30. npcsh/npc_team/jinxs/modes/wander.jinx +186 -0
  31. npcsh/npc_team/jinxs/modes/yap.jinx +262 -0
  32. npcsh/npc_team/jinxs/npc_studio/npc-studio.jinx +77 -0
  33. npcsh/npc_team/jinxs/utils/agent.jinx +17 -0
  34. npcsh/npc_team/jinxs/utils/chat.jinx +44 -0
  35. npcsh/npc_team/jinxs/utils/cmd.jinx +44 -0
  36. npcsh/npc_team/jinxs/utils/compress.jinx +140 -0
  37. npcsh/npc_team/jinxs/utils/core/build.jinx +65 -0
  38. npcsh/npc_team/jinxs/utils/core/compile.jinx +50 -0
  39. npcsh/npc_team/jinxs/utils/core/help.jinx +52 -0
  40. npcsh/npc_team/jinxs/utils/core/init.jinx +41 -0
  41. npcsh/npc_team/jinxs/utils/core/jinxs.jinx +32 -0
  42. npcsh/npc_team/jinxs/utils/core/set.jinx +40 -0
  43. npcsh/npc_team/jinxs/utils/edit_file.jinx +94 -0
  44. npcsh/npc_team/jinxs/utils/load_file.jinx +35 -0
  45. npcsh/npc_team/jinxs/utils/ots.jinx +61 -0
  46. npcsh/npc_team/jinxs/utils/roll.jinx +68 -0
  47. npcsh/npc_team/jinxs/utils/sample.jinx +56 -0
  48. npcsh/npc_team/jinxs/utils/search.jinx +130 -0
  49. npcsh/npc_team/jinxs/utils/serve.jinx +26 -0
  50. npcsh/npc_team/jinxs/utils/sleep.jinx +116 -0
  51. npcsh/npc_team/jinxs/utils/trigger.jinx +61 -0
  52. npcsh/npc_team/jinxs/utils/usage.jinx +33 -0
  53. npcsh/npc_team/jinxs/utils/vixynt.jinx +144 -0
  54. npcsh/npc_team/kadiefa.npc +3 -0
  55. npcsh/npc_team/kadiefa.png +0 -0
  56. npcsh/npc_team/npcsh.ctx +18 -0
  57. npcsh/npc_team/npcsh_sibiji.png +0 -0
  58. npcsh/npc_team/plonk.npc +2 -0
  59. npcsh/npc_team/plonk.png +0 -0
  60. npcsh/npc_team/plonkjr.npc +2 -0
  61. npcsh/npc_team/plonkjr.png +0 -0
  62. npcsh/npc_team/sibiji.npc +3 -0
  63. npcsh/npc_team/sibiji.png +0 -0
  64. npcsh/npc_team/spool.png +0 -0
  65. npcsh/npc_team/yap.png +0 -0
  66. npcsh/npcsh.py +296 -112
  67. npcsh/parsing.py +118 -0
  68. npcsh/plonk.py +54 -0
  69. npcsh/pti.py +54 -0
  70. npcsh/routes.py +139 -0
  71. npcsh/spool.py +48 -0
  72. npcsh/ui.py +199 -0
  73. npcsh/wander.py +62 -0
  74. npcsh/yap.py +50 -0
  75. npcsh-1.1.13.data/data/npcsh/npc_team/agent.jinx +17 -0
  76. npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.jinx +194 -0
  77. npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.npc +2 -0
  78. npcsh-1.1.13.data/data/npcsh/npc_team/alicanto.png +0 -0
  79. npcsh-1.1.13.data/data/npcsh/npc_team/build.jinx +65 -0
  80. npcsh-1.1.13.data/data/npcsh/npc_team/chat.jinx +44 -0
  81. npcsh-1.1.13.data/data/npcsh/npc_team/cmd.jinx +44 -0
  82. npcsh-1.1.13.data/data/npcsh/npc_team/compile.jinx +50 -0
  83. npcsh-1.1.13.data/data/npcsh/npc_team/compress.jinx +140 -0
  84. npcsh-1.1.13.data/data/npcsh/npc_team/corca.jinx +249 -0
  85. npcsh-1.1.13.data/data/npcsh/npc_team/corca.npc +12 -0
  86. npcsh-1.1.13.data/data/npcsh/npc_team/corca.png +0 -0
  87. npcsh-1.1.13.data/data/npcsh/npc_team/corca_example.png +0 -0
  88. npcsh-1.1.13.data/data/npcsh/npc_team/edit_file.jinx +94 -0
  89. npcsh-1.1.13.data/data/npcsh/npc_team/foreman.npc +7 -0
  90. npcsh-1.1.13.data/data/npcsh/npc_team/frederic.npc +6 -0
  91. npcsh-1.1.13.data/data/npcsh/npc_team/frederic4.png +0 -0
  92. npcsh-1.1.13.data/data/npcsh/npc_team/guac.jinx +317 -0
  93. npcsh-1.1.13.data/data/npcsh/npc_team/guac.png +0 -0
  94. npcsh-1.1.13.data/data/npcsh/npc_team/help.jinx +52 -0
  95. npcsh-1.1.13.data/data/npcsh/npc_team/init.jinx +41 -0
  96. npcsh-1.1.13.data/data/npcsh/npc_team/jinxs.jinx +32 -0
  97. npcsh-1.1.13.data/data/npcsh/npc_team/kadiefa.npc +3 -0
  98. npcsh-1.1.13.data/data/npcsh/npc_team/kadiefa.png +0 -0
  99. npcsh-1.1.13.data/data/npcsh/npc_team/load_file.jinx +35 -0
  100. npcsh-1.1.13.data/data/npcsh/npc_team/npc-studio.jinx +77 -0
  101. npcsh-1.1.13.data/data/npcsh/npc_team/npcsh.ctx +18 -0
  102. npcsh-1.1.13.data/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
  103. npcsh-1.1.13.data/data/npcsh/npc_team/ots.jinx +61 -0
  104. npcsh-1.1.13.data/data/npcsh/npc_team/plonk.jinx +214 -0
  105. npcsh-1.1.13.data/data/npcsh/npc_team/plonk.npc +2 -0
  106. npcsh-1.1.13.data/data/npcsh/npc_team/plonk.png +0 -0
  107. npcsh-1.1.13.data/data/npcsh/npc_team/plonkjr.npc +2 -0
  108. npcsh-1.1.13.data/data/npcsh/npc_team/plonkjr.png +0 -0
  109. npcsh-1.1.13.data/data/npcsh/npc_team/pti.jinx +170 -0
  110. npcsh-1.1.13.data/data/npcsh/npc_team/python.jinx +11 -0
  111. npcsh-1.1.13.data/data/npcsh/npc_team/roll.jinx +68 -0
  112. npcsh-1.1.13.data/data/npcsh/npc_team/sample.jinx +56 -0
  113. npcsh-1.1.13.data/data/npcsh/npc_team/search.jinx +130 -0
  114. npcsh-1.1.13.data/data/npcsh/npc_team/serve.jinx +26 -0
  115. npcsh-1.1.13.data/data/npcsh/npc_team/set.jinx +40 -0
  116. npcsh-1.1.13.data/data/npcsh/npc_team/sh.jinx +34 -0
  117. npcsh-1.1.13.data/data/npcsh/npc_team/sibiji.npc +3 -0
  118. npcsh-1.1.13.data/data/npcsh/npc_team/sibiji.png +0 -0
  119. npcsh-1.1.13.data/data/npcsh/npc_team/sleep.jinx +116 -0
  120. npcsh-1.1.13.data/data/npcsh/npc_team/spool.jinx +161 -0
  121. npcsh-1.1.13.data/data/npcsh/npc_team/spool.png +0 -0
  122. npcsh-1.1.13.data/data/npcsh/npc_team/sql.jinx +16 -0
  123. npcsh-1.1.13.data/data/npcsh/npc_team/trigger.jinx +61 -0
  124. npcsh-1.1.13.data/data/npcsh/npc_team/usage.jinx +33 -0
  125. npcsh-1.1.13.data/data/npcsh/npc_team/vixynt.jinx +144 -0
  126. npcsh-1.1.13.data/data/npcsh/npc_team/wander.jinx +186 -0
  127. npcsh-1.1.13.data/data/npcsh/npc_team/yap.jinx +262 -0
  128. npcsh-1.1.13.data/data/npcsh/npc_team/yap.png +0 -0
  129. npcsh-1.1.13.dist-info/METADATA +522 -0
  130. npcsh-1.1.13.dist-info/RECORD +135 -0
  131. {npcsh-0.1.2.dist-info → npcsh-1.1.13.dist-info}/WHEEL +1 -1
  132. npcsh-1.1.13.dist-info/entry_points.txt +9 -0
  133. {npcsh-0.1.2.dist-info → npcsh-1.1.13.dist-info/licenses}/LICENSE +1 -1
  134. npcsh/command_history.py +0 -81
  135. npcsh/helpers.py +0 -36
  136. npcsh/llm_funcs.py +0 -295
  137. npcsh/main.py +0 -5
  138. npcsh/modes.py +0 -343
  139. npcsh/npc_compiler.py +0 -124
  140. npcsh-0.1.2.dist-info/METADATA +0 -99
  141. npcsh-0.1.2.dist-info/RECORD +0 -14
  142. npcsh-0.1.2.dist-info/entry_points.txt +0 -2
  143. {npcsh-0.1.2.dist-info → npcsh-1.1.13.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,214 @@
+ jinx_name: plonk
+ description: Vision-based GUI automation - use a vision model to interact with screen elements
+ inputs:
+ - task: null
+ - vmodel: null
+ - vprovider: null
+ - max_iterations: 10
+ - debug: true
+
+ steps:
+ - name: plonk_execute
+   engine: python
+   code: |
+     import os
+     import time
+     import platform
+     from termcolor import colored
+
+     from npcpy.llm_funcs import get_llm_response
+     from npcpy.data.image import capture_screenshot
+     from npcpy.work.desktop import perform_action
+
+     npc = context.get('npc')
+     messages = context.get('messages', [])
+
+     task = context.get('task')
+     vision_model = context.get('vmodel') or (npc.model if npc else 'gpt-4o')
+     vision_provider = context.get('vprovider') or (npc.provider if npc else 'openai')
+     max_iterations = int(context.get('max_iterations', 10))
+     debug = context.get('debug', True)
+
+     if not task:
+         context['output'] = """Usage: /plonk <task description>
+
+     Options:
+       --vmodel MODEL      Vision model to use (default: gpt-4o)
+       --vprovider PROV    Vision provider (default: openai)
+       --max-iterations N  Max steps (default: 10)
+
+     Example: /plonk Open Firefox and navigate to google.com"""
+         context['messages'] = messages
+         exit()
+
+     print(f"""
+     ██████╗ ██╗      ██████╗ ███╗   ██╗██╗  ██╗
+     ██╔══██╗██║     ██╔═══██╗████╗  ██║██║ ██╔╝
+     ██████╔╝██║     ██║   ██║██╔██╗ ██║█████╔╝
+     ██╔═══╝ ██║     ██║   ██║██║╚██╗██║██╔═██╗
+     ██║     ███████╗╚██████╔╝██║ ╚████║██║  ██╗
+     ╚═╝     ╚══════╝ ╚═════╝ ╚═╝  ╚═══╝╚═╝  ╚═╝
+
+     Vision GUI Automation
+     Task: {task}
+     Model: {vision_model} | Max iterations: {max_iterations}
+     """)
+
+     # System-specific examples
+     system = platform.system()
+     if system == "Windows":
+         app_examples = "start firefox, notepad, calc"
+     elif system == "Darwin":
+         app_examples = "open -a Firefox, open -a TextEdit"
+     else:
+         app_examples = "firefox &, gedit &, gnome-calculator &"
+
+     # Action types
+     ACTION_SCHEMA = {
+         "type": "object",
+         "properties": {
+             "action": {
+                 "type": "string",
+                 "enum": ["click", "type", "key", "launch", "wait", "done", "fail"],
+                 "description": "Action to perform"
+             },
+             "x": {"type": "number", "description": "X coordinate (0-100 percentage)"},
+             "y": {"type": "number", "description": "Y coordinate (0-100 percentage)"},
+             "text": {"type": "string", "description": "Text to type or key to press"},
+             "command": {"type": "string", "description": "Command to launch"},
+             "duration": {"type": "number", "description": "Wait duration in seconds"},
+             "reason": {"type": "string", "description": "Explanation of action"}
+         },
+         "required": ["action", "reason"]
+     }
+
+     click_history = []
+     summary = []
+
+     for iteration in range(max_iterations):
+         print(colored(f"\n--- Iteration {iteration + 1}/{max_iterations} ---", "cyan"))
+
+         # Capture screenshot
+         ss = capture_screenshot()
+         if not ss or 'file_path' not in ss:
+             print(colored("Failed to capture screenshot", "red"))
+             break
+
+         screenshot_path = ss['file_path']
+         if debug:
+             print(colored(f"Screenshot: {screenshot_path}", "gray"))
+
+         # Build context from history
+         history_context = ""
+         if click_history:
+             history_context = f"\nPrevious actions ({len(click_history)}):\n"
+             for i, click in enumerate(click_history[-5:], 1):
+                 history_context += f"  {i}. {click.get('action', 'unknown')} at ({click.get('x', '?')}, {click.get('y', '?')}) - {click.get('reason', '')}\n"
+
+         prompt = f"""You are a GUI automation assistant. Analyze this screenshot and determine the next action to complete the task.
+
+     TASK: {task}
+
+     {history_context}
+
+     Available actions:
+     - click: Click at x,y coordinates (0-100 percentage of screen)
+     - type: Type text
+     - key: Press key (enter, tab, escape, etc.)
+     - launch: Launch application ({app_examples})
+     - wait: Wait for duration seconds
+     - done: Task completed successfully
+     - fail: Task cannot be completed
+
+     Respond with JSON: {{"action": "...", "x": N, "y": N, "text": "...", "command": "...", "duration": N, "reason": "..."}}"""
+
+         try:
+             resp = get_llm_response(
+                 prompt,
+                 model=vision_model,
+                 provider=vision_provider,
+                 images=[screenshot_path],
+                 format="json",
+                 npc=npc
+             )
+
+             action_response = resp.get('response', {})
+             if isinstance(action_response, str):
+                 import json
+                 try:
+                     action_response = json.loads(action_response)
+                 except json.JSONDecodeError:
+                     print(colored(f"Invalid JSON response: {action_response[:100]}", "red"))
+                     continue
+
+             action = action_response.get('action', 'fail')
+             reason = action_response.get('reason', 'No reason provided')
+
+             print(colored(f"Action: {action} - {reason}", "yellow"))
+
+             if action == 'done':
+                 print(colored("Task completed successfully!", "green"))
+                 summary.append({"iteration": iteration + 1, "action": "done", "reason": reason})
+                 break
+
+             if action == 'fail':
+                 print(colored(f"Task failed: {reason}", "red"))
+                 summary.append({"iteration": iteration + 1, "action": "fail", "reason": reason})
+                 break
+
+             # Execute action
+             if action == 'click':
+                 x, y = action_response.get('x', 50), action_response.get('y', 50)
+                 perform_action('click', x=x, y=y)
+                 click_history.append({"action": "click", "x": x, "y": y, "reason": reason})
+                 print(colored(f"Clicked at ({x}, {y})", "green"))
+
+             elif action == 'type':
+                 text = action_response.get('text', '')
+                 perform_action('type', text=text)
+                 click_history.append({"action": "type", "text": text[:20], "reason": reason})
+                 print(colored(f"Typed: {text[:30]}...", "green"))
+
+             elif action == 'key':
+                 key = action_response.get('text', 'enter')
+                 perform_action('key', key=key)
+                 click_history.append({"action": "key", "key": key, "reason": reason})
+                 print(colored(f"Pressed key: {key}", "green"))
+
+             elif action == 'launch':
+                 cmd = action_response.get('command', '')
+                 perform_action('launch', command=cmd)
+                 click_history.append({"action": "launch", "command": cmd, "reason": reason})
+                 print(colored(f"Launched: {cmd}", "green"))
+                 time.sleep(2)  # Wait for app to open
+
+             elif action == 'wait':
+                 duration = action_response.get('duration', 1)
+                 time.sleep(duration)
+                 click_history.append({"action": "wait", "duration": duration, "reason": reason})
+                 print(colored(f"Waited {duration}s", "green"))
+
+             summary.append({
+                 "iteration": iteration + 1,
+                 "action": action,
+                 "last_click_coords": f"({click_history[-1].get('x', 'N/A')}, {click_history[-1].get('y', 'N/A')})" if click_history else "N/A",
+                 "reason": reason
+             })
+
+             time.sleep(0.5)  # Brief pause between actions
+
+         except Exception as e:
+             print(colored(f"Error in iteration {iteration + 1}: {e}", "red"))
+             summary.append({"iteration": iteration + 1, "error": str(e)})
+
+     # Generate summary
+     print("\n" + "="*50)
+     print(colored("PLONK SESSION SUMMARY", "cyan", attrs=['bold']))
+     print("="*50)
+     for s in summary:
+         print(f"  Step {s.get('iteration', '?')}: {s.get('action', 'unknown')} - {s.get('reason', s.get('error', ''))[:60]}")
+
+     context['output'] = f"Plonk completed with {len(summary)} actions"
+     context['messages'] = messages
+     context['plonk_summary'] = summary
+     context['click_history'] = click_history
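
The loop above pins the vision model to the small action vocabulary in ACTION_SCHEMA and dispatches each returned JSON object to `perform_action`, with coordinates expressed as 0-100 percentages of the screen. A minimal sketch of that dispatch outside the jinx; the action dict is a made-up illustration of the JSON the model is asked to return, not a real response:

    # Dispatch one plonk-style action; perform_action is the npcpy helper the
    # jinx imports, and the dict below is an illustrative example only.
    from npcpy.work.desktop import perform_action

    action = {"action": "click", "x": 48.5, "y": 11.0,
              "reason": "Click the browser address bar"}

    if action["action"] == "click":
        perform_action("click", x=action["x"], y=action["y"])  # % coordinates
    elif action["action"] == "type":
        perform_action("type", text=action.get("text", ""))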
@@ -0,0 +1,2 @@
+ name: plonk
+ primary_directive: You are the superior automation specialist of the NPC team.
@@ -0,0 +1,2 @@
+ name: plonkjr
+ primary_directive: You are a junior automation specialist on the NPC team.
@@ -0,0 +1,170 @@
+ jinx_name: pti
+ description: Pardon-The-Interruption - human-in-the-loop reasoning with think tags and interruptible streaming
+ npc: frederic
+ inputs:
+ - model: null
+ - provider: null
+ - files: null
+ - reasoning_model: null
+
+ steps:
+ - name: pti_repl
+   engine: python
+   code: |
+     import os
+     import sys
+     from termcolor import colored
+
+     from npcpy.llm_funcs import get_llm_response
+     from npcpy.npc_sysenv import get_system_message, render_markdown
+     from npcpy.data.load import load_file_contents
+     from npcpy.data.text import rag_search
+
+     npc = context.get('npc')
+     team = context.get('team')
+     messages = context.get('messages', [])
+     files = context.get('files')
+
+     # PTI uses reasoning model for deeper thinking
+     model = context.get('reasoning_model') or context.get('model') or (npc.model if npc else None)
+     provider = context.get('provider') or (npc.provider if npc else None)
+
+     print("""
+     ██████╗ ████████╗██╗
+     ██╔══██╗╚══██╔══╝██║
+     ██████╔╝   ██║   ██║
+     ██╔═══╝    ██║   ██║
+     ██║        ██║   ██║
+     ╚═╝        ╚═╝   ╚═╝
+
+     Pardon-The-Interruption
+     Human-in-the-loop reasoning mode
+     """)
+
+     npc_name = npc.name if npc else "pti"
+     print(f"Entering PTI mode (NPC: {npc_name}). Type '/pq' to exit.")
+     print("  - AI will use <think> tags for step-by-step reasoning")
+     print("  - Use <request_for_input> to pause and ask questions")
+     print("  - Ctrl+C interrupts stream for immediate feedback")
+
+     # Load files if provided
+     loaded_content = {}
+     if files:
+         if isinstance(files, str):
+             files = [f.strip() for f in files.split(',')]
+         for file_path in files:
+             file_path = os.path.expanduser(file_path)
+             if os.path.exists(file_path):
+                 try:
+                     chunks = load_file_contents(file_path)
+                     loaded_content[file_path] = "\n".join(chunks)
+                     print(colored(f"Loaded: {file_path}", "green"))
+                 except Exception as e:
+                     print(colored(f"Error loading {file_path}: {e}", "red"))
+
+     # System message for PTI mode
+     pti_system = """You are an AI assistant in PTI (Pardon-The-Interruption) mode.
+
+     IMPORTANT INSTRUCTIONS:
+     1. Think step-by-step using <think>...</think> tags to show your reasoning
+     2. When you need more information from the user, use <request_for_input>your question</request_for_input>
+     3. Be thorough but concise in your reasoning
+     4. The user can interrupt at any time to provide guidance
+
+     Example:
+     <think>
+     Let me break this down...
+     Step 1: First I need to understand X
+     Step 2: Then consider Y
+     </think>
+
+     <request_for_input>
+     I notice you mentioned Z. Could you clarify what you mean by that?
+     </request_for_input>"""
+
+     if not messages or messages[0].get("role") != "system":
+         messages.insert(0, {"role": "system", "content": pti_system})
+
+     # REPL loop
+     user_input = None
+     while True:
+         try:
+             if not user_input:
+                 prompt_str = f"{npc_name}:pti> "
+                 user_input = input(prompt_str).strip()
+
+             if not user_input:
+                 user_input = None
+                 continue
+
+             if user_input.lower() == "/pq":
+                 print("Exiting PTI mode.")
+                 break
+
+             # Build prompt with file context
+             prompt_for_llm = user_input
+             if loaded_content:
+                 context_str = "\n".join([f"--- {fname} ---\n{content}" for fname, content in loaded_content.items()])
+                 prompt_for_llm += f"\n\nContext:\n{context_str}"
+
+             prompt_for_llm += "\n\nThink step-by-step using <think> tags. Use <request_for_input> when you need clarification."
+
+             messages.append({"role": "user", "content": user_input})
+
+             try:
+                 resp = get_llm_response(
+                     prompt_for_llm,
+                     model=model,
+                     provider=provider,
+                     messages=messages[:-1],  # Don't duplicate the user message
+                     stream=True,
+                     npc=npc
+                 )
+
+                 response_stream = resp.get('response')
+                 full_response = ""
+                 request_found = False
+
+                 # Stream the response
+                 for chunk in response_stream:
+                     chunk_content = ""
+                     if hasattr(chunk, 'choices') and chunk.choices:
+                         delta = chunk.choices[0].delta
+                         if hasattr(delta, 'content') and delta.content:
+                             chunk_content = delta.content
+                     elif isinstance(chunk, dict):
+                         chunk_content = chunk.get("message", {}).get("content", "")
+
+                     if chunk_content:
+                         print(chunk_content, end='', flush=True)
+                         full_response += chunk_content
+
+                         # Check for request_for_input
+                         if "</request_for_input>" in full_response:
+                             request_found = True
+                             break
+
+                 print()  # newline after stream
+
+                 messages.append({"role": "assistant", "content": full_response})
+                 user_input = None  # Reset for next iteration
+
+             except KeyboardInterrupt:
+                 print(colored("\n\n--- Interrupted ---", "yellow"))
+                 interrupt_input = input("Your feedback: ").strip()
+                 if interrupt_input:
+                     user_input = interrupt_input
+                 else:
+                     user_input = None
+                 continue
+
+         except KeyboardInterrupt:
+             print("\nUse '/pq' to exit or continue.")
+             user_input = None
+             continue
+         except EOFError:
+             print("\nExiting PTI mode.")
+             break
+
+     context['output'] = "Exited PTI mode."
+     context['messages'] = messages
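
Streaming stops as soon as `</request_for_input>` shows up in the accumulated text, but the jinx above leaves the tag contents inline in the transcript. A stdlib-only sketch of how the question could be pulled out once the closing tag appears; the regex is illustrative, and only the tag names come from the system message above:

    import re

    full_response = ("<think>Need the target directory.</think>\n"
                     "<request_for_input>Which directory should I scan?</request_for_input>")

    match = re.search(r"<request_for_input>(.*?)</request_for_input>", full_response, re.DOTALL)
    if match:
        question = match.group(1).strip()
        print(question)  # could feed straight back into input(f"{question} > ")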
@@ -0,0 +1,11 @@
+ jinx_name: python
+ description: Execute scripts with python. You must set the ultimate result as the "output"
+   variable. It MUST be a string.
+   Do not add unnecessary print statements.
+   This jinx is intended for executing code snippets that are not
+   accomplished by other jinxes. Use it only when the others are insufficient.
+ inputs:
+ - code
+ steps:
+ - code: '{{code}}'
+   engine: python
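
The python jinx renders the caller's snippet directly into a Python step via `{{code}}`, so the snippet itself carries the contract stated in the description: assign the final result to `output` as a string. A hypothetical payload might look like:

    # Hypothetical snippet passed as the `code` input; the jinx requires the
    # final result to land in `output` as a string, with no stray prints.
    rows = [2, 3, 5, 7]
    output = f"sum of {len(rows)} primes = {sum(rows)}"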
@@ -0,0 +1,68 @@
+ jinx_name: "roll"
+ description: "Generate a video from a text prompt."
+ inputs:
+ - prompt: ""  # Required text prompt for video generation.
+ - vgmodel: ""  # Video generation model to use. Defaults to NPCSH_VIDEO_GEN_MODEL or NPC's model.
+ - vgprovider: ""  # Video generation provider to use. Defaults to NPCSH_VIDEO_GEN_PROVIDER or NPC's provider.
+ - num_frames: 125  # Number of frames for the video.
+ - width: 256  # Width of the video.
+ - height: 256  # Height of the video.
+ - output_path: "output.mp4"  # Output file path for the video.
+ steps:
+ - name: "generate_video"
+   engine: "python"
+   code: |
+     import traceback
+     from npcpy.llm_funcs import gen_video
+     # Assuming NPCSH_VIDEO_GEN_MODEL and NPCSH_VIDEO_GEN_PROVIDER are accessible
+
+     prompt = context.get('prompt')
+     num_frames = int(context.get('num_frames', 125))  # Ensure int type
+     width = int(context.get('width', 256))  # Ensure int type
+     height = int(context.get('height', 256))  # Ensure int type
+     output_path = context.get('output_path')
+     video_gen_model = context.get('vgmodel')
+     video_gen_provider = context.get('vgprovider')
+     output_messages = context.get('messages', [])
+     current_npc = context.get('npc')
+
+     if not prompt or not prompt.strip():
+         context['output'] = "Usage: /roll <your prompt>"
+         context['messages'] = output_messages
+         exit()
+
+     # Fallback for model/provider if not explicitly set in Jinx inputs
+     if not video_gen_model and current_npc and current_npc.model:
+         video_gen_model = current_npc.model
+     if not video_gen_provider and current_npc and current_npc.provider:
+         video_gen_provider = current_npc.provider
+
+     # Final fallbacks (these would ideally come from npcsh._state config)
+     if not video_gen_model:
+         video_gen_model = "stable-video-diffusion"  # Example default
+     if not video_gen_provider:
+         video_gen_provider = "diffusers"  # Example default
+
+     try:
+         result = gen_video(
+             prompt=prompt,
+             model=video_gen_model,
+             provider=video_gen_provider,
+             npc=current_npc,
+             num_frames=num_frames,
+             width=width,
+             height=height,
+             output_path=output_path,
+             **context.get('api_kwargs', {})  # Assuming api_kwargs might be passed
+         )
+
+         if isinstance(result, dict):
+             context['output'] = result.get('output', 'Video generated.')
+             context['messages'] = result.get('messages', output_messages)
+         else:
+             context['output'] = str(result)
+             context['messages'] = output_messages
+     except Exception as e:
+         traceback.print_exc()
+         context['output'] = f"Error generating video: {e}"
+         context['messages'] = output_messages
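
The roll jinx simply forwards its inputs to `gen_video`. A sketch of the equivalent direct call, using the same arguments the jinx passes; the prompt, frame count, and output path here are illustrative, and "stable-video-diffusion"/"diffusers" are just the example defaults hard-coded above:

    # Mirrors the gen_video call in the roll jinx; all values shown are examples.
    from npcpy.llm_funcs import gen_video

    result = gen_video(
        prompt="A timelapse of storm clouds over mountains",
        model="stable-video-diffusion",  # example default from the jinx
        provider="diffusers",            # example default from the jinx
        npc=None,
        num_frames=60,                   # jinx default is 125
        width=256,
        height=256,
        output_path="clouds.mp4",
    )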
@@ -0,0 +1,56 @@
+ jinx_name: "sample"
+ description: "Send a prompt directly to the LLM."
+ inputs:
+ - prompt: ""  # Required text prompt to send to the LLM.
+ - model: ""  # LLM model to use. Defaults to NPC's model.
+ - provider: ""  # LLM provider to use. Defaults to NPC's provider.
+ steps:
+ - name: "send_prompt_to_llm"
+   engine: "python"
+   code: |
+     import traceback
+     from npcpy.llm_funcs import get_llm_response
+
+     prompt = context.get('prompt')
+     llm_model = context.get('model')
+     llm_provider = context.get('provider')
+     output_messages = context.get('messages', [])
+     current_npc = context.get('npc')
+
+     if not prompt or not prompt.strip():
+         context['output'] = "Usage: /sample <your prompt> [-m|--model <model>] [-p|--provider <provider>]"
+         context['messages'] = output_messages
+         exit()
+
+     # Fallback for model/provider if not explicitly set in Jinx inputs
+     if not llm_model and current_npc and current_npc.model:
+         llm_model = current_npc.model
+     if not llm_provider and current_npc and current_npc.provider:
+         llm_provider = current_npc.provider
+
+     # Final fallbacks (these would ideally come from npcsh._state config)
+     if not llm_model: llm_model = "gemini-1.5-pro"  # Example default
+     if not llm_provider: llm_provider = "gemini"  # Example default
+
+     try:
+         result = get_llm_response(
+             prompt=prompt,
+             model=llm_model,
+             provider=llm_provider,
+             npc=current_npc,
+             **{k: v for k, v in context.items() if k not in ['messages', 'prompt', 'model', 'provider', 'npc']}  # Pass other context
+         )
+
+         if isinstance(result, dict):
+             context['output'] = result.get('response')
+             context['messages'] = result.get('messages', output_messages)
+             context['model'] = llm_model
+             context['provider'] = llm_provider
+         else:
+             context['output'] = str(result)
+             context['messages'] = output_messages
+
+     except Exception as e:
+         traceback.print_exc()
+         context['output'] = f"Error sampling LLM: {e}"
+         context['messages'] = output_messages
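
Beyond the explicit model/provider fallbacks, the sample jinx forwards every other context key to `get_llm_response` as keyword arguments. A stdlib-only sketch of that filtering idiom; the `temperature` key is an illustrative extra, and whether `get_llm_response` accepts it depends on npcpy, which this diff does not show:

    # Everything in context except the reserved keys is forwarded as **kwargs.
    context = {"prompt": "Name three sorting algorithms", "model": "", "provider": "",
               "npc": None, "messages": [], "temperature": 0.2}  # temperature: illustrative

    reserved = {'messages', 'prompt', 'model', 'provider', 'npc'}
    extra = {k: v for k, v in context.items() if k not in reserved}
    print(extra)  # {'temperature': 0.2} -- unpacked into the LLM call as **extra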
@@ -0,0 +1,130 @@
+ jinx_name: "search"
+ description: >
+   Executes a search across various sources.
+   Usage:
+     /search <query>                     (Default: Web Search)
+     /search --memory <query>            (Search approved memories)
+     /search --kg <query>                (Search the knowledge graph)
+     /search --rag [-f <paths>] <query>  (Execute a RAG search)
+     /search --brainblast <query>        (Advanced history search)
+ inputs:
+ - query: ""
+ - sprovider: ""
+ - memory: false
+ - kg: false
+ - rag: false
+ - brainblast: false
+ - file_paths: ""
+ - history_db_path: "~/npcsh_history.db"
+ - vector_db_path: "~/npcsh_chroma.db"
+ - emodel: ""
+ - eprovider: ""
+ steps:
+ - name: "execute_unified_search"
+   engine: "python"
+   code: |
+     import os
+     import traceback
+
+     # Access query from context
+     query = context.get('query')
+     if not query or not query.strip():
+         context['output'] = "Usage: /search [--memory|--kg|--rag|--brainblast] <query>"
+     else:
+         # state is available as a GLOBAL variable (from extra_globals)
+         # Access it directly, not from context
+         try:
+             current_state = state
+         except NameError:
+             context['output'] = "Error: Shell state not available in jinx context"
+             raise
+
+         current_npc = current_state.npc
+         current_team = current_state.team
+
+         npc_name = getattr(current_npc, 'name', '__none__') if current_npc else '__none__'
+         team_name = getattr(current_team, 'name', '__none__') if current_team else '__none__'
+         current_path = os.getcwd()
+         db_path = os.path.expanduser(context.get("history_db_path") or "~/.npcsh/npcsh_history.db")
+
+         try:
+             cmd_history = CommandHistory(db_path)
+
+             if context.get('memory'):
+                 memories = get_relevant_memories(
+                     command_history=cmd_history,
+                     npc_name=npc_name,
+                     team_name=team_name,
+                     path=current_path,
+                     query=query,
+                     max_memories=10,
+                     state=current_state  # Pass the state object
+                 )
+                 print(memories)
+
+                 if not memories:
+                     output = f"No memories found for query: '{query}'"
+                 else:
+                     output = f"Found {len(memories)} memories:\n\n" + "\n".join(
+                         f"{i}. [{mem.get('timestamp', 'unknown')}] {mem.get('final_memory') or mem.get('initial_memory')}"
+                         for i, mem in enumerate(memories, 1)
+                     )
+
+             elif context.get('kg'):
+                 facts = search_kg_facts(
+                     cmd_history,
+                     npc_name,
+                     team_name,
+                     current_path,
+                     query
+                 )
+                 print(facts)
+
+                 if not facts:
+                     output = f"No KG facts found for query: '{query}'"
+                 else:
+                     output = f"Found {len(facts)} KG facts:\n\n" + "\n".join(
+                         f"{i}. {fact.get('statement')}" for i, fact in enumerate(facts, 1)
+                     )
+
+             elif context.get('rag'):
+                 file_paths_str = context.get('file_paths', '')
+                 file_paths = [os.path.abspath(os.path.expanduser(p.strip())) for p in file_paths_str.split(',') if p.strip()]
+                 emodel = context.get('emodel') or current_state.embedding_model
+                 eprovider = context.get('eprovider') or current_state.embedding_provider
+
+                 file_contents = []
+                 for path in file_paths:
+                     chunks = load_file_contents(path)
+                     basename = os.path.basename(path)
+                     file_contents.extend([f"{basename}: {chunk}" for chunk in chunks])
+
+                 result = execute_rag_command(
+                     command=query,
+                     vector_db_path=os.path.expanduser(context.get('vector_db_path') or "~/.npcsh/npcsh_chroma.db"),
+                     embedding_model=emodel,
+                     embedding_provider=eprovider,
+                     file_contents=file_contents or None
+                 )
+                 print(result)
+                 output = result.get('response', 'No response from RAG.')
+
+             elif context.get('brainblast'):
+                 result = execute_brainblast_command(
+                     command=query,
+                     command_history=cmd_history,
+                     **context
+                 )
+                 print(result)
+                 output = result.get('output', 'Brainblast search executed.')
+
+             else:
+                 # Default to web search
+                 provider = context.get('sprovider') or current_state.search_provider
+                 results = search_web(query, provider=provider)
+                 output = "\n".join([f"- {res}" for res in results]) if results else "No web results found."
+
+         except Exception as e:
+             output = f"An error occurred in the search jinx: {e}\n{traceback.format_exc()}"
+
+         context['output'] = output
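
For the --rag branch, comma-separated -f paths are expanded, chunked, and each chunk is prefixed with its source file's basename before being handed to `execute_rag_command`. A self-contained sketch of that path handling, with a stand-in loader since the real `load_file_contents` comes from npcpy and is not shown in this diff; the paths are illustrative:

    import os

    file_paths_str = "~/notes.md, ~/papers/summary.txt"  # illustrative -f value

    def load_file_contents(path):
        # Stand-in for the npcpy loader the jinx relies on.
        return ["chunk one", "chunk two"]

    file_paths = [os.path.abspath(os.path.expanduser(p.strip()))
                  for p in file_paths_str.split(',') if p.strip()]

    file_contents = []
    for path in file_paths:
        basename = os.path.basename(path)
        file_contents.extend(f"{basename}: {chunk}" for chunk in load_file_contents(path))
    print(file_contents)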