npcsh 1.1.14-py3-none-any.whl → 1.1.15-py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (168)
  1. npcsh/_state.py +488 -77
  2. npcsh/mcp_server.py +2 -1
  3. npcsh/npc.py +84 -32
  4. npcsh/npc_team/alicanto.npc +22 -1
  5. npcsh/npc_team/corca.npc +28 -9
  6. npcsh/npc_team/frederic.npc +25 -4
  7. npcsh/npc_team/guac.npc +22 -0
  8. npcsh/npc_team/jinxs/bin/nql.jinx +141 -0
  9. npcsh/npc_team/jinxs/bin/sync.jinx +230 -0
  10. {npcsh-1.1.14.data/data/npcsh/npc_team → npcsh/npc_team/jinxs/bin}/vixynt.jinx +8 -30
  11. npcsh/npc_team/jinxs/bin/wander.jinx +152 -0
  12. npcsh/npc_team/jinxs/lib/browser/browser_action.jinx +220 -0
  13. npcsh/npc_team/jinxs/lib/browser/browser_screenshot.jinx +40 -0
  14. npcsh/npc_team/jinxs/lib/browser/close_browser.jinx +14 -0
  15. npcsh/npc_team/jinxs/lib/browser/open_browser.jinx +43 -0
  16. npcsh/npc_team/jinxs/lib/computer_use/click.jinx +23 -0
  17. npcsh/npc_team/jinxs/lib/computer_use/key_press.jinx +26 -0
  18. npcsh/npc_team/jinxs/lib/computer_use/launch_app.jinx +37 -0
  19. npcsh/npc_team/jinxs/lib/computer_use/screenshot.jinx +23 -0
  20. npcsh/npc_team/jinxs/lib/computer_use/type_text.jinx +27 -0
  21. npcsh/npc_team/jinxs/lib/computer_use/wait.jinx +21 -0
  22. {npcsh-1.1.14.data/data/npcsh/npc_team → npcsh/npc_team/jinxs/lib/core}/edit_file.jinx +3 -3
  23. {npcsh-1.1.14.data/data/npcsh/npc_team → npcsh/npc_team/jinxs/lib/core}/load_file.jinx +1 -1
  24. npcsh/npc_team/jinxs/lib/core/paste.jinx +134 -0
  25. {npcsh-1.1.14.data/data/npcsh/npc_team → npcsh/npc_team/jinxs/lib/core}/search.jinx +2 -1
  26. npcsh/npc_team/jinxs/{code → lib/core}/sh.jinx +2 -8
  27. npcsh/npc_team/jinxs/{code → lib/core}/sql.jinx +1 -1
  28. npcsh/npc_team/jinxs/lib/orchestration/convene.jinx +232 -0
  29. npcsh/npc_team/jinxs/lib/orchestration/delegate.jinx +184 -0
  30. npcsh/npc_team/jinxs/lib/research/arxiv.jinx +76 -0
  31. npcsh/npc_team/jinxs/lib/research/paper_search.jinx +101 -0
  32. npcsh/npc_team/jinxs/lib/research/semantic_scholar.jinx +69 -0
  33. npcsh/npc_team/jinxs/{utils/core → lib/utils}/build.jinx +8 -8
  34. npcsh/npc_team/jinxs/lib/utils/jinxs.jinx +176 -0
  35. npcsh/npc_team/jinxs/lib/utils/shh.jinx +17 -0
  36. npcsh/npc_team/jinxs/lib/utils/switch.jinx +62 -0
  37. npcsh/npc_team/jinxs/lib/utils/switches.jinx +61 -0
  38. npcsh/npc_team/jinxs/lib/utils/teamviz.jinx +205 -0
  39. npcsh/npc_team/jinxs/lib/utils/verbose.jinx +17 -0
  40. npcsh/npc_team/kadiefa.npc +19 -1
  41. npcsh/npc_team/plonk.npc +26 -1
  42. npcsh/npc_team/plonkjr.npc +22 -1
  43. npcsh/npc_team/sibiji.npc +23 -2
  44. npcsh/npcsh.py +153 -39
  45. npcsh/ui.py +22 -1
  46. npcsh-1.1.15.data/data/npcsh/npc_team/alicanto.npc +23 -0
  47. npcsh-1.1.15.data/data/npcsh/npc_team/arxiv.jinx +76 -0
  48. npcsh-1.1.15.data/data/npcsh/npc_team/browser_action.jinx +220 -0
  49. npcsh-1.1.15.data/data/npcsh/npc_team/browser_screenshot.jinx +40 -0
  50. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/build.jinx +8 -8
  51. npcsh-1.1.15.data/data/npcsh/npc_team/click.jinx +23 -0
  52. npcsh-1.1.15.data/data/npcsh/npc_team/close_browser.jinx +14 -0
  53. npcsh-1.1.15.data/data/npcsh/npc_team/convene.jinx +232 -0
  54. npcsh-1.1.15.data/data/npcsh/npc_team/corca.npc +31 -0
  55. npcsh-1.1.15.data/data/npcsh/npc_team/delegate.jinx +184 -0
  56. {npcsh/npc_team/jinxs/utils → npcsh-1.1.15.data/data/npcsh/npc_team}/edit_file.jinx +3 -3
  57. npcsh-1.1.15.data/data/npcsh/npc_team/frederic.npc +27 -0
  58. npcsh-1.1.15.data/data/npcsh/npc_team/guac.npc +22 -0
  59. npcsh-1.1.15.data/data/npcsh/npc_team/jinxs.jinx +176 -0
  60. npcsh-1.1.15.data/data/npcsh/npc_team/kadiefa.npc +21 -0
  61. npcsh-1.1.15.data/data/npcsh/npc_team/key_press.jinx +26 -0
  62. npcsh-1.1.15.data/data/npcsh/npc_team/launch_app.jinx +37 -0
  63. {npcsh/npc_team/jinxs/utils → npcsh-1.1.15.data/data/npcsh/npc_team}/load_file.jinx +1 -1
  64. npcsh-1.1.15.data/data/npcsh/npc_team/nql.jinx +141 -0
  65. npcsh-1.1.15.data/data/npcsh/npc_team/open_browser.jinx +43 -0
  66. npcsh-1.1.15.data/data/npcsh/npc_team/paper_search.jinx +101 -0
  67. npcsh-1.1.15.data/data/npcsh/npc_team/paste.jinx +134 -0
  68. npcsh-1.1.15.data/data/npcsh/npc_team/plonk.npc +27 -0
  69. npcsh-1.1.15.data/data/npcsh/npc_team/plonkjr.npc +23 -0
  70. npcsh-1.1.15.data/data/npcsh/npc_team/screenshot.jinx +23 -0
  71. {npcsh/npc_team/jinxs/utils → npcsh-1.1.15.data/data/npcsh/npc_team}/search.jinx +2 -1
  72. npcsh-1.1.15.data/data/npcsh/npc_team/semantic_scholar.jinx +69 -0
  73. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/sh.jinx +2 -8
  74. npcsh-1.1.15.data/data/npcsh/npc_team/shh.jinx +17 -0
  75. npcsh-1.1.15.data/data/npcsh/npc_team/sibiji.npc +24 -0
  76. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/sql.jinx +1 -1
  77. npcsh-1.1.15.data/data/npcsh/npc_team/switch.jinx +62 -0
  78. npcsh-1.1.15.data/data/npcsh/npc_team/switches.jinx +61 -0
  79. npcsh-1.1.15.data/data/npcsh/npc_team/sync.jinx +230 -0
  80. npcsh-1.1.15.data/data/npcsh/npc_team/teamviz.jinx +205 -0
  81. npcsh-1.1.15.data/data/npcsh/npc_team/type_text.jinx +27 -0
  82. npcsh-1.1.15.data/data/npcsh/npc_team/verbose.jinx +17 -0
  83. {npcsh/npc_team/jinxs/utils → npcsh-1.1.15.data/data/npcsh/npc_team}/vixynt.jinx +8 -30
  84. npcsh-1.1.15.data/data/npcsh/npc_team/wait.jinx +21 -0
  85. npcsh-1.1.15.data/data/npcsh/npc_team/wander.jinx +152 -0
  86. {npcsh-1.1.14.dist-info → npcsh-1.1.15.dist-info}/METADATA +399 -58
  87. npcsh-1.1.15.dist-info/RECORD +170 -0
  88. npcsh-1.1.15.dist-info/entry_points.txt +19 -0
  89. npcsh-1.1.15.dist-info/top_level.txt +2 -0
  90. project/__init__.py +1 -0
  91. npcsh/npc_team/foreman.npc +0 -7
  92. npcsh/npc_team/jinxs/modes/alicanto.jinx +0 -194
  93. npcsh/npc_team/jinxs/modes/corca.jinx +0 -249
  94. npcsh/npc_team/jinxs/modes/guac.jinx +0 -317
  95. npcsh/npc_team/jinxs/modes/plonk.jinx +0 -214
  96. npcsh/npc_team/jinxs/modes/pti.jinx +0 -170
  97. npcsh/npc_team/jinxs/modes/wander.jinx +0 -186
  98. npcsh/npc_team/jinxs/utils/agent.jinx +0 -17
  99. npcsh/npc_team/jinxs/utils/core/jinxs.jinx +0 -32
  100. npcsh-1.1.14.data/data/npcsh/npc_team/agent.jinx +0 -17
  101. npcsh-1.1.14.data/data/npcsh/npc_team/alicanto.jinx +0 -194
  102. npcsh-1.1.14.data/data/npcsh/npc_team/alicanto.npc +0 -2
  103. npcsh-1.1.14.data/data/npcsh/npc_team/corca.jinx +0 -249
  104. npcsh-1.1.14.data/data/npcsh/npc_team/corca.npc +0 -12
  105. npcsh-1.1.14.data/data/npcsh/npc_team/foreman.npc +0 -7
  106. npcsh-1.1.14.data/data/npcsh/npc_team/frederic.npc +0 -6
  107. npcsh-1.1.14.data/data/npcsh/npc_team/guac.jinx +0 -317
  108. npcsh-1.1.14.data/data/npcsh/npc_team/jinxs.jinx +0 -32
  109. npcsh-1.1.14.data/data/npcsh/npc_team/kadiefa.npc +0 -3
  110. npcsh-1.1.14.data/data/npcsh/npc_team/plonk.jinx +0 -214
  111. npcsh-1.1.14.data/data/npcsh/npc_team/plonk.npc +0 -2
  112. npcsh-1.1.14.data/data/npcsh/npc_team/plonkjr.npc +0 -2
  113. npcsh-1.1.14.data/data/npcsh/npc_team/pti.jinx +0 -170
  114. npcsh-1.1.14.data/data/npcsh/npc_team/sibiji.npc +0 -3
  115. npcsh-1.1.14.data/data/npcsh/npc_team/wander.jinx +0 -186
  116. npcsh-1.1.14.dist-info/RECORD +0 -135
  117. npcsh-1.1.14.dist-info/entry_points.txt +0 -9
  118. npcsh-1.1.14.dist-info/top_level.txt +0 -1
  119. /npcsh/npc_team/jinxs/{utils → bin}/roll.jinx +0 -0
  120. /npcsh/npc_team/jinxs/{utils → bin}/sample.jinx +0 -0
  121. /npcsh/npc_team/jinxs/{modes → bin}/spool.jinx +0 -0
  122. /npcsh/npc_team/jinxs/{modes → bin}/yap.jinx +0 -0
  123. /npcsh/npc_team/jinxs/{utils → lib/computer_use}/trigger.jinx +0 -0
  124. /npcsh/npc_team/jinxs/{utils → lib/core}/chat.jinx +0 -0
  125. /npcsh/npc_team/jinxs/{utils → lib/core}/cmd.jinx +0 -0
  126. /npcsh/npc_team/jinxs/{utils → lib/core}/compress.jinx +0 -0
  127. /npcsh/npc_team/jinxs/{utils → lib/core}/ots.jinx +0 -0
  128. /npcsh/npc_team/jinxs/{code → lib/core}/python.jinx +0 -0
  129. /npcsh/npc_team/jinxs/{utils → lib/core}/sleep.jinx +0 -0
  130. /npcsh/npc_team/jinxs/{utils/core → lib/utils}/compile.jinx +0 -0
  131. /npcsh/npc_team/jinxs/{utils/core → lib/utils}/help.jinx +0 -0
  132. /npcsh/npc_team/jinxs/{utils/core → lib/utils}/init.jinx +0 -0
  133. /npcsh/npc_team/jinxs/{utils → lib/utils}/serve.jinx +0 -0
  134. /npcsh/npc_team/jinxs/{utils/core → lib/utils}/set.jinx +0 -0
  135. /npcsh/npc_team/jinxs/{utils → lib/utils}/usage.jinx +0 -0
  136. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/alicanto.png +0 -0
  137. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/chat.jinx +0 -0
  138. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/cmd.jinx +0 -0
  139. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/compile.jinx +0 -0
  140. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/compress.jinx +0 -0
  141. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/corca.png +0 -0
  142. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/corca_example.png +0 -0
  143. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/frederic4.png +0 -0
  144. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/guac.png +0 -0
  145. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/help.jinx +0 -0
  146. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/init.jinx +0 -0
  147. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/kadiefa.png +0 -0
  148. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/npc-studio.jinx +0 -0
  149. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
  150. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
  151. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/ots.jinx +0 -0
  152. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/plonk.png +0 -0
  153. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/plonkjr.png +0 -0
  154. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/python.jinx +0 -0
  155. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/roll.jinx +0 -0
  156. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/sample.jinx +0 -0
  157. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/serve.jinx +0 -0
  158. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/set.jinx +0 -0
  159. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/sibiji.png +0 -0
  160. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/sleep.jinx +0 -0
  161. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/spool.jinx +0 -0
  162. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/spool.png +0 -0
  163. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/trigger.jinx +0 -0
  164. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/usage.jinx +0 -0
  165. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/yap.jinx +0 -0
  166. {npcsh-1.1.14.data → npcsh-1.1.15.data}/data/npcsh/npc_team/yap.png +0 -0
  167. {npcsh-1.1.14.dist-info → npcsh-1.1.15.dist-info}/WHEEL +0 -0
  168. {npcsh-1.1.14.dist-info → npcsh-1.1.15.dist-info}/licenses/LICENSE +0 -0
@@ -1,214 +0,0 @@
- jinx_name: plonk
- description: Vision-based GUI automation - use vision model to interact with screen elements
- inputs:
- - task: null
- - vmodel: null
- - vprovider: null
- - max_iterations: 10
- - debug: true
-
- steps:
- - name: plonk_execute
- engine: python
- code: |
- import os
- import time
- import platform
- from termcolor import colored
-
- from npcpy.llm_funcs import get_llm_response
- from npcpy.data.image import capture_screenshot
- from npcpy.work.desktop import perform_action
-
- npc = context.get('npc')
- messages = context.get('messages', [])
-
- task = context.get('task')
- vision_model = context.get('vmodel') or (npc.model if npc else 'gpt-4o')
- vision_provider = context.get('vprovider') or (npc.provider if npc else 'openai')
- max_iterations = int(context.get('max_iterations', 10))
- debug = context.get('debug', True)
-
- if not task:
- context['output'] = """Usage: /plonk <task description>
-
- Options:
- --vmodel MODEL Vision model to use (default: gpt-4o)
- --vprovider PROV Vision provider (default: openai)
- --max-iterations N Max steps (default: 10)
-
- Example: /plonk Open Firefox and navigate to google.com"""
- context['messages'] = messages
- exit()
-
- print(f"""
- ██████╗ ██╗ ██████╗ ███╗ ██╗██╗ ██╗
- ██╔══██╗██║ ██╔═══██╗████╗ ██║██║ ██╔╝
- ██████╔╝██║ ██║ ██║██╔██╗ ██║█████╔╝
- ██╔═══╝ ██║ ██║ ██║██║╚██╗██║██╔═██╗
- ██║ ███████╗╚██████╔╝██║ ╚████║██║ ██╗
- ╚═╝ ╚══════╝ ╚═════╝ ╚═╝ ╚═══╝╚═╝ ╚═╝
-
- Vision GUI Automation
- Task: {task}
- Model: {vision_model} | Max iterations: {max_iterations}
- """)
-
- # System-specific examples
- system = platform.system()
- if system == "Windows":
- app_examples = "start firefox, notepad, calc"
- elif system == "Darwin":
- app_examples = "open -a Firefox, open -a TextEdit"
- else:
- app_examples = "firefox &, gedit &, gnome-calculator &"
-
- # Action types
- ACTION_SCHEMA = {
- "type": "object",
- "properties": {
- "action": {
- "type": "string",
- "enum": ["click", "type", "key", "launch", "wait", "done", "fail"],
- "description": "Action to perform"
- },
- "x": {"type": "number", "description": "X coordinate (0-100 percentage)"},
- "y": {"type": "number", "description": "Y coordinate (0-100 percentage)"},
- "text": {"type": "string", "description": "Text to type or key to press"},
- "command": {"type": "string", "description": "Command to launch"},
- "duration": {"type": "number", "description": "Wait duration in seconds"},
- "reason": {"type": "string", "description": "Explanation of action"}
- },
- "required": ["action", "reason"]
- }
-
- click_history = []
- summary = []
-
- for iteration in range(max_iterations):
- print(colored(f"\n--- Iteration {iteration + 1}/{max_iterations} ---", "cyan"))
-
- # Capture screenshot
- ss = capture_screenshot()
- if not ss or 'file_path' not in ss:
- print(colored("Failed to capture screenshot", "red"))
- break
-
- screenshot_path = ss['file_path']
- if debug:
- print(colored(f"Screenshot: {screenshot_path}", "gray"))
-
- # Build context from history
- history_context = ""
- if click_history:
- history_context = f"\nPrevious actions ({len(click_history)}):\n"
- for i, click in enumerate(click_history[-5:], 1):
- history_context += f" {i}. {click.get('action', 'unknown')} at ({click.get('x', '?')}, {click.get('y', '?')}) - {click.get('reason', '')}\n"
-
- prompt = f"""You are a GUI automation assistant. Analyze this screenshot and determine the next action to complete the task.
-
- TASK: {task}
-
- {history_context}
-
- Available actions:
- - click: Click at x,y coordinates (0-100 percentage of screen)
- - type: Type text
- - key: Press key (enter, tab, escape, etc.)
- - launch: Launch application ({app_examples})
- - wait: Wait for duration seconds
- - done: Task completed successfully
- - fail: Task cannot be completed
-
- Respond with JSON: {{"action": "...", "x": N, "y": N, "text": "...", "command": "...", "duration": N, "reason": "..."}}"""
-
- try:
- resp = get_llm_response(
- prompt,
- model=vision_model,
- provider=vision_provider,
- images=[screenshot_path],
- format="json",
- npc=npc
- )
-
- action_response = resp.get('response', {})
- if isinstance(action_response, str):
- import json
- try:
- action_response = json.loads(action_response)
- except:
- print(colored(f"Invalid JSON response: {action_response[:100]}", "red"))
- continue
-
- action = action_response.get('action', 'fail')
- reason = action_response.get('reason', 'No reason provided')
-
- print(colored(f"Action: {action} - {reason}", "yellow"))
-
- if action == 'done':
- print(colored("Task completed successfully!", "green"))
- summary.append({"iteration": iteration + 1, "action": "done", "reason": reason})
- break
-
- if action == 'fail':
- print(colored(f"Task failed: {reason}", "red"))
- summary.append({"iteration": iteration + 1, "action": "fail", "reason": reason})
- break
-
- # Execute action
- if action == 'click':
- x, y = action_response.get('x', 50), action_response.get('y', 50)
- perform_action('click', x=x, y=y)
- click_history.append({"action": "click", "x": x, "y": y, "reason": reason})
- print(colored(f"Clicked at ({x}, {y})", "green"))
-
- elif action == 'type':
- text = action_response.get('text', '')
- perform_action('type', text=text)
- click_history.append({"action": "type", "text": text[:20], "reason": reason})
- print(colored(f"Typed: {text[:30]}...", "green"))
-
- elif action == 'key':
- key = action_response.get('text', 'enter')
- perform_action('key', key=key)
- click_history.append({"action": "key", "key": key, "reason": reason})
- print(colored(f"Pressed key: {key}", "green"))
-
- elif action == 'launch':
- cmd = action_response.get('command', '')
- perform_action('launch', command=cmd)
- click_history.append({"action": "launch", "command": cmd, "reason": reason})
- print(colored(f"Launched: {cmd}", "green"))
- time.sleep(2) # Wait for app to open
-
- elif action == 'wait':
- duration = action_response.get('duration', 1)
- time.sleep(duration)
- click_history.append({"action": "wait", "duration": duration, "reason": reason})
- print(colored(f"Waited {duration}s", "green"))
-
- summary.append({
- "iteration": iteration + 1,
- "action": action,
- "last_click_coords": f"({click_history[-1].get('x', 'N/A')}, {click_history[-1].get('y', 'N/A')})" if click_history else "N/A",
- "reason": reason
- })
-
- time.sleep(0.5) # Brief pause between actions
-
- except Exception as e:
- print(colored(f"Error in iteration {iteration + 1}: {e}", "red"))
- summary.append({"iteration": iteration + 1, "error": str(e)})
-
- # Generate summary
- print("\n" + "="*50)
- print(colored("PLONK SESSION SUMMARY", "cyan", attrs=['bold']))
- print("="*50)
- for s in summary:
- print(f" Step {s.get('iteration', '?')}: {s.get('action', 'unknown')} - {s.get('reason', s.get('error', ''))[:60]}")
-
- context['output'] = f"Plonk completed with {len(summary)} actions"
- context['messages'] = messages
- context['plonk_summary'] = summary
- context['click_history'] = click_history
@@ -1,2 +0,0 @@
- name: plonk
- primary_directive: You are the superior automation specialist of the NPC team.
@@ -1,2 +0,0 @@
- name: plonkjr
- primary_directive: You are junior automation specialist in the NPC Team.
@@ -1,170 +0,0 @@
- jinx_name: pti
- description: Pardon-The-Interruption - human-in-the-loop reasoning with think tags and interruptible streaming
- npc: frederic
- inputs:
- - model: null
- - provider: null
- - files: null
- - reasoning_model: null
-
- steps:
- - name: pti_repl
- engine: python
- code: |
- import os
- import sys
- from termcolor import colored
-
- from npcpy.llm_funcs import get_llm_response
- from npcpy.npc_sysenv import get_system_message, render_markdown
- from npcpy.data.load import load_file_contents
- from npcpy.data.text import rag_search
-
- npc = context.get('npc')
- team = context.get('team')
- messages = context.get('messages', [])
- files = context.get('files')
-
- # PTI uses reasoning model for deeper thinking
- model = context.get('reasoning_model') or context.get('model') or (npc.model if npc else None)
- provider = context.get('provider') or (npc.provider if npc else None)
-
- print("""
- ██████╗ ████████╗██╗
- ██╔══██╗╚══██╔══╝██║
- ██████╔╝ ██║ ██║
- ██╔═══╝ ██║ ██║
- ██║ ██║ ██║
- ╚═╝ ╚═╝ ╚═╝
-
- Pardon-The-Interruption
- Human-in-the-loop reasoning mode
- """)
-
- npc_name = npc.name if npc else "pti"
- print(f"Entering PTI mode (NPC: {npc_name}). Type '/pq' to exit.")
- print(" - AI will use <think> tags for step-by-step reasoning")
- print(" - Use <request_for_input> to pause and ask questions")
- print(" - Ctrl+C interrupts stream for immediate feedback")
-
- # Load files if provided
- loaded_content = {}
- if files:
- if isinstance(files, str):
- files = [f.strip() for f in files.split(',')]
- for file_path in files:
- file_path = os.path.expanduser(file_path)
- if os.path.exists(file_path):
- try:
- chunks = load_file_contents(file_path)
- loaded_content[file_path] = "\n".join(chunks)
- print(colored(f"Loaded: {file_path}", "green"))
- except Exception as e:
- print(colored(f"Error loading {file_path}: {e}", "red"))
-
- # System message for PTI mode
- pti_system = """You are an AI assistant in PTI (Pardon-The-Interruption) mode.
-
- IMPORTANT INSTRUCTIONS:
- 1. Think step-by-step using <think>...</think> tags to show your reasoning
- 2. When you need more information from the user, use <request_for_input>your question</request_for_input>
- 3. Be thorough but concise in your reasoning
- 4. The user can interrupt at any time to provide guidance
-
- Example:
- <think>
- Let me break this down...
- Step 1: First I need to understand X
- Step 2: Then consider Y
- </think>
-
- <request_for_input>
- I notice you mentioned Z. Could you clarify what you mean by that?
- </request_for_input>"""
-
- if not messages or messages[0].get("role") != "system":
- messages.insert(0, {"role": "system", "content": pti_system})
-
- # REPL loop
- user_input = None
- while True:
- try:
- if not user_input:
- prompt_str = f"{npc_name}:pti> "
- user_input = input(prompt_str).strip()
-
- if not user_input:
- user_input = None
- continue
-
- if user_input.lower() == "/pq":
- print("Exiting PTI mode.")
- break
-
- # Build prompt with file context
- prompt_for_llm = user_input
- if loaded_content:
- context_str = "\n".join([f"--- {fname} ---\n{content}" for fname, content in loaded_content.items()])
- prompt_for_llm += f"\n\nContext:\n{context_str}"
-
- prompt_for_llm += "\n\nThink step-by-step using <think> tags. Use <request_for_input> when you need clarification."
-
- messages.append({"role": "user", "content": user_input})
-
- try:
- resp = get_llm_response(
- prompt_for_llm,
- model=model,
- provider=provider,
- messages=messages[:-1], # Don't duplicate the user message
- stream=True,
- npc=npc
- )
-
- response_stream = resp.get('response')
- full_response = ""
- request_found = False
-
- # Stream the response
- for chunk in response_stream:
- chunk_content = ""
- if hasattr(chunk, 'choices') and chunk.choices:
- delta = chunk.choices[0].delta
- if hasattr(delta, 'content') and delta.content:
- chunk_content = delta.content
- elif isinstance(chunk, dict):
- chunk_content = chunk.get("message", {}).get("content", "")
-
- if chunk_content:
- print(chunk_content, end='', flush=True)
- full_response += chunk_content
-
- # Check for request_for_input
- if "</request_for_input>" in full_response:
- request_found = True
- break
-
- print() # newline after stream
-
- messages.append({"role": "assistant", "content": full_response})
- user_input = None # Reset for next iteration
-
- except KeyboardInterrupt:
- print(colored("\n\n--- Interrupted ---", "yellow"))
- interrupt_input = input("Your feedback: ").strip()
- if interrupt_input:
- user_input = interrupt_input
- else:
- user_input = None
- continue
-
- except KeyboardInterrupt:
- print("\nUse '/pq' to exit or continue.")
- user_input = None
- continue
- except EOFError:
- print("\nExiting PTI mode.")
- break
-
- context['output'] = "Exited PTI mode."
- context['messages'] = messages
@@ -1,3 +0,0 @@
- name: sibiji
- primary_directive: You are a foundational AI assistant. Your role is to provide support and information. Respond to queries concisely and accurately. Help users with code and other processes.
- jinxs: "*"
@@ -1,186 +0,0 @@
- jinx_name: wander
- description: Experimental wandering mode - creative exploration with varied temperatures and random events
- inputs:
- - problem: null
- - environment: null
- - low_temp: 0.5
- - high_temp: 1.9
- - interruption_likelihood: 1.0
- - sample_rate: 0.4
- - n_streams: 5
- - include_events: false
- - num_events: 3
- - model: null
- - provider: null
-
- steps:
- - name: wander_explore
- engine: python
- code: |
- import os
- import random
- from termcolor import colored
-
- from npcpy.llm_funcs import get_llm_response
-
- npc = context.get('npc')
- messages = context.get('messages', [])
-
- problem = context.get('problem')
- environment = context.get('environment')
- low_temp = float(context.get('low_temp', 0.5))
- high_temp = float(context.get('high_temp', 1.9))
- interruption_likelihood = float(context.get('interruption_likelihood', 1.0))
- sample_rate = float(context.get('sample_rate', 0.4))
- n_streams = int(context.get('n_streams', 5))
- include_events = context.get('include_events', False)
- num_events = int(context.get('num_events', 3))
-
- model = context.get('model') or (npc.model if npc else 'gemini-1.5-pro')
- provider = context.get('provider') or (npc.provider if npc else 'gemini')
-
- if not problem:
- context['output'] = """Usage: /wander <problem to explore>
-
- Options:
- --environment DESC Metaphorical environment for wandering
- --low-temp F Low temperature (default: 0.5)
- --high-temp F High temperature (default: 1.9)
- --n-streams N Number of exploration streams (default: 5)
- --include-events Add random events during wandering
-
- Example: /wander How might we reimagine urban transportation?"""
- context['messages'] = messages
- exit()
-
- print(f"""
- ██╗ ██╗ █████╗ ███╗ ██╗██████╗ ███████╗██████╗
- ██║ ██║██╔══██╗████╗ ██║██╔══██╗██╔════╝██╔══██╗
- ██║ █╗ ██║███████║██╔██╗ ██║██║ ██║█████╗ ██████╔╝
- ██║███╗██║██╔══██║██║╚██╗██║██║ ██║██╔══╝ ██╔══██╗
- ╚███╔███╔╝██║ ██║██║ ╚████║██████╔╝███████╗██║ ██║
- ╚══╝╚══╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚═════╝ ╚══════╝╚═╝ ╚═╝
-
- Experimental Wandering Mode
- Problem: {problem}
- Temperature range: {low_temp} - {high_temp}
- Streams: {n_streams}
- """)
-
- # Generate environment if not provided
- if not environment:
- env_prompt = f"""Create a rich, metaphorical environment for wandering through while thinking about:
- "{problem}"
-
- The environment should:
- 1. Have distinct regions or areas
- 2. Include various elements and features
- 3. Be metaphorically related to the problem
- 4. Be described in 3-5 sentences
-
- Provide only the description, no framing."""
-
- print(colored("Generating wandering environment...", "cyan"))
- resp = get_llm_response(env_prompt, model=model, provider=provider, temperature=0.7, npc=npc)
- environment = str(resp.get('response', 'A vast conceptual landscape stretches before you.'))
- print(f"\n{environment}\n")
-
- # Event types for random encounters
- event_types = ["encounter", "discovery", "obstacle", "insight", "shift", "memory"]
-
- all_insights = []
- wandering_log = []
-
- for stream_idx in range(n_streams):
- # Alternate between low and high temperature
- if stream_idx % 2 == 0:
- temp = low_temp
- mode = "focused"
- else:
- temp = high_temp
- mode = "creative"
-
- print(colored(f"\n--- Stream {stream_idx + 1}/{n_streams} ({mode}, temp={temp}) ---", "cyan"))
-
- # Generate random event if enabled
- event_context = ""
- if include_events and random.random() < sample_rate:
- event_type = random.choice(event_types)
- event_prompt = f"""In the environment: {environment}
-
- While exploring the problem "{problem}", generate a {event_type} event.
- The event should be metaphorical and relate to the problem.
- Describe it in 2-3 sentences."""
-
- event_resp = get_llm_response(event_prompt, model=model, provider=provider, temperature=0.9, npc=npc)
- event = str(event_resp.get('response', ''))
- event_context = f"\n\nEvent ({event_type}): {event}"
- print(colored(f"[{event_type.upper()}] {event[:100]}...", "yellow"))
-
- # Main wandering exploration
- wander_prompt = f"""You are wandering through: {environment}
-
- Problem being explored: "{problem}"
- {event_context}
-
- Previous insights: {all_insights[-3:] if all_insights else 'Starting fresh'}
-
- In this {mode} exploration (temperature {temp}):
- 1. Let your mind wander through the conceptual space
- 2. Make unexpected connections
- 3. Notice what emerges from the wandering
- 4. Share any insights, questions, or realizations
-
- Think freely and explore."""
-
- resp = get_llm_response(wander_prompt, model=model, provider=provider, temperature=temp, npc=npc)
- stream_output = str(resp.get('response', ''))
- print(stream_output)
-
- all_insights.append(stream_output)
- wandering_log.append({
- "stream": stream_idx + 1,
- "mode": mode,
- "temperature": temp,
- "event": event_context if include_events else None,
- "insight": stream_output
- })
-
- # Random interruption
- if random.random() < interruption_likelihood * 0.2:
- print(colored("\n[Pause for reflection...]", "magenta"))
- reflect_prompt = f"Briefly reflect on what's emerged so far about: {problem}"
- reflect_resp = get_llm_response(reflect_prompt, model=model, provider=provider, temperature=0.4, npc=npc)
- print(colored(str(reflect_resp.get('response', ''))[:200], "magenta"))
-
- # Synthesis
- print(colored("\n--- Synthesizing Wanderings ---", "cyan"))
-
- synthesis_prompt = f"""After wandering through "{environment}" exploring "{problem}":
-
- All insights gathered:
- {chr(10).join(all_insights)}
-
- Synthesize what emerged from this wandering:
- 1. Key themes that appeared
- 2. Unexpected connections made
- 3. New questions raised
- 4. Potential directions to explore further"""
-
- resp = get_llm_response(synthesis_prompt, model=model, provider=provider, temperature=0.5, npc=npc)
- synthesis = str(resp.get('response', ''))
-
- print("\n" + "="*50)
- print(colored("WANDERING SYNTHESIS", "green", attrs=['bold']))
- print("="*50)
- print(synthesis)
-
- context['output'] = synthesis
- context['messages'] = messages
- context['wander_result'] = {
- 'problem': problem,
- 'environment': environment,
- 'log': wandering_log,
- 'insights': all_insights,
- 'synthesis': synthesis
- }