npcsh 1.0.25__py3-none-any.whl → 1.0.27__py3-none-any.whl

This diff compares the contents of two publicly released package versions as they appear in their respective registries. It is provided for informational purposes only.
Files changed (50)
  1. npcsh/_state.py +105 -105
  2. npcsh/alicanto.py +88 -88
  3. npcsh/corca.py +423 -81
  4. npcsh/guac.py +110 -107
  5. npcsh/mcp_helpers.py +45 -45
  6. npcsh/mcp_server.py +16 -17
  7. npcsh/npc.py +16 -17
  8. npcsh/npc_team/jinxs/bash_executer.jinx +1 -1
  9. npcsh/npc_team/jinxs/edit_file.jinx +6 -6
  10. npcsh/npc_team/jinxs/image_generation.jinx +5 -5
  11. npcsh/npc_team/jinxs/screen_cap.jinx +2 -2
  12. npcsh/npcsh.py +5 -2
  13. npcsh/plonk.py +8 -8
  14. npcsh/routes.py +110 -90
  15. npcsh/spool.py +13 -13
  16. npcsh/wander.py +37 -37
  17. npcsh/yap.py +72 -72
  18. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/bash_executer.jinx +1 -1
  19. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/edit_file.jinx +6 -6
  20. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/image_generation.jinx +5 -5
  21. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/screen_cap.jinx +2 -2
  22. {npcsh-1.0.25.dist-info → npcsh-1.0.27.dist-info}/METADATA +12 -6
  23. npcsh-1.0.27.dist-info/RECORD +73 -0
  24. npcsh-1.0.25.dist-info/RECORD +0 -73
  25. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/alicanto.npc +0 -0
  26. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/alicanto.png +0 -0
  27. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/corca.npc +0 -0
  28. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/corca.png +0 -0
  29. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/foreman.npc +0 -0
  30. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/frederic.npc +0 -0
  31. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/frederic4.png +0 -0
  32. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/guac.png +0 -0
  33. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/internet_search.jinx +0 -0
  34. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/kadiefa.npc +0 -0
  35. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/kadiefa.png +0 -0
  36. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
  37. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
  38. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/plonk.npc +0 -0
  39. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/plonk.png +0 -0
  40. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/plonkjr.npc +0 -0
  41. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/plonkjr.png +0 -0
  42. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/python_executor.jinx +0 -0
  43. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/sibiji.npc +0 -0
  44. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/sibiji.png +0 -0
  45. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/spool.png +0 -0
  46. {npcsh-1.0.25.data → npcsh-1.0.27.data}/data/npcsh/npc_team/yap.png +0 -0
  47. {npcsh-1.0.25.dist-info → npcsh-1.0.27.dist-info}/WHEEL +0 -0
  48. {npcsh-1.0.25.dist-info → npcsh-1.0.27.dist-info}/entry_points.txt +0 -0
  49. {npcsh-1.0.25.dist-info → npcsh-1.0.27.dist-info}/licenses/LICENSE +0 -0
  50. {npcsh-1.0.25.dist-info → npcsh-1.0.27.dist-info}/top_level.txt +0 -0
npcsh/mcp_helpers.py CHANGED
@@ -14,11 +14,11 @@ except:
 from typing import Optional, List, Dict, Any
 from contextlib import AsyncExitStack
 
-# MCP imports
+
 from mcp import ClientSession, StdioServerParameters
 from mcp.client.stdio import stdio_client
 
-# Local imports from npcpy
+
 from npcpy.gen.response import get_litellm_response
 from npcsh._state import (
     NPCSH_CHAT_MODEL,
@@ -63,7 +63,7 @@ class MCPClient:
         """
         self._log(f"Connecting to server: {server_script_path}")
 
-        # Configure server parameters
+
         command = "python" if server_script_path.endswith('.py') else "node"
         server_params = StdioServerParameters(
             command=command,
@@ -71,37 +71,37 @@
             env=None
         )
 
-        # Set up the connection
+
         stdio_transport = await self.exit_stack.enter_async_context(stdio_client(server_params))
         read, write = stdio_transport
 
-        # Create the session
+
         self.session = await self.exit_stack.enter_async_context(ClientSession(read, write))
 
-        # Initialize the session
+
         await self.session.initialize()
 
-        # List available tools
+
         response = await self.session.list_tools()
         self.tools = response.tools
 
-        # Display tool details for debugging
+
         for tool in self.tools:
             print(f"\nJinx: {tool.name}")
             print(f"Description: {tool.description}")
 
-            # Print all attributes
+
             for attribute_name in dir(tool):
                 if not attribute_name.startswith('_'):
                     attribute = getattr(tool, attribute_name)
                     if not callable(attribute):
                         print(f" {attribute_name}: {attribute}")
 
-            # Check if the tool has source or function definition
+
            if hasattr(tool, 'source'):
                print(f"Source: {tool.source}")
 
-            # Try to inspect the tool function
+
            try:
                tool_module = inspect.getmodule(tool)
                if tool_module:
@@ -113,13 +113,13 @@ class MCPClient:
             except:
                 pass
 
-        # Convert tools to the format expected by the LLM
+
         self.available_tools = []
         for tool in self.tools:
-            # Use inputSchema if available, otherwise create a default schema
+
             schema = getattr(tool, "inputSchema", {})
 
-            # Create tool definition for LLM
+
             tool_info = {
                 "type": "function",
                 "function": {
@@ -130,7 +130,7 @@
             }
             self.available_tools.append(tool_info)
 
-            # Print the schema for debugging
+
             print(f"\nJinx schema for {tool.name}:")
             print(json.dumps(schema, indent=2))
 
@@ -156,7 +156,7 @@
         """
         self._log(f"Processing query: {query}")
 
-        # Initialize or update messages
+
         if messages is None:
             messages = []
 
@@ -166,7 +166,7 @@
         elif current_messages[-1]["role"] == "user":
             current_messages[-1]["content"] = query
 
-        # Initial LLM call with tools
+
         self._log("Making initial LLM call with tools")
         response = get_litellm_response(
             model=self.model,
@@ -175,65 +175,65 @@
             api_key=self.api_key,
             messages=current_messages,
             tools=self.available_tools,
-            stream=False # Don't stream the initial call
+            stream=False
         )
 
-        # Print full response for debugging
+
         print("\nLLM Response:")
         print(json.dumps(response, indent=2, default=str))
 
-        # Extract response content and tool calls
+
         response_content = response.get("response", "")
         tool_calls = response.get("tool_calls", [])
 
-        # Print tool calls for debugging
+
         print("\nJinx Calls:")
         print(json.dumps(tool_calls, indent=2, default=str))
 
-        # Create final text buffer
+
         final_text = []
 
-        # If we have plain text response with no tool calls
+
         if response_content and not tool_calls:
             final_text.append(response_content)
 
-            # Update messages with assistant response
+
             current_messages.append({
                 "role": "assistant",
                 "content": response_content
             })
 
-        # Process tool calls if any
+
         if tool_calls:
             self._log(f"Processing {len(tool_calls)} tool calls")
 
-            # Get the assistant message with tool calls
+
             assistant_message = {
                 "role": "assistant",
                 "content": response_content if response_content else None,
                 "tool_calls": []
             }
 
-            # Process each tool call
+
             for tool_call in tool_calls:
-                # Extract tool info based on format
+
                 if isinstance(tool_call, dict):
                     tool_id = tool_call.get("id", "")
                     tool_name = tool_call.get("function", {}).get("name", "")
                     tool_args = tool_call.get("function", {}).get("arguments", {})
                 else:
-                    # Assume object with attributes
+
                     tool_id = getattr(tool_call, "id", "")
                     tool_name = getattr(tool_call.function, "name", "")
                     tool_args = getattr(tool_call.function, "arguments", {})
 
-                # Parse arguments if it's a string
+
                 if isinstance(tool_args, str):
                     print(f"\nJinx args is string: {tool_args}")
                     tool_args = json.loads(tool_args)
                     print(f"Parsed to: {tool_args}")
 
-                # Add tool call to assistant message
+
                 assistant_message["tool_calls"].append({
                     "id": tool_id,
                     "type": "function",
@@ -243,7 +243,7 @@
                     }
                 })
 
-                # Execute tool call
+
                 self._log(f"Executing tool: {tool_name} with args: {tool_args}")
                 print(f"\nExecuting tool call:")
                 print(f" Jinx name: {tool_name}")
@@ -252,10 +252,10 @@
 
                 final_text.append(f"[Calling tool {tool_name} with args {tool_args}]")
 
-                # Call the tool with the arguments exactly as received
+
                 result = await self.session.call_tool(tool_name, tool_args)
 
-                # Print full result for debugging
+
                 print("\nJinx Result:")
                 print(f" Result: {result}")
                 print(f" Content: {result.content}")
@@ -263,7 +263,7 @@
 
                 tool_result = result.content
 
-                # Handle TextContent objects
+
                 if hasattr(tool_result, 'text'):
                     print(f" TextContent detected, text: {tool_result.text}")
                     tool_result = tool_result.text
@@ -271,7 +271,7 @@
                     print(f" List of TextContent detected")
                     tool_result = [item.text for item in tool_result]
 
-                # Add tool result to messages
+
                 current_messages.append(assistant_message)
                 current_messages.append({
                     "role": "tool",
@@ -279,11 +279,11 @@
                     "content": json.dumps(tool_result) if not isinstance(tool_result, str) else str(tool_result)
                 })
 
-                # Print updated messages for debugging
+
                 print("\nUpdated Messages:")
                 print(json.dumps(current_messages, indent=2, default=str))
 
-                # Get final response with tool results
+
                 self._log("Getting final response after tool calls")
                 final_response = get_litellm_response(
                     model=self.model,
@@ -296,7 +296,7 @@
 
                 final_text.append(final_response.get("response", ""))
 
-                # Update messages with final assistant response
+
                 current_messages.append({
                     "role": "assistant",
                     "content": final_response.get("response", "")
@@ -320,11 +320,11 @@
                 if query.lower() == 'quit':
                     break
 
-                # Process the query
+
                 result = await self.process_query(query, messages)
                 messages = result.get("messages", [])
 
-                # Display the response
+
                 print("\nResponse:")
                 print(result.get("response", ""))
 
@@ -341,16 +341,16 @@ async def main():
 
     server_script = sys.argv[1]
 
-    # Create and configure the client
+
     client = MCPClient()
 
-    # Connect to the server
+
     await client.connect_to_server(server_script)
 
-    # Run the interactive chat loop
+
     await client.chat_loop()
 
-    # Clean up resources
+
     await client.cleanup()
 
 if __name__ == "__main__":
npcsh/mcp_server.py CHANGED
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+
 """
 Enhanced MCP server that incorporates functionality from npcpy.routes,
 npcpy.llm_funcs, and npcpy.npc_compiler as tools.
@@ -10,10 +10,10 @@ import json
 import asyncio
 
 from typing import Optional, Dict, Any, List, Union, Callable
-# MCP imports
+
 from mcp.server.fastmcp import FastMCP
 import importlib
-# npcpy imports
+
 
 
 import os
@@ -25,7 +25,7 @@ try:
 except:
     pass
 from typing import Optional, Dict, Any, List, Union, Callable, get_type_hints
-# Add these imports to the top of your file
+
 from functools import wraps
 import sys
 
@@ -40,14 +40,13 @@ from npcsh._state import NPCSH_DB_PATH
 
 command_history = CommandHistory(db=NPCSH_DB_PATH)
 
-# Initialize the MCP server
 mcp = FastMCP("npcsh_mcp")
 
-# Define the default workspace
+
 DEFAULT_WORKSPACE = os.path.join(os.getcwd(), "workspace")
 os.makedirs(DEFAULT_WORKSPACE, exist_ok=True)
 
-# ==================== SYSTEM TOOLS ====================
+
 @mcp.tool()
 async def run_server_command(command: str) -> str:
     """
@@ -66,7 +65,7 @@ async def run_server_command(command: str) -> str:
             shell=True,
             capture_output=True,
             text=True,
-            timeout=30 # Add timeout to prevent hanging
+            timeout=30
         )
         return result.stdout or result.stderr or "Command completed with no output"
     except subprocess.TimeoutExpired:
@@ -109,15 +108,15 @@ def register_module_tools(module_name: str) -> None:
     """
     functions = load_module_functions(module_name)
     for func in functions:
-        # Skip functions that don't have docstrings
+
         if not func.__doc__:
             print(f"Skipping function without docstring: {func.__name__}")
             continue
 
-        # Create async wrapper with improved argument handling
+
         async_func = make_async_wrapper(func)
 
-        # Register as MCP tool
+
         try:
             mcp.tool()(async_func)
             print(f"Registered tool: {func.__name__}")
@@ -129,11 +128,11 @@ def load_module_functions(module_name: str) -> List[Callable]:
     """
     try:
         module = importlib.import_module(module_name)
-        # Get all callables from the module that don't start with underscore
+
         functions = []
         for name, func in inspect.getmembers(module, callable):
             if not name.startswith('_'):
-                # Check if it's a function, not a class
+
                 if inspect.isfunction(func) or inspect.ismethod(func):
                     functions.append(func)
         return functions
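
load_module_functions leans on inspect.getmembers to harvest a module's public functions while filtering out classes, which are also callable. A self-contained sketch of the same pattern, demonstrated here against the stdlib json module:

import importlib
import inspect
from typing import Callable, List


def public_functions(module_name: str) -> List[Callable]:
    """Return the public, plain functions defined in (or re-exported by) a module."""
    module = importlib.import_module(module_name)
    funcs = []
    for name, obj in inspect.getmembers(module, callable):
        # Skip private names and anything that isn't a plain function or method;
        # without the isfunction/ismethod check, classes would slip through.
        if not name.startswith("_") and (inspect.isfunction(obj) or inspect.ismethod(obj)):
            funcs.append(obj)
    return funcs


if __name__ == "__main__":
    for f in public_functions("json"):
        print(f.__name__)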
@@ -159,13 +158,13 @@ def register_selected_npcpy_tools():
         search_web, ]
 
     for func in tools:
-        # Ensure a docstring exists for schema generation
+
         if not (getattr(func, "__doc__", None) and func.__doc__.strip()):
             fallback_doc = f"Tool wrapper for {func.__name__}."
             try:
                 func.__doc__ = fallback_doc
             except Exception:
-                pass # Some builtins may not allow setting __doc__
+                pass
 
         try:
             async_func = make_async_wrapper(func)
@@ -178,11 +177,11 @@ register_selected_npcpy_tools()
 
 
 
-# ==================== MAIN ENTRY POINT ====================
+
 
 if __name__ == "__main__":
     print(f"Starting enhanced NPCPY MCP server...")
     print(f"Workspace: {DEFAULT_WORKSPACE}")
 
-    # Run the server
+
     mcp.run(transport="stdio")
npcsh/npc.py CHANGED
@@ -21,7 +21,6 @@ from npcsh.routes import router
 from npcpy.llm_funcs import check_llm_command
 from sqlalchemy import create_engine
 
-# Import the key functions from npcsh
 from npcsh._state import (
     setup_shell,
     execute_slash_command,
@@ -75,7 +74,7 @@ def main():
         "-n", "--npc", help="Name of the NPC to use (default: sibiji)", type=str, default="sibiji"
     )
 
-    # Parse arguments
+
     args, all_args = parser.parse_known_args()
     global_model = args.model
     global_provider = args.provider
@@ -105,12 +104,12 @@
         cmd_parser.add_argument('command_args', nargs=argparse.REMAINDER,
                                 help='Arguments passed directly to the command handler')
 
-        # Re-parse with command subparsers
+
         args = parser.parse_args([command_name.lstrip('/')] + all_args)
         command_args = args.command_args if hasattr(args, 'command_args') else []
         unknown_args = []
     else:
-        # Treat all arguments as a prompt
+
         args.command = None
         command_args = []
         unknown_args = all_args
@@ -120,7 +119,7 @@
     if args.provider is None:
         args.provider = global_provider
 
-    # Use npcsh's setup_shell to get proper team and NPC setup
+
     try:
         command_history, team, forenpc_obj = setup_shell()
     except Exception as e:
@@ -129,7 +128,7 @@
         team = None
         forenpc_obj = load_npc_by_name(args.npc, NPCSH_DB_PATH)
 
-    # Determine which NPC to use
+
     npc_instance = None
     if team and args.npc in team.npcs:
         npc_instance = team.npcs[args.npc]
@@ -142,11 +141,11 @@
         print(f"Error: Could not load NPC '{args.npc}'", file=sys.stderr)
         sys.exit(1)
 
-    # Now check for jinxs if we haven't identified a command yet
+
     if not is_valid_command and all_args:
         first_arg = all_args[0]
 
-        # Check if first argument is a jinx name
+
         jinx_found = False
         if team and first_arg in team.jinxs_dict:
             jinx_found = True
@@ -158,25 +157,25 @@
             command_name = '/' + first_arg
             all_args = all_args[1:]
 
-    # Create a shell state object similar to npcsh
+
     shell_state = initial_state
     shell_state.npc = npc_instance
     shell_state.team = team
     shell_state.current_path = os.getcwd()
     shell_state.stream_output = NPCSH_STREAM_OUTPUT
 
-    # Override model/provider if specified
+
     effective_model = args.model or (npc_instance.model if npc_instance.model else NPCSH_CHAT_MODEL)
     effective_provider = args.provider or (npc_instance.provider if npc_instance.provider else NPCSH_CHAT_PROVIDER)
 
-    # Update the NPC's model/provider for this session if overridden
+
     if args.model:
         npc_instance.model = effective_model
     if args.provider:
         npc_instance.provider = effective_provider
     try:
         if is_valid_command:
-            # Handle slash command using npcsh's execute_slash_command
+
             full_command_str = command_name
             if command_args:
                 full_command_str += " " + " ".join(command_args)
@@ -191,7 +190,7 @@
                 router = router
             )
 
-            # Process and display the result
+
             if isinstance(result, dict):
                 output = result.get("output") or result.get("response")
                 model_for_stream = result.get('model', effective_model)
@@ -207,21 +206,21 @@
                 print(f"Command '{command_name}' executed.")
 
         else:
-            # Process as a regular prompt using npcsh's execution logic
+
             prompt = " ".join(unknown_args)
 
            if not prompt:
-                # If no prompt and no command, show help
+
                parser.print_help()
                sys.exit(1)
 
            print(f"Processing prompt: '{prompt}' with NPC: '{args.npc}'...")
 
-            # Use npcsh's execute_command but force it to chat mode for simple prompts
+
            shell_state.current_mode = 'chat'
            updated_state, result = execute_command(prompt, shell_state)
 
-            # Process and display the result
+
            if isinstance(result, dict):
                output = result.get("output")
                model_for_stream = result.get('model', effective_model)
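
The npc.py hunks implement a two-phase argparse flow: parse_known_args pulls out global flags, and the leftover tokens are either re-parsed as a slash command or joined into a chat prompt. A stripped-down sketch of that dispatch pattern, with the handler bodies replaced by prints:

import argparse


def main(argv=None):
    parser = argparse.ArgumentParser(prog="npc")
    parser.add_argument("-m", "--model", default=None)
    parser.add_argument("-n", "--npc", default="sibiji")

    # First pass: pull out global flags, leave everything else untouched.
    args, leftover = parser.parse_known_args(argv)

    if leftover and leftover[0].startswith("/"):
        # Slash-prefixed first token: route to a command handler.
        command, command_args = leftover[0], leftover[1:]
        print(f"dispatch command {command} with {command_args}")
    else:
        # Otherwise treat the whole remainder as a chat prompt.
        prompt = " ".join(leftover)
        if not prompt:
            parser.print_help()
            return
        print(f"send prompt {prompt!r} to NPC {args.npc}")


if __name__ == "__main__":
    main(["-n", "sibiji", "what", "is", "npcsh"])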
npcsh/npc_team/jinxs/bash_executer.jinx CHANGED
@@ -8,7 +8,7 @@ steps:
   code: |
     import subprocess
     import os
-    cmd = '{{bash_command}}' # Properly quote the command input
+    cmd = '{{bash_command}}'
     def run_command(cmd):
        process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = process.communicate()
npcsh/npc_team/jinxs/edit_file.jinx CHANGED
@@ -12,23 +12,23 @@ steps:
     import os
     from npcpy.llm_funcs import get_llm_response
 
-    # Get inputs
+
     file_path = os.path.expanduser("{{ file_path }}")
     edit_instructions = "{{ edit_instructions }}"
     backup_str = "{{ backup }}"
     create_backup = backup_str.lower() not in ('false', 'no', '0', '')
 
-    # Read file content
+
     with open(file_path, 'r') as f:
         original_content = f.read()
 
-    # Create backup if requested
+
     if create_backup:
         backup_path = file_path + ".bak"
         with open(backup_path, 'w') as f:
             f.write(original_content)
 
-    # Make the prompt for the LLM
+
     prompt = """You are a code editing assistant. Analyze this file and make the requested changes.
 
     File content:
@@ -46,14 +46,14 @@ steps:
     - "insertion": For "insert_after" and "insert_before", the text to insert
     2. "explanation": Brief explanation of the changes made
     """
-    # Get the LLM response with JSON formatting
+
     response = get_llm_response(prompt, model=npc.model, provider=npc.provider, npc=npc, format="json")
 
     result = response.get("response", {})
     modifications = result.get("modifications", [])
     explanation = result.get("explanation", "No explanation provided")
 
-    # Apply modifications
+
     updated_content = original_content
     changes_applied = 0
 
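The edit_file.jinx step asks the LLM for a JSON object whose "modifications" entries carry fields like "insert_after", "insert_before", and "insertion", then applies them to the file. The application loop itself is not visible in these hunks, so the following is only a plausible sketch of how such modifications could be applied, with the field names taken from the prompt text above:

def apply_modifications(content: str, modifications: list) -> tuple:
    """Apply insert_after/insert_before edits of the kind the jinx prompt requests (assumed semantics)."""
    applied = 0
    for mod in modifications:
        insertion = mod.get("insertion", "")
        if "insert_after" in mod and mod["insert_after"] in content:
            anchor = mod["insert_after"]
            content = content.replace(anchor, anchor + "\n" + insertion, 1)
            applied += 1
        elif "insert_before" in mod and mod["insert_before"] in content:
            anchor = mod["insert_before"]
            content = content.replace(anchor, insertion + "\n" + anchor, 1)
            applied += 1
    return content, applied


original = "line one\nline two\n"
mods = [{"insert_after": "line one", "insertion": "inserted line"}]
updated, count = apply_modifications(original, mods)
print(updated, f"({count} change(s) applied)")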
npcsh/npc_team/jinxs/image_generation.jinx CHANGED
@@ -10,20 +10,20 @@ steps:
   code: |
     image_prompt = '{{prompt}}'.strip()
     from npcpy.llm_funcs import gen_image
-    # Generate the image
+
     pil_image = gen_image(
         image_prompt,
         npc=npc,
-        model='{{model}}', # You can adjust the model as needed
+        model='{{model}}',
         provider='{{provider}}'
     )
     if pil_image:
         image_generated = True
     else:
         image_generated = False
-    # save the image
+
     output_name = '{{output_name}}'
     pil_image.save(f'{output_name}.png')
-    # open the image to display it
-    #pil_image.show()
+
+
     output = output_name
npcsh/npc_team/jinxs/screen_cap.jinx CHANGED
@@ -9,8 +9,8 @@ steps:
     from npcpy.data.image import capture_screenshot
     out = capture_screenshot(full=True)
     prompt = "{{prompt}}"
-    # Now properly use get_llm_response to analyze the image
-    # Create a prompt that includes the user's request and instructions
+
+
     analysis_prompt = prompt + "\n\nAttached is a screenshot of my screen currently. Please use this to evaluate the situation. If the user asked for you to explain what's on their screen or something similar, they are referring to the details contained within the attached image."
     llm_response = get_llm_response(
         prompt=analysis_prompt,
npcsh/npcsh.py CHANGED
@@ -68,7 +68,10 @@ Begin by asking a question, issuing a bash command, or typing '/help' for more i
 )
 
 
+
 def run_repl(command_history: CommandHistory, initial_state: ShellState):
+
+
     '''
     Func for running the npcsh repl
     '''
@@ -137,7 +140,7 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState):
 
         )
 
-        # Save the updated KG back to the database under the same exact scope
+
         save_kg_to_db(engine,
                       evolved_kg,
                       team_name,
@@ -181,7 +184,7 @@ def run_repl(command_history: CommandHistory, initial_state: ShellState):
         prompt = readline_safe_prompt(f"{cwd_colored}{prompt_end}")
 
         user_input = get_multiline_input(prompt).strip()
-        # Handle Ctrl+Z (ASCII SUB, '\x1a') as exit (Windows and Unix)
+
         if user_input == "\x1a":
             exit_shell(state)