npcsh 1.0.20__py3-none-any.whl → 1.0.22__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (62)
  1. npcsh/_state.py +6 -5
  2. npcsh/corca.py +146 -129
  3. npcsh/guac.py +394 -119
  4. npcsh/npc_team/alicanto.npc +2 -0
  5. npcsh/npc_team/alicanto.png +0 -0
  6. npcsh/npc_team/corca.npc +13 -0
  7. npcsh/npc_team/corca.png +0 -0
  8. npcsh/npc_team/foreman.npc +7 -0
  9. npcsh/npc_team/frederic.npc +6 -0
  10. npcsh/npc_team/frederic4.png +0 -0
  11. npcsh/npc_team/guac.png +0 -0
  12. npcsh/npc_team/jinxs/bash_executer.jinx +20 -0
  13. npcsh/npc_team/jinxs/edit_file.jinx +94 -0
  14. npcsh/npc_team/jinxs/image_generation.jinx +29 -0
  15. npcsh/npc_team/jinxs/internet_search.jinx +31 -0
  16. npcsh/npc_team/jinxs/python_executor.jinx +11 -0
  17. npcsh/npc_team/jinxs/screen_cap.jinx +25 -0
  18. npcsh/npc_team/kadiefa.npc +3 -0
  19. npcsh/npc_team/kadiefa.png +0 -0
  20. npcsh/npc_team/npcsh.ctx +18 -0
  21. npcsh/npc_team/npcsh_sibiji.png +0 -0
  22. npcsh/npc_team/plonk.npc +2 -0
  23. npcsh/npc_team/plonk.png +0 -0
  24. npcsh/npc_team/plonkjr.npc +2 -0
  25. npcsh/npc_team/plonkjr.png +0 -0
  26. npcsh/npc_team/sibiji.npc +3 -0
  27. npcsh/npc_team/sibiji.png +0 -0
  28. npcsh/npc_team/spool.png +0 -0
  29. npcsh/npc_team/yap.png +0 -0
  30. npcsh-1.0.22.data/data/npcsh/npc_team/alicanto.npc +2 -0
  31. npcsh-1.0.22.data/data/npcsh/npc_team/alicanto.png +0 -0
  32. npcsh-1.0.22.data/data/npcsh/npc_team/bash_executer.jinx +20 -0
  33. npcsh-1.0.22.data/data/npcsh/npc_team/corca.npc +13 -0
  34. npcsh-1.0.22.data/data/npcsh/npc_team/corca.png +0 -0
  35. npcsh-1.0.22.data/data/npcsh/npc_team/edit_file.jinx +94 -0
  36. npcsh-1.0.22.data/data/npcsh/npc_team/foreman.npc +7 -0
  37. npcsh-1.0.22.data/data/npcsh/npc_team/frederic.npc +6 -0
  38. npcsh-1.0.22.data/data/npcsh/npc_team/frederic4.png +0 -0
  39. npcsh-1.0.22.data/data/npcsh/npc_team/guac.png +0 -0
  40. npcsh-1.0.22.data/data/npcsh/npc_team/image_generation.jinx +29 -0
  41. npcsh-1.0.22.data/data/npcsh/npc_team/internet_search.jinx +31 -0
  42. npcsh-1.0.22.data/data/npcsh/npc_team/kadiefa.npc +3 -0
  43. npcsh-1.0.22.data/data/npcsh/npc_team/kadiefa.png +0 -0
  44. npcsh-1.0.22.data/data/npcsh/npc_team/npcsh.ctx +18 -0
  45. npcsh-1.0.22.data/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
  46. npcsh-1.0.22.data/data/npcsh/npc_team/plonk.npc +2 -0
  47. npcsh-1.0.22.data/data/npcsh/npc_team/plonk.png +0 -0
  48. npcsh-1.0.22.data/data/npcsh/npc_team/plonkjr.npc +2 -0
  49. npcsh-1.0.22.data/data/npcsh/npc_team/plonkjr.png +0 -0
  50. npcsh-1.0.22.data/data/npcsh/npc_team/python_executor.jinx +11 -0
  51. npcsh-1.0.22.data/data/npcsh/npc_team/screen_cap.jinx +25 -0
  52. npcsh-1.0.22.data/data/npcsh/npc_team/sibiji.npc +3 -0
  53. npcsh-1.0.22.data/data/npcsh/npc_team/sibiji.png +0 -0
  54. npcsh-1.0.22.data/data/npcsh/npc_team/spool.png +0 -0
  55. npcsh-1.0.22.data/data/npcsh/npc_team/yap.png +0 -0
  56. {npcsh-1.0.20.dist-info → npcsh-1.0.22.dist-info}/METADATA +8 -3
  57. npcsh-1.0.22.dist-info/RECORD +73 -0
  58. npcsh-1.0.20.dist-info/RECORD +0 -21
  59. {npcsh-1.0.20.dist-info → npcsh-1.0.22.dist-info}/WHEEL +0 -0
  60. {npcsh-1.0.20.dist-info → npcsh-1.0.22.dist-info}/entry_points.txt +0 -0
  61. {npcsh-1.0.20.dist-info → npcsh-1.0.22.dist-info}/licenses/LICENSE +0 -0
  62. {npcsh-1.0.20.dist-info → npcsh-1.0.22.dist-info}/top_level.txt +0 -0
npcsh/_state.py CHANGED
@@ -2198,10 +2198,10 @@ def execute_command(
     active_model = npc_model or state.chat_model
     active_provider = npc_provider or state.chat_provider
     if state.current_mode == 'agent':
-        print('# of parsed commands: ', len(commands))
-        print('Commands:' '\n'.join(commands))
+        #print('# of parsed commands: ', len(commands))
+        #print('Commands:' '\n'.join(commands))
         for i, cmd_segment in enumerate(commands):
-            render_markdown(f'- executing command {i+1}/{len(commands)}')
+            render_markdown(f'- Executing command {i+1}/{len(commands)}')
             is_last_command = (i == len(commands) - 1)
             stream_this_segment = state.stream_output and not is_last_command
             try:
@@ -2353,8 +2353,9 @@ def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
     ctx_path = os.path.join(team_dir, "team.ctx")
     folder_context = input("Enter a short description for this project/team (optional): ").strip()
     team_ctx_data = {
-        "forenpc": default_forenpc_name, "model": forenpc_model,
-        "provider": forenpc_provider, "api_key": None, "api_url": None,
+        "forenpc": default_forenpc_name,
+        "model": forenpc_model,
+        "provider": forenpc_provider,
         "context": folder_context if folder_context else None
     }
     use_jinxs = input("Use global jinxs folder (g) or copy to this project (c)? [g/c, default: g]: ").strip().lower()
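A detail worth spelling out from the first hunk: in agent mode, execute_command walks the parsed command segments one at a time and streams every segment except the last, whose output is rendered whole. A minimal, self-contained sketch of that decision (the `segments` list is a hypothetical stand-in for the parsed `commands`):

```python
# Sketch of the per-segment streaming decision from execute_command.
# Only non-final segments are streamed; the last one is rendered whole.
segments = ["ls -la", "summarize the output"]   # hypothetical parsed commands
stream_output = True                            # stand-in for state.stream_output

for i, cmd_segment in enumerate(segments):
    is_last_command = (i == len(segments) - 1)
    stream_this_segment = stream_output and not is_last_command
    print(f"- Executing command {i + 1}/{len(segments)} "
          f"(streaming={stream_this_segment})")
```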
npcsh/corca.py CHANGED
@@ -163,97 +163,131 @@ class MCPClientNPC:
             pass
         self.session = None

-def execute_command_corca(command: str, state: ShellState, command_history) -> Tuple[ShellState, Any]:
-    mcp_tools = []
-
-    if hasattr(state, 'mcp_client') and state.mcp_client and state.mcp_client.session:
-        mcp_tools = state.mcp_client.available_tools_llm
-    else:
-        cprint("Warning: Corca agent has no tools. No MCP server connected.", "yellow", file=sys.stderr)
-
-    active_npc = state.npc if isinstance(state.npc, NPC) else NPC(name="default")
-
-    # Get the initial response with tools available but don't auto-process
-    response_dict = get_llm_response(
-        prompt=command,
-        model=active_npc.model or state.chat_model,
-        provider=active_npc.provider or state.chat_provider,
-        npc=state.npc,
-        messages=state.messages,
-        tools=mcp_tools,
-        auto_process_tool_calls=False,
-        stream=state.stream_output
-    )
-
-    # Process the streaming response to extract tool calls
-    stream_response = response_dict.get('response')
-    messages = response_dict.get('messages', state.messages)
-
-    # Collect the streamed content and extract tool calls
+
+def process_mcp_stream(stream_response, active_npc):
+    """Process streaming response and extract content + tool calls for both Ollama and OpenAI providers"""
     collected_content = ""
     tool_calls = []
-    current_tool_call = None

-    print("DEBUG: Processing stream response...")
+    interrupted = False

-    if hasattr(stream_response, '__iter__'):
-        # Process the stream to extract content and tool calls
-        for chunk in stream_response:
-            print(f"DEBUG: Chunk type: {type(chunk)}")
-
-            if hasattr(chunk, 'choices') and chunk.choices:
-                delta = chunk.choices[0].delta
-
-                if hasattr(delta, 'content') and delta.content:
-                    collected_content += delta.content
-                    print(delta.content, end='', flush=True)
-
-                if hasattr(delta, 'tool_calls') and delta.tool_calls:
-                    for tool_call_delta in delta.tool_calls:
-                        print(f"DEBUG: Tool call delta: {tool_call_delta}")
+    # Save cursor position at the start
+    sys.stdout.write('\033[s')  # Save cursor position
+    sys.stdout.flush()
+    try:
+        for chunk in stream_response:
+            if hasattr(active_npc, 'provider') and active_npc.provider == "ollama" and 'gpt-oss' not in active_npc.model:
+                if hasattr(chunk, 'message') and hasattr(chunk.message, 'tool_calls') and chunk.message.tool_calls:
+                    for tool_call in chunk.message.tool_calls:
+                        tool_call_data = {
+                            'id': getattr(tool_call, 'id', ''),
+                            'type': 'function',
+                            'function': {
+                                'name': getattr(tool_call.function, 'name', '') if hasattr(tool_call, 'function') else '',
+                                'arguments': getattr(tool_call.function, 'arguments', {}) if hasattr(tool_call, 'function') else {}
+                            }
+                        }

-                        if hasattr(tool_call_delta, 'index'):
-                            idx = tool_call_delta.index
-
-                            # Initialize tool call if needed
-                            while len(tool_calls) <= idx:
-                                tool_calls.append({
-                                    'id': '',
-                                    'type': 'function',
-                                    'function': {
-                                        'name': '',
-                                        'arguments': ''
-                                    }
-                                })
-
-                            # Update tool call data
-                            if hasattr(tool_call_delta, 'id') and tool_call_delta.id:
-                                tool_calls[idx]['id'] = tool_call_delta.id
-
-                            if hasattr(tool_call_delta, 'function'):
-                                if hasattr(tool_call_delta.function, 'name') and tool_call_delta.function.name:
-                                    tool_calls[idx]['function']['name'] = tool_call_delta.function.name
+                        if isinstance(tool_call_data['function']['arguments'], str):
+                            try:
+                                tool_call_data['function']['arguments'] = json.loads(tool_call_data['function']['arguments'])
+                            except json.JSONDecodeError:
+                                tool_call_data['function']['arguments'] = {'raw': tool_call_data['function']['arguments']}
+
+                        tool_calls.append(tool_call_data)
+
+                if hasattr(chunk, 'message') and hasattr(chunk.message, 'content') and chunk.message.content:
+                    collected_content += chunk.message.content
+                    print(chunk.message.content, end='', flush=True)
+
+            # Handle OpenAI-style responses (including gpt-oss)
+            else:
+                if hasattr(chunk, 'choices') and chunk.choices:
+                    delta = chunk.choices[0].delta
+
+                    if hasattr(delta, 'content') and delta.content:
+                        collected_content += delta.content
+                        print(delta.content, end='', flush=True)
+
+                    if hasattr(delta, 'tool_calls') and delta.tool_calls:
+                        for tool_call_delta in delta.tool_calls:
+                            if hasattr(tool_call_delta, 'index'):
+                                idx = tool_call_delta.index

-                                if hasattr(tool_call_delta.function, 'arguments') and tool_call_delta.function.arguments:
-                                    tool_calls[idx]['function']['arguments'] += tool_call_delta.function.arguments
-
-    print(f"\nDEBUG: Final collected_content: {collected_content}")
-    print(f"DEBUG: Final tool_calls: {tool_calls}")
-
-    # Update messages with the assistant response
-    state.messages = messages
-    if collected_content or tool_calls:
-        assistant_message = {"role": "assistant", "content": collected_content}
-        if tool_calls:
-            assistant_message["tool_calls"] = tool_calls
-        state.messages.append(assistant_message)
+                                while len(tool_calls) <= idx:
+                                    tool_calls.append({
+                                        'id': '',
+                                        'type': 'function',
+                                        'function': {'name': '', 'arguments': ''}
+                                    })
+
+                                if hasattr(tool_call_delta, 'id') and tool_call_delta.id:
+                                    tool_calls[idx]['id'] = tool_call_delta.id
+
+                                if hasattr(tool_call_delta, 'function'):
+                                    if hasattr(tool_call_delta.function, 'name') and tool_call_delta.function.name:
+                                        tool_calls[idx]['function']['name'] = tool_call_delta.function.name
+
+                                    if hasattr(tool_call_delta.function, 'arguments') and tool_call_delta.function.arguments:
+                                        tool_calls[idx]['function']['arguments'] += tool_call_delta.function.arguments
+    except KeyboardInterrupt:
+        interrupted = True
+        print('\n⚠️ Stream interrupted by user')
+    if interrupted:
+        str_output += "\n\n[⚠️ Response interrupted by user]"
+    # Always restore cursor position and clear everything after it
+    sys.stdout.write('\033[u')  # Restore cursor position
+    sys.stdout.write('\033[J')  # Clear from cursor down
+    sys.stdout.flush()

-    return state, {
-        "output": collected_content,
-        "tool_calls": tool_calls,
-        "messages": state.messages
-    }
-
+    # Now render the markdown at the restored position
+    render_markdown(collected_content)
+    print('\n')
+    return collected_content, tool_calls
+
+def execute_command_corca(command: str, state: ShellState, command_history) -> Tuple[ShellState, Any]:
+    mcp_tools = []
+
+    if hasattr(state, 'mcp_client') and state.mcp_client and state.mcp_client.session:
+        mcp_tools = state.mcp_client.available_tools_llm
+    else:
+        cprint("Warning: Corca agent has no tools. No MCP server connected.", "yellow", file=sys.stderr)
+
+    active_npc = state.npc if isinstance(state.npc, NPC) else NPC(name="default")
+
+    response_dict = get_llm_response(
+        prompt=command,
+        model=active_npc.model or state.chat_model,
+        provider=active_npc.provider or state.chat_provider,
+        npc=state.npc,
+        messages=state.messages,
+        tools=mcp_tools,
+        auto_process_tool_calls=False,
+        stream=state.stream_output
+    )
+
+    stream_response = response_dict.get('response')
+    messages = response_dict.get('messages', state.messages)
+
+    print("DEBUG: Processing stream response...")
+    collected_content, tool_calls = process_mcp_stream(stream_response, active_npc)
+
+    print(f"\nDEBUG: Final collected_content: {collected_content}")
+    print(f"DEBUG: Final tool_calls: {tool_calls}")
+
+    state.messages = messages
+    if collected_content or tool_calls:
+        assistant_message = {"role": "assistant", "content": collected_content}
+        if tool_calls:
+            assistant_message["tool_calls"] = tool_calls
+        state.messages.append(assistant_message)
+
+    return state, {
+        "output": collected_content,
+        "tool_calls": tool_calls,
+        "messages": state.messages
+    }
+
 def print_corca_welcome_message():
     turq = "\033[38;2;64;224;208m"
     chrome = "\033[38;2;211;211;211m"
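The new process_mcp_stream consolidates two provider chunk formats behind one interface: Ollama chunks carry fully formed tool calls on chunk.message, while OpenAI-style chunks (including gpt-oss) deliver tool calls as indexed deltas whose argument JSON arrives in fragments. One caveat visible in the hunk: the interrupted branch appends to str_output, a name never defined in this function, so a KeyboardInterrupt during streaming would raise a NameError before the cursor is restored. For readers unfamiliar with the delta format, here is a minimal sketch of the same accumulation pattern; the dict-shaped deltas are an illustrative assumption, since real chunks are attribute-style provider objects:

```python
import json

def accumulate_tool_call_deltas(deltas):
    """Merge OpenAI-style streamed tool-call deltas into complete calls,
    mirroring the index/while/append pattern in process_mcp_stream."""
    tool_calls = []
    for d in deltas:
        idx = d["index"]
        # Grow the list until the slot this delta targets exists.
        while len(tool_calls) <= idx:
            tool_calls.append({"id": "", "type": "function",
                               "function": {"name": "", "arguments": ""}})
        if d.get("id"):
            tool_calls[idx]["id"] = d["id"]
        fn = d.get("function", {})
        if fn.get("name"):
            tool_calls[idx]["function"]["name"] = fn["name"]
        if fn.get("arguments"):
            # Argument JSON arrives in fragments; concatenate before parsing.
            tool_calls[idx]["function"]["arguments"] += fn["arguments"]
    return tool_calls

deltas = [
    {"index": 0, "id": "call_1",
     "function": {"name": "search", "arguments": '{"que'}},
    {"index": 0, "function": {"arguments": 'ry": "npcsh"}'}},
]
calls = accumulate_tool_call_deltas(deltas)
print(json.loads(calls[0]["function"]["arguments"]))  # {'query': 'npcsh'}
```

The arguments string only becomes valid JSON after the final fragment lands, which is why parsing is deferred until the tool is actually executed.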
@@ -313,11 +347,13 @@ def process_corca_result(
     tool_responses = []
     for tool_call in tool_calls:
         tool_name = tool_call['function']['name']
-        tool_args_str = tool_call['function']['arguments']
+        tool_args = tool_call['function']['arguments']
         tool_call_id = tool_call['id']

         try:
-            tool_args = json.loads(tool_args_str) if tool_args_str.strip() else {}
+            if isinstance(tool_args, str):
+                tool_args = json.loads(tool_args) if tool_args.strip() else {}
+
         except json.JSONDecodeError:
             tool_args = {}

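This hunk follows from the streaming change above: the Ollama branch of process_mcp_stream already parses arguments into a dict, while the OpenAI branch still yields a JSON string, so process_corca_result must tolerate both. A sketch that folds the two cases into one helper (the helper name is hypothetical; the {'raw': ...} fallback is the one the Ollama branch uses, whereas this hunk falls back to an empty dict):

```python
import json

def normalize_tool_args(raw):
    """Return a dict whether `raw` is already parsed (Ollama path) or a
    JSON string assembled from streaming fragments (OpenAI path)."""
    if isinstance(raw, dict):
        return raw
    if isinstance(raw, str):
        if not raw.strip():
            return {}
        try:
            return json.loads(raw)
        except json.JSONDecodeError:
            return {"raw": raw}  # keep the unparseable text for debugging
    return {}

print(normalize_tool_args('{"path": "notes.txt"}'))  # {'path': 'notes.txt'}
print(normalize_tool_args({"path": "notes.txt"}))    # passed through as-is
print(normalize_tool_args(""))                       # {}
```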
@@ -389,45 +425,10 @@ def process_corca_result(
     follow_up_tool_calls = []

     if result_state.stream_output:
-        collected_content = ""
-        follow_up_tool_calls = []
-
         if hasattr(follow_up_content, '__iter__'):
-            for chunk in follow_up_content:
-                if hasattr(chunk, 'choices') and chunk.choices:
-                    delta = chunk.choices[0].delta
-
-                    if hasattr(delta, 'content') and delta.content:
-                        collected_content += delta.content
-                        print(delta.content, end='', flush=True)
-
-                    if hasattr(delta, 'tool_calls') and delta.tool_calls:
-                        for tool_call_delta in delta.tool_calls:
-                            if hasattr(tool_call_delta, 'index'):
-                                idx = tool_call_delta.index
-
-                                while len(follow_up_tool_calls) <= idx:
-                                    follow_up_tool_calls.append({
-                                        'id': '',
-                                        'type': 'function',
-                                        'function': {
-                                            'name': '',
-                                            'arguments': ''
-                                        }
-                                    })
-
-                                if hasattr(tool_call_delta, 'id') and tool_call_delta.id:
-                                    follow_up_tool_calls[idx]['id'] = tool_call_delta.id
-
-                                if hasattr(tool_call_delta, 'function'):
-                                    if hasattr(tool_call_delta.function, 'name') and tool_call_delta.function.name:
-                                        follow_up_tool_calls[idx]['function']['name'] = tool_call_delta.function.name
-
-                                    if hasattr(tool_call_delta.function, 'arguments') and tool_call_delta.function.arguments:
-                                        follow_up_tool_calls[idx]['function']['arguments'] += tool_call_delta.function.arguments
+            collected_content, follow_up_tool_calls = process_mcp_stream(follow_up_content, active_npc)
         else:
-            collected_content = str(follow_up_content)
-
+            collected_content = str(follow_up_content)
         follow_up_content = collected_content
     else:
         if follow_up_messages:
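Routing the follow-up response through process_mcp_stream also means follow-up streams inherit its terminal trick: raw chunks are echoed live for responsiveness, then erased and replaced with a markdown render. A standalone sketch of that technique, assuming an xterm-compatible terminal that honors the ESC[s / ESC[u save/restore-cursor sequences the diff writes:

```python
import sys
import time

def stream_then_rerender(chunks):
    """Echo chunks as they arrive, then erase the raw stream and print
    the final text in its place."""
    sys.stdout.write('\033[s')        # save cursor position (ESC[s)
    sys.stdout.flush()
    collected = ""
    for chunk in chunks:
        collected += chunk
        sys.stdout.write(chunk)       # live echo of the raw stream
        sys.stdout.flush()
        time.sleep(0.05)              # simulate streaming latency
    sys.stdout.write('\033[u\033[J')  # restore cursor, clear below (ESC[u, ESC[J)
    sys.stdout.flush()
    print(collected)                  # final render; corca uses render_markdown here

stream_then_rerender(["incremental ", "tool-call ", "output\n"])
```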
@@ -452,11 +453,11 @@ def process_corca_result(
     print(colored("\n🔧 Executing follow-up MCP tools...", "cyan"))
     for tool_call in follow_up_tool_calls:
         tool_name = tool_call['function']['name']
-        tool_args_str = tool_call['function']['arguments']
+        tool_args = tool_call['function']['arguments']
         tool_call_id = tool_call['id']

         try:
-            tool_args = json.loads(tool_args_str) if tool_args_str.strip() else {}
+            tool_args = json.loads(tool_args) if tool_args.strip() else {}
         except json.JSONDecodeError:
             tool_args = {}

@@ -648,7 +649,7 @@ def enter_corca_mode(command: str,
         if state.npc:
             prompt_npc_name = state.npc.name

-        prompt_str = f"{colored(os.path.basename(state.current_path), 'blue')}:corca:{prompt_npc_name}🦌> "
+        prompt_str = f"{colored(os.path.basename(state.current_path), 'blue')}:{prompt_npc_name}🦌> "
         prompt = readline_safe_prompt(prompt_str)

         user_input = get_multiline_input(prompt).strip()
@@ -680,14 +681,31 @@ def enter_corca_mode(command: str,

     render_markdown("\n# Exiting Corca Mode")
     return {"output": "", "messages": state.messages}
-
 def main():
     parser = argparse.ArgumentParser(description="Corca - An MCP-powered npcsh shell.")
     parser.add_argument("--mcp-server-path", type=str, help="Path to an MCP server script to connect to.")
     args = parser.parse_args()

     command_history, team, default_npc = setup_shell()
-
+
+    # Override default_npc with corca priority
+    project_team_path = os.path.abspath('./npc_team/')
+    global_team_path = os.path.expanduser('~/.npcsh/npc_team/')
+
+    project_corca_path = os.path.join(project_team_path, "corca.npc")
+    global_corca_path = os.path.join(global_team_path, "corca.npc")
+
+    if os.path.exists(project_corca_path):
+        default_npc = NPC(file=project_corca_path,
+                          db_conn=command_history.engine)
+    elif os.path.exists(global_corca_path):
+        default_npc = NPC(file=global_corca_path,
+                          db_conn=command_history.engine)
+    print('Team Default: ', team.provider, team.model)
+    if default_npc.model is None:
+        default_npc.model = team.model
+    if default_npc.provider is None:
+        default_npc.provider = team.provider
     from npcsh._state import initial_state
     initial_shell_state = initial_state
     initial_shell_state.team = team
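The net effect of this hunk is a lookup order for the Corca agent definition: a project-local npc_team/corca.npc wins over the global ~/.npcsh copy, and the team's model/provider fill in whatever the loaded NPC leaves unset. A hypothetical helper distilling just the path-precedence part:

```python
import os

def resolve_corca_npc(filename="corca.npc"):
    """Return the corca.npc path main() would load: project npc_team first,
    then the global ~/.npcsh one; None keeps setup_shell's default NPC."""
    candidates = [
        os.path.abspath(os.path.join('./npc_team', filename)),
        os.path.join(os.path.expanduser('~/.npcsh/npc_team'), filename),
    ]
    for path in candidates:
        if os.path.exists(path):
            return path
    return None

print(resolve_corca_npc())  # e.g. /home/user/project/npc_team/corca.npc or None
```

Note that the `print('Team Default: ...')` line and the model/provider back-fill run unconditionally, so even when no corca.npc is found, the NPC returned by setup_shell still inherits the team defaults.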
@@ -704,6 +722,5 @@ def main():
     }

     enter_corca_mode(**kwargs)
-
 if __name__ == "__main__":
     main()