npcsh 1.0.35__py3-none-any.whl → 1.0.36__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33) hide show
  1. npcsh/corca.py +373 -241
  2. {npcsh-1.0.35.dist-info → npcsh-1.0.36.dist-info}/METADATA +1 -1
  3. {npcsh-1.0.35.dist-info → npcsh-1.0.36.dist-info}/RECORD +33 -33
  4. {npcsh-1.0.35.data → npcsh-1.0.36.data}/data/npcsh/npc_team/alicanto.npc +0 -0
  5. {npcsh-1.0.35.data → npcsh-1.0.36.data}/data/npcsh/npc_team/alicanto.png +0 -0
  6. {npcsh-1.0.35.data → npcsh-1.0.36.data}/data/npcsh/npc_team/bash_executer.jinx +0 -0
  7. {npcsh-1.0.35.data → npcsh-1.0.36.data}/data/npcsh/npc_team/corca.npc +0 -0
  8. {npcsh-1.0.35.data → npcsh-1.0.36.data}/data/npcsh/npc_team/corca.png +0 -0
  9. {npcsh-1.0.35.data → npcsh-1.0.36.data}/data/npcsh/npc_team/edit_file.jinx +0 -0
  10. {npcsh-1.0.35.data → npcsh-1.0.36.data}/data/npcsh/npc_team/foreman.npc +0 -0
  11. {npcsh-1.0.35.data → npcsh-1.0.36.data}/data/npcsh/npc_team/frederic.npc +0 -0
  12. {npcsh-1.0.35.data → npcsh-1.0.36.data}/data/npcsh/npc_team/frederic4.png +0 -0
  13. {npcsh-1.0.35.data → npcsh-1.0.36.data}/data/npcsh/npc_team/guac.png +0 -0
  14. {npcsh-1.0.35.data → npcsh-1.0.36.data}/data/npcsh/npc_team/image_generation.jinx +0 -0
  15. {npcsh-1.0.35.data → npcsh-1.0.36.data}/data/npcsh/npc_team/internet_search.jinx +0 -0
  16. {npcsh-1.0.35.data → npcsh-1.0.36.data}/data/npcsh/npc_team/kadiefa.npc +0 -0
  17. {npcsh-1.0.35.data → npcsh-1.0.36.data}/data/npcsh/npc_team/kadiefa.png +0 -0
  18. {npcsh-1.0.35.data → npcsh-1.0.36.data}/data/npcsh/npc_team/npcsh.ctx +0 -0
  19. {npcsh-1.0.35.data → npcsh-1.0.36.data}/data/npcsh/npc_team/npcsh_sibiji.png +0 -0
  20. {npcsh-1.0.35.data → npcsh-1.0.36.data}/data/npcsh/npc_team/plonk.npc +0 -0
  21. {npcsh-1.0.35.data → npcsh-1.0.36.data}/data/npcsh/npc_team/plonk.png +0 -0
  22. {npcsh-1.0.35.data → npcsh-1.0.36.data}/data/npcsh/npc_team/plonkjr.npc +0 -0
  23. {npcsh-1.0.35.data → npcsh-1.0.36.data}/data/npcsh/npc_team/plonkjr.png +0 -0
  24. {npcsh-1.0.35.data → npcsh-1.0.36.data}/data/npcsh/npc_team/python_executor.jinx +0 -0
  25. {npcsh-1.0.35.data → npcsh-1.0.36.data}/data/npcsh/npc_team/screen_cap.jinx +0 -0
  26. {npcsh-1.0.35.data → npcsh-1.0.36.data}/data/npcsh/npc_team/sibiji.npc +0 -0
  27. {npcsh-1.0.35.data → npcsh-1.0.36.data}/data/npcsh/npc_team/sibiji.png +0 -0
  28. {npcsh-1.0.35.data → npcsh-1.0.36.data}/data/npcsh/npc_team/spool.png +0 -0
  29. {npcsh-1.0.35.data → npcsh-1.0.36.data}/data/npcsh/npc_team/yap.png +0 -0
  30. {npcsh-1.0.35.dist-info → npcsh-1.0.36.dist-info}/WHEEL +0 -0
  31. {npcsh-1.0.35.dist-info → npcsh-1.0.36.dist-info}/entry_points.txt +0 -0
  32. {npcsh-1.0.35.dist-info → npcsh-1.0.36.dist-info}/licenses/LICENSE +0 -0
  33. {npcsh-1.0.35.dist-info → npcsh-1.0.36.dist-info}/top_level.txt +0 -0
npcsh/corca.py CHANGED
@@ -7,7 +7,7 @@ from contextlib import AsyncExitStack
7
7
  from typing import Optional, Callable, Dict, Any, Tuple, List
8
8
  import shutil
9
9
  import traceback
10
- from litellm.exceptions import Timeout, ContextWindowExceededError, RateLimitError
10
+ from litellm.exceptions import Timeout, ContextWindowExceededError, RateLimitError, BadRequestError
11
11
 
12
12
  try:
13
13
  from mcp import ClientSession, StdioServerParameters
@@ -248,102 +248,189 @@ def process_mcp_stream(stream_response, active_npc):
248
248
 
249
249
  return collected_content, tool_calls
250
250
 
251
-
252
-
253
- def execute_command_corca(command: str, state: ShellState, command_history, selected_mcp_tools_names: Optional[List[str]] = None) -> Tuple[ShellState, Any]:
254
- mcp_tools_for_llm = []
255
-
256
- if hasattr(state, 'mcp_client') and state.mcp_client and state.mcp_client.session:
257
- all_available_mcp_tools = state.mcp_client.available_tools_llm
251
def clean_orphaned_tool_calls(messages):
    """Return a copy of *messages* with orphaned tool traffic removed.

    Two kinds of inconsistency are repaired so the transcript is valid for
    providers that enforce strict tool-call pairing:

    - a ``tool`` message whose ``tool_call_id`` is not issued by a preceding
      assistant message (scanning back, stopping at the nearest plain
      user/assistant turn) is dropped;
    - an ``assistant`` message whose ``tool_calls`` never receive a matching
      ``tool`` response before the next user/assistant turn has those
      unanswered entries stripped (the whole ``tool_calls`` key is removed
      when none remain).

    The input list and its message dicts are not mutated; a trimmed
    assistant message is a shallow copy.
    """
    def _answered_by_earlier_assistant(idx, tool_msg):
        # Walk backwards until a plain user/assistant turn; an assistant
        # message WITH tool_calls that does not match is skipped over,
        # mirroring the original scan.
        for k in range(idx - 1, -1, -1):
            earlier = messages[k]
            if earlier.get("role") == "assistant" and earlier.get("tool_calls"):
                issued = {tc["id"] for tc in earlier["tool_calls"]}
                if tool_msg.get("tool_call_id") in issued:
                    return True
            elif earlier.get("role") in ["user", "assistant"]:
                return False
        return False

    def _responses_following(idx, wanted_ids):
        # Collect the ids of tool responses that answer *wanted_ids*
        # before the next user/assistant turn.
        seen = set()
        for later in messages[idx + 1:]:
            role = later.get("role")
            if role == "tool":
                if later.get("tool_call_id") in wanted_ids:
                    seen.add(later.get("tool_call_id"))
            elif role in ["user", "assistant"]:
                break
        return seen

    result = []
    for pos, entry in enumerate(messages):
        role = entry.get("role")
        if role == "tool":
            # Keep only tool responses that a prior assistant asked for.
            if _answered_by_earlier_assistant(pos, entry):
                result.append(entry)
        elif role == "assistant" and entry.get("tool_calls"):
            issued = {tc["id"] for tc in entry["tool_calls"]}
            unanswered = issued - _responses_following(pos, issued)
            if unanswered:
                trimmed = entry.copy()
                kept = [
                    tc for tc in entry["tool_calls"]
                    if tc["id"] not in unanswered
                ]
                if kept:
                    trimmed["tool_calls"] = kept
                else:
                    del trimmed["tool_calls"]
                result.append(trimmed)
            else:
                result.append(entry)
        else:
            result.append(entry)

    return result
272
306
 
273
- if not state.messages or not any("working directory" in msg.get("content", "").lower() for msg in state.messages):
274
- context_message = {
275
- "role": "system",
276
- "content": f"You are currently operating in the directory: {state.current_path}. All file operations should be relative to this location unless explicitly specified otherwise."
277
- }
278
- state.messages.insert(0, context_message)
279
307
 
280
- if len(state.messages) > 50:
281
- compressed_state = active_npc.compress_planning_state(state.messages)
282
- state.messages = [{"role": "system", "content": compressed_state}]
283
- print(compressed_state)
284
-
308
def get_llm_response_with_handling(prompt, npc, messages, tools, stream, team, context=None):
    """Unified LLM response with exception handling.

    Cleans orphaned tool-call traffic from *messages*, then calls
    ``get_llm_response`` with retry/recovery for the common litellm
    failure modes:

    - ``Timeout``: retry once with identical arguments.
    - ``ContextWindowExceededError``: compress the history via
      ``npc.compress_planning_state`` and retry.
    - ``RateLimitError``: wait 60s, compress, retry.
    - ``BadRequestError`` mentioning ``tool_call_id``: re-clean the
      message list and retry once; any other bad request is re-raised.

    Fix over the previous version: every retry path now passes the same
    ``context`` as the initial call — it was silently dropped on retries.

    Returns whatever ``get_llm_response`` returns (a response dict).
    """
    messages = clean_orphaned_tool_calls(messages)

    def _call(msgs):
        # Single point of truth for the call so all retry paths use
        # identical arguments.
        return get_llm_response(
            prompt=prompt,
            npc=npc,
            messages=msgs,
            tools=tools,
            auto_process_tool_calls=False,
            stream=stream,
            team=team,
            context=context
        )

    def _compressed():
        # Collapse the history into a single system message to shrink
        # the context window before retrying.
        print('compressing..... ')
        compressed_state = npc.compress_planning_state(messages)
        return [{"role": "system", "content": compressed_state}]

    try:
        return _call(messages)
    except Timeout:
        # Transient; one straight retry.
        return _call(messages)
    except ContextWindowExceededError:
        return _call(_compressed())
    except RateLimitError:
        import time
        print('rate limit hit... waiting 60 seconds')
        time.sleep(60)
        return _call(_compressed())
    except BadRequestError as e:
        if "tool_call_id" in str(e).lower():
            # Provider rejected orphaned tool messages; re-clean and retry.
            return _call(clean_orphaned_tool_calls(messages))
        raise
377
+
378
+
379
+
380
+ def execute_command_corca(command: str, state: ShellState, command_history, selected_mcp_tools_names: Optional[List[str]] = None) -> Tuple[ShellState, Any]:
381
+ mcp_tools_for_llm = []
382
+
383
+ if hasattr(state, 'mcp_client') and state.mcp_client and state.mcp_client.session:
384
+ all_available_mcp_tools = state.mcp_client.available_tools_llm
385
+
386
+ if selected_mcp_tools_names and len(selected_mcp_tools_names) > 0:
387
+ mcp_tools_for_llm = [
388
+ tool_def for tool_def in all_available_mcp_tools
389
+ if tool_def['function']['name'] in selected_mcp_tools_names
390
+ ]
391
+ if not mcp_tools_for_llm:
392
+ cprint("Warning: No selected MCP tools found or matched. Corca will proceed without tools.", "yellow", file=sys.stderr)
393
+ else:
394
+ mcp_tools_for_llm = all_available_mcp_tools
395
+ else:
396
+ cprint("Warning: Corca agent has no tools. No MCP server connected.", "yellow", file=sys.stderr)
397
+
398
+ if len(state.messages) > 20:
399
+ compressed_state = state.npc.compress_planning_state(messages)
400
+ state.messages = [{"role": "system", "content": state.npc.get_system_message() + f' Your current task: {compressed_state}'}]
401
+ print("Compressed messages during tool execution.")
402
+
403
+ response_dict = get_llm_response_with_handling(
404
+ prompt=command,
405
+ npc=state.npc,
406
+ messages=state.messages,
407
+ tools=mcp_tools_for_llm,
408
+ stream=state.stream_output,
409
+ team=state.team,
410
+ context=f' The users working directory is {state.current_path}'
411
+ )
335
412
 
336
413
  stream_response = response_dict.get('response')
337
414
  messages = response_dict.get('messages', state.messages)
415
+ tool_calls = response_dict.get('tool_calls', [])
416
+
417
+ collected_content, stream_tool_calls = process_mcp_stream(stream_response, state.npc)
338
418
 
339
- collected_content, tool_calls = process_mcp_stream(stream_response, active_npc)
419
+ if stream_tool_calls:
420
+ tool_calls = stream_tool_calls
340
421
 
341
422
  state.messages = messages
342
- if collected_content or tool_calls:
343
- assistant_message = {"role": "assistant", "content": collected_content}
344
- if tool_calls:
345
- assistant_message["tool_calls"] = tool_calls
346
- state.messages.append(assistant_message)
423
+
424
+ if tool_calls and hasattr(state, 'mcp_client') and state.mcp_client:
425
+ final_content, state.messages = execute_mcp_tool_calls(
426
+ tool_calls,
427
+ state.mcp_client,
428
+ state.messages,
429
+ state.npc,
430
+ state.stream_output
431
+ )
432
+ if final_content:
433
+ collected_content = final_content
347
434
 
348
435
  return state, {
349
436
  "output": collected_content,
@@ -352,6 +439,129 @@ def execute_command_corca(command: str, state: ShellState, command_history, sele
352
439
  }
353
440
 
354
441
 
442
def execute_mcp_tool_calls(tool_calls, mcp_client, messages, npc, stream_output):
    """Execute MCP tool calls in a loop until the LLM stops requesting tools.

    For each round: run every requested tool via the MCP client session,
    append the tool responses to the transcript, then ask the LLM for a
    follow-up. If the follow-up requests more tools, loop again; otherwise
    render/return the final content.

    Returns a ``(final_content, messages)`` tuple. ``final_content`` is
    ``None`` when interrupted, when there were no tool calls, or when the
    loop exits without a final text answer.
    """
    if not tool_calls or not mcp_client:
        return None, messages

    # Ensure the transcript is valid before we start appending tool output.
    messages = clean_orphaned_tool_calls(messages)

    print(colored("\n🔧 Executing MCP tools...", "cyan"))

    while tool_calls:
        tool_responses = []

        # Keep the history bounded while looping over tool rounds.
        # NOTE(review): this calls npc.get_system_prompt() while sibling code
        # in execute_command_corca calls npc.get_system_message() — confirm
        # which accessor actually exists on NPC.
        if len(messages) > 20:
            compressed_state = npc.compress_planning_state(messages)
            messages = [{"role": "system", "content": npc.get_system_prompt() + f' Your current task: {compressed_state}'}]
            print("Compressed messages during tool execution.")


        for tool_call in tool_calls:
            tool_name = tool_call['function']['name']
            tool_args = tool_call['function']['arguments']
            tool_call_id = tool_call['id']

            # Arguments may arrive as a JSON string; fall back to {} on
            # empty or malformed input.
            if isinstance(tool_args, str):
                try:
                    tool_args = json.loads(tool_args) if tool_args.strip() else {}
                except json.JSONDecodeError:
                    tool_args = {}

            try:
                print(f" Calling MCP tool: {tool_name} with args: {tool_args}")

                # The MCP session is async; drive it from a (possibly new)
                # event loop since this function is synchronous.
                loop = asyncio.get_event_loop()
                if loop.is_closed():
                    loop = asyncio.new_event_loop()
                    asyncio.set_event_loop(loop)

                mcp_result = loop.run_until_complete(
                    mcp_client.session.call_tool(tool_name, tool_args)
                )

                # Flatten the MCP result's content items into one string.
                tool_content = ""
                if hasattr(mcp_result, 'content') and mcp_result.content:
                    for content_item in mcp_result.content:
                        if hasattr(content_item, 'text'):
                            tool_content += content_item.text
                        elif hasattr(content_item, 'data'):
                            tool_content += str(content_item.data)
                        else:
                            tool_content += str(content_item)
                else:
                    tool_content = str(mcp_result)

                tool_responses.append({
                    "role": "tool",
                    "tool_call_id": tool_call_id,
                    "name": tool_name,
                    "content": tool_content
                })

                print(colored(f" ✓ {tool_name} completed", "green"))

            except KeyboardInterrupt:
                # User abort: bail out without appending partial responses.
                print(colored(f"\n ⚠️ Tool execution interrupted", "yellow"))
                return None, messages
            except Exception as e:
                # Report the failure to the LLM as the tool's "result"
                # instead of crashing the whole round.
                print(colored(f" ✗ {tool_name} failed: {e}", "red"))
                tool_responses.append({
                    "role": "tool",
                    "tool_call_id": tool_call_id,
                    "name": tool_name,
                    "content": f"Error: {str(e)}"
                })

        current_messages = messages + tool_responses

        # Ask the LLM what to do with the tool output.
        try:
            follow_up_response = get_llm_response_with_handling(
                prompt="",
                npc=npc,
                messages=current_messages,
                tools=mcp_client.available_tools_llm,
                stream=stream_output,
                team=None
            )
        except KeyboardInterrupt:
            print(colored(f"\n ⚠️ Follow-up response interrupted", "yellow"))
            return None, messages

        follow_up_messages = follow_up_response.get('messages', current_messages)
        follow_up_content = follow_up_response.get('response', '')
        follow_up_tool_calls = []

        if stream_output:
            # Streaming: drain the stream, collecting text and tool calls.
            if hasattr(follow_up_content, '__iter__'):
                collected_content, follow_up_tool_calls = process_mcp_stream(follow_up_content, npc)
            else:
                collected_content = str(follow_up_content)
            follow_up_content = collected_content
        else:
            # Non-streaming: tool calls live on the last assistant message.
            if follow_up_messages:
                last_message = follow_up_messages[-1]
                if last_message.get("role") == "assistant" and "tool_calls" in last_message:
                    follow_up_tool_calls = last_message["tool_calls"]

        messages = follow_up_messages

        if not follow_up_tool_calls:
            # Final answer: render it (streaming already printed it live).
            if not stream_output:
                print('\n')
                render_markdown(follow_up_content)
            return follow_up_content, messages
        else:
            # More tools requested; record the assistant turn and loop.
            if follow_up_content or follow_up_tool_calls:
                assistant_message = {"role": "assistant", "content": follow_up_content}
                if follow_up_tool_calls:
                    assistant_message["tool_calls"] = follow_up_tool_calls
                messages.append(assistant_message)

            tool_calls = follow_up_tool_calls
            print(colored("\n🔧 Executing follow-up MCP tools...", "cyan"))

    return None, messages
564
+
355
565
  def _resolve_and_copy_mcp_server_path(
356
566
  explicit_path: Optional[str],
357
567
  current_path: Optional[str],
@@ -515,13 +725,17 @@ def create_corca_state_and_mcp_client(conversation_id, command_history, npc=None
515
725
 
516
726
  return state
517
727
 
518
-
728
+
519
729
  def process_corca_result(
520
730
  user_input: str,
521
731
  result_state: ShellState,
522
732
  output: Any,
523
733
  command_history: CommandHistory,
524
734
  ):
735
+ from npcpy.llm_funcs import get_facts
736
+ from npcpy.memory.memory_processor import memory_approval_ui
737
+ from npcsh._state import format_memory_context
738
+
525
739
  team_name = result_state.team.name if result_state.team else "__none__"
526
740
  npc_name = result_state.npc.name if isinstance(result_state.npc, NPC) else "__none__"
527
741
 
@@ -529,7 +743,8 @@ def process_corca_result(
529
743
  name="default",
530
744
  model=result_state.chat_model,
531
745
  provider=result_state.chat_provider,
532
- db_conn=command_history.engine)
746
+ db_conn=command_history.engine
747
+ )
533
748
 
534
749
  save_conversation_message(
535
750
  command_history,
@@ -550,167 +765,20 @@ def process_corca_result(
550
765
  final_output_str = None
551
766
 
552
767
  if tool_calls and hasattr(result_state, 'mcp_client') and result_state.mcp_client:
553
- print(colored("\n🔧 Executing MCP tools...", "cyan"))
554
-
555
- tool_responses = []
556
- for tool_call in tool_calls:
557
- tool_name = tool_call['function']['name']
558
- tool_args = tool_call['function']['arguments']
559
- tool_call_id = tool_call['id']
560
-
561
- try:
562
- if isinstance(tool_args, str):
563
- tool_args = json.loads(tool_args) if tool_args.strip() else {}
564
-
565
- except json.JSONDecodeError:
566
- tool_args = {}
567
-
568
- try:
569
-
570
- loop = asyncio.get_event_loop()
571
- if loop.is_closed():
572
- loop = asyncio.new_event_loop()
573
- asyncio.set_event_loop(loop)
574
-
575
- mcp_result = loop.run_until_complete(
576
- result_state.mcp_client.session.call_tool(tool_name, tool_args)
577
- )
578
-
579
-
580
- tool_content = ""
581
- if hasattr(mcp_result, 'content') and mcp_result.content:
582
-
583
- for i, content_item in enumerate(mcp_result.content):
584
-
585
- if hasattr(content_item, 'text'):
586
- tool_content += content_item.text
587
- else:
588
- tool_content += str(content_item)
589
- else:
590
- tool_content = str(mcp_result)
591
-
592
-
593
-
594
- tool_responses.append({
595
- "role": "tool",
596
- "tool_call_id": tool_call_id,
597
- "name": tool_name,
598
- "content": tool_content
599
- })
600
-
601
- print(colored(f" ✓ {tool_name} completed", "green"))
602
-
603
- except Exception as e:
604
- print(colored(f" ✗ {tool_name} failed: {e}", "red"))
605
- tool_responses.append({
606
- "role": "tool",
607
- "tool_call_id": tool_call_id,
608
- "name": tool_name,
609
- "content": f"Error: {str(e)}"
610
- })
611
-
612
- result_state.messages.extend(tool_responses)
613
-
614
- while True:
615
- follow_up_response = get_llm_response(
616
- prompt="",
617
- model=active_npc.model,
618
- provider=active_npc.provider,
619
- npc=active_npc,
620
- messages=result_state.messages,
621
- tools=result_state.mcp_client.available_tools_llm,
622
- auto_process_tool_calls=False,
623
- stream=result_state.stream_output
624
- )
625
-
626
- follow_up_messages = follow_up_response.get('messages', [])
627
- follow_up_content = follow_up_response.get('response', '')
628
- follow_up_tool_calls = []
629
-
630
- if result_state.stream_output:
631
- if hasattr(follow_up_content, '__iter__'):
632
- collected_content, follow_up_tool_calls = process_mcp_stream(follow_up_content, active_npc)
633
- else:
634
- collected_content = str(follow_up_content)
635
- follow_up_content = collected_content
636
- else:
637
- if follow_up_messages:
638
- last_message = follow_up_messages[-1]
639
- if last_message.get("role") == "assistant" and "tool_calls" in last_message:
640
- follow_up_tool_calls = last_message["tool_calls"]
641
-
642
- result_state.messages = follow_up_messages
643
- if follow_up_content or follow_up_tool_calls:
644
- assistant_message = {"role": "assistant", "content": follow_up_content}
645
- if follow_up_tool_calls:
646
- assistant_message["tool_calls"] = follow_up_tool_calls
647
- result_state.messages.append(assistant_message)
648
-
649
- if not follow_up_tool_calls:
650
- final_output_str = follow_up_content
651
- if not result_state.stream_output:
652
- print('\n')
653
- render_markdown(final_output_str)
654
- break
655
-
656
- print(colored("\n🔧 Executing follow-up MCP tools...", "cyan"))
657
- for tool_call in follow_up_tool_calls:
658
- tool_name = tool_call['function']['name']
659
- tool_args = tool_call['function']['arguments']
660
- tool_call_id = tool_call['id']
661
-
662
- try:
663
- tool_args = json.loads(tool_args) if tool_args.strip() else {}
664
- except json.JSONDecodeError:
665
- tool_args = {}
666
-
667
- try:
668
- print(f" Calling MCP tool: {tool_name} with args: {tool_args}")
669
-
670
- loop = asyncio.get_event_loop()
671
- if loop.is_closed():
672
- loop = asyncio.new_event_loop()
673
- asyncio.set_event_loop(loop)
674
-
675
- mcp_result = loop.run_until_complete(
676
- result_state.mcp_client.session.call_tool(tool_name, tool_args)
677
- )
678
-
679
-
680
- tool_content = ""
681
- if hasattr(mcp_result, 'content') and mcp_result.content:
682
- for i, content_item in enumerate(mcp_result.content):
683
-
684
- if hasattr(content_item, 'text') and content_item.text:
685
- tool_content += content_item.text
686
- elif hasattr(content_item, 'data'):
687
- tool_content += str(content_item.data)
688
- else:
689
- tool_content += str(content_item)
690
- result_state.messages.append({
691
- "role": "tool",
692
- "tool_call_id": tool_call_id,
693
- "name": tool_name,
694
- "content": tool_content
695
- })
696
-
697
- print(colored(f" ✓ {tool_name} completed", "green"))
698
-
699
- except Exception as e:
700
- print(colored(f" ✗ {tool_name} failed: {e}", "red"))
701
- result_state.messages.append({
702
- "role": "tool",
703
- "tool_call_id": tool_call_id,
704
- "name": tool_name,
705
- "content": f"Error: {str(e)}"
706
- })
768
+ final_output_str, result_state.messages = execute_mcp_tool_calls(
769
+ tool_calls,
770
+ result_state.mcp_client,
771
+ result_state.messages,
772
+ result_state.npc,
773
+ result_state.stream_output
774
+ )
707
775
  else:
708
776
  print('\n')
709
777
  if result_state.stream_output:
710
778
  final_output_str = print_and_process_stream_with_markdown(
711
779
  output_content,
712
- active_npc.model,
713
- active_npc.provider,
780
+ result_state.npc.model,
781
+ result_state.npc.provider,
714
782
  show=True
715
783
  )
716
784
  else:
@@ -736,25 +804,88 @@ def process_corca_result(
736
804
  conversation_turn_text = f"User: {user_input}\nAssistant: {final_output_str}"
737
805
  engine = command_history.engine
738
806
 
739
- if result_state.build_kg:
807
+ memory_examples = command_history.get_memory_examples_for_context(
808
+ npc=npc_name,
809
+ team=team_name,
810
+ directory_path=result_state.current_path
811
+ )
812
+
813
+ memory_context = format_memory_context(memory_examples)
814
+
815
+ approved_facts = []
816
+ try:
817
+ facts = get_facts(
818
+ conversation_turn_text,
819
+ model=active_npc.model,
820
+ provider=active_npc.provider,
821
+ npc=active_npc,
822
+ context=memory_context
823
+ )
824
+
825
+ if facts:
826
+ memories_for_approval = []
827
+ for i, fact in enumerate(facts):
828
+ memories_for_approval.append({
829
+ "memory_id": f"temp_{i}",
830
+ "content": fact['statement'],
831
+ "context": f"Type: {fact.get('type', 'unknown')}, Source: {fact.get('source_text', '')}",
832
+ "npc": npc_name,
833
+ "fact_data": fact
834
+ })
835
+
836
+ approvals = memory_approval_ui(memories_for_approval)
837
+
838
+ for approval in approvals:
839
+ fact_data = next(m['fact_data'] for m in memories_for_approval
840
+ if m['memory_id'] == approval['memory_id'])
841
+
842
+ command_history.add_memory_to_database(
843
+ message_id=f"{result_state.conversation_id}_{len(result_state.messages)}",
844
+ conversation_id=result_state.conversation_id,
845
+ npc=npc_name,
846
+ team=team_name,
847
+ directory_path=result_state.current_path,
848
+ initial_memory=fact_data['statement'],
849
+ status=approval['decision'],
850
+ model=active_npc.model,
851
+ provider=active_npc.provider,
852
+ final_memory=approval.get('final_memory')
853
+ )
854
+
855
+ if approval['decision'] in ['human-approved', 'human-edited']:
856
+ approved_fact = {
857
+ 'statement': approval.get('final_memory') or fact_data['statement'],
858
+ 'source_text': fact_data.get('source_text', ''),
859
+ 'type': fact_data.get('type', 'explicit'),
860
+ 'generation': 0
861
+ }
862
+ approved_facts.append(approved_fact)
863
+
864
+ except Exception as e:
865
+ print(colored(f"Memory generation error: {e}", "yellow"))
866
+
867
+ if result_state.build_kg and approved_facts:
740
868
  try:
741
869
  if not should_skip_kg_processing(user_input, final_output_str):
742
870
  npc_kg = load_kg_from_db(engine, team_name, npc_name, result_state.current_path)
743
871
  evolved_npc_kg, _ = kg_evolve_incremental(
744
872
  existing_kg=npc_kg,
745
- new_content_text=conversation_turn_text,
873
+ new_facts=approved_facts,
746
874
  model=active_npc.model,
747
875
  provider=active_npc.provider,
876
+ npc=active_npc,
748
877
  get_concepts=True,
749
- link_concepts_facts = False,
750
- link_concepts_concepts = False,
751
- link_facts_facts = False,
878
+ link_concepts_facts=False,
879
+ link_concepts_concepts=False,
880
+ link_facts_facts=False,
881
+ )
882
+ save_kg_to_db(
883
+ engine,
884
+ evolved_npc_kg,
885
+ team_name,
886
+ npc_name,
887
+ result_state.current_path
752
888
  )
753
- save_kg_to_db(engine,
754
- evolved_npc_kg,
755
- team_name,
756
- npc_name,
757
- result_state.current_path)
758
889
  except Exception as e:
759
890
  print(colored(f"Error during real-time KG evolution: {e}", "red"))
760
891
 
@@ -819,8 +950,9 @@ def process_corca_result(
819
950
  except Exception as e:
820
951
  import traceback
821
952
  print(colored(f"Could not generate team suggestions: {e}", "yellow"))
822
- traceback.print_exc()
823
-
953
+ traceback.print_exc()
954
+
955
+
824
956
  def _read_npcsh_global_env() -> Dict[str, str]:
825
957
  global_env_file = Path(".npcsh_global")
826
958
  env_vars = {}
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: npcsh
3
- Version: 1.0.35
3
+ Version: 1.0.36
4
4
  Summary: npcsh is a command-line toolkit for using AI agents in novel ways.
5
5
  Home-page: https://github.com/NPC-Worldwide/npcsh
6
6
  Author: Christopher Agostino
@@ -1,7 +1,7 @@
1
1
  npcsh/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
2
  npcsh/_state.py,sha256=96NR32dMOytt1XIUvvO4CGODx3i4hMnaPJ-IlMJAMUE,93032
3
3
  npcsh/alicanto.py,sha256=xcYlKZY6_NcnPUADosGKRuY5SVeZ_4msw82-fE2JZN8,42117
4
- npcsh/corca.py,sha256=ZABu9d7HmKR3S-YF5NNP25gqvAhoOPRNJDxKqdOO-OM,49835
4
+ npcsh/corca.py,sha256=PasQqTEZ8OS1JD2Ow5qclGmCcsl89Ju9jK78bLPmbcA,54435
5
5
  npcsh/guac.py,sha256=sVdLYVkzkQw6TE7crtLUX0QJZFkzQthWZTe1p7IfAQE,80412
6
6
  npcsh/mcp_helpers.py,sha256=9TsCfcquGu_vX4WaKlY3J3P13-uxruQKrXng-jJ5YyY,11176
7
7
  npcsh/mcp_server.py,sha256=krc1rhiSU9gI76w99Ph3Mk7OyUVzfiEXKMvVid-7-Ik,5201
@@ -39,35 +39,35 @@ npcsh/npc_team/jinxs/image_generation.jinx,sha256=bQxZdEm0_eqvf_OJSHw3qarp8Klp3L
39
39
  npcsh/npc_team/jinxs/internet_search.jinx,sha256=s8zVxwKFUAof_VzQrwB5dpAQdeJ6hUBkbIdRsT-I5mo,1267
40
40
  npcsh/npc_team/jinxs/python_executor.jinx,sha256=vZz3pZaj1BnPFHMs_wpyjRc0b32JR4rLhZfulCMDF1s,398
41
41
  npcsh/npc_team/jinxs/screen_cap.jinx,sha256=-4DG0EiEe61N_kMhVqqHKhLVGOLuZQT9ax6IZk20NjI,960
42
- npcsh-1.0.35.data/data/npcsh/npc_team/alicanto.npc,sha256=y9yDY3lq8ZwxQxpnrgle8w5IJwZqvxDepZFU4OaZCtg,148
43
- npcsh-1.0.35.data/data/npcsh/npc_team/alicanto.png,sha256=A7xeMbcoKGjlkELxJEVifCEZLVWbOKZarTN5ZFJG-FM,3519858
44
- npcsh-1.0.35.data/data/npcsh/npc_team/bash_executer.jinx,sha256=C_oQccOY8cKevMMPLRdznlMOccQvLgyzyOIThXvmrD8,692
45
- npcsh-1.0.35.data/data/npcsh/npc_team/corca.npc,sha256=HI3Bs6KlUBPMz7icF1TRE8-V3f3EdU_VxvQxEpru3L4,662
46
- npcsh-1.0.35.data/data/npcsh/npc_team/corca.png,sha256=0lF70hKu6tY-37YmIPVF2cuaPzvnQ4-UtQOzuAbKEf4,1666776
47
- npcsh-1.0.35.data/data/npcsh/npc_team/edit_file.jinx,sha256=4PaJs8g_cdeDpbQwQSBycU5RDA0rczEC_NpLfLjo74Y,3490
48
- npcsh-1.0.35.data/data/npcsh/npc_team/foreman.npc,sha256=WqB8jLfBToGmr8c1vip1KOnTHxfXlGXwDUGnZoDMQr0,327
49
- npcsh-1.0.35.data/data/npcsh/npc_team/frederic.npc,sha256=EE2dOUItp-VKuW3ZMSHffmIEO4evjPcU2W_C4P3WXbY,362
50
- npcsh-1.0.35.data/data/npcsh/npc_team/frederic4.png,sha256=ll8uoV0npnPp5HVJWv7h0xDSeuq4pqsk_gYGBHLS0VY,1590744
51
- npcsh-1.0.35.data/data/npcsh/npc_team/guac.png,sha256=MCE7eJuEJwLJEzc9FS7lL62Mm-38jQRHkxXogPfOTuw,211470
52
- npcsh-1.0.35.data/data/npcsh/npc_team/image_generation.jinx,sha256=bQxZdEm0_eqvf_OJSHw3qarp8Klp3LlBDv1_HY3COo4,1307
53
- npcsh-1.0.35.data/data/npcsh/npc_team/internet_search.jinx,sha256=s8zVxwKFUAof_VzQrwB5dpAQdeJ6hUBkbIdRsT-I5mo,1267
54
- npcsh-1.0.35.data/data/npcsh/npc_team/kadiefa.npc,sha256=Yl5a4wrfe4F2f6Ndw_ukzlVVX7NE9g_mG-3QqJSkg_o,381
55
- npcsh-1.0.35.data/data/npcsh/npc_team/kadiefa.png,sha256=3CAwL8crKIwJko6o75Z6OYYEEM9Rk--yGzCJg7zoszg,3062528
56
- npcsh-1.0.35.data/data/npcsh/npc_team/npcsh.ctx,sha256=-jKYaPm2YbZHAGgWAXhyPIwhiNe1H1ZRFg1Zc7tHSxk,1049
57
- npcsh-1.0.35.data/data/npcsh/npc_team/npcsh_sibiji.png,sha256=9fUqgYMsSHmaH-kBTBQ7N5UCS5-eLZF94Log0O3mtFg,4544
58
- npcsh-1.0.35.data/data/npcsh/npc_team/plonk.npc,sha256=u1m2a1D512XGQ2kC3eWDAY8Y2IvpkNU73DI_CPE65UE,90
59
- npcsh-1.0.35.data/data/npcsh/npc_team/plonk.png,sha256=IU5ey-Dl4HEKlwnf75RSWNSHpF8rVqGmdbsa0deL4rQ,2727773
60
- npcsh-1.0.35.data/data/npcsh/npc_team/plonkjr.npc,sha256=It-i-BEuG0XddKk0d85onk2aJr9Pe5pLnJzNaCWaQIM,87
61
- npcsh-1.0.35.data/data/npcsh/npc_team/plonkjr.png,sha256=MqLEGwsyECUeODZIti0HQQrMMVxA6XERpW01R06NbpY,2606710
62
- npcsh-1.0.35.data/data/npcsh/npc_team/python_executor.jinx,sha256=vZz3pZaj1BnPFHMs_wpyjRc0b32JR4rLhZfulCMDF1s,398
63
- npcsh-1.0.35.data/data/npcsh/npc_team/screen_cap.jinx,sha256=-4DG0EiEe61N_kMhVqqHKhLVGOLuZQT9ax6IZk20NjI,960
64
- npcsh-1.0.35.data/data/npcsh/npc_team/sibiji.npc,sha256=Hb4wXKIObKKgibwnio5hLec9yd_9bKDCA87Nm2zijFA,216
65
- npcsh-1.0.35.data/data/npcsh/npc_team/sibiji.png,sha256=1dlZb7J3E62FcVo9CVOzLb8nu1bIUV7cr97nsFocHCM,35615
66
- npcsh-1.0.35.data/data/npcsh/npc_team/spool.png,sha256=LWTLkwDxXBfLuSUCX32_lo5yAmLYGsA67Xpsz-7MmWU,2876725
67
- npcsh-1.0.35.data/data/npcsh/npc_team/yap.png,sha256=_l7UbWnXJdsy4Mx-x5l9DT0R6ize3HTnkwQQnOFlI18,1548649
68
- npcsh-1.0.35.dist-info/licenses/LICENSE,sha256=IKBvAECHP-aCiJtE4cHGCE5Yl0tozYz02PomGeWS3y4,1070
69
- npcsh-1.0.35.dist-info/METADATA,sha256=1EVu7WXtWuOl3yCQxrW9F8mtXpXlXVYxYe3XWZ4PJrM,25486
70
- npcsh-1.0.35.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
71
- npcsh-1.0.35.dist-info/entry_points.txt,sha256=S5yIuGm8ZXQ4siHYgN5gs0J7bxgobSEULXf8L5HaW5o,206
72
- npcsh-1.0.35.dist-info/top_level.txt,sha256=kHSNgKMCkfjV95-DH0YSp1LLBi0HXdF3w57j7MQON3E,6
73
- npcsh-1.0.35.dist-info/RECORD,,
42
+ npcsh-1.0.36.data/data/npcsh/npc_team/alicanto.npc,sha256=y9yDY3lq8ZwxQxpnrgle8w5IJwZqvxDepZFU4OaZCtg,148
43
+ npcsh-1.0.36.data/data/npcsh/npc_team/alicanto.png,sha256=A7xeMbcoKGjlkELxJEVifCEZLVWbOKZarTN5ZFJG-FM,3519858
44
+ npcsh-1.0.36.data/data/npcsh/npc_team/bash_executer.jinx,sha256=C_oQccOY8cKevMMPLRdznlMOccQvLgyzyOIThXvmrD8,692
45
+ npcsh-1.0.36.data/data/npcsh/npc_team/corca.npc,sha256=HI3Bs6KlUBPMz7icF1TRE8-V3f3EdU_VxvQxEpru3L4,662
46
+ npcsh-1.0.36.data/data/npcsh/npc_team/corca.png,sha256=0lF70hKu6tY-37YmIPVF2cuaPzvnQ4-UtQOzuAbKEf4,1666776
47
+ npcsh-1.0.36.data/data/npcsh/npc_team/edit_file.jinx,sha256=4PaJs8g_cdeDpbQwQSBycU5RDA0rczEC_NpLfLjo74Y,3490
48
+ npcsh-1.0.36.data/data/npcsh/npc_team/foreman.npc,sha256=WqB8jLfBToGmr8c1vip1KOnTHxfXlGXwDUGnZoDMQr0,327
49
+ npcsh-1.0.36.data/data/npcsh/npc_team/frederic.npc,sha256=EE2dOUItp-VKuW3ZMSHffmIEO4evjPcU2W_C4P3WXbY,362
50
+ npcsh-1.0.36.data/data/npcsh/npc_team/frederic4.png,sha256=ll8uoV0npnPp5HVJWv7h0xDSeuq4pqsk_gYGBHLS0VY,1590744
51
+ npcsh-1.0.36.data/data/npcsh/npc_team/guac.png,sha256=MCE7eJuEJwLJEzc9FS7lL62Mm-38jQRHkxXogPfOTuw,211470
52
+ npcsh-1.0.36.data/data/npcsh/npc_team/image_generation.jinx,sha256=bQxZdEm0_eqvf_OJSHw3qarp8Klp3LlBDv1_HY3COo4,1307
53
+ npcsh-1.0.36.data/data/npcsh/npc_team/internet_search.jinx,sha256=s8zVxwKFUAof_VzQrwB5dpAQdeJ6hUBkbIdRsT-I5mo,1267
54
+ npcsh-1.0.36.data/data/npcsh/npc_team/kadiefa.npc,sha256=Yl5a4wrfe4F2f6Ndw_ukzlVVX7NE9g_mG-3QqJSkg_o,381
55
+ npcsh-1.0.36.data/data/npcsh/npc_team/kadiefa.png,sha256=3CAwL8crKIwJko6o75Z6OYYEEM9Rk--yGzCJg7zoszg,3062528
56
+ npcsh-1.0.36.data/data/npcsh/npc_team/npcsh.ctx,sha256=-jKYaPm2YbZHAGgWAXhyPIwhiNe1H1ZRFg1Zc7tHSxk,1049
57
+ npcsh-1.0.36.data/data/npcsh/npc_team/npcsh_sibiji.png,sha256=9fUqgYMsSHmaH-kBTBQ7N5UCS5-eLZF94Log0O3mtFg,4544
58
+ npcsh-1.0.36.data/data/npcsh/npc_team/plonk.npc,sha256=u1m2a1D512XGQ2kC3eWDAY8Y2IvpkNU73DI_CPE65UE,90
59
+ npcsh-1.0.36.data/data/npcsh/npc_team/plonk.png,sha256=IU5ey-Dl4HEKlwnf75RSWNSHpF8rVqGmdbsa0deL4rQ,2727773
60
+ npcsh-1.0.36.data/data/npcsh/npc_team/plonkjr.npc,sha256=It-i-BEuG0XddKk0d85onk2aJr9Pe5pLnJzNaCWaQIM,87
61
+ npcsh-1.0.36.data/data/npcsh/npc_team/plonkjr.png,sha256=MqLEGwsyECUeODZIti0HQQrMMVxA6XERpW01R06NbpY,2606710
62
+ npcsh-1.0.36.data/data/npcsh/npc_team/python_executor.jinx,sha256=vZz3pZaj1BnPFHMs_wpyjRc0b32JR4rLhZfulCMDF1s,398
63
+ npcsh-1.0.36.data/data/npcsh/npc_team/screen_cap.jinx,sha256=-4DG0EiEe61N_kMhVqqHKhLVGOLuZQT9ax6IZk20NjI,960
64
+ npcsh-1.0.36.data/data/npcsh/npc_team/sibiji.npc,sha256=Hb4wXKIObKKgibwnio5hLec9yd_9bKDCA87Nm2zijFA,216
65
+ npcsh-1.0.36.data/data/npcsh/npc_team/sibiji.png,sha256=1dlZb7J3E62FcVo9CVOzLb8nu1bIUV7cr97nsFocHCM,35615
66
+ npcsh-1.0.36.data/data/npcsh/npc_team/spool.png,sha256=LWTLkwDxXBfLuSUCX32_lo5yAmLYGsA67Xpsz-7MmWU,2876725
67
+ npcsh-1.0.36.data/data/npcsh/npc_team/yap.png,sha256=_l7UbWnXJdsy4Mx-x5l9DT0R6ize3HTnkwQQnOFlI18,1548649
68
+ npcsh-1.0.36.dist-info/licenses/LICENSE,sha256=IKBvAECHP-aCiJtE4cHGCE5Yl0tozYz02PomGeWS3y4,1070
69
+ npcsh-1.0.36.dist-info/METADATA,sha256=PU22LzEtIdClDgg_fCnc9yRRU-5c8E9CaR54AZs1d9A,25486
70
+ npcsh-1.0.36.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
71
+ npcsh-1.0.36.dist-info/entry_points.txt,sha256=S5yIuGm8ZXQ4siHYgN5gs0J7bxgobSEULXf8L5HaW5o,206
72
+ npcsh-1.0.36.dist-info/top_level.txt,sha256=kHSNgKMCkfjV95-DH0YSp1LLBi0HXdF3w57j7MQON3E,6
73
+ npcsh-1.0.36.dist-info/RECORD,,
File without changes