npcsh 1.0.35__tar.gz → 1.0.37__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {npcsh-1.0.35 → npcsh-1.0.37}/PKG-INFO +1 -1
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/_state.py +1 -9
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/corca.py +373 -241
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/mcp_server.py +119 -7
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh.egg-info/PKG-INFO +1 -1
- {npcsh-1.0.35 → npcsh-1.0.37}/setup.py +2 -2
- {npcsh-1.0.35 → npcsh-1.0.37}/LICENSE +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/README.md +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/__init__.py +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/alicanto.py +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/guac.py +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/mcp_helpers.py +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/npc.py +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/npc_team/alicanto.npc +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/npc_team/alicanto.png +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/npc_team/corca.npc +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/npc_team/corca.png +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/npc_team/foreman.npc +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/npc_team/frederic.npc +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/npc_team/frederic4.png +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/npc_team/guac.png +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/npc_team/jinxs/bash_executer.jinx +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/npc_team/jinxs/edit_file.jinx +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/npc_team/jinxs/image_generation.jinx +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/npc_team/jinxs/internet_search.jinx +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/npc_team/jinxs/python_executor.jinx +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/npc_team/jinxs/screen_cap.jinx +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/npc_team/kadiefa.npc +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/npc_team/kadiefa.png +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/npc_team/npcsh.ctx +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/npc_team/npcsh_sibiji.png +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/npc_team/plonk.npc +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/npc_team/plonk.png +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/npc_team/plonkjr.npc +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/npc_team/plonkjr.png +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/npc_team/sibiji.npc +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/npc_team/sibiji.png +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/npc_team/spool.png +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/npc_team/yap.png +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/npcsh.py +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/plonk.py +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/pti.py +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/routes.py +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/spool.py +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/wander.py +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh/yap.py +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh.egg-info/SOURCES.txt +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh.egg-info/dependency_links.txt +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh.egg-info/entry_points.txt +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh.egg-info/requires.txt +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/npcsh.egg-info/top_level.txt +0 -0
- {npcsh-1.0.35 → npcsh-1.0.37}/setup.cfg +0 -0
```diff
--- a/npcsh/_state.py
+++ b/npcsh/_state.py
@@ -2458,18 +2458,10 @@ def setup_shell() -> Tuple[CommandHistory, Team, Optional[NPC]]:
 
 
 
-from npcpy.memory.memory_processor import
+from npcpy.memory.memory_processor import memory_approval_ui
 from npcpy.ft.memory_trainer import MemoryTrainer
 from npcpy.llm_funcs import get_facts
 
-_memory_queue = None
-
-def get_memory_queue(command_history):
-    global _memory_queue
-    if _memory_queue is None:
-        _memory_queue = MemoryApprovalQueue(command_history)
-        _memory_queue.start_background_processing()
-    return _memory_queue
 
 def format_memory_context(memory_examples):
     if not memory_examples:
```
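The `_state.py` change removes the module-level memory queue, which is why the `memory_processor` import narrows to just `memory_approval_ui`: memories are now approved synchronously in `process_corca_result` (see the corca.py hunks below) rather than through a background worker. For reference, the deleted helper was a lazy module-level singleton; a standalone sketch of that pattern, with `MemoryApprovalQueue` assumed to come from `npcpy.memory.memory_processor` as the truncated old import suggests:

```python
from npcpy.memory.memory_processor import MemoryApprovalQueue  # assumed source module

_memory_queue = None

def get_memory_queue(command_history):
    # Lazy singleton: the first call creates the queue and starts its
    # background worker; every later call returns the same instance.
    global _memory_queue
    if _memory_queue is None:
        _memory_queue = MemoryApprovalQueue(command_history)
        _memory_queue.start_background_processing()
    return _memory_queue
```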
```diff
--- a/npcsh/corca.py
+++ b/npcsh/corca.py
@@ -7,7 +7,7 @@ from contextlib import AsyncExitStack
 from typing import Optional, Callable, Dict, Any, Tuple, List
 import shutil
 import traceback
-from litellm.exceptions import Timeout, ContextWindowExceededError, RateLimitError
+from litellm.exceptions import Timeout, ContextWindowExceededError, RateLimitError, BadRequestError
 
 try:
     from mcp import ClientSession, StdioServerParameters
@@ -248,102 +248,189 @@ def process_mcp_stream(stream_response, active_npc):
 
     return collected_content, tool_calls
 
-
-
-
-
-
-if hasattr(state, 'mcp_client') and state.mcp_client and state.mcp_client.session:
-    all_available_mcp_tools = state.mcp_client.available_tools_llm
+def clean_orphaned_tool_calls(messages):
+    cleaned_messages = []
+    i = 0
+    while i < len(messages):
+        msg = messages[i]
 
-if
-
-
-
-
-
-
+        if msg.get("role") == "tool":
+            # Check if there's a preceding assistant message with tool_calls
+            found_preceding_assistant = False
+            for j in range(i-1, -1, -1):
+                prev_msg = messages[j]
+                if prev_msg.get("role") == "assistant" and prev_msg.get("tool_calls"):
+                    # Check if this tool response matches any tool call
+                    tool_call_ids = {tc["id"] for tc in prev_msg["tool_calls"]}
+                    if msg.get("tool_call_id") in tool_call_ids:
+                        found_preceding_assistant = True
+                        break
+                elif prev_msg.get("role") in ["user", "assistant"]:
+                    break
+
+            if found_preceding_assistant:
+                cleaned_messages.append(msg)
+            # Skip orphaned tool responses
+
+        elif (msg.get("role") == "assistant" and msg.get("tool_calls")):
+            tool_call_ids = {tc["id"] for tc in msg["tool_calls"]}
+            j = i + 1
+            found_responses = set()
+
+            while j < len(messages):
+                next_msg = messages[j]
+                if next_msg.get("role") == "tool":
+                    if next_msg.get("tool_call_id") in tool_call_ids:
+                        found_responses.add(next_msg.get("tool_call_id"))
+                elif next_msg.get("role") in ["user", "assistant"]:
+                    break
+                j += 1
+
+            missing_responses = tool_call_ids - found_responses
+            if missing_responses:
+                assistant_msg = msg.copy()
+                assistant_msg["tool_calls"] = [
+                    tc for tc in msg["tool_calls"]
+                    if tc["id"] not in missing_responses
+                ]
+                if not assistant_msg["tool_calls"]:
+                    del assistant_msg["tool_calls"]
+                cleaned_messages.append(assistant_msg)
+            else:
+                cleaned_messages.append(msg)
         else:
-
-
-
-
-    active_npc = state.npc if isinstance(state.npc, NPC) else NPC(name="default")
+            cleaned_messages.append(msg)
+        i += 1
+
+    return cleaned_messages
 
-    if not state.messages or not any("working directory" in msg.get("content", "").lower() for msg in state.messages):
-        context_message = {
-            "role": "system",
-            "content": f"You are currently operating in the directory: {state.current_path}. All file operations should be relative to this location unless explicitly specified otherwise."
-        }
-        state.messages.insert(0, context_message)
 
-
-
-
-
-
+def get_llm_response_with_handling(prompt, npc, messages, tools, stream, team, context=None):
+    """Unified LLM response with exception handling."""
+    messages = clean_orphaned_tool_calls(messages)
+
     try:
-
-            prompt=
-            npc=
-            messages=
-            tools=
+        return get_llm_response(
+            prompt=prompt,
+            npc=npc,
+            messages=messages,
+            tools=tools,
             auto_process_tool_calls=False,
-            stream=
-            team=
+            stream=stream,
+            team=team,
+            context=context
         )
     except Timeout:
-
-            prompt=
-            npc=
-            messages=
-            tools=
+        return get_llm_response(
+            prompt=prompt,
+            npc=npc,
+            messages=messages,
+            tools=tools,
             auto_process_tool_calls=False,
-            stream=
-            team=
+            stream=stream,
+            team=team
         )
     except ContextWindowExceededError:
-
-
-
-
-            prompt=
-            npc=
-            messages=
-            tools=
+        print('compressing..... ')
+        compressed_state = npc.compress_planning_state(messages)
+        compressed_messages = [{"role": "system", "content": compressed_state}]
+        return get_llm_response(
+            prompt=prompt,
+            npc=npc,
+            messages=compressed_messages,
+            tools=tools,
             auto_process_tool_calls=False,
-            stream=
-            team=
+            stream=stream,
+            team=team
         )
     except RateLimitError:
         import time
         print('rate limit hit... waiting 60 seconds')
         time.sleep(60)
         print('compressing..... ')
-        compressed_state =
-
-
-
-
-
-
-            tools=mcp_tools_for_llm,
+        compressed_state = npc.compress_planning_state(messages)
+        compressed_messages = [{"role": "system", "content": compressed_state}]
+        return get_llm_response(
+            prompt=prompt,
+            npc=npc,
+            messages=compressed_messages,
+            tools=tools,
             auto_process_tool_calls=False,
-            stream=
-            team=
+            stream=stream,
+            team=team
         )
+    except BadRequestError as e:
+        if "tool_call_id" in str(e).lower():
+            cleaned_messages = clean_orphaned_tool_calls(messages)
+            return get_llm_response(
+                prompt=prompt,
+                npc=npc,
+                messages=cleaned_messages,
+                tools=tools,
+                auto_process_tool_calls=False,
+                stream=stream,
+                team=team,
+                context=context
+            )
+        else:
+            raise e
+
+
+
+def execute_command_corca(command: str, state: ShellState, command_history, selected_mcp_tools_names: Optional[List[str]] = None) -> Tuple[ShellState, Any]:
+    mcp_tools_for_llm = []
+
+    if hasattr(state, 'mcp_client') and state.mcp_client and state.mcp_client.session:
+        all_available_mcp_tools = state.mcp_client.available_tools_llm
+
+        if selected_mcp_tools_names and len(selected_mcp_tools_names) > 0:
+            mcp_tools_for_llm = [
+                tool_def for tool_def in all_available_mcp_tools
+                if tool_def['function']['name'] in selected_mcp_tools_names
+            ]
+            if not mcp_tools_for_llm:
+                cprint("Warning: No selected MCP tools found or matched. Corca will proceed without tools.", "yellow", file=sys.stderr)
+        else:
+            mcp_tools_for_llm = all_available_mcp_tools
+    else:
+        cprint("Warning: Corca agent has no tools. No MCP server connected.", "yellow", file=sys.stderr)
+
+    if len(state.messages) > 20:
+        compressed_state = state.npc.compress_planning_state(messages)
+        state.messages = [{"role": "system", "content": state.npc.get_system_message() + f' Your current task: {compressed_state}'}]
+        print("Compressed messages during tool execution.")
+
+    response_dict = get_llm_response_with_handling(
+        prompt=command,
+        npc=state.npc,
+        messages=state.messages,
+        tools=mcp_tools_for_llm,
+        stream=state.stream_output,
+        team=state.team,
+        context=f' The users working directory is {state.current_path}'
+    )
 
     stream_response = response_dict.get('response')
     messages = response_dict.get('messages', state.messages)
+    tool_calls = response_dict.get('tool_calls', [])
+
+    collected_content, stream_tool_calls = process_mcp_stream(stream_response, state.npc)
 
-
+    if stream_tool_calls:
+        tool_calls = stream_tool_calls
 
     state.messages = messages
-
-
-
-
-
+
+    if tool_calls and hasattr(state, 'mcp_client') and state.mcp_client:
+        final_content, state.messages = execute_mcp_tool_calls(
+            tool_calls,
+            state.mcp_client,
+            state.messages,
+            state.npc,
+            state.stream_output
+        )
+        if final_content:
+            collected_content = final_content
 
     return state, {
         "output": collected_content,
```
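The centerpiece of this hunk is `clean_orphaned_tool_calls`, which repairs histories where OpenAI-style tool-call pairing has broken: a `tool` message whose `tool_call_id` has no matching `tool_calls` entry in a preceding assistant message is dropped, and an assistant message whose tool calls never received responses has those calls pruned. A minimal usage sketch, assuming npcsh 1.0.37 is installed so the function can be imported from `npcsh.corca`:

```python
from npcsh.corca import clean_orphaned_tool_calls

messages = [
    {"role": "user", "content": "list the files"},
    # Orphaned: no assistant message ever issued a call with id "stale".
    {"role": "tool", "tool_call_id": "stale", "name": "ls", "content": "..."},
    # Assistant issued two calls, but only "a1" was ever answered.
    {"role": "assistant", "content": "", "tool_calls": [
        {"id": "a1", "function": {"name": "ls", "arguments": "{}"}},
        {"id": "a2", "function": {"name": "pwd", "arguments": "{}"}},
    ]},
    {"role": "tool", "tool_call_id": "a1", "name": "ls", "content": "file.txt"},
]

cleaned = clean_orphaned_tool_calls(messages)
# The "stale" tool message is dropped and the assistant message keeps only
# the answered call "a1", so every remaining tool response is paired.
for m in cleaned:
    print(m["role"], m.get("tool_call_id"), [tc["id"] for tc in m.get("tool_calls", [])])
```

This is also what the new `BadRequestError` handler leans on: when a provider rejects a request over a dangling `tool_call_id`, the wrapper cleans the history once and retries.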
```diff
--- a/npcsh/corca.py
+++ b/npcsh/corca.py
@@ -352,6 +439,129 @@ def execute_command_corca(command: str, state: ShellState, command_history, selected_mcp_tools_names: Optional[List[str]] = None) -> Tuple[ShellState, Any]:
     }
 
 
+def execute_mcp_tool_calls(tool_calls, mcp_client, messages, npc, stream_output):
+    if not tool_calls or not mcp_client:
+        return None, messages
+
+    messages = clean_orphaned_tool_calls(messages)
+
+    print(colored("\n🔧 Executing MCP tools...", "cyan"))
+
+    while tool_calls:
+        tool_responses = []
+
+        if len(messages) > 20:
+            compressed_state = npc.compress_planning_state(messages)
+            messages = [{"role": "system", "content": npc.get_system_prompt() + f' Your current task: {compressed_state}'}]
+            print("Compressed messages during tool execution.")
+
+
+        for tool_call in tool_calls:
+            tool_name = tool_call['function']['name']
+            tool_args = tool_call['function']['arguments']
+            tool_call_id = tool_call['id']
+
+            if isinstance(tool_args, str):
+                try:
+                    tool_args = json.loads(tool_args) if tool_args.strip() else {}
+                except json.JSONDecodeError:
+                    tool_args = {}
+
+            try:
+                print(f"  Calling MCP tool: {tool_name} with args: {tool_args}")
+
+                loop = asyncio.get_event_loop()
+                if loop.is_closed():
+                    loop = asyncio.new_event_loop()
+                    asyncio.set_event_loop(loop)
+
+                mcp_result = loop.run_until_complete(
+                    mcp_client.session.call_tool(tool_name, tool_args)
+                )
+
+                tool_content = ""
+                if hasattr(mcp_result, 'content') and mcp_result.content:
+                    for content_item in mcp_result.content:
+                        if hasattr(content_item, 'text'):
+                            tool_content += content_item.text
+                        elif hasattr(content_item, 'data'):
+                            tool_content += str(content_item.data)
+                        else:
+                            tool_content += str(content_item)
+                else:
+                    tool_content = str(mcp_result)
+
+                tool_responses.append({
+                    "role": "tool",
+                    "tool_call_id": tool_call_id,
+                    "name": tool_name,
+                    "content": tool_content
+                })
+
+                print(colored(f"  ✓ {tool_name} completed", "green"))
+
+            except KeyboardInterrupt:
+                print(colored(f"\n  ⚠️ Tool execution interrupted", "yellow"))
+                return None, messages
+            except Exception as e:
+                print(colored(f"  ✗ {tool_name} failed: {e}", "red"))
+                tool_responses.append({
+                    "role": "tool",
+                    "tool_call_id": tool_call_id,
+                    "name": tool_name,
+                    "content": f"Error: {str(e)}"
+                })
+
+        current_messages = messages + tool_responses
+
+        try:
+            follow_up_response = get_llm_response_with_handling(
+                prompt="",
+                npc=npc,
+                messages=current_messages,
+                tools=mcp_client.available_tools_llm,
+                stream=stream_output,
+                team=None
+            )
+        except KeyboardInterrupt:
+            print(colored(f"\n  ⚠️ Follow-up response interrupted", "yellow"))
+            return None, messages
+
+        follow_up_messages = follow_up_response.get('messages', current_messages)
+        follow_up_content = follow_up_response.get('response', '')
+        follow_up_tool_calls = []
+
+        if stream_output:
+            if hasattr(follow_up_content, '__iter__'):
+                collected_content, follow_up_tool_calls = process_mcp_stream(follow_up_content, npc)
+            else:
+                collected_content = str(follow_up_content)
+            follow_up_content = collected_content
+        else:
+            if follow_up_messages:
+                last_message = follow_up_messages[-1]
+                if last_message.get("role") == "assistant" and "tool_calls" in last_message:
+                    follow_up_tool_calls = last_message["tool_calls"]
+
+        messages = follow_up_messages
+
+        if not follow_up_tool_calls:
+            if not stream_output:
+                print('\n')
+                render_markdown(follow_up_content)
+            return follow_up_content, messages
+        else:
+            if follow_up_content or follow_up_tool_calls:
+                assistant_message = {"role": "assistant", "content": follow_up_content}
+                if follow_up_tool_calls:
+                    assistant_message["tool_calls"] = follow_up_tool_calls
+                messages.append(assistant_message)
+
+            tool_calls = follow_up_tool_calls
+            print(colored("\n🔧 Executing follow-up MCP tools...", "cyan"))
+
+    return None, messages
+
 def _resolve_and_copy_mcp_server_path(
     explicit_path: Optional[str],
     current_path: Optional[str],
```
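`execute_mcp_tool_calls` is synchronous, but MCP's `session.call_tool` is a coroutine, so the loop above bridges with `asyncio.get_event_loop()` / `run_until_complete`, replacing the loop only when a previous one was closed. A self-contained sketch of that bridging pattern with a stub coroutine standing in for the MCP session (note `asyncio.get_event_loop()` is deprecated on newer Pythons when no loop is running, hence the defensive fallback):

```python
import asyncio

async def call_tool(tool_name: str, tool_args: dict) -> str:
    # Stand-in for mcp_client.session.call_tool(tool_name, tool_args).
    await asyncio.sleep(0)
    return f"{tool_name} ran with {tool_args}"

def run_tool_sync(tool_name: str, tool_args: dict) -> str:
    # Same loop-reuse pattern as the hunk above: fetch the current loop,
    # make a fresh one only if it is missing or closed, then block on it.
    try:
        loop = asyncio.get_event_loop()
        if loop.is_closed():
            raise RuntimeError("loop closed")
    except RuntimeError:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    return loop.run_until_complete(call_tool(tool_name, tool_args))

print(run_tool_sync("ls", {"path": "."}))
```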
```diff
--- a/npcsh/corca.py
+++ b/npcsh/corca.py
@@ -515,13 +725,17 @@ def create_corca_state_and_mcp_client(conversation_id, command_history, npc=None):
 
     return state
 
-
+
 def process_corca_result(
     user_input: str,
     result_state: ShellState,
     output: Any,
     command_history: CommandHistory,
 ):
+    from npcpy.llm_funcs import get_facts
+    from npcpy.memory.memory_processor import memory_approval_ui
+    from npcsh._state import format_memory_context
+
     team_name = result_state.team.name if result_state.team else "__none__"
     npc_name = result_state.npc.name if isinstance(result_state.npc, NPC) else "__none__"
 
@@ -529,7 +743,8 @@ def process_corca_result(
         name="default",
         model=result_state.chat_model,
         provider=result_state.chat_provider,
-        db_conn=command_history.engine
+        db_conn=command_history.engine
+    )
 
     save_conversation_message(
         command_history,
@@ -550,167 +765,20 @@ def process_corca_result(
     final_output_str = None
 
     if tool_calls and hasattr(result_state, 'mcp_client') and result_state.mcp_client:
-
-
-
-
-
-
-
-
-            try:
-                if isinstance(tool_args, str):
-                    tool_args = json.loads(tool_args) if tool_args.strip() else {}
-
-            except json.JSONDecodeError:
-                tool_args = {}
-
-            try:
-
-                loop = asyncio.get_event_loop()
-                if loop.is_closed():
-                    loop = asyncio.new_event_loop()
-                    asyncio.set_event_loop(loop)
-
-                mcp_result = loop.run_until_complete(
-                    result_state.mcp_client.session.call_tool(tool_name, tool_args)
-                )
-
-
-                tool_content = ""
-                if hasattr(mcp_result, 'content') and mcp_result.content:
-
-                    for i, content_item in enumerate(mcp_result.content):
-
-                        if hasattr(content_item, 'text'):
-                            tool_content += content_item.text
-                        else:
-                            tool_content += str(content_item)
-                else:
-                    tool_content = str(mcp_result)
-
-
-
-                tool_responses.append({
-                    "role": "tool",
-                    "tool_call_id": tool_call_id,
-                    "name": tool_name,
-                    "content": tool_content
-                })
-
-                print(colored(f"  ✓ {tool_name} completed", "green"))
-
-            except Exception as e:
-                print(colored(f"  ✗ {tool_name} failed: {e}", "red"))
-                tool_responses.append({
-                    "role": "tool",
-                    "tool_call_id": tool_call_id,
-                    "name": tool_name,
-                    "content": f"Error: {str(e)}"
-                })
-
-        result_state.messages.extend(tool_responses)
-
-        while True:
-            follow_up_response = get_llm_response(
-                prompt="",
-                model=active_npc.model,
-                provider=active_npc.provider,
-                npc=active_npc,
-                messages=result_state.messages,
-                tools=result_state.mcp_client.available_tools_llm,
-                auto_process_tool_calls=False,
-                stream=result_state.stream_output
-            )
-
-            follow_up_messages = follow_up_response.get('messages', [])
-            follow_up_content = follow_up_response.get('response', '')
-            follow_up_tool_calls = []
-
-            if result_state.stream_output:
-                if hasattr(follow_up_content, '__iter__'):
-                    collected_content, follow_up_tool_calls = process_mcp_stream(follow_up_content, active_npc)
-                else:
-                    collected_content = str(follow_up_content)
-                follow_up_content = collected_content
-            else:
-                if follow_up_messages:
-                    last_message = follow_up_messages[-1]
-                    if last_message.get("role") == "assistant" and "tool_calls" in last_message:
-                        follow_up_tool_calls = last_message["tool_calls"]
-
-            result_state.messages = follow_up_messages
-            if follow_up_content or follow_up_tool_calls:
-                assistant_message = {"role": "assistant", "content": follow_up_content}
-                if follow_up_tool_calls:
-                    assistant_message["tool_calls"] = follow_up_tool_calls
-                result_state.messages.append(assistant_message)
-
-            if not follow_up_tool_calls:
-                final_output_str = follow_up_content
-                if not result_state.stream_output:
-                    print('\n')
-                    render_markdown(final_output_str)
-                break
-
-            print(colored("\n🔧 Executing follow-up MCP tools...", "cyan"))
-            for tool_call in follow_up_tool_calls:
-                tool_name = tool_call['function']['name']
-                tool_args = tool_call['function']['arguments']
-                tool_call_id = tool_call['id']
-
-                try:
-                    tool_args = json.loads(tool_args) if tool_args.strip() else {}
-                except json.JSONDecodeError:
-                    tool_args = {}
-
-                try:
-                    print(f"  Calling MCP tool: {tool_name} with args: {tool_args}")
-
-                    loop = asyncio.get_event_loop()
-                    if loop.is_closed():
-                        loop = asyncio.new_event_loop()
-                        asyncio.set_event_loop(loop)
-
-                    mcp_result = loop.run_until_complete(
-                        result_state.mcp_client.session.call_tool(tool_name, tool_args)
-                    )
-
-
-                    tool_content = ""
-                    if hasattr(mcp_result, 'content') and mcp_result.content:
-                        for i, content_item in enumerate(mcp_result.content):
-
-                            if hasattr(content_item, 'text') and content_item.text:
-                                tool_content += content_item.text
-                            elif hasattr(content_item, 'data'):
-                                tool_content += str(content_item.data)
-                            else:
-                                tool_content += str(content_item)
-                    result_state.messages.append({
-                        "role": "tool",
-                        "tool_call_id": tool_call_id,
-                        "name": tool_name,
-                        "content": tool_content
-                    })
-
-                    print(colored(f"  ✓ {tool_name} completed", "green"))
-
-                except Exception as e:
-                    print(colored(f"  ✗ {tool_name} failed: {e}", "red"))
-                    result_state.messages.append({
-                        "role": "tool",
-                        "tool_call_id": tool_call_id,
-                        "name": tool_name,
-                        "content": f"Error: {str(e)}"
-                    })
+        final_output_str, result_state.messages = execute_mcp_tool_calls(
+            tool_calls,
+            result_state.mcp_client,
+            result_state.messages,
+            result_state.npc,
+            result_state.stream_output
+        )
     else:
         print('\n')
         if result_state.stream_output:
             final_output_str = print_and_process_stream_with_markdown(
                 output_content,
-
-
+                result_state.npc.model,
+                result_state.npc.provider,
                 show=True
             )
         else:
@@ -736,25 +804,88 @@ def process_corca_result(
     conversation_turn_text = f"User: {user_input}\nAssistant: {final_output_str}"
     engine = command_history.engine
 
-
+    memory_examples = command_history.get_memory_examples_for_context(
+        npc=npc_name,
+        team=team_name,
+        directory_path=result_state.current_path
+    )
+
+    memory_context = format_memory_context(memory_examples)
+
+    approved_facts = []
+    try:
+        facts = get_facts(
+            conversation_turn_text,
+            model=active_npc.model,
+            provider=active_npc.provider,
+            npc=active_npc,
+            context=memory_context
+        )
+
+        if facts:
+            memories_for_approval = []
+            for i, fact in enumerate(facts):
+                memories_for_approval.append({
+                    "memory_id": f"temp_{i}",
+                    "content": fact['statement'],
+                    "context": f"Type: {fact.get('type', 'unknown')}, Source: {fact.get('source_text', '')}",
+                    "npc": npc_name,
+                    "fact_data": fact
+                })
+
+            approvals = memory_approval_ui(memories_for_approval)
+
+            for approval in approvals:
+                fact_data = next(m['fact_data'] for m in memories_for_approval
+                                 if m['memory_id'] == approval['memory_id'])
+
+                command_history.add_memory_to_database(
+                    message_id=f"{result_state.conversation_id}_{len(result_state.messages)}",
+                    conversation_id=result_state.conversation_id,
+                    npc=npc_name,
+                    team=team_name,
+                    directory_path=result_state.current_path,
+                    initial_memory=fact_data['statement'],
+                    status=approval['decision'],
+                    model=active_npc.model,
+                    provider=active_npc.provider,
+                    final_memory=approval.get('final_memory')
+                )
+
+                if approval['decision'] in ['human-approved', 'human-edited']:
+                    approved_fact = {
+                        'statement': approval.get('final_memory') or fact_data['statement'],
+                        'source_text': fact_data.get('source_text', ''),
+                        'type': fact_data.get('type', 'explicit'),
+                        'generation': 0
+                    }
+                    approved_facts.append(approved_fact)
+
+    except Exception as e:
+        print(colored(f"Memory generation error: {e}", "yellow"))
+
+    if result_state.build_kg and approved_facts:
         try:
             if not should_skip_kg_processing(user_input, final_output_str):
                 npc_kg = load_kg_from_db(engine, team_name, npc_name, result_state.current_path)
                 evolved_npc_kg, _ = kg_evolve_incremental(
                     existing_kg=npc_kg,
-
+                    new_facts=approved_facts,
                     model=active_npc.model,
                     provider=active_npc.provider,
+                    npc=active_npc,
                     get_concepts=True,
-                    link_concepts_facts
-                    link_concepts_concepts
-                    link_facts_facts
+                    link_concepts_facts=False,
+                    link_concepts_concepts=False,
+                    link_facts_facts=False,
+                )
+                save_kg_to_db(
+                    engine,
+                    evolved_npc_kg,
+                    team_name,
+                    npc_name,
+                    result_state.current_path
                 )
-                save_kg_to_db(engine,
-                              evolved_npc_kg,
-                              team_name,
-                              npc_name,
-                              result_state.current_path)
         except Exception as e:
             print(colored(f"Error during real-time KG evolution: {e}", "red"))
 
```
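The new memory flow reads as: `get_facts` extracts candidate facts from the conversation turn, `memory_approval_ui` puts them in front of the user, every decision is persisted with `add_memory_to_database`, and only approved or edited facts feed `kg_evolve_incremental`. From the loop above, each approval record must carry at least the fields below; this is a reading of the code, not a documented schema:

```python
# Shape implied by the approval loop: 'decision' becomes the stored status,
# and 'final_memory' is set when the user edited the proposed text.
approval = {
    "memory_id": "temp_0",        # matches a proposed memory's temporary id
    "decision": "human-edited",   # 'human-approved'/'human-edited' count as approved
    "final_memory": "User prefers tabs over spaces",
}

# Approved facts are forwarded to the knowledge graph as generation-0 facts:
approved_fact = {
    "statement": approval.get("final_memory") or "original extracted statement",
    "source_text": "",
    "type": "explicit",
    "generation": 0,
}
print(approved_fact)
```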
```diff
--- a/npcsh/corca.py
+++ b/npcsh/corca.py
@@ -819,8 +950,9 @@ def process_corca_result(
     except Exception as e:
         import traceback
         print(colored(f"Could not generate team suggestions: {e}", "yellow"))
-        traceback.print_exc()
-
+        traceback.print_exc()
+
+
 def _read_npcsh_global_env() -> Dict[str, str]:
     global_env_file = Path(".npcsh_global")
     env_vars = {}
```
```diff
--- a/npcsh/mcp_server.py
+++ b/npcsh/mcp_server.py
@@ -14,6 +14,7 @@ from typing import Optional, Dict, Any, List, Union, Callable
 from mcp.server.fastmcp import FastMCP
 import importlib
 
+from sqlalchemy import text
 
 
 import os
@@ -46,14 +47,129 @@ mcp = FastMCP("npcsh_mcp")
 DEFAULT_WORKSPACE = os.path.join(os.getcwd(), "workspace")
 os.makedirs(DEFAULT_WORKSPACE, exist_ok=True)
 
+@mcp.tool()
+async def add_memory(
+    npc_name: str,
+    team_name: str,
+    content: str,
+    memory_type: str = "observation",
+    directory_path: str = None
+) -> str:
+    """
+    Add a memory entry to the database.
+
+    Args:
+        npc_name: Name of the NPC this memory belongs to
+        team_name: Name of the team the NPC belongs to
+        content: The memory content to store
+        memory_type: Type of memory (observation, preference, achievement, etc.)
+        directory_path: Directory path context (defaults to current working directory)
+
+    Returns:
+        Success message with memory ID or error message
+    """
+    if directory_path is None:
+        directory_path = os.getcwd()
+
+    try:
+        from npcpy.memory.command_history import generate_message_id
+        message_id = generate_message_id()
+
+        memory_id = command_history.add_memory_to_database(
+            message_id=message_id,
+            conversation_id='mcp_direct',
+            npc=npc_name,
+            team=team_name,
+            directory_path=directory_path,
+            initial_memory=content,
+            status='active',
+            model=None,
+            provider=None
+        )
+        return f"Memory created successfully with ID: {memory_id}"
+    except Exception as e:
+        return f"Error creating memory: {str(e)}"
 
 @mcp.tool()
-async def
+async def search_memory(
+    query: str,
+    npc_name: str = None,
+    team_name: str = None,
+    directory_path: str = None,
+    status_filter: str = None,
+    limit: int = 10
+) -> str:
+    """
+    Search memories in the database.
+
+    Args:
+        query: Search query text
+        npc_name: Filter by specific NPC (optional)
+        team_name: Filter by specific team (optional)
+        directory_path: Filter by directory path (optional)
+        status_filter: Filter by memory status (active, archived, etc.)
+        limit: Maximum number of results to return
+
+    Returns:
+        JSON string of matching memories or error message
+    """
+    if directory_path is None:
+        directory_path = os.getcwd()
+
+    try:
+        results = command_history.search_memory(
+            query=query,
+            npc=npc_name,
+            team=team_name,
+            directory_path=directory_path,
+            status_filter=status_filter,
+            limit=limit
+        )
+        return json.dumps(results, indent=2)
+    except Exception as e:
+        return f"Error searching memories: {str(e)}"
+
+@mcp.tool()
+async def query_npcsh_database(sql_query: str) -> str:
+    """
+    Execute a SQL query against the npcsh_history.db database.
+
+    Args:
+        sql_query: SQL query to execute (SELECT statements only for safety)
+
+    Returns:
+        JSON string of query results or error message
+    """
+    # Safety check - only allow SELECT queries
+    if not sql_query.strip().upper().startswith('SELECT'):
+        return "Error: Only SELECT queries are allowed for safety"
+
+    try:
+        with command_history.engine.connect() as conn:
+            result = conn.execute(text(sql_query))
+            rows = result.fetchall()
+
+            if not rows:
+                return "Query executed successfully but returned no results"
+
+            # Convert to list of dictionaries
+            columns = result.keys()
+            results = []
+            for row in rows:
+                row_dict = dict(zip(columns, row))
+                results.append(row_dict)
+
+            return json.dumps(results, indent=2, default=str)
+    except Exception as e:
+        return f"Database query error: {str(e)}"
+@mcp.tool()
+async def run_server_command(command: str, wd: str) -> str:
     """
     Run a terminal command in the workspace.
 
     Args:
         command: The shell command to run
+        wd: The working directory to run the command in
 
     Returns:
         The command output or an error message.
```
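`query_npcsh_database` gates input with a simple prefix check. That blocks obvious writes, but the guard has limits worth noting: a read-only CTE beginning with `WITH` is rejected, while a stacked statement that merely starts with `SELECT` passes the check (though sqlite3 itself typically refuses multi-statement strings). A standalone sketch of the guard as written:

```python
def is_allowed(sql_query: str) -> bool:
    # Same check as query_npcsh_database: strip whitespace, compare
    # case-insensitively, accept only statements starting with SELECT.
    return sql_query.strip().upper().startswith('SELECT')

print(is_allowed("  select * from conversation_history limit 5"))  # True
print(is_allowed("DELETE FROM memories"))                          # False
print(is_allowed("WITH t AS (SELECT 1) SELECT * FROM t"))          # False: read-only, still rejected
print(is_allowed("SELECT 1; DROP TABLE memories"))                 # True: prefix check alone misses this
```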
```diff
--- a/npcsh/mcp_server.py
+++ b/npcsh/mcp_server.py
@@ -61,7 +177,7 @@ async def run_server_command(command: str) -> str:
     try:
         result = subprocess.run(
             command,
-            cwd=
+            cwd=wd,
             shell=True,
             capture_output=True,
             text=True,
@@ -147,11 +263,7 @@ print("Loading tools from npcpy modules...")
 
 
 def register_selected_npcpy_tools():
-    tools = [
-        abstract,
-        extract_facts,
-        zoom_in,
-        execute_llm_command,
+    tools = [
         gen_image,
         load_file_contents,
         capture_screenshot,
```
@@ -78,7 +78,7 @@ extra_files = package_files("npcsh/npc_team/")
|
|
|
78
78
|
|
|
79
79
|
setup(
|
|
80
80
|
name="npcsh",
|
|
81
|
-
version="1.0.
|
|
81
|
+
version="1.0.37",
|
|
82
82
|
packages=find_packages(exclude=["tests*"]),
|
|
83
83
|
install_requires=base_requirements, # Only install base requirements by default
|
|
84
84
|
extras_require={
|
|
@@ -96,7 +96,7 @@ setup(
|
|
|
96
96
|
"pti=npcsh.pti:main",
|
|
97
97
|
"guac=npcsh.guac:main",
|
|
98
98
|
"wander=npcsh.wander:main",
|
|
99
|
-
"spool=npcsh.spool:main",
|
|
99
|
+
"spool=npcsh.spool:main",
|
|
100
100
|
],
|
|
101
101
|
},
|
|
102
102
|
author="Christopher Agostino",
|
|