npcpy 1.3.2__py3-none-any.whl → 1.3.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
npcpy/ft/diff.py CHANGED
@@ -3,7 +3,7 @@ try:
     import torch.nn as nn
     import torch.nn.functional as F
     from torch.utils.data import DataLoader, Dataset as TorchDataset
-    from transformers import CLIPTextModel, CLIPTokenizer
+
     TORCH_AVAILABLE = True
 except ImportError:
     torch = None
npcpy/gen/response.py CHANGED
@@ -297,6 +297,16 @@ def get_ollama_response(
         last_user_idx = len(messages) - 1
         messages[last_user_idx]["images"] = image_paths
 
+    # Ollama's pydantic model requires tool_calls arguments to be dicts, not strings
+    for msg in messages:
+        if msg.get("tool_calls"):
+            for tc in msg["tool_calls"]:
+                if tc.get("function") and isinstance(tc["function"].get("arguments"), str):
+                    try:
+                        tc["function"]["arguments"] = json.loads(tc["function"]["arguments"])
+                    except (json.JSONDecodeError, TypeError):
+                        tc["function"]["arguments"] = {}
+
     api_params = {
         "model": model,
         "messages": messages,
@@ -387,7 +397,7 @@ def get_ollama_response(
         return result
 
 
-
+    print('Debug', api_params)
     res = ollama.chat(**api_params, options=options)
     result["raw_response"] = res
 
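The normalization hunk above exists because the Ollama Python client validates messages with pydantic and rejects tool-call arguments passed as JSON strings, the form OpenAI-style APIs emit. A minimal standalone sketch of the same idea, assuming a hypothetical messages list (not from the package):

import json

messages = [{
    "role": "assistant",
    "content": "",
    "tool_calls": [{"function": {"name": "search", "arguments": '{"query": "npcpy"}'}}],
}]

for msg in messages:
    for tc in msg.get("tool_calls") or []:
        fn = tc.get("function")
        if fn and isinstance(fn.get("arguments"), str):
            try:
                fn["arguments"] = json.loads(fn["arguments"])  # decode to the dict form Ollama accepts
            except (json.JSONDecodeError, TypeError):
                fn["arguments"] = {}  # fall back to empty arguments on bad JSON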
npcpy/llm_funcs.py CHANGED
@@ -14,6 +14,8 @@ from npcpy.npc_sysenv import (
     request_user_input,
     get_system_message
 )
+
+
 from npcpy.gen.response import get_litellm_response
 from npcpy.gen.image_gen import generate_image
 from npcpy.gen.video_gen import generate_video_diffusers, generate_video_veo3
@@ -216,7 +218,7 @@ def get_llm_response(
 
     def _context_suffix(ctx):
         if ctx is not None:
-            return f'User Provided Context: {ctx}'
+            return f'\n\n\nUser Provided Context: {ctx}'
         return ''
 
     def _build_messages(base_messages, sys_msg, prompt_text, ctx_suffix):
@@ -564,6 +566,7 @@ def check_llm_command(
     extra_globals=None,
     max_iterations: int = 5,
     jinxs: Dict = None,
+    tool_capable: bool = None,  # If None, will be auto-detected
 ):
     """
     Simple agent loop: try tool calling first, fall back to ReAct if unsupported.
@@ -571,27 +574,21 @@ def check_llm_command(
     if messages is None:
         messages = []
 
-    # Log incoming messages
-    import logging
-    logger = logging.getLogger("npcpy.llm_funcs")
-    logger.debug(f"[check_llm_command] Received {len(messages)} messages")
-    for i, msg in enumerate(messages[-5:]):  # Log last 5 messages
-        role = msg.get('role', 'unknown')
-        content = msg.get('content', '')
-        content_preview = content[:100] if isinstance(content, str) else str(type(content))
-        logger.debug(f"  [{i}] role={role}, content_preview={content_preview}...")
-
     total_usage = {"input_tokens": 0, "output_tokens": 0}
+
     # Use provided jinxs or get from npc/team
     if jinxs is None:
         jinxs = _get_jinxs(npc, team)
-    tools = _jinxs_to_tools(jinxs) if jinxs else None
+
+    # Only prepare tools if model supports them
+    tools = None
+    if tool_capable is not False and jinxs:
+        tools = _jinxs_to_tools(jinxs)
 
     # Keep full message history, only truncate for API calls to reduce tokens
     full_messages = messages.copy() if messages else []
-    logger.debug(f"[check_llm_command] full_messages initialized with {len(full_messages)} messages")
 
-    # Try with native tool calling first
+    # Make LLM call (with or without tools based on tool_capable)
 
     try:
         response = get_llm_response(
@@ -609,7 +606,7 @@ def check_llm_command(
             tools=tools,
         )
     except Exception as e:
-        print(colored(f"[check_llm_command] EXCEPTION in get_llm_response: {type(e).__name__}: {e}", "red"))
+        print(f"[check_llm_command] EXCEPTION in get_llm_response: {type(e).__name__}: {e}", "red")
         return {
             "messages": full_messages,
            "output": f"LLM call failed: {e}",
@@ -617,8 +614,6 @@
             "usage": total_usage,
         }
 
-    if response.get("error"):
-        logger.warning(f"[check_llm_command] Error in response: {response.get('error')}")
 
     if response.get("usage"):
         total_usage["input_tokens"] += response["usage"].get("input_tokens", 0)
@@ -634,7 +629,6 @@
     # For streaming, the caller (process_result) handles appending after consumption
     if assistant_response and isinstance(assistant_response, str):
         full_messages.append({"role": "assistant", "content": assistant_response})
-        logger.debug(f"[check_llm_command] No tool calls - returning {len(full_messages)} messages")
     return {
         "messages": full_messages,
         "output": assistant_response,
@@ -670,8 +664,7 @@
     assistant_msg["tool_calls"] = _serialize_tool_calls(tool_calls)
     full_messages.append(assistant_msg)
     current_messages = full_messages
-    logger.debug(f"[check_llm_command] Tool calls detected - current_messages has {len(current_messages)} messages")
-    for iteration in range(max_iterations):
+    for _ in range(max_iterations):
         for tc in tool_calls:
             # Handle both dict and object formats
             if hasattr(tc, 'function'):
@@ -692,15 +685,12 @@
 
             if jinx_name in jinxs:
                 try:
-                    from termcolor import colored
-                    print(colored(f" ⚡ {jinx_name}", "cyan"), end="", flush=True)
+
+                    print((f" ⚡ {jinx_name}", "cyan"), end="", flush=True)
                 except:
                     pass
                 output = _execute_jinx(jinxs[jinx_name], inputs, npc, team, current_messages, extra_globals)
-                try:
-                    print(colored(" ✓", "green"), flush=True)
-                except:
-                    pass
+
 
                 # Add tool result to messages
                 # Include name for Gemini compatibility
@@ -745,8 +735,6 @@
                 )
             except Exception as e:
                 # If continuation fails, return what we have so far
-                # The tool was already executed successfully
-                logger.warning(f"[check_llm_command] Continuation failed: {e}")
                 return {
                     "messages": current_messages,
                     "output": f"Tool executed successfully. (Continuation error: {type(e).__name__})",
@@ -767,14 +755,12 @@
 
         if not tool_calls:
             # Done - return full message history
-            logger.debug(f"[check_llm_command] Tool loop done - returning {len(current_messages)} messages")
             return {
                 "messages": current_messages,
                 "output": assistant_response,
                 "usage": total_usage,
             }
 
-    logger.debug(f"[check_llm_command] Max iterations - returning {len(current_messages)} messages")
     return {
         "messages": current_messages,
         "output": response.get("response", "Max iterations reached"),
@@ -867,16 +853,11 @@ Use EXACT parameter names from the tool definitions above."""
             context = f"Error: '{jinx_name}' not found. Available: {list(jinxs.keys())}"
             continue
 
-        try:
-            from termcolor import colored
-            print(colored(f" ⚡ {jinx_name}", "cyan"), end="", flush=True)
-        except:
-            pass
+
+
         output = _execute_jinx(jinxs[jinx_name], inputs, npc, team, current_messages, extra_globals)
-        try:
-            print(colored(" ✓", "green"), flush=True)
-        except:
-            pass
+
+
         context = f"Tool '{jinx_name}' returned: {output}"
         command = f"{command}\n\nPrevious: {context}"
 
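Taken together, the llm_funcs.py hunks make tool preparation conditional: when tool_capable is False the jinxs are never converted to tool schemas, so the call falls through to the ReAct-style loop. A standalone sketch that mirrors the gate (the schema shape here is illustrative, not the package's _jinxs_to_tools output):

def prepare_tools(jinxs, tool_capable=None):
    # Mirror of the gate above: None means "try tools", False means "skip them".
    if tool_capable is not False and jinxs:
        return [{"type": "function", "function": {"name": name}} for name in jinxs]
    return None

print(prepare_tools({"search": object()}))                       # -> one tool schema
print(prepare_tools({"search": object()}, tool_capable=False))   # -> None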
npcpy/memory/command_history.py CHANGED
@@ -487,7 +487,10 @@ class CommandHistory:
             Column('model', String(100)),
             Column('provider', String(100)),
             Column('npc', String(100)),
-            Column('team', String(100))
+            Column('team', String(100)),
+            Column('reasoning_content', Text),  # For thinking tokens / chain of thought
+            Column('tool_calls', Text),  # JSON array of tool calls made by assistant
+            Column('tool_results', Text)  # JSON array of tool call results
         )
 
         Table('message_attachments', metadata,
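Because the three columns are only declared on the SQLAlchemy Table, databases created by 1.3.2 or earlier won't have them. A hedged sketch of a one-off SQLite migration for an existing conversation_history table (column names come from the hunk above; the database path is hypothetical and this migration is not part of the package):

import sqlite3

conn = sqlite3.connect("npc_history.db")  # hypothetical path
existing = {row[1] for row in conn.execute("PRAGMA table_info(conversation_history)")}
for col in ("reasoning_content", "tool_calls", "tool_results"):
    if col not in existing:
        # TEXT matches the sqlalchemy Text columns declared above
        conn.execute(f"ALTER TABLE conversation_history ADD COLUMN {col} TEXT")
conn.commit()
conn.close()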
@@ -719,31 +722,41 @@ class CommandHistory:
 
 
     def add_conversation(
-        self,
+        self,
         message_id,
         timestamp,
-        role,
-        content,
-        conversation_id,
+        role,
+        content,
+        conversation_id,
         directory_path,
-        model=None,
-        provider=None,
-        npc=None,
+        model=None,
+        provider=None,
+        npc=None,
         team=None,
         attachments=None,
+        reasoning_content=None,
+        tool_calls=None,
+        tool_results=None,
     ):
         if isinstance(content, (dict, list)):
             content = json.dumps(content, cls=CustomJSONEncoder)
 
+        # Serialize tool_calls and tool_results as JSON
+        if tool_calls is not None and not isinstance(tool_calls, str):
+            tool_calls = json.dumps(tool_calls, cls=CustomJSONEncoder)
+        if tool_results is not None and not isinstance(tool_results, str):
+            tool_results = json.dumps(tool_results, cls=CustomJSONEncoder)
+
         stmt = """
         INSERT INTO conversation_history
-        (message_id, timestamp, role, content, conversation_id, directory_path, model, provider, npc, team)
-        VALUES (:message_id, :timestamp, :role, :content, :conversation_id, :directory_path, :model, :provider, :npc, :team)
+        (message_id, timestamp, role, content, conversation_id, directory_path, model, provider, npc, team, reasoning_content, tool_calls, tool_results)
+        VALUES (:message_id, :timestamp, :role, :content, :conversation_id, :directory_path, :model, :provider, :npc, :team, :reasoning_content, :tool_calls, :tool_results)
         """
         params = {
             "message_id": message_id, "timestamp": timestamp, "role": role, "content": content,
             "conversation_id": conversation_id, "directory_path": directory_path, "model": model,
-            "provider": provider, "npc": npc, "team": team
+            "provider": provider, "npc": npc, "team": team, "reasoning_content": reasoning_content,
+            "tool_calls": tool_calls, "tool_results": tool_results
         }
         with self.engine.begin() as conn:
             conn.execute(text(stmt), params)
@@ -756,7 +769,7 @@ class CommandHistory:
                     attachment_type=attachment.get("type"),
                     data=attachment.get("data"),
                     size=attachment.get("size"),
-                    file_path=attachment.get("path")
+                    file_path=attachment.get("path")
                 )
 
         return message_id
@@ -1084,16 +1097,28 @@ class CommandHistory:
     def get_conversations_by_id(self, conversation_id: str) -> List[Dict[str, Any]]:
         stmt = """
         SELECT id, message_id, timestamp, role, content, conversation_id,
-               directory_path, model, provider, npc, team
-        FROM conversation_history WHERE conversation_id = :conversation_id
+               directory_path, model, provider, npc, team,
+               reasoning_content, tool_calls, tool_results
+        FROM conversation_history WHERE conversation_id = :conversation_id
         ORDER BY timestamp ASC
         """
         results = self._fetch_all(stmt, {"conversation_id": conversation_id})
-
+
         for message_dict in results:
             attachments = self.get_message_attachments(message_dict["message_id"])
             if attachments:
                 message_dict["attachments"] = attachments
+            # Parse JSON fields
+            if message_dict.get("tool_calls"):
+                try:
+                    message_dict["tool_calls"] = json.loads(message_dict["tool_calls"])
+                except (json.JSONDecodeError, TypeError):
+                    pass
+            if message_dict.get("tool_results"):
+                try:
+                    message_dict["tool_results"] = json.loads(message_dict["tool_results"])
+                except (json.JSONDecodeError, TypeError):
+                    pass
         return results
 
     def get_npc_conversation_stats(self, start_date=None, end_date=None) -> pd.DataFrame:
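The store/load pair is symmetric: add_conversation serializes tool_calls and tool_results with json.dumps, and get_conversations_by_id decodes them back, leaving the raw string in place when decoding fails. A minimal round-trip illustration using plain json, independent of the class:

import json

tool_calls = [{"id": "call_0", "function": {"name": "search", "arguments": {"query": "npcpy"}}}]

stored = json.dumps(tool_calls)   # what lands in the TEXT column
loaded = json.loads(stored)       # what get_conversations_by_id hands back
assert loaded == tool_calls

try:
    json.loads("not json")        # a corrupt row would raise...
except json.JSONDecodeError:
    pass                          # ...and the reader keeps the raw string instead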
@@ -1295,9 +1320,13 @@ def save_conversation_message(
     team: str = None,
     attachments: List[Dict] = None,
     message_id: str = None,
+    reasoning_content: str = None,
+    tool_calls: List[Dict] = None,
+    tool_results: List[Dict] = None,
 ):
     """
     Saves a conversation message linked to a conversation ID with optional attachments.
+    Now also supports reasoning_content, tool_calls, and tool_results.
     """
     if wd is None:
         wd = os.getcwd()
@@ -1307,17 +1336,20 @@
 
 
     return command_history.add_conversation(
-        message_id,
-        timestamp,
-        role,
-        content,
-        conversation_id,
-        wd,
-        model=model,
-        provider=provider,
-        npc=npc,
-        team=team,
-        attachments=attachments)
+        message_id,
+        timestamp,
+        role,
+        content,
+        conversation_id,
+        wd,
+        model=model,
+        provider=provider,
+        npc=npc,
+        team=team,
+        attachments=attachments,
+        reasoning_content=reasoning_content,
+        tool_calls=tool_calls,
+        tool_results=tool_results)
 def retrieve_last_conversation(
     command_history: CommandHistory, conversation_id: str
 ) -> str:
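A sketch of calling the extended save_conversation_message with the new fields; the handle construction and concrete values are illustrative, and the exact constructor arguments may differ from what's shown:

from npcpy.memory.command_history import CommandHistory, save_conversation_message

command_history = CommandHistory()  # illustrative; real setup may pass a db path
save_conversation_message(
    command_history,
    conversation_id="conv-123",
    role="assistant",
    content="Here is what I found.",
    model="llama3.2",
    provider="ollama",
    reasoning_content="The user asked for X, so I ran the search tool first.",
    tool_calls=[{"id": "call_0", "function_name": "search", "arguments": '{"query": "npcpy"}'}],
    tool_results=[{"tool_call_id": "call_0", "name": "search", "content": "3 results"}],
)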
npcpy/serve.py CHANGED
@@ -216,12 +216,14 @@ class MCPClientNPC:
         self._exit_stack = None
 
 
-def get_llm_response_with_handling(prompt, npc, messages, tools, stream, team, context=None):
+def get_llm_response_with_handling(prompt, npc,model, provider, messages, tools, stream, team, context=None):
    """Unified LLM response with basic exception handling (inlined from corca to avoid that dependency)."""
    try:
        return get_llm_response(
            prompt=prompt,
            npc=npc,
+            model=model,
+            provider=provider,
            messages=messages,
            tools=tools,
            auto_process_tool_calls=False,
@@ -234,6 +236,8 @@ def get_llm_response_with_handling(prompt, npc, messages, tools, stream, team, c
        return get_llm_response(
            prompt=prompt,
            npc=npc,
+            model=model,
+            provider=provider,
            messages=messages,
            tools=tools,
            auto_process_tool_calls=False,
@@ -241,6 +245,7 @@ def get_llm_response_with_handling(prompt, npc, messages, tools, stream, team, c
            team=team,
            context=context
        )
+
 class MCPServerManager:
     """
     Simple in-process tracker for launching/stopping MCP servers.
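The helper above is a try-once, retry-once wrapper that now threads model and provider through explicitly instead of relying on the NPC's defaults. A self-contained sketch of that shape (names are illustrative, not the package's):

def call_with_fallback(primary, fallback):
    # Try the primary call; on any failure, log and retry once with the fallback.
    try:
        return primary()
    except Exception as exc:
        print(f"first attempt failed: {exc}")
        return fallback()

print(call_with_fallback(lambda: 1 / 0, lambda: "fallback result"))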
@@ -2920,11 +2925,7 @@ def get_mcp_tools():
     )
     server_path = os.path.abspath(os.path.expanduser(resolved_path))
 
-    try:
-        from npcsh.corca import MCPClientNPC
-    except ImportError:
-        return jsonify({"error": "MCP Client (npcsh.corca) not available. Ensure npcsh.corca is installed and importable."}), 500
-
+    # MCPClientNPC is defined inline at the top of this file
     temp_mcp_client = None
     jinx_tools = []
     try:
@@ -3473,61 +3474,6 @@ def stream():
                 **tool_args
             )
             messages = stream_response.get('messages', messages)
-
-        elif exe_mode == 'npcsh':
-            from npcsh._state import execute_command, initial_state
-            from npcsh.routes import router
-            initial_state.model = model
-            initial_state.provider = provider
-            initial_state.npc = npc_object
-            initial_state.team = team_object
-            initial_state.messages = messages
-            initial_state.command_history = command_history
-
-            state, stream_response = execute_command(
-                commandstr,
-                initial_state, router=router)
-            messages = state.messages
-
-        elif exe_mode == 'guac':
-            from npcsh.guac import execute_guac_command
-            from npcsh.routes import router
-            from npcsh._state import initial_state
-            from pathlib import Path
-            import pandas as pd, numpy as np, matplotlib.pyplot as plt
-
-            if not hasattr(app, 'guac_locals'):
-                app.guac_locals = {}
-
-            if conversation_id not in app.guac_locals:
-                app.guac_locals[conversation_id] = {
-                    'pd': pd,
-                    'np': np,
-                    'plt': plt,
-                    'datetime': datetime,
-                    'Path': Path,
-                    'os': os,
-                    'sys': sys,
-                    'json': json
-                }
-
-            initial_state.model = model
-            initial_state.provider = provider
-            initial_state.npc = npc_object
-            initial_state.team = team_object
-            initial_state.messages = messages
-            initial_state.command_history = command_history
-
-            state, stream_response = execute_guac_command(
-                commandstr,
-                initial_state,
-                app.guac_locals[conversation_id],
-                "guac",
-                Path.cwd() / "npc_team",
-                router
-            )
-            messages = state.messages
-
         elif exe_mode == 'tool_agent':
             mcp_server_path_from_request = data.get("mcpServerPath")
             selected_mcp_tools_from_request = data.get("selectedMcpTools", [])
@@ -3576,7 +3522,6 @@ def stream():
 
             mcp_client = app.mcp_clients[state_key]["client"]
             messages = app.mcp_clients[state_key].get("messages", messages)
-
            def stream_mcp_sse():
                nonlocal messages
                iteration = 0
@@ -3599,17 +3544,21 @@ def stream():
                llm_response = get_llm_response_with_handling(
                    prompt=prompt,
                    npc=npc_object,
+                    model=model,
+                    provider=provider,
                    messages=messages,
                    tools=tools_for_llm,
                    stream=True,
                    team=team_object,
                    context=f' The users working directory is {current_path}'
                )
+                print('RESPONSE', llm_response)
 
                stream = llm_response.get("response", [])
                messages = llm_response.get("messages", messages)
                collected_content = ""
                collected_tool_calls = []
+                agent_tool_call_data = {"id": None, "function_name": None, "arguments": ""}
 
                for response_chunk in stream:
                    with cancellation_lock:
@@ -3617,7 +3566,62 @@
                            yield {"type": "interrupt"}
                            return
 
-                    if hasattr(response_chunk, "choices") and response_chunk.choices:
+                    if "hf.co" in model or provider == 'ollama' and 'gpt-oss' not in model:
+                        # Ollama returns ChatResponse objects - support both attribute and dict access
+                        msg = getattr(response_chunk, "message", None) or (response_chunk.get("message", {}) if hasattr(response_chunk, "get") else {})
+                        chunk_content = getattr(msg, "content", None) or (msg.get("content") if hasattr(msg, "get") else "") or ""
+                        # Extract Ollama thinking/reasoning tokens
+                        reasoning_content = getattr(msg, "thinking", None) or (msg.get("thinking") if hasattr(msg, "get") else None)
+                        # Handle tool calls with robust attribute/dict access
+                        tool_calls = getattr(msg, "tool_calls", None) or (msg.get("tool_calls") if hasattr(msg, "get") else None)
+                        if tool_calls:
+                            for tool_call in tool_calls:
+                                tc_id = getattr(tool_call, "id", None) or (tool_call.get("id") if hasattr(tool_call, "get") else None)
+                                tc_func = getattr(tool_call, "function", None) or (tool_call.get("function") if hasattr(tool_call, "get") else None)
+                                if tc_func:
+                                    tc_name = getattr(tc_func, "name", None) or (tc_func.get("name") if hasattr(tc_func, "get") else None)
+                                    tc_args = getattr(tc_func, "arguments", None) or (tc_func.get("arguments") if hasattr(tc_func, "get") else None)
+                                    if tc_name:
+                                        arg_str = tc_args
+                                        if isinstance(arg_str, dict):
+                                            arg_str = json.dumps(arg_str)
+                                        elif arg_str is None:
+                                            arg_str = "{}"
+                                        # Add to collected_tool_calls for Ollama
+                                        collected_tool_calls.append({
+                                            "id": tc_id or f"call_{len(collected_tool_calls)}",
+                                            "type": "function",
+                                            "function": {"name": tc_name, "arguments": arg_str}
+                                        })
+                        if chunk_content:
+                            collected_content += chunk_content
+                        # Extract other fields with robust access
+                        created_at = getattr(response_chunk, "created_at", None) or (response_chunk.get("created_at") if hasattr(response_chunk, "get") else None)
+                        model_name = getattr(response_chunk, "model", None) or (response_chunk.get("model") if hasattr(response_chunk, "get") else model)
+                        msg_role = getattr(msg, "role", None) or (msg.get("role") if hasattr(msg, "get") else "assistant")
+                        done_reason = getattr(response_chunk, "done_reason", None) or (response_chunk.get("done_reason") if hasattr(response_chunk, "get") else None)
+
+                        # Build chunk_data with proper structure
+                        chunk_data = {
+                            "id": None,
+                            "object": None,
+                            "created": str(created_at) if created_at else datetime.datetime.now().isoformat(),
+                            "model": model_name,
+                            "choices": [
+                                {
+                                    "index": 0,
+                                    "delta": {
+                                        "content": chunk_content,
+                                        "role": msg_role,
+                                        "reasoning_content": reasoning_content
+                                    },
+                                    "finish_reason": done_reason
+                                }
+                            ]
+                        }
+                        yield chunk_data
+
+                    elif hasattr(response_chunk, "choices") and response_chunk.choices:
                        delta = response_chunk.choices[0].delta
                        if hasattr(delta, "content") and delta.content:
                            collected_content += delta.content
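The new branch converts Ollama's ChatResponse chunks into the same OpenAI-style shape the downstream SSE writer already understands. A hedged sketch of that target shape for a single streamed delta (field values are illustrative):

import datetime, json

chunk_data = {
    "id": None,
    "object": None,
    "created": datetime.datetime.now().isoformat(),
    "model": "llama3.2",                   # illustrative model name
    "choices": [
        {
            "index": 0,
            "delta": {
                "content": "Hello",        # token text from message.content
                "role": "assistant",
                "reasoning_content": None, # filled from message.thinking when present
            },
            "finish_reason": None,         # Ollama's done_reason once the stream ends
        }
    ],
}
print(json.dumps(chunk_data, indent=2))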
@@ -3698,55 +3702,41 @@ def stream():
                    try:
                        jinx_ctx = jinx_obj.execute(
                            input_values=tool_args if isinstance(tool_args, dict) else {},
-                            npc=npc_object,
-                            messages=messages
+                            npc=npc_object
                        )
-                        tool_content = str(jinx_ctx.get("output", jinx_ctx))
-                        print(f"[MCP] jinx tool_complete {tool_name}")
+                        tool_content = str(jinx_ctx)
                    except Exception as e:
-                        raise Exception(f"Jinx execution failed: {e}")
+                        tool_content = f"Jinx execution error: {str(e)}"
                else:
-                    try:
-                        loop = asyncio.get_event_loop()
-                    except RuntimeError:
-                        loop = asyncio.new_event_loop()
-                        asyncio.set_event_loop(loop)
-                    if loop.is_closed():
-                        loop = asyncio.new_event_loop()
-                        asyncio.set_event_loop(loop)
-                    mcp_result = loop.run_until_complete(
-                        mcp_client.session.call_tool(tool_name, tool_args)
-                    ) if mcp_client else {"error": "No MCP client"}
-                    if hasattr(mcp_result, "content") and mcp_result.content:
-                        for content_item in mcp_result.content:
-                            if hasattr(content_item, "text"):
-                                tool_content += content_item.text
-                            elif hasattr(content_item, "data"):
-                                tool_content += str(content_item.data)
+                    # Execute via MCP client
+                    if mcp_client and tool_name in mcp_client.tool_map:
+                        try:
+                            tool_func = mcp_client.tool_map[tool_name]
+                            result = tool_func(**(tool_args if isinstance(tool_args, dict) else {}))
+                            # Handle MCP CallToolResult
+                            if hasattr(result, 'content'):
+                                tool_content = str(result.content[0].text) if result.content else str(result)
                            else:
-                                tool_content += str(content_item)
+                                tool_content = str(result)
+                        except Exception as mcp_e:
+                            tool_content = f"MCP tool error: {str(mcp_e)}"
                    else:
-                        tool_content = str(mcp_result)
-
-                    tool_results.append({
+                        tool_content = f"Tool '{tool_name}' not found in MCP server or Jinxs"
+
+                messages.append({
                    "role": "tool",
                    "tool_call_id": tool_id,
                    "name": tool_name,
                    "content": tool_content
                })
+
+                print(f"[MCP] tool_result {tool_name}: {tool_content}")
+                yield {"type": "tool_result", "name": tool_name, "id": tool_id, "result": tool_content}
 
-                print(f"[MCP] tool_complete {tool_name}")
-                yield {"type": "tool_complete", "name": tool_name, "id": tool_id, "result_preview": tool_content[:4000]}
            except Exception as e:
-                err_msg = f"Error executing {tool_name}: {e}"
-                tool_results.append({
-                    "role": "tool",
-                    "tool_call_id": tool_id,
-                    "name": tool_name,
-                    "content": err_msg
-                })
-                print(f"[MCP] tool_error {tool_name}: {e}")
-                yield {"type": "tool_error", "name": tool_name, "id": tool_id, "error": str(e)}
+                error_msg = f"Tool execution error: {str(e)}"
+                print(f"[MCP] tool_error {tool_name}: {error_msg}")
+                yield {"type": "tool_error", "name": tool_name, "id": tool_id, "error": error_msg}
 
            serialized_tool_calls = []
            for tc in collected_tool_calls:
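The rework replaces the asyncio event-loop juggling with a synchronous lookup in the client's tool_map: anything exposing a .content list of text items is unwrapped, everything else is stringified. A self-contained sketch of that dispatch pattern (SimpleNamespace stands in for an MCP CallToolResult; names are illustrative):

from types import SimpleNamespace

tool_map = {"echo": lambda **kw: SimpleNamespace(content=[SimpleNamespace(text=str(kw))])}

def dispatch(tool_name, tool_args):
    if tool_name not in tool_map:
        return f"Tool '{tool_name}' not found"
    try:
        result = tool_map[tool_name](**(tool_args if isinstance(tool_args, dict) else {}))
    except Exception as exc:
        return f"tool error: {exc}"
    # unwrap CallToolResult-like objects; fall back to str() otherwise
    if hasattr(result, "content"):
        return str(result.content[0].text) if result.content else str(result)
    return str(result)

print(dispatch("echo", {"msg": "hi"}))   # -> "{'msg': 'hi'}"
print(dispatch("missing", {}))           # -> "Tool 'missing' not found"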
@@ -3770,14 +3760,12 @@ def stream():
                    "content": collected_content,
                    "tool_calls": serialized_tool_calls
                })
-                messages.extend(tool_results)
                tool_results_for_db = tool_results
 
                prompt = ""
 
                app.mcp_clients[state_key]["messages"] = messages
                return
-
            stream_response = stream_mcp_sse()
 
        else:
@@ -3814,6 +3802,8 @@ def stream():
 
     def event_stream(current_stream_id):
         complete_response = []
+        complete_reasoning = []  # Accumulate reasoning content
+        accumulated_tool_calls = []  # Accumulate all tool calls
         dot_count = 0
         interrupted = False
         tool_call_data = {"id": None, "function_name": None, "arguments": ""}
@@ -3839,17 +3829,30 @@ def stream():
                    content_piece = delta.get("content")
                    if content_piece:
                        complete_response.append(content_piece)
+                    # Accumulate reasoning content from generator chunks
+                    reasoning_piece = delta.get("reasoning_content")
+                    if reasoning_piece:
+                        complete_reasoning.append(reasoning_piece)
+                    # Accumulate tool calls from generator chunks
+                    if chunk.get("type") == "tool_call":
+                        tc = chunk.get("tool_call", {})
+                        if tc.get("id") and tc.get("name"):
+                            accumulated_tool_calls.append({
+                                "id": tc.get("id"),
+                                "function_name": tc.get("name"),
+                                "arguments": tc.get("arguments", "")
+                            })
+                    if chunk.get("type") == "tool_result":
+                        tool_results_for_db.append({
+                            "name": chunk.get("name"),
+                            "tool_call_id": chunk.get("id"),
+                            "content": chunk.get("result", "")
+                        })
                    continue
                yield f"data: {json.dumps({'choices':[{'delta':{'content': str(chunk), 'role': 'assistant'},'finish_reason':None}]})}\n\n"
-            # ensure stream termination and cleanup for generator flows
-            yield "data: [DONE]\n\n"
-            with cancellation_lock:
-                if current_stream_id in cancellation_flags:
-                    del cancellation_flags[current_stream_id]
-                    print(f"Cleaned up cancellation flag for stream ID: {current_stream_id}")
-            return
+            # Generator finished - skip the other stream handling paths
 
-        if isinstance(stream_response, str) :
+        elif isinstance(stream_response, str) :
            print('stream a str and not a gen')
            chunk_data = {
                "id": None,
@@ -3869,7 +3872,7 @@ def stream():
                ]
            }
            yield f"data: {json.dumps(chunk_data)}\n\n"
-            return
+
        elif isinstance(stream_response, dict) and 'output' in stream_response and isinstance(stream_response.get('output'), str):
            print('stream a str and not a gen')
            chunk_data = {
@@ -3890,78 +3893,97 @@ def stream():
                ]
            }
            yield f"data: {json.dumps(chunk_data)}\n\n"
-            return
-        for response_chunk in stream_response.get('response', stream_response.get('output')):
-            with cancellation_lock:
-                if cancellation_flags.get(current_stream_id, False):
-                    print(f"Cancellation flag triggered for {current_stream_id}. Breaking loop.")
-                    interrupted = True
-                    break
 
-            print('.', end="", flush=True)
-            dot_count += 1
-            if "hf.co" in model or provider == 'ollama' and 'gpt-oss' not in model:
-                # Ollama returns ChatResponse objects - support both attribute and dict access
-                msg = getattr(response_chunk, "message", None) or response_chunk.get("message", {}) if hasattr(response_chunk, "get") else {}
-                chunk_content = getattr(msg, "content", None) or (msg.get("content") if hasattr(msg, "get") else "") or ""
-                # Extract Ollama thinking/reasoning tokens
-                thinking_content = getattr(msg, "thinking", None) or (msg.get("thinking") if hasattr(msg, "get") else None)
-                # Handle tool calls with robust attribute/dict access
-                tool_calls = getattr(msg, "tool_calls", None) or (msg.get("tool_calls") if hasattr(msg, "get") else None)
-                if tool_calls:
-                    for tool_call in tool_calls:
-                        tc_id = getattr(tool_call, "id", None) or (tool_call.get("id") if hasattr(tool_call, "get") else None)
-                        if tc_id:
-                            tool_call_data["id"] = tc_id
-                        tc_func = getattr(tool_call, "function", None) or (tool_call.get("function") if hasattr(tool_call, "get") else None)
-                        if tc_func:
-                            tc_name = getattr(tc_func, "name", None) or (tc_func.get("name") if hasattr(tc_func, "get") else None)
-                            if tc_name:
-                                tool_call_data["function_name"] = tc_name
-                            tc_args = getattr(tc_func, "arguments", None) or (tc_func.get("arguments") if hasattr(tc_func, "get") else None)
-                            if tc_args:
-                                arg_val = tc_args
-                                if isinstance(arg_val, dict):
-                                    arg_val = json.dumps(arg_val)
-                                tool_call_data["arguments"] += arg_val
-                if chunk_content:
-                    complete_response.append(chunk_content)
-                # Extract other fields with robust access
-                created_at = getattr(response_chunk, "created_at", None) or (response_chunk.get("created_at") if hasattr(response_chunk, "get") else None)
-                model_name = getattr(response_chunk, "model", None) or (response_chunk.get("model") if hasattr(response_chunk, "get") else model)
-                msg_role = getattr(msg, "role", None) or (msg.get("role") if hasattr(msg, "get") else "assistant")
-                done_reason = getattr(response_chunk, "done_reason", None) or (response_chunk.get("done_reason") if hasattr(response_chunk, "get") else None)
-                chunk_data = {
-                    "id": None, "object": None,
-                    "created": created_at or datetime.datetime.now(),
-                    "model": model_name,
-                    "choices": [{"index": 0, "delta": {"content": chunk_content, "role": msg_role, "reasoning_content": thinking_content}, "finish_reason": done_reason}]
-                }
-                yield f"data: {json.dumps(chunk_data)}\n\n"
-            else:
-                chunk_content = ""
-                reasoning_content = ""
-                for choice in response_chunk.choices:
-                    if hasattr(choice.delta, "tool_calls") and choice.delta.tool_calls:
-                        for tool_call in choice.delta.tool_calls:
-                            if tool_call.id:
-                                tool_call_data["id"] = tool_call.id
-                            if tool_call.function:
-                                if hasattr(tool_call.function, "name") and tool_call.function.name:
-                                    tool_call_data["function_name"] = tool_call.function.name
-                                if hasattr(tool_call.function, "arguments") and tool_call.function.arguments:
-                                    tool_call_data["arguments"] += tool_call.function.arguments
-                for choice in response_chunk.choices:
-                    if hasattr(choice.delta, "reasoning_content"):
-                        reasoning_content += choice.delta.reasoning_content
-                chunk_content = "".join(choice.delta.content for choice in response_chunk.choices if choice.delta.content is not None)
-                if chunk_content:
-                    complete_response.append(chunk_content)
-                chunk_data = {
-                    "id": response_chunk.id, "object": response_chunk.object, "created": response_chunk.created, "model": response_chunk.model,
-                    "choices": [{"index": choice.index, "delta": {"content": choice.delta.content, "role": choice.delta.role, "reasoning_content": reasoning_content if hasattr(choice.delta, "reasoning_content") else None}, "finish_reason": choice.finish_reason} for choice in response_chunk.choices]
-                }
-                yield f"data: {json.dumps(chunk_data)}\n\n"
+        elif isinstance(stream_response, dict):
+            for response_chunk in stream_response.get('response', stream_response.get('output')):
+                with cancellation_lock:
+                    if cancellation_flags.get(current_stream_id, False):
+                        print(f"Cancellation flag triggered for {current_stream_id}. Breaking loop.")
+                        interrupted = True
+                        break
+
+                print('.', end="", flush=True)
+                dot_count += 1
+                if "hf.co" in model or provider == 'ollama' and 'gpt-oss' not in model:
+                    # Ollama returns ChatResponse objects - support both attribute and dict access
+                    msg = getattr(response_chunk, "message", None) or response_chunk.get("message", {}) if hasattr(response_chunk, "get") else {}
+                    chunk_content = getattr(msg, "content", None) or (msg.get("content") if hasattr(msg, "get") else "") or ""
+                    # Extract Ollama thinking/reasoning tokens
+                    reasoning_content = getattr(msg, "thinking", None) or (msg.get("thinking") if hasattr(msg, "get") else None)
+                    # Handle tool calls with robust attribute/dict access
+                    tool_calls = getattr(msg, "tool_calls", None) or (msg.get("tool_calls") if hasattr(msg, "get") else None)
+                    if tool_calls:
+                        for tool_call in tool_calls:
+                            tc_id = getattr(tool_call, "id", None) or (tool_call.get("id") if hasattr(tool_call, "get") else None)
+                            if tc_id:
+                                tool_call_data["id"] = tc_id
+                            tc_func = getattr(tool_call, "function", None) or (tool_call.get("function") if hasattr(tool_call, "get") else None)
+                            if tc_func:
+                                tc_name = getattr(tc_func, "name", None) or (tc_func.get("name") if hasattr(tc_func, "get") else None)
+                                if tc_name:
+                                    tool_call_data["function_name"] = tc_name
+                                tc_args = getattr(tc_func, "arguments", None) or (tc_func.get("arguments") if hasattr(tc_func, "get") else None)
+                                if tc_args:
+                                    arg_val = tc_args
+                                    if isinstance(arg_val, dict):
+                                        arg_val = json.dumps(arg_val)
+                                    tool_call_data["arguments"] += arg_val
+                                # Accumulate complete tool call info for DB storage (Ollama path)
+                                if tc_id and tc_func and tc_name:
+                                    accumulated_tool_calls.append({
+                                        "id": tc_id,
+                                        "function_name": tc_name,
+                                        "arguments": arg_val if tc_args else ""
+                                    })
+                    # Accumulate reasoning content
+                    if reasoning_content:
+                        complete_reasoning.append(reasoning_content)
+                    if chunk_content:
+                        complete_response.append(chunk_content)
+                    # Extract other fields with robust access
+                    created_at = getattr(response_chunk, "created_at", None) or (response_chunk.get("created_at") if hasattr(response_chunk, "get") else None)
+                    model_name = getattr(response_chunk, "model", None) or (response_chunk.get("model") if hasattr(response_chunk, "get") else model)
+                    msg_role = getattr(msg, "role", None) or (msg.get("role") if hasattr(msg, "get") else "assistant")
+                    done_reason = getattr(response_chunk, "done_reason", None) or (response_chunk.get("done_reason") if hasattr(response_chunk, "get") else None)
+                    chunk_data = {
+                        "id": None, "object": None,
+                        "created": created_at or datetime.datetime.now(),
+                        "model": model_name,
+                        "choices": [{"index": 0, "delta": {"content": chunk_content, "role": msg_role, "reasoning_content": reasoning_content}, "finish_reason": done_reason}]
+                    }
+                    yield f"data: {json.dumps(chunk_data)}\n\n"
+                else:
+                    chunk_content = ""
+                    reasoning_content = ""
+                    for choice in response_chunk.choices:
+                        if hasattr(choice.delta, "tool_calls") and choice.delta.tool_calls:
+                            for tool_call in choice.delta.tool_calls:
+                                if tool_call.id:
+                                    tool_call_data["id"] = tool_call.id
+                                if tool_call.function:
+                                    if hasattr(tool_call.function, "name") and tool_call.function.name:
+                                        tool_call_data["function_name"] = tool_call.function.name
+                                    if hasattr(tool_call.function, "arguments") and tool_call.function.arguments:
+                                        tool_call_data["arguments"] += tool_call.function.arguments
+                                    # Accumulate complete tool call info for DB storage
+                                    if tool_call.id and tool_call.function and tool_call.function.name:
+                                        accumulated_tool_calls.append({
+                                            "id": tool_call.id,
+                                            "function_name": tool_call.function.name,
+                                            "arguments": tool_call.function.arguments or ""
+                                        })
+                    for choice in response_chunk.choices:
+                        if hasattr(choice.delta, "reasoning_content") and choice.delta.reasoning_content:
+                            reasoning_content += choice.delta.reasoning_content
+                            complete_reasoning.append(choice.delta.reasoning_content)
+                    chunk_content = "".join(choice.delta.content for choice in response_chunk.choices if choice.delta.content is not None)
+                    if chunk_content:
+                        complete_response.append(chunk_content)
+                    chunk_data = {
+                        "id": response_chunk.id, "object": response_chunk.object, "created": response_chunk.created, "model": response_chunk.model,
+                        "choices": [{"index": choice.index, "delta": {"content": choice.delta.content, "role": choice.delta.role, "reasoning_content": reasoning_content if hasattr(choice.delta, "reasoning_content") else None}, "finish_reason": choice.finish_reason} for choice in response_chunk.choices]
+                    }
+                    yield f"data: {json.dumps(chunk_data)}\n\n"
 
        except Exception as e:
            print(f"\nAn exception occurred during streaming for {current_stream_id}: {e}")
@@ -4007,7 +4029,7 @@ def stream():
                message_id=generate_message_id(),
            )
 
-            # Save assistant message to the database
+            # Save assistant message to the database with reasoning content and tool calls
            npc_name_to_save = npc_object.name if npc_object else ''
            save_conversation_message(
                command_history,
@@ -4020,6 +4042,9 @@ def stream():
                npc=npc_name_to_save,
                team=team,
                message_id=message_id,
+                reasoning_content=''.join(complete_reasoning) if complete_reasoning else None,
+                tool_calls=accumulated_tool_calls if accumulated_tool_calls else None,
+                tool_results=tool_results_for_db if tool_results_for_db else None,
            )
 
            # Start background tasks for memory extraction and context compression
@@ -4181,11 +4206,24 @@ def get_conversation_messages(conversation_id):
    try:
        engine = get_db_connection()
        with engine.connect() as conn:
-
+
            query = text("""
                WITH ranked_messages AS (
                    SELECT
-                        ch.*,
+                        ch.id,
+                        ch.message_id,
+                        ch.timestamp,
+                        ch.role,
+                        ch.content,
+                        ch.conversation_id,
+                        ch.directory_path,
+                        ch.model,
+                        ch.provider,
+                        ch.npc,
+                        ch.team,
+                        ch.reasoning_content,
+                        ch.tool_calls,
+                        ch.tool_results,
                        GROUP_CONCAT(ma.id) as attachment_ids,
                        ROW_NUMBER() OVER (
                            PARTITION BY ch.role, strftime('%s', ch.timestamp)
@@ -4206,20 +4244,32 @@ def get_conversation_messages(conversation_id):
        result = conn.execute(query, {"conversation_id": conversation_id})
        messages = result.fetchall()
 
+        def parse_json_field(value):
+            """Parse a JSON string field, returning None if empty or invalid."""
+            if not value:
+                return None
+            try:
+                return json.loads(value)
+            except (json.JSONDecodeError, TypeError):
+                return None
+
        return jsonify(
            {
                "messages": [
                    {
-                        "message_id": msg[1] if len(msg) > 1 else None,
+                        "message_id": msg[1] if len(msg) > 1 else None,
                        "role": msg[3] if len(msg) > 3 else None,
                        "content": msg[4] if len(msg) > 4 else None,
-                        "timestamp": msg[5] if len(msg) > 5 else None,
-                        "model": msg[6] if len(msg) > 6 else None,
-                        "provider": msg[7] if len(msg) > 7 else None,
-                        "npc": msg[8] if len(msg) > 8 else None,
+                        "timestamp": msg[2] if len(msg) > 2 else None,
+                        "model": msg[7] if len(msg) > 7 else None,
+                        "provider": msg[8] if len(msg) > 8 else None,
+                        "npc": msg[9] if len(msg) > 9 else None,
+                        "reasoningContent": msg[11] if len(msg) > 11 else None,
+                        "toolCalls": parse_json_field(msg[12]) if len(msg) > 12 else None,
+                        "toolResults": parse_json_field(msg[13]) if len(msg) > 13 else None,
                        "attachments": (
                            get_message_attachments(msg[1])
-                            if len(msg) > 1 and msg[-1]
+                            if len(msg) > 1 and msg[14]  # attachment_ids is at index 14
                            else []
                        ),
                    }
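Enumerating the columns in the SELECT is what makes the positional msg[...] indexes stable: with ch.*, the attachment_ids position depended on the table's column count, which is why the old code reached for msg[-1]. A quick check of the index layout implied by the explicit column list above:

# Index layout implied by the explicit SELECT above:
COLUMNS = [
    "id", "message_id", "timestamp", "role", "content", "conversation_id",
    "directory_path", "model", "provider", "npc", "team",
    "reasoning_content", "tool_calls", "tool_results", "attachment_ids",
]
assert COLUMNS.index("timestamp") == 2 and COLUMNS.index("attachment_ids") == 14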
@@ -4263,31 +4313,19 @@ def ollama_status():
 @app.route("/api/ollama/tool_models", methods=["GET"])
 def get_ollama_tool_models():
     """
-    Best-effort detection of Ollama models whose templates include tool-call support.
-    We scan templates for tool placeholders; if none are found we assume tools are unsupported.
+    Returns all Ollama models. Tool capability detection is unreliable,
+    so we don't filter - let the user try and the backend will handle failures.
     """
     try:
         detected = []
         listing = ollama.list()
         for model in listing.get("models", []):
             name = getattr(model, "model", None) or model.get("name") if isinstance(model, dict) else None
-            if not name:
-                continue
-            try:
-                details = ollama.show(name)
-                tmpl = details.get("template") or ""
-                if "{{- if .Tools" in tmpl or "{{- range .Tools" in tmpl or "{{- if .ToolCalls" in tmpl:
-                    detected.append(name)
-                    continue
-                metadata = details.get("metadata") or {}
-                if metadata.get("tools") or metadata.get("tool_calls"):
-                    detected.append(name)
-            except Exception as inner_e:
-                print(f"Warning: could not inspect ollama model {name} for tool support: {inner_e}")
-                continue
+            if name:
+                detected.append(name)
         return jsonify({"models": detected, "error": None})
     except Exception as e:
-        print(f"Error listing Ollama tool-capable models: {e}")
+        print(f"Error listing Ollama models: {e}")
         return jsonify({"models": [], "error": str(e)}), 500
 
 
npcpy-1.3.4.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: npcpy
-Version: 1.3.2
+Version: 1.3.4
 Summary: npcpy is the premier open-source library for integrating LLMs and Agents into python systems.
 Home-page: https://github.com/NPC-Worldwide/npcpy
 Author: Christopher Agostino
npcpy-1.3.4.dist-info/RECORD CHANGED
@@ -1,12 +1,12 @@
 npcpy/__init__.py,sha256=uJcJGjR1mWvE69GySNAufkgiRwJA28zdObDBWaxp0tY,505
-npcpy/llm_funcs.py,sha256=KJpjN6q5iW_qdUfgt4tzYENCAu86376io8eFZ7wp76Y,78081
+npcpy/llm_funcs.py,sha256=sc-sXA48VpJFy7vWyh8M5ZY2HQ362le_BfAtDA3Gh7Y,76489
 npcpy/main.py,sha256=RWoRIj6VQLxKdOKvdVyaq2kwG35oRpeXPvp1CAAoG-w,81
 npcpy/ml_funcs.py,sha256=UI7k7JR4XOH_VXR-xxLaO4r9Kyx_jBaEnp3TUIY7ZLQ,22657
 npcpy/npc_array.py,sha256=fVTxcMiXV-lvltmuwaRnTU9D3ikPq3-7k5wzp7MA5OY,40224
 npcpy/npc_compiler.py,sha256=X2BjMqKL7hbS37PPkSDGgZSF_PF_GNVGLd92ePRNRwQ,111868
 npcpy/npc_sysenv.py,sha256=rtE3KrXvIuOEpMq1CW5eK5K0o3f6mXagNXCeMnhHob4,36736
 npcpy/npcs.py,sha256=eExuVsbTfrRobTRRptRpDm46jCLWUgbvy4_U7IUQo-c,744
-npcpy/serve.py,sha256=Nxigo7NR189RFDhdh3whVeWEyjTaopCzXfk6HsTJP4A,176384
+npcpy/serve.py,sha256=5S3v4lp3fPozsnp_48SuVenf3JTiSiIBRxgfAReovmM,181539
 npcpy/tools.py,sha256=A5_oVmZkzGnI3BI-NmneuxeXQq-r29PbpAZP4nV4jrc,5303
 npcpy/data/__init__.py,sha256=1tcoChR-Hjn905JDLqaW9ElRmcISCTJdE7BGXPlym2Q,642
 npcpy/data/audio.py,sha256=3qryGXnWHa4JFMonjuX-lf0fCrF8jmbHe7mHAuOdua0,12397
@@ -17,7 +17,7 @@ npcpy/data/text.py,sha256=jP0a1qZZaSJdK-LdZTn2Jjdxqmkd3efxDLEoxflJQeY,5010
 npcpy/data/video.py,sha256=H-V3mTu_ktD9u-QhYeo4aW3u9z0AtoAdRZmvRPEpE98,2887
 npcpy/data/web.py,sha256=ARGoVKUlQmaiX0zJbSvvFmRCwOv_Z7Pcan9c5GxYObQ,5117
 npcpy/ft/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-npcpy/ft/diff.py,sha256=OjdrVn_rFkFcP3MnzVgIhCdtfnbtnxQcvl5XCc6Wi-o,12376
+npcpy/ft/diff.py,sha256=2-NbY0p0CP5Qr9mnnncxRBwzmxRq9NKcl8B5BeT1vQ4,12319
 npcpy/ft/ge.py,sha256=0VzIiXq2wCzGcK1x0Wd-myJ3xRf-FNaPg0GkHEZegUM,3552
 npcpy/ft/memory_trainer.py,sha256=QZPznxEEwXbOGroHdMUMa5xpqlNwgV6nqOazI2xgrnQ,6635
 npcpy/ft/model_ensembler.py,sha256=BRX4hJ_rvF1vKTzjMhlahZqPttUgc3PqmzUJDqIfIps,10038
@@ -29,10 +29,10 @@ npcpy/gen/audio_gen.py,sha256=w4toESu7nmli1T5FOwRRCGC_QK9W-SMWknYYkbRv9jE,635
 npcpy/gen/embeddings.py,sha256=QStTJ2ELiC379OEZsLEgGGIIFD267Y8zQchs7HRn2Zg,2089
 npcpy/gen/image_gen.py,sha256=VflU_wJsKWJarOVwZtL2M8ymDFfKNz8WX66Rwk4obeo,21778
 npcpy/gen/ocr.py,sha256=rgmXWHrCYX1Po-qG_LrNFbVYEZ8aaupxFTgparcoB_Y,6554
-npcpy/gen/response.py,sha256=8oZRRmoh85RyR6sgGsk-H6cpXcCjkBsn-8Wix0mW3bE,40101
+npcpy/gen/response.py,sha256=lH3fR3Sx1Cm8Zc0MJyHzTuuwTjVPgSJUZVxnkFIhzLE,40643
 npcpy/gen/video_gen.py,sha256=RFi3Zcq_Hn3HIcfoF3mijQ6G7RYFZaM_9pjPTh-8E64,3239
 npcpy/memory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-npcpy/memory/command_history.py,sha256=vWzZ4F4o0XOSHn50SkdP885jG1aZIZvfcPAh8EZWlQk,54497
+npcpy/memory/command_history.py,sha256=1488weOYtnm-wyenUvZKHaNgZe5OKOZSaQ35WNeceiM,56226
 npcpy/memory/kg_vis.py,sha256=TrQQCRh_E7Pyr-GPAHLSsayubAfGyf4HOEFrPB6W86Q,31280
 npcpy/memory/knowledge_graph.py,sha256=2XpIlsyPdAOnzQ6kkwP6MWPGwL3P6V33_3suNJYMMJE,48681
 npcpy/memory/memory_processor.py,sha256=6PfVnSBA9ag5EhHJinXoODfEPTlDDoaT0PtCCuZO6HI,2598
@@ -50,8 +50,8 @@ npcpy/work/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 npcpy/work/desktop.py,sha256=F3I8mUtJp6LAkXodsh8hGZIncoads6c_2Utty-0EdDA,2986
 npcpy/work/plan.py,sha256=QyUwg8vElWiHuoS-xK4jXTxxHvkMD3VkaCEsCmrEPQk,8300
 npcpy/work/trigger.py,sha256=P1Y8u1wQRsS2WACims_2IdkBEar-iBQix-2TDWoW0OM,9948
-npcpy-1.3.2.dist-info/licenses/LICENSE,sha256=j0YPvce7Ng9e32zYOu0EmXjXeJ0Nwawd0RA3uSGGH4E,1070
-npcpy-1.3.2.dist-info/METADATA,sha256=KmIJEKnatu027fuhR2XJQs7kNlrqaVgzTqG4eKQECCc,37884
-npcpy-1.3.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-npcpy-1.3.2.dist-info/top_level.txt,sha256=g1pbSvrOOncB74Bg5-J0Olg4V0A5VzDw-Xz5YObq8BU,6
-npcpy-1.3.2.dist-info/RECORD,,
+npcpy-1.3.4.dist-info/licenses/LICENSE,sha256=j0YPvce7Ng9e32zYOu0EmXjXeJ0Nwawd0RA3uSGGH4E,1070
+npcpy-1.3.4.dist-info/METADATA,sha256=-dPHdZlVOsr_QtmxxbLl7W4cSkXH8Ccaf-w-qRB0yAY,37884
+npcpy-1.3.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+npcpy-1.3.4.dist-info/top_level.txt,sha256=g1pbSvrOOncB74Bg5-J0Olg4V0A5VzDw-Xz5YObq8BU,6
+npcpy-1.3.4.dist-info/RECORD,,