npcpy 1.3.2.tar.gz → 1.3.4.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. {npcpy-1.3.2/npcpy.egg-info → npcpy-1.3.4}/PKG-INFO +1 -1
  2. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/ft/diff.py +1 -1
  3. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/gen/response.py +11 -1
  4. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/llm_funcs.py +20 -39
  5. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/memory/command_history.py +58 -26
  6. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/serve.py +246 -208
  7. {npcpy-1.3.2 → npcpy-1.3.4/npcpy.egg-info}/PKG-INFO +1 -1
  8. {npcpy-1.3.2 → npcpy-1.3.4}/setup.py +1 -1
  9. {npcpy-1.3.2 → npcpy-1.3.4}/LICENSE +0 -0
  10. {npcpy-1.3.2 → npcpy-1.3.4}/MANIFEST.in +0 -0
  11. {npcpy-1.3.2 → npcpy-1.3.4}/README.md +0 -0
  12. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/__init__.py +0 -0
  13. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/data/__init__.py +0 -0
  14. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/data/audio.py +0 -0
  15. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/data/data_models.py +0 -0
  16. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/data/image.py +0 -0
  17. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/data/load.py +0 -0
  18. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/data/text.py +0 -0
  19. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/data/video.py +0 -0
  20. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/data/web.py +0 -0
  21. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/ft/__init__.py +0 -0
  22. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/ft/ge.py +0 -0
  23. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/ft/memory_trainer.py +0 -0
  24. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/ft/model_ensembler.py +0 -0
  25. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/ft/rl.py +0 -0
  26. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/ft/sft.py +0 -0
  27. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/ft/usft.py +0 -0
  28. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/gen/__init__.py +0 -0
  29. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/gen/audio_gen.py +0 -0
  30. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/gen/embeddings.py +0 -0
  31. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/gen/image_gen.py +0 -0
  32. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/gen/ocr.py +0 -0
  33. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/gen/video_gen.py +0 -0
  34. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/main.py +0 -0
  35. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/memory/__init__.py +0 -0
  36. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/memory/kg_vis.py +0 -0
  37. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/memory/knowledge_graph.py +0 -0
  38. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/memory/memory_processor.py +0 -0
  39. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/memory/search.py +0 -0
  40. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/mix/__init__.py +0 -0
  41. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/mix/debate.py +0 -0
  42. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/ml_funcs.py +0 -0
  43. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/npc_array.py +0 -0
  44. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/npc_compiler.py +0 -0
  45. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/npc_sysenv.py +0 -0
  46. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/npcs.py +0 -0
  47. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/sql/__init__.py +0 -0
  48. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/sql/ai_function_tools.py +0 -0
  49. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/sql/database_ai_adapters.py +0 -0
  50. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/sql/database_ai_functions.py +0 -0
  51. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/sql/model_runner.py +0 -0
  52. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/sql/npcsql.py +0 -0
  53. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/sql/sql_model_compiler.py +0 -0
  54. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/tools.py +0 -0
  55. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/work/__init__.py +0 -0
  56. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/work/desktop.py +0 -0
  57. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/work/plan.py +0 -0
  58. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy/work/trigger.py +0 -0
  59. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy.egg-info/SOURCES.txt +0 -0
  60. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy.egg-info/dependency_links.txt +0 -0
  61. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy.egg-info/requires.txt +0 -0
  62. {npcpy-1.3.2 → npcpy-1.3.4}/npcpy.egg-info/top_level.txt +0 -0
  63. {npcpy-1.3.2 → npcpy-1.3.4}/setup.cfg +0 -0
  64. {npcpy-1.3.2 → npcpy-1.3.4}/tests/test_audio.py +0 -0
  65. {npcpy-1.3.2 → npcpy-1.3.4}/tests/test_command_history.py +0 -0
  66. {npcpy-1.3.2 → npcpy-1.3.4}/tests/test_image.py +0 -0
  67. {npcpy-1.3.2 → npcpy-1.3.4}/tests/test_llm_funcs.py +0 -0
  68. {npcpy-1.3.2 → npcpy-1.3.4}/tests/test_load.py +0 -0
  69. {npcpy-1.3.2 → npcpy-1.3.4}/tests/test_npc_array.py +0 -0
  70. {npcpy-1.3.2 → npcpy-1.3.4}/tests/test_npc_compiler.py +0 -0
  71. {npcpy-1.3.2 → npcpy-1.3.4}/tests/test_npcsql.py +0 -0
  72. {npcpy-1.3.2 → npcpy-1.3.4}/tests/test_response.py +0 -0
  73. {npcpy-1.3.2 → npcpy-1.3.4}/tests/test_serve.py +0 -0
  74. {npcpy-1.3.2 → npcpy-1.3.4}/tests/test_text.py +0 -0
  75. {npcpy-1.3.2 → npcpy-1.3.4}/tests/test_tools.py +0 -0
  76. {npcpy-1.3.2 → npcpy-1.3.4}/tests/test_web.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: npcpy
-Version: 1.3.2
+Version: 1.3.4
 Summary: npcpy is the premier open-source library for integrating LLMs and Agents into python systems.
 Home-page: https://github.com/NPC-Worldwide/npcpy
 Author: Christopher Agostino
@@ -3,7 +3,7 @@ try:
     import torch.nn as nn
     import torch.nn.functional as F
     from torch.utils.data import DataLoader, Dataset as TorchDataset
-    from transformers import CLIPTextModel, CLIPTokenizer
+
     TORCH_AVAILABLE = True
 except ImportError:
     torch = None
@@ -297,6 +297,16 @@ def get_ollama_response(
         last_user_idx = len(messages) - 1
         messages[last_user_idx]["images"] = image_paths
 
+    # Ollama's pydantic model requires tool_calls arguments to be dicts, not strings
+    for msg in messages:
+        if msg.get("tool_calls"):
+            for tc in msg["tool_calls"]:
+                if tc.get("function") and isinstance(tc["function"].get("arguments"), str):
+                    try:
+                        tc["function"]["arguments"] = json.loads(tc["function"]["arguments"])
+                    except (json.JSONDecodeError, TypeError):
+                        tc["function"]["arguments"] = {}
+
     api_params = {
         "model": model,
         "messages": messages,
@@ -387,7 +397,7 @@ def get_ollama_response(
         return result
 
 
-
+    print('Debug', api_params)
     res = ollama.chat(**api_params, options=options)
     result["raw_response"] = res
 
@@ -14,6 +14,8 @@ from npcpy.npc_sysenv import (
     request_user_input,
     get_system_message
 )
+
+
 from npcpy.gen.response import get_litellm_response
 from npcpy.gen.image_gen import generate_image
 from npcpy.gen.video_gen import generate_video_diffusers, generate_video_veo3
@@ -216,7 +218,7 @@ def get_llm_response(
 
     def _context_suffix(ctx):
         if ctx is not None:
-            return f'User Provided Context: {ctx}'
+            return f'\n\n\nUser Provided Context: {ctx}'
         return ''
 
     def _build_messages(base_messages, sys_msg, prompt_text, ctx_suffix):
@@ -564,6 +566,7 @@ def check_llm_command(
     extra_globals=None,
     max_iterations: int = 5,
     jinxs: Dict = None,
+    tool_capable: bool = None,  # If None, will be auto-detected
 ):
     """
     Simple agent loop: try tool calling first, fall back to ReAct if unsupported.
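
The new tool_capable parameter is a three-state flag: True and None both allow the native tool-calling path (None deferring to auto-detection), while an explicit False skips tool schemas entirely and leans on the ReAct fallback. A hedged calling sketch — the command text, model, and provider values are illustrative, not taken from this diff:

    from npcpy.llm_funcs import check_llm_command

    # Force the ReAct fallback for a model without native tool calling.
    result = check_llm_command(
        "list the files in the current directory",
        model="llama3.2",      # assumed example model
        provider="ollama",     # assumed example provider
        tool_capable=False,
    )
    print(result["output"])
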
@@ -571,27 +574,21 @@ def check_llm_command(
     if messages is None:
         messages = []
 
-    # Log incoming messages
-    import logging
-    logger = logging.getLogger("npcpy.llm_funcs")
-    logger.debug(f"[check_llm_command] Received {len(messages)} messages")
-    for i, msg in enumerate(messages[-5:]):  # Log last 5 messages
-        role = msg.get('role', 'unknown')
-        content = msg.get('content', '')
-        content_preview = content[:100] if isinstance(content, str) else str(type(content))
-        logger.debug(f"  [{i}] role={role}, content_preview={content_preview}...")
-
     total_usage = {"input_tokens": 0, "output_tokens": 0}
+
     # Use provided jinxs or get from npc/team
     if jinxs is None:
         jinxs = _get_jinxs(npc, team)
-    tools = _jinxs_to_tools(jinxs) if jinxs else None
+
+    # Only prepare tools if model supports them
+    tools = None
+    if tool_capable is not False and jinxs:
+        tools = _jinxs_to_tools(jinxs)
 
     # Keep full message history, only truncate for API calls to reduce tokens
     full_messages = messages.copy() if messages else []
-    logger.debug(f"[check_llm_command] full_messages initialized with {len(full_messages)} messages")
 
-    # Try with native tool calling first
+    # Make LLM call (with or without tools based on tool_capable)
 
     try:
         response = get_llm_response(
@@ -609,7 +606,7 @@ def check_llm_command(
             tools=tools,
         )
     except Exception as e:
-        print(colored(f"[check_llm_command] EXCEPTION in get_llm_response: {type(e).__name__}: {e}", "red"))
+        print(f"[check_llm_command] EXCEPTION in get_llm_response: {type(e).__name__}: {e}", "red")
         return {
             "messages": full_messages,
             "output": f"LLM call failed: {e}",
@@ -617,8 +614,6 @@ def check_llm_command(
             "usage": total_usage,
         }
 
-    if response.get("error"):
-        logger.warning(f"[check_llm_command] Error in response: {response.get('error')}")
 
     if response.get("usage"):
         total_usage["input_tokens"] += response["usage"].get("input_tokens", 0)
@@ -634,7 +629,6 @@ def check_llm_command(
     # For streaming, the caller (process_result) handles appending after consumption
     if assistant_response and isinstance(assistant_response, str):
         full_messages.append({"role": "assistant", "content": assistant_response})
-        logger.debug(f"[check_llm_command] No tool calls - returning {len(full_messages)} messages")
     return {
         "messages": full_messages,
         "output": assistant_response,
@@ -670,8 +664,7 @@ def check_llm_command(
         assistant_msg["tool_calls"] = _serialize_tool_calls(tool_calls)
     full_messages.append(assistant_msg)
     current_messages = full_messages
-    logger.debug(f"[check_llm_command] Tool calls detected - current_messages has {len(current_messages)} messages")
-    for iteration in range(max_iterations):
+    for _ in range(max_iterations):
         for tc in tool_calls:
             # Handle both dict and object formats
             if hasattr(tc, 'function'):
@@ -692,15 +685,12 @@ def check_llm_command(
 
             if jinx_name in jinxs:
                 try:
-                    from termcolor import colored
-                    print(colored(f" ⚡ {jinx_name}", "cyan"), end="", flush=True)
+
+                    print((f" ⚡ {jinx_name}", "cyan"), end="", flush=True)
                 except:
                     pass
                 output = _execute_jinx(jinxs[jinx_name], inputs, npc, team, current_messages, extra_globals)
-                try:
-                    print(colored(" ✓", "green"), flush=True)
-                except:
-                    pass
+
 
                 # Add tool result to messages
                 # Include name for Gemini compatibility
@@ -745,8 +735,6 @@ def check_llm_command(
         )
     except Exception as e:
         # If continuation fails, return what we have so far
-        # The tool was already executed successfully
-        logger.warning(f"[check_llm_command] Continuation failed: {e}")
         return {
             "messages": current_messages,
             "output": f"Tool executed successfully. (Continuation error: {type(e).__name__})",
@@ -767,14 +755,12 @@ def check_llm_command(
 
     if not tool_calls:
         # Done - return full message history
-        logger.debug(f"[check_llm_command] Tool loop done - returning {len(current_messages)} messages")
        return {
            "messages": current_messages,
            "output": assistant_response,
            "usage": total_usage,
        }
 
-    logger.debug(f"[check_llm_command] Max iterations - returning {len(current_messages)} messages")
     return {
         "messages": current_messages,
         "output": response.get("response", "Max iterations reached"),
@@ -867,16 +853,11 @@ Use EXACT parameter names from the tool definitions above."""
            context = f"Error: '{jinx_name}' not found. Available: {list(jinxs.keys())}"
            continue
 
-        try:
-            from termcolor import colored
-            print(colored(f" ⚡ {jinx_name}", "cyan"), end="", flush=True)
-        except:
-            pass
+
+
        output = _execute_jinx(jinxs[jinx_name], inputs, npc, team, current_messages, extra_globals)
-        try:
-            print(colored(" ✓", "green"), flush=True)
-        except:
-            pass
+
+
        context = f"Tool '{jinx_name}' returned: {output}"
        command = f"{command}\n\nPrevious: {context}"
 
@@ -487,7 +487,10 @@ class CommandHistory:
            Column('model', String(100)),
            Column('provider', String(100)),
            Column('npc', String(100)),
-            Column('team', String(100))
+            Column('team', String(100)),
+            Column('reasoning_content', Text),  # For thinking tokens / chain of thought
+            Column('tool_calls', Text),  # JSON array of tool calls made by assistant
+            Column('tool_results', Text)  # JSON array of tool call results
        )
 
        Table('message_attachments', metadata,
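
These Column definitions only shape freshly created tables; an existing conversation_history database needs the columns added by hand. A hedged sketch of that migration, assuming the default SQLite backend and a placeholder database path — npcpy does not ship this migration in the diff:

    import sqlite3

    conn = sqlite3.connect("/path/to/npc_history.db")  # placeholder path
    existing = {row[1] for row in conn.execute("PRAGMA table_info(conversation_history)")}
    for col in ("reasoning_content", "tool_calls", "tool_results"):
        if col not in existing:
            conn.execute(f"ALTER TABLE conversation_history ADD COLUMN {col} TEXT")
    conn.commit()
    conn.close()
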
@@ -719,31 +722,41 @@ class CommandHistory:
 
 
     def add_conversation(
-        self, 
+        self,
         message_id,
         timestamp,
-        role, 
-        content, 
-        conversation_id, 
+        role,
+        content,
+        conversation_id,
         directory_path,
-        model=None, 
-        provider=None, 
-        npc=None, 
+        model=None,
+        provider=None,
+        npc=None,
         team=None,
         attachments=None,
+        reasoning_content=None,
+        tool_calls=None,
+        tool_results=None,
     ):
         if isinstance(content, (dict, list)):
             content = json.dumps(content, cls=CustomJSONEncoder)
 
+        # Serialize tool_calls and tool_results as JSON
+        if tool_calls is not None and not isinstance(tool_calls, str):
+            tool_calls = json.dumps(tool_calls, cls=CustomJSONEncoder)
+        if tool_results is not None and not isinstance(tool_results, str):
+            tool_results = json.dumps(tool_results, cls=CustomJSONEncoder)
+
        stmt = """
        INSERT INTO conversation_history
-        (message_id, timestamp, role, content, conversation_id, directory_path, model, provider, npc, team)
-        VALUES (:message_id, :timestamp, :role, :content, :conversation_id, :directory_path, :model, :provider, :npc, :team)
+        (message_id, timestamp, role, content, conversation_id, directory_path, model, provider, npc, team, reasoning_content, tool_calls, tool_results)
+        VALUES (:message_id, :timestamp, :role, :content, :conversation_id, :directory_path, :model, :provider, :npc, :team, :reasoning_content, :tool_calls, :tool_results)
        """
        params = {
            "message_id": message_id, "timestamp": timestamp, "role": role, "content": content,
            "conversation_id": conversation_id, "directory_path": directory_path, "model": model,
-            "provider": provider, "npc": npc, "team": team
+            "provider": provider, "npc": npc, "team": team, "reasoning_content": reasoning_content,
+            "tool_calls": tool_calls, "tool_results": tool_results
        }
        with self.engine.begin() as conn:
            conn.execute(text(stmt), params)
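
A usage sketch for the extended method: because structured tool_calls/tool_results are serialized to JSON before the INSERT, callers can pass plain Python objects. The CommandHistory constructor arguments and the field values below are illustrative:

    from npcpy.memory.command_history import CommandHistory

    history = CommandHistory()  # constructor arguments are an assumption
    history.add_conversation(
        message_id="msg-001",
        timestamp="2025-01-01T00:00:00",
        role="assistant",
        content="Paris is sunny today.",
        conversation_id="conv-001",
        directory_path="/tmp",
        model="llama3.2",
        provider="ollama",
        tool_calls=[{"function": {"name": "get_weather",
                                  "arguments": {"city": "Paris"}}}],
        tool_results=[{"name": "get_weather", "output": "sunny"}],
    )
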
@@ -756,7 +769,7 @@ class CommandHistory:
                attachment_type=attachment.get("type"),
                data=attachment.get("data"),
                size=attachment.get("size"),
-                file_path=attachment.get("path") 
+                file_path=attachment.get("path")
            )
 
        return message_id
@@ -1084,16 +1097,28 @@ class CommandHistory:
    def get_conversations_by_id(self, conversation_id: str) -> List[Dict[str, Any]]:
        stmt = """
        SELECT id, message_id, timestamp, role, content, conversation_id,
-               directory_path, model, provider, npc, team
-        FROM conversation_history WHERE conversation_id = :conversation_id
+               directory_path, model, provider, npc, team,
+               reasoning_content, tool_calls, tool_results
+        FROM conversation_history WHERE conversation_id = :conversation_id
        ORDER BY timestamp ASC
        """
        results = self._fetch_all(stmt, {"conversation_id": conversation_id})
-        
+
        for message_dict in results:
            attachments = self.get_message_attachments(message_dict["message_id"])
            if attachments:
                message_dict["attachments"] = attachments
+            # Parse JSON fields
+            if message_dict.get("tool_calls"):
+                try:
+                    message_dict["tool_calls"] = json.loads(message_dict["tool_calls"])
+                except (json.JSONDecodeError, TypeError):
+                    pass
+            if message_dict.get("tool_results"):
+                try:
+                    message_dict["tool_results"] = json.loads(message_dict["tool_results"])
+                except (json.JSONDecodeError, TypeError):
+                    pass
        return results
 
    def get_npc_conversation_stats(self, start_date=None, end_date=None) -> pd.DataFrame:
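
On the read side, the new fields come back as parsed Python objects when they contain valid JSON, and as the raw stored text otherwise. Continuing the sketch above:

    for msg in history.get_conversations_by_id("conv-001"):
        tool_calls = msg.get("tool_calls")
        if isinstance(tool_calls, list):  # parsed by get_conversations_by_id
            for tc in tool_calls:
                print(tc["function"]["name"], tc["function"]["arguments"])
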
@@ -1295,9 +1320,13 @@ def save_conversation_message(
    team: str = None,
    attachments: List[Dict] = None,
    message_id: str = None,
+    reasoning_content: str = None,
+    tool_calls: List[Dict] = None,
+    tool_results: List[Dict] = None,
 ):
    """
    Saves a conversation message linked to a conversation ID with optional attachments.
+    Now also supports reasoning_content, tool_calls, and tool_results.
    """
    if wd is None:
        wd = os.getcwd()
@@ -1307,17 +1336,20 @@ def save_conversation_message(
 
 
    return command_history.add_conversation(
-        message_id, 
-        timestamp, 
-        role, 
-        content, 
-        conversation_id, 
-        wd, 
-        model=model, 
-        provider=provider, 
-        npc=npc, 
-        team=team, 
-        attachments=attachments)
+        message_id,
+        timestamp,
+        role,
+        content,
+        conversation_id,
+        wd,
+        model=model,
+        provider=provider,
+        npc=npc,
+        team=team,
+        attachments=attachments,
+        reasoning_content=reasoning_content,
+        tool_calls=tool_calls,
+        tool_results=tool_results)
 def retrieve_last_conversation(
    command_history: CommandHistory, conversation_id: str
 ) -> str:
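
Finally, a sketch of the module-level helper with the new fields threaded through. Everything except the three new keyword arguments is illustrative, including the positional/keyword split, which the diff does not show:

    from npcpy.memory.command_history import save_conversation_message

    save_conversation_message(
        command_history,               # a CommandHistory instance
        conversation_id="conv-001",
        role="assistant",
        content="Done.",
        reasoning_content="checked the forecast before answering",  # thinking tokens
        tool_calls=[{"function": {"name": "get_weather",
                                  "arguments": {"city": "Paris"}}}],
        tool_results=[{"name": "get_weather", "output": "sunny"}],
    )
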