npcpy 1.3.1__py3-none-any.whl → 1.3.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
npcpy/ft/diff.py CHANGED
@@ -3,7 +3,7 @@ try:
  import torch.nn as nn
  import torch.nn.functional as F
  from torch.utils.data import DataLoader, Dataset as TorchDataset
- from transformers import CLIPTextModel, CLIPTokenizer
+
  TORCH_AVAILABLE = True
  except ImportError:
  torch = None
npcpy/gen/response.py CHANGED
@@ -297,6 +297,16 @@ def get_ollama_response(
  last_user_idx = len(messages) - 1
  messages[last_user_idx]["images"] = image_paths

+ # Ollama's pydantic model requires tool_calls arguments to be dicts, not strings
+ for msg in messages:
+ if msg.get("tool_calls"):
+ for tc in msg["tool_calls"]:
+ if tc.get("function") and isinstance(tc["function"].get("arguments"), str):
+ try:
+ tc["function"]["arguments"] = json.loads(tc["function"]["arguments"])
+ except (json.JSONDecodeError, TypeError):
+ tc["function"]["arguments"] = {}
+
  api_params = {
  "model": model,
  "messages": messages,
@@ -346,7 +356,12 @@ def get_ollama_response(
  res = ollama.chat(**api_params, options=options)
  result["raw_response"] = res

- # Extract usage from ollama response
+ if stream:
+ # Return immediately for streaming - don't check 'in' on generator as it consumes it
+ result["response"] = res
+ return result
+
+ # Extract usage from ollama response (only for non-streaming)
  if hasattr(res, 'prompt_eval_count') or 'prompt_eval_count' in res:
  input_tokens = getattr(res, 'prompt_eval_count', None) or res.get('prompt_eval_count', 0) or 0
  output_tokens = getattr(res, 'eval_count', None) or res.get('eval_count', 0) or 0
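The early return added above matters because ollama.chat(..., stream=True) hands back a generator, and a membership test such as 'prompt_eval_count' in res would start iterating it before the caller does. A small illustration of that pitfall in plain Python (not npcpy code):

def chunks():
    yield {"a": 1}
    yield {"b": 2}

gen = chunks()
_ = {"b": 2} in gen   # the membership test advances the generator until it finds a match
print(list(gen))      # prints [] - the chunks were consumed by the check, not by the real consumer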
@@ -354,10 +369,6 @@ def get_ollama_response(
  "input_tokens": input_tokens,
  "output_tokens": output_tokens,
  }
-
- if stream:
- result["response"] = res
- return result
  else:

  message = res.get("message", {})
@@ -386,7 +397,7 @@ def get_ollama_response(
  return result


-
+ print('Debug', api_params)
  res = ollama.chat(**api_params, options=options)
  result["raw_response"] = res

npcpy/llm_funcs.py CHANGED
@@ -216,7 +216,7 @@ def get_llm_response(

  def _context_suffix(ctx):
  if ctx is not None:
- return f'User Provided Context: {ctx}'
+ return f'\n\n\nUser Provided Context: {ctx}'
  return ''

  def _build_messages(base_messages, sys_msg, prompt_text, ctx_suffix):
npcpy/memory/command_history.py CHANGED
@@ -487,7 +487,10 @@ class CommandHistory:
  Column('model', String(100)),
  Column('provider', String(100)),
  Column('npc', String(100)),
- Column('team', String(100))
+ Column('team', String(100)),
+ Column('reasoning_content', Text), # For thinking tokens / chain of thought
+ Column('tool_calls', Text), # JSON array of tool calls made by assistant
+ Column('tool_results', Text) # JSON array of tool call results
  )

  Table('message_attachments', metadata,
@@ -719,31 +722,41 @@ class CommandHistory:


  def add_conversation(
- self,
+ self,
  message_id,
  timestamp,
- role,
- content,
- conversation_id,
+ role,
+ content,
+ conversation_id,
  directory_path,
- model=None,
- provider=None,
- npc=None,
+ model=None,
+ provider=None,
+ npc=None,
  team=None,
  attachments=None,
+ reasoning_content=None,
+ tool_calls=None,
+ tool_results=None,
  ):
  if isinstance(content, (dict, list)):
  content = json.dumps(content, cls=CustomJSONEncoder)

+ # Serialize tool_calls and tool_results as JSON
+ if tool_calls is not None and not isinstance(tool_calls, str):
+ tool_calls = json.dumps(tool_calls, cls=CustomJSONEncoder)
+ if tool_results is not None and not isinstance(tool_results, str):
+ tool_results = json.dumps(tool_results, cls=CustomJSONEncoder)
+
  stmt = """
  INSERT INTO conversation_history
- (message_id, timestamp, role, content, conversation_id, directory_path, model, provider, npc, team)
- VALUES (:message_id, :timestamp, :role, :content, :conversation_id, :directory_path, :model, :provider, :npc, :team)
+ (message_id, timestamp, role, content, conversation_id, directory_path, model, provider, npc, team, reasoning_content, tool_calls, tool_results)
+ VALUES (:message_id, :timestamp, :role, :content, :conversation_id, :directory_path, :model, :provider, :npc, :team, :reasoning_content, :tool_calls, :tool_results)
  """
  params = {
  "message_id": message_id, "timestamp": timestamp, "role": role, "content": content,
  "conversation_id": conversation_id, "directory_path": directory_path, "model": model,
- "provider": provider, "npc": npc, "team": team
+ "provider": provider, "npc": npc, "team": team, "reasoning_content": reasoning_content,
+ "tool_calls": tool_calls, "tool_results": tool_results
  }
  with self.engine.begin() as conn:
  conn.execute(text(stmt), params)
@@ -756,7 +769,7 @@ class CommandHistory:
  attachment_type=attachment.get("type"),
  data=attachment.get("data"),
  size=attachment.get("size"),
- file_path=attachment.get("path")
+ file_path=attachment.get("path")
  )

  return message_id
@@ -1084,16 +1097,28 @@ class CommandHistory:
  def get_conversations_by_id(self, conversation_id: str) -> List[Dict[str, Any]]:
  stmt = """
  SELECT id, message_id, timestamp, role, content, conversation_id,
- directory_path, model, provider, npc, team
- FROM conversation_history WHERE conversation_id = :conversation_id
+ directory_path, model, provider, npc, team,
+ reasoning_content, tool_calls, tool_results
+ FROM conversation_history WHERE conversation_id = :conversation_id
  ORDER BY timestamp ASC
  """
  results = self._fetch_all(stmt, {"conversation_id": conversation_id})
-
+
  for message_dict in results:
  attachments = self.get_message_attachments(message_dict["message_id"])
  if attachments:
  message_dict["attachments"] = attachments
+ # Parse JSON fields
+ if message_dict.get("tool_calls"):
+ try:
+ message_dict["tool_calls"] = json.loads(message_dict["tool_calls"])
+ except (json.JSONDecodeError, TypeError):
+ pass
+ if message_dict.get("tool_results"):
+ try:
+ message_dict["tool_results"] = json.loads(message_dict["tool_results"])
+ except (json.JSONDecodeError, TypeError):
+ pass
  return results

  def get_npc_conversation_stats(self, start_date=None, end_date=None) -> pd.DataFrame:
@@ -1295,9 +1320,13 @@ def save_conversation_message(
  team: str = None,
  attachments: List[Dict] = None,
  message_id: str = None,
+ reasoning_content: str = None,
+ tool_calls: List[Dict] = None,
+ tool_results: List[Dict] = None,
  ):
  """
  Saves a conversation message linked to a conversation ID with optional attachments.
+ Now also supports reasoning_content, tool_calls, and tool_results.
  """
  if wd is None:
  wd = os.getcwd()
@@ -1307,17 +1336,20 @@ def save_conversation_message(


  return command_history.add_conversation(
- message_id,
- timestamp,
- role,
- content,
- conversation_id,
- wd,
- model=model,
- provider=provider,
- npc=npc,
- team=team,
- attachments=attachments)
+ message_id,
+ timestamp,
+ role,
+ content,
+ conversation_id,
+ wd,
+ model=model,
+ provider=provider,
+ npc=npc,
+ team=team,
+ attachments=attachments,
+ reasoning_content=reasoning_content,
+ tool_calls=tool_calls,
+ tool_results=tool_results)
  def retrieve_last_conversation(
  command_history: CommandHistory, conversation_id: str
  ) -> str:
npcpy/npc_compiler.py CHANGED
@@ -468,8 +468,8 @@ class Jinx:
  }

  def render_first_pass(
- self,
- jinja_env_for_macros: Environment,
+ self,
+ jinja_env_for_macros: Environment,
  all_jinx_callables: Dict[str, Callable]
  ):
  """
@@ -478,40 +478,44 @@ class Jinx:
  then expands nested Jinx calls (e.g., {{ sh(...) }} or engine: jinx_name)
  and inline macros.
  """
- # 1. Join the list of raw steps (which are individual YAML lines) into a single string.
- # This single string is the complete Jinja template for the 'steps' section.
- raw_steps_template_string = "\n".join(self._raw_steps)
-
- # 2. Render this single string as a Jinja template.
- # Jinja will now process the {% for %} and {% if %} directives,
- # dynamically generating the YAML structure.
- try:
- steps_template = jinja_env_for_macros.from_string(raw_steps_template_string)
- # Pass globals (like num_tasks, include_greeting from Jinx inputs)
- # to the Jinja rendering context for structural templating.
- rendered_steps_yaml_string = steps_template.render(**jinja_env_for_macros.globals)
- except Exception as e:
- # In a real Jinx, this would go to a proper logger.
- # For this context, we handle the error gracefully.
- # self._log_debug(f"Warning: Error during first-pass templating of Jinx '{self.jinx_name}' steps YAML: {e}")
- self.steps = list(self._raw_steps) # Fallback to original raw steps
- return
+ # Check if steps are already parsed dicts (common case when loaded from YAML)
+ # If so, skip the YAML string join/parse cycle and use them directly
+ if self._raw_steps and isinstance(self._raw_steps[0], dict):
+ structurally_expanded_steps = list(self._raw_steps)
+ else:
+ # 1. Join the list of raw steps (which are individual YAML lines) into a single string.
+ # This single string is the complete Jinja template for the 'steps' section.
+ raw_steps_template_string = "\n".join(self._raw_steps)

- # 3. Parse the rendered YAML string back into a list of step dictionaries.
- # This step will now correctly interpret the YAML structure generated by Jinja.
- try:
- structurally_expanded_steps = yaml.safe_load(rendered_steps_yaml_string)
- if not isinstance(structurally_expanded_steps, list):
- # Handle cases where the rendered YAML might be empty or not a list
- if structurally_expanded_steps is None:
- structurally_expanded_steps = []
- else:
- raise ValueError(f"Rendered steps YAML did not result in a list: {type(structurally_expanded_steps)}")
- self.steps = structurally_expanded_steps
- except Exception as e:
- # self._log_debug(f"Warning: Error re-parsing structurally expanded steps YAML for Jinx '{self.jinx_name}': {e}")
- self.steps = list(self._raw_steps) # Fallback
- return
+ # 2. Render this single string as a Jinja template.
+ # Jinja will now process the {% for %} and {% if %} directives,
+ # dynamically generating the YAML structure.
+ try:
+ steps_template = jinja_env_for_macros.from_string(raw_steps_template_string)
+ # Pass globals (like num_tasks, include_greeting from Jinx inputs)
+ # to the Jinja rendering context for structural templating.
+ rendered_steps_yaml_string = steps_template.render(**jinja_env_for_macros.globals)
+ except Exception as e:
+ # In a real Jinx, this would go to a proper logger.
+ # For this context, we handle the error gracefully.
+ # self._log_debug(f"Warning: Error during first-pass templating of Jinx '{self.jinx_name}' steps YAML: {e}")
+ self.steps = list(self._raw_steps) # Fallback to original raw steps
+ return
+
+ # 3. Parse the rendered YAML string back into a list of step dictionaries.
+ # This step will now correctly interpret the YAML structure generated by Jinja.
+ try:
+ structurally_expanded_steps = yaml.safe_load(rendered_steps_yaml_string)
+ if not isinstance(structurally_expanded_steps, list):
+ # Handle cases where the rendered YAML might be empty or not a list
+ if structurally_expanded_steps is None:
+ structurally_expanded_steps = []
+ else:
+ raise ValueError(f"Rendered steps YAML did not result in a list: {type(structurally_expanded_steps)}")
+ except Exception as e:
+ # self._log_debug(f"Warning: Error re-parsing structurally expanded steps YAML for Jinx '{self.jinx_name}': {e}")
+ self.steps = list(self._raw_steps) # Fallback
+ return

  # 4. Now, iterate through these `structurally_expanded_steps` to expand
  # declarative Jinx calls (engine: jinx_name) and inline macros.
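The reshuffled logic above amounts to: use steps that are already parsed dicts directly, and only fall back to the Jinja-render-then-yaml.safe_load round trip when the raw steps are still text lines. A condensed sketch of that decision, simplified from the method shown in the hunk:

import yaml
from jinja2 import Environment

def expand_steps(raw_steps, env: Environment):
    # Already-parsed dicts (the common case when the Jinx was loaded from YAML) are used as-is
    if raw_steps and isinstance(raw_steps[0], dict):
        return list(raw_steps)
    # Otherwise treat the joined lines as a Jinja template, then re-parse the YAML it produces
    rendered = env.from_string("\n".join(raw_steps)).render(**env.globals)
    parsed = yaml.safe_load(rendered)
    return parsed if isinstance(parsed, list) else []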
npcpy/serve.py CHANGED
@@ -216,12 +216,14 @@ class MCPClientNPC:
  self._exit_stack = None


- def get_llm_response_with_handling(prompt, npc, messages, tools, stream, team, context=None):
+ def get_llm_response_with_handling(prompt, npc,model, provider, messages, tools, stream, team, context=None):
  """Unified LLM response with basic exception handling (inlined from corca to avoid that dependency)."""
  try:
  return get_llm_response(
  prompt=prompt,
  npc=npc,
+ model=model,
+ provider=provider,
  messages=messages,
  tools=tools,
  auto_process_tool_calls=False,
@@ -234,6 +236,8 @@ def get_llm_response_with_handling(prompt, npc, messages, tools, stream, team, c
  return get_llm_response(
  prompt=prompt,
  npc=npc,
+ model=model,
+ provider=provider,
  messages=messages,
  tools=tools,
  auto_process_tool_calls=False,
@@ -241,6 +245,7 @@ def get_llm_response_with_handling(prompt, npc, messages, tools, stream, team, c
  team=team,
  context=context
  )
+
  class MCPServerManager:
  """
  Simple in-process tracker for launching/stopping MCP servers.
@@ -2920,11 +2925,7 @@ def get_mcp_tools():
  )
  server_path = os.path.abspath(os.path.expanduser(resolved_path))

- try:
- from npcsh.corca import MCPClientNPC
- except ImportError:
- return jsonify({"error": "MCP Client (npcsh.corca) not available. Ensure npcsh.corca is installed and importable."}), 500
-
+ # MCPClientNPC is defined inline at the top of this file
  temp_mcp_client = None
  jinx_tools = []
  try:
@@ -3473,61 +3474,6 @@ def stream():
  **tool_args
  )
  messages = stream_response.get('messages', messages)
-
- elif exe_mode == 'npcsh':
- from npcsh._state import execute_command, initial_state
- from npcsh.routes import router
- initial_state.model = model
- initial_state.provider = provider
- initial_state.npc = npc_object
- initial_state.team = team_object
- initial_state.messages = messages
- initial_state.command_history = command_history
-
- state, stream_response = execute_command(
- commandstr,
- initial_state, router=router)
- messages = state.messages
-
- elif exe_mode == 'guac':
- from npcsh.guac import execute_guac_command
- from npcsh.routes import router
- from npcsh._state import initial_state
- from pathlib import Path
- import pandas as pd, numpy as np, matplotlib.pyplot as plt
-
- if not hasattr(app, 'guac_locals'):
- app.guac_locals = {}
-
- if conversation_id not in app.guac_locals:
- app.guac_locals[conversation_id] = {
- 'pd': pd,
- 'np': np,
- 'plt': plt,
- 'datetime': datetime,
- 'Path': Path,
- 'os': os,
- 'sys': sys,
- 'json': json
- }
-
- initial_state.model = model
- initial_state.provider = provider
- initial_state.npc = npc_object
- initial_state.team = team_object
- initial_state.messages = messages
- initial_state.command_history = command_history
-
- state, stream_response = execute_guac_command(
- commandstr,
- initial_state,
- app.guac_locals[conversation_id],
- "guac",
- Path.cwd() / "npc_team",
- router
- )
- messages = state.messages
-
  elif exe_mode == 'tool_agent':
  mcp_server_path_from_request = data.get("mcpServerPath")
  selected_mcp_tools_from_request = data.get("selectedMcpTools", [])
@@ -3576,7 +3522,6 @@ def stream():

  mcp_client = app.mcp_clients[state_key]["client"]
  messages = app.mcp_clients[state_key].get("messages", messages)
-
  def stream_mcp_sse():
  nonlocal messages
  iteration = 0
@@ -3599,17 +3544,21 @@ def stream():
  llm_response = get_llm_response_with_handling(
  prompt=prompt,
  npc=npc_object,
+ model=model,
+ provider=provider,
  messages=messages,
  tools=tools_for_llm,
  stream=True,
  team=team_object,
  context=f' The users working directory is {current_path}'
  )
+ print('RESPONSE', llm_response)

  stream = llm_response.get("response", [])
  messages = llm_response.get("messages", messages)
  collected_content = ""
  collected_tool_calls = []
+ agent_tool_call_data = {"id": None, "function_name": None, "arguments": ""}

  for response_chunk in stream:
  with cancellation_lock:
@@ -3617,7 +3566,62 @@ def stream():
  yield {"type": "interrupt"}
  return

- if hasattr(response_chunk, "choices") and response_chunk.choices:
+ if "hf.co" in model or provider == 'ollama' and 'gpt-oss' not in model:
+ # Ollama returns ChatResponse objects - support both attribute and dict access
+ msg = getattr(response_chunk, "message", None) or (response_chunk.get("message", {}) if hasattr(response_chunk, "get") else {})
+ chunk_content = getattr(msg, "content", None) or (msg.get("content") if hasattr(msg, "get") else "") or ""
+ # Extract Ollama thinking/reasoning tokens
+ reasoning_content = getattr(msg, "thinking", None) or (msg.get("thinking") if hasattr(msg, "get") else None)
+ # Handle tool calls with robust attribute/dict access
+ tool_calls = getattr(msg, "tool_calls", None) or (msg.get("tool_calls") if hasattr(msg, "get") else None)
+ if tool_calls:
+ for tool_call in tool_calls:
+ tc_id = getattr(tool_call, "id", None) or (tool_call.get("id") if hasattr(tool_call, "get") else None)
+ tc_func = getattr(tool_call, "function", None) or (tool_call.get("function") if hasattr(tool_call, "get") else None)
+ if tc_func:
+ tc_name = getattr(tc_func, "name", None) or (tc_func.get("name") if hasattr(tc_func, "get") else None)
+ tc_args = getattr(tc_func, "arguments", None) or (tc_func.get("arguments") if hasattr(tc_func, "get") else None)
+ if tc_name:
+ arg_str = tc_args
+ if isinstance(arg_str, dict):
+ arg_str = json.dumps(arg_str)
+ elif arg_str is None:
+ arg_str = "{}"
+ # Add to collected_tool_calls for Ollama
+ collected_tool_calls.append({
+ "id": tc_id or f"call_{len(collected_tool_calls)}",
+ "type": "function",
+ "function": {"name": tc_name, "arguments": arg_str}
+ })
+ if chunk_content:
+ collected_content += chunk_content
+ # Extract other fields with robust access
+ created_at = getattr(response_chunk, "created_at", None) or (response_chunk.get("created_at") if hasattr(response_chunk, "get") else None)
+ model_name = getattr(response_chunk, "model", None) or (response_chunk.get("model") if hasattr(response_chunk, "get") else model)
+ msg_role = getattr(msg, "role", None) or (msg.get("role") if hasattr(msg, "get") else "assistant")
+ done_reason = getattr(response_chunk, "done_reason", None) or (response_chunk.get("done_reason") if hasattr(response_chunk, "get") else None)
+
+ # Build chunk_data with proper structure
+ chunk_data = {
+ "id": None,
+ "object": None,
+ "created": str(created_at) if created_at else datetime.datetime.now().isoformat(),
+ "model": model_name,
+ "choices": [
+ {
+ "index": 0,
+ "delta": {
+ "content": chunk_content,
+ "role": msg_role,
+ "reasoning_content": reasoning_content
+ },
+ "finish_reason": done_reason
+ }
+ ]
+ }
+ yield chunk_data
+
+ elif hasattr(response_chunk, "choices") and response_chunk.choices:
  delta = response_chunk.choices[0].delta
  if hasattr(delta, "content") and delta.content:
  collected_content += delta.content
@@ -3698,55 +3702,41 @@ def stream():
  try:
  jinx_ctx = jinx_obj.execute(
  input_values=tool_args if isinstance(tool_args, dict) else {},
- npc=npc_object,
- messages=messages
+ npc=npc_object
  )
- tool_content = str(jinx_ctx.get("output", jinx_ctx))
- print(f"[MCP] jinx tool_complete {tool_name}")
+ tool_content = str(jinx_ctx)
  except Exception as e:
- raise Exception(f"Jinx execution failed: {e}")
+ tool_content = f"Jinx execution error: {str(e)}"
  else:
- try:
- loop = asyncio.get_event_loop()
- except RuntimeError:
- loop = asyncio.new_event_loop()
- asyncio.set_event_loop(loop)
- if loop.is_closed():
- loop = asyncio.new_event_loop()
- asyncio.set_event_loop(loop)
- mcp_result = loop.run_until_complete(
- mcp_client.session.call_tool(tool_name, tool_args)
- ) if mcp_client else {"error": "No MCP client"}
- if hasattr(mcp_result, "content") and mcp_result.content:
- for content_item in mcp_result.content:
- if hasattr(content_item, "text"):
- tool_content += content_item.text
- elif hasattr(content_item, "data"):
- tool_content += str(content_item.data)
+ # Execute via MCP client
+ if mcp_client and tool_name in mcp_client.tool_map:
+ try:
+ tool_func = mcp_client.tool_map[tool_name]
+ result = tool_func(**(tool_args if isinstance(tool_args, dict) else {}))
+ # Handle MCP CallToolResult
+ if hasattr(result, 'content'):
+ tool_content = str(result.content[0].text) if result.content else str(result)
  else:
- tool_content += str(content_item)
+ tool_content = str(result)
+ except Exception as mcp_e:
+ tool_content = f"MCP tool error: {str(mcp_e)}"
  else:
- tool_content = str(mcp_result)
-
- tool_results.append({
+ tool_content = f"Tool '{tool_name}' not found in MCP server or Jinxs"
+
+ messages.append({
  "role": "tool",
  "tool_call_id": tool_id,
  "name": tool_name,
  "content": tool_content
  })
+
+ print(f"[MCP] tool_result {tool_name}: {tool_content}")
+ yield {"type": "tool_result", "name": tool_name, "id": tool_id, "result": tool_content}

- print(f"[MCP] tool_complete {tool_name}")
- yield {"type": "tool_complete", "name": tool_name, "id": tool_id, "result_preview": tool_content[:4000]}
  except Exception as e:
- err_msg = f"Error executing {tool_name}: {e}"
- tool_results.append({
- "role": "tool",
- "tool_call_id": tool_id,
- "name": tool_name,
- "content": err_msg
- })
- print(f"[MCP] tool_error {tool_name}: {e}")
- yield {"type": "tool_error", "name": tool_name, "id": tool_id, "error": str(e)}
+ error_msg = f"Tool execution error: {str(e)}"
+ print(f"[MCP] tool_error {tool_name}: {error_msg}")
+ yield {"type": "tool_error", "name": tool_name, "id": tool_id, "error": error_msg}

  serialized_tool_calls = []
  for tc in collected_tool_calls:
@@ -3770,14 +3760,12 @@ def stream():
  "content": collected_content,
  "tool_calls": serialized_tool_calls
  })
- messages.extend(tool_results)
  tool_results_for_db = tool_results

  prompt = ""

  app.mcp_clients[state_key]["messages"] = messages
  return
-
  stream_response = stream_mcp_sse()

  else:
@@ -3814,6 +3802,8 @@ def stream():

  def event_stream(current_stream_id):
  complete_response = []
+ complete_reasoning = [] # Accumulate reasoning content
+ accumulated_tool_calls = [] # Accumulate all tool calls
  dot_count = 0
  interrupted = False
  tool_call_data = {"id": None, "function_name": None, "arguments": ""}
@@ -3839,17 +3829,30 @@ def stream():
  content_piece = delta.get("content")
  if content_piece:
  complete_response.append(content_piece)
+ # Accumulate reasoning content from generator chunks
+ reasoning_piece = delta.get("reasoning_content")
+ if reasoning_piece:
+ complete_reasoning.append(reasoning_piece)
+ # Accumulate tool calls from generator chunks
+ if chunk.get("type") == "tool_call":
+ tc = chunk.get("tool_call", {})
+ if tc.get("id") and tc.get("name"):
+ accumulated_tool_calls.append({
+ "id": tc.get("id"),
+ "function_name": tc.get("name"),
+ "arguments": tc.get("arguments", "")
+ })
+ if chunk.get("type") == "tool_result":
+ tool_results_for_db.append({
+ "name": chunk.get("name"),
+ "tool_call_id": chunk.get("id"),
+ "content": chunk.get("result", "")
+ })
  continue
  yield f"data: {json.dumps({'choices':[{'delta':{'content': str(chunk), 'role': 'assistant'},'finish_reason':None}]})}\n\n"
- # ensure stream termination and cleanup for generator flows
- yield "data: [DONE]\n\n"
- with cancellation_lock:
- if current_stream_id in cancellation_flags:
- del cancellation_flags[current_stream_id]
- print(f"Cleaned up cancellation flag for stream ID: {current_stream_id}")
- return
+ # Generator finished - skip the other stream handling paths

- if isinstance(stream_response, str) :
+ elif isinstance(stream_response, str) :
  print('stream a str and not a gen')
  chunk_data = {
  "id": None,
@@ -3869,7 +3872,7 @@ def stream():
  ]
  }
  yield f"data: {json.dumps(chunk_data)}\n\n"
- return
+
  elif isinstance(stream_response, dict) and 'output' in stream_response and isinstance(stream_response.get('output'), str):
  print('stream a str and not a gen')
  chunk_data = {
@@ -3890,63 +3893,97 @@ def stream():
  ]
  }
  yield f"data: {json.dumps(chunk_data)}\n\n"
- return
- for response_chunk in stream_response.get('response', stream_response.get('output')):
- with cancellation_lock:
- if cancellation_flags.get(current_stream_id, False):
- print(f"Cancellation flag triggered for {current_stream_id}. Breaking loop.")
- interrupted = True
- break

- print('.', end="", flush=True)
- dot_count += 1
- if "hf.co" in model or provider == 'ollama' and 'gpt-oss' not in model:
- chunk_content = response_chunk["message"]["content"] if "message" in response_chunk and "content" in response_chunk["message"] else ""
- if "message" in response_chunk and "tool_calls" in response_chunk["message"]:
- for tool_call in response_chunk["message"]["tool_calls"]:
- if "id" in tool_call:
- tool_call_data["id"] = tool_call["id"]
- if "function" in tool_call:
- if "name" in tool_call["function"]:
- tool_call_data["function_name"] = tool_call["function"]["name"]
- if "arguments" in tool_call["function"]:
- arg_val = tool_call["function"]["arguments"]
- if isinstance(arg_val, dict):
- arg_val = json.dumps(arg_val)
- tool_call_data["arguments"] += arg_val
- if chunk_content:
- complete_response.append(chunk_content)
- chunk_data = {
- "id": None, "object": None,
- "created": response_chunk["created_at"] or datetime.datetime.now(),
- "model": response_chunk["model"],
- "choices": [{"index": 0, "delta": {"content": chunk_content, "role": response_chunk["message"]["role"]}, "finish_reason": response_chunk.get("done_reason")}]
- }
- yield f"data: {json.dumps(chunk_data)}\n\n"
- else:
- chunk_content = ""
- reasoning_content = ""
- for choice in response_chunk.choices:
- if hasattr(choice.delta, "tool_calls") and choice.delta.tool_calls:
- for tool_call in choice.delta.tool_calls:
- if tool_call.id:
- tool_call_data["id"] = tool_call.id
- if tool_call.function:
- if hasattr(tool_call.function, "name") and tool_call.function.name:
- tool_call_data["function_name"] = tool_call.function.name
- if hasattr(tool_call.function, "arguments") and tool_call.function.arguments:
- tool_call_data["arguments"] += tool_call.function.arguments
- for choice in response_chunk.choices:
- if hasattr(choice.delta, "reasoning_content"):
- reasoning_content += choice.delta.reasoning_content
- chunk_content = "".join(choice.delta.content for choice in response_chunk.choices if choice.delta.content is not None)
- if chunk_content:
- complete_response.append(chunk_content)
- chunk_data = {
- "id": response_chunk.id, "object": response_chunk.object, "created": response_chunk.created, "model": response_chunk.model,
- "choices": [{"index": choice.index, "delta": {"content": choice.delta.content, "role": choice.delta.role, "reasoning_content": reasoning_content if hasattr(choice.delta, "reasoning_content") else None}, "finish_reason": choice.finish_reason} for choice in response_chunk.choices]
- }
- yield f"data: {json.dumps(chunk_data)}\n\n"
+ elif isinstance(stream_response, dict):
+ for response_chunk in stream_response.get('response', stream_response.get('output')):
+ with cancellation_lock:
+ if cancellation_flags.get(current_stream_id, False):
+ print(f"Cancellation flag triggered for {current_stream_id}. Breaking loop.")
+ interrupted = True
+ break
+
+ print('.', end="", flush=True)
+ dot_count += 1
+ if "hf.co" in model or provider == 'ollama' and 'gpt-oss' not in model:
+ # Ollama returns ChatResponse objects - support both attribute and dict access
+ msg = getattr(response_chunk, "message", None) or response_chunk.get("message", {}) if hasattr(response_chunk, "get") else {}
+ chunk_content = getattr(msg, "content", None) or (msg.get("content") if hasattr(msg, "get") else "") or ""
+ # Extract Ollama thinking/reasoning tokens
+ reasoning_content = getattr(msg, "thinking", None) or (msg.get("thinking") if hasattr(msg, "get") else None)
+ # Handle tool calls with robust attribute/dict access
+ tool_calls = getattr(msg, "tool_calls", None) or (msg.get("tool_calls") if hasattr(msg, "get") else None)
+ if tool_calls:
+ for tool_call in tool_calls:
+ tc_id = getattr(tool_call, "id", None) or (tool_call.get("id") if hasattr(tool_call, "get") else None)
+ if tc_id:
+ tool_call_data["id"] = tc_id
+ tc_func = getattr(tool_call, "function", None) or (tool_call.get("function") if hasattr(tool_call, "get") else None)
+ if tc_func:
+ tc_name = getattr(tc_func, "name", None) or (tc_func.get("name") if hasattr(tc_func, "get") else None)
+ if tc_name:
+ tool_call_data["function_name"] = tc_name
+ tc_args = getattr(tc_func, "arguments", None) or (tc_func.get("arguments") if hasattr(tc_func, "get") else None)
+ if tc_args:
+ arg_val = tc_args
+ if isinstance(arg_val, dict):
+ arg_val = json.dumps(arg_val)
+ tool_call_data["arguments"] += arg_val
+ # Accumulate complete tool call info for DB storage (Ollama path)
+ if tc_id and tc_func and tc_name:
+ accumulated_tool_calls.append({
+ "id": tc_id,
+ "function_name": tc_name,
+ "arguments": arg_val if tc_args else ""
+ })
+ # Accumulate reasoning content
+ if reasoning_content:
+ complete_reasoning.append(reasoning_content)
+ if chunk_content:
+ complete_response.append(chunk_content)
+ # Extract other fields with robust access
+ created_at = getattr(response_chunk, "created_at", None) or (response_chunk.get("created_at") if hasattr(response_chunk, "get") else None)
+ model_name = getattr(response_chunk, "model", None) or (response_chunk.get("model") if hasattr(response_chunk, "get") else model)
+ msg_role = getattr(msg, "role", None) or (msg.get("role") if hasattr(msg, "get") else "assistant")
+ done_reason = getattr(response_chunk, "done_reason", None) or (response_chunk.get("done_reason") if hasattr(response_chunk, "get") else None)
+ chunk_data = {
+ "id": None, "object": None,
+ "created": created_at or datetime.datetime.now(),
+ "model": model_name,
+ "choices": [{"index": 0, "delta": {"content": chunk_content, "role": msg_role, "reasoning_content": reasoning_content}, "finish_reason": done_reason}]
+ }
+ yield f"data: {json.dumps(chunk_data)}\n\n"
+ else:
+ chunk_content = ""
+ reasoning_content = ""
+ for choice in response_chunk.choices:
+ if hasattr(choice.delta, "tool_calls") and choice.delta.tool_calls:
+ for tool_call in choice.delta.tool_calls:
+ if tool_call.id:
+ tool_call_data["id"] = tool_call.id
+ if tool_call.function:
+ if hasattr(tool_call.function, "name") and tool_call.function.name:
+ tool_call_data["function_name"] = tool_call.function.name
+ if hasattr(tool_call.function, "arguments") and tool_call.function.arguments:
+ tool_call_data["arguments"] += tool_call.function.arguments
+ # Accumulate complete tool call info for DB storage
+ if tool_call.id and tool_call.function and tool_call.function.name:
+ accumulated_tool_calls.append({
+ "id": tool_call.id,
+ "function_name": tool_call.function.name,
+ "arguments": tool_call.function.arguments or ""
+ })
+ for choice in response_chunk.choices:
+ if hasattr(choice.delta, "reasoning_content") and choice.delta.reasoning_content:
+ reasoning_content += choice.delta.reasoning_content
+ complete_reasoning.append(choice.delta.reasoning_content)
+ chunk_content = "".join(choice.delta.content for choice in response_chunk.choices if choice.delta.content is not None)
+ if chunk_content:
+ complete_response.append(chunk_content)
+ chunk_data = {
+ "id": response_chunk.id, "object": response_chunk.object, "created": response_chunk.created, "model": response_chunk.model,
+ "choices": [{"index": choice.index, "delta": {"content": choice.delta.content, "role": choice.delta.role, "reasoning_content": reasoning_content if hasattr(choice.delta, "reasoning_content") else None}, "finish_reason": choice.finish_reason} for choice in response_chunk.choices]
+ }
+ yield f"data: {json.dumps(chunk_data)}\n\n"

  except Exception as e:
  print(f"\nAn exception occurred during streaming for {current_stream_id}: {e}")
@@ -3992,7 +4029,7 @@ def stream():
  message_id=generate_message_id(),
  )

- # Save assistant message to the database
+ # Save assistant message to the database with reasoning content and tool calls
  npc_name_to_save = npc_object.name if npc_object else ''
  save_conversation_message(
  command_history,
@@ -4005,6 +4042,9 @@ def stream():
  npc=npc_name_to_save,
  team=team,
  message_id=message_id,
+ reasoning_content=''.join(complete_reasoning) if complete_reasoning else None,
+ tool_calls=accumulated_tool_calls if accumulated_tool_calls else None,
+ tool_results=tool_results_for_db if tool_results_for_db else None,
  )

  # Start background tasks for memory extraction and context compression
@@ -4166,11 +4206,24 @@ def get_conversation_messages(conversation_id):
  try:
  engine = get_db_connection()
  with engine.connect() as conn:
-
+
  query = text("""
  WITH ranked_messages AS (
  SELECT
- ch.*,
+ ch.id,
+ ch.message_id,
+ ch.timestamp,
+ ch.role,
+ ch.content,
+ ch.conversation_id,
+ ch.directory_path,
+ ch.model,
+ ch.provider,
+ ch.npc,
+ ch.team,
+ ch.reasoning_content,
+ ch.tool_calls,
+ ch.tool_results,
  GROUP_CONCAT(ma.id) as attachment_ids,
  ROW_NUMBER() OVER (
  PARTITION BY ch.role, strftime('%s', ch.timestamp)
@@ -4191,20 +4244,32 @@ def get_conversation_messages(conversation_id):
  result = conn.execute(query, {"conversation_id": conversation_id})
  messages = result.fetchall()

+ def parse_json_field(value):
+ """Parse a JSON string field, returning None if empty or invalid."""
+ if not value:
+ return None
+ try:
+ return json.loads(value)
+ except (json.JSONDecodeError, TypeError):
+ return None
+
  return jsonify(
  {
  "messages": [
  {
- "message_id": msg[1] if len(msg) > 1 else None,
+ "message_id": msg[1] if len(msg) > 1 else None,
  "role": msg[3] if len(msg) > 3 else None,
  "content": msg[4] if len(msg) > 4 else None,
- "timestamp": msg[5] if len(msg) > 5 else None,
- "model": msg[6] if len(msg) > 6 else None,
- "provider": msg[7] if len(msg) > 7 else None,
- "npc": msg[8] if len(msg) > 8 else None,
+ "timestamp": msg[2] if len(msg) > 2 else None,
+ "model": msg[7] if len(msg) > 7 else None,
+ "provider": msg[8] if len(msg) > 8 else None,
+ "npc": msg[9] if len(msg) > 9 else None,
+ "reasoningContent": msg[11] if len(msg) > 11 else None,
+ "toolCalls": parse_json_field(msg[12]) if len(msg) > 12 else None,
+ "toolResults": parse_json_field(msg[13]) if len(msg) > 13 else None,
  "attachments": (
  get_message_attachments(msg[1])
- if len(msg) > 1 and msg[-1]
+ if len(msg) > 1 and msg[14] # attachment_ids is at index 14
  else []
  ),
  }
@@ -4248,31 +4313,19 @@ def ollama_status():
  @app.route("/api/ollama/tool_models", methods=["GET"])
  def get_ollama_tool_models():
  """
- Best-effort detection of Ollama models whose templates include tool-call support.
- We scan templates for tool placeholders; if none are found we assume tools are unsupported.
+ Returns all Ollama models. Tool capability detection is unreliable,
+ so we don't filter - let the user try and the backend will handle failures.
  """
  try:
  detected = []
  listing = ollama.list()
  for model in listing.get("models", []):
  name = getattr(model, "model", None) or model.get("name") if isinstance(model, dict) else None
- if not name:
- continue
- try:
- details = ollama.show(name)
- tmpl = details.get("template") or ""
- if "{{- if .Tools" in tmpl or "{{- range .Tools" in tmpl or "{{- if .ToolCalls" in tmpl:
- detected.append(name)
- continue
- metadata = details.get("metadata") or {}
- if metadata.get("tools") or metadata.get("tool_calls"):
- detected.append(name)
- except Exception as inner_e:
- print(f"Warning: could not inspect ollama model {name} for tool support: {inner_e}")
- continue
+ if name:
+ detected.append(name)
  return jsonify({"models": detected, "error": None})
  except Exception as e:
- print(f"Error listing Ollama tool-capable models: {e}")
+ print(f"Error listing Ollama models: {e}")
  return jsonify({"models": [], "error": str(e)}), 500

npcpy-1.3.1.dist-info/METADATA → npcpy-1.3.3.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: npcpy
- Version: 1.3.1
+ Version: 1.3.3
  Summary: npcpy is the premier open-source library for integrating LLMs and Agents into python systems.
  Home-page: https://github.com/NPC-Worldwide/npcpy
  Author: Christopher Agostino
npcpy-1.3.1.dist-info/RECORD → npcpy-1.3.3.dist-info/RECORD CHANGED
@@ -1,12 +1,12 @@
  npcpy/__init__.py,sha256=uJcJGjR1mWvE69GySNAufkgiRwJA28zdObDBWaxp0tY,505
- npcpy/llm_funcs.py,sha256=KJpjN6q5iW_qdUfgt4tzYENCAu86376io8eFZ7wp76Y,78081
+ npcpy/llm_funcs.py,sha256=p-lhyU37-M1NrXHQVlCAUYFuD52dHGmhZcW6YsYObag,78087
  npcpy/main.py,sha256=RWoRIj6VQLxKdOKvdVyaq2kwG35oRpeXPvp1CAAoG-w,81
  npcpy/ml_funcs.py,sha256=UI7k7JR4XOH_VXR-xxLaO4r9Kyx_jBaEnp3TUIY7ZLQ,22657
  npcpy/npc_array.py,sha256=fVTxcMiXV-lvltmuwaRnTU9D3ikPq3-7k5wzp7MA5OY,40224
- npcpy/npc_compiler.py,sha256=956ZMSSrYmVRp52-A4-wasg6wey3QIWHGGirDL-dW8o,111498
+ npcpy/npc_compiler.py,sha256=X2BjMqKL7hbS37PPkSDGgZSF_PF_GNVGLd92ePRNRwQ,111868
  npcpy/npc_sysenv.py,sha256=rtE3KrXvIuOEpMq1CW5eK5K0o3f6mXagNXCeMnhHob4,36736
  npcpy/npcs.py,sha256=eExuVsbTfrRobTRRptRpDm46jCLWUgbvy4_U7IUQo-c,744
- npcpy/serve.py,sha256=wbIXUFlmfKg72ZYoX_cBJ8FVDFabHsGnbMwMIj-412Y,174839
+ npcpy/serve.py,sha256=5S3v4lp3fPozsnp_48SuVenf3JTiSiIBRxgfAReovmM,181539
  npcpy/tools.py,sha256=A5_oVmZkzGnI3BI-NmneuxeXQq-r29PbpAZP4nV4jrc,5303
  npcpy/data/__init__.py,sha256=1tcoChR-Hjn905JDLqaW9ElRmcISCTJdE7BGXPlym2Q,642
  npcpy/data/audio.py,sha256=3qryGXnWHa4JFMonjuX-lf0fCrF8jmbHe7mHAuOdua0,12397
@@ -17,7 +17,7 @@ npcpy/data/text.py,sha256=jP0a1qZZaSJdK-LdZTn2Jjdxqmkd3efxDLEoxflJQeY,5010
  npcpy/data/video.py,sha256=H-V3mTu_ktD9u-QhYeo4aW3u9z0AtoAdRZmvRPEpE98,2887
  npcpy/data/web.py,sha256=ARGoVKUlQmaiX0zJbSvvFmRCwOv_Z7Pcan9c5GxYObQ,5117
  npcpy/ft/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- npcpy/ft/diff.py,sha256=OjdrVn_rFkFcP3MnzVgIhCdtfnbtnxQcvl5XCc6Wi-o,12376
+ npcpy/ft/diff.py,sha256=2-NbY0p0CP5Qr9mnnncxRBwzmxRq9NKcl8B5BeT1vQ4,12319
  npcpy/ft/ge.py,sha256=0VzIiXq2wCzGcK1x0Wd-myJ3xRf-FNaPg0GkHEZegUM,3552
  npcpy/ft/memory_trainer.py,sha256=QZPznxEEwXbOGroHdMUMa5xpqlNwgV6nqOazI2xgrnQ,6635
  npcpy/ft/model_ensembler.py,sha256=BRX4hJ_rvF1vKTzjMhlahZqPttUgc3PqmzUJDqIfIps,10038
@@ -29,10 +29,10 @@ npcpy/gen/audio_gen.py,sha256=w4toESu7nmli1T5FOwRRCGC_QK9W-SMWknYYkbRv9jE,635
  npcpy/gen/embeddings.py,sha256=QStTJ2ELiC379OEZsLEgGGIIFD267Y8zQchs7HRn2Zg,2089
  npcpy/gen/image_gen.py,sha256=VflU_wJsKWJarOVwZtL2M8ymDFfKNz8WX66Rwk4obeo,21778
  npcpy/gen/ocr.py,sha256=rgmXWHrCYX1Po-qG_LrNFbVYEZ8aaupxFTgparcoB_Y,6554
- npcpy/gen/response.py,sha256=xSFHNZTDsebFo_nptWwSahpCU9_4pbCqabMFZ3X4_Bg,39979
+ npcpy/gen/response.py,sha256=lH3fR3Sx1Cm8Zc0MJyHzTuuwTjVPgSJUZVxnkFIhzLE,40643
  npcpy/gen/video_gen.py,sha256=RFi3Zcq_Hn3HIcfoF3mijQ6G7RYFZaM_9pjPTh-8E64,3239
  npcpy/memory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- npcpy/memory/command_history.py,sha256=vWzZ4F4o0XOSHn50SkdP885jG1aZIZvfcPAh8EZWlQk,54497
+ npcpy/memory/command_history.py,sha256=1488weOYtnm-wyenUvZKHaNgZe5OKOZSaQ35WNeceiM,56226
  npcpy/memory/kg_vis.py,sha256=TrQQCRh_E7Pyr-GPAHLSsayubAfGyf4HOEFrPB6W86Q,31280
  npcpy/memory/knowledge_graph.py,sha256=2XpIlsyPdAOnzQ6kkwP6MWPGwL3P6V33_3suNJYMMJE,48681
  npcpy/memory/memory_processor.py,sha256=6PfVnSBA9ag5EhHJinXoODfEPTlDDoaT0PtCCuZO6HI,2598
@@ -50,8 +50,8 @@ npcpy/work/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  npcpy/work/desktop.py,sha256=F3I8mUtJp6LAkXodsh8hGZIncoads6c_2Utty-0EdDA,2986
  npcpy/work/plan.py,sha256=QyUwg8vElWiHuoS-xK4jXTxxHvkMD3VkaCEsCmrEPQk,8300
  npcpy/work/trigger.py,sha256=P1Y8u1wQRsS2WACims_2IdkBEar-iBQix-2TDWoW0OM,9948
- npcpy-1.3.1.dist-info/licenses/LICENSE,sha256=j0YPvce7Ng9e32zYOu0EmXjXeJ0Nwawd0RA3uSGGH4E,1070
- npcpy-1.3.1.dist-info/METADATA,sha256=dbTBVm4ZMDwwnGkOC3Ahwwhf9OpcecBO_Tdww1ToUwE,37884
- npcpy-1.3.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- npcpy-1.3.1.dist-info/top_level.txt,sha256=g1pbSvrOOncB74Bg5-J0Olg4V0A5VzDw-Xz5YObq8BU,6
- npcpy-1.3.1.dist-info/RECORD,,
+ npcpy-1.3.3.dist-info/licenses/LICENSE,sha256=j0YPvce7Ng9e32zYOu0EmXjXeJ0Nwawd0RA3uSGGH4E,1070
+ npcpy-1.3.3.dist-info/METADATA,sha256=CPpyEgdni51dKJjRF-XZZp29bZme7W7gaCzHVqfi88k,37884
+ npcpy-1.3.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ npcpy-1.3.3.dist-info/top_level.txt,sha256=g1pbSvrOOncB74Bg5-J0Olg4V0A5VzDw-Xz5YObq8BU,6
+ npcpy-1.3.3.dist-info/RECORD,,