npcpy-1.3.11-py3-none-any.whl → npcpy-1.3.13-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
npcpy/gen/response.py CHANGED
@@ -259,6 +259,24 @@ def get_ollama_response(
                  prompt = f"Content from CSV: {os.path.basename(attachment)} (first 100 rows):\n{csv_sample} \n csv description: {csv_data.describe()}"
              except Exception:
                  pass
+             else:
+                 # Handle text-based files
+                 text_extensions = {'.txt', '.text', '.log', '.md', '.markdown', '.rst', '.json', '.yaml', '.yml', '.toml', '.ini', '.conf', '.cfg', '.xml', '.html', '.htm', '.py', '.js', '.ts', '.jsx', '.tsx', '.java', '.c', '.h', '.cpp', '.hpp', '.go', '.rs', '.rb', '.php', '.sh', '.bash', '.sql', '.css', '.scss'}
+                 filename = os.path.basename(attachment)
+                 if ext in text_extensions or ext == '':
+                     try:
+                         with open(attachment, 'r', encoding='utf-8', errors='replace') as f:
+                             text_content = f.read()
+                         max_chars = 50000
+                         if len(text_content) > max_chars:
+                             text_content = text_content[:max_chars] + f"\n\n... [truncated]"
+                         if text_content.strip():
+                             if prompt:
+                                 prompt += f"\n\nContent from {filename}:\n```\n{text_content}\n```"
+                             else:
+                                 prompt = f"Content from {filename}:\n```\n{text_content}\n```"
+                     except Exception:
+                         pass


      if prompt:
@@ -797,6 +815,24 @@ def get_litellm_response(
                  prompt = f"Content from CSV: {os.path.basename(attachment)} (first 10 rows):\n{csv_sample}"
              except Exception:
                  pass
+             else:
+                 # Handle text-based files
+                 text_extensions = {'.txt', '.text', '.log', '.md', '.markdown', '.rst', '.json', '.yaml', '.yml', '.toml', '.ini', '.conf', '.cfg', '.xml', '.html', '.htm', '.py', '.js', '.ts', '.jsx', '.tsx', '.java', '.c', '.h', '.cpp', '.hpp', '.go', '.rs', '.rb', '.php', '.sh', '.bash', '.sql', '.css', '.scss'}
+                 filename = os.path.basename(attachment)
+                 if ext in text_extensions or ext == '':
+                     try:
+                         with open(attachment, 'r', encoding='utf-8', errors='replace') as f:
+                             text_content = f.read()
+                         max_chars = 50000
+                         if len(text_content) > max_chars:
+                             text_content = text_content[:max_chars] + f"\n\n... [truncated]"
+                         if text_content.strip():
+                             if prompt:
+                                 prompt += f"\n\nContent from {filename}:\n```\n{text_content}\n```"
+                             else:
+                                 prompt = f"Content from {filename}:\n```\n{text_content}\n```"
+                     except Exception:
+                         pass

      if prompt:
          if result['messages'] and result['messages'][-1]["role"] == "user":
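
Both hunks add the same fallback: attachments whose extension is not handled elsewhere (images, PDFs, CSVs) are read as plain text, truncated to 50,000 characters, and folded into the prompt. A minimal standalone sketch of that logic, using a hypothetical read_text_attachment helper (not part of npcpy) to illustrate the truncation and prompt-building behavior:

    import os

    def read_text_attachment(attachment, prompt="", max_chars=50000):
        # Mirrors the new fallback branch: read the file as UTF-8 (replacing
        # undecodable bytes), truncate long files, and fold the text into the prompt.
        text_extensions = {'.txt', '.md', '.json', '.yaml', '.py', '.log'}  # abbreviated set
        ext = os.path.splitext(attachment)[1].lower()
        filename = os.path.basename(attachment)
        if ext not in text_extensions and ext != '':
            return prompt
        try:
            with open(attachment, 'r', encoding='utf-8', errors='replace') as f:
                text_content = f.read()
        except OSError:
            return prompt
        if len(text_content) > max_chars:
            text_content = text_content[:max_chars] + "\n\n... [truncated]"
        if text_content.strip():
            block = f"Content from {filename}:\n```\n{text_content}\n```"
            prompt = f"{prompt}\n\n{block}" if prompt else block
        return prompt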
npcpy/memory/knowledge_graph.py CHANGED
@@ -344,7 +344,6 @@ def kg_evolve_incremental(existing_kg,

      current_gen = existing_kg.get('generation', 0)
      next_gen = current_gen + 1
-     print(f"\n--- ABSORBING INFO: Gen {current_gen} -> Gen {next_gen} ---")

      newly_added_concepts = []
      concept_links = list(existing_kg.get('concept_links', []))
@@ -359,8 +358,7 @@ def kg_evolve_incremental(existing_kg,
      all_concept_names = list(existing_concept_names)

      all_new_facts = []
-     print(npc, npc.model, npc.provider)
-
+
      if new_facts:
          all_new_facts = new_facts
          print(f'using pre-approved facts: {len(all_new_facts)}')
npcpy/npc_compiler.py CHANGED
@@ -7,6 +7,41 @@ import sqlite3
  import numpy as np
  import pandas as pd
  import matplotlib.pyplot as plt
+ import matplotlib as mpl
+
+ # Professional plot styling (from kg-research matplotlibrc)
+ mpl.rcParams.update({
+     'font.family': 'serif',
+     'axes.labelsize': 20,
+     'axes.grid.axis': 'both',
+     'axes.grid.which': 'major',
+     'axes.prop_cycle': mpl.cycler('color', ['k', 'b', 'r', 'g', 'c', 'm', 'y', 'k']),
+     'xtick.top': True,
+     'xtick.direction': 'in',
+     'xtick.major.size': 10,
+     'xtick.minor.size': 5,
+     'xtick.labelsize': 20,
+     'xtick.minor.visible': True,
+     'xtick.major.top': True,
+     'xtick.major.bottom': True,
+     'xtick.minor.top': True,
+     'xtick.minor.bottom': True,
+     'ytick.left': True,
+     'ytick.right': True,
+     'ytick.direction': 'in',
+     'ytick.major.size': 10,
+     'ytick.minor.size': 5,
+     'ytick.labelsize': 20,
+     'ytick.minor.visible': True,
+     'ytick.major.left': True,
+     'ytick.major.right': True,
+     'ytick.minor.left': True,
+     'ytick.minor.right': True,
+     'legend.frameon': False,
+     'legend.fontsize': 12,
+     'image.cmap': 'plasma',
+     'errorbar.capsize': 1,
+ })
  import re
  import random
  from datetime import datetime
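
Because the rcParams update runs at module import time, importing npcpy.npc_compiler changes Matplotlib's global style for the rest of the process. A quick way to observe this side effect (assuming npcpy 1.3.13 and its dependencies are installed):

    import matplotlib as mpl

    print(mpl.rcParams['font.family'])   # Matplotlib default, e.g. ['sans-serif']
    import npcpy.npc_compiler            # applies the serif / inward-tick style globally
    print(mpl.rcParams['font.family'])   # ['serif'] after the import
    print(mpl.rcParams['image.cmap'])    # 'plasma'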
@@ -31,9 +66,31 @@ from npcpy.npc_sysenv import (
  from npcpy.memory.command_history import CommandHistory, generate_message_id

  class SilentUndefined(Undefined):
+     """Undefined that silently returns empty string instead of raising errors"""
      def _fail_with_undefined_error(self, *args, **kwargs):
          return ""

+     def __str__(self):
+         return ""
+
+     def __repr__(self):
+         return ""
+
+     def __bool__(self):
+         return False
+
+     def __eq__(self, other):
+         return other == "" or other is None or isinstance(other, Undefined)
+
+     def __ne__(self, other):
+         return not self.__eq__(other)
+
+     def __iter__(self):
+         return iter([])
+
+     def __len__(self):
+         return 0
+
  import math
  from PIL import Image
  from jinja2 import Environment, ChainableUndefined
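
With Jinja2's stock Undefined, iterating over or otherwise operating on a missing variable raises UndefinedError; the added dunder methods make missing variables behave as empty, falsy values so such templates render cleanly. A small sketch of the rendering behavior this enables (trimmed copy of the class, independent of npcpy):

    from jinja2 import Environment, Undefined

    class SilentUndefined(Undefined):
        # Undefined values render as "" and act as empty/falsy in bool(), iteration, len().
        def _fail_with_undefined_error(self, *args, **kwargs):
            return ""
        def __str__(self):
            return ""
        def __bool__(self):
            return False
        def __iter__(self):
            return iter([])
        def __len__(self):
            return 0

    env = Environment(undefined=SilentUndefined)
    tpl = env.from_string("{% for x in missing %}{{ x }}{% endfor %}done:{{ missing }}")
    print(tpl.render())  # -> "done:"  (would raise UndefinedError with the default Undefined)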
@@ -152,11 +209,35 @@ def get_log_entries(entity_id, entry_type=None, limit=10, db_path="~/npcsh_histo
      ]


+ def _json_dumps_with_undefined(obj, **kwargs):
+     """Custom JSON dumps that handles SilentUndefined objects"""
+     def default_handler(o):
+         if isinstance(o, Undefined):
+             return ""
+         raise TypeError(f"Object of type {type(o).__name__} is not JSON serializable")
+     return json.dumps(obj, default=default_handler, **kwargs)
+
+
  def load_yaml_file(file_path):
-     """Load a YAML file with error handling"""
+     """Load a YAML file with error handling, rendering Jinja2 first"""
      try:
          with open(os.path.expanduser(file_path), 'r') as f:
-             return yaml.safe_load(f)
+             content = f.read()
+
+         # Check if file has Jinja2 control structures that need pre-rendering
+         # Only render if there are {% %} blocks, otherwise parse directly
+         if '{%' not in content:
+             return yaml.safe_load(content)
+
+         # First pass: render Jinja2 templates to produce valid YAML
+         # This allows {% if %} and other control structures to work
+         jinja_env = Environment(undefined=SilentUndefined)
+         # Configure tojson filter to handle SilentUndefined
+         jinja_env.policies['json.dumps_function'] = _json_dumps_with_undefined
+         template = jinja_env.from_string(content)
+         rendered_content = template.render({})
+
+         return yaml.safe_load(rendered_content)
      except Exception as e:
          print(f"Error loading YAML file {file_path}: {e}")
          return None
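
The reworked load_yaml_file is a two-pass loader: if the file contains `{%` control blocks it is first rendered with Jinja2 (using SilentUndefined so unknown variables collapse to empty strings), and only then parsed as YAML. A minimal sketch of the same two-pass idea on an in-memory string (illustrative, not the npcpy function itself):

    import yaml
    from jinja2 import Environment, Undefined

    class SilentUndefined(Undefined):
        def _fail_with_undefined_error(self, *args, **kwargs):
            return ""
        def __str__(self):
            return ""

    raw = """
    name: demo
    steps:
    {% if include_extra %}
      - extra_step
    {% endif %}
      - main_step
    """

    # Pass 1: render Jinja2 control structures; undefined variables render as "".
    rendered = Environment(undefined=SilentUndefined).from_string(raw).render({})
    # Pass 2: parse the now-plain YAML.
    print(yaml.safe_load(rendered))  # {'name': 'demo', 'steps': ['main_step']}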
npcpy/serve.py CHANGED
@@ -46,7 +46,8 @@ from npcsh._state import ShellState, initialize_base_npcs_if_needed
  from npcsh.config import NPCSH_DB_PATH


- from npcpy.memory.knowledge_graph import load_kg_from_db
+ from npcpy.memory.knowledge_graph import load_kg_from_db, find_similar_facts_chroma
+ from npcpy.memory.command_history import setup_chroma_db
  from npcpy.memory.search import execute_rag_command, execute_brainblast_command
  from npcpy.data.load import load_file_contents
  from npcpy.data.web import search_web
@@ -67,12 +68,14 @@ from npcpy.memory.command_history import (
      save_conversation_message,
      generate_message_id,
  )
- from npcpy.npc_compiler import Jinx, NPC, Team, load_jinxs_from_directory, build_jinx_tool_catalog, initialize_npc_project
+ from npcpy.npc_compiler import Jinx, NPC, Team, load_jinxs_from_directory, build_jinx_tool_catalog, initialize_npc_project, load_yaml_file

  from npcpy.llm_funcs import (
      get_llm_response, check_llm_command
  )
+ from npcpy.gen.embeddings import get_embeddings
  from termcolor import cprint
+
  from npcpy.tools import auto_tools

  import json
@@ -572,7 +575,7 @@ def fetch_messages_for_conversation(conversation_id):
      try:
          with engine.connect() as conn:
              query = text("""
-                 SELECT role, content, timestamp
+                 SELECT role, content, timestamp, tool_calls, tool_results
                  FROM conversation_history
                  WHERE conversation_id = :conversation_id
                  ORDER BY timestamp ASC
@@ -580,14 +583,45 @@ def fetch_messages_for_conversation(conversation_id):
              result = conn.execute(query, {"conversation_id": conversation_id})
              messages = result.fetchall()

-             return [
-                 {
-                     "role": message[0],
-                     "content": message[1],
-                     "timestamp": message[2],
+             parsed_messages = []
+             for message in messages:
+                 role = message[0]
+                 content = message[1]
+
+                 msg_dict = {
+                     "role": role,
+                     "content": content,
+                     "timestamp": message[2],
                  }
-                 for message in messages
-             ]
+
+                 # Handle tool messages - extract tool_call_id from content JSON
+                 if role == "tool" and content:
+                     try:
+                         content_parsed = json.loads(content) if isinstance(content, str) else content
+                         if isinstance(content_parsed, dict):
+                             if "tool_call_id" in content_parsed:
+                                 msg_dict["tool_call_id"] = content_parsed["tool_call_id"]
+                             if "tool_name" in content_parsed:
+                                 msg_dict["name"] = content_parsed["tool_name"]
+                             if "content" in content_parsed:
+                                 msg_dict["content"] = content_parsed["content"]
+                     except (json.JSONDecodeError, TypeError):
+                         pass
+
+                 # Parse tool_calls JSON if present (for assistant messages)
+                 if message[3]:
+                     try:
+                         msg_dict["tool_calls"] = json.loads(message[3]) if isinstance(message[3], str) else message[3]
+                     except (json.JSONDecodeError, TypeError):
+                         pass
+                 # Parse tool_results JSON if present
+                 if message[4]:
+                     try:
+                         msg_dict["tool_results"] = json.loads(message[4]) if isinstance(message[4], str) else message[4]
+                     except (json.JSONDecodeError, TypeError):
+                         pass
+                 parsed_messages.append(msg_dict)
+             return parsed_messages
      except Exception as e:
          print(f"Error fetching messages for conversation: {e}")
          return []
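
Tool messages are stored with their metadata folded into the content column as JSON, so the parser above lifts tool_call_id, tool_name, and the inner content back onto the message dict. A small round-trip sketch of that convention (the stored shape is inferred from the parsing code, not from a schema in this diff):

    import json

    stored_content = json.dumps({
        "tool_call_id": "call_123",          # hypothetical values for illustration
        "tool_name": "search_web",
        "content": "top result: ...",
    })

    msg_dict = {"role": "tool", "content": stored_content, "timestamp": "2024-01-01T00:00:00"}
    parsed = json.loads(msg_dict["content"])
    if isinstance(parsed, dict):
        msg_dict["tool_call_id"] = parsed.get("tool_call_id")
        msg_dict["name"] = parsed.get("tool_name")
        msg_dict["content"] = parsed.get("content")
    print(msg_dict)  # now matches the OpenAI-style tool-message shape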
@@ -681,6 +715,265 @@ def get_centrality_data():
      concept_degree = {node: cent for node, cent in nx.degree_centrality(G).items() if node in concepts_df['name'].values}
      return jsonify(centrality={'degree': concept_degree})

+ @app.route('/api/kg/search')
+ def search_kg():
+     """Search facts and concepts by keyword"""
+     try:
+         q = request.args.get('q', '').strip().lower()
+         generation = request.args.get('generation', type=int)
+         search_type = request.args.get('type', 'both')  # fact, concept, or both
+         limit = request.args.get('limit', 50, type=int)
+
+         if not q:
+             return jsonify({"error": "Query parameter 'q' is required"}), 400
+
+         concepts_df, facts_df, links_df = load_kg_data(generation)
+         results = {"facts": [], "concepts": [], "query": q}
+
+         # Search facts
+         if search_type in ('both', 'fact'):
+             for _, row in facts_df.iterrows():
+                 statement = str(row.get('statement', '')).lower()
+                 source_text = str(row.get('source_text', '')).lower()
+                 if q in statement or q in source_text:
+                     results["facts"].append({
+                         "statement": row.get('statement'),
+                         "source_text": row.get('source_text'),
+                         "type": row.get('type'),
+                         "generation": row.get('generation'),
+                         "origin": row.get('origin')
+                     })
+                     if len(results["facts"]) >= limit:
+                         break
+
+         # Search concepts
+         if search_type in ('both', 'concept'):
+             for _, row in concepts_df.iterrows():
+                 name = str(row.get('name', '')).lower()
+                 description = str(row.get('description', '')).lower()
+                 if q in name or q in description:
+                     results["concepts"].append({
+                         "name": row.get('name'),
+                         "description": row.get('description'),
+                         "generation": row.get('generation'),
+                         "origin": row.get('origin')
+                     })
+                     if len(results["concepts"]) >= limit:
+                         break
+
+         return jsonify(results)
+
+     except Exception as e:
+         traceback.print_exc()
+         return jsonify({"error": str(e)}), 500
+
+ @app.route('/api/kg/embed', methods=['POST'])
+ def embed_kg_facts():
+     """Embed existing facts from SQL to Chroma for semantic search"""
+     try:
+         data = request.get_json() or {}
+         generation = data.get('generation')
+         batch_size = data.get('batch_size', 10)
+
+         # Load facts from SQL
+         _, facts_df, _ = load_kg_data(generation)
+
+         if facts_df.empty:
+             return jsonify({"message": "No facts to embed", "count": 0})
+
+         # Setup Chroma
+         chroma_db_path = os.path.expanduser('~/npcsh_chroma_db')
+         _, chroma_collection = setup_chroma_db(
+             "knowledge_graph",
+             "Facts extracted from various sources",
+             chroma_db_path
+         )
+
+         # Process in batches
+         from npcpy.memory.knowledge_graph import store_fact_with_embedding
+         import hashlib
+
+         embedded_count = 0
+         skipped_count = 0
+
+         statements = facts_df['statement'].dropna().tolist()
+
+         for i in range(0, len(statements), batch_size):
+             batch = statements[i:i + batch_size]
+
+             # Get embeddings for batch
+             try:
+                 embeddings = get_embeddings(batch)
+             except Exception as e:
+                 print(f"Failed to get embeddings for batch {i}: {e}")
+                 continue
+
+             for j, statement in enumerate(batch):
+                 fact_id = hashlib.md5(statement.encode()).hexdigest()
+
+                 # Check if already exists
+                 try:
+                     existing = chroma_collection.get(ids=[fact_id])
+                     if existing and existing.get('ids'):
+                         skipped_count += 1
+                         continue
+                 except:
+                     pass
+
+                 # Get metadata from dataframe
+                 row = facts_df[facts_df['statement'] == statement].iloc[0] if len(facts_df[facts_df['statement'] == statement]) > 0 else None
+                 metadata = {
+                     "generation": int(row.get('generation', 0)) if row is not None and pd.notna(row.get('generation')) else 0,
+                     "origin": str(row.get('origin', '')) if row is not None else '',
+                     "type": str(row.get('type', '')) if row is not None else '',
+                 }
+
+                 # Store with embedding
+                 result = store_fact_with_embedding(
+                     chroma_collection, statement, metadata, embeddings[j]
+                 )
+                 if result:
+                     embedded_count += 1
+
+         return jsonify({
+             "message": f"Embedded {embedded_count} facts, skipped {skipped_count} existing",
+             "embedded": embedded_count,
+             "skipped": skipped_count,
+             "total_facts": len(statements)
+         })
+
+     except Exception as e:
+         traceback.print_exc()
+         return jsonify({"error": str(e)}), 500
+
+ @app.route('/api/kg/search/semantic')
+ def search_kg_semantic():
+     """Semantic search for facts using vector similarity"""
+     try:
+         q = request.args.get('q', '').strip()
+         generation = request.args.get('generation', type=int)
+         limit = request.args.get('limit', 10, type=int)
+
+         if not q:
+             return jsonify({"error": "Query parameter 'q' is required"}), 400
+
+         # Setup Chroma connection
+         chroma_db_path = os.path.expanduser('~/npcsh_chroma_db')
+         try:
+             _, chroma_collection = setup_chroma_db(
+                 "knowledge_graph",
+                 "Facts extracted from various sources",
+                 chroma_db_path
+             )
+         except Exception as e:
+             return jsonify({
+                 "error": f"Chroma DB not available: {str(e)}",
+                 "facts": [],
+                 "query": q
+             }), 200
+
+         # Get query embedding
+         try:
+             query_embedding = get_embeddings([q])[0]
+         except Exception as e:
+             return jsonify({
+                 "error": f"Failed to generate embedding: {str(e)}",
+                 "facts": [],
+                 "query": q
+             }), 200
+
+         # Build metadata filter for generation if specified
+         metadata_filter = None
+         if generation is not None:
+             metadata_filter = {"generation": generation}
+
+         # Search Chroma
+         similar_facts = find_similar_facts_chroma(
+             chroma_collection,
+             q,
+             query_embedding=query_embedding,
+             n_results=limit,
+             metadata_filter=metadata_filter
+         )
+
+         # Format results
+         results = {
+             "facts": [
+                 {
+                     "statement": f["fact"],
+                     "distance": f.get("distance"),
+                     "metadata": f.get("metadata", {}),
+                     "id": f.get("id")
+                 }
+                 for f in similar_facts
+             ],
+             "query": q,
+             "total": len(similar_facts)
+         }
+
+         return jsonify(results)
+
+     except Exception as e:
+         traceback.print_exc()
+         return jsonify({"error": str(e)}), 500
+
+ @app.route('/api/kg/facts')
+ def get_kg_facts():
+     """Get facts, optionally filtered by generation"""
+     try:
+         generation = request.args.get('generation', type=int)
+         limit = request.args.get('limit', 100, type=int)
+         offset = request.args.get('offset', 0, type=int)
+
+         _, facts_df, _ = load_kg_data(generation)
+
+         facts = []
+         for i, row in facts_df.iloc[offset:offset+limit].iterrows():
+             facts.append({
+                 "statement": row.get('statement'),
+                 "source_text": row.get('source_text'),
+                 "type": row.get('type'),
+                 "generation": row.get('generation'),
+                 "origin": row.get('origin')
+             })
+
+         return jsonify({
+             "facts": facts,
+             "total": len(facts_df),
+             "offset": offset,
+             "limit": limit
+         })
+
+     except Exception as e:
+         traceback.print_exc()
+         return jsonify({"error": str(e)}), 500
+
+ @app.route('/api/kg/concepts')
+ def get_kg_concepts():
+     """Get concepts, optionally filtered by generation"""
+     try:
+         generation = request.args.get('generation', type=int)
+         limit = request.args.get('limit', 100, type=int)
+
+         concepts_df, _, _ = load_kg_data(generation)
+
+         concepts = []
+         for _, row in concepts_df.head(limit).iterrows():
+             concepts.append({
+                 "name": row.get('name'),
+                 "description": row.get('description'),
+                 "generation": row.get('generation'),
+                 "origin": row.get('origin')
+             })
+
+         return jsonify({
+             "concepts": concepts,
+             "total": len(concepts_df)
+         })
+
+     except Exception as e:
+         traceback.print_exc()
+         return jsonify({"error": str(e)}), 500


  @app.route("/api/attachments/<message_id>", methods=["GET"])
@@ -822,10 +1115,9 @@ def get_available_jinxs():
      def get_jinx_name_from_file(filepath):
          """Read jinx_name from file, fallback to filename."""
          try:
-             with open(filepath, 'r') as f:
-                 data = yaml.safe_load(f)
-                 if data and 'jinx_name' in data:
-                     return data['jinx_name']
+             data = load_yaml_file(filepath)
+             if data and 'jinx_name' in data:
+                 return data['jinx_name']
          except:
              pass
          return os.path.basename(filepath)[:-5]
@@ -1893,9 +2185,10 @@ def get_jinxs_global():
          for file in files:
              if file.endswith(".jinx"):
                  jinx_path = os.path.join(root, file)
-                 with open(jinx_path, 'r') as f:
-                     raw_data = yaml.safe_load(f)
-
+                 raw_data = load_yaml_file(jinx_path)
+                 if raw_data is None:
+                     continue
+
                  # Preserve full input definitions including defaults
                  inputs = raw_data.get("inputs", [])

@@ -1929,9 +2222,10 @@ def get_jinxs_project():
          for file in files:
              if file.endswith(".jinx"):
                  jinx_path = os.path.join(root, file)
-                 with open(jinx_path, 'r') as f:
-                     raw_data = yaml.safe_load(f)
-
+                 raw_data = load_yaml_file(jinx_path)
+                 if raw_data is None:
+                     continue
+
                  # Preserve full input definitions including defaults
                  inputs = raw_data.get("inputs", [])

@@ -2087,8 +2381,9 @@ def get_npc_team_global():
      for file in os.listdir(global_npc_directory):
          if file.endswith(".npc"):
              npc_path = os.path.join(global_npc_directory, file)
-             with open(npc_path, 'r') as f:
-                 raw_data = yaml.safe_load(f)
+             raw_data = load_yaml_file(npc_path)
+             if raw_data is None:
+                 continue

              npc_data.append({
                  "name": raw_data.get("name", file[:-4]),
@@ -2123,8 +2418,9 @@ def get_npc_team_project():
      for file in os.listdir(project_npc_directory):
          if file.endswith(".npc"):
              npc_path = os.path.join(project_npc_directory, file)
-             with open(npc_path, 'r') as f:
-                 raw_npc_data = yaml.safe_load(f)
+             raw_npc_data = load_yaml_file(npc_path)
+             if raw_npc_data is None:
+                 continue

              serialized_npc = {
                  "name": raw_npc_data.get("name", file[:-4]),
@@ -2402,8 +2698,7 @@ def get_package_contents():
          if f.endswith('.npc'):
              npc_path = os.path.join(package_npc_team_dir, f)
              try:
-                 with open(npc_path, 'r') as file:
-                     npc_data = yaml.safe_load(file) or {}
+                 npc_data = load_yaml_file(npc_path) or {}
                  npcs.append({
                      "name": npc_data.get("name", f[:-4]),
                      "primary_directive": npc_data.get("primary_directive", ""),
@@ -2422,8 +2717,7 @@ def get_package_contents():
                  jinx_path = os.path.join(root, f)
                  rel_path = os.path.relpath(jinx_path, jinxs_dir)
                  try:
-                     with open(jinx_path, 'r') as file:
-                         jinx_data = yaml.safe_load(file) or {}
+                     jinx_data = load_yaml_file(jinx_path) or {}
                      jinxs.append({
                          "name": f[:-5],
                          "path": rel_path[:-5],
@@ -3935,7 +4229,7 @@ def stream():
                              input_values=tool_args if isinstance(tool_args, dict) else {},
                              npc=npc_object
                          )
-                         tool_content = str(jinx_ctx)
+                         tool_content = str(jinx_ctx.get('output', '')) if isinstance(jinx_ctx, dict) else str(jinx_ctx)
                      except Exception as e:
                          tool_content = f"Jinx execution error: {str(e)}"
                  else:
@@ -4361,19 +4655,103 @@ def approve_memories():
      try:
          data = request.json
          approvals = data.get("approvals", [])
-
+
          command_history = CommandHistory(app.config.get('DB_PATH'))
-
+
          for approval in approvals:
              command_history.update_memory_status(
                  approval['memory_id'],
                  approval['decision'],
                  approval.get('final_memory')
              )
-
+
          return jsonify({"success": True, "processed": len(approvals)})
-
+
+     except Exception as e:
+         return jsonify({"error": str(e)}), 500
+
+ @app.route("/api/memory/search", methods=["GET"])
+ def search_memories():
+     """Search memories with optional scope filtering"""
+     try:
+         q = request.args.get("q", "")
+         npc = request.args.get("npc")
+         team = request.args.get("team")
+         directory_path = request.args.get("directory_path")
+         status = request.args.get("status")
+         limit = int(request.args.get("limit", 50))
+
+         if not q:
+             return jsonify({"error": "Query parameter 'q' is required"}), 400
+
+         command_history = CommandHistory(app.config.get('DB_PATH'))
+         results = command_history.search_memory(
+             query=q,
+             npc=npc,
+             team=team,
+             directory_path=directory_path,
+             status_filter=status,
+             limit=limit
+         )
+
+         return jsonify({"memories": results, "count": len(results)})
+
+     except Exception as e:
+         traceback.print_exc()
+         return jsonify({"error": str(e)}), 500
+
+ @app.route("/api/memory/pending", methods=["GET"])
+ def get_pending_memories():
+     """Get memories awaiting approval"""
+     try:
+         limit = int(request.args.get("limit", 50))
+         npc = request.args.get("npc")
+         team = request.args.get("team")
+         directory_path = request.args.get("directory_path")
+
+         command_history = CommandHistory(app.config.get('DB_PATH'))
+         results = command_history.get_pending_memories(limit=limit)
+
+         # Filter by scope if provided
+         if npc or team or directory_path:
+             filtered = []
+             for mem in results:
+                 if npc and mem.get('npc') != npc:
+                     continue
+                 if team and mem.get('team') != team:
+                     continue
+                 if directory_path and mem.get('directory_path') != directory_path:
+                     continue
+                 filtered.append(mem)
+             results = filtered
+
+         return jsonify({"memories": results, "count": len(results)})
+
+     except Exception as e:
+         traceback.print_exc()
+         return jsonify({"error": str(e)}), 500
+
+ @app.route("/api/memory/scope", methods=["GET"])
+ def get_memories_by_scope():
+     """Get memories for a specific scope (npc/team/directory)"""
+     try:
+         npc = request.args.get("npc", "")
+         team = request.args.get("team", "")
+         directory_path = request.args.get("directory_path", "")
+         status = request.args.get("status")
+
+         command_history = CommandHistory(app.config.get('DB_PATH'))
+         results = command_history.get_memories_for_scope(
+             npc=npc,
+             team=team,
+             directory_path=directory_path,
+             status=status
+         )
+
+         return jsonify({"memories": results, "count": len(results)})
+
      except Exception as e:
+         traceback.print_exc()
          return jsonify({"error": str(e)}), 500


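
The three new memory routes follow the same pattern as the knowledge-graph routes; a short usage sketch against a locally running server (URL, port, and parameter values are assumptions for illustration):

    import requests

    BASE = "http://localhost:5337"

    # Keyword search across stored memories, optionally scoped to an NPC.
    hits = requests.get(f"{BASE}/api/memory/search", params={"q": "deadline", "npc": "sibiji", "limit": 20}).json()
    print(hits["count"])

    # Memories still awaiting human approval.
    pending = requests.get(f"{BASE}/api/memory/pending", params={"limit": 10}).json()

    # Everything recorded for one team/directory scope.
    scoped = requests.get(f"{BASE}/api/memory/scope",
                          params={"team": "my_team", "directory_path": "/home/user/project"}).json()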
@@ -5388,7 +5766,7 @@ def text_to_speech_endpoint():
      import base64
      from npcpy.gen.audio_gen import (
          text_to_speech, get_available_engines,
-         pcm16_to_wav, KOKORO_VOICES
+         pcm16_to_wav
      )

      data = request.json or {}
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: npcpy
- Version: 1.3.11
+ Version: 1.3.13
  Summary: npcpy is the premier open-source library for integrating LLMs and Agents into python systems.
  Home-page: https://github.com/NPC-Worldwide/npcpy
  Author: Christopher Agostino
@@ -4,10 +4,10 @@ npcpy/llm_funcs.py,sha256=M7GSSjqpcO2kxh7G2sGRBU34lmdW7Imd5KxYqc1PiO0,75114
  npcpy/main.py,sha256=RWoRIj6VQLxKdOKvdVyaq2kwG35oRpeXPvp1CAAoG-w,81
  npcpy/ml_funcs.py,sha256=UI7k7JR4XOH_VXR-xxLaO4r9Kyx_jBaEnp3TUIY7ZLQ,22657
  npcpy/npc_array.py,sha256=fVTxcMiXV-lvltmuwaRnTU9D3ikPq3-7k5wzp7MA5OY,40224
- npcpy/npc_compiler.py,sha256=9U6_F7qweURaL2nQgrF7I9OQEmYjOENmkBV-YChr3oM,118402
+ npcpy/npc_compiler.py,sha256=W1umvhsbyCYoRYajPUKa642FcsX5Fcadh78n-Vzu2hM,120983
  npcpy/npc_sysenv.py,sha256=VH7le3xwxHvO55ZYCG1e-gj8X5YTSIqbIiU6ifSqhss,38917
  npcpy/npcs.py,sha256=eExuVsbTfrRobTRRptRpDm46jCLWUgbvy4_U7IUQo-c,744
- npcpy/serve.py,sha256=VjRwD4cDXgawD6L6yVl-8-oUWyYrfIgaKAhu9c0NjDI,229714
+ npcpy/serve.py,sha256=IVbJE2RlqpKB-OllvTk-XU4SE8SA_-4bEtoZ21qJwG8,243318
  npcpy/tools.py,sha256=A5_oVmZkzGnI3BI-NmneuxeXQq-r29PbpAZP4nV4jrc,5303
  npcpy/data/__init__.py,sha256=1tcoChR-Hjn905JDLqaW9ElRmcISCTJdE7BGXPlym2Q,642
  npcpy/data/audio.py,sha256=o4auV8DQrAmZ4y84U3SofiwEuq5-ZBjGEZipQ9zPpGQ,22816
@@ -30,13 +30,13 @@ npcpy/gen/audio_gen.py,sha256=RoSElPUGfQimPBUcl9SP-ziIJxeI6XAr0A1882BZxXE,20646
  npcpy/gen/embeddings.py,sha256=QStTJ2ELiC379OEZsLEgGGIIFD267Y8zQchs7HRn2Zg,2089
  npcpy/gen/image_gen.py,sha256=SOZYpvlxSiAdDK9j750OEBKjm22OUNdXg1kQ10sJSy0,21853
  npcpy/gen/ocr.py,sha256=rgmXWHrCYX1Po-qG_LrNFbVYEZ8aaupxFTgparcoB_Y,6554
- npcpy/gen/response.py,sha256=Pw01M0UxjsXOPJlvShAbq9n6IVnvEqxT6MQaLyEwJFs,48505
+ npcpy/gen/response.py,sha256=fLd-ORRMI_s3yRNMH1TQodGk17u_G0xofS1lqfqH4r0,51121
  npcpy/gen/video_gen.py,sha256=RFi3Zcq_Hn3HIcfoF3mijQ6G7RYFZaM_9pjPTh-8E64,3239
  npcpy/gen/world_gen.py,sha256=_8ytE7E3QVQ5qiX8DmOby-xd0d9zV20rRI6Wkpf-qcY,18922
  npcpy/memory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  npcpy/memory/command_history.py,sha256=pjqcSBHXzdQTViSjHsBP2ohRSYnJ33h2bYARGcLvBfs,62253
  npcpy/memory/kg_vis.py,sha256=TrQQCRh_E7Pyr-GPAHLSsayubAfGyf4HOEFrPB6W86Q,31280
- npcpy/memory/knowledge_graph.py,sha256=pjqcHjAh-Bfe6Q9fvNkBpg-TMjPTgynB6PhLSWWtPzI,48720
+ npcpy/memory/knowledge_graph.py,sha256=X3qqlDcuzGUjRgQWleQzafGKgNw8QRz2ar2gYuCvUq8,48600
  npcpy/memory/memory_processor.py,sha256=6PfVnSBA9ag5EhHJinXoODfEPTlDDoaT0PtCCuZO6HI,2598
  npcpy/memory/search.py,sha256=glN6WYzaixcoDphTEHAXSMX3vKZGjR12Jx9YVL_gYfE,18433
  npcpy/mix/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -53,8 +53,8 @@ npcpy/work/browser.py,sha256=p2PeaoZdAXipFuAgKCCB3aXXLE_p3yIRqC87KlZKZWc,679
  npcpy/work/desktop.py,sha256=F3I8mUtJp6LAkXodsh8hGZIncoads6c_2Utty-0EdDA,2986
  npcpy/work/plan.py,sha256=QyUwg8vElWiHuoS-xK4jXTxxHvkMD3VkaCEsCmrEPQk,8300
  npcpy/work/trigger.py,sha256=P1Y8u1wQRsS2WACims_2IdkBEar-iBQix-2TDWoW0OM,9948
- npcpy-1.3.11.dist-info/licenses/LICENSE,sha256=j0YPvce7Ng9e32zYOu0EmXjXeJ0Nwawd0RA3uSGGH4E,1070
- npcpy-1.3.11.dist-info/METADATA,sha256=jRdfngd30yMIL7slCyAgYQkz_Vx32Bn1fxDv45X-lP8,37885
- npcpy-1.3.11.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- npcpy-1.3.11.dist-info/top_level.txt,sha256=g1pbSvrOOncB74Bg5-J0Olg4V0A5VzDw-Xz5YObq8BU,6
- npcpy-1.3.11.dist-info/RECORD,,
+ npcpy-1.3.13.dist-info/licenses/LICENSE,sha256=j0YPvce7Ng9e32zYOu0EmXjXeJ0Nwawd0RA3uSGGH4E,1070
+ npcpy-1.3.13.dist-info/METADATA,sha256=iImjyVCj8NyORUAC5EoruV2IeNAzvGA46JFtB074pI0,37885
+ npcpy-1.3.13.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ npcpy-1.3.13.dist-info/top_level.txt,sha256=g1pbSvrOOncB74Bg5-J0Olg4V0A5VzDw-Xz5YObq8BU,6
+ npcpy-1.3.13.dist-info/RECORD,,