npcpy 1.3.12.tar.gz → 1.3.13.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. {npcpy-1.3.12/npcpy.egg-info → npcpy-1.3.13}/PKG-INFO +1 -1
  2. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/gen/response.py +36 -0
  3. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/memory/knowledge_graph.py +1 -3
  4. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/npc_compiler.py +83 -2
  5. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/serve.py +372 -25
  6. {npcpy-1.3.12 → npcpy-1.3.13/npcpy.egg-info}/PKG-INFO +1 -1
  7. {npcpy-1.3.12 → npcpy-1.3.13}/setup.py +1 -1
  8. {npcpy-1.3.12 → npcpy-1.3.13}/LICENSE +0 -0
  9. {npcpy-1.3.12 → npcpy-1.3.13}/MANIFEST.in +0 -0
  10. {npcpy-1.3.12 → npcpy-1.3.13}/README.md +0 -0
  11. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/__init__.py +0 -0
  12. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/build_funcs.py +0 -0
  13. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/data/__init__.py +0 -0
  14. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/data/audio.py +0 -0
  15. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/data/data_models.py +0 -0
  16. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/data/image.py +0 -0
  17. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/data/load.py +0 -0
  18. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/data/text.py +0 -0
  19. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/data/video.py +0 -0
  20. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/data/web.py +0 -0
  21. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/ft/__init__.py +0 -0
  22. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/ft/diff.py +0 -0
  23. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/ft/ge.py +0 -0
  24. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/ft/memory_trainer.py +0 -0
  25. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/ft/model_ensembler.py +0 -0
  26. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/ft/rl.py +0 -0
  27. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/ft/sft.py +0 -0
  28. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/ft/usft.py +0 -0
  29. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/gen/__init__.py +0 -0
  30. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/gen/audio_gen.py +0 -0
  31. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/gen/embeddings.py +0 -0
  32. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/gen/image_gen.py +0 -0
  33. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/gen/ocr.py +0 -0
  34. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/gen/video_gen.py +0 -0
  35. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/gen/world_gen.py +0 -0
  36. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/llm_funcs.py +0 -0
  37. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/main.py +0 -0
  38. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/memory/__init__.py +0 -0
  39. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/memory/command_history.py +0 -0
  40. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/memory/kg_vis.py +0 -0
  41. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/memory/memory_processor.py +0 -0
  42. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/memory/search.py +0 -0
  43. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/mix/__init__.py +0 -0
  44. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/mix/debate.py +0 -0
  45. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/ml_funcs.py +0 -0
  46. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/npc_array.py +0 -0
  47. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/npc_sysenv.py +0 -0
  48. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/npcs.py +0 -0
  49. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/sql/__init__.py +0 -0
  50. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/sql/ai_function_tools.py +0 -0
  51. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/sql/database_ai_adapters.py +0 -0
  52. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/sql/database_ai_functions.py +0 -0
  53. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/sql/model_runner.py +0 -0
  54. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/sql/npcsql.py +0 -0
  55. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/sql/sql_model_compiler.py +0 -0
  56. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/tools.py +0 -0
  57. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/work/__init__.py +0 -0
  58. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/work/browser.py +0 -0
  59. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/work/desktop.py +0 -0
  60. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/work/plan.py +0 -0
  61. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy/work/trigger.py +0 -0
  62. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy.egg-info/SOURCES.txt +0 -0
  63. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy.egg-info/dependency_links.txt +0 -0
  64. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy.egg-info/requires.txt +0 -0
  65. {npcpy-1.3.12 → npcpy-1.3.13}/npcpy.egg-info/top_level.txt +0 -0
  66. {npcpy-1.3.12 → npcpy-1.3.13}/setup.cfg +0 -0
  67. {npcpy-1.3.12 → npcpy-1.3.13}/tests/test_audio.py +0 -0
  68. {npcpy-1.3.12 → npcpy-1.3.13}/tests/test_command_history.py +0 -0
  69. {npcpy-1.3.12 → npcpy-1.3.13}/tests/test_image.py +0 -0
  70. {npcpy-1.3.12 → npcpy-1.3.13}/tests/test_llm_funcs.py +0 -0
  71. {npcpy-1.3.12 → npcpy-1.3.13}/tests/test_load.py +0 -0
  72. {npcpy-1.3.12 → npcpy-1.3.13}/tests/test_npc_array.py +0 -0
  73. {npcpy-1.3.12 → npcpy-1.3.13}/tests/test_npc_compiler.py +0 -0
  74. {npcpy-1.3.12 → npcpy-1.3.13}/tests/test_npcsql.py +0 -0
  75. {npcpy-1.3.12 → npcpy-1.3.13}/tests/test_response.py +0 -0
  76. {npcpy-1.3.12 → npcpy-1.3.13}/tests/test_serve.py +0 -0
  77. {npcpy-1.3.12 → npcpy-1.3.13}/tests/test_text.py +0 -0
  78. {npcpy-1.3.12 → npcpy-1.3.13}/tests/test_tools.py +0 -0
  79. {npcpy-1.3.12 → npcpy-1.3.13}/tests/test_web.py +0 -0
{npcpy-1.3.12/npcpy.egg-info → npcpy-1.3.13}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: npcpy
-Version: 1.3.12
+Version: 1.3.13
 Summary: npcpy is the premier open-source library for integrating LLMs and Agents into python systems.
 Home-page: https://github.com/NPC-Worldwide/npcpy
 Author: Christopher Agostino
{npcpy-1.3.12 → npcpy-1.3.13}/npcpy/gen/response.py
@@ -259,6 +259,24 @@ def get_ollama_response(
                     prompt = f"Content from CSV: {os.path.basename(attachment)} (first 100 rows):\n{csv_sample} \n csv description: {csv_data.describe()}"
                 except Exception:
                     pass
+            else:
+                # Handle text-based files
+                text_extensions = {'.txt', '.text', '.log', '.md', '.markdown', '.rst', '.json', '.yaml', '.yml', '.toml', '.ini', '.conf', '.cfg', '.xml', '.html', '.htm', '.py', '.js', '.ts', '.jsx', '.tsx', '.java', '.c', '.h', '.cpp', '.hpp', '.go', '.rs', '.rb', '.php', '.sh', '.bash', '.sql', '.css', '.scss'}
+                filename = os.path.basename(attachment)
+                if ext in text_extensions or ext == '':
+                    try:
+                        with open(attachment, 'r', encoding='utf-8', errors='replace') as f:
+                            text_content = f.read()
+                        max_chars = 50000
+                        if len(text_content) > max_chars:
+                            text_content = text_content[:max_chars] + f"\n\n... [truncated]"
+                        if text_content.strip():
+                            if prompt:
+                                prompt += f"\n\nContent from {filename}:\n```\n{text_content}\n```"
+                            else:
+                                prompt = f"Content from {filename}:\n```\n{text_content}\n```"
+                    except Exception:
+                        pass
 
 
     if prompt:
@@ -797,6 +815,24 @@ def get_litellm_response(
                     prompt = f"Content from CSV: {os.path.basename(attachment)} (first 10 rows):\n{csv_sample}"
                 except Exception:
                     pass
+            else:
+                # Handle text-based files
+                text_extensions = {'.txt', '.text', '.log', '.md', '.markdown', '.rst', '.json', '.yaml', '.yml', '.toml', '.ini', '.conf', '.cfg', '.xml', '.html', '.htm', '.py', '.js', '.ts', '.jsx', '.tsx', '.java', '.c', '.h', '.cpp', '.hpp', '.go', '.rs', '.rb', '.php', '.sh', '.bash', '.sql', '.css', '.scss'}
+                filename = os.path.basename(attachment)
+                if ext in text_extensions or ext == '':
+                    try:
+                        with open(attachment, 'r', encoding='utf-8', errors='replace') as f:
+                            text_content = f.read()
+                        max_chars = 50000
+                        if len(text_content) > max_chars:
+                            text_content = text_content[:max_chars] + f"\n\n... [truncated]"
+                        if text_content.strip():
+                            if prompt:
+                                prompt += f"\n\nContent from {filename}:\n```\n{text_content}\n```"
+                            else:
+                                prompt = f"Content from {filename}:\n```\n{text_content}\n```"
+                    except Exception:
+                        pass
 
     if prompt:
         if result['messages'] and result['messages'][-1]["role"] == "user":
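
Both hunks above add the same fallback to get_ollama_response and get_litellm_response: attachments that are not handled as images, PDFs, or CSVs but carry a text-like extension (or none at all) are read as UTF-8, truncated to 50,000 characters, and appended to the prompt inside a fenced block. The same logic as a standalone sketch; inline_text_attachment is illustrative only, not a function npcpy exports:

import os

# Hypothetical helper mirroring the new fallback; npcpy inlines this logic
# directly inside get_ollama_response and get_litellm_response.
TEXT_EXTENSIONS = {'.txt', '.log', '.md', '.json', '.yaml', '.py', '.sh'}  # abbreviated from the full set above

def inline_text_attachment(prompt, attachment, max_chars=50000):
    """Append a text file's contents to the prompt, truncating long files."""
    ext = os.path.splitext(attachment)[1].lower()
    if ext not in TEXT_EXTENSIONS and ext != '':
        return prompt
    try:
        with open(attachment, 'r', encoding='utf-8', errors='replace') as f:
            text = f.read()
    except OSError:
        return prompt
    if len(text) > max_chars:
        text = text[:max_chars] + "\n\n... [truncated]"
    if not text.strip():
        return prompt
    block = f"Content from {os.path.basename(attachment)}:\n```\n{text}\n```"
    return f"{prompt}\n\n{block}" if prompt else block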
{npcpy-1.3.12 → npcpy-1.3.13}/npcpy/memory/knowledge_graph.py
@@ -344,7 +344,6 @@ def kg_evolve_incremental(existing_kg,
 
     current_gen = existing_kg.get('generation', 0)
     next_gen = current_gen + 1
-    print(f"\n--- ABSORBING INFO: Gen {current_gen} -> Gen {next_gen} ---")
 
     newly_added_concepts = []
     concept_links = list(existing_kg.get('concept_links', []))
@@ -359,8 +358,7 @@ def kg_evolve_incremental(existing_kg,
     all_concept_names = list(existing_concept_names)
 
     all_new_facts = []
-    print(npc, npc.model, npc.provider)
-
+
     if new_facts:
         all_new_facts = new_facts
         print(f'using pre-approved facts: {len(all_new_facts)}')
{npcpy-1.3.12 → npcpy-1.3.13}/npcpy/npc_compiler.py
@@ -7,6 +7,41 @@ import sqlite3
 import numpy as np
 import pandas as pd
 import matplotlib.pyplot as plt
+import matplotlib as mpl
+
+# Professional plot styling (from kg-research matplotlibrc)
+mpl.rcParams.update({
+    'font.family': 'serif',
+    'axes.labelsize': 20,
+    'axes.grid.axis': 'both',
+    'axes.grid.which': 'major',
+    'axes.prop_cycle': mpl.cycler('color', ['k', 'b', 'r', 'g', 'c', 'm', 'y', 'k']),
+    'xtick.top': True,
+    'xtick.direction': 'in',
+    'xtick.major.size': 10,
+    'xtick.minor.size': 5,
+    'xtick.labelsize': 20,
+    'xtick.minor.visible': True,
+    'xtick.major.top': True,
+    'xtick.major.bottom': True,
+    'xtick.minor.top': True,
+    'xtick.minor.bottom': True,
+    'ytick.left': True,
+    'ytick.right': True,
+    'ytick.direction': 'in',
+    'ytick.major.size': 10,
+    'ytick.minor.size': 5,
+    'ytick.labelsize': 20,
+    'ytick.minor.visible': True,
+    'ytick.major.left': True,
+    'ytick.major.right': True,
+    'ytick.minor.left': True,
+    'ytick.minor.right': True,
+    'legend.frameon': False,
+    'legend.fontsize': 12,
+    'image.cmap': 'plasma',
+    'errorbar.capsize': 1,
+})
 import re
 import random
 from datetime import datetime
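
Since this rcParams.update() call sits at module level, merely importing npcpy.npc_compiler restyles every matplotlib figure created afterwards in the process. A quick way to observe the side effect:

import matplotlib as mpl

before = mpl.rcParams['image.cmap']   # matplotlib's default is 'viridis'
import npcpy.npc_compiler             # the module-level rcParams.update() runs on import
after = mpl.rcParams['image.cmap']    # now 'plasma', per the block above
print(before, '->', after)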
@@ -31,9 +66,31 @@ from npcpy.npc_sysenv import (
 from npcpy.memory.command_history import CommandHistory, generate_message_id
 
 class SilentUndefined(Undefined):
+    """Undefined that silently returns empty string instead of raising errors"""
     def _fail_with_undefined_error(self, *args, **kwargs):
        return ""
 
+    def __str__(self):
+        return ""
+
+    def __repr__(self):
+        return ""
+
+    def __bool__(self):
+        return False
+
+    def __eq__(self, other):
+        return other == "" or other is None or isinstance(other, Undefined)
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __iter__(self):
+        return iter([])
+
+    def __len__(self):
+        return 0
+
 import math
 from PIL import Image
 from jinja2 import Environment, ChainableUndefined
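
The new dunder methods make SilentUndefined safe to stringify, truth-test, compare, iterate, and measure, so templates that reference missing variables degrade to empty output instead of raising UndefinedError. A condensed demonstration (the class body is abbreviated from the diff above):

from jinja2 import Environment, Undefined

class SilentUndefined(Undefined):  # abbreviated; full class as in the diff
    def _fail_with_undefined_error(self, *args, **kwargs):
        return ""
    def __str__(self):
        return ""
    def __bool__(self):
        return False
    def __iter__(self):
        return iter([])

env = Environment(undefined=SilentUndefined)
tpl = env.from_string(
    "{% if missing %}never{% endif %}done: {% for x in nothing %}{{ x }}{% endfor %}"
)
# With jinja2's default Undefined, iterating `nothing` raises UndefinedError;
# here the loop silently runs zero times and the output is just "done: ".
print(tpl.render())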
@@ -152,11 +209,35 @@ def get_log_entries(entity_id, entry_type=None, limit=10, db_path="~/npcsh_histo
     ]
 
 
+def _json_dumps_with_undefined(obj, **kwargs):
+    """Custom JSON dumps that handles SilentUndefined objects"""
+    def default_handler(o):
+        if isinstance(o, Undefined):
+            return ""
+        raise TypeError(f"Object of type {type(o).__name__} is not JSON serializable")
+    return json.dumps(obj, default=default_handler, **kwargs)
+
+
 def load_yaml_file(file_path):
-    """Load a YAML file with error handling"""
+    """Load a YAML file with error handling, rendering Jinja2 first"""
     try:
         with open(os.path.expanduser(file_path), 'r') as f:
-            return yaml.safe_load(f)
+            content = f.read()
+
+        # Check if file has Jinja2 control structures that need pre-rendering
+        # Only render if there are {% %} blocks, otherwise parse directly
+        if '{%' not in content:
+            return yaml.safe_load(content)
+
+        # First pass: render Jinja2 templates to produce valid YAML
+        # This allows {% if %} and other control structures to work
+        jinja_env = Environment(undefined=SilentUndefined)
+        # Configure tojson filter to handle SilentUndefined
+        jinja_env.policies['json.dumps_function'] = _json_dumps_with_undefined
+        template = jinja_env.from_string(content)
+        rendered_content = template.render({})
+
+        return yaml.safe_load(rendered_content)
     except Exception as e:
         print(f"Error loading YAML file {file_path}: {e}")
         return None
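
With this change, .npc and .jinx files may contain Jinja2 control blocks that raw yaml.safe_load would reject; the file is rendered first (with missing variables silently blank) and the resulting text parsed as YAML. A sketch with an invented .npc body; the field names mirror those read elsewhere in this diff:

import yaml
from jinja2 import Environment
from npcpy.npc_compiler import SilentUndefined

# An invented .npc body: the raw {% ... %} lines are not valid YAML, so
# load_yaml_file now renders the template before parsing.
content = """name: analyst
{% if use_local %}
model: llama3.2
{% else %}
model: gpt-4o-mini
{% endif %}
primary_directive: Analyze the data you are given.
"""

rendered = Environment(undefined=SilentUndefined).from_string(content).render({})
print(yaml.safe_load(rendered)["model"])  # -> gpt-4o-mini (use_local is undefined, hence falsy)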
{npcpy-1.3.12 → npcpy-1.3.13}/npcpy/serve.py
@@ -46,7 +46,8 @@ from npcsh._state import ShellState, initialize_base_npcs_if_needed
 from npcsh.config import NPCSH_DB_PATH
 
 
-from npcpy.memory.knowledge_graph import load_kg_from_db
+from npcpy.memory.knowledge_graph import load_kg_from_db, find_similar_facts_chroma
+from npcpy.memory.command_history import setup_chroma_db
 from npcpy.memory.search import execute_rag_command, execute_brainblast_command
 from npcpy.data.load import load_file_contents
 from npcpy.data.web import search_web
@@ -67,12 +68,14 @@ from npcpy.memory.command_history import (
     save_conversation_message,
     generate_message_id,
 )
-from npcpy.npc_compiler import Jinx, NPC, Team, load_jinxs_from_directory, build_jinx_tool_catalog, initialize_npc_project
+from npcpy.npc_compiler import Jinx, NPC, Team, load_jinxs_from_directory, build_jinx_tool_catalog, initialize_npc_project, load_yaml_file
 
 from npcpy.llm_funcs import (
     get_llm_response, check_llm_command
 )
+from npcpy.gen.embeddings import get_embeddings
 from termcolor import cprint
+
 from npcpy.tools import auto_tools
 
 import json
@@ -712,6 +715,265 @@ def get_centrality_data():
     concept_degree = {node: cent for node, cent in nx.degree_centrality(G).items() if node in concepts_df['name'].values}
     return jsonify(centrality={'degree': concept_degree})
 
+@app.route('/api/kg/search')
+def search_kg():
+    """Search facts and concepts by keyword"""
+    try:
+        q = request.args.get('q', '').strip().lower()
+        generation = request.args.get('generation', type=int)
+        search_type = request.args.get('type', 'both')  # fact, concept, or both
+        limit = request.args.get('limit', 50, type=int)
+
+        if not q:
+            return jsonify({"error": "Query parameter 'q' is required"}), 400
+
+        concepts_df, facts_df, links_df = load_kg_data(generation)
+        results = {"facts": [], "concepts": [], "query": q}
+
+        # Search facts
+        if search_type in ('both', 'fact'):
+            for _, row in facts_df.iterrows():
+                statement = str(row.get('statement', '')).lower()
+                source_text = str(row.get('source_text', '')).lower()
+                if q in statement or q in source_text:
+                    results["facts"].append({
+                        "statement": row.get('statement'),
+                        "source_text": row.get('source_text'),
+                        "type": row.get('type'),
+                        "generation": row.get('generation'),
+                        "origin": row.get('origin')
+                    })
+                    if len(results["facts"]) >= limit:
+                        break
+
+        # Search concepts
+        if search_type in ('both', 'concept'):
+            for _, row in concepts_df.iterrows():
+                name = str(row.get('name', '')).lower()
+                description = str(row.get('description', '')).lower()
+                if q in name or q in description:
+                    results["concepts"].append({
+                        "name": row.get('name'),
+                        "description": row.get('description'),
+                        "generation": row.get('generation'),
+                        "origin": row.get('origin')
+                    })
+                    if len(results["concepts"]) >= limit:
+                        break
+
+        return jsonify(results)
+
+    except Exception as e:
+        traceback.print_exc()
+        return jsonify({"error": str(e)}), 500
+
+@app.route('/api/kg/embed', methods=['POST'])
+def embed_kg_facts():
+    """Embed existing facts from SQL to Chroma for semantic search"""
+    try:
+        data = request.get_json() or {}
+        generation = data.get('generation')
+        batch_size = data.get('batch_size', 10)
+
+        # Load facts from SQL
+        _, facts_df, _ = load_kg_data(generation)
+
+        if facts_df.empty:
+            return jsonify({"message": "No facts to embed", "count": 0})
+
+        # Setup Chroma
+        chroma_db_path = os.path.expanduser('~/npcsh_chroma_db')
+        _, chroma_collection = setup_chroma_db(
+            "knowledge_graph",
+            "Facts extracted from various sources",
+            chroma_db_path
+        )
+
+        # Process in batches
+        from npcpy.memory.knowledge_graph import store_fact_with_embedding
+        import hashlib
+
+        embedded_count = 0
+        skipped_count = 0
+
+        statements = facts_df['statement'].dropna().tolist()
+
+        for i in range(0, len(statements), batch_size):
+            batch = statements[i:i + batch_size]
+
+            # Get embeddings for batch
+            try:
+                embeddings = get_embeddings(batch)
+            except Exception as e:
+                print(f"Failed to get embeddings for batch {i}: {e}")
+                continue
+
+            for j, statement in enumerate(batch):
+                fact_id = hashlib.md5(statement.encode()).hexdigest()
+
+                # Check if already exists
+                try:
+                    existing = chroma_collection.get(ids=[fact_id])
+                    if existing and existing.get('ids'):
+                        skipped_count += 1
+                        continue
+                except:
+                    pass
+
+                # Get metadata from dataframe
+                row = facts_df[facts_df['statement'] == statement].iloc[0] if len(facts_df[facts_df['statement'] == statement]) > 0 else None
+                metadata = {
+                    "generation": int(row.get('generation', 0)) if row is not None and pd.notna(row.get('generation')) else 0,
+                    "origin": str(row.get('origin', '')) if row is not None else '',
+                    "type": str(row.get('type', '')) if row is not None else '',
+                }
+
+                # Store with embedding
+                result = store_fact_with_embedding(
+                    chroma_collection, statement, metadata, embeddings[j]
+                )
+                if result:
+                    embedded_count += 1
+
+        return jsonify({
+            "message": f"Embedded {embedded_count} facts, skipped {skipped_count} existing",
+            "embedded": embedded_count,
+            "skipped": skipped_count,
+            "total_facts": len(statements)
+        })
+
+    except Exception as e:
+        traceback.print_exc()
+        return jsonify({"error": str(e)}), 500
+
+@app.route('/api/kg/search/semantic')
+def search_kg_semantic():
+    """Semantic search for facts using vector similarity"""
+    try:
+        q = request.args.get('q', '').strip()
+        generation = request.args.get('generation', type=int)
+        limit = request.args.get('limit', 10, type=int)
+
+        if not q:
+            return jsonify({"error": "Query parameter 'q' is required"}), 400
+
+        # Setup Chroma connection
+        chroma_db_path = os.path.expanduser('~/npcsh_chroma_db')
+        try:
+            _, chroma_collection = setup_chroma_db(
+                "knowledge_graph",
+                "Facts extracted from various sources",
+                chroma_db_path
+            )
+        except Exception as e:
+            return jsonify({
+                "error": f"Chroma DB not available: {str(e)}",
+                "facts": [],
+                "query": q
+            }), 200
+
+        # Get query embedding
+        try:
+            query_embedding = get_embeddings([q])[0]
+        except Exception as e:
+            return jsonify({
+                "error": f"Failed to generate embedding: {str(e)}",
+                "facts": [],
+                "query": q
+            }), 200
+
+        # Build metadata filter for generation if specified
+        metadata_filter = None
+        if generation is not None:
+            metadata_filter = {"generation": generation}
+
+        # Search Chroma
+        similar_facts = find_similar_facts_chroma(
+            chroma_collection,
+            q,
+            query_embedding=query_embedding,
+            n_results=limit,
+            metadata_filter=metadata_filter
+        )
+
+        # Format results
+        results = {
+            "facts": [
+                {
+                    "statement": f["fact"],
+                    "distance": f.get("distance"),
+                    "metadata": f.get("metadata", {}),
+                    "id": f.get("id")
+                }
+                for f in similar_facts
+            ],
+            "query": q,
+            "total": len(similar_facts)
+        }
+
+        return jsonify(results)
+
+    except Exception as e:
+        traceback.print_exc()
+        return jsonify({"error": str(e)}), 500
+
+@app.route('/api/kg/facts')
+def get_kg_facts():
+    """Get facts, optionally filtered by generation"""
+    try:
+        generation = request.args.get('generation', type=int)
+        limit = request.args.get('limit', 100, type=int)
+        offset = request.args.get('offset', 0, type=int)
+
+        _, facts_df, _ = load_kg_data(generation)
+
+        facts = []
+        for i, row in facts_df.iloc[offset:offset+limit].iterrows():
+            facts.append({
+                "statement": row.get('statement'),
+                "source_text": row.get('source_text'),
+                "type": row.get('type'),
+                "generation": row.get('generation'),
+                "origin": row.get('origin')
+            })
+
+        return jsonify({
+            "facts": facts,
+            "total": len(facts_df),
+            "offset": offset,
+            "limit": limit
+        })
+
+    except Exception as e:
+        traceback.print_exc()
+        return jsonify({"error": str(e)}), 500
+
+@app.route('/api/kg/concepts')
+def get_kg_concepts():
+    """Get concepts, optionally filtered by generation"""
+    try:
+        generation = request.args.get('generation', type=int)
+        limit = request.args.get('limit', 100, type=int)
+
+        concepts_df, _, _ = load_kg_data(generation)
+
+        concepts = []
+        for _, row in concepts_df.head(limit).iterrows():
+            concepts.append({
+                "name": row.get('name'),
+                "description": row.get('description'),
+                "generation": row.get('generation'),
+                "origin": row.get('origin')
+            })
+
+        return jsonify({
+            "concepts": concepts,
+            "total": len(concepts_df)
+        })
+
+    except Exception as e:
+        traceback.print_exc()
+        return jsonify({"error": str(e)}), 500
 
 
 @app.route("/api/attachments/<message_id>", methods=["GET"])
@@ -853,10 +1115,9 @@ def get_available_jinxs():
 def get_jinx_name_from_file(filepath):
     """Read jinx_name from file, fallback to filename."""
     try:
-        with open(filepath, 'r') as f:
-            data = yaml.safe_load(f)
-            if data and 'jinx_name' in data:
-                return data['jinx_name']
+        data = load_yaml_file(filepath)
+        if data and 'jinx_name' in data:
+            return data['jinx_name']
     except:
         pass
     return os.path.basename(filepath)[:-5]
@@ -1924,9 +2185,10 @@ def get_jinxs_global():
     for file in files:
         if file.endswith(".jinx"):
             jinx_path = os.path.join(root, file)
-            with open(jinx_path, 'r') as f:
-                raw_data = yaml.safe_load(f)
-
+            raw_data = load_yaml_file(jinx_path)
+            if raw_data is None:
+                continue
+
             # Preserve full input definitions including defaults
             inputs = raw_data.get("inputs", [])
 
@@ -1960,9 +2222,10 @@ def get_jinxs_project():
     for file in files:
         if file.endswith(".jinx"):
             jinx_path = os.path.join(root, file)
-            with open(jinx_path, 'r') as f:
-                raw_data = yaml.safe_load(f)
-
+            raw_data = load_yaml_file(jinx_path)
+            if raw_data is None:
+                continue
+
             # Preserve full input definitions including defaults
             inputs = raw_data.get("inputs", [])
 
@@ -2118,8 +2381,9 @@ def get_npc_team_global():
     for file in os.listdir(global_npc_directory):
         if file.endswith(".npc"):
             npc_path = os.path.join(global_npc_directory, file)
-            with open(npc_path, 'r') as f:
-                raw_data = yaml.safe_load(f)
+            raw_data = load_yaml_file(npc_path)
+            if raw_data is None:
+                continue
 
             npc_data.append({
                 "name": raw_data.get("name", file[:-4]),
@@ -2154,8 +2418,9 @@ def get_npc_team_project():
     for file in os.listdir(project_npc_directory):
         if file.endswith(".npc"):
             npc_path = os.path.join(project_npc_directory, file)
-            with open(npc_path, 'r') as f:
-                raw_npc_data = yaml.safe_load(f)
+            raw_npc_data = load_yaml_file(npc_path)
+            if raw_npc_data is None:
+                continue
 
             serialized_npc = {
                 "name": raw_npc_data.get("name", file[:-4]),
@@ -2433,8 +2698,7 @@ def get_package_contents():
         if f.endswith('.npc'):
             npc_path = os.path.join(package_npc_team_dir, f)
             try:
-                with open(npc_path, 'r') as file:
-                    npc_data = yaml.safe_load(file) or {}
+                npc_data = load_yaml_file(npc_path) or {}
                 npcs.append({
                     "name": npc_data.get("name", f[:-4]),
                     "primary_directive": npc_data.get("primary_directive", ""),
@@ -2453,8 +2717,7 @@ def get_package_contents():
             jinx_path = os.path.join(root, f)
             rel_path = os.path.relpath(jinx_path, jinxs_dir)
             try:
-                with open(jinx_path, 'r') as file:
-                    jinx_data = yaml.safe_load(file) or {}
+                jinx_data = load_yaml_file(jinx_path) or {}
                 jinxs.append({
                     "name": f[:-5],
                     "path": rel_path[:-5],
@@ -4392,19 +4655,103 @@ def approve_memories():
     try:
         data = request.json
         approvals = data.get("approvals", [])
-
+
         command_history = CommandHistory(app.config.get('DB_PATH'))
-
+
         for approval in approvals:
             command_history.update_memory_status(
                 approval['memory_id'],
                 approval['decision'],
                 approval.get('final_memory')
             )
-
+
         return jsonify({"success": True, "processed": len(approvals)})
-
+
+    except Exception as e:
+        return jsonify({"error": str(e)}), 500
+
+@app.route("/api/memory/search", methods=["GET"])
+def search_memories():
+    """Search memories with optional scope filtering"""
+    try:
+        q = request.args.get("q", "")
+        npc = request.args.get("npc")
+        team = request.args.get("team")
+        directory_path = request.args.get("directory_path")
+        status = request.args.get("status")
+        limit = int(request.args.get("limit", 50))
+
+        if not q:
+            return jsonify({"error": "Query parameter 'q' is required"}), 400
+
+        command_history = CommandHistory(app.config.get('DB_PATH'))
+        results = command_history.search_memory(
+            query=q,
+            npc=npc,
+            team=team,
+            directory_path=directory_path,
+            status_filter=status,
+            limit=limit
+        )
+
+        return jsonify({"memories": results, "count": len(results)})
+
+    except Exception as e:
+        traceback.print_exc()
+        return jsonify({"error": str(e)}), 500
+
+@app.route("/api/memory/pending", methods=["GET"])
+def get_pending_memories():
+    """Get memories awaiting approval"""
+    try:
+        limit = int(request.args.get("limit", 50))
+        npc = request.args.get("npc")
+        team = request.args.get("team")
+        directory_path = request.args.get("directory_path")
+
+        command_history = CommandHistory(app.config.get('DB_PATH'))
+        results = command_history.get_pending_memories(limit=limit)
+
+        # Filter by scope if provided
+        if npc or team or directory_path:
+            filtered = []
+            for mem in results:
+                if npc and mem.get('npc') != npc:
+                    continue
+                if team and mem.get('team') != team:
+                    continue
+                if directory_path and mem.get('directory_path') != directory_path:
+                    continue
+                filtered.append(mem)
+            results = filtered
+
+        return jsonify({"memories": results, "count": len(results)})
+
     except Exception as e:
+        traceback.print_exc()
+        return jsonify({"error": str(e)}), 500
+
+@app.route("/api/memory/scope", methods=["GET"])
+def get_memories_by_scope():
+    """Get memories for a specific scope (npc/team/directory)"""
+    try:
+        npc = request.args.get("npc", "")
+        team = request.args.get("team", "")
+        directory_path = request.args.get("directory_path", "")
+        status = request.args.get("status")
+
+        command_history = CommandHistory(app.config.get('DB_PATH'))
+        results = command_history.get_memories_for_scope(
+            npc=npc,
+            team=team,
+            directory_path=directory_path,
+            status=status
+        )
+
+        return jsonify({"memories": results, "count": len(results)})
+
+    except Exception as e:
+        traceback.print_exc()
         return jsonify({"error": str(e)}), 500
 
 
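These three read-only routes complement the existing approval flow in approve_memories. A usage sketch; the server address and the npc/team/path values are placeholders:

import requests

BASE = "http://localhost:5337"  # assumed server address

# Full-text memory search scoped to one NPC
r = requests.get(f"{BASE}/api/memory/search", params={"q": "deadline", "npc": "sibiji", "limit": 20})
print(r.json()["count"])

# Memories awaiting approval for a given team
pending = requests.get(f"{BASE}/api/memory/pending", params={"team": "my_team"}).json()["memories"]

# Everything recorded for a directory scope, regardless of status
scoped = requests.get(f"{BASE}/api/memory/scope", params={"directory_path": "/home/user/project"}).json()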
@@ -5419,7 +5766,7 @@ def text_to_speech_endpoint():
     import base64
     from npcpy.gen.audio_gen import (
         text_to_speech, get_available_engines,
-        pcm16_to_wav, KOKORO_VOICES
+        pcm16_to_wav
     )
 
     data = request.json or {}
{npcpy-1.3.12 → npcpy-1.3.13/npcpy.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: npcpy
-Version: 1.3.12
+Version: 1.3.13
 Summary: npcpy is the premier open-source library for integrating LLMs and Agents into python systems.
 Home-page: https://github.com/NPC-Worldwide/npcpy
 Author: Christopher Agostino
{npcpy-1.3.12 → npcpy-1.3.13}/setup.py
@@ -83,7 +83,7 @@ extra_files = package_files("npcpy/npc_team/")
 
 setup(
     name="npcpy",
-    version="1.3.12",
+    version="1.3.13",
     packages=find_packages(exclude=["tests*"]),
     install_requires=base_requirements,
     extras_require={