npcpy 1.3.12__py3-none-any.whl → 1.3.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
npcpy/ft/diff.py CHANGED
@@ -180,52 +180,66 @@ if TORCH_AVAILABLE:
         noise = torch.randn_like(x)
         return sqrt_alpha * x + sqrt_one_minus * noise, noise
 
-    def train(self, dataloader):
+    def train(self, dataloader, progress_callback=None):
         optimizer = torch.optim.AdamW(
-            self.model.parameters(), 
+            self.model.parameters(),
             lr=self.config.learning_rate
         )
-        
+
         os.makedirs(self.config.output_model_path, exist_ok=True)
         checkpoint_dir = os.path.join(
-            self.config.output_model_path, 
+            self.config.output_model_path,
             'checkpoints'
         )
         os.makedirs(checkpoint_dir, exist_ok=True)
-        
+
         global_step = 0
-        
+        total_batches = len(dataloader)
+        loss_history = []
+
         for epoch in range(self.config.num_epochs):
             self.model.train()
             epoch_loss = 0.0
-            
+
             pbar = tqdm(dataloader, desc=f'Epoch {epoch+1}')
             for batch_idx, (images, captions) in enumerate(pbar):
                 images = images.to(self.device)
                 batch_size = images.shape[0]
-                
+
                 t = torch.randint(
-                    0, 
-                    self.config.timesteps, 
-                    (batch_size,), 
+                    0,
+                    self.config.timesteps,
+                    (batch_size,),
                     device=self.device
                 ).long()
-                
+
                 noisy_images, noise = self.add_noise(images, t)
-                
+
                 predicted_noise = self.model(noisy_images, t)
-                
+
                 loss = F.mse_loss(predicted_noise, noise)
-                
+
                 optimizer.zero_grad()
                 loss.backward()
                 optimizer.step()
-                
+
                 epoch_loss += loss.item()
                 global_step += 1
-                
+
                 pbar.set_postfix({'loss': loss.item()})
-                
+
+                # Report progress via callback
+                if progress_callback:
+                    progress_callback({
+                        'epoch': epoch + 1,
+                        'total_epochs': self.config.num_epochs,
+                        'batch': batch_idx + 1,
+                        'total_batches': total_batches,
+                        'step': global_step,
+                        'loss': loss.item(),
+                        'loss_history': loss_history[-100:],  # Last 100 losses
+                    })
+
                 if global_step % self.config.checkpoint_frequency == 0:
                     ckpt_path = os.path.join(
                         checkpoint_dir,
@@ -238,8 +252,9 @@ if TORCH_AVAILABLE:
                         'optimizer_state_dict': optimizer.state_dict(),
                         'loss': loss.item(),
                     }, ckpt_path)
-        
+
             avg_loss = epoch_loss / len(dataloader)
+            loss_history.append(avg_loss)
             print(f'Epoch {epoch+1} avg loss: {avg_loss:.6f}')
 
         final_path = os.path.join(
@@ -300,35 +315,35 @@ else:
     DiffusionTrainer = None
 
 
-def train_diffusion(image_paths, captions=None, config=None, 
-                    resume_from=None):
+def train_diffusion(image_paths, captions=None, config=None,
+                    resume_from=None, progress_callback=None):
     if not TORCH_AVAILABLE:
         raise ImportError(
             "PyTorch not available. Install: pip install torch torchvision"
         )
-    
+
     if config is None:
         config = DiffusionConfig()
-    
+
     if captions is None:
         captions = [''] * len(image_paths)
-    
+
     dataset = ImageDataset(image_paths, captions, config.image_size)
     dataloader = DataLoader(
-        dataset, 
-        batch_size=config.batch_size, 
+        dataset,
+        batch_size=config.batch_size,
         shuffle=True,
         num_workers=0
     )
-    
+
     trainer = DiffusionTrainer(config)
-    
+
     if resume_from and os.path.exists(resume_from):
         checkpoint = torch.load(resume_from, map_location=trainer.device)
        trainer.model.load_state_dict(checkpoint['model_state_dict'])
         print(f'Resumed from {resume_from}')
-    
-    output_path = trainer.train(dataloader)
+
+    output_path = trainer.train(dataloader, progress_callback=progress_callback)
 
     gc.collect()
     if torch.cuda.is_available():
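The thread running through this file is that `train()` now emits structured per-batch progress and `train_diffusion` forwards a caller-supplied callback into it. A minimal sketch of consuming that callback (image paths and captions are placeholders; the dict keys mirror the payload built in `train()` above):

```python
from npcpy.ft.diff import train_diffusion

def on_progress(p):
    # Keys mirror the payload assembled in DiffusionTrainer.train():
    # epoch, total_epochs, batch, total_batches, step, loss, loss_history
    print(f"epoch {p['epoch']}/{p['total_epochs']} "
          f"batch {p['batch']}/{p['total_batches']} loss={p['loss']:.4f}")

model_path = train_diffusion(
    ["img0.png", "img1.png"],     # placeholder image paths
    captions=["a cat", "a dog"],  # placeholder captions
    progress_callback=on_progress,
)
```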
npcpy/ft/rl.py CHANGED
@@ -1,4 +1,5 @@
-from dataclasses import dataclass
+from dataclasses import dataclass, field
+from typing import List
 
 from datetime import datetime
 import glob
@@ -12,7 +13,8 @@ try:
     import torch
     from transformers import (
         AutoModelForCausalLM,
-        AutoTokenizer
+        AutoTokenizer,
+        BitsAndBytesConfig
     )
     from trl import DPOTrainer, DPOConfig
 except:
@@ -23,6 +25,7 @@ except:
     torch = None
     AutoModelForCausalLM = None
     AutoTokenizer = None
+    BitsAndBytesConfig = None
 
 
 import random
@@ -44,6 +47,24 @@ class RLConfig:
     beta: float = 0.5
     max_length: int = 512
     max_prompt_length: int = 256
+    # Quantization options
+    use_4bit: bool = False
+    use_8bit: bool = False
+    # Precision options
+    fp16: bool = False
+    bf16: bool = False
+    # LoRA configuration
+    lora_r: int = 8
+    lora_alpha: int = 16
+    lora_dropout: float = 0.1
+    lora_target_modules: List[str] = field(
+        default_factory=lambda: ["q_proj", "k_proj", "v_proj", "o_proj"]
+    )
+    # Training options
+    max_pairs: int = 200
+    warmup_steps: int = 5
+    logging_steps: int = 5
+    save_steps: int = 20
 
 
 class TaskExecutor:
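The new `RLConfig` fields expose quantization, precision, LoRA, and schedule knobs that were previously hard-coded in `train_with_dpo`. A sketch of a memory-constrained configuration (values are illustrative, not recommendations):

```python
from npcpy.ft.rl import RLConfig

config = RLConfig(
    use_4bit=True,    # NF4 quantization via bitsandbytes
    lora_r=16,        # larger adapter rank than the default 8
    lora_alpha=32,
    max_pairs=100,    # cap the preference dataset
    logging_steps=1,
)
```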
@@ -207,8 +228,8 @@ def create_preference_pairs(
             f"Warning: Only {len(pairs)} pairs found. "
             "May overfit."
         )
-    
-    return Dataset.from_list(pairs[:100])
+
+    return Dataset.from_list(pairs)
 
 
 def train_with_dpo(
@@ -218,84 +239,121 @@ def train_with_dpo(
 
     if config is None:
         config = RLConfig()
-    
+
     preference_dataset = create_preference_pairs(
         traces,
         min_reward_gap=config.min_reward_gap
     )
-    
+
     if preference_dataset is None or len(preference_dataset) == 0:
         print("No valid preference pairs. Cannot train.")
         return None
-    
+
+    # Limit pairs if specified
+    if config.max_pairs and len(preference_dataset) > config.max_pairs:
+        preference_dataset = preference_dataset.select(range(config.max_pairs))
+
+    print(f"Training with {len(preference_dataset)} preference pairs")
+
+    # Build model loading kwargs
+    model_kwargs = {
+        "device_map": "auto",
+        "trust_remote_code": True,
+        "low_cpu_mem_usage": True
+    }
+
+    # Handle quantization
+    if config.use_4bit:
+        if BitsAndBytesConfig is None:
+            raise ImportError("bitsandbytes required for 4-bit. pip install bitsandbytes")
+        model_kwargs["quantization_config"] = BitsAndBytesConfig(
+            load_in_4bit=True,
+            bnb_4bit_quant_type="nf4",
+            bnb_4bit_compute_dtype=torch.float16,
+            bnb_4bit_use_double_quant=True
+        )
+        print("Using 4-bit quantization")
+    elif config.use_8bit:
+        if BitsAndBytesConfig is None:
+            raise ImportError("bitsandbytes required for 8-bit. pip install bitsandbytes")
+        model_kwargs["quantization_config"] = BitsAndBytesConfig(
+            load_in_8bit=True
+        )
+        print("Using 8-bit quantization")
+    else:
+        # Set dtype based on precision config
+        if config.bf16:
+            model_kwargs["torch_dtype"] = torch.bfloat16
+        elif config.fp16:
+            model_kwargs["torch_dtype"] = torch.float16
+        else:
+            model_kwargs["torch_dtype"] = torch.float32
+
     model = AutoModelForCausalLM.from_pretrained(
         config.base_model_name,
-        torch_dtype=torch.float32,
-        device_map="auto",
-        low_cpu_mem_usage=True
+        **model_kwargs
     )
-    
+
     tokenizer = AutoTokenizer.from_pretrained(
         config.base_model_name,
         trust_remote_code=True
     )
-    
+
     if tokenizer.pad_token is None:
         tokenizer.pad_token = tokenizer.eos_token
-    
+
     peft_config = LoraConfig(
-        r=8,
-        lora_alpha=16,
-        lora_dropout=0.1,
+        r=config.lora_r,
+        lora_alpha=config.lora_alpha,
+        lora_dropout=config.lora_dropout,
         bias="none",
         task_type="CAUSAL_LM",
-        target_modules=[
-            "q_proj",
-            "k_proj",
-            "v_proj",
-            "o_proj"
-        ]
+        target_modules=config.lora_target_modules
     )
-    
+
+    # Select optimizer based on quantization
+    if config.use_4bit or config.use_8bit:
+        optim = "paged_adamw_8bit"
+    else:
+        optim = "adamw_torch"
+
     training_args = DPOConfig(
         output_dir="./dpo_results",
-        per_device_train_batch_size=(
-            config.per_device_train_batch_size
-        ),
-        gradient_accumulation_steps=(
-            config.gradient_accumulation_steps
-        ),
+        per_device_train_batch_size=config.per_device_train_batch_size,
+        gradient_accumulation_steps=config.gradient_accumulation_steps,
         learning_rate=config.learning_rate,
         num_train_epochs=config.num_train_epochs,
         weight_decay=0.1,
         beta=config.beta,
-        logging_steps=2,
-        save_steps=10,
+        logging_steps=config.logging_steps,
+        save_steps=config.save_steps,
         remove_unused_columns=False,
         max_length=config.max_length,
         max_prompt_length=config.max_prompt_length,
         dataloader_num_workers=0,
-        fp16=False,
-        bf16=False,
-        optim="adamw_torch",
-        warmup_steps=2,
+        fp16=config.fp16 or config.use_4bit,
+        bf16=config.bf16,
+        optim=optim,
+        warmup_steps=config.warmup_steps,
         save_strategy="steps",
-        save_total_limit=3
+        save_total_limit=2
     )
-    
+
     trainer = DPOTrainer(
         model,
         args=training_args,
         train_dataset=preference_dataset,
-        peft_config=peft_config
+        peft_config=peft_config,
+        tokenizer=tokenizer
     )
-    
+
     print("Starting DPO training...")
     trainer.train()
-    
+
+    os.makedirs(config.adapter_path, exist_ok=True)
     trainer.save_model(config.adapter_path)
     print(f"Adapter saved to {config.adapter_path}")
-    
+
     return config.adapter_path
 
@@ -333,28 +391,53 @@ def run_rl_training(
 
 def load_rl_model(
     base_model_id: str,
-    adapter_path: str
+    adapter_path: str,
+    use_4bit: bool = False,
+    use_8bit: bool = False,
+    merge_adapter: bool = True
 ):
-
     print(f"Loading base model: {base_model_id}")
+
+    model_kwargs = {
+        "device_map": "auto",
+        "trust_remote_code": True
+    }
+
+    if use_4bit:
+        if BitsAndBytesConfig is None:
+            raise ImportError("bitsandbytes required for 4-bit")
+        model_kwargs["quantization_config"] = BitsAndBytesConfig(
+            load_in_4bit=True,
+            bnb_4bit_quant_type="nf4",
+            bnb_4bit_compute_dtype=torch.float16,
+            bnb_4bit_use_double_quant=True
+        )
+    elif use_8bit:
+        if BitsAndBytesConfig is None:
+            raise ImportError("bitsandbytes required for 8-bit")
+        model_kwargs["quantization_config"] = BitsAndBytesConfig(
+            load_in_8bit=True
+        )
+    else:
+        model_kwargs["torch_dtype"] = torch.float16
+
     model = AutoModelForCausalLM.from_pretrained(
         base_model_id,
-        torch_dtype=torch.float32,
-        device_map="auto",
-        attn_implementation='eager'
+        **model_kwargs
     )
-    
+
     tokenizer = AutoTokenizer.from_pretrained(
         base_model_id,
         trust_remote_code=True
     )
-    
+
     if tokenizer.pad_token is None:
         tokenizer.pad_token = tokenizer.eos_token
-    
+
     if adapter_path and os.path.exists(adapter_path):
         print(f"Loading adapter: {adapter_path}")
         model = PeftModel.from_pretrained(model, adapter_path)
-        model = model.merge_and_unload()
-
+        if merge_adapter and not (use_4bit or use_8bit):
+            model = model.merge_and_unload()
+
     return model, tokenizer
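`load_rl_model` now mirrors the trainer's quantization options and deliberately skips `merge_and_unload()` when the base is quantized, since merging LoRA weights into a 4/8-bit base is lossy; the adapter stays attached via PEFT instead. A usage sketch with placeholder identifiers:

```python
from npcpy.ft.rl import load_rl_model

model, tokenizer = load_rl_model(
    "Qwen/Qwen2.5-0.5B-Instruct",  # hypothetical base model id
    "./dpo_adapter",               # hypothetical adapter directory
    use_4bit=True,                 # adapter remains unmerged in this mode
)
```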
npcpy/gen/response.py CHANGED
@@ -259,6 +259,24 @@ def get_ollama_response(
                     prompt = f"Content from CSV: {os.path.basename(attachment)} (first 100 rows):\n{csv_sample} \n csv description: {csv_data.describe()}"
                 except Exception:
                     pass
+            else:
+                # Handle text-based files
+                text_extensions = {'.txt', '.text', '.log', '.md', '.markdown', '.rst', '.json', '.yaml', '.yml', '.toml', '.ini', '.conf', '.cfg', '.xml', '.html', '.htm', '.py', '.js', '.ts', '.jsx', '.tsx', '.java', '.c', '.h', '.cpp', '.hpp', '.go', '.rs', '.rb', '.php', '.sh', '.bash', '.sql', '.css', '.scss'}
+                filename = os.path.basename(attachment)
+                if ext in text_extensions or ext == '':
+                    try:
+                        with open(attachment, 'r', encoding='utf-8', errors='replace') as f:
+                            text_content = f.read()
+                        max_chars = 50000
+                        if len(text_content) > max_chars:
+                            text_content = text_content[:max_chars] + f"\n\n... [truncated]"
+                        if text_content.strip():
+                            if prompt:
+                                prompt += f"\n\nContent from {filename}:\n```\n{text_content}\n```"
+                            else:
+                                prompt = f"Content from {filename}:\n```\n{text_content}\n```"
+                    except Exception:
+                        pass
 
 
     if prompt:
@@ -797,6 +815,24 @@ def get_litellm_response(
                     prompt = f"Content from CSV: {os.path.basename(attachment)} (first 10 rows):\n{csv_sample}"
                 except Exception:
                     pass
+            else:
+                # Handle text-based files
+                text_extensions = {'.txt', '.text', '.log', '.md', '.markdown', '.rst', '.json', '.yaml', '.yml', '.toml', '.ini', '.conf', '.cfg', '.xml', '.html', '.htm', '.py', '.js', '.ts', '.jsx', '.tsx', '.java', '.c', '.h', '.cpp', '.hpp', '.go', '.rs', '.rb', '.php', '.sh', '.bash', '.sql', '.css', '.scss'}
+                filename = os.path.basename(attachment)
+                if ext in text_extensions or ext == '':
+                    try:
+                        with open(attachment, 'r', encoding='utf-8', errors='replace') as f:
+                            text_content = f.read()
+                        max_chars = 50000
+                        if len(text_content) > max_chars:
+                            text_content = text_content[:max_chars] + f"\n\n... [truncated]"
+                        if text_content.strip():
+                            if prompt:
+                                prompt += f"\n\nContent from {filename}:\n```\n{text_content}\n```"
+                            else:
+                                prompt = f"Content from {filename}:\n```\n{text_content}\n```"
+                    except Exception:
+                        pass
 
 
     if prompt:
        if result['messages'] and result['messages'][-1]["role"] == "user":
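Both provider paths now inline text-like attachments (source files, configs, logs, markdown) directly into the prompt, truncated at 50,000 characters. Assuming the usual `attachments` plumbing from `get_llm_response` down to these functions, usage would look like this:

```python
from npcpy.llm_funcs import get_llm_response

# 'notes.md' is a placeholder; any extension in text_extensions (or none) works
response = get_llm_response(
    "Summarize the attached notes.",
    model='llama3.2',
    provider='ollama',
    attachments=['notes.md'],
)
print(response['response'])
```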
npcpy/memory/knowledge_graph.py CHANGED
@@ -344,7 +344,6 @@ def kg_evolve_incremental(existing_kg,
 
     current_gen = existing_kg.get('generation', 0)
     next_gen = current_gen + 1
-    print(f"\n--- ABSORBING INFO: Gen {current_gen} -> Gen {next_gen} ---")
 
     newly_added_concepts = []
     concept_links = list(existing_kg.get('concept_links', []))
@@ -359,8 +358,7 @@ def kg_evolve_incremental(existing_kg,
     all_concept_names = list(existing_concept_names)
 
     all_new_facts = []
-    print(npc, npc.model, npc.provider)
-    
+
     if new_facts:
         all_new_facts = new_facts
         print(f'using pre-approved facts: {len(all_new_facts)}')
npcpy/npc_compiler.py CHANGED
@@ -7,6 +7,41 @@ import sqlite3
 import numpy as np
 import pandas as pd
 import matplotlib.pyplot as plt
+import matplotlib as mpl
+
+# Professional plot styling (from kg-research matplotlibrc)
+mpl.rcParams.update({
+    'font.family': 'serif',
+    'axes.labelsize': 20,
+    'axes.grid.axis': 'both',
+    'axes.grid.which': 'major',
+    'axes.prop_cycle': mpl.cycler('color', ['k', 'b', 'r', 'g', 'c', 'm', 'y', 'k']),
+    'xtick.top': True,
+    'xtick.direction': 'in',
+    'xtick.major.size': 10,
+    'xtick.minor.size': 5,
+    'xtick.labelsize': 20,
+    'xtick.minor.visible': True,
+    'xtick.major.top': True,
+    'xtick.major.bottom': True,
+    'xtick.minor.top': True,
+    'xtick.minor.bottom': True,
+    'ytick.left': True,
+    'ytick.right': True,
+    'ytick.direction': 'in',
+    'ytick.major.size': 10,
+    'ytick.minor.size': 5,
+    'ytick.labelsize': 20,
+    'ytick.minor.visible': True,
+    'ytick.major.left': True,
+    'ytick.major.right': True,
+    'ytick.minor.left': True,
+    'ytick.minor.right': True,
+    'legend.frameon': False,
+    'legend.fontsize': 12,
+    'image.cmap': 'plasma',
+    'errorbar.capsize': 1,
+})
 import re
 import random
 from datetime import datetime
@@ -31,9 +66,31 @@ from npcpy.npc_sysenv import (
 from npcpy.memory.command_history import CommandHistory, generate_message_id
 
 class SilentUndefined(Undefined):
+    """Undefined that silently returns empty string instead of raising errors"""
     def _fail_with_undefined_error(self, *args, **kwargs):
         return ""
 
+    def __str__(self):
+        return ""
+
+    def __repr__(self):
+        return ""
+
+    def __bool__(self):
+        return False
+
+    def __eq__(self, other):
+        return other == "" or other is None or isinstance(other, Undefined)
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __iter__(self):
+        return iter([])
+
+    def __len__(self):
+        return 0
+
 import math
 from PIL import Image
 from jinja2 import Environment, ChainableUndefined
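The added dunder methods make unresolved template variables act like empty values under printing, truthiness tests, comparisons, iteration, and `len()`, instead of raising at render time. A standalone illustration (abridged mirror of the class above):

```python
from jinja2 import Environment, Undefined

class SilentUndefined(Undefined):
    def _fail_with_undefined_error(self, *args, **kwargs):
        return ""
    def __str__(self):
        return ""
    def __iter__(self):
        return iter([])

env = Environment(undefined=SilentUndefined)
# With the default Undefined, iterating a missing variable raises
# UndefinedError; here the loop simply yields nothing.
print(env.from_string("{% for x in missing %}{{ x }}{% endfor %}done").render())
# -> "done"
```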
@@ -152,11 +209,35 @@ def get_log_entries(entity_id, entry_type=None, limit=10, db_path="~/npcsh_histo
     ]
 
 
+def _json_dumps_with_undefined(obj, **kwargs):
+    """Custom JSON dumps that handles SilentUndefined objects"""
+    def default_handler(o):
+        if isinstance(o, Undefined):
+            return ""
+        raise TypeError(f"Object of type {type(o).__name__} is not JSON serializable")
+    return json.dumps(obj, default=default_handler, **kwargs)
+
+
 def load_yaml_file(file_path):
-    """Load a YAML file with error handling"""
+    """Load a YAML file with error handling, rendering Jinja2 first"""
     try:
         with open(os.path.expanduser(file_path), 'r') as f:
-            return yaml.safe_load(f)
+            content = f.read()
+
+        # Check if file has Jinja2 control structures that need pre-rendering
+        # Only render if there are {% %} blocks, otherwise parse directly
+        if '{%' not in content:
+            return yaml.safe_load(content)
+
+        # First pass: render Jinja2 templates to produce valid YAML
+        # This allows {% if %} and other control structures to work
+        jinja_env = Environment(undefined=SilentUndefined)
+        # Configure tojson filter to handle SilentUndefined
+        jinja_env.policies['json.dumps_function'] = _json_dumps_with_undefined
+        template = jinja_env.from_string(content)
+        rendered_content = template.render({})
+
+        return yaml.safe_load(rendered_content)
     except Exception as e:
         print(f"Error loading YAML file {file_path}: {e}")
         return None
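In effect, `.npc` and `.jinx` files may now carry Jinja2 control blocks and still parse as YAML, with undefined variables collapsing to empty strings via `SilentUndefined`. A hypothetical templated file round-tripped through the loader:

```python
from npcpy.npc_compiler import load_yaml_file

# Hypothetical jinx file: the {% if %} block is rendered before YAML
# parsing; 'fancy' is undefined, so the else-branch is emitted.
with open('/tmp/greeter.jinx', 'w') as f:
    f.write(
        "jinx_name: greeter\n"
        "description: "
        "{% if fancy %}A fancy greeter{% else %}A plain greeter{% endif %}\n"
    )

data = load_yaml_file('/tmp/greeter.jinx')
print(data['jinx_name'], '-', data['description'])
# -> greeter - A plain greeter
```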
npcpy/serve.py CHANGED
@@ -46,10 +46,12 @@ from npcsh._state import ShellState, initialize_base_npcs_if_needed
 from npcsh.config import NPCSH_DB_PATH
 
 
-from npcpy.memory.knowledge_graph import load_kg_from_db
+from npcpy.memory.knowledge_graph import load_kg_from_db, find_similar_facts_chroma
+from npcpy.memory.command_history import setup_chroma_db
 from npcpy.memory.search import execute_rag_command, execute_brainblast_command
 from npcpy.data.load import load_file_contents
 from npcpy.data.web import search_web
+from npcpy.data.image import capture_screenshot
 
 
 import base64
@@ -67,12 +69,14 @@ from npcpy.memory.command_history import (
     save_conversation_message,
     generate_message_id,
 )
-from npcpy.npc_compiler import Jinx, NPC, Team, load_jinxs_from_directory, build_jinx_tool_catalog, initialize_npc_project
+from npcpy.npc_compiler import Jinx, NPC, Team, load_jinxs_from_directory, build_jinx_tool_catalog, initialize_npc_project, load_yaml_file
 
 from npcpy.llm_funcs import (
     get_llm_response, check_llm_command
 )
+from npcpy.gen.embeddings import get_embeddings
 from termcolor import cprint
+
 from npcpy.tools import auto_tools
 
 import json
@@ -712,6 +716,265 @@ def get_centrality_data():
     concept_degree = {node: cent for node, cent in nx.degree_centrality(G).items() if node in concepts_df['name'].values}
     return jsonify(centrality={'degree': concept_degree})
 
+@app.route('/api/kg/search')
+def search_kg():
+    """Search facts and concepts by keyword"""
+    try:
+        q = request.args.get('q', '').strip().lower()
+        generation = request.args.get('generation', type=int)
+        search_type = request.args.get('type', 'both')  # fact, concept, or both
+        limit = request.args.get('limit', 50, type=int)
+
+        if not q:
+            return jsonify({"error": "Query parameter 'q' is required"}), 400
+
+        concepts_df, facts_df, links_df = load_kg_data(generation)
+        results = {"facts": [], "concepts": [], "query": q}
+
+        # Search facts
+        if search_type in ('both', 'fact'):
+            for _, row in facts_df.iterrows():
+                statement = str(row.get('statement', '')).lower()
+                source_text = str(row.get('source_text', '')).lower()
+                if q in statement or q in source_text:
+                    results["facts"].append({
+                        "statement": row.get('statement'),
+                        "source_text": row.get('source_text'),
+                        "type": row.get('type'),
+                        "generation": row.get('generation'),
+                        "origin": row.get('origin')
+                    })
+                    if len(results["facts"]) >= limit:
+                        break
+
+        # Search concepts
+        if search_type in ('both', 'concept'):
+            for _, row in concepts_df.iterrows():
+                name = str(row.get('name', '')).lower()
+                description = str(row.get('description', '')).lower()
+                if q in name or q in description:
+                    results["concepts"].append({
+                        "name": row.get('name'),
+                        "description": row.get('description'),
+                        "generation": row.get('generation'),
+                        "origin": row.get('origin')
+                    })
+                    if len(results["concepts"]) >= limit:
+                        break
+
+        return jsonify(results)
+
+    except Exception as e:
+        traceback.print_exc()
+        return jsonify({"error": str(e)}), 500
+
+@app.route('/api/kg/embed', methods=['POST'])
+def embed_kg_facts():
+    """Embed existing facts from SQL to Chroma for semantic search"""
+    try:
+        data = request.get_json() or {}
+        generation = data.get('generation')
+        batch_size = data.get('batch_size', 10)
+
+        # Load facts from SQL
+        _, facts_df, _ = load_kg_data(generation)
+
+        if facts_df.empty:
+            return jsonify({"message": "No facts to embed", "count": 0})
+
+        # Setup Chroma
+        chroma_db_path = os.path.expanduser('~/npcsh_chroma_db')
+        _, chroma_collection = setup_chroma_db(
+            "knowledge_graph",
+            "Facts extracted from various sources",
+            chroma_db_path
+        )
+
+        # Process in batches
+        from npcpy.memory.knowledge_graph import store_fact_with_embedding
+        import hashlib
+
+        embedded_count = 0
+        skipped_count = 0
+
+        statements = facts_df['statement'].dropna().tolist()
+
+        for i in range(0, len(statements), batch_size):
+            batch = statements[i:i + batch_size]
+
+            # Get embeddings for batch
+            try:
+                embeddings = get_embeddings(batch)
+            except Exception as e:
+                print(f"Failed to get embeddings for batch {i}: {e}")
+                continue
+
+            for j, statement in enumerate(batch):
+                fact_id = hashlib.md5(statement.encode()).hexdigest()
+
+                # Check if already exists
+                try:
+                    existing = chroma_collection.get(ids=[fact_id])
+                    if existing and existing.get('ids'):
+                        skipped_count += 1
+                        continue
+                except:
+                    pass
+
+                # Get metadata from dataframe
+                row = facts_df[facts_df['statement'] == statement].iloc[0] if len(facts_df[facts_df['statement'] == statement]) > 0 else None
+                metadata = {
+                    "generation": int(row.get('generation', 0)) if row is not None and pd.notna(row.get('generation')) else 0,
+                    "origin": str(row.get('origin', '')) if row is not None else '',
+                    "type": str(row.get('type', '')) if row is not None else '',
+                }
+
+                # Store with embedding
+                result = store_fact_with_embedding(
+                    chroma_collection, statement, metadata, embeddings[j]
+                )
+                if result:
+                    embedded_count += 1
+
+        return jsonify({
+            "message": f"Embedded {embedded_count} facts, skipped {skipped_count} existing",
+            "embedded": embedded_count,
+            "skipped": skipped_count,
+            "total_facts": len(statements)
+        })
+
+    except Exception as e:
+        traceback.print_exc()
+        return jsonify({"error": str(e)}), 500
+
+@app.route('/api/kg/search/semantic')
+def search_kg_semantic():
+    """Semantic search for facts using vector similarity"""
+    try:
+        q = request.args.get('q', '').strip()
+        generation = request.args.get('generation', type=int)
+        limit = request.args.get('limit', 10, type=int)
+
+        if not q:
+            return jsonify({"error": "Query parameter 'q' is required"}), 400
+
+        # Setup Chroma connection
+        chroma_db_path = os.path.expanduser('~/npcsh_chroma_db')
+        try:
+            _, chroma_collection = setup_chroma_db(
+                "knowledge_graph",
+                "Facts extracted from various sources",
+                chroma_db_path
+            )
+        except Exception as e:
+            return jsonify({
+                "error": f"Chroma DB not available: {str(e)}",
+                "facts": [],
+                "query": q
+            }), 200
+
+        # Get query embedding
+        try:
+            query_embedding = get_embeddings([q])[0]
+        except Exception as e:
+            return jsonify({
+                "error": f"Failed to generate embedding: {str(e)}",
+                "facts": [],
+                "query": q
+            }), 200
+
+        # Build metadata filter for generation if specified
+        metadata_filter = None
+        if generation is not None:
+            metadata_filter = {"generation": generation}
+
+        # Search Chroma
+        similar_facts = find_similar_facts_chroma(
+            chroma_collection,
+            q,
+            query_embedding=query_embedding,
+            n_results=limit,
+            metadata_filter=metadata_filter
+        )
+
+        # Format results
+        results = {
+            "facts": [
+                {
+                    "statement": f["fact"],
+                    "distance": f.get("distance"),
+                    "metadata": f.get("metadata", {}),
+                    "id": f.get("id")
+                }
+                for f in similar_facts
+            ],
+            "query": q,
+            "total": len(similar_facts)
+        }
+
+        return jsonify(results)
+
+    except Exception as e:
+        traceback.print_exc()
+        return jsonify({"error": str(e)}), 500
+
+@app.route('/api/kg/facts')
+def get_kg_facts():
+    """Get facts, optionally filtered by generation"""
+    try:
+        generation = request.args.get('generation', type=int)
+        limit = request.args.get('limit', 100, type=int)
+        offset = request.args.get('offset', 0, type=int)
+
+        _, facts_df, _ = load_kg_data(generation)
+
+        facts = []
+        for i, row in facts_df.iloc[offset:offset+limit].iterrows():
+            facts.append({
+                "statement": row.get('statement'),
+                "source_text": row.get('source_text'),
+                "type": row.get('type'),
+                "generation": row.get('generation'),
+                "origin": row.get('origin')
+            })
+
+        return jsonify({
+            "facts": facts,
+            "total": len(facts_df),
+            "offset": offset,
+            "limit": limit
+        })
+
+    except Exception as e:
+        traceback.print_exc()
+        return jsonify({"error": str(e)}), 500
+
+@app.route('/api/kg/concepts')
+def get_kg_concepts():
+    """Get concepts, optionally filtered by generation"""
+    try:
+        generation = request.args.get('generation', type=int)
+        limit = request.args.get('limit', 100, type=int)
+
+        concepts_df, _, _ = load_kg_data(generation)
+
+        concepts = []
+        for _, row in concepts_df.head(limit).iterrows():
+            concepts.append({
+                "name": row.get('name'),
+                "description": row.get('description'),
+                "generation": row.get('generation'),
+                "origin": row.get('origin')
+            })
+
+        return jsonify({
+            "concepts": concepts,
+            "total": len(concepts_df)
+        })
+
+    except Exception as e:
+        traceback.print_exc()
+        return jsonify({"error": str(e)}), 500
 
 
 @app.route("/api/attachments/<message_id>", methods=["GET"])
@@ -746,7 +1009,7 @@ def get_attachment(attachment_id):
 @app.route("/api/capture_screenshot", methods=["GET"])
 def capture():
 
-    screenshot = capture_screenshot(None, full=True)
+    screenshot = capture_screenshot(full=True)
 
 
     if not screenshot:
@@ -853,10 +1116,9 @@ def get_available_jinxs():
 def get_jinx_name_from_file(filepath):
     """Read jinx_name from file, fallback to filename."""
     try:
-        with open(filepath, 'r') as f:
-            data = yaml.safe_load(f)
-            if data and 'jinx_name' in data:
-                return data['jinx_name']
+        data = load_yaml_file(filepath)
+        if data and 'jinx_name' in data:
+            return data['jinx_name']
     except:
         pass
     return os.path.basename(filepath)[:-5]
@@ -1634,10 +1896,26 @@ def finetune_diffusers():
         'output_dir': output_dir,
         'epochs': num_epochs,
         'current_epoch': 0,
+        'current_batch': 0,
+        'total_batches': 0,
+        'current_loss': None,
+        'loss_history': [],
+        'step': 0,
         'start_time': datetime.datetime.now().isoformat()
     }
     print(f"🌋 Finetuning job {job_id} initialized. Output directory: {output_dir}")
-    
+
+    def progress_callback(progress_data):
+        """Callback to update job progress from training loop."""
+        finetune_jobs[job_id]['current_epoch'] = progress_data.get('epoch', 0)
+        finetune_jobs[job_id]['epochs'] = progress_data.get('total_epochs', num_epochs)
+        finetune_jobs[job_id]['current_batch'] = progress_data.get('batch', 0)
+        finetune_jobs[job_id]['total_batches'] = progress_data.get('total_batches', 0)
+        finetune_jobs[job_id]['step'] = progress_data.get('step', 0)
+        finetune_jobs[job_id]['current_loss'] = progress_data.get('loss')
+        if progress_data.get('loss_history'):
+            finetune_jobs[job_id]['loss_history'] = progress_data['loss_history']
+
     def run_training_async():
         print(f"🌋 Finetuning job {job_id}: Starting asynchronous training thread...")
         try:
@@ -1647,16 +1925,15 @@ def finetune_diffusers():
                 learning_rate=learning_rate,
                 output_model_path=output_dir
             )
-            
+
             print(f"🌋 Finetuning job {job_id}: Calling train_diffusion with config: {config}")
-            # Assuming train_diffusion might print its own progress or allow callbacks
-            # For more granular logging, you'd need to modify train_diffusion itself
             model_path = train_diffusion(
                 expanded_images,
                 captions,
-                config=config
+                config=config,
+                progress_callback=progress_callback
             )
-            
+
             finetune_jobs[job_id]['status'] = 'complete'
             finetune_jobs[job_id]['model_path'] = model_path
             finetune_jobs[job_id]['end_time'] = datetime.datetime.now().isoformat()
@@ -1686,21 +1963,32 @@ def finetune_diffusers():
 def finetune_status(job_id):
     if job_id not in finetune_jobs:
         return jsonify({'error': 'Job not found'}), 404
-    
+
     job = finetune_jobs[job_id]
-    
+
     if job['status'] == 'complete':
         return jsonify({
+            'status': 'complete',
             'complete': True,
-            'outputPath': job.get('model_path', job['output_dir'])
+            'outputPath': job.get('model_path', job['output_dir']),
+            'loss_history': job.get('loss_history', [])
         })
     elif job['status'] == 'error':
-        return jsonify({'error': job.get('error_msg', 'Unknown error')})
-    
+        return jsonify({
+            'status': 'error',
+            'error': job.get('error_msg', 'Unknown error')
+        })
+
     return jsonify({
-        'step': job.get('current_epoch', 0),
-        'total': job['epochs'],
-        'status': 'running'
+        'status': 'running',
+        'epoch': job.get('current_epoch', 0),
+        'total_epochs': job.get('epochs', 0),
+        'batch': job.get('current_batch', 0),
+        'total_batches': job.get('total_batches', 0),
+        'step': job.get('step', 0),
+        'loss': job.get('current_loss'),
+        'loss_history': job.get('loss_history', []),
+        'start_time': job.get('start_time')
     })
 
 @app.route("/api/ml/train", methods=["POST"])
@@ -1924,9 +2212,10 @@ def get_jinxs_global():
         for file in files:
             if file.endswith(".jinx"):
                 jinx_path = os.path.join(root, file)
-                with open(jinx_path, 'r') as f:
-                    raw_data = yaml.safe_load(f)
-
+                raw_data = load_yaml_file(jinx_path)
+                if raw_data is None:
+                    continue
+
                 # Preserve full input definitions including defaults
                 inputs = raw_data.get("inputs", [])
 
@@ -1960,9 +2249,10 @@ def get_jinxs_project():
         for file in files:
             if file.endswith(".jinx"):
                 jinx_path = os.path.join(root, file)
-                with open(jinx_path, 'r') as f:
-                    raw_data = yaml.safe_load(f)
-
+                raw_data = load_yaml_file(jinx_path)
+                if raw_data is None:
+                    continue
+
                 # Preserve full input definitions including defaults
                 inputs = raw_data.get("inputs", [])
 
@@ -2118,8 +2408,9 @@ def get_npc_team_global():
     for file in os.listdir(global_npc_directory):
         if file.endswith(".npc"):
             npc_path = os.path.join(global_npc_directory, file)
-            with open(npc_path, 'r') as f:
-                raw_data = yaml.safe_load(f)
+            raw_data = load_yaml_file(npc_path)
+            if raw_data is None:
+                continue
 
             npc_data.append({
                 "name": raw_data.get("name", file[:-4]),
@@ -2154,8 +2445,9 @@ def get_npc_team_project():
     for file in os.listdir(project_npc_directory):
         if file.endswith(".npc"):
             npc_path = os.path.join(project_npc_directory, file)
-            with open(npc_path, 'r') as f:
-                raw_npc_data = yaml.safe_load(f)
+            raw_npc_data = load_yaml_file(npc_path)
+            if raw_npc_data is None:
+                continue
 
             serialized_npc = {
                 "name": raw_npc_data.get("name", file[:-4]),
@@ -2433,8 +2725,7 @@ def get_package_contents():
             if f.endswith('.npc'):
                 npc_path = os.path.join(package_npc_team_dir, f)
                 try:
-                    with open(npc_path, 'r') as file:
-                        npc_data = yaml.safe_load(file) or {}
+                    npc_data = load_yaml_file(npc_path) or {}
                     npcs.append({
                         "name": npc_data.get("name", f[:-4]),
                         "primary_directive": npc_data.get("primary_directive", ""),
@@ -2453,8 +2744,7 @@ def get_package_contents():
                     jinx_path = os.path.join(root, f)
                     rel_path = os.path.relpath(jinx_path, jinxs_dir)
                     try:
-                        with open(jinx_path, 'r') as file:
-                            jinx_data = yaml.safe_load(file) or {}
+                        jinx_data = load_yaml_file(jinx_path) or {}
                         jinxs.append({
                             "name": f[:-5],
                             "path": rel_path[:-5],
@@ -4392,21 +4682,105 @@ def approve_memories():
     try:
         data = request.json
         approvals = data.get("approvals", [])
-        
+
         command_history = CommandHistory(app.config.get('DB_PATH'))
-        
+
         for approval in approvals:
             command_history.update_memory_status(
                 approval['memory_id'],
                 approval['decision'],
                 approval.get('final_memory')
             )
-        
+
         return jsonify({"success": True, "processed": len(approvals)})
-    
+
     except Exception as e:
         return jsonify({"error": str(e)}), 500
 
+@app.route("/api/memory/search", methods=["GET"])
+def search_memories():
+    """Search memories with optional scope filtering"""
+    try:
+        q = request.args.get("q", "")
+        npc = request.args.get("npc")
+        team = request.args.get("team")
+        directory_path = request.args.get("directory_path")
+        status = request.args.get("status")
+        limit = int(request.args.get("limit", 50))
+
+        if not q:
+            return jsonify({"error": "Query parameter 'q' is required"}), 400
+
+        command_history = CommandHistory(app.config.get('DB_PATH'))
+        results = command_history.search_memory(
+            query=q,
+            npc=npc,
+            team=team,
+            directory_path=directory_path,
+            status_filter=status,
+            limit=limit
+        )
+
+        return jsonify({"memories": results, "count": len(results)})
+
+    except Exception as e:
+        traceback.print_exc()
+        return jsonify({"error": str(e)}), 500
+
+@app.route("/api/memory/pending", methods=["GET"])
+def get_pending_memories():
+    """Get memories awaiting approval"""
+    try:
+        limit = int(request.args.get("limit", 50))
+        npc = request.args.get("npc")
+        team = request.args.get("team")
+        directory_path = request.args.get("directory_path")
+
+        command_history = CommandHistory(app.config.get('DB_PATH'))
+        results = command_history.get_pending_memories(limit=limit)
+
+        # Filter by scope if provided
+        if npc or team or directory_path:
+            filtered = []
+            for mem in results:
+                if npc and mem.get('npc') != npc:
+                    continue
+                if team and mem.get('team') != team:
+                    continue
+                if directory_path and mem.get('directory_path') != directory_path:
+                    continue
+                filtered.append(mem)
+            results = filtered
+
+        return jsonify({"memories": results, "count": len(results)})
+
+    except Exception as e:
+        traceback.print_exc()
+        return jsonify({"error": str(e)}), 500
+
+@app.route("/api/memory/scope", methods=["GET"])
+def get_memories_by_scope():
+    """Get memories for a specific scope (npc/team/directory)"""
+    try:
+        npc = request.args.get("npc", "")
+        team = request.args.get("team", "")
+        directory_path = request.args.get("directory_path", "")
+        status = request.args.get("status")
+
+        command_history = CommandHistory(app.config.get('DB_PATH'))
+        results = command_history.get_memories_for_scope(
+            npc=npc,
+            team=team,
+            directory_path=directory_path,
+            status=status
+        )
+
+        return jsonify({"memories": results, "count": len(results)})
+
+    except Exception as e:
+        traceback.print_exc()
+        return jsonify({"error": str(e)}), 500
+
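A sketch of querying the new memory endpoints (same assumed host as above; the query parameters match the handlers):

```python
import requests

BASE = "http://localhost:5337"  # assumed server address

# Full-text memory search scoped to one NPC ('sibiji' is illustrative)
r = requests.get(
    f"{BASE}/api/memory/search",
    params={"q": "deadline", "npc": "sibiji", "limit": 10},
)
print(r.json()["count"])

# Memories still awaiting approval
pending = requests.get(f"{BASE}/api/memory/pending").json()["memories"]
```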
@@ -5419,7 +5793,7 @@ def text_to_speech_endpoint():
     import base64
     from npcpy.gen.audio_gen import (
         text_to_speech, get_available_engines,
-        pcm16_to_wav, KOKORO_VOICES
+        pcm16_to_wav
     )
 
     data = request.json or {}
npcpy-1.3.12.dist-info/METADATA → npcpy-1.3.14.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: npcpy
-Version: 1.3.12
+Version: 1.3.14
 Summary: npcpy is the premier open-source library for integrating LLMs and Agents into python systems.
 Home-page: https://github.com/NPC-Worldwide/npcpy
 Author: Christopher Agostino
@@ -305,7 +305,7 @@ ggm = NPC(
 isabel = NPC(
     name='Isabel Allende',
     primary_directive='You are Isabel Allende, weaving stories with emotion and history. Analyze texts and provide insight.',
-    model='llama3.2:8b',
+    model='llama3.2',
     provider='ollama',
 
 )
@@ -359,7 +359,7 @@ LLM responses can be obtained without NPCs as well.
 
 ```python
 from npcpy.llm_funcs import get_llm_response
-response = get_llm_response("Who was the celtic Messenger god?", model='mistral:7b', provider='ollama')
+response = get_llm_response("Who was the celtic Messenger god?", model='qwen3:4b', provider='ollama')
 print(response['response'])
 ```
 
@@ -400,7 +400,7 @@ Return structured outputs by specifying `format='json'` or passing a Pydantic sc
 
 ```python
 from npcpy.llm_funcs import get_llm_response
-response = get_llm_response("What is the sentiment of the american people towards the repeal of Roe v Wade? Return a json object with `sentiment` as the key and a float value from -1 to 1 as the value", model='claude-4-5-haiku-latest', provider='deepseek', format='json')
+response = get_llm_response("What is the sentiment of the american people towards the repeal of Roe v Wade? Return a json object with `sentiment` as the key and a float value from -1 to 1 as the value", model='deepseek-chat', provider='deepseek', format='json')
 
 print(response['response'])
 ```
npcpy-1.3.12.dist-info/RECORD → npcpy-1.3.14.dist-info/RECORD RENAMED
@@ -4,10 +4,10 @@ npcpy/llm_funcs.py,sha256=M7GSSjqpcO2kxh7G2sGRBU34lmdW7Imd5KxYqc1PiO0,75114
 npcpy/main.py,sha256=RWoRIj6VQLxKdOKvdVyaq2kwG35oRpeXPvp1CAAoG-w,81
 npcpy/ml_funcs.py,sha256=UI7k7JR4XOH_VXR-xxLaO4r9Kyx_jBaEnp3TUIY7ZLQ,22657
 npcpy/npc_array.py,sha256=fVTxcMiXV-lvltmuwaRnTU9D3ikPq3-7k5wzp7MA5OY,40224
-npcpy/npc_compiler.py,sha256=9U6_F7qweURaL2nQgrF7I9OQEmYjOENmkBV-YChr3oM,118402
+npcpy/npc_compiler.py,sha256=W1umvhsbyCYoRYajPUKa642FcsX5Fcadh78n-Vzu2hM,120983
 npcpy/npc_sysenv.py,sha256=VH7le3xwxHvO55ZYCG1e-gj8X5YTSIqbIiU6ifSqhss,38917
 npcpy/npcs.py,sha256=eExuVsbTfrRobTRRptRpDm46jCLWUgbvy4_U7IUQo-c,744
-npcpy/serve.py,sha256=0jV4O-6j7efFDk1T_rrx-ZUom1c-dh0zNANlYMMKDYI,231461
+npcpy/serve.py,sha256=wx5pG5SRQbB3WBH1KAoOG1twpd4qtDh29c8TMvt9xT8,244481
 npcpy/tools.py,sha256=A5_oVmZkzGnI3BI-NmneuxeXQq-r29PbpAZP4nV4jrc,5303
 npcpy/data/__init__.py,sha256=1tcoChR-Hjn905JDLqaW9ElRmcISCTJdE7BGXPlym2Q,642
 npcpy/data/audio.py,sha256=o4auV8DQrAmZ4y84U3SofiwEuq5-ZBjGEZipQ9zPpGQ,22816
@@ -18,11 +18,11 @@ npcpy/data/text.py,sha256=jP0a1qZZaSJdK-LdZTn2Jjdxqmkd3efxDLEoxflJQeY,5010
 npcpy/data/video.py,sha256=H-V3mTu_ktD9u-QhYeo4aW3u9z0AtoAdRZmvRPEpE98,2887
 npcpy/data/web.py,sha256=pcjCLVAoqfw9enV5a7Dg1A_V7USG0302e6C7wUz2UgE,5235
 npcpy/ft/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-npcpy/ft/diff.py,sha256=2-NbY0p0CP5Qr9mnnncxRBwzmxRq9NKcl8B5BeT1vQ4,12319
+npcpy/ft/diff.py,sha256=0ScRR4AxXtVX2bgZ-Jr_dSwv3LAlU1JXDUq4F4n1Ea4,12839
 npcpy/ft/ge.py,sha256=0VzIiXq2wCzGcK1x0Wd-myJ3xRf-FNaPg0GkHEZegUM,3552
 npcpy/ft/memory_trainer.py,sha256=QZPznxEEwXbOGroHdMUMa5xpqlNwgV6nqOazI2xgrnQ,6635
 npcpy/ft/model_ensembler.py,sha256=BRX4hJ_rvF1vKTzjMhlahZqPttUgc3PqmzUJDqIfIps,10038
-npcpy/ft/rl.py,sha256=EcPD8t5MFg0zYWSS-A7KJ9bWd0qCTsL5SSvDxV556Z4,9245
+npcpy/ft/rl.py,sha256=uhK4M4Bxw4dqh9lv5fakiPvy8P_FcdBu_z83i24vlvw,12226
 npcpy/ft/sft.py,sha256=74gRaJTTrZcO4np4DqRMr79ADkGhPcDKutR74rag03E,6659
 npcpy/ft/usft.py,sha256=O025GGYGZQf2ZVLowyAmBwh5bJyuy2dUAM6v03YcboY,3435
 npcpy/gen/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -30,13 +30,13 @@ npcpy/gen/audio_gen.py,sha256=RoSElPUGfQimPBUcl9SP-ziIJxeI6XAr0A1882BZxXE,20646
 npcpy/gen/embeddings.py,sha256=QStTJ2ELiC379OEZsLEgGGIIFD267Y8zQchs7HRn2Zg,2089
 npcpy/gen/image_gen.py,sha256=SOZYpvlxSiAdDK9j750OEBKjm22OUNdXg1kQ10sJSy0,21853
 npcpy/gen/ocr.py,sha256=rgmXWHrCYX1Po-qG_LrNFbVYEZ8aaupxFTgparcoB_Y,6554
-npcpy/gen/response.py,sha256=Pw01M0UxjsXOPJlvShAbq9n6IVnvEqxT6MQaLyEwJFs,48505
+npcpy/gen/response.py,sha256=fLd-ORRMI_s3yRNMH1TQodGk17u_G0xofS1lqfqH4r0,51121
 npcpy/gen/video_gen.py,sha256=RFi3Zcq_Hn3HIcfoF3mijQ6G7RYFZaM_9pjPTh-8E64,3239
 npcpy/gen/world_gen.py,sha256=_8ytE7E3QVQ5qiX8DmOby-xd0d9zV20rRI6Wkpf-qcY,18922
 npcpy/memory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 npcpy/memory/command_history.py,sha256=pjqcSBHXzdQTViSjHsBP2ohRSYnJ33h2bYARGcLvBfs,62253
 npcpy/memory/kg_vis.py,sha256=TrQQCRh_E7Pyr-GPAHLSsayubAfGyf4HOEFrPB6W86Q,31280
-npcpy/memory/knowledge_graph.py,sha256=pjqcHjAh-Bfe6Q9fvNkBpg-TMjPTgynB6PhLSWWtPzI,48720
+npcpy/memory/knowledge_graph.py,sha256=X3qqlDcuzGUjRgQWleQzafGKgNw8QRz2ar2gYuCvUq8,48600
 npcpy/memory/memory_processor.py,sha256=6PfVnSBA9ag5EhHJinXoODfEPTlDDoaT0PtCCuZO6HI,2598
 npcpy/memory/search.py,sha256=glN6WYzaixcoDphTEHAXSMX3vKZGjR12Jx9YVL_gYfE,18433
 npcpy/mix/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -53,8 +53,8 @@ npcpy/work/browser.py,sha256=p2PeaoZdAXipFuAgKCCB3aXXLE_p3yIRqC87KlZKZWc,679
 npcpy/work/desktop.py,sha256=F3I8mUtJp6LAkXodsh8hGZIncoads6c_2Utty-0EdDA,2986
 npcpy/work/plan.py,sha256=QyUwg8vElWiHuoS-xK4jXTxxHvkMD3VkaCEsCmrEPQk,8300
 npcpy/work/trigger.py,sha256=P1Y8u1wQRsS2WACims_2IdkBEar-iBQix-2TDWoW0OM,9948
-npcpy-1.3.12.dist-info/licenses/LICENSE,sha256=j0YPvce7Ng9e32zYOu0EmXjXeJ0Nwawd0RA3uSGGH4E,1070
-npcpy-1.3.12.dist-info/METADATA,sha256=PowYAWEVkhhzpbRWFKJcntLCy8zseMTIccfLlTb7Cw0,37885
-npcpy-1.3.12.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-npcpy-1.3.12.dist-info/top_level.txt,sha256=g1pbSvrOOncB74Bg5-J0Olg4V0A5VzDw-Xz5YObq8BU,6
-npcpy-1.3.12.dist-info/RECORD,,
+npcpy-1.3.14.dist-info/licenses/LICENSE,sha256=j0YPvce7Ng9e32zYOu0EmXjXeJ0Nwawd0RA3uSGGH4E,1070
+npcpy-1.3.14.dist-info/METADATA,sha256=awVWA8mQYm1F3pPqVCDjAyuFrdB1hyks_8I8JjQMPr8,37870
+npcpy-1.3.14.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+npcpy-1.3.14.dist-info/top_level.txt,sha256=g1pbSvrOOncB74Bg5-J0Olg4V0A5VzDw-Xz5YObq8BU,6
+npcpy-1.3.14.dist-info/RECORD,,