npcpy-1.2.22-py3-none-any.whl → npcpy-1.2.24-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
npcpy/llm_funcs.py CHANGED
@@ -1015,7 +1015,7 @@ def execute_multi_step_plan(
 
     step_outputs = []
     current_messages = messages.copy()
-    render_markdown(f"### Plan for Command: {command[100:]}")
+    render_markdown(f"### Plan for Command: {command[:100]}")
    for action in planned_actions:
        step_info = json.dumps({'action': action.get('action', ''),
                                'explanation': str(action.get('explanation',''))[0:10]+'...'})
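The one-line change above fixes an inverted slice: `command[100:]` yields everything after the first 100 characters, while `command[:100]` yields the first 100, which is what a truncated heading needs:

```python
command = "abcdefghij" * 15  # 150-character stand-in for a long command

print(len(command[100:]))  # 50  -> old code: dropped the start, kept the tail
print(len(command[:100]))  # 100 -> new code: the leading preview, as intended
```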
npcpy/memory/command_history.py CHANGED
@@ -619,6 +619,29 @@ class CommandHistory:
         }
 
         return self._execute_returning_id(stmt, params)
+    def get_memories_for_scope(
+        self,
+        npc: str,
+        team: str,
+        directory_path: str,
+        status: Optional[str] = None
+    ) -> List[Dict]:
+
+        query = """
+        SELECT id, initial_memory, final_memory,
+               status, timestamp, created_at
+        FROM memory_lifecycle
+        WHERE npc = :npc AND team = :team AND directory_path = :path
+        """
+        params = {"npc": npc, "team": team, "path": directory_path}
+
+        if status:
+            query += " AND status = :status"
+            params["status"] = status
+
+        query += " ORDER BY created_at DESC"
+        data = self._fetch_all(query, params)
+        return data
 
    def search_memory(self, query: str, npc: str = None, team: str = None,
                      directory_path: str = None, status_filter: str = None, limit: int = 10):
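The new `get_memories_for_scope` narrows `memory_lifecycle` rows to an (npc, team, directory) scope, newest first, with an optional status filter. A minimal usage sketch; the `CommandHistory` construction and the status value are assumptions, not shown in this diff:

```python
from npcpy.memory.command_history import CommandHistory

history = CommandHistory()  # assumed default construction

# All memories for this scope, ordered by created_at DESC
rows = history.get_memories_for_scope(
    npc="sibiji",
    team="core",
    directory_path="/home/user/project",
)

# Optionally restrict to a single lifecycle status
approved = history.get_memories_for_scope(
    npc="sibiji",
    team="core",
    directory_path="/home/user/project",
    status="human-approved",
)
```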
npcpy/memory/memory_processor.py CHANGED
@@ -17,9 +17,7 @@ class MemoryItem:
     model: str
     provider: str
 
-
 def memory_approval_ui(memories: List[Dict]) -> List[Dict]:
-    """Simple CLI interface for memory approval"""
     if not memories:
         return []
 
@@ -29,37 +27,55 @@ def memory_approval_ui(memories: List[Dict]) -> List[Dict]:
     for i, memory in enumerate(memories, 1):
         print(f"\n--- Memory {i}/{len(memories)} ---")
         print(f"NPC: {memory['npc']}")
-        print(f"Content: {memory['content'][:200]}{'...' if len(memory['content']) > 200 else ''}")
+        content_preview = memory['content'][:200]
+        if len(memory['content']) > 200:
+            content_preview += '...'
+        print(f"Content: {content_preview}")
 
         while True:
-            choice = input("(a)pprove, (r)eject, (e)dit, (s)kip, (q)uit, (A)pprove all: ").strip().lower()
+            choice = input(
+                "(a)pprove, (r)eject, (e)dit, (s)kip | "
+                "(A)ll approve, (R)all reject, (S)all skip: "
+            ).strip().lower()
 
            if choice == 'a':
-                approvals.append({"memory_id": memory['memory_id'], "decision": "human-approved"})
+                approvals.append({
+                    "memory_id": memory['memory_id'],
+                    "decision": "human-approved"
+                })
                break
            elif choice == 'r':
-                approvals.append({"memory_id": memory['memory_id'], "decision": "human-rejected"})
+                approvals.append({
+                    "memory_id": memory['memory_id'],
+                    "decision": "human-rejected"
+                })
                break
            elif choice == 'e':
                edited = input("Edit memory: ").strip()
                if edited:
                    approvals.append({
-                        "memory_id": memory['memory_id'],
+                        "memory_id": memory['memory_id'],
                        "decision": "human-edited",
                        "final_memory": edited
                    })
                    break
            elif choice == 's':
                break
-            elif choice == 'q':
-                return approvals
            elif choice == 'A':
-
                for remaining_memory in memories[i-1:]:
                    approvals.append({
-                        "memory_id": remaining_memory['memory_id'],
+                        "memory_id": remaining_memory['memory_id'],
                        "decision": "human-approved"
                    })
                return approvals
+            elif choice == 'R':
+                for remaining_memory in memories[i-1:]:
+                    approvals.append({
+                        "memory_id": remaining_memory['memory_id'],
+                        "decision": "human-rejected"
+                    })
+                return approvals
+            elif choice == 'S':
+                return approvals
 
    return approvals
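For reference, a minimal call of the updated approval UI; it only requires the `memory_id`, `npc`, and `content` keys read above (the values are illustrative):

```python
memories = [
    {"memory_id": 1, "npc": "sibiji", "content": "User prefers tabs over spaces"},
    {"memory_id": 2, "npc": "sibiji", "content": "Project database is PostgreSQL"},
]

# Prompts on stdin per memory and returns decisions such as
# [{"memory_id": 1, "decision": "human-approved"}, ...]
approvals = memory_approval_ui(memories)
```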
npcpy/npc_sysenv.py CHANGED
@@ -63,7 +63,7 @@ warnings.filterwarnings("ignore", module="torch.serialization")
 os.environ["PYTHONWARNINGS"] = "ignore"
 os.environ["SDL_AUDIODRIVER"] = "dummy"
 
-def check_internet_connection(timeout=0.5):
+def check_internet_connection(timeout=5):
     """
     Checks for internet connectivity by trying to connect to a well-known host.
     """
@@ -87,16 +87,78 @@ def get_locally_available_models(project_directory, airplane_mode=False):
             key, value = line.split("=", 1)
             env_vars[key.strip()] = value.strip().strip("\"'")
 
-
     internet_available = check_internet_connection()
     if not internet_available:
-        logging.info("No internet connection detected. External API calls will be skipped (effective airplane_mode).")
-
+        logging.info(
+            "No internet connection detected. "
+            "External API calls will be skipped."
+        )
        airplane_mode = True
    else:
-        logging.info("Internet connection detected. Proceeding based on 'airplane_mode' parameter.")
+        logging.info(
+            "Internet connection detected. "
+            "Proceeding based on 'airplane_mode' parameter."
+        )
+
+    custom_providers = load_custom_providers()
+
+    for provider_name, config in custom_providers.items():
+        api_key_var = config.get('api_key_var')
+        if not api_key_var:
+            api_key_var = f"{provider_name.upper()}_API_KEY"
+
+        if api_key_var in env_vars or os.environ.get(api_key_var):
+            try:
+                import requests
+
+                def fetch_custom_models():
+                    base_url = config.get('base_url', '')
+                    headers = config.get('headers', {})
+
+                    api_key = env_vars.get(api_key_var) or \
+                        os.environ.get(api_key_var)
+                    if api_key:
+                        headers['Authorization'] = f'Bearer {api_key}'
+
+                    models_endpoint = f"{base_url.rstrip('/')}/models"
+                    response = requests.get(
+                        models_endpoint,
+                        headers=headers,
+                        timeout=3.5
+                    )
+
+                    if response.status_code == 200:
+                        data = response.json()
+
+                        if isinstance(data, dict) and 'data' in data:
+                            return [
+                                m['id'] for m in data['data']
+                                if 'id' in m
+                            ]
+                        elif isinstance(data, list):
+                            return [
+                                m['id'] for m in data
+                                if isinstance(m, dict) and 'id' in m
+                            ]
+                    return []
+
+                models = fetch_custom_models()
+                for model in models:
+                    available_models[model] = provider_name
+
+                logging.info(
+                    f"Loaded {len(models)} models "
+                    f"from custom provider '{provider_name}'"
+                )
+
+            except Exception as e:
+                logging.warning(
+                    f"Failed to load models from "
+                    f"custom provider '{provider_name}': {e}"
+                )
 
 
+    airplane_mode = False
     if not airplane_mode:
         timeout_seconds = 3.5
 
@@ -802,50 +864,116 @@ def load_env_from_execution_dir() -> None:
 
 
 
+
 def lookup_provider(model: str) -> str:
     """
-    Function Description:
-    This function determines the provider based on the model name.
+    Determine the provider based on the model name.
+    Checks custom providers first, then falls back to known providers.
+
     Args:
-        model (str): The model name.
-    Keyword Args:
-        None
+        model: The model name
+
     Returns:
-        str: The provider based on the model name.
+        The provider name or None if not found
     """
+    custom_providers = load_custom_providers()
+
+    for provider_name, config in custom_providers.items():
+        if model.startswith(f"{provider_name}-"):
+            return provider_name
+
+        try:
+            import requests
+            api_key_var = config.get('api_key_var') or \
+                f"{provider_name.upper()}_API_KEY"
+            api_key = os.environ.get(api_key_var)
+
+            if api_key:
+                base_url = config.get('base_url', '')
+                headers = config.get('headers', {})
+                headers['Authorization'] = f'Bearer {api_key}'
+
+                models_endpoint = f"{base_url.rstrip('/')}/models"
+                response = requests.get(
+                    models_endpoint,
+                    headers=headers,
+                    timeout=1.0
+                )
+
+                if response.status_code == 200:
+                    data = response.json()
+                    models = []
+
+                    if isinstance(data, dict) and 'data' in data:
+                        models = [m['id'] for m in data['data']]
+                    elif isinstance(data, list):
+                        models = [m['id'] for m in data]
+
+                    if model in models:
+                        return provider_name
+        except:
+            pass
+
     if model == "deepseek-chat" or model == "deepseek-reasoner":
         return "deepseek"
+
     ollama_prefixes = [
-        "llama",
-        "deepseek",
-        "qwen",
-        "llava",
-        "phi",
-        "mistral",
-        "mixtral",
-        "dolphin",
-        "codellama",
-        "gemma",
-    ]
+        "llama", "deepseek", "qwen", "llava",
+        "phi", "mistral", "mixtral", "dolphin",
+        "codellama", "gemma",]
    if any(model.startswith(prefix) for prefix in ollama_prefixes):
        return "ollama"
 
-
    openai_prefixes = ["gpt-", "dall-e-", "whisper-", "o1"]
    if any(model.startswith(prefix) for prefix in openai_prefixes):
        return "openai"
 
-
    if model.startswith("claude"):
        return "anthropic"
    if model.startswith("gemini"):
        return "gemini"
    if "diffusion" in model:
        return "diffusers"
+
    return None
+
+
+def load_custom_providers():
+    """
+    Load custom provider configurations from .npcshrc
+
+    Returns:
+        dict: Custom provider configurations keyed by provider name
+    """
+    custom_providers = {}
+    npcshrc_path = os.path.expanduser("~/.npcshrc")
+
+    if os.path.exists(npcshrc_path):
+        with open(npcshrc_path, "r") as f:
+            for line in f:
+                line = line.split("#")[0].strip()
+                if "CUSTOM_PROVIDER_" in line and "=" in line:
+                    key, value = line.split("=", 1)
+                    key = key.strip().replace("export ", "")
+                    value = value.strip().strip("\"'")
+
+                    try:
+                        config = json.loads(value)
+                        provider_name = key.replace(
+                            "CUSTOM_PROVIDER_", ""
+                        ).lower()
+                        custom_providers[provider_name] = config
+                    except json.JSONDecodeError as e:
+                        logging.warning(
+                            f"Failed to parse custom provider {key}: {e}"
+                        )
+                        continue
+
+    return custom_providers
 load_env_from_execution_dir()
 deepseek_api_key = os.getenv("DEEPSEEK_API_KEY", None)
 gemini_api_key = os.getenv("GEMINI_API_KEY", None)
 
 anthropic_api_key = os.getenv("ANTHROPIC_API_KEY", None)
 openai_api_key = os.getenv("OPENAI_API_KEY", None)
+
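The new `load_custom_providers` reads `CUSTOM_PROVIDER_*` lines from `~/.npcshrc` whose value is a JSON object; the keys the loader and `lookup_provider` consult are `base_url`, `headers`, and `api_key_var`. A sketch of a matching entry and how it parses (the provider name, URL, and key variable are illustrative, not from the diff):

```python
import json

# Example line in ~/.npcshrc (would register a provider named "mylab"):
#   export CUSTOM_PROVIDER_MYLAB='{"base_url": "https://llm.mylab.example/v1", "api_key_var": "MYLAB_API_KEY"}'

value = '{"base_url": "https://llm.mylab.example/v1", "api_key_var": "MYLAB_API_KEY"}'
config = json.loads(value)

# load_custom_providers() lowercases the suffix after CUSTOM_PROVIDER_,
# so lookup_provider later matches models by the "mylab-" prefix or by
# the provider's {base_url}/models listing.
custom_providers = {"mylab": config}
```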
npcpy/serve.py CHANGED
@@ -1942,7 +1942,7 @@ def stream():
 
            print('.', end="", flush=True)
            dot_count += 1
-        if "hf.co" in model or provider == 'ollama':
+        if "hf.co" in model or provider == 'ollama' and 'gpt-oss' not in model:
            chunk_content = response_chunk["message"]["content"] if "message" in response_chunk and "content" in response_chunk["message"] else ""
            if "message" in response_chunk and "tool_calls" in response_chunk["message"]:
                for tool_call in response_chunk["message"]["tool_calls"]:
@@ -1959,7 +1959,9 @@ def stream():
            if chunk_content:
                complete_response.append(chunk_content)
                chunk_data = {
-                    "id": None, "object": None, "created": response_chunk["created_at"], "model": response_chunk["model"],
+                    "id": None, "object": None,
+                    "created": response_chunk["created_at"] or datetime.datetime.now(),
+                    "model": response_chunk["model"],
                    "choices": [{"index": 0, "delta": {"content": chunk_content, "role": response_chunk["message"]["role"]}, "finish_reason": response_chunk.get("done_reason")}]
                }
                yield f"data: {json.dumps(chunk_data)}\n\n"
npcpy-1.2.22.dist-info/METADATA → npcpy-1.2.24.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: npcpy
-Version: 1.2.22
+Version: 1.2.24
 Summary: npcpy is the premier open-source library for integrating LLMs and Agents into python systems.
 Home-page: https://github.com/NPC-Worldwide/npcpy
 Author: Christopher Agostino
@@ -399,6 +399,152 @@ the citizens, being directed by simple and incontestable principles, may tend to
 maintenance of the Constitution, and the general happiness. ''')
 # it will play the audio automatically.
 ```
+## Fine-Tuning and Evolution
+
+`npcpy` provides modular tools for building adaptive AI systems through supervised fine-tuning, reinforcement learning, and genetic algorithms.
+
+See examples/fine_tuning_demo.py for a complete working example.
+
+### Supervised Fine-Tuning (SFT)
+
+Train models on specific tasks using simple X, y pairs:
+```python
+from npcpy.ft.sft import run_sft, load_sft_model, predict_sft
+
+X_train = ["translate to french: hello", "translate to french: goodbye"]
+y_train = ["bonjour", "au revoir"]
+
+model_path = run_sft(X_train, y_train)
+
+model, tokenizer = load_sft_model(model_path)
+response = predict_sft(model, tokenizer, "translate to french: thanks")
+```
+
+### Unsupervised Fine-Tuning (USFT)
+
+Adapt models to domain-specific text corpora without labels:
+```python
+from npcpy.ft.usft import run_usft, load_corpus_from_hf, USFTConfig
+
+texts = load_corpus_from_hf("tiny_shakespeare", split="train[:1000]")
+
+model_path = run_usft(
+    texts,
+    config=USFTConfig(
+        output_model_path="models/shakespeare",
+        num_train_epochs=3
+    )
+)
+```
+
+Train on your own text corpus:
+```python
+domain_texts = [
+    "Your domain-specific text 1",
+    "Your domain-specific text 2",
+] * 100
+
+model_path = run_usft(domain_texts)
+```
+
+### Diffusion Fine-Tuning
+```python
+from npcpy.ft.diff import train_diffusion, generate_image, DiffusionConfig
+
+image_paths = ["img1.png", "img2.png", "img3.png"]
+captions = ["a cat", "a dog", "a bird"]
+
+model_path = train_diffusion(
+    image_paths,
+    captions,
+    config=DiffusionConfig(
+        num_epochs=100,
+        batch_size=4
+    )
+)
+
+generated = generate_image(
+    model_path,
+    prompt="a white square",
+    image_size=128
+)
+```
+
+Resume training from a checkpoint:
+```python
+model_path = train_diffusion(
+    image_paths,
+    captions,
+    config,
+    resume_from="models/diffusion/checkpoints/checkpoint-epoch10-step1000.pt"
+)
+```
+
+### Reinforcement Learning (RL)
+
+Collect agent traces and train with DPO based on reward signals:
+```python
+from npcpy.ft.rl import collect_traces, run_rl_training
+from npcpy.npc_compiler import NPC
+
+tasks = [
+    {'prompt': 'Solve 2+2', 'expected': '4'},
+    {'prompt': 'Solve 5+3', 'expected': '8'}
+]
+
+agents = [
+    NPC(name="farlor", primary_directive="Be concise",
+        model="qwen3:0.6b", provider="ollama"),
+    NPC(name="tedno", primary_directive="Show your work",
+        model="qwen3:0.6b", provider="ollama")
+]
+
+def reward_fn(trace):
+    if trace['task_metadata']['expected'] in trace['final_output']:
+        return 1.0
+    return 0.0
+
+adapter_path = run_rl_training(tasks, agents, reward_fn)
+```
+
+### Genetic Evolution
+
+Evolve populations of knowledge graphs or model ensembles:
+```python
+from npcpy.ft.ge import GeneticEvolver, GAConfig
+
+config = GAConfig(
+    population_size=20,
+    generations=50,
+    mutation_rate=0.15
+)
+
+evolver = GeneticEvolver(
+    fitness_fn=your_fitness_function,
+    mutate_fn=your_mutation_function,
+    crossover_fn=your_crossover_function,
+    initialize_fn=your_init_function,
+    config=config
+)
+
+best_individual = evolver.run()
+```
+
+### Smart Model Ensembler and Response Router
+
+Build fast intuitive responses with fallback to reasoning:
+```python
+from npcpy.ft.model_ensembler import (
+    ResponseRouter,
+    create_model_genome
+)
+
+genome = create_model_genome(['math', 'code', 'factual'])
+router = ResponseRouter(fast_threshold=0.8)
+
+result = router.route_query("What is 2+2?", genome)
+
+if result['used_fast_path']:
+    print("Fast gut reaction")
+elif result['used_ensemble']:
+    print("Ensemble voting")
+else:
+    print("Full reasoning")
+```
+The ensembler is meant to mimic human cognition: pattern-matched gut reactions (Kahneman's System 1) for familiar queries, falling back to deliberate reasoning (System 2) for novel problems. Genetic algorithms evolve both the knowledge structures and the model specializations over time.
+
 ## Serving an NPC Team
 
 `npcpy` includes a built-in Flask server that makes it easy to deploy NPC teams for production use. You can serve teams with tools, jinxs, and complex workflows that frontends can interact with via REST APIs.
npcpy-1.2.22.dist-info/RECORD → npcpy-1.2.24.dist-info/RECORD CHANGED
@@ -1,10 +1,10 @@
 npcpy/__init__.py,sha256=9imxFtK74_6Rw9rz0kyMnZYl_voPb569tkTlYLt0Urg,131
-npcpy/llm_funcs.py,sha256=tvcZuQEcIUJClwEJQXBF6ArEVjSuXt1jAcZOcnYWsVQ,85101
+npcpy/llm_funcs.py,sha256=UkesCnRmclEoqBZPMZa2hKoSTjFzjxDCzPGKgeDegPQ,85101
 npcpy/main.py,sha256=RWoRIj6VQLxKdOKvdVyaq2kwG35oRpeXPvp1CAAoG-w,81
 npcpy/npc_compiler.py,sha256=10vu-9WUmlVzaFM_hMJH28iNS1IJXQP3Rb5RT1rZmpA,95326
-npcpy/npc_sysenv.py,sha256=lPYlKM_TeR4l4-Jcgiqq3CCge8b2oFHdfISD4L_G7eo,30308
+npcpy/npc_sysenv.py,sha256=H_A7BajE41W_r6TKt-uSUtMqruTHXIngYaRSPGQWFXE,35241
 npcpy/npcs.py,sha256=eExuVsbTfrRobTRRptRpDm46jCLWUgbvy4_U7IUQo-c,744
-npcpy/serve.py,sha256=O1dxISi0nQ6jsSOSxBXsULgkltnIcyBS6Z0AjfWmuXA,100296
+npcpy/serve.py,sha256=kc3j3puHJJEwUnXMY9nB6we93q1u32gOcRys87abJsM,100400
 npcpy/tools.py,sha256=A5_oVmZkzGnI3BI-NmneuxeXQq-r29PbpAZP4nV4jrc,5303
 npcpy/data/__init__.py,sha256=1tcoChR-Hjn905JDLqaW9ElRmcISCTJdE7BGXPlym2Q,642
 npcpy/data/audio.py,sha256=goon4HfsYgx0bI-n1lhkrzWPrJoejJlycXcB0P62pyk,11280
@@ -15,11 +15,13 @@ npcpy/data/text.py,sha256=jP0a1qZZaSJdK-LdZTn2Jjdxqmkd3efxDLEoxflJQeY,5010
 npcpy/data/video.py,sha256=aPUgj0fA_lFQ7Jf94-PutggCF4695FVCh3q5mnVthvI,574
 npcpy/data/web.py,sha256=ARGoVKUlQmaiX0zJbSvvFmRCwOv_Z7Pcan9c5GxYObQ,5117
 npcpy/ft/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-npcpy/ft/diff.py,sha256=R3Qo6v0-6M1iI0wiXhUzyuYI2ja0q_0i9bE0z3coxzU,28
-npcpy/ft/ge.py,sha256=my5LtGyVTT40V0i1h9FR-tFFA1FHSga-PeCCgUX1UUI,61
+npcpy/ft/diff.py,sha256=wYFRY_2p-B5xVqO7NDyhJbjQsUt4PrwOfgpE1Icghmk,2906
+npcpy/ft/ge.py,sha256=0VzIiXq2wCzGcK1x0Wd-myJ3xRf-FNaPg0GkHEZegUM,3552
 npcpy/ft/memory_trainer.py,sha256=QZPznxEEwXbOGroHdMUMa5xpqlNwgV6nqOazI2xgrnQ,6635
-npcpy/ft/rl.py,sha256=l3RUkEJe4b2yB6pildveu2LJymtNq0F17COwf_CCq3U,34
-npcpy/ft/sft.py,sha256=i4ENygRPArbLWN4XZZuBnPWaehs8M-J68JB_mewGJHI,62
+npcpy/ft/model_ensembler.py,sha256=BRX4hJ_rvF1vKTzjMhlahZqPttUgc3PqmzUJDqIfIps,10038
+npcpy/ft/rl.py,sha256=EcPD8t5MFg0zYWSS-A7KJ9bWd0qCTsL5SSvDxV556Z4,9245
+npcpy/ft/sft.py,sha256=iPCP4sM2Nfri0rif_oR1uFInhqY8HIILwT-iQGk7f10,6064
+npcpy/ft/usft.py,sha256=O025GGYGZQf2ZVLowyAmBwh5bJyuy2dUAM6v03YcboY,3435
 npcpy/gen/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 npcpy/gen/audio_gen.py,sha256=w4toESu7nmli1T5FOwRRCGC_QK9W-SMWknYYkbRv9jE,635
 npcpy/gen/embeddings.py,sha256=QStTJ2ELiC379OEZsLEgGGIIFD267Y8zQchs7HRn2Zg,2089
@@ -27,10 +29,10 @@ npcpy/gen/image_gen.py,sha256=ln71jmLoJHekbZYDJpTe5DtOamVte9gjr2BPQ1DzjMQ,14955
 npcpy/gen/response.py,sha256=dK0Ux1_0GHo4gOfSHrrp34Ub4YJ-88NjFZfaG3kSrB0,28940
 npcpy/gen/video_gen.py,sha256=JMp2s2qMp5uy0rOgv6BRZ7nkQI4vdT1hbJ2nSu4s-KA,3243
 npcpy/memory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-npcpy/memory/command_history.py,sha256=Ww7vZTSjQDuElQXuOjsvu7NTljOLAg07QIFrfKARpVg,45562
+npcpy/memory/command_history.py,sha256=2VdmNW5VRpMrOkbdrMsgn5p3mvuJHNnzGHnIUEM8XMI,46279
 npcpy/memory/kg_vis.py,sha256=TrQQCRh_E7Pyr-GPAHLSsayubAfGyf4HOEFrPB6W86Q,31280
 npcpy/memory/knowledge_graph.py,sha256=2XpIlsyPdAOnzQ6kkwP6MWPGwL3P6V33_3suNJYMMJE,48681
-npcpy/memory/memory_processor.py,sha256=bLfzT-uDgwNegs1hVBqW3Hl2fYtdmFQbdc5To_f4i5E,2106
+npcpy/memory/memory_processor.py,sha256=6PfVnSBA9ag5EhHJinXoODfEPTlDDoaT0PtCCuZO6HI,2598
 npcpy/memory/search.py,sha256=glN6WYzaixcoDphTEHAXSMX3vKZGjR12Jx9YVL_gYfE,18433
 npcpy/mix/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 npcpy/mix/debate.py,sha256=lQXxC7nl6Rwyf7HIYrsVQILMUmYYx55Tjt2pkTg56qY,9019
@@ -45,8 +47,8 @@ npcpy/work/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 npcpy/work/desktop.py,sha256=F3I8mUtJp6LAkXodsh8hGZIncoads6c_2Utty-0EdDA,2986
 npcpy/work/plan.py,sha256=QyUwg8vElWiHuoS-xK4jXTxxHvkMD3VkaCEsCmrEPQk,8300
 npcpy/work/trigger.py,sha256=P1Y8u1wQRsS2WACims_2IdkBEar-iBQix-2TDWoW0OM,9948
-npcpy-1.2.22.dist-info/licenses/LICENSE,sha256=j0YPvce7Ng9e32zYOu0EmXjXeJ0Nwawd0RA3uSGGH4E,1070
-npcpy-1.2.22.dist-info/METADATA,sha256=wCegeQ_fbKicQoqXvR4E08dHC_cf2yl_0711U_rCeHk,26025
-npcpy-1.2.22.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-npcpy-1.2.22.dist-info/top_level.txt,sha256=g1pbSvrOOncB74Bg5-J0Olg4V0A5VzDw-Xz5YObq8BU,6
-npcpy-1.2.22.dist-info/RECORD,,
+npcpy-1.2.24.dist-info/licenses/LICENSE,sha256=j0YPvce7Ng9e32zYOu0EmXjXeJ0Nwawd0RA3uSGGH4E,1070
+npcpy-1.2.24.dist-info/METADATA,sha256=yvWvzVYXVN4jzBRdSeDIGPWWEupFyzSIsAYKc88M5i0,29885
+npcpy-1.2.24.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+npcpy-1.2.24.dist-info/top_level.txt,sha256=g1pbSvrOOncB74Bg5-J0Olg4V0A5VzDw-Xz5YObq8BU,6
+npcpy-1.2.24.dist-info/RECORD,,