npcpy 1.3.17__tar.gz → 1.3.19__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {npcpy-1.3.17/npcpy.egg-info → npcpy-1.3.19}/PKG-INFO +1 -1
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/data/web.py +0 -1
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/memory/command_history.py +14 -5
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/ml_funcs.py +61 -16
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/npc_array.py +149 -1
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/npc_compiler.py +23 -12
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/npc_sysenv.py +156 -6
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/serve.py +124 -39
- {npcpy-1.3.17 → npcpy-1.3.19/npcpy.egg-info}/PKG-INFO +1 -1
- {npcpy-1.3.17 → npcpy-1.3.19}/setup.py +1 -1
- {npcpy-1.3.17 → npcpy-1.3.19}/tests/test_ml_funcs.py +58 -16
- {npcpy-1.3.17 → npcpy-1.3.19}/tests/test_npc_array.py +111 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/tests/test_npc_compiler.py +208 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/tests/test_npc_sysenv.py +66 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/LICENSE +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/MANIFEST.in +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/README.md +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/__init__.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/build_funcs.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/data/__init__.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/data/audio.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/data/data_models.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/data/image.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/data/load.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/data/text.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/data/video.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/ft/__init__.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/ft/diff.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/ft/ge.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/ft/memory_trainer.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/ft/model_ensembler.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/ft/rl.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/ft/sft.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/ft/usft.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/gen/__init__.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/gen/audio_gen.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/gen/embeddings.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/gen/image_gen.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/gen/ocr.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/gen/response.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/gen/video_gen.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/gen/world_gen.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/llm_funcs.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/main.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/memory/__init__.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/memory/kg_vis.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/memory/knowledge_graph.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/memory/memory_processor.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/memory/search.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/mix/__init__.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/mix/debate.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/npcs.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/sql/__init__.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/sql/ai_function_tools.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/sql/database_ai_adapters.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/sql/database_ai_functions.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/sql/model_runner.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/sql/npcsql.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/sql/sql_model_compiler.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/tools.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/work/__init__.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/work/browser.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/work/desktop.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/work/plan.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy/work/trigger.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy.egg-info/SOURCES.txt +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy.egg-info/dependency_links.txt +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy.egg-info/requires.txt +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/npcpy.egg-info/top_level.txt +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/setup.cfg +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/tests/test_audio.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/tests/test_browser.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/tests/test_build_funcs.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/tests/test_command_history.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/tests/test_data_models.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/tests/test_diff.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/tests/test_documentation_examples.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/tests/test_genetic_evolver.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/tests/test_image.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/tests/test_llm_funcs.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/tests/test_load.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/tests/test_memory_processor.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/tests/test_model_runner.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/tests/test_npcsql.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/tests/test_response.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/tests/test_serve.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/tests/test_sql_adapters.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/tests/test_sql_compiler.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/tests/test_sql_functions.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/tests/test_text.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/tests/test_tools.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/tests/test_video.py +0 -0
- {npcpy-1.3.17 → npcpy-1.3.19}/tests/test_web.py +0 -0
{npcpy-1.3.17 → npcpy-1.3.19}/npcpy/memory/command_history.py +14 -5

```diff
@@ -611,7 +611,9 @@ class CommandHistory:
             Column('reasoning_content', Text),  # For thinking tokens / chain of thought
             Column('tool_calls', Text),  # JSON array of tool calls made by assistant
             Column('tool_results', Text),  # JSON array of tool call results
-            Column('parent_message_id', String(50))  # Links assistant response to parent user message for broadcast grouping
+            Column('parent_message_id', String(50)),  # Links assistant response to parent user message for broadcast grouping
+            Column('device_id', String(255)),  # UUID of the device that created this message
+            Column('device_name', String(255))  # Human-readable device name
         )

         Table('message_attachments', metadata,
@@ -867,6 +869,8 @@ class CommandHistory:
         tool_calls=None,
         tool_results=None,
         parent_message_id=None,
+        device_id=None,
+        device_name=None,
     ):
         if isinstance(content, (dict, list)):
             content = json.dumps(content, cls=CustomJSONEncoder)
@@ -882,14 +886,15 @@ class CommandHistory:

         stmt = """
         INSERT INTO conversation_history
-        (message_id, timestamp, role, content, conversation_id, directory_path, model, provider, npc, team, reasoning_content, tool_calls, tool_results, parent_message_id)
-        VALUES (:message_id, :timestamp, :role, :content, :conversation_id, :directory_path, :model, :provider, :npc, :team, :reasoning_content, :tool_calls, :tool_results, :parent_message_id)
+        (message_id, timestamp, role, content, conversation_id, directory_path, model, provider, npc, team, reasoning_content, tool_calls, tool_results, parent_message_id, device_id, device_name)
+        VALUES (:message_id, :timestamp, :role, :content, :conversation_id, :directory_path, :model, :provider, :npc, :team, :reasoning_content, :tool_calls, :tool_results, :parent_message_id, :device_id, :device_name)
         """
         params = {
             "message_id": message_id, "timestamp": timestamp, "role": role, "content": content,
             "conversation_id": conversation_id, "directory_path": normalized_directory_path, "model": model,
             "provider": provider, "npc": npc, "team": team, "reasoning_content": reasoning_content,
-            "tool_calls": tool_calls, "tool_results": tool_results, "parent_message_id": parent_message_id
+            "tool_calls": tool_calls, "tool_results": tool_results, "parent_message_id": parent_message_id,
+            "device_id": device_id, "device_name": device_name
         }
         with self.engine.begin() as conn:
             conn.execute(text(stmt), params)
@@ -1461,6 +1466,8 @@ def save_conversation_message(
     tool_results: List[Dict] = None,
    parent_message_id: str = None,
    skip_if_exists: bool = True,
+    device_id: str = None,
+    device_name: str = None,
 ):
    """
    Saves a conversation message linked to a conversation ID with optional attachments.
@@ -1495,7 +1502,9 @@ def save_conversation_message(
        reasoning_content=reasoning_content,
        tool_calls=tool_calls,
        tool_results=tool_results,
-        parent_message_id=parent_message_id
+        parent_message_id=parent_message_id,
+        device_id=device_id,
+        device_name=device_name)

def retrieve_last_conversation(
    command_history: CommandHistory, conversation_id: str
) -> str:
```
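Downstream callers can now stamp each message with its origin device. A minimal sketch of the new keyword arguments (the `CommandHistory()` constructor call and the uuid/hostname choices are illustrative assumptions, not shown in this diff — only the `device_id`/`device_name` keywords are):

```python
import platform
import uuid

from npcpy.memory.command_history import CommandHistory, save_conversation_message

history = CommandHistory()  # assumption: default constructor opens the standard history DB

save_conversation_message(
    history,
    conversation_id="conv-123",      # illustrative values; the diff only
    role="user",                     # confirms the two new keywords below
    content="hello from my laptop",
    device_id=str(uuid.uuid4()),     # UUID of the originating device
    device_name=platform.node(),     # human-readable name, e.g. "work-laptop"
)
```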
{npcpy-1.3.17 → npcpy-1.3.19}/npcpy/ml_funcs.py +61 -16

```diff
@@ -16,7 +16,6 @@ Same interface pattern as llm_funcs:
 from __future__ import annotations
 import copy
 import itertools
-import pickle
 from concurrent.futures import ThreadPoolExecutor, as_completed
 from dataclasses import dataclass, field
 from typing import Any, Callable, Dict, List, Optional, Tuple, Union
@@ -708,21 +707,67 @@ def cross_validate(

 # ==================== Utility Functions ====================

-def serialize_model(model: Any, path: str =
-    """
-    [remaining removed lines are blank/elided in the registry diff view]
+def serialize_model(model: Any, path: str, format: str = "joblib") -> None:
+    """
+    Serialize model to file using safe formats (no pickle).
+
+    Args:
+        model: The model to serialize
+        path: File path to write to (required)
+        format: Serialization format - "joblib" (default) or "safetensors"
+
+    Raises:
+        ImportError: If required library is not installed
+        ValueError: If format is not supported for the model type
+    """
+    if format == "safetensors":
+        from safetensors.torch import save_file
+        if hasattr(model, 'state_dict'):
+            save_file(model.state_dict(), path)
+        else:
+            raise ValueError("safetensors format requires model with state_dict (PyTorch)")
+    elif format == "joblib":
+        import joblib
+        joblib.dump(model, path)
+    else:
+        raise ValueError(f"Unsupported format: {format}. Use 'joblib' or 'safetensors'.")
+
+
+def deserialize_model(path: str, format: str = "auto") -> Any:
+    """
+    Deserialize model from file using safe formats (no pickle).
+
+    Args:
+        path: File path to load from
+        format: "auto" (detect from extension), "joblib", or "safetensors"
+
+    Returns:
+        The deserialized model
+
+    Raises:
+        ImportError: If required library is not installed
+        ValueError: If format cannot be determined
+    """
+    # Auto-detect format from extension
+    if format == "auto":
+        if path.endswith('.safetensors'):
+            format = "safetensors"
+        elif path.endswith('.joblib'):
+            format = "joblib"
+        else:
+            raise ValueError(
+                f"Cannot auto-detect format for {path}. "
+                "Use .joblib or .safetensors extension, or specify format explicitly."
+            )
+
+    if format == "safetensors":
+        from safetensors.torch import load_file
+        return load_file(path)
+    elif format == "joblib":
+        import joblib
+        return joblib.load(path)
+    else:
+        raise ValueError(f"Unsupported format: {format}. Use 'joblib' or 'safetensors'.")


 def get_model_params(model: Any) -> Dict[str, Any]:
```
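With pickle gone, round-tripping a scikit-learn estimator goes through joblib. A quick sketch against the new API (assumes scikit-learn and joblib are installed; their dependency status is not shown in this diff):

```python
from sklearn.linear_model import LogisticRegression
from npcpy.ml_funcs import serialize_model, deserialize_model

model = LogisticRegression().fit([[0.0], [1.0]], [0, 1])

# joblib path: the file extension drives auto-detection on load
serialize_model(model, "clf.joblib")        # format defaults to "joblib"
restored = deserialize_model("clf.joblib")  # format="auto" matches .joblib

# PyTorch modules can use format="safetensors" instead, but note the
# asymmetry in the diffed code: deserialize_model then returns a plain
# state_dict, not a reconstructed module.
```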
{npcpy-1.3.17 → npcpy-1.3.19}/npcpy/npc_array.py +149 -1

```diff
@@ -20,7 +20,6 @@ Example:
 from __future__ import annotations
 import copy
 import itertools
-import pickle
 from concurrent.futures import ThreadPoolExecutor, as_completed
 from dataclasses import dataclass, field
 from typing import (
@@ -47,6 +46,7 @@ class OpType(Enum):
     REDUCE = "reduce"
     CHAIN = "chain"
     EVOLVE = "evolve"
+    JINX = "jinx"  # Execute a Jinx workflow across models
@@ -328,6 +328,61 @@ class NPCArray:

         return cls(specs)

+    @classmethod
+    def from_matrix(
+        cls,
+        matrix: List[Dict[str, Any]]
+    ) -> 'NPCArray':
+        """
+        Create NPCArray from a matrix of model configurations.
+
+        This is particularly useful for defining model arrays in Jinx templates
+        where you want explicit control over each model configuration.
+
+        Args:
+            matrix: List of model configuration dicts. Each dict should have:
+                - 'model': model name/reference (required)
+                - 'provider': provider name (optional)
+                - 'type': model type - 'llm', 'npc', 'sklearn', 'torch' (default: 'llm')
+                - Any additional config parameters
+
+        Example:
+            >>> # In a Jinx template, define a matrix of models:
+            >>> matrix = [
+            ...     {'model': 'gpt-4', 'provider': 'openai', 'temperature': 0.7},
+            ...     {'model': 'claude-3-opus', 'provider': 'anthropic', 'temperature': 0.5},
+            ...     {'model': 'llama3.2', 'provider': 'ollama', 'temperature': 0.8},
+            ... ]
+            >>> arr = NPCArray.from_matrix(matrix)
+
+            >>> # Mixed model types:
+            >>> matrix = [
+            ...     {'model': 'gpt-4', 'type': 'llm', 'provider': 'openai'},
+            ...     {'model': my_npc, 'type': 'npc'},
+            ...     {'model': sklearn_model, 'type': 'sklearn'},
+            ... ]
+        """
+        specs = []
+        for config in matrix:
+            model_type = config.get('type', 'llm')
+            model_ref = config.get('model')
+            provider = config.get('provider')
+
+            # Extract config params (everything except type, model, provider)
+            extra_config = {
+                k: v for k, v in config.items()
+                if k not in ('type', 'model', 'provider')
+            }
+
+            specs.append(ModelSpec(
+                model_type=model_type,
+                model_ref=model_ref,
+                provider=provider,
+                config=extra_config
+            ))
+
+        return cls(specs)
+
     # ==================== Properties ====================

     @property
@@ -490,6 +545,43 @@ class NPCArray:

         return NPCArray(self._specs, new_node)

+    def jinx(
+        self,
+        jinx_name: str,
+        inputs: Optional[Dict[str, Any]] = None,
+        **kwargs
+    ) -> 'LazyResult':
+        """
+        Execute a Jinx workflow across all models in the array.
+
+        Each model in the array will be used as the 'npc' context for the jinx,
+        allowing you to run the same workflow template with different models.
+
+        Args:
+            jinx_name: Name of the jinx workflow to execute (e.g., 'analyze', 'summarize')
+            inputs: Input values for the jinx template variables
+            **kwargs: Additional execution parameters
+
+        Returns:
+            LazyResult with workflow outputs from each model
+
+        Example:
+            >>> models = NPCArray.from_llms(['gpt-4', 'claude-3'])
+            >>> results = models.jinx('analyze', inputs={'topic': 'AI safety'}).collect()
+        """
+        new_node = GraphNode(
+            op_type=OpType.JINX,
+            params={
+                "jinx_name": jinx_name,
+                "inputs": inputs or {},
+                **kwargs
+            },
+            parents=[self._graph],
+            shape=(len(self._specs),)
+        )
+
+        return LazyResult(self._specs, new_node)
+

 class LazyResult:
     """
@@ -792,6 +884,7 @@ class GraphExecutor:
             OpType.REDUCE: self._exec_reduce,
             OpType.CHAIN: self._exec_chain,
             OpType.EVOLVE: self._exec_evolve,
+            OpType.JINX: self._exec_jinx,
         }

         handler = handlers.get(node.op_type)
@@ -1136,6 +1229,61 @@ class GraphExecutor:
             metadata={"operation": "evolve", "generation": 1}
         )

+    def _exec_jinx(self, node, specs, prompts, parents) -> ResponseTensor:
+        """Execute a Jinx workflow across models"""
+        from npcpy.npc_compiler import NPC, Jinx
+
+        jinx_name = node.params.get("jinx_name")
+        inputs = node.params.get("inputs", {})
+        extra_kwargs = {k: v for k, v in node.params.items()
+                        if k not in ("jinx_name", "inputs")}
+
+        results = []
+
+        def run_jinx_single(spec: ModelSpec) -> str:
+            """Run jinx for a single model spec"""
+            try:
+                if spec.model_type == "npc":
+                    # Use the NPC directly
+                    npc = spec.model_ref
+                else:
+                    # Create a temporary NPC with the model
+                    npc = NPC(
+                        name=f"array_npc_{spec.model_ref}",
+                        model=spec.model_ref,
+                        provider=spec.provider
+                    )
+
+                # Execute the jinx
+                result = npc.execute_jinx(
+                    jinx_name=jinx_name,
+                    input_values=inputs,
+                    **extra_kwargs
+                )
+                return result.get("output", str(result))
+            except Exception as e:
+                return f"Error: {e}"
+
+        if self.parallel and len(specs) > 1:
+            with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
+                futures = {executor.submit(run_jinx_single, spec): i
+                           for i, spec in enumerate(specs)}
+                results = [None] * len(specs)
+                for future in as_completed(futures):
+                    idx = futures[future]
+                    try:
+                        results[idx] = future.result()
+                    except Exception as e:
+                        results[idx] = f"Error: {e}"
+        else:
+            results = [run_jinx_single(spec) for spec in specs]
+
+        return ResponseTensor(
+            data=np.array(results, dtype=object),
+            model_specs=specs,
+            metadata={"operation": "jinx", "jinx_name": jinx_name, **inputs}
+        )
+

 def _compute_response_variance(responses: List[str]) -> float:
     """Compute semantic variance across responses"""
```
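Putting the two new pieces together: `from_matrix` builds the array, and `.jinx()` only queues a JINX graph node; execution happens on `collect()`, where `_exec_jinx` fans out across the specs (threaded when the executor runs in parallel mode). A sketch, assuming a jinx named `summarize` is discoverable by the temporary NPCs and the listed providers are configured:

```python
from npcpy.npc_array import NPCArray

# Explicit per-model configs, per the new from_matrix classmethod
models = NPCArray.from_matrix([
    {'model': 'gpt-4', 'provider': 'openai', 'temperature': 0.3},
    {'model': 'llama3.2', 'provider': 'ollama'},
])

# Lazy: this records a JINX node with the jinx name and inputs.
lazy = models.jinx('summarize', inputs={'text': 'Long document ...'})

# collect() triggers GraphExecutor._exec_jinx, which wraps each non-NPC
# spec in a temporary NPC and returns one output string per model
# ("Error: ..." strings on per-model failure, per the diffed handler).
results = lazy.collect()
```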
{npcpy-1.3.17 → npcpy-1.3.19}/npcpy/npc_compiler.py +23 -12

```diff
@@ -52,6 +52,7 @@ import fnmatch
 import subprocess
 from typing import Any, Dict, List, Optional, Union, Callable, Tuple
 from jinja2 import Environment, FileSystemLoader, Template, Undefined, DictLoader
+from jinja2.sandbox import SandboxedEnvironment
 from sqlalchemy import create_engine, text
 import npcpy as npy
 from npcpy.tools import auto_tools
@@ -231,7 +232,8 @@ def load_yaml_file(file_path):

     # First pass: render Jinja2 templates to produce valid YAML
     # This allows {% if %} and other control structures to work
-    [removed line elided in the registry diff view]
+    # Use SandboxedEnvironment to prevent template injection attacks
+    jinja_env = SandboxedEnvironment(undefined=SilentUndefined)
     # Configure tojson filter to handle SilentUndefined
     jinja_env.policies['json.dumps_function'] = _json_dumps_with_undefined
     template = jinja_env.from_string(content)
@@ -694,7 +696,8 @@ class Jinx:
                  jinja_env: Optional[Environment] = None):

         if jinja_env is None:
-            [removed line elided in the registry diff view]
+            # Use SandboxedEnvironment to prevent template injection attacks
+            jinja_env = SandboxedEnvironment(
                 loader=DictLoader({}),
                 undefined=SilentUndefined,
             )
@@ -771,21 +774,24 @@ class Jinx:

         self._log_debug(f"DEBUG: Executing step '{step_name}' with rendered code: {rendered_code}")

+        # Import NPCArray for array operations in jinx
+        from npcpy.npc_array import NPCArray, infer_matrix, ensemble_vote
+
         exec_globals = {
             "__builtins__": __builtins__,
             "npc": active_npc,
             "context": context,  # Pass context by reference
-            "math": math,
-            "random": random,
+            "math": math,
+            "random": random,
             "datetime": datetime,
             "Image": Image,
             "pd": pd,
             "plt": plt,
-            "sys": sys,
+            "sys": sys,
             "subprocess": subprocess,
             "np": np,
             "os": os,
-            're': re,
+            're': re,
             "json": json,
             "Path": pathlib.Path,
             "fnmatch": fnmatch,
@@ -793,6 +799,10 @@ class Jinx:
             "subprocess": subprocess,
             "get_llm_response": npy.llm_funcs.get_llm_response,
             "CommandHistory": CommandHistory,
+            # NPCArray support for compute graph operations in jinx
+            "NPCArray": NPCArray,
+            "infer_matrix": infer_matrix,
+            "ensemble_vote": ensemble_vote,
         }

         if extra_globals:
@@ -1261,7 +1271,8 @@ class NPC:
             dirs.append(self.jinxs_directory)

         # This jinja_env is for the *second pass* (runtime variable resolution in Jinx.execute)
-        [removed line elided in the registry diff view]
+        # Use SandboxedEnvironment to prevent template injection attacks
+        self.jinja_env = SandboxedEnvironment(
             loader=FileSystemLoader([
                 os.path.expanduser(d) for d in dirs
             ]),
@@ -1389,13 +1400,13 @@ class NPC:

         combined_raw_jinxs_dict = {j.jinx_name: j for j in all_available_raw_jinxs}

-        npc_first_pass_jinja_env =
-
+        npc_first_pass_jinja_env = SandboxedEnvironment(undefined=SilentUndefined)
+
         jinx_macro_globals = {}
         for raw_jinx in combined_raw_jinxs_dict.values():
             def create_jinx_callable(jinx_obj_in_closure):
                 def callable_jinx(**kwargs):
-                    temp_jinja_env =
+                    temp_jinja_env = SandboxedEnvironment(undefined=SilentUndefined)
                     rendered_target_steps = []
                     for target_step in jinx_obj_in_closure._raw_steps:
                         temp_rendered_step = {}
@@ -2506,7 +2517,7 @@ class Team:
         self._raw_jinxs_list: List['Jinx'] = []  # Temporary storage for raw Team-level Jinx objects
         self.jinx_tool_catalog: Dict[str, Dict[str, Any]] = {}  # Jinx-derived tool defs ready for MCP/LLM

-        self.jinja_env_for_first_pass =
+        self.jinja_env_for_first_pass = SandboxedEnvironment(undefined=SilentUndefined)  # Env for macro expansion

         self.db_conn = db_conn
         self.team_path = os.path.expanduser(team_path) if team_path else None
@@ -2700,7 +2711,7 @@ class Team:
         def callable_jinx(**kwargs):
             # This callable will be invoked by the Jinja renderer during the first pass.
             # It needs to render the target Jinx's *raw* steps with the provided kwargs.
-            temp_jinja_env =
+            temp_jinja_env = SandboxedEnvironment(undefined=SilentUndefined)

             rendered_target_steps = []
             for target_step in jinx_obj_in_closure._raw_steps:
```

(The truncated `-` lines above are elided in the registry diff view; the replacements make clear each previously constructed a plain Jinja2 environment.)
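The practical effect of swapping `Environment` for `SandboxedEnvironment`: attribute-walking payloads that try to escape the template are rejected at render time. A small standalone jinja2 sketch, independent of npcpy:

```python
from jinja2.sandbox import SandboxedEnvironment
from jinja2.exceptions import SecurityError

env = SandboxedEnvironment()

# A classic SSTI probe: walk from a string literal to its class internals.
payload = "{{ ''.__class__.__mro__[1].__subclasses__() }}"

try:
    env.from_string(payload).render()
except SecurityError as e:
    print(f"blocked: {e}")  # the sandbox refuses unsafe attribute access

# Ordinary data templating still works unchanged:
print(env.from_string("Hello {{ name }}").render(name="world"))
```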
{npcpy-1.3.17 → npcpy-1.3.19}/npcpy/npc_sysenv.py +156 -6

```diff
@@ -15,6 +15,129 @@ import json

 import requests
 ON_WINDOWS = platform.system() == "Windows"
+ON_MACOS = platform.system() == "Darwin"
+
+
+# ==================== XDG/Platform-Specific Paths ====================
+
+def get_data_dir() -> str:
+    """
+    Get the platform-specific data directory for npcsh.
+
+    Returns:
+        - Linux: $XDG_DATA_HOME/npcsh or ~/.local/share/npcsh
+        - macOS: ~/Library/Application Support/npcsh
+        - Windows: %LOCALAPPDATA%/npcsh or ~/AppData/Local/npcsh
+
+    Falls back to ~/.npcsh for backwards compatibility if the new location
+    doesn't exist but the old one does.
+    """
+    if ON_WINDOWS:
+        base = os.environ.get('LOCALAPPDATA', os.path.expanduser('~/AppData/Local'))
+        new_path = os.path.join(base, 'npcsh')
+    elif ON_MACOS:
+        new_path = os.path.expanduser('~/Library/Application Support/npcsh')
+    else:
+        # Linux/Unix - use XDG Base Directory Specification
+        xdg_data = os.environ.get('XDG_DATA_HOME', os.path.expanduser('~/.local/share'))
+        new_path = os.path.join(xdg_data, 'npcsh')
+
+    # Backwards compatibility: if old path exists but new doesn't, use old
+    old_path = os.path.expanduser('~/.npcsh')
+    if os.path.exists(old_path) and not os.path.exists(new_path):
+        return old_path
+
+    return new_path
+
+
+def get_config_dir() -> str:
+    """
+    Get the platform-specific config directory for npcsh.
+
+    Returns:
+        - Linux: $XDG_CONFIG_HOME/npcsh or ~/.config/npcsh
+        - macOS: ~/Library/Application Support/npcsh (same as data on macOS)
+        - Windows: %APPDATA%/npcsh or ~/AppData/Roaming/npcsh
+
+    Falls back to ~/.npcsh for backwards compatibility if the new location
+    doesn't exist but the old one does.
+    """
+    if ON_WINDOWS:
+        base = os.environ.get('APPDATA', os.path.expanduser('~/AppData/Roaming'))
+        new_path = os.path.join(base, 'npcsh')
+    elif ON_MACOS:
+        new_path = os.path.expanduser('~/Library/Application Support/npcsh')
+    else:
+        # Linux/Unix - use XDG Base Directory Specification
+        xdg_config = os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config'))
+        new_path = os.path.join(xdg_config, 'npcsh')
+
+    # Backwards compatibility: if old path exists but new doesn't, use old
+    old_path = os.path.expanduser('~/.npcsh')
+    if os.path.exists(old_path) and not os.path.exists(new_path):
+        return old_path
+
+    return new_path
+
+
+def get_cache_dir() -> str:
+    """
+    Get the platform-specific cache directory for npcsh.
+
+    Returns:
+        - Linux: $XDG_CACHE_HOME/npcsh or ~/.cache/npcsh
+        - macOS: ~/Library/Caches/npcsh
+        - Windows: %LOCALAPPDATA%/npcsh/cache
+    """
+    if ON_WINDOWS:
+        base = os.environ.get('LOCALAPPDATA', os.path.expanduser('~/AppData/Local'))
+        return os.path.join(base, 'npcsh', 'cache')
+    elif ON_MACOS:
+        return os.path.expanduser('~/Library/Caches/npcsh')
+    else:
+        xdg_cache = os.environ.get('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
+        return os.path.join(xdg_cache, 'npcsh')
+
+
+def get_npcshrc_path() -> str:
+    """
+    Get the path to the npcshrc config file.
+
+    Returns the platform-appropriate config file path.
+    Falls back to ~/.npcshrc for backwards compatibility.
+    """
+    old_path = os.path.expanduser('~/.npcshrc')
+    if os.path.exists(old_path):
+        return old_path
+
+    config_dir = get_config_dir()
+    return os.path.join(config_dir, 'npcshrc')
+
+
+def get_history_db_path() -> str:
+    """
+    Get the path to the history database.
+
+    Returns the platform-appropriate database path.
+    Falls back to ~/npcsh_history.db for backwards compatibility.
+    """
+    old_path = os.path.expanduser('~/npcsh_history.db')
+    if os.path.exists(old_path):
+        return old_path
+
+    data_dir = get_data_dir()
+    return os.path.join(data_dir, 'history.db')
+
+
+def get_models_dir() -> str:
+    """Get the directory for storing models."""
+    return os.path.join(get_data_dir(), 'models')
+
+
+def ensure_npcsh_dirs() -> None:
+    """Ensure all npcsh directories exist."""
+    for dir_path in [get_data_dir(), get_config_dir(), get_cache_dir(), get_models_dir()]:
+        os.makedirs(dir_path, exist_ok=True)

 try:
     if not ON_WINDOWS:
```
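These resolvers are meant to be called lazily so the backwards-compatibility checks run against the live filesystem. A sketch of typical use (the XDG override at the end is only to illustrate precedence on Linux):

```python
import os
from npcpy.npc_sysenv import (
    ensure_npcsh_dirs, get_data_dir, get_history_db_path, get_npcshrc_path,
)

ensure_npcsh_dirs()  # creates data/config/cache/models dirs if missing

print(get_data_dir())         # e.g. ~/.local/share/npcsh on Linux,
                              # unless a legacy ~/.npcsh already exists
print(get_npcshrc_path())     # legacy ~/.npcshrc wins if present
print(get_history_db_path())  # legacy ~/npcsh_history.db wins if present

# On Linux, XDG variables take precedence over the built-in defaults:
os.environ['XDG_DATA_HOME'] = '/tmp/xdg-demo'
print(get_data_dir())         # /tmp/xdg-demo/npcsh (absent a legacy ~/.npcsh)
```

The next hunks, still in npc_sysenv.py, wire these helpers into the local-model scanner: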
```diff
@@ -309,11 +432,13 @@ def get_locally_available_models(project_directory, airplane_mode=False):
         logging.info(f"Error loading Ollama models or timed out: {e}")

     # Scan for local GGUF/GGML models
+    models_dir = get_models_dir()
     gguf_dirs = [
-        os.path.
-
+        os.path.join(models_dir, 'gguf'),
+        models_dir,
         os.path.expanduser('~/models'),
-        os.path.
+        os.path.join(get_cache_dir(), 'huggingface/hub'),
+        os.path.expanduser('~/.cache/huggingface/hub'),  # Fallback for existing installs
     ]
     env_gguf_dir = os.environ.get('NPCSH_GGUF_DIR')
     if env_gguf_dir:
@@ -358,6 +483,31 @@ def get_locally_available_models(project_directory, airplane_mode=False):
     except Exception as e:
         logging.debug(f"llama.cpp server not available: {e}")

+    # Check for MLX server (OpenAI-compatible API on port 8000)
+    try:
+        import requests
+        response = requests.get('http://127.0.0.1:8000/v1/models', timeout=1)
+        if response.ok:
+            data = response.json()
+            for model in data.get('data', []):
+                model_id = model.get('id', model.get('name', 'unknown'))
+                available_models[model_id] = "mlx"
+    except Exception as e:
+        logging.debug(f"MLX server not available: {e}")
+
+    # Also check common alternative MLX port 5000
+    try:
+        import requests
+        response = requests.get('http://127.0.0.1:5000/v1/models', timeout=1)
+        if response.ok:
+            data = response.json()
+            for model in data.get('data', []):
+                model_id = model.get('id', model.get('name', 'unknown'))
+                if model_id not in available_models:  # Avoid duplicates
+                    available_models[model_id] = "mlx"
+    except Exception as e:
+        logging.debug(f"MLX server (port 5000) not available: {e}")
+
     # Scan for LoRA adapters (fine-tuned models with adapter_config.json)
     lora_dirs = [
         os.path.expanduser('~/.npcsh/models'),
```

(The truncated `-` lines in the first hunk are elided in the registry diff view.)
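The MLX detection is just an OpenAI-compatible `/v1/models` probe with a short timeout, so it can be sanity-checked by hand. A sketch (assumes an MLX server such as `mlx_lm.server` is listening on port 8000):

```python
import requests

# The same request the detector issues; a 1s timeout keeps startup
# snappy when no server is running.
try:
    resp = requests.get('http://127.0.0.1:8000/v1/models', timeout=1)
    resp.raise_for_status()
    for entry in resp.json().get('data', []):
        print(entry.get('id', 'unknown'), '-> provider "mlx"')
except requests.RequestException:
    print('no MLX server on :8000; the scanner would log and move on')
```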
```diff
@@ -1055,13 +1205,13 @@ def lookup_provider(model: str) -> str:

 def load_custom_providers():
     """
-    Load custom provider configurations from .
-
+    Load custom provider configurations from npcshrc config file.
+
     Returns:
         dict: Custom provider configurations keyed by provider name
     """
     custom_providers = {}
-    npcshrc_path =
+    npcshrc_path = get_npcshrc_path()

     if os.path.exists(npcshrc_path):
         with open(npcshrc_path, "r") as f:
```

(The truncated `-` lines are elided in the registry diff view.)