npcpy 1.3.17__py3-none-any.whl → 1.3.19__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- npcpy/data/web.py +0 -1
- npcpy/memory/command_history.py +14 -5
- npcpy/ml_funcs.py +61 -16
- npcpy/npc_array.py +149 -1
- npcpy/npc_compiler.py +23 -12
- npcpy/npc_sysenv.py +156 -6
- npcpy/serve.py +124 -39
- {npcpy-1.3.17.dist-info → npcpy-1.3.19.dist-info}/METADATA +1 -1
- {npcpy-1.3.17.dist-info → npcpy-1.3.19.dist-info}/RECORD +12 -12
- {npcpy-1.3.17.dist-info → npcpy-1.3.19.dist-info}/WHEEL +1 -1
- {npcpy-1.3.17.dist-info → npcpy-1.3.19.dist-info}/licenses/LICENSE +0 -0
- {npcpy-1.3.17.dist-info → npcpy-1.3.19.dist-info}/top_level.txt +0 -0
npcpy/data/web.py
CHANGED
npcpy/memory/command_history.py
CHANGED
@@ -611,7 +611,9 @@ class CommandHistory:
         Column('reasoning_content', Text), # For thinking tokens / chain of thought
         Column('tool_calls', Text), # JSON array of tool calls made by assistant
         Column('tool_results', Text), # JSON array of tool call results
-        Column('parent_message_id', String(50)) # Links assistant response to parent user message for broadcast grouping
+        Column('parent_message_id', String(50)), # Links assistant response to parent user message for broadcast grouping
+        Column('device_id', String(255)), # UUID of the device that created this message
+        Column('device_name', String(255)) # Human-readable device name
     )
 
     Table('message_attachments', metadata,
@@ -867,6 +869,8 @@ class CommandHistory:
         tool_calls=None,
         tool_results=None,
         parent_message_id=None,
+        device_id=None,
+        device_name=None,
     ):
         if isinstance(content, (dict, list)):
             content = json.dumps(content, cls=CustomJSONEncoder)
@@ -882,14 +886,15 @@
 
         stmt = """
         INSERT INTO conversation_history
-        (message_id, timestamp, role, content, conversation_id, directory_path, model, provider, npc, team, reasoning_content, tool_calls, tool_results, parent_message_id)
-        VALUES (:message_id, :timestamp, :role, :content, :conversation_id, :directory_path, :model, :provider, :npc, :team, :reasoning_content, :tool_calls, :tool_results, :parent_message_id)
+        (message_id, timestamp, role, content, conversation_id, directory_path, model, provider, npc, team, reasoning_content, tool_calls, tool_results, parent_message_id, device_id, device_name)
+        VALUES (:message_id, :timestamp, :role, :content, :conversation_id, :directory_path, :model, :provider, :npc, :team, :reasoning_content, :tool_calls, :tool_results, :parent_message_id, :device_id, :device_name)
         """
         params = {
             "message_id": message_id, "timestamp": timestamp, "role": role, "content": content,
             "conversation_id": conversation_id, "directory_path": normalized_directory_path, "model": model,
             "provider": provider, "npc": npc, "team": team, "reasoning_content": reasoning_content,
-            "tool_calls": tool_calls, "tool_results": tool_results, "parent_message_id": parent_message_id
+            "tool_calls": tool_calls, "tool_results": tool_results, "parent_message_id": parent_message_id,
+            "device_id": device_id, "device_name": device_name
         }
         with self.engine.begin() as conn:
             conn.execute(text(stmt), params)
@@ -1461,6 +1466,8 @@ def save_conversation_message(
     tool_results: List[Dict] = None,
     parent_message_id: str = None,
     skip_if_exists: bool = True,
+    device_id: str = None,
+    device_name: str = None,
 ):
     """
     Saves a conversation message linked to a conversation ID with optional attachments.
@@ -1495,7 +1502,9 @@ def save_conversation_message(
         reasoning_content=reasoning_content,
         tool_calls=tool_calls,
         tool_results=tool_results,
-        parent_message_id=parent_message_id
+        parent_message_id=parent_message_id,
+        device_id=device_id,
+        device_name=device_name)
 def retrieve_last_conversation(
     command_history: CommandHistory, conversation_id: str
 ) -> str:
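The device-tracking change is plumbed end to end: two new columns on `conversation_history`, two new parameters on the internal insert, and two new keyword arguments on the public `save_conversation_message` helper. A minimal sketch of the new call shape; the leading parameters (history object, conversation ID, role, content) are assumed from the docstring since only the trailing parameters appear in these hunks, and all IDs below are illustrative:

    import uuid

    from npcpy.memory.command_history import CommandHistory, save_conversation_message

    history = CommandHistory()  # assumed default constructor

    save_conversation_message(
        history,                              # assumed leading parameter
        conversation_id="conv-123",           # illustrative conversation ID
        role="assistant",
        content="Hello from my laptop",
        parent_message_id="msg-abc",          # illustrative parent user message
        device_id=str(uuid.uuid4()),          # UUID of the originating device
        device_name="work-laptop",            # human-readable device name
    )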
npcpy/ml_funcs.py
CHANGED
@@ -16,7 +16,6 @@ Same interface pattern as llm_funcs:
 from __future__ import annotations
 import copy
 import itertools
-import pickle
 from concurrent.futures import ThreadPoolExecutor, as_completed
 from dataclasses import dataclass, field
 from typing import Any, Callable, Dict, List, Optional, Tuple, Union
@@ -708,21 +707,67 @@ def cross_validate(
 
 # ==================== Utility Functions ====================
 
-def serialize_model(model: Any, path: str = …
-"""
-… (remaining removed lines of the old implementation elided by the diff viewer)
+def serialize_model(model: Any, path: str, format: str = "joblib") -> None:
+    """
+    Serialize model to file using safe formats (no pickle).
+
+    Args:
+        model: The model to serialize
+        path: File path to write to (required)
+        format: Serialization format - "joblib" (default) or "safetensors"
+
+    Raises:
+        ImportError: If required library is not installed
+        ValueError: If format is not supported for the model type
+    """
+    if format == "safetensors":
+        from safetensors.torch import save_file
+        if hasattr(model, 'state_dict'):
+            save_file(model.state_dict(), path)
+        else:
+            raise ValueError("safetensors format requires model with state_dict (PyTorch)")
+    elif format == "joblib":
+        import joblib
+        joblib.dump(model, path)
+    else:
+        raise ValueError(f"Unsupported format: {format}. Use 'joblib' or 'safetensors'.")
+
+
+def deserialize_model(path: str, format: str = "auto") -> Any:
+    """
+    Deserialize model from file using safe formats (no pickle).
+
+    Args:
+        path: File path to load from
+        format: "auto" (detect from extension), "joblib", or "safetensors"
+
+    Returns:
+        The deserialized model
+
+    Raises:
+        ImportError: If required library is not installed
+        ValueError: If format cannot be determined
+    """
+    # Auto-detect format from extension
+    if format == "auto":
+        if path.endswith('.safetensors'):
+            format = "safetensors"
+        elif path.endswith('.joblib'):
+            format = "joblib"
+        else:
+            raise ValueError(
+                f"Cannot auto-detect format for {path}. "
+                "Use .joblib or .safetensors extension, or specify format explicitly."
+            )
+
+    if format == "safetensors":
+        from safetensors.torch import load_file
+        return load_file(path)
+    elif format == "joblib":
+        import joblib
+        return joblib.load(path)
+    else:
+        raise ValueError(f"Unsupported format: {format}. Use 'joblib' or 'safetensors'.")
 
 
 def get_model_params(model: Any) -> Dict[str, Any]:
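With the `pickle` import gone, model persistence now goes through joblib for arbitrary estimators and safetensors for PyTorch `state_dict`s. A round-trip sketch using the API exactly as defined above; note that the safetensors path returns a plain tensor dict on load, so callers must rebuild the module and call `load_state_dict` themselves:

    from sklearn.linear_model import LinearRegression

    from npcpy.ml_funcs import serialize_model, deserialize_model

    model = LinearRegression().fit([[0.0], [1.0], [2.0]], [0.0, 1.0, 2.0])

    serialize_model(model, "model.joblib")        # format defaults to "joblib"
    restored = deserialize_model("model.joblib")  # format="auto" resolves via the extension
    print(restored.predict([[3.0]]))              # ~[3.0]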
npcpy/npc_array.py
CHANGED
@@ -20,7 +20,6 @@ Example:
 from __future__ import annotations
 import copy
 import itertools
-import pickle
 from concurrent.futures import ThreadPoolExecutor, as_completed
 from dataclasses import dataclass, field
 from typing import (
@@ -47,6 +46,7 @@ class OpType(Enum):
     REDUCE = "reduce"
     CHAIN = "chain"
     EVOLVE = "evolve"
+    JINX = "jinx" # Execute a Jinx workflow across models
 
 
 @dataclass
@@ -328,6 +328,61 @@ class NPCArray:
 
         return cls(specs)
 
+    @classmethod
+    def from_matrix(
+        cls,
+        matrix: List[Dict[str, Any]]
+    ) -> 'NPCArray':
+        """
+        Create NPCArray from a matrix of model configurations.
+
+        This is particularly useful for defining model arrays in Jinx templates
+        where you want explicit control over each model configuration.
+
+        Args:
+            matrix: List of model configuration dicts. Each dict should have:
+                - 'model': model name/reference (required)
+                - 'provider': provider name (optional)
+                - 'type': model type - 'llm', 'npc', 'sklearn', 'torch' (default: 'llm')
+                - Any additional config parameters
+
+        Example:
+            >>> # In a Jinx template, define a matrix of models:
+            >>> matrix = [
+            ...     {'model': 'gpt-4', 'provider': 'openai', 'temperature': 0.7},
+            ...     {'model': 'claude-3-opus', 'provider': 'anthropic', 'temperature': 0.5},
+            ...     {'model': 'llama3.2', 'provider': 'ollama', 'temperature': 0.8},
+            ... ]
+            >>> arr = NPCArray.from_matrix(matrix)
+
+            >>> # Mixed model types:
+            >>> matrix = [
+            ...     {'model': 'gpt-4', 'type': 'llm', 'provider': 'openai'},
+            ...     {'model': my_npc, 'type': 'npc'},
+            ...     {'model': sklearn_model, 'type': 'sklearn'},
+            ... ]
+        """
+        specs = []
+        for config in matrix:
+            model_type = config.get('type', 'llm')
+            model_ref = config.get('model')
+            provider = config.get('provider')
+
+            # Extract config params (everything except type, model, provider)
+            extra_config = {
+                k: v for k, v in config.items()
+                if k not in ('type', 'model', 'provider')
+            }
+
+            specs.append(ModelSpec(
+                model_type=model_type,
+                model_ref=model_ref,
+                provider=provider,
+                config=extra_config
+            ))
+
+        return cls(specs)
+
     # ==================== Properties ====================
 
     @property
@@ -490,6 +545,43 @@ class NPCArray:
 
         return NPCArray(self._specs, new_node)
 
+    def jinx(
+        self,
+        jinx_name: str,
+        inputs: Optional[Dict[str, Any]] = None,
+        **kwargs
+    ) -> 'LazyResult':
+        """
+        Execute a Jinx workflow across all models in the array.
+
+        Each model in the array will be used as the 'npc' context for the jinx,
+        allowing you to run the same workflow template with different models.
+
+        Args:
+            jinx_name: Name of the jinx workflow to execute (e.g., 'analyze', 'summarize')
+            inputs: Input values for the jinx template variables
+            **kwargs: Additional execution parameters
+
+        Returns:
+            LazyResult with workflow outputs from each model
+
+        Example:
+            >>> models = NPCArray.from_llms(['gpt-4', 'claude-3'])
+            >>> results = models.jinx('analyze', inputs={'topic': 'AI safety'}).collect()
+        """
+        new_node = GraphNode(
+            op_type=OpType.JINX,
+            params={
+                "jinx_name": jinx_name,
+                "inputs": inputs or {},
+                **kwargs
+            },
+            parents=[self._graph],
+            shape=(len(self._specs),)
+        )
+
+        return LazyResult(self._specs, new_node)
+
 
 class LazyResult:
     """
@@ -792,6 +884,7 @@ class GraphExecutor:
             OpType.REDUCE: self._exec_reduce,
             OpType.CHAIN: self._exec_chain,
             OpType.EVOLVE: self._exec_evolve,
+            OpType.JINX: self._exec_jinx,
         }
 
         handler = handlers.get(node.op_type)
@@ -1136,6 +1229,61 @@ class GraphExecutor:
             metadata={"operation": "evolve", "generation": 1}
         )
 
+    def _exec_jinx(self, node, specs, prompts, parents) -> ResponseTensor:
+        """Execute a Jinx workflow across models"""
+        from npcpy.npc_compiler import NPC, Jinx
+
+        jinx_name = node.params.get("jinx_name")
+        inputs = node.params.get("inputs", {})
+        extra_kwargs = {k: v for k, v in node.params.items()
+                        if k not in ("jinx_name", "inputs")}
+
+        results = []
+
+        def run_jinx_single(spec: ModelSpec) -> str:
+            """Run jinx for a single model spec"""
+            try:
+                if spec.model_type == "npc":
+                    # Use the NPC directly
+                    npc = spec.model_ref
+                else:
+                    # Create a temporary NPC with the model
+                    npc = NPC(
+                        name=f"array_npc_{spec.model_ref}",
+                        model=spec.model_ref,
+                        provider=spec.provider
+                    )
+
+                # Execute the jinx
+                result = npc.execute_jinx(
+                    jinx_name=jinx_name,
+                    input_values=inputs,
+                    **extra_kwargs
+                )
+                return result.get("output", str(result))
+            except Exception as e:
+                return f"Error: {e}"
+
+        if self.parallel and len(specs) > 1:
+            with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
+                futures = {executor.submit(run_jinx_single, spec): i
+                           for i, spec in enumerate(specs)}
+                results = [None] * len(specs)
+                for future in as_completed(futures):
+                    idx = futures[future]
+                    try:
+                        results[idx] = future.result()
+                    except Exception as e:
+                        results[idx] = f"Error: {e}"
+        else:
+            results = [run_jinx_single(spec) for spec in specs]
+
+        return ResponseTensor(
+            data=np.array(results, dtype=object),
+            model_specs=specs,
+            metadata={"operation": "jinx", "jinx_name": jinx_name, **inputs}
+        )
+
 
 def _compute_response_variance(responses: List[str]) -> float:
     """Compute semantic variance across responses"""
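Together, `from_matrix`, the `jinx()` method, and `_exec_jinx` let one Jinx workflow fan out over heterogeneous model configurations through the lazy compute graph. A sketch assembled from the docstring examples above; the model names, providers, and the 'analyze' jinx are illustrative:

    from npcpy.npc_array import NPCArray

    matrix = [
        {'model': 'gpt-4', 'provider': 'openai', 'temperature': 0.7},
        {'model': 'llama3.2', 'provider': 'ollama', 'temperature': 0.8},
    ]
    arr = NPCArray.from_matrix(matrix)

    # .jinx() only records a JINX node in the graph; .collect() triggers
    # _exec_jinx, which wraps each non-NPC spec in a temporary NPC and runs
    # the workflow once per model (in parallel when the executor allows).
    results = arr.jinx('analyze', inputs={'topic': 'AI safety'}).collect()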
npcpy/npc_compiler.py
CHANGED
@@ -52,6 +52,7 @@ import fnmatch
 import subprocess
 from typing import Any, Dict, List, Optional, Union, Callable, Tuple
 from jinja2 import Environment, FileSystemLoader, Template, Undefined, DictLoader
+from jinja2.sandbox import SandboxedEnvironment
 from sqlalchemy import create_engine, text
 import npcpy as npy
 from npcpy.tools import auto_tools
@@ -231,7 +232,8 @@ def load_yaml_file(file_path):
 
     # First pass: render Jinja2 templates to produce valid YAML
     # This allows {% if %} and other control structures to work
-…
+    # Use SandboxedEnvironment to prevent template injection attacks
+    jinja_env = SandboxedEnvironment(undefined=SilentUndefined)
     # Configure tojson filter to handle SilentUndefined
     jinja_env.policies['json.dumps_function'] = _json_dumps_with_undefined
     template = jinja_env.from_string(content)
@@ -694,7 +696,8 @@ class Jinx:
                  jinja_env: Optional[Environment] = None):
 
         if jinja_env is None:
-…
+            # Use SandboxedEnvironment to prevent template injection attacks
+            jinja_env = SandboxedEnvironment(
                 loader=DictLoader({}),
                 undefined=SilentUndefined,
             )
@@ -771,21 +774,24 @@ class Jinx:
 
         self._log_debug(f"DEBUG: Executing step '{step_name}' with rendered code: {rendered_code}")
 
+        # Import NPCArray for array operations in jinx
+        from npcpy.npc_array import NPCArray, infer_matrix, ensemble_vote
+
         exec_globals = {
             "__builtins__": __builtins__,
             "npc": active_npc,
             "context": context, # Pass context by reference
-            "math": math,
-            "random": random,
+            "math": math,
+            "random": random,
             "datetime": datetime,
             "Image": Image,
             "pd": pd,
             "plt": plt,
-            "sys": sys,
+            "sys": sys,
             "subprocess": subprocess,
             "np": np,
             "os": os,
-            're': re,
+            're': re,
             "json": json,
             "Path": pathlib.Path,
             "fnmatch": fnmatch,
@@ -793,6 +799,10 @@ class Jinx:
             "subprocess": subprocess,
             "get_llm_response": npy.llm_funcs.get_llm_response,
             "CommandHistory": CommandHistory,
+            # NPCArray support for compute graph operations in jinx
+            "NPCArray": NPCArray,
+            "infer_matrix": infer_matrix,
+            "ensemble_vote": ensemble_vote,
         }
 
         if extra_globals:
@@ -1261,7 +1271,8 @@ class NPC:
             dirs.append(self.jinxs_directory)
 
         # This jinja_env is for the *second pass* (runtime variable resolution in Jinx.execute)
-…
+        # Use SandboxedEnvironment to prevent template injection attacks
+        self.jinja_env = SandboxedEnvironment(
             loader=FileSystemLoader([
                 os.path.expanduser(d) for d in dirs
             ]),
@@ -1389,13 +1400,13 @@ class NPC:
 
         combined_raw_jinxs_dict = {j.jinx_name: j for j in all_available_raw_jinxs}
 
-        npc_first_pass_jinja_env = …
-…
+        npc_first_pass_jinja_env = SandboxedEnvironment(undefined=SilentUndefined)
+
         jinx_macro_globals = {}
         for raw_jinx in combined_raw_jinxs_dict.values():
             def create_jinx_callable(jinx_obj_in_closure):
                 def callable_jinx(**kwargs):
-                    temp_jinja_env = …
+                    temp_jinja_env = SandboxedEnvironment(undefined=SilentUndefined)
                     rendered_target_steps = []
                     for target_step in jinx_obj_in_closure._raw_steps:
                         temp_rendered_step = {}
@@ -2506,7 +2517,7 @@ class Team:
         self._raw_jinxs_list: List['Jinx'] = [] # Temporary storage for raw Team-level Jinx objects
         self.jinx_tool_catalog: Dict[str, Dict[str, Any]] = {} # Jinx-derived tool defs ready for MCP/LLM
 
-        self.jinja_env_for_first_pass = …
+        self.jinja_env_for_first_pass = SandboxedEnvironment(undefined=SilentUndefined) # Env for macro expansion
 
         self.db_conn = db_conn
         self.team_path = os.path.expanduser(team_path) if team_path else None
@@ -2700,7 +2711,7 @@ class Team:
                 def callable_jinx(**kwargs):
                     # This callable will be invoked by the Jinja renderer during the first pass.
                     # It needs to render the target Jinx's *raw* steps with the provided kwargs.
-                    temp_jinja_env = …
+                    temp_jinja_env = SandboxedEnvironment(undefined=SilentUndefined)
 
                     rendered_target_steps = []
                     for target_step in jinx_obj_in_closure._raw_steps:
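Every template environment in the compiler now comes from `jinja2.sandbox.SandboxedEnvironment`, so NPC/Jinx templates can no longer reach Python internals through attribute access. A self-contained illustration of the difference this makes (plain Jinja2 behavior, not npcpy-specific):

    from jinja2.sandbox import SandboxedEnvironment, SecurityError

    env = SandboxedEnvironment()

    # Ordinary template logic still renders normally
    print(env.from_string("{{ 1 + 2 }}").render())  # -> 3

    # A classic sandbox-escape probe is rejected at render time
    try:
        env.from_string("{{ ''.__class__.__mro__ }}").render()
    except SecurityError as exc:
        print("blocked:", exc)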
npcpy/npc_sysenv.py
CHANGED
@@ -15,6 +15,129 @@ import json
 
 import requests
 ON_WINDOWS = platform.system() == "Windows"
+ON_MACOS = platform.system() == "Darwin"
+
+
+# ==================== XDG/Platform-Specific Paths ====================
+
+def get_data_dir() -> str:
+    """
+    Get the platform-specific data directory for npcsh.
+
+    Returns:
+        - Linux: $XDG_DATA_HOME/npcsh or ~/.local/share/npcsh
+        - macOS: ~/Library/Application Support/npcsh
+        - Windows: %LOCALAPPDATA%/npcsh or ~/AppData/Local/npcsh
+
+    Falls back to ~/.npcsh for backwards compatibility if the new location
+    doesn't exist but the old one does.
+    """
+    if ON_WINDOWS:
+        base = os.environ.get('LOCALAPPDATA', os.path.expanduser('~/AppData/Local'))
+        new_path = os.path.join(base, 'npcsh')
+    elif ON_MACOS:
+        new_path = os.path.expanduser('~/Library/Application Support/npcsh')
+    else:
+        # Linux/Unix - use XDG Base Directory Specification
+        xdg_data = os.environ.get('XDG_DATA_HOME', os.path.expanduser('~/.local/share'))
+        new_path = os.path.join(xdg_data, 'npcsh')
+
+    # Backwards compatibility: if old path exists but new doesn't, use old
+    old_path = os.path.expanduser('~/.npcsh')
+    if os.path.exists(old_path) and not os.path.exists(new_path):
+        return old_path
+
+    return new_path
+
+
+def get_config_dir() -> str:
+    """
+    Get the platform-specific config directory for npcsh.
+
+    Returns:
+        - Linux: $XDG_CONFIG_HOME/npcsh or ~/.config/npcsh
+        - macOS: ~/Library/Application Support/npcsh (same as data on macOS)
+        - Windows: %APPDATA%/npcsh or ~/AppData/Roaming/npcsh
+
+    Falls back to ~/.npcsh for backwards compatibility if the new location
+    doesn't exist but the old one does.
+    """
+    if ON_WINDOWS:
+        base = os.environ.get('APPDATA', os.path.expanduser('~/AppData/Roaming'))
+        new_path = os.path.join(base, 'npcsh')
+    elif ON_MACOS:
+        new_path = os.path.expanduser('~/Library/Application Support/npcsh')
+    else:
+        # Linux/Unix - use XDG Base Directory Specification
+        xdg_config = os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config'))
+        new_path = os.path.join(xdg_config, 'npcsh')
+
+    # Backwards compatibility: if old path exists but new doesn't, use old
+    old_path = os.path.expanduser('~/.npcsh')
+    if os.path.exists(old_path) and not os.path.exists(new_path):
+        return old_path
+
+    return new_path
+
+
+def get_cache_dir() -> str:
+    """
+    Get the platform-specific cache directory for npcsh.
+
+    Returns:
+        - Linux: $XDG_CACHE_HOME/npcsh or ~/.cache/npcsh
+        - macOS: ~/Library/Caches/npcsh
+        - Windows: %LOCALAPPDATA%/npcsh/cache
+    """
+    if ON_WINDOWS:
+        base = os.environ.get('LOCALAPPDATA', os.path.expanduser('~/AppData/Local'))
+        return os.path.join(base, 'npcsh', 'cache')
+    elif ON_MACOS:
+        return os.path.expanduser('~/Library/Caches/npcsh')
+    else:
+        xdg_cache = os.environ.get('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
+        return os.path.join(xdg_cache, 'npcsh')
+
+
+def get_npcshrc_path() -> str:
+    """
+    Get the path to the npcshrc config file.
+
+    Returns the platform-appropriate config file path.
+    Falls back to ~/.npcshrc for backwards compatibility.
+    """
+    old_path = os.path.expanduser('~/.npcshrc')
+    if os.path.exists(old_path):
+        return old_path
+
+    config_dir = get_config_dir()
+    return os.path.join(config_dir, 'npcshrc')
+
+
+def get_history_db_path() -> str:
+    """
+    Get the path to the history database.
+
+    Returns the platform-appropriate database path.
+    Falls back to ~/npcsh_history.db for backwards compatibility.
+    """
+    old_path = os.path.expanduser('~/npcsh_history.db')
+    if os.path.exists(old_path):
+        return old_path
+
+    data_dir = get_data_dir()
+    return os.path.join(data_dir, 'history.db')
+
+
+def get_models_dir() -> str:
+    """Get the directory for storing models."""
+    return os.path.join(get_data_dir(), 'models')
+
+
+def ensure_npcsh_dirs() -> None:
+    """Ensure all npcsh directories exist."""
+    for dir_path in [get_data_dir(), get_config_dir(), get_cache_dir(), get_models_dir()]:
+        os.makedirs(dir_path, exist_ok=True)
 
 try:
     if not ON_WINDOWS:
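These helpers move npcsh state from a hard-coded `~/.npcsh` to OS-conventional locations, preferring the legacy directory when it already exists. Expected behavior on Linux, per the code above (the environment override is illustrative):

    import os

    from npcpy.npc_sysenv import get_data_dir, get_config_dir, get_models_dir

    # With no XDG overrides and no legacy ~/.npcsh:
    #   get_data_dir()   -> ~/.local/share/npcsh
    #   get_config_dir() -> ~/.config/npcsh
    #   get_models_dir() -> ~/.local/share/npcsh/models
    os.environ['XDG_DATA_HOME'] = '/tmp/xdg-demo'   # read at call time, not import time
    print(get_data_dir())  # /tmp/xdg-demo/npcsh, unless a legacy ~/.npcsh exists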
@@ -309,11 +432,13 @@ def get_locally_available_models(project_directory, airplane_mode=False):
         logging.info(f"Error loading Ollama models or timed out: {e}")
 
     # Scan for local GGUF/GGML models
+    models_dir = get_models_dir()
     gguf_dirs = [
-        os.path.…
-…
+        os.path.join(models_dir, 'gguf'),
+        models_dir,
         os.path.expanduser('~/models'),
-        os.path.…
+        os.path.join(get_cache_dir(), 'huggingface/hub'),
+        os.path.expanduser('~/.cache/huggingface/hub'), # Fallback for existing installs
     ]
     env_gguf_dir = os.environ.get('NPCSH_GGUF_DIR')
     if env_gguf_dir:
@@ -358,6 +483,31 @@ def get_locally_available_models(project_directory, airplane_mode=False):
     except Exception as e:
         logging.debug(f"llama.cpp server not available: {e}")
 
+    # Check for MLX server (OpenAI-compatible API on port 8000)
+    try:
+        import requests
+        response = requests.get('http://127.0.0.1:8000/v1/models', timeout=1)
+        if response.ok:
+            data = response.json()
+            for model in data.get('data', []):
+                model_id = model.get('id', model.get('name', 'unknown'))
+                available_models[model_id] = "mlx"
+    except Exception as e:
+        logging.debug(f"MLX server not available: {e}")
+
+    # Also check common alternative MLX port 5000
+    try:
+        import requests
+        response = requests.get('http://127.0.0.1:5000/v1/models', timeout=1)
+        if response.ok:
+            data = response.json()
+            for model in data.get('data', []):
+                model_id = model.get('id', model.get('name', 'unknown'))
+                if model_id not in available_models: # Avoid duplicates
+                    available_models[model_id] = "mlx"
+    except Exception as e:
+        logging.debug(f"MLX server (port 5000) not available: {e}")
+
     # Scan for LoRA adapters (fine-tuned models with adapter_config.json)
     lora_dirs = [
         os.path.expanduser('~/.npcsh/models'),
@@ -1055,13 +1205,13 @@ def lookup_provider(model: str) -> str:
 
 def load_custom_providers():
     """
-    Load custom provider configurations from .…
-…
+    Load custom provider configurations from npcshrc config file.
+
     Returns:
         dict: Custom provider configurations keyed by provider name
     """
     custom_providers = {}
-    npcshrc_path = …
+    npcshrc_path = get_npcshrc_path()
 
     if os.path.exists(npcshrc_path):
         with open(npcshrc_path, "r") as f:
npcpy/serve.py
CHANGED
@@ -58,12 +58,12 @@ import base64
 import shutil
 import uuid
 
-from npcpy.llm_funcs import gen_image, breathe
+from npcpy.llm_funcs import gen_image, gen_video, breathe
 
 from sqlalchemy import create_engine, text
 from sqlalchemy.orm import sessionmaker
 
-from npcpy.npc_sysenv import get_locally_available_models
+from npcpy.npc_sysenv import get_locally_available_models, get_data_dir, get_models_dir, get_cache_dir
 from npcpy.memory.command_history import (
     CommandHistory,
     save_conversation_message,
@@ -2675,7 +2675,7 @@ def inject_individuals():
 
 @app.route("/api/ml/train", methods=["POST"])
 def train_ml_model():
-    import …
+    import joblib
     import numpy as np
     from sklearn.linear_model import LinearRegression, LogisticRegression
     from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
@@ -2683,7 +2683,7 @@ def train_ml_model():
     from sklearn.cluster import KMeans
     from sklearn.model_selection import train_test_split
     from sklearn.metrics import mean_squared_error, r2_score, accuracy_score
-
+
     data = request.json
     model_name = data.get("name")
     model_type = data.get("type")
@@ -2691,13 +2691,13 @@
     features = data.get("features")
     training_data = data.get("data")
    hyperparams = data.get("hyperparameters", {})
-
+
     df = pd.DataFrame(training_data)
     X = df[features].values
-
+
     metrics = {}
     model = None
-
+
     if model_type == "linear_regression":
         y = df[target].values
         X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
@@ -2708,7 +2708,7 @@ def train_ml_model():
             "r2_score": r2_score(y_test, y_pred),
             "rmse": np.sqrt(mean_squared_error(y_test, y_pred))
         }
-
+
     elif model_type == "logistic_regression":
         y = df[target].values
         X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
@@ -2716,7 +2716,7 @@ def train_ml_model():
         model.fit(X_train, y_train)
         y_pred = model.predict(X_test)
         metrics = {"accuracy": accuracy_score(y_test, y_pred)}
-
+
     elif model_type == "random_forest":
         y = df[target].values
         X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
@@ -2727,13 +2727,13 @@ def train_ml_model():
             "r2_score": r2_score(y_test, y_pred),
             "rmse": np.sqrt(mean_squared_error(y_test, y_pred))
         }
-
+
     elif model_type == "clustering":
         n_clusters = hyperparams.get("n_clusters", 3)
         model = KMeans(n_clusters=n_clusters)
         labels = model.fit_predict(X)
         metrics = {"inertia": model.inertia_, "n_clusters": n_clusters}
-
+
     elif model_type == "gradient_boost":
         y = df[target].values
         X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
@@ -2744,19 +2744,18 @@ def train_ml_model():
             "r2_score": r2_score(y_test, y_pred),
             "rmse": np.sqrt(mean_squared_error(y_test, y_pred))
         }
-
+
     model_id = f"{model_name}_{int(time.time())}"
-    model_path = os.path.…
+    model_path = os.path.join(get_models_dir(), f"{model_id}.joblib")
     os.makedirs(os.path.dirname(model_path), exist_ok=True)
-… (remaining removed lines of the old save logic elided by the diff viewer)
+
+    joblib.dump({
+        "model": model,
+        "features": features,
+        "target": target,
+        "type": model_type
+    }, model_path)
+
     return jsonify({
         "model_id": model_id,
         "metrics": metrics,
@@ -2766,26 +2765,25 @@ def train_ml_model():
 
 @app.route("/api/ml/predict", methods=["POST"])
 def ml_predict():
-    import …
-…
+    import joblib
+
     data = request.json
     model_name = data.get("model_name")
     input_data = data.get("input_data")
-
-    model_dir = …
+
+    model_dir = get_models_dir()
     model_files = [f for f in os.listdir(model_dir) if f.startswith(model_name)]
-
+
     if not model_files:
         return jsonify({"error": f"Model {model_name} not found"})
-
+
     model_path = os.path.join(model_dir, model_files[0])
-… (removed lines elided by the diff viewer)
+
+    model_data = joblib.load(model_path)
+
     model = model_data["model"]
     prediction = model.predict([input_data])
-
+
     return jsonify({
         "prediction": prediction.tolist(),
         "error": None
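Net effect of the `/api/ml/*` changes: trained models are saved with joblib under `get_models_dir()` as `<name>_<timestamp>.joblib`, and prediction loads the first file whose name starts with the requested model name. A client-side sketch against a locally running server; the base URL is illustrative, and the `target` field is assumed to be read from the request body like the other fields shown:

    import requests

    BASE = "http://localhost:5337"  # illustrative address of a running `npcpy serve`

    train = requests.post(f"{BASE}/api/ml/train", json={
        "name": "demo",
        "type": "linear_regression",
        "target": "y",                              # assumed field name
        "features": ["x"],
        "data": [{"x": i, "y": 2 * i} for i in range(20)],
    }).json()
    print(train["model_id"], train["metrics"])

    pred = requests.post(f"{BASE}/api/ml/predict", json={
        "model_name": "demo",
        "input_data": [4],
    }).json()
    print(pred["prediction"])  # ~[8.0]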
@@ -4335,6 +4333,89 @@ def get_image_models_api():
         return jsonify({"models": [], "error": str(e)}), 500
 
 
+@app.route("/api/generate_video", methods=["POST"])
+def generate_video_api():
+    """
+    API endpoint for video generation.
+    """
+    try:
+        data = request.get_json()
+        prompt = data.get("prompt", "")
+        model = data.get("model", "veo-3.1-generate-preview")
+        provider = data.get("provider", "gemini")
+        duration = data.get("duration", 5)
+        output_dir = data.get("output_dir") # Optional user-specified path
+        negative_prompt = data.get("negative_prompt", "")
+        reference_image = data.get("reference_image") # Optional base64 image
+
+        if not prompt:
+            return jsonify({"error": "Prompt is required"}), 400
+
+        # Create output directory - use user-specified path or default to ~/.npcsh/videos
+        if output_dir:
+            save_dir = os.path.expanduser(output_dir)
+        else:
+            save_dir = os.path.expanduser("~/.npcsh/videos")
+        os.makedirs(save_dir, exist_ok=True)
+
+        # Generate unique filename
+        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+        output_filename = f"video_{timestamp}.mp4"
+        output_path = os.path.join(save_dir, output_filename)
+
+        # Calculate num_frames based on duration (assuming ~25fps for diffusers)
+        num_frames = int(duration * 25) if provider == "diffusers" else 25
+
+        print(f"Generating video with model={model}, provider={provider}, duration={duration}s")
+
+        result = gen_video(
+            prompt=prompt,
+            model=model,
+            provider=provider,
+            output_path=output_path,
+            num_frames=num_frames,
+            negative_prompt=negative_prompt,
+        )
+
+        if result and "output" in result:
+            # Read the generated video file and encode to base64
+            video_path = output_path
+            if os.path.exists(video_path):
+                with open(video_path, "rb") as f:
+                    video_data = f.read()
+                video_base64 = base64.b64encode(video_data).decode("utf-8")
+
+                return jsonify({
+                    "success": True,
+                    "video_path": video_path,
+                    "video_base64": f"data:video/mp4;base64,{video_base64}",
+                    "message": result.get("output", "Video generated successfully")
+                })
+            else:
+                return jsonify({"error": "Video file was not created"}), 500
+        else:
+            return jsonify({"error": result.get("output", "Video generation failed")}), 500
+
+    except Exception as e:
+        print(f"Error generating video: {e}")
+        traceback.print_exc()
+        return jsonify({"error": str(e)}), 500
+
+
+@app.route("/api/video_models", methods=["GET"])
+def get_video_models_api():
+    """
+    API endpoint to retrieve available video generation models.
+    """
+    video_models = [
+        # Google Veo via Gemini API (requires GEMINI_API_KEY)
+        {"value": "veo-3.1-generate-preview", "display_name": "Veo 3.1 | gemini", "provider": "gemini", "max_duration": 8},
+        {"value": "veo-3.1-fast-generate-preview", "display_name": "Veo 3.1 Fast | gemini", "provider": "gemini", "max_duration": 8},
+        {"value": "veo-2.0-generate-001", "display_name": "Veo 2 | gemini", "provider": "gemini", "max_duration": 8},
+        # Diffusers - damo-vilab/text-to-video-ms-1.7b (local)
+        {"value": "damo-vilab/text-to-video-ms-1.7b", "display_name": "ModelScope 1.7B (Local) | diffusers", "provider": "diffusers", "max_duration": 4},
+    ]
+    return jsonify({"models": video_models, "error": None})
 
 
 
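The new endpoints expose `gen_video` over HTTP and return the result both as a server-side path and a base64 data URL. A client sketch; the base URL is illustrative, and model values come straight from `/api/video_models`:

    import base64

    import requests

    BASE = "http://localhost:5337"  # illustrative address of a running `npcpy serve`

    models = requests.get(f"{BASE}/api/video_models").json()["models"]
    resp = requests.post(f"{BASE}/api/generate_video", json={
        "prompt": "a timelapse of clouds over mountains",
        "model": models[0]["value"],
        "provider": models[0]["provider"],
        "duration": 5,
    }).json()

    if resp.get("success"):
        b64 = resp["video_base64"].split(",", 1)[1]  # strip the "data:video/mp4;base64," header
        with open("out.mp4", "wb") as f:
            f.write(base64.b64decode(b64))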
@@ -6253,12 +6334,14 @@ def scan_gguf_models():
     """Scan for GGUF/GGML model files in specified or default directories."""
     directory = request.args.get('directory')
 
-    # Default directories to scan
+    # Default directories to scan (using platform-specific paths)
+    models_dir = get_models_dir()
     default_dirs = [
-        os.path.…
-…
+        os.path.join(models_dir, 'gguf'),
+        models_dir,
         os.path.expanduser('~/models'),
-        os.path.…
+        os.path.join(get_cache_dir(), 'huggingface/hub'),
+        os.path.expanduser('~/.cache/huggingface/hub'), # Fallback
     ]
 
     # Add env var directory if set
@@ -6300,7 +6383,8 @@ def download_hf_model():
     """Download a GGUF model from HuggingFace."""
     data = request.json
     url = data.get('url', '')
-…
+    default_target = os.path.join(get_models_dir(), 'gguf')
+    target_dir = data.get('target_dir', default_target)
 
     target_dir = os.path.expanduser(target_dir)
     os.makedirs(target_dir, exist_ok=True)
@@ -6466,7 +6550,8 @@ def download_hf_file():
     data = request.json
     repo_id = data.get('repo_id', '')
     filename = data.get('filename', '')
-…
+    default_target = os.path.join(get_models_dir(), 'gguf')
+    target_dir = data.get('target_dir', default_target)
 
     if not repo_id or not filename:
         return jsonify({'error': 'repo_id and filename are required'}), 400
{npcpy-1.3.17.dist-info → npcpy-1.3.19.dist-info}/RECORD
CHANGED

@@ -2,12 +2,12 @@ npcpy/__init__.py,sha256=uJcJGjR1mWvE69GySNAufkgiRwJA28zdObDBWaxp0tY,505
 npcpy/build_funcs.py,sha256=vOz6pjV0zS-kYKo0ux-pn9AcppVaR8KIDi2ldOxb3RQ,7479
 npcpy/llm_funcs.py,sha256=M7GSSjqpcO2kxh7G2sGRBU34lmdW7Imd5KxYqc1PiO0,75114
 npcpy/main.py,sha256=RWoRIj6VQLxKdOKvdVyaq2kwG35oRpeXPvp1CAAoG-w,81
-npcpy/ml_funcs.py,sha256=…
-npcpy/npc_array.py,sha256=…
-npcpy/npc_compiler.py,sha256=…
-npcpy/npc_sysenv.py,sha256=…
+npcpy/ml_funcs.py,sha256=smgeOLnjxGWjmDngE-bcA2ozXX_IzY_7_pS9h2iocEg,24249
+npcpy/npc_array.py,sha256=5qjaA9KjmJ_Zk_VxLrCyVrj73aDXpm3iJf0ngq1yIJk,45721
+npcpy/npc_compiler.py,sha256=6-SYOddpi2jTJ1KtbMxtNGC7ksaiuuLd59bmc0eiOUA,121619
+npcpy/npc_sysenv.py,sha256=1E2zwMj7aPrtRJuJSowGkvNApi07Vue3FhXsipi1XDs,45251
 npcpy/npcs.py,sha256=eExuVsbTfrRobTRRptRpDm46jCLWUgbvy4_U7IUQo-c,744
-npcpy/serve.py,sha256=…
+npcpy/serve.py,sha256=G8slQRslfOnGnSUD8GP1IOiqQhUWu6vF_azClvI7jLs,279284
 npcpy/tools.py,sha256=A5_oVmZkzGnI3BI-NmneuxeXQq-r29PbpAZP4nV4jrc,5303
 npcpy/data/__init__.py,sha256=1tcoChR-Hjn905JDLqaW9ElRmcISCTJdE7BGXPlym2Q,642
 npcpy/data/audio.py,sha256=o4auV8DQrAmZ4y84U3SofiwEuq5-ZBjGEZipQ9zPpGQ,22816
@@ -16,7 +16,7 @@ npcpy/data/image.py,sha256=UQcioNPDd5HYMLL_KStf45SuiIPXDcUY-dEFHwSWUeE,6564
 npcpy/data/load.py,sha256=rVe1xSHerIpo6MDaY5eIeqRSm0gssX5sHukNsUNVwJw,9228
 npcpy/data/text.py,sha256=jP0a1qZZaSJdK-LdZTn2Jjdxqmkd3efxDLEoxflJQeY,5010
 npcpy/data/video.py,sha256=H-V3mTu_ktD9u-QhYeo4aW3u9z0AtoAdRZmvRPEpE98,2887
-npcpy/data/web.py,sha256=…
+npcpy/data/web.py,sha256=cc4ikPZWMYsSz6itWI19ZkM_g5nQE3lPI-AJZEcoA04,5193
 npcpy/ft/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 npcpy/ft/diff.py,sha256=0ScRR4AxXtVX2bgZ-Jr_dSwv3LAlU1JXDUq4F4n1Ea4,12839
 npcpy/ft/ge.py,sha256=0VzIiXq2wCzGcK1x0Wd-myJ3xRf-FNaPg0GkHEZegUM,3552
@@ -34,7 +34,7 @@ npcpy/gen/response.py,sha256=EYsIOvNOmn6dBs-4j3SyZNMvDf5N9lW-QxMbpjnF7Kw,57081
 npcpy/gen/video_gen.py,sha256=RFi3Zcq_Hn3HIcfoF3mijQ6G7RYFZaM_9pjPTh-8E64,3239
 npcpy/gen/world_gen.py,sha256=_8ytE7E3QVQ5qiX8DmOby-xd0d9zV20rRI6Wkpf-qcY,18922
 npcpy/memory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-npcpy/memory/command_history.py,sha256=…
+npcpy/memory/command_history.py,sha256=AXHrtP4GKObSVtiWNGZJ16rUyJD5FLor4EuaDEvsku0,62707
 npcpy/memory/kg_vis.py,sha256=TrQQCRh_E7Pyr-GPAHLSsayubAfGyf4HOEFrPB6W86Q,31280
 npcpy/memory/knowledge_graph.py,sha256=X3qqlDcuzGUjRgQWleQzafGKgNw8QRz2ar2gYuCvUq8,48600
 npcpy/memory/memory_processor.py,sha256=6PfVnSBA9ag5EhHJinXoODfEPTlDDoaT0PtCCuZO6HI,2598
@@ -53,8 +53,8 @@ npcpy/work/browser.py,sha256=p2PeaoZdAXipFuAgKCCB3aXXLE_p3yIRqC87KlZKZWc,679
 npcpy/work/desktop.py,sha256=F3I8mUtJp6LAkXodsh8hGZIncoads6c_2Utty-0EdDA,2986
 npcpy/work/plan.py,sha256=QyUwg8vElWiHuoS-xK4jXTxxHvkMD3VkaCEsCmrEPQk,8300
 npcpy/work/trigger.py,sha256=P1Y8u1wQRsS2WACims_2IdkBEar-iBQix-2TDWoW0OM,9948
-npcpy-1.3.…
-npcpy-1.3.…
-npcpy-1.3.…
-npcpy-1.3.…
-npcpy-1.3.…
+npcpy-1.3.19.dist-info/licenses/LICENSE,sha256=j0YPvce7Ng9e32zYOu0EmXjXeJ0Nwawd0RA3uSGGH4E,1070
+npcpy-1.3.19.dist-info/METADATA,sha256=18bGYxznkKrwP59mZ_06rQfMFQBe_hdAjB_VbHyZBYY,37870
+npcpy-1.3.19.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
+npcpy-1.3.19.dist-info/top_level.txt,sha256=g1pbSvrOOncB74Bg5-J0Olg4V0A5VzDw-Xz5YObq8BU,6
+npcpy-1.3.19.dist-info/RECORD,,
{npcpy-1.3.17.dist-info → npcpy-1.3.19.dist-info}/licenses/LICENSE
File without changes

{npcpy-1.3.17.dist-info → npcpy-1.3.19.dist-info}/top_level.txt
File without changes