npcpy 1.0.26__py3-none-any.whl → 1.2.32__py3-none-any.whl
This diff compares publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
- npcpy/__init__.py +0 -7
- npcpy/data/audio.py +16 -99
- npcpy/data/image.py +43 -42
- npcpy/data/load.py +83 -124
- npcpy/data/text.py +28 -28
- npcpy/data/video.py +8 -32
- npcpy/data/web.py +51 -23
- npcpy/ft/diff.py +110 -0
- npcpy/ft/ge.py +115 -0
- npcpy/ft/memory_trainer.py +171 -0
- npcpy/ft/model_ensembler.py +357 -0
- npcpy/ft/rl.py +360 -0
- npcpy/ft/sft.py +248 -0
- npcpy/ft/usft.py +128 -0
- npcpy/gen/audio_gen.py +24 -0
- npcpy/gen/embeddings.py +13 -13
- npcpy/gen/image_gen.py +262 -117
- npcpy/gen/response.py +615 -415
- npcpy/gen/video_gen.py +53 -7
- npcpy/llm_funcs.py +1869 -437
- npcpy/main.py +1 -1
- npcpy/memory/command_history.py +844 -510
- npcpy/memory/kg_vis.py +833 -0
- npcpy/memory/knowledge_graph.py +892 -1845
- npcpy/memory/memory_processor.py +81 -0
- npcpy/memory/search.py +188 -90
- npcpy/mix/debate.py +192 -3
- npcpy/npc_compiler.py +1672 -801
- npcpy/npc_sysenv.py +593 -1266
- npcpy/serve.py +3120 -0
- npcpy/sql/ai_function_tools.py +257 -0
- npcpy/sql/database_ai_adapters.py +186 -0
- npcpy/sql/database_ai_functions.py +163 -0
- npcpy/sql/model_runner.py +19 -19
- npcpy/sql/npcsql.py +706 -507
- npcpy/sql/sql_model_compiler.py +156 -0
- npcpy/tools.py +183 -0
- npcpy/work/plan.py +13 -279
- npcpy/work/trigger.py +3 -3
- npcpy-1.2.32.dist-info/METADATA +803 -0
- npcpy-1.2.32.dist-info/RECORD +54 -0
- npcpy/data/dataframes.py +0 -171
- npcpy/memory/deep_research.py +0 -125
- npcpy/memory/sleep.py +0 -557
- npcpy/modes/_state.py +0 -78
- npcpy/modes/alicanto.py +0 -1075
- npcpy/modes/guac.py +0 -785
- npcpy/modes/mcp_npcsh.py +0 -822
- npcpy/modes/npc.py +0 -213
- npcpy/modes/npcsh.py +0 -1158
- npcpy/modes/plonk.py +0 -409
- npcpy/modes/pti.py +0 -234
- npcpy/modes/serve.py +0 -1637
- npcpy/modes/spool.py +0 -312
- npcpy/modes/wander.py +0 -549
- npcpy/modes/yap.py +0 -572
- npcpy/npc_team/alicanto.npc +0 -2
- npcpy/npc_team/alicanto.png +0 -0
- npcpy/npc_team/assembly_lines/test_pipeline.py +0 -181
- npcpy/npc_team/corca.npc +0 -13
- npcpy/npc_team/foreman.npc +0 -7
- npcpy/npc_team/frederic.npc +0 -6
- npcpy/npc_team/frederic4.png +0 -0
- npcpy/npc_team/guac.png +0 -0
- npcpy/npc_team/jinxs/automator.jinx +0 -18
- npcpy/npc_team/jinxs/bash_executer.jinx +0 -31
- npcpy/npc_team/jinxs/calculator.jinx +0 -11
- npcpy/npc_team/jinxs/edit_file.jinx +0 -96
- npcpy/npc_team/jinxs/file_chat.jinx +0 -14
- npcpy/npc_team/jinxs/gui_controller.jinx +0 -28
- npcpy/npc_team/jinxs/image_generation.jinx +0 -29
- npcpy/npc_team/jinxs/internet_search.jinx +0 -30
- npcpy/npc_team/jinxs/local_search.jinx +0 -152
- npcpy/npc_team/jinxs/npcsh_executor.jinx +0 -31
- npcpy/npc_team/jinxs/python_executor.jinx +0 -8
- npcpy/npc_team/jinxs/screen_cap.jinx +0 -25
- npcpy/npc_team/jinxs/sql_executor.jinx +0 -33
- npcpy/npc_team/kadiefa.npc +0 -3
- npcpy/npc_team/kadiefa.png +0 -0
- npcpy/npc_team/npcsh.ctx +0 -9
- npcpy/npc_team/npcsh_sibiji.png +0 -0
- npcpy/npc_team/plonk.npc +0 -2
- npcpy/npc_team/plonk.png +0 -0
- npcpy/npc_team/plonkjr.npc +0 -2
- npcpy/npc_team/plonkjr.png +0 -0
- npcpy/npc_team/sibiji.npc +0 -5
- npcpy/npc_team/sibiji.png +0 -0
- npcpy/npc_team/spool.png +0 -0
- npcpy/npc_team/templates/analytics/celona.npc +0 -0
- npcpy/npc_team/templates/hr_support/raone.npc +0 -0
- npcpy/npc_team/templates/humanities/eriane.npc +0 -4
- npcpy/npc_team/templates/it_support/lineru.npc +0 -0
- npcpy/npc_team/templates/marketing/slean.npc +0 -4
- npcpy/npc_team/templates/philosophy/maurawa.npc +0 -0
- npcpy/npc_team/templates/sales/turnic.npc +0 -4
- npcpy/npc_team/templates/software/welxor.npc +0 -0
- npcpy/npc_team/yap.png +0 -0
- npcpy/routes.py +0 -958
- npcpy/work/mcp_helpers.py +0 -357
- npcpy/work/mcp_server.py +0 -194
- npcpy-1.0.26.data/data/npcpy/npc_team/alicanto.npc +0 -2
- npcpy-1.0.26.data/data/npcpy/npc_team/alicanto.png +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/automator.jinx +0 -18
- npcpy-1.0.26.data/data/npcpy/npc_team/bash_executer.jinx +0 -31
- npcpy-1.0.26.data/data/npcpy/npc_team/calculator.jinx +0 -11
- npcpy-1.0.26.data/data/npcpy/npc_team/celona.npc +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/corca.npc +0 -13
- npcpy-1.0.26.data/data/npcpy/npc_team/edit_file.jinx +0 -96
- npcpy-1.0.26.data/data/npcpy/npc_team/eriane.npc +0 -4
- npcpy-1.0.26.data/data/npcpy/npc_team/file_chat.jinx +0 -14
- npcpy-1.0.26.data/data/npcpy/npc_team/foreman.npc +0 -7
- npcpy-1.0.26.data/data/npcpy/npc_team/frederic.npc +0 -6
- npcpy-1.0.26.data/data/npcpy/npc_team/frederic4.png +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/guac.png +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/gui_controller.jinx +0 -28
- npcpy-1.0.26.data/data/npcpy/npc_team/image_generation.jinx +0 -29
- npcpy-1.0.26.data/data/npcpy/npc_team/internet_search.jinx +0 -30
- npcpy-1.0.26.data/data/npcpy/npc_team/kadiefa.npc +0 -3
- npcpy-1.0.26.data/data/npcpy/npc_team/kadiefa.png +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/lineru.npc +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/local_search.jinx +0 -152
- npcpy-1.0.26.data/data/npcpy/npc_team/maurawa.npc +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/npcsh.ctx +0 -9
- npcpy-1.0.26.data/data/npcpy/npc_team/npcsh_executor.jinx +0 -31
- npcpy-1.0.26.data/data/npcpy/npc_team/npcsh_sibiji.png +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/plonk.npc +0 -2
- npcpy-1.0.26.data/data/npcpy/npc_team/plonk.png +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/plonkjr.npc +0 -2
- npcpy-1.0.26.data/data/npcpy/npc_team/plonkjr.png +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/python_executor.jinx +0 -8
- npcpy-1.0.26.data/data/npcpy/npc_team/raone.npc +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/screen_cap.jinx +0 -25
- npcpy-1.0.26.data/data/npcpy/npc_team/sibiji.npc +0 -5
- npcpy-1.0.26.data/data/npcpy/npc_team/sibiji.png +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/slean.npc +0 -4
- npcpy-1.0.26.data/data/npcpy/npc_team/spool.png +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/sql_executor.jinx +0 -33
- npcpy-1.0.26.data/data/npcpy/npc_team/test_pipeline.py +0 -181
- npcpy-1.0.26.data/data/npcpy/npc_team/turnic.npc +0 -4
- npcpy-1.0.26.data/data/npcpy/npc_team/welxor.npc +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/yap.png +0 -0
- npcpy-1.0.26.dist-info/METADATA +0 -827
- npcpy-1.0.26.dist-info/RECORD +0 -139
- npcpy-1.0.26.dist-info/entry_points.txt +0 -11
- npcpy/{modes → ft}/__init__.py +0 -0
- {npcpy-1.0.26.dist-info → npcpy-1.2.32.dist-info}/WHEEL +0 -0
- {npcpy-1.0.26.dist-info → npcpy-1.2.32.dist-info}/licenses/LICENSE +0 -0
- {npcpy-1.0.26.dist-info → npcpy-1.2.32.dist-info}/top_level.txt +0 -0
npcpy/sql/sql_model_compiler.py
ADDED

@@ -0,0 +1,156 @@
+import os
+import yaml
+import sqlalchemy
+import pandas as pd
+from typing import Dict, Any, Optional
+
+class SQLModelCompiler:
+    """
+    Compile and execute SQL models across different database engines
+    """
+    def __init__(
+        self,
+        models_dir: str,
+        engine: Optional[sqlalchemy.engine.base.Engine] = None,
+        engine_type: str = 'sqlite'
+    ):
+        """
+        Initialize SQL Model Compiler
+
+        :param models_dir: Directory containing SQL model files
+        :param engine: SQLAlchemy database engine
+        :param engine_type: Type of database engine (sqlite, snowflake, bigquery, etc.)
+        """
+        self.models_dir = models_dir
+        self.engine = engine
+        self.engine_type = engine_type.lower()
+        self.models = {}
+
+        # Discover models
+        self._discover_models()
+
+    def _discover_models(self):
+        """
+        Discover and load SQL model files
+        """
+        for filename in os.listdir(self.models_dir):
+            if filename.endswith('.sql'):
+                model_name = os.path.splitext(filename)[0]
+                model_path = os.path.join(self.models_dir, filename)
+
+                with open(model_path, 'r') as f:
+                    model_content = f.read()
+
+                self.models[model_name] = {
+                    'name': model_name,
+                    'content': model_content,
+                    'path': model_path
+                }
+
+    def _compile_model(self, model_name: str) -> str:
+        """
+        Compile a SQL model for the specific engine
+
+        :param model_name: Name of the model to compile
+        :return: Compiled SQL query
+        """
+        model = self.models[model_name]
+        content = model['content']
+
+        # Engine-specific compilation
+        if self.engine_type == 'snowflake':
+            # Snowflake-specific transformations
+            content = content.replace('{{', 'SNOWFLAKE.').replace('}}', '')
+        elif self.engine_type == 'bigquery':
+            # BigQuery-specific transformations
+            content = content.replace('{{', 'ML.').replace('}}', '')
+
+        return content
+
+    def execute_model(
+        self,
+        model_name: str,
+        seed_data: Optional[Dict[str, pd.DataFrame]] = None
+    ) -> pd.DataFrame:
+        """
+        Execute a SQL model
+
+        :param model_name: Name of the model to execute
+        :param seed_data: Optional seed data for the model
+        :return: Result DataFrame
+        """
+        if model_name not in self.models:
+            raise ValueError(f"Model {model_name} not found")
+
+        # Compile model for specific engine
+        compiled_sql = self._compile_model(model_name)
+
+        # If seed data is provided, prepare the database
+        if seed_data and self.engine:
+            for table_name, df in seed_data.items():
+                df.to_sql(table_name, self.engine, if_exists='replace', index=False)
+
+        # Execute the model
+        if self.engine:
+            return pd.read_sql(compiled_sql, self.engine)
+        else:
+            # Fallback to pandas evaluation
+            import sqlite3
+
+            # Create an in-memory SQLite database for evaluation
+            conn = sqlite3.connect(':memory:')
+
+            # Load seed data if available
+            if seed_data:
+                for table_name, df in seed_data.items():
+                    df.to_sql(table_name, conn, if_exists='replace', index=False)
+
+            return pd.read_sql(compiled_sql, conn)
+
+    def run_all_models(self, seed_data: Optional[Dict[str, pd.DataFrame]] = None):
+        """
+        Run all discovered models
+
+        :param seed_data: Optional seed data for models
+        :return: Dictionary of model results
+        """
+        results = {}
+        for model_name in self.models:
+            results[model_name] = self.execute_model(model_name, seed_data)
+        return results
+
+# Example usage in a pipeline
+def create_model_compiler(
+    models_dir: str,
+    engine_type: str = 'sqlite',
+    connection_params: Optional[Dict[str, Any]] = None
+) -> SQLModelCompiler:
+    """
+    Create a SQL Model Compiler with the specified engine
+
+    :param models_dir: Directory containing SQL model files
+    :param engine_type: Type of database engine
+    :param connection_params: Connection parameters for the database
+    :return: SQLModelCompiler instance
+    """
+    if engine_type == 'snowflake':
+        from sqlalchemy.dialects.snowflake import base
+        engine = sqlalchemy.create_engine(
+            f"snowflake://{connection_params['username']}:{connection_params['password']}@"
+            f"{connection_params['account']}/{connection_params['database']}/{connection_params['schema']}"
+        )
+    elif engine_type == 'bigquery':
+        from google.cloud import bigquery
+        from sqlalchemy.dialects.bigquery import base
+        engine = sqlalchemy.create_engine(
+            f"bigquery://{connection_params['project_id']}"
+        )
+    else:
+        # Default to SQLite
+        engine = sqlalchemy.create_engine('sqlite:///models.db')
+
+    return SQLModelCompiler(
+        models_dir=models_dir,
+        engine=engine,
+        engine_type=engine_type
+    )
npcpy/tools.py
ADDED
@@ -0,0 +1,183 @@
+"""
+Tool utilities for automatic schema generation from Python functions.
+"""
+
+import inspect
+import json
+from typing import Any, Dict, List, Callable, Union, get_type_hints, get_origin, get_args
+
+from docstring_parser import parse as parse_docstring
+
+
+def python_type_to_json_schema(py_type: type) -> Dict[str, Any]:
+    """Convert Python type hints to JSON schema types."""
+
+    if get_origin(py_type) is Union:
+        args = get_args(py_type)
+
+        if len(args) == 2 and type(None) in args:
+            non_none_type = args[0] if args[1] is type(None) else args[1]
+            return python_type_to_json_schema(non_none_type)
+
+        return python_type_to_json_schema(args[0])
+
+    if get_origin(py_type) is list:
+        item_type = get_args(py_type)[0] if get_args(py_type) else str
+        return {
+            "type": "array",
+            "items": python_type_to_json_schema(item_type)
+        }
+
+    if get_origin(py_type) is dict:
+        return {"type": "object"}
+
+    type_mapping = {
+        str: {"type": "string"},
+        int: {"type": "integer"},
+        float: {"type": "number"},
+        bool: {"type": "boolean"},
+        list: {"type": "array"},
+        dict: {"type": "object"},
+    }
+
+    return type_mapping.get(py_type, {"type": "string"})
+
+
+def extract_function_info(func: Callable) -> Dict[str, Any]:
+    """Extract function information including name, description, and parameters."""
+
+    sig = inspect.signature(func)
+
+    try:
+        type_hints = get_type_hints(func)
+    except Exception:
+        type_hints = {}
+
+    docstring = inspect.getdoc(func)
+    parsed_doc = None
+    if docstring:
+        try:
+            parsed_doc = parse_docstring(docstring)
+        except Exception:
+            pass
+
+    func_name = func.__name__
+    description = ""
+
+    if parsed_doc and hasattr(parsed_doc, 'short_description') and parsed_doc.short_description:
+        description = parsed_doc.short_description
+        if hasattr(parsed_doc, 'long_description') and parsed_doc.long_description:
+            description += f". {parsed_doc.long_description}"
+    elif docstring:
+        description = docstring.split('\n')[0].strip()
+
+    properties = {}
+    required = []
+    param_descriptions = {}
+
+    if parsed_doc and hasattr(parsed_doc, 'params'):
+        for param in parsed_doc.params:
+            param_descriptions[param.arg_name] = param.description or ""
+
+    for param_name, param in sig.parameters.items():
+        if param_name == 'self':
+            continue
+
+        param_type = type_hints.get(param_name, str)
+
+        param_schema = python_type_to_json_schema(param_type)
+
+        if param_name in param_descriptions:
+            param_schema["description"] = param_descriptions[param_name]
+        else:
+            param_schema["description"] = f"The {param_name} parameter"
+
+        properties[param_name] = param_schema
+
+        if param.default is inspect.Parameter.empty:
+            required.append(param_name)
+
+    return {
+        "name": func_name,
+        "description": description or f"Call the {func_name} function",
+        "parameters": {
+            "type": "object",
+            "properties": properties,
+            "required": required
+        }
+    }
+
+
+def create_tool_schema(functions: List[Callable]) -> List[Dict[str, Any]]:
+    """Create OpenAI-style tool schema from a list of functions."""
+    schema = []
+
+    for func in functions:
+        func_info = extract_function_info(func)
+        schema.append({
+            "type": "function",
+            "function": func_info
+        })
+
+    return schema
+
+
+def create_tool_map(functions: List[Callable]) -> Dict[str, Callable]:
+    """Create a tool map from a list of functions."""
+    return {func.__name__: func for func in functions}
+
+
+def auto_tools(functions: List[Callable]) -> tuple[List[Dict[str, Any]], Dict[str, Callable]]:
+    """
+    Automatically create both tool schema and tool map from functions.
+
+    Args:
+        functions: List of Python functions to convert to tools
+
+    Returns:
+        Tuple of (tools_schema, tool_map)
+
+    Example:
+        ```python
+        def get_weather(location: str) -> str:
+            '''Get weather information for a location'''
+            return f"The weather in {location} is sunny and 75°F"
+
+        def calculate_math(expression: str) -> str:
+            '''Calculate a mathematical expression'''
+            try:
+                result = eval(expression)
+                return f"The result of {expression} is {result}"
+            except:
+                return "Invalid mathematical expression"
+
+        tools_schema, tool_map = auto_tools([get_weather, calculate_math])
+
+        response = get_llm_response(
+            "What's the weather in Paris and what's 15 * 23?",
+            model='gpt-4o-mini',
+            provider='openai',
+            tools=tools_schema,
+            tool_map=tool_map
+        )
+        ```
+    """
+    schema = create_tool_schema(functions)
+    tool_map = create_tool_map(functions)
+    return schema, tool_map
npcpy/work/plan.py
CHANGED
@@ -1,12 +1,6 @@
-
-import time
+
 import platform
-try:
-    import pyautogui
-except KeyError as e:
-    print(f"Could not load pyautogui due to the following error: {e}")
 
-from npcpy.data.image import capture_screenshot
 from npcpy.llm_funcs import get_llm_response
 
 import subprocess
@@ -16,7 +10,8 @@ import tempfile
 from typing import Any
 
 def execute_plan_command(
-    command,
+    command,
+    **kwargs,
 ):
     parts = command.split(maxsplit=1)
     if len(parts) < 2:
@@ -28,22 +23,22 @@ def execute_plan_command(
     request = parts[1]
     platform_system = platform.system()
 
-
+
     jobs_dir = os.path.expanduser("~/.npcsh/jobs")
     logs_dir = os.path.expanduser("~/.npcsh/logs")
     os.makedirs(jobs_dir, exist_ok=True)
     os.makedirs(logs_dir, exist_ok=True)
 
-
+
     linux_request = f"""Convert this scheduling request into a crontab-based script:
 Request: {request}
 
 """
 
-
+
     linux_prompt_static = """Example for "record CPU usage every 10 minutes":
 {
-    "script": "
+    "script": "
 set -euo pipefail
 IFS=$'\\n\\t'
 
@@ -84,7 +79,7 @@ record_cpu",
 
     mac_prompt_static = """Example for "record CPU usage every 10 minutes":
 {
-    "script": "
+    "script": "
 set -euo pipefail
 IFS=$'\\n\\t'
 
@@ -167,7 +162,7 @@ Get-CpuUsage",
 
     prompt = prompts[platform_system]
     response = get_llm_response(
-        prompt,
+        prompt, format="json", **kwargs
     )
     schedule_info = response.get("response")
     print("Received schedule info:", schedule_info)
@@ -181,7 +176,7 @@ Get-CpuUsage",
 
     log_path = os.path.join(logs_dir, f"{job_name}.log")
 
-
+
     with open(script_path, "w") as f:
         f.write(schedule_info["script"])
     os.chmod(script_path, 0o755)
@@ -249,7 +244,7 @@ Get-CpuUsage",
     elif platform_system == "Windows":
         task_name = f"NPCSH_{job_name}"
 
-
+
         schedule_params = schedule_info["schedule"].split()
 
         cmd = (
@@ -263,7 +258,7 @@ Get-CpuUsage",
             ]
             + schedule_params
            + ["/f"]
-        )
+        )
 
         subprocess.run(cmd, check=True)
 
@@ -274,268 +269,7 @@ Get-CpuUsage",
 - Log: {log_path}
 - Task name: {task_name}"""
 
-    return {"messages": messages, "output": output}
-
-
-
-def execute_plan_command(
-    command, npc=None, model=None, provider=None, messages=None, api_url=None
-):
-    parts = command.split(maxsplit=1)
-    if len(parts) < 2:
-        return {
-            "messages": messages,
-            "output": "Usage: /plan <command and schedule description>",
-        }
-
-    request = parts[1]
-    platform_system = platform.system()
-
-    # Create standard directories
-    jobs_dir = os.path.expanduser("~/.npcsh/jobs")
-    logs_dir = os.path.expanduser("~/.npcsh/logs")
-    os.makedirs(jobs_dir, exist_ok=True)
-    os.makedirs(logs_dir, exist_ok=True)
-
-    # First part - just the request formatting
-    linux_request = f"""Convert this scheduling request into a crontab-based script:
-Request: {request}
-
-"""
-
-    # Second part - the static prompt with examples and requirements
-    linux_prompt_static = """Example for "record CPU usage every 10 minutes":
-{
-    "script": "#!/bin/bash
-set -euo pipefail
-IFS=$'\\n\\t'
-
-LOGFILE=\"$HOME/.npcsh/logs/cpu_usage.log\"
-
-log_info() {
-    echo \"[$(date '+%Y-%m-%d %H:%M:%S')] [INFO] $*\" >> \"$LOGFILE\"
-}
-
-log_error() {
-    echo \"[$(date '+%Y-%m-%d %H:%M:%S')] [ERROR] $*\" >> \"$LOGFILE\"
-}
-
-record_cpu() {
-    local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
-    local cpu_usage=$(top -bn1 | grep 'Cpu(s)' | awk '{print $2}')
-    log_info \"CPU Usage: $cpu_usage%\"
-}
-
-record_cpu",
-    "schedule": "*/10 * * * *",
-    "description": "Record CPU usage every 10 minutes",
-    "name": "record_cpu_usage"
-}
+    return {"messages": kwargs.get('messages'), "output": output}
 
-Your response must be valid json with the following keys:
-- script: The shell script content with proper functions and error handling. special characters must be escaped to ensure python json.loads will work correctly.
-- schedule: Crontab expression (5 fields: minute hour day month weekday)
-- description: A human readable description
-- name: A unique name for the job
-
-Do not include any additional markdown formatting in your response or leading ```json tags."""
-
-    mac_request = f"""Convert this scheduling request into a launchd-compatible script:
-Request: {request}
-
-"""
-
-    mac_prompt_static = """Example for "record CPU usage every 10 minutes":
-{
-    "script": "#!/bin/bash
-set -euo pipefail
-IFS=$'\\n\\t'
-
-LOGFILE=\"$HOME/.npcsh/logs/cpu_usage.log\"
-
-log_info() {
-    echo \"[$(date '+%Y-%m-%d %H:%M:%S')] [INFO] $*\" >> \"$LOGFILE\"
-}
-
-log_error() {
-    echo \"[$(date '+%Y-%m-%d %H:%M:%S')] [ERROR] $*\" >> \"$LOGFILE\"
-}
-
-record_cpu() {
-    local timestamp=$(date '+%Y-%m-%d %H:%M:%S')
-    local cpu_usage=$(top -l 1 | grep 'CPU usage' | awk '{print $3}' | tr -d '%')
-    log_info \"CPU Usage: $cpu_usage%\"
-}
-
-record_cpu",
-    "schedule": "600",
-    "description": "Record CPU usage every 10 minutes",
-    "name": "record_cpu_usage"
-}
-
-Your response must be valid json with the following keys:
-- script: The shell script content with proper functions and error handling. special characters must be escaped to ensure python json.loads will work correctly.
-- schedule: Interval in seconds (e.g. 600 for 10 minutes)
-- description: A human readable description
-- name: A unique name for the job
-
-Do not include any additional markdown formatting in your response or leading ```json tags."""
-
-    windows_request = f"""Convert this scheduling request into a PowerShell script with Task Scheduler parameters:
-Request: {request}
-
-"""
-
-    windows_prompt_static = """Example for "record CPU usage every 10 minutes":
-{
-    "script": "$ErrorActionPreference = 'Stop'
-
-$LogFile = \"$HOME\\.npcsh\\logs\\cpu_usage.log\"
-
-function Write-Log {
-    param($Message, $Type = 'INFO')
-    $timestamp = Get-Date -Format 'yyyy-MM-dd HH:mm:ss'
-    \"[$timestamp] [$Type] $Message\" | Out-File -FilePath $LogFile -Append
-}
-
-function Get-CpuUsage {
-    try {
-        $cpu = (Get-Counter '\\Processor(_Total)\\% Processor Time').CounterSamples.CookedValue
-        Write-Log \"CPU Usage: $($cpu)%\"
-    } catch {
-        Write-Log $_.Exception.Message 'ERROR'
-        throw
-    }
-}
-
-Get-CpuUsage",
-    "schedule": "/sc minute /mo 10",
-    "description": "Record CPU usage every 10 minutes",
-    "name": "record_cpu_usage"
-}
-
-Your response must be valid json with the following keys:
-- script: The PowerShell script content with proper functions and error handling. special characters must be escaped to ensure python json.loads will work correctly.
-- schedule: Task Scheduler parameters (e.g. /sc minute /mo 10)
-- description: A human readable description
-- name: A unique name for the job
-
-Do not include any additional markdown formatting in your response or leading ```json tags."""
-
-    prompts = {
-        "Linux": linux_request + linux_prompt_static,
-        "Darwin": mac_request + mac_prompt_static,
-        "Windows": windows_request + windows_prompt_static,
-    }
-
-    prompt = prompts[platform_system]
-    response = get_llm_response(
-        prompt, npc=npc, model=model, provider=provider, format="json"
-    )
-    schedule_info = response.get("response")
-    print("Received schedule info:", schedule_info)
-
-    job_name = f"job_{schedule_info['name']}"
-
-    if platform_system == "Windows":
-        script_path = os.path.join(jobs_dir, f"{job_name}.ps1")
-    else:
-        script_path = os.path.join(jobs_dir, f"{job_name}.sh")
-
-    log_path = os.path.join(logs_dir, f"{job_name}.log")
-
-    # Write the script
-    with open(script_path, "w") as f:
-        f.write(schedule_info["script"])
-    os.chmod(script_path, 0o755)
-
-    if platform_system == "Linux":
-        try:
-            current_crontab = subprocess.check_output(["crontab", "-l"], text=True)
-        except subprocess.CalledProcessError:
-            current_crontab = ""
-
-        crontab_line = f"{schedule_info['schedule']} {script_path} >> {log_path} 2>&1"
-        new_crontab = current_crontab.strip() + "\n" + crontab_line + "\n"
-
-        with tempfile.NamedTemporaryFile(mode="w") as tmp:
-            tmp.write(new_crontab)
-            tmp.flush()
-            subprocess.run(["crontab", tmp.name], check=True)
-
-        output = f"""Job created successfully:
-- Description: {schedule_info['description']}
-- Schedule: {schedule_info['schedule']}
-- Script: {script_path}
-- Log: {log_path}
-- Crontab entry: {crontab_line}"""
-
-    elif platform_system == "Darwin":
-        plist_dir = os.path.expanduser("~/Library/LaunchAgents")
-        os.makedirs(plist_dir, exist_ok=True)
-        plist_path = os.path.join(plist_dir, f"com.npcsh.{job_name}.plist")
-
-        plist_content = f"""<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
-<plist version="1.0">
-<dict>
-    <key>Label</key>
-    <string>com.npcsh.{job_name}</string>
-    <key>ProgramArguments</key>
-    <array>
-        <string>{script_path}</string>
-    </array>
-    <key>StartInterval</key>
-    <integer>{schedule_info['schedule']}</integer>
-    <key>StandardOutPath</key>
-    <string>{log_path}</string>
-    <key>StandardErrorPath</key>
-    <string>{log_path}</string>
-    <key>RunAtLoad</key>
-    <true/>
-</dict>
-</plist>"""
-
-        with open(plist_path, "w") as f:
-            f.write(plist_content)
-
-        subprocess.run(["launchctl", "unload", plist_path], check=False)
-        subprocess.run(["launchctl", "load", plist_path], check=True)
-
-        output = f"""Job created successfully:
-- Description: {schedule_info['description']}
-- Schedule: Every {schedule_info['schedule']} seconds
-- Script: {script_path}
-- Log: {log_path}
-- Launchd plist: {plist_path}"""
-
-    elif platform_system == "Windows":
-        task_name = f"NPCSH_{job_name}"
-
-        # Parse schedule_info['schedule'] into individual parameters
-        schedule_params = schedule_info["schedule"].split()
-
-        cmd = (
-            [
-                "schtasks",
-                "/create",
-                "/tn",
-                task_name,
-                "/tr",
-                f"powershell -NoProfile -ExecutionPolicy Bypass -File {script_path}",
-            ]
-            + schedule_params
-            + ["/f"]
-        )  # /f forces creation if task exists
-
-        subprocess.run(cmd, check=True)
-
-        output = f"""Job created successfully:
-- Description: {schedule_info['description']}
-- Schedule: {schedule_info['schedule']}
-- Script: {script_path}
-- Log: {log_path}
-- Task name: {task_name}"""
 
-    return {"messages": messages, "output": output}
 
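
The net effect of this change is that the duplicate second definition of `execute_plan_command` is removed and the surviving one takes only the command string plus `**kwargs`, which are forwarded (together with `format="json"`) to `get_llm_response`; the return payload reads `messages` back out of `kwargs`. A sketch of the new call shape, with illustrative model and provider values:

```python
from npcpy.work.plan import execute_plan_command

# npc/model/provider/messages are no longer named parameters; everything
# in **kwargs is passed through to get_llm_response, and "messages" is
# read back out of kwargs for the return value.
result = execute_plan_command(
    "/plan record CPU usage every 10 minutes",
    model="gpt-4o-mini",   # illustrative
    provider="openai",     # illustrative
    messages=[],
)
print(result["output"])
```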