npcpy 1.1.28__py3-none-any.whl → 1.2.32__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- npcpy/data/audio.py +16 -38
- npcpy/data/image.py +29 -29
- npcpy/data/load.py +4 -3
- npcpy/data/text.py +28 -28
- npcpy/data/video.py +6 -6
- npcpy/data/web.py +49 -21
- npcpy/ft/__init__.py +0 -0
- npcpy/ft/diff.py +110 -0
- npcpy/ft/ge.py +115 -0
- npcpy/ft/memory_trainer.py +171 -0
- npcpy/ft/model_ensembler.py +357 -0
- npcpy/ft/rl.py +360 -0
- npcpy/ft/sft.py +248 -0
- npcpy/ft/usft.py +128 -0
- npcpy/gen/audio_gen.py +24 -0
- npcpy/gen/embeddings.py +13 -13
- npcpy/gen/image_gen.py +37 -15
- npcpy/gen/response.py +287 -111
- npcpy/gen/video_gen.py +10 -9
- npcpy/llm_funcs.py +447 -79
- npcpy/memory/command_history.py +201 -48
- npcpy/memory/kg_vis.py +74 -74
- npcpy/memory/knowledge_graph.py +482 -115
- npcpy/memory/memory_processor.py +81 -0
- npcpy/memory/search.py +70 -70
- npcpy/mix/debate.py +192 -3
- npcpy/npc_compiler.py +1541 -879
- npcpy/npc_sysenv.py +250 -78
- npcpy/serve.py +1036 -321
- npcpy/sql/ai_function_tools.py +257 -0
- npcpy/sql/database_ai_adapters.py +186 -0
- npcpy/sql/database_ai_functions.py +163 -0
- npcpy/sql/model_runner.py +19 -19
- npcpy/sql/npcsql.py +706 -507
- npcpy/sql/sql_model_compiler.py +156 -0
- npcpy/tools.py +20 -20
- npcpy/work/plan.py +8 -8
- npcpy/work/trigger.py +3 -3
- {npcpy-1.1.28.dist-info → npcpy-1.2.32.dist-info}/METADATA +169 -9
- npcpy-1.2.32.dist-info/RECORD +54 -0
- npcpy-1.1.28.dist-info/RECORD +0 -40
- {npcpy-1.1.28.dist-info → npcpy-1.2.32.dist-info}/WHEEL +0 -0
- {npcpy-1.1.28.dist-info → npcpy-1.2.32.dist-info}/licenses/LICENSE +0 -0
- {npcpy-1.1.28.dist-info → npcpy-1.2.32.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,156 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import yaml
|
|
3
|
+
import sqlalchemy
|
|
4
|
+
import pandas as pd
|
|
5
|
+
from typing import Dict, Any, Optional
|
|
6
|
+
|
|
7
|
+
class SQLModelCompiler:
    """
    Compile and execute SQL models across different database engines.

    Models are plain ``.sql`` files discovered from ``models_dir``; each is
    keyed by its filename stem and can be executed against the configured
    SQLAlchemy engine, or against a throwaway in-memory SQLite database when
    no engine is provided.
    """

    def __init__(
        self,
        models_dir: str,
        engine: Optional[sqlalchemy.engine.base.Engine] = None,
        engine_type: str = 'sqlite'
    ):
        """
        Initialize SQL Model Compiler.

        :param models_dir: Directory containing SQL model files
        :param engine: SQLAlchemy database engine
        :param engine_type: Type of database engine (sqlite, snowflake, bigquery, etc.)
        """
        self.models_dir = models_dir
        self.engine = engine
        self.engine_type = engine_type.lower()
        self.models: Dict[str, Dict[str, Any]] = {}

        # Discover eagerly so callers can inspect self.models immediately.
        self._discover_models()

    def _discover_models(self):
        """
        Discover and load SQL model files from ``models_dir``.

        Each ``*.sql`` file becomes an entry in ``self.models`` keyed by the
        filename without its extension.
        """
        for filename in os.listdir(self.models_dir):
            if filename.endswith('.sql'):
                model_name = os.path.splitext(filename)[0]
                model_path = os.path.join(self.models_dir, filename)

                with open(model_path, 'r') as f:
                    model_content = f.read()

                self.models[model_name] = {
                    'name': model_name,
                    'content': model_content,
                    'path': model_path
                }

    def _compile_model(self, model_name: str) -> str:
        """
        Compile a SQL model for the specific engine.

        :param model_name: Name of the model to compile
        :return: Compiled SQL query
        """
        model = self.models[model_name]
        content = model['content']

        # Engine-specific compilation of '{{ ... }}' placeholders.
        # NOTE(review): this is a naive textual rewrite; it assumes the
        # template markers only wrap engine function names -- confirm
        # against the actual model files.
        if self.engine_type == 'snowflake':
            content = content.replace('{{', 'SNOWFLAKE.').replace('}}', '')
        elif self.engine_type == 'bigquery':
            content = content.replace('{{', 'ML.').replace('}}', '')

        return content

    def execute_model(
        self,
        model_name: str,
        seed_data: Optional[Dict[str, pd.DataFrame]] = None
    ) -> pd.DataFrame:
        """
        Execute a SQL model and return its result.

        :param model_name: Name of the model to execute
        :param seed_data: Optional mapping of table name -> DataFrame loaded
            into the database (replacing existing tables) before execution
        :return: Result DataFrame
        :raises ValueError: If ``model_name`` was not discovered
        """
        if model_name not in self.models:
            raise ValueError(f"Model {model_name} not found")

        # Compile model for the specific engine.
        compiled_sql = self._compile_model(model_name)

        if self.engine:
            # Seed the configured database, then run the query against it.
            if seed_data:
                for table_name, df in seed_data.items():
                    df.to_sql(table_name, self.engine, if_exists='replace', index=False)
            return pd.read_sql(compiled_sql, self.engine)

        # Fallback: evaluate against an in-memory SQLite database.
        import sqlite3

        conn = sqlite3.connect(':memory:')
        try:
            if seed_data:
                for table_name, df in seed_data.items():
                    df.to_sql(table_name, conn, if_exists='replace', index=False)
            return pd.read_sql(compiled_sql, conn)
        finally:
            # BUG FIX: the connection was previously never closed, leaking a
            # sqlite3 handle on every fallback execution.
            conn.close()

    def run_all_models(self, seed_data: Optional[Dict[str, pd.DataFrame]] = None):
        """
        Run all discovered models.

        :param seed_data: Optional seed data shared by every model
        :return: Dictionary mapping model name -> result DataFrame
        """
        results = {}
        for model_name in self.models:
            results[model_name] = self.execute_model(model_name, seed_data)
        return results
|
|
121
|
+
|
|
122
|
+
# Example usage in a pipeline
|
|
123
|
+
def create_model_compiler(
    models_dir: str,
    engine_type: str = 'sqlite',
    connection_params: Optional[Dict[str, Any]] = None
) -> SQLModelCompiler:
    """
    Create a SQL Model Compiler with the specified engine.

    :param models_dir: Directory containing SQL model files
    :param engine_type: Type of database engine ('sqlite', 'snowflake',
        'bigquery'); anything else falls back to SQLite
    :param connection_params: Connection parameters for the database.
        Required keys: snowflake -> username, password, account, database,
        schema; bigquery -> project_id
    :return: SQLModelCompiler instance
    :raises ValueError: If ``connection_params`` is missing for an engine
        type that requires it
    """
    if engine_type == 'snowflake':
        # BUG FIX: previously imported the non-existent
        # sqlalchemy.dialects.snowflake module (and never used it), which
        # raised ImportError before the engine was built.  The snowflake
        # dialect is registered by the third-party 'snowflake-sqlalchemy'
        # package via the URL scheme below.
        if not connection_params:
            raise ValueError("connection_params are required for snowflake")
        engine = sqlalchemy.create_engine(
            f"snowflake://{connection_params['username']}:{connection_params['password']}@"
            f"{connection_params['account']}/{connection_params['database']}/{connection_params['schema']}"
        )
    elif engine_type == 'bigquery':
        # BUG FIX: dropped the unused google.cloud.bigquery import and the
        # non-existent sqlalchemy.dialects.bigquery import; the dialect is
        # provided by the 'sqlalchemy-bigquery' package via the URL scheme.
        if not connection_params:
            raise ValueError("connection_params are required for bigquery")
        engine = sqlalchemy.create_engine(
            f"bigquery://{connection_params['project_id']}"
        )
    else:
        # Default to a file-backed SQLite database.
        engine = sqlalchemy.create_engine('sqlite:///models.db')

    return SQLModelCompiler(
        models_dir=models_dir,
        engine=engine,
        engine_type=engine_type
    )
|
npcpy/tools.py
CHANGED
|
@@ -11,17 +11,17 @@ from docstring_parser import parse as parse_docstring
|
|
|
11
11
|
|
|
12
12
|
def python_type_to_json_schema(py_type: type) -> Dict[str, Any]:
|
|
13
13
|
"""Convert Python type hints to JSON schema types."""
|
|
14
|
-
|
|
14
|
+
|
|
15
15
|
if get_origin(py_type) is Union:
|
|
16
16
|
args = get_args(py_type)
|
|
17
|
-
|
|
17
|
+
|
|
18
18
|
if len(args) == 2 and type(None) in args:
|
|
19
19
|
non_none_type = args[0] if args[1] is type(None) else args[1]
|
|
20
20
|
return python_type_to_json_schema(non_none_type)
|
|
21
|
-
|
|
21
|
+
|
|
22
22
|
return python_type_to_json_schema(args[0])
|
|
23
23
|
|
|
24
|
-
|
|
24
|
+
|
|
25
25
|
if get_origin(py_type) is list:
|
|
26
26
|
item_type = get_args(py_type)[0] if get_args(py_type) else str
|
|
27
27
|
return {
|
|
@@ -29,11 +29,11 @@ def python_type_to_json_schema(py_type: type) -> Dict[str, Any]:
|
|
|
29
29
|
"items": python_type_to_json_schema(item_type)
|
|
30
30
|
}
|
|
31
31
|
|
|
32
|
-
|
|
32
|
+
|
|
33
33
|
if get_origin(py_type) is dict:
|
|
34
34
|
return {"type": "object"}
|
|
35
35
|
|
|
36
|
-
|
|
36
|
+
|
|
37
37
|
type_mapping = {
|
|
38
38
|
str: {"type": "string"},
|
|
39
39
|
int: {"type": "integer"},
|
|
@@ -48,16 +48,16 @@ def python_type_to_json_schema(py_type: type) -> Dict[str, Any]:
|
|
|
48
48
|
|
|
49
49
|
def extract_function_info(func: Callable) -> Dict[str, Any]:
|
|
50
50
|
"""Extract function information including name, description, and parameters."""
|
|
51
|
-
|
|
51
|
+
|
|
52
52
|
sig = inspect.signature(func)
|
|
53
53
|
|
|
54
|
-
|
|
54
|
+
|
|
55
55
|
try:
|
|
56
56
|
type_hints = get_type_hints(func)
|
|
57
57
|
except Exception:
|
|
58
58
|
type_hints = {}
|
|
59
59
|
|
|
60
|
-
|
|
60
|
+
|
|
61
61
|
docstring = inspect.getdoc(func)
|
|
62
62
|
parsed_doc = None
|
|
63
63
|
if docstring:
|
|
@@ -66,7 +66,7 @@ def extract_function_info(func: Callable) -> Dict[str, Any]:
|
|
|
66
66
|
except Exception:
|
|
67
67
|
pass
|
|
68
68
|
|
|
69
|
-
|
|
69
|
+
|
|
70
70
|
func_name = func.__name__
|
|
71
71
|
description = ""
|
|
72
72
|
|
|
@@ -75,31 +75,31 @@ def extract_function_info(func: Callable) -> Dict[str, Any]:
|
|
|
75
75
|
if hasattr(parsed_doc, 'long_description') and parsed_doc.long_description:
|
|
76
76
|
description += f". {parsed_doc.long_description}"
|
|
77
77
|
elif docstring:
|
|
78
|
-
|
|
78
|
+
|
|
79
79
|
description = docstring.split('\n')[0].strip()
|
|
80
80
|
|
|
81
|
-
|
|
81
|
+
|
|
82
82
|
properties = {}
|
|
83
83
|
required = []
|
|
84
84
|
param_descriptions = {}
|
|
85
85
|
|
|
86
|
-
|
|
86
|
+
|
|
87
87
|
if parsed_doc and hasattr(parsed_doc, 'params'):
|
|
88
88
|
for param in parsed_doc.params:
|
|
89
89
|
param_descriptions[param.arg_name] = param.description or ""
|
|
90
90
|
|
|
91
91
|
for param_name, param in sig.parameters.items():
|
|
92
|
-
|
|
92
|
+
|
|
93
93
|
if param_name == 'self':
|
|
94
94
|
continue
|
|
95
95
|
|
|
96
|
-
|
|
96
|
+
|
|
97
97
|
param_type = type_hints.get(param_name, str)
|
|
98
98
|
|
|
99
|
-
|
|
99
|
+
|
|
100
100
|
param_schema = python_type_to_json_schema(param_type)
|
|
101
101
|
|
|
102
|
-
|
|
102
|
+
|
|
103
103
|
if param_name in param_descriptions:
|
|
104
104
|
param_schema["description"] = param_descriptions[param_name]
|
|
105
105
|
else:
|
|
@@ -107,7 +107,7 @@ def extract_function_info(func: Callable) -> Dict[str, Any]:
|
|
|
107
107
|
|
|
108
108
|
properties[param_name] = param_schema
|
|
109
109
|
|
|
110
|
-
|
|
110
|
+
|
|
111
111
|
if param.default is inspect.Parameter.empty:
|
|
112
112
|
required.append(param_name)
|
|
113
113
|
|
|
@@ -165,10 +165,10 @@ def auto_tools(functions: List[Callable]) -> tuple[List[Dict[str, Any]], Dict[st
|
|
|
165
165
|
except:
|
|
166
166
|
return "Invalid mathematical expression"
|
|
167
167
|
|
|
168
|
-
|
|
168
|
+
|
|
169
169
|
tools_schema, tool_map = auto_tools([get_weather, calculate_math])
|
|
170
170
|
|
|
171
|
-
|
|
171
|
+
|
|
172
172
|
response = get_llm_response(
|
|
173
173
|
"What's the weather in Paris and what's 15 * 23?",
|
|
174
174
|
model='gpt-4o-mini',
|
npcpy/work/plan.py
CHANGED
|
@@ -23,22 +23,22 @@ def execute_plan_command(
|
|
|
23
23
|
request = parts[1]
|
|
24
24
|
platform_system = platform.system()
|
|
25
25
|
|
|
26
|
-
|
|
26
|
+
|
|
27
27
|
jobs_dir = os.path.expanduser("~/.npcsh/jobs")
|
|
28
28
|
logs_dir = os.path.expanduser("~/.npcsh/logs")
|
|
29
29
|
os.makedirs(jobs_dir, exist_ok=True)
|
|
30
30
|
os.makedirs(logs_dir, exist_ok=True)
|
|
31
31
|
|
|
32
|
-
|
|
32
|
+
|
|
33
33
|
linux_request = f"""Convert this scheduling request into a crontab-based script:
|
|
34
34
|
Request: {request}
|
|
35
35
|
|
|
36
36
|
"""
|
|
37
37
|
|
|
38
|
-
|
|
38
|
+
|
|
39
39
|
linux_prompt_static = """Example for "record CPU usage every 10 minutes":
|
|
40
40
|
{
|
|
41
|
-
"script": "
|
|
41
|
+
"script": "
|
|
42
42
|
set -euo pipefail
|
|
43
43
|
IFS=$'\\n\\t'
|
|
44
44
|
|
|
@@ -79,7 +79,7 @@ record_cpu",
|
|
|
79
79
|
|
|
80
80
|
mac_prompt_static = """Example for "record CPU usage every 10 minutes":
|
|
81
81
|
{
|
|
82
|
-
"script": "
|
|
82
|
+
"script": "
|
|
83
83
|
set -euo pipefail
|
|
84
84
|
IFS=$'\\n\\t'
|
|
85
85
|
|
|
@@ -176,7 +176,7 @@ Get-CpuUsage",
|
|
|
176
176
|
|
|
177
177
|
log_path = os.path.join(logs_dir, f"{job_name}.log")
|
|
178
178
|
|
|
179
|
-
|
|
179
|
+
|
|
180
180
|
with open(script_path, "w") as f:
|
|
181
181
|
f.write(schedule_info["script"])
|
|
182
182
|
os.chmod(script_path, 0o755)
|
|
@@ -244,7 +244,7 @@ Get-CpuUsage",
|
|
|
244
244
|
elif platform_system == "Windows":
|
|
245
245
|
task_name = f"NPCSH_{job_name}"
|
|
246
246
|
|
|
247
|
-
|
|
247
|
+
|
|
248
248
|
schedule_params = schedule_info["schedule"].split()
|
|
249
249
|
|
|
250
250
|
cmd = (
|
|
@@ -258,7 +258,7 @@ Get-CpuUsage",
|
|
|
258
258
|
]
|
|
259
259
|
+ schedule_params
|
|
260
260
|
+ ["/f"]
|
|
261
|
-
)
|
|
261
|
+
)
|
|
262
262
|
|
|
263
263
|
subprocess.run(cmd, check=True)
|
|
264
264
|
|
npcpy/work/trigger.py
CHANGED
|
@@ -215,7 +215,7 @@ Status:
|
|
|
215
215
|
|
|
216
216
|
task_name = f"NPCSH_{trigger_name}"
|
|
217
217
|
|
|
218
|
-
|
|
218
|
+
|
|
219
219
|
cmd = [
|
|
220
220
|
"schtasks",
|
|
221
221
|
"/create",
|
|
@@ -227,12 +227,12 @@ Status:
|
|
|
227
227
|
"onstart",
|
|
228
228
|
"/ru",
|
|
229
229
|
"System",
|
|
230
|
-
"/f",
|
|
230
|
+
"/f",
|
|
231
231
|
]
|
|
232
232
|
|
|
233
233
|
subprocess.run(cmd, check=True)
|
|
234
234
|
|
|
235
|
-
|
|
235
|
+
|
|
236
236
|
subprocess.run(["schtasks", "/run", "/tn", task_name])
|
|
237
237
|
|
|
238
238
|
output = f"""Trigger service created:
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: npcpy
|
|
3
|
-
Version: 1.
|
|
3
|
+
Version: 1.2.32
|
|
4
4
|
Summary: npcpy is the premier open-source library for integrating LLMs and Agents into python systems.
|
|
5
5
|
Home-page: https://github.com/NPC-Worldwide/npcpy
|
|
6
6
|
Author: Christopher Agostino
|
|
@@ -16,6 +16,8 @@ Requires-Dist: scipy
|
|
|
16
16
|
Requires-Dist: numpy
|
|
17
17
|
Requires-Dist: requests
|
|
18
18
|
Requires-Dist: docx
|
|
19
|
+
Requires-Dist: exa-py
|
|
20
|
+
Requires-Dist: elevenlabs
|
|
19
21
|
Requires-Dist: matplotlib
|
|
20
22
|
Requires-Dist: markdown
|
|
21
23
|
Requires-Dist: networkx
|
|
@@ -53,6 +55,7 @@ Requires-Dist: kuzu; extra == "local"
|
|
|
53
55
|
Requires-Dist: chromadb; extra == "local"
|
|
54
56
|
Requires-Dist: diffusers; extra == "local"
|
|
55
57
|
Requires-Dist: torch; extra == "local"
|
|
58
|
+
Requires-Dist: datasets; extra == "local"
|
|
56
59
|
Provides-Extra: yap
|
|
57
60
|
Requires-Dist: pyaudio; extra == "yap"
|
|
58
61
|
Requires-Dist: gtts; extra == "yap"
|
|
@@ -72,6 +75,7 @@ Requires-Dist: kuzu; extra == "all"
|
|
|
72
75
|
Requires-Dist: chromadb; extra == "all"
|
|
73
76
|
Requires-Dist: diffusers; extra == "all"
|
|
74
77
|
Requires-Dist: torch; extra == "all"
|
|
78
|
+
Requires-Dist: datasets; extra == "all"
|
|
75
79
|
Requires-Dist: pyaudio; extra == "all"
|
|
76
80
|
Requires-Dist: gtts; extra == "all"
|
|
77
81
|
Requires-Dist: playsound==1.2.2; extra == "all"
|
|
@@ -91,19 +95,15 @@ Dynamic: requires-python
|
|
|
91
95
|
Dynamic: summary
|
|
92
96
|
|
|
93
97
|
<p align="center">
|
|
94
|
-
<
|
|
98
|
+
<a href= "https://github.com/cagostino/npcpy/blob/main/docs/npcpy.md">
|
|
99
|
+
<img src="https://raw.githubusercontent.com/cagostino/npcpy/main/npcpy/npc-python.png" alt="npc-python logo" width=250></a>
|
|
95
100
|
</p>
|
|
96
101
|
|
|
97
|
-
|
|
98
102
|
# npcpy
|
|
99
103
|
|
|
100
104
|
Welcome to `npcpy`, the core library of the NPC Toolkit that supercharges natural language processing pipelines and agent tooling. `npcpy` is a flexible framework for building state-of-the-art applications and conducting novel research with LLMs.
|
|
101
105
|
|
|
102
106
|
|
|
103
|
-
<p align="center">
|
|
104
|
-
<a href= "https://github.com/cagostino/npcpy/blob/main/docs/npcpy.md">
|
|
105
|
-
<img src="https://raw.githubusercontent.com/cagostino/npcpy/main/npcpy/npc-python.png" alt="npc-python logo" width=250></a>
|
|
106
|
-
</p>
|
|
107
107
|
|
|
108
108
|
|
|
109
109
|
Here is an example for getting responses for a particular agent:
|
|
@@ -330,7 +330,7 @@ Users are not required to pass agents to get_llm_response, so you can work with
|
|
|
330
330
|
```python
|
|
331
331
|
from npcpy.npc_sysenv import print_and_process_stream
|
|
332
332
|
from npcpy.llm_funcs import get_llm_response
|
|
333
|
-
response = get_llm_response("When did the united states government begin sending advisors to vietnam?", model='
|
|
333
|
+
response = get_llm_response("When did the united states government begin sending advisors to vietnam?", model='qwen3:latest', provider='ollama', stream = True)
|
|
334
334
|
|
|
335
335
|
full_response = print_and_process_stream(response['response'], 'qwen3:latest', 'ollama')
|
|
336
336
|
```
|
|
@@ -338,7 +338,7 @@ Return structured outputs by specifying `format='json'` or passing a Pydantic sc
|
|
|
338
338
|
|
|
339
339
|
```python
|
|
340
340
|
from npcpy.llm_funcs import get_llm_response
|
|
341
|
-
response = get_llm_response("What is the sentiment of the american people towards the repeal of Roe v Wade? Return a json object with `sentiment` as the key and a float value from -1 to 1 as the value", model='
|
|
341
|
+
response = get_llm_response("What is the sentiment of the american people towards the repeal of Roe v Wade? Return a json object with `sentiment` as the key and a float value from -1 to 1 as the value", model='claude-4-5-haiku-latest', provider='anthropic', format='json')
|
|
342
342
|
|
|
343
343
|
print(response['response'])
|
|
344
344
|
```
|
|
@@ -384,6 +384,166 @@ from npcpy.llm_funcs import gen_video
|
|
|
384
384
|
video = gen_video("make a video of the moon in the summer of marco polo", model='runwayml/stable-diffusion-v1-5', provider='diffusers')
|
|
385
385
|
```
|
|
386
386
|
|
|
387
|
+
Or audio TTS and STT:
|
|
388
|
+
```
|
|
389
|
+
from npcpy.gen.audio_gen import tts_elevenlabs
|
|
390
|
+
audio = tts_elevenlabs('''The representatives of the people of France, formed into a National Assembly,
|
|
391
|
+
considering that ignorance, neglect, or contempt of human rights, are the sole causes of
|
|
392
|
+
public misfortunes and corruptions of Government, have resolved to set forth in a solemn
|
|
393
|
+
declaration, these natural, imprescriptible, and inalienable rights: that this declaration
|
|
394
|
+
being constantly present to the minds of the members of the body social, they may be for
|
|
395
|
+
ever kept attentive to their rights and their duties; that the acts of the legislative and
|
|
396
|
+
executive powers of government, being capable of being every moment compared with
|
|
397
|
+
the end of political institutions, may be more respected; and also, that the future claims of
|
|
398
|
+
the citizens, being directed by simple and incontestable principles, may tend to the
|
|
399
|
+
maintenance of the Constitution, and the general happiness. ''')
|
|
400
|
+
# it will play the audio automatically.
|
|
401
|
+
```
|
|
402
|
+
## Fine-Tuning and Evolution
|
|
403
|
+
|
|
404
|
+
`npcpy` provides modular tools for building adaptive AI systems through supervised fine-tuning, reinforcement learning, and genetic algorithms.
|
|
405
|
+
|
|
406
|
+
See examples/fine_tuning_demo.py for a complete working example.
|
|
407
|
+
|
|
408
|
+
|
|
409
|
+
### Supervised Fine-Tuning (SFT)
|
|
410
|
+
|
|
411
|
+
Train models on specific tasks using simple X, y pairs:
|
|
412
|
+
```python
|
|
413
|
+
from npcpy.ft.sft import run_sft, load_sft_model, predict_sft
|
|
414
|
+
|
|
415
|
+
X_train = ["translate to french: hello", "translate to french: goodbye"]
|
|
416
|
+
y_train = ["bonjour", "au revoir"]
|
|
417
|
+
|
|
418
|
+
model_path = run_sft(X_train, y_train)
|
|
419
|
+
|
|
420
|
+
model, tokenizer = load_sft_model(model_path)
|
|
421
|
+
response = predict_sft(model, tokenizer, "translate to french: thanks")
|
|
422
|
+
```
|
|
423
|
+
### Unsupervised Fine-Tuning (USFT)
|
|
424
|
+
Adapt models to domain-specific text corpora without labels:
|
|
425
|
+
```python
|
|
426
|
+
from npcpy.ft.usft import run_usft, load_corpus_from_hf, USFTConfig
|
|
427
|
+
|
|
428
|
+
texts = load_corpus_from_hf("tiny_shakespeare", split="train[:1000]")
|
|
429
|
+
|
|
430
|
+
model_path = run_usft(
|
|
431
|
+
texts,
|
|
432
|
+
config=USFTConfig(
|
|
433
|
+
output_model_path="models/shakespeare",
|
|
434
|
+
num_train_epochs=3
|
|
435
|
+
)
|
|
436
|
+
)
|
|
437
|
+
Train on your own text corpus:
|
|
438
|
+
domain_texts = [
|
|
439
|
+
"Your domain-specific text 1",
|
|
440
|
+
"Your domain-specific text 2",
|
|
441
|
+
] * 100
|
|
442
|
+
|
|
443
|
+
model_path = run_usft(domain_texts)
|
|
444
|
+
```
|
|
445
|
+
### Diffusion Fine-tuning
|
|
446
|
+
```
|
|
447
|
+
from npcpy.ft.diff import train_diffusion, generate_image
|
|
448
|
+
|
|
449
|
+
image_paths = ["img1.png", "img2.png", "img3.png"]
|
|
450
|
+
captions = ["a cat", "a dog", "a bird"]
|
|
451
|
+
|
|
452
|
+
model_path = train_diffusion(
|
|
453
|
+
image_paths,
|
|
454
|
+
captions,
|
|
455
|
+
config=DiffusionConfig(
|
|
456
|
+
num_epochs=100,
|
|
457
|
+
batch_size=4
|
|
458
|
+
)
|
|
459
|
+
)
|
|
460
|
+
|
|
461
|
+
generated = generate_image(
|
|
462
|
+
model_path,
|
|
463
|
+
prompt="a white square",
|
|
464
|
+
image_size=128
|
|
465
|
+
)
|
|
466
|
+
Resume training from checkpoint:
|
|
467
|
+
model_path = train_diffusion(
|
|
468
|
+
image_paths,
|
|
469
|
+
captions,
|
|
470
|
+
config,
|
|
471
|
+
resume_from="models/diffusion/checkpoints/checkpoint-epoch10-step1000.pt"
|
|
472
|
+
)
|
|
473
|
+
```
|
|
474
|
+
|
|
475
|
+
|
|
476
|
+
### Reinforcement Learning (RL)
|
|
477
|
+
Collect agent traces and train with DPO based on reward signals:
|
|
478
|
+
```python
|
|
479
|
+
from npcpy.ft.rl import collect_traces, run_rl_training
|
|
480
|
+
from npcpy.npc_compiler import NPC
|
|
481
|
+
|
|
482
|
+
tasks = [
|
|
483
|
+
{'prompt': 'Solve 2+2', 'expected': '4'},
|
|
484
|
+
{'prompt': 'Solve 5+3', 'expected': '8'}
|
|
485
|
+
]
|
|
486
|
+
|
|
487
|
+
agents = [
|
|
488
|
+
NPC(name="farlor", primary_directive="Be concise",
|
|
489
|
+
model="qwen3:0.6b", provider="ollama"),
|
|
490
|
+
NPC(name="tedno", primary_directive="Show your work",
|
|
491
|
+
model="qwen3:0.6b", provider="ollama")
|
|
492
|
+
]
|
|
493
|
+
|
|
494
|
+
def reward_fn(trace):
|
|
495
|
+
if trace['task_metadata']['expected'] in trace['final_output']:
|
|
496
|
+
return 1.0
|
|
497
|
+
return 0.0
|
|
498
|
+
|
|
499
|
+
adapter_path = run_rl_training(tasks, agents, reward_fn)
|
|
500
|
+
```
|
|
501
|
+
### Genetic Evolution
|
|
502
|
+
|
|
503
|
+
Evolve populations of knowledge graphs or model ensembles:
|
|
504
|
+
```python
|
|
505
|
+
from npcpy.ft.ge import GeneticEvolver, GAConfig
|
|
506
|
+
|
|
507
|
+
config = GAConfig(
|
|
508
|
+
population_size=20,
|
|
509
|
+
generations=50,
|
|
510
|
+
mutation_rate=0.15
|
|
511
|
+
)
|
|
512
|
+
|
|
513
|
+
evolver = GeneticEvolver(
|
|
514
|
+
fitness_fn=your_fitness_function,
|
|
515
|
+
mutate_fn=your_mutation_function,
|
|
516
|
+
crossover_fn=your_crossover_function,
|
|
517
|
+
initialize_fn=your_init_function,
|
|
518
|
+
config=config
|
|
519
|
+
)
|
|
520
|
+
|
|
521
|
+
best_individual = evolver.run()
|
|
522
|
+
```
|
|
523
|
+
|
|
524
|
+
### Smart Model Ensembler and response router
|
|
525
|
+
Build fast intuitive responses with fallback to reasoning:
|
|
526
|
+
```python
|
|
527
|
+
from npcpy.ft.model_ensembler import (
|
|
528
|
+
ResponseRouter,
|
|
529
|
+
create_model_genome
|
|
530
|
+
)
|
|
531
|
+
|
|
532
|
+
genome = create_model_genome(['math', 'code', 'factual'])
|
|
533
|
+
router = ResponseRouter(fast_threshold=0.8)
|
|
534
|
+
|
|
535
|
+
result = router.route_query("What is 2+2?", genome)
|
|
536
|
+
|
|
537
|
+
if result['used_fast_path']:
|
|
538
|
+
print("Fast gut reaction")
|
|
539
|
+
elif result['used_ensemble']:
|
|
540
|
+
print("Ensemble voting")
|
|
541
|
+
else:
|
|
542
|
+
print("Full reasoning")
|
|
543
|
+
```
|
|
544
|
+
The intention for this model ensembler system is to mimic human cognition: pattern-matched gut reactions (System 1 of Kahneman) for familiar queries, falling back to deliberate reasoning (System 2 of Kahneman) for novel problems. Genetic algorithms evolve both knowledge structures and model specializations over time.
|
|
545
|
+
|
|
546
|
+
|
|
387
547
|
|
|
388
548
|
## Serving an NPC Team
|
|
389
549
|
|
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
npcpy/__init__.py,sha256=9imxFtK74_6Rw9rz0kyMnZYl_voPb569tkTlYLt0Urg,131
|
|
2
|
+
npcpy/llm_funcs.py,sha256=qC-WmNvUl3YAS1u-xPXh1YDlr2e9cv80_wXK6wCr7TA,85546
|
|
3
|
+
npcpy/main.py,sha256=RWoRIj6VQLxKdOKvdVyaq2kwG35oRpeXPvp1CAAoG-w,81
|
|
4
|
+
npcpy/npc_compiler.py,sha256=j3JYZPKPLi42HAEA_i3Cp5GBGGUcpzBk8OEzZEvxzY4,89458
|
|
5
|
+
npcpy/npc_sysenv.py,sha256=t9AswM-9_P2NaGsnlzTMc2hUfdSthi9ofbud6F1G7LM,35974
|
|
6
|
+
npcpy/npcs.py,sha256=eExuVsbTfrRobTRRptRpDm46jCLWUgbvy4_U7IUQo-c,744
|
|
7
|
+
npcpy/serve.py,sha256=P01tYsY1ctq408nn-t3sLPGuGJg5KoaApy4gNECDRgo,118007
|
|
8
|
+
npcpy/tools.py,sha256=A5_oVmZkzGnI3BI-NmneuxeXQq-r29PbpAZP4nV4jrc,5303
|
|
9
|
+
npcpy/data/__init__.py,sha256=1tcoChR-Hjn905JDLqaW9ElRmcISCTJdE7BGXPlym2Q,642
|
|
10
|
+
npcpy/data/audio.py,sha256=goon4HfsYgx0bI-n1lhkrzWPrJoejJlycXcB0P62pyk,11280
|
|
11
|
+
npcpy/data/data_models.py,sha256=q7xpI4_nK5HvlOE1XB5u5nFQs4SE5zcgt0kIZJF2dhs,682
|
|
12
|
+
npcpy/data/image.py,sha256=UQcioNPDd5HYMLL_KStf45SuiIPXDcUY-dEFHwSWUeE,6564
|
|
13
|
+
npcpy/data/load.py,sha256=7Ay-TYNhCvjJLwdQ5qAgxXSrGwow9ZrazHFVPqMw_cI,4274
|
|
14
|
+
npcpy/data/text.py,sha256=jP0a1qZZaSJdK-LdZTn2Jjdxqmkd3efxDLEoxflJQeY,5010
|
|
15
|
+
npcpy/data/video.py,sha256=aPUgj0fA_lFQ7Jf94-PutggCF4695FVCh3q5mnVthvI,574
|
|
16
|
+
npcpy/data/web.py,sha256=ARGoVKUlQmaiX0zJbSvvFmRCwOv_Z7Pcan9c5GxYObQ,5117
|
|
17
|
+
npcpy/ft/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
18
|
+
npcpy/ft/diff.py,sha256=wYFRY_2p-B5xVqO7NDyhJbjQsUt4PrwOfgpE1Icghmk,2906
|
|
19
|
+
npcpy/ft/ge.py,sha256=0VzIiXq2wCzGcK1x0Wd-myJ3xRf-FNaPg0GkHEZegUM,3552
|
|
20
|
+
npcpy/ft/memory_trainer.py,sha256=QZPznxEEwXbOGroHdMUMa5xpqlNwgV6nqOazI2xgrnQ,6635
|
|
21
|
+
npcpy/ft/model_ensembler.py,sha256=BRX4hJ_rvF1vKTzjMhlahZqPttUgc3PqmzUJDqIfIps,10038
|
|
22
|
+
npcpy/ft/rl.py,sha256=EcPD8t5MFg0zYWSS-A7KJ9bWd0qCTsL5SSvDxV556Z4,9245
|
|
23
|
+
npcpy/ft/sft.py,sha256=74gRaJTTrZcO4np4DqRMr79ADkGhPcDKutR74rag03E,6659
|
|
24
|
+
npcpy/ft/usft.py,sha256=O025GGYGZQf2ZVLowyAmBwh5bJyuy2dUAM6v03YcboY,3435
|
|
25
|
+
npcpy/gen/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
26
|
+
npcpy/gen/audio_gen.py,sha256=w4toESu7nmli1T5FOwRRCGC_QK9W-SMWknYYkbRv9jE,635
|
|
27
|
+
npcpy/gen/embeddings.py,sha256=QStTJ2ELiC379OEZsLEgGGIIFD267Y8zQchs7HRn2Zg,2089
|
|
28
|
+
npcpy/gen/image_gen.py,sha256=mAlLG9jo9RnuuMU0jJVV0CpIgHqdizU9sfC6A0w5kKE,15599
|
|
29
|
+
npcpy/gen/response.py,sha256=6iAOi4hxUxkTZ1d2suBUASOssT6pQnr3HFwZWrvmATg,31925
|
|
30
|
+
npcpy/gen/video_gen.py,sha256=RFi3Zcq_Hn3HIcfoF3mijQ6G7RYFZaM_9pjPTh-8E64,3239
|
|
31
|
+
npcpy/memory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
32
|
+
npcpy/memory/command_history.py,sha256=2VdmNW5VRpMrOkbdrMsgn5p3mvuJHNnzGHnIUEM8XMI,46279
|
|
33
|
+
npcpy/memory/kg_vis.py,sha256=TrQQCRh_E7Pyr-GPAHLSsayubAfGyf4HOEFrPB6W86Q,31280
|
|
34
|
+
npcpy/memory/knowledge_graph.py,sha256=2XpIlsyPdAOnzQ6kkwP6MWPGwL3P6V33_3suNJYMMJE,48681
|
|
35
|
+
npcpy/memory/memory_processor.py,sha256=6PfVnSBA9ag5EhHJinXoODfEPTlDDoaT0PtCCuZO6HI,2598
|
|
36
|
+
npcpy/memory/search.py,sha256=glN6WYzaixcoDphTEHAXSMX3vKZGjR12Jx9YVL_gYfE,18433
|
|
37
|
+
npcpy/mix/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
38
|
+
npcpy/mix/debate.py,sha256=lQXxC7nl6Rwyf7HIYrsVQILMUmYYx55Tjt2pkTg56qY,9019
|
|
39
|
+
npcpy/sql/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
40
|
+
npcpy/sql/ai_function_tools.py,sha256=ZCpjVHtaMRdL2dXxbQy5NhhjtPrVViGT1wyEl8ADrks,7755
|
|
41
|
+
npcpy/sql/database_ai_adapters.py,sha256=CMlNGOhmJZhGB47RPvLIMqB61m_eYPVg1lwx42_b0jQ,6865
|
|
42
|
+
npcpy/sql/database_ai_functions.py,sha256=XQCmaFOE1lNCnwrLTNpotYOlv6sx41bb8hxZI_sqpy8,6335
|
|
43
|
+
npcpy/sql/model_runner.py,sha256=hJZ7hx2mwI-8DAh47Q6BwOsRjx30-HzebL4ajEUO4HA,5734
|
|
44
|
+
npcpy/sql/npcsql.py,sha256=-PmV7AXSKwRog4gPHTeHzmvPrnDZOiccjgkUGv4DwEU,35614
|
|
45
|
+
npcpy/sql/sql_model_compiler.py,sha256=G-0dpTlgzc-dXy9YEsdWGjO8xaQ3jFNbc6oUja1Ef4M,5364
|
|
46
|
+
npcpy/work/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
47
|
+
npcpy/work/desktop.py,sha256=F3I8mUtJp6LAkXodsh8hGZIncoads6c_2Utty-0EdDA,2986
|
|
48
|
+
npcpy/work/plan.py,sha256=QyUwg8vElWiHuoS-xK4jXTxxHvkMD3VkaCEsCmrEPQk,8300
|
|
49
|
+
npcpy/work/trigger.py,sha256=P1Y8u1wQRsS2WACims_2IdkBEar-iBQix-2TDWoW0OM,9948
|
|
50
|
+
npcpy-1.2.32.dist-info/licenses/LICENSE,sha256=j0YPvce7Ng9e32zYOu0EmXjXeJ0Nwawd0RA3uSGGH4E,1070
|
|
51
|
+
npcpy-1.2.32.dist-info/METADATA,sha256=Bii6xZThq-8YL-AByfstXOd6xYhtzH4deF4QQZ44LAk,29895
|
|
52
|
+
npcpy-1.2.32.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
|
53
|
+
npcpy-1.2.32.dist-info/top_level.txt,sha256=g1pbSvrOOncB74Bg5-J0Olg4V0A5VzDw-Xz5YObq8BU,6
|
|
54
|
+
npcpy-1.2.32.dist-info/RECORD,,
|