npcpy-1.2.26.tar.gz → npcpy-1.2.28.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {npcpy-1.2.26/npcpy.egg-info → npcpy-1.2.28}/PKG-INFO +3 -3
- {npcpy-1.2.26 → npcpy-1.2.28}/README.md +2 -2
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/gen/response.py +1 -1
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/npc_compiler.py +69 -321
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/npc_sysenv.py +3 -3
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/serve.py +96 -82
- npcpy-1.2.28/npcpy/sql/npcsql.py +804 -0
- {npcpy-1.2.26 → npcpy-1.2.28/npcpy.egg-info}/PKG-INFO +3 -3
- {npcpy-1.2.26 → npcpy-1.2.28}/setup.py +1 -1
- npcpy-1.2.26/npcpy/sql/npcsql.py +0 -377
- {npcpy-1.2.26 → npcpy-1.2.28}/LICENSE +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/MANIFEST.in +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/__init__.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/data/__init__.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/data/audio.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/data/data_models.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/data/image.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/data/load.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/data/text.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/data/video.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/data/web.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/ft/__init__.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/ft/diff.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/ft/ge.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/ft/memory_trainer.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/ft/model_ensembler.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/ft/rl.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/ft/sft.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/ft/usft.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/gen/__init__.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/gen/audio_gen.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/gen/embeddings.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/gen/image_gen.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/gen/video_gen.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/llm_funcs.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/main.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/memory/__init__.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/memory/command_history.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/memory/kg_vis.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/memory/knowledge_graph.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/memory/memory_processor.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/memory/search.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/mix/__init__.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/mix/debate.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/npcs.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/sql/__init__.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/sql/ai_function_tools.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/sql/database_ai_adapters.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/sql/database_ai_functions.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/sql/model_runner.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/sql/sql_model_compiler.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/tools.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/work/__init__.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/work/desktop.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/work/plan.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy/work/trigger.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy.egg-info/SOURCES.txt +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy.egg-info/dependency_links.txt +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy.egg-info/requires.txt +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/npcpy.egg-info/top_level.txt +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/setup.cfg +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/tests/test_audio.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/tests/test_command_history.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/tests/test_image.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/tests/test_llm_funcs.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/tests/test_load.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/tests/test_npc_compiler.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/tests/test_npcsql.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/tests/test_response.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/tests/test_serve.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/tests/test_text.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/tests/test_tools.py +0 -0
- {npcpy-1.2.26 → npcpy-1.2.28}/tests/test_web.py +0 -0
{npcpy-1.2.26/npcpy.egg-info → npcpy-1.2.28}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: npcpy
-Version: 1.2.26
+Version: 1.2.28
 Summary: npcpy is the premier open-source library for integrating LLMs and Agents into python systems.
 Home-page: https://github.com/NPC-Worldwide/npcpy
 Author: Christopher Agostino
@@ -330,7 +330,7 @@ Users are not required to pass agents to get_llm_response, so you can work with
 ```python
 from npcpy.npc_sysenv import print_and_process_stream
 from npcpy.llm_funcs import get_llm_response
-response = get_llm_response("When did the united states government begin sending advisors to vietnam?", model='
+response = get_llm_response("When did the united states government begin sending advisors to vietnam?", model='qwen3:latest', provider='ollama', stream = True)
 
 full_response = print_and_process_stream(response['response'], 'llama3.2', 'ollama')
 ```
@@ -338,7 +338,7 @@ Return structured outputs by specifying `format='json'` or passing a Pydantic sc
 
 ```python
 from npcpy.llm_funcs import get_llm_response
-response = get_llm_response("What is the sentiment of the american people towards the repeal of Roe v Wade? Return a json object with `sentiment` as the key and a float value from -1 to 1 as the value", model='
+response = get_llm_response("What is the sentiment of the american people towards the repeal of Roe v Wade? Return a json object with `sentiment` as the key and a float value from -1 to 1 as the value", model='claude-4-5-haiku-latest', provider='deepseek', format='json')
 
 print(response['response'])
 ```
{npcpy-1.2.26 → npcpy-1.2.28}/README.md

@@ -234,7 +234,7 @@ Users are not required to pass agents to get_llm_response, so you can work with
 ```python
 from npcpy.npc_sysenv import print_and_process_stream
 from npcpy.llm_funcs import get_llm_response
-response = get_llm_response("When did the united states government begin sending advisors to vietnam?", model='
+response = get_llm_response("When did the united states government begin sending advisors to vietnam?", model='qwen3:latest', provider='ollama', stream = True)
 
 full_response = print_and_process_stream(response['response'], 'llama3.2', 'ollama')
 ```
@@ -242,7 +242,7 @@ Return structured outputs by specifying `format='json'` or passing a Pydantic sc
 
 ```python
 from npcpy.llm_funcs import get_llm_response
-response = get_llm_response("What is the sentiment of the american people towards the repeal of Roe v Wade? Return a json object with `sentiment` as the key and a float value from -1 to 1 as the value", model='
+response = get_llm_response("What is the sentiment of the american people towards the repeal of Roe v Wade? Return a json object with `sentiment` as the key and a float value from -1 to 1 as the value", model='claude-4-5-haiku-latest', provider='deepseek', format='json')
 
 print(response['response'])
 ```
{npcpy-1.2.26 → npcpy-1.2.28}/npcpy/gen/response.py

@@ -561,7 +561,7 @@ def get_litellm_response(
     if provider =='enpisi' and api_url is None:
         api_params['api_base'] = 'https://api.enpisi.com'
         if api_key is None:
-            api_key = os.environ.get('
+            api_key = os.environ.get('NPC_STUDIO_LICENSE_KEY')
         api_params['api_key'] = api_key
     if '-npc' in model:
         model = model.split('-npc')[0]
{npcpy-1.2.26 → npcpy-1.2.28}/npcpy/npc_compiler.py

@@ -477,23 +477,23 @@ output = {mcp_tool.__module__}.{name}(
     except:
         pass
 
-
 def load_jinxs_from_directory(directory):
-    """Load all jinxs from a directory"""
+    """Load all jinxs from a directory recursively"""
     jinxs = []
     directory = os.path.expanduser(directory)
 
     if not os.path.exists(directory):
         return jinxs
-
-    for
-
-
-
-
-
-
-
+
+    for root, dirs, files in os.walk(directory):
+        for filename in files:
+            if filename.endswith(".jinx"):
+                try:
+                    jinx_path = os.path.join(root, filename)
+                    jinx = Jinx(jinx_path=jinx_path)
+                    jinxs.append(jinx)
+                except Exception as e:
+                    print(f"Error loading jinx {filename}: {e}")
 
     return jinxs
 
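The loader now recurses with os.walk, so jinx files nested in subdirectories are picked up as well. A minimal usage sketch against the default global jinx directory; the nested layout named in the comment is a hypothetical example:

```python
# Hypothetical layout: ~/.npcsh/npc_team/jinxs/web/search.jinx
from npcpy.npc_compiler import load_jinxs_from_directory

# With the os.walk-based loader above, nested .jinx files are discovered;
# the previous loop only scanned the top level of the directory.
jinxs = load_jinxs_from_directory('~/.npcsh/npc_team/jinxs/')
print([jinx.jinx_name for jinx in jinxs])
```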
@@ -693,7 +693,8 @@ class NPC:
             self.jinxs_directory = os.path.expanduser('~/.npcsh/npc_team/jinxs/')
         else:
             self.jinxs_directory = None
-            self.npc_directory = None
+            self.npc_directory = None
+
         self.team = team
         if tools is not None:
             tools_schema, tool_map = auto_tools(tools)
@@ -1057,18 +1058,16 @@
         """Load and process NPC-specific jinxs"""
         npc_jinxs = []
 
-        if self.jinxs_directory is None:
-            self.jinxs_dict = {}
-            return None
-
         if jinxs == "*":
-
-
-
-
+            if self.team and hasattr(self.team, 'jinxs_dict'):
+                for jinx in self.team.jinxs_dict.values():
+                    npc_jinxs.append(jinx)
+            elif self.use_global_jinxs or (hasattr(self, 'jinxs_directory') and self.jinxs_directory):
+                jinxs_dir = self.jinxs_directory or os.path.expanduser('~/.npcsh/npc_team/jinxs/')
+                if os.path.exists(jinxs_dir):
+                    npc_jinxs.extend(load_jinxs_from_directory(jinxs_dir))
 
             self.jinxs_dict = {jinx.jinx_name: jinx for jinx in npc_jinxs}
-
             return npc_jinxs
 
         for jinx in jinxs:
@@ -1076,13 +1075,13 @@
                 npc_jinxs.append(jinx)
             elif isinstance(jinx, dict):
                 npc_jinxs.append(Jinx(jinx_data=jinx))
-
+            elif isinstance(jinx, str):
                 jinx_path = None
                 jinx_name = jinx
                 if not jinx_name.endswith(".jinx"):
                     jinx_name += ".jinx"
 
-                if hasattr(self, 'jinxs_directory') and os.path.exists(self.jinxs_directory):
+                if hasattr(self, 'jinxs_directory') and self.jinxs_directory and os.path.exists(self.jinxs_directory):
                     candidate_path = os.path.join(self.jinxs_directory, jinx_name)
                     if os.path.exists(candidate_path):
                         jinx_path = candidate_path
@@ -1092,27 +1091,33 @@
             npc_jinxs.append(jinx_obj)
 
         self.jinxs_dict = {jinx.jinx_name: jinx for jinx in npc_jinxs}
+        print(npc_jinxs)
         return npc_jinxs
-
     def get_llm_response(self,
-
-
-
-
-
-
-
-
-
-
-
+                         request,
+                         jinxs=None,
+                         tools: Optional[list] = None,
+                         tool_map: Optional[dict] = None,
+                         tool_choice=None,
+                         messages=None,
+                         auto_process_tool_calls=True,
+                         use_core_tools: bool = False,
+                         **kwargs):
+        all_candidate_functions = []
+
+        if tools is not None and tool_map is not None:
+            all_candidate_functions.extend([func for func in tool_map.values() if callable(func)])
+        elif hasattr(self, 'tool_map') and self.tool_map:
+            all_candidate_functions.extend([func for func in self.tool_map.values() if callable(func)])
+
+        if use_core_tools:
+            dynamic_core_tools_list = [
                 self.think_step_by_step,
                 self.write_code
             ]
-
+
             if self.command_history:
-
+                dynamic_core_tools_list.extend([
                     self.search_my_conversations,
                     self.search_my_memories,
                     self.create_memory,
@@ -1124,35 +1129,44 @@ class NPC:
                     self.archive_old_memories,
                     self.get_memory_stats
                 ])
-
+
             if self.db_conn:
-
-
-
-
-
-
-
-
-
-
+                dynamic_core_tools_list.append(self.query_database)
+
+            all_candidate_functions.extend(dynamic_core_tools_list)
+
+        unique_functions = []
+        seen_names = set()
+        for func in all_candidate_functions:
+            if func.__name__ not in seen_names:
+                unique_functions.append(func)
+                seen_names.add(func.__name__)
+
+        final_tools_schema = None
+        final_tool_map_dict = None
+
+        if unique_functions:
+            final_tools_schema, final_tool_map_dict = auto_tools(unique_functions)
+
+        if tool_choice is None:
+            if final_tools_schema:
+                tool_choice = "auto"
+            else:
+                tool_choice = "none"
 
         response = npy.llm_funcs.get_llm_response(
             request,
-            model=self.model,
-            provider=self.provider,
             npc=self,
             jinxs=jinxs,
-            tools=
-            tool_map=
+            tools=final_tools_schema,
+            tool_map=final_tool_map_dict,
             tool_choice=tool_choice,
             auto_process_tool_calls=auto_process_tool_calls,
             messages=self.memory if messages is None else messages,
             **kwargs
         )
-
-        return response
 
+        return response
 
 
 
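The reworked NPC.get_llm_response assembles its own tool schema: explicit tool_map entries win, otherwise the NPC's own tool_map is used; use_core_tools appends think_step_by_step/write_code plus the memory and database helpers when available; candidates are deduplicated by __name__ and run through auto_tools, with tool_choice defaulting to "auto" whenever a schema exists. A minimal usage sketch of the new signature; the constructor arguments and the get_weather function are illustrative assumptions, not taken from this diff:

```python
from npcpy.npc_compiler import NPC

def get_weather(city: str) -> str:
    """Hypothetical user tool; auto_tools() derives its schema."""
    return f"Sunny in {city}"

npc = NPC(name='sibiji', model='qwen3:latest', provider='ollama')

# Explicit tools/tool_map take precedence over npc.tool_map; core tools
# attach via use_core_tools. tool_choice is left to default to "auto".
result = npc.get_llm_response(
    "What's the weather in Paris?",
    tools=[],                              # non-None so the explicit branch is taken
    tool_map={'get_weather': get_weather},
    use_core_tools=True,
)
print(result['response'])
```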
@@ -2383,269 +2397,3 @@ class Team:
         context_parts.append("")
 
         return "\n".join(context_parts)
-
-class Pipeline:
-    def __init__(self, pipeline_data=None, pipeline_path=None, npc_team=None):
-        """Initialize a pipeline from data or file path"""
-        self.npc_team = npc_team
-        self.steps = []
-
-        if pipeline_path:
-            self._load_from_path(pipeline_path)
-        elif pipeline_data:
-            self.name = pipeline_data.get("name", "unnamed_pipeline")
-            self.steps = pipeline_data.get("steps", [])
-        else:
-            raise ValueError("Either pipeline_data or pipeline_path must be provided")
-
-    def _load_from_path(self, path):
-        """Load pipeline from file"""
-        pipeline_data = load_yaml_file(path)
-        if not pipeline_data:
-            raise ValueError(f"Failed to load pipeline from {path}")
-
-        self.name = os.path.splitext(os.path.basename(path))[0]
-        self.steps = pipeline_data.get("steps", [])
-        self.pipeline_path = path
-
-    def execute(self, initial_context=None):
-        """Execute the pipeline with given context"""
-        context = initial_context or {}
-        results = {}
-
-
-        init_db_tables()
-
-
-        pipeline_hash = self._generate_hash()
-
-
-        results_table = f"{self.name}_results"
-        self._ensure_results_table(results_table)
-
-
-        run_id = self._create_run_entry(pipeline_hash)
-
-
-        context.update({
-            "ref": lambda step_name: results.get(step_name),
-            "source": self._fetch_data_from_source,
-        })
-
-
-        for step in self.steps:
-            step_name = step.get("step_name")
-            if not step_name:
-                raise ValueError(f"Missing step_name in step: {step}")
-
-
-            npc_name = self._render_template(step.get("npc", ""), context)
-            npc = self._get_npc(npc_name)
-            if not npc:
-                raise ValueError(f"NPC {npc_name} not found for step {step_name}")
-
-
-            task = self._render_template(step.get("task", ""), context)
-
-
-            model = step.get("model", npc.model)
-            provider = step.get("provider", npc.provider)
-
-
-            mixa = step.get("mixa", False)
-            if mixa:
-                response = self._execute_mixa_step(step, context, npc, model, provider)
-            else:
-
-                source_matches = re.findall(r"{{\s*source\('([^']+)'\)\s*}}", task)
-                if source_matches:
-                    response = self._execute_data_source_step(step, context, source_matches, npc, model, provider)
-                else:
-
-                    llm_response = npy.llm_funcs.get_llm_response(task, model=model, provider=provider, npc=npc)
-                    response = llm_response.get("response", "")
-
-
-            results[step_name] = response
-            context[step_name] = response
-
-
-            self._store_step_result(run_id, step_name, npc_name, model, provider,
-                                    {"task": task}, response, results_table)
-
-
-        return {
-            "results": results,
-            "run_id": run_id
-        }
-
-    def _render_template(self, template_str, context):
-        """Render a template with the given context"""
-        if not template_str:
-            return ""
-
-        try:
-            template = Template(template_str)
-            return template.render(**context)
-        except Exception as e:
-            print(f"Error rendering template: {e}")
-            return template_str
-
-    def _get_npc(self, npc_name):
-        """Get NPC by name from team"""
-        if not self.npc_team:
-            raise ValueError("No NPC team available")
-
-        return self.npc_team.get_npc(npc_name)
-
-    def _generate_hash(self):
-        """Generate a hash for the pipeline"""
-        if hasattr(self, 'pipeline_path') and self.pipeline_path:
-            with open(self.pipeline_path, 'r') as f:
-                content = f.read()
-            return hashlib.sha256(content.encode()).hexdigest()
-        else:
-
-            content = json.dumps(self.steps)
-            return hashlib.sha256(content.encode()).hexdigest()
-
-    def _ensure_results_table(self, table_name):
-        """Ensure results table exists"""
-        db_path = "~/npcsh_history.db"
-        with sqlite3.connect(os.path.expanduser(db_path)) as conn:
-            conn.execute(f"""
-                CREATE TABLE IF NOT EXISTS {table_name} (
-                    result_id INTEGER PRIMARY KEY AUTOINCREMENT,
-                    run_id INTEGER,
-                    step_name TEXT,
-                    npc_name TEXT,
-                    model TEXT,
-                    provider TEXT,
-                    inputs TEXT,
-                    outputs TEXT,
-                    timestamp DATETIME DEFAULT CURRENT_TIMESTAMP,
-                    FOREIGN KEY(run_id) REFERENCES pipeline_runs(run_id)
-                )
-            """)
-            conn.commit()
-
-    def _create_run_entry(self, pipeline_hash):
-        """Create run entry in pipeline_runs table"""
-        db_path = "~/npcsh_history.db"
-        with sqlite3.connect(os.path.expanduser(db_path)) as conn:
-            cursor = conn.execute(
-                "INSERT INTO pipeline_runs (pipeline_name, pipeline_hash, timestamp) VALUES (?, ?, ?)",
-                (self.name, pipeline_hash, datetime.now())
-            )
-            conn.commit()
-            return cursor.lastrowid
-
-    def _store_step_result(self, run_id, step_name, npc_name, model, provider, inputs, outputs, table_name):
-        """Store step result in database"""
-        db_path = "~/npcsh_history.db"
-        with sqlite3.connect(os.path.expanduser(db_path)) as conn:
-            conn.execute(
-                f"""
-                INSERT INTO {table_name}
-                (run_id, step_name, npc_name, model, provider, inputs, outputs)
-                VALUES (?, ?, ?, ?, ?, ?, ?)
-                """,
-                (
-                    run_id,
-                    step_name,
-                    npc_name,
-                    model,
-                    provider,
-                    json.dumps(self._clean_for_json(inputs)),
-                    json.dumps(self._clean_for_json(outputs))
-                )
-            )
-            conn.commit()
-
-    def _clean_for_json(self, obj):
-        """Clean an object for JSON serialization"""
-        if isinstance(obj, dict):
-            return {
-                k: self._clean_for_json(v)
-                for k, v in obj.items()
-                if not k.startswith("_") and not callable(v)
-            }
-        elif isinstance(obj, list):
-            return [self._clean_for_json(i) for i in obj]
-        elif isinstance(obj, (str, int, float, bool, type(None))):
-            return obj
-        else:
-            return str(obj)
-
-    def _fetch_data_from_source(self, table_name):
-        """Fetch data from a database table"""
-        db_path = "~/npcsh_history.db"
-        try:
-            engine = create_engine(f"sqlite:///{os.path.expanduser(db_path)}")
-            df = pd.read_sql(f"SELECT * FROM {table_name}", engine)
-            return df.to_json(orient="records")
-        except Exception as e:
-            print(f"Error fetching data from {table_name}: {e}")
-            return "[]"
-
-    def _execute_mixa_step(self, step, context, npc, model, provider):
-        """Execute a mixture of agents step"""
-
-        task = self._render_template(step.get("task", ""), context)
-
-
-        mixa_turns = step.get("mixa_turns", 5)
-        num_generating_agents = len(step.get("mixa_agents", []))
-        if num_generating_agents == 0:
-            num_generating_agents = 3
-
-        num_voting_agents = len(step.get("mixa_voters", []))
-        if num_voting_agents == 0:
-            num_voting_agents = 3
-
-
-        round_responses = []
-
-
-        return
-
-    def _execute_data_source_step(self, step, context, source_matches, npc, model, provider):
-        """Execute a step with data source"""
-        task_template = step.get("task", "")
-        table_name = source_matches[0]
-
-        try:
-
-            db_path = "~/npcsh_history.db"
-            engine = create_engine(f"sqlite:///{os.path.expanduser(db_path)}")
-            df = pd.read_sql(f"SELECT * FROM {table_name}", engine)
-
-
-            if step.get("batch_mode", False):
-
-                data_str = df.to_json(orient="records")
-                task = task_template.replace(f"{{{{ source('{table_name}') }}}}", data_str)
-                task = self._render_template(task, context)
-
-
-                response = npy.llm_funcs.get_llm_response(task, model=model, provider=provider, npc=npc)
-                return response.get("response", "")
-            else:
-
-                results = []
-                for _, row in df.iterrows():
-
-                    row_data = json.dumps(row.to_dict())
-                    row_task = task_template.replace(f"{{{{ source('{table_name}') }}}}", row_data)
-                    row_task = self._render_template(row_task, context)
-
-
-                    response = npy.llm_funcs.get_llm_response(row_task, model=model, provider=provider, npc=npc)
-                    results.append(response.get("response", ""))
-
-                return results
-        except Exception as e:
-            print(f"Error processing data source {table_name}: {e}")
-            return f"Error: {str(e)}"
-
-
{npcpy-1.2.26 → npcpy-1.2.28}/npcpy/npc_sysenv.py

@@ -164,18 +164,18 @@ def get_locally_available_models(project_directory, airplane_mode=False):
 
 
     with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
-        if '
+        if 'NPC_STUDIO_LICENSE_KEY' in env_vars or os.environ.get('NPC_STUDIO_LICENSE_KEY'):
             try:
                 def fetch_enpisi_models():
                     import requests
 
                     api_url = 'https://api.enpisi.com'
                     headers = {
-                        'Authorization': f"Bearer {env_vars.get('
+                        'Authorization': f"Bearer {env_vars.get('NPC_STUDIO_LICENSE_KEY') or os.environ.get('NPC_STUDIO_LICENSE_KEY')}",
                         'Content-Type': 'application/json'
                     }
-                    import requests
                     response = requests.get(f"{api_url}/models", headers=headers)
+
                     return [model['id'] for model in response.json().get('data','')]
                 for model in fetch_enpisi_models():
                     available_models[model+'-npc'] = 'enpisi'