npcpy 1.2.28__tar.gz → 1.2.30__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {npcpy-1.2.28/npcpy.egg-info → npcpy-1.2.30}/PKG-INFO +1 -1
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/ft/sft.py +25 -7
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/gen/image_gen.py +37 -15
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/llm_funcs.py +17 -9
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/npc_compiler.py +116 -77
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/serve.py +46 -13
- {npcpy-1.2.28 → npcpy-1.2.30/npcpy.egg-info}/PKG-INFO +1 -1
- {npcpy-1.2.28 → npcpy-1.2.30}/setup.py +1 -1
- {npcpy-1.2.28 → npcpy-1.2.30}/LICENSE +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/MANIFEST.in +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/README.md +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/__init__.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/data/__init__.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/data/audio.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/data/data_models.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/data/image.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/data/load.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/data/text.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/data/video.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/data/web.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/ft/__init__.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/ft/diff.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/ft/ge.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/ft/memory_trainer.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/ft/model_ensembler.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/ft/rl.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/ft/usft.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/gen/__init__.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/gen/audio_gen.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/gen/embeddings.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/gen/response.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/gen/video_gen.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/main.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/memory/__init__.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/memory/command_history.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/memory/kg_vis.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/memory/knowledge_graph.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/memory/memory_processor.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/memory/search.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/mix/__init__.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/mix/debate.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/npc_sysenv.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/npcs.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/sql/__init__.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/sql/ai_function_tools.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/sql/database_ai_adapters.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/sql/database_ai_functions.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/sql/model_runner.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/sql/npcsql.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/sql/sql_model_compiler.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/tools.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/work/__init__.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/work/desktop.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/work/plan.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy/work/trigger.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy.egg-info/SOURCES.txt +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy.egg-info/dependency_links.txt +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy.egg-info/requires.txt +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/npcpy.egg-info/top_level.txt +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/setup.cfg +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/tests/test_audio.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/tests/test_command_history.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/tests/test_image.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/tests/test_llm_funcs.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/tests/test_load.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/tests/test_npc_compiler.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/tests/test_npcsql.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/tests/test_response.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/tests/test_serve.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/tests/test_text.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/tests/test_tools.py +0 -0
- {npcpy-1.2.28 → npcpy-1.2.30}/tests/test_web.py +0 -0
```diff
--- npcpy-1.2.28/npcpy/ft/sft.py
+++ npcpy-1.2.30/npcpy/ft/sft.py
@@ -154,13 +154,17 @@ def run_sft(
         save_steps=config.save_steps,
         weight_decay=config.weight_decay,
     )
-
+
+    def formatting_func(example):
+        return example["text"]
+
     trainer = SFTTrainer(
         model=model,
         train_dataset=dataset,
         peft_config=peft_config,
         args=training_args,
-
+        processing_class=tokenizer,
+        formatting_func=formatting_func
     )
 
     print(f"Training on {len(dataset)} examples")
@@ -190,8 +194,6 @@ def load_sft_model(model_path: str):
     tokenizer.pad_token = tokenizer.eos_token
 
     return model, tokenizer
-
-
 def predict_sft(
     model,
     tokenizer,
@@ -202,8 +204,13 @@ def predict_sft(
 
     device = next(model.parameters()).device
 
+    formatted_prompt = (
+        f"<start_of_turn>user\n{prompt}<end_of_turn>\n"
+        f"<start_of_turn>model\n"
+    )
+
     inputs = tokenizer(
-        prompt,
+        formatted_prompt,
         return_tensors="pt",
         truncation=True,
         max_length=512
@@ -222,9 +229,20 @@ def predict_sft(
         pad_token_id=tokenizer.eos_token_id
     )
 
-    response = tokenizer.decode(
+    full_response = tokenizer.decode(
         outputs[0],
-        skip_special_tokens=True
+        skip_special_tokens=False
     )
 
+    if "<start_of_turn>model\n" in full_response:
+        response = full_response.split(
+            "<start_of_turn>model\n"
+        )[-1]
+        response = response.split("<end_of_turn>")[0].strip()
+    else:
+        response = tokenizer.decode(
+            outputs[0][len(input_ids[0]):],
+            skip_special_tokens=True
+        )
+
     return response
```
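Two API details are worth calling out in these `sft.py` hunks. The `processing_class=tokenizer` argument tracks newer `trl` releases, where `SFTTrainer`'s `tokenizer` keyword was renamed to `processing_class`. And the prompt formatting and response parsing added to `predict_sft` follow Gemma-style turn markers; as a rough cross-check, the hand-built string should agree with what the tokenizer's own chat template emits. A minimal sketch, assuming a Gemma-family instruct checkpoint (the model name below is illustrative, not npcpy's default):

```python
# Sketch: compare manual <start_of_turn> markup against the tokenizer's
# built-in chat template. Assumes transformers is installed and the
# checkpoint uses a Gemma-style template (illustrative assumption).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it")

manual = (
    "<start_of_turn>user\nWhat is npcpy?<end_of_turn>\n"
    "<start_of_turn>model\n"
)

templated = tokenizer.apply_chat_template(
    [{"role": "user", "content": "What is npcpy?"}],
    tokenize=False,
    add_generation_prompt=True,
)

# Gemma templates prepend <bos>; strip it before comparing.
print(templated.removeprefix("<bos>") == manual)  # expected: True
```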
```diff
--- npcpy-1.2.28/npcpy/gen/image_gen.py
+++ npcpy-1.2.30/npcpy/gen/image_gen.py
@@ -86,6 +86,16 @@ def generate_image_diffusers(
     else:
         raise e
 
+import os
+import base64
+import io
+from typing import Union, List, Optional
+
+import PIL
+from PIL import Image
+
+import requests
+from urllib.request import urlopen
 
 def openai_image_gen(
     prompt: str,
@@ -97,36 +107,47 @@ def openai_image_gen(
 ):
     """Generate or edit an image using the OpenAI API."""
     from openai import OpenAI
-
+
     client = OpenAI()
-
+
     if height is None:
         height = 1024
     if width is None:
-        width = 1024
-
-    size_str = f"{width}x{height}"
+        width = 1024
+
+    size_str = f"{width}x{height}"
 
     if attachments is not None:
         processed_images = []
+        files_to_close = []
         for attachment in attachments:
             if isinstance(attachment, str):
-
+                file_handle = open(attachment, "rb")
+                processed_images.append(file_handle)
+                files_to_close.append(file_handle)
             elif isinstance(attachment, bytes):
-
+                img_byte_arr = io.BytesIO(attachment)
+                img_byte_arr.name = 'image.png' # FIX: Add filename hint
+                processed_images.append(img_byte_arr)
             elif isinstance(attachment, Image.Image):
                 img_byte_arr = io.BytesIO()
                 attachment.save(img_byte_arr, format='PNG')
                 img_byte_arr.seek(0)
+                img_byte_arr.name = 'image.png' # FIX: Add filename hint
                 processed_images.append(img_byte_arr)
 
-
-
-
-
-
-
-
+        try:
+            result = client.images.edit(
+                model=model,
+                image=processed_images[0],
+                prompt=prompt,
+                n=n_images,
+                size=size_str,
+            )
+        finally:
+            # This ensures any files we opened are properly closed
+            for f in files_to_close:
+                f.close()
     else:
         result = client.images.generate(
             model=model,
@@ -134,7 +155,7 @@ def openai_image_gen(
             n=n_images,
             size=size_str,
         )
-
+
     collected_images = []
     for item_data in result.data:
         if model == 'gpt-image-1':
@@ -153,6 +174,7 @@ def openai_image_gen(
     return collected_images
 
 
+
 def gemini_image_gen(
     prompt: str,
     model: str = "gemini-2.5-flash",
```
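The `# FIX: Add filename hint` lines in `openai_image_gen` work because multipart upload clients, including the OpenAI Python SDK, generally take the filename (and hence the MIME type) from a file-like object's `name` attribute, which a bare `io.BytesIO` does not have. A standalone sketch of the same idea (the helper name is ours, not npcpy's):

```python
import io

def as_named_png(data: bytes) -> io.BytesIO:
    """Wrap raw PNG bytes in a stream that upload clients can name."""
    buf = io.BytesIO(data)
    buf.name = "image.png"  # filename hint; lets the client infer a content type
    return buf

# usage sketch (OpenAI SDK assumed):
#   client.images.edit(model=model, image=as_named_png(raw_bytes), prompt=prompt)
```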
```diff
--- npcpy-1.2.28/npcpy/llm_funcs.py
+++ npcpy-1.2.30/npcpy/llm_funcs.py
@@ -378,7 +378,6 @@ def execute_llm_command(
         "messages": messages,
         "output": "Max attempts reached. Unable to execute the command successfully.",
     }
-
 def handle_jinx_call(
     command: str,
     jinx_name: str,
@@ -391,6 +390,7 @@ def handle_jinx_call(
     n_attempts=3,
     attempt=0,
     context=None,
+    extra_globals=None, # ADD THIS
    **kwargs
 ) -> Union[str, Dict[str, Any]]:
     """This function handles a jinx call.
@@ -568,6 +568,8 @@ def handle_jinx_call(
             jinja_env,
             npc=npc,
             messages=messages,
+            extra_globals=extra_globals # ADD THIS
+
         )
     except Exception as e:
         print(f"An error occurred while executing the jinx: {e}")
@@ -664,10 +666,10 @@ def jinx_handler(command, extracted_data, **kwargs):
         api_key=kwargs.get('api_key'),
         messages=kwargs.get('messages'),
         npc=kwargs.get('npc'),
-        team
+        team=kwargs.get('team'),
         stream=kwargs.get('stream'),
-
-
+        context=kwargs.get('context'),
+        extra_globals=kwargs.get('extra_globals') # ADD THIS
     )
 
 def answer_handler(command, extracted_data, **kwargs):
@@ -714,6 +716,7 @@ def check_llm_command(
     stream=False,
     context=None,
     actions: Dict[str, Dict] = None,
+    extra_globals=None,
 ):
     """This function checks an LLM command and returns sequences of steps with parallel actions."""
     if messages is None:
@@ -734,6 +737,7 @@ def check_llm_command(
         stream=stream,
         context=context,
         actions=actions,
+        extra_globals=extra_globals,
 
     )
     return exec
@@ -873,6 +877,7 @@ def plan_multi_step_actions(
     api_key: str = None,
     context: str = None,
     messages: List[Dict[str, str]] = None,
+
 
 ):
     """
@@ -992,6 +997,7 @@ def execute_multi_step_plan(
             messages=messages,
             team=team,
 
+
         )
 
     if not planned_actions:
@@ -1007,7 +1013,8 @@ def execute_multi_step_plan(
             stream=stream,
             team = team,
             images=images,
-            context=context
+            context=context
+            )
         return {"messages": result.get('messages',
                                        messages),
                 "output": result.get('response')}
@@ -1037,7 +1044,7 @@ def execute_multi_step_plan(
             render_markdown(
                 f"- Executing Action: {action_name} \n- Explanation: {action_data.get('explanation')}\n "
             )
-
+
             result = handler(
                 command=command,
                 extracted_data=action_data,
@@ -1049,10 +1056,10 @@ def execute_multi_step_plan(
                 npc=npc,
                 team=team,
                 stream=stream,
-
                 context=context+step_context,
-                images=images
-                )
+                images=images,
+                extra_globals=kwargs.get('extra_globals') # ADD THIS
+                )
         except KeyError as e:
 
             return execute_multi_step_plan(
@@ -1068,6 +1075,7 @@ def execute_multi_step_plan(
                 stream=stream,
                 context=context,
                 actions=actions,
+
                 **kwargs,
             )
 
```
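All of the `extra_globals` edits in `llm_funcs.py` are one pattern: a dict accepted at the top of the call chain and forwarded unchanged until the layer that finally consumes it. A minimal sketch of the threading, using simplified stand-ins rather than npcpy's real signatures:

```python
# Simplified stand-ins for the check_llm_command -> handle_jinx_call -> execute
# chain; only the extra_globals threading is shown.
def check_llm_command(command, extra_globals=None, **kwargs):
    return handle_jinx_call(command, extra_globals=extra_globals, **kwargs)

def handle_jinx_call(command, extra_globals=None, **kwargs):
    return execute(command, extra_globals=extra_globals)

def execute(code, extra_globals=None):
    scope = {}
    if extra_globals:
        scope.update(extra_globals)  # caller-injected names become visible
    exec(code, scope)
    return scope.get("output")

print(check_llm_command("output = value * 2", extra_globals={"value": 21}))  # 42
```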
```diff
--- npcpy-1.2.28/npcpy/npc_compiler.py
+++ npcpy-1.2.30/npcpy/npc_compiler.py
@@ -264,7 +264,6 @@ class Jinx:
         self.inputs = jinx_data.get("inputs", [])
         self.description = jinx_data.get("description", "")
         self.steps = self._parse_steps(jinx_data.get("steps", []))
-
     def _parse_steps(self, steps):
         """Parse steps from jinx definition"""
         parsed_steps = []
@@ -275,66 +274,71 @@ class Jinx:
                     "engine": step.get("engine", "natural"),
                     "code": step.get("code", "")
                 }
+                if "mode" in step:
+                    parsed_step["mode"] = step["mode"]
                 parsed_steps.append(parsed_step)
             else:
                 raise ValueError(f"Invalid step format: {step}")
         return parsed_steps
-
+
     def execute(self,
-                input_values,
-                jinxs_dict,
-                jinja_env = None,
-                npc = None,
-                messages=None
-
+                input_values: Dict[str, Any],
+                jinxs_dict: Dict[str, 'Jinx'],
+                jinja_env: Optional[Environment] = None,
+                npc: Optional[Any] = None,
+                messages: Optional[List[Dict[str, str]]] = None,
+                **kwargs: Any):
+        """
+        Execute the jinx with given inputs.
+        **kwargs can be used to pass 'extra_globals' for the python engine.
+        """
         if jinja_env is None:
-
-
             from jinja2 import DictLoader
             jinja_env = Environment(
-                loader=DictLoader({}),
+                loader=DictLoader({}),
                 undefined=SilentUndefined,
             )
 
-        context = (npc.shared_context.copy() if npc else {})
+        context = (npc.shared_context.copy() if npc and hasattr(npc, 'shared_context') else {})
         context.update(input_values)
         context.update({
             "jinxs": jinxs_dict,
             "llm_response": None,
-            "output": None,
+            "output": None,
             "messages": messages,
         })
 
-
+        # This is the key change: Extract 'extra_globals' from kwargs
+        extra_globals = kwargs.get('extra_globals')
+
         for i, step in enumerate(self.steps):
             context = self._execute_step(
-                step,
+                step,
                 context,
-                jinja_env,
-                npc=npc,
-                messages=messages,
-
-            )
+                jinja_env,
+                npc=npc,
+                messages=messages,
+                extra_globals=extra_globals # Pass it down to the step executor
+            )
 
         return context
-
+
     def _execute_step(self,
-
-
-
-
-
-                      ):
-        """
+                      step: Dict[str, Any],
+                      context: Dict[str, Any],
+                      jinja_env: Environment,
+                      npc: Optional[Any] = None,
+                      messages: Optional[List[Dict[str, str]]] = None,
+                      extra_globals: Optional[Dict[str, Any]] = None):
+        """
+        Execute a single step of the jinx.
+        """
         engine = step.get("engine", "natural")
         code = step.get("code", "")
         step_name = step.get("name", "unnamed_step")
-
-
-
+        mode = step.get("mode", "chat")
 
         try:
-
             template = jinja_env.from_string(code)
             rendered_code = template.render(**context)
 
@@ -346,24 +350,32 @@ class Jinx:
             rendered_code = code
             rendered_engine = engine
 
-
         if rendered_engine == "natural":
             if rendered_code.strip():
-
-
-
-
-
-
-
+                if mode == "agent":
+                    response = npc.get_llm_response(
+                        rendered_code,
+                        context=context,
+                        messages=messages,
+                        auto_process_tool_calls=True,
+                        use_core_tools=True
+                    )
+                else:
+                    response = npc.get_llm_response(
+                        rendered_code,
+                        context=context,
+                        messages=messages,
+                    )
+
                 response_text = response.get("response", "")
                 context['output'] = response_text
                 context["llm_response"] = response_text
                 context["results"] = response_text
                 context[step_name] = response_text
                 context['messages'] = response.get('messages')
+
         elif rendered_engine == "python":
-
+            # Base globals available to all python jinxes, defined within the library (npcpy)
             exec_globals = {
                 "__builtins__": __builtins__,
                 "npc": npc,
@@ -378,49 +390,57 @@ class Jinx:
                 "fnmatch": fnmatch,
                 "pathlib": pathlib,
                 "subprocess": subprocess,
-                "get_llm_response": npy.llm_funcs.get_llm_response,
-
-
-
+                "get_llm_response": npy.llm_funcs.get_llm_response,
+                "CommandHistory": CommandHistory, # This is fine, it's part of npcpy
+            }
 
+            # This is the fix: Update the globals with the dictionary passed in from the application (npcsh)
+            if extra_globals:
+                exec_globals.update(extra_globals)
 
             exec_locals = {}
-
-
-
+            try:
+                exec(rendered_code, exec_globals, exec_locals)
+            except Exception as e:
+                # Provide a clear error message in the output if execution fails
+                error_msg = f"Error executing jinx python code: {type(e).__name__}: {e}"
+                context['output'] = error_msg
+                return context
+
             context.update(exec_locals)
 
-
             if "output" in exec_locals:
                 outp = exec_locals["output"]
                 context["output"] = outp
                 context[step_name] = outp
-                messages
-
-
+                if messages is not None:
+                    messages.append({'role':'assistant',
+                                     'content': f'Jinx executed with following output: {outp}'})
+                context['messages'] = messages
 
         else:
-
             context[step_name] = {"error": f"Unsupported engine: {rendered_engine}"}
 
         return context
-
     def to_dict(self):
         """Convert to dictionary representation"""
+        steps_list = []
+        for i, step in enumerate(self.steps):
+            step_dict = {
+                "name": step.get("name", f"step_{i}"),
+                "engine": step.get("engine"),
+                "code": step.get("code")
+            }
+            if "mode" in step:
+                step_dict["mode"] = step["mode"]
+            steps_list.append(step_dict)
+
         return {
             "jinx_name": self.jinx_name,
             "description": self.description,
             "inputs": self.inputs,
-            "steps":
-                {
-                    "name": step.get("name", f"step_{i}"),
-                    "engine": step.get("engine"),
-                    "code": step.get("code")
-                }
-                for i, step in enumerate(self.steps)
-            ]
+            "steps": steps_list
         }
-
     def save(self, directory):
         """Save jinx to file"""
         jinx_path = os.path.join(directory, f"{self.jinx_name}.jinx")
@@ -564,6 +584,9 @@ def get_npc_action_space(npc=None, team=None):
 
     return actions
 def extract_jinx_inputs(args: List[str], jinx: Jinx) -> Dict[str, Any]:
+    print(f"DEBUG extract_jinx_inputs called with args: {args}")
+    print(f"DEBUG jinx.inputs: {jinx.inputs}")
+
     inputs = {}
 
     flag_mapping = {}
@@ -588,7 +611,6 @@ def extract_jinx_inputs(args: List[str], jinx: Jinx) -> Dict[str, Any]:
     else:
         used_args = set()
 
-
     for i, arg in enumerate(args):
         if i in used_args:
             continue
@@ -606,21 +628,38 @@ def extract_jinx_inputs(args: List[str], jinx: Jinx) -> Dict[str, Any]:
 
     unused_args = [arg for i, arg in enumerate(args) if i not in used_args]
 
-
+    print(f"DEBUG unused_args: {unused_args}")
+
+    # Find first required input (no default value)
+    first_required = None
     for input_ in jinx.inputs:
         if isinstance(input_, str):
-
-
-
-
-
+            first_required = input_
+            break
+
+    print(f"DEBUG first_required: {first_required}")
+
+    # Give all unused args to first required input
+    if first_required and unused_args:
+        inputs[first_required] = ' '.join(unused_args).strip()
+        print(f"DEBUG assigned to first_required: {inputs[first_required]}")
     else:
-
-
-
-
-
-
+        # Fallback to original behavior
+        jinx_input_names = []
+        for input_ in jinx.inputs:
+            if isinstance(input_, str):
+                jinx_input_names.append(input_)
+            elif isinstance(input_, dict):
+                jinx_input_names.append(list(input_.keys())[0])
+
+        if len(jinx_input_names) == 1:
+            inputs[jinx_input_names[0]] = ' '.join(unused_args).strip()
+        else:
+            for i, arg in enumerate(unused_args):
+                if i < len(jinx_input_names):
+                    input_name = jinx_input_names[i]
+                    if input_name not in inputs:
+                        inputs[input_name] = arg
 
     for input_ in jinx.inputs:
         if isinstance(input_, str):
@@ -632,8 +671,8 @@ def extract_jinx_inputs(args: List[str], jinx: Jinx) -> Dict[str, Any]:
         if key not in inputs:
             inputs[key] = default_value
 
+    print(f"DEBUG final inputs: {inputs}")
     return inputs
-
 from npcpy.memory.command_history import load_kg_from_db, save_kg_to_db
 from npcpy.memory.knowledge_graph import kg_initial, kg_evolve_incremental, kg_sleep_process, kg_dream_process
 from npcpy.llm_funcs import get_llm_response, breathe
```
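In the python engine above, injected names live in `exec_globals` while new assignments made by the step land in `exec_locals`; persistence therefore depends on the caller merging the returned context back into its own state. A minimal sketch of that round trip (a trimmed stand-in, not the `Jinx` class itself):

```python
def run_python_step(code, extra_globals=None):
    exec_globals = {"__builtins__": __builtins__}
    if extra_globals:
        exec_globals.update(extra_globals)  # names the step's code can read
    exec_locals = {}
    exec(code, exec_globals, exec_locals)   # new assignments land in exec_locals
    return exec_locals                      # caller decides what to keep

state = {"greeting": "hello"}
state.update(run_python_step("combined = greeting + ' world'", extra_globals=state))
print(state["combined"])  # 'hello world' -- carried into the next step
```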
```diff
--- npcpy-1.2.28/npcpy/serve.py
+++ npcpy-1.2.30/npcpy/serve.py
@@ -149,7 +149,7 @@ def load_kg_data(generation=None):
 app = Flask(__name__)
 app.config["REDIS_URL"] = "redis://localhost:6379"
 app.config['DB_PATH'] = ''
-
+app.jinx_conversation_contexts ={}
 
 redis_client = redis.Redis(host="localhost", port=6379, decode_responses=True)
 
@@ -585,19 +585,27 @@ def execute_jinx():
     with cancellation_lock:
         cancellation_flags[stream_id] = False
 
-    print(
+    print(f"--- Jinx Execution Request for streamId: {stream_id} ---")
+    print(f"Request Data: {json.dumps(data, indent=2)}")
 
     jinx_name = data.get("jinxName")
     jinx_args = data.get("jinxArgs", [])
-    print(jinx_args)
+    print(f"Jinx Name: {jinx_name}, Jinx Args: {jinx_args}")
     conversation_id = data.get("conversationId")
     model = data.get("model")
     provider = data.get("provider")
+
+    # --- IMPORTANT: Ensure conversation_id is present for context persistence ---
+    if not conversation_id:
+        print("ERROR: conversationId is required for Jinx execution with persistent variables")
+        return jsonify({"error": "conversationId is required for Jinx execution with persistent variables"}), 400
+
     npc_name = data.get("npc")
     npc_source = data.get("npcSource", "global")
     current_path = data.get("currentPath")
 
     if not jinx_name:
+        print("ERROR: jinxName is required")
         return jsonify({"error": "jinxName is required"}), 400
 
     # Load project environment if applicable
@@ -632,12 +640,12 @@ def execute_jinx():
         jinx = Jinx(jinx_path=global_jinx_path)
 
     if not jinx:
+        print(f"ERROR: Jinx '{jinx_name}' not found")
         return jsonify({"error": f"Jinx '{jinx_name}' not found"}), 404
 
     # Extract inputs from args
     from npcpy.npc_compiler import extract_jinx_inputs
 
-    # --- Start of Fix ---
     # Re-assemble arguments that were incorrectly split by spaces.
     fixed_args = []
     i = 0
@@ -666,15 +674,11 @@ def execute_jinx():
         # This handles positional arguments, just in case.
             fixed_args.append(arg)
             i += 1
-    # --- End of Fix ---
 
     # Now, use the corrected arguments to extract inputs.
     input_values = extract_jinx_inputs(fixed_args, jinx)
 
-
-
-
-    print('executing jinx with input_values ,', input_values)
+    print(f'Executing jinx with input_values: {input_values}')
     # Get conversation history
     command_history = CommandHistory(app.config.get('DB_PATH'))
     messages = fetch_messages_for_conversation(conversation_id)
@@ -684,20 +688,48 @@ def execute_jinx():
     if npc_object and hasattr(npc_object, 'jinxs_dict'):
         all_jinxs.update(npc_object.jinxs_dict)
 
+    # --- IMPORTANT: Retrieve or initialize the persistent Jinx context for this conversation ---
+    if conversation_id not in app.jinx_conversation_contexts:
+        app.jinx_conversation_contexts[conversation_id] = {}
+    jinx_local_context = app.jinx_conversation_contexts[conversation_id]
+
+    print(f"--- CONTEXT STATE (conversationId: {conversation_id}) ---")
+    print(f"jinx_local_context BEFORE Jinx execution: {jinx_local_context}")
+
     def event_stream(current_stream_id):
         try:
-            #
+            # --- IMPORTANT: Pass the persistent context as 'extra_globals' ---
             result = jinx.execute(
                 input_values=input_values,
                 jinxs_dict=all_jinxs,
                 jinja_env=npc_object.jinja_env if npc_object else None,
                 npc=npc_object,
-                messages=messages
+                messages=messages,
+                extra_globals=jinx_local_context # <--- THIS IS WHERE THE PERSISTENT CONTEXT IS PASSED
             )
 
-            #
+            # --- CRITICAL FIX: Capture and update local_vars from the Jinx's result ---
+            # The Jinx.execute method returns its internal 'context' dictionary.
+            # We need to update our persistent 'jinx_local_context' with the new variables
+            # from the Jinx's returned context.
+            if isinstance(result, dict):
+                # We need to be careful not to overwrite core Jinx/NPC context keys
+                # that are not meant for variable persistence.
+                keys_to_exclude = ['output', 'llm_response', 'messages', 'results', 'npc', 'context', 'jinxs', 'team']
+
+                # Update jinx_local_context with all non-excluded keys from the result
+                for key, value in result.items():
+                    if key not in keys_to_exclude and not key.startswith('_'): # Exclude internal/temporary keys
+                        jinx_local_context[key] = value
+
+                print(f"jinx_local_context UPDATED from Jinx result: {jinx_local_context}") # NEW LOG
+
+            # Get output (this still comes from the 'output' key in the result)
             output = result.get('output', str(result))
             messages_updated = result.get('messages', messages)
+
+            print(f"jinx_local_context AFTER Jinx execution (final state): {jinx_local_context}")
+            print(f"Jinx execution result output: {output}")
 
             # Check for interruption
             with cancellation_lock:
@@ -774,7 +806,7 @@ def execute_jinx():
             )
 
         except Exception as e:
-            print(f"
+            print(f"ERROR: Exception during jinx execution {jinx_name}: {str(e)}")
             traceback.print_exc()
             error_data = {
                 "type": "error",
@@ -786,6 +818,7 @@ def execute_jinx():
         with cancellation_lock:
             if current_stream_id in cancellation_flags:
                 del cancellation_flags[current_stream_id]
+        print(f"--- Jinx Execution Finished for streamId: {stream_id} ---")
 
     return Response(event_stream(stream_id), mimetype="text/event-stream")
 
```
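Taken together, the `serve.py` changes give each `conversationId` its own variable namespace: the route injects the stored dict into `Jinx.execute` as `extra_globals`, then copies non-reserved keys from the returned context back into it. A minimal sketch of that persistence loop in isolation (a stand-in, not the Flask route):

```python
contexts = {}  # conversationId -> persistent jinx variables

KEYS_TO_EXCLUDE = {'output', 'llm_response', 'messages', 'results',
                   'npc', 'context', 'jinxs', 'team'}

def persist(conversation_id: str, result: dict) -> dict:
    """Fold a jinx's returned context back into the per-conversation store."""
    ctx = contexts.setdefault(conversation_id, {})
    for key, value in result.items():
        if key not in KEYS_TO_EXCLUDE and not key.startswith('_'):
            ctx[key] = value  # only user-level variables are carried forward
    return ctx

persist("conv-1", {"output": "done", "x": 5})
print(contexts["conv-1"])  # {'x': 5} -- offered as extra_globals on the next call
```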
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|