npcpy-1.1.28-py3-none-any.whl → npcpy-1.2.32-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- npcpy/data/audio.py +16 -38
- npcpy/data/image.py +29 -29
- npcpy/data/load.py +4 -3
- npcpy/data/text.py +28 -28
- npcpy/data/video.py +6 -6
- npcpy/data/web.py +49 -21
- npcpy/ft/__init__.py +0 -0
- npcpy/ft/diff.py +110 -0
- npcpy/ft/ge.py +115 -0
- npcpy/ft/memory_trainer.py +171 -0
- npcpy/ft/model_ensembler.py +357 -0
- npcpy/ft/rl.py +360 -0
- npcpy/ft/sft.py +248 -0
- npcpy/ft/usft.py +128 -0
- npcpy/gen/audio_gen.py +24 -0
- npcpy/gen/embeddings.py +13 -13
- npcpy/gen/image_gen.py +37 -15
- npcpy/gen/response.py +287 -111
- npcpy/gen/video_gen.py +10 -9
- npcpy/llm_funcs.py +447 -79
- npcpy/memory/command_history.py +201 -48
- npcpy/memory/kg_vis.py +74 -74
- npcpy/memory/knowledge_graph.py +482 -115
- npcpy/memory/memory_processor.py +81 -0
- npcpy/memory/search.py +70 -70
- npcpy/mix/debate.py +192 -3
- npcpy/npc_compiler.py +1541 -879
- npcpy/npc_sysenv.py +250 -78
- npcpy/serve.py +1036 -321
- npcpy/sql/ai_function_tools.py +257 -0
- npcpy/sql/database_ai_adapters.py +186 -0
- npcpy/sql/database_ai_functions.py +163 -0
- npcpy/sql/model_runner.py +19 -19
- npcpy/sql/npcsql.py +706 -507
- npcpy/sql/sql_model_compiler.py +156 -0
- npcpy/tools.py +20 -20
- npcpy/work/plan.py +8 -8
- npcpy/work/trigger.py +3 -3
- {npcpy-1.1.28.dist-info → npcpy-1.2.32.dist-info}/METADATA +169 -9
- npcpy-1.2.32.dist-info/RECORD +54 -0
- npcpy-1.1.28.dist-info/RECORD +0 -40
- {npcpy-1.1.28.dist-info → npcpy-1.2.32.dist-info}/WHEEL +0 -0
- {npcpy-1.1.28.dist-info → npcpy-1.2.32.dist-info}/licenses/LICENSE +0 -0
- {npcpy-1.1.28.dist-info → npcpy-1.2.32.dist-info}/top_level.txt +0 -0
npcpy/serve.py
CHANGED
|
@@ -4,7 +4,9 @@ from flask_sse import sse
|
|
|
4
4
|
import redis
|
|
5
5
|
import threading
|
|
6
6
|
import uuid
|
|
7
|
+
import sys
|
|
7
8
|
import traceback
|
|
9
|
+
import glob
|
|
8
10
|
|
|
9
11
|
|
|
10
12
|
from flask_cors import CORS
|
|
@@ -41,6 +43,7 @@ from npcpy.npc_sysenv import get_locally_available_models
|
|
|
41
43
|
from npcpy.memory.command_history import (
|
|
42
44
|
CommandHistory,
|
|
43
45
|
save_conversation_message,
|
|
46
|
+
generate_message_id,
|
|
44
47
|
)
|
|
45
48
|
from npcpy.npc_compiler import Jinx, NPC, Team
|
|
46
49
|
|
|
@@ -57,11 +60,11 @@ import os
|
|
|
57
60
|
from pathlib import Path
|
|
58
61
|
from flask_cors import CORS
|
|
59
62
|
|
|
60
|
-
# Path for storing settings
|
|
61
|
-
# instead of a static path relative to server launch directory
|
|
62
63
|
|
|
63
64
|
|
|
64
|
-
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
|
|
65
68
|
cancellation_flags = {}
|
|
66
69
|
cancellation_lock = threading.Lock()
|
|
67
70
|
|
|
@@ -79,9 +82,10 @@ def get_project_npc_directory(current_path=None):
|
|
|
79
82
|
if current_path:
|
|
80
83
|
return os.path.join(current_path, "npc_team")
|
|
81
84
|
else:
|
|
82
|
-
|
|
85
|
+
|
|
83
86
|
return os.path.abspath("./npc_team")
|
|
84
87
|
|
|
88
|
+
|
|
85
89
|
def load_project_env(current_path):
|
|
86
90
|
"""
|
|
87
91
|
Load environment variables from a project's .env file
|
|
@@ -100,12 +104,12 @@ def load_project_env(current_path):
|
|
|
100
104
|
|
|
101
105
|
if os.path.exists(env_path):
|
|
102
106
|
print(f"Loading project environment from {env_path}")
|
|
103
|
-
|
|
104
|
-
|
|
107
|
+
|
|
108
|
+
|
|
105
109
|
success = load_dotenv(env_path, override=True)
|
|
106
110
|
|
|
107
111
|
if success:
|
|
108
|
-
|
|
112
|
+
|
|
109
113
|
with open(env_path, "r") as f:
|
|
110
114
|
for line in f:
|
|
111
115
|
line = line.strip()
|
|
@@ -122,7 +126,9 @@ def load_project_env(current_path):
|
|
|
122
126
|
|
|
123
127
|
return loaded_vars
|
|
124
128
|
|
|
125
|
-
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
|
|
126
132
|
def load_kg_data(generation=None):
|
|
127
133
|
"""Helper function to load data up to a specific generation."""
|
|
128
134
|
engine = create_engine('sqlite:///' + app.config.get('DB_PATH'))
|
|
@@ -132,7 +138,7 @@ def load_kg_data(generation=None):
|
|
|
132
138
|
concepts_df = pd.read_sql_query(f"SELECT * FROM kg_concepts{query_suffix}", engine)
|
|
133
139
|
facts_df = pd.read_sql_query(f"SELECT * FROM kg_facts{query_suffix}", engine)
|
|
134
140
|
|
|
135
|
-
|
|
141
|
+
|
|
136
142
|
all_links_df = pd.read_sql_query("SELECT * FROM kg_links", engine)
|
|
137
143
|
valid_nodes = set(concepts_df['name']).union(set(facts_df['statement']))
|
|
138
144
|
links_df = all_links_df[all_links_df['source'].isin(valid_nodes) & all_links_df['target'].isin(valid_nodes)]
|
|
@@ -143,7 +149,7 @@ def load_kg_data(generation=None):
|
|
|
143
149
|
app = Flask(__name__)
|
|
144
150
|
app.config["REDIS_URL"] = "redis://localhost:6379"
|
|
145
151
|
app.config['DB_PATH'] = ''
|
|
146
|
-
|
|
152
|
+
app.jinx_conversation_contexts ={}
|
|
147
153
|
|
|
148
154
|
redis_client = redis.Redis(host="localhost", port=6379, decode_responses=True)
|
|
149
155
|
|
|
@@ -210,15 +216,15 @@ def load_npc_by_name_and_source(name, source, db_conn=None, current_path=None):
|
|
|
210
216
|
if not db_conn:
|
|
211
217
|
db_conn = get_db_connection()
|
|
212
218
|
|
|
213
|
-
|
|
219
|
+
|
|
214
220
|
if source == 'project':
|
|
215
221
|
npc_directory = get_project_npc_directory(current_path)
|
|
216
222
|
print(f"Looking for project NPC in: {npc_directory}")
|
|
217
|
-
else:
|
|
223
|
+
else:
|
|
218
224
|
npc_directory = app.config['user_npc_directory']
|
|
219
225
|
print(f"Looking for global NPC in: {npc_directory}")
|
|
220
226
|
|
|
221
|
-
|
|
227
|
+
|
|
222
228
|
npc_path = os.path.join(npc_directory, f"{name}.npc")
|
|
223
229
|
|
|
224
230
|
if os.path.exists(npc_path):
|
|
@@ -252,9 +258,9 @@ def get_conversation_history(conversation_id):
|
|
|
252
258
|
|
|
253
259
|
return [
|
|
254
260
|
{
|
|
255
|
-
"role": msg[0],
|
|
256
|
-
"content": msg[1],
|
|
257
|
-
"timestamp": msg[2],
|
|
261
|
+
"role": msg[0],
|
|
262
|
+
"content": msg[1],
|
|
263
|
+
"timestamp": msg[2],
|
|
258
264
|
}
|
|
259
265
|
for msg in messages
|
|
260
266
|
]
|
|
@@ -279,9 +285,9 @@ def fetch_messages_for_conversation(conversation_id):
|
|
|
279
285
|
|
|
280
286
|
return [
|
|
281
287
|
{
|
|
282
|
-
"role": message[0],
|
|
283
|
-
"content": message[1],
|
|
284
|
-
"timestamp": message[2],
|
|
288
|
+
"role": message[0],
|
|
289
|
+
"content": message[1],
|
|
290
|
+
"timestamp": message[2],
|
|
285
291
|
}
|
|
286
292
|
for message in messages
|
|
287
293
|
]
|
|
@@ -296,13 +302,13 @@ def fetch_messages_for_conversation(conversation_id):
|
|
|
296
302
|
def list_generations():
|
|
297
303
|
try:
|
|
298
304
|
engine = create_engine('sqlite:///' + app.config.get('DB_PATH'))
|
|
299
|
-
|
|
305
|
+
|
|
300
306
|
query = "SELECT DISTINCT generation FROM kg_concepts UNION SELECT DISTINCT generation FROM kg_facts"
|
|
301
307
|
generations_df = pd.read_sql_query(query, engine)
|
|
302
308
|
generations = generations_df.iloc[:, 0].tolist()
|
|
303
309
|
return jsonify({"generations": sorted([g for g in generations if g is not None])})
|
|
304
310
|
except Exception as e:
|
|
305
|
-
|
|
311
|
+
|
|
306
312
|
print(f"Error listing generations (likely new DB): {e}")
|
|
307
313
|
return jsonify({"generations": []})
|
|
308
314
|
|
|
@@ -399,7 +405,7 @@ def get_attachment(attachment_id):
|
|
|
399
405
|
data, name, type = command_history.get_attachment_data(attachment_id)
|
|
400
406
|
|
|
401
407
|
if data:
|
|
402
|
-
|
|
408
|
+
|
|
403
409
|
base64_data = base64.b64encode(data).decode("utf-8")
|
|
404
410
|
return jsonify(
|
|
405
411
|
{"data": base64_data, "name": name, "type": type, "error": None}
|
|
@@ -411,10 +417,10 @@ def get_attachment(attachment_id):
|
|
|
411
417
|
|
|
412
418
|
@app.route("/api/capture_screenshot", methods=["GET"])
|
|
413
419
|
def capture():
|
|
414
|
-
|
|
420
|
+
|
|
415
421
|
screenshot = capture_screenshot(None, full=True)
|
|
416
422
|
|
|
417
|
-
|
|
423
|
+
|
|
418
424
|
if not screenshot:
|
|
419
425
|
print("Screenshot capture failed")
|
|
420
426
|
return None
|
|
@@ -430,14 +436,14 @@ def get_global_settings():
|
|
|
430
436
|
try:
|
|
431
437
|
npcshrc_path = os.path.expanduser("~/.npcshrc")
|
|
432
438
|
|
|
433
|
-
|
|
439
|
+
|
|
434
440
|
global_settings = {
|
|
435
441
|
"model": "llama3.2",
|
|
436
442
|
"provider": "ollama",
|
|
437
443
|
"embedding_model": "nomic-embed-text",
|
|
438
444
|
"embedding_provider": "ollama",
|
|
439
445
|
"search_provider": "perplexity",
|
|
440
|
-
"
|
|
446
|
+
"NPC_STUDIO_LICENSE_KEY": "",
|
|
441
447
|
"default_folder": os.path.expanduser("~/.npcsh/"),
|
|
442
448
|
}
|
|
443
449
|
global_vars = {}
|
|
@@ -445,7 +451,7 @@ def get_global_settings():
|
|
|
445
451
|
if os.path.exists(npcshrc_path):
|
|
446
452
|
with open(npcshrc_path, "r") as f:
|
|
447
453
|
for line in f:
|
|
448
|
-
|
|
454
|
+
|
|
449
455
|
line = line.split("#")[0].strip()
|
|
450
456
|
if not line:
|
|
451
457
|
continue
|
|
@@ -453,27 +459,27 @@ def get_global_settings():
|
|
|
453
459
|
if "=" not in line:
|
|
454
460
|
continue
|
|
455
461
|
|
|
456
|
-
|
|
462
|
+
|
|
457
463
|
key, value = line.split("=", 1)
|
|
458
464
|
key = key.strip()
|
|
459
465
|
if key.startswith("export "):
|
|
460
466
|
key = key[7:]
|
|
461
467
|
|
|
462
|
-
|
|
468
|
+
|
|
463
469
|
value = value.strip()
|
|
464
470
|
if value.startswith('"') and value.endswith('"'):
|
|
465
471
|
value = value[1:-1]
|
|
466
472
|
elif value.startswith("'") and value.endswith("'"):
|
|
467
473
|
value = value[1:-1]
|
|
468
474
|
|
|
469
|
-
|
|
475
|
+
|
|
470
476
|
key_mapping = {
|
|
471
477
|
"NPCSH_MODEL": "model",
|
|
472
478
|
"NPCSH_PROVIDER": "provider",
|
|
473
479
|
"NPCSH_EMBEDDING_MODEL": "embedding_model",
|
|
474
480
|
"NPCSH_EMBEDDING_PROVIDER": "embedding_provider",
|
|
475
481
|
"NPCSH_SEARCH_PROVIDER": "search_provider",
|
|
476
|
-
"
|
|
482
|
+
"NPC_STUDIO_LICENSE_KEY": "NPC_STUDIO_LICENSE_KEY",
|
|
477
483
|
"NPCSH_STREAM_OUTPUT": "NPCSH_STREAM_OUTPUT",
|
|
478
484
|
"NPC_STUDIO_DEFAULT_FOLDER": "default_folder",
|
|
479
485
|
}
|
|
@@ -496,7 +502,325 @@ def get_global_settings():
|
|
|
496
502
|
except Exception as e:
|
|
497
503
|
print(f"Error in get_global_settings: {str(e)}")
|
|
498
504
|
return jsonify({"error": str(e)}), 500
|
|
505
|
+
def _get_jinx_files_recursively(directory):
|
|
506
|
+
"""Helper to recursively find all .jinx file paths."""
|
|
507
|
+
jinx_paths = []
|
|
508
|
+
if os.path.exists(directory):
|
|
509
|
+
for root, _, files in os.walk(directory):
|
|
510
|
+
for filename in files:
|
|
511
|
+
if filename.endswith(".jinx"):
|
|
512
|
+
jinx_paths.append(os.path.join(root, filename))
|
|
513
|
+
return jinx_paths
|
|
514
|
+
|
|
515
|
+
@app.route("/api/jinxs/available", methods=["GET"])
|
|
516
|
+
def get_available_jinxs():
|
|
517
|
+
try:
|
|
518
|
+
current_path = request.args.get('currentPath')
|
|
519
|
+
jinx_names = set()
|
|
520
|
+
|
|
521
|
+
if current_path:
|
|
522
|
+
team_jinxs_dir = os.path.join(current_path, 'npc_team', 'jinxs')
|
|
523
|
+
jinx_paths = _get_jinx_files_recursively(team_jinxs_dir)
|
|
524
|
+
for path in jinx_paths:
|
|
525
|
+
jinx_names.add(os.path.basename(path)[:-5])
|
|
526
|
+
|
|
527
|
+
global_jinxs_dir = os.path.expanduser('~/.npcsh/npc_team/jinxs')
|
|
528
|
+
jinx_paths = _get_jinx_files_recursively(global_jinxs_dir)
|
|
529
|
+
for path in jinx_paths:
|
|
530
|
+
jinx_names.add(os.path.basename(path)[:-5])
|
|
531
|
+
|
|
532
|
+
return jsonify({'jinxs': sorted(list(jinx_names)), 'error': None})
|
|
533
|
+
except Exception as e:
|
|
534
|
+
print(f"Error getting available jinxs: {str(e)}")
|
|
535
|
+
traceback.print_exc()
|
|
536
|
+
return jsonify({'jinxs': [], 'error': str(e)}), 500
|
|
537
|
+
|
|
538
|
+
@app.route("/api/jinxs/global", methods=["GET"])
|
|
539
|
+
def get_global_jinxs():
|
|
540
|
+
jinxs_dir = os.path.join(os.path.expanduser("~"), ".npcsh", "npc_team", "jinxs")
|
|
541
|
+
jinx_paths = _get_jinx_files_recursively(jinxs_dir)
|
|
542
|
+
jinxs = []
|
|
543
|
+
for path in jinx_paths:
|
|
544
|
+
try:
|
|
545
|
+
with open(path, "r") as f:
|
|
546
|
+
jinx_data = yaml.safe_load(f)
|
|
547
|
+
jinxs.append(jinx_data)
|
|
548
|
+
except Exception as e:
|
|
549
|
+
print(f"Error loading global jinx {path}: {e}")
|
|
550
|
+
return jsonify({"jinxs": jinxs})
|
|
551
|
+
|
|
552
|
+
@app.route("/api/jinxs/project", methods=["GET"])
|
|
553
|
+
def get_project_jinxs():
|
|
554
|
+
current_path = request.args.get("currentPath")
|
|
555
|
+
if not current_path:
|
|
556
|
+
return jsonify({"jinxs": []})
|
|
557
|
+
|
|
558
|
+
if not current_path.endswith("npc_team"):
|
|
559
|
+
current_path = os.path.join(current_path, "npc_team")
|
|
560
|
+
|
|
561
|
+
jinxs_dir = os.path.join(current_path, "jinxs")
|
|
562
|
+
jinx_paths = _get_jinx_files_recursively(jinxs_dir)
|
|
563
|
+
jinxs = []
|
|
564
|
+
for path in jinx_paths:
|
|
565
|
+
try:
|
|
566
|
+
with open(path, "r") as f:
|
|
567
|
+
jinx_data = yaml.safe_load(f)
|
|
568
|
+
jinxs.append(jinx_data)
|
|
569
|
+
except Exception as e:
|
|
570
|
+
print(f"Error loading project jinx {path}: {e}")
|
|
571
|
+
return jsonify({"jinxs": jinxs})
|
|
499
572
|
|
|
573
|
+
@app.route("/api/jinx/execute", methods=["POST"])
|
|
574
|
+
def execute_jinx():
|
|
575
|
+
"""
|
|
576
|
+
Execute a specific jinx with provided arguments.
|
|
577
|
+
Streams the output back to the client.
|
|
578
|
+
"""
|
|
579
|
+
data = request.json
|
|
580
|
+
|
|
581
|
+
stream_id = data.get("streamId")
|
|
582
|
+
if not stream_id:
|
|
583
|
+
stream_id = str(uuid.uuid4())
|
|
584
|
+
|
|
585
|
+
with cancellation_lock:
|
|
586
|
+
cancellation_flags[stream_id] = False
|
|
587
|
+
|
|
588
|
+
print(f"--- Jinx Execution Request for streamId: {stream_id} ---")
|
|
589
|
+
print(f"Request Data: {json.dumps(data, indent=2)}")
|
|
590
|
+
|
|
591
|
+
jinx_name = data.get("jinxName")
|
|
592
|
+
jinx_args = data.get("jinxArgs", [])
|
|
593
|
+
print(f"Jinx Name: {jinx_name}, Jinx Args: {jinx_args}")
|
|
594
|
+
conversation_id = data.get("conversationId")
|
|
595
|
+
model = data.get("model")
|
|
596
|
+
provider = data.get("provider")
|
|
597
|
+
|
|
598
|
+
# --- IMPORTANT: Ensure conversation_id is present for context persistence ---
|
|
599
|
+
if not conversation_id:
|
|
600
|
+
print("ERROR: conversationId is required for Jinx execution with persistent variables")
|
|
601
|
+
return jsonify({"error": "conversationId is required for Jinx execution with persistent variables"}), 400
|
|
602
|
+
|
|
603
|
+
npc_name = data.get("npc")
|
|
604
|
+
npc_source = data.get("npcSource", "global")
|
|
605
|
+
current_path = data.get("currentPath")
|
|
606
|
+
|
|
607
|
+
if not jinx_name:
|
|
608
|
+
print("ERROR: jinxName is required")
|
|
609
|
+
return jsonify({"error": "jinxName is required"}), 400
|
|
610
|
+
|
|
611
|
+
# Load project environment if applicable
|
|
612
|
+
if current_path:
|
|
613
|
+
load_project_env(current_path)
|
|
614
|
+
|
|
615
|
+
# Load the NPC
|
|
616
|
+
npc_object = None
|
|
617
|
+
if npc_name:
|
|
618
|
+
db_conn = get_db_connection()
|
|
619
|
+
npc_object = load_npc_by_name_and_source(npc_name, npc_source, db_conn, current_path)
|
|
620
|
+
if not npc_object and npc_source == 'project':
|
|
621
|
+
npc_object = load_npc_by_name_and_source(npc_name, 'global', db_conn)
|
|
622
|
+
|
|
623
|
+
# Try to find the jinx
|
|
624
|
+
jinx = None
|
|
625
|
+
|
|
626
|
+
# Check NPC's jinxs
|
|
627
|
+
if npc_object and hasattr(npc_object, 'jinxs_dict') and jinx_name in npc_object.jinxs_dict:
|
|
628
|
+
jinx = npc_object.jinxs_dict[jinx_name]
|
|
629
|
+
|
|
630
|
+
# Check team jinxs
|
|
631
|
+
if not jinx and current_path:
|
|
632
|
+
team_jinx_path = os.path.join(current_path, 'npc_team', 'jinxs', f'{jinx_name}.jinx')
|
|
633
|
+
if os.path.exists(team_jinx_path):
|
|
634
|
+
jinx = Jinx(jinx_path=team_jinx_path)
|
|
635
|
+
|
|
636
|
+
# Check global jinxs
|
|
637
|
+
if not jinx:
|
|
638
|
+
global_jinx_path = os.path.expanduser(f'~/.npcsh/npc_team/jinxs/{jinx_name}.jinx')
|
|
639
|
+
if os.path.exists(global_jinx_path):
|
|
640
|
+
jinx = Jinx(jinx_path=global_jinx_path)
|
|
641
|
+
|
|
642
|
+
if not jinx:
|
|
643
|
+
print(f"ERROR: Jinx '{jinx_name}' not found")
|
|
644
|
+
return jsonify({"error": f"Jinx '{jinx_name}' not found"}), 404
|
|
645
|
+
|
|
646
|
+
# Extract inputs from args
|
|
647
|
+
from npcpy.npc_compiler import extract_jinx_inputs
|
|
648
|
+
|
|
649
|
+
# Re-assemble arguments that were incorrectly split by spaces.
|
|
650
|
+
fixed_args = []
|
|
651
|
+
i = 0
|
|
652
|
+
while i < len(jinx_args):
|
|
653
|
+
arg = jinx_args[i]
|
|
654
|
+
if arg.startswith('-'):
|
|
655
|
+
fixed_args.append(arg)
|
|
656
|
+
value_parts = []
|
|
657
|
+
i += 1
|
|
658
|
+
# Collect all subsequent parts until the next flag or the end of the list.
|
|
659
|
+
while i < len(jinx_args) and not jinx_args[i].startswith('-'):
|
|
660
|
+
value_parts.append(jinx_args[i])
|
|
661
|
+
i += 1
|
|
662
|
+
|
|
663
|
+
if value_parts:
|
|
664
|
+
# Join the parts back into a single string.
|
|
665
|
+
full_value = " ".join(value_parts)
|
|
666
|
+
# Clean up the extraneous quotes that the initial bad split left behind.
|
|
667
|
+
if full_value.startswith("'") and full_value.endswith("'"):
|
|
668
|
+
full_value = full_value[1:-1]
|
|
669
|
+
elif full_value.startswith('"') and full_value.endswith('"'):
|
|
670
|
+
full_value = full_value[1:-1]
|
|
671
|
+
fixed_args.append(full_value)
|
|
672
|
+
# The 'i' counter is already advanced, so the loop continues from the next flag.
|
|
673
|
+
else:
|
|
674
|
+
# This handles positional arguments, just in case.
|
|
675
|
+
fixed_args.append(arg)
|
|
676
|
+
i += 1
|
|
677
|
+
|
|
678
|
+
# Now, use the corrected arguments to extract inputs.
|
|
679
|
+
input_values = extract_jinx_inputs(fixed_args, jinx)
|
|
680
|
+
|
|
681
|
+
print(f'Executing jinx with input_values: {input_values}')
|
|
682
|
+
# Get conversation history
|
|
683
|
+
command_history = CommandHistory(app.config.get('DB_PATH'))
|
|
684
|
+
messages = fetch_messages_for_conversation(conversation_id)
|
|
685
|
+
|
|
686
|
+
# Prepare jinxs_dict for execution
|
|
687
|
+
all_jinxs = {}
|
|
688
|
+
if npc_object and hasattr(npc_object, 'jinxs_dict'):
|
|
689
|
+
all_jinxs.update(npc_object.jinxs_dict)
|
|
690
|
+
|
|
691
|
+
# --- IMPORTANT: Retrieve or initialize the persistent Jinx context for this conversation ---
|
|
692
|
+
if conversation_id not in app.jinx_conversation_contexts:
|
|
693
|
+
app.jinx_conversation_contexts[conversation_id] = {}
|
|
694
|
+
jinx_local_context = app.jinx_conversation_contexts[conversation_id]
|
|
695
|
+
|
|
696
|
+
print(f"--- CONTEXT STATE (conversationId: {conversation_id}) ---")
|
|
697
|
+
print(f"jinx_local_context BEFORE Jinx execution: {jinx_local_context}")
|
|
698
|
+
|
|
699
|
+
def event_stream(current_stream_id):
|
|
700
|
+
try:
|
|
701
|
+
# --- IMPORTANT: Pass the persistent context as 'extra_globals' ---
|
|
702
|
+
result = jinx.execute(
|
|
703
|
+
input_values=input_values,
|
|
704
|
+
jinxs_dict=all_jinxs,
|
|
705
|
+
jinja_env=npc_object.jinja_env if npc_object else None,
|
|
706
|
+
npc=npc_object,
|
|
707
|
+
messages=messages,
|
|
708
|
+
extra_globals=jinx_local_context # <--- THIS IS WHERE THE PERSISTENT CONTEXT IS PASSED
|
|
709
|
+
)
|
|
710
|
+
|
|
711
|
+
# --- CRITICAL FIX: Capture and update local_vars from the Jinx's result ---
|
|
712
|
+
# The Jinx.execute method returns its internal 'context' dictionary.
|
|
713
|
+
# We need to update our persistent 'jinx_local_context' with the new variables
|
|
714
|
+
# from the Jinx's returned context.
|
|
715
|
+
if isinstance(result, dict):
|
|
716
|
+
# We need to be careful not to overwrite core Jinx/NPC context keys
|
|
717
|
+
# that are not meant for variable persistence.
|
|
718
|
+
keys_to_exclude = ['output', 'llm_response', 'messages', 'results', 'npc', 'context', 'jinxs', 'team']
|
|
719
|
+
|
|
720
|
+
# Update jinx_local_context with all non-excluded keys from the result
|
|
721
|
+
for key, value in result.items():
|
|
722
|
+
if key not in keys_to_exclude and not key.startswith('_'): # Exclude internal/temporary keys
|
|
723
|
+
jinx_local_context[key] = value
|
|
724
|
+
|
|
725
|
+
print(f"jinx_local_context UPDATED from Jinx result: {jinx_local_context}") # NEW LOG
|
|
726
|
+
|
|
727
|
+
# Get output (this still comes from the 'output' key in the result)
|
|
728
|
+
output = result.get('output', str(result))
|
|
729
|
+
messages_updated = result.get('messages', messages)
|
|
730
|
+
|
|
731
|
+
print(f"jinx_local_context AFTER Jinx execution (final state): {jinx_local_context}")
|
|
732
|
+
print(f"Jinx execution result output: {output}")
|
|
733
|
+
|
|
734
|
+
# Check for interruption
|
|
735
|
+
with cancellation_lock:
|
|
736
|
+
if cancellation_flags.get(current_stream_id, False):
|
|
737
|
+
yield f"data: {json.dumps({'type': 'interrupted'})}\n\n"
|
|
738
|
+
return
|
|
739
|
+
|
|
740
|
+
# Stream the output in chunks for consistent UI experience
|
|
741
|
+
if isinstance(output, str):
|
|
742
|
+
chunk_size = 50 # Characters per chunk
|
|
743
|
+
for i in range(0, len(output), chunk_size):
|
|
744
|
+
chunk = output[i:i + chunk_size]
|
|
745
|
+
chunk_data = {
|
|
746
|
+
"id": None,
|
|
747
|
+
"object": None,
|
|
748
|
+
"created": datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
|
|
749
|
+
"model": model,
|
|
750
|
+
"choices": [{
|
|
751
|
+
"index": 0,
|
|
752
|
+
"delta": {
|
|
753
|
+
"content": chunk,
|
|
754
|
+
"role": "assistant"
|
|
755
|
+
},
|
|
756
|
+
"finish_reason": None
|
|
757
|
+
}]
|
|
758
|
+
}
|
|
759
|
+
yield f"data: {json.dumps(chunk_data)}\n\n"
|
|
760
|
+
else:
|
|
761
|
+
# Non-string output, send as single chunk
|
|
762
|
+
chunk_data = {
|
|
763
|
+
"id": None,
|
|
764
|
+
"object": None,
|
|
765
|
+
"created": datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
|
|
766
|
+
"model": model,
|
|
767
|
+
"choices": [{
|
|
768
|
+
"index": 0,
|
|
769
|
+
"delta": {
|
|
770
|
+
"content": str(output),
|
|
771
|
+
"role": "assistant"
|
|
772
|
+
},
|
|
773
|
+
"finish_reason": None
|
|
774
|
+
}]
|
|
775
|
+
}
|
|
776
|
+
yield f"data: {json.dumps(chunk_data)}\n\n"
|
|
777
|
+
|
|
778
|
+
# Send completion message
|
|
779
|
+
yield f"data: {json.dumps({'type': 'message_stop'})}\n\n"
|
|
780
|
+
|
|
781
|
+
# Save to conversation history
|
|
782
|
+
message_id = generate_message_id()
|
|
783
|
+
save_conversation_message(
|
|
784
|
+
command_history,
|
|
785
|
+
conversation_id,
|
|
786
|
+
"user",
|
|
787
|
+
f"/{jinx_name} {' '.join(jinx_args)}",
|
|
788
|
+
wd=current_path,
|
|
789
|
+
model=model,
|
|
790
|
+
provider=provider,
|
|
791
|
+
npc=npc_name,
|
|
792
|
+
message_id=message_id
|
|
793
|
+
)
|
|
794
|
+
|
|
795
|
+
message_id = generate_message_id()
|
|
796
|
+
save_conversation_message(
|
|
797
|
+
command_history,
|
|
798
|
+
conversation_id,
|
|
799
|
+
"assistant",
|
|
800
|
+
str(output),
|
|
801
|
+
wd=current_path,
|
|
802
|
+
model=model,
|
|
803
|
+
provider=provider,
|
|
804
|
+
npc=npc_name,
|
|
805
|
+
message_id=message_id
|
|
806
|
+
)
|
|
807
|
+
|
|
808
|
+
except Exception as e:
|
|
809
|
+
print(f"ERROR: Exception during jinx execution {jinx_name}: {str(e)}")
|
|
810
|
+
traceback.print_exc()
|
|
811
|
+
error_data = {
|
|
812
|
+
"type": "error",
|
|
813
|
+
"error": str(e)
|
|
814
|
+
}
|
|
815
|
+
yield f"data: {json.dumps(error_data)}\n\n"
|
|
816
|
+
|
|
817
|
+
finally:
|
|
818
|
+
with cancellation_lock:
|
|
819
|
+
if current_stream_id in cancellation_flags:
|
|
820
|
+
del cancellation_flags[current_stream_id]
|
|
821
|
+
print(f"--- Jinx Execution Finished for streamId: {stream_id} ---")
|
|
822
|
+
|
|
823
|
+
return Response(event_stream(stream_id), mimetype="text/event-stream")
|
|
500
824
|
|
|
501
825
|
@app.route("/api/settings/global", methods=["POST", "OPTIONS"])
|
|
502
826
|
def save_global_settings():
|
|
@@ -513,7 +837,7 @@ def save_global_settings():
|
|
|
513
837
|
"embedding_model": "NPCSH_EMBEDDING_MODEL",
|
|
514
838
|
"embedding_provider": "NPCSH_EMBEDDING_PROVIDER",
|
|
515
839
|
"search_provider": "NPCSH_SEARCH_PROVIDER",
|
|
516
|
-
"
|
|
840
|
+
"NPC_STUDIO_LICENSE_KEY": "NPC_STUDIO_LICENSE_KEY",
|
|
517
841
|
"NPCSH_STREAM_OUTPUT": "NPCSH_STREAM_OUTPUT",
|
|
518
842
|
"default_folder": "NPC_STUDIO_DEFAULT_FOLDER",
|
|
519
843
|
}
|
|
@@ -521,15 +845,15 @@ def save_global_settings():
|
|
|
521
845
|
os.makedirs(os.path.dirname(npcshrc_path), exist_ok=True)
|
|
522
846
|
print(data)
|
|
523
847
|
with open(npcshrc_path, "w") as f:
|
|
524
|
-
|
|
848
|
+
|
|
525
849
|
for key, value in data.get("global_settings", {}).items():
|
|
526
850
|
if key in key_mapping and value:
|
|
527
|
-
|
|
851
|
+
|
|
528
852
|
if " " in str(value):
|
|
529
853
|
value = f'"{value}"'
|
|
530
854
|
f.write(f"export {key_mapping[key]}={value}\n")
|
|
531
855
|
|
|
532
|
-
|
|
856
|
+
|
|
533
857
|
for key, value in data.get("global_vars", {}).items():
|
|
534
858
|
if key and value:
|
|
535
859
|
if " " in str(value):
|
|
@@ -542,8 +866,7 @@ def save_global_settings():
|
|
|
542
866
|
print(f"Error in save_global_settings: {str(e)}")
|
|
543
867
|
return jsonify({"error": str(e)}), 500
|
|
544
868
|
|
|
545
|
-
|
|
546
|
-
@app.route("/api/settings/project", methods=["GET", "OPTIONS"]) # Add OPTIONS
|
|
869
|
+
@app.route("/api/settings/project", methods=["GET", "OPTIONS"])
|
|
547
870
|
def get_project_settings():
|
|
548
871
|
if request.method == "OPTIONS":
|
|
549
872
|
return "", 200
|
|
@@ -572,7 +895,7 @@ def get_project_settings():
|
|
|
572
895
|
return jsonify({"error": str(e)}), 500
|
|
573
896
|
|
|
574
897
|
|
|
575
|
-
@app.route("/api/settings/project", methods=["POST", "OPTIONS"])
|
|
898
|
+
@app.route("/api/settings/project", methods=["POST", "OPTIONS"])
|
|
576
899
|
def save_project_settings():
|
|
577
900
|
if request.method == "OPTIONS":
|
|
578
901
|
return "", 200
|
|
@@ -607,35 +930,35 @@ def get_models():
|
|
|
607
930
|
global available_models
|
|
608
931
|
current_path = request.args.get("currentPath")
|
|
609
932
|
if not current_path:
|
|
610
|
-
|
|
611
|
-
|
|
612
|
-
current_path = os.path.expanduser("~/.npcsh")
|
|
933
|
+
|
|
934
|
+
|
|
935
|
+
current_path = os.path.expanduser("~/.npcsh")
|
|
613
936
|
print("Warning: No currentPath provided for /api/models, using default.")
|
|
614
|
-
|
|
937
|
+
|
|
615
938
|
|
|
616
939
|
try:
|
|
617
|
-
|
|
940
|
+
|
|
618
941
|
available_models = get_locally_available_models(current_path)
|
|
619
942
|
|
|
620
|
-
|
|
621
|
-
|
|
943
|
+
|
|
944
|
+
|
|
622
945
|
formatted_models = []
|
|
623
946
|
for m, p in available_models.items():
|
|
624
|
-
|
|
947
|
+
|
|
625
948
|
text_only = (
|
|
626
949
|
"(text only)"
|
|
627
950
|
if p == "ollama"
|
|
628
|
-
and m in ["llama3.2", "deepseek-v3", "phi4"]
|
|
951
|
+
and m in ["llama3.2", "deepseek-v3", "phi4", "gemma3:1b"]
|
|
629
952
|
else ""
|
|
630
953
|
)
|
|
631
|
-
|
|
954
|
+
|
|
632
955
|
display_model = m
|
|
633
956
|
if "claude-3-5-haiku-latest" in m:
|
|
634
957
|
display_model = "claude-3.5-haiku"
|
|
635
958
|
elif "claude-3-5-sonnet-latest" in m:
|
|
636
959
|
display_model = "claude-3.5-sonnet"
|
|
637
960
|
elif "gemini-1.5-flash" in m:
|
|
638
|
-
display_model = "gemini-1.5-flash"
|
|
961
|
+
display_model = "gemini-1.5-flash"
|
|
639
962
|
elif "gemini-2.0-flash-lite-preview-02-05" in m:
|
|
640
963
|
display_model = "gemini-2.0-flash-lite-preview"
|
|
641
964
|
|
|
@@ -643,7 +966,7 @@ def get_models():
|
|
|
643
966
|
|
|
644
967
|
formatted_models.append(
|
|
645
968
|
{
|
|
646
|
-
"value": m,
|
|
969
|
+
"value": m,
|
|
647
970
|
"provider": p,
|
|
648
971
|
"display_name": display_name,
|
|
649
972
|
}
|
|
@@ -655,29 +978,29 @@ def get_models():
|
|
|
655
978
|
print(f"Error getting available models: {str(e)}")
|
|
656
979
|
|
|
657
980
|
traceback.print_exc()
|
|
658
|
-
|
|
981
|
+
|
|
659
982
|
return jsonify({"models": [], "error": str(e)}), 500
|
|
660
983
|
|
|
661
984
|
@app.route('/api/<command>', methods=['POST'])
|
|
662
985
|
def api_command(command):
|
|
663
986
|
data = request.json or {}
|
|
664
987
|
|
|
665
|
-
|
|
988
|
+
|
|
666
989
|
handler = router.get_route(command)
|
|
667
990
|
if not handler:
|
|
668
991
|
return jsonify({"error": f"Unknown command: {command}"})
|
|
669
992
|
|
|
670
|
-
|
|
993
|
+
|
|
671
994
|
if router.shell_only.get(command, False):
|
|
672
995
|
return jsonify({"error": f"Command {command} is only available in shell mode"})
|
|
673
996
|
|
|
674
|
-
|
|
997
|
+
|
|
675
998
|
try:
|
|
676
|
-
|
|
999
|
+
|
|
677
1000
|
args = data.get('args', [])
|
|
678
1001
|
kwargs = data.get('kwargs', {})
|
|
679
1002
|
|
|
680
|
-
|
|
1003
|
+
|
|
681
1004
|
command_str = command
|
|
682
1005
|
if args:
|
|
683
1006
|
command_str += " " + " ".join(str(arg) for arg in args)
|
|
@@ -694,13 +1017,13 @@ def get_npc_team_global():
|
|
|
694
1017
|
|
|
695
1018
|
npc_data = []
|
|
696
1019
|
|
|
697
|
-
|
|
1020
|
+
|
|
698
1021
|
for file in os.listdir(global_npc_directory):
|
|
699
1022
|
if file.endswith(".npc"):
|
|
700
1023
|
npc_path = os.path.join(global_npc_directory, file)
|
|
701
1024
|
npc = NPC(file=npc_path, db_conn=db_conn)
|
|
702
1025
|
|
|
703
|
-
|
|
1026
|
+
|
|
704
1027
|
serialized_npc = {
|
|
705
1028
|
"name": npc.name,
|
|
706
1029
|
"primary_directive": npc.primary_directive,
|
|
@@ -733,49 +1056,6 @@ def get_npc_team_global():
|
|
|
733
1056
|
return jsonify({"npcs": [], "error": str(e)})
|
|
734
1057
|
|
|
735
1058
|
|
|
736
|
-
@app.route("/api/jinxs/global", methods=["GET"])
|
|
737
|
-
def get_global_jinxs():
|
|
738
|
-
# try:
|
|
739
|
-
user_home = os.path.expanduser("~")
|
|
740
|
-
jinxs_dir = os.path.join(user_home, ".npcsh", "npc_team", "jinxs")
|
|
741
|
-
jinxs = []
|
|
742
|
-
if os.path.exists(jinxs_dir):
|
|
743
|
-
for file in os.listdir(jinxs_dir):
|
|
744
|
-
if file.endswith(".jinx"):
|
|
745
|
-
with open(os.path.join(jinxs_dir, file), "r") as f:
|
|
746
|
-
jinx_data = yaml.safe_load(f)
|
|
747
|
-
jinxs.append(jinx_data)
|
|
748
|
-
print("file", file)
|
|
749
|
-
|
|
750
|
-
return jsonify({"jinxs": jinxs})
|
|
751
|
-
|
|
752
|
-
|
|
753
|
-
# except Exception as e:
|
|
754
|
-
# return jsonify({"error": str(e)}), 500
|
|
755
|
-
|
|
756
|
-
|
|
757
|
-
@app.route("/api/jinxs/project", methods=["GET"])
|
|
758
|
-
def get_project_jinxs():
|
|
759
|
-
current_path = request.args.get(
|
|
760
|
-
"currentPath"
|
|
761
|
-
) # Correctly retrieves `currentPath` from query params
|
|
762
|
-
if not current_path:
|
|
763
|
-
return jsonify({"jinxs": []})
|
|
764
|
-
|
|
765
|
-
if not current_path.endswith("npc_team"):
|
|
766
|
-
current_path = os.path.join(current_path, "npc_team")
|
|
767
|
-
|
|
768
|
-
jinxs_dir = os.path.join(current_path, "jinxs")
|
|
769
|
-
jinxs = []
|
|
770
|
-
if os.path.exists(jinxs_dir):
|
|
771
|
-
for file in os.listdir(jinxs_dir):
|
|
772
|
-
if file.endswith(".jinx"):
|
|
773
|
-
with open(os.path.join(jinxs_dir, file), "r") as f:
|
|
774
|
-
jinx_data = yaml.safe_load(f)
|
|
775
|
-
jinxs.append(jinx_data)
|
|
776
|
-
return jsonify({"jinxs": jinxs})
|
|
777
|
-
|
|
778
|
-
|
|
779
1059
|
@app.route("/api/jinxs/save", methods=["POST"])
|
|
780
1060
|
def save_jinx():
|
|
781
1061
|
try:
|
|
@@ -799,7 +1079,7 @@ def save_jinx():
|
|
|
799
1079
|
|
|
800
1080
|
os.makedirs(jinxs_dir, exist_ok=True)
|
|
801
1081
|
|
|
802
|
-
|
|
1082
|
+
|
|
803
1083
|
jinx_yaml = {
|
|
804
1084
|
"description": jinx_data.get("description", ""),
|
|
805
1085
|
"inputs": jinx_data.get("inputs", []),
|
|
@@ -826,16 +1106,16 @@ def save_npc():
|
|
|
826
1106
|
if not npc_data or "name" not in npc_data:
|
|
827
1107
|
return jsonify({"error": "Invalid NPC data"}), 400
|
|
828
1108
|
|
|
829
|
-
|
|
1109
|
+
|
|
830
1110
|
if is_global:
|
|
831
1111
|
npc_directory = os.path.expanduser("~/.npcsh/npc_team")
|
|
832
1112
|
else:
|
|
833
1113
|
npc_directory = os.path.join(current_path, "npc_team")
|
|
834
1114
|
|
|
835
|
-
|
|
1115
|
+
|
|
836
1116
|
os.makedirs(npc_directory, exist_ok=True)
|
|
837
1117
|
|
|
838
|
-
|
|
1118
|
+
|
|
839
1119
|
yaml_content = f"""name: {npc_data['name']}
|
|
840
1120
|
primary_directive: "{npc_data['primary_directive']}"
|
|
841
1121
|
model: {npc_data['model']}
|
|
@@ -844,7 +1124,7 @@ api_url: {npc_data.get('api_url', '')}
|
|
|
844
1124
|
use_global_jinxs: {str(npc_data.get('use_global_jinxs', True)).lower()}
|
|
845
1125
|
"""
|
|
846
1126
|
|
|
847
|
-
|
|
1127
|
+
|
|
848
1128
|
file_path = os.path.join(npc_directory, f"{npc_data['name']}.npc")
|
|
849
1129
|
with open(file_path, "w") as f:
|
|
850
1130
|
f.write(yaml_content)
|
|
@@ -873,7 +1153,7 @@ def get_npc_team_project():
|
|
|
873
1153
|
npc_path = os.path.join(project_npc_directory, file)
|
|
874
1154
|
npc = NPC(file=npc_path, db_conn=db_conn)
|
|
875
1155
|
|
|
876
|
-
|
|
1156
|
+
|
|
877
1157
|
serialized_npc = {
|
|
878
1158
|
"name": npc.name,
|
|
879
1159
|
"primary_directive": npc.primary_directive,
|
|
@@ -949,7 +1229,7 @@ def get_last_used_model_and_npc_in_conversation(conversation_id):
|
|
|
949
1229
|
print(f"Error getting last used model/NPC for conversation {conversation_id}: {e}")
|
|
950
1230
|
return {"model": None, "npc": None, "error": str(e)}
|
|
951
1231
|
|
|
952
|
-
|
|
1232
|
+
|
|
953
1233
|
|
|
954
1234
|
@app.route("/api/last_used_in_directory", methods=["GET"])
|
|
955
1235
|
def api_get_last_used_in_directory():
|
|
@@ -970,18 +1250,20 @@ def api_get_last_used_in_conversation():
|
|
|
970
1250
|
|
|
971
1251
|
result = get_last_used_model_and_npc_in_conversation(conversation_id)
|
|
972
1252
|
return jsonify(result)
|
|
973
|
-
|
|
1253
|
+
|
|
974
1254
|
def get_ctx_path(is_global, current_path=None):
|
|
975
1255
|
"""Determines the path to the .ctx file."""
|
|
976
1256
|
if is_global:
|
|
977
|
-
|
|
978
|
-
|
|
979
|
-
return
|
|
1257
|
+
ctx_dir = os.path.join(os.path.expanduser("~/.npcsh/npc_team/"))
|
|
1258
|
+
ctx_files = glob.glob(os.path.join(ctx_dir, "*.ctx"))
|
|
1259
|
+
return ctx_files[0] if ctx_files else None
|
|
980
1260
|
else:
|
|
981
1261
|
if not current_path:
|
|
982
1262
|
return None
|
|
983
|
-
|
|
984
|
-
|
|
1263
|
+
|
|
1264
|
+
ctx_dir = os.path.join(current_path, "npc_team")
|
|
1265
|
+
ctx_files = glob.glob(os.path.join(ctx_dir, "*.ctx"))
|
|
1266
|
+
return ctx_files[0] if ctx_files else None
|
|
985
1267
|
|
|
986
1268
|
|
|
987
1269
|
def read_ctx_file(file_path):
|
|
@@ -991,15 +1273,15 @@ def read_ctx_file(file_path):
|
|
|
991
1273
|
try:
|
|
992
1274
|
data = yaml.safe_load(f) or {}
|
|
993
1275
|
|
|
994
|
-
|
|
1276
|
+
|
|
995
1277
|
if 'databases' in data and isinstance(data['databases'], list):
|
|
996
1278
|
data['databases'] = [{"value": item} for item in data['databases']]
|
|
997
1279
|
|
|
998
|
-
|
|
1280
|
+
|
|
999
1281
|
if 'mcp_servers' in data and isinstance(data['mcp_servers'], list):
|
|
1000
1282
|
data['mcp_servers'] = [{"value": item} for item in data['mcp_servers']]
|
|
1001
1283
|
|
|
1002
|
-
|
|
1284
|
+
|
|
1003
1285
|
if 'preferences' in data and isinstance(data['preferences'], list):
|
|
1004
1286
|
data['preferences'] = [{"value": item} for item in data['preferences']]
|
|
1005
1287
|
|
|
@@ -1007,25 +1289,25 @@ def read_ctx_file(file_path):
|
|
|
1007
1289
|
except yaml.YAMLError as e:
|
|
1008
1290
|
print(f"YAML parsing error in {file_path}: {e}")
|
|
1009
1291
|
return {"error": "Failed to parse YAML."}
|
|
1010
|
-
return {}
|
|
1292
|
+
return {}
|
|
1011
1293
|
|
|
1012
1294
|
def write_ctx_file(file_path, data):
|
|
1013
1295
|
"""Writes a dictionary to a YAML .ctx file, denormalizing list of objects back to strings."""
|
|
1014
1296
|
if not file_path:
|
|
1015
1297
|
return False
|
|
1016
1298
|
|
|
1017
|
-
|
|
1299
|
+
|
|
1018
1300
|
data_to_save = json.loads(json.dumps(data))
|
|
1019
1301
|
|
|
1020
|
-
|
|
1302
|
+
|
|
1021
1303
|
if 'databases' in data_to_save and isinstance(data_to_save['databases'], list):
|
|
1022
1304
|
data_to_save['databases'] = [item.get("value", "") for item in data_to_save['databases'] if isinstance(item, dict)]
|
|
1023
1305
|
|
|
1024
|
-
|
|
1306
|
+
|
|
1025
1307
|
if 'mcp_servers' in data_to_save and isinstance(data_to_save['mcp_servers'], list):
|
|
1026
1308
|
data_to_save['mcp_servers'] = [item.get("value", "") for item in data_to_save['mcp_servers'] if isinstance(item, dict)]
|
|
1027
1309
|
|
|
1028
|
-
|
|
1310
|
+
|
|
1029
1311
|
if 'preferences' in data_to_save and isinstance(data_to_save['preferences'], list):
|
|
1030
1312
|
data_to_save['preferences'] = [item.get("value", "") for item in data_to_save['preferences'] if isinstance(item, dict)]
|
|
1031
1313
|
|
|
@@ -1097,7 +1379,7 @@ def save_project_context():
|
|
|
1097
1379
|
|
|
1098
1380
|
|
|
1099
1381
|
|
|
1100
|
-
|
|
1382
|
+
|
|
1101
1383
|
|
|
1102
1384
|
@app.route("/api/get_attachment_response", methods=["POST"])
|
|
1103
1385
|
def get_attachment_response():
|
|
@@ -1114,12 +1396,12 @@ def get_attachment_response():
|
|
|
1114
1396
|
provider = data.get("provider")
|
|
1115
1397
|
message_id = data.get("messageId")
|
|
1116
1398
|
|
|
1117
|
-
|
|
1399
|
+
|
|
1118
1400
|
if current_path:
|
|
1119
1401
|
loaded_vars = load_project_env(current_path)
|
|
1120
1402
|
print(f"Loaded project env variables for attachment response: {list(loaded_vars.keys())}")
|
|
1121
1403
|
|
|
1122
|
-
|
|
1404
|
+
|
|
1123
1405
|
npc_object = None
|
|
1124
1406
|
if npc_name:
|
|
1125
1407
|
db_conn = get_db_connection()
|
|
@@ -1171,17 +1453,32 @@ def get_attachment_response():
|
|
|
1171
1453
|
messages = response["messages"]
|
|
1172
1454
|
response = response["response"]
|
|
1173
1455
|
|
|
1174
|
-
|
|
1456
|
+
|
|
1175
1457
|
save_conversation_message(
|
|
1176
|
-
command_history,
|
|
1177
|
-
|
|
1178
|
-
|
|
1458
|
+
command_history,
|
|
1459
|
+
conversation_id,
|
|
1460
|
+
"user",
|
|
1461
|
+
message_to_send,
|
|
1462
|
+
wd=current_path,
|
|
1463
|
+
team=team,
|
|
1464
|
+
model=model,
|
|
1465
|
+
provider=provider,
|
|
1466
|
+
npc=npc_name,
|
|
1467
|
+
attachments=attachments_loaded
|
|
1179
1468
|
)
|
|
1180
1469
|
|
|
1181
1470
|
save_conversation_message(
|
|
1182
|
-
command_history,
|
|
1183
|
-
|
|
1184
|
-
|
|
1471
|
+
command_history,
|
|
1472
|
+
conversation_id,
|
|
1473
|
+
"assistant",
|
|
1474
|
+
response,
|
|
1475
|
+
wd=current_path,
|
|
1476
|
+
team=team,
|
|
1477
|
+
model=model,
|
|
1478
|
+
provider=provider,
|
|
1479
|
+
npc=npc_name,
|
|
1480
|
+
attachments=attachments_loaded,
|
|
1481
|
+
message_id=message_id
|
|
1185
1482
|
)
|
|
1186
1483
|
|
|
1187
1484
|
return jsonify({
|
|
@@ -1200,7 +1497,7 @@ IMAGE_MODELS = {
|
|
|
1200
1497
|
],
|
|
1201
1498
|
"gemini": [
|
|
1202
1499
|
{"value": "gemini-2.5-flash-image-preview", "display_name": "Gemini 2.5 Flash Image"},
|
|
1203
|
-
{"value": "imagen-3.0-generate-002", "display_name": "Imagen 3.0 Generate (Preview)"},
|
|
1500
|
+
{"value": "imagen-3.0-generate-002", "display_name": "Imagen 3.0 Generate (Preview)"},
|
|
1204
1501
|
],
|
|
1205
1502
|
"diffusers": [
|
|
1206
1503
|
{"value": "runwayml/stable-diffusion-v1-5", "display_name": "Stable Diffusion v1.5"},
|
|
@@ -1212,13 +1509,13 @@ def get_available_image_models(current_path=None):
|
|
|
1212
1509
|
Retrieves available image generation models based on environment variables
|
|
1213
1510
|
and predefined configurations.
|
|
1214
1511
|
"""
|
|
1215
|
-
|
|
1512
|
+
|
|
1216
1513
|
if current_path:
|
|
1217
|
-
load_project_env(current_path)
|
|
1514
|
+
load_project_env(current_path)
|
|
1218
1515
|
|
|
1219
1516
|
all_image_models = []
|
|
1220
1517
|
|
|
1221
|
-
|
|
1518
|
+
|
|
1222
1519
|
env_image_model = os.getenv("NPCSH_IMAGE_MODEL")
|
|
1223
1520
|
env_image_provider = os.getenv("NPCSH_IMAGE_PROVIDER")
|
|
1224
1521
|
|
|
@@ -1229,9 +1526,9 @@ def get_available_image_models(current_path=None):
|
|
|
1229
1526
|
"display_name": f"{env_image_model} | {env_image_provider} (Configured)"
|
|
1230
1527
|
})
|
|
1231
1528
|
|
|
1232
|
-
|
|
1529
|
+
|
|
1233
1530
|
for provider_key, models_list in IMAGE_MODELS.items():
|
|
1234
|
-
|
|
1531
|
+
|
|
1235
1532
|
if provider_key == "openai":
|
|
1236
1533
|
if os.environ.get("OPENAI_API_KEY"):
|
|
1237
1534
|
all_image_models.extend([
|
|
@@ -1239,21 +1536,21 @@ def get_available_image_models(current_path=None):
|
|
|
1239
1536
|
for model in models_list
|
|
1240
1537
|
])
|
|
1241
1538
|
elif provider_key == "gemini":
|
|
1242
|
-
if os.environ.get("GEMINI_API_KEY"):
|
|
1539
|
+
if os.environ.get("GEMINI_API_KEY"):
|
|
1243
1540
|
all_image_models.extend([
|
|
1244
1541
|
{**model, "provider": provider_key, "display_name": f"{model['display_name']} | {provider_key}"}
|
|
1245
1542
|
for model in models_list
|
|
1246
1543
|
])
|
|
1247
1544
|
elif provider_key == "diffusers":
|
|
1248
|
-
|
|
1249
|
-
|
|
1545
|
+
|
|
1546
|
+
|
|
1250
1547
|
all_image_models.extend([
|
|
1251
1548
|
{**model, "provider": provider_key, "display_name": f"{model['display_name']} | {provider_key}"}
|
|
1252
1549
|
for model in models_list
|
|
1253
1550
|
])
|
|
1254
|
-
|
|
1551
|
+
|
|
1255
1552
|
|
|
1256
|
-
|
|
1553
|
+
|
|
1257
1554
|
seen_models = set()
|
|
1258
1555
|
unique_models = []
|
|
1259
1556
|
for model_entry in all_image_models:
|
|
@@ -1264,6 +1561,187 @@ def get_available_image_models(current_path=None):
|
|
|
1264
1561
|
|
|
1265
1562
|
return unique_models
|
|
1266
1563
|
|
|
1564
|
+
@app.route('/api/generative_fill', methods=['POST'])
|
|
1565
|
+
def generative_fill():
|
|
1566
|
+
data = request.get_json()
|
|
1567
|
+
image_path = data.get('imagePath')
|
|
1568
|
+
mask_data = data.get('mask')
|
|
1569
|
+
prompt = data.get('prompt')
|
|
1570
|
+
model = data.get('model')
|
|
1571
|
+
provider = data.get('provider')
|
|
1572
|
+
|
|
1573
|
+
if not all([image_path, mask_data, prompt, model, provider]):
|
|
1574
|
+
return jsonify({"error": "Missing required fields"}), 400
|
|
1575
|
+
|
|
1576
|
+
try:
|
|
1577
|
+
image_path = os.path.expanduser(image_path)
|
|
1578
|
+
|
|
1579
|
+
mask_b64 = mask_data.split(',')[1] if ',' in mask_data else mask_data
|
|
1580
|
+
mask_bytes = base64.b64decode(mask_b64)
|
|
1581
|
+
mask_image = Image.open(BytesIO(mask_bytes))
|
|
1582
|
+
|
|
1583
|
+
original_image = Image.open(image_path)
|
|
1584
|
+
|
|
1585
|
+
if provider == 'openai':
|
|
1586
|
+
result = inpaint_openai(original_image, mask_image, prompt, model)
|
|
1587
|
+
elif provider == 'gemini':
|
|
1588
|
+
result = inpaint_gemini(original_image, mask_image, prompt, model)
|
|
1589
|
+
elif provider == 'diffusers':
|
|
1590
|
+
result = inpaint_diffusers(original_image, mask_image, prompt, model)
|
|
1591
|
+
else:
|
|
1592
|
+
return jsonify({"error": f"Provider {provider} not supported"}), 400
|
|
1593
|
+
|
|
1594
|
+
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
|
|
1595
|
+
filename = f"inpaint_{timestamp}.png"
|
|
1596
|
+
save_dir = os.path.dirname(image_path)
|
|
1597
|
+
result_path = os.path.join(save_dir, filename)
|
|
1598
|
+
|
|
1599
|
+
result.save(result_path)
|
|
1600
|
+
|
|
1601
|
+
return jsonify({"resultPath": result_path, "error": None})
|
|
1602
|
+
|
|
1603
|
+
except Exception as e:
|
|
1604
|
+
traceback.print_exc()
|
|
1605
|
+
return jsonify({"error": str(e)}), 500
|
|
1606
|
+
|
|
1607
|
+
|
|
1608
|
+
def inpaint_openai(image, mask, prompt, model):
|
|
1609
|
+
import io
|
|
1610
|
+
from openai import OpenAI
|
|
1611
|
+
from PIL import Image
|
|
1612
|
+
import base64
|
|
1613
|
+
|
|
1614
|
+
client = OpenAI()
|
|
1615
|
+
|
|
1616
|
+
original_size = image.size
|
|
1617
|
+
|
|
1618
|
+
if model == 'dall-e-2':
|
|
1619
|
+
valid_sizes = ['256x256', '512x512', '1024x1024']
|
|
1620
|
+
max_dim = max(image.width, image.height)
|
|
1621
|
+
|
|
1622
|
+
if max_dim <= 256:
|
|
1623
|
+
target_size = (256, 256)
|
|
1624
|
+
size_str = '256x256'
|
|
1625
|
+
elif max_dim <= 512:
|
|
1626
|
+
target_size = (512, 512)
|
|
1627
|
+
size_str = '512x512'
|
|
1628
|
+
else:
|
|
1629
|
+
target_size = (1024, 1024)
|
|
1630
|
+
size_str = '1024x1024'
|
|
1631
|
+
else:
|
|
1632
|
+
valid_sizes = {
|
|
1633
|
+
(1024, 1024): "1024x1024",
|
|
1634
|
+
(1024, 1536): "1024x1536",
|
|
1635
|
+
(1536, 1024): "1536x1024"
|
|
1636
|
+
}
|
|
1637
|
+
|
|
1638
|
+
target_size = (1024, 1024)
|
|
1639
|
+
for size in valid_sizes.keys():
|
|
1640
|
+
if image.width > image.height and size == (1536, 1024):
|
|
1641
|
+
target_size = size
|
|
1642
|
+
break
|
|
1643
|
+
elif image.height > image.width and size == (1024, 1536):
|
|
1644
|
+
target_size = size
|
|
1645
|
+
break
|
|
1646
|
+
|
|
1647
|
+
size_str = valid_sizes[target_size]
|
|
1648
|
+
|
|
1649
|
+
resized_image = image.resize(target_size, Image.Resampling.LANCZOS)
|
|
1650
|
+
resized_mask = mask.resize(target_size, Image.Resampling.LANCZOS)
|
|
1651
|
+
|
|
1652
|
+
img_bytes = io.BytesIO()
|
|
1653
|
+
resized_image.save(img_bytes, format='PNG')
|
|
1654
|
+
img_bytes.seek(0)
|
|
1655
|
+
img_bytes.name = 'image.png'
|
|
1656
|
+
|
|
1657
|
+
mask_bytes = io.BytesIO()
|
|
1658
|
+
resized_mask.save(mask_bytes, format='PNG')
|
|
1659
|
+
mask_bytes.seek(0)
|
|
1660
|
+
mask_bytes.name = 'mask.png'
|
|
1661
|
+
|
|
1662
|
+
response = client.images.edit(
|
|
1663
|
+
model=model,
|
|
1664
|
+
image=img_bytes,
|
|
1665
|
+
mask=mask_bytes,
|
|
1666
|
+
prompt=prompt,
|
|
1667
|
+
n=1,
|
|
1668
|
+
size=size_str
|
|
1669
|
+
)
|
|
1670
|
+
|
|
1671
|
+
if response.data[0].url:
|
|
1672
|
+
import requests
|
|
1673
|
+
img_data = requests.get(response.data[0].url).content
|
|
1674
|
+
elif hasattr(response.data[0], 'b64_json'):
|
|
1675
|
+
img_data = base64.b64decode(response.data[0].b64_json)
|
|
1676
|
+
else:
|
|
1677
|
+
raise Exception("No image data in response")
|
|
1678
|
+
|
|
1679
|
+
result_image = Image.open(io.BytesIO(img_data))
|
|
1680
|
+
return result_image.resize(original_size, Image.Resampling.LANCZOS)
|
|
1681
|
+
|
|
1682
|
+
def inpaint_diffusers(image, mask, prompt, model):
|
|
1683
|
+
from diffusers import StableDiffusionInpaintPipeline
|
|
1684
|
+
import torch
|
|
1685
|
+
|
|
1686
|
+
pipe = StableDiffusionInpaintPipeline.from_pretrained(
|
|
1687
|
+
model,
|
|
1688
|
+
torch_dtype=torch.float16
|
|
1689
|
+
)
|
|
1690
|
+
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
|
|
1691
|
+
|
|
1692
|
+
result = pipe(
|
|
1693
|
+
prompt=prompt,
|
|
1694
|
+
image=image,
|
|
1695
|
+
mask_image=mask
|
|
1696
|
+
).images[0]
|
|
1697
|
+
|
|
1698
|
+
return result
|
|
1699
|
+
def inpaint_gemini(image, mask, prompt, model):
|
|
1700
|
+
from npcpy.gen.image_gen import generate_image
|
|
1701
|
+
import io
|
|
1702
|
+
import numpy as np
|
|
1703
|
+
|
|
1704
|
+
mask_np = np.array(mask.convert('L'))
|
|
1705
|
+
ys, xs = np.where(mask_np > 128)
|
|
1706
|
+
|
|
1707
|
+
if len(xs) == 0:
|
|
1708
|
+
return image
|
|
1709
|
+
|
|
1710
|
+
x_center = int(np.mean(xs))
|
|
1711
|
+
y_center = int(np.mean(ys))
|
|
1712
|
+
width_pct = (xs.max() - xs.min()) / image.width * 100
|
|
1713
|
+
height_pct = (ys.max() - ys.min()) / image.height * 100
|
|
1714
|
+
|
|
1715
|
+
position = "center"
|
|
1716
|
+
if y_center < image.height / 3:
|
|
1717
|
+
position = "top"
|
|
1718
|
+
elif y_center > 2 * image.height / 3:
|
|
1719
|
+
position = "bottom"
|
|
1720
|
+
|
|
1721
|
+
if x_center < image.width / 3:
|
|
1722
|
+
position += " left"
|
|
1723
|
+
elif x_center > 2 * image.width / 3:
|
|
1724
|
+
position += " right"
|
|
1725
|
+
|
|
1726
|
+
img_bytes = io.BytesIO()
|
|
1727
|
+
image.save(img_bytes, format='PNG')
|
|
1728
|
+
img_bytes.seek(0)
|
|
1729
|
+
|
|
1730
|
+
full_prompt = f"""Using the provided image, change only the region in the {position}
|
|
1731
|
+
approximately {int(width_pct)}% wide by {int(height_pct)}% tall) to: {prompt}.
|
|
1732
|
+
|
|
1733
|
+
Keep everything else exactly the same, matching the original lighting and style.
|
|
1734
|
+
You are in-painting the image. You should not be changing anything other than what was requested in prompt: {prompt}
|
|
1735
|
+
"""
|
|
1736
|
+
results = generate_image(
|
|
1737
|
+
prompt=full_prompt,
|
|
1738
|
+
model=model,
|
|
1739
|
+
provider='gemini',
|
|
1740
|
+
attachments=[img_bytes],
|
|
1741
|
+
n_images=1
|
|
1742
|
+
)
|
|
1743
|
+
|
|
1744
|
+
return results[0] if results else None
|
|
1267
1745
|
|
|
1268
1746
|
@app.route('/api/generate_images', methods=['POST'])
|
|
1269
1747
|
def generate_images():
|
|
@@ -1282,11 +1760,11 @@ def generate_images():
|
|
|
1282
1760
|
if not model_name or not provider_name:
|
|
1283
1761
|
return jsonify({"error": "Image model and provider are required."}), 400
|
|
1284
1762
|
|
|
1285
|
-
|
|
1763
|
+
|
|
1286
1764
|
save_dir = os.path.expanduser(save_dir)
|
|
1287
1765
|
os.makedirs(save_dir, exist_ok=True)
|
|
1288
1766
|
|
|
1289
|
-
|
|
1767
|
+
|
|
1290
1768
|
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
|
|
1291
1769
|
base_filename_with_time = f"{base_filename}_{timestamp}"
|
|
1292
1770
|
|
|
@@ -1295,7 +1773,7 @@ def generate_images():
|
|
|
1295
1773
|
command_history = CommandHistory(app.config.get('DB_PATH'))
|
|
1296
1774
|
|
|
1297
1775
|
try:
|
|
1298
|
-
|
|
1776
|
+
|
|
1299
1777
|
input_images = []
|
|
1300
1778
|
attachments_loaded = []
|
|
1301
1779
|
|
|
@@ -1309,7 +1787,7 @@ def generate_images():
|
|
|
1309
1787
|
pil_img = Image.open(image_path)
|
|
1310
1788
|
input_images.append(pil_img)
|
|
1311
1789
|
|
|
1312
|
-
|
|
1790
|
+
|
|
1313
1791
|
with open(image_path, 'rb') as f:
|
|
1314
1792
|
img_data = f.read()
|
|
1315
1793
|
attachments_loaded.append({
|
|
@@ -1321,7 +1799,7 @@ def generate_images():
|
|
|
1321
1799
|
except Exception as e:
|
|
1322
1800
|
print(f"Warning: Could not load attachment image {image_path}: {e}")
|
|
1323
1801
|
|
|
1324
|
-
|
|
1802
|
+
|
|
1325
1803
|
images_list = gen_image(
|
|
1326
1804
|
prompt,
|
|
1327
1805
|
model=model_name,
|
|
@@ -1336,16 +1814,16 @@ def generate_images():
|
|
|
1336
1814
|
generated_attachments = []
|
|
1337
1815
|
for i, pil_image in enumerate(images_list):
|
|
1338
1816
|
if isinstance(pil_image, Image.Image):
|
|
1339
|
-
|
|
1817
|
+
|
|
1340
1818
|
filename = f"{base_filename_with_time}_{i+1:03d}.png" if n > 1 else f"{base_filename_with_time}.png"
|
|
1341
1819
|
filepath = os.path.join(save_dir, filename)
|
|
1342
1820
|
print(f'saved file to {filepath}')
|
|
1343
1821
|
|
|
1344
|
-
|
|
1822
|
+
|
|
1345
1823
|
pil_image.save(filepath, format="PNG")
|
|
1346
1824
|
generated_filenames.append(filepath)
|
|
1347
1825
|
|
|
1348
|
-
|
|
1826
|
+
|
|
1349
1827
|
buffered = BytesIO()
|
|
1350
1828
|
pil_image.save(buffered, format="PNG")
|
|
1351
1829
|
img_data = buffered.getvalue()
|
|
@@ -1357,19 +1835,19 @@ def generate_images():
|
|
|
1357
1835
|
"size": len(img_data)
|
|
1358
1836
|
})
|
|
1359
1837
|
|
|
1360
|
-
|
|
1838
|
+
|
|
1361
1839
|
img_str = base64.b64encode(img_data).decode("utf-8")
|
|
1362
1840
|
generated_images_base64.append(f"data:image/png;base64,{img_str}")
|
|
1363
1841
|
else:
|
|
1364
1842
|
print(f"Warning: gen_image returned non-PIL object ({type(pil_image)}). Skipping image conversion.")
|
|
1365
1843
|
|
|
1366
|
-
# Save generation record to database
|
|
1367
|
-
generation_id = command_history.generate_message_id()
|
|
1368
1844
|
|
|
1369
|
-
|
|
1845
|
+
generation_id = generate_message_id()
|
|
1846
|
+
|
|
1847
|
+
|
|
1370
1848
|
save_conversation_message(
|
|
1371
1849
|
command_history,
|
|
1372
|
-
generation_id,
|
|
1850
|
+
generation_id,
|
|
1373
1851
|
"user",
|
|
1374
1852
|
f"Generate {n} image(s): {prompt}",
|
|
1375
1853
|
wd=save_dir,
|
|
@@ -1380,11 +1858,11 @@ def generate_images():
|
|
|
1380
1858
|
message_id=generation_id
|
|
1381
1859
|
)
|
|
1382
1860
|
|
|
1383
|
-
|
|
1861
|
+
|
|
1384
1862
|
response_message = f"Generated {len(generated_images_base64)} image(s) saved to {save_dir}"
|
|
1385
1863
|
save_conversation_message(
|
|
1386
1864
|
command_history,
|
|
1387
|
-
generation_id,
|
|
1865
|
+
generation_id,
|
|
1388
1866
|
"assistant",
|
|
1389
1867
|
response_message,
|
|
1390
1868
|
wd=save_dir,
|
|
@@ -1392,20 +1870,77 @@ def generate_images():
|
|
|
1392
1870
|
provider=provider_name,
|
|
1393
1871
|
npc="vixynt",
|
|
1394
1872
|
attachments=generated_attachments,
|
|
1395
|
-
message_id=
|
|
1873
|
+
message_id=generate_message_id()
|
|
1396
1874
|
)
|
|
1397
1875
|
|
|
1398
1876
|
return jsonify({
|
|
1399
1877
|
"images": generated_images_base64,
|
|
1400
1878
|
"filenames": generated_filenames,
|
|
1401
|
-
"generation_id": generation_id,
|
|
1879
|
+
"generation_id": generation_id,
|
|
1402
1880
|
"error": None
|
|
1403
1881
|
})
|
|
1404
1882
|
except Exception as e:
|
|
1405
1883
|
print(f"Image generation error: {str(e)}")
|
|
1406
1884
|
traceback.print_exc()
|
|
1407
1885
|
return jsonify({"images": [], "filenames": [], "error": str(e)}), 500
|
|
1886
|
+
|
|
1887
|
+
|
|
1888
|
+
|
|
1889
|
+
@app.route("/api/mcp_tools", methods=["GET"])
|
|
1890
|
+
def get_mcp_tools():
|
|
1891
|
+
"""
|
|
1892
|
+
API endpoint to retrieve the list of tools available from a given MCP server script.
|
|
1893
|
+
It will try to use an existing client from corca_states if available and matching,
|
|
1894
|
+
otherwise it creates a temporary client.
|
|
1895
|
+
"""
|
|
1896
|
+
server_path = request.args.get("mcpServerPath")
|
|
1897
|
+
conversation_id = request.args.get("conversationId")
|
|
1898
|
+
npc_name = request.args.get("npc")
|
|
1899
|
+
|
|
1900
|
+
if not server_path:
|
|
1901
|
+
return jsonify({"error": "mcpServerPath parameter is required."}), 400
|
|
1902
|
+
|
|
1408
1903
|
|
|
1904
|
+
try:
|
|
1905
|
+
from npcsh.corca import MCPClientNPC
|
|
1906
|
+
except ImportError:
|
|
1907
|
+
return jsonify({"error": "MCP Client (npcsh.corca) not available. Ensure npcsh.corca is installed and importable."}), 500
|
|
1908
|
+
|
|
1909
|
+
temp_mcp_client = None
|
|
1910
|
+
try:
|
|
1911
|
+
|
|
1912
|
+
if conversation_id and npc_name and hasattr(app, 'corca_states'):
|
|
1913
|
+
state_key = f"{conversation_id}_{npc_name or 'default'}"
|
|
1914
|
+
if state_key in app.corca_states:
|
|
1915
|
+
existing_corca_state = app.corca_states[state_key]
|
|
1916
|
+
if hasattr(existing_corca_state, 'mcp_client') and existing_corca_state.mcp_client \
|
|
1917
|
+
and existing_corca_state.mcp_client.server_script_path == server_path:
|
|
1918
|
+
print(f"Using existing MCP client for {state_key} to fetch tools.")
|
|
1919
|
+
temp_mcp_client = existing_corca_state.mcp_client
|
|
1920
|
+
return jsonify({"tools": temp_mcp_client.available_tools_llm, "error": None})
|
|
1921
|
+
|
|
1922
|
+
|
|
1923
|
+
print(f"Creating a temporary MCP client to fetch tools for {server_path}.")
|
|
1924
|
+
temp_mcp_client = MCPClientNPC()
|
|
1925
|
+
if temp_mcp_client.connect_sync(server_path):
|
|
1926
|
+
return jsonify({"tools": temp_mcp_client.available_tools_llm, "error": None})
|
|
1927
|
+
else:
|
|
1928
|
+
return jsonify({"error": f"Failed to connect to MCP server at {server_path}."}), 500
|
|
1929
|
+
except FileNotFoundError as e:
|
|
1930
|
+
return jsonify({"error": f"MCP Server script not found: {e}"}), 404
|
|
1931
|
+
except ValueError as e:
|
|
1932
|
+
return jsonify({"error": f"Invalid MCP Server script: {e}"}), 400
|
|
1933
|
+
except Exception as e:
|
|
1934
|
+
print(f"Error getting MCP tools for {server_path}: {traceback.format_exc()}")
|
|
1935
|
+
return jsonify({"error": f"An unexpected error occurred: {e}"}), 500
|
|
1936
|
+
finally:
|
|
1937
|
+
|
|
1938
|
+
if temp_mcp_client and temp_mcp_client.session and (
|
|
1939
|
+
not (conversation_id and npc_name and hasattr(app, 'corca_states') and state_key in app.corca_states and getattr(app.corca_states[state_key], 'mcp_client', None) == temp_mcp_client)
|
|
1940
|
+
):
|
|
1941
|
+
print(f"Disconnecting temporary MCP client for {server_path}.")
|
|
1942
|
+
temp_mcp_client.disconnect_sync()
|
|
1943
|
+
|
|
1409
1944
|
|
|
1410
1945
|
@app.route("/api/image_models", methods=["GET"])
|
|
1411
1946
|
def get_image_models_api():
|
|
@@ -1421,6 +1956,12 @@ def get_image_models_api():
|
|
|
1421
1956
|
traceback.print_exc()
|
|
1422
1957
|
return jsonify({"models": [], "error": str(e)}), 500
|
|
1423
1958
|
|
|
1959
|
+
|
|
1960
|
+
|
|
1961
|
+
|
|
1962
|
+
|
|
1963
|
+
|
|
1964
|
+
|
|
1424
1965
|
@app.route("/api/stream", methods=["POST"])
|
|
1425
1966
|
def stream():
|
|
1426
1967
|
data = request.json
|
|
@@ -1451,22 +1992,16 @@ def stream():

    npc_object = None
    team_object = None
-    team = None
-
+    team = None
    if npc_name:
-        # First check registered teams and capture team name if found
-        print('checking')
        if hasattr(app, 'registered_teams'):
-            print('has registered teams')
            for team_name, team_object in app.registered_teams.items():
-                print('team', team_object)
-
                if hasattr(team_object, 'npcs'):
                    team_npcs = team_object.npcs
                    if isinstance(team_npcs, dict):
                        if npc_name in team_npcs:
                            npc_object = team_npcs[npc_name]
-                            team = team_name
+                            team = team_name
                            npc_object.team = team_object
                            print(f"Found NPC {npc_name} in registered team {team_name}")
                            break
@@ -1474,7 +2009,7 @@ def stream():
                        for npc in team_npcs:
                            if hasattr(npc, 'name') and npc.name == npc_name:
                                npc_object = npc
-                                team = team_name
+                                team = team_name
                                npc_object.team = team_object
                                print(f"Found NPC {npc_name} in registered team {team_name}")
                                break
@@ -1484,7 +2019,7 @@ def stream():
                    npc_object = team_object.forenpc
                    npc_object.team = team_object

-                    team = team_name
+                    team = team_name
                    print(f"Found NPC {npc_name} as forenpc in team {team_name}")
                    break

@@ -1515,18 +2050,16 @@ def stream():
            print('team', team_object)

        else:
-            # Create team with just this NPC
            team_object = Team(npcs=[npc_object], db_conn=db_conn)
            team_object.name = os.path.basename(team_directory) if team_directory else f"{npc_name}_team"
            npc_object.team = team_object
            print('team', team_object)
            team_name = team_object.name
-
+
            if not hasattr(app, 'registered_teams'):
                app.registered_teams = {}
            app.registered_teams[team_name] = team_object

-            # Set the team variable for this request
            team = team_name

            print(f"Created and registered team '{team_name}' with NPC {npc_name}")
@@ -1551,10 +2084,9 @@ def stream():
    attachments_for_db = []
    attachment_paths_for_llm = []

-    message_id =
+    message_id = generate_message_id()
    if attachments:
-
-        attachment_dir = os.path.expanduser(f"~/.npcsh/attachments/{conversation_id}/{message_id}")
+        attachment_dir = os.path.expanduser(f"~/.npcsh/attachments/{conversation_id+message_id}/")
        os.makedirs(attachment_dir, exist_ok=True)

        for attachment in attachments:
@@ -1596,10 +2128,6 @@ def stream():
            except Exception as e:
                print(f"Error processing attachment {attachment.get('name', 'N/A')}: {e}")
                traceback.print_exc()
-
-
-
-
    messages = fetch_messages_for_conversation(conversation_id)
    if len(messages) == 0 and npc_object is not None:
        messages = [{'role': 'system',
@@ -1629,8 +2157,7 @@ def stream():


    exe_mode = data.get('executionMode','chat')
-
-    print(data)
+
    if exe_mode == 'chat':
        stream_response = get_llm_response(
            commandstr,
@@ -1639,6 +2166,7 @@ def stream():
            model=model,
            provider=provider,
            npc=npc_object,
+            api_url = npc_object.api_url if npc_object.api_url else None,
            team=team_object,
            stream=True,
            attachments=attachment_paths_for_llm,
@@ -1646,25 +2174,7 @@ def stream():
            **tool_args
        )
        messages = stream_response.get('messages', messages)
-
-        if isinstance(messages[-1].get('content'), list):
-            for cont in messages[-1].get('content'):
-                txt = cont.get('text')
-                if txt is not None:
-                    user_message_filled +=txt
-        save_conversation_message(
-            command_history,
-            conversation_id,
-            "user",
-            user_message_filled if len(user_message_filled)>0 else commandstr,
-            wd=current_path,
-            model=model,
-            provider=provider,
-            npc=npc_name,
-            team=team,
-            attachments=attachments_for_db,
-            message_id=message_id,
-        )
+
    elif exe_mode == 'npcsh':
        from npcsh._state import execute_command, initial_state
        from npcsh.routes import router
@@ -1673,38 +2183,161 @@ def stream():
        initial_state.npc = npc_object
        initial_state.team = team_object
        initial_state.messages = messages
-
-
+        initial_state.command_history = command_history
+
        state, stream_response = execute_command(
            commandstr,
            initial_state, router=router)
-
-
-        # user_message_filled = ''
-        #if isinstance(messages[-1].get('content'), list):
-        #    for cont in messages[-1].get('content'):
-        #        txt = cont.get('text')
-        #        if txt is not None:
-        #            user_message_filled +=txt
-        #save_conversation_message(
-        #    command_history,
-        #    conversation_id,
-        #    "user",
-        #    user_message_filled if len(user_message_filled)>0 else commandstr,
-        ##    wd=current_path,
-        #    model=model,
-        #    provider=provider,
-        #    npc=npc_name,
-        #    team=team,
-        #    attachments=attachments_for_db,
-        #    message_id=message_id,
+        messages = state.messages
+
    elif exe_mode == 'guac':
-
+        from npcsh.guac import execute_guac_command
+        from npcsh.routes import router
+        from npcsh._state import initial_state
+        from pathlib import Path
+        import pandas as pd, numpy as np, matplotlib.pyplot as plt
+
+        if not hasattr(app, 'guac_locals'):
+            app.guac_locals = {}
+
+        if conversation_id not in app.guac_locals:
+            app.guac_locals[conversation_id] = {
+                'pd': pd,
+                'np': np,
+                'plt': plt,
+                'datetime': datetime,
+                'Path': Path,
+                'os': os,
+                'sys': sys,
+                'json': json
+            }
+
+        initial_state.model = model
+        initial_state.provider = provider
+        initial_state.npc = npc_object
+        initial_state.team = team_object
+        initial_state.messages = messages
+        initial_state.command_history = command_history
+
+        state, stream_response = execute_guac_command(
+            commandstr,
+            initial_state,
+            app.guac_locals[conversation_id],
+            "guac",
+            Path.cwd() / "npc_team",
+            router
+        )
+        messages = state.messages

    elif exe_mode == 'corca':
-
-
-
+
+        try:
+            from npcsh.corca import execute_command_corca, create_corca_state_and_mcp_client, MCPClientNPC
+            from npcsh._state import initial_state as state
+        except ImportError:
+
+            print("ERROR: npcsh.corca or MCPClientNPC not found. Corca mode is disabled.", file=sys.stderr)
+            state = None
+            stream_response = {"output": "Corca mode is not available due to missing dependencies.", "messages": messages}
+
+
+        if state is not None:
+
+            mcp_server_path_from_request = data.get("mcpServerPath")
+            selected_mcp_tools_from_request = data.get("selectedMcpTools", [])
+
+
+            effective_mcp_server_path = mcp_server_path_from_request
+            if not effective_mcp_server_path and team_object and hasattr(team_object, 'team_ctx') and team_object.team_ctx:
+                mcp_servers_list = team_object.team_ctx.get('mcp_servers', [])
+                if mcp_servers_list and isinstance(mcp_servers_list, list):
+                    first_server_obj = next((s for s in mcp_servers_list if isinstance(s, dict) and 'value' in s), None)
+                    if first_server_obj:
+                        effective_mcp_server_path = first_server_obj['value']
+                elif isinstance(team_object.team_ctx.get('mcp_server'), str):
+                    effective_mcp_server_path = team_object.team_ctx.get('mcp_server')
+
+
+            if not hasattr(app, 'corca_states'):
+                app.corca_states = {}
+
+            state_key = f"{conversation_id}_{npc_name or 'default'}"
+
+            corca_state = None
+            if state_key not in app.corca_states:
+
+                corca_state = create_corca_state_and_mcp_client(
+                    conversation_id=conversation_id,
+                    command_history=command_history,
+                    npc=npc_object,
+                    team=team_object,
+                    current_path=current_path,
+                    mcp_server_path=effective_mcp_server_path
+                )
+                app.corca_states[state_key] = corca_state
+            else:
+                corca_state = app.corca_states[state_key]
+                corca_state.npc = npc_object
+                corca_state.team = team_object
+                corca_state.current_path = current_path
+                corca_state.messages = messages
+                corca_state.command_history = command_history
+
+
+            current_mcp_client_path = getattr(corca_state.mcp_client, 'server_script_path', None)
+
+            if effective_mcp_server_path != current_mcp_client_path:
+                print(f"MCP server path changed/updated for {state_key}. Disconnecting old client (if any) and reconnecting to {effective_mcp_server_path or 'None'}.")
+                if corca_state.mcp_client and corca_state.mcp_client.session:
+                    corca_state.mcp_client.disconnect_sync()
+                    corca_state.mcp_client = None
+
+                if effective_mcp_server_path:
+                    new_mcp_client = MCPClientNPC()
+                    if new_mcp_client.connect_sync(effective_mcp_server_path):
+                        corca_state.mcp_client = new_mcp_client
+                        print(f"Successfully reconnected MCP client for {state_key} to {effective_mcp_server_path}.")
+                    else:
+                        print(f"Failed to reconnect MCP client for {state_key} to {effective_mcp_server_path}. Corca will have no tools.")
+                        corca_state.mcp_client = None
+
+
+
+            state, stream_response = execute_command_corca(
+                commandstr,
+                corca_state,
+                command_history,
+                selected_mcp_tools_names=selected_mcp_tools_from_request
+            )
+
+
+            app.corca_states[state_key] = state
+            messages = state.messages
+
+
+    user_message_filled = ''
+
+    if isinstance(messages[-1].get('content'), list):
+        for cont in messages[-1].get('content'):
+            txt = cont.get('text')
+            if txt is not None:
+                user_message_filled +=txt
+    save_conversation_message(
+        command_history,
+        conversation_id,
+        "user",
+        user_message_filled if len(user_message_filled)>0 else commandstr,
+        wd=current_path,
+        model=model,
+        provider=provider,
+        npc=npc_name,
+        team=team,
+        attachments=attachments_for_db,
+        message_id=message_id,
+    )
+
+
+    message_id = generate_message_id()

    def event_stream(current_stream_id):
        complete_response = []
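The hunk above dispatches /api/stream on an executionMode of chat, npcsh, guac, or corca. A minimal request sketch follows; the field names mirror the data.get(...) calls visible in the diff, while the host/port and the model/provider values are placeholders.

    import requests

    payload = {
        "commandstr": "plot a sine wave",
        "conversationId": "demo-conv-1",
        "model": "llama3.2",        # placeholder
        "provider": "ollama",       # placeholder
        "executionMode": "guac",    # one of: chat, npcsh, guac, corca
        "mcpServerPath": None,      # consulted only in corca mode
        "selectedMcpTools": [],     # consulted only in corca mode
    }
    with requests.post("http://localhost:5337/api/stream", json=payload, stream=True) as r:
        for line in r.iter_lines():
            if line:
                print(line.decode())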
@@ -1713,7 +2346,8 @@ def stream():
        tool_call_data = {"id": None, "function_name": None, "arguments": ""}

        try:
-            if isinstance(stream_response, str)
+            if isinstance(stream_response, str) :
+                print('stream a str and not a gen')
                chunk_data = {
                    "id": None,
                    "object": None,
@@ -1724,7 +2358,28 @@ def stream():
                            "index": 0,
                            "delta":
                            {
-                                "content": stream_response
+                                "content": stream_response,
+                                "role": "assistant"
+                            },
+                            "finish_reason": 'done'
+                        }
+                    ]
+                }
+                yield f"data: {json.dumps(chunk_data)}"
+                return
+            elif isinstance(stream_response, dict) and 'output' in stream_response and isinstance(stream_response.get('output'), str):
+                print('stream a str and not a gen')
+                chunk_data = {
+                    "id": None,
+                    "object": None,
+                    "created": datetime.datetime.now().strftime('YYYY-DD-MM-HHMMSS'),
+                    "model": model,
+                    "choices": [
+                        {
+                            "index": 0,
+                            "delta":
+                            {
+                                "content": stream_response.get('output') ,
                                "role": "assistant"
                            },
                            "finish_reason": 'done'
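The chunks yielded above follow the OpenAI streaming-chunk envelope (choices → delta → content), sent as SSE lines of the form "data: <json>". A sketch of how a client might reassemble the text, assuming that envelope:

    import json

    def collect_text(sse_lines):
        # sse_lines: an iterable of decoded SSE lines from the server above
        parts = []
        for line in sse_lines:
            if not line.startswith("data: "):
                continue
            payload = json.loads(line[len("data: "):])
            if payload.get("type") == "message_stop":
                break
            for choice in payload.get("choices", []):
                delta = choice.get("delta", {})
                if delta.get("content"):
                    parts.append(delta["content"])
        return "".join(parts)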
@@ -1742,7 +2397,7 @@ def stream():

                    print('.', end="", flush=True)
                    dot_count += 1
-                    if "hf.co" in model or provider == 'ollama':
+                    if "hf.co" in model or provider == 'ollama' and 'gpt-oss' not in model:
                        chunk_content = response_chunk["message"]["content"] if "message" in response_chunk and "content" in response_chunk["message"] else ""
                        if "message" in response_chunk and "tool_calls" in response_chunk["message"]:
                            for tool_call in response_chunk["message"]["tool_calls"]:
@@ -1759,7 +2414,9 @@ def stream():
                        if chunk_content:
                            complete_response.append(chunk_content)
                            chunk_data = {
-                                "id": None, "object": None,
+                                "id": None, "object": None,
+                                "created": response_chunk["created_at"] or datetime.datetime.now(),
+                                "model": response_chunk["model"],
                                "choices": [{"index": 0, "delta": {"content": chunk_content, "role": response_chunk["message"]["role"]}, "finish_reason": response_chunk.get("done_reason")}]
                            }
                            yield f"data: {json.dumps(chunk_data)}\n\n"
@@ -1823,6 +2480,28 @@ def stream():



+@app.route("/api/memory/approve", methods=["POST"])
+def approve_memories():
+    try:
+        data = request.json
+        approvals = data.get("approvals", [])
+
+        command_history = CommandHistory(app.config.get('DB_PATH'))
+
+        for approval in approvals:
+            command_history.update_memory_status(
+                approval['memory_id'],
+                approval['decision'],
+                approval.get('final_memory')
+            )
+
+        return jsonify({"success": True, "processed": len(approvals)})
+
+    except Exception as e:
+        return jsonify({"error": str(e)}), 500
+
+
+

@app.route("/api/execute", methods=["POST"])
def execute():
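The hunk above adds the /api/memory/approve endpoint. A minimal sketch of calling it; the decision strings are assumptions (the diff only shows that each approval carries a memory_id, a decision, and an optional final_memory edit), and the host/port is a placeholder.

    import requests

    requests.post(
        "http://localhost:5337/api/memory/approve",  # assumed host/port
        json={
            "approvals": [
                {"memory_id": 42, "decision": "approved"},  # decision value is an assumption
                {"memory_id": 43, "decision": "approved", "final_memory": "edited statement"},
            ]
        },
    )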
@@ -1834,12 +2513,12 @@ def execute():
    import uuid
    stream_id = str(uuid.uuid4())

-
+
    with cancellation_lock:
        cancellation_flags[stream_id] = False
    print(f"Starting execute stream with ID: {stream_id}")

-
+
    commandstr = data.get("commandstr")
    conversation_id = data.get("conversationId")
    model = data.get("model", 'llama3.2')
@@ -1860,7 +2539,7 @@ def execute():
    npc_object = None
    team_object = None

-
+
    if team:
        print(team)
        if hasattr(app, 'registered_teams') and team in app.registered_teams:
@@ -1869,13 +2548,13 @@ def execute():
        else:
            print(f"Warning: Team {team} not found in registered teams")

-
+
    if npc_name:
-
+
        if team and hasattr(app, 'registered_teams') and team in app.registered_teams:
            team_object = app.registered_teams[team]
            print('team', team_object)
-
+
            if hasattr(team_object, 'npcs'):
                team_npcs = team_object.npcs
                if isinstance(team_npcs, dict):
@@ -1888,18 +2567,18 @@ def execute():
                            npc_object = npc
                            print(f"Found NPC {npc_name} in registered team {team}")
                            break
-
+
        if not npc_object and hasattr(team_object, 'forenpc') and hasattr(team_object.forenpc, 'name'):
            if team_object.forenpc.name == npc_name:
                npc_object = team_object.forenpc
                print(f"Found NPC {npc_name} as forenpc in team {team}")

-
+
        if not npc_object and hasattr(app, 'registered_npcs') and npc_name in app.registered_npcs:
            npc_object = app.registered_npcs[npc_name]
            print(f"Found NPC {npc_name} in registered NPCs")

-
+
        if not npc_object:
            db_conn = get_db_connection()
            npc_object = load_npc_by_name_and_source(npc_name, npc_source, db_conn, current_path)
@@ -1947,7 +2626,7 @@ def execute():
    if npc_object is not None and messages and messages[0]['role'] == 'system':
        messages[0]['content'] = npc_object.get_system_prompt()

-    message_id =
+    message_id = generate_message_id()
    save_conversation_message(
        command_history, conversation_id, "user", commandstr,
        wd=current_path, model=model, provider=provider, npc=npc_name,
@@ -1958,18 +2637,18 @@ def execute():
        provider=provider, npc=npc_object, team=team_object, stream=True
    )
    print(response_gen)
-
-    message_id =
+
+    message_id = generate_message_id()

    def event_stream(current_stream_id):
        complete_response = []
        dot_count = 0
        interrupted = False
        tool_call_data = {"id": None, "function_name": None, "arguments": ""}
+        memory_data = None

        try:
-            for response_chunk in
-                # --- Check the cancellation flag on every iteration ---
+            for response_chunk in stream_response.get('response', stream_response.get('output')):
                with cancellation_lock:
                    if cancellation_flags.get(current_stream_id, False):
                        print(f"Cancellation flag triggered for {current_stream_id}. Breaking loop.")
@@ -1979,28 +2658,7 @@ def execute():
                print('.', end="", flush=True)
                dot_count += 1

-
-                if isinstance(response_chunk, dict) and response_chunk.get("role") == "decision":
-                    # Stream decision immediately in standard format
-                    chunk_data = {
-                        "id": None, "object": None, "created": None, "model": model,
-                        "choices": [
-                            {
-                                "index": 0,
-                                "delta":
-                                {
-                                    "content": response_chunk.get('content', ''),
-                                    "role": "assistant"
-                                },
-                                "finish_reason": None
-                            }
-                        ]
-                    }
-                    complete_response.append(response_chunk.get('content', ''))
-                    yield f"data: {json.dumps(chunk_data)}\n\n"
-                    continue
-
-                elif "hf.co" in model or provider == 'ollama':
+                if "hf.co" in model or provider == 'ollama':
                    chunk_content = response_chunk["message"]["content"] if "message" in response_chunk and "content" in response_chunk["message"] else ""
                    if "message" in response_chunk and "tool_calls" in response_chunk["message"]:
                        for tool_call in response_chunk["message"]["tool_calls"]:
@@ -2010,46 +2668,41 @@ def execute():
                            if "name" in tool_call["function"]:
                                tool_call_data["function_name"] = tool_call["function"]["name"]
                            if "arguments" in tool_call["function"]:
-
+                                arg_val = tool_call["function"]["arguments"]
+                                if isinstance(arg_val, dict):
+                                    arg_val = json.dumps(arg_val)
+                                tool_call_data["arguments"] += arg_val
                    if chunk_content:
                        complete_response.append(chunk_content)
                        chunk_data = {
                            "id": None, "object": None, "created": response_chunk["created_at"], "model": response_chunk["model"],
                            "choices": [{"index": 0, "delta": {"content": chunk_content, "role": response_chunk["message"]["role"]}, "finish_reason": response_chunk.get("done_reason")}]
                        }
+                        yield f"data: {json.dumps(chunk_data)}\n\n"
                else:
                    chunk_content = ""
                    reasoning_content = ""
-
-
-
-
-
-
-                    if tool_call.function:
-
-
-
-
-
-
-
-
-                    if chunk_content:
-                        complete_response.append(chunk_content)
-                        chunk_data = {
-                            "id": response_chunk.id, "object": response_chunk.object, "created": response_chunk.created, "model": response_chunk.model,
-                            "choices": [{"index": choice.index, "delta": {"content": choice.delta.content, "role": choice.delta.role, "reasoning_content": reasoning_content if hasattr(choice.delta, "reasoning_content") else None}, "finish_reason": choice.finish_reason} for choice in response_chunk.choices]
-                        }
-                    else: # its a string so assemble it
-                        chunk_content = response_chunk
+                    for choice in response_chunk.choices:
+                        if hasattr(choice.delta, "tool_calls") and choice.delta.tool_calls:
+                            for tool_call in choice.delta.tool_calls:
+                                if tool_call.id:
+                                    tool_call_data["id"] = tool_call.id
+                                if tool_call.function:
+                                    if hasattr(tool_call.function, "name") and tool_call.function.name:
+                                        tool_call_data["function_name"] = tool_call.function.name
+                                    if hasattr(tool_call.function, "arguments") and tool_call.function.arguments:
+                                        tool_call_data["arguments"] += tool_call.function.arguments
+                    for choice in response_chunk.choices:
+                        if hasattr(choice.delta, "reasoning_content"):
+                            reasoning_content += choice.delta.reasoning_content
+                    chunk_content = "".join(choice.delta.content for choice in response_chunk.choices if choice.delta.content is not None)
+                    if chunk_content:
                        complete_response.append(chunk_content)
-
-
-
-
-
-                        yield f"data: {json.dumps(chunk_data)}\n\n"
+                        chunk_data = {
+                            "id": response_chunk.id, "object": response_chunk.object, "created": response_chunk.created, "model": response_chunk.model,
+                            "choices": [{"index": choice.index, "delta": {"content": choice.delta.content, "role": choice.delta.role, "reasoning_content": reasoning_content if hasattr(choice.delta, "reasoning_content") else None}, "finish_reason": choice.finish_reason} for choice in response_chunk.choices]
+                        }
+                        yield f"data: {json.dumps(chunk_data)}\n\n"

            except Exception as e:
                print(f"\nAn exception occurred during streaming for {current_stream_id}: {e}")
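The hunk above accumulates tool_call_data["arguments"] as a plain string because each streamed delta may carry only a fragment of the JSON arguments; the fragments are concatenated and only parsed once the stream ends. A self-contained illustration of that pattern (the fragment values here are made up):

    import json

    fragments = ['{"city": "Par', 'is", "units": "metric"}']  # hypothetical deltas
    tool_call_data = {"id": "call_1", "function_name": "get_weather", "arguments": ""}
    for frag in fragments:
        tool_call_data["arguments"] += frag
    args = json.loads(tool_call_data["arguments"])  # {'city': 'Paris', 'units': 'metric'}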
@@ -2061,13 +2714,74 @@ def execute():
            print('\r' + ' ' * dot_count*2 + '\r', end="", flush=True)

            final_response_text = ''.join(complete_response)
+
+            conversation_turn_text = f"User: {commandstr}\nAssistant: {final_response_text}"
+
+            try:
+                memory_examples = command_history.get_memory_examples_for_context(
+                    npc=npc_name,
+                    team=team,
+                    directory_path=current_path
+                )
+
+                memory_context = format_memory_context(memory_examples)
+
+                facts = get_facts(
+                    conversation_turn_text,
+                    model=npc_object.model if npc_object else model,
+                    provider=npc_object.provider if npc_object else provider,
+                    npc=npc_object,
+                    context=memory_context
+                )
+
+                if facts:
+                    memories_for_approval = []
+                    for i, fact in enumerate(facts):
+                        memory_id = command_history.add_memory_to_database(
+                            message_id=f"{conversation_id}_{datetime.now().strftime('%H%M%S')}_{i}",
+                            conversation_id=conversation_id,
+                            npc=npc_name or "default",
+                            team=team or "default",
+                            directory_path=current_path or "/",
+                            initial_memory=fact['statement'],
+                            status="pending_approval",
+                            model=npc_object.model if npc_object else model,
+                            provider=npc_object.provider if npc_object else provider
+                        )
+
+                        memories_for_approval.append({
+                            "memory_id": memory_id,
+                            "content": fact['statement'],
+                            "context": f"Type: {fact.get('type', 'unknown')}, Source: {fact.get('source_text', '')}",
+                            "npc": npc_name or "default"
+                        })
+
+                    memory_data = {
+                        "type": "memory_approval",
+                        "memories": memories_for_approval,
+                        "conversation_id": conversation_id
+                    }
+
+            except Exception as e:
+                print(f"Memory generation error: {e}")
+
+            if memory_data:
+                yield f"data: {json.dumps(memory_data)}\n\n"
+
            yield f"data: {json.dumps({'type': 'message_stop'})}\n\n"

            npc_name_to_save = npc_object.name if npc_object else ''
            save_conversation_message(
-                command_history,
-
-
+                command_history,
+                conversation_id,
+                "assistant",
+                final_response_text,
+                wd=current_path,
+                model=model,
+                provider=provider,
+                npc=npc_name_to_save,
+                team=team,
+                message_id=message_id,
            )

            with cancellation_lock:
@@ -2076,6 +2790,7 @@ def execute():
                print(f"Cleaned up cancellation flag for stream ID: {current_stream_id}")


+
    return Response(event_stream(stream_id), mimetype="text/event-stream")

@app.route("/api/interrupt", methods=["POST"])
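Putting the two additions together: the /api/execute stream above yields OpenAI-style content chunks, may emit one memory_approval event before message_stop, and approvals flow back through /api/memory/approve. A sketch of a client loop under those assumptions (host/port and payload values are placeholders):

    import json
    import requests

    BASE = "http://localhost:5337"  # assumed host/port
    payload = {"commandstr": "hello", "conversationId": "demo-conv-1"}

    with requests.post(f"{BASE}/api/execute", json=payload, stream=True) as r:
        for raw in r.iter_lines():
            if not raw or not raw.startswith(b"data: "):
                continue
            event = json.loads(raw[len(b"data: "):])
            if event.get("type") == "memory_approval":
                # approve everything for the sake of the example
                approvals = [{"memory_id": m["memory_id"], "decision": "approved"}
                             for m in event["memories"]]
                requests.post(f"{BASE}/api/memory/approve", json={"approvals": approvals})
            elif event.get("type") == "message_stop":
                break
            else:
                for choice in event.get("choices", []):
                    print(choice["delta"].get("content") or "", end="")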
@@ -2116,7 +2831,7 @@ def get_conversations():
            ORDER BY MAX(timestamp) DESC
        """)

-
+
        path_without_slash = path.rstrip('/')
        path_with_slash = path_without_slash + '/'

@@ -2130,11 +2845,11 @@ def get_conversations():
            {
                "conversations": [
                    {
-                        "id": conv[0],
-                        "timestamp": conv[1],
-                        "last_message_timestamp": conv[2],
+                        "id": conv[0],
+                        "timestamp": conv[1],
+                        "last_message_timestamp": conv[2],
                        "preview": (
-                            conv[3][:100] + "..."
+                            conv[3][:100] + "..."
                            if conv[3] and len(conv[3]) > 100
                            else conv[3]
                        ),
@@ -2158,7 +2873,7 @@ def get_conversation_messages(conversation_id):
    try:
        engine = get_db_connection()
        with engine.connect() as conn:
-
+
            query = text("""
                WITH ranked_messages AS (
                    SELECT
|
|
|
2187
2902
|
{
|
|
2188
2903
|
"messages": [
|
|
2189
2904
|
{
|
|
2190
|
-
"message_id": msg[1] if len(msg) > 1 else None,
|
|
2905
|
+
"message_id": msg[1] if len(msg) > 1 else None,
|
|
2191
2906
|
"role": msg[3] if len(msg) > 3 else None,
|
|
2192
2907
|
"content": msg[4] if len(msg) > 4 else None,
|
|
2193
2908
|
"timestamp": msg[5] if len(msg) > 5 else None,
|
|
@@ -2196,7 +2911,7 @@ def get_conversation_messages(conversation_id):
                        "npc": msg[8] if len(msg) > 8 else None,
                        "attachments": (
                            get_message_attachments(msg[1])
-                            if len(msg) > 1 and msg[-1]
+                            if len(msg) > 1 and msg[-1]
                            else []
                        ),
                    }
@@ -2224,12 +2939,12 @@ def after_request(response):
@app.route('/api/ollama/status', methods=['GET'])
def ollama_status():
    try:
-
-
+
+
        ollama.list()
        return jsonify({"status": "running"})
    except ollama.RequestError as e:
-
+
        print(f"Ollama status check failed: {e}")
        return jsonify({"status": "not_found"})
    except Exception as e:
@@ -2242,7 +2957,7 @@ def get_ollama_models():
        response = ollama.list()
        models_list = []

-
+
        for model_obj in response['models']:
            models_list.append({
                "name": model_obj.model,
@@ -2264,7 +2979,7 @@ def delete_ollama_model():
        ollama.delete(model_name)
        return jsonify({"success": True, "message": f"Model {model_name} deleted."})
    except ollama.ResponseError as e:
-
+
        return jsonify({"error": e.error}), e.status_code
    except Exception as e:
        return jsonify({"error": str(e)}), 500
@@ -2281,8 +2996,8 @@ def pull_ollama_model():
    try:
        stream = ollama.pull(model_name, stream=True)
        for progress_obj in stream:
-
-
+
+
            yield json.dumps({
                'status': getattr(progress_obj, 'status', None),
                'digest': getattr(progress_obj, 'digest', None),
@@ -2356,7 +3071,7 @@ def start_flask_server(
    user_npc_directory = None
):
    try:
-
+
        if teams:
            app.registered_teams = teams
            print(f"Registered {len(teams)} teams: {list(teams.keys())}")
@@ -2368,14 +3083,14 @@ def start_flask_server(
            print(f"Registered {len(npcs)} NPCs: {list(npcs.keys())}")
        else:
            app.registered_npcs = {}
-
+
        app.config['DB_PATH'] = db_path
        app.config['user_npc_directory'] = user_npc_directory

        command_history = CommandHistory(db_path)
        app.command_history = command_history

-
+
        if cors_origins:

            CORS(
@@ -2387,7 +3102,7 @@ def start_flask_server(

        )

-
+
        print(f"Starting Flask server on http://0.0.0.0:{port}")
        app.run(host="0.0.0.0", port=port, debug=debug, threaded=True)
    except Exception as e:
@@ -2398,8 +3113,8 @@ if __name__ == "__main__":

    SETTINGS_FILE = Path(os.path.expanduser("~/.npcshrc"))

-
+
    db_path = os.path.expanduser("~/npcsh_history.db")
    user_npc_directory = os.path.expanduser("~/.npcsh/npc_team")
-
+
    start_flask_server(db_path=db_path, user_npc_directory=user_npc_directory)