npcpy 1.0.26__py3-none-any.whl → 1.2.32__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- npcpy/__init__.py +0 -7
- npcpy/data/audio.py +16 -99
- npcpy/data/image.py +43 -42
- npcpy/data/load.py +83 -124
- npcpy/data/text.py +28 -28
- npcpy/data/video.py +8 -32
- npcpy/data/web.py +51 -23
- npcpy/ft/diff.py +110 -0
- npcpy/ft/ge.py +115 -0
- npcpy/ft/memory_trainer.py +171 -0
- npcpy/ft/model_ensembler.py +357 -0
- npcpy/ft/rl.py +360 -0
- npcpy/ft/sft.py +248 -0
- npcpy/ft/usft.py +128 -0
- npcpy/gen/audio_gen.py +24 -0
- npcpy/gen/embeddings.py +13 -13
- npcpy/gen/image_gen.py +262 -117
- npcpy/gen/response.py +615 -415
- npcpy/gen/video_gen.py +53 -7
- npcpy/llm_funcs.py +1869 -437
- npcpy/main.py +1 -1
- npcpy/memory/command_history.py +844 -510
- npcpy/memory/kg_vis.py +833 -0
- npcpy/memory/knowledge_graph.py +892 -1845
- npcpy/memory/memory_processor.py +81 -0
- npcpy/memory/search.py +188 -90
- npcpy/mix/debate.py +192 -3
- npcpy/npc_compiler.py +1672 -801
- npcpy/npc_sysenv.py +593 -1266
- npcpy/serve.py +3120 -0
- npcpy/sql/ai_function_tools.py +257 -0
- npcpy/sql/database_ai_adapters.py +186 -0
- npcpy/sql/database_ai_functions.py +163 -0
- npcpy/sql/model_runner.py +19 -19
- npcpy/sql/npcsql.py +706 -507
- npcpy/sql/sql_model_compiler.py +156 -0
- npcpy/tools.py +183 -0
- npcpy/work/plan.py +13 -279
- npcpy/work/trigger.py +3 -3
- npcpy-1.2.32.dist-info/METADATA +803 -0
- npcpy-1.2.32.dist-info/RECORD +54 -0
- npcpy/data/dataframes.py +0 -171
- npcpy/memory/deep_research.py +0 -125
- npcpy/memory/sleep.py +0 -557
- npcpy/modes/_state.py +0 -78
- npcpy/modes/alicanto.py +0 -1075
- npcpy/modes/guac.py +0 -785
- npcpy/modes/mcp_npcsh.py +0 -822
- npcpy/modes/npc.py +0 -213
- npcpy/modes/npcsh.py +0 -1158
- npcpy/modes/plonk.py +0 -409
- npcpy/modes/pti.py +0 -234
- npcpy/modes/serve.py +0 -1637
- npcpy/modes/spool.py +0 -312
- npcpy/modes/wander.py +0 -549
- npcpy/modes/yap.py +0 -572
- npcpy/npc_team/alicanto.npc +0 -2
- npcpy/npc_team/alicanto.png +0 -0
- npcpy/npc_team/assembly_lines/test_pipeline.py +0 -181
- npcpy/npc_team/corca.npc +0 -13
- npcpy/npc_team/foreman.npc +0 -7
- npcpy/npc_team/frederic.npc +0 -6
- npcpy/npc_team/frederic4.png +0 -0
- npcpy/npc_team/guac.png +0 -0
- npcpy/npc_team/jinxs/automator.jinx +0 -18
- npcpy/npc_team/jinxs/bash_executer.jinx +0 -31
- npcpy/npc_team/jinxs/calculator.jinx +0 -11
- npcpy/npc_team/jinxs/edit_file.jinx +0 -96
- npcpy/npc_team/jinxs/file_chat.jinx +0 -14
- npcpy/npc_team/jinxs/gui_controller.jinx +0 -28
- npcpy/npc_team/jinxs/image_generation.jinx +0 -29
- npcpy/npc_team/jinxs/internet_search.jinx +0 -30
- npcpy/npc_team/jinxs/local_search.jinx +0 -152
- npcpy/npc_team/jinxs/npcsh_executor.jinx +0 -31
- npcpy/npc_team/jinxs/python_executor.jinx +0 -8
- npcpy/npc_team/jinxs/screen_cap.jinx +0 -25
- npcpy/npc_team/jinxs/sql_executor.jinx +0 -33
- npcpy/npc_team/kadiefa.npc +0 -3
- npcpy/npc_team/kadiefa.png +0 -0
- npcpy/npc_team/npcsh.ctx +0 -9
- npcpy/npc_team/npcsh_sibiji.png +0 -0
- npcpy/npc_team/plonk.npc +0 -2
- npcpy/npc_team/plonk.png +0 -0
- npcpy/npc_team/plonkjr.npc +0 -2
- npcpy/npc_team/plonkjr.png +0 -0
- npcpy/npc_team/sibiji.npc +0 -5
- npcpy/npc_team/sibiji.png +0 -0
- npcpy/npc_team/spool.png +0 -0
- npcpy/npc_team/templates/analytics/celona.npc +0 -0
- npcpy/npc_team/templates/hr_support/raone.npc +0 -0
- npcpy/npc_team/templates/humanities/eriane.npc +0 -4
- npcpy/npc_team/templates/it_support/lineru.npc +0 -0
- npcpy/npc_team/templates/marketing/slean.npc +0 -4
- npcpy/npc_team/templates/philosophy/maurawa.npc +0 -0
- npcpy/npc_team/templates/sales/turnic.npc +0 -4
- npcpy/npc_team/templates/software/welxor.npc +0 -0
- npcpy/npc_team/yap.png +0 -0
- npcpy/routes.py +0 -958
- npcpy/work/mcp_helpers.py +0 -357
- npcpy/work/mcp_server.py +0 -194
- npcpy-1.0.26.data/data/npcpy/npc_team/alicanto.npc +0 -2
- npcpy-1.0.26.data/data/npcpy/npc_team/alicanto.png +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/automator.jinx +0 -18
- npcpy-1.0.26.data/data/npcpy/npc_team/bash_executer.jinx +0 -31
- npcpy-1.0.26.data/data/npcpy/npc_team/calculator.jinx +0 -11
- npcpy-1.0.26.data/data/npcpy/npc_team/celona.npc +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/corca.npc +0 -13
- npcpy-1.0.26.data/data/npcpy/npc_team/edit_file.jinx +0 -96
- npcpy-1.0.26.data/data/npcpy/npc_team/eriane.npc +0 -4
- npcpy-1.0.26.data/data/npcpy/npc_team/file_chat.jinx +0 -14
- npcpy-1.0.26.data/data/npcpy/npc_team/foreman.npc +0 -7
- npcpy-1.0.26.data/data/npcpy/npc_team/frederic.npc +0 -6
- npcpy-1.0.26.data/data/npcpy/npc_team/frederic4.png +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/guac.png +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/gui_controller.jinx +0 -28
- npcpy-1.0.26.data/data/npcpy/npc_team/image_generation.jinx +0 -29
- npcpy-1.0.26.data/data/npcpy/npc_team/internet_search.jinx +0 -30
- npcpy-1.0.26.data/data/npcpy/npc_team/kadiefa.npc +0 -3
- npcpy-1.0.26.data/data/npcpy/npc_team/kadiefa.png +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/lineru.npc +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/local_search.jinx +0 -152
- npcpy-1.0.26.data/data/npcpy/npc_team/maurawa.npc +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/npcsh.ctx +0 -9
- npcpy-1.0.26.data/data/npcpy/npc_team/npcsh_executor.jinx +0 -31
- npcpy-1.0.26.data/data/npcpy/npc_team/npcsh_sibiji.png +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/plonk.npc +0 -2
- npcpy-1.0.26.data/data/npcpy/npc_team/plonk.png +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/plonkjr.npc +0 -2
- npcpy-1.0.26.data/data/npcpy/npc_team/plonkjr.png +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/python_executor.jinx +0 -8
- npcpy-1.0.26.data/data/npcpy/npc_team/raone.npc +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/screen_cap.jinx +0 -25
- npcpy-1.0.26.data/data/npcpy/npc_team/sibiji.npc +0 -5
- npcpy-1.0.26.data/data/npcpy/npc_team/sibiji.png +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/slean.npc +0 -4
- npcpy-1.0.26.data/data/npcpy/npc_team/spool.png +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/sql_executor.jinx +0 -33
- npcpy-1.0.26.data/data/npcpy/npc_team/test_pipeline.py +0 -181
- npcpy-1.0.26.data/data/npcpy/npc_team/turnic.npc +0 -4
- npcpy-1.0.26.data/data/npcpy/npc_team/welxor.npc +0 -0
- npcpy-1.0.26.data/data/npcpy/npc_team/yap.png +0 -0
- npcpy-1.0.26.dist-info/METADATA +0 -827
- npcpy-1.0.26.dist-info/RECORD +0 -139
- npcpy-1.0.26.dist-info/entry_points.txt +0 -11
- /npcpy/{modes → ft}/__init__.py +0 -0
- {npcpy-1.0.26.dist-info → npcpy-1.2.32.dist-info}/WHEEL +0 -0
- {npcpy-1.0.26.dist-info → npcpy-1.2.32.dist-info}/licenses/LICENSE +0 -0
- {npcpy-1.0.26.dist-info → npcpy-1.2.32.dist-info}/top_level.txt +0 -0
npcpy/serve.py
ADDED
@@ -0,0 +1,3120 @@
import datetime
from flask import Flask, request, jsonify, Response
from flask_sse import sse
import redis
import threading
import uuid
import sys
import traceback
import glob

from flask_cors import CORS
import os
import sqlite3
import json
from pathlib import Path
import yaml
from dotenv import load_dotenv

from PIL import Image
from PIL import ImageFile
from io import BytesIO
import networkx as nx
from collections import defaultdict
import numpy as np
import pandas as pd
import subprocess
try:
    import ollama
except:
    pass

import base64
import shutil
import uuid

from npcpy.llm_funcs import gen_image

from sqlalchemy import create_engine, text
from sqlalchemy.orm import sessionmaker

from npcpy.npc_sysenv import get_locally_available_models
from npcpy.memory.command_history import (
    CommandHistory,
    save_conversation_message,
    generate_message_id,
)
from npcpy.npc_compiler import Jinx, NPC, Team

from npcpy.llm_funcs import (
    get_llm_response, check_llm_command
)
from npcpy.npc_compiler import NPC
import base64

from npcpy.tools import auto_tools

import json
import os
from pathlib import Path
from flask_cors import CORS

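# Shared module state: per-stream cancellation flags, guarded by a lock so
# request threads and streaming generators can signal interruption safely.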
cancellation_flags = {}
cancellation_lock = threading.Lock()


def get_project_npc_directory(current_path=None):
    """
    Get the project NPC directory based on the current path

    Args:
        current_path: The current path where project NPCs should be looked for

    Returns:
        Path to the project's npc_team directory
    """
    if current_path:
        return os.path.join(current_path, "npc_team")
    else:
        return os.path.abspath("./npc_team")


def load_project_env(current_path):
    """
    Load environment variables from a project's .env file

    Args:
        current_path: The current project directory path

    Returns:
        Dictionary of environment variables that were loaded
    """
    if not current_path:
        return {}

    env_path = os.path.join(current_path, ".env")
    loaded_vars = {}

    if os.path.exists(env_path):
        print(f"Loading project environment from {env_path}")

        success = load_dotenv(env_path, override=True)

        if success:
            with open(env_path, "r") as f:
                for line in f:
                    line = line.strip()
                    if line and not line.startswith("#"):
                        if "=" in line:
                            key, value = line.split("=", 1)
                            loaded_vars[key.strip()] = value.strip().strip("\"'")

            print(f"Loaded {len(loaded_vars)} variables from project .env file")
        else:
            print(f"Failed to load environment variables from {env_path}")
    else:
        print(f"No .env file found at {env_path}")

    return loaded_vars

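# Knowledge-graph loader: reads kg_concepts, kg_facts, and kg_links from the
# SQLite database and keeps only links whose endpoints survive the generation filter.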
def load_kg_data(generation=None):
    """Helper function to load data up to a specific generation."""
    engine = create_engine('sqlite:///' + app.config.get('DB_PATH'))

    query_suffix = f" WHERE generation <= {generation}" if generation is not None else ""

    concepts_df = pd.read_sql_query(f"SELECT * FROM kg_concepts{query_suffix}", engine)
    facts_df = pd.read_sql_query(f"SELECT * FROM kg_facts{query_suffix}", engine)

    all_links_df = pd.read_sql_query("SELECT * FROM kg_links", engine)
    valid_nodes = set(concepts_df['name']).union(set(facts_df['statement']))
    links_df = all_links_df[all_links_df['source'].isin(valid_nodes) & all_links_df['target'].isin(valid_nodes)]

    return concepts_df, facts_df, links_df

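# Flask application and globals. DB_PATH starts empty and is expected to be
# configured before any database-backed route is called; CORS is opened for a
# local frontend on port 5173.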
app = Flask(__name__)
app.config["REDIS_URL"] = "redis://localhost:6379"
app.config['DB_PATH'] = ''
app.jinx_conversation_contexts = {}

redis_client = redis.Redis(host="localhost", port=6379, decode_responses=True)

available_models = {}
CORS(
    app,
    origins=["http://localhost:5173"],
    allow_headers=["Content-Type", "Authorization"],
    methods=["GET", "POST", "PUT", "DELETE", "OPTIONS"],
    supports_credentials=True,
)

def get_db_connection():
    engine = create_engine('sqlite:///' + app.config.get('DB_PATH'))
    return engine

def get_db_session():
    engine = get_db_connection()
    Session = sessionmaker(bind=engine)
    return Session()

extension_map = {
    "PNG": "images",
    "JPG": "images",
    "JPEG": "images",
    "GIF": "images",
    "SVG": "images",
    "MP4": "videos",
    "AVI": "videos",
    "MOV": "videos",
    "WMV": "videos",
    "MPG": "videos",
    "MPEG": "videos",
    "DOC": "documents",
    "DOCX": "documents",
    "PDF": "documents",
    "PPT": "documents",
    "PPTX": "documents",
    "XLS": "documents",
    "XLSX": "documents",
    "TXT": "documents",
    "CSV": "documents",
    "ZIP": "archives",
    "RAR": "archives",
    "7Z": "archives",
    "TAR": "archives",
    "GZ": "archives",
    "BZ2": "archives",
    "ISO": "archives",
}
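# NPC resolution: look an NPC up by name in either the project's npc_team
# directory or the global one, logging where the lookup happened.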
def load_npc_by_name_and_source(name, source, db_conn=None, current_path=None):
    """
    Loads an NPC from either project or global directory based on source

    Args:
        name: The name of the NPC to load
        source: Either 'project' or 'global' indicating where to look for the NPC
        db_conn: Optional database connection
        current_path: The current path where project NPCs should be looked for

    Returns:
        NPC object or None if not found
    """
    if not db_conn:
        db_conn = get_db_connection()

    if source == 'project':
        npc_directory = get_project_npc_directory(current_path)
        print(f"Looking for project NPC in: {npc_directory}")
    else:
        npc_directory = app.config['user_npc_directory']
        print(f"Looking for global NPC in: {npc_directory}")

    npc_path = os.path.join(npc_directory, f"{name}.npc")

    if os.path.exists(npc_path):
        try:
            npc = NPC(file=npc_path, db_conn=db_conn)
            return npc
        except Exception as e:
            print(f"Error loading NPC {name} from {source}: {str(e)}")
            return None
    else:
        print(f"NPC file not found: {npc_path}")


def get_conversation_history(conversation_id):
    """Fetch all messages for a conversation in chronological order."""
    if not conversation_id:
        return []

    engine = get_db_connection()
    try:
        with engine.connect() as conn:
            query = text("""
                SELECT role, content, timestamp
                FROM conversation_history
                WHERE conversation_id = :conversation_id
                ORDER BY timestamp ASC
            """)
            result = conn.execute(query, {"conversation_id": conversation_id})
            messages = result.fetchall()

            return [
                {
                    "role": msg[0],
                    "content": msg[1],
                    "timestamp": msg[2],
                }
                for msg in messages
            ]
    except Exception as e:
        print(f"Error fetching conversation history: {e}")
        return []


def fetch_messages_for_conversation(conversation_id):
    """Fetch all messages for a conversation in chronological order."""
    engine = get_db_connection()
    try:
        with engine.connect() as conn:
            query = text("""
                SELECT role, content, timestamp
                FROM conversation_history
                WHERE conversation_id = :conversation_id
                ORDER BY timestamp ASC
            """)
            result = conn.execute(query, {"conversation_id": conversation_id})
            messages = result.fetchall()

            return [
                {
                    "role": message[0],
                    "content": message[1],
                    "timestamp": message[2],
                }
                for message in messages
            ]
    except Exception as e:
        print(f"Error fetching messages for conversation: {e}")
        return []

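# Knowledge-graph API: list generations, return the full graph, and compute
# network statistics, concept co-occurrence, and degree centrality with networkx.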
@app.route('/api/kg/generations')
def list_generations():
    try:
        engine = create_engine('sqlite:///' + app.config.get('DB_PATH'))

        query = "SELECT DISTINCT generation FROM kg_concepts UNION SELECT DISTINCT generation FROM kg_facts"
        generations_df = pd.read_sql_query(query, engine)
        generations = generations_df.iloc[:, 0].tolist()
        return jsonify({"generations": sorted([g for g in generations if g is not None])})
    except Exception as e:
        print(f"Error listing generations (likely new DB): {e}")
        return jsonify({"generations": []})

@app.route('/api/kg/graph')
def get_graph_data():
    generation_str = request.args.get('generation')
    generation = int(generation_str) if generation_str and generation_str != 'null' else None

    concepts_df, facts_df, links_df = load_kg_data(generation)

    nodes = []
    nodes.extend([{'id': name, 'type': 'concept'} for name in concepts_df['name']])
    nodes.extend([{'id': statement, 'type': 'fact'} for statement in facts_df['statement']])

    links = [{'source': row['source'], 'target': row['target']} for _, row in links_df.iterrows()]

    return jsonify(graph={'nodes': nodes, 'links': links})

@app.route('/api/kg/network-stats')
def get_network_stats():
    generation = request.args.get('generation', type=int)
    _, _, links_df = load_kg_data(generation)
    G = nx.DiGraph()
    for _, link in links_df.iterrows():
        G.add_edge(link['source'], link['target'])
    n_nodes = G.number_of_nodes()
    if n_nodes == 0:
        return jsonify(stats={'nodes': 0, 'edges': 0, 'density': 0, 'avg_degree': 0, 'node_degrees': {}})
    degrees = dict(G.degree())
    stats = {
        'nodes': n_nodes, 'edges': G.number_of_edges(), 'density': nx.density(G),
        'avg_degree': np.mean(list(degrees.values())) if degrees else 0, 'node_degrees': degrees
    }
    return jsonify(stats=stats)

@app.route('/api/kg/cooccurrence')
def get_cooccurrence_network():
    generation = request.args.get('generation', type=int)
    min_cooccurrence = request.args.get('min_cooccurrence', 2, type=int)
    _, _, links_df = load_kg_data(generation)
    fact_to_concepts = defaultdict(set)
    for _, link in links_df.iterrows():
        if link['type'] == 'fact_to_concept':
            fact_to_concepts[link['source']].add(link['target'])
    cooccurrence = defaultdict(int)
    for concepts in fact_to_concepts.values():
        concepts_list = list(concepts)
        for i, c1 in enumerate(concepts_list):
            for c2 in concepts_list[i+1:]:
                pair = tuple(sorted((c1, c2)))
                cooccurrence[pair] += 1
    G_cooccur = nx.Graph()
    for (c1, c2), weight in cooccurrence.items():
        if weight >= min_cooccurrence:
            G_cooccur.add_edge(c1, c2, weight=weight)
    if G_cooccur.number_of_nodes() == 0:
        return jsonify(network={'nodes': [], 'links': []})
    components = list(nx.connected_components(G_cooccur))
    node_to_community = {node: i for i, component in enumerate(components) for node in component}
    nodes = [{'id': node, 'type': 'concept', 'community': node_to_community.get(node, 0)} for node in G_cooccur.nodes()]
    links = [{'source': u, 'target': v, 'weight': d['weight']} for u, v, d in G_cooccur.edges(data=True)]
    return jsonify(network={'nodes': nodes, 'links': links})

@app.route('/api/kg/centrality')
def get_centrality_data():
    generation = request.args.get('generation', type=int)
    concepts_df, _, links_df = load_kg_data(generation)
    G = nx.Graph()
    fact_concept_links = links_df[links_df['type'] == 'fact_to_concept']
    for _, link in fact_concept_links.iterrows():
        if link['target'] in concepts_df['name'].values:
            G.add_edge(link['source'], link['target'])
    concept_degree = {node: cent for node, cent in nx.degree_centrality(G).items() if node in concepts_df['name'].values}
    return jsonify(centrality={'degree': concept_degree})

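# Attachment retrieval and screenshot capture endpoints. Note that
# capture_screenshot is referenced below without an import in the lines above.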
@app.route("/api/attachments/<message_id>", methods=["GET"])
def get_message_attachments(message_id):
    """Get all attachments for a message"""
    try:
        command_history = CommandHistory(app.config.get('DB_PATH'))
        attachments = command_history.get_message_attachments(message_id)
        return jsonify({"attachments": attachments, "error": None})
    except Exception as e:
        return jsonify({"error": str(e)}), 500


@app.route("/api/attachment/<attachment_id>", methods=["GET"])
def get_attachment(attachment_id):
    """Get specific attachment data"""
    try:
        command_history = CommandHistory(app.config.get('DB_PATH'))
        data, name, type = command_history.get_attachment_data(attachment_id)

        if data:
            base64_data = base64.b64encode(data).decode("utf-8")
            return jsonify(
                {"data": base64_data, "name": name, "type": type, "error": None}
            )
        return jsonify({"error": "Attachment not found"}), 404
    except Exception as e:
        return jsonify({"error": str(e)}), 500


@app.route("/api/capture_screenshot", methods=["GET"])
def capture():
    screenshot = capture_screenshot(None, full=True)

    if not screenshot:
        print("Screenshot capture failed")
        return None

    return jsonify({"screenshot": screenshot})

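# Global settings: parse ~/.npcshrc into known settings plus free-form variables.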
@app.route("/api/settings/global", methods=["GET", "OPTIONS"])
def get_global_settings():
    if request.method == "OPTIONS":
        return "", 200

    try:
        npcshrc_path = os.path.expanduser("~/.npcshrc")

        global_settings = {
            "model": "llama3.2",
            "provider": "ollama",
            "embedding_model": "nomic-embed-text",
            "embedding_provider": "ollama",
            "search_provider": "perplexity",
            "NPC_STUDIO_LICENSE_KEY": "",
            "default_folder": os.path.expanduser("~/.npcsh/"),
        }
        global_vars = {}

        if os.path.exists(npcshrc_path):
            with open(npcshrc_path, "r") as f:
                for line in f:
                    line = line.split("#")[0].strip()
                    if not line:
                        continue

                    if "=" not in line:
                        continue

                    key, value = line.split("=", 1)
                    key = key.strip()
                    if key.startswith("export "):
                        key = key[7:]

                    value = value.strip()
                    if value.startswith('"') and value.endswith('"'):
                        value = value[1:-1]
                    elif value.startswith("'") and value.endswith("'"):
                        value = value[1:-1]

                    key_mapping = {
                        "NPCSH_MODEL": "model",
                        "NPCSH_PROVIDER": "provider",
                        "NPCSH_EMBEDDING_MODEL": "embedding_model",
                        "NPCSH_EMBEDDING_PROVIDER": "embedding_provider",
                        "NPCSH_SEARCH_PROVIDER": "search_provider",
                        "NPC_STUDIO_LICENSE_KEY": "NPC_STUDIO_LICENSE_KEY",
                        "NPCSH_STREAM_OUTPUT": "NPCSH_STREAM_OUTPUT",
                        "NPC_STUDIO_DEFAULT_FOLDER": "default_folder",
                    }

                    if key in key_mapping:
                        global_settings[key_mapping[key]] = value
                    else:
                        global_vars[key] = value

        print("Global settings loaded from .npcshrc")
        print(global_settings)
        return jsonify(
            {
                "global_settings": global_settings,
                "global_vars": global_vars,
                "error": None,
            }
        )

    except Exception as e:
        print(f"Error in get_global_settings: {str(e)}")
        return jsonify({"error": str(e)}), 500

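# Jinx discovery: walk the project and global npc_team/jinxs trees for .jinx files
# and expose them, parsed from YAML, to the frontend.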
def _get_jinx_files_recursively(directory):
    """Helper to recursively find all .jinx file paths."""
    jinx_paths = []
    if os.path.exists(directory):
        for root, _, files in os.walk(directory):
            for filename in files:
                if filename.endswith(".jinx"):
                    jinx_paths.append(os.path.join(root, filename))
    return jinx_paths

@app.route("/api/jinxs/available", methods=["GET"])
def get_available_jinxs():
    try:
        current_path = request.args.get('currentPath')
        jinx_names = set()

        if current_path:
            team_jinxs_dir = os.path.join(current_path, 'npc_team', 'jinxs')
            jinx_paths = _get_jinx_files_recursively(team_jinxs_dir)
            for path in jinx_paths:
                jinx_names.add(os.path.basename(path)[:-5])

        global_jinxs_dir = os.path.expanduser('~/.npcsh/npc_team/jinxs')
        jinx_paths = _get_jinx_files_recursively(global_jinxs_dir)
        for path in jinx_paths:
            jinx_names.add(os.path.basename(path)[:-5])

        return jsonify({'jinxs': sorted(list(jinx_names)), 'error': None})
    except Exception as e:
        print(f"Error getting available jinxs: {str(e)}")
        traceback.print_exc()
        return jsonify({'jinxs': [], 'error': str(e)}), 500

@app.route("/api/jinxs/global", methods=["GET"])
def get_global_jinxs():
    jinxs_dir = os.path.join(os.path.expanduser("~"), ".npcsh", "npc_team", "jinxs")
    jinx_paths = _get_jinx_files_recursively(jinxs_dir)
    jinxs = []
    for path in jinx_paths:
        try:
            with open(path, "r") as f:
                jinx_data = yaml.safe_load(f)
                jinxs.append(jinx_data)
        except Exception as e:
            print(f"Error loading global jinx {path}: {e}")
    return jsonify({"jinxs": jinxs})

@app.route("/api/jinxs/project", methods=["GET"])
def get_project_jinxs():
    current_path = request.args.get("currentPath")
    if not current_path:
        return jsonify({"jinxs": []})

    if not current_path.endswith("npc_team"):
        current_path = os.path.join(current_path, "npc_team")

    jinxs_dir = os.path.join(current_path, "jinxs")
    jinx_paths = _get_jinx_files_recursively(jinxs_dir)
    jinxs = []
    for path in jinx_paths:
        try:
            with open(path, "r") as f:
                jinx_data = yaml.safe_load(f)
                jinxs.append(jinx_data)
        except Exception as e:
            print(f"Error loading project jinx {path}: {e}")
    return jsonify({"jinxs": jinxs})

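# Jinx execution: resolves a jinx from NPC, team, or global scope, re-assembles
# space-split CLI-style arguments, runs the jinx, and streams its output back as
# server-sent events while persisting a per-conversation variable context.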
@app.route("/api/jinx/execute", methods=["POST"])
def execute_jinx():
    """
    Execute a specific jinx with provided arguments.
    Streams the output back to the client.
    """
    data = request.json

    stream_id = data.get("streamId")
    if not stream_id:
        stream_id = str(uuid.uuid4())

    with cancellation_lock:
        cancellation_flags[stream_id] = False

    print(f"--- Jinx Execution Request for streamId: {stream_id} ---")
    print(f"Request Data: {json.dumps(data, indent=2)}")

    jinx_name = data.get("jinxName")
    jinx_args = data.get("jinxArgs", [])
    print(f"Jinx Name: {jinx_name}, Jinx Args: {jinx_args}")
    conversation_id = data.get("conversationId")
    model = data.get("model")
    provider = data.get("provider")

    # --- IMPORTANT: Ensure conversation_id is present for context persistence ---
    if not conversation_id:
        print("ERROR: conversationId is required for Jinx execution with persistent variables")
        return jsonify({"error": "conversationId is required for Jinx execution with persistent variables"}), 400

    npc_name = data.get("npc")
    npc_source = data.get("npcSource", "global")
    current_path = data.get("currentPath")

    if not jinx_name:
        print("ERROR: jinxName is required")
        return jsonify({"error": "jinxName is required"}), 400

    # Load project environment if applicable
    if current_path:
        load_project_env(current_path)

    # Load the NPC
    npc_object = None
    if npc_name:
        db_conn = get_db_connection()
        npc_object = load_npc_by_name_and_source(npc_name, npc_source, db_conn, current_path)
        if not npc_object and npc_source == 'project':
            npc_object = load_npc_by_name_and_source(npc_name, 'global', db_conn)

    # Try to find the jinx
    jinx = None

    # Check NPC's jinxs
    if npc_object and hasattr(npc_object, 'jinxs_dict') and jinx_name in npc_object.jinxs_dict:
        jinx = npc_object.jinxs_dict[jinx_name]

    # Check team jinxs
    if not jinx and current_path:
        team_jinx_path = os.path.join(current_path, 'npc_team', 'jinxs', f'{jinx_name}.jinx')
        if os.path.exists(team_jinx_path):
            jinx = Jinx(jinx_path=team_jinx_path)

    # Check global jinxs
    if not jinx:
        global_jinx_path = os.path.expanduser(f'~/.npcsh/npc_team/jinxs/{jinx_name}.jinx')
        if os.path.exists(global_jinx_path):
            jinx = Jinx(jinx_path=global_jinx_path)

    if not jinx:
        print(f"ERROR: Jinx '{jinx_name}' not found")
        return jsonify({"error": f"Jinx '{jinx_name}' not found"}), 404

    # Extract inputs from args
    from npcpy.npc_compiler import extract_jinx_inputs

    # Re-assemble arguments that were incorrectly split by spaces.
    fixed_args = []
    i = 0
    while i < len(jinx_args):
        arg = jinx_args[i]
        if arg.startswith('-'):
            fixed_args.append(arg)
            value_parts = []
            i += 1
            # Collect all subsequent parts until the next flag or the end of the list.
            while i < len(jinx_args) and not jinx_args[i].startswith('-'):
                value_parts.append(jinx_args[i])
                i += 1

            if value_parts:
                # Join the parts back into a single string.
                full_value = " ".join(value_parts)
                # Clean up the extraneous quotes that the initial bad split left behind.
                if full_value.startswith("'") and full_value.endswith("'"):
                    full_value = full_value[1:-1]
                elif full_value.startswith('"') and full_value.endswith('"'):
                    full_value = full_value[1:-1]
                fixed_args.append(full_value)
            # The 'i' counter is already advanced, so the loop continues from the next flag.
        else:
            # This handles positional arguments, just in case.
            fixed_args.append(arg)
            i += 1

    # Now, use the corrected arguments to extract inputs.
    input_values = extract_jinx_inputs(fixed_args, jinx)

    print(f'Executing jinx with input_values: {input_values}')
    # Get conversation history
    command_history = CommandHistory(app.config.get('DB_PATH'))
    messages = fetch_messages_for_conversation(conversation_id)

    # Prepare jinxs_dict for execution
    all_jinxs = {}
    if npc_object and hasattr(npc_object, 'jinxs_dict'):
        all_jinxs.update(npc_object.jinxs_dict)

    # --- IMPORTANT: Retrieve or initialize the persistent Jinx context for this conversation ---
    if conversation_id not in app.jinx_conversation_contexts:
        app.jinx_conversation_contexts[conversation_id] = {}
    jinx_local_context = app.jinx_conversation_contexts[conversation_id]

    print(f"--- CONTEXT STATE (conversationId: {conversation_id}) ---")
    print(f"jinx_local_context BEFORE Jinx execution: {jinx_local_context}")

    def event_stream(current_stream_id):
        try:
            # --- IMPORTANT: Pass the persistent context as 'extra_globals' ---
            result = jinx.execute(
                input_values=input_values,
                jinxs_dict=all_jinxs,
                jinja_env=npc_object.jinja_env if npc_object else None,
                npc=npc_object,
                messages=messages,
                extra_globals=jinx_local_context  # <--- THIS IS WHERE THE PERSISTENT CONTEXT IS PASSED
            )

            # --- CRITICAL FIX: Capture and update local_vars from the Jinx's result ---
            # The Jinx.execute method returns its internal 'context' dictionary.
            # We need to update our persistent 'jinx_local_context' with the new variables
            # from the Jinx's returned context.
            if isinstance(result, dict):
                # We need to be careful not to overwrite core Jinx/NPC context keys
                # that are not meant for variable persistence.
                keys_to_exclude = ['output', 'llm_response', 'messages', 'results', 'npc', 'context', 'jinxs', 'team']

                # Update jinx_local_context with all non-excluded keys from the result
                for key, value in result.items():
                    if key not in keys_to_exclude and not key.startswith('_'):  # Exclude internal/temporary keys
                        jinx_local_context[key] = value

                print(f"jinx_local_context UPDATED from Jinx result: {jinx_local_context}")  # NEW LOG

            # Get output (this still comes from the 'output' key in the result)
            output = result.get('output', str(result))
            messages_updated = result.get('messages', messages)

            print(f"jinx_local_context AFTER Jinx execution (final state): {jinx_local_context}")
            print(f"Jinx execution result output: {output}")

            # Check for interruption
            with cancellation_lock:
                if cancellation_flags.get(current_stream_id, False):
                    yield f"data: {json.dumps({'type': 'interrupted'})}\n\n"
                    return

            # Stream the output in chunks for consistent UI experience
            if isinstance(output, str):
                chunk_size = 50  # Characters per chunk
                for i in range(0, len(output), chunk_size):
                    chunk = output[i:i + chunk_size]
                    chunk_data = {
                        "id": None,
                        "object": None,
                        "created": datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                        "model": model,
                        "choices": [{
                            "index": 0,
                            "delta": {
                                "content": chunk,
                                "role": "assistant"
                            },
                            "finish_reason": None
                        }]
                    }
                    yield f"data: {json.dumps(chunk_data)}\n\n"
            else:
                # Non-string output, send as single chunk
                chunk_data = {
                    "id": None,
                    "object": None,
                    "created": datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                    "model": model,
                    "choices": [{
                        "index": 0,
                        "delta": {
                            "content": str(output),
                            "role": "assistant"
                        },
                        "finish_reason": None
                    }]
                }
                yield f"data: {json.dumps(chunk_data)}\n\n"

            # Send completion message
            yield f"data: {json.dumps({'type': 'message_stop'})}\n\n"

            # Save to conversation history
            message_id = generate_message_id()
            save_conversation_message(
                command_history,
                conversation_id,
                "user",
                f"/{jinx_name} {' '.join(jinx_args)}",
                wd=current_path,
                model=model,
                provider=provider,
                npc=npc_name,
                message_id=message_id
            )

            message_id = generate_message_id()
            save_conversation_message(
                command_history,
                conversation_id,
                "assistant",
                str(output),
                wd=current_path,
                model=model,
                provider=provider,
                npc=npc_name,
                message_id=message_id
            )

        except Exception as e:
            print(f"ERROR: Exception during jinx execution {jinx_name}: {str(e)}")
            traceback.print_exc()
            error_data = {
                "type": "error",
                "error": str(e)
            }
            yield f"data: {json.dumps(error_data)}\n\n"

        finally:
            with cancellation_lock:
                if current_stream_id in cancellation_flags:
                    del cancellation_flags[current_stream_id]
            print(f"--- Jinx Execution Finished for streamId: {stream_id} ---")

    return Response(event_stream(stream_id), mimetype="text/event-stream")

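# Writing settings back out: global settings go to ~/.npcshrc as export lines,
# project variables to the project's .env file.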
@app.route("/api/settings/global", methods=["POST", "OPTIONS"])
def save_global_settings():
    if request.method == "OPTIONS":
        return "", 200

    try:
        data = request.json
        npcshrc_path = os.path.expanduser("~/.npcshrc")

        key_mapping = {
            "model": "NPCSH_CHAT_MODEL",
            "provider": "NPCSH_CHAT_PROVIDER",
            "embedding_model": "NPCSH_EMBEDDING_MODEL",
            "embedding_provider": "NPCSH_EMBEDDING_PROVIDER",
            "search_provider": "NPCSH_SEARCH_PROVIDER",
            "NPC_STUDIO_LICENSE_KEY": "NPC_STUDIO_LICENSE_KEY",
            "NPCSH_STREAM_OUTPUT": "NPCSH_STREAM_OUTPUT",
            "default_folder": "NPC_STUDIO_DEFAULT_FOLDER",
        }

        os.makedirs(os.path.dirname(npcshrc_path), exist_ok=True)
        print(data)
        with open(npcshrc_path, "w") as f:
            for key, value in data.get("global_settings", {}).items():
                if key in key_mapping and value:
                    if " " in str(value):
                        value = f'"{value}"'
                    f.write(f"export {key_mapping[key]}={value}\n")

            for key, value in data.get("global_vars", {}).items():
                if key and value:
                    if " " in str(value):
                        value = f'"{value}"'
                    f.write(f"export {key}={value}\n")

        return jsonify({"message": "Global settings saved successfully", "error": None})

    except Exception as e:
        print(f"Error in save_global_settings: {str(e)}")
        return jsonify({"error": str(e)}), 500

@app.route("/api/settings/project", methods=["GET", "OPTIONS"])
def get_project_settings():
    if request.method == "OPTIONS":
        return "", 200

    try:
        current_dir = request.args.get("path")
        if not current_dir:
            return jsonify({"error": "No path provided"}), 400

        env_path = os.path.join(current_dir, ".env")
        env_vars = {}

        if os.path.exists(env_path):
            with open(env_path, "r") as f:
                for line in f:
                    line = line.strip()
                    if line and not line.startswith("#"):
                        if "=" in line:
                            key, value = line.split("=", 1)
                            env_vars[key.strip()] = value.strip().strip("\"'")

        return jsonify({"env_vars": env_vars, "error": None})

    except Exception as e:
        print(f"Error in get_project_settings: {str(e)}")
        return jsonify({"error": str(e)}), 500


@app.route("/api/settings/project", methods=["POST", "OPTIONS"])
def save_project_settings():
    if request.method == "OPTIONS":
        return "", 200

    try:
        current_dir = request.args.get("path")
        if not current_dir:
            return jsonify({"error": "No path provided"}), 400

        data = request.json
        env_path = os.path.join(current_dir, ".env")

        with open(env_path, "w") as f:
            for key, value in data.get("env_vars", {}).items():
                f.write(f"{key}={value}\n")

        return jsonify(
            {"message": "Project settings saved successfully", "error": None}
        )

    except Exception as e:
        print(f"Error in save_project_settings: {str(e)}")
        return jsonify({"error": str(e)}), 500

|
+
@app.route("/api/models", methods=["GET"])
|
|
925
|
+
def get_models():
|
|
926
|
+
"""
|
|
927
|
+
Endpoint to retrieve available models based on the current project path.
|
|
928
|
+
Checks for local configurations (.env) and Ollama.
|
|
929
|
+
"""
|
|
930
|
+
global available_models
|
|
931
|
+
current_path = request.args.get("currentPath")
|
|
932
|
+
if not current_path:
|
|
933
|
+
|
|
934
|
+
|
|
935
|
+
current_path = os.path.expanduser("~/.npcsh")
|
|
936
|
+
print("Warning: No currentPath provided for /api/models, using default.")
|
|
937
|
+
|
|
938
|
+
|
|
939
|
+
try:
|
|
940
|
+
|
|
941
|
+
available_models = get_locally_available_models(current_path)
|
|
942
|
+
|
|
943
|
+
|
|
944
|
+
|
|
945
|
+
formatted_models = []
|
|
946
|
+
for m, p in available_models.items():
|
|
947
|
+
|
|
948
|
+
text_only = (
|
|
949
|
+
"(text only)"
|
|
950
|
+
if p == "ollama"
|
|
951
|
+
and m in ["llama3.2", "deepseek-v3", "phi4", "gemma3:1b"]
|
|
952
|
+
else ""
|
|
953
|
+
)
|
|
954
|
+
|
|
955
|
+
display_model = m
|
|
956
|
+
if "claude-3-5-haiku-latest" in m:
|
|
957
|
+
display_model = "claude-3.5-haiku"
|
|
958
|
+
elif "claude-3-5-sonnet-latest" in m:
|
|
959
|
+
display_model = "claude-3.5-sonnet"
|
|
960
|
+
elif "gemini-1.5-flash" in m:
|
|
961
|
+
display_model = "gemini-1.5-flash"
|
|
962
|
+
elif "gemini-2.0-flash-lite-preview-02-05" in m:
|
|
963
|
+
display_model = "gemini-2.0-flash-lite-preview"
|
|
964
|
+
|
|
965
|
+
display_name = f"{display_model} | {p} {text_only}".strip()
|
|
966
|
+
|
|
967
|
+
formatted_models.append(
|
|
968
|
+
{
|
|
969
|
+
"value": m,
|
|
970
|
+
"provider": p,
|
|
971
|
+
"display_name": display_name,
|
|
972
|
+
}
|
|
973
|
+
)
|
|
974
|
+
print(m, p)
|
|
975
|
+
return jsonify({"models": formatted_models, "error": None})
|
|
976
|
+
|
|
977
|
+
except Exception as e:
|
|
978
|
+
print(f"Error getting available models: {str(e)}")
|
|
979
|
+
|
|
980
|
+
traceback.print_exc()
|
|
981
|
+
|
|
982
|
+
return jsonify({"models": [], "error": str(e)}), 500
|
|
983
|
+
|
|
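# Generic command dispatch: forwards /api/<command> POSTs to the npcpy command
# router (the `router` object is expected to be bound elsewhere in the module).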
@app.route('/api/<command>', methods=['POST'])
def api_command(command):
    data = request.json or {}

    handler = router.get_route(command)
    if not handler:
        return jsonify({"error": f"Unknown command: {command}"})

    if router.shell_only.get(command, False):
        return jsonify({"error": f"Command {command} is only available in shell mode"})

    try:
        args = data.get('args', [])
        kwargs = data.get('kwargs', {})

        command_str = command
        if args:
            command_str += " " + " ".join(str(arg) for arg in args)

        result = handler(command_str, **kwargs)
        return jsonify(result)
    except Exception as e:
        return jsonify({"error": str(e)})

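# NPC team listing and persistence: serialize .npc files (including their
# jinxs and steps) for the UI, and write edited jinxs and NPCs back to disk.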
@app.route("/api/npc_team_global")
def get_npc_team_global():
    try:
        db_conn = get_db_connection()
        global_npc_directory = os.path.expanduser("~/.npcsh/npc_team")

        npc_data = []

        for file in os.listdir(global_npc_directory):
            if file.endswith(".npc"):
                npc_path = os.path.join(global_npc_directory, file)
                npc = NPC(file=npc_path, db_conn=db_conn)

                serialized_npc = {
                    "name": npc.name,
                    "primary_directive": npc.primary_directive,
                    "model": npc.model,
                    "provider": npc.provider,
                    "api_url": npc.api_url,
                    "use_global_jinxs": npc.use_global_jinxs,
                    "jinxs": [
                        {
                            "jinx_name": jinx.jinx_name,
                            "inputs": jinx.inputs,
                            "steps": [
                                {
                                    "name": step.get("name", f"step_{i}"),
                                    "engine": step.get("engine", "natural"),
                                    "code": step.get("code", "")
                                }
                                for i, step in enumerate(jinx.steps)
                            ]
                        }
                        for jinx in npc.jinxs
                    ],
                }
                npc_data.append(serialized_npc)

        return jsonify({"npcs": npc_data, "error": None})

    except Exception as e:
        print(f"Error loading global NPCs: {str(e)}")
        return jsonify({"npcs": [], "error": str(e)})


@app.route("/api/jinxs/save", methods=["POST"])
def save_jinx():
    try:
        data = request.json
        jinx_data = data.get("jinx")
        is_global = data.get("isGlobal")
        current_path = data.get("currentPath")
        jinx_name = jinx_data.get("jinx_name")

        if not jinx_name:
            return jsonify({"error": "Jinx name is required"}), 400

        if is_global:
            jinxs_dir = os.path.join(
                os.path.expanduser("~"), ".npcsh", "npc_team", "jinxs"
            )
        else:
            if not current_path.endswith("npc_team"):
                current_path = os.path.join(current_path, "npc_team")
            jinxs_dir = os.path.join(current_path, "jinxs")

        os.makedirs(jinxs_dir, exist_ok=True)

        jinx_yaml = {
            "description": jinx_data.get("description", ""),
            "inputs": jinx_data.get("inputs", []),
            "steps": jinx_data.get("steps", []),
        }

        file_path = os.path.join(jinxs_dir, f"{jinx_name}.jinx")
        with open(file_path, "w") as f:
            yaml.safe_dump(jinx_yaml, f, sort_keys=False)

        return jsonify({"status": "success"})
    except Exception as e:
        return jsonify({"error": str(e)}), 500


@app.route("/api/save_npc", methods=["POST"])
def save_npc():
    try:
        data = request.json
        npc_data = data.get("npc")
        is_global = data.get("isGlobal")
        current_path = data.get("currentPath")

        if not npc_data or "name" not in npc_data:
            return jsonify({"error": "Invalid NPC data"}), 400

        if is_global:
            npc_directory = os.path.expanduser("~/.npcsh/npc_team")
        else:
            npc_directory = os.path.join(current_path, "npc_team")

        os.makedirs(npc_directory, exist_ok=True)

        yaml_content = f"""name: {npc_data['name']}
primary_directive: "{npc_data['primary_directive']}"
model: {npc_data['model']}
provider: {npc_data['provider']}
api_url: {npc_data.get('api_url', '')}
use_global_jinxs: {str(npc_data.get('use_global_jinxs', True)).lower()}
"""

        file_path = os.path.join(npc_directory, f"{npc_data['name']}.npc")
        with open(file_path, "w") as f:
            f.write(yaml_content)

        return jsonify({"message": "NPC saved successfully", "error": None})

    except Exception as e:
        print(f"Error saving NPC: {str(e)}")
        return jsonify({"error": str(e)}), 500


@app.route("/api/npc_team_project", methods=["GET"])
def get_npc_team_project():
    try:
        db_conn = get_db_connection()

        project_npc_directory = request.args.get("currentPath")
        if not project_npc_directory.endswith("npc_team"):
            project_npc_directory = os.path.join(project_npc_directory, "npc_team")

        npc_data = []

        for file in os.listdir(project_npc_directory):
            print(file)
            if file.endswith(".npc"):
                npc_path = os.path.join(project_npc_directory, file)
                npc = NPC(file=npc_path, db_conn=db_conn)

                serialized_npc = {
                    "name": npc.name,
                    "primary_directive": npc.primary_directive,
                    "model": npc.model,
                    "provider": npc.provider,
                    "api_url": npc.api_url,
                    "use_global_jinxs": npc.use_global_jinxs,
                    "jinxs": [
                        {
                            "jinx_name": jinx.jinx_name,
                            "inputs": jinx.inputs,
                            "steps": [
                                {
                                    "name": step.get("name", f"step_{i}"),
                                    "engine": step.get("engine", "natural"),
                                    "code": step.get("code", "")
                                }
                                for i, step in enumerate(jinx.steps)
                            ]
                        }
                        for jinx in npc.jinxs
                    ],
                }
                npc_data.append(serialized_npc)

        print(npc_data)
        return jsonify({"npcs": npc_data, "error": None})

    except Exception as e:
        print(f"Error fetching NPC team: {str(e)}")
        return jsonify({"npcs": [], "error": str(e)})

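# Lookups for the most recently used model/NPC, by working directory or by conversation.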
def get_last_used_model_and_npc_in_directory(directory_path):
    """
    Fetches the model and NPC from the most recent message in any conversation
    within the given directory.
    """
    engine = get_db_connection()
    try:
        with engine.connect() as conn:
            query = text("""
                SELECT model, npc
                FROM conversation_history
                WHERE directory_path = :directory_path
                AND model IS NOT NULL AND npc IS NOT NULL
                AND model != '' AND npc != ''
                ORDER BY timestamp DESC, id DESC
                LIMIT 1
            """)
            result = conn.execute(query, {"directory_path": directory_path}).fetchone()
            return {"model": result[0], "npc": result[1]} if result else {"model": None, "npc": None}
    except Exception as e:
        print(f"Error getting last used model/NPC for directory {directory_path}: {e}")
        return {"model": None, "npc": None, "error": str(e)}

def get_last_used_model_and_npc_in_conversation(conversation_id):
    """
    Fetches the model and NPC from the most recent message within a specific conversation.
    """
    engine = get_db_connection()
    try:
        with engine.connect() as conn:
            query = text("""
                SELECT model, npc
                FROM conversation_history
                WHERE conversation_id = :conversation_id
                AND model IS NOT NULL AND npc IS NOT NULL
                AND model != '' AND npc != ''
                ORDER BY timestamp DESC, id DESC
                LIMIT 1
            """)
            result = conn.execute(query, {"conversation_id": conversation_id}).fetchone()
            return {"model": result[0], "npc": result[1]} if result else {"model": None, "npc": None}
    except Exception as e:
        print(f"Error getting last used model/NPC for conversation {conversation_id}: {e}")
        return {"model": None, "npc": None, "error": str(e)}


@app.route("/api/last_used_in_directory", methods=["GET"])
def api_get_last_used_in_directory():
    """API endpoint to get the last used model/NPC in a given directory."""
    current_path = request.args.get("path")
    if not current_path:
        return jsonify({"error": "Path parameter is required."}), 400

    result = get_last_used_model_and_npc_in_directory(current_path)
    return jsonify(result)

@app.route("/api/last_used_in_conversation", methods=["GET"])
def api_get_last_used_in_conversation():
    """API endpoint to get the last used model/NPC in a specific conversation."""
    conversation_id = request.args.get("conversationId")
    if not conversation_id:
        return jsonify({"error": "conversationId parameter is required."}), 400

    result = get_last_used_model_and_npc_in_conversation(conversation_id)
    return jsonify(result)

|
+
def get_ctx_path(is_global, current_path=None):
    """Determines the path to the .ctx file."""
    if is_global:
        ctx_dir = os.path.expanduser("~/.npcsh/npc_team/")
        ctx_files = glob.glob(os.path.join(ctx_dir, "*.ctx"))
        return ctx_files[0] if ctx_files else None
    else:
        if not current_path:
            return None

        ctx_dir = os.path.join(current_path, "npc_team")
        ctx_files = glob.glob(os.path.join(ctx_dir, "*.ctx"))
        return ctx_files[0] if ctx_files else None


def read_ctx_file(file_path):
    """Reads and parses a YAML .ctx file, normalizing lists of strings into lists of objects."""
    if file_path and os.path.exists(file_path):
        with open(file_path, 'r') as f:
            try:
                data = yaml.safe_load(f) or {}

                if 'databases' in data and isinstance(data['databases'], list):
                    data['databases'] = [{"value": item} for item in data['databases']]

                if 'mcp_servers' in data and isinstance(data['mcp_servers'], list):
                    data['mcp_servers'] = [{"value": item} for item in data['mcp_servers']]

                if 'preferences' in data and isinstance(data['preferences'], list):
                    data['preferences'] = [{"value": item} for item in data['preferences']]

                return data
            except yaml.YAMLError as e:
                print(f"YAML parsing error in {file_path}: {e}")
                return {"error": "Failed to parse YAML."}
    return {}

def write_ctx_file(file_path, data):
    """Writes a dictionary to a YAML .ctx file, denormalizing lists of objects back to strings."""
    if not file_path:
        return False

    # Deep copy via a JSON round-trip so the caller's dict is not mutated below.
    data_to_save = json.loads(json.dumps(data))

    if 'databases' in data_to_save and isinstance(data_to_save['databases'], list):
        data_to_save['databases'] = [item.get("value", "") for item in data_to_save['databases'] if isinstance(item, dict)]

    if 'mcp_servers' in data_to_save and isinstance(data_to_save['mcp_servers'], list):
        data_to_save['mcp_servers'] = [item.get("value", "") for item in data_to_save['mcp_servers'] if isinstance(item, dict)]

    if 'preferences' in data_to_save and isinstance(data_to_save['preferences'], list):
        data_to_save['preferences'] = [item.get("value", "") for item in data_to_save['preferences'] if isinstance(item, dict)]

    os.makedirs(os.path.dirname(file_path), exist_ok=True)
    with open(file_path, 'w') as f:
        yaml.dump(data_to_save, f, default_flow_style=False, sort_keys=False)
    return True

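# --- Editor's note: illustrative sketch, not part of the package source. ---
# read_ctx_file() lifts plain YAML string lists into [{"value": ...}] objects so a
# UI can edit entries uniformly; write_ctx_file() flattens them back before dumping.
# A minimal round trip of that normalization on plain dicts:
def _sketch_ctx_normalization():
    raw = {"databases": ["~/npcsh_history.db"], "preferences": ["dark_mode"]}
    # Normalize (what read_ctx_file does to each recognized list):
    normalized = {k: [{"value": item} for item in v] for k, v in raw.items()}
    # Denormalize (what write_ctx_file does before yaml.dump):
    flattened = {k: [item.get("value", "") for item in v if isinstance(item, dict)]
                 for k, v in normalized.items()}
    assert flattened == raw
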
@app.route("/api/context/global", methods=["GET"])
|
|
1321
|
+
def get_global_context():
|
|
1322
|
+
"""Gets the global team.ctx content."""
|
|
1323
|
+
try:
|
|
1324
|
+
ctx_path = get_ctx_path(is_global=True)
|
|
1325
|
+
data = read_ctx_file(ctx_path)
|
|
1326
|
+
return jsonify({"context": data, "path": ctx_path, "error": None})
|
|
1327
|
+
except Exception as e:
|
|
1328
|
+
print(f"Error getting global context: {e}")
|
|
1329
|
+
return jsonify({"error": str(e)}), 500
|
|
1330
|
+
|
|
1331
|
+
@app.route("/api/context/global", methods=["POST"])
|
|
1332
|
+
def save_global_context():
|
|
1333
|
+
"""Saves the global team.ctx content."""
|
|
1334
|
+
try:
|
|
1335
|
+
data = request.json.get("context", {})
|
|
1336
|
+
ctx_path = get_ctx_path(is_global=True)
|
|
1337
|
+
if write_ctx_file(ctx_path, data):
|
|
1338
|
+
return jsonify({"message": "Global context saved.", "error": None})
|
|
1339
|
+
else:
|
|
1340
|
+
return jsonify({"error": "Failed to write global context file."}), 500
|
|
1341
|
+
except Exception as e:
|
|
1342
|
+
print(f"Error saving global context: {e}")
|
|
1343
|
+
return jsonify({"error": str(e)}), 500
|
|
1344
|
+
|
|
1345
|
+
@app.route("/api/context/project", methods=["GET"])
|
|
1346
|
+
def get_project_context():
|
|
1347
|
+
"""Gets the project-specific team.ctx content."""
|
|
1348
|
+
try:
|
|
1349
|
+
current_path = request.args.get("path")
|
|
1350
|
+
if not current_path:
|
|
1351
|
+
return jsonify({"error": "Project path is required."}), 400
|
|
1352
|
+
|
|
1353
|
+
ctx_path = get_ctx_path(is_global=False, current_path=current_path)
|
|
1354
|
+
data = read_ctx_file(ctx_path)
|
|
1355
|
+
return jsonify({"context": data, "path": ctx_path, "error": None})
|
|
1356
|
+
except Exception as e:
|
|
1357
|
+
print(f"Error getting project context: {e}")
|
|
1358
|
+
return jsonify({"error": str(e)}), 500
|
|
1359
|
+
|
|
1360
|
+
@app.route("/api/context/project", methods=["POST"])
|
|
1361
|
+
def save_project_context():
|
|
1362
|
+
"""Saves the project-specific team.ctx content."""
|
|
1363
|
+
try:
|
|
1364
|
+
data = request.json
|
|
1365
|
+
current_path = data.get("path")
|
|
1366
|
+
context_data = data.get("context", {})
|
|
1367
|
+
|
|
1368
|
+
if not current_path:
|
|
1369
|
+
return jsonify({"error": "Project path is required."}), 400
|
|
1370
|
+
|
|
1371
|
+
ctx_path = get_ctx_path(is_global=False, current_path=current_path)
|
|
1372
|
+
if write_ctx_file(ctx_path, context_data):
|
|
1373
|
+
return jsonify({"message": "Project context saved.", "error": None})
|
|
1374
|
+
else:
|
|
1375
|
+
return jsonify({"error": "Failed to write project context file."}), 500
|
|
1376
|
+
except Exception as e:
|
|
1377
|
+
print(f"Error saving project context: {e}")
|
|
1378
|
+
return jsonify({"error": str(e)}), 500
|
|
1379
|
+
|
|
1380
|
+
|
|
1381
|
+
|
|
1382
|
+
|
|
1383
|
+
|
|
1384
|
+
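# --- Editor's note: illustrative sketch, not part of the package source. ---
# A fetch-modify-save cycle against the context endpoints above. The base URL,
# project path, and the "preferences" entry are placeholders for the example.
def _sketch_edit_project_context():
    import requests

    base = "http://localhost:5337"
    ctx = requests.get(f"{base}/api/context/project",
                       params={"path": "/home/user/project"}).json()["context"]
    ctx.setdefault("preferences", []).append({"value": "respond tersely"})
    requests.post(f"{base}/api/context/project",
                  json={"path": "/home/user/project", "context": ctx})
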
@app.route("/api/get_attachment_response", methods=["POST"])
|
|
1385
|
+
def get_attachment_response():
|
|
1386
|
+
data = request.json
|
|
1387
|
+
attachments = data.get("attachments", [])
|
|
1388
|
+
messages = data.get("messages")
|
|
1389
|
+
conversation_id = data.get("conversationId")
|
|
1390
|
+
current_path = data.get("currentPath")
|
|
1391
|
+
command_history = CommandHistory(app.config.get('DB_PATH'))
|
|
1392
|
+
model = data.get("model")
|
|
1393
|
+
npc_name = data.get("npc")
|
|
1394
|
+
npc_source = data.get("npcSource", "global")
|
|
1395
|
+
team = data.get("team")
|
|
1396
|
+
provider = data.get("provider")
|
|
1397
|
+
message_id = data.get("messageId")
|
|
1398
|
+
|
|
1399
|
+
|
|
1400
|
+
if current_path:
|
|
1401
|
+
loaded_vars = load_project_env(current_path)
|
|
1402
|
+
print(f"Loaded project env variables for attachment response: {list(loaded_vars.keys())}")
|
|
1403
|
+
|
|
1404
|
+
|
|
1405
|
+
npc_object = None
|
|
1406
|
+
if npc_name:
|
|
1407
|
+
db_conn = get_db_connection()
|
|
1408
|
+
npc_object = load_npc_by_name_and_source(npc_name, npc_source, db_conn, current_path)
|
|
1409
|
+
|
|
1410
|
+
if not npc_object and npc_source == 'project':
|
|
1411
|
+
print(f"NPC {npc_name} not found in project directory, trying global...")
|
|
1412
|
+
npc_object = load_npc_by_name_and_source(npc_name, 'global', db_conn)
|
|
1413
|
+
|
|
1414
|
+
if npc_object:
|
|
1415
|
+
print(f"Successfully loaded NPC {npc_name} from {npc_source} directory")
|
|
1416
|
+
else:
|
|
1417
|
+
print(f"Warning: Could not load NPC {npc_name}")
|
|
1418
|
+
|
|
1419
|
+
images = []
|
|
1420
|
+
attachments_loaded = []
|
|
1421
|
+
|
|
1422
|
+
for attachment in attachments:
|
|
1423
|
+
extension = attachment["name"].split(".")[-1]
|
|
1424
|
+
extension_mapped = extension_map.get(extension.upper(), "others")
|
|
1425
|
+
file_path = os.path.expanduser("~/.npcsh/" + extension_mapped + "/" + attachment["name"])
|
|
1426
|
+
|
|
1427
|
+
if extension_mapped == "images":
|
|
1428
|
+
ImageFile.LOAD_TRUNCATED_IMAGES = True
|
|
1429
|
+
img = Image.open(attachment["path"])
|
|
1430
|
+
img_byte_arr = BytesIO()
|
|
1431
|
+
img.save(img_byte_arr, format="PNG")
|
|
1432
|
+
img_byte_arr.seek(0)
|
|
1433
|
+
img.save(file_path, optimize=True, quality=50)
|
|
1434
|
+
images.append(file_path)
|
|
1435
|
+
attachments_loaded.append({
|
|
1436
|
+
"name": attachment["name"], "type": extension_mapped,
|
|
1437
|
+
"data": img_byte_arr.read(), "size": os.path.getsize(file_path)
|
|
1438
|
+
})
|
|
1439
|
+
|
|
1440
|
+
message_to_send = messages[-1]["content"]
|
|
1441
|
+
if isinstance(message_to_send, list):
|
|
1442
|
+
message_to_send = message_to_send[0]
|
|
1443
|
+
|
|
1444
|
+
response = get_llm_response(
|
|
1445
|
+
message_to_send,
|
|
1446
|
+
images=images,
|
|
1447
|
+
messages=messages,
|
|
1448
|
+
model=model,
|
|
1449
|
+
provider=provider,
|
|
1450
|
+
npc=npc_object,
|
|
1451
|
+
)
|
|
1452
|
+
|
|
1453
|
+
messages = response["messages"]
|
|
1454
|
+
response = response["response"]
|
|
1455
|
+
|
|
1456
|
+
|
|
1457
|
+
save_conversation_message(
|
|
1458
|
+
command_history,
|
|
1459
|
+
conversation_id,
|
|
1460
|
+
"user",
|
|
1461
|
+
message_to_send,
|
|
1462
|
+
wd=current_path,
|
|
1463
|
+
team=team,
|
|
1464
|
+
model=model,
|
|
1465
|
+
provider=provider,
|
|
1466
|
+
npc=npc_name,
|
|
1467
|
+
attachments=attachments_loaded
|
|
1468
|
+
)
|
|
1469
|
+
|
|
1470
|
+
save_conversation_message(
|
|
1471
|
+
command_history,
|
|
1472
|
+
conversation_id,
|
|
1473
|
+
"assistant",
|
|
1474
|
+
response,
|
|
1475
|
+
wd=current_path,
|
|
1476
|
+
team=team,
|
|
1477
|
+
model=model,
|
|
1478
|
+
provider=provider,
|
|
1479
|
+
npc=npc_name,
|
|
1480
|
+
attachments=attachments_loaded,
|
|
1481
|
+
message_id=message_id
|
|
1482
|
+
)
|
|
1483
|
+
|
|
1484
|
+
return jsonify({
|
|
1485
|
+
"status": "success",
|
|
1486
|
+
"message": response,
|
|
1487
|
+
"conversationId": conversation_id,
|
|
1488
|
+
"messages": messages,
|
|
1489
|
+
})
|
|
1490
|
+
|
|
1491
|
+
|
|
1492
|
+
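# --- Editor's note: illustrative sketch, not part of the package source. ---
# Posting an image attachment to /api/get_attachment_response. The handler above
# expects each attachment to carry a "name" and a readable local "path"; the base
# URL, file names, and model/provider values below are placeholders.
def _sketch_attachment_request():
    import requests

    payload = {
        "conversationId": "conv_123",
        "currentPath": "/home/user/project",
        "model": "gpt-4o-mini",
        "provider": "openai",
        "npc": "sibiji",
        "messages": [{"role": "user", "content": "What is in this picture?"}],
        "attachments": [{"name": "photo.png", "path": "/home/user/photo.png"}],
    }
    return requests.post("http://localhost:5337/api/get_attachment_response",
                         json=payload).json()
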
IMAGE_MODELS = {
    "openai": [
        {"value": "dall-e-3", "display_name": "DALL-E 3"},
        {"value": "dall-e-2", "display_name": "DALL-E 2"},
        {"value": "gpt-image-1", "display_name": "GPT-Image-1"},
    ],
    "gemini": [
        {"value": "gemini-2.5-flash-image-preview", "display_name": "Gemini 2.5 Flash Image"},
        {"value": "imagen-3.0-generate-002", "display_name": "Imagen 3.0 Generate (Preview)"},
    ],
    "diffusers": [
        {"value": "runwayml/stable-diffusion-v1-5", "display_name": "Stable Diffusion v1.5"},
    ],
}


def get_available_image_models(current_path=None):
    """
    Retrieves available image generation models based on environment variables
    and predefined configurations.
    """
    if current_path:
        load_project_env(current_path)

    all_image_models = []

    env_image_model = os.getenv("NPCSH_IMAGE_MODEL")
    env_image_provider = os.getenv("NPCSH_IMAGE_PROVIDER")

    if env_image_model and env_image_provider:
        all_image_models.append({
            "value": env_image_model,
            "provider": env_image_provider,
            "display_name": f"{env_image_model} | {env_image_provider} (Configured)"
        })

    for provider_key, models_list in IMAGE_MODELS.items():
        if provider_key == "openai":
            if os.environ.get("OPENAI_API_KEY"):
                all_image_models.extend([
                    {**model, "provider": provider_key, "display_name": f"{model['display_name']} | {provider_key}"}
                    for model in models_list
                ])
        elif provider_key == "gemini":
            if os.environ.get("GEMINI_API_KEY"):
                all_image_models.extend([
                    {**model, "provider": provider_key, "display_name": f"{model['display_name']} | {provider_key}"}
                    for model in models_list
                ])
        elif provider_key == "diffusers":
            all_image_models.extend([
                {**model, "provider": provider_key, "display_name": f"{model['display_name']} | {provider_key}"}
                for model in models_list
            ])

    seen_models = set()
    unique_models = []
    for model_entry in all_image_models:
        key = (model_entry["value"], model_entry["provider"])
        if key not in seen_models:
            seen_models.add(key)
            unique_models.append(model_entry)

    return unique_models

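# --- Editor's note: illustrative sketch, not part of the package source. ---
# The hosted providers above are gated on API keys, while diffusers entries are
# always offered since they run locally. Setting the NPCSH_* variables pins a
# "(Configured)" entry at the head of the list; duplicates are then dropped by
# (value, provider). The values below are examples only.
def _sketch_image_model_env():
    import os

    os.environ["NPCSH_IMAGE_MODEL"] = "dall-e-3"
    os.environ["NPCSH_IMAGE_PROVIDER"] = "openai"
    models = get_available_image_models()
    # First entry is the configured pair; any later duplicate from IMAGE_MODELS
    # is removed by the dedup pass.
    return models[0]
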
@app.route('/api/generative_fill', methods=['POST'])
def generative_fill():
    data = request.get_json()
    image_path = data.get('imagePath')
    mask_data = data.get('mask')
    prompt = data.get('prompt')
    model = data.get('model')
    provider = data.get('provider')

    if not all([image_path, mask_data, prompt, model, provider]):
        return jsonify({"error": "Missing required fields"}), 400

    try:
        image_path = os.path.expanduser(image_path)

        mask_b64 = mask_data.split(',')[1] if ',' in mask_data else mask_data
        mask_bytes = base64.b64decode(mask_b64)
        mask_image = Image.open(BytesIO(mask_bytes))

        original_image = Image.open(image_path)

        if provider == 'openai':
            result = inpaint_openai(original_image, mask_image, prompt, model)
        elif provider == 'gemini':
            result = inpaint_gemini(original_image, mask_image, prompt, model)
        elif provider == 'diffusers':
            result = inpaint_diffusers(original_image, mask_image, prompt, model)
        else:
            return jsonify({"error": f"Provider {provider} not supported"}), 400

        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"inpaint_{timestamp}.png"
        save_dir = os.path.dirname(image_path)
        result_path = os.path.join(save_dir, filename)

        result.save(result_path)

        return jsonify({"resultPath": result_path, "error": None})

    except Exception as e:
        traceback.print_exc()
        return jsonify({"error": str(e)}), 500

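# --- Editor's note: illustrative sketch, not part of the package source. ---
# /api/generative_fill accepts the mask as a base64 data URL. One minimal way to
# build such a payload from a rectangular region with Pillow (the image path,
# region coordinates, and model/provider values are placeholders):
def _sketch_build_mask_payload():
    import base64
    from io import BytesIO
    from PIL import Image, ImageDraw

    mask = Image.new("L", (1024, 1024), 0)  # black pixels are kept
    ImageDraw.Draw(mask).rectangle([256, 256, 512, 512], fill=255)  # white pixels are repainted
    buf = BytesIO()
    mask.save(buf, format="PNG")
    b64 = base64.b64encode(buf.getvalue()).decode("utf-8")
    return {"imagePath": "~/.npcsh/images/photo.png",
            "mask": f"data:image/png;base64,{b64}",
            "prompt": "a red bicycle",
            "model": "gpt-image-1",
            "provider": "openai"}
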
def inpaint_openai(image, mask, prompt, model):
    import io
    import base64
    from openai import OpenAI
    from PIL import Image

    client = OpenAI()

    original_size = image.size

    if model == 'dall-e-2':
        max_dim = max(image.width, image.height)

        if max_dim <= 256:
            target_size = (256, 256)
            size_str = '256x256'
        elif max_dim <= 512:
            target_size = (512, 512)
            size_str = '512x512'
        else:
            target_size = (1024, 1024)
            size_str = '1024x1024'
    else:
        valid_sizes = {
            (1024, 1024): "1024x1024",
            (1024, 1536): "1024x1536",
            (1536, 1024): "1536x1024"
        }

        target_size = (1024, 1024)
        for size in valid_sizes.keys():
            if image.width > image.height and size == (1536, 1024):
                target_size = size
                break
            elif image.height > image.width and size == (1024, 1536):
                target_size = size
                break

        size_str = valid_sizes[target_size]

    resized_image = image.resize(target_size, Image.Resampling.LANCZOS)
    resized_mask = mask.resize(target_size, Image.Resampling.LANCZOS)

    img_bytes = io.BytesIO()
    resized_image.save(img_bytes, format='PNG')
    img_bytes.seek(0)
    img_bytes.name = 'image.png'

    mask_bytes = io.BytesIO()
    resized_mask.save(mask_bytes, format='PNG')
    mask_bytes.seek(0)
    mask_bytes.name = 'mask.png'

    response = client.images.edit(
        model=model,
        image=img_bytes,
        mask=mask_bytes,
        prompt=prompt,
        n=1,
        size=size_str
    )

    if response.data[0].url:
        import requests
        img_data = requests.get(response.data[0].url).content
    elif hasattr(response.data[0], 'b64_json'):
        img_data = base64.b64decode(response.data[0].b64_json)
    else:
        raise Exception("No image data in response")

    result_image = Image.open(io.BytesIO(img_data))
    return result_image.resize(original_size, Image.Resampling.LANCZOS)

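# --- Editor's note: illustrative sketch, not part of the package source. ---
# inpaint_openai() snaps arbitrary input sizes to the provider's allowed ones:
# dall-e-2 gets squares only, other models get an orientation-matched rectangle,
# and the result is resized back to the original dimensions at the end. For
# example, a 1920x1080 landscape input is edited at 1536x1024 and returned at
# 1920x1080. The same branch logic, isolated:
def _sketch_size_snapping(width, height, model):
    if model == 'dall-e-2':
        max_dim = max(width, height)
        return '256x256' if max_dim <= 256 else '512x512' if max_dim <= 512 else '1024x1024'
    if width > height:
        return '1536x1024'
    if height > width:
        return '1024x1536'
    return '1024x1024'
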
def inpaint_diffusers(image, mask, prompt, model):
    import torch
    from diffusers import StableDiffusionInpaintPipeline

    device = "cuda" if torch.cuda.is_available() else "cpu"
    # float16 is only reliable on GPU; fall back to float32 on CPU.
    pipe = StableDiffusionInpaintPipeline.from_pretrained(
        model,
        torch_dtype=torch.float16 if device == "cuda" else torch.float32
    )
    pipe = pipe.to(device)

    result = pipe(
        prompt=prompt,
        image=image,
        mask_image=mask
    ).images[0]

    return result

def inpaint_gemini(image, mask, prompt, model):
    import io
    import numpy as np
    from npcpy.gen.image_gen import generate_image

    mask_np = np.array(mask.convert('L'))
    ys, xs = np.where(mask_np > 128)

    if len(xs) == 0:
        return image

    x_center = int(np.mean(xs))
    y_center = int(np.mean(ys))
    width_pct = (xs.max() - xs.min()) / image.width * 100
    height_pct = (ys.max() - ys.min()) / image.height * 100

    position = "center"
    if y_center < image.height / 3:
        position = "top"
    elif y_center > 2 * image.height / 3:
        position = "bottom"

    if x_center < image.width / 3:
        position += " left"
    elif x_center > 2 * image.width / 3:
        position += " right"

    img_bytes = io.BytesIO()
    image.save(img_bytes, format='PNG')
    img_bytes.seek(0)

    full_prompt = f"""Using the provided image, change only the region in the {position}
(approximately {int(width_pct)}% wide by {int(height_pct)}% tall) to: {prompt}.

Keep everything else exactly the same, matching the original lighting and style.
You are in-painting the image. You should not be changing anything other than what was requested in prompt: {prompt}
"""
    results = generate_image(
        prompt=full_prompt,
        model=model,
        provider='gemini',
        attachments=[img_bytes],
        n_images=1
    )

    return results[0] if results else None

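# --- Editor's note: illustrative sketch, not part of the package source. ---
# There is no native mask parameter in this Gemini path, so inpaint_gemini()
# translates the mask into words: the centroid of the white pixels selects one
# of nine regions (top/center/bottom crossed with left/center/right), which is
# spliced into the prompt. The same heuristic, isolated (assumes a non-empty mask):
def _sketch_mask_to_position(mask_np, width, height):
    import numpy as np

    ys, xs = np.where(mask_np > 128)  # white pixels mark the region to repaint
    x_c, y_c = int(np.mean(xs)), int(np.mean(ys))
    position = "top" if y_c < height / 3 else "bottom" if y_c > 2 * height / 3 else "center"
    if x_c < width / 3:
        position += " left"
    elif x_c > 2 * width / 3:
        position += " right"
    return position
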
@app.route('/api/generate_images', methods=['POST'])
def generate_images():
    data = request.get_json()
    prompt = data.get('prompt')
    n = data.get('n', 1)
    model_name = data.get('model')
    provider_name = data.get('provider')
    attachments = data.get('attachments', [])
    base_filename = data.get('base_filename', 'vixynt_gen')
    save_dir = data.get('currentPath', '~/.npcsh/images')

    if not prompt:
        return jsonify({"error": "Prompt is required."}), 400

    if not model_name or not provider_name:
        return jsonify({"error": "Image model and provider are required."}), 400

    save_dir = os.path.expanduser(save_dir)
    os.makedirs(save_dir, exist_ok=True)

    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    base_filename_with_time = f"{base_filename}_{timestamp}"

    generated_images_base64 = []
    generated_filenames = []
    command_history = CommandHistory(app.config.get('DB_PATH'))

    try:
        input_images = []
        attachments_loaded = []

        if attachments:
            for attachment in attachments:
                print(attachment)
                if isinstance(attachment, dict) and 'path' in attachment:
                    image_path = attachment['path']
                    if os.path.exists(image_path):
                        try:
                            pil_img = Image.open(image_path)
                            input_images.append(pil_img)

                            with open(image_path, 'rb') as f:
                                img_data = f.read()
                            attachments_loaded.append({
                                "name": os.path.basename(image_path),
                                "type": "images",
                                "data": img_data,
                                "size": len(img_data)
                            })
                        except Exception as e:
                            print(f"Warning: Could not load attachment image {image_path}: {e}")

        images_list = gen_image(
            prompt,
            model=model_name,
            provider=provider_name,
            n_images=n,
            input_images=input_images if input_images else None
        )
        print(images_list)
        if not isinstance(images_list, list):
            images_list = [images_list] if images_list is not None else []

        generated_attachments = []
        for i, pil_image in enumerate(images_list):
            if isinstance(pil_image, Image.Image):
                filename = f"{base_filename_with_time}_{i+1:03d}.png" if n > 1 else f"{base_filename_with_time}.png"
                filepath = os.path.join(save_dir, filename)
                print(f'saved file to {filepath}')

                pil_image.save(filepath, format="PNG")
                generated_filenames.append(filepath)

                buffered = BytesIO()
                pil_image.save(buffered, format="PNG")
                img_data = buffered.getvalue()

                generated_attachments.append({
                    "name": filename,
                    "type": "images",
                    "data": img_data,
                    "size": len(img_data)
                })

                img_str = base64.b64encode(img_data).decode("utf-8")
                generated_images_base64.append(f"data:image/png;base64,{img_str}")
            else:
                print(f"Warning: gen_image returned non-PIL object ({type(pil_image)}). Skipping image conversion.")

        generation_id = generate_message_id()

        save_conversation_message(
            command_history,
            generation_id,
            "user",
            f"Generate {n} image(s): {prompt}",
            wd=save_dir,
            model=model_name,
            provider=provider_name,
            npc="vixynt",
            attachments=attachments_loaded,
            message_id=generation_id
        )

        response_message = f"Generated {len(generated_images_base64)} image(s) saved to {save_dir}"
        save_conversation_message(
            command_history,
            generation_id,
            "assistant",
            response_message,
            wd=save_dir,
            model=model_name,
            provider=provider_name,
            npc="vixynt",
            attachments=generated_attachments,
            message_id=generate_message_id()
        )

        return jsonify({
            "images": generated_images_base64,
            "filenames": generated_filenames,
            "generation_id": generation_id,
            "error": None
        })
    except Exception as e:
        print(f"Image generation error: {str(e)}")
        traceback.print_exc()
        return jsonify({"images": [], "filenames": [], "error": str(e)}), 500

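# --- Editor's note: illustrative sketch, not part of the package source. ---
# The endpoint above returns images both as saved file paths and as base64 data
# URLs; decoding the data URLs back into files looks like this (the base URL and
# model/provider values are assumptions):
def _sketch_decode_generated_images():
    import base64
    import requests

    resp = requests.post("http://localhost:5337/api/generate_images",
                         json={"prompt": "a watercolor fox", "n": 2,
                               "model": "dall-e-3", "provider": "openai"}).json()
    for i, data_url in enumerate(resp["images"]):
        b64 = data_url.split(",", 1)[1]  # strip the "data:image/png;base64," prefix
        with open(f"fox_{i}.png", "wb") as f:
            f.write(base64.b64decode(b64))
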
@app.route("/api/mcp_tools", methods=["GET"])
|
|
1890
|
+
def get_mcp_tools():
|
|
1891
|
+
"""
|
|
1892
|
+
API endpoint to retrieve the list of tools available from a given MCP server script.
|
|
1893
|
+
It will try to use an existing client from corca_states if available and matching,
|
|
1894
|
+
otherwise it creates a temporary client.
|
|
1895
|
+
"""
|
|
1896
|
+
server_path = request.args.get("mcpServerPath")
|
|
1897
|
+
conversation_id = request.args.get("conversationId")
|
|
1898
|
+
npc_name = request.args.get("npc")
|
|
1899
|
+
|
|
1900
|
+
if not server_path:
|
|
1901
|
+
return jsonify({"error": "mcpServerPath parameter is required."}), 400
|
|
1902
|
+
|
|
1903
|
+
|
|
1904
|
+
try:
|
|
1905
|
+
from npcsh.corca import MCPClientNPC
|
|
1906
|
+
except ImportError:
|
|
1907
|
+
return jsonify({"error": "MCP Client (npcsh.corca) not available. Ensure npcsh.corca is installed and importable."}), 500
|
|
1908
|
+
|
|
1909
|
+
temp_mcp_client = None
|
|
1910
|
+
try:
|
|
1911
|
+
|
|
1912
|
+
if conversation_id and npc_name and hasattr(app, 'corca_states'):
|
|
1913
|
+
state_key = f"{conversation_id}_{npc_name or 'default'}"
|
|
1914
|
+
if state_key in app.corca_states:
|
|
1915
|
+
existing_corca_state = app.corca_states[state_key]
|
|
1916
|
+
if hasattr(existing_corca_state, 'mcp_client') and existing_corca_state.mcp_client \
|
|
1917
|
+
and existing_corca_state.mcp_client.server_script_path == server_path:
|
|
1918
|
+
print(f"Using existing MCP client for {state_key} to fetch tools.")
|
|
1919
|
+
temp_mcp_client = existing_corca_state.mcp_client
|
|
1920
|
+
return jsonify({"tools": temp_mcp_client.available_tools_llm, "error": None})
|
|
1921
|
+
|
|
1922
|
+
|
|
1923
|
+
print(f"Creating a temporary MCP client to fetch tools for {server_path}.")
|
|
1924
|
+
temp_mcp_client = MCPClientNPC()
|
|
1925
|
+
if temp_mcp_client.connect_sync(server_path):
|
|
1926
|
+
return jsonify({"tools": temp_mcp_client.available_tools_llm, "error": None})
|
|
1927
|
+
else:
|
|
1928
|
+
return jsonify({"error": f"Failed to connect to MCP server at {server_path}."}), 500
|
|
1929
|
+
except FileNotFoundError as e:
|
|
1930
|
+
return jsonify({"error": f"MCP Server script not found: {e}"}), 404
|
|
1931
|
+
except ValueError as e:
|
|
1932
|
+
return jsonify({"error": f"Invalid MCP Server script: {e}"}), 400
|
|
1933
|
+
except Exception as e:
|
|
1934
|
+
print(f"Error getting MCP tools for {server_path}: {traceback.format_exc()}")
|
|
1935
|
+
return jsonify({"error": f"An unexpected error occurred: {e}"}), 500
|
|
1936
|
+
finally:
|
|
1937
|
+
|
|
1938
|
+
if temp_mcp_client and temp_mcp_client.session and (
|
|
1939
|
+
not (conversation_id and npc_name and hasattr(app, 'corca_states') and state_key in app.corca_states and getattr(app.corca_states[state_key], 'mcp_client', None) == temp_mcp_client)
|
|
1940
|
+
):
|
|
1941
|
+
print(f"Disconnecting temporary MCP client for {server_path}.")
|
|
1942
|
+
temp_mcp_client.disconnect_sync()
|
|
1943
|
+
|
|
1944
|
+
|
|
1945
|
+
@app.route("/api/image_models", methods=["GET"])
|
|
1946
|
+
def get_image_models_api():
|
|
1947
|
+
"""
|
|
1948
|
+
API endpoint to retrieve available image generation models.
|
|
1949
|
+
"""
|
|
1950
|
+
current_path = request.args.get("currentPath")
|
|
1951
|
+
try:
|
|
1952
|
+
image_models = get_available_image_models(current_path)
|
|
1953
|
+
return jsonify({"models": image_models, "error": None})
|
|
1954
|
+
except Exception as e:
|
|
1955
|
+
print(f"Error getting available image models: {str(e)}")
|
|
1956
|
+
traceback.print_exc()
|
|
1957
|
+
return jsonify({"models": [], "error": str(e)}), 500
|
|
1958
|
+
|
|
1959
|
+
|
|
1960
|
+
|
|
1961
|
+
|
|
1962
|
+
|
|
1963
|
+
|
|
1964
|
+
|
|
1965
|
+
@app.route("/api/stream", methods=["POST"])
|
|
1966
|
+
def stream():
|
|
1967
|
+
data = request.json
|
|
1968
|
+
|
|
1969
|
+
stream_id = data.get("streamId")
|
|
1970
|
+
if not stream_id:
|
|
1971
|
+
import uuid
|
|
1972
|
+
stream_id = str(uuid.uuid4())
|
|
1973
|
+
|
|
1974
|
+
with cancellation_lock:
|
|
1975
|
+
cancellation_flags[stream_id] = False
|
|
1976
|
+
print(f"Starting stream with ID: {stream_id}")
|
|
1977
|
+
|
|
1978
|
+
commandstr = data.get("commandstr")
|
|
1979
|
+
conversation_id = data.get("conversationId")
|
|
1980
|
+
model = data.get("model", None)
|
|
1981
|
+
provider = data.get("provider", None)
|
|
1982
|
+
if provider is None:
|
|
1983
|
+
provider = available_models.get(model)
|
|
1984
|
+
|
|
1985
|
+
npc_name = data.get("npc", None)
|
|
1986
|
+
npc_source = data.get("npcSource", "global")
|
|
1987
|
+
current_path = data.get("currentPath")
|
|
1988
|
+
|
|
1989
|
+
if current_path:
|
|
1990
|
+
loaded_vars = load_project_env(current_path)
|
|
1991
|
+
print(f"Loaded project env variables for stream request: {list(loaded_vars.keys())}")
|
|
1992
|
+
|
|
1993
|
+
npc_object = None
|
|
1994
|
+
team_object = None
|
|
1995
|
+
team = None
|
|
1996
|
+
if npc_name:
|
|
1997
|
+
if hasattr(app, 'registered_teams'):
|
|
1998
|
+
for team_name, team_object in app.registered_teams.items():
|
|
1999
|
+
if hasattr(team_object, 'npcs'):
|
|
2000
|
+
team_npcs = team_object.npcs
|
|
2001
|
+
if isinstance(team_npcs, dict):
|
|
2002
|
+
if npc_name in team_npcs:
|
|
2003
|
+
npc_object = team_npcs[npc_name]
|
|
2004
|
+
team = team_name
|
|
2005
|
+
npc_object.team = team_object
|
|
2006
|
+
print(f"Found NPC {npc_name} in registered team {team_name}")
|
|
2007
|
+
break
|
|
2008
|
+
elif isinstance(team_npcs, list):
|
|
2009
|
+
for npc in team_npcs:
|
|
2010
|
+
if hasattr(npc, 'name') and npc.name == npc_name:
|
|
2011
|
+
npc_object = npc
|
|
2012
|
+
team = team_name
|
|
2013
|
+
npc_object.team = team_object
|
|
2014
|
+
print(f"Found NPC {npc_name} in registered team {team_name}")
|
|
2015
|
+
break
|
|
2016
|
+
|
|
2017
|
+
if not npc_object and hasattr(team_object, 'forenpc') and hasattr(team_object.forenpc, 'name'):
|
|
2018
|
+
if team_object.forenpc.name == npc_name:
|
|
2019
|
+
npc_object = team_object.forenpc
|
|
2020
|
+
npc_object.team = team_object
|
|
2021
|
+
|
|
2022
|
+
team = team_name
|
|
2023
|
+
print(f"Found NPC {npc_name} as forenpc in team {team_name}")
|
|
2024
|
+
break
|
|
2025
|
+
|
|
2026
|
+
|
|
2027
|
+
if npc_object:
|
|
2028
|
+
break
|
|
2029
|
+
|
|
2030
|
+
|
|
2031
|
+
if not npc_object and hasattr(app, 'registered_npcs') and npc_name in app.registered_npcs:
|
|
2032
|
+
npc_object = app.registered_npcs[npc_name]
|
|
2033
|
+
print(f"Found NPC {npc_name} in registered NPCs (no specific team)")
|
|
2034
|
+
team_object = Team(team_path=npc_object.npc_directory, db_conn=db_conn)
|
|
2035
|
+
npc_object.team = team_object
|
|
2036
|
+
if not npc_object:
|
|
2037
|
+
db_conn = get_db_connection()
|
|
2038
|
+
npc_object = load_npc_by_name_and_source(npc_name,
|
|
2039
|
+
npc_source,
|
|
2040
|
+
db_conn,
|
|
2041
|
+
current_path)
|
|
2042
|
+
if not npc_object and npc_source == 'project':
|
|
2043
|
+
print(f"NPC {npc_name} not found in project directory, trying global...")
|
|
2044
|
+
npc_object = load_npc_by_name_and_source(npc_name, 'global', db_conn)
|
|
2045
|
+
if npc_object and hasattr(npc_object, 'npc_directory') and npc_object.npc_directory:
|
|
2046
|
+
team_directory = npc_object.npc_directory
|
|
2047
|
+
|
|
2048
|
+
if os.path.exists(team_directory):
|
|
2049
|
+
team_object = Team(team_path=team_directory, db_conn=db_conn)
|
|
2050
|
+
print('team', team_object)
|
|
2051
|
+
|
|
2052
|
+
else:
|
|
2053
|
+
team_object = Team(npcs=[npc_object], db_conn=db_conn)
|
|
2054
|
+
team_object.name = os.path.basename(team_directory) if team_directory else f"{npc_name}_team"
|
|
2055
|
+
npc_object.team = team_object
|
|
2056
|
+
print('team', team_object)
|
|
2057
|
+
team_name = team_object.name
|
|
2058
|
+
|
|
2059
|
+
if not hasattr(app, 'registered_teams'):
|
|
2060
|
+
app.registered_teams = {}
|
|
2061
|
+
app.registered_teams[team_name] = team_object
|
|
2062
|
+
|
|
2063
|
+
team = team_name
|
|
2064
|
+
|
|
2065
|
+
print(f"Created and registered team '{team_name}' with NPC {npc_name}")
|
|
2066
|
+
|
|
2067
|
+
if npc_object:
|
|
2068
|
+
npc_object.team = team_object
|
|
2069
|
+
|
|
2070
|
+
print(f"Successfully loaded NPC {npc_name} from {npc_source} directory")
|
|
2071
|
+
else:
|
|
2072
|
+
print(f"Warning: Could not load NPC {npc_name}")
|
|
2073
|
+
if npc_object:
|
|
2074
|
+
print(f"Successfully loaded NPC {npc_name} from {npc_source} directory")
|
|
2075
|
+
else:
|
|
2076
|
+
print(f"Warning: Could not load NPC {npc_name}")
|
|
2077
|
+
|
|
2078
|
+
|
|
2079
|
+
|
|
2080
|
+
|
|
2081
|
+
attachments = data.get("attachments", [])
|
|
2082
|
+
command_history = CommandHistory(app.config.get('DB_PATH'))
|
|
2083
|
+
images = []
|
|
2084
|
+
attachments_for_db = []
|
|
2085
|
+
attachment_paths_for_llm = []
|
|
2086
|
+
|
|
2087
|
+
message_id = generate_message_id()
|
|
2088
|
+
if attachments:
|
|
2089
|
+
attachment_dir = os.path.expanduser(f"~/.npcsh/attachments/{conversation_id+message_id}/")
|
|
2090
|
+
os.makedirs(attachment_dir, exist_ok=True)
|
|
2091
|
+
|
|
2092
|
+
for attachment in attachments:
|
|
2093
|
+
try:
|
|
2094
|
+
file_name = attachment["name"]
|
|
2095
|
+
|
|
2096
|
+
extension = file_name.split(".")[-1].upper() if "." in file_name else ""
|
|
2097
|
+
extension_mapped = extension_map.get(extension, "others")
|
|
2098
|
+
|
|
2099
|
+
save_path = os.path.join(attachment_dir, file_name)
|
|
2100
|
+
|
|
2101
|
+
if "data" in attachment and attachment["data"]:
|
|
2102
|
+
decoded_data = base64.b64decode(attachment["data"])
|
|
2103
|
+
with open(save_path, "wb") as f:
|
|
2104
|
+
f.write(decoded_data)
|
|
2105
|
+
|
|
2106
|
+
elif "path" in attachment and attachment["path"]:
|
|
2107
|
+
shutil.copy(attachment["path"], save_path)
|
|
2108
|
+
|
|
2109
|
+
else:
|
|
2110
|
+
continue
|
|
2111
|
+
|
|
2112
|
+
attachment_paths_for_llm.append(save_path)
|
|
2113
|
+
|
|
2114
|
+
if extension_mapped == "images":
|
|
2115
|
+
images.append(save_path)
|
|
2116
|
+
|
|
2117
|
+
with open(save_path, "rb") as f:
|
|
2118
|
+
file_content_bytes = f.read()
|
|
2119
|
+
|
|
2120
|
+
attachments_for_db.append({
|
|
2121
|
+
"name": file_name,
|
|
2122
|
+
"path": save_path,
|
|
2123
|
+
"type": extension_mapped,
|
|
2124
|
+
"data": file_content_bytes,
|
|
2125
|
+
"size": os.path.getsize(save_path)
|
|
2126
|
+
})
|
|
2127
|
+
|
|
2128
|
+
except Exception as e:
|
|
2129
|
+
print(f"Error processing attachment {attachment.get('name', 'N/A')}: {e}")
|
|
2130
|
+
traceback.print_exc()
|
|
2131
|
+
messages = fetch_messages_for_conversation(conversation_id)
|
|
2132
|
+
if len(messages) == 0 and npc_object is not None:
|
|
2133
|
+
messages = [{'role': 'system',
|
|
2134
|
+
'content': npc_object.get_system_prompt()}]
|
|
2135
|
+
elif len(messages) > 0 and messages[0]['role'] != 'system' and npc_object is not None:
|
|
2136
|
+
messages.insert(0, {'role': 'system',
|
|
2137
|
+
'content': npc_object.get_system_prompt()})
|
|
2138
|
+
elif len(messages) > 0 and npc_object is not None:
|
|
2139
|
+
messages[0]['content'] = npc_object.get_system_prompt()
|
|
2140
|
+
if npc_object is not None and messages and messages[0]['role'] == 'system':
|
|
2141
|
+
messages[0]['content'] = npc_object.get_system_prompt()
|
|
2142
|
+
tool_args = {}
|
|
2143
|
+
if npc_object is not None:
|
|
2144
|
+
if hasattr(npc_object, 'tools') and npc_object.tools:
|
|
2145
|
+
if isinstance(npc_object.tools, list) and callable(npc_object.tools[0]):
|
|
2146
|
+
tools_schema, tool_map = auto_tools(npc_object.tools)
|
|
2147
|
+
tool_args['tools'] = tools_schema
|
|
2148
|
+
tool_args['tool_map'] = tool_map
|
|
2149
|
+
else:
|
|
2150
|
+
tool_args['tools'] = npc_object.tools
|
|
2151
|
+
if hasattr(npc_object, 'tool_map') and npc_object.tool_map:
|
|
2152
|
+
tool_args['tool_map'] = npc_object.tool_map
|
|
2153
|
+
elif hasattr(npc_object, 'tool_map') and npc_object.tool_map:
|
|
2154
|
+
tool_args['tool_map'] = npc_object.tool_map
|
|
2155
|
+
if 'tools' in tool_args and tool_args['tools']:
|
|
2156
|
+
tool_args['tool_choice'] = {"type": "auto"}
|
|
2157
|
+
|
|
2158
|
+
|
|
2159
|
+
exe_mode = data.get('executionMode','chat')
|
|
2160
|
+
|
|
2161
|
+
if exe_mode == 'chat':
|
|
2162
|
+
stream_response = get_llm_response(
|
|
2163
|
+
commandstr,
|
|
2164
|
+
messages=messages,
|
|
2165
|
+
images=images,
|
|
2166
|
+
model=model,
|
|
2167
|
+
provider=provider,
|
|
2168
|
+
npc=npc_object,
|
|
2169
|
+
api_url = npc_object.api_url if npc_object.api_url else None,
|
|
2170
|
+
team=team_object,
|
|
2171
|
+
stream=True,
|
|
2172
|
+
attachments=attachment_paths_for_llm,
|
|
2173
|
+
auto_process_tool_calls=True,
|
|
2174
|
+
**tool_args
|
|
2175
|
+
)
|
|
2176
|
+
messages = stream_response.get('messages', messages)
|
|
2177
|
+
|
|
2178
|
+
elif exe_mode == 'npcsh':
|
|
2179
|
+
from npcsh._state import execute_command, initial_state
|
|
2180
|
+
from npcsh.routes import router
|
|
2181
|
+
initial_state.model = model
|
|
2182
|
+
initial_state.provider = provider
|
|
2183
|
+
initial_state.npc = npc_object
|
|
2184
|
+
initial_state.team = team_object
|
|
2185
|
+
initial_state.messages = messages
|
|
2186
|
+
initial_state.command_history = command_history
|
|
2187
|
+
|
|
2188
|
+
state, stream_response = execute_command(
|
|
2189
|
+
commandstr,
|
|
2190
|
+
initial_state, router=router)
|
|
2191
|
+
messages = state.messages
|
|
2192
|
+
|
|
2193
|
+
elif exe_mode == 'guac':
|
|
2194
|
+
from npcsh.guac import execute_guac_command
|
|
2195
|
+
from npcsh.routes import router
|
|
2196
|
+
from npcsh._state import initial_state
|
|
2197
|
+
from pathlib import Path
|
|
2198
|
+
import pandas as pd, numpy as np, matplotlib.pyplot as plt
|
|
2199
|
+
|
|
2200
|
+
if not hasattr(app, 'guac_locals'):
|
|
2201
|
+
app.guac_locals = {}
|
|
2202
|
+
|
|
2203
|
+
if conversation_id not in app.guac_locals:
|
|
2204
|
+
app.guac_locals[conversation_id] = {
|
|
2205
|
+
'pd': pd,
|
|
2206
|
+
'np': np,
|
|
2207
|
+
'plt': plt,
|
|
2208
|
+
'datetime': datetime,
|
|
2209
|
+
'Path': Path,
|
|
2210
|
+
'os': os,
|
|
2211
|
+
'sys': sys,
|
|
2212
|
+
'json': json
|
|
2213
|
+
}
|
|
2214
|
+
|
|
2215
|
+
initial_state.model = model
|
|
2216
|
+
initial_state.provider = provider
|
|
2217
|
+
initial_state.npc = npc_object
|
|
2218
|
+
initial_state.team = team_object
|
|
2219
|
+
initial_state.messages = messages
|
|
2220
|
+
initial_state.command_history = command_history
|
|
2221
|
+
|
|
2222
|
+
state, stream_response = execute_guac_command(
|
|
2223
|
+
commandstr,
|
|
2224
|
+
initial_state,
|
|
2225
|
+
app.guac_locals[conversation_id],
|
|
2226
|
+
"guac",
|
|
2227
|
+
Path.cwd() / "npc_team",
|
|
2228
|
+
router
|
|
2229
|
+
)
|
|
2230
|
+
messages = state.messages
|
|
2231
|
+
|
|
2232
|
+
elif exe_mode == 'corca':
|
|
2233
|
+
|
|
2234
|
+
try:
|
|
2235
|
+
from npcsh.corca import execute_command_corca, create_corca_state_and_mcp_client, MCPClientNPC
|
|
2236
|
+
from npcsh._state import initial_state as state
|
|
2237
|
+
except ImportError:
|
|
2238
|
+
|
|
2239
|
+
print("ERROR: npcsh.corca or MCPClientNPC not found. Corca mode is disabled.", file=sys.stderr)
|
|
2240
|
+
state = None
|
|
2241
|
+
stream_response = {"output": "Corca mode is not available due to missing dependencies.", "messages": messages}
|
|
2242
|
+
|
|
2243
|
+
|
|
2244
|
+
if state is not None:
|
|
2245
|
+
|
|
2246
|
+
mcp_server_path_from_request = data.get("mcpServerPath")
|
|
2247
|
+
selected_mcp_tools_from_request = data.get("selectedMcpTools", [])
|
|
2248
|
+
|
|
2249
|
+
|
|
2250
|
+
effective_mcp_server_path = mcp_server_path_from_request
|
|
2251
|
+
if not effective_mcp_server_path and team_object and hasattr(team_object, 'team_ctx') and team_object.team_ctx:
|
|
2252
|
+
mcp_servers_list = team_object.team_ctx.get('mcp_servers', [])
|
|
2253
|
+
if mcp_servers_list and isinstance(mcp_servers_list, list):
|
|
2254
|
+
first_server_obj = next((s for s in mcp_servers_list if isinstance(s, dict) and 'value' in s), None)
|
|
2255
|
+
if first_server_obj:
|
|
2256
|
+
effective_mcp_server_path = first_server_obj['value']
|
|
2257
|
+
elif isinstance(team_object.team_ctx.get('mcp_server'), str):
|
|
2258
|
+
effective_mcp_server_path = team_object.team_ctx.get('mcp_server')
|
|
2259
|
+
|
|
2260
|
+
|
|
2261
|
+
if not hasattr(app, 'corca_states'):
|
|
2262
|
+
app.corca_states = {}
|
|
2263
|
+
|
|
2264
|
+
state_key = f"{conversation_id}_{npc_name or 'default'}"
|
|
2265
|
+
|
|
2266
|
+
corca_state = None
|
|
2267
|
+
if state_key not in app.corca_states:
|
|
2268
|
+
|
|
2269
|
+
corca_state = create_corca_state_and_mcp_client(
|
|
2270
|
+
conversation_id=conversation_id,
|
|
2271
|
+
command_history=command_history,
|
|
2272
|
+
npc=npc_object,
|
|
2273
|
+
team=team_object,
|
|
2274
|
+
current_path=current_path,
|
|
2275
|
+
mcp_server_path=effective_mcp_server_path
|
|
2276
|
+
)
|
|
2277
|
+
app.corca_states[state_key] = corca_state
|
|
2278
|
+
else:
|
|
2279
|
+
corca_state = app.corca_states[state_key]
|
|
2280
|
+
corca_state.npc = npc_object
|
|
2281
|
+
corca_state.team = team_object
|
|
2282
|
+
corca_state.current_path = current_path
|
|
2283
|
+
corca_state.messages = messages
|
|
2284
|
+
corca_state.command_history = command_history
|
|
2285
|
+
|
|
2286
|
+
|
|
2287
|
+
current_mcp_client_path = getattr(corca_state.mcp_client, 'server_script_path', None)
|
|
2288
|
+
|
|
2289
|
+
if effective_mcp_server_path != current_mcp_client_path:
|
|
2290
|
+
print(f"MCP server path changed/updated for {state_key}. Disconnecting old client (if any) and reconnecting to {effective_mcp_server_path or 'None'}.")
|
|
2291
|
+
if corca_state.mcp_client and corca_state.mcp_client.session:
|
|
2292
|
+
corca_state.mcp_client.disconnect_sync()
|
|
2293
|
+
corca_state.mcp_client = None
|
|
2294
|
+
|
|
2295
|
+
if effective_mcp_server_path:
|
|
2296
|
+
new_mcp_client = MCPClientNPC()
|
|
2297
|
+
if new_mcp_client.connect_sync(effective_mcp_server_path):
|
|
2298
|
+
corca_state.mcp_client = new_mcp_client
|
|
2299
|
+
print(f"Successfully reconnected MCP client for {state_key} to {effective_mcp_server_path}.")
|
|
2300
|
+
else:
|
|
2301
|
+
print(f"Failed to reconnect MCP client for {state_key} to {effective_mcp_server_path}. Corca will have no tools.")
|
|
2302
|
+
corca_state.mcp_client = None
|
|
2303
|
+
|
|
2304
|
+
|
|
2305
|
+
|
|
2306
|
+
state, stream_response = execute_command_corca(
|
|
2307
|
+
commandstr,
|
|
2308
|
+
corca_state,
|
|
2309
|
+
command_history,
|
|
2310
|
+
selected_mcp_tools_names=selected_mcp_tools_from_request
|
|
2311
|
+
)
|
|
2312
|
+
|
|
2313
|
+
|
|
2314
|
+
app.corca_states[state_key] = state
|
|
2315
|
+
messages = state.messages
|
|
2316
|
+
|
|
2317
|
+
|
|
2318
|
+
user_message_filled = ''
|
|
2319
|
+
|
|
2320
|
+
if isinstance(messages[-1].get('content'), list):
|
|
2321
|
+
for cont in messages[-1].get('content'):
|
|
2322
|
+
txt = cont.get('text')
|
|
2323
|
+
if txt is not None:
|
|
2324
|
+
user_message_filled +=txt
|
|
2325
|
+
save_conversation_message(
|
|
2326
|
+
command_history,
|
|
2327
|
+
conversation_id,
|
|
2328
|
+
"user",
|
|
2329
|
+
user_message_filled if len(user_message_filled)>0 else commandstr,
|
|
2330
|
+
wd=current_path,
|
|
2331
|
+
model=model,
|
|
2332
|
+
provider=provider,
|
|
2333
|
+
npc=npc_name,
|
|
2334
|
+
team=team,
|
|
2335
|
+
attachments=attachments_for_db,
|
|
2336
|
+
message_id=message_id,
|
|
2337
|
+
)
|
|
2338
|
+
|
|
2339
|
+
|
|
2340
|
+
message_id = generate_message_id()
|
|
2341
|
+
|
|
2342
|
+
def event_stream(current_stream_id):
|
|
2343
|
+
complete_response = []
|
|
2344
|
+
dot_count = 0
|
|
2345
|
+
interrupted = False
|
|
2346
|
+
tool_call_data = {"id": None, "function_name": None, "arguments": ""}
|
|
2347
|
+
|
|
2348
|
+
try:
|
|
2349
|
+
if isinstance(stream_response, str) :
|
|
2350
|
+
print('stream a str and not a gen')
|
|
2351
|
+
chunk_data = {
|
|
2352
|
+
"id": None,
|
|
2353
|
+
"object": None,
|
|
2354
|
+
"created": datetime.datetime.now().strftime('YYYY-DD-MM-HHMMSS'),
|
|
2355
|
+
"model": model,
|
|
2356
|
+
"choices": [
|
|
2357
|
+
{
|
|
2358
|
+
"index": 0,
|
|
2359
|
+
"delta":
|
|
2360
|
+
{
|
|
2361
|
+
"content": stream_response,
|
|
2362
|
+
"role": "assistant"
|
|
2363
|
+
},
|
|
2364
|
+
"finish_reason": 'done'
|
|
2365
|
+
}
|
|
2366
|
+
]
|
|
2367
|
+
}
|
|
2368
|
+
yield f"data: {json.dumps(chunk_data)}"
|
|
2369
|
+
return
|
|
2370
|
+
elif isinstance(stream_response, dict) and 'output' in stream_response and isinstance(stream_response.get('output'), str):
|
|
2371
|
+
print('stream a str and not a gen')
|
|
2372
|
+
chunk_data = {
|
|
2373
|
+
"id": None,
|
|
2374
|
+
"object": None,
|
|
2375
|
+
"created": datetime.datetime.now().strftime('YYYY-DD-MM-HHMMSS'),
|
|
2376
|
+
"model": model,
|
|
2377
|
+
"choices": [
|
|
2378
|
+
{
|
|
2379
|
+
"index": 0,
|
|
2380
|
+
"delta":
|
|
2381
|
+
{
|
|
2382
|
+
"content": stream_response.get('output') ,
|
|
2383
|
+
"role": "assistant"
|
|
2384
|
+
},
|
|
2385
|
+
"finish_reason": 'done'
|
|
2386
|
+
}
|
|
2387
|
+
]
|
|
2388
|
+
}
|
|
2389
|
+
yield f"data: {json.dumps(chunk_data)}"
|
|
2390
|
+
return
|
|
2391
|
+
for response_chunk in stream_response.get('response', stream_response.get('output')):
|
|
2392
|
+
with cancellation_lock:
|
|
2393
|
+
if cancellation_flags.get(current_stream_id, False):
|
|
2394
|
+
print(f"Cancellation flag triggered for {current_stream_id}. Breaking loop.")
|
|
2395
|
+
interrupted = True
|
|
2396
|
+
break
|
|
2397
|
+
|
|
2398
|
+
print('.', end="", flush=True)
|
|
2399
|
+
dot_count += 1
|
|
2400
|
+
if "hf.co" in model or provider == 'ollama' and 'gpt-oss' not in model:
|
|
2401
|
+
chunk_content = response_chunk["message"]["content"] if "message" in response_chunk and "content" in response_chunk["message"] else ""
|
|
2402
|
+
if "message" in response_chunk and "tool_calls" in response_chunk["message"]:
|
|
2403
|
+
for tool_call in response_chunk["message"]["tool_calls"]:
|
|
2404
|
+
if "id" in tool_call:
|
|
2405
|
+
tool_call_data["id"] = tool_call["id"]
|
|
2406
|
+
if "function" in tool_call:
|
|
2407
|
+
if "name" in tool_call["function"]:
|
|
2408
|
+
tool_call_data["function_name"] = tool_call["function"]["name"]
|
|
2409
|
+
if "arguments" in tool_call["function"]:
|
|
2410
|
+
arg_val = tool_call["function"]["arguments"]
|
|
2411
|
+
if isinstance(arg_val, dict):
|
|
2412
|
+
arg_val = json.dumps(arg_val)
|
|
2413
|
+
tool_call_data["arguments"] += arg_val
|
|
2414
|
+
if chunk_content:
|
|
2415
|
+
complete_response.append(chunk_content)
|
|
2416
|
+
chunk_data = {
|
|
2417
|
+
"id": None, "object": None,
|
|
2418
|
+
"created": response_chunk["created_at"] or datetime.datetime.now(),
|
|
2419
|
+
"model": response_chunk["model"],
|
|
2420
|
+
"choices": [{"index": 0, "delta": {"content": chunk_content, "role": response_chunk["message"]["role"]}, "finish_reason": response_chunk.get("done_reason")}]
|
|
2421
|
+
}
|
|
2422
|
+
yield f"data: {json.dumps(chunk_data)}\n\n"
|
|
2423
|
+
else:
|
|
2424
|
+
chunk_content = ""
|
|
2425
|
+
reasoning_content = ""
|
|
2426
|
+
for choice in response_chunk.choices:
|
|
2427
|
+
if hasattr(choice.delta, "tool_calls") and choice.delta.tool_calls:
|
|
2428
|
+
for tool_call in choice.delta.tool_calls:
|
|
2429
|
+
if tool_call.id:
|
|
2430
|
+
tool_call_data["id"] = tool_call.id
|
|
2431
|
+
if tool_call.function:
|
|
2432
|
+
if hasattr(tool_call.function, "name") and tool_call.function.name:
|
|
2433
|
+
tool_call_data["function_name"] = tool_call.function.name
|
|
2434
|
+
if hasattr(tool_call.function, "arguments") and tool_call.function.arguments:
|
|
2435
|
+
tool_call_data["arguments"] += tool_call.function.arguments
|
|
2436
|
+
for choice in response_chunk.choices:
|
|
2437
|
+
if hasattr(choice.delta, "reasoning_content"):
|
|
2438
|
+
reasoning_content += choice.delta.reasoning_content
|
|
2439
|
+
chunk_content = "".join(choice.delta.content for choice in response_chunk.choices if choice.delta.content is not None)
|
|
2440
|
+
if chunk_content:
|
|
2441
|
+
complete_response.append(chunk_content)
|
|
2442
|
+
chunk_data = {
|
|
2443
|
+
"id": response_chunk.id, "object": response_chunk.object, "created": response_chunk.created, "model": response_chunk.model,
|
|
2444
|
+
"choices": [{"index": choice.index, "delta": {"content": choice.delta.content, "role": choice.delta.role, "reasoning_content": reasoning_content if hasattr(choice.delta, "reasoning_content") else None}, "finish_reason": choice.finish_reason} for choice in response_chunk.choices]
|
|
2445
|
+
}
|
|
2446
|
+
yield f"data: {json.dumps(chunk_data)}\n\n"
|
|
2447
|
+
|
|
2448
|
+
except Exception as e:
|
|
2449
|
+
print(f"\nAn exception occurred during streaming for {current_stream_id}: {e}")
|
|
2450
|
+
traceback.print_exc()
|
|
2451
|
+
interrupted = True
|
|
2452
|
+
|
|
2453
|
+
finally:
|
|
2454
|
+
print(f"\nStream {current_stream_id} finished. Interrupted: {interrupted}")
|
|
2455
|
+
print('\r' + ' ' * dot_count*2 + '\r', end="", flush=True)
|
|
2456
|
+
|
|
2457
|
+
final_response_text = ''.join(complete_response)
|
|
2458
|
+
yield f"data: {json.dumps({'type': 'message_stop'})}\n\n"
|
|
2459
|
+
|
|
2460
|
+
npc_name_to_save = npc_object.name if npc_object else ''
|
|
2461
|
+
save_conversation_message(
|
|
2462
|
+
command_history,
|
|
2463
|
+
conversation_id,
|
|
2464
|
+
"assistant",
|
|
2465
|
+
final_response_text,
|
|
2466
|
+
wd=current_path,
|
|
2467
|
+
model=model,
|
|
2468
|
+
provider=provider,
|
|
2469
|
+
npc=npc_name_to_save,
|
|
2470
|
+
team=team,
|
|
2471
|
+
message_id=message_id,
|
|
2472
|
+
)
|
|
2473
|
+
|
|
2474
|
+
with cancellation_lock:
|
|
2475
|
+
if current_stream_id in cancellation_flags:
|
|
2476
|
+
del cancellation_flags[current_stream_id]
|
|
2477
|
+
print(f"Cleaned up cancellation flag for stream ID: {current_stream_id}")
|
|
2478
|
+
|
|
2479
|
+
return Response(event_stream(stream_id), mimetype="text/event-stream")
|
|
2480
|
+
|
|
2481
|
+
|
|
2482
|
+
|
|
2483
|
+
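# --- Editor's note: illustrative sketch, not part of the package source. ---
# /api/stream replies with server-sent events: each line is "data: {...}" holding
# an OpenAI-style chunk, followed by a final {"type": "message_stop"} marker. A
# minimal consumer (base URL and request values are assumptions):
def _sketch_consume_stream():
    import json
    import requests

    resp = requests.post("http://localhost:5337/api/stream",
                         json={"commandstr": "hello", "conversationId": "conv_123",
                               "model": "llama3.2", "provider": "ollama",
                               "executionMode": "chat"},
                         stream=True)
    for line in resp.iter_lines(decode_unicode=True):
        if not line or not line.startswith("data: "):
            continue
        chunk = json.loads(line[len("data: "):])
        if chunk.get("type") == "message_stop":
            break
        for choice in chunk.get("choices", []):
            print(choice["delta"].get("content") or "", end="", flush=True)
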
@app.route("/api/memory/approve", methods=["POST"])
|
|
2484
|
+
def approve_memories():
|
|
2485
|
+
try:
|
|
2486
|
+
data = request.json
|
|
2487
|
+
approvals = data.get("approvals", [])
|
|
2488
|
+
|
|
2489
|
+
command_history = CommandHistory(app.config.get('DB_PATH'))
|
|
2490
|
+
|
|
2491
|
+
for approval in approvals:
|
|
2492
|
+
command_history.update_memory_status(
|
|
2493
|
+
approval['memory_id'],
|
|
2494
|
+
approval['decision'],
|
|
2495
|
+
approval.get('final_memory')
|
|
2496
|
+
)
|
|
2497
|
+
|
|
2498
|
+
return jsonify({"success": True, "processed": len(approvals)})
|
|
2499
|
+
|
|
2500
|
+
except Exception as e:
|
|
2501
|
+
return jsonify({"error": str(e)}), 500
|
|
2502
|
+
|
|
2503
|
+
|
|
2504
|
+
|
|
2505
|
+
|
|
2506
|
+
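# --- Editor's note: illustrative sketch, not part of the package source. ---
# The approval endpoint takes a batch of decisions; each entry names a memory id,
# a decision, and optionally edited memory text. The base URL, ids, and the
# decision strings shown here are placeholders, since update_memory_status's
# accepted values are defined elsewhere in the package.
def _sketch_approve_memories():
    import requests

    payload = {"approvals": [
        {"memory_id": 42, "decision": "approved"},
        {"memory_id": 43, "decision": "edited", "final_memory": "User prefers tabs."},
    ]}
    return requests.post("http://localhost:5337/api/memory/approve", json=payload).json()
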
@app.route("/api/execute", methods=["POST"])
|
|
2507
|
+
def execute():
|
|
2508
|
+
data = request.json
|
|
2509
|
+
|
|
2510
|
+
|
|
2511
|
+
stream_id = data.get("streamId")
|
|
2512
|
+
if not stream_id:
|
|
2513
|
+
import uuid
|
|
2514
|
+
stream_id = str(uuid.uuid4())
|
|
2515
|
+
|
|
2516
|
+
|
|
2517
|
+
with cancellation_lock:
|
|
2518
|
+
cancellation_flags[stream_id] = False
|
|
2519
|
+
print(f"Starting execute stream with ID: {stream_id}")
|
|
2520
|
+
|
|
2521
|
+
|
|
2522
|
+
commandstr = data.get("commandstr")
|
|
2523
|
+
conversation_id = data.get("conversationId")
|
|
2524
|
+
model = data.get("model", 'llama3.2')
|
|
2525
|
+
provider = data.get("provider", 'ollama')
|
|
2526
|
+
if provider is None:
|
|
2527
|
+
provider = available_models.get(model)
|
|
2528
|
+
|
|
2529
|
+
|
|
2530
|
+
npc_name = data.get("npc", "sibiji")
|
|
2531
|
+
npc_source = data.get("npcSource", "global")
|
|
2532
|
+
team = data.get("team", None)
|
|
2533
|
+
current_path = data.get("currentPath")
|
|
2534
|
+
|
|
2535
|
+
if current_path:
|
|
2536
|
+
loaded_vars = load_project_env(current_path)
|
|
2537
|
+
print(f"Loaded project env variables for stream request: {list(loaded_vars.keys())}")
|
|
2538
|
+
|
|
2539
|
+
npc_object = None
|
|
2540
|
+
team_object = None
|
|
2541
|
+
|
|
2542
|
+
|
|
2543
|
+
if team:
|
|
2544
|
+
print(team)
|
|
2545
|
+
if hasattr(app, 'registered_teams') and team in app.registered_teams:
|
|
2546
|
+
team_object = app.registered_teams[team]
|
|
2547
|
+
print(f"Using registered team: {team}")
|
|
2548
|
+
else:
|
|
2549
|
+
print(f"Warning: Team {team} not found in registered teams")
|
|
2550
|
+
|
|
2551
|
+
|
|
2552
|
+
if npc_name:
|
|
2553
|
+
|
|
2554
|
+
if team and hasattr(app, 'registered_teams') and team in app.registered_teams:
|
|
2555
|
+
team_object = app.registered_teams[team]
|
|
2556
|
+
print('team', team_object)
|
|
2557
|
+
|
|
2558
|
+
if hasattr(team_object, 'npcs'):
|
|
2559
|
+
team_npcs = team_object.npcs
|
|
2560
|
+
if isinstance(team_npcs, dict):
|
|
2561
|
+
if npc_name in team_npcs:
|
|
2562
|
+
npc_object = team_npcs[npc_name]
|
|
2563
|
+
print(f"Found NPC {npc_name} in registered team {team}")
|
|
2564
|
+
elif isinstance(team_npcs, list):
|
|
2565
|
+
for npc in team_npcs:
|
|
2566
|
+
if hasattr(npc, 'name') and npc.name == npc_name:
|
|
2567
|
+
npc_object = npc
|
|
2568
|
+
print(f"Found NPC {npc_name} in registered team {team}")
|
|
2569
|
+
break
|
|
2570
|
+
|
|
2571
|
+
if not npc_object and hasattr(team_object, 'forenpc') and hasattr(team_object.forenpc, 'name'):
|
|
2572
|
+
if team_object.forenpc.name == npc_name:
|
|
2573
|
+
npc_object = team_object.forenpc
|
|
2574
|
+
print(f"Found NPC {npc_name} as forenpc in team {team}")
|
|
2575
|
+
|
|
2576
|
+
|
|
2577
|
+
if not npc_object and hasattr(app, 'registered_npcs') and npc_name in app.registered_npcs:
|
|
2578
|
+
npc_object = app.registered_npcs[npc_name]
|
|
2579
|
+
print(f"Found NPC {npc_name} in registered NPCs")
|
|
2580
|
+
|
|
2581
|
+
|
|
2582
|
+
if not npc_object:
|
|
2583
|
+
db_conn = get_db_connection()
|
|
2584
|
+
npc_object = load_npc_by_name_and_source(npc_name, npc_source, db_conn, current_path)
|
|
2585
|
+
|
|
2586
|
+
if not npc_object and npc_source == 'project':
|
|
2587
|
+
print(f"NPC {npc_name} not found in project directory, trying global...")
|
|
2588
|
+
npc_object = load_npc_by_name_and_source(npc_name, 'global', db_conn)
|
|
2589
|
+
|
|
2590
|
+
if npc_object:
|
|
2591
|
+
print(f"Successfully loaded NPC {npc_name} from {npc_source} directory")
|
|
2592
|
+
else:
|
|
2593
|
+
print(f"Warning: Could not load NPC {npc_name}")
|
|
2594
|
+
|
|
2595
|
+
attachments = data.get("attachments", [])
|
|
2596
|
+
command_history = CommandHistory(app.config.get('DB_PATH'))
|
|
2597
|
+
images = []
|
|
2598
|
+
attachments_loaded = []
|
|
2599
|
+
|
|
2600
|
+
|
|
2601
|
+
if attachments:
|
|
2602
|
+
for attachment in attachments:
|
|
2603
|
+
extension = attachment["name"].split(".")[-1]
|
|
2604
|
+
extension_mapped = extension_map.get(extension.upper(), "others")
|
|
2605
|
+
file_path = os.path.expanduser("~/.npcsh/" + extension_mapped + "/" + attachment["name"])
|
|
2606
|
+
if extension_mapped == "images":
|
|
2607
|
+
ImageFile.LOAD_TRUNCATED_IMAGES = True
|
|
2608
|
+
img = Image.open(attachment["path"])
|
|
2609
|
+
img_byte_arr = BytesIO()
|
|
2610
|
+
img.save(img_byte_arr, format="PNG")
|
|
2611
|
+
img_byte_arr.seek(0)
|
|
2612
|
+
img.save(file_path, optimize=True, quality=50)
|
|
2613
|
+
images.append(file_path)
|
|
2614
|
+
attachments_loaded.append({
|
|
2615
|
+
"name": attachment["name"], "type": extension_mapped,
|
|
2616
|
+
"data": img_byte_arr.read(), "size": os.path.getsize(file_path)
|
|
2617
|
+
})
|
|
2618
|
+
|
|
    messages = fetch_messages_for_conversation(conversation_id)
    if len(messages) == 0 and npc_object is not None:
        messages = [{'role': 'system', 'content': npc_object.get_system_prompt()}]
    elif len(messages) > 0 and messages[0]['role'] != 'system' and npc_object is not None:
        messages.insert(0, {'role': 'system', 'content': npc_object.get_system_prompt()})
    elif len(messages) > 0 and npc_object is not None:
        messages[0]['content'] = npc_object.get_system_prompt()
    if npc_object is not None and messages and messages[0]['role'] == 'system':
        messages[0]['content'] = npc_object.get_system_prompt()

    message_id = generate_message_id()
    save_conversation_message(
        command_history, conversation_id, "user", commandstr,
        wd=current_path, model=model, provider=provider, npc=npc_name,
        team=team, attachments=attachments_loaded, message_id=message_id,
    )
    response_gen = check_llm_command(
        commandstr, messages=messages, images=images, model=model,
        provider=provider, npc=npc_object, team=team_object, stream=True
    )
    print(response_gen)

    message_id = generate_message_id()

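    # event_stream is a generator handed to Flask's Response as a Server-Sent
    # Events body. Cancellation is cooperative: /api/interrupt (below) sets a
    # flag in the shared cancellation_flags dict under cancellation_lock, and
    # the chunk loop checks that flag before emitting each chunk.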
    def event_stream(current_stream_id):
        complete_response = []
        dot_count = 0
        interrupted = False
        tool_call_data = {"id": None, "function_name": None, "arguments": ""}
        memory_data = None

        try:
            for response_chunk in stream_response.get('response', stream_response.get('output')):
                with cancellation_lock:
                    if cancellation_flags.get(current_stream_id, False):
                        print(f"Cancellation flag triggered for {current_stream_id}. Breaking loop.")
                        interrupted = True
                        break

                print('.', end="", flush=True)
                dot_count += 1

                if "hf.co" in model or provider == 'ollama':
                    chunk_content = response_chunk["message"]["content"] if "message" in response_chunk and "content" in response_chunk["message"] else ""
                    if "message" in response_chunk and "tool_calls" in response_chunk["message"]:
                        for tool_call in response_chunk["message"]["tool_calls"]:
                            if "id" in tool_call:
                                tool_call_data["id"] = tool_call["id"]
                            if "function" in tool_call:
                                if "name" in tool_call["function"]:
                                    tool_call_data["function_name"] = tool_call["function"]["name"]
                                if "arguments" in tool_call["function"]:
                                    arg_val = tool_call["function"]["arguments"]
                                    if isinstance(arg_val, dict):
                                        arg_val = json.dumps(arg_val)
                                    tool_call_data["arguments"] += arg_val
                    if chunk_content:
                        complete_response.append(chunk_content)
                        chunk_data = {
                            "id": None, "object": None, "created": response_chunk["created_at"], "model": response_chunk["model"],
                            "choices": [{"index": 0, "delta": {"content": chunk_content, "role": response_chunk["message"]["role"]}, "finish_reason": response_chunk.get("done_reason")}]
                        }
                        yield f"data: {json.dumps(chunk_data)}\n\n"
                else:
                    chunk_content = ""
                    reasoning_content = ""
                    for choice in response_chunk.choices:
                        if hasattr(choice.delta, "tool_calls") and choice.delta.tool_calls:
                            for tool_call in choice.delta.tool_calls:
                                if tool_call.id:
                                    tool_call_data["id"] = tool_call.id
                                if tool_call.function:
                                    if hasattr(tool_call.function, "name") and tool_call.function.name:
                                        tool_call_data["function_name"] = tool_call.function.name
                                    if hasattr(tool_call.function, "arguments") and tool_call.function.arguments:
                                        tool_call_data["arguments"] += tool_call.function.arguments
                    for choice in response_chunk.choices:
                        if hasattr(choice.delta, "reasoning_content"):
                            reasoning_content += choice.delta.reasoning_content
                    chunk_content = "".join(choice.delta.content for choice in response_chunk.choices if choice.delta.content is not None)
                    if chunk_content:
                        complete_response.append(chunk_content)
                        chunk_data = {
                            "id": response_chunk.id, "object": response_chunk.object, "created": response_chunk.created, "model": response_chunk.model,
                            "choices": [{"index": choice.index, "delta": {"content": choice.delta.content, "role": choice.delta.role, "reasoning_content": reasoning_content if hasattr(choice.delta, "reasoning_content") else None}, "finish_reason": choice.finish_reason} for choice in response_chunk.choices]
                        }
                        yield f"data: {json.dumps(chunk_data)}\n\n"

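            # Both branches above normalize provider-specific chunks (Ollama
            # dicts vs. OpenAI-style objects) into one OpenAI-compatible SSE
            # payload, so clients can parse a single delta format regardless
            # of backend.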
        except Exception as e:
            print(f"\nAn exception occurred during streaming for {current_stream_id}: {e}")
            traceback.print_exc()
            interrupted = True

        finally:
            print(f"\nStream {current_stream_id} finished. Interrupted: {interrupted}")
            print('\r' + ' ' * dot_count * 2 + '\r', end="", flush=True)

            final_response_text = ''.join(complete_response)

            conversation_turn_text = f"User: {commandstr}\nAssistant: {final_response_text}"

            try:
                memory_examples = command_history.get_memory_examples_for_context(
                    npc=npc_name,
                    team=team,
                    directory_path=current_path
                )

                memory_context = format_memory_context(memory_examples)

                facts = get_facts(
                    conversation_turn_text,
                    model=npc_object.model if npc_object else model,
                    provider=npc_object.provider if npc_object else provider,
                    npc=npc_object,
                    context=memory_context
                )

                if facts:
                    memories_for_approval = []
                    for i, fact in enumerate(facts):
                        memory_id = command_history.add_memory_to_database(
                            message_id=f"{conversation_id}_{datetime.now().strftime('%H%M%S')}_{i}",
                            conversation_id=conversation_id,
                            npc=npc_name or "default",
                            team=team or "default",
                            directory_path=current_path or "/",
                            initial_memory=fact['statement'],
                            status="pending_approval",
                            model=npc_object.model if npc_object else model,
                            provider=npc_object.provider if npc_object else provider
                        )

                        memories_for_approval.append({
                            "memory_id": memory_id,
                            "content": fact['statement'],
                            "context": f"Type: {fact.get('type', 'unknown')}, Source: {fact.get('source_text', '')}",
                            "npc": npc_name or "default"
                        })

                    memory_data = {
                        "type": "memory_approval",
                        "memories": memories_for_approval,
                        "conversation_id": conversation_id
                    }

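                # Extracted facts are stored with status "pending_approval"
                # and surfaced to the client as a "memory_approval" SSE event,
                # letting the UI approve or reject memories before use.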
            except Exception as e:
                print(f"Memory generation error: {e}")

            if memory_data:
                yield f"data: {json.dumps(memory_data)}\n\n"

            yield f"data: {json.dumps({'type': 'message_stop'})}\n\n"

            npc_name_to_save = npc_object.name if npc_object else ''
            save_conversation_message(
                command_history,
                conversation_id,
                "assistant",
                final_response_text,
                wd=current_path,
                model=model,
                provider=provider,
                npc=npc_name_to_save,
                team=team,
                message_id=message_id,
            )

            with cancellation_lock:
                if current_stream_id in cancellation_flags:
                    del cancellation_flags[current_stream_id]
                    print(f"Cleaned up cancellation flag for stream ID: {current_stream_id}")

    return Response(event_stream(stream_id), mimetype="text/event-stream")

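# Client sketch (illustrative only -- the route path and request payload for
# this handler are defined earlier in the file, so "/api/stream" and the JSON
# keys below are placeholders): read the text/event-stream body line by line,
# accumulate the "content" deltas, and stop on the "message_stop" event.
#
#   import json, requests
#   with requests.post("http://localhost:5337/api/stream", stream=True,
#                      json={"commandstr": "hello"}) as resp:
#       for raw in resp.iter_lines():
#           if not raw.startswith(b"data: "):
#               continue
#           payload = json.loads(raw[6:])
#           if payload.get("type") == "message_stop":
#               break
#           for choice in payload.get("choices", []):
#               print(choice["delta"].get("content") or "", end="")
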
@app.route("/api/interrupt", methods=["POST"])
def interrupt_stream():
    data = request.json
    stream_id_to_cancel = data.get("streamId")

    if not stream_id_to_cancel:
        return jsonify({"error": "streamId is required"}), 400

    with cancellation_lock:
        print(f"Received interruption request for stream ID: {stream_id_to_cancel}")
        cancellation_flags[stream_id_to_cancel] = True

    return jsonify({"success": True, "message": f"Interruption for stream {stream_id_to_cancel} registered."})

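# Example client call (illustrative; 5337 is the default port from
# start_flask_server below):
#
#   curl -X POST http://localhost:5337/api/interrupt \
#        -H "Content-Type: application/json" \
#        -d '{"streamId": "<the id supplied when the stream was started>"}'
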
@app.route("/api/conversations", methods=["GET"])
def get_conversations():
    try:
        path = request.args.get("path")

        if not path:
            return jsonify({"error": "No path provided", "conversations": []}), 400

        engine = get_db_connection()
        try:
            with engine.connect() as conn:
                query = text("""
                    SELECT DISTINCT conversation_id,
                           MIN(timestamp) as start_time,
                           MAX(timestamp) as last_message_timestamp,
                           GROUP_CONCAT(content) as preview
                    FROM conversation_history
                    WHERE directory_path = :path_without_slash OR directory_path = :path_with_slash
                    GROUP BY conversation_id
                    ORDER BY MAX(timestamp) DESC
                """)

                path_without_slash = path.rstrip('/')
                path_with_slash = path_without_slash + '/'

                result = conn.execute(query, {
                    "path_without_slash": path_without_slash,
                    "path_with_slash": path_with_slash
                })
                conversations = result.fetchall()

                return jsonify(
                    {
                        "conversations": [
                            {
                                "id": conv[0],
                                "timestamp": conv[1],
                                "last_message_timestamp": conv[2],
                                "preview": (
                                    conv[3][:100] + "..."
                                    if conv[3] and len(conv[3]) > 100
                                    else conv[3]
                                ),
                            }
                            for conv in conversations
                        ],
                        "error": None,
                    }
                )
        finally:
            engine.dispose()

    except Exception as e:
        print(f"Error getting conversations: {str(e)}")
        return jsonify({"error": str(e), "conversations": []}), 500

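# Example client call (illustrative; the path must match the directory_path
# recorded with the conversation, with or without a trailing slash):
#
#   curl "http://localhost:5337/api/conversations?path=/home/user/project"
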
@app.route("/api/conversation/<conversation_id>/messages", methods=["GET"])
def get_conversation_messages(conversation_id):
    try:
        engine = get_db_connection()
        with engine.connect() as conn:

            query = text("""
                WITH ranked_messages AS (
                    SELECT
                        ch.*,
                        GROUP_CONCAT(ma.id) as attachment_ids,
                        ROW_NUMBER() OVER (
                            PARTITION BY ch.role, strftime('%s', ch.timestamp)
                            ORDER BY ch.id DESC
                        ) as rn
                    FROM conversation_history ch
                    LEFT JOIN message_attachments ma
                        ON ch.message_id = ma.message_id
                    WHERE ch.conversation_id = :conversation_id
                    GROUP BY ch.id, ch.timestamp
                )
                SELECT *
                FROM ranked_messages
                WHERE rn = 1
                ORDER BY timestamp ASC, id ASC
            """)

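            # ROW_NUMBER() keeps only the newest row per (role, second) pair,
            # deduplicating messages that were saved twice in the same second,
            # before returning results in chronological order.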
            result = conn.execute(query, {"conversation_id": conversation_id})
            messages = result.fetchall()

            return jsonify(
                {
                    "messages": [
                        {
                            "message_id": msg[1] if len(msg) > 1 else None,
                            "role": msg[3] if len(msg) > 3 else None,
                            "content": msg[4] if len(msg) > 4 else None,
                            "timestamp": msg[5] if len(msg) > 5 else None,
                            "model": msg[6] if len(msg) > 6 else None,
                            "provider": msg[7] if len(msg) > 7 else None,
                            "npc": msg[8] if len(msg) > 8 else None,
                            "attachments": (
                                get_message_attachments(msg[1])
                                if len(msg) > 1 and msg[-1]
                                else []
                            ),
                        }
                        for msg in messages
                    ],
                    "error": None,
                }
            )

    except Exception as e:
        print(f"Error getting conversation messages: {str(e)}")
        return jsonify({"error": str(e), "messages": []}), 500

@app.after_request
def after_request(response):
    response.headers.add("Access-Control-Allow-Headers", "Content-Type,Authorization")
    response.headers.add("Access-Control-Allow-Methods", "GET,PUT,POST,DELETE,OPTIONS")
    response.headers.add("Access-Control-Allow-Credentials", "true")
    return response

@app.route('/api/ollama/status', methods=['GET'])
def ollama_status():
    try:
        ollama.list()
        return jsonify({"status": "running"})
    except ollama.RequestError as e:
        print(f"Ollama status check failed: {e}")
        return jsonify({"status": "not_found"})
    except Exception as e:
        print(f"An unexpected error occurred during Ollama status check: {e}")
        return jsonify({"status": "not_found"})

@app.route('/api/ollama/models', methods=['GET'])
def get_ollama_models():
    response = ollama.list()
    models_list = []

    for model_obj in response['models']:
        models_list.append({
            "name": model_obj.model,
            "size": model_obj.details.parameter_size,
        })

    return jsonify(models_list)

@app.route('/api/ollama/delete', methods=['POST'])
def delete_ollama_model():
    data = request.get_json()
    model_name = data.get('name')
    if not model_name:
        return jsonify({"error": "Model name is required"}), 400
    try:
        ollama.delete(model_name)
        return jsonify({"success": True, "message": f"Model {model_name} deleted."})
    except ollama.ResponseError as e:
        return jsonify({"error": e.error}), e.status_code
    except Exception as e:
        return jsonify({"error": str(e)}), 500

@app.route('/api/ollama/pull', methods=['POST'])
def pull_ollama_model():
    data = request.get_json()
    model_name = data.get('name')
    if not model_name:
        return jsonify({"error": "Model name is required"}), 400

    def generate_progress():
        try:
            stream = ollama.pull(model_name, stream=True)
            for progress_obj in stream:
                yield json.dumps({
                    'status': getattr(progress_obj, 'status', None),
                    'digest': getattr(progress_obj, 'digest', None),
                    'total': getattr(progress_obj, 'total', None),
                    'completed': getattr(progress_obj, 'completed', None)
                }) + '\n'
        except ollama.ResponseError as e:
            error_message = {"status": "Error", "details": e.error}
            yield json.dumps(error_message) + '\n'
        except Exception as e:
            error_message = {"status": "Error", "details": str(e)}
            yield json.dumps(error_message) + '\n'

    return Response(generate_progress(), content_type='application/x-ndjson')

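# Example client call (illustrative; "llama3.2" is a placeholder model name):
# progress arrives as newline-delimited JSON, one object per line, until the
# pull completes or errors.
#
#   import json, requests
#   with requests.post("http://localhost:5337/api/ollama/pull", stream=True,
#                      json={"name": "llama3.2"}) as resp:
#       for line in resp.iter_lines():
#           if line:
#               print(json.loads(line).get("status"))
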
@app.route('/api/ollama/install', methods=['POST'])
def install_ollama():
    try:
        install_command = "curl -fsSL https://ollama.com/install.sh | sh"
        result = subprocess.run(install_command, shell=True, check=True, capture_output=True, text=True)
        return jsonify({"success": True, "output": result.stdout})
    except Exception as e:
        return jsonify({"error": str(e)}), 500

extension_map = {
    "PNG": "images",
    "JPG": "images",
    "JPEG": "images",
    "GIF": "images",
    "SVG": "images",
    "MP4": "videos",
    "AVI": "videos",
    "MOV": "videos",
    "WMV": "videos",
    "MPG": "videos",
    "MPEG": "videos",
    "DOC": "documents",
    "DOCX": "documents",
    "PDF": "documents",
    "PPT": "documents",
    "PPTX": "documents",
    "XLS": "documents",
    "XLSX": "documents",
    "TXT": "documents",
    "CSV": "documents",
    "ZIP": "archives",
    "RAR": "archives",
    "7Z": "archives",
    "TAR": "archives",
    "GZ": "archives",
    "BZ2": "archives",
    "ISO": "archives",
}

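# extension_map routes uploaded attachments into category subfolders of
# ~/.npcsh/ (see the attachment handling in the stream handler above);
# extensions not listed here fall back to the "others" folder.
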
@app.route("/api/health", methods=["GET"])
def health_check():
    return jsonify({"status": "ok", "error": None})

def start_flask_server(
    port=5337,
    cors_origins=None,
    static_files=None,
    debug=False,
    teams=None,
    npcs=None,
    db_path: str = '',
    user_npc_directory=None
):
    try:
        if teams:
            app.registered_teams = teams
            print(f"Registered {len(teams)} teams: {list(teams.keys())}")
        else:
            app.registered_teams = {}

        if npcs:
            app.registered_npcs = npcs
            print(f"Registered {len(npcs)} NPCs: {list(npcs.keys())}")
        else:
            app.registered_npcs = {}

        app.config['DB_PATH'] = db_path
        app.config['user_npc_directory'] = user_npc_directory

        command_history = CommandHistory(db_path)
        app.command_history = command_history

        if cors_origins:
            CORS(
                app,
                origins=cors_origins,
                allow_headers=["Content-Type", "Authorization"],
                methods=["GET", "POST", "PUT", "DELETE", "OPTIONS"],
                supports_credentials=True,
            )

        print(f"Starting Flask server on http://0.0.0.0:{port}")
        app.run(host="0.0.0.0", port=port, debug=debug, threaded=True)
    except Exception as e:
        print(f"Error starting server: {str(e)}")

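# Programmatic usage sketch (illustrative; "my_npcs" stands in for a dict of
# already-constructed NPC objects keyed by name, and the CORS origin is a
# placeholder for a local frontend dev server):
#
#   start_flask_server(
#       port=5337,
#       cors_origins=["http://localhost:5173"],
#       db_path=os.path.expanduser("~/npcsh_history.db"),
#       npcs=my_npcs,
#   )
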
if __name__ == "__main__":

    SETTINGS_FILE = Path(os.path.expanduser("~/.npcshrc"))

    db_path = os.path.expanduser("~/npcsh_history.db")
    user_npc_directory = os.path.expanduser("~/.npcsh/npc_team")

    start_flask_server(db_path=db_path, user_npc_directory=user_npc_directory)