npcpy-1.3.8-py3-none-any.whl → npcpy-1.3.9-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- npcpy/gen/response.py +28 -2
- npcpy/npc_compiler.py +9 -5
- npcpy/npc_sysenv.py +24 -0
- npcpy/serve.py +83 -18
- {npcpy-1.3.8.dist-info → npcpy-1.3.9.dist-info}/METADATA +1 -1
- {npcpy-1.3.8.dist-info → npcpy-1.3.9.dist-info}/RECORD +9 -9
- {npcpy-1.3.8.dist-info → npcpy-1.3.9.dist-info}/WHEEL +0 -0
- {npcpy-1.3.8.dist-info → npcpy-1.3.9.dist-info}/licenses/LICENSE +0 -0
- {npcpy-1.3.8.dist-info → npcpy-1.3.9.dist-info}/top_level.txt +0 -0
npcpy/gen/response.py
CHANGED

@@ -745,6 +745,23 @@ def get_litellm_response(
             auto_process_tool_calls=auto_process_tool_calls,
             **kwargs
         )
+    elif provider == 'lmstudio' or (model and '.lmstudio' in str(model)):
+        # LM Studio uses OpenAI-compatible API on port 1234
+        # Also detect models with .lmstudio in path (e.g., /home/user/.lmstudio/models/...)
+        api_url = api_url or "http://127.0.0.1:1234/v1"
+        provider = "openai"
+        api_key = api_key or "lm-studio"  # LM Studio doesn't require real API key
+        # Default timeout for local CPU inference (can be overridden via kwargs)
+        if 'timeout' not in kwargs:
+            kwargs['timeout'] = 300  # 5 minutes for CPU inference with large prompts
+    elif provider == 'llamacpp-server':
+        # llama.cpp server uses OpenAI-compatible API on port 8080
+        api_url = api_url or "http://127.0.0.1:8080/v1"
+        provider = "openai"
+        api_key = api_key or "llamacpp"  # llama.cpp server doesn't require real API key
+        # Default timeout for local CPU inference (can be overridden via kwargs)
+        if 'timeout' not in kwargs:
+            kwargs['timeout'] = 300  # 5 minutes for CPU inference with large prompts
 
     if attachments:
         for attachment in attachments:
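Both new branches funnel local inference servers through the existing OpenAI-compatible code path: the provider is rewritten to "openai", the base URL falls back to the server's default local port, a placeholder API key is filled in, and a 300-second timeout is set unless the caller supplies one. The standalone sketch below mirrors that normalization; the function and dictionary names are illustrative, not part of npcpy's API.

```python
# Illustrative sketch of the normalization added above; names are hypothetical.
LOCAL_OPENAI_COMPATIBLE = {
    "lmstudio": ("http://127.0.0.1:1234/v1", "lm-studio"),
    "llamacpp-server": ("http://127.0.0.1:8080/v1", "llamacpp"),
}

def normalize_local_provider(provider, model, api_url=None, api_key=None, **kwargs):
    """Map a local-server provider onto the OpenAI-compatible request path."""
    if model and ".lmstudio" in str(model):
        provider = "lmstudio"  # model stored under ~/.lmstudio/models/...
    if provider in LOCAL_OPENAI_COMPATIBLE:
        default_url, placeholder_key = LOCAL_OPENAI_COMPATIBLE[provider]
        api_url = api_url or default_url
        api_key = api_key or placeholder_key  # local servers ignore the key
        kwargs.setdefault("timeout", 300)     # generous default for CPU inference
        provider = "openai"
    return provider, api_url, api_key, kwargs

# normalize_local_provider("lmstudio", "qwen2.5-7b-instruct")
# -> ("openai", "http://127.0.0.1:1234/v1", "lm-studio", {"timeout": 300})
```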
@@ -875,7 +892,16 @@ def get_litellm_response(
     if provider is None:
         provider = os.environ.get("NPCSH_CHAT_PROVIDER")
 
-
+    # For OpenAI-compatible endpoints with api_base, always prefix with provider
+    # LiteLLM needs this to know how to route the request
+    # Also handle file paths (starting with /) which contain slashes but still need prefix
+    if "api_base" in api_params and provider == "openai":
+        api_params["model"] = f"openai/{model}"
+    elif "/" not in model or model.startswith("/"):
+        # No provider prefix in model, or model is a file path - add provider prefix
+        api_params["model"] = f"{provider}/{model}"
+    else:
+        api_params["model"] = model
     if api_key is not None:
         api_params["api_key"] = api_key
     if tools:

@@ -888,7 +914,7 @@ def get_litellm_response(
             if key in [
                 "stop", "temperature", "top_p", "max_tokens", "max_completion_tokens",
                 "extra_headers", "parallel_tool_calls",
-                "response_format", "user",
+                "response_format", "user", "timeout",
             ]:
                 api_params[key] = value
 
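The second hunk makes the model string explicit for LiteLLM routing: with a custom api_base on the OpenAI path the model is always prefixed with openai/, bare model names and local file paths get the active provider as a prefix, and names that already carry a provider prefix pass through unchanged. The third hunk adds "timeout" to the keyword passthrough list, which is what lets the local-server timeout above reach the actual API call. A minimal sketch of the routing rule follows; the function name is illustrative.

```python
# Illustrative sketch of the model-name routing added above.
def routed_model_name(model, provider, has_api_base):
    if has_api_base and provider == "openai":
        return f"openai/{model}"          # OpenAI-compatible endpoint with custom api_base
    if "/" not in model or model.startswith("/"):
        return f"{provider}/{model}"      # bare name or local file path: add provider prefix
    return model                          # already prefixed, e.g. "ollama/llama3.2"

assert routed_model_name("qwen2.5-7b-instruct", "openai", True) == "openai/qwen2.5-7b-instruct"
assert routed_model_name("llama3.2", "ollama", False) == "ollama/llama3.2"
assert routed_model_name("ollama/llama3.2", "ollama", False) == "ollama/llama3.2"
# a file path keeps its leading slash after the prefix
assert routed_model_name("/models/mistral.gguf", "openai", False) == "openai//models/mistral.gguf"
```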
npcpy/npc_compiler.py
CHANGED

@@ -185,13 +185,17 @@ def initialize_npc_project(
         directory = os.getcwd()
     directory = os.path.expanduser(os.fspath(directory))
 
+    # Create top-level directories for assets
+    for subdir in ["images", "models", "attachments", "mcp_servers"]:
+        os.makedirs(os.path.join(directory, subdir), exist_ok=True)
+
     npc_team_dir = os.path.join(directory, "npc_team")
     os.makedirs(npc_team_dir, exist_ok=True)
-
-    for subdir in ["jinxs",
-                   "assembly_lines",
-                   "sql_models",
-                   "jobs",
+
+    for subdir in ["jinxs",
+                   "assembly_lines",
+                   "sql_models",
+                   "jobs",
                    "triggers",
                    "tools"]:
         os.makedirs(os.path.join(npc_team_dir, subdir), exist_ok=True)
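With this change, initializing a project also creates images/, models/, attachments/, and mcp_servers/ next to npc_team/. A rough usage sketch, assuming only the directory keyword as serve.py uses below; other parameters of initialize_npc_project are not shown here.

```python
# Usage sketch; keyword arguments other than `directory` are omitted.
import os
from npcpy.npc_compiler import initialize_npc_project

project_dir = os.path.expanduser("~/my_project")
initialize_npc_project(directory=project_dir)

# The project root should now contain the asset folders plus npc_team/.
for subdir in ["images", "models", "attachments", "mcp_servers", "npc_team"]:
    print(subdir, os.path.isdir(os.path.join(project_dir, subdir)))
```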
npcpy/npc_sysenv.py
CHANGED

@@ -334,6 +334,30 @@ def get_locally_available_models(project_directory, airplane_mode=False):
         except Exception as e:
             logging.info(f"Error scanning GGUF directory {scan_dir}: {e}")
 
+    # Check for LM Studio server (OpenAI-compatible API on port 1234)
+    try:
+        import requests
+        response = requests.get('http://127.0.0.1:1234/v1/models', timeout=1)
+        if response.ok:
+            data = response.json()
+            for model in data.get('data', []):
+                model_id = model.get('id', model.get('name', 'unknown'))
+                available_models[model_id] = "lmstudio"
+    except Exception as e:
+        logging.debug(f"LM Studio not available: {e}")
+
+    # Check for llama.cpp server (OpenAI-compatible API on port 8080)
+    try:
+        import requests
+        response = requests.get('http://127.0.0.1:8080/v1/models', timeout=1)
+        if response.ok:
+            data = response.json()
+            for model in data.get('data', []):
+                model_id = model.get('id', model.get('name', 'unknown'))
+                available_models[model_id] = "llamacpp-server"
+    except Exception as e:
+        logging.debug(f"llama.cpp server not available: {e}")
+
     return available_models
 
 
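Model discovery now also probes the two local servers and tags their models with the provider strings "lmstudio" and "llamacpp-server", alongside whatever was already found. A short usage sketch, assuming the function is imported from npcpy.npc_sysenv as defined above:

```python
# Usage sketch: list models reported by locally running servers.
import os
from npcpy.npc_sysenv import get_locally_available_models

models = get_locally_available_models(os.getcwd())  # {model_id: provider}
for model_id, provider in models.items():
    if provider in ("lmstudio", "llamacpp-server"):
        print(f"{provider}: {model_id}")
```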
npcpy/serve.py
CHANGED

@@ -737,9 +737,10 @@ def get_global_settings():
         "embedding_provider": "ollama",
         "search_provider": "perplexity",
         "default_folder": os.path.expanduser("~/.npcsh/"),
-        "is_predictive_text_enabled": False,
-        "predictive_text_model": "llama3.2",
-        "predictive_text_provider": "ollama",
+        "is_predictive_text_enabled": False,
+        "predictive_text_model": "llama3.2",
+        "predictive_text_provider": "ollama",
+        "backend_python_path": "",  # Empty means use bundled backend
     }
     global_vars = {}
 

@@ -772,9 +773,10 @@ def get_global_settings():
         "NPCSH_SEARCH_PROVIDER": "search_provider",
         "NPCSH_STREAM_OUTPUT": "NPCSH_STREAM_OUTPUT",
         "NPC_STUDIO_DEFAULT_FOLDER": "default_folder",
-        "NPC_STUDIO_PREDICTIVE_TEXT_ENABLED": "is_predictive_text_enabled",
-        "NPC_STUDIO_PREDICTIVE_TEXT_MODEL": "predictive_text_model",
-        "NPC_STUDIO_PREDICTIVE_TEXT_PROVIDER": "predictive_text_provider",
+        "NPC_STUDIO_PREDICTIVE_TEXT_ENABLED": "is_predictive_text_enabled",
+        "NPC_STUDIO_PREDICTIVE_TEXT_MODEL": "predictive_text_model",
+        "NPC_STUDIO_PREDICTIVE_TEXT_PROVIDER": "predictive_text_provider",
+        "BACKEND_PYTHON_PATH": "backend_python_path",  # Custom Python for backend
     }
 
     if key in key_mapping:

@@ -1067,9 +1069,10 @@ def save_global_settings():
         "search_provider": "NPCSH_SEARCH_PROVIDER",
         "NPCSH_STREAM_OUTPUT": "NPCSH_STREAM_OUTPUT",
         "default_folder": "NPC_STUDIO_DEFAULT_FOLDER",
-        "is_predictive_text_enabled": "NPC_STUDIO_PREDICTIVE_TEXT_ENABLED",
-        "predictive_text_model": "NPC_STUDIO_PREDICTIVE_TEXT_MODEL",
-        "predictive_text_provider": "NPC_STUDIO_PREDICTIVE_TEXT_PROVIDER",
+        "is_predictive_text_enabled": "NPC_STUDIO_PREDICTIVE_TEXT_ENABLED",
+        "predictive_text_model": "NPC_STUDIO_PREDICTIVE_TEXT_MODEL",
+        "predictive_text_provider": "NPC_STUDIO_PREDICTIVE_TEXT_PROVIDER",
+        "backend_python_path": "BACKEND_PYTHON_PATH",  # Custom Python for backend (requires restart)
     }
 
     os.makedirs(os.path.dirname(npcshrc_path), exist_ok=True)
@@ -2349,6 +2352,43 @@ def init_project_team():
         print(f"Error initializing project team: {e}")
         return jsonify({"error": str(e)}), 500
 
+@app.route("/api/npcsh/check", methods=["GET"])
+def check_npcsh_folder():
+    """Check if ~/.npcsh folder exists and has a valid npc_team."""
+    try:
+        npcsh_path = os.path.expanduser("~/.npcsh")
+        npc_team_path = os.path.join(npcsh_path, "npc_team")
+
+        exists = os.path.exists(npcsh_path)
+        has_npc_team = os.path.exists(npc_team_path)
+        has_forenpc = os.path.exists(os.path.join(npc_team_path, "forenpc.npc")) if has_npc_team else False
+
+        return jsonify({
+            "exists": exists,
+            "has_npc_team": has_npc_team,
+            "has_forenpc": has_forenpc,
+            "path": npcsh_path,
+            "error": None
+        })
+    except Exception as e:
+        print(f"Error checking npcsh folder: {e}")
+        return jsonify({"error": str(e)}), 500
+
+@app.route("/api/npcsh/init", methods=["POST"])
+def init_npcsh_folder():
+    """Initialize the ~/.npcsh folder with a default npc_team."""
+    try:
+        npcsh_path = os.path.expanduser("~/.npcsh")
+        result = initialize_npc_project(directory=npcsh_path)
+        return jsonify({
+            "message": result,
+            "path": npcsh_path,
+            "error": None
+        })
+    except Exception as e:
+        print(f"Error initializing npcsh folder: {e}")
+        return jsonify({"error": str(e)}), 500
+
 @app.route("/api/context/websites", methods=["GET"])
 def get_context_websites():
     """Gets the websites list from a .ctx file."""
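The two new routes let a client verify and, if needed, bootstrap the global ~/.npcsh workspace before first use. A client-side sketch follows; the host and port are assumptions, substitute whatever address the npcpy Flask server is listening on.

```python
# Client sketch; BASE is a hypothetical address for the running serve.py instance.
import requests

BASE = "http://localhost:5337"

status = requests.get(f"{BASE}/api/npcsh/check").json()
# e.g. {"exists": True, "has_npc_team": True, "has_forenpc": False,
#       "path": "/home/me/.npcsh", "error": None}

if not status.get("has_npc_team"):
    created = requests.post(f"{BASE}/api/npcsh/init").json()
    print(created["message"], created["path"])
```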
@@ -2537,13 +2577,15 @@ def get_attachment_response():
 
 IMAGE_MODELS = {
     "openai": [
+        {"value": "gpt-image-1.5", "display_name": "GPT-Image-1.5"},
+        {"value": "gpt-image-1", "display_name": "GPT-Image-1"},
         {"value": "dall-e-3", "display_name": "DALL-E 3"},
         {"value": "dall-e-2", "display_name": "DALL-E 2"},
-        {"value": "gpt-image-1", "display_name": "GPT-Image-1"},
     ],
     "gemini": [
+        {"value": "gemini-3-pro-image-preview", "display_name": "Gemini 3 Pro Image"},
         {"value": "gemini-2.5-flash-image-preview", "display_name": "Gemini 2.5 Flash Image"},
-        {"value": "imagen-3.0-generate-002", "display_name": "Imagen 3.0 Generate (Preview)"},
+        {"value": "imagen-3.0-generate-002", "display_name": "Imagen 3.0 Generate (Preview)"},
     ],
     "diffusers": [
         {"value": "runwayml/stable-diffusion-v1-5", "display_name": "Stable Diffusion v1.5"},
@@ -3551,7 +3593,15 @@ def stream():
         stream_response = {"output": "", "messages": messages}
 
         exe_mode = data.get('executionMode','chat')
-
+
+        # Initialize api_url with default before checking npc_object
+        api_url = None
+        if npc_object is not None:
+            try:
+                api_url = npc_object.api_url if npc_object.api_url else None
+            except AttributeError:
+                api_url = None
+
         if exe_mode == 'chat':
             stream_response = get_llm_response(
                 commandstr,

@@ -3560,7 +3610,7 @@ def stream():
                 model=model,
                 provider=provider,
                 npc=npc_object,
-                api_url =
+                api_url = api_url,
                 team=team_object,
                 stream=True,
                 attachments=attachment_paths_for_llm,
@@ -4935,22 +4985,37 @@ def get_local_model_status():
             import requests
             response = requests.get('http://127.0.0.1:1234/v1/models', timeout=2)
             if response.ok:
-                return jsonify({'status': 'running'})
+                return jsonify({'status': 'running', 'running': True})
         except:
             pass
-        return jsonify({'status': 'not_running'})
+        return jsonify({'status': 'not_running', 'running': False})
 
     elif provider == 'llamacpp':
         try:
             import requests
             response = requests.get('http://127.0.0.1:8080/v1/models', timeout=2)
             if response.ok:
-                return jsonify({'status': 'running'})
+                return jsonify({'status': 'running', 'running': True})
         except:
             pass
-        return jsonify({'status': 'not_running'})
+        return jsonify({'status': 'not_running', 'running': False})
+
+    return jsonify({'status': 'unknown', 'running': False, 'error': f'Unknown provider: {provider}'})
 
-
+
+# ============== Activity Tracking ==============
+@app.route('/api/activity/track', methods=['POST'])
+def track_activity():
+    """Track user activity for predictive features."""
+    try:
+        data = request.json or {}
+        # For now, just acknowledge the activity - can be expanded later
+        # to store in database for RNN-based predictions
+        activity_type = data.get('type', 'unknown')
+        return jsonify({'success': True, 'tracked': activity_type})
+    except Exception as e:
+        print(f"Error tracking activity: {e}")
+        return jsonify({'success': False, 'error': str(e)}), 500
 
 
 def start_flask_server(
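The status responses now carry a boolean running field so clients can branch without string comparison, an unknown provider returns an explicit error payload, and the new /api/activity/track route simply acknowledges the reported activity type for now. A client-side sketch under the same base-URL assumption as above:

```python
# Client sketch; BASE is a hypothetical address for the running serve.py instance.
import requests

BASE = "http://localhost:5337"

ack = requests.post(f"{BASE}/api/activity/track",
                    json={"type": "file_opened"}).json()
# -> {"success": True, "tracked": "file_opened"}
print(ack["tracked"])
```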
{npcpy-1.3.8.dist-info → npcpy-1.3.9.dist-info}/RECORD
CHANGED

@@ -4,10 +4,10 @@ npcpy/llm_funcs.py,sha256=YtKwLnJLfhr6U3EEp-js41bQbbni-0HVH_zcqANoOVU,76040
 npcpy/main.py,sha256=RWoRIj6VQLxKdOKvdVyaq2kwG35oRpeXPvp1CAAoG-w,81
 npcpy/ml_funcs.py,sha256=UI7k7JR4XOH_VXR-xxLaO4r9Kyx_jBaEnp3TUIY7ZLQ,22657
 npcpy/npc_array.py,sha256=fVTxcMiXV-lvltmuwaRnTU9D3ikPq3-7k5wzp7MA5OY,40224
-npcpy/npc_compiler.py,sha256=
-npcpy/npc_sysenv.py,sha256=
+npcpy/npc_compiler.py,sha256=9U6_F7qweURaL2nQgrF7I9OQEmYjOENmkBV-YChr3oM,118402
+npcpy/npc_sysenv.py,sha256=VH7le3xwxHvO55ZYCG1e-gj8X5YTSIqbIiU6ifSqhss,38917
 npcpy/npcs.py,sha256=eExuVsbTfrRobTRRptRpDm46jCLWUgbvy4_U7IUQo-c,744
-npcpy/serve.py,sha256=
+npcpy/serve.py,sha256=rl1SFx5F2W3ejJTwz3BfvMj8acEoPKCpHZzacjs-aGw,203964
 npcpy/tools.py,sha256=A5_oVmZkzGnI3BI-NmneuxeXQq-r29PbpAZP4nV4jrc,5303
 npcpy/data/__init__.py,sha256=1tcoChR-Hjn905JDLqaW9ElRmcISCTJdE7BGXPlym2Q,642
 npcpy/data/audio.py,sha256=3qryGXnWHa4JFMonjuX-lf0fCrF8jmbHe7mHAuOdua0,12397

@@ -30,7 +30,7 @@ npcpy/gen/audio_gen.py,sha256=w4toESu7nmli1T5FOwRRCGC_QK9W-SMWknYYkbRv9jE,635
 npcpy/gen/embeddings.py,sha256=QStTJ2ELiC379OEZsLEgGGIIFD267Y8zQchs7HRn2Zg,2089
 npcpy/gen/image_gen.py,sha256=SOZYpvlxSiAdDK9j750OEBKjm22OUNdXg1kQ10sJSy0,21853
 npcpy/gen/ocr.py,sha256=rgmXWHrCYX1Po-qG_LrNFbVYEZ8aaupxFTgparcoB_Y,6554
-npcpy/gen/response.py,sha256=
+npcpy/gen/response.py,sha256=Pw01M0UxjsXOPJlvShAbq9n6IVnvEqxT6MQaLyEwJFs,48505
 npcpy/gen/video_gen.py,sha256=RFi3Zcq_Hn3HIcfoF3mijQ6G7RYFZaM_9pjPTh-8E64,3239
 npcpy/gen/world_gen.py,sha256=_8ytE7E3QVQ5qiX8DmOby-xd0d9zV20rRI6Wkpf-qcY,18922
 npcpy/memory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@@ -53,8 +53,8 @@ npcpy/work/browser.py,sha256=p2PeaoZdAXipFuAgKCCB3aXXLE_p3yIRqC87KlZKZWc,679
 npcpy/work/desktop.py,sha256=F3I8mUtJp6LAkXodsh8hGZIncoads6c_2Utty-0EdDA,2986
 npcpy/work/plan.py,sha256=QyUwg8vElWiHuoS-xK4jXTxxHvkMD3VkaCEsCmrEPQk,8300
 npcpy/work/trigger.py,sha256=P1Y8u1wQRsS2WACims_2IdkBEar-iBQix-2TDWoW0OM,9948
-npcpy-1.3.
-npcpy-1.3.
-npcpy-1.3.
-npcpy-1.3.
-npcpy-1.3.
+npcpy-1.3.9.dist-info/licenses/LICENSE,sha256=j0YPvce7Ng9e32zYOu0EmXjXeJ0Nwawd0RA3uSGGH4E,1070
+npcpy-1.3.9.dist-info/METADATA,sha256=aQk_TjR_0MsXFWx5UU-ZN_Ze3s2PPd9m6EPL678WMps,37884
+npcpy-1.3.9.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+npcpy-1.3.9.dist-info/top_level.txt,sha256=g1pbSvrOOncB74Bg5-J0Olg4V0A5VzDw-Xz5YObq8BU,6
+npcpy-1.3.9.dist-info/RECORD,,
{npcpy-1.3.8.dist-info → npcpy-1.3.9.dist-info}/WHEEL
File without changes

{npcpy-1.3.8.dist-info → npcpy-1.3.9.dist-info}/licenses/LICENSE
File without changes

{npcpy-1.3.8.dist-info → npcpy-1.3.9.dist-info}/top_level.txt
File without changes