npcpy: npcpy-1.3.8-py3-none-any.whl → npcpy-1.3.10-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- npcpy/gen/response.py +28 -2
- npcpy/npc_compiler.py +9 -5
- npcpy/npc_sysenv.py +24 -0
- npcpy/serve.py +83 -19
- {npcpy-1.3.8.dist-info → npcpy-1.3.10.dist-info}/METADATA +1 -1
- {npcpy-1.3.8.dist-info → npcpy-1.3.10.dist-info}/RECORD +9 -9
- {npcpy-1.3.8.dist-info → npcpy-1.3.10.dist-info}/WHEEL +0 -0
- {npcpy-1.3.8.dist-info → npcpy-1.3.10.dist-info}/licenses/LICENSE +0 -0
- {npcpy-1.3.8.dist-info → npcpy-1.3.10.dist-info}/top_level.txt +0 -0
npcpy/gen/response.py
CHANGED
@@ -745,6 +745,23 @@ def get_litellm_response(
             auto_process_tool_calls=auto_process_tool_calls,
             **kwargs
         )
+    elif provider == 'lmstudio' or (model and '.lmstudio' in str(model)):
+        # LM Studio uses OpenAI-compatible API on port 1234
+        # Also detect models with .lmstudio in path (e.g., /home/user/.lmstudio/models/...)
+        api_url = api_url or "http://127.0.0.1:1234/v1"
+        provider = "openai"
+        api_key = api_key or "lm-studio"  # LM Studio doesn't require real API key
+        # Default timeout for local CPU inference (can be overridden via kwargs)
+        if 'timeout' not in kwargs:
+            kwargs['timeout'] = 300  # 5 minutes for CPU inference with large prompts
+    elif provider == 'llamacpp-server':
+        # llama.cpp server uses OpenAI-compatible API on port 8080
+        api_url = api_url or "http://127.0.0.1:8080/v1"
+        provider = "openai"
+        api_key = api_key or "llamacpp"  # llama.cpp server doesn't require real API key
+        # Default timeout for local CPU inference (can be overridden via kwargs)
+        if 'timeout' not in kwargs:
+            kwargs['timeout'] = 300  # 5 minutes for CPU inference with large prompts
 
     if attachments:
         for attachment in attachments:
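The new branches normalize both local servers onto LiteLLM's generic openai route: fill in the default base URL, a placeholder API key, and a generous timeout, then hand off as if the provider were "openai". A minimal standalone sketch of that normalization, with normalize_local_provider as a hypothetical name (the real logic lives inline in get_litellm_response):

# Illustrative sketch, not npcpy API: map local OpenAI-compatible servers
# onto the "openai" provider that LiteLLM knows how to call.
def normalize_local_provider(provider, model=None, api_url=None, api_key=None, **kwargs):
    if provider == 'lmstudio' or (model and '.lmstudio' in str(model)):
        api_url = api_url or "http://127.0.0.1:1234/v1"   # LM Studio default port
        api_key = api_key or "lm-studio"                  # placeholder, never validated
        provider = "openai"
        kwargs.setdefault('timeout', 300)                 # room for slow CPU inference
    elif provider == 'llamacpp-server':
        api_url = api_url or "http://127.0.0.1:8080/v1"   # llama.cpp server default port
        api_key = api_key or "llamacpp"
        provider = "openai"
        kwargs.setdefault('timeout', 300)
    return provider, api_url, api_key, kwargs

# normalize_local_provider('lmstudio') ->
#   ('openai', 'http://127.0.0.1:1234/v1', 'lm-studio', {'timeout': 300})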
@@ -875,7 +892,16 @@ def get_litellm_response(
     if provider is None:
         provider = os.environ.get("NPCSH_CHAT_PROVIDER")
 
-
+    # For OpenAI-compatible endpoints with api_base, always prefix with provider
+    # LiteLLM needs this to know how to route the request
+    # Also handle file paths (starting with /) which contain slashes but still need prefix
+    if "api_base" in api_params and provider == "openai":
+        api_params["model"] = f"openai/{model}"
+    elif "/" not in model or model.startswith("/"):
+        # No provider prefix in model, or model is a file path - add provider prefix
+        api_params["model"] = f"{provider}/{model}"
+    else:
+        api_params["model"] = model
     if api_key is not None:
         api_params["api_key"] = api_key
     if tools:
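The prefixing rule is easiest to see as a pure function over its three cases. A sketch under that framing, with resolve_litellm_model as an illustrative name only:

# Illustrative sketch of the model-name routing above (not npcpy API).
def resolve_litellm_model(model, provider, has_api_base):
    if has_api_base and provider == "openai":
        return f"openai/{model}"        # custom endpoint: force openai/ routing
    if "/" not in model or model.startswith("/"):
        return f"{provider}/{model}"    # bare name or local file path: add prefix
    return model                        # already prefixed, e.g. "ollama/llama3.2"

assert resolve_litellm_model("qwen2.5-7b", "openai", True) == "openai/qwen2.5-7b"
assert resolve_litellm_model("/home/u/models/x.gguf", "openai", False) == "openai//home/u/models/x.gguf"
assert resolve_litellm_model("ollama/llama3.2", "ollama", False) == "ollama/llama3.2"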
@@ -888,7 +914,7 @@ def get_litellm_response(
             if key in [
                 "stop", "temperature", "top_p", "max_tokens", "max_completion_tokens",
                 "extra_headers", "parallel_tool_calls",
-                "response_format", "user",
+                "response_format", "user", "timeout",
             ]:
                 api_params[key] = value
 
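This one-word change matters for the new local providers: without "timeout" in the allowlist, the 300-second default set earlier would be silently filtered out before the completion call. A small sketch of the allowlist pattern (ALLOWED and filter_api_params are illustrative names):

# Illustrative: only known-safe kwargs reach the underlying completion call.
ALLOWED = {"stop", "temperature", "top_p", "max_tokens", "max_completion_tokens",
           "extra_headers", "parallel_tool_calls", "response_format", "user", "timeout"}

def filter_api_params(kwargs):
    return {k: v for k, v in kwargs.items() if k in ALLOWED}

# filter_api_params({'timeout': 300, 'debug': True}) -> {'timeout': 300}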
npcpy/npc_compiler.py
CHANGED
@@ -185,13 +185,17 @@ def initialize_npc_project(
         directory = os.getcwd()
     directory = os.path.expanduser(os.fspath(directory))
 
+    # Create top-level directories for assets
+    for subdir in ["images", "models", "attachments", "mcp_servers"]:
+        os.makedirs(os.path.join(directory, subdir), exist_ok=True)
+
     npc_team_dir = os.path.join(directory, "npc_team")
     os.makedirs(npc_team_dir, exist_ok=True)
-
-    for subdir in ["jinxs",
-                   "assembly_lines",
-                   "sql_models",
-                   "jobs",
+
+    for subdir in ["jinxs",
+                   "assembly_lines",
+                   "sql_models",
+                   "jobs",
                    "triggers",
                    "tools"]:
         os.makedirs(os.path.join(npc_team_dir, subdir), exist_ok=True)
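After this change, project initialization produces asset directories at the project root alongside the existing npc_team tree. A minimal sketch of the resulting layout, listing only the directories this diff shows being created:

# Expected directory layout after initialize_npc_project(directory),
# relative to the project root (names taken from the diff above).
expected_dirs = [
    "images", "models", "attachments", "mcp_servers",   # new top-level asset dirs
    "npc_team",
    "npc_team/jinxs", "npc_team/assembly_lines", "npc_team/sql_models",
    "npc_team/jobs", "npc_team/triggers", "npc_team/tools",
]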
npcpy/npc_sysenv.py
CHANGED
@@ -334,6 +334,30 @@ def get_locally_available_models(project_directory, airplane_mode=False):
     except Exception as e:
         logging.info(f"Error scanning GGUF directory {scan_dir}: {e}")
 
+    # Check for LM Studio server (OpenAI-compatible API on port 1234)
+    try:
+        import requests
+        response = requests.get('http://127.0.0.1:1234/v1/models', timeout=1)
+        if response.ok:
+            data = response.json()
+            for model in data.get('data', []):
+                model_id = model.get('id', model.get('name', 'unknown'))
+                available_models[model_id] = "lmstudio"
+    except Exception as e:
+        logging.debug(f"LM Studio not available: {e}")
+
+    # Check for llama.cpp server (OpenAI-compatible API on port 8080)
+    try:
+        import requests
+        response = requests.get('http://127.0.0.1:8080/v1/models', timeout=1)
+        if response.ok:
+            data = response.json()
+            for model in data.get('data', []):
+                model_id = model.get('id', model.get('name', 'unknown'))
+                available_models[model_id] = "llamacpp-server"
+    except Exception as e:
+        logging.debug(f"llama.cpp server not available: {e}")
+
     return available_models
 
 
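The two probes are identical apart from the port and the provider label, so the same check can be sketched as one helper. This is a hedged refactoring sketch, not npcpy code; probe_openai_models is an illustrative name:

import logging
import requests

def probe_openai_models(base_url, provider_label, available_models):
    """Query an OpenAI-compatible /v1/models endpoint and tag results by provider."""
    try:
        response = requests.get(f"{base_url}/v1/models", timeout=1)
        if response.ok:
            for model in response.json().get('data', []):
                model_id = model.get('id', model.get('name', 'unknown'))
                available_models[model_id] = provider_label
    except Exception as e:
        logging.debug(f"{provider_label} not available: {e}")

# probe_openai_models("http://127.0.0.1:1234", "lmstudio", available_models)
# probe_openai_models("http://127.0.0.1:8080", "llamacpp-server", available_models)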
npcpy/serve.py
CHANGED
@@ -42,7 +42,8 @@ class SilentUndefined(Undefined):
         return ""
 
 # Import ShellState and helper functions from npcsh
-from npcsh._state import ShellState
+from npcsh._state import ShellState, initialize_base_npcs_if_needed
+from npcsh.config import NPCSH_DB_PATH
 
 
 from npcpy.memory.knowledge_graph import load_kg_from_db
@@ -737,9 +738,10 @@ def get_global_settings():
         "embedding_provider": "ollama",
         "search_provider": "perplexity",
         "default_folder": os.path.expanduser("~/.npcsh/"),
-        "is_predictive_text_enabled": False,
-        "predictive_text_model": "llama3.2",
-        "predictive_text_provider": "ollama",
+        "is_predictive_text_enabled": False,
+        "predictive_text_model": "llama3.2",
+        "predictive_text_provider": "ollama",
+        "backend_python_path": "",  # Empty means use bundled backend
     }
     global_vars = {}
 
@@ -772,9 +774,10 @@ def get_global_settings():
         "NPCSH_SEARCH_PROVIDER": "search_provider",
         "NPCSH_STREAM_OUTPUT": "NPCSH_STREAM_OUTPUT",
         "NPC_STUDIO_DEFAULT_FOLDER": "default_folder",
-        "NPC_STUDIO_PREDICTIVE_TEXT_ENABLED": "is_predictive_text_enabled",
-        "NPC_STUDIO_PREDICTIVE_TEXT_MODEL": "predictive_text_model",
-        "NPC_STUDIO_PREDICTIVE_TEXT_PROVIDER": "predictive_text_provider",
+        "NPC_STUDIO_PREDICTIVE_TEXT_ENABLED": "is_predictive_text_enabled",
+        "NPC_STUDIO_PREDICTIVE_TEXT_MODEL": "predictive_text_model",
+        "NPC_STUDIO_PREDICTIVE_TEXT_PROVIDER": "predictive_text_provider",
+        "BACKEND_PYTHON_PATH": "backend_python_path",  # Custom Python for backend
     }
 
     if key in key_mapping:
@@ -1067,9 +1070,10 @@ def save_global_settings():
         "search_provider": "NPCSH_SEARCH_PROVIDER",
         "NPCSH_STREAM_OUTPUT": "NPCSH_STREAM_OUTPUT",
         "default_folder": "NPC_STUDIO_DEFAULT_FOLDER",
-        "is_predictive_text_enabled": "NPC_STUDIO_PREDICTIVE_TEXT_ENABLED",
-        "predictive_text_model": "NPC_STUDIO_PREDICTIVE_TEXT_MODEL",
-        "predictive_text_provider": "NPC_STUDIO_PREDICTIVE_TEXT_PROVIDER",
+        "is_predictive_text_enabled": "NPC_STUDIO_PREDICTIVE_TEXT_ENABLED",
+        "predictive_text_model": "NPC_STUDIO_PREDICTIVE_TEXT_MODEL",
+        "predictive_text_provider": "NPC_STUDIO_PREDICTIVE_TEXT_PROVIDER",
+        "backend_python_path": "BACKEND_PYTHON_PATH",  # Custom Python for backend (requires restart)
     }
 
    os.makedirs(os.path.dirname(npcshrc_path), exist_ok=True)
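Taken together, these three hunks wire the new backend_python_path setting through the default value, the read mapping (rc file to settings dict), and the write mapping (settings dict back to rc file). A minimal round-trip sketch, assuming the KEY=value line format these mappings imply for ~/.npcshrc:

# Hypothetical round-trip: env-style key on disk, snake_case key in the
# settings dict served to the UI.
READ_MAP = {"BACKEND_PYTHON_PATH": "backend_python_path"}
WRITE_MAP = {v: k for k, v in READ_MAP.items()}

line = "BACKEND_PYTHON_PATH=/usr/bin/python3.11"   # example .npcshrc entry
key, _, value = line.partition("=")
settings = {READ_MAP[key]: value}
assert settings == {"backend_python_path": "/usr/bin/python3.11"}
assert WRITE_MAP["backend_python_path"] == "BACKEND_PYTHON_PATH"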
@@ -2349,6 +2353,41 @@ def init_project_team():
         print(f"Error initializing project team: {e}")
         return jsonify({"error": str(e)}), 500
 
+@app.route("/api/npcsh/check", methods=["GET"])
+def check_npcsh_folder():
+    """Check if npcsh has been initialized by looking for actual npc_team content."""
+    try:
+        npcsh_path = os.path.expanduser("~/.npcsh")
+        npc_team_path = os.path.join(npcsh_path, "npc_team")
+        # Check if npc_team exists and has .npc files (actual initialization)
+        initialized = os.path.isdir(npc_team_path) and any(
+            f.endswith('.npc') for f in os.listdir(npc_team_path)
+        ) if os.path.exists(npc_team_path) else False
+        return jsonify({
+            "initialized": initialized,
+            "path": npcsh_path,
+            "error": None
+        })
+    except Exception as e:
+        print(f"Error checking npcsh: {e}")
+        return jsonify({"error": str(e)}), 500
+
+@app.route("/api/npcsh/init", methods=["POST"])
+def init_npcsh_folder():
+    """Initialize npcsh with config and default npc_team."""
+    try:
+        db_path = os.path.expanduser(NPCSH_DB_PATH)
+        os.makedirs(os.path.dirname(db_path), exist_ok=True)
+        initialize_base_npcs_if_needed(db_path)
+        return jsonify({
+            "message": "npcsh initialized",
+            "path": os.path.expanduser("~/.npcsh"),
+            "error": None
+        })
+    except Exception as e:
+        print(f"Error initializing npcsh: {e}")
+        return jsonify({"error": str(e)}), 500
+
 @app.route("/api/context/websites", methods=["GET"])
 def get_context_websites():
     """Gets the websites list from a .ctx file."""
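These two endpoints pair naturally: a client checks first and only initializes when needed. A hedged client sketch; the base URL and port are assumptions, since this serve.py excerpt does not show where the Flask app binds:

import requests

BASE = "http://localhost:5337"  # assumed dev address; adjust to your server

status = requests.get(f"{BASE}/api/npcsh/check").json()
if not status.get("initialized"):
    result = requests.post(f"{BASE}/api/npcsh/init").json()
    print(result.get("message"))  # "npcsh initialized" on success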
@@ -2537,13 +2576,15 @@ def get_attachment_response():
 
 IMAGE_MODELS = {
     "openai": [
+        {"value": "gpt-image-1.5", "display_name": "GPT-Image-1.5"},
+        {"value": "gpt-image-1", "display_name": "GPT-Image-1"},
         {"value": "dall-e-3", "display_name": "DALL-E 3"},
         {"value": "dall-e-2", "display_name": "DALL-E 2"},
-        {"value": "gpt-image-1", "display_name": "GPT-Image-1"},
     ],
     "gemini": [
+        {"value": "gemini-3-pro-image-preview", "display_name": "Gemini 3 Pro Image"},
         {"value": "gemini-2.5-flash-image-preview", "display_name": "Gemini 2.5 Flash Image"},
-        {"value": "imagen-3.0-generate-002", "display_name": "Imagen 3.0 Generate (Preview)"},
+        {"value": "imagen-3.0-generate-002", "display_name": "Imagen 3.0 Generate (Preview)"},
     ],
     "diffusers": [
         {"value": "runwayml/stable-diffusion-v1-5", "display_name": "Stable Diffusion v1.5"},
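The registry maps a provider key to value/display_name pairs, with the newest models reordered to the front. A small sketch of how a frontend might flatten it into dropdown options; flatten_image_models is an illustrative helper, not part of serve.py:

# Illustrative: flatten the provider -> models registry into option dicts.
def flatten_image_models(image_models):
    return [
        {"provider": provider, **entry}
        for provider, entries in image_models.items()
        for entry in entries
    ]

# First "openai" option after this change:
# {"provider": "openai", "value": "gpt-image-1.5", "display_name": "GPT-Image-1.5"}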
@@ -3551,7 +3592,15 @@ def stream():
     stream_response = {"output": "", "messages": messages}
 
     exe_mode = data.get('executionMode','chat')
-
+
+    # Initialize api_url with default before checking npc_object
+    api_url = None
+    if npc_object is not None:
+        try:
+            api_url = npc_object.api_url if npc_object.api_url else None
+        except AttributeError:
+            api_url = None
+
     if exe_mode == 'chat':
         stream_response = get_llm_response(
             commandstr,
@@ -3560,7 +3609,7 @@ def stream():
             model=model,
             provider=provider,
             npc=npc_object,
-            api_url =
+            api_url = api_url,
             team=team_object,
             stream=True,
             attachments=attachment_paths_for_llm,
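Together these two hunks fix the dangling api_url keyword by resolving it from the NPC object first. The try/except AttributeError lookup can also be written more compactly with getattr; a hedged equivalent sketch that matches the new code's behavior (missing attribute and falsy value both become None):

# Illustrative alternative to the lookup added above, not the shipped code.
api_url = None
if npc_object is not None:
    api_url = getattr(npc_object, 'api_url', None) or None  # missing or "" -> None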
@@ -4935,22 +4984,37 @@ def get_local_model_status():
             import requests
             response = requests.get('http://127.0.0.1:1234/v1/models', timeout=2)
             if response.ok:
-                return jsonify({'status': 'running'})
+                return jsonify({'status': 'running', 'running': True})
         except:
             pass
-        return jsonify({'status': 'not_running'})
+        return jsonify({'status': 'not_running', 'running': False})
 
     elif provider == 'llamacpp':
         try:
             import requests
             response = requests.get('http://127.0.0.1:8080/v1/models', timeout=2)
             if response.ok:
-                return jsonify({'status': 'running'})
+                return jsonify({'status': 'running', 'running': True})
         except:
             pass
-        return jsonify({'status': 'not_running'})
+        return jsonify({'status': 'not_running', 'running': False})
+
+    return jsonify({'status': 'unknown', 'running': False, 'error': f'Unknown provider: {provider}'})
 
-
+
+# ============== Activity Tracking ==============
+@app.route('/api/activity/track', methods=['POST'])
+def track_activity():
+    """Track user activity for predictive features."""
+    try:
+        data = request.json or {}
+        # For now, just acknowledge the activity - can be expanded later
+        # to store in database for RNN-based predictions
+        activity_type = data.get('type', 'unknown')
+        return jsonify({'success': True, 'tracked': activity_type})
+    except Exception as e:
+        print(f"Error tracking activity: {e}")
+        return jsonify({'success': False, 'error': str(e)}), 500
 
 
 def start_flask_server(
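The new activity endpoint currently just echoes back the activity type it receives. A hedged client sketch, using the same assumed base URL as above ('type' is the only field the handler reads):

import requests

BASE = "http://localhost:5337"  # assumed dev address; adjust to your server
resp = requests.post(f"{BASE}/api/activity/track", json={"type": "file_opened"})
print(resp.json())  # {'success': True, 'tracked': 'file_opened'}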
{npcpy-1.3.8.dist-info → npcpy-1.3.10.dist-info}/RECORD
CHANGED
@@ -4,10 +4,10 @@ npcpy/llm_funcs.py,sha256=YtKwLnJLfhr6U3EEp-js41bQbbni-0HVH_zcqANoOVU,76040
 npcpy/main.py,sha256=RWoRIj6VQLxKdOKvdVyaq2kwG35oRpeXPvp1CAAoG-w,81
 npcpy/ml_funcs.py,sha256=UI7k7JR4XOH_VXR-xxLaO4r9Kyx_jBaEnp3TUIY7ZLQ,22657
 npcpy/npc_array.py,sha256=fVTxcMiXV-lvltmuwaRnTU9D3ikPq3-7k5wzp7MA5OY,40224
-npcpy/npc_compiler.py,sha256=
-npcpy/npc_sysenv.py,sha256=
+npcpy/npc_compiler.py,sha256=9U6_F7qweURaL2nQgrF7I9OQEmYjOENmkBV-YChr3oM,118402
+npcpy/npc_sysenv.py,sha256=VH7le3xwxHvO55ZYCG1e-gj8X5YTSIqbIiU6ifSqhss,38917
 npcpy/npcs.py,sha256=eExuVsbTfrRobTRRptRpDm46jCLWUgbvy4_U7IUQo-c,744
-npcpy/serve.py,sha256=
+npcpy/serve.py,sha256=pNrjo2Jk9CuvLizBIHUCVvEgGxJWKX1wqT5omtaMhOE,204093
 npcpy/tools.py,sha256=A5_oVmZkzGnI3BI-NmneuxeXQq-r29PbpAZP4nV4jrc,5303
 npcpy/data/__init__.py,sha256=1tcoChR-Hjn905JDLqaW9ElRmcISCTJdE7BGXPlym2Q,642
 npcpy/data/audio.py,sha256=3qryGXnWHa4JFMonjuX-lf0fCrF8jmbHe7mHAuOdua0,12397
@@ -30,7 +30,7 @@ npcpy/gen/audio_gen.py,sha256=w4toESu7nmli1T5FOwRRCGC_QK9W-SMWknYYkbRv9jE,635
 npcpy/gen/embeddings.py,sha256=QStTJ2ELiC379OEZsLEgGGIIFD267Y8zQchs7HRn2Zg,2089
 npcpy/gen/image_gen.py,sha256=SOZYpvlxSiAdDK9j750OEBKjm22OUNdXg1kQ10sJSy0,21853
 npcpy/gen/ocr.py,sha256=rgmXWHrCYX1Po-qG_LrNFbVYEZ8aaupxFTgparcoB_Y,6554
-npcpy/gen/response.py,sha256=
+npcpy/gen/response.py,sha256=Pw01M0UxjsXOPJlvShAbq9n6IVnvEqxT6MQaLyEwJFs,48505
 npcpy/gen/video_gen.py,sha256=RFi3Zcq_Hn3HIcfoF3mijQ6G7RYFZaM_9pjPTh-8E64,3239
 npcpy/gen/world_gen.py,sha256=_8ytE7E3QVQ5qiX8DmOby-xd0d9zV20rRI6Wkpf-qcY,18922
 npcpy/memory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -53,8 +53,8 @@ npcpy/work/browser.py,sha256=p2PeaoZdAXipFuAgKCCB3aXXLE_p3yIRqC87KlZKZWc,679
 npcpy/work/desktop.py,sha256=F3I8mUtJp6LAkXodsh8hGZIncoads6c_2Utty-0EdDA,2986
 npcpy/work/plan.py,sha256=QyUwg8vElWiHuoS-xK4jXTxxHvkMD3VkaCEsCmrEPQk,8300
 npcpy/work/trigger.py,sha256=P1Y8u1wQRsS2WACims_2IdkBEar-iBQix-2TDWoW0OM,9948
-npcpy-1.3.
-npcpy-1.3.
-npcpy-1.3.
-npcpy-1.3.
-npcpy-1.3.
+npcpy-1.3.10.dist-info/licenses/LICENSE,sha256=j0YPvce7Ng9e32zYOu0EmXjXeJ0Nwawd0RA3uSGGH4E,1070
+npcpy-1.3.10.dist-info/METADATA,sha256=_BcYfu_xY5b9TeksWeL0h8XI4JSPm8fzfZ5mtfX1qeE,37885
+npcpy-1.3.10.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+npcpy-1.3.10.dist-info/top_level.txt,sha256=g1pbSvrOOncB74Bg5-J0Olg4V0A5VzDw-Xz5YObq8BU,6
+npcpy-1.3.10.dist-info/RECORD,,
{npcpy-1.3.8.dist-info → npcpy-1.3.10.dist-info}/WHEEL: file without changes
{npcpy-1.3.8.dist-info → npcpy-1.3.10.dist-info}/licenses/LICENSE: file without changes
{npcpy-1.3.8.dist-info → npcpy-1.3.10.dist-info}/top_level.txt: file without changes