npcpy 1.3.7-py3-none-any.whl → 1.3.9-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
npcpy/gen/response.py CHANGED
@@ -745,6 +745,23 @@ def get_litellm_response(
  auto_process_tool_calls=auto_process_tool_calls,
  **kwargs
  )
+ elif provider == 'lmstudio' or (model and '.lmstudio' in str(model)):
+ # LM Studio uses OpenAI-compatible API on port 1234
+ # Also detect models with .lmstudio in path (e.g., /home/user/.lmstudio/models/...)
+ api_url = api_url or "http://127.0.0.1:1234/v1"
+ provider = "openai"
+ api_key = api_key or "lm-studio" # LM Studio doesn't require real API key
+ # Default timeout for local CPU inference (can be overridden via kwargs)
+ if 'timeout' not in kwargs:
+ kwargs['timeout'] = 300 # 5 minutes for CPU inference with large prompts
+ elif provider == 'llamacpp-server':
+ # llama.cpp server uses OpenAI-compatible API on port 8080
+ api_url = api_url or "http://127.0.0.1:8080/v1"
+ provider = "openai"
+ api_key = api_key or "llamacpp" # llama.cpp server doesn't require real API key
+ # Default timeout for local CPU inference (can be overridden via kwargs)
+ if 'timeout' not in kwargs:
+ kwargs['timeout'] = 300 # 5 minutes for CPU inference with large prompts

  if attachments:
  for attachment in attachments:
@@ -875,7 +892,16 @@ def get_litellm_response(
  if provider is None:
  provider = os.environ.get("NPCSH_CHAT_PROVIDER")

- api_params["model"] = f"{provider}/{model}" if "/" not in model else model
+ # For OpenAI-compatible endpoints with api_base, always prefix with provider
+ # LiteLLM needs this to know how to route the request
+ # Also handle file paths (starting with /) which contain slashes but still need prefix
+ if "api_base" in api_params and provider == "openai":
+ api_params["model"] = f"openai/{model}"
+ elif "/" not in model or model.startswith("/"):
+ # No provider prefix in model, or model is a file path - add provider prefix
+ api_params["model"] = f"{provider}/{model}"
+ else:
+ api_params["model"] = model
  if api_key is not None:
  api_params["api_key"] = api_key
  if tools:
@@ -888,7 +914,7 @@ def get_litellm_response(
  if key in [
  "stop", "temperature", "top_p", "max_tokens", "max_completion_tokens",
  "extra_headers", "parallel_tool_calls",
- "response_format", "user",
+ "response_format", "user", "timeout",
  ]:
  api_params[key] = value

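Taken together, the response.py hunks above route the new local providers through LiteLLM's OpenAI-compatible path: the provider is rewritten to "openai", a default base URL and placeholder key are filled in, a 300-second timeout is applied for CPU inference, and the model name is prefixed so LiteLLM can route it. A minimal standalone sketch of that mapping (resolve_local_provider is a hypothetical helper written for illustration, not part of the npcpy API):

    def resolve_local_provider(provider, model=None, api_url=None, api_key=None, timeout=None):
        """Illustrative sketch of the lmstudio / llamacpp-server routing shown in the diff."""
        if provider == "lmstudio" or (model and ".lmstudio" in str(model)):
            api_url = api_url or "http://127.0.0.1:1234/v1"   # LM Studio's OpenAI-compatible server
            provider = "openai"
            api_key = api_key or "lm-studio"                  # placeholder; no real key required
            timeout = timeout or 300                          # generous default for CPU inference
        elif provider == "llamacpp-server":
            api_url = api_url or "http://127.0.0.1:8080/v1"   # llama.cpp server default port
            provider = "openai"
            api_key = api_key or "llamacpp"
            timeout = timeout or 300
        # Model prefixing mirrors the second hunk: LiteLLM routes on "<provider>/<model>",
        # and bare names or filesystem paths (which start with "/") still need the prefix.
        if api_url and provider == "openai":
            model = f"openai/{model}"
        elif model and ("/" not in model or model.startswith("/")):
            model = f"{provider}/{model}"
        return provider, model, api_url, api_key, timeout

With an arbitrary model name, resolve_local_provider("lmstudio", "qwen2.5-7b-instruct") would come back as ("openai", "openai/qwen2.5-7b-instruct", "http://127.0.0.1:1234/v1", "lm-studio", 300).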
npcpy/llm_funcs.py CHANGED
@@ -589,8 +589,20 @@ def check_llm_command(
  # If we have jinxs, use ReAct fallback (JSON prompting) instead of tool_calls
  if jinxs:
  return _react_fallback(
- command, model, provider, api_url, api_key, npc, team,
- full_messages, images, stream, context, jinxs, extra_globals, max_iterations
+ command,
+ model,
+ provider,
+ api_url,
+ api_key,
+ npc,
+ team,
+ full_messages,
+ images,
+ stream,
+ context,
+ jinxs,
+ extra_globals,
+ max_iterations
  )

  # No jinxs - just get a direct response
@@ -633,8 +645,20 @@ def check_llm_command(


  def _react_fallback(
- command, model, provider, api_url, api_key, npc, team,
- messages, images, stream, context, jinxs, extra_globals, max_iterations
+ command,
+ model,
+ provider,
+ api_url,
+ api_key,
+ npc,
+ team,
+ messages,
+ images,
+ stream,
+ context,
+ jinxs,
+ extra_globals,
+ max_iterations
  ):
  """ReAct-style fallback for models without tool calling."""
  import logging
@@ -705,6 +729,12 @@ Instructions:
  print(f"[REACT-DEBUG] Full response keys: {response.keys()}")
  print(f"[REACT-DEBUG] Raw response['response']: {str(response.get('response', 'NONE'))[:500]}")
  print(f"[REACT-DEBUG] Raw decision type: {type(decision)}, value: {str(decision)[:500]}")
+
+ # Handle None response - model decided no action needed
+ if decision is None:
+ logger.debug(f"[_react_fallback] Decision is None, returning current output")
+ return {"messages": current_messages, "output": "", "usage": total_usage, "jinx_executions": jinx_executions}
+
  if isinstance(decision, str):
  try:
  decision = json.loads(decision)
@@ -712,7 +742,7 @@ Instructions:
  logger.debug(f"[_react_fallback] Could not parse JSON, returning as text")
  return {"messages": current_messages, "output": decision, "usage": total_usage, "jinx_executions": jinx_executions}

- logger.debug(f"[_react_fallback] Parsed decision action: {decision.get('action')}")
+ logger.debug(f"[_react_fallback] Parsed decision action: {decision.get('action') if decision else 'None'}")
  if decision.get("action") == "answer":
  output = decision.get("response", "")

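The llm_funcs.py hunks are mostly argument-list reflows; the substantive change is the guard for a None decision before the ReAct loop tries to parse it. A small sketch of that parsing order, with parse_react_decision as a hypothetical helper rather than an npcpy function:

    import json

    def parse_react_decision(decision):
        """Illustrative sketch: None means no action, a str may be JSON, a dict passes through."""
        if decision is None:
            return {"action": "answer", "response": ""}            # nothing to do, empty output
        if isinstance(decision, str):
            try:
                decision = json.loads(decision)
            except json.JSONDecodeError:
                return {"action": "answer", "response": decision}  # plain text, return as-is
        return decision

    print(parse_react_decision(None))                              # {'action': 'answer', 'response': ''}
    print(parse_react_decision('{"action": "answer", "response": "hi"}'))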
npcpy/npc_compiler.py CHANGED
@@ -185,13 +185,17 @@ def initialize_npc_project(
  directory = os.getcwd()
  directory = os.path.expanduser(os.fspath(directory))

+ # Create top-level directories for assets
+ for subdir in ["images", "models", "attachments", "mcp_servers"]:
+ os.makedirs(os.path.join(directory, subdir), exist_ok=True)
+
  npc_team_dir = os.path.join(directory, "npc_team")
  os.makedirs(npc_team_dir, exist_ok=True)
-
- for subdir in ["jinxs",
- "assembly_lines",
- "sql_models",
- "jobs",
+
+ for subdir in ["jinxs",
+ "assembly_lines",
+ "sql_models",
+ "jobs",
  "triggers",
  "tools"]:
  os.makedirs(os.path.join(npc_team_dir, subdir), exist_ok=True)
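For reference, after this change a freshly initialized project is laid out roughly as below (directory names taken from the two loops above; the files that the rest of initialize_npc_project writes are omitted):

    <project>/
        images/
        models/
        attachments/
        mcp_servers/
        npc_team/
            jinxs/
            assembly_lines/
            sql_models/
            jobs/
            triggers/
            tools/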
@@ -2114,17 +2118,19 @@ Requirements:
  context=None,
  team=None,
  stream=False,
- jinxs=None):
+ jinxs=None,
+ use_jinxs=True):
  """Check if a command is for the LLM"""
  if context is None:
  context = self.shared_context

  if team:
  self._current_team = team
-
- # Use provided jinxs or fall back to NPC's own jinxs
- jinxs_to_use = jinxs if jinxs is not None else self.jinxs_dict
-
+ if jinxs is None and use_jinxs:
+ jinxs_to_use = self.jinxs_dict
+ elif jinxs is not None and use_jinxs:
+ jinxs_to_use = jinxs
+
  return npy.llm_funcs.check_llm_command(
  command,
  model=self.model,
npcpy/npc_sysenv.py CHANGED
@@ -266,6 +266,8 @@ def get_locally_available_models(project_directory, airplane_mode=False):
  'gemini-2.0-pro',
  'gemini-1.5-pro',
  'gemini-1.5-flash'
+ 'gemini-3-flash-preview',
+ 'gemini-3-pro-preview',
  ]

  for m in client.models.list():
@@ -332,6 +334,30 @@ def get_locally_available_models(project_directory, airplane_mode=False):
  except Exception as e:
  logging.info(f"Error scanning GGUF directory {scan_dir}: {e}")

+ # Check for LM Studio server (OpenAI-compatible API on port 1234)
+ try:
+ import requests
+ response = requests.get('http://127.0.0.1:1234/v1/models', timeout=1)
+ if response.ok:
+ data = response.json()
+ for model in data.get('data', []):
+ model_id = model.get('id', model.get('name', 'unknown'))
+ available_models[model_id] = "lmstudio"
+ except Exception as e:
+ logging.debug(f"LM Studio not available: {e}")
+
+ # Check for llama.cpp server (OpenAI-compatible API on port 8080)
+ try:
+ import requests
+ response = requests.get('http://127.0.0.1:8080/v1/models', timeout=1)
+ if response.ok:
+ data = response.json()
+ for model in data.get('data', []):
+ model_id = model.get('id', model.get('name', 'unknown'))
+ available_models[model_id] = "llamacpp-server"
+ except Exception as e:
+ logging.debug(f"llama.cpp server not available: {e}")
+
  return available_models

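Both probes above hit the OpenAI-compatible /v1/models endpoint with a one-second timeout so a missing server counts as absent rather than an error. A standalone sketch of the same pattern, assuming the default ports shown in the diff, for readers who want to check the endpoints directly:

    import requests

    # Default local endpoints from the diff; both speak the OpenAI /v1/models schema.
    ENDPOINTS = {
        "lmstudio": "http://127.0.0.1:1234/v1/models",
        "llamacpp-server": "http://127.0.0.1:8080/v1/models",
    }

    def probe_local_models():
        available = {}
        for provider, url in ENDPOINTS.items():
            try:
                resp = requests.get(url, timeout=1)   # short timeout: a slow server counts as absent
                resp.raise_for_status()
                for entry in resp.json().get("data", []):
                    available[entry.get("id", "unknown")] = provider
            except requests.RequestException:
                continue                              # server not running; skip it
        return available

    print(probe_local_models())   # e.g. {} when neither server is up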
@@ -853,8 +879,7 @@ The current date and time are : {datetime.now().strftime("%Y-%m-%d %H:%M:%S")}
  db_path = npc.db_conn.url.database
  elif hasattr(npc.db_conn, "database"):
  db_path = npc.db_conn.database
- system_message += """What follows is in
- formation about the database connection. If you are asked to execute queries with tools, use this information.
+ system_message += """What follows is information about the database connection. If you are asked to execute queries with tools, use this information.
  If you are asked for help with debugging queries, use this information.
  Do not unnecessarily reference that you possess this information unless it is
  specifically relevant to the request.
npcpy/serve.py CHANGED
@@ -737,9 +737,10 @@ def get_global_settings():
  "embedding_provider": "ollama",
  "search_provider": "perplexity",
  "default_folder": os.path.expanduser("~/.npcsh/"),
- "is_predictive_text_enabled": False, # Default value for the new setting
- "predictive_text_model": "llama3.2", # Default predictive text model
- "predictive_text_provider": "ollama", # Default predictive text provider
+ "is_predictive_text_enabled": False,
+ "predictive_text_model": "llama3.2",
+ "predictive_text_provider": "ollama",
+ "backend_python_path": "", # Empty means use bundled backend
  }
  global_vars = {}

@@ -772,9 +773,10 @@ def get_global_settings():
  "NPCSH_SEARCH_PROVIDER": "search_provider",
  "NPCSH_STREAM_OUTPUT": "NPCSH_STREAM_OUTPUT",
  "NPC_STUDIO_DEFAULT_FOLDER": "default_folder",
- "NPC_STUDIO_PREDICTIVE_TEXT_ENABLED": "is_predictive_text_enabled", # New mapping
- "NPC_STUDIO_PREDICTIVE_TEXT_MODEL": "predictive_text_model", # New mapping
- "NPC_STUDIO_PREDICTIVE_TEXT_PROVIDER": "predictive_text_provider", # New mapping
+ "NPC_STUDIO_PREDICTIVE_TEXT_ENABLED": "is_predictive_text_enabled",
+ "NPC_STUDIO_PREDICTIVE_TEXT_MODEL": "predictive_text_model",
+ "NPC_STUDIO_PREDICTIVE_TEXT_PROVIDER": "predictive_text_provider",
+ "BACKEND_PYTHON_PATH": "backend_python_path", # Custom Python for backend
  }

  if key in key_mapping:
@@ -1067,9 +1069,10 @@ def save_global_settings():
  "search_provider": "NPCSH_SEARCH_PROVIDER",
  "NPCSH_STREAM_OUTPUT": "NPCSH_STREAM_OUTPUT",
  "default_folder": "NPC_STUDIO_DEFAULT_FOLDER",
- "is_predictive_text_enabled": "NPC_STUDIO_PREDICTIVE_TEXT_ENABLED", # New mapping
- "predictive_text_model": "NPC_STUDIO_PREDICTIVE_TEXT_MODEL", # New mapping
- "predictive_text_provider": "NPC_STUDIO_PREDICTIVE_TEXT_PROVIDER", # New mapping
+ "is_predictive_text_enabled": "NPC_STUDIO_PREDICTIVE_TEXT_ENABLED",
+ "predictive_text_model": "NPC_STUDIO_PREDICTIVE_TEXT_MODEL",
+ "predictive_text_provider": "NPC_STUDIO_PREDICTIVE_TEXT_PROVIDER",
+ "backend_python_path": "BACKEND_PYTHON_PATH", # Custom Python for backend (requires restart)
  }

  os.makedirs(os.path.dirname(npcshrc_path), exist_ok=True)
@@ -2349,6 +2352,43 @@ def init_project_team():
  print(f"Error initializing project team: {e}")
  return jsonify({"error": str(e)}), 500

+ @app.route("/api/npcsh/check", methods=["GET"])
+ def check_npcsh_folder():
+ """Check if ~/.npcsh folder exists and has a valid npc_team."""
+ try:
+ npcsh_path = os.path.expanduser("~/.npcsh")
+ npc_team_path = os.path.join(npcsh_path, "npc_team")
+
+ exists = os.path.exists(npcsh_path)
+ has_npc_team = os.path.exists(npc_team_path)
+ has_forenpc = os.path.exists(os.path.join(npc_team_path, "forenpc.npc")) if has_npc_team else False
+
+ return jsonify({
+ "exists": exists,
+ "has_npc_team": has_npc_team,
+ "has_forenpc": has_forenpc,
+ "path": npcsh_path,
+ "error": None
+ })
+ except Exception as e:
+ print(f"Error checking npcsh folder: {e}")
+ return jsonify({"error": str(e)}), 500
+
+ @app.route("/api/npcsh/init", methods=["POST"])
+ def init_npcsh_folder():
+ """Initialize the ~/.npcsh folder with a default npc_team."""
+ try:
+ npcsh_path = os.path.expanduser("~/.npcsh")
+ result = initialize_npc_project(directory=npcsh_path)
+ return jsonify({
+ "message": result,
+ "path": npcsh_path,
+ "error": None
+ })
+ except Exception as e:
+ print(f"Error initializing npcsh folder: {e}")
+ return jsonify({"error": str(e)}), 500
+
  @app.route("/api/context/websites", methods=["GET"])
  def get_context_websites():
  """Gets the websites list from a .ctx file."""
@@ -2537,13 +2577,15 @@ def get_attachment_response():

  IMAGE_MODELS = {
  "openai": [
+ {"value": "gpt-image-1.5", "display_name": "GPT-Image-1.5"},
+ {"value": "gpt-image-1", "display_name": "GPT-Image-1"},
  {"value": "dall-e-3", "display_name": "DALL-E 3"},
  {"value": "dall-e-2", "display_name": "DALL-E 2"},
- {"value": "gpt-image-1", "display_name": "GPT-Image-1"},
  ],
  "gemini": [
+ {"value": "gemini-3-pro-image-preview", "display_name": "Gemini 3 Pro Image"},
  {"value": "gemini-2.5-flash-image-preview", "display_name": "Gemini 2.5 Flash Image"},
- {"value": "imagen-3.0-generate-002", "display_name": "Imagen 3.0 Generate (Preview)"},
+ {"value": "imagen-3.0-generate-002", "display_name": "Imagen 3.0 Generate (Preview)"},
  ],
  "diffusers": [
  {"value": "runwayml/stable-diffusion-v1-5", "display_name": "Stable Diffusion v1.5"},
@@ -3551,7 +3593,15 @@ def stream():
  stream_response = {"output": "", "messages": messages}

  exe_mode = data.get('executionMode','chat')
-
+
+ # Initialize api_url with default before checking npc_object
+ api_url = None
+ if npc_object is not None:
+ try:
+ api_url = npc_object.api_url if npc_object.api_url else None
+ except AttributeError:
+ api_url = None
+
  if exe_mode == 'chat':
  stream_response = get_llm_response(
  commandstr,
@@ -3560,7 +3610,7 @@ def stream():
  model=model,
  provider=provider,
  npc=npc_object,
- api_url = npc_object.api_url if npc_object.api_url else None,
+ api_url = api_url,
  team=team_object,
  stream=True,
  attachments=attachment_paths_for_llm,
@@ -4935,22 +4985,37 @@ def get_local_model_status():
  import requests
  response = requests.get('http://127.0.0.1:1234/v1/models', timeout=2)
  if response.ok:
- return jsonify({'status': 'running'})
+ return jsonify({'status': 'running', 'running': True})
  except:
  pass
- return jsonify({'status': 'not_running'})
+ return jsonify({'status': 'not_running', 'running': False})

  elif provider == 'llamacpp':
  try:
  import requests
  response = requests.get('http://127.0.0.1:8080/v1/models', timeout=2)
  if response.ok:
- return jsonify({'status': 'running'})
+ return jsonify({'status': 'running', 'running': True})
  except:
  pass
- return jsonify({'status': 'not_running'})
+ return jsonify({'status': 'not_running', 'running': False})
+
+ return jsonify({'status': 'unknown', 'running': False, 'error': f'Unknown provider: {provider}'})

- return jsonify({'status': 'unknown', 'error': f'Unknown provider: {provider}'})
+
+ # ============== Activity Tracking ==============
+ @app.route('/api/activity/track', methods=['POST'])
+ def track_activity():
+ """Track user activity for predictive features."""
+ try:
+ data = request.json or {}
+ # For now, just acknowledge the activity - can be expanded later
+ # to store in database for RNN-based predictions
+ activity_type = data.get('type', 'unknown')
+ return jsonify({'success': True, 'tracked': activity_type})
+ except Exception as e:
+ print(f"Error tracking activity: {e}")
+ return jsonify({'success': False, 'error': str(e)}), 500


  def start_flask_server(
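The new /api/activity/track route currently just acknowledges the event; the comment in the diff notes it may later persist activity for predictions. A client-side sketch, again with a placeholder base URL:

    import requests

    BASE = "http://localhost:5337"   # placeholder port, as above

    # POST a JSON body with a "type" field; the server echoes back what it tracked.
    resp = requests.post(f"{BASE}/api/activity/track",
                         json={"type": "conversation_opened"},
                         timeout=5)
    print(resp.json())   # {'success': True, 'tracked': 'conversation_opened'}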
npcpy-1.3.7.dist-info/METADATA → npcpy-1.3.9.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: npcpy
- Version: 1.3.7
+ Version: 1.3.9
  Summary: npcpy is the premier open-source library for integrating LLMs and Agents into python systems.
  Home-page: https://github.com/NPC-Worldwide/npcpy
  Author: Christopher Agostino
npcpy-1.3.7.dist-info/RECORD → npcpy-1.3.9.dist-info/RECORD CHANGED
@@ -1,13 +1,13 @@
  npcpy/__init__.py,sha256=uJcJGjR1mWvE69GySNAufkgiRwJA28zdObDBWaxp0tY,505
  npcpy/build_funcs.py,sha256=vOz6pjV0zS-kYKo0ux-pn9AcppVaR8KIDi2ldOxb3RQ,7479
- npcpy/llm_funcs.py,sha256=1ZEIOrNhkMKdoufYwjA9Ej24pYKAEa85xgE3UkuYkok,75493
+ npcpy/llm_funcs.py,sha256=YtKwLnJLfhr6U3EEp-js41bQbbni-0HVH_zcqANoOVU,76040
  npcpy/main.py,sha256=RWoRIj6VQLxKdOKvdVyaq2kwG35oRpeXPvp1CAAoG-w,81
  npcpy/ml_funcs.py,sha256=UI7k7JR4XOH_VXR-xxLaO4r9Kyx_jBaEnp3TUIY7ZLQ,22657
  npcpy/npc_array.py,sha256=fVTxcMiXV-lvltmuwaRnTU9D3ikPq3-7k5wzp7MA5OY,40224
- npcpy/npc_compiler.py,sha256=trMvZHZjGDHo0TvjxZc8QJZXit78-rPDpxU0wsxtocw,118139
- npcpy/npc_sysenv.py,sha256=QOwnxiMmJloYxYbVtu5QjWnaukNgXppsC0kiLXT2uCc,37806
+ npcpy/npc_compiler.py,sha256=9U6_F7qweURaL2nQgrF7I9OQEmYjOENmkBV-YChr3oM,118402
+ npcpy/npc_sysenv.py,sha256=VH7le3xwxHvO55ZYCG1e-gj8X5YTSIqbIiU6ifSqhss,38917
  npcpy/npcs.py,sha256=eExuVsbTfrRobTRRptRpDm46jCLWUgbvy4_U7IUQo-c,744
- npcpy/serve.py,sha256=mSLVLZSDy-269gKI1CbwM4-QCqxnlmBQrt82xk0QOWI,201503
+ npcpy/serve.py,sha256=rl1SFx5F2W3ejJTwz3BfvMj8acEoPKCpHZzacjs-aGw,203964
  npcpy/tools.py,sha256=A5_oVmZkzGnI3BI-NmneuxeXQq-r29PbpAZP4nV4jrc,5303
  npcpy/data/__init__.py,sha256=1tcoChR-Hjn905JDLqaW9ElRmcISCTJdE7BGXPlym2Q,642
  npcpy/data/audio.py,sha256=3qryGXnWHa4JFMonjuX-lf0fCrF8jmbHe7mHAuOdua0,12397
@@ -30,7 +30,7 @@ npcpy/gen/audio_gen.py,sha256=w4toESu7nmli1T5FOwRRCGC_QK9W-SMWknYYkbRv9jE,635
  npcpy/gen/embeddings.py,sha256=QStTJ2ELiC379OEZsLEgGGIIFD267Y8zQchs7HRn2Zg,2089
  npcpy/gen/image_gen.py,sha256=SOZYpvlxSiAdDK9j750OEBKjm22OUNdXg1kQ10sJSy0,21853
  npcpy/gen/ocr.py,sha256=rgmXWHrCYX1Po-qG_LrNFbVYEZ8aaupxFTgparcoB_Y,6554
- npcpy/gen/response.py,sha256=g39md6cbBTu1f30pGWxtgIXYaBUv4875kFH7-Gj7-qg,46923
+ npcpy/gen/response.py,sha256=Pw01M0UxjsXOPJlvShAbq9n6IVnvEqxT6MQaLyEwJFs,48505
  npcpy/gen/video_gen.py,sha256=RFi3Zcq_Hn3HIcfoF3mijQ6G7RYFZaM_9pjPTh-8E64,3239
  npcpy/gen/world_gen.py,sha256=_8ytE7E3QVQ5qiX8DmOby-xd0d9zV20rRI6Wkpf-qcY,18922
  npcpy/memory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -53,8 +53,8 @@ npcpy/work/browser.py,sha256=p2PeaoZdAXipFuAgKCCB3aXXLE_p3yIRqC87KlZKZWc,679
  npcpy/work/desktop.py,sha256=F3I8mUtJp6LAkXodsh8hGZIncoads6c_2Utty-0EdDA,2986
  npcpy/work/plan.py,sha256=QyUwg8vElWiHuoS-xK4jXTxxHvkMD3VkaCEsCmrEPQk,8300
  npcpy/work/trigger.py,sha256=P1Y8u1wQRsS2WACims_2IdkBEar-iBQix-2TDWoW0OM,9948
- npcpy-1.3.7.dist-info/licenses/LICENSE,sha256=j0YPvce7Ng9e32zYOu0EmXjXeJ0Nwawd0RA3uSGGH4E,1070
- npcpy-1.3.7.dist-info/METADATA,sha256=6Va5OUEV4r5QfZ2B41TIwMO0S2ybymwrexoFFtoXwMo,37884
- npcpy-1.3.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- npcpy-1.3.7.dist-info/top_level.txt,sha256=g1pbSvrOOncB74Bg5-J0Olg4V0A5VzDw-Xz5YObq8BU,6
- npcpy-1.3.7.dist-info/RECORD,,
+ npcpy-1.3.9.dist-info/licenses/LICENSE,sha256=j0YPvce7Ng9e32zYOu0EmXjXeJ0Nwawd0RA3uSGGH4E,1070
+ npcpy-1.3.9.dist-info/METADATA,sha256=aQk_TjR_0MsXFWx5UU-ZN_Ze3s2PPd9m6EPL678WMps,37884
+ npcpy-1.3.9.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ npcpy-1.3.9.dist-info/top_level.txt,sha256=g1pbSvrOOncB74Bg5-J0Olg4V0A5VzDw-Xz5YObq8BU,6
+ npcpy-1.3.9.dist-info/RECORD,,