npcpy 1.2.35__py3-none-any.whl → 1.2.37__py3-none-any.whl

This diff shows the changes between publicly released package versions as they appear in their public registry. It is provided for informational purposes only.
npcpy/sql/npcsql.py CHANGED
@@ -253,56 +253,81 @@ class NPCSQLOperations:
  return None

  def execute_ai_function(
- self,
- func_name: str,
- df: pd.DataFrame,
+ self,
+ func_name: str,
+ df: pd.DataFrame,
  **params
  ) -> pd.Series:
  if func_name not in self.function_map:
  raise ValueError(f"Unknown AI function: {func_name}")
-
+
  func = self.function_map[func_name]
-
+
  npc_ref = params.get('npc', '')
  resolved_npc = self._resolve_npc_reference(npc_ref)
-
+
  resolved_team = self._get_team()
  if not resolved_team and hasattr(resolved_npc, 'team'):
  resolved_team = resolved_npc.team
-
- def apply_function_to_row(row):
+
+ total_rows = len(df)
+ print(f"NQL: Executing {func_name} on {total_rows} rows with NPC '{npc_ref}'...")
+
+ results = []
+ for idx, (row_idx, row) in enumerate(df.iterrows()):
  query_template = params.get('query', '')
  column_name = params.get('column', '')
-
+
  column_value = str(row[column_name]) if column_name and column_name in row.index else column_name

  if query_template:
  row_data = {
- col: str(row[col])
+ col: str(row[col])
  for col in df.columns
  }
- row_data['column_value'] = column_value
+ row_data['column_value'] = column_value
  query = query_template.format(**row_data)
  else:
  query = column_value
-
+
+ print(f" [{idx+1}/{total_rows}] Processing row {row_idx}...", end=" ", flush=True)
+
  sig = py_inspect.signature(func)
+
+ # Extract model/provider from NPC if available
+ npc_model = None
+ npc_provider = None
+ if resolved_npc and hasattr(resolved_npc, 'model'):
+ npc_model = resolved_npc.model
+ if resolved_npc and hasattr(resolved_npc, 'provider'):
+ npc_provider = resolved_npc.provider
+
  func_params = {
  k: v for k, v in {
- 'prompt': query,
- 'text': query,
+ 'prompt': query,
+ 'text': query,
  'npc': resolved_npc,
  'team': resolved_team,
- 'context': params.get('context', '')
+ 'context': params.get('context', ''),
+ 'model': npc_model or 'gpt-4o-mini',
+ 'provider': npc_provider or 'openai'
  }.items() if k in sig.parameters
  }
-
- result = func(**func_params)
- return (result.get("response", "")
- if isinstance(result, dict)
- else str(result))
-
- return df.apply(apply_function_to_row, axis=1)
+
+ try:
+ result = func(**func_params)
+ result_value = (result.get("response", "")
+ if isinstance(result, dict)
+ else str(result))
+ print(f"OK ({len(result_value)} chars)")
+ except Exception as e:
+ print(f"ERROR: {e}")
+ result_value = None
+
+ results.append(result_value)
+
+ print(f"NQL: Completed {func_name} on {total_rows} rows.")
+ return pd.Series(results, index=df.index)


  # --- SQL Model Definition ---
@@ -360,11 +385,10 @@ class SQLModel:
  def _extract_ai_functions(self) -> Dict[str, Dict]:
  """Extract AI function calls from SQL content with improved robustness."""
  import types
-
+
  ai_functions = {}
- # More robust pattern that handles nested parentheses better
- # This captures: nql.function_name(args...)
- pattern = r"nql\.(\w+)\s*\(((?:[^()]|\([^()]*\))*)\)"
+ # Pattern that captures: nql.function_name(args...) as alias
+ pattern = r"nql\.(\w+)\s*\(((?:[^()]|\([^()]*\))*)\)(\s+as\s+(\w+))?"

  matches = re.finditer(pattern, self.content, flags=re.DOTALL | re.IGNORECASE)

@@ -424,13 +448,17 @@ class SQLModel:
  if self.npc_directory and npc_param.startswith(self.npc_directory):
  npc_param = npc_param[len(self.npc_directory):].strip('/')

+ # Extract alias if present (group 4 from the pattern)
+ alias = match.group(4) if match.lastindex >= 4 and match.group(4) else f"{func_name}_result"
+
  ai_functions[func_name] = {
  "column": column_param,
  "npc": npc_param,
  "query": query_param,
  "context": context_param,
  "full_call_string": full_call_string,
- "original_func_name": match.group(1) # Store original case
+ "original_func_name": match.group(1), # Store original case
+ "alias": alias
  }
  else:
  print(f"DEBUG SQLModel: Function '{func_name}' not found in available LLM funcs ({available_functions}). Skipping this NQL call.")
@@ -546,14 +574,23 @@ class ModelCompiler:

  def replace_ref(match):
  model_name = match.group(1)
- if model_name not in self.models:
- raise ValueError(
- f"Model '{model_name}' referenced by '{{{{ ref('{model_name}') }}}}' not found during compilation."
- )
-
- if self.target_schema:
- return f"{self.target_schema}.{model_name}"
- return model_name
+
+ # First check if it's a model we're compiling
+ if model_name in self.models:
+ if self.target_schema:
+ return f"{self.target_schema}.{model_name}"
+ return model_name
+
+ # Otherwise, check if it's an existing table in the database
+ if self._table_exists(model_name):
+ if self.target_schema:
+ return f"{self.target_schema}.{model_name}"
+ return model_name
+
+ # If neither, raise an error
+ raise ValueError(
+ f"Model or table '{model_name}' referenced by '{{{{ ref('{model_name}') }}}}' not found during compilation."
+ )

  replaced_sql = re.sub(ref_pattern, replace_ref, sql_content)
  return replaced_sql
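The reworked `replace_ref` now resolves `{{ ref('name') }}` against models being compiled first, then against tables that already exist in the database, and only then raises. A compressed sketch of that resolution order; the standalone helper and the example names are hypothetical stand-ins, not package code:

```python
# Hypothetical stand-in for the new resolution order: compiled models first,
# then existing tables, otherwise an error.
def resolve_ref(model_name, models, existing_tables, target_schema=None):
    if model_name in models or model_name in existing_tables:
        return f"{target_schema}.{model_name}" if target_schema else model_name
    raise ValueError(f"Model or table '{model_name}' not found during compilation.")

print(resolve_ref("customer_summary", {"customer_summary"}, set(), "analytics"))  # analytics.customer_summary
print(resolve_ref("raw_events", set(), {"raw_events"}))                           # raw_events
```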
@@ -665,42 +702,42 @@ class ModelCompiler:
  for func_name, params in model.ai_functions.items():
  try:
  result_series = self.npc_operations.execute_ai_function(func_name, df, **params)
- result_column_name = f"{func_name}_{params.get('column', 'result')}" # Use a more specific alias if possible
+ # Use the SQL alias if available, otherwise generate one
+ result_column_name = params.get('alias', f"{func_name}_result")
  df[result_column_name] = result_series
- print(f"DEBUG: Python-driven AI function '{func_name}' executed. Result in column '{result_column_name}'.")
+ print(f"DEBUG: AI function '{func_name}' result stored in column '{result_column_name}'.")
  except Exception as e:
- print(f"ERROR: Executing Python-driven AI function '{func_name}': {e}. Assigning NULL.")
- df[f"{func_name}_{params.get('column', 'result')}"] = None
+ print(f"ERROR: Executing AI function '{func_name}': {e}. Assigning NULL.")
+ result_column_name = params.get('alias', f"{func_name}_result")
+ df[result_column_name] = None

  return df

  def _replace_nql_calls_with_null(self, sql_content: str, model: SQLModel) -> str:
  """
- Replaces specific nql.func(...) as alias calls with NULL as alias.
- This is used for the fallback path or to clean up any NQL calls missed by native translation.
+ Replaces nql.func(...) calls with NULL placeholders.
+ This is used for the fallback path where we execute SQL first, then apply AI functions in Python.
  """
  modified_sql = sql_content
- for func_name, params in model.ai_functions.items():
- original_nql_call = params.get('full_call_string')
- if not original_nql_call:
- print(f"WARNING: 'full_call_string' not found for NQL function '{func_name}'. Cannot replace with NULL.")
- continue

- # Extract alias from the original_nql_call string for NULL replacement
- alias_match = re.search(r'\s+as\s+(\w+)(?:\W|$)', original_nql_call, re.IGNORECASE)
- alias_name = alias_match.group(1) if alias_match else f"{func_name}_{params.get('column', 'result')}"
+ # Pattern to match nql.function_name(...) with nested parentheses support
+ # Also captures the 'as alias' part if present
+ nql_pattern = r'nql\.(\w+)\s*\(((?:[^()]|\([^()]*\))*)\)(\s+as\s+(\w+))?'

- # Create a robust pattern for the original NQL call to handle whitespace variability
- escaped_original_call = re.escape(original_nql_call.strip())
- pattern_to_sub = re.compile(r"\s*".join(escaped_original_call.split()), flags=re.IGNORECASE)
+ def replace_with_null(match):
+ func_name = match.group(1)
+ alias_part = match.group(3) or ''
+ alias_name = match.group(4)

- # Perform the replacement with NULL as alias
- old_sql = modified_sql
- modified_sql, count = pattern_to_sub.subn(f"NULL as {alias_name}", modified_sql)
- if count == 0:
- print(f"WARNING: NULL replacement failed for NQL call '{original_nql_call}' (no change to SQL). SQL still contains NQL call.")
- else:
- print(f"DEBUG: Replaced NQL call '{original_nql_call}' with 'NULL as {alias_name}'.")
+ # If no alias specified, generate one from function name
+ if not alias_name:
+ alias_name = f"{func_name}_result"
+ alias_part = f" as {alias_name}"
+
+ print(f"DEBUG: Replacing nql.{func_name}(...) with NULL{alias_part}")
+ return f"NULL{alias_part}"
+
+ modified_sql = re.sub(nql_pattern, replace_with_null, modified_sql, flags=re.IGNORECASE | re.DOTALL)

  return modified_sql
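Net effect of this hunk: on the fallback path, every `nql.<func>(...)` call is swapped for a `NULL` placeholder that keeps (or generates) the column alias, the plain SQL runs, and the alias column is later filled from the Python-side AI results. A small sketch of a simplified version of the replacement step, with invented sample SQL:

```python
# Illustrative only: the pattern matches the hunk above; the replacement is
# simplified and the SQL is made up.
import re

nql_pattern = r"nql\.(\w+)\s*\(((?:[^()]|\([^()]*\))*)\)(\s+as\s+(\w+))?"

def replace_with_null(match):
    alias = match.group(4) or f"{match.group(1)}_result"
    return f"NULL as {alias}"

sql = "SELECT id, nql.summarize('{feedback}', 'analyst.npc') as feedback_summary FROM raw_feedback"
print(re.sub(nql_pattern, replace_with_null, sql, flags=re.IGNORECASE | re.DOTALL))
# SELECT id, NULL as feedback_summary FROM raw_feedback
```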
npcpy-1.2.35.dist-info/METADATA → npcpy-1.2.37.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: npcpy
- Version: 1.2.35
+ Version: 1.2.37
  Summary: npcpy is the premier open-source library for integrating LLMs and Agents into python systems.
  Home-page: https://github.com/NPC-Worldwide/npcpy
  Author: Christopher Agostino
@@ -607,6 +607,178 @@ else:
  The intention for this model ensembler system is to mimic human cognition: pattern-matched gut reactions (System 1 of Kahneman) for familiar queries, falling back to deliberate reasoning (System 2 of Kahneman) for novel problems. Genetic algorithms evolve both knowledge structures and model specializations over time.


+ ## NPCArray - NumPy for AI
+
+ `npcpy` provides `NPCArray`, a NumPy-like interface for working with populations of models (LLMs, sklearn, PyTorch) at scale. Think of it as vectorized operations over AI models.
+
+ ### Core Concepts
+ - Model arrays support vectorized operations
+ - Operations are lazy until `.collect()` is called (like Spark)
+ - Same interface works for single models (treated as length-1 arrays)
+ - Supports ensemble voting, consensus, evolution, and more
+
+ ### Basic Usage
+
+ ```python
+ from npcpy.npc_array import NPCArray
+
+ # Create array of LLMs
+ models = NPCArray.from_llms(
+ ['llama3.2', 'gemma3:1b'],
+ providers='ollama'
+ )
+
+ print(f"Model array shape: {models.shape}") # (2,)
+
+ # Inference across all models - returns shape (n_models, n_prompts)
+ result = models.infer("What is 2+2? Just the number.").collect()
+
+ print(f"Model 1: {result.data[0, 0]}")
+ print(f"Model 2: {result.data[1, 0]}")
+ ```
+
+ ### Lazy Chaining & Ensemble Operations
+
+ ```python
+ from npcpy.npc_array import NPCArray
+
+ models = NPCArray.from_llms(['llama3.2', 'gemma3:1b', 'mistral:7b'])
+
+ # Build lazy computation graph - nothing executed yet
+ result = (
+ models
+ .infer("Is Python compiled or interpreted? One word.")
+ .map(lambda r: r.strip().lower()) # Clean responses
+ .vote(axis=0) # Majority voting across models
+ )
+
+ # Show the computation plan
+ result.explain()
+
+ # Now execute
+ answer = result.collect()
+ print(f"Consensus: {answer.data[0]}")
+ ```
+
+ ### Parameter Sweeps with Meshgrid
+
+ ```python
+ from npcpy.npc_array import NPCArray
+
+ # Cartesian product over parameters
+ configs = NPCArray.meshgrid(
+ models=['llama3.2', 'gemma3:1b'],
+ temperatures=[0.0, 0.5, 1.0]
+ )
+
+ print(f"Config array shape: {configs.shape}") # (6,) = 2 models × 3 temps
+
+ # Run inference with each config
+ result = configs.infer("Complete: The quick brown fox").collect()
+ ```
+
+ ### Matrix Sampling with get_llm_response
+
+ The `get_llm_response` function supports `matrix` and `n_samples` parameters for exploration:
+
+ ```python
+ from npcpy.llm_funcs import get_llm_response
+
+ # Matrix parameter - cartesian product over specified params
+ result = get_llm_response(
+ "Write a creative opening line.",
+ matrix={
+ 'model': ['llama3.2', 'gemma3:1b'],
+ 'temperature': [0.5, 1.0]
+ }
+ )
+ print(f"Number of runs: {len(result['runs'])}") # 4 = 2×2
+
+ # n_samples - multiple samples from same config
+ result = get_llm_response(
+ "Pick a random number 1-100.",
+ model='llama3.2',
+ n_samples=5
+ )
+ print(f"Samples: {[r['response'] for r in result['runs']]}")
+
+ # Combine both for full exploration
+ result = get_llm_response(
+ "Flip a coin: heads or tails?",
+ matrix={'model': ['llama3.2', 'gemma3:1b']},
+ n_samples=3 # 2 models × 3 samples = 6 runs
+ )
+ ```
+
+ ### sklearn Integration
+
+ ```python
+ from npcpy.npc_array import NPCArray
+ from sklearn.ensemble import RandomForestClassifier
+ from sklearn.linear_model import LogisticRegression
+ import numpy as np
+
+ # Create sample data
+ X_train = np.random.randn(100, 4)
+ y_train = (X_train[:, 0] > 0).astype(int)
+
+ # Pre-fit models
+ rf = RandomForestClassifier(n_estimators=10).fit(X_train, y_train)
+ lr = LogisticRegression().fit(X_train, y_train)
+
+ # Create array from fitted models
+ models = NPCArray.from_sklearn([rf, lr])
+
+ # Vectorized prediction
+ X_test = np.random.randn(20, 4)
+ predictions = models.predict(X_test).collect()
+
+ print(f"RF predictions: {predictions.data[0]}")
+ print(f"LR predictions: {predictions.data[1]}")
+ ```
+
+ ### ML Functions with Grid Search
+
+ ```python
+ from npcpy.ml_funcs import fit_model, score_model, ensemble_predict
+
+ # Grid search via matrix parameter
+ result = fit_model(
+ X_train, y_train,
+ model='RandomForestClassifier',
+ matrix={
+ 'n_estimators': [10, 50, 100],
+ 'max_depth': [3, 5, 10]
+ }
+ )
+
+ print(f"Fitted {len(result['models'])} model configurations")
+
+ # Ensemble voting with multiple models
+ predictions = ensemble_predict(X_test, result['models'], method='vote')
+ ```
+
+ ### Quick Utilities
+
+ ```python
+ from npcpy.npc_array import infer_matrix, ensemble_vote
+
+ # Quick matrix inference
+ result = infer_matrix(
+ prompts=["Hello", "Goodbye"],
+ models=['llama3.2', 'gemma3:1b']
+ )
+
+ # Quick ensemble vote
+ answer = ensemble_vote(
+ "What is the capital of France? One word.",
+ models=['llama3.2', 'gemma3:1b']
+ )
+ print(f"Voted answer: {answer}")
+ ```
+
+ See `examples/npc_array_examples.py` for more comprehensive examples.
+


  ## Serving an NPC Team

npcpy-1.2.35.dist-info/RECORD → npcpy-1.2.37.dist-info/RECORD CHANGED
@@ -1,10 +1,12 @@
- npcpy/__init__.py,sha256=9imxFtK74_6Rw9rz0kyMnZYl_voPb569tkTlYLt0Urg,131
- npcpy/llm_funcs.py,sha256=RtZAtX1_nvfn-X3IHVyQggDBXDzGxKEix_sS_iliNN0,87172
+ npcpy/__init__.py,sha256=uJcJGjR1mWvE69GySNAufkgiRwJA28zdObDBWaxp0tY,505
+ npcpy/llm_funcs.py,sha256=KJpjN6q5iW_qdUfgt4tzYENCAu86376io8eFZ7wp76Y,78081
  npcpy/main.py,sha256=RWoRIj6VQLxKdOKvdVyaq2kwG35oRpeXPvp1CAAoG-w,81
- npcpy/npc_compiler.py,sha256=lF0uhByMly5L-TJbsfPNavxSYFLVM9OdwoVMv6Ci2ko,99493
- npcpy/npc_sysenv.py,sha256=t9AswM-9_P2NaGsnlzTMc2hUfdSthi9ofbud6F1G7LM,35974
+ npcpy/ml_funcs.py,sha256=UI7k7JR4XOH_VXR-xxLaO4r9Kyx_jBaEnp3TUIY7ZLQ,22657
+ npcpy/npc_array.py,sha256=fVTxcMiXV-lvltmuwaRnTU9D3ikPq3-7k5wzp7MA5OY,40224
+ npcpy/npc_compiler.py,sha256=oGSn9-X-Miq-K37QfNo8_TFcWbOl8WTNHBPtkf6paws,104619
+ npcpy/npc_sysenv.py,sha256=rtE3KrXvIuOEpMq1CW5eK5K0o3f6mXagNXCeMnhHob4,36736
  npcpy/npcs.py,sha256=eExuVsbTfrRobTRRptRpDm46jCLWUgbvy4_U7IUQo-c,744
- npcpy/serve.py,sha256=lXVIxkGj69C0hV2L9X0OfzDCqfMtgF38S2SJUkiJqLk,148478
+ npcpy/serve.py,sha256=wbIXUFlmfKg72ZYoX_cBJ8FVDFabHsGnbMwMIj-412Y,174839
  npcpy/tools.py,sha256=A5_oVmZkzGnI3BI-NmneuxeXQq-r29PbpAZP4nV4jrc,5303
  npcpy/data/__init__.py,sha256=1tcoChR-Hjn905JDLqaW9ElRmcISCTJdE7BGXPlym2Q,642
  npcpy/data/audio.py,sha256=3qryGXnWHa4JFMonjuX-lf0fCrF8jmbHe7mHAuOdua0,12397
@@ -25,9 +27,9 @@ npcpy/ft/usft.py,sha256=O025GGYGZQf2ZVLowyAmBwh5bJyuy2dUAM6v03YcboY,3435
  npcpy/gen/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  npcpy/gen/audio_gen.py,sha256=w4toESu7nmli1T5FOwRRCGC_QK9W-SMWknYYkbRv9jE,635
  npcpy/gen/embeddings.py,sha256=QStTJ2ELiC379OEZsLEgGGIIFD267Y8zQchs7HRn2Zg,2089
- npcpy/gen/image_gen.py,sha256=PFaJAjMB0P1DV_x4tWXh25qGyjkntRejFLBnVPKSsqY,21730
+ npcpy/gen/image_gen.py,sha256=VflU_wJsKWJarOVwZtL2M8ymDFfKNz8WX66Rwk4obeo,21778
  npcpy/gen/ocr.py,sha256=rgmXWHrCYX1Po-qG_LrNFbVYEZ8aaupxFTgparcoB_Y,6554
- npcpy/gen/response.py,sha256=6iAOi4hxUxkTZ1d2suBUASOssT6pQnr3HFwZWrvmATg,31925
+ npcpy/gen/response.py,sha256=xSFHNZTDsebFo_nptWwSahpCU9_4pbCqabMFZ3X4_Bg,39979
  npcpy/gen/video_gen.py,sha256=RFi3Zcq_Hn3HIcfoF3mijQ6G7RYFZaM_9pjPTh-8E64,3239
  npcpy/memory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  npcpy/memory/command_history.py,sha256=vWzZ4F4o0XOSHn50SkdP885jG1aZIZvfcPAh8EZWlQk,54497
@@ -42,14 +44,14 @@ npcpy/sql/ai_function_tools.py,sha256=ZCpjVHtaMRdL2dXxbQy5NhhjtPrVViGT1wyEl8ADrk
  npcpy/sql/database_ai_adapters.py,sha256=CMlNGOhmJZhGB47RPvLIMqB61m_eYPVg1lwx42_b0jQ,6865
  npcpy/sql/database_ai_functions.py,sha256=XQCmaFOE1lNCnwrLTNpotYOlv6sx41bb8hxZI_sqpy8,6335
  npcpy/sql/model_runner.py,sha256=hJZ7hx2mwI-8DAh47Q6BwOsRjx30-HzebL4ajEUO4HA,5734
- npcpy/sql/npcsql.py,sha256=-PmV7AXSKwRog4gPHTeHzmvPrnDZOiccjgkUGv4DwEU,35614
+ npcpy/sql/npcsql.py,sha256=YRBbcsMPNWLhzZqoF0wAL2MaT6OOc0wh33Z0hb01rjc,36591
  npcpy/sql/sql_model_compiler.py,sha256=G-0dpTlgzc-dXy9YEsdWGjO8xaQ3jFNbc6oUja1Ef4M,5364
  npcpy/work/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  npcpy/work/desktop.py,sha256=F3I8mUtJp6LAkXodsh8hGZIncoads6c_2Utty-0EdDA,2986
  npcpy/work/plan.py,sha256=QyUwg8vElWiHuoS-xK4jXTxxHvkMD3VkaCEsCmrEPQk,8300
  npcpy/work/trigger.py,sha256=P1Y8u1wQRsS2WACims_2IdkBEar-iBQix-2TDWoW0OM,9948
- npcpy-1.2.35.dist-info/licenses/LICENSE,sha256=j0YPvce7Ng9e32zYOu0EmXjXeJ0Nwawd0RA3uSGGH4E,1070
- npcpy-1.2.35.dist-info/METADATA,sha256=_Ic5v_Q2AzFXGX13twz6tOcjhePm7OlVZZUZruHsebU,33537
- npcpy-1.2.35.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- npcpy-1.2.35.dist-info/top_level.txt,sha256=g1pbSvrOOncB74Bg5-J0Olg4V0A5VzDw-Xz5YObq8BU,6
- npcpy-1.2.35.dist-info/RECORD,,
+ npcpy-1.2.37.dist-info/licenses/LICENSE,sha256=j0YPvce7Ng9e32zYOu0EmXjXeJ0Nwawd0RA3uSGGH4E,1070
+ npcpy-1.2.37.dist-info/METADATA,sha256=xviLiFxgZ1uK876GGrIIm-P3uEJlwBvCtbbgn0vFUP4,37940
+ npcpy-1.2.37.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ npcpy-1.2.37.dist-info/top_level.txt,sha256=g1pbSvrOOncB74Bg5-J0Olg4V0A5VzDw-Xz5YObq8BU,6
+ npcpy-1.2.37.dist-info/RECORD,,