dayhoff-tools 1.1.21__py3-none-any.whl → 1.1.23__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -573,13 +573,16 @@ def update_dependencies(
  print(f"Updating {pyproject_path} version constraint...")
  try:
  content = pyproject_path.read_text()
+ package_name_re = re.escape(
+ "dayhoff-tools"
+ ) # Use the actual package name
  pattern = re.compile(
- "^(\s*['\"])dayhoff-tools(?:[><=~^][^'\"\[,]*)?(['\"].*)$", # Match rest of line
+ rf"^(\\s*['\"])({package_name_re})(\\[[^\\]]+\\])?(?:[^'\"[\,\\s]*)?(['\"].*)$",
  re.MULTILINE,
  )
- package_name = "dayhoff-tools"
+ # package_name variable is still 'dayhoff-tools'
  new_constraint_text = f">={locked_version}"
- replacement_string = f"\g<1>{package_name}{new_constraint_text}\g<2>"
+ replacement_string = rf"\\g<1>\\g<2>\\g<3>{new_constraint_text}\\g<4>"
  new_content, num_replacements = pattern.subn(
  replacement_string, content
  )
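
Note: the rewritten constraint-update logic above is a regex substitution over pyproject.toml. A minimal standalone sketch of the same idea, using a simplified pattern and a hypothetical dependency line (not the package's exact regex):

    import re

    # Hypothetical pyproject.toml dependency line, for illustration only.
    content = '    "dayhoff-tools[full]>=1.1.20",\n'
    locked_version = "1.1.23"

    # Simplified stand-in for the new pattern: capture the quoted prefix, the
    # package name, and optional extras, then swap in the new constraint.
    pattern = re.compile(
        r"""^(\s*['"])(dayhoff-tools)(\[[^\]]+\])?[^'"]*(['"].*)$""",
        re.MULTILINE,
    )
    new_content, num_replacements = pattern.subn(
        rf"\g<1>\g<2>\g<3>>={locked_version}\g<4>", content
    )
    print(num_replacements, new_content)  # 1     "dayhoff-tools[full]>=1.1.23",
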
@@ -175,6 +175,10 @@ class MMSeqsProfileProcessor(Processor):
  else:
  self.mmseqs_args = default_mmseqs_args

+ # Log dayhoff-tools version
+ from dayhoff_tools import __version__
+
+ logger.info(f"dayhoff-tools version: {__version__}")
  logger.info(
  f"MMSeqsProfileProcessor initialized with query: {self.query_fasta_path}"
  )
@@ -456,35 +460,44 @@ class MMSeqsProfileProcessor(Processor):
  # We might still want to raise e here, depending on desired error handling for CSV conversion failure

  # 9. Extract hit sequences from M8 results using subset_fasta
- logger.info(f"Parsing M8 results from: {intermediate_results_m8_file}")
+ logger.info(
+ f"PROCESSOR: Parsing M8 results from: {intermediate_results_m8_file}"
+ )
  hit_sequence_ids = set()
  try:
  if not intermediate_results_m8_file.exists():
  logger.warning(
- f"M8 results file {intermediate_results_m8_file} not found. Hits FASTA will be empty."
+ f"PROCESSOR: M8 results file {intermediate_results_m8_file} not found. Hits FASTA will be empty.",
+ exc_info=True,
  )
  intermediate_hits_fasta_file.touch() # Create empty hits file
  else:
  with open(intermediate_results_m8_file, "r") as m8_file:
  for line in m8_file:
- if line.strip(): # Ensure line is not empty
+ if line.strip():
  columns = line.strip().split("\t")
  if len(columns) >= 2:
- hit_sequence_ids.add(
- columns[1]
- ) # Target ID is the second column
+ hit_sequence_ids.add(columns[1])
  logger.info(
- f"Found {len(hit_sequence_ids)} unique target IDs in M8 results."
+ f"PROCESSOR: Found {len(hit_sequence_ids)} unique target IDs in M8 results."
  )

  if not hit_sequence_ids:
  logger.warning(
- f"No target IDs found in {intermediate_results_m8_file} after parsing. Output hits FASTA will be empty."
+ f"PROCESSOR: No target IDs found in {intermediate_results_m8_file} after parsing. Output hits FASTA will be empty.",
+ exc_info=True,
  )
- intermediate_hits_fasta_file.touch() # Create empty file
+ intermediate_hits_fasta_file.touch()
  else:
+ logger.info(f"PROCESSOR: === CALLING subset_fasta ===")
  logger.info(
- f"Extracting {len(hit_sequence_ids)} hit sequences from {local_target_file} to {intermediate_hits_fasta_file} using subset_fasta."
+ f"PROCESSOR: Input FASTA for subset_fasta: {local_target_file}"
+ )
+ logger.info(
+ f"PROCESSOR: Output FASTA for subset_fasta: {intermediate_hits_fasta_file}"
+ )
+ logger.info(
+ f"PROCESSOR: Number of target IDs for subset_fasta: {len(hit_sequence_ids)}"
  )
  try:
  subset_fasta(
@@ -495,25 +508,34 @@ class MMSeqsProfileProcessor(Processor):
  return_written_ids=False,
  )
  logger.info(
- f"Successfully created hits FASTA: {intermediate_hits_fasta_file} using subset_fasta."
+ f"PROCESSOR: === RETURNED from subset_fasta ==="
+ )
+ logger.info(
+ f"PROCESSOR: Successfully created hits FASTA: {intermediate_hits_fasta_file} using subset_fasta."
  )
- except FileNotFoundError as e:
+ # More specific error catching can be added if subset_fasta raises custom exceptions
+ except FileNotFoundError as e_fnf:
  logger.error(
- f"subset_fasta FileNotFoundError: {e}. Ensuring {intermediate_hits_fasta_file} exists as empty."
+ f"PROCESSOR: subset_fasta FileNotFoundError: {e_fnf}. Ensuring {intermediate_hits_fasta_file} exists as empty.",
+ exc_info=True,
  )
  if not intermediate_hits_fasta_file.exists():
  intermediate_hits_fasta_file.touch()
  raise
- except Exception as e:
+ except (
+ Exception
+ ) as e_sub: # Catch any other exception from subset_fasta
  logger.error(
- f"subset_fasta failed to create {intermediate_hits_fasta_file}: {e}"
+ f"PROCESSOR: subset_fasta failed to create {intermediate_hits_fasta_file}: {e_sub}",
+ exc_info=True,
  )
  if not intermediate_hits_fasta_file.exists():
  intermediate_hits_fasta_file.touch()
  raise
- except Exception as e:
+ except Exception as e_m8_proc:
  logger.error(
- f"Error processing M8 file {intermediate_results_m8_file} or its hits extraction: {e}"
+ f"PROCESSOR: Error processing M8 file {intermediate_results_m8_file} or its hits extraction: {e_m8_proc}",
+ exc_info=True,
  )
  if not intermediate_hits_fasta_file.exists():
  intermediate_hits_fasta_file.touch()
@@ -521,7 +543,7 @@ class MMSeqsProfileProcessor(Processor):

  # 10. Write the set of hit sequence IDs to a .txt file
  logger.info(
- f"Writing {len(hit_sequence_ids)} hit sequence IDs to {final_hits_txt_file}"
+ f"PROCESSOR: Writing {len(hit_sequence_ids)} hit sequence IDs to {final_hits_txt_file}"
  )
  try:
  with open(final_hits_txt_file, "w") as txt_out:
@@ -529,13 +551,15 @@ class MMSeqsProfileProcessor(Processor):
  list(hit_sequence_ids)
  ): # Sort for consistent output
  txt_out.write(f"{seq_id}\n")
- logger.info(f"Successfully wrote hit IDs to {final_hits_txt_file}")
+ logger.info(
+ f"PROCESSOR: Successfully wrote hit IDs to {final_hits_txt_file}"
+ )
  except Exception as e:
  logger.error(f"Failed to write hit IDs to {final_hits_txt_file}: {e}")
  # The main workflow should still proceed even if this supplementary file fails

  logger.info(
- f"MMseqs2 workflow completed successfully. Intermediate outputs in {mmseqs_temp_dir}"
+ f"PROCESSOR: MMseqs2 workflow and FASTA/TXT generation completed successfully. Intermediate outputs in {mmseqs_temp_dir}"
  )

  # Move and rename final output files from mmseqs_temp_dir to run_base_dir
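
Note: step 9 above treats the second tab-separated column of each M8 line as the target sequence ID. A small sketch of that parsing, assuming the usual BLAST-tab layout (query, target, identity, ...) and made-up hit lines:

    # Hypothetical M8 content: query<TAB>target<TAB>...alignment metrics
    m8_text = "query1\ttargetA\t0.98\t120\nquery1\ttargetB\t0.85\t118\n"

    hit_sequence_ids = set()
    for line in m8_text.splitlines():
        if line.strip():
            columns = line.strip().split("\t")
            if len(columns) >= 2:
                hit_sequence_ids.add(columns[1])  # target ID is the second column

    print(sorted(hit_sequence_ids))  # ['targetA', 'targetB']
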
@@ -439,6 +439,11 @@ class Operator:
  - For AWS spot instances, uses IMDSv2 to check instance-action metadata
  - For GCP preemptible VMs, checks both maintenance-event and preempted metadata
  """
+ logger.info(
+ "DEBUG: _check_for_termination has been temporarily disabled for testing."
+ )
+ return # DEBUG: Temporarily disable to test if this is causing premature shutdown
+
  while not _shutdown_requested.is_set():
  try:
  # Check AWS spot termination using IMDSv2 (token-based auth)
dayhoff_tools/fasta.py CHANGED
@@ -8,7 +8,7 @@ import sqlite3
  import time
  from functools import partial
  from pathlib import Path
- from typing import Dict, Iterator, List, Optional, Set, Tuple, Union
+ from typing import Any, Dict, Iterator, List, Optional, Set, Tuple, Union

  import requests
  from Bio import SeqIO
@@ -604,29 +604,48 @@ def process_chunk(
  ) -> Tuple[List[str], Set[str]]:
  output_sequences = []
  written_ids = set()
- current_id = ""
- current_seq = []
-
- def id_matches(seq_id: str) -> bool:
- return any(part.lower() in target_ids_lower for part in seq_id.split("|"))
-
- for line in chunk:
- line = line.strip()
- if line.startswith(">"):
- if current_id and current_seq:
- if id_matches(current_id) != exclude:
- output_sequences.append(f">{current_id}\n{''.join(current_seq)}\n")
- written_ids.add(current_id)
- current_id = line[1:]
- current_seq = []
- elif current_id:
- current_seq.append(line)
-
- # Process the last sequence in the chunk
- if current_id and current_seq and id_matches(current_id) != exclude:
- output_sequences.append(f">{current_id}\n{''.join(current_seq)}\n")
- written_ids.add(current_id)
+ current_id: str = ""
+ current_seq: List[str] = []
+
+ # Get a unique worker ID, could be process ID
+ worker_id = os.getpid()
+ logger.debug(
+ f"SUBSET_FASTA_PROCESS_CHUNK: Worker {worker_id} processing a chunk. Target IDs count: {len(target_ids_lower)}, Exclude: {exclude}"
+ )
+ try:
+
+ def id_matches(seq_id: str) -> bool:
+ return any(part.lower() in target_ids_lower for part in seq_id.split("|"))
+
+ for line in chunk:
+ line = line.strip()
+ if line.startswith(">"):
+ if current_id and current_seq:
+ if id_matches(current_id) != exclude:
+ output_sequences.append(
+ f">{current_id}\n{''.join(current_seq)}\n"
+ )
+ written_ids.add(current_id)
+ current_id = line[1:]
+ current_seq = []
+ elif current_id:
+ current_seq.append(line)
+
+ # Process the last sequence in the chunk
+ if current_id and current_seq and id_matches(current_id) != exclude:
+ output_sequences.append(f">{current_id}\n{''.join(current_seq)}\n")
+ written_ids.add(current_id)

+ except Exception as e:
+ logger.error(
+ f"SUBSET_FASTA_PROCESS_CHUNK: Worker {worker_id} encountered error: {e}",
+ exc_info=True,
+ )
+ # Re-raising the exception so the main process's pool error handling can catch it
+ raise
+ logger.debug(
+ f"SUBSET_FASTA_PROCESS_CHUNK: Worker {worker_id} finished chunk. Output sequences: {len(output_sequences)}, Written IDs: {len(written_ids)}"
+ )
  return output_sequences, written_ids


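Note: process_chunk consumes a list of FASTA lines plus a set of lowercased target IDs and returns the matching records and the header IDs it kept. A rough usage sketch with made-up records (keyword names taken from the partial(...) call in subset_fasta below; assumes the function is importable from dayhoff_tools.fasta):

    from dayhoff_tools.fasta import process_chunk

    # Hypothetical in-memory chunk; real chunks come from chunk_reader in subset_fasta.
    chunk = [
        ">sp|P12345|EXAMPLE some description\n",
        "MKTAYIAKQR\n",
        ">sp|Q99999|OTHER another description\n",
        "GGGGGGGGGG\n",
    ]
    sequences, written_ids = process_chunk(
        chunk, target_ids_lower={"p12345"}, exclude=False
    )
    print(written_ids)  # {'sp|P12345|EXAMPLE some description'}
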
@@ -655,50 +674,98 @@ def subset_fasta(
  Raises:
  FileExistsError: If the output file already exists.
  """
+ logger.info(
+ f"SUBSET_FASTA: Starting for input '{fasta_file}', output '{output_path}'. Target IDs: {len(target_ids)}, Exclude: {exclude}"
+ )
  _check_output_file(output_path)

  target_ids_lower = {id.lower() for id in target_ids}
  total_size = os.path.getsize(fasta_file)
- chunk_size = max(
- 1, total_size // (multiprocessing.cpu_count() * 2)
- ) # Adjust chunk size based on CPU count

- def chunk_reader(file_obj, chunk_size: int):
+ # Determine a reasonable number of processes
+ num_processes = multiprocessing.cpu_count()
+ # Adjust chunk size based on number of processes to balance load vs memory
+ # Aim for at least a few chunks per process if possible, but not too many small chunks.
+ # This is a heuristic and might need tuning.
+ # Let's make chunks reasonably large, e.g., 10-50MB, or ensure at least num_processes chunks.
+ # If total_size is very small, chunk_size could become 0 if not handled.
+ desired_chunk_size_mb = 32
+ chunk_size = max(1, desired_chunk_size_mb * 1024 * 1024)
+ num_chunks = max(1, math.ceil(total_size / chunk_size))
+
+ def chunk_reader(
+ file_obj, cs: int
+ ) -> Iterator[List[str]]: # Explicitly Iterator[List[str]]
  chunk = []
  chunk_bytes = 0
  for line in file_obj:
  chunk.append(line)
  chunk_bytes += len(line)
- if chunk_bytes >= chunk_size and line.startswith(">"):
+ if chunk_bytes >= cs and line.startswith(">"):
  yield chunk
  chunk = [line]
  chunk_bytes = len(line)
  if chunk:
  yield chunk

- open_func = gzip.open if fasta_file.endswith(".gz") else open
- mode = "rt" if fasta_file.endswith(".gz") else "r"
+ mode = "rt" # text mode for both gzip and regular open

- with open_func(fasta_file, mode) as input_file:
- with multiprocessing.Pool() as pool:
- process_func = partial(
- process_chunk, target_ids_lower=target_ids_lower, exclude=exclude
+ all_written_ids: Set[str] = set()
+ try:
+ with open(fasta_file, mode) as input_file:
+ logger.info(
+ f"SUBSET_FASTA: Using up to {num_processes} worker processes for {num_chunks} potential chunks."
  )
- results = list(
- tqdm(
- pool.imap(process_func, chunk_reader(input_file, chunk_size)),
- total=total_size // chunk_size,
- desc="Processing FASTA",
+
+ with multiprocessing.Pool(processes=num_processes) as pool:
+ logger.info(
+ f"SUBSET_FASTA: Multiprocessing pool created (intended processes: {num_processes})."
  )
- )

- all_written_ids = set()
- with open(output_path, "w") as output_file:
- for output_sequences, written_ids in results:
- output_file.writelines(output_sequences)
- all_written_ids.update(written_ids)
+ process_func = partial(
+ process_chunk, target_ids_lower=target_ids_lower, exclude=exclude
+ )

- print(f"Wrote {len(all_written_ids)} sequences to {output_path}")
+ # Using imap_unordered can sometimes be better for memory with many results,
+ # as results are processed as they complete.
+ # However, for aggregation later, order doesn't strictly matter for building the final set/list of strings.
+ # tqdm will work with imap and imap_unordered.
+
+ # Calculate total for tqdm more robustly
+ actual_num_chunks_for_tqdm = num_chunks # Use the calculated num_chunks
+
+ try:
+ results_buffer = []
+ for result_tuple in tqdm(
+ pool.imap(process_func, chunk_reader(input_file, chunk_size)),
+ total=actual_num_chunks_for_tqdm, # Use calculated number of chunks
+ desc="Processing FASTA (subset_fasta)",
+ ):
+ results_buffer.append(result_tuple)
+ logger.debug("SUBSET_FASTA: pool.imap completed.")
+ except Exception as e_pool:
+ logger.error(
+ f"SUBSET_FASTA: Error during multiprocessing pool.imap: {e_pool}",
+ exc_info=True,
+ )
+ raise
+
+ logger.debug(
+ f"SUBSET_FASTA: Aggregating results from {len(results_buffer)} processed chunks."
+ )
+ with open(output_path, "w") as output_file:
+ for output_sequences, written_ids_chunk in results_buffer:
+ output_file.writelines(output_sequences)
+ all_written_ids.update(written_ids_chunk)
+ except Exception as e_main:
+ logger.error(
+ f"SUBSET_FASTA: Error in main processing logic: {e_main}", exc_info=True
+ )
+ raise
+
+ logger.info(
+ f"SUBSET_FASTA: Wrote {len(all_written_ids)} sequences to {output_path}. Finished."
+ )
  return all_written_ids if return_written_ids else None


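Note: a rough usage sketch of subset_fasta as the processor above calls it (hypothetical file names; keyword names taken from the function body, so treat them as an assumption):

    from dayhoff_tools.fasta import subset_fasta

    hit_ids = {"targetA", "targetB"}
    # Writes only the matching records to hits.fasta; raises FileExistsError
    # if hits.fasta already exists (see the Raises section above).
    subset_fasta(
        fasta_file="all_targets.fasta",   # hypothetical input FASTA
        output_path="hits.fasta",         # hypothetical output FASTA
        target_ids=hit_ids,
        exclude=False,                    # keep matches rather than drop them
        return_written_ids=False,
    )
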
@@ -779,7 +846,7 @@ def fasta_to_sqlite(fasta_file: str, db_file: str, batch_size: int = 1000) -> No
  batch = []

  for protein_id, sequence in tqdm(
- _protein_generator(fasta_file),
+ _protein_generator(Path(fasta_file)), # Pass as Path object
  total=estimated_records,
  desc="Processing proteins",
  ):
@@ -804,22 +871,27 @@ def fasta_to_sqlite(fasta_file: str, db_file: str, batch_size: int = 1000) -> No
  print(f"Conversion completed. SQLite database saved to {db_file}")


- def _protein_generator(fasta_path: Path) -> Iterator[tuple[str, str]]:
+ def _protein_generator(
+ fasta_path: Path,
+ ) -> Iterator[tuple[str, str]]: # fasta_path is Path
  """
  Generate protein data from a FASTA file.
-
  Args:
  fasta_path (Path): Path to the FASTA file.
-
  Yields:
  tuple[str, str]: A tuple containing protein_id and sequence.
  """
- for record in SeqIO.parse(fasta_path, "fasta"):
- protein_id = record.id.split()[
- 0
- ] # Assumes the first part of the id is the protein_id
- sequence = str(record.seq)
- yield protein_id, sequence
+ # Ensure we use 'rt' for text mode reading, especially if gzipped
+ open_func = gzip.open if str(fasta_path).endswith(".gz") else open
+ mode = "rt"
+
+ with open_func(fasta_path, mode) as handle:
+ for record in SeqIO.parse(handle, "fasta"):
+ protein_id = record.id.split()[
+ 0
+ ] # Assumes the first part of the id is the protein_id
+ sequence = str(record.seq)
+ yield protein_id, sequence


  def check_fasta_duplicates(fasta_path: str) -> tuple[set[str], set[str]]:
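
Note: with the change above, _protein_generator opens plain or gzipped FASTA transparently ("rt" text mode in both cases). A small usage sketch with a hypothetical gzipped file (the helper is module-private, so this is illustration only):

    import gzip
    from pathlib import Path

    from dayhoff_tools.fasta import _protein_generator

    # Hypothetical gzipped FASTA, written just for the example.
    with gzip.open("proteins.fasta.gz", "wt") as handle:
        handle.write(">P12345 example protein\nMKTAYIAKQR\n")

    for protein_id, sequence in _protein_generator(Path("proteins.fasta.gz")):
        print(protein_id, sequence)  # P12345 MKTAYIAKQR
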
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: dayhoff-tools
- Version: 1.1.21
+ Version: 1.1.23
  Summary: Common tools for all the repos at Dayhoff Labs
  Author: Daniel Martin-Alarcon
  Author-email: dma@dayhofflabs.com
@@ -5,16 +5,16 @@ dayhoff_tools/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU
  dayhoff_tools/cli/cloud_commands.py,sha256=NGux28-cjDyCADF-L1tjdEMzkCMYX8V4xNvpK6EWcZA,40802
  dayhoff_tools/cli/main.py,sha256=47EGb28ALaYFc7oAUGlY1D66AIDmc4RZiXxN-gPVrpQ,4519
  dayhoff_tools/cli/swarm_commands.py,sha256=5EyKj8yietvT5lfoz8Zx0iQvVaNgc3SJX1z2zQR6o6M,5614
- dayhoff_tools/cli/utility_commands.py,sha256=08MMLlQZEjJ_r7Cd6b92aYYPjH7gMvMQvCOcpoNnFVo,24352
+ dayhoff_tools/cli/utility_commands.py,sha256=vB6WLAq_VNqN00cvCUnPBdo_b_1poM8jH6ZRhIp8Qn4,24500
  dayhoff_tools/deployment/base.py,sha256=8tXwsPYvRo-zV-aNhHw1c7Rji-KWg8S5xoCCznFnVVI,17412
  dayhoff_tools/deployment/deploy_aws.py,sha256=O0gQxHioSU_sNU8T8MD4wSOPvWc--V8eRRZzlRu035I,16446
  dayhoff_tools/deployment/deploy_gcp.py,sha256=DxBM4sUzwPK9RWLP9bSfr38n1HHl-TVrp4TsbdN8pUA,5795
  dayhoff_tools/deployment/deploy_utils.py,sha256=StFwbqnr2_FWiKVg3xnJF4kagTHzndqqDkpaIOaAn_4,26027
  dayhoff_tools/deployment/job_runner.py,sha256=4tmdplpvqSE9bVxRWHo2U5kwkYrYod0Uwzpg2Q7qG5o,4850
- dayhoff_tools/deployment/processors.py,sha256=0D2r2-NgVcFfxF0QSqLZy86cxEWSn78jLTKYf60fLBQ,25519
- dayhoff_tools/deployment/swarm.py,sha256=MGcS2_x4RNFtnVjWlU_SwNfhICz8NlGYr9cYBK4ZKDA,21688
+ dayhoff_tools/deployment/processors.py,sha256=kdXbS354DUKCN-kkj2sbt6T06cNG8Hphjj9e_nyCt1g,26676
+ dayhoff_tools/deployment/swarm.py,sha256=Xoe-lLQYDT3FwCrPzImgpbHdWRmsK6WERh1IMMNWb2c,21898
  dayhoff_tools/embedders.py,sha256=CRgcb2z7KeeFrRQawyUZuJ4Yi0-J5jSr0hwuRhjG_FI,36513
- dayhoff_tools/fasta.py,sha256=e7xw3pInoupqCGE0-fJTOzmW_earL1M7qPyoqIPfUT4,46269
+ dayhoff_tools/fasta.py,sha256=HJ25D_u5F-tU6fZMkJfIhvqMSmnR32JK1QdCPXoHJ5g,49785
  dayhoff_tools/file_ops.py,sha256=JlGowvr-CUJFidV-4g_JmhUTN9bsYuaxtqKmnKomm-Q,8506
  dayhoff_tools/h5.py,sha256=j1nxxaiHsMidVX_XwB33P1Pz9d7K8ZKiDZwJWQUUQSY,21158
  dayhoff_tools/intake/gcp.py,sha256=uCeEskhbEwJIYpN6ne6siT1dbpTizCjjel-hRe0kReE,3030
@@ -26,7 +26,7 @@ dayhoff_tools/intake/uniprot.py,sha256=BZYJQF63OtPcBBnQ7_P9gulxzJtqyorgyuDiPeOJq
  dayhoff_tools/logs.py,sha256=DKdeP0k0kliRcilwvX0mUB2eipO5BdWUeHwh-VnsICs,838
  dayhoff_tools/sqlite.py,sha256=jV55ikF8VpTfeQqqlHSbY8OgfyfHj8zgHNpZjBLos_E,18672
  dayhoff_tools/warehouse.py,sha256=TqV8nex1AluNaL4JuXH5zuu9P7qmE89lSo6f_oViy6U,14965
- dayhoff_tools-1.1.21.dist-info/METADATA,sha256=R6Q1CWHxPpXkuYs0OwJ-WSSb9pnPIS8V4xbZHKjugZE,2225
- dayhoff_tools-1.1.21.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
- dayhoff_tools-1.1.21.dist-info/entry_points.txt,sha256=iAf4jteNqW3cJm6CO6czLxjW3vxYKsyGLZ8WGmxamSc,49
- dayhoff_tools-1.1.21.dist-info/RECORD,,
+ dayhoff_tools-1.1.23.dist-info/METADATA,sha256=STOmmTUXdEuJpGqd5HX4RL_jI0RHbTqFM1t-yMv4Gkw,2225
+ dayhoff_tools-1.1.23.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+ dayhoff_tools-1.1.23.dist-info/entry_points.txt,sha256=iAf4jteNqW3cJm6CO6czLxjW3vxYKsyGLZ8WGmxamSc,49
+ dayhoff_tools-1.1.23.dist-info/RECORD,,