boltz-vsynthes 1.0.9__py3-none-any.whl → 1.0.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
boltz/data/parse/sdf.py ADDED
@@ -0,0 +1,60 @@
+ from pathlib import Path
+ from typing import Optional
+
+ from rdkit import Chem
+ from rdkit.Chem.rdchem import Mol
+
+ from boltz.data.types import Target
+ from boltz.data.parse.yaml import parse_boltz_schema
+
+
+ def parse_sdf(
+     path: Path,
+     ccd: dict[str, Mol],
+     mol_dir: Path,
+     boltz2: bool = False,
+ ) -> Target:
+     """Parse an SDF file.
+
+     Parameters
+     ----------
+     path : Path
+         Path to the SDF file.
+     ccd : Dict
+         Dictionary of CCD components.
+     mol_dir : Path
+         Path to the directory containing the molecules.
+     boltz2 : bool
+         Whether to parse the input for Boltz2.
+
+     Returns
+     -------
+     Target
+         The parsed target.
+     """
+     # Read SDF file
+     supplier = Chem.SDMolSupplier(str(path))
+
+     # Convert to yaml format
+     sequences = []
+     for i, mol in enumerate(supplier):
+         if mol is not None:
+             # Get SMILES
+             smiles = Chem.MolToSmiles(mol)
+
+             molecule = {
+                 "ligand": {
+                     "id": f"L{i+1}",  # Use L1, L2, etc. as chain IDs
+                     "smiles": smiles,
+                 },
+             }
+             sequences.append(molecule)
+
+     data = {
+         "sequences": sequences,
+         "bonds": [],
+         "version": 1,
+     }
+
+     name = path.stem
+     return parse_boltz_schema(name, data, ccd, mol_dir, boltz2)
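For reference, the new parser maps each readable molecule in the SDF to a ligand entry in the same dictionary schema the YAML front end consumes. A standalone sketch of that mapping, with toy molecules standing in for `SDMolSupplier` output (requires RDKit; nothing here is package code):

```python
# Sketch of the schema parse_sdf builds; toy molecules replace the SDF input.
from rdkit import Chem

mols = [Chem.MolFromSmiles("CCO"), Chem.MolFromSmiles("c1ccccc1")]
sequences = [
    {"ligand": {"id": f"L{i + 1}", "smiles": Chem.MolToSmiles(mol)}}
    for i, mol in enumerate(mols)
    if mol is not None  # parse_sdf skips molecules RDKit cannot read
]
data = {"sequences": sequences, "bonds": [], "version": 1}
print(data)  # this dict is what parse_boltz_schema receives
```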
boltz/main.py CHANGED
@@ -27,6 +27,9 @@ from boltz.data.msa.mmseqs2 import run_mmseqs2
  from boltz.data.parse.a3m import parse_a3m
  from boltz.data.parse.csv import parse_csv
  from boltz.data.parse.fasta import parse_fasta
+ from boltz.data.parse.pdb import parse_pdb
+ from boltz.data.parse.pdb_download import parse_pdb_id
+ from boltz.data.parse.sdf import parse_sdf
  from boltz.data.parse.yaml import parse_yaml
  from boltz.data.types import MSA, Manifest, Record
  from boltz.data.write.writer import BoltzAffinityWriter, BoltzWriter
@@ -289,44 +292,31 @@ def check_inputs(data: Path) -> list[Path]:

      # Check if data is a directory
      if data.is_dir():
-         # Get all files recursively
-         valid_data = []
-         for ext in (".fa", ".fas", ".fasta", ".yml", ".yaml"):
-             valid_data.extend(data.glob(f"**/*{ext}"))
-
-         if not valid_data:
-             msg = f"No .fasta or .yaml files found in {data}"
-             raise RuntimeError(msg)
-
-         # Filter out directories and invalid file types
-         filtered_data = []
-         for d in valid_data:
+         data: list[Path] = list(data.glob("*"))
+
+         # Filter out non .fasta, .yaml, .pdb, or .sdf files, raise
+         # an error on directory and other file types
+         for d in data:
              if d.is_dir():
-                 msg = f"Found directory {d} instead of .fasta or .yaml."
-                 click.echo(f"Warning: {msg}")
-                 continue
-             if d.suffix not in (".fa", ".fas", ".fasta", ".yml", ".yaml"):
-                 msg = f"Warning: Skipping file with unsupported extension {d.suffix}"
-                 click.echo(msg)
-                 continue
-             filtered_data.append(d)
-
-         if not filtered_data:
-             msg = "No valid input files found after filtering."
-             raise RuntimeError(msg)
-
-         data = filtered_data
+                 msg = f"Found directory {d} instead of .fasta, .yaml, .pdb, or .sdf."
+                 raise RuntimeError(msg)
+             if d.suffix not in (".fa", ".fas", ".fasta", ".yml", ".yaml", ".pdb", ".sdf"):
+                 msg = (
+                     f"Unable to parse filetype {d.suffix}, "
+                     "please provide a .fasta, .yaml, .pdb, or .sdf file."
+                 )
+                 raise RuntimeError(msg)
      else:
-         # Single file case
-         if data.suffix not in (".fa", ".fas", ".fasta", ".yml", ".yaml"):
-             msg = (
-                 f"Unable to parse filetype {data.suffix}, "
-                 "please provide a .fasta or .yaml file."
-             )
-             raise RuntimeError(msg)
-         data = [data]
+         # Check if input is a PDB ID (4 characters)
+         if len(data.stem) == 4 and data.stem.isalnum():
+             # Create a temporary file to store the PDB ID
+             temp_file = data.parent / f"{data.stem}.pdb"
+             with temp_file.open("w") as f:
+                 f.write(data.stem)
+             data = [temp_file]
+         else:
+             data = [data]

-     click.echo(f"Found {len(data)} valid input files.")
      return data

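Note how a bare 4-character ID is handed off: `check_inputs` writes the ID into a `.pdb` sentinel file whose entire content is the ID itself, at most 4 bytes, which `process_input` later recognizes by size (see the next hunk). A small sketch of the write side (the ID and paths are hypothetical):

```python
# Sketch of the sentinel-file handoff for bare PDB IDs.
from pathlib import Path

data = Path("7xyz")  # user passed a bare 4-character PDB ID
if len(data.stem) == 4 and data.stem.isalnum():
    temp_file = data.parent / f"{data.stem}.pdb"
    temp_file.write_text(data.stem)       # file holds only the ID
    assert temp_file.stat().st_size <= 4  # the marker process_input checks
```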
@@ -518,13 +508,23 @@ def process_input(  # noqa: C901, PLR0912, PLR0915, D103
              target = parse_fasta(path, ccd, mol_dir, boltz2)
          elif path.suffix in (".yml", ".yaml"):
              target = parse_yaml(path, ccd, mol_dir, boltz2)
+         elif path.suffix == ".pdb":
+             # Check if this is a PDB ID file
+             if path.stat().st_size <= 4:  # File only contains PDB ID
+                 with path.open("r") as f:
+                     pdb_id = f.read().strip()
+                 target = parse_pdb_id(pdb_id, ccd, mol_dir, path.parent, boltz2)
+             else:
+                 target = parse_pdb(path, ccd, mol_dir, boltz2)
+         elif path.suffix == ".sdf":
+             target = parse_sdf(path, ccd, mol_dir, boltz2)
          elif path.is_dir():
-             msg = f"Found directory {path} instead of .fasta or .yaml, skipping."
+             msg = f"Found directory {path} instead of .fasta, .yaml, .pdb, or .sdf, skipping."
              raise RuntimeError(msg)  # noqa: TRY301
          else:
              msg = (
                  f"Unable to parse filetype {path.suffix}, "
-                 "please provide a .fasta or .yaml file."
+                 "please provide a .fasta, .yaml, .pdb, or .sdf file."
              )
              raise RuntimeError(msg)  # noqa: TRY301

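And the read side of the same handoff: `process_input` distinguishes the sentinel from a real structure purely by file size. A self-contained sketch with the parser calls replaced by prints (`7xyz` is a hypothetical ID):

```python
# Sketch of the size-based dispatch for .pdb inputs.
from pathlib import Path

path = Path("7xyz.pdb")  # hypothetical sentinel written by check_inputs
path.write_text("7xyz")  # make the sketch self-contained
if path.stat().st_size <= 4:  # file holds only the 4-char PDB ID
    pdb_id = path.read_text().strip()
    print(f"would fetch {pdb_id} via parse_pdb_id")
else:
    print("would parse the local structure via parse_pdb")
```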
@@ -626,6 +626,7 @@ def process_input(  # noqa: C901, PLR0912, PLR0915, D103
          print(f"Failed to process {path}. Skipping. Error: {e}.")  # noqa: T201


+ @rank_zero_only
  def process_inputs(
      data: list[Path],
      out_dir: Path,
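The new decorator makes preprocessing a rank-0-only operation in distributed runs, which is also why `predict` below re-loads the manifest from disk instead of relying on a return value. A minimal illustration of the decorator's behavior, assuming it is PyTorch Lightning's `rank_zero_only` utility (consistent with the Lightning stack boltz uses):

```python
# Sketch: the decorated function body executes only on rank 0.
from pytorch_lightning.utilities import rank_zero_only

@rank_zero_only
def preprocess() -> None:
    print("preprocessing inputs (runs once, on rank 0 only)")

preprocess()  # returns None on every rank; a no-op when rank != 0
```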
@@ -663,106 +664,87 @@ def process_inputs(
          The manifest of the processed input data.

      """
-     all_records = []
-
-     # Create processed directory
-     processed_dir = out_dir / "processed"
-     processed_dir.mkdir(parents=True, exist_ok=True)
-
-     # Create central directories
-     structure_dir = processed_dir / "structures"
-     msa_dir = processed_dir / "msa"
-     records_dir = processed_dir / "records"
-     processed_msa_dir = processed_dir / "processed" / "msa"
-     processed_constraints_dir = processed_dir / "processed" / "constraints"
-     processed_templates_dir = processed_dir / "processed" / "templates"
-     processed_mols_dir = processed_dir / "processed" / "mols"
-     predictions_dir = processed_dir / "predictions"
-
-     # Create all central directories
-     structure_dir.mkdir(parents=True, exist_ok=True)
+     # Check if records exist at output path
+     records_dir = out_dir / "processed" / "records"
+     if records_dir.exists():
+         # Load existing records
+         existing = [Record.load(p) for p in records_dir.glob("*.json")]
+         processed_ids = {record.id for record in existing}
+
+         # Filter to missing only
+         data = [d for d in data if d.stem not in processed_ids]
+
+         # Nothing to do, update the manifest and return
+         if data:
+             click.echo(
+                 f"Found {len(existing)} existing processed inputs, skipping them."
+             )
+         else:
+             click.echo("All inputs are already processed.")
+             updated_manifest = Manifest(existing)
+             updated_manifest.dump(out_dir / "processed" / "manifest.json")
+
+     # Create output directories
+     msa_dir = out_dir / "msa"
+     records_dir = out_dir / "processed" / "records"
+     structure_dir = out_dir / "processed" / "structures"
+     processed_msa_dir = out_dir / "processed" / "msa"
+     processed_constraints_dir = out_dir / "processed" / "constraints"
+     processed_templates_dir = out_dir / "processed" / "templates"
+     processed_mols_dir = out_dir / "processed" / "mols"
+     predictions_dir = out_dir / "predictions"
+
+     out_dir.mkdir(parents=True, exist_ok=True)
      msa_dir.mkdir(parents=True, exist_ok=True)
      records_dir.mkdir(parents=True, exist_ok=True)
+     structure_dir.mkdir(parents=True, exist_ok=True)
      processed_msa_dir.mkdir(parents=True, exist_ok=True)
      processed_constraints_dir.mkdir(parents=True, exist_ok=True)
      processed_templates_dir.mkdir(parents=True, exist_ok=True)
      processed_mols_dir.mkdir(parents=True, exist_ok=True)
      predictions_dir.mkdir(parents=True, exist_ok=True)
-
-     # Process each input file in its own directory
-     for input_file in data:
-         # Create a subdirectory for this input file
-         file_out_dir = out_dir / input_file.stem
-         file_out_dir.mkdir(parents=True, exist_ok=True)
-
-         # Create output directories for this file
-         file_msa_dir = file_out_dir / "msa"
-         file_records_dir = file_out_dir / "processed" / "records"
-         file_processed_msa_dir = file_out_dir / "processed" / "msa"
-         file_processed_constraints_dir = file_out_dir / "processed" / "constraints"
-         file_processed_templates_dir = file_out_dir / "processed" / "templates"
-         file_processed_mols_dir = file_out_dir / "processed" / "mols"
-         file_predictions_dir = file_out_dir / "predictions"
-
-         # Create all file-specific directories
-         file_out_dir.mkdir(parents=True, exist_ok=True)
-         file_msa_dir.mkdir(parents=True, exist_ok=True)
-         file_records_dir.mkdir(parents=True, exist_ok=True)
-         file_processed_msa_dir.mkdir(parents=True, exist_ok=True)
-         file_processed_constraints_dir.mkdir(parents=True, exist_ok=True)
-         file_processed_templates_dir.mkdir(parents=True, exist_ok=True)
-         file_processed_mols_dir.mkdir(parents=True, exist_ok=True)
-         file_predictions_dir.mkdir(parents=True, exist_ok=True)
-
-         # Load CCD
-         if boltz2:
-             ccd = load_canonicals(mol_dir)
-         else:
-             with ccd_path.open("rb") as file:
-                 ccd = pickle.load(file)  # noqa: S301
-
-         # Create partial function
-         process_input_partial = partial(
-             process_input,
-             ccd=ccd,
-             msa_dir=file_msa_dir,
-             mol_dir=mol_dir,
-             boltz2=boltz2,
-             use_msa_server=use_msa_server,
-             msa_server_url=msa_server_url,
-             msa_pairing_strategy=msa_pairing_strategy,
-             max_msa_seqs=max_msa_seqs,
-             processed_msa_dir=file_processed_msa_dir,
-             processed_constraints_dir=file_processed_constraints_dir,
-             processed_templates_dir=file_processed_templates_dir,
-             processed_mols_dir=file_processed_mols_dir,
-             structure_dir=structure_dir,  # Use the central structure directory
-             records_dir=file_records_dir,
-         )

-         # Process this input file
-         click.echo(f"Processing {input_file.name}")
-         try:
-             process_input_partial(input_file)
-         except Exception as e:
-             click.echo(f"Error processing {input_file.name}: {str(e)}")
-             continue
-
-         # Copy MSA files to central MSA directory
-         for msa_file in file_processed_msa_dir.glob("*.npz"):
-             target_msa_file = msa_dir / msa_file.name
-             if not target_msa_file.exists():
-                 import shutil
-                 shutil.copy2(msa_file, target_msa_file)
-
-         # Load records for this file
-         records = [Record.load(p) for p in file_records_dir.glob("*.json")]
-         all_records.extend(records)
-
-     # Create combined manifest
-     manifest = Manifest(all_records)
-     manifest.dump(processed_dir / "manifest.json")
-     return manifest
+     # Load CCD
+     if boltz2:
+         ccd = load_canonicals(mol_dir)
+     else:
+         with ccd_path.open("rb") as file:
+             ccd = pickle.load(file)  # noqa: S301
+
+     # Create partial function
+     process_input_partial = partial(
+         process_input,
+         ccd=ccd,
+         msa_dir=msa_dir,
+         mol_dir=mol_dir,
+         boltz2=boltz2,
+         use_msa_server=use_msa_server,
+         msa_server_url=msa_server_url,
+         msa_pairing_strategy=msa_pairing_strategy,
+         max_msa_seqs=max_msa_seqs,
+         processed_msa_dir=processed_msa_dir,
+         processed_constraints_dir=processed_constraints_dir,
+         processed_templates_dir=processed_templates_dir,
+         processed_mols_dir=processed_mols_dir,
+         structure_dir=structure_dir,
+         records_dir=records_dir,
+     )
+
+     # Parse input data
+     preprocessing_threads = min(preprocessing_threads, len(data))
+     click.echo(f"Processing {len(data)} inputs with {preprocessing_threads} threads.")
+
+     if preprocessing_threads > 1 and len(data) > 1:
+         with Pool(preprocessing_threads) as pool:
+             list(tqdm(pool.imap(process_input_partial, data), total=len(data)))
+     else:
+         for path in tqdm(data):
+             process_input_partial(path)
+
+     # Load all records and write manifest
+     records = [Record.load(p) for p in records_dir.glob("*.json")]
+     manifest = Manifest(records)
+     manifest.dump(out_dir / "processed" / "manifest.json")


  @click.group()
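The rewritten `process_inputs` thus replaces the per-file loop with a `multiprocessing.Pool` fan-out: `imap` yields results lazily so `tqdm` can report progress as workers finish, and `list()` drains the iterator. A self-contained sketch of the pattern with a stand-in work function (`process_one` is hypothetical, not package code):

```python
# Sketch of the Pool + imap + tqdm preprocessing pattern.
from functools import partial
from multiprocessing import Pool

from tqdm import tqdm

def process_one(path: str, suffix: str = "") -> str:
    return path + suffix  # stand-in for process_input

if __name__ == "__main__":  # guard required for spawn-based platforms
    paths = ["a", "b", "c"]
    work = partial(process_one, suffix=".done")
    threads = min(4, len(paths))
    if threads > 1 and len(paths) > 1:
        with Pool(threads) as pool:
            # list() drains the lazy imap iterator under the progress bar
            results = list(tqdm(pool.imap(work, paths), total=len(paths)))
    else:
        results = [work(p) for p in tqdm(paths)]
    print(results)
```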
@@ -1065,7 +1047,7 @@ def predict(  # noqa: C901, PLR0915, PLR0912
      # Process inputs
      ccd_path = cache / "ccd.pkl"
      mol_dir = cache / "mols"
-     manifest = process_inputs(
+     process_inputs(
          data=data,
          out_dir=out_dir,
          ccd_path=ccd_path,
@@ -1078,6 +1060,9 @@ def predict(  # noqa: C901, PLR0915, PLR0912
          max_msa_seqs=max_msa_seqs,
      )

+     # Load manifest
+     manifest = Manifest.load(out_dir / "processed" / "manifest.json")
+

      # Filter out existing predictions
      filtered_manifest = filter_inputs_structure(
@@ -1235,89 +1220,72 @@ def predict(  # noqa: C901, PLR0915, PLR0912
          # Print header
          click.echo("\nPredicting property: affinity\n")

-         # Group records by protein-ligand pairs
-         affinity_groups = {}
-         for record in manifest.records:
-             if record.affinity:
-                 key = (record.affinity["binder"], record.affinity["ligand"])
-                 if key not in affinity_groups:
-                     affinity_groups[key] = []
-                 affinity_groups[key].append(record)
-
-         # Process each protein-ligand pair
-         for (binder, ligand), records in affinity_groups.items():
-             # Create subfolder for this protein-ligand pair
-             pair_dir = out_dir / "predictions" / f"{binder}_{ligand}"
-             pair_dir.mkdir(parents=True, exist_ok=True)
-
-             # Create manifest for this pair
-             pair_manifest = Manifest(records)
-
-             # Validate inputs
-             pair_manifest_filtered = filter_inputs_affinity(
-                 manifest=pair_manifest,
-                 outdir=pair_dir,
-                 override=override,
-             )
-             if not pair_manifest_filtered.records:
-                 click.echo(f"Found existing affinity predictions for {binder}_{ligand}, skipping.")
-                 continue
+         # Validate inputs
+         manifest_filtered = filter_inputs_affinity(
+             manifest=manifest,
+             outdir=out_dir,
+             override=override,
+         )
+         if not manifest_filtered.records:
+             click.echo("Found existing affinity predictions for all inputs, skipping.")
+             return

-             msg = f"Running affinity prediction for {binder} with {ligand}"
-             click.echo(msg)
+         msg = f"Running affinity prediction for {len(manifest_filtered.records)} input"
+         msg += "s." if len(manifest_filtered.records) > 1 else "."
+         click.echo(msg)

-             pred_writer = BoltzAffinityWriter(
-                 data_dir=processed.targets_dir,
-                 output_dir=pair_dir,
-             )
+         pred_writer = BoltzAffinityWriter(
+             data_dir=processed.targets_dir,
+             output_dir=out_dir / "predictions",
+         )

-             data_module = Boltz2InferenceDataModule(
-                 manifest=pair_manifest_filtered,
-                 target_dir=out_dir / "predictions",
-                 msa_dir=processed.msa_dir,
-                 mol_dir=mol_dir,
-                 num_workers=num_workers,
-                 constraints_dir=processed.constraints_dir,
-                 template_dir=processed.template_dir,
-                 extra_mols_dir=processed.extra_mols_dir,
-                 override_method="other",
-                 affinity=True,
-             )
+         data_module = Boltz2InferenceDataModule(
+             manifest=manifest_filtered,
+             target_dir=out_dir / "predictions",
+             msa_dir=processed.msa_dir,
+             mol_dir=mol_dir,
+             num_workers=num_workers,
+             constraints_dir=processed.constraints_dir,
+             template_dir=processed.template_dir,
+             extra_mols_dir=processed.extra_mols_dir,
+             override_method="other",
+             affinity=True,
+         )

-             predict_affinity_args = {
-                 "recycling_steps": 5,
-                 "sampling_steps": sampling_steps_affinity,
-                 "diffusion_samples": diffusion_samples_affinity,
-                 "max_parallel_samples": 1,
-                 "write_confidence_summary": False,
-                 "write_full_pae": False,
-                 "write_full_pde": False,
-             }
-
-             # Load affinity model
-             if affinity_checkpoint is None:
-                 affinity_checkpoint = cache / "boltz2_aff.ckpt"
-
-             model_module = Boltz2.load_from_checkpoint(
-                 affinity_checkpoint,
-                 strict=True,
-                 predict_args=predict_affinity_args,
-                 map_location="cpu",
-                 diffusion_process_args=asdict(diffusion_params),
-                 ema=False,
-                 pairformer_args=asdict(pairformer_args),
-                 msa_args=asdict(msa_args),
-                 steering_args={"fk_steering": False, "guidance_update": False},
-                 affinity_mw_correction=affinity_mw_correction,
-             )
-             model_module.eval()
+         predict_affinity_args = {
+             "recycling_steps": 5,
+             "sampling_steps": sampling_steps_affinity,
+             "diffusion_samples": diffusion_samples_affinity,
+             "max_parallel_samples": 1,
+             "write_confidence_summary": False,
+             "write_full_pae": False,
+             "write_full_pde": False,
+         }

-             trainer.callbacks[0] = pred_writer
-             trainer.predict(
-                 model_module,
-                 datamodule=data_module,
-                 return_predictions=False,
-             )
+         # Load affinity model
+         if affinity_checkpoint is None:
+             affinity_checkpoint = cache / "boltz2_aff.ckpt"
+
+         model_module = Boltz2.load_from_checkpoint(
+             affinity_checkpoint,
+             strict=True,
+             predict_args=predict_affinity_args,
+             map_location="cpu",
+             diffusion_process_args=asdict(diffusion_params),
+             ema=False,
+             pairformer_args=asdict(pairformer_args),
+             msa_args=asdict(msa_args),
+             steering_args={"fk_steering": False, "guidance_update": False},
+             affinity_mw_correction=affinity_mw_correction,
+         )
+         model_module.eval()
+
+         trainer.callbacks[0] = pred_writer
+         trainer.predict(
+             model_module,
+             datamodule=data_module,
+             return_predictions=False,
+         )


  if __name__ == "__main__":
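The affinity stage no longer groups records into per-(binder, ligand) runs; it filters the manifest once, returns early when all predictions exist, and makes a single predict pass. A reduced sketch of that control flow with stand-in types (nothing here is package code):

```python
# Sketch of the consolidated filter-then-predict flow (stand-in types).
from dataclasses import dataclass

@dataclass
class Manifest:
    records: list[str]

def filter_inputs_affinity(manifest: Manifest, done: set[str]) -> Manifest:
    # stand-in for the real filter, which checks existing output files
    return Manifest([r for r in manifest.records if r not in done])

manifest = Manifest(records=["a", "b", "c"])
filtered = filter_inputs_affinity(manifest, done={"b"})
if not filtered.records:
    print("Found existing affinity predictions for all inputs, skipping.")
else:
    n = len(filtered.records)
    msg = f"Running affinity prediction for {n} input"
    msg += "s." if n > 1 else "."
    print(msg)  # one predict pass follows, instead of one per pair
```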
{boltz_vsynthes-1.0.9.dist-info → boltz_vsynthes-1.0.10.dist-info}/METADATA RENAMED
@@ -1,7 +1,7 @@
  Metadata-Version: 2.4
  Name: boltz-vsynthes
- Version: 1.0.9
- Summary: Boltz for V-Synthes
+ Version: 1.0.10
+ Summary: Boltz for VSYNTHES
  Requires-Python: <3.13,>=3.10
  Description-Content-Type: text/markdown
  License-File: LICENSE
{boltz_vsynthes-1.0.9.dist-info → boltz_vsynthes-1.0.10.dist-info}/RECORD RENAMED
@@ -1,8 +1,8 @@
  boltz/__init__.py,sha256=F_-so3S40iZrSZ89Ge4TS6aZqwWyZXq_H4AXGDlbA_g,187
- boltz/main.py,sha256=VpCVMACmYA4nsJ9XFuh6JUFR0pdaZuqPWefjF5-Uh7U,42439
+ boltz/main.py,sha256=fFXeW6PmVsbZtx0EVGBiczf3ulncuheeai1wpRVEhOI,40803
  boltz/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  boltz/data/const.py,sha256=1M-88Z6HkfKY6MkNtqcj3b9P-oX9xEXluh3qM_u8dNU,26779
- boltz/data/mol.py,sha256=kPytx81filtBASGp7BOf9INvMqIijQaSh8HgU7JQsJ0,34398
+ boltz/data/mol.py,sha256=maOpPHEGX1VVXCIFY6pQNGF7gUBZPAfgSvuPf2QO1yc,34268
  boltz/data/pad.py,sha256=O4CGOOc5TwFuuWeP7hKjMIIsljdfLj-VJtXQeVXFx8s,2066
  boltz/data/types.py,sha256=4w9brpOCQe16AyByNrxz7pjIzrgzFNihtik3aaHvKaE,21965
  boltz/data/crop/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -32,13 +32,16 @@ boltz/data/module/training.py,sha256=iNzmq9ufs20S4M947CCzdYzGTFjmCTf2tFExJ2PtXnA
  boltz/data/module/trainingv2.py,sha256=ZsYUHYXxfuPgIpbTwCj5QLO0XK__xjsqIw6GARSNGW0,21276
  boltz/data/msa/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  boltz/data/msa/mmseqs2.py,sha256=Im3s0h9lQVl-bXDfn4T6X6bxhLyF3XEyUXDjWFCjsvs,8202
- boltz/data/parse/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ boltz/data/parse/__init__.py,sha256=28ltwqi_0kqIjgnjUr7b730rqcEykh73rTYph7Mt3n8,642
  boltz/data/parse/a3m.py,sha256=I5nD16kYVW1NPKeLEMm7I4GnEQrtZk9bFtxw89wmKi0,3318
  boltz/data/parse/csv.py,sha256=Hcq8rJW2njczahEr8jfd_o-zxLaNSgJ3YIoC9srIqpw,2518
  boltz/data/parse/fasta.py,sha256=taI4s_CqPtyF0XaLJAsVAJHCL0GXm2g1g8Qeccdxikk,3906
  boltz/data/parse/mmcif.py,sha256=25kEXCkx-OuaawAs7cdz0fxdRu5_CCO0AV00u84PrjQ,36822
  boltz/data/parse/mmcif_with_constraints.py,sha256=WHYZckSqUwu-Nb9vmVmxHmC7uxwVrF7AVUeVKsc5wGQ,51473
- boltz/data/parse/schema.py,sha256=5VANtvxFZ0FTelESWHA58QJ810XVfSdXHSB8YtJVCuw,37097
+ boltz/data/parse/pdb.py,sha256=2Xurs2uzgNcwz_TsGYw0UhTkVdVGFTBbRz6HWledTBM,1793
+ boltz/data/parse/pdb_download.py,sha256=JapOOccbxT84964VRZiZ8NEzWbKdiWVpA0UbWhVSsZE,2916
+ boltz/data/parse/schema.py,sha256=gnHZBy0Io2Ecrw3KAD1lE1-cKnWL4ypKUI3EmeV5FDQ,59566
+ boltz/data/parse/sdf.py,sha256=myFA3bL6MkdPdMFfZHotxJ8yNMGpsc_u6w06YFadeiw,1364
  boltz/data/parse/yaml.py,sha256=GRFRMtDD4PQ4PIpA_S1jj0vRaEu2LlZd_g4rN1zUrNo,1505
  boltz/data/sample/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  boltz/data/sample/cluster.py,sha256=9Sx8qP7zGZOAyEspwYFtCTbGTBZnuN-zfCKFbbA_6oI,8175
@@ -104,9 +107,9 @@ boltz/model/optim/scheduler.py,sha256=nB4jz0CZ4pR4n08LQngExL_pNycIdYI8AXVoHPnZWQ
  boltz/model/potentials/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  boltz/model/potentials/potentials.py,sha256=vev8Vjfs-ML1hyrdv_R8DynG4wSFahJ6nzPWp7CYQqw,17507
  boltz/model/potentials/schedules.py,sha256=m7XJjfuF9uTX3bR9VisXv1rvzJjxiD8PobXRpcBBu1c,968
- boltz_vsynthes-1.0.9.dist-info/licenses/LICENSE,sha256=8GZ_1eZsUeG6jdqgJJxtciWzADfgLEV4LY8sKUOsJhc,1102
- boltz_vsynthes-1.0.9.dist-info/METADATA,sha256=_HnBtfMTbZT71l94bMVcMEEKq0InYcmMnuVFO0NQSHc,7171
- boltz_vsynthes-1.0.9.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- boltz_vsynthes-1.0.9.dist-info/entry_points.txt,sha256=n5a5I35ntu9lmyr16oZgHPFY0b0YxjiixY7m7nbMTLc,41
- boltz_vsynthes-1.0.9.dist-info/top_level.txt,sha256=MgU3Jfb-ctWm07YGMts68PMjSh9v26D0gfG3dFRmVFA,6
- boltz_vsynthes-1.0.9.dist-info/RECORD,,
+ boltz_vsynthes-1.0.10.dist-info/licenses/LICENSE,sha256=8GZ_1eZsUeG6jdqgJJxtciWzADfgLEV4LY8sKUOsJhc,1102
+ boltz_vsynthes-1.0.10.dist-info/METADATA,sha256=Xwu6e2a0v7LV6dLrEZpRt869aeHDgPhGJ8lbRDkXtY0,7171
+ boltz_vsynthes-1.0.10.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ boltz_vsynthes-1.0.10.dist-info/entry_points.txt,sha256=n5a5I35ntu9lmyr16oZgHPFY0b0YxjiixY7m7nbMTLc,41
+ boltz_vsynthes-1.0.10.dist-info/top_level.txt,sha256=MgU3Jfb-ctWm07YGMts68PMjSh9v26D0gfG3dFRmVFA,6
+ boltz_vsynthes-1.0.10.dist-info/RECORD,,