scdataloader 1.9.2__py3-none-any.whl → 2.0.2__py3-none-any.whl
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
- scdataloader/__main__.py +4 -5
- scdataloader/collator.py +76 -78
- scdataloader/config.py +25 -9
- scdataloader/data.json +384 -0
- scdataloader/data.py +134 -77
- scdataloader/datamodule.py +638 -245
- scdataloader/mapped.py +104 -43
- scdataloader/preprocess.py +136 -110
- scdataloader/utils.py +158 -52
- {scdataloader-1.9.2.dist-info → scdataloader-2.0.2.dist-info}/METADATA +6 -7
- scdataloader-2.0.2.dist-info/RECORD +16 -0
- {scdataloader-1.9.2.dist-info → scdataloader-2.0.2.dist-info}/WHEEL +1 -1
- scdataloader-2.0.2.dist-info/licenses/LICENSE +21 -0
- scdataloader/VERSION +0 -1
- scdataloader-1.9.2.dist-info/RECORD +0 -16
- scdataloader-1.9.2.dist-info/licenses/LICENSE +0 -674
- {scdataloader-1.9.2.dist-info → scdataloader-2.0.2.dist-info}/entry_points.txt +0 -0
scdataloader/utils.py
CHANGED
@@ -1,5 +1,7 @@
 import io
 import os
+import random
+import string
 import urllib
 from collections import Counter
 from functools import lru_cache
@@ -13,50 +15,67 @@ import torch
 from anndata import AnnData
 from biomart import BiomartServer
 from django.db import IntegrityError
+from lamindb.errors import DoesNotExist
 from scipy.sparse import csr_matrix
 from scipy.stats import median_abs_deviation
 from torch import Tensor
 
 
-def
-
-
-
-
+def fileToList(filename: str, strconv: callable = lambda x: x) -> list:
+    """
+    loads an input file with a\\n b\\n.. into a list [a,b,..]
+
+    Args:
+        input_str (str): The input string to be completed.
+
+    Returns:
+        str: The completed string with 'complete' appended.
+    """
+    with open(filename) as f:
+        return [strconv(val[:-1]) for val in f.readlines()]
 
 
-def
+def listToFile(
+    li: List[str], filename: str, strconv: callable = lambda x: str(x)
+) -> None:
     """
-
-
-    The noise is applied based on the renoise parameter,
-    the total counts of the matrix, and the number of genes. The function first calculates the noise
-    threshold (scaler) based on the renoise parameter. It then generates an initial matrix count by
-    applying a Poisson distribution to a random tensor scaled by the total counts and the number of genes.
-    The function then models the sampling zeros by applying a Poisson distribution to a random tensor
-    scaled by the noise threshold, the total counts, and the number of genes. The function also models
-    the technical zeros by generating a random tensor and comparing it to the noise threshold. The final
-    matrix count is calculated by subtracting the sampling zeros from the initial matrix count and
-    multiplying by the technical zeros. The function ensures that the final matrix count is not less
-    than zero by taking the maximum of the final matrix count and a tensor of zeros. The function
-    returns the final matrix count.
+    listToFile loads a list with [a,b,..] into an input file a\\n b\\n..
 
     Args:
-
-
+        l (list): The list of elements to be written to the file.
+        filename (str): The name of the file where the list will be written.
+        strconv (callable, optional): A function to convert each element of the list to a string. Defaults to str.
 
     Returns:
-
+        None
     """
-
-
-
-
-
-
-
-
-
+    with open(filename, "w") as f:
+        for item in li:
+            f.write("%s\n" % strconv(item))
+
+
+def slurm_restart_count(use_mine: bool = False):
+    if use_mine:
+        return int(os.getenv("MY_SLURM_RESTART_COUNT", 0))
+    else:
+        return int(os.getenv("SLURM_RESTART_COUNT", 0))
+
+
+def revert_to_raw(adata, mode="logp1"):
+    res = adata.X
+    if mode == "rlogp1":
+        res = np.exp(res) - 1
+    elif mode == "logp1":
+        res = (2**res) - 1
+    elif mode == "sqrt":
+        res = (res**2) - 1
+    res = (
+        (res.T / np.array([res[i][res[i] != 0].min() for i in range(res.shape[0])]))
+        .round()
+        .T
+    )  # .sum()
+    adata.X = res
+    return adata
 
 
 def createFoldersFor(filepath: str):
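As a quick illustration of what the new `revert_to_raw` helper does, the sketch below (not from the package's documentation; it assumes scdataloader 2.0.2 and anndata are installed, and the toy counts are made up) normalizes a small count matrix with a size factor followed by log2(1+x), then recovers the original integers. Exact recovery relies on each cell having at least one gene with a count of 1.

```python
import numpy as np
import anndata as ad

from scdataloader.utils import revert_to_raw  # added in 2.0.2

# hypothetical raw integer counts: 3 cells x 4 genes, each cell has a count of 1 somewhere
counts = np.array([[0, 1, 3, 6], [2, 1, 4, 4], [1, 1, 0, 8]], dtype=float)

# mimic a common preprocessing: per-cell size-factor scaling, then log2(1 + x)
norm = counts / counts.sum(axis=1, keepdims=True) * 1e4
adata = ad.AnnData(X=np.log2(1 + norm))

# mode="logp1" undoes 2**x - 1, then rescales each cell by its smallest
# non-zero value and rounds, which brings back the original counts here
adata = revert_to_raw(adata, mode="logp1")
print(adata.X)  # ~ the counts matrix above
```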
@@ -71,7 +90,7 @@ def createFoldersFor(filepath: str):
 
 
 def _fetchFromServer(
-    ensemble_server: str, attributes:
+    ensemble_server: str, attributes: List[str], database: str = "hsapiens_gene_ensembl"
 ):
     """
     Fetches data from the specified ensemble server.
@@ -248,7 +267,10 @@ def validate(adata: AnnData, organism: str, need_all=False):
     if not bt.Gene.validate(
         adata.var.index, field="ensembl_gene_id", organism=organism
     ).all():
-
+        if not bt.Gene.validate(
+            adata.var.index, field="stable_id", organism=organism
+        ).all():
+            raise ValueError("Invalid gene ensembl id found")
     return True
 
 
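The change above makes `validate` accept var indices that fail the `ensembl_gene_id` check but pass a second check against `stable_id`. Below is a self-contained sketch of that fallback logic, using plain Python sets as a hypothetical stand-in for the bionty registry lookup; the names are made up.

```python
# hypothetical stand-ins for the two bionty gene fields
known_ensembl_ids = {"ENSG00000141510", "ENSG00000012048"}
known_stable_ids = {"gene-TP53", "gene-BRCA1"}


def validate_var_index(var_index) -> bool:
    # first try ensembl_gene_id, then fall back to stable_id before failing
    if not all(g in known_ensembl_ids for g in var_index):
        if not all(g in known_stable_ids for g in var_index):
            raise ValueError("Invalid gene ensembl id found")
    return True


print(validate_var_index(["ENSG00000141510", "ENSG00000012048"]))  # True
print(validate_var_index(["gene-TP53", "gene-BRCA1"]))  # True, via the stable_id fallback
```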
@@ -274,7 +296,7 @@ def get_descendants(val, df):
     return r_onto | ontos
 
 
-def get_ancestry_mapping(all_elem:
+def get_ancestry_mapping(all_elem: List[str], onto_df: pd.DataFrame):
     """
     This function generates a mapping of all elements to their ancestors in the ontology dataframe.
 
@@ -293,7 +315,7 @@ def get_ancestry_mapping(all_elem: list, onto_df: pd.DataFrame):
     for val in ancestors.values():
         full_ancestors |= set(val)
     # removing ancestors that are not in our datasets
-    full_ancestors = full_ancestors & set(ancestors.keys())
+    # full_ancestors = full_ancestors & set(ancestors.keys())
     leafs = set(all_elem) - full_ancestors
     full_ancestors = full_ancestors - leafs
 
@@ -370,12 +392,14 @@ def load_dataset_local(
     return dataset
 
 
-def load_genes(
+def load_genes(
+    organisms: Union[str, List[str]] = "NCBITaxon:9606",
+):  # "NCBITaxon:10090",
     """
     Loads genes from the given organisms.
 
     Args:
-        organisms (Union[str,
+        organisms (Union[str, List[str]]): The organisms to load genes from.
 
     Returns:
         pd.DataFrame: The genes dataframe.
@@ -387,6 +411,9 @@ def load_genes(organisms: Union[str, list] = "NCBITaxon:9606"): # "NCBITaxon:10
         genesdf = bt.Gene.filter(
             organism_id=bt.Organism.filter(ontology_id=organism).first().id
         ).df()
+        genesdf.loc[genesdf.ensembl_gene_id.isna(), "ensembl_gene_id"] = genesdf.loc[
+            genesdf.ensembl_gene_id.isna(), "stable_id"
+        ]
         genesdf = genesdf.drop_duplicates(subset="ensembl_gene_id")
         genesdf = genesdf.set_index("ensembl_gene_id").sort_index()
         # mitochondrial genes
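The three new lines in `load_genes` backfill missing `ensembl_gene_id` values from `stable_id` before deduplicating and indexing. A minimal pandas sketch of just that step, with a made-up gene table:

```python
import pandas as pd

# toy stand-in for the bionty gene table: some rows only carry a stable_id
genesdf = pd.DataFrame(
    {
        "ensembl_gene_id": ["ENSG00000141510", None, None],
        "stable_id": [None, "gene-A", "gene-B"],
    }
)

# same fill as in the diff: where ensembl_gene_id is missing, use stable_id
genesdf.loc[genesdf.ensembl_gene_id.isna(), "ensembl_gene_id"] = genesdf.loc[
    genesdf.ensembl_gene_id.isna(), "stable_id"
]

print(genesdf["ensembl_gene_id"].tolist())  # ['ENSG00000141510', 'gene-A', 'gene-B']
```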
@@ -408,6 +435,9 @@ def load_genes(organisms: Union[str, list] = "NCBITaxon:9606"): # "NCBITaxon:10
             "_aux",
             "_branch_code",
             "space_id",
+            "ncbi_gene_ids",
+            "synonyms",
+            "description",
         ]:
             if col in organismdf.columns:
                 organismdf.drop(columns=[col], inplace=True)
@@ -577,6 +607,54 @@ def load_genes(organisms: Union[str, list] = "NCBITaxon:9606"): # "NCBITaxon:10
     return organismdf
 
 
+def _adding_scbasecamp_genes(
+    species=[],
+):
+    if len(species) == 0:
+        species = set(
+            bt.Organism.using("laminlabs/arc-virtual-cell-atlas").df().ontology_id
+        )
+        -set(["NCBITaxon:10090", "NCBITaxon:9606"])
+        species = list(species)
+    if len(bt.Organism.filter(ontology_id="NCBITaxon:9593")) == 0:
+        bt.Organism(
+            name="gorilla gorilla",
+            ontology_id="NCBITaxon:9593",
+            scientific_name="Gorilla gorilla gorilla",
+        ).save()
+    if len(bt.Organism.filter(ontology_id="NCBITaxon:9594")) == 0:
+        bt.Organism(
+            name="rice",
+            ontology_id="NCBITaxon:4530",
+            scientific_name="Oryza sativa (main)",
+        ).save()
+
+    for i in species:
+        print(i)
+        df = (
+            bt.Gene.using("laminlabs/arc-virtual-cell-atlas")
+            .filter(organism__ontology_id=i)
+            .all()
+            .df()
+        )
+        genes = []
+        org = bt.Organism.filter(ontology_id=i).one()
+        ido = org.id
+        for row in df.to_dict(orient="records"):
+            row["organism_id"] = ido
+            gene = bt.Gene(
+                ensembl_gene_id=row["ensembl_gene_id"],
+                stable_id=row["stable_id"],
+                description=row["description"],
+                symbol=row["symbol"],
+                biotype=row["biotype"],
+                organism=org,
+                _skip_validation=True,
+            )
+            genes.append(gene)
+        ln.save(genes, ignore_conflicts=True)
+
+
 def populate_my_ontology(
     sex: List[str] = ["PATO:0000384", "PATO:0000383"],
     celltypes: List[str] = [],
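A hedged example of how the new private helper might be invoked, assuming a configured lamindb setup with access to the laminlabs/arc-virtual-cell-atlas instance; the species argument here is purely illustrative.

```python
from scdataloader.utils import _adding_scbasecamp_genes

# pull gene records for gorilla only; with species=[] (the default) the helper
# iterates over every non-human, non-mouse organism found in the atlas instance
_adding_scbasecamp_genes(species=["NCBITaxon:9593"])
```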
@@ -585,7 +663,8 @@ def populate_my_ontology(
     tissues: List[str] = [],
     diseases: List[str] = [],
     dev_stages: List[str] = [],
-    organisms_clade: List[str] = ["vertebrates", "plants"],
+    organisms_clade: List[str] = ["vertebrates", "plants", "metazoa"],
+    genes_from: List[str] = ["NCBITaxon:10090", "NCBITaxon:9606"],
 ):
     """
     creates a local version of the lamin ontologies and add the required missing values in base ontologies
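The signature change adds a "metazoa" clade to the defaults and a new `genes_from` argument controlling which organisms' genes get registered. A hedged call example, assuming an initialized lamindb/bionty setup:

```python
from scdataloader.utils import populate_my_ontology

# explicit values shown for the two arguments that changed in 2.0.2;
# everything else keeps its defaults
populate_my_ontology(
    organisms_clade=["vertebrates", "plants", "metazoa"],
    genes_from=["NCBITaxon:10090", "NCBITaxon:9606"],
)
```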
@@ -601,8 +680,6 @@ def populate_my_ontology(
         `df["assay_ontology_term_id"].unique()`
 
     Args:
-        lb (lamindb): lamindb instance.
-        organisms (list, optional): List of organisms. Defaults to ["NCBITaxon:10090", "NCBITaxon:9606"].
         sex (list, optional): List of sexes. Defaults to ["PATO:0000384", "PATO:0000383"].
         celltypes (list, optional): List of cell types. Defaults to [].
         ethnicities (list, optional): List of ethnicities. Defaults to [].
@@ -610,6 +687,7 @@ def populate_my_ontology(
         tissues (list, optional): List of tissues. Defaults to [].
         diseases (list, optional): List of diseases. Defaults to [].
         dev_stages (list, optional): List of developmental stages. Defaults to [].
+        organisms_clade (list, optional): List of organisms clade. Defaults to ["vertebrates", "plants"].
     """
     # cell type
     if celltypes is not None:
@@ -620,17 +698,17 @@ def populate_my_ontology(
         records = bt.CellType.from_values(names, field="ontology_id")
         ln.save(records)
         bt.CellType(name="unknown", ontology_id="unknown").save()
-    #
+    # OrganismClade
     if organisms_clade is not None:
         records = []
         for organism_clade in organisms_clade:
             names = bt.Organism.public(organism=organism_clade).df().index
-            source = bt.
-
-
-
-
-
+            source = bt.Source.filter(name="ensembl", organism=organism_clade).last()
+            for name in names:
+                try:
+                    records.append(bt.Organism.from_source(name=name, source=source))
+                except DoesNotExist:
+                    print(f"Organism {name} not found in source {source}")
         nrecords = []
         prevrec = set()
         for rec in records:
@@ -646,7 +724,7 @@ def populate_my_ontology(
     # Phenotype
     if sex is not None:
         names = bt.Phenotype.public().df().index if not sex else sex
-        source = bt.
+        source = bt.Source.filter(name="pato").first()
         records = [
             bt.Phenotype.from_source(ontology_id=i, source=source) for i in names
         ]
@@ -687,7 +765,7 @@ def populate_my_ontology(
     if dev_stages is not None:
         if len(dev_stages) == 0:
             bt.DevelopmentalStage.import_source()
-            source = bt.
+            source = bt.Source.filter(organism="mouse", name="mmusdv").last()
             bt.DevelopmentalStage.import_source(source=source)
         else:
             names = (
@@ -710,7 +788,7 @@ def populate_my_ontology(
         bt.Disease(name="normal", ontology_id="PATO:0000461").save()
         bt.Disease(name="unknown", ontology_id="unknown").save()
     # genes
-    for organism in
+    for organism in genes_from:
         # convert onto to name
         organism = bt.Organism.filter(ontology_id=organism).one().name
         names = bt.Gene.public(organism=organism).df()["ensembl_gene_id"]
@@ -727,6 +805,29 @@ def populate_my_ontology(
         ln.save(records)
 
 
+def random_str(stringLength=6, stype="all", withdigits=True):
+    """
+    Generate a random string of letters and digits
+
+    Args:
+        stringLength (int, optional): the amount of char. Defaults to 6.
+        stype (str, optional): one of lowercase, uppercase, all. Defaults to 'all'.
+        withdigits (bool, optional): digits allowed in the string? Defaults to True.
+
+    Returns:
+        str: random string
+    """
+    if stype == "lowercase":
+        lettersAndDigits = string.ascii_lowercase
+    elif stype == "uppercase":
+        lettersAndDigits = string.ascii_uppercase
+    else:
+        lettersAndDigits = string.ascii_letters
+    if withdigits:
+        lettersAndDigits += string.digits
+    return "".join(random.choice(lettersAndDigits) for i in range(stringLength))
+
+
 def is_outlier(adata: AnnData, metric: str, nmads: int):
     """
     is_outlier detects outliers in adata.obs[metric]
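Quick usage of the new `random_str` helper; the outputs in the comments are only examples of the shape, since the result is random.

```python
from scdataloader.utils import random_str  # added in 2.0.2

print(random_str())                                        # e.g. 'aB3kQ9'
print(random_str(8, stype="lowercase"))                    # e.g. 'qw1zkd2p'
print(random_str(4, stype="uppercase", withdigits=False))  # e.g. 'QWER'
```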
@@ -792,11 +893,16 @@ def translate(
         obj = bt.Disease
     elif t == "self_reported_ethnicity_ontology_term_id":
         obj = bt.Ethnicity
+    elif t == "organism_ontology_term_id":
+        obj = bt.Organism
     else:
         return None
     if type(val) is str:
         return {val: obj.filter(ontology_id=val).one().name}
-    elif type(val) is list or type(val) is set:
-        return {i: obj.filter(ontology_id=i).one().name for i in set(val)}
     elif type(val) is dict or type(val) is Counter:
         return {obj.filter(ontology_id=k).one().name: v for k, v in val.items()}
+    elif type(val) is set:
+        return {i: obj.filter(ontology_id=i).one().name for i in val}
+    else:
+        rl = {i: obj.filter(ontology_id=i).one().name for i in set(val)}
+        return [rl.get(i, None) for i in val]
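The reworked tail of `translate` changes what comes back for each input type: a set still maps ids to names, while a list (the final `else` branch) now returns a list of names aligned with the input order. A sketch of just that dispatch, using a hypothetical in-memory lookup instead of the bionty registries:

```python
# hypothetical ontology-id -> name lookup standing in for the registry queries
lookup = {"CL:0000236": "B cell", "CL:0000084": "T cell"}


def toy_translate(val):
    if type(val) is str:
        return {val: lookup[val]}
    elif type(val) is dict:
        return {lookup[k]: v for k, v in val.items()}
    elif type(val) is set:
        return {i: lookup[i] for i in val}
    else:
        # lists now come back as names aligned with the input, duplicates included
        rl = {i: lookup.get(i, None) for i in set(val)}
        return [rl.get(i, None) for i in val]


print(toy_translate({"CL:0000084", "CL:0000236"}))
# {'CL:0000084': 'T cell', 'CL:0000236': 'B cell'}  (order may vary)
print(toy_translate(["CL:0000084", "CL:0000236", "CL:0000084"]))
# ['T cell', 'B cell', 'T cell']
```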
{scdataloader-1.9.2.dist-info → scdataloader-2.0.2.dist-info}/METADATA
CHANGED
@@ -1,30 +1,29 @@
 Metadata-Version: 2.4
 Name: scdataloader
-Version:
+Version: 2.0.2
 Summary: a dataloader for single cell data in lamindb
 Project-URL: repository, https://github.com/jkobject/scDataLoader
 Author-email: jkobject <jkobject@gmail.com>
 License-Expression: MIT
 License-File: LICENSE
 Keywords: dataloader,lamindb,pytorch,scPRINT,scRNAseq
-Requires-Python: <3.
+Requires-Python: <3.13,>=3.10
 Requires-Dist: anndata>=0.9.0
 Requires-Dist: biomart>=0.9.0
 Requires-Dist: cellxgene-census>=0.1.0
 Requires-Dist: django>=4.0.0
-Requires-Dist: harmonypy>=0.0.10
 Requires-Dist: ipykernel>=6.20.0
 Requires-Dist: jupytext>=1.16.0
-Requires-Dist: lamindb[bionty,
+Requires-Dist: lamindb[bionty,jupyter,zarr]==1.6.2
 Requires-Dist: leidenalg>=0.8.0
+Requires-Dist: lightning>=2.3.0
 Requires-Dist: matplotlib>=3.5.0
-Requires-Dist: numpy
-Requires-Dist: palantir>=1.3.3
+Requires-Dist: numpy<=2.2.0
 Requires-Dist: pandas>=2.0.0
 Requires-Dist: pytorch-lightning>=2.3.0
 Requires-Dist: scikit-misc>=0.5.0
 Requires-Dist: seaborn>=0.11.0
-Requires-Dist: torch
+Requires-Dist: torch>=2.2.0
 Requires-Dist: torchdata>=0.5.0
 Requires-Dist: zarr>=2.10.0
 Provides-Extra: dev
scdataloader-2.0.2.dist-info/RECORD
ADDED
@@ -0,0 +1,16 @@
+scdataloader/__init__.py,sha256=Z5HURehoWw1GrecImmTXIkv4ih8Q5RxNQWPm8zjjXOA,226
+scdataloader/__main__.py,sha256=xPOtrEpQQQZUGTnm8KTvsQcA_jR45oMG_VHqd0Ny7_M,8677
+scdataloader/base.py,sha256=M1gD59OffRdLOgS1vHKygOomUoAMuzjpRtAfM3SBKF8,338
+scdataloader/collator.py,sha256=pITHfsWUkrUW7lMfgXfs1AfekgcfW9XfGHwi9LlKwm8,13651
+scdataloader/config.py,sha256=nM8J11z2-lornryy1KxDE9675Rcxge4RGhdmpeiMhuI,7173
+scdataloader/data.json,sha256=Zb8c27yk3rwMgtAU8kkiWWAyUwYBrlCqKUyEtaAx9i8,8785
+scdataloader/data.py,sha256=aiSpw4rd5L162ox2kuD-8ujWNix5fvVlXozdlfthMNU,18176
+scdataloader/datamodule.py,sha256=pGPPuxDrWz0GPBUz_vb4FUprbuNKkjq1hjr46m-fRVU,35783
+scdataloader/mapped.py,sha256=h9YKQ8SG9tyZL8c6_Wu5Xov5ODGK6FzVuFopz58xwN4,29887
+scdataloader/preprocess.py,sha256=4iqHqeSVE-oKRvwD0KKl_QH6HQWVYSMRPo9QNSq-3Pk,39179
+scdataloader/utils.py,sha256=Z6td0cIphrYDLVrPrV8q4jUC_HtwGQmi-NcbpdbWrns,31034
+scdataloader-2.0.2.dist-info/METADATA,sha256=QsyNBOyn_U9_TjFVbN-5WIGIkmKWf2sSksZUzKcNlqE,10314
+scdataloader-2.0.2.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+scdataloader-2.0.2.dist-info/entry_points.txt,sha256=VXAN1m_CjbdLJ6SKYR0sBLGDV4wvv31ri7fWWuwbpno,60
+scdataloader-2.0.2.dist-info/licenses/LICENSE,sha256=rGy_eYmnxtbOvKs7qt5V0czSWxJwgX_MlgMyTZwDHbc,1073
+scdataloader-2.0.2.dist-info/RECORD,,
scdataloader-2.0.2.dist-info/licenses/LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2025 Jérémie Kalfon
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
scdataloader/VERSION
DELETED
@@ -1 +0,0 @@
-1.9.2
scdataloader-1.9.2.dist-info/RECORD
DELETED
@@ -1,16 +0,0 @@
-scdataloader/VERSION,sha256=u1mRkBjudkgsI_HORCGc5MnGOTxl7w3a5A0y151BO7U,6
-scdataloader/__init__.py,sha256=Z5HURehoWw1GrecImmTXIkv4ih8Q5RxNQWPm8zjjXOA,226
-scdataloader/__main__.py,sha256=3aZnqYrH8XDT9nW9Dbb3o9kr-sx1STmXDQHxBo_h_q0,8719
-scdataloader/base.py,sha256=M1gD59OffRdLOgS1vHKygOomUoAMuzjpRtAfM3SBKF8,338
-scdataloader/collator.py,sha256=qb1SDQ358R4w56cxOXvVLpodlZpGfVDCXocFIhqpJ0I,12867
-scdataloader/config.py,sha256=YQUKCyYTg4wTseBWumPDHKtmqI7DMR-zu5FPJUWkG-c,6549
-scdataloader/data.py,sha256=xWlNU6cJmrzP4BFMsJDIksLaxe1pUfgDBlQ_IeLIXj0,15578
-scdataloader/datamodule.py,sha256=MaBSH0MqZdDbwiGnCM4xjz0KF05WT00sycJStS7GL5w,19786
-scdataloader/mapped.py,sha256=qzhGYQ2S3IDXnnO1EM1wO_png5lDiOtfuDeYc1pQaXg,27303
-scdataloader/preprocess.py,sha256=LNXlP80rj8Ze2ElyIgLuF9x_lNA78jAI-seGMOyMKGs,37496
-scdataloader/utils.py,sha256=7ycZoV01Gn3WDHOTmXqxMXlzBPSfYtjc9NbGI7gjdwI,28445
-scdataloader-1.9.2.dist-info/METADATA,sha256=jLx2v-SJTAv-jsUjmgLNIrTenwtnfUHTnKYuy9hI3lg,10363
-scdataloader-1.9.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-scdataloader-1.9.2.dist-info/entry_points.txt,sha256=VXAN1m_CjbdLJ6SKYR0sBLGDV4wvv31ri7fWWuwbpno,60
-scdataloader-1.9.2.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
-scdataloader-1.9.2.dist-info/RECORD,,