scdataloader 0.0.2__py3-none-any.whl → 0.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
scdataloader/utils.py CHANGED
@@ -10,6 +10,7 @@ from biomart import BiomartServer
 from django.db import IntegrityError
 from scipy.sparse import csr_matrix
 from scipy.stats import median_abs_deviation
+from functools import lru_cache


 def createFoldersFor(filepath):
@@ -24,14 +25,22 @@ def createFoldersFor(filepath):


 def _fetchFromServer(ensemble_server, attributes):
+    """
+    Fetches data from the specified ensemble server.
+
+    Args:
+        ensemble_server (str): The URL of the ensemble server to fetch data from.
+        attributes (list): The list of attributes to fetch from the server.
+
+    Returns:
+        pd.DataFrame: A pandas DataFrame containing the fetched data.
+    """
     server = BiomartServer(ensemble_server)
     ensmbl = server.datasets["hsapiens_gene_ensembl"]
     print(attributes)
     res = pd.read_csv(
         io.StringIO(
-            ensmbl.search(
-                {"attributes": attributes}, header=1
-            ).content.decode()
+            ensmbl.search({"attributes": attributes}, header=1).content.decode()
         ),
         sep="\t",
     )
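The new docstring documents `_fetchFromServer` for the first time. A hedged usage sketch — the BioMart mirror URL and attribute names below are illustrative values, not taken from this diff:

```python
from scdataloader.utils import _fetchFromServer

# illustrative arguments: any BioMart mirror and valid attribute names work
df = _fetchFromServer(
    "http://www.ensembl.org/biomart",
    ["ensembl_gene_id", "hgnc_symbol", "gene_biotype"],
)
print(df.head())
```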
@@ -93,7 +102,7 @@ def getBiomartTable(
     return res


-def validate(adata, lb, organism):
+def validate(adata, organism):
     """
     validate checks if the adata object is valid for lamindb

@@ -116,8 +125,7 @@ def validate(adata, lb, organism):
     Returns:
         bool: True if the adata object is valid
     """
-    organism = lb.Organism.filter(ontology_id=organism).one().name
-    lb.settings.organism = organism
+    organism = bt.Organism.filter(ontology_id=organism).one().name

     if adata.var.index.duplicated().any():
         raise ValueError("Duplicate gene names found in adata.var.index")
@@ -136,33 +144,33 @@ def validate(adata, lb, organism):
             raise ValueError(
                 f"Column '{val}' is missing in the provided anndata object."
             )
-    bionty_source = lb.BiontySource.filter(
+    bionty_source = bt.PublicSource.filter(
         entity="DevelopmentalStage", organism=organism
     ).one()

-    if not lb.Ethnicity.validate(
+    if not bt.Ethnicity.validate(
         adata.obs["self_reported_ethnicity_ontology_term_id"],
         field="ontology_id",
     ).all():
         raise ValueError("Invalid ethnicity ontology term id found")
-    if not lb.Organism.validate(
+    if not bt.Organism.validate(
         adata.obs["organism_ontology_term_id"], field="ontology_id"
     ).all():
         raise ValueError("Invalid organism ontology term id found")
-    if not lb.Phenotype.validate(
+    if not bt.Phenotype.validate(
         adata.obs["sex_ontology_term_id"], field="ontology_id"
     ).all():
         raise ValueError("Invalid sex ontology term id found")
-    if not lb.Disease.validate(
+    if not bt.Disease.validate(
         adata.obs["disease_ontology_term_id"], field="ontology_id"
     ).all():
         raise ValueError("Invalid disease ontology term id found")
-    if not lb.CellType.validate(
+    if not bt.CellType.validate(
         adata.obs["cell_type_ontology_term_id"], field="ontology_id"
     ).all():
         raise ValueError("Invalid cell type ontology term id found")
     if (
-        not lb.DevelopmentalStage.filter(bionty_source=bionty_source)
+        not bt.DevelopmentalStage.filter(bionty_source=bionty_source)
         .validate(
             adata.obs["development_stage_ontology_term_id"],
             field="ontology_id",
@@ -170,16 +178,16 @@ def validate(adata, lb, organism):
         .all()
     ):
         raise ValueError("Invalid dev stage ontology term id found")
-    if not lb.Tissue.validate(
+    if not bt.Tissue.validate(
         adata.obs["tissue_ontology_term_id"], field="ontology_id"
     ).all():
         raise ValueError("Invalid tissue ontology term id found")
-    if not lb.ExperimentalFactor.validate(
+    if not bt.ExperimentalFactor.validate(
         adata.obs["assay_ontology_term_id"], field="ontology_id"
     ).all():
         raise ValueError("Invalid assay ontology term id found")
     if (
-        not lb.Gene.filter(organism=lb.settings.organism)
+        not bt.Gene.filter(organism=bt.settings.organism)
         .validate(adata.var.index, field="ensembl_gene_id")
         .all()
     ):
@@ -187,6 +195,8 @@ def validate(adata, lb, organism):
     return True


+# setting a cache of 200 elements
+# @lru_cache(maxsize=200)
 def get_all_ancestors(val, df):
     if val not in df.index:
         return set()
@@ -194,9 +204,7 @@ def get_all_ancestors(val, df):
     if parents is None or len(parents) == 0:
         return set()
     else:
-        return set.union(
-            set(parents), *[get_all_ancestors(val, df) for val in parents]
-        )
+        return set.union(set(parents), *[get_all_ancestors(val, df) for val in parents])


 def get_ancestry_mapping(all_elem, onto_df):
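Note that the `@lru_cache(maxsize=200)` decorator above `get_all_ancestors` is left commented out, presumably because `functools.lru_cache` only accepts hashable arguments and the ontology DataFrame passed as `df` is not hashable. A hedged sketch of one way to get the cache anyway, closing over the DataFrame and memoizing on the term id alone (the `parents` column access is an assumption, since that line falls outside this hunk):

```python
from functools import lru_cache

import pandas as pd


def make_cached_ancestor_lookup(df: pd.DataFrame, maxsize: int = 200):
    @lru_cache(maxsize=maxsize)
    def ancestors(val: str) -> frozenset:
        if val not in df.index:
            return frozenset()
        parents = df.loc[val]["parents"]  # column name assumed from context
        if parents is None or len(parents) == 0:
            return frozenset()
        # frozenset is hashable, so intermediate results cache cleanly
        return frozenset(parents).union(*[ancestors(p) for p in parents])

    return ancestors
```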
@@ -234,7 +242,6 @@ def get_ancestry_mapping(all_elem, onto_df):


 def load_dataset_local(
-    lb,
     remote_dataset,
     download_folder,
     name,
@@ -258,9 +265,7 @@ def load_dataset_local(
         lamindb.Dataset: The local dataset.
     """
     saved_files = []
-    default_storage = ln.Storage.filter(
-        root=ln.settings.storage.as_posix()
-    ).one()
+    default_storage = ln.Storage.filter(root=ln.settings.storage.as_posix()).one()
     files = (
         remote_dataset.artifacts.all()
         if not only
@@ -275,17 +280,15 @@ def load_dataset_local(
         if len(organism) == 0:
             print("No organism detected")
             continue
-        organism = lb.Organism.filter(ontology_id=organism[0]).one().name
-        # lb.settings.organism = organism
+        organism = bt.Organism.filter(ontology_id=organism[0]).one().name
+        # bt.settings.organism = organism
         path = file.path
         try:
             file.save()
         except IntegrityError:
             print(f"File {file.key} already exists in storage")
         # if location already has a file, don't save again
-        if use_cache and os.path.exists(
-            os.path.expanduser(download_folder + file.key)
-        ):
+        if use_cache and os.path.exists(os.path.expanduser(download_folder + file.key)):
             print(f"File {file.key} already exists in storage")
         else:
             path.download_to(download_folder + file.key)
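A hedged sketch of calling the now `lb`-free `load_dataset_local`; the collection name and download folder are placeholders, and only parameters visible in this diff are used:

```python
import lamindb as ln
from scdataloader.utils import load_dataset_local

remote = ln.Collection.filter(name="some-remote-collection").first()  # placeholder name
local = load_dataset_local(
    remote,                       # no `lb` handle in 0.0.3
    download_folder="~/scdata/",  # placeholder folder
    name="my-local-copy",
    description="local copy for training",
)
```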
@@ -295,13 +298,34 @@ def load_dataset_local(
         except IntegrityError:
             print(f"File {file.key} already exists in storage")
         saved_files.append(file)
-    dataset = ln.Dataset(saved_files, name=name, description=description)
+    dataset = ln.Collection(saved_files, name=name, description=description)
     dataset.save()
     return dataset


+def load_genes(organisms):
+    organismdf = []
+    if type(organisms) == str:
+        organisms = [organisms]
+    for organism in organisms:
+        genesdf = bt.Gene.filter(
+            organism_id=bt.Organism.filter(ontology_id=organism).first().id
+        ).df()
+        genesdf = genesdf[~genesdf["public_source_id"].isna()]
+        genesdf = genesdf.drop_duplicates(subset="ensembl_gene_id")
+        genesdf = genesdf.set_index("ensembl_gene_id")
+        # mitochondrial genes
+        genesdf["mt"] = genesdf.symbol.astype(str).str.startswith("MT-")
+        # ribosomal genes
+        genesdf["ribo"] = genesdf.symbol.astype(str).str.startswith(("RPS", "RPL"))
+        # hemoglobin genes.
+        genesdf["hb"] = genesdf.symbol.astype(str).str.contains(("^HB[^(P)]"))
+        genesdf["organism"] = organism
+        organismdf.append(genesdf)
+    return pd.concat(organismdf)
+
+
 def populate_my_ontology(
-    lb,
     organisms=["NCBITaxon:10090", "NCBITaxon:9606"],
     sex=["PATO:0000384", "PATO:0000383"],
     celltypes=[],
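A hedged usage sketch for the new `load_genes` helper; the organism ids are the defaults used elsewhere in this file, and the flag columns come from the function body above:

```python
from scdataloader.utils import load_genes

# accepts a single NCBITaxon id string or a list of them
genesdf = load_genes(["NCBITaxon:9606", "NCBITaxon:10090"])
print(genesdf[["symbol", "mt", "ribo", "hb", "organism"]].head())
```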
@@ -316,11 +340,11 @@ def populate_my_ontology(

     run this function just once for each new lamin storage

-    erase everything with lb.$ontology.filter().delete()
+    erase everything with bt.$ontology.filter().delete()

     add whatever value you need afterward like it is done here with:

-    `lb.$ontology(name="ddd", ontology_id="ddddd").save()`
+    `bt.$ontology(name="ddd", ontology_id="ddddd").save()`

     `df["assay_ontology_term_id"].unique()`

@@ -336,73 +360,72 @@ def populate_my_ontology(
         dev_stages (list, optional): List of developmental stages. Defaults to [].
     """

-    names = bt.CellType().df().index if not celltypes else celltypes
-    records = lb.CellType.from_values(names, field=lb.CellType.ontology_id)
+    names = bt.CellType.from_public().df().index if not celltypes else celltypes
+    records = bt.CellType.from_values(names, field="ontology_id")
     ln.save(records)
-    lb.CellType(name="unknown", ontology_id="unknown").save()
+    bt.CellType(name="unknown", ontology_id="unknown").save()
     # Organism
-    # names = bt.Organism().df().index if not organisms else organisms
-    # records = lb.Organism.from_values(names, field=lb.Organism.ontology_id)
-    # ln.save(records)
-    # lb.Organism(name="unknown", ontology_id="unknown").save()
+    names = bt.Organism.from_public().df().index if not organisms else organisms
+    records = [
+        i[0] if type(i) is list else i
+        for i in [bt.Organism.from_public(ontology_id=i) for i in names]
+    ]
+    ln.save(records)
+    bt.Organism(name="unknown", ontology_id="unknown").save()
     # Phenotype
-    name = bt.Phenotype().df().index if not sex else sex
-    records = lb.Phenotype.from_values(
-        name,
-        field=lb.Phenotype.ontology_id,
-        bionty_source=lb.BiontySource.filter(
-            entity="Phenotype", source="pato"
-        ).one(),
-    )
+    names = bt.Phenotype.from_public().df().index if not sex else sex
+    records = [
+        bt.Phenotype.from_public(
+            ontology_id=i,
+            public_source=bt.PublicSource.filter(
+                entity="Phenotype", source="pato"
+            ).one(),
+        )
+        for i in names
+    ]
     ln.save(records)
-    lb.Phenotype(name="unknown", ontology_id="unknown").save()
+    bt.Phenotype(name="unknown", ontology_id="unknown").save()
     # ethnicity
-    names = bt.Ethnicity().df().index if not ethnicities else ethnicities
-    records = lb.Ethnicity.from_values(names, field=lb.Ethnicity.ontology_id)
+    names = bt.Ethnicity.from_public().df().index if not ethnicities else ethnicities
+    records = bt.Ethnicity.from_values(names, field="ontology_id")
     ln.save(records)
-    lb.Ethnicity(
+    bt.Ethnicity(
         name="unknown", ontology_id="unknown"
     ).save()  # multi ethnic will have to get renamed
     # ExperimentalFactor
-    names = bt.ExperimentalFactor().df().index if not assays else assays
-    records = lb.ExperimentalFactor.from_values(
-        names, field=lb.ExperimentalFactor.ontology_id
-    )
+    names = bt.ExperimentalFactor.from_public().df().index if not assays else assays
+    records = bt.ExperimentalFactor.from_values(names, field="ontology_id")
     ln.save(records)
-    lb.ExperimentalFactor(name="unknown", ontology_id="unknown").save()
-    # lookup = lb.ExperimentalFactor.lookup()
+    bt.ExperimentalFactor(name="unknown", ontology_id="unknown").save()
+    # lookup = bt.ExperimentalFactor.lookup()
     # lookup.smart_seq_v4.parents.add(lookup.smart_like)
     # Tissue
-    names = bt.Tissue().df().index if not tissues else tissues
-    records = lb.Tissue.from_values(names, field=lb.Tissue.ontology_id)
+    names = bt.Tissue.from_public().df().index if not tissues else tissues
+    records = bt.Tissue.from_values(names, field="ontology_id")
     ln.save(records)
-    lb.Tissue(name="unknown", ontology_id="unknown").save()
+    bt.Tissue(name="unknown", ontology_id="unknown").save()
     # DevelopmentalStage
     names = (
-        bt.DevelopmentalStage().df().index if not dev_stages else dev_stages
-    )
-    records = lb.DevelopmentalStage.from_values(
-        names, field=lb.DevelopmentalStage.ontology_id
+        bt.DevelopmentalStage.from_public().df().index if not dev_stages else dev_stages
     )
+    records = bt.DevelopmentalStage.from_values(names, field="ontology_id")
     ln.save(records)
-    lb.DevelopmentalStage(name="unknown", ontology_id="unknown").save()
+    bt.DevelopmentalStage(name="unknown", ontology_id="unknown").save()
     # Disease
-    names = bt.Disease().df().index if not diseases else diseases
-    records = lb.Disease.from_values(names, field=lb.Disease.ontology_id)
+    names = bt.Disease.from_public().df().index if not diseases else diseases
+    records = bt.Disease.from_values(names, field="ontology_id")
     ln.save(records)
-    lb.Disease(name="normal", ontology_id="PATO:0000461").save()
-    lb.Disease(name="unknown", ontology_id="unknown").save()
+    bt.Disease(name="normal", ontology_id="PATO:0000461").save()
+    bt.Disease(name="unknown", ontology_id="unknown").save()
     # genes
-    for organism in organisms:
+    for organism in ["NCBITaxon:10090", "NCBITaxon:9606"]:
         # convert onto to name
-        organism = lb.Organism.filter(ontology_id=organism).one().name
-        names = bt.Gene(organism=organism).df()["ensembl_gene_id"]
-        records = lb.Gene.from_values(
+        organism = bt.Organism.filter(ontology_id=organism).one().name
+        names = bt.Gene.public(organism=organism).df()["ensembl_gene_id"]
+        records = bt.Gene.from_values(
             names,
             field="ensembl_gene_id",
-            bionty_source=lb.BiontySource.filter(
-                entity="Gene", organism=organism
-            ).first(),
+            organism=organism,
         )
         ln.save(records)

{scdataloader-0.0.2.dist-info → scdataloader-0.0.3.dist-info}/METADATA RENAMED
@@ -1,31 +1,33 @@
 Metadata-Version: 2.1
 Name: scdataloader
-Version: 0.0.2
+Version: 0.0.3
 Summary: a dataloader for single cell data in lamindb
-Home-page: https://github.com/jkobject/scPrint
+Home-page: https://github.com/jkobject/scDataLoader
 License: GPL3
 Keywords: scRNAseq,dataloader,pytorch,lamindb,scPrint
 Author: jkobject
-Requires-Python: >=3.10,<4.0
+Requires-Python: ==3.10.*
 Classifier: License :: Other/Proprietary License
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.10
-Classifier: Programming Language :: Python :: 3.11
-Classifier: Programming Language :: Python :: 3.12
 Requires-Dist: anndata
 Requires-Dist: biomart
+Requires-Dist: bionty
 Requires-Dist: cellxgene-census
 Requires-Dist: decoupler
 Requires-Dist: django
 Requires-Dist: ipykernel
 Requires-Dist: lamindb
 Requires-Dist: leidenalg
+Requires-Dist: lightning
+Requires-Dist: lnschema-bionty
 Requires-Dist: matplotlib
 Requires-Dist: pandas (>=2.0.0)
+Requires-Dist: scikit-misc
 Requires-Dist: seaborn
 Requires-Dist: torch
 Requires-Dist: torchdata
-Project-URL: Repository, https://github.com/jkobject/scPrint
+Project-URL: Repository, https://github.com/jkobject/scDataLoader
 Description-Content-Type: text/markdown

 # scdataloader
@@ -33,7 +35,9 @@ Description-Content-Type: text/markdown
 [![codecov](https://codecov.io/gh/jkobject/scDataLoader/branch/main/graph/badge.svg?token=scDataLoader_token_here)](https://codecov.io/gh/jkobject/scDataLoader)
 [![CI](https://github.com/jkobject/scDataLoader/actions/workflows/main.yml/badge.svg)](https://github.com/jkobject/scDataLoader/actions/workflows/main.yml)

-Awesome single cell dataloader created by @jkobject
+Awesome single cell dataloader created by @jkobject
+
+Built on top of `lamindb` and the `.mapped()` function by Sergey: https://github.com/Koncopd

 This data loader is designed to be used with:
@@ -51,12 +55,34 @@ It allows you to:
 3. create a more complex single cell dataset
 4. extend it to your need

+## About
+
+The idea is to use it to train models like scGPT / Geneformer (and soon, scPrint ;)). It works by:
+
+1. loading from lamin
+2. doing some dataset-specific preprocessing if needed
+3. creating a dataset object on top of .mapped() (which is needed for mapping genes, cell labels, etc.)
+4. passing it to a dataloader object that can work with it correctly
+
+Currently one has to use the preprocess function to make the dataset fit tools like scGPT / Geneformer, but the goal is to enable this through different Collators. This is still a WIP (please do contribute!).
+
+![](docs/scdataloader.drawio.png)
+
 ## Install it from PyPI

 ```bash
 pip install scdataloader
 ```

+### Install it locally and run the notebooks:
+
+```bash
+git clone https://github.com/jkobject/scDataLoader.git
+cd scDataLoader
+poetry install
+```
+Then run the notebooks with the poetry-installed environment.
+
 ## Usage

 see the notebooks in [docs](https://jkobject.github.io/scDataLoader/):
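To make the four "About" steps above concrete, a hedged sketch of the intended flow; the collection name, label key, and the wiring into a torch `DataLoader` are illustrative assumptions rather than the package's confirmed API (see the notebooks for the real entry points):

```python
import lamindb as ln
from torch.utils.data import DataLoader

# 1-2. load a (preprocessed) collection from lamin -- name is a placeholder
collection = ln.Collection.filter(name="my-preprocessed-cells").one()

# 3. build a dataset on top of lamindb's .mapped()
dataset = collection.mapped(label_keys=["cell_type_ontology_term_id"])

# 4. hand it to a dataloader (a custom collator would slot in here)
loader = DataLoader(dataset, batch_size=64, num_workers=4)
```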
scdataloader-0.0.3.dist-info/RECORD ADDED
@@ -0,0 +1,15 @@
+scdataloader/VERSION,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+scdataloader/__init__.py,sha256=cuj9n8np6jXU05e0VzDkUQv4CYJI6StvQ0TAsURS7wg,122
+scdataloader/__main__.py,sha256=x-EDMcfJscSM5ViRZmH0ekCm7QoYRgRF7qVeNKg2Dyc,5733
+scdataloader/base.py,sha256=M1gD59OffRdLOgS1vHKygOomUoAMuzjpRtAfM3SBKF8,338
+scdataloader/collator.py,sha256=vV4kuygk_x_HthyitKvJNn1yDzcL1COMvsP8N5vaME0,9524
+scdataloader/data.py,sha256=8G-ric6pmHf1U4X_0VnTS-nKcA6ztKtrhWJwjXsmUV0,13029
+scdataloader/dataloader.py,sha256=MqASZkmu3FG0z_cIG6L_7_T1uJd5iyVtAyEgap8Fv6c,10281
+scdataloader/mapped.py,sha256=ldBgCXnbFQUlEJ7dSWFgJ0654b6e_AK41mMxAgRn1hM,12635
+scdataloader/preprocess.py,sha256=aX69Z7cDrRG0qBa1yKngMyfkLK7DvAUnUXkiKUE-bbo,22550
+scdataloader/utils.py,sha256=7RKTZIAw0fLAmG31ph0WVvzEQPyPLRUpWjjg3P53ofc,17282
+scdataloader-0.0.3.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+scdataloader-0.0.3.dist-info/METADATA,sha256=jY_1yqWY5KYiy1jiaRSvo9z5bQcmedLBaRPk5J8WHlo,38289
+scdataloader-0.0.3.dist-info/WHEEL,sha256=d2fvjOD7sXsVzChCqf0Ty0JbHKBaLYwDbGQDwQTnJ50,88
+scdataloader-0.0.3.dist-info/entry_points.txt,sha256=nLqucZaa5wiF7-1FCgMXO916WDQ9Qm0TcxQp0f1DwE4,59
+scdataloader-0.0.3.dist-info/RECORD,,
{scdataloader-0.0.2.dist-info → scdataloader-0.0.3.dist-info}/WHEEL RENAMED
@@ -1,4 +1,4 @@
 Wheel-Version: 1.0
-Generator: poetry-core 1.8.1
+Generator: poetry-core 1.7.0
 Root-Is-Purelib: true
 Tag: py3-none-any
scdataloader-0.0.2.dist-info/RECORD DELETED
@@ -1,12 +0,0 @@
-scdataloader/VERSION,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
-scdataloader/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-scdataloader/base.py,sha256=M1gD59OffRdLOgS1vHKygOomUoAMuzjpRtAfM3SBKF8,338
-scdataloader/data.py,sha256=5-w4WL0Ho5RW47J37N-zdNhV4Fjs0b7lb6c6ugeTMi4,12793
-scdataloader/mapped.py,sha256=wQN2K7GnJv-UiNIlC41HItrVMW50tECAjc8mt-QV-1I,12290
-scdataloader/preprocess.py,sha256=sm5OPREZFJaGVF9VsTKGvT1jHT7sOouX_ql0mWx3_4Q,23103
-scdataloader/utils.py,sha256=Ih1LLnmRZYOpIk1IoAJKyRAT361zrgBgUhwJM04V6Pw,16115
-scdataloader-0.0.2.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
-scdataloader-0.0.2.dist-info/METADATA,sha256=4ICXsQcdWkwrAZZVDIYG1L3d7JCpaxpr3MYlnVsD1Qw,37340
-scdataloader-0.0.2.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
-scdataloader-0.0.2.dist-info/entry_points.txt,sha256=nLqucZaa5wiF7-1FCgMXO916WDQ9Qm0TcxQp0f1DwE4,59
-scdataloader-0.0.2.dist-info/RECORD,,