CytoTable 0.0.10.tar.gz → 0.0.12.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,19 +1,19 @@
  Metadata-Version: 2.1
  Name: CytoTable
- Version: 0.0.10
+ Version: 0.0.12
  Summary: Transform CellProfiler and DeepProfiler data for processing image-based profiling readouts with Pycytominer and other Cytomining tools.
  Home-page: https://github.com/cytomining/CytoTable
  License: BSD-3-Clause License
  Keywords: python,cellprofiler,single-cell-analysis,way-lab
  Author: Cytomining Community
- Requires-Python: >=3.8,<3.13
+ Requires-Python: >=3.9,<3.14
  Classifier: License :: Other/Proprietary License
  Classifier: Programming Language :: Python :: 3
- Classifier: Programming Language :: Python :: 3.8
  Classifier: Programming Language :: Python :: 3.9
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
  Requires-Dist: cloudpathlib[all,s3] (>=0.18.0,<0.19.0)
  Requires-Dist: duckdb (>=0.10.1)
  Requires-Dist: numpy (<=1.24.4) ; python_version < "3.12"
@@ -3,7 +3,7 @@ __init__.py for cytotable
  """
  
  # note: version data is maintained by poetry-dynamic-versioning (do not edit)
- __version__ = "0.0.10"
+ __version__ = "0.0.12"
  
  from .convert import convert
  from .exceptions import (
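
A quick way to confirm an upgraded environment matches this release (a minimal sketch; assumes CytoTable 0.0.12 is installed from a public registry):

    import cytotable

    # __version__ is maintained by poetry-dynamic-versioning, per the comment above
    print(cytotable.__version__)  # expected: "0.0.12"

    # convert remains the primary entry point re-exported by __init__.py
    from cytotable import convert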
@@ -173,6 +173,106 @@ def _prep_cast_column_data_types(
      return columns
  
  
+ @python_app
+ def _set_tablenumber(
+     sources: Dict[str, List[Dict[str, Any]]],
+     add_tablenumber: Optional[bool] = None,
+ ) -> Dict[str, List[Dict[str, Any]]]:
+     """
+     Gathers a "TableNumber" from the image table (if CSV) or
+     SQLite file (if SQLite source) which is a unique identifier
+     intended to help differentiate between imagenumbers
+     to create distinct records for single-cell profiles
+     referenced across multiple source data exports.
+     For example, ImageNumber column values from CellProfiler
+     will repeat across exports, meaning we may lose distinction
+     when combining multiple export files together through CytoTable.
+ 
+     Note:
+     - If using CSV data sources, the image.csv table is used for checksum.
+     - If using SQLite data sources, the entire SQLite database is used for checksum.
+ 
+     Args:
+         sources: Dict[str, List[Dict[str, Any]]]
+             Contains metadata about data tables and related contents.
+         add_tablenumber: Optional[bool]
+             Whether to add a calculated tablenumber.
+             Note: when False, adds None as the tablenumber
+ 
+     Returns:
+         List[Dict[str, Any]]
+             New source group with added TableNumber details.
+     """
+ 
+     from cloudpathlib import AnyPath
+ 
+     from cytotable.utils import _gather_tablenumber_checksum
+ 
+     image_table_groups = {
+         # create a data structure with the common parent for each dataset
+         # and the calculated checksum from the image table.
+         # note: the source_path parent is used for non-SQLite files
+         # whereas the direct source path is used for SQLite files.
+         (
+             str(source["source_path"].parent)
+             if source["source_path"].suffix != "sqlite"
+             else source["source_path"]
+         ): source["source_path"]
+         for source_group_name, source_group_vals in sources.items()
+         # use the image tables references only for the basis of the
+         # these calculations.
+         if any(
+             value in str(AnyPath(source_group_name).stem).lower()
+             for value in ["image", "per_image"]
+         )
+         for source in source_group_vals
+     }
+ 
+     # determine if we need to add tablenumber data
+     if (
+         # case for detecting multiple image tables which need to be differentiated
+         add_tablenumber is None
+         and (len(image_table_groups) <= 1)
+     ) or (
+         # case for explicitly set no tablenumbers
+         add_tablenumber
+         is False
+     ):
+         return {
+             source_group_name: [
+                 dict(
+                     source,
+                     **{
+                         "tablenumber": None,
+                     },
+                 )
+                 for source in source_group_vals
+             ]
+             for source_group_name, source_group_vals in sources.items()
+         }
+ 
+     # gather the image table from the source_group
+     tablenumber_table = {
+         # create a data structure with the common parent for each dataset
+         # and the calculated checksum from the image table
+         group: _gather_tablenumber_checksum(path)
+         for group, path in image_table_groups.items()
+     }
+ 
+     # return a modified sources data structure with the tablenumber added
+     return {
+         source_group_name: [
+             dict(
+                 source,
+                 **{"tablenumber": tablenumber_table[str(source["source_path"].parent)]},
+             )
+             for source in source_group_vals
+             if str(source["source_path"].parent) in list(tablenumber_table.keys())
+         ]
+         for source_group_name, source_group_vals in sources.items()
+     }
+ 
+ 
  @python_app
  def _get_table_keyset_pagination_sets(
      chunk_size: int,
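
To illustrate what the new `_set_tablenumber` task produces, here is a minimal sketch with hypothetical paths and checksum values (not package code, and the real source entries carry more keys): every source entry gains a `tablenumber` key, either a checksum derived from its export's image table or None.

    from pathlib import Path

    # hypothetical input: two CellProfiler exports whose ImageNumber values repeat
    sources = {
        "image.csv": [
            {"source_path": Path("/data/batch_a/image.csv")},
            {"source_path": Path("/data/batch_b/image.csv")},
        ],
        "cells.csv": [
            {"source_path": Path("/data/batch_a/cells.csv")},
            {"source_path": Path("/data/batch_b/cells.csv")},
        ],
    }

    # conceptually, after the task runs with add_tablenumber left as None,
    # each entry carries a checksum keyed by its parent directory, for example:
    #   {"source_path": Path("/data/batch_a/cells.csv"), "tablenumber": 2271672993}
    #   {"source_path": Path("/data/batch_b/cells.csv"), "tablenumber": 1132585332}
    # with add_tablenumber=False (or only a single image table), "tablenumber" is None.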
@@ -310,6 +410,18 @@ def _source_pageset_to_parquet(
      )
      pathlib.Path(source_dest_path).mkdir(parents=True, exist_ok=True)
  
+     # build tablenumber segment addition (if necessary)
+     tablenumber_sql = (
+         # to become tablenumber in sql select later with bigint (8-byte integer)
+         # we cast here to bigint to avoid concat or join conflicts later due to
+         # misaligned automatic data typing.
+         f"CAST({source['tablenumber']} AS BIGINT) as TableNumber, "
+         if source["tablenumber"] is not None
+         # don't introduce the column if we aren't supposed to add tablenumber
+         # as per parameter.
+         else ""
+     )
+ 
      # add source table columns
      casted_source_cols = [
          # here we cast the column to the specified type ensure the colname remains the same
@@ -317,8 +429,8 @@ def _source_pageset_to_parquet(
          for column in source["columns"]
      ]
  
-     # create selection statement from lists above
-     select_columns = ",".join(
+     # create selection statement from tablenumber_sql + lists above
+     select_columns = tablenumber_sql + ",".join(
          # if we should sort the output, add the metadata_cols
          casted_source_cols
          if sort_output
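
A short standalone sketch (hypothetical column list and checksum, outside the package) of the SELECT text produced once `tablenumber_sql` is prepended to the casted column list above:

    tablenumber = 2271672993  # hypothetical checksum gathered from the image table
    casted_source_cols = [
        'CAST("ImageNumber" AS BIGINT) AS "ImageNumber"',
        'CAST("Cells_AreaShape_Area" AS DOUBLE) AS "Cells_AreaShape_Area"',
    ]

    tablenumber_sql = (
        f"CAST({tablenumber} AS BIGINT) as TableNumber, "
        if tablenumber is not None
        else ""
    )
    select_columns = tablenumber_sql + ",".join(casted_source_cols)
    print(select_columns)
    # CAST(2271672993 AS BIGINT) as TableNumber, CAST("ImageNumber" AS BIGINT) AS "ImageNumber",...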
@@ -376,6 +488,7 @@ def _source_pageset_to_parquet(
                  page_key=source["page_key"],
                  pageset=pageset,
                  sort_output=sort_output,
+                 tablenumber=source["tablenumber"],
              ),
              where=result_filepath,
          )
@@ -994,8 +1107,9 @@ def _to_parquet(  # pylint: disable=too-many-arguments, too-many-locals
      sort_output: bool,
      page_keys: Dict[str, str],
      data_type_cast_map: Optional[Dict[str, str]] = None,
+     add_tablenumber: Optional[bool] = None,
      **kwargs,
- ) -> Union[Dict[str, List[Dict[str, Any]]], str]:
+ ) -> Union[Dict[str, List[Dict[str, Any]]], List[Any], str]:
      """
      Export data to parquet.
  
@@ -1137,6 +1251,12 @@ def _to_parquet(  # pylint: disable=too-many-arguments, too-many-locals
          for source_group_name, source_group_vals in invalid_files_dropped.items()
      }
  
+     # add tablenumber details, appending None if not add_tablenumber
+     tablenumber_prepared = _set_tablenumber(
+         sources=evaluate_futures(column_names_and_types_gathered),
+         add_tablenumber=add_tablenumber,
+     ).result()
+ 
      results = {
          source_group_name: [
              dict(
@@ -1165,7 +1285,7 @@ def _to_parquet(  # pylint: disable=too-many-arguments, too-many-locals
              for source in source_group_vals
          ]
          for source_group_name, source_group_vals in evaluate_futures(
-             column_names_and_types_gathered
+             tablenumber_prepared
          ).items()
      }
  
@@ -1244,15 +1364,19 @@ def _to_parquet(  # pylint: disable=too-many-arguments, too-many-locals
          ).result()
      ]
  
-     # concat our join chunks together as one cohesive dataset
-     # return results in common format which includes metadata
-     # for lineage and debugging
-     results = _concat_join_sources(
-         dest_path=expanded_dest_path,
-         join_sources=[join.result() for join in join_sources_result],
-         sources=evaluated_results,
-         sort_output=sort_output,
-     )
+     if concat:
+         # concat our join chunks together as one cohesive dataset
+         # return results in common format which includes metadata
+         # for lineage and debugging
+         results = _concat_join_sources(
+             dest_path=expanded_dest_path,
+             join_sources=[join.result() for join in join_sources_result],
+             sources=evaluated_results,
+             sort_output=sort_output,
+         )
+     else:
+         # else we leave the joined chunks as-is and return them
+         return evaluate_futures(join_sources_result)
  
      # wrap the final result as a future and return
      return evaluate_futures(results)
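
With the `if concat:` branch above, disabling concatenation now returns the joined chunks directly. A hedged usage sketch (paths are placeholders; keyword names follow the existing `convert` signature):

    import cytotable

    result = cytotable.convert(
        source_path="./examplehuman",        # placeholder CellProfiler CSV export
        dest_path="./examplehuman.parquet",
        dest_datatype="parquet",
        preset="cellprofiler_csv",
        join=True,
        concat=False,  # leave joined chunks as-is
    )
    # result is now a list of joined chunk outputs rather than one concatenated file
    print(result)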
@@ -1273,12 +1397,13 @@ def convert(  # pylint: disable=too-many-arguments,too-many-locals
      infer_common_schema: bool = True,
      drop_null: bool = False,
      data_type_cast_map: Optional[Dict[str, str]] = None,
+     add_tablenumber: Optional[bool] = None,
      page_keys: Optional[Dict[str, str]] = None,
      sort_output: bool = True,
      preset: Optional[str] = "cellprofiler_csv",
      parsl_config: Optional[parsl.Config] = None,
      **kwargs,
- ) -> Union[Dict[str, List[Dict[str, Any]]], str]:
+ ) -> Union[Dict[str, List[Dict[str, Any]]], List[Any], str]:
      """
      Convert file-based data from various sources to Pycytominer-compatible standards.
  
@@ -1322,6 +1447,11 @@ def convert(  # pylint: disable=too-many-arguments,too-many-locals
              A dictionary mapping data type groups to specific types.
              Roughly includes Arrow data types language from:
              https://arrow.apache.org/docs/python/api/datatypes.html
+         add_tablenumber: Optional[bool]
+             Whether to add a calculated tablenumber which helps differentiate
+             various repeated values (such as ObjectNumber) within source data.
+             Useful for processing multiple SQLite or CSV data sources together
+             to retain distinction from each dataset.
          page_keys: str:
              The table and column names to be used for key pagination.
              Uses the form: {"table_name":"column_name"}.
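
A usage sketch for the `add_tablenumber` option documented above (the SQLite path is a placeholder; other keywords follow the existing `convert` signature):

    import cytotable

    result = cytotable.convert(
        source_path="./plate_1.sqlite",      # placeholder single-cell SQLite export
        dest_path="./plate_1.parquet",
        dest_datatype="parquet",
        preset="cellprofiler_sqlite_pycytominer",
        # True forces a checksum-based TableNumber even for a single source;
        # None (the default) adds it only when multiple image tables are detected;
        # False omits the column entirely.
        add_tablenumber=True,
    )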
@@ -1462,6 +1592,7 @@ def convert(  # pylint: disable=too-many-arguments,too-many-locals
          infer_common_schema=infer_common_schema,
          drop_null=drop_null,
          data_type_cast_map=data_type_cast_map,
+         add_tablenumber=add_tablenumber,
          sort_output=sort_output,
          page_keys=cast(dict, page_keys),
          **kwargs,
@@ -41,6 +41,7 @@ config = {
          "CONFIG_JOINS": """
              SELECT
                  image.Metadata_ImageNumber,
+                 COLUMNS('Image_FileName_.*'),
                  cytoplasm.* EXCLUDE (Metadata_ImageNumber),
                  cells.* EXCLUDE (Metadata_ImageNumber, Metadata_ObjectNumber),
                  nuclei.* EXCLUDE (Metadata_ImageNumber, Metadata_ObjectNumber)
@@ -92,6 +93,7 @@ config = {
                  per_image.Metadata_ImageNumber,
                  per_image.Image_Metadata_Well,
                  per_image.Image_Metadata_Plate,
+                 COLUMNS('Image_FileName_.*'),
                  per_cytoplasm.* EXCLUDE (Metadata_ImageNumber),
                  per_cells.* EXCLUDE (Metadata_ImageNumber),
                  per_nuclei.* EXCLUDE (Metadata_ImageNumber)
@@ -148,6 +150,7 @@ config = {
                  image.Metadata_Well,
                  image.Image_Metadata_Site,
                  image.Image_Metadata_Row,
+                 COLUMNS('Image_FileName_.*'),
                  cytoplasm.* EXCLUDE (Metadata_ImageNumber),
                  cells.* EXCLUDE (Metadata_ImageNumber),
                  nuclei.* EXCLUDE (Metadata_ImageNumber)
@@ -206,6 +209,7 @@ config = {
                  per_image.Metadata_ImageNumber,
                  per_image.Image_Metadata_Well,
                  per_image.Image_Metadata_Plate,
+                 COLUMNS('Image_FileName_.*'),
                  per_cytoplasm.* EXCLUDE (Metadata_ImageNumber),
                  per_cells.* EXCLUDE (Metadata_ImageNumber),
                  per_nuclei.* EXCLUDE (Metadata_ImageNumber)
@@ -265,6 +269,7 @@ config = {
                  image.Metadata_ImageNumber,
                  image.Image_Metadata_Well,
                  image.Image_Metadata_Plate,
+                 COLUMNS('Image_FileName_.*'),
                  cytoplasm.* EXCLUDE (Metadata_TableNumber, Metadata_ImageNumber),
                  cells.* EXCLUDE (Metadata_TableNumber, Metadata_ImageNumber),
                  nuclei.* EXCLUDE (Metadata_TableNumber, Metadata_ImageNumber)
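
The preset joins above now carry every `Image_FileName_*` column through DuckDB's `COLUMNS()` regular-expression star expression. A small standalone sketch (hypothetical table and columns) of what that expression selects:

    import duckdb

    con = duckdb.connect()
    con.execute(
        "CREATE TABLE image AS SELECT "
        "1 AS Metadata_ImageNumber, "
        "'dna.tiff' AS Image_FileName_DNA, "
        "'actin.tiff' AS Image_FileName_Actin, "
        "0.5 AS Image_Intensity_MeanIntensity_DNA"
    )
    # selects only the Image_FileName_* columns, mirroring the preset joins
    print(con.execute("SELECT COLUMNS('Image_FileName_.*') FROM image").fetchall())
    # [('dna.tiff', 'actin.tiff')]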
@@ -163,14 +163,17 @@ def _get_source_filepaths(
      for unique_source in set(source["source_path"].name for source in sources):
          grouped_sources[unique_source.capitalize()] = [
              # case for files besides sqlite
-             source if source["source_path"].suffix.lower() != ".sqlite"
-             # if we have sqlite entries, update the source_path to the parent
-             # (the parent table database file) as grouped key name will now
-             # encapsulate the table name details.
-             else {
-                 "source_path": source["source_path"].parent,
-                 "table_name": source["table_name"],
-             }
+             (
+                 source
+                 if source["source_path"].suffix.lower() != ".sqlite"
+                 # if we have sqlite entries, update the source_path to the parent
+                 # (the parent table database file) as grouped key name will now
+                 # encapsulate the table name details.
+                 else {
+                     "source_path": source["source_path"].parent,
+                     "table_name": source["table_name"],
+                 }
+             )
              for source in sources
              # focus only on entries which include the unique_source name
              if source["source_path"].name == unique_source
@@ -166,6 +166,12 @@ def _duckdb_reader() -> duckdb.DuckDBPyConnection:
          https://duckdb.org/docs/sql/configuration#configuration-reference
          */
          PRAGMA preserve_insertion_order=FALSE;
+ 
+         /*
+         Disable progress bar from displaying (defaults to TRUE)
+         See earlier documentation references above for more information.
+         */
+         SET enable_progress_bar=FALSE;
          """,
      )
  
@@ -176,6 +182,7 @@ def _sqlite_mixed_type_query_to_parquet(
      page_key: str,
      pageset: Tuple[Union[int, float], Union[int, float]],
      sort_output: bool,
+     tablenumber: Optional[int] = None,
  ) -> str:
      """
      Performs SQLite table data extraction where one or many
@@ -195,6 +202,9 @@
              Specifies whether to sort cytotable output or not.
          add_cytotable_meta: bool, default=False:
              Whether to add CytoTable metadata fields or not
+         tablenumber: Optional[int], default=None:
+             An optional table number to append to the results.
+             Defaults to None.
  
      Returns:
          pyarrow.Table:
@@ -250,9 +260,19 @@
          # return the translated type for use in SQLite
          return translated_type[0]
  
+     # build tablenumber segment addition (if necessary)
+     tablenumber_sql = (
+         # to become tablenumber in sql select later with integer
+         f"CAST({tablenumber} AS INTEGER) as TableNumber, "
+         if tablenumber is not None
+         # if we don't have a tablenumber value, don't introduce the column
+         else ""
+     )
+ 
      # create cases for mixed-type handling in each column discovered above
-     query_parts = [
-         f"""
+     query_parts = tablenumber_sql + ", ".join(
+         [
+             f"""
          CASE
              /* when the storage class type doesn't match the column, return nulltype */
              WHEN typeof({col['column_name']}) !=
@@ -261,13 +281,14 @@
              ELSE {col['column_name']}
          END AS {col['column_name']}
          """
-         for col in column_info
-     ]
+             for col in column_info
+         ]
+     )
  
      # perform the select using the cases built above and using chunksize + offset
      sql_stmt = f"""
          SELECT
-             {', '.join(query_parts)}
+             {query_parts}
          FROM {table_name}
          WHERE {page_key} BETWEEN {pageset[0]} AND {pageset[1]}
          {"ORDER BY " + page_key if sort_output else ""};
@@ -476,6 +497,47 @@ def _write_parquet_table_with_metadata(table: pa.Table, **kwargs) -> None:
      )
  
  
+ def _gather_tablenumber_checksum(pathname: str, buffer_size: int = 1048576) -> int:
+     """
+     Build and return a checksum for use as a unique identifier across datasets
+     referenced from cytominer-database:
+     https://github.com/cytomining/cytominer-database/blob/master/cytominer_database/ingest_variable_engine.py#L129
+ 
+     Args:
+         pathname: str:
+             A path to a file with which to generate the checksum on.
+         buffer_size: int:
+             Buffer size to use for reading data.
+ 
+     Returns:
+         int
+             an integer representing the checksum of the pathname file.
+     """
+ 
+     import os
+     import zlib
+ 
+     # check whether the buffer size is larger than the file_size
+     file_size = os.path.getsize(pathname)
+     if file_size < buffer_size:
+         buffer_size = file_size
+ 
+     # open file
+     with open(str(pathname), "rb") as stream:
+         # begin result formation
+         result = zlib.crc32(bytes(0))
+         while True:
+             # read data from stream using buffer size
+             buffer = stream.read(buffer_size)
+             if not buffer:
+                 # if we have no more data to use, break while loop
+                 break
+             # use buffer read data to form checksum
+             result = zlib.crc32(buffer, result)
+ 
+     return result & 0xFFFFFFFF
+ 
+ 
  def _unwrap_value(val: Union[parsl.dataflow.futures.AppFuture, Any]) -> Any:
      """
      Helper function to unwrap futures from values or return values
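
A brief equivalent sketch of the checksum helper above (the file path is a placeholder): the CRC32 of the image table's bytes becomes the TableNumber, so identical exports hash identically while distinct exports diverge.

    import zlib

    def crc32_of_file(pathname: str, buffer_size: int = 1048576) -> int:
        # same approach as _gather_tablenumber_checksum: stream the file through
        # zlib.crc32 and mask the running value to an unsigned 32-bit integer
        result = zlib.crc32(b"")
        with open(pathname, "rb") as stream:
            while chunk := stream.read(buffer_size):
                result = zlib.crc32(chunk, result)
        return result & 0xFFFFFFFF

    # e.g. crc32_of_file("Image.csv")  # value depends entirely on file contents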
@@ -531,14 +593,16 @@ def _unwrap_source(
      return _unwrap_value(source)
  
  
- def evaluate_futures(sources: Union[Dict[str, List[Dict[str, Any]]], str]) -> Any:
+ def evaluate_futures(
+     sources: Union[Dict[str, List[Dict[str, Any]]], List[Any], str]
+ ) -> Any:
      """
      Evaluates any Parsl futures for use within other tasks.
      This enables a pattern of Parsl app usage as "tasks" and delayed
      future result evaluation for concurrency.
  
      Args:
-         sources: Union[Dict[str, List[Dict[str, Any]]], str]
+         sources: Union[Dict[str, List[Dict[str, Any]]], List[Any], str]
              Sources are an internal data structure used by CytoTable for
              processing and organizing data results. They may include futures
              which require asynchronous processing through Parsl, so we
@@ -1,68 +1,59 @@
+ [build-system]
+ build-backend = "poetry_dynamic_versioning.backend"
+ requires = [ "poetry-core>=1", "poetry-dynamic-versioning>=1,<2" ]
+ 
  [tool.poetry]
  name = "CytoTable"
  # note: version data is maintained by poetry-dynamic-versioning (do not edit)
- version = "0.0.10"
+ version = "0.0.12"
  description = "Transform CellProfiler and DeepProfiler data for processing image-based profiling readouts with Pycytominer and other Cytomining tools."
- authors = ["Cytomining Community"]
+ authors = [ "Cytomining Community" ]
  license = "BSD-3-Clause License"
- packages = [{include = "cytotable"}]
+ packages = [ { include = "cytotable" } ]
  readme = "readme.md"
  repository = "https://github.com/cytomining/CytoTable"
  documentation = "https://cytomining.github.io/CytoTable/"
- keywords = ["python", "cellprofiler","single-cell-analysis", "way-lab"]
- 
- [tool.poetry-dynamic-versioning]
- enable = false
- style = "pep440"
- vcs = "git"
- 
- [build-system]
- requires = ["poetry-core>=1.0.0", "poetry-dynamic-versioning>=1.0.0,<2.0.0"]
- build-backend = "poetry_dynamic_versioning.backend"
- 
- [tool.setuptools_scm]
+ keywords = [ "python", "cellprofiler", "single-cell-analysis", "way-lab" ]
  
  [tool.poetry.dependencies]
- python = ">=3.8,<3.13"
+ python = ">=3.9,<3.14"
  pyarrow = ">=13.0.0"
- cloudpathlib = {extras = ["all", "s3"], version = "^0.18.0"}
+ cloudpathlib = { extras = [ "all", "s3" ], version = "^0.18.0" }
  duckdb = ">=0.8.0,!=0.10.0,>=0.10.1"
  parsl = ">=2023.9.25"
  numpy = [
-     {version = "<=1.24.4", python = "<3.12"},
-     {version = ">=1.26.0", python = ">=3.12"}
+     { version = "<=1.24.4", python = "<3.12" },
+     { version = ">=1.26.0", python = ">=3.12" },
  ]
  scipy = [
-     {version = "<1.12.0", python = "<3.9"},
-     {version = "^1.12.0", python = ">=3.9"}
+     { version = "<1.12.0", python = "<3.9" },
+     { version = "^1.12.0", python = ">=3.9" },
  ]
  
  [tool.poetry.group.dev.dependencies]
- pytest = "^7.4.0"
- pytest-cov = "^4.1.0"
- Sphinx = "^6.0.0"
- myst-parser = "^2.0.0"
- sphinxcontrib-mermaid = "^0.9.0"
+ pytest = ">=7.4,<9.0"
+ pytest-cov = ">=4.1,<6.0"
+ Sphinx = ">=6,<8"
+ myst-parser = ">=2,<4"
+ sphinxcontrib-mermaid = ">=0.9,<1.1"
  cytominer-database = "^0.3.4"
  pycytominer = "^1.1.0"
  dunamai = "^1.19.0"
- botocore = "^1.34.133" # added to help avoid dependency reolution issues
+ botocore = "^1.34.133" # added to help avoid dependency reolution issues
  
- [tool.vulture]
- min_confidence = 80
- paths = ["cytotable"]
- sort_by_size = true
- verbose = true
+ [tool.poetry-dynamic-versioning]
+ enable = false
+ style = "pep440"
+ vcs = "git"
+ 
+ [tool.setuptools_scm]
+ root = "."
  
  [tool.isort]
  profile = "black"
  
- [tool.bandit]
- exclude_dirs = ["tests"]
- skips = ["B608"]
- 
  [tool.codespell]
- ignore-words=".codespell-ignore"
+ ignore-words = ".codespell-ignore"
  
  [tool.pytest.ini_options]
  filterwarnings = [
@@ -70,5 +61,15 @@ filterwarnings = [
      "ignore::DeprecationWarning:cytominer_database",
  ]
  markers = [
-     "large_data_tests: tests which involve the use of large data.",
+     "large_data_tests: tests which involve the use of large data.",
  ]
+ 
+ [tool.vulture]
+ min_confidence = 80
+ paths = [ "cytotable" ]
+ sort_by_size = true
+ verbose = true
+ 
+ [tool.bandit]
+ exclude_dirs = [ "tests" ]
+ skips = [ "B608" ]