spacr 0.0.70__tar.gz → 0.0.80__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. {spacr-0.0.70/spacr.egg-info → spacr-0.0.80}/PKG-INFO +10 -8
  2. {spacr-0.0.70 → spacr-0.0.80}/README.rst +8 -6
  3. {spacr-0.0.70 → spacr-0.0.80}/setup.py +3 -7
  4. {spacr-0.0.70 → spacr-0.0.80}/spacr/__init__.py +4 -1
  5. spacr-0.0.80/spacr/__main__.py +6 -0
  6. {spacr-0.0.70 → spacr-0.0.80}/spacr/annotate_app.py +75 -61
  7. {spacr-0.0.70 → spacr-0.0.80}/spacr/core.py +39 -246
  8. {spacr-0.0.70 → spacr-0.0.80}/spacr/io.py +53 -116
  9. {spacr-0.0.70 → spacr-0.0.80}/spacr/measure.py +46 -59
  10. {spacr-0.0.70 → spacr-0.0.80}/spacr/plot.py +117 -81
  11. {spacr-0.0.70 → spacr-0.0.80}/spacr/sequencing.py +508 -491
  12. {spacr-0.0.70 → spacr-0.0.80}/spacr/sim.py +24 -29
  13. {spacr-0.0.70 → spacr-0.0.80}/spacr/utils.py +487 -260
  14. {spacr-0.0.70 → spacr-0.0.80/spacr.egg-info}/PKG-INFO +10 -8
  15. {spacr-0.0.70 → spacr-0.0.80}/spacr.egg-info/SOURCES.txt +0 -5
  16. {spacr-0.0.70 → spacr-0.0.80}/spacr.egg-info/requires.txt +1 -1
  17. spacr-0.0.70/spacr/__main__.py +0 -13
  18. spacr-0.0.70/spacr/alpha.py +0 -807
  19. spacr-0.0.70/spacr/cli.py +0 -41
  20. spacr-0.0.70/spacr/foldseek.py +0 -779
  21. spacr-0.0.70/spacr/get_alfafold_structures.py +0 -72
  22. spacr-0.0.70/spacr/old_code.py +0 -358
  23. {spacr-0.0.70 → spacr-0.0.80}/LICENSE +0 -0
  24. {spacr-0.0.70 → spacr-0.0.80}/MANIFEST.in +0 -0
  25. {spacr-0.0.70 → spacr-0.0.80}/setup.cfg +0 -0
  26. {spacr-0.0.70 → spacr-0.0.80}/spacr/chris.py +0 -0
  27. {spacr-0.0.70 → spacr-0.0.80}/spacr/deep_spacr.py +0 -0
  28. {spacr-0.0.70 → spacr-0.0.80}/spacr/graph_learning.py +0 -0
  29. {spacr-0.0.70 → spacr-0.0.80}/spacr/gui.py +0 -0
  30. {spacr-0.0.70 → spacr-0.0.80}/spacr/gui_2.py +0 -0
  31. {spacr-0.0.70 → spacr-0.0.80}/spacr/gui_classify_app.py +0 -0
  32. {spacr-0.0.70 → spacr-0.0.80}/spacr/gui_mask_app.py +0 -0
  33. {spacr-0.0.70 → spacr-0.0.80}/spacr/gui_measure_app.py +0 -0
  34. {spacr-0.0.70 → spacr-0.0.80}/spacr/gui_sim_app.py +0 -0
  35. {spacr-0.0.70 → spacr-0.0.80}/spacr/gui_utils.py +0 -0
  36. {spacr-0.0.70 → spacr-0.0.80}/spacr/logger.py +0 -0
  37. {spacr-0.0.70 → spacr-0.0.80}/spacr/mask_app.py +0 -0
  38. {spacr-0.0.70 → spacr-0.0.80}/spacr/models/cp/toxo_pv_lumen.CP_model +0 -0
  39. {spacr-0.0.70 → spacr-0.0.80}/spacr/timelapse.py +0 -0
  40. {spacr-0.0.70 → spacr-0.0.80}/spacr/version.py +0 -0
  41. {spacr-0.0.70 → spacr-0.0.80}/spacr.egg-info/dependency_links.txt +0 -0
  42. {spacr-0.0.70 → spacr-0.0.80}/spacr.egg-info/entry_points.txt +0 -0
  43. {spacr-0.0.70 → spacr-0.0.80}/spacr.egg-info/top_level.txt +0 -0
  44. {spacr-0.0.70 → spacr-0.0.80}/tests/test_annotate_app.py +0 -0
  45. {spacr-0.0.70 → spacr-0.0.80}/tests/test_core.py +0 -0
  46. {spacr-0.0.70 → spacr-0.0.80}/tests/test_gui_classify_app.py +0 -0
  47. {spacr-0.0.70 → spacr-0.0.80}/tests/test_gui_mask_app.py +0 -0
  48. {spacr-0.0.70 → spacr-0.0.80}/tests/test_gui_measure_app.py +0 -0
  49. {spacr-0.0.70 → spacr-0.0.80}/tests/test_gui_sim_app.py +0 -0
  50. {spacr-0.0.70 → spacr-0.0.80}/tests/test_gui_utils.py +0 -0
  51. {spacr-0.0.70 → spacr-0.0.80}/tests/test_io.py +0 -0
  52. {spacr-0.0.70 → spacr-0.0.80}/tests/test_mask_app.py +0 -0
  53. {spacr-0.0.70 → spacr-0.0.80}/tests/test_measure.py +0 -0
  54. {spacr-0.0.70 → spacr-0.0.80}/tests/test_plot.py +0 -0
  55. {spacr-0.0.70 → spacr-0.0.80}/tests/test_sim.py +0 -0
  56. {spacr-0.0.70 → spacr-0.0.80}/tests/test_timelapse.py +0 -0
  57. {spacr-0.0.70 → spacr-0.0.80}/tests/test_train.py +0 -0
  58. {spacr-0.0.70 → spacr-0.0.80}/tests/test_umap.py +0 -0
  59. {spacr-0.0.70 → spacr-0.0.80}/tests/test_utils.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: spacr
- Version: 0.0.70
+ Version: 0.0.80
  Summary: Spatial phenotype analysis of crisp screens (SpaCr)
  Home-page: https://github.com/EinarOlafsson/spacr
  Author: Einar Birnir Olafsson
@@ -9,7 +9,7 @@ Classifier: Programming Language :: Python :: 3
  Classifier: License :: OSI Approved :: MIT License
  Classifier: Operating System :: OS Independent
  License-File: LICENSE
- Requires-Dist: dgl
+ Requires-Dist: dgl==0.9.1
  Requires-Dist: torch<3.0,>=2.2.1
  Requires-Dist: torchvision<1.0,>=0.17.1
  Requires-Dist: torch-geometric<3.0,>=2.5.1
@@ -46,6 +46,8 @@ Requires-Dist: opencv-python-headless; extra == "headless"
  Provides-Extra: full
  Requires-Dist: opencv-python; extra == "full"

+ .. |Documentation Status| image:: https://readthedocs.org/projects/spacr/badge/?version=latest
+ :target: https://spacr.readthedocs.io/en/latest/?badge=latest
  .. |PyPI version| image:: https://badge.fury.io/py/spacr.svg
  :target: https://badge.fury.io/py/spacr
  .. |Python version| image:: https://img.shields.io/pypi/pyversions/spacr
@@ -55,25 +57,25 @@ Requires-Dist: opencv-python; extra == "full"
  .. |repo size| image:: https://img.shields.io/github/repo-size/EinarOlafsson/spacr
  :target: https://github.com/EinarOlafsson/spacr/

- |PyPI version| |Python version| |Licence: GPL v3| |repo size|
+ |Documentation Status| |PyPI version| |Python version| |Licence: GPL v3| |repo size|

  SpaCr
  =====

- Spatial phenotype analysis of CRISPR-Cas9 screens (SpaCr). The spatial organization of organelles and proteins within cells constitutes a key level of functional regulation. In the context of infectious disease, the spatial relationships between host cell structures and intracellular pathogens are critical to understand host clearance mechanisms and how pathogens evade them. Spacr is a Python-based software package for generating single cell image data for deep-learning sub-cellular/cellular phenotypic classification from pooled genetic CRISPR-Cas9 screens. Spacr provides a flexible toolset to extract single cell images and measurements from high content cell painting experiments, train deep-learning models to classify cellular/subcellular phenotypes, simulate and analyze pooled CRISPR-Cas9 imaging screens.
+ Spatial phenotype analysis of CRISPR-Cas9 screens (SpaCr). The spatial organization of organelles and proteins within cells constitutes a key level of functional regulation. In the context of infectious disease, the spatial relationships between host cell structures and intracellular pathogens are critical to understand host clearance mechanisms and how pathogens evade them. SpaCr is a Python-based software package for generating single-cell image data for deep-learning sub-cellular/cellular phenotypic classification from pooled genetic CRISPR-Cas9 screens. SpaCr provides a flexible toolset to extract single-cell images and measurements from high-content cell painting experiments, train deep-learning models to classify cellular/subcellular phenotypes, simulate, and analyze pooled CRISPR-Cas9 imaging screens.

  Features
  --------

  - **Generate Masks:** Generate cellpose masks of cell, nuclei, and pathogen objects.

- - **Object Measurements:** Measurements for each object including scikit-image-regionprops, intensity percentiles, shannon-entropy, pearsons and manders correlations, homogeneity and radial distribution. Measurements are saved to a SQL database in object level tables.
+ - **Object Measurements:** Measurements for each object including scikit-image-regionprops, intensity percentiles, shannon-entropy, pearsons and manders correlations, homogeneity, and radial distribution. Measurements are saved to a SQL database in object-level tables.

- - **Crop Images:** Objects (e.gcells) can be saved as PNGs from the object area or bounding box area of each object. Object paths are saved in a SQL database that can be annotated and used to train CNNs/Transformer models for classification tasks.
+ - **Crop Images:** Objects (e.g., cells) can be saved as PNGs from the object area or bounding box area of each object. Object paths are saved in a SQL database that can be annotated and used to train CNNs/Transformer models for classification tasks.

  - **Train CNNs or Transformers:** Train Torch Convolutional Neural Networks (CNNs) or Transformers to classify single object images. Train Torch models with IRM/ERM, checkpointing.

- - **Manual Annotation:** Supports manual annotation of single cell images and segmentation to refine training datasets for training CNNs/Transformers or cellpose, respectively.
+ - **Manual Annotation:** Supports manual annotation of single-cell images and segmentation to refine training datasets for training CNNs/Transformers or cellpose, respectively.

  - **Finetune Cellpose Models:** Adjust pre-existing Cellpose models to your specific dataset for improved performance.

@@ -93,7 +95,7 @@ Requires Tkinter for graphical user interface features.
  Ubuntu
  ~~~~~~

- Before installing spacr, ensure Tkinter is installed:
+ Before installing SpaCr, ensure Tkinter is installed:

  (Tkinter is included with the standard Python installation on macOS, and Windows)

@@ -1,3 +1,5 @@
+ .. |Documentation Status| image:: https://readthedocs.org/projects/spacr/badge/?version=latest
+ :target: https://spacr.readthedocs.io/en/latest/?badge=latest
  .. |PyPI version| image:: https://badge.fury.io/py/spacr.svg
  :target: https://badge.fury.io/py/spacr
  .. |Python version| image:: https://img.shields.io/pypi/pyversions/spacr
@@ -7,25 +9,25 @@
  .. |repo size| image:: https://img.shields.io/github/repo-size/EinarOlafsson/spacr
  :target: https://github.com/EinarOlafsson/spacr/

- |PyPI version| |Python version| |Licence: GPL v3| |repo size|
+ |Documentation Status| |PyPI version| |Python version| |Licence: GPL v3| |repo size|

  SpaCr
  =====

- Spatial phenotype analysis of CRISPR-Cas9 screens (SpaCr). The spatial organization of organelles and proteins within cells constitutes a key level of functional regulation. In the context of infectious disease, the spatial relationships between host cell structures and intracellular pathogens are critical to understand host clearance mechanisms and how pathogens evade them. Spacr is a Python-based software package for generating single cell image data for deep-learning sub-cellular/cellular phenotypic classification from pooled genetic CRISPR-Cas9 screens. Spacr provides a flexible toolset to extract single cell images and measurements from high content cell painting experiments, train deep-learning models to classify cellular/subcellular phenotypes, simulate and analyze pooled CRISPR-Cas9 imaging screens.
+ Spatial phenotype analysis of CRISPR-Cas9 screens (SpaCr). The spatial organization of organelles and proteins within cells constitutes a key level of functional regulation. In the context of infectious disease, the spatial relationships between host cell structures and intracellular pathogens are critical to understand host clearance mechanisms and how pathogens evade them. SpaCr is a Python-based software package for generating single-cell image data for deep-learning sub-cellular/cellular phenotypic classification from pooled genetic CRISPR-Cas9 screens. SpaCr provides a flexible toolset to extract single-cell images and measurements from high-content cell painting experiments, train deep-learning models to classify cellular/subcellular phenotypes, simulate, and analyze pooled CRISPR-Cas9 imaging screens.

  Features
  --------

  - **Generate Masks:** Generate cellpose masks of cell, nuclei, and pathogen objects.

- - **Object Measurements:** Measurements for each object including scikit-image-regionprops, intensity percentiles, shannon-entropy, pearsons and manders correlations, homogeneity and radial distribution. Measurements are saved to a SQL database in object level tables.
+ - **Object Measurements:** Measurements for each object including scikit-image-regionprops, intensity percentiles, shannon-entropy, pearsons and manders correlations, homogeneity, and radial distribution. Measurements are saved to a SQL database in object-level tables.

- - **Crop Images:** Objects (e.gcells) can be saved as PNGs from the object area or bounding box area of each object. Object paths are saved in a SQL database that can be annotated and used to train CNNs/Transformer models for classification tasks.
+ - **Crop Images:** Objects (e.g., cells) can be saved as PNGs from the object area or bounding box area of each object. Object paths are saved in a SQL database that can be annotated and used to train CNNs/Transformer models for classification tasks.

  - **Train CNNs or Transformers:** Train Torch Convolutional Neural Networks (CNNs) or Transformers to classify single object images. Train Torch models with IRM/ERM, checkpointing.

- - **Manual Annotation:** Supports manual annotation of single cell images and segmentation to refine training datasets for training CNNs/Transformers or cellpose, respectively.
+ - **Manual Annotation:** Supports manual annotation of single-cell images and segmentation to refine training datasets for training CNNs/Transformers or cellpose, respectively.

  - **Finetune Cellpose Models:** Adjust pre-existing Cellpose models to your specific dataset for improved performance.

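The Object Measurements and Crop Images bullets above both describe results being written to a per-experiment SQLite database; elsewhere in this release that database is measurements/measurements.db and holds a png_list table used by the annotation GUI (see the annotate_app.py hunks below). A minimal inspection sketch, with the experiment folder as a placeholder and the remaining table names left to discovery since they are not listed in this diff:

import os
import sqlite3
import pandas as pd

src = '/path/to/experiment'  # placeholder: folder produced by a spacr run
db = os.path.join(src, 'measurements', 'measurements.db')

conn = sqlite3.connect(db)
# List every table the measurement step created (object-level tables, png_list, ...).
tables = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table'", conn)
print(tables['name'].tolist())

# png_list holds the cropped-object PNG paths plus the annotation column.
png_list = pd.read_sql("SELECT * FROM png_list LIMIT 5", conn)
print(png_list.columns.tolist())
conn.close()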
@@ -45,7 +47,7 @@ Requires Tkinter for graphical user interface features.
  Ubuntu
  ~~~~~~

- Before installing spacr, ensure Tkinter is installed:
+ Before installing SpaCr, ensure Tkinter is installed:

  (Tkinter is included with the standard Python installation on macOS, and Windows)

@@ -13,13 +13,9 @@ def get_cuda_version():
  cuda_version = get_cuda_version()

  if cuda_version:
- dgl_dependency = f'dgl-cu{cuda_version}'
+ dgl_dependency = f'dgl-cu{cuda_version}==0.9.1' # Specify the version of DGL compatible with your setup
  else:
- dgl_dependency = 'dgl' # Fallback to CPU version if no CUDA is detected
-
- # Ensure you have read the README.rst content into a variable, e.g., `long_description`
- #with open("README.md", "r", encoding="utf-8") as fh:
- # long_description = fh.read()
+ dgl_dependency = 'dgl==0.9.1' # Fallback to CPU version if no CUDA is detected

  # Ensure you have read the README.rst content into a variable, e.g., `long_description`
  with open("README.rst", "r", encoding="utf-8") as fh:
@@ -60,7 +56,7 @@ dependencies = [

  setup(
  name="spacr",
- version="0.0.70",
+ version="0.0.80",
  author="Einar Birnir Olafsson",
  author_email="olafsson@med.umich.com",
  description="Spatial phenotype analysis of crisp screens (SpaCr)",
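For orientation on the setup.py change above: the dgl requirement is now pinned to 0.9.1 and still switches between the CUDA build (dgl-cuXXX) and the CPU build depending on get_cuda_version(), whose body is not part of this diff. A minimal sketch of how such detection could look, assuming nvcc is on PATH; the parsing below is illustrative, not the package's actual implementation:

import re
import subprocess

def get_cuda_version():
    """Return a CUDA version tag such as '117' for 11.7, or None if CUDA is unavailable."""
    try:
        out = subprocess.run(['nvcc', '--version'], capture_output=True, text=True, check=True)
    except (FileNotFoundError, subprocess.CalledProcessError):
        return None
    match = re.search(r'release (\d+)\.(\d+)', out.stdout)
    return f'{match.group(1)}{match.group(2)}' if match else None

cuda_version = get_cuda_version()
if cuda_version:
    dgl_dependency = f'dgl-cu{cuda_version}==0.9.1'  # CUDA build of DGL
else:
    dgl_dependency = 'dgl==0.9.1'                    # CPU-only fallback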
@@ -8,16 +8,18 @@ from . import utils
  from . import plot
  from . import measure
  from . import sim
+ from . import sequencing
  from . import timelapse
  from . import deep_spacr
- from . import mask_app
  from . import annotate_app
  from . import gui_utils
+ from . import mask_app
  from . import gui_mask_app
  from . import gui_measure_app
  from . import gui_classify_app
  from . import logger

+
  __all__ = [
  "core",
  "io",
@@ -25,6 +27,7 @@ __all__ = [
  "plot",
  "measure",
  "sim",
+ "sequencing"
  "timelapse",
  "deep_spacr",
  "annotate_app",
@@ -0,0 +1,6 @@
+ """
+ Copyright © 2024 Something
+ """
+
+ if __name__ == "__main__":
+ main()
@@ -10,13 +10,16 @@ from IPython.display import display, HTML
  import tkinter as tk
  from tkinter import ttk
  from ttkthemes import ThemedTk
+ from skimage.exposure import rescale_intensity
+ import cv2
+ import matplotlib.pyplot as plt

  from .logger import log_function_call

  from .gui_utils import ScrollableFrame, set_default_font, set_dark_style, create_dark_mode, style_text_boxes, create_menu_bar

  class ImageApp:
- def __init__(self, root, db_path, src, image_type=None, channels=None, grid_rows=None, grid_cols=None, image_size=(200, 200), annotation_column='annotate'):
+ def __init__(self, root, db_path, src, image_type=None, channels=None, grid_rows=None, grid_cols=None, image_size=(200, 200), annotation_column='annotate', normalize=False, percentiles=(1,99)):
  """
  Initializes an instance of the ImageApp class.

@@ -30,6 +33,7 @@ class ImageApp:
  - grid_cols (int): The number of columns in the image grid.
  - image_size (tuple): The size of the displayed images.
  - annotation_column (str): The column name for image annotations in the database.
+ - normalize (bool): Whether to normalize images to their 2nd and 98th percentiles. Defaults to False.
  """
  self.root = root
  self.db_path = db_path
@@ -41,6 +45,8 @@ class ImageApp:
  self.annotation_column = annotation_column
  self.image_type = image_type
  self.channels = channels
+ self.normalize = normalize
+ self.percentiles = percentiles
  self.images = {}
  self.pending_updates = {}
  self.labels = []
@@ -119,49 +125,80 @@ class ImageApp:
  label.bind('<Button-3>', self.get_on_image_click(path, label, img))

  self.root.update()
-
+
+ def load_single_image(self, path_annotation_tuple):
+ """
+ Loads a single image from the given path and annotation tuple.
+
+ Args:
+ path_annotation_tuple (tuple): A tuple containing the image path and its annotation.
+
+ Returns:
+ img (PIL.Image.Image): The loaded image.
+ annotation: The annotation associated with the image.
+ """
+ path, annotation = path_annotation_tuple
+ img = Image.open(path)
+ img = self.normalize_image(img, self.normalize, self.percentiles)
+ img = img.convert('RGB')
+ img = self.filter_channels(img)
+ img = img.resize(self.image_size)
+ return img, annotation
+
  @staticmethod
- def normalize_image(img):
+ def normalize_image(img, normalize=False, percentiles=(1, 99)):
  """
- Normalize the pixel values of an image to the range [0, 255].
+ Normalize the pixel values of an image based on the 2nd and 98th percentiles or the image min and max values,
+ and ensure the image is exported as 8-bit.

  Parameters:
- - img: PIL.Image.Image
- The input image to be normalized.
+ - img: PIL.Image.Image. The input image to be normalized.
+ - normalize: bool. Whether to normalize based on the 2nd and 98th percentiles.
+ - percentiles: tuple. The percentiles to use for normalization.

  Returns:
- - PIL.Image.Image
- The normalized image.
+ - PIL.Image.Image. The normalized and 8-bit converted image.
  """
  img_array = np.array(img)
- img_array = ((img_array - img_array.min()) * (1/(img_array.max() - img_array.min()) * 255)).astype('uint8')
- return Image.fromarray(img_array)

+ if normalize:
+ if img_array.ndim == 2: # Grayscale image
+ p2, p98 = np.percentile(img_array, percentiles)
+ img_array = rescale_intensity(img_array, in_range=(p2, p98), out_range=(0, 255))
+ else: # Color image or multi-channel image
+ for channel in range(img_array.shape[2]):
+ p2, p98 = np.percentile(img_array[:, :, channel], percentiles)
+ img_array[:, :, channel] = rescale_intensity(img_array[:, :, channel], in_range=(p2, p98), out_range=(0, 255))
+
+ img_array = np.clip(img_array, 0, 255).astype('uint8')
+
+ return Image.fromarray(img_array)
+
  def add_colored_border(self, img, border_width, border_color):
- """
- Adds a colored border to an image.
-
- Args:
- img (PIL.Image.Image): The input image.
- border_width (int): The width of the border in pixels.
- border_color (str): The color of the border in RGB format.
-
- Returns:
- PIL.Image.Image: The image with the colored border.
- """
- top_border = Image.new('RGB', (img.width, border_width), color=border_color)
- bottom_border = Image.new('RGB', (img.width, border_width), color=border_color)
- left_border = Image.new('RGB', (border_width, img.height), color=border_color)
- right_border = Image.new('RGB', (border_width, img.height), color=border_color)
-
- bordered_img = Image.new('RGB', (img.width + 2 * border_width, img.height + 2 * border_width), color='white')
- bordered_img.paste(top_border, (border_width, 0))
- bordered_img.paste(bottom_border, (border_width, img.height + border_width))
- bordered_img.paste(left_border, (0, border_width))
- bordered_img.paste(right_border, (img.width + border_width, border_width))
- bordered_img.paste(img, (border_width, border_width))
-
- return bordered_img
+ """
+ Adds a colored border to an image.
+
+ Args:
+ img (PIL.Image.Image): The input image.
+ border_width (int): The width of the border in pixels.
+ border_color (str): The color of the border in RGB format.
+
+ Returns:
+ PIL.Image.Image: The image with the colored border.
+ """
+ top_border = Image.new('RGB', (img.width, border_width), color=border_color)
+ bottom_border = Image.new('RGB', (img.width, border_width), color=border_color)
+ left_border = Image.new('RGB', (border_width, img.height), color=border_color)
+ right_border = Image.new('RGB', (border_width, img.height), color=border_color)
+
+ bordered_img = Image.new('RGB', (img.width + 2 * border_width, img.height + 2 * border_width), color='white')
+ bordered_img.paste(top_border, (border_width, 0))
+ bordered_img.paste(bottom_border, (border_width, img.height + border_width))
+ bordered_img.paste(left_border, (0, border_width))
+ bordered_img.paste(right_border, (img.width + border_width, border_width))
+ bordered_img.paste(img, (border_width, border_width))
+
+ return bordered_img

  def filter_channels(self, img):
  """
@@ -189,26 +226,6 @@ class ImageApp:

  return Image.merge("RGB", (r, g, b))

- def load_single_image(self, path_annotation_tuple):
- """
- Loads a single image from the given path and annotation tuple.
-
- Args:
- path_annotation_tuple (tuple): A tuple containing the image path and its annotation.
-
- Returns:
- img (PIL.Image.Image): The loaded image.
- annotation: The annotation associated with the image.
- """
- path, annotation = path_annotation_tuple
- img = Image.open(path)
- if img.mode == "I":
- img = self.normalize_image(img)
- img = img.convert('RGB')
- img = self.filter_channels(img)
- img = img.resize(self.image_size)
- return img, annotation
-
  def get_on_image_click(self, path, label, img):
  """
  Returns a callback function that handles the click event on an image.
@@ -244,7 +261,7 @@
  self.root.update()

  return on_image_click
-
+
  @staticmethod
  def update_html(text):
  display(HTML(f"""
@@ -349,7 +366,7 @@
  self.root.destroy()
  print(f'Quit application')

- def annotate(src, image_type=None, channels=None, geom="1000x1100", img_size=(200, 200), rows=5, columns=5, annotation_column='annotate'):
+ def annotate(src, image_type=None, channels=None, geom="1000x1100", img_size=(200, 200), rows=5, columns=5, annotation_column='annotate', normalize=False, percentiles=(1,99)):
  """
  Annotates images in a database using a graphical user interface.

@@ -363,11 +380,9 @@ def annotate(src, image_type=None, channels=None, geom="1000x1100", img_size=(20
  rows (int, optional): The number of rows in the image grid. Defaults to 5.
  columns (int, optional): The number of columns in the image grid. Defaults to 5.
  annotation_column (str, optional): The name of the annotation column in the database table. Defaults to 'annotate'.
+ normalize (bool, optional): Whether to normalize images to their 2nd and 98th percentiles. Defaults to False.
  """
  db = os.path.join(src, 'measurements/measurements.db')
- #print('src', src)
- #print('db', db)
-
  conn = sqlite3.connect(db)
  c = conn.cursor()
  c.execute('PRAGMA table_info(png_list)')
@@ -379,7 +394,7 @@ def annotate(src, image_type=None, channels=None, geom="1000x1100", img_size=(20

  root = tk.Tk()
  root.geometry(geom)
- app = ImageApp(root, db, src, image_type=image_type, channels=channels, image_size=img_size, grid_rows=rows, grid_cols=columns, annotation_column=annotation_column)
+ app = ImageApp(root, db, src, image_type=image_type, channels=channels, image_size=img_size, grid_rows=rows, grid_cols=columns, annotation_column=annotation_column, normalize=normalize, percentiles=percentiles)
  next_button = tk.Button(root, text="Next", command=app.next_page)
  next_button.grid(row=app.grid_rows, column=app.grid_cols - 1)
  back_button = tk.Button(root, text="Back", command=app.previous_page)
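Taken together, the hunks above thread the new normalize/percentiles options from annotate() through to ImageApp. A minimal call sketch, assuming annotate is used as a module-level entry point in spacr.annotate_app (as the diff suggests) and that src already contains measurements/measurements.db with a png_list table; the path is a placeholder:

from spacr.annotate_app import annotate

annotate(
    src='/path/to/experiment',   # placeholder: folder holding measurements/measurements.db
    rows=5,
    columns=5,
    annotation_column='annotate',
    normalize=True,              # new in 0.0.80: percentile stretch before display
    percentiles=(1, 99),
)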
@@ -390,7 +405,6 @@ def annotate(src, image_type=None, channels=None, geom="1000x1100", img_size=(20
  app.load_images()
  root.mainloop()

-
  def check_for_duplicates(db):
  """
  Check for duplicates in the given SQLite database.