alignfaces 1.0.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of alignfaces might be problematic.

Files changed (32)
  1. alignfaces-1.0.0/LICENSE.txt +13 -0
  2. alignfaces-1.0.0/PKG-INFO +113 -0
  3. alignfaces-1.0.0/README.md +99 -0
  4. alignfaces-1.0.0/pyproject.toml +26 -0
  5. alignfaces-1.0.0/setup.cfg +4 -0
  6. alignfaces-1.0.0/src/alignfaces/__init__.py +15 -0
  7. alignfaces-1.0.0/src/alignfaces/aperture_tools.py +213 -0
  8. alignfaces-1.0.0/src/alignfaces/contrast_tools.py +106 -0
  9. alignfaces-1.0.0/src/alignfaces/contrast_tools_.py +106 -0
  10. alignfaces-1.0.0/src/alignfaces/data/shape_predictor_68_face_landmarks.dat +0 -0
  11. alignfaces-1.0.0/src/alignfaces/face_landmarks.py +233 -0
  12. alignfaces-1.0.0/src/alignfaces/make_aligned_faces.py +1217 -0
  13. alignfaces-1.0.0/src/alignfaces/make_aligned_faces_.py +1209 -0
  14. alignfaces-1.0.0/src/alignfaces/make_files.py +42 -0
  15. alignfaces-1.0.0/src/alignfaces/make_files_.py +42 -0
  16. alignfaces-1.0.0/src/alignfaces/make_files_OLD.py +86 -0
  17. alignfaces-1.0.0/src/alignfaces/phase_cong_3.py +524 -0
  18. alignfaces-1.0.0/src/alignfaces/plot_tools.py +170 -0
  19. alignfaces-1.0.0/src/alignfaces/procrustes_tools.py +217 -0
  20. alignfaces-1.0.0/src/alignfaces/tests/R/align_reference.csv +1 -0
  21. alignfaces-1.0.0/src/alignfaces/tests/R/align_shapes.csv +40 -0
  22. alignfaces-1.0.0/src/alignfaces/tests/R/input_shapes.csv +40 -0
  23. alignfaces-1.0.0/src/alignfaces/tests/__init__.py +0 -0
  24. alignfaces-1.0.0/src/alignfaces/tests/_test_pawarp.py +267 -0
  25. alignfaces-1.0.0/src/alignfaces/tests/test_procrustes_tools.py +569 -0
  26. alignfaces-1.0.0/src/alignfaces/tests/test_warp_tools.py +316 -0
  27. alignfaces-1.0.0/src/alignfaces/warp_tools.py +279 -0
  28. alignfaces-1.0.0/src/alignfaces.egg-info/PKG-INFO +113 -0
  29. alignfaces-1.0.0/src/alignfaces.egg-info/SOURCES.txt +30 -0
  30. alignfaces-1.0.0/src/alignfaces.egg-info/dependency_links.txt +1 -0
  31. alignfaces-1.0.0/src/alignfaces.egg-info/requires.txt +2 -0
  32. alignfaces-1.0.0/src/alignfaces.egg-info/top_level.txt +1 -0
alignfaces-1.0.0/LICENSE.txt
@@ -0,0 +1,13 @@
+ Copyright 2021 Carl Michael Gaspar
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
alignfaces-1.0.0/PKG-INFO
@@ -0,0 +1,113 @@
+ Metadata-Version: 2.4
+ Name: alignfaces
+ Version: 1.0.0
+ Summary: Automatically align and warp face images
+ Author-email: Carl Michael Gaspar <carl.michael.gaspar@icloud.com>, "Oliver G.B. Garrod" <oliver.garrod@fabdata.io>
+ License: Apache-2.0
+ Keywords: face-alignment,face-morphing,psychophysics,psychology-experiments
+ Requires-Python: >=3.9
+ Description-Content-Type: text/markdown
+ License-File: LICENSE.txt
+ Requires-Dist: dlib
+ Requires-Dist: scikit-image
+ Dynamic: license-file
+
+ Automatic Face Alignment (AFA)
+ ================
+ Carl M. Gaspar & Oliver G.B. Garrod
+
+ #### You have lots of photos of faces like this:
+ ![](demos/demo_1_alignment/collage_originals.png)
+
+ #### But you want to line up all of the faces like this:
+ ![](demos/demo_1_alignment/collage_aligned.png)
+
+ #### Perhaps you would also like to window the faces to show only inner facial features like this:
+ ![](demos/demo_1_alignment/collage_aligned_windowed.png)
+
+ #### All of the above can be done using AFA like this:
+ ```python
+ import alignfaces as afa
+
+ faces_path = "/Users/Me/faces_for_my_study/"
+ afa.get_landmarks(faces_path)
+ aligned_path = afa.align_procrustes(faces_path)
+ afa.get_landmarks(aligned_path)
+ the_aperture, aperture_path = afa.place_aperture(aligned_path)
+ ```
+ To better understand how to write a script for your specific purposes, we direct you to [demo 1](demos/demo_1_alignment/README.md), which also describes how AFA alignment works.
+
+ All of these functions depend on reliable detection of facial landmarks, which is provided by the [DLIB](http://dlib.net) library. Alignment is based on generalized Procrustes analysis (GPA), which is extensively unit tested.
+
+ # Additional functions (warping)
+ Automatic landmark detection means that it is also easy to separate **shape** and **texture** in order to produce various kinds of **warped** images.
+
+ AFA provides functions for two types of face-warping manipulations common in face perception research.
+
+ ### Morphing between faces
+ To learn how to do this, please see [demo 2](demos/demo_2_morphing/README.md).
+
+ ### Enhanced average of facial identity
+ To learn how to do this, please see [demo 3](demos/demo_3_averaging/README.md).
+
+ # Setup
+
+ It is highly recommended that you have **conda** installed, preferably **miniconda** rather than full-fat **anaconda**.
+
+ If you do have **conda**, then this is the easiest way to install:
+
+ ```bash
+ conda create --name myenv "conda-forge::dlib<19.24.2" "python>=3.9" scikit-image
+
+ conda activate myenv
+
+ conda install -c conda-forge matplotlib
+ ```
+
+ Next, to install AFA itself, you have two options.
+
+ Either install directly from GitHub:
+
+ ```bash
+ pip install "alignfaces @ git+https://git@github.com/SourCherries/auto-face-align.git"
+ ```
+
+ Or, if you want a readable and editable copy of AFA on your local machine, first clone this repository, go to the root folder `auto-face-align`, and then do this:
+
+ ```bash
+ pip install .
+ ```
+
+ Whichever way you install AFA, the conda commands above create a new virtual environment called `myenv` (you can use another name). You'll need to activate this environment with `conda activate myenv` whenever you want to use AFA. To deactivate, simply type `conda deactivate`.
+
+ If you have a readable/editable copy of AFA on your local machine, you will have copies of all the demos. Most users will want those demo scripts to get started on their projects.
+
+ Other users may want a readable/editable copy of AFA in order to contribute to it, or to evaluate it by running the analyses under `results` or the unit tests. To run the unit tests, go to the root folder `auto-face-align` and then do this:
+
+ ```bash
+ pip install -U pytest
+ pytest -v src/alignfaces/tests/
+ ```
+
+ # How well does this work?
+ In addition to unit-testing critical computations, I evaluated both landmark estimation (DLIB) and the outcome of the entire alignment procedure using various face databases. The results are described [here](results/README.md).
+
+ <!-- ## Ensure that you have the proper C compiler
+ On Linux, you will already have an appropriate C compiler.
+
+ On Windows, you need to install Microsoft Visual Studio.
+
+ On Mac, you need to install Xcode Command Line Tools.
+ 1. Find an Xcode version compatible with your [macOS version](https://en.wikipedia.org/wiki/Xcode).
+ 2. Get the right version of [Xcode Command Line Tools](https://developer.apple.com/downloads/index.action).
+ ``` -->
+
+ # Citation
+ If you use this package for your research, please cite the following preprint:
+ >Gaspar, C. M., & Garrod, O. G. B. (2021, November 8). A Python toolbox for Automatic Face Alignment (AFA). Retrieved from psyarxiv.com/erc8a
+
+ DOI:
+ >10.31234/osf.io/erc8a
+
+ # License
+ This module is under an Apache-2.0 license.
alignfaces-1.0.0/README.md
@@ -0,0 +1,99 @@
+ Automatic Face Alignment (AFA)
+ ================
+ Carl M. Gaspar & Oliver G.B. Garrod
+
+ #### You have lots of photos of faces like this:
+ ![](demos/demo_1_alignment/collage_originals.png)
+
+ #### But you want to line up all of the faces like this:
+ ![](demos/demo_1_alignment/collage_aligned.png)
+
+ #### Perhaps you would also like to window the faces to show only inner facial features like this:
+ ![](demos/demo_1_alignment/collage_aligned_windowed.png)
+
+ #### All of the above can be done using AFA like this:
+ ```python
+ import alignfaces as afa
+
+ faces_path = "/Users/Me/faces_for_my_study/"
+ afa.get_landmarks(faces_path)
+ aligned_path = afa.align_procrustes(faces_path)
+ afa.get_landmarks(aligned_path)
+ the_aperture, aperture_path = afa.place_aperture(aligned_path)
+ ```
+ To better understand how to write a script for your specific purposes, we direct you to [demo 1](demos/demo_1_alignment/README.md), which also describes how AFA alignment works.
+
+ All of these functions depend on reliable detection of facial landmarks, which is provided by the [DLIB](http://dlib.net) library. Alignment is based on generalized Procrustes analysis (GPA), which is extensively unit tested.
+
+ # Additional functions (warping)
+ Automatic landmark detection means that it is also easy to separate **shape** and **texture** in order to produce various kinds of **warped** images.
+
+ AFA provides functions for two types of face-warping manipulations common in face perception research.
+
+ ### Morphing between faces
+ To learn how to do this, please see [demo 2](demos/demo_2_morphing/README.md).
+
+ ### Enhanced average of facial identity
+ To learn how to do this, please see [demo 3](demos/demo_3_averaging/README.md).
+
+ # Setup
+
+ It is highly recommended that you have **conda** installed, preferably **miniconda** rather than full-fat **anaconda**.
+
+ If you do have **conda**, then this is the easiest way to install:
+
+ ```bash
+ conda create --name myenv "conda-forge::dlib<19.24.2" "python>=3.9" scikit-image
+
+ conda activate myenv
+
+ conda install -c conda-forge matplotlib
+ ```
+
+ Next, to install AFA itself, you have two options.
+
+ Either install directly from GitHub:
+
+ ```bash
+ pip install "alignfaces @ git+https://git@github.com/SourCherries/auto-face-align.git"
+ ```
+
+ Or, if you want a readable and editable copy of AFA on your local machine, first clone this repository, go to the root folder `auto-face-align`, and then do this:
+
+ ```bash
+ pip install .
+ ```
+
+ Whichever way you install AFA, the conda commands above create a new virtual environment called `myenv` (you can use another name). You'll need to activate this environment with `conda activate myenv` whenever you want to use AFA. To deactivate, simply type `conda deactivate`.
+
+ If you have a readable/editable copy of AFA on your local machine, you will have copies of all the demos. Most users will want those demo scripts to get started on their projects.
+
+ Other users may want a readable/editable copy of AFA in order to contribute to it, or to evaluate it by running the analyses under `results` or the unit tests. To run the unit tests, go to the root folder `auto-face-align` and then do this:
+
+ ```bash
+ pip install -U pytest
+ pytest -v src/alignfaces/tests/
+ ```
+
+ # How well does this work?
+ In addition to unit-testing critical computations, I evaluated both landmark estimation (DLIB) and the outcome of the entire alignment procedure using various face databases. The results are described [here](results/README.md).
+
+ <!-- ## Ensure that you have the proper C compiler
+ On Linux, you will already have an appropriate C compiler.
+
+ On Windows, you need to install Microsoft Visual Studio.
+
+ On Mac, you need to install Xcode Command Line Tools.
+ 1. Find an Xcode version compatible with your [macOS version](https://en.wikipedia.org/wiki/Xcode).
+ 2. Get the right version of [Xcode Command Line Tools](https://developer.apple.com/downloads/index.action).
+ ``` -->
+
+ # Citation
+ If you use this package for your research, please cite the following preprint:
+ >Gaspar, C. M., & Garrod, O. G. B. (2021, November 8). A Python toolbox for Automatic Face Alignment (AFA). Retrieved from psyarxiv.com/erc8a
+
+ DOI:
+ >10.31234/osf.io/erc8a
+
+ # License
+ This module is under an Apache-2.0 license.
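
The README above attributes alignment to generalized Procrustes analysis (GPA), implemented in `procrustes_tools.py` and exercised by `test_procrustes_tools.py`. As a rough sketch of the idea behind a single Procrustes registration step (an illustration only, not the package's implementation; the helper name and toy landmark sets below are made up):

```python
# Minimal sketch of one Procrustes registration step (illustration only; the
# package's own GPA code lives in src/alignfaces/procrustes_tools.py).
import numpy as np


def procrustes_align(shape, reference):
    """Translate, scale, and rotate `shape` (N x 2 landmarks) onto `reference`."""
    shape_c = shape - shape.mean(axis=0)            # remove translation
    ref_c = reference - reference.mean(axis=0)
    shape_c = shape_c / np.linalg.norm(shape_c)     # remove scale (Frobenius norm)
    ref_c = ref_c / np.linalg.norm(ref_c)
    # Optimal rotation from the SVD of the cross-covariance matrix.
    # (This also admits reflections; a full GPA implementation typically
    # constrains the determinant to +1.)
    u, _, vt = np.linalg.svd(shape_c.T @ ref_c)
    return shape_c @ (u @ vt)


# Toy check: a reference triangle versus a rotated, scaled, and shifted copy.
ref = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
copy = np.array([[2.0, 2.0], [2.0, 4.0], [0.0, 2.0]])
ref_norm = (ref - ref.mean(axis=0)) / np.linalg.norm(ref - ref.mean(axis=0))
print(np.allclose(procrustes_align(copy, ref), ref_norm))   # True
```

Generalized Procrustes analysis repeats steps like this against a running mean shape, re-estimating the mean until it stabilizes.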
alignfaces-1.0.0/pyproject.toml
@@ -0,0 +1,26 @@
+ [project]
+ name = "alignfaces"
+ authors = [
+     {name = "Carl Michael Gaspar", email = "carl.michael.gaspar@icloud.com"},
+     {name = "Oliver G.B. Garrod", email = "oliver.garrod@fabdata.io"}
+ ]
+ description = "Automatically align and warp face images"
+ readme = "README.md"
+ keywords = ["face-alignment", "face-morphing", "psychophysics", "psychology-experiments"]
+ license = {text = "Apache-2.0"}
+ requires-python = ">=3.9"
+ dependencies = [
+     "dlib",
+     "scikit-image"
+ ]
+ version = "1.0.0"
+
+
+ [build-system]
+ requires = ["setuptools", "wheel"]
+ build-backend = "setuptools.build_meta"
+
+
+ [tool.setuptools.package-data]
+ "alignfaces.data" = ["*.dat"]
+ "alignfaces.tests.R" = ["*.csv"]
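
The `[tool.setuptools.package-data]` table above is what ships the bundled DLIB model (`shape_predictor_68_face_landmarks.dat`) and the R reference CSVs inside the distribution. As a hedged sketch of how an installed copy could locate that model at runtime (an assumption about usage, not how alignfaces itself resolves the path):

```python
# Sketch: resolving the bundled landmark model from an installed alignfaces.
# Assumes the file ships under alignfaces/data/, as the file listing indicates;
# how the package locates it internally is not shown in this diff.
from importlib import resources

model = resources.files("alignfaces") / "data" / "shape_predictor_68_face_landmarks.dat"
with resources.as_file(model) as model_path:
    print(model_path)  # a concrete filesystem path, e.g. for dlib.shape_predictor
```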
alignfaces-1.0.0/setup.cfg
@@ -0,0 +1,4 @@
+ [egg_info]
+ tag_build =
+ tag_date = 0
+
alignfaces-1.0.0/src/alignfaces/__init__.py
@@ -0,0 +1,15 @@
+ """
+ *******************************************************
+ *
+ * AlignFaces - INIT FILE
+ *
+ * Version: 1.0
+ * License: Apache 2.0
+ * Written by: Carl Michael Gaspar
+ * Created on: March 14, 2019
+ * Last updated: March 14, 2019
+ *
+ *******************************************************
+ """
+
+ from .make_aligned_faces import *
alignfaces-1.0.0/src/alignfaces/aperture_tools.py
@@ -0,0 +1,213 @@
+ import numpy as np
+ # Might need to import below into make_aligned_faces:
+ # from cv2 import imread, cvtColor, COLOR_BGR2GRAY, imwrite
+ from skimage.filters import gaussian
+
+
+ # Function to fit an ellipse using a very simple method.
+ # Semi-major axis (vertical) length is fixed as argument.
+ # Increase length of semi-minor axis until the widest distance among landmarks fits.
+ # Immediate code above is redundant with this function.
+ def fit_ellipse_semi_minor(semi_major, landmarks, center):
+     X, Y = landmarks[0], landmarks[1]
+     CX, CY = center[0], center[1]
+     Xc = X - CX
+     Yc = Y - CY
+     a_min = np.floor((Xc.max() - Xc.min()) * 3 / 10)
+     a = a_min
+     all_in = (((Xc**2/a**2) + (Yc**2/semi_major**2)) <= 1).all()
+     while (not all_in):
+         a += 1
+         all_in = (((Xc**2/a**2) + (Yc**2/semi_major**2)) <= 1).all()
+     return a
+
+
+ def make_ellipse_map(semi_minor, semi_major, center, size, soften=True):
+     CX, CY = center[0], center[1]
+     x = np.array([i-CX for i in range(size[1])])
+     y = np.array([i-CY for i in range(size[0])])
+     xv, yv = np.meshgrid(x, y)
+     R = (xv**2) / semi_minor**2 + (yv**2) / semi_major**2
+     if soften:
+         # Soften edges using Butterworth as a function of radius from (CX, CY)
+         filter_n = 10
+         aperture = 1 / np.sqrt(1 + R**(2*filter_n))
+     else:
+         aperture = R <= 1
+     return aperture
+
+
+ # Function to make a binary map of a circle within image of size = size.
+ def make_circle_map(cxy, radius, size):
+     size = (size[1], size[0])
+     xx = np.array([[x - cxy[0] for x in range(1, size[0]+1)]
+                    for y in range(size[1])])
+     yy = np.array([[y - cxy[1] for y in range(1, size[1]+1)]
+                    for x in range(size[0])]).T
+     rr = np.sqrt(xx**2 + yy**2)
+     return rr <= radius
+
+
+ # Function to make a binary map selecting the entire image area below below_y.
+ def make_map_below_y(below_y, size):
+     size = (size[1], size[0])
+     yy = np.array([[y for y in range(1, size[1]+1)] for x in range(size[0])]).T
+     return yy > below_y
+
+
+ # Function to make a binary aperture in the shape of Moss's egg.
+ #
+ # 1. ABC isosceles with point B facing down
+ #    a. define upc, midpoint between A and C
+ #    b. upc fraction along vector from mean of inter-eye midpoints
+ #       to center of all landmarks
+ #       i. fraction default is 1/4 but set as argument
+ #    c. radius_upper is fraction of ellipse_width
+ #       i. defined in ellipse-fitting functions
+ #       ii. fraction default is 47/100 but set as argument
+ #    d. A is upc shifted left by radius_upper
+ #    e. C is upc shifted right by radius_upper
+ #    f. B[x] is upc[x] and B[y] is mean of all nose-tips
+ # 2. Rest of procedure follows basic construction of Moss's egg
+ def make_moss_egg(landmark_features, center, size,
+                   fraction_width=47/100, soften=True):
+     CX, CY = center[0], center[1]
+
+     # Set radius_upper using same method used when fitting an elliptical
+     # aperture.
+     shapes = np.array(landmark_features['AllSubjectsLandmarksDict'])
+     X = shapes[:, 0::2].reshape(-1,)
+     Y = shapes[:, 1::2].reshape(-1,)
+     # Longest vertical length of ellipse that fits within image.
+     if (size[0] / 2) < CY:
+         ellipse_height = (size[0] - CY) * 2
+     elif (size[0] / 2) > CY:
+         ellipse_height = CY * 2
+     else:
+         ellipse_height = size[0]
+     semi_major = ellipse_height / 2
+     semi_minor = fit_ellipse_semi_minor(semi_major=semi_major,
+                                         landmarks=(X, Y),
+                                         center=(CX, CY))
+     ellipse_width = semi_minor * 2
+     radius_upper = ellipse_width * fraction_width
+
+     # Upper circle, centered on upc (midpoint of AC in ABC).
+     # Top half defines top of Moss's egg.
+     to_center = 1 / 4
+     eye_midpoints = landmark_features['eye_midpoints']
+     eye_midpoint = np.array(eye_midpoints).mean(axis=0)
+     upc = ((CX, CY) - eye_midpoint) * to_center + (eye_midpoint)
+     horizontal_alignment = upc[0]
+
+     # Now make two large circles whose intersection defines the middle part.
+
+     # Large circle on left, centered on cac
+     radius_large = radius_upper * 2
+     cac = (horizontal_alignment - radius_upper, upc[1])
+
+     # Large circle on right, centered on cbc
+     cbc = (horizontal_alignment + radius_upper, upc[1])
+
+     # Now make small circle at bottom, centered on lm.
+     nosey = np.array(landmark_features['nose_tips']).mean(axis=0)[1]
+     lm = (horizontal_alignment, nosey)
+
+     # Isosceles triangle cac -- lm -- cbc (ABC) with apex at lm.
+     # Ensure that angle at lm is greater than 60 degrees.
+     v1 = np.asarray(cac) - np.asarray(lm)
+     v2 = np.asarray(cbc) - np.asarray(lm)
+     acos = np.sum(v1 * v2) / (np.sqrt(np.sum(v1**2)) * np.sqrt(np.sum(v2**2)))
+     DegABC = np.arccos(acos) * 180 / np.pi
+     assert DegABC > 60
+
+     # Line defined by A (center of large circle) to lm:
+     #   y = m * x + y_intercept
+     #     = m * x + c
+     delta = np.array(lm) - cac
+     m = delta[1] / delta[0]
+     t_intercept = -cac[0] / delta[0]
+     y_intercept = t_intercept * delta[1] + cac[1]
+
+     # Intersection of Ca with above line.
+     #
+     # (x - cac[0])**2 + (m * x + y_intercept - cac[1])**2 = radius_large**2
+     # (x - p)**2 + (m * x + c - q)**2 = r**2
+     A = m**2 + 1
+     B = 2 * (m * y_intercept - m*cac[1] - cac[0])
+     C = (cac[1]**2 - radius_large**2 + cac[0]**2 -
+          2*y_intercept*cac[1] + y_intercept**2)
+
+     assert B**2 - 4*A*C > 0
+
+     # Radius defined by distance from lm to above intersection.
+     # x_m = (-B - np.sqrt(B**2 - 4*A*C)) / (2*A)
+     x_p = (-B + np.sqrt(B**2 - 4*A*C)) / (2*A)
+     Ex = x_p
+     Ey = m * Ex + y_intercept
+     lower_radius = np.sqrt((((Ex, Ey) - np.array(lm))**2).sum())
+
+     Ca = make_circle_map(cxy=cac, radius=radius_large, size=size)
+     Cb = make_circle_map(cxy=cbc, radius=radius_large, size=size)
+     Cu = make_circle_map(cxy=upc, radius=radius_upper, size=size)
+     Cc = make_circle_map(cxy=lm, radius=lower_radius, size=size)
+
+     # LM1 = make_map_below_y(below_y=horizontal_alignment, size=size)
+     LM1 = make_map_below_y(below_y=upc[1], size=size)
+     LM2 = make_map_below_y(below_y=Ey, size=size)
+
+     EggA = Cu
+     EggB = Ca & Cb & LM1 & ~LM2
+     EggC = Cc & LM2
+     # plt.imshow(np.c_[EggA, EggB, EggC])
+
+     MossEgg = EggA | EggB | EggC
+
+     if soften:
+         ME = MossEgg.astype(float)
+         IP = landmark_features['IrisPoints']
+         IPD = [np.sqrt(sum((I[1] - I[0])**2)) for I in IP]
+         sigma = round(np.asarray(IPD).mean() * 0.05)
+         MossEgg = gaussian(ME, sigma=(sigma, sigma),
+                            truncate=3.5 * sigma)
+         # MossEgg = gaussian(ME, sigma=(sigma, sigma),
+         #                    truncate=3.5 * sigma, multichannel=False)
+
+     # Package critical variables for visualizing Moss's egg construction.
+     egg_params = {}
+     egg_params['A'] = cac
+     egg_params['B'] = lm
+     egg_params['C'] = cbc
+     egg_params['upc'] = upc
+     egg_params['radius_large'] = radius_large
+     egg_params['radius_upper'] = radius_upper
+     egg_params['radius_lower'] = lower_radius
+
+     return MossEgg, egg_params
+
+
+ # Pack image and aperture into a four-channel image of unsigned 8-bit integers.
+ def make_four_channel_image(img, aperture):
+     # assert aperture.min() >= 0 and aperture.max() <= 1
+     if not (aperture.min() >= 0 and aperture.max() <= 1):
+         aperture = aperture - aperture.min()
+         aperture = aperture / aperture.max()
+     alpha = (aperture * 255).astype(np.uint8)
+     if img.ndim == 2:
+         assert type(img[0, 0]) is np.uint8
+         size = img.shape
+         BGRA = np.zeros((size[0], size[1], 4), np.uint8)
+         for i in range(3):
+             BGRA[:, :, i] = img
+         BGRA[:, :, 3] = alpha
+     elif img.ndim == 3:
+         assert type(img[0, 0, 0]) is np.uint8
+         size = img.shape
+         BGRA = np.zeros((size[0], size[1], 4), np.uint8)
+         for i in range(3):
+             BGRA[:, :, i] = img[:, :, i]
+         BGRA[:, :, 3] = alpha
+     else:
+         BGRA = []
+         print("Warning: Image is neither grayscale nor RGB.")
+     return BGRA
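
As a usage illustration for the helpers in `aperture_tools.py`, the sketch below fits an elliptical aperture to synthetic landmarks and packs a grayscale image plus that aperture into a four-channel image. The landmark coordinates and the image are made up; in the package they come from the landmark-detection step, and the import assumes an installed copy of alignfaces:

```python
# Sketch of how the aperture helpers compose (synthetic inputs only; real
# landmarks come from get_landmarks and real images from disk).
import numpy as np
from alignfaces.aperture_tools import (fit_ellipse_semi_minor,
                                       make_ellipse_map,
                                       make_four_channel_image)

size = (240, 200)                                  # image size as (rows, cols)
center = (100.0, 120.0)                            # aperture center (CX, CY)
rng = np.random.default_rng(0)
X = center[0] + rng.uniform(-40, 40, 68)           # fake landmark x-coordinates
Y = center[1] + rng.uniform(-60, 60, 68)           # fake landmark y-coordinates

semi_major = size[0] / 2                           # tallest ellipse that fits vertically
semi_minor = fit_ellipse_semi_minor(semi_major=semi_major,
                                    landmarks=(X, Y), center=center)
aperture = make_ellipse_map(semi_minor, semi_major, center, size, soften=True)

gray = rng.integers(0, 256, size, dtype=np.uint8)  # stand-in grayscale face
rgba = make_four_channel_image(gray, aperture)
print(rgba.shape)                                  # (240, 200, 4)
```

`make_moss_egg` follows the same pattern but needs the richer `landmark_features` dictionary (`AllSubjectsLandmarksDict`, `eye_midpoints`, `nose_tips`, `IrisPoints`) that the landmark-detection step produces.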
alignfaces-1.0.0/src/alignfaces/contrast_tools.py
@@ -0,0 +1,106 @@
+ import numpy as np
+ from skimage import exposure
+
+ # Ensures that values are centered on 127.5 and reach either 0 or 255.
+ # full_image, numpy image array (any range of values)
+ # inner_locs, optional numpy binary map of inner face
+ #             if supplied, all values are rescaled, but only the values
+ #             within the map are centered on 127.5 and reach 0 or 255.
+ #             no clipping occurs within the map, but it can occur outside.
+ #
+ # output image is a numpy array of unsigned 8-bit integers.
+ def max_stretch_around_127(full_image, inner_locs=None):
+     if inner_locs is None:
+         inner_locs = np.ones(full_image.shape) == 1
+     else:
+         print("\nWarning: application of max_stretch_around_127 to subregion " +
+               "can result in clipping outside that subregion.")
+
+     inner_values = full_image[inner_locs]
+     om = inner_values.mean()  # original mean value within binary map
+     inner_values = inner_values - om
+     if abs(inner_values.max()) > abs(inner_values.min()):
+         S = 127.5 / abs(inner_values.max())
+     else:  # also covers the tie case, where either scale works
+         S = 127.5 / abs(inner_values.min())
+
+     full_image = (full_image - om) * S + 127.5
+     return np.clip(full_image, 0, 255).astype(np.uint8)  # clip so out-of-map values saturate, not wrap
+
+
+ # Ensures that values are centered on the original mean and reach either 0 or 255.
+ # full_image, numpy image array, either [0-1] or [0-255]
+ #             if [0-1] then multiplied by 255 to get original mean
+ # inner_locs, optional numpy binary map of inner face
+ #             if supplied, all values are rescaled, but only the values
+ #             within the map are centered on the original mean and reach 0 or 255.
+ #             no clipping occurs within the map, but it can occur outside.
+ #
+ # output image is a numpy array of unsigned 8-bit integers.
+ def max_stretch_around_original_mean(full_image, inner_locs=None):
+     if (full_image.min() >= 0) and (full_image.max() <= 1):
+         full_image = full_image * 255
+     if inner_locs is None:
+         inner_locs = np.ones(full_image.shape) == 1
+     else:
+         print("\nWarning: application of max_stretch_around_original_mean " +
+               "to subregion can result in clipping outside that subregion.")
+
+     inner_values = full_image[inner_locs]
+     om = inner_values.mean()  # original mean value within binary map
+     inner_values = inner_values - om
+     if abs(inner_values.max()) > abs(inner_values.min()):
+         # Positive deviations dominate: stretch so the inner maximum reaches 255.
+         # [om to 255]
+         # so maximum should now be equal to 255-om
+         # so multiply all by S where:
+         #   MX * S = 255 - om
+         #   S = (255 - om) / MX
+         S = (255 - om) / inner_values.max()
+     else:  # negative deviations dominate (or tie)
+         # Stretch so the inner minimum reaches 0.
+         # [0 to om]
+         # so minimum should now be equal to -om
+         # so multiply all by S where:
+         #   MN * S = -om
+         #   S = -om / MN
+         S = -om / inner_values.min()
+     full_image = (full_image - om) * S + om
+     return np.clip(full_image, 0, 255).astype(np.uint8)  # clip so out-of-map values saturate, not wrap
+
+
+ def max_stretch(full_image, inner_locs=None):
+     if inner_locs is None:
+         inner_locs = np.ones(full_image.shape) == 1
+     else:
+         print("\nWarning: application of max_stretch to subregion " +
+               "can result in clipping outside that subregion.")
+
+     inner_values = full_image[inner_locs]
+     omin = inner_values.min()  # original minimum value within binary map
+     inner_values = inner_values - omin
+     omax = inner_values.max()
+
+     full_image = (full_image - omin) / omax
+     full_image = full_image * 255
+     return np.clip(full_image, 0, 255).astype(np.uint8)  # clip so out-of-map values saturate, not wrap
+
+
+ def contrast_stretch(full_image, inner_locs=None, type="max"):
+     if full_image.ndim == 3:
+         assert (type == "max") or (type is None)
+         if type == "max":
+             out_image = exposure.rescale_intensity(full_image)
+             return out_image
+     if type == "max":
+         out_image = max_stretch(full_image, inner_locs)
+     elif type == "mean_127":
+         out_image = max_stretch_around_127(full_image, inner_locs)
+     elif type == "mean_keep":
+         out_image = max_stretch_around_original_mean(full_image, inner_locs)
+     elif type is None:
+         out_image = full_image
+     else:
+         out_image = full_image
+         print("Warning: Invalid argument (type) to contrast_stretch.")
+     return out_image
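
A matching sketch for the contrast helpers above, run on a synthetic low-contrast image; the circular mask is an arbitrary stand-in for an inner-face map, and the import again assumes an installed copy of alignfaces:

```python
# Sketch of contrast_stretch usage on synthetic data (the boolean mask is an
# arbitrary stand-in for an inner-face map from the aperture tools).
import numpy as np
from alignfaces.contrast_tools import contrast_stretch

rng = np.random.default_rng(0)
face = rng.integers(40, 200, (128, 128)).astype(float)   # low-contrast grayscale

# Whole-image stretch to the full [0, 255] range.
stretched = contrast_stretch(face, type="max")

# Stretch driven by a central disc; clipping may occur outside it.
yy, xx = np.mgrid[0:128, 0:128]
inner = (xx - 64) ** 2 + (yy - 64) ** 2 < 40 ** 2
centered = contrast_stretch(face, inner_locs=inner, type="mean_127")
print(stretched.dtype, centered.dtype)                    # uint8 uint8
```

Passing `type=None` returns the input unchanged, which makes contrast normalization easy to switch off in a calling pipeline.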