pyvtools 1.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pyvtools-1.1/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) [2024] [Valeria Pais]
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
pyvtools-1.1/PKG-INFO ADDED
@@ -0,0 +1,25 @@
1
+ Metadata-Version: 2.1
2
+ Name: pyvtools
3
+ Version: 1.1
4
+ Summary: Python tools
5
+ Author-email: Valeria Pais <valeriarpais@gmail.com>
6
+ License: MIT License
7
+ Project-URL: Repository, https://github.com/0xInfty/PyVTools.git
8
+ Project-URL: Homepage, https://github.com/0xInfty/PyVTools
9
+ Project-URL: Issues, https://github.com/0xInfty/PyVTools/issues
10
+ Keywords: deep learning,pytorch
11
+ Classifier: Development Status :: 3 - Alpha
12
+ Classifier: License :: OSI Approved :: MIT License
13
+ Classifier: Programming Language :: Python :: 3
14
+ Requires-Python: >=3.0
15
+ Description-Content-Type: text/markdown
16
+ License-File: LICENSE
17
+ Requires-Dist: numpy
18
+ Requires-Dist: matplotlib
19
+ Requires-Dist: opencv-python
20
+ Requires-Dist: scikit-image
21
+ Requires-Dist: pillow
22
+ Requires-Dist: scikit-learn
23
+ Requires-Dist: tifffile
24
+
25
+ # PyVTools
pyvtools-1.1/README.md ADDED
@@ -0,0 +1 @@
1
+ # PyVTools
@@ -0,0 +1,35 @@
1
+ [build-system]
2
+ requires = ["setuptools>=61.0"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "pyvtools"
7
+ description = "Python tools"
8
+ version = "1.1"
9
+ requires-python = ">= 3.0"
10
+ dependencies = [
11
+ "numpy",
12
+ "matplotlib",
13
+ "opencv-python",
14
+ "scikit-image",
15
+ "pillow",
16
+ "scikit-learn",
17
+ "tifffile",
18
+ ]
19
+ authors = [
20
+ {name = "Valeria Pais", email = "valeriarpais@gmail.com"},
21
+ ]
22
+ readme = "README.md"
23
+ license = {text = "MIT License"}
24
+ keywords = ["deep learning", "pytorch"]
25
+
26
+ classifiers = [
27
+ "Development Status :: 3 - Alpha",
28
+ "License :: OSI Approved :: MIT License",
29
+ "Programming Language :: Python :: 3",
30
+ ]
31
+
32
+ [project.urls]
33
+ Repository = "https://github.com/0xInfty/PyVTools.git"
34
+ Homepage = "https://github.com/0xInfty/PyVTools"
35
+ Issues = "https://github.com/0xInfty/PyVTools/issues"
File without changes
@@ -0,0 +1,516 @@
1
+ import os
2
+ import numpy as np
3
+ # import rawpy
4
+ import cv2
5
+ import tifffile as tiff
6
+ from PIL import Image
7
+ from math import log10, sqrt
8
+ from skimage.transform import hough_line#, hough_line_peaks
9
+ from skimage.metrics import structural_similarity
10
+ import matplotlib.pyplot as plt
11
+
12
+ #%% GLOBAL VARIABLES
13
+
14
+ IMAGE_RGB_FILE_TYPES = [".png", ".jpeg", ".jpg"]
15
+ IMAGE_RAW_FILE_TYPES = [".tif", ".tiff", ".npy"] # "dng"
16
+ IMAGE_FILE_TYPES = [*IMAGE_RAW_FILE_TYPES, *IMAGE_RGB_FILE_TYPES]
17
+
18
+ #%% LOADING AND SAVING
19
+
20
def load_rgb_image(filepath:str, dtype=np.float32):
    """Load an RGB image file into a Numpy array.

    Parameters
    ----------
    filepath : str
        Filepath to image
    dtype : Numpy datatype, optional
        Desired output data type. Default is `np.float32`.

    Returns
    -------
    Image as a Numpy array (RGB, not BGR)

    See also
    --------
    PIL.Image.open
    """

    pil_image = Image.open(filepath)
    return np.array(pil_image).astype(dtype)
40
+
41
def load_tiff_image(filepath:str, dtype=np.float32):
    """Load a TIFF image file into a Numpy array.

    Parameters
    ----------
    filepath : str
        Filepath to image
    dtype : Numpy datatype, optional
        Desired output data type. Default is `np.float32`.

    Returns
    -------
    Image as a Numpy array (probably RGGB)

    See also
    --------
    tifffile.imread
    """

    tiff_data = tiff.imread(filepath)
    return np.array(tiff_data).astype(dtype)
61
+
62
def load_npy_image(filepath : str, dtype=np.float32):
    """Load an image stored as .npy into a Numpy array.

    Parameters
    ----------
    filepath : str
        Filepath to image, must have ".npy" extension
    dtype : Numpy datatype, optional
        Desired output data type. Default is `np.float32`.

    Returns
    -------
    Image as a Numpy array (probably RGGB)

    See also
    --------
    np.load
    """

    array = np.load(filepath)
    return array.astype(dtype)
82
+
83
def load_image(filepath:str, dtype=np.float32):
    """Loads an image as a Numpy array

    Currently supported formats: jpg, png, tiff, npy

    Parameters
    ----------
    filepath : str
        Filepath to image
    dtype : Numpy datatype, optional
        Desired output data type. Default is `np.float32`.

    Returns
    -------
    Image as a Numpy array (RGB, not BGR; probably RGGB if raw)

    Raises
    ------
    AssertionError
        If the file extension is not supported.
    """

    file_type = os.path.splitext(filepath)[1].lower()
    assert file_type in IMAGE_FILE_TYPES, "Image extension is not supported"

    # NOTE: splitext keeps the leading dot, so comparisons must use ".npy",
    # ".tiff", etc. — the bare "npy"/"tiff" comparisons could never match,
    # which made every raw format fall through to the ValueError below.
    if file_type in IMAGE_RGB_FILE_TYPES:
        return load_rgb_image(filepath, dtype)
    elif file_type == ".npy":
        return load_npy_image(filepath, dtype)
    # elif file_type == ".dng":
    #     return np.array(rawpy.imread(filepath).raw_image_visible).astype(dtype)
    elif file_type in (".tiff", ".tif"):
        return load_tiff_image(filepath, dtype)
    else:
        raise ValueError("Image extension is not supported")
113
+
114
def save_figure(name:str, sub_name:str=None, path:str=None, divider="_", filetype=".png"):
    """Save the current Matplotlib figure to disk.

    Parameters
    ----------
    name : str
        Base file name, without extension.
    sub_name : str, optional
        Secondary name appended after `divider`. Default appends nothing.
    path : str, optional
        Destination directory, created if missing. Default is the current
        working directory at call time.
    divider : str, optional
        Separator between `name` and `sub_name`. Default is "_".
    filetype : str, optional
        File extension, including the dot. Default is ".png".
    """

    # Resolve the directory at call time, not at import time as before
    if path is None: path = os.getcwd()
    if not os.path.isdir(path): os.makedirs(path)

    # Guard against sub_name=None, which used to crash on str concatenation
    filename = name if sub_name is None else name + divider + sub_name

    filepath = os.path.join(path, filename + filetype)
    plt.savefig(filepath, bbox_inches="tight", pad_inches=0.1)
119
+
120
+ #%% MULTICHANNEL
121
+
122
def demosaic(raw_image):
    """Simple demosaicing to visualize RAW images

    Based on AIM22 Reverse ISP challenge's starter code.

    Parameters
    ----------
    raw_image : np.array
        Raw RGGB image with shape (H, W, 4)

    Returns
    -------
    image : np.array
        Demosaiced RGB image with shape (H*2, W*2, 3)
    """

    # Collapse the four Bayer planes into an RGB image at half resolution
    rgb = rggb2rgb(raw_image)  # Shape (H, W, 3)

    # Upsample back to the full sensor resolution
    height, width = raw_image.shape[0], raw_image.shape[1]
    return cv2.resize(rgb, (width*2, height*2))
144
+
145
+
146
def rggb2rgb(raw_image):
    """Simple RAW to RGB conversion, with no upsampling

    Could also convert BGGR to BGR

    Parameters
    ----------
    raw_image : np.array
        Raw RGGB image with shape (H, W, 4)

    Returns
    -------
    image : np.array
        RGB image with shape (H, W, 3)
    """

    assert raw_image.shape[-1] == 4

    # Channel order is R, Gr, Gb, B; the two green planes are averaged
    red = raw_image[..., 0]
    green = (raw_image[..., 1] + raw_image[..., 2]) / 2
    blue = raw_image[..., 3]

    return np.stack([red, green, blue], axis=-1)
173
+
174
def rgb2gray(rgb_image):
    """Converts an RGB image into a grayscale image

    Intensity is OpenCV's luminance weighting: 0.299 R + 0.587 G + 0.114 B

    Parameters
    ----------
    rgb_image : np.array
        RGB image array with shape (H,W,3)

    Returns
    -------
    gray_image : np.array
        Grayscale image array with shape (H,W)
    """

    gray_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2GRAY)
    return gray_image
191
+
192
def list_images(path, filetypes=IMAGE_FILE_TYPES):
    """List image filenames found in the selected directory"""

    images = []
    for filename in os.listdir(path):
        extension = os.path.splitext(filename)[1].lower()
        if extension in filetypes:
            images.append(filename)
    return images
197
+
198
def list_rgb_images(path):
    """List the RGB image files (png/jpeg/jpg) in the selected directory"""
    return list_images(path, filetypes=IMAGE_RGB_FILE_TYPES)
201
+
202
def list_raw_images(path):
    """List the raw image files (tif/tiff/npy) in the selected directory"""
    return list_images(path, filetypes=IMAGE_RAW_FILE_TYPES)
205
+
206
+ #%% IMAGE PROCESSING
207
+
208
def normalize_image(image, bitdepth):
    """Scale pixel values from [0, 2**bitdepth - 1] down to [0, 1]."""
    max_value = 2**bitdepth - 1
    return image / max_value
210
+
211
def invert_normalize_image(image, bitdepth):
    """Scale pixel values from [0, 1] back up to [0, 2**bitdepth - 1]."""
    max_value = 2**bitdepth - 1
    return image * max_value
213
+
214
def quantise_image(image, bitdepth, dtype=np.float32):
    """Round pixel values and clip them to the valid [0, 2**bitdepth - 1] range."""
    max_value = 2**bitdepth - 1
    quantised = np.clip(image.round(), 0, max_value)
    return quantised.astype(dtype)
216
+
217
+ #%% IMAGE ANALYSIS
218
+
219
def fourier_transform(image:np.ndarray):
    """Calculates the discrete Fourier transform magnitude of a grayscale image.

    Based on https://www.geeksforgeeks.org/how-to-find-the-fourier-transform-of-an-image-using-opencv-python/

    Parameters
    ----------
    image : np.array
        Grayscale image shaped (H,W)

    Returns
    -------
    magnitude : np.array
        DFT of the image, centered on the zero-frequency component

    See also
    --------
    cv2.dft
    """

    # Complex-valued DFT of the image
    dft = cv2.dft(np.float32(image), flags=cv2.DFT_COMPLEX_OUTPUT)

    # Move the zero-frequency bin to the middle of the spectrum
    centered = np.fft.fftshift(dft)

    # Log-scaled magnitude, expressed in dB
    magnitude = cv2.magnitude(centered[:,:,0], centered[:,:,1])
    magnitude = 20*np.log(magnitude)

    # Scale the magnitude for display
    # magnitude = cv2.normalize(magnitude, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)

    return magnitude
252
+
253
def hough_transform(image:np.ndarray, n_angles:int=180):
    """Calculates the Hough transform of a grayscale image.

    Based on https://scikit-image.org/docs/stable/auto_examples/edges/plot_line_hough_transform.html

    Parameters
    ----------
    image : np.array
        Grayscale image shaped (H,W)
    n_angles : int, optional
        Number of evenly spaced angles to test in [-pi/2, pi/2).
        Default is 180.

    Returns
    -------
    radius : np.array
        Hough transform radial distance coordinate, expressed in pixels
    theta : np.array
        Hough transform angular coordinate, expressed in radians, from -pi/2 to pi/2
    hough : np.array
        Hough transform, shaped (2*sqrt(2)*max(H,W), n_angles)

    See also
    --------
    skimage.transform.hough_line
    """

    angles = np.linspace(-np.pi / 2, np.pi / 2, n_angles, endpoint=False)
    accumulator, theta, radius = hough_line(image, theta=angles)

    return radius, theta, accumulator
283
+
284
+ #%% IMAGE METRICS
285
+
286
def MSE(image_1, image_2):
    """Mean-Square Error (MSE) to compare two images"""

    first = np.asarray(image_1).astype(np.float32)
    second = np.asarray(image_2).astype(np.float32)

    return np.mean(np.square(first - second))
295
+
296
def PSNR(image_1, image_2, byte_depth=8):
    """Peak Signal-to-Noise Ratio (PSNR) to compare two images.

    Parameters
    ----------
    image_1, image_2 : np.array
        The two pictures to compare. Must have the same shape.
    byte_depth : int, optional
        Image byte depth. Default is 8 for 8-bit images.

    Returns
    -------
    psnr : float
        PSNR expressed in dB; infinite for identical images.
    """

    mse = MSE(image_1, image_2)

    # Identical pictures have zero error, hence infinite PSNR
    if mse == 0:
        return np.inf

    peak = 2**byte_depth - 1
    return 20 * log10(peak / sqrt(mse))  # dB
321
+
322
def SSIM(image_1, image_2, byte_depth=8, win_size=None):
    """Structural Similarity Index Measure (SSIM) to compare two images.

    Parameters
    ----------
    image_1, image_2 : np.array
        The two images to compare. Must have the same shape.
    byte_depth : int, optional
        Image byte depth. Default is 8 for 8-bit images.
    win_size : int or None, optional
        Side length of the sliding window, forwarded to scikit-image.

    Returns
    -------
    ssim : float

    See also
    --------
    skimage.metrics.structural_similarity
    """

    # scikit-image defines data_range as the span between the minimum and
    # maximum possible pixel values, i.e. 255 for 8-bit images — not 256.
    data_range = 2**byte_depth - 1

    image_1, image_2 = np.asarray(image_1), np.asarray(image_2)

    return structural_similarity(image_1, image_2,
                                 data_range=data_range, win_size=win_size)
347
+
348
def IOU(mask_1, mask_2):
    """Intersection Over Union (IOU) to compare two boolean masks.

    Parameters
    ----------
    mask_1, mask_2 : np.array, torch.Tensor
        The two image masks to compare. Must have the same shape.

    Returns
    -------
    iou : float
        Ratio of the intersection area to the union area, in [0, 1].
        Two all-empty masks are considered identical, returning 1.0.
    """

    # Fixed NameError: the original converted undefined names
    # `image_1`/`image_2` instead of the actual `mask_1`/`mask_2` parameters.
    mask_1, mask_2 = np.asarray(mask_1), np.asarray(mask_2)
    intersection_count = int( np.sum(np.logical_and(mask_1, mask_2)) )
    union_count = int( np.sum(np.logical_or(mask_1, mask_2)) )

    # Two empty masks agree perfectly; avoids the former ZeroDivisionError
    if union_count == 0:
        return 1.0

    return intersection_count / union_count
366
+
367
+ #%% PLOTTING TOOLS
368
+
369
def plot_image(image, title=None, dark=True, colormap="viridis",
               figsize=(2.66, 1.7), dpi=200, ax=None, **kwargs):
    """Plots an image

    Parameters
    ----------
    image : np.array
        Either grayscale (H,W) or RGB (H,W,3) image.
    title : str, optional
        Plot title. Default presents no title.
    dark : bool, optional
        Whether to use a black figure background or a white one.
        Default is True, to produce a black background.
    colormap : str, matplotlib.colors.Colormap, optional
        Colormap used if input is a grayscale image. Default is 'viridis'.
    figsize : tuple of floats, optional
        Figure size in inches. Default is (2.66, 1.7) standing for
        height, width.
    dpi : int, optional
        Dots per inch. Default is 200.
    ax : matplotlib.axes, optional
        Axes to plot in. If none is provided, then a new figure is set up
        and its main axes are used (default behaviour).
    **kwargs : dict, optional
        Accepts Matplotlib's `imshow` kwargs.
    """

    # Set up a fresh borderless figure unless an axes was supplied
    if ax is None:
        fig, ax = plt.subplots(figsize=figsize, dpi=dpi,
                               gridspec_kw=dict(left=0, right=1, top=1, bottom=0))
    else:
        fig = ax.get_figure()

    ax.imshow(image, cmap=colormap, **kwargs)

    if title is not None:
        title_kwargs = {"fontsize": "small"}
        if dark:
            title_kwargs["color"] = "w"
        ax.set_title(title, **title_kwargs)

    if dark:
        fig.patch.set_facecolor('k')

    ax.axis("off")  # Remove axes and padding

    return
409
+
410
def plot_images(*images, labels:str|list[str]=None, title:str=None,
                dark=True, colormap="viridis", dpi=200,
                shape_ratio=1, **kwargs):
    """Plots several images side by side

    Parameters
    ----------
    images : list or tuple of np.ndarray
        Either grayscale (H,W) or RGB (H,W,3) images.
    labels : list of str, optional
        Image titles. Defaults present no title.
    dark : bool, optional
        Whether to use a black figure background or a white one.
        Default is True, to produce a black background.
    colormap : str, matplotlib.colors.Colormap, optional
        Colormap used if input is a grayscale image. Default is 'viridis'.
    dpi : int, optional
        Dots per inch. Default is 200.
    **kwargs : dict, optional
        Accepts Matplotlib's `imshow` kwargs.

    NOTE(review): the figure *height* scales with the number of images even
    though they are laid out as columns — confirm this is the intended layout.
    """

    n_images = len(images)
    if labels is None:
        labels = [None] * n_images

    # Leave headroom for the suptitle when one is requested
    top = 1.01 if title is not None else 1

    fig, axes = plt.subplots(ncols=n_images,
                             figsize=(5.1/shape_ratio, 1.7*n_images),
                             dpi=dpi, squeeze=False,
                             gridspec_kw=dict(left=0, right=1, top=top, bottom=0))

    for index, (image, label) in enumerate(zip(images, labels)):
        plot_image(image, label, ax=axes[0][index], dpi=dpi,
                   dark=dark, colormap=colormap, **kwargs)

    if title is not None:
        suptitle_kwargs = {"fontsize": "medium", "y": 0.98}
        if dark:
            suptitle_kwargs["color"] = "white"
        plt.suptitle(title, **suptitle_kwargs)

    return
451
+
452
def plot_images_grid(*images_grid:list[np.ndarray]|np.ndarray,
                     columns_labels:list[str]=None,
                     rows_labels:list[str]=None,
                     rows_info:dict[str, list|np.ndarray]=None,
                     dark=True, colormap="viridis", dpi=200):
    """Plots a grid of images

    Parameters
    ----------
    images_grid : list or tuple of np.ndarray
        Grid of images indexed as [column][row], each image being either
        grayscale (H,W) or RGB (H,W,3).
    columns_labels : list of str, optional
        Column titles. Defaults present no title.
    rows_labels : list of str, optional
        Row titles, appended to the column label of every cell.
    rows_info : dict of lists or np.ndarrays, optional
        Additional row information. Dictionary keys could be metric labels
        such as "MSE" or "SSIM" in case the value iterables contain the
        metric associated to each row. Shown on the middle column.
    dark : bool, optional
        Whether to use a black figure background or a white one.
        Default is True, to produce a black background.
    colormap : str, matplotlib.colors.Colormap, optional
        Colormap used if input is a grayscale image. Default is 'viridis'.
    dpi : int, optional
        Dots per inch. Default is 200.
    """

    # Fixed: the original called isinstance() with one argument (TypeError)
    # and then read the Torch-only attribute `.dim` instead of `.ndim`.
    if not isinstance(images_grid, np.ndarray):
        images_grid = np.array(images_grid)
    assert images_grid.ndim >= 2, "Images must be on a 2D grid"

    n_columns = len(images_grid)
    n_rows = len(images_grid[0])
    mid_column = n_columns // 2

    if columns_labels is None:
        # Fixed: was [None]*len(n_columns), which crashes on an int
        columns_labels = [None] * n_columns

    if rows_labels is not None:
        labels = [[(lab or "") + lab_2 for lab_2 in rows_labels]
                  for lab in columns_labels]
    else:
        labels = [[lab] + [None]*(n_rows-1) for lab in columns_labels]

    # Fixed: rows_info defaults to None, so the original `rows_info!={}`
    # test passed and then crashed iterating None; a single-entry dict also
    # left sec_labels as lists of strings instead of joined strings.
    if rows_info:
        sec_labels = [" : " + ", ".join(f"{k} {values[i]}"
                                        for k, values in rows_info.items())
                      for i in range(n_rows)]
    else:
        sec_labels = [""] * n_rows

    fig, axes = plt.subplots(n_rows, n_columns, figsize=(1.7*n_columns, 1.7*n_rows),
                             dpi=dpi, squeeze=False)
    for i in range(n_rows):
        for k, column_images in enumerate(images_grid):
            label = labels[k][i]
            # Row information is only attached to the middle column
            if k == mid_column and sec_labels[i]:
                label = (label or "") + sec_labels[i]
            # Fixed: elements are numpy arrays after the conversion above,
            # so the original `.detach()` call always raised AttributeError
            plot_image(column_images[i], label, ax=axes[i][k],
                       dark=dark, colormap=colormap, dpi=dpi)

    return
@@ -0,0 +1,16 @@
1
+ import numpy as np
2
+
3
def round_and_clip(array, minimum=None, maximum=None, dtype=np.float32):
    """Round values in an array, limiting the result to [minimum, maximum].

    Missing bounds default to the array's own min/max.
    """
    minimum = np.min(array) if minimum is None else minimum
    maximum = np.max(array) if maximum is None else maximum
    clipped = np.clip(array.round(), minimum, maximum)
    return clipped.astype(dtype)
8
+
9
def minmax_normalize(array, minimum=None, maximum=None, symmetric=False):
    """Min-max normalization of an array.

    Missing bounds default to the array's own min/max.

    NOTE(review): symmetric mode only rescales by 2/(max - min) without
    shifting, so it maps onto [-1, 1] only when the data is already centered
    on zero (minimum == -maximum) — confirm this assumption with callers.
    """
    minimum = np.min(array) if minimum is None else minimum
    maximum = np.max(array) if maximum is None else maximum
    span = maximum - minimum
    if symmetric:
        return array * 2 / span
    return (array - minimum) / span
14
+
15
def invert_minmax_normalize(array, minimum, maximum):
    """Undo min-max normalization, mapping [0, 1] back to [minimum, maximum]."""
    span = maximum - minimum
    return array * span + minimum
@@ -0,0 +1,74 @@
1
+ from re import findall
2
+
3
+ #%% SINGLE STRING
4
+
5
def find_numbers(string: str):
    """Returns a list of numbers (int or float) found on a given string.

    Raises
    ------
    TypeError
        If the string contains no numbers at all.
    """

    matches = findall(r"[-+]?\d*\.\d+|[-+]?\d+", string)

    if not matches:
        raise TypeError("There's no number in this string")

    # Tokens with a decimal point become floats; the rest become ints
    return [float(m) if "." in m else int(m) for m in matches]
20
+
21
def change_separator(string, current_separator, new_separator):
    """Replace every occurrence of one separator with another."""
    pieces = string.split(current_separator)
    return new_separator.join(pieces)
23
+
24
def break_into_lines(string):
    """Turn a space-separated string into one word per line."""
    return "\n".join(string.split(" "))
26
+
27
+ #%% LIST OF STRINGS
28
+
29
def filter_by_string_must(string_list, string_must, must=True, start_on=False, end_on=False):
    """Filters list of str by a str required to be always present or absent.

    Parameters
    ----------
    string_list : list of str
        The list of strings to filter.
    string_must : str
        The string, or list of strings, that must always be present or
        always absent on each of the list elements.
    must : bool, optional
        If true, then the string must always be present. If not, the string
        must always be absent. Default is True.
    start_on : bool, optional
        Require the match at the start of each string. Default is False.
    end_on : bool, optional
        Require the match at the end of each string. Combined with
        `start_on`, requires exact equality. Default is False.

    Returns
    -------
    filtered_string_list: list of str
        The filtered list of strings.
    """

    if not isinstance(string_must, list):
        string_must = [string_must]

    # Pick the matching rule once, based on where the match is required
    if start_on and end_on:
        check = lambda s, smust: smust == s
    elif start_on:
        check = lambda s, smust: s[:len(smust)] == smust
    elif end_on:
        check = lambda s, smust: s[-len(smust):] == smust
    else:
        check = lambda s, smust: smust in s

    # Keep a string only when every required check agrees with `must`
    return [s for s in string_list
            if all(check(s, smust) == must for smust in string_must)]
@@ -0,0 +1,25 @@
1
+ Metadata-Version: 2.1
2
+ Name: pyvtools
3
+ Version: 1.1
4
+ Summary: Python tools
5
+ Author-email: Valeria Pais <valeriarpais@gmail.com>
6
+ License: MIT License
7
+ Project-URL: Repository, https://github.com/0xInfty/PyVTools.git
8
+ Project-URL: Homepage, https://github.com/0xInfty/PyVTools
9
+ Project-URL: Issues, https://github.com/0xInfty/PyVTools/issues
10
+ Keywords: deep learning,pytorch
11
+ Classifier: Development Status :: 3 - Alpha
12
+ Classifier: License :: OSI Approved :: MIT License
13
+ Classifier: Programming Language :: Python :: 3
14
+ Requires-Python: >=3.0
15
+ Description-Content-Type: text/markdown
16
+ License-File: LICENSE
17
+ Requires-Dist: numpy
18
+ Requires-Dist: matplotlib
19
+ Requires-Dist: opencv-python
20
+ Requires-Dist: scikit-image
21
+ Requires-Dist: pillow
22
+ Requires-Dist: scikit-learn
23
+ Requires-Dist: tifffile
24
+
25
+ # PyVTools
@@ -0,0 +1,12 @@
1
+ LICENSE
2
+ README.md
3
+ pyproject.toml
4
+ pyvtools/__init__.py
5
+ pyvtools/image.py
6
+ pyvtools/math.py
7
+ pyvtools/text.py
8
+ pyvtools.egg-info/PKG-INFO
9
+ pyvtools.egg-info/SOURCES.txt
10
+ pyvtools.egg-info/dependency_links.txt
11
+ pyvtools.egg-info/requires.txt
12
+ pyvtools.egg-info/top_level.txt
@@ -0,0 +1,7 @@
1
+ numpy
2
+ matplotlib
3
+ opencv-python
4
+ scikit-image
5
+ pillow
6
+ scikit-learn
7
+ tifffile
@@ -0,0 +1 @@
1
+ pyvtools
pyvtools-1.1/setup.cfg ADDED
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+