FlowerPower 0.11.5.8__py3-none-any.whl → 0.11.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,406 @@
1
+ import numpy as np
2
+ import polars as pl
3
+ import pyarrow as pa
4
+ import pyarrow.compute as pc
5
+
6
# Pre-compiled regex patterns used by the string-type inference below.
# All patterns are fully anchored (^...$) and are evaluated
# case-insensitively via pc.match_substring_regex(..., ignore_case=True).

# Optionally signed whole numbers: "42", "-7", "+3".
INTEGER_REGEX = r"^[-+]?\d+$"
# Decimal numbers; accepts both "." and "," as the decimal separator
# (German-style input) plus an optional exponent.
FLOAT_REGEX = r"^[-+]?(?:\d*[.,])?\d+(?:[eE][-+]?\d+)?$"
# Accepted boolean tokens, including German variants (ja/nein/j/n).
BOOLEAN_REGEX = r"^(true|false|1|0|yes|ja|no|nein|t|f|y|j|n)$"
# Subset of BOOLEAN_REGEX whose values map to True.
BOOLEAN_TRUE_REGEX = r"^(true|1|yes|ja|t|y|j)$"
DATETIME_REGEX = (
    r"^("
    r"\d{4}-\d{2}-\d{2}"  # ISO: 2023-12-31
    r"|"
    r"\d{2}/\d{2}/\d{4}"  # US: 12/31/2023
    r"|"
    r"\d{2}\.\d{2}\.\d{4}"  # German: 31.12.2023
    r"|"
    r"\d{8}"  # Compact: 20231231
    r")"
    r"([ T]\d{2}:\d{2}(:\d{2}(\.\d{1,6})?)?)?"  # Optional time: 23:59[:59[.123456]]
    r"([+-]\d{2}:?\d{2}|Z)?"  # Optional timezone: +01:00, -0500, Z
    r"$"
)

# Float32 range limits, used to decide whether a float64 column can be
# downcast to float32 without overflowing.
F32_MIN = float(np.finfo(np.float32).min)
F32_MAX = float(np.finfo(np.float32).max)
29
+
30
+
31
def unify_schemas(
    schemas: list[pa.Schema], use_large_dtypes: bool = False
) -> pa.Schema:
    """
    Unify a list of PyArrow schemas into a single schema.

    First tries ``pa.unify_schemas`` with permissive type promotion.  If
    that fails (incompatible field types), falls back to concatenating
    empty tables via Polars' ``diagonal_relaxed`` strategy, which relaxes
    column types until the frames are compatible.

    Args:
        schemas (list[pa.Schema]): List of PyArrow schemas to unify.
        use_large_dtypes (bool): If False, large types produced by the
            Polars fallback (e.g. ``large_string``) are converted back to
            their standard equivalents.  NOTE(review): only applied on the
            fallback path; the ``pa.unify_schemas`` result is returned
            as-is — confirm this asymmetry is intended.

    Returns:
        pa.Schema: A unified PyArrow schema.
    """
    try:
        return pa.unify_schemas(schemas, promote_options="permissive")
    except (pa.lib.ArrowInvalid, pa.lib.ArrowTypeError):
        # Fix: the original did `_ = e.args[0]`, which is dead code and
        # raises IndexError for an exception carrying no args.
        # Fallback: build one empty table per schema and let Polars relax
        # the types until they can be concatenated.
        unified = (
            pl.concat(
                [pl.from_arrow(s.empty_table()) for s in schemas],
                how="diagonal_relaxed",
            )
            .to_arrow()
            .schema
        )
        if not use_large_dtypes:
            return convert_large_types_to_normal(unified)
        return unified
63
+
64
+
65
def cast_schema(table: pa.Table, schema: pa.Schema) -> pa.Table:
    """
    Cast a PyArrow table to the given schema, restricted to the columns
    that actually exist in the table.

    Fields of ``schema`` missing from ``table`` are dropped, the table is
    re-ordered to the remaining fields' order, and then cast.

    Args:
        table (pa.Table): The PyArrow table to cast.
        schema (pa.Schema): The target schema to cast the table to.

    Returns:
        pa.Table: A new PyArrow table with the specified schema.
    """
    available = set(table.schema.names)
    target = pa.schema([f for f in schema if f.name in available])
    return table.select(target.names).cast(target)
81
+
82
+
83
def convert_large_types_to_normal(schema: pa.Schema) -> pa.Schema:
    """
    Convert large types in a PyArrow schema to their standard types.

    Three cases are handled per field: a direct hit in the
    large-to-standard type mapping, a large_list whose value type may
    itself be mapped, and a dictionary whose value type may be mapped.
    Large types nested deeper than one level are left unchanged.

    Args:
        schema (pa.Schema): The PyArrow schema to convert.

    Returns:
        pa.Schema: A new PyArrow schema with large types converted to standard types.
    """
    # Define mapping of large types to standard types
    type_mapping = {
        pa.large_string(): pa.string(),
        pa.large_binary(): pa.binary(),
        pa.large_utf8(): pa.utf8(),
        pa.large_list(pa.null()): pa.list_(pa.null()),
        pa.large_list_view(pa.null()): pa.list_view(pa.null()),
    }
    # Convert fields
    new_fields = []
    for field in schema:
        field_type = field.type
        # Check if type exists in mapping
        if field_type in type_mapping:
            new_field = pa.field(
                name=field.name,
                type=type_mapping[field_type],
                nullable=field.nullable,
                metadata=field.metadata,
            )
            new_fields.append(new_field)
        # Handle large lists with nested types
        elif isinstance(field_type, pa.LargeListType):
            new_field = pa.field(
                name=field.name,
                type=pa.list_(
                    # Map the element type too when it is itself a large type.
                    type_mapping[field_type.value_type]
                    if field_type.value_type in type_mapping
                    else field_type.value_type
                ),
                nullable=field.nullable,
                metadata=field.metadata,
            )
            new_fields.append(new_field)
        # Handle dictionary with large_string, large_utf8, or large_binary values
        elif isinstance(field_type, pa.DictionaryType):
            new_field = pa.field(
                name=field.name,
                type=pa.dictionary(
                    field_type.index_type,
                    type_mapping[field_type.value_type]
                    if field_type.value_type in type_mapping
                    else field_type.value_type,
                    field_type.ordered,
                ),
                # NOTE(review): `nullable` is deliberately left commented
                # out here, so dictionary fields fall back to the pa.field
                # default (nullable=True) — confirm this is intended.
                # nullable=field.nullable,
                metadata=field.metadata,
            )
            new_fields.append(new_field)
        else:
            # All other types pass through unchanged.
            new_fields.append(field)

    return pa.schema(new_fields)
146
+
147
+
148
def _clean_string_array(array: pa.Array) -> pa.Array:
    """
    Normalize a string array: trim surrounding whitespace and turn the
    placeholder values "", "-" and "None" into nulls.

    Empty and all-null arrays are returned unchanged.
    """
    if len(array) == 0 or array.null_count == len(array):
        return array

    stripped = pc.utf8_trim_whitespace(array)

    # Build a mask of placeholder values that should become null.
    to_null = pc.or_(
        pc.equal(stripped, ""),
        pc.or_(pc.equal(stripped, "-"), pc.equal(stripped, "None")),
    )
    return pc.if_else(to_null, None, stripped)
167
+
168
+
169
def _can_downcast_to_float32(array: pa.Array) -> bool:
    """
    Return True when every finite value of *array* fits in Float32.

    Empty arrays, all-null arrays, and arrays with no finite values
    trivially qualify.
    """
    if len(array) == 0 or array.null_count == len(array):
        return True

    finite_mask = pc.is_finite(array)

    # No finite values at all (only NaN/inf): nothing to range-check.
    if not pc.any(finite_mask).as_py():
        return True

    finite_values = pc.filter(array, finite_mask)
    lo = pc.min(finite_values).as_py()
    hi = pc.max(finite_values).as_py()
    return lo >= F32_MIN and hi <= F32_MAX
190
+
191
+
192
def _get_optimal_int_type(array: pa.Array) -> pa.DataType:
    """
    Pick the smallest integer type that can hold every value in *array*.

    Unsigned types are preferred when all values are non-negative.
    Empty and all-null arrays map to int8.
    """
    if len(array) == 0 or array.null_count == len(array):
        return pa.int8()

    bounds = pc.min_max(array)
    lo = bounds["min"].as_py()
    hi = bounds["max"].as_py()

    if lo >= 0:
        # Unsigned candidates, narrowest first.
        for dtype, upper in (
            (pa.uint8(), 255),
            (pa.uint16(), 65535),
            (pa.uint32(), 4294967295),
        ):
            if hi <= upper:
                return dtype
        return pa.uint64()

    # Signed candidates, narrowest first.
    for dtype, lower, upper in (
        (pa.int8(), -128, 127),
        (pa.int16(), -32768, 32767),
        (pa.int32(), -2147483648, 2147483647),
    ):
        if lower <= lo and hi <= upper:
            return dtype
    return pa.int64()
223
+
224
+
225
def _optimize_numeric_array(array: pa.Array, shrink: bool) -> pa.Array:
    """
    Downcast a numeric array to a smaller type when *shrink* allows it.

    Empty arrays collapse to an empty int8 array; with shrink disabled,
    all-null arrays, and non-numeric types the input passes through
    unchanged.
    """
    if len(array) == 0:
        return pa.array([], type=pa.int8())
    if not shrink or array.null_count == len(array):
        return array

    # float64 -> float32 when the observed value range permits.
    if pa.types.is_floating(array.type):
        if array.type == pa.float64() and _can_downcast_to_float32(array):
            return pc.cast(array, pa.float32())
        return array

    # Integers: cast to the narrowest type covering the observed range.
    if pa.types.is_integer(array.type):
        if array.type in (pa.int8(), pa.uint8()):
            # Already at the smallest width.
            return array
        return pc.cast(array, _get_optimal_int_type(array))

    # Anything else is left untouched.
    return array
250
+
251
+
252
def _all_match_regex(array: pa.Array, pattern: str) -> bool:
    """
    Return True when every non-null value in *array* matches *pattern*
    (case-insensitive).  Empty and all-null arrays vacuously match.
    """
    if len(array) == 0 or array.null_count == len(array):
        return True

    # Vectorized regex evaluation; nulls are skipped by pc.all.
    matches = pc.match_substring_regex(array, pattern, ignore_case=True)
    return pc.all(matches).as_py()
262
+
263
+
264
def _optimize_string_array(
    array: pa.Array, col_name: str, shrink_numerics: bool, time_zone: str | None = None
) -> pa.Array:
    """
    Infer and convert a string array to a more specific type (bool, int,
    float, or datetime) based on its content.

    Values are first cleaned (trimmed; ""/"-"/"None" become null), then
    the whole column is tested against anchored regexes; the first
    matching family wins.  On any conversion error the cleaned strings
    are returned unchanged.

    Args:
        array: String array to analyze.
        col_name: Column name (used for the intermediate Polars series).
        shrink_numerics: Whether to downcast converted numerics.
        time_zone: Optional time zone for datetime parsing.

    Returns:
        pa.Array: Converted array, or the cleaned string array as fallback.
    """
    # Empty/all-null columns collapse to int8, the cheapest placeholder type.
    if len(array) == 0:
        return pa.array([], type=pa.int8())
    if array.null_count == len(array):
        return pa.array([None] * len(array), type=pa.int8())

    # Clean string values
    cleaned_array = _clean_string_array(array)

    try:
        # Check for boolean values
        if _all_match_regex(cleaned_array, BOOLEAN_REGEX):
            # Fix: match against the *cleaned* values.  Matching the raw
            # array mis-classified whitespace-padded values (" true") and
            # cleaned-out placeholders (""/"-"/"None") as False instead of
            # True/null.
            true_matches = pc.match_substring_regex(
                cleaned_array, BOOLEAN_TRUE_REGEX, ignore_case=True
            )
            return pc.cast(true_matches, pa.bool_())

        elif _all_match_regex(cleaned_array, INTEGER_REGEX):
            # Normalize decimal commas (a no-op for INTEGER_REGEX matches,
            # which contain no commas), then cast to int64.
            int_array = pc.cast(
                pc.replace_substring(cleaned_array, ",", "."), pa.int64()
            )
            if shrink_numerics:
                return pc.cast(int_array, _get_optimal_int_type(int_array))
            return int_array

        # Check for numeric values
        elif _all_match_regex(cleaned_array, FLOAT_REGEX):
            # Accept "," as decimal separator by rewriting it to ".".
            float_array = pc.cast(
                pc.replace_substring(cleaned_array, ",", "."), pa.float64()
            )
            if shrink_numerics and _can_downcast_to_float32(float_array):
                return pc.cast(float_array, pa.float32())
            return float_array

        # Check for datetime values - use polars for conversion
        elif _all_match_regex(cleaned_array, DATETIME_REGEX):
            # Polars handles the mixed formats accepted by DATETIME_REGEX.
            pl_series = pl.Series(col_name, cleaned_array)
            converted = pl_series.str.to_datetime(
                strict=False, time_unit="us", time_zone=time_zone
            )
            # Convert polars datetime back to pyarrow
            return converted.to_arrow()

    except Exception:
        # Best-effort: fall back to cleaned strings on any error
        return cleaned_array

    # Default: return cleaned strings
    return cleaned_array
333
+
334
+
335
def _process_column(
    table: pa.Table, col_name: str, shrink_numerics: bool, time_zone: str | None = None
) -> pa.Array:
    """
    Optimize a single column of *table* and return the resulting array.

    All-null columns collapse to int8; numeric columns may be downcast;
    string columns go through content-based type inference; every other
    type is returned untouched.
    """
    column = table[col_name]

    # All-null columns become the cheapest placeholder type.
    if column.null_count == len(column):
        return pa.array([None] * len(column), type=pa.int8())

    col_type = column.type
    if pa.types.is_integer(col_type) or pa.types.is_floating(col_type):
        return _optimize_numeric_array(column, shrink_numerics)
    if pa.types.is_string(col_type):
        return _optimize_string_array(column, col_name, shrink_numerics, time_zone)

    return column
355
+
356
+
357
def opt_dtype(
    table: pa.Table,
    include: str | list[str] | None = None,
    exclude: str | list[str] | None = None,
    time_zone: str | None = None,
    shrink_numerics: bool = True,
) -> pa.Table:
    """
    Optimize data types of a PyArrow Table for performance and memory
    efficiency.

    Each selected column is analyzed and converted to the most
    appropriate type based on its content, handling string-to-type
    conversions and numeric downcasting.  PyArrow counterpart of the
    Polars `opt_dtype` function.

    Args:
        table: PyArrow Table to optimize
        include: Column(s) to include in optimization (default: all columns)
        exclude: Column(s) to exclude from optimization
        time_zone: Optional time zone for datetime parsing
        shrink_numerics: Whether to downcast numeric types when possible

    Returns:
        PyArrow Table with optimized data types
    """
    # Accept single column names as well as lists.
    include_list = [include] if isinstance(include, str) else include
    exclude_list = [exclude] if isinstance(exclude, str) else exclude

    # Start from all columns, then narrow by include/exclude.
    selected = table.column_names
    if include_list:
        selected = [name for name in include_list if name in table.column_names]
    if exclude_list:
        selected = [name for name in selected if name not in exclude_list]
    selected_set = set(selected)

    # Optimize selected columns; pass the rest through unchanged.
    optimized = [
        _process_column(table, name, shrink_numerics, time_zone)
        if name in selected_set
        else table[name]
        for name in table.column_names
    ]
    return pa.Table.from_arrays(optimized, names=table.column_names)
@@ -3,4 +3,4 @@ import os
3
3
  PIPELINES_DIR = os.getenv("FP_PIPELINES_DIR", "pipelines")
4
4
  CONFIG_DIR = os.getenv("FP_CONFIG_DIR", "conf")
5
5
  HOOKS_DIR = os.getenv("FP_HOOKS_DIR", "hooks")
6
- CACHE_DIR = os.getenv("FP_CACHE_DIR", "~/.flowerpower/cache")
6
+ CACHE_DIR = os.getenv("FP_CACHE_DIR", "~/.flowerpower/cache")
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: FlowerPower
3
- Version: 0.11.5.8
3
+ Version: 0.11.6
4
4
  Summary: A simple workflow framework. Hamilton + APScheduler = FlowerPower
5
5
  Author-email: "Volker L." <ligno.blades@gmail.com>
6
6
  Project-URL: Homepage, https://github.com/legout/flowerpower
@@ -18,7 +18,7 @@ flowerpower/cli/pipeline.py,sha256=60P6u_QOSgp0jJXEMxazEEo5Sh7-SWFo-Kkuaz21YuI,3
18
18
  flowerpower/cli/utils.py,sha256=nDSSj_1nlYlMmj252kRZeohhFqHv9yvdgDEduQCyWOc,5152
19
19
  flowerpower/fs/__init__.py,sha256=uZaPXErEfQqQRbKRIjkB9yiygd45X5_psYn9-VVrBTQ,910
20
20
  flowerpower/fs/base.py,sha256=TqgqBsaFj13O1NpAr8kHuGJ9CTlaSWViMB8Ai_iuCjs,22761
21
- flowerpower/fs/ext.py,sha256=gsCJ87VxVdy22oVtNRaN4M-SLO8WORVf5JRyDeQjjEs,63834
21
+ flowerpower/fs/ext.py,sha256=zTZO-j__O6Om7gbOpXJL7uDo2Cki6hOdlx_GDJ-Xujw,67625
22
22
  flowerpower/fs/storage_options.py,sha256=msq5TpxAU8tcE_Bxjw6SyxaFa75UjdYnR4-O9U2wmbk,48034
23
23
  flowerpower/job_queue/__init__.py,sha256=a25hIqv2xoFKb4JZlyUukS0ppZ9-2sJKH3XAvbk3rlk,10788
24
24
  flowerpower/job_queue/base.py,sha256=YwLunDQSyqkSU_vJ69C5SSybJeJP1bAiZ3teUtOchxA,13640
@@ -37,17 +37,18 @@ flowerpower/job_queue/rq/utils.py,sha256=QjyNhSM1_gMZkV2cO8eR99XeEji4AMwpxE1TANa
37
37
  flowerpower/job_queue/rq/concurrent_workers/gevent_worker.py,sha256=X8rKfSbuITpWiwCyNqN-WVeodBkafFuj-zvbZb7z6lw,7511
38
38
  flowerpower/job_queue/rq/concurrent_workers/thread_worker.py,sha256=M_jjci-pypEsbHe-gCQS6jmJBrA-Tb7W19sEWi6htFU,7801
39
39
  flowerpower/pipeline/__init__.py,sha256=xbEn_RN0vVNqLZMSFOCdV41ggUkYrghFVJYd_EC0C44,75
40
- flowerpower/pipeline/base.py,sha256=N3N0iqiVo2vUVli_WSADAQMq283mG9OdFql58LXeF2Q,3275
40
+ flowerpower/pipeline/base.py,sha256=CO2PsOACRHFd8-g-0BUTc1ZMVdaBnhQftJda7HDxPZI,3305
41
41
  flowerpower/pipeline/io.py,sha256=8Mlw4G7ehHHZEk4Qui-HcKBM3tBF4FuqUbjfNxK09iU,15963
42
42
  flowerpower/pipeline/job_queue.py,sha256=hl38-0QZCH5wujUf0qIqYznIPDLsJAoNDcOD7YGVQ6s,26114
43
43
  flowerpower/pipeline/manager.py,sha256=KVpOclUEUAETUNJamJJGuKt3oxCaLitQgxWxkE1q028,74460
44
- flowerpower/pipeline/registry.py,sha256=WWQoaxtgnlntFEIPQzYM1gk0zUXwrH2PmDLGbTzhrZs,18991
44
+ flowerpower/pipeline/registry.py,sha256=6ngmHyKyQsxvIO4qRYxljedY0BE1wE3lpfksEGOzjNs,18963
45
45
  flowerpower/pipeline/runner.py,sha256=dsSVYixFXqlxFk8EJfT4wV_7IwgkXq0ErwH_yf_NGS8,25654
46
46
  flowerpower/pipeline/visualizer.py,sha256=amjMrl5NetErE198HzZBPWVZBi_t5jj9ydxWpuNLoTI,5013
47
- flowerpower/plugins/io/base.py,sha256=pWQt7vS_n3jR3J9YaVgMCXQ6utWuo3uKYZdTJ3pLiuY,79071
47
+ flowerpower/plugins/io/base.py,sha256=tyFbvx8Ij8gTKP8p8GfwpP5dpIWNncGJfcuK_hPCPN0,79383
48
48
  flowerpower/plugins/io/metadata.py,sha256=PCrepLilXRWKDsB5BKFF_-OFs712s1zBeitW-84lDLQ,7005
49
49
  flowerpower/plugins/io/helpers/datetime.py,sha256=1WBUg2ywcsodJQwoF6JiIGc9yhVobvE2IErWp4i95m4,10649
50
- flowerpower/plugins/io/helpers/polars.py,sha256=xWLjmZBUhbJPd0m5pkHreMSMeNLyJThJt0R7V7yzWEs,18068
50
+ flowerpower/plugins/io/helpers/polars.py,sha256=346DBHG-HvoGZWF-DWxgz7H3KlZu8bFylKIqMOnVJSk,27031
51
+ flowerpower/plugins/io/helpers/pyarrow.py,sha256=b1JVi-3V5hkb4y4jMpHq8W6ZeTDk-Eox_hWMNy3dH_Y,13574
51
52
  flowerpower/plugins/io/helpers/sql.py,sha256=BPIxjarKF3p93EdtUu-md8KislE9q8IWNSeZ5toFU6U,7298
52
53
  flowerpower/plugins/io/loader/__init__.py,sha256=MKH42nvVokaWas0wFgX1yrpU5iLpvHjRqqF-KzwLHCg,780
53
54
  flowerpower/plugins/io/loader/csv.py,sha256=Q5bmcbbr530sT1kQ2YiJwvsMUPqi0VcZWsLOygmzRyI,827
@@ -81,7 +82,7 @@ flowerpower/plugins/mqtt/manager.py,sha256=WJIKu_0wfrxph_L0AjV-7RYVFtie2cOmdVGjj
81
82
  flowerpower/settings/__init__.py,sha256=g1JKxxQKgBSbghzhCvEPOgD4ofjCGJdWCf8T0husyMQ,178
82
83
  flowerpower/settings/backend.py,sha256=jb4H3DXil5uN4BDo3ipqP6-1Hpnrm6qvIZQWiHZcDXg,4994
83
84
  flowerpower/settings/executor.py,sha256=vNF383g1gMGkq_CXUzateGMNcJZig-vHkVVb0Hi_b74,249
84
- flowerpower/settings/general.py,sha256=6kGLT1arCYo7jgFgQoNeqNztmIBkk882MFSnR7EAct0,226
85
+ flowerpower/settings/general.py,sha256=RxY6PGF_L8ApFlLPHulZ2I8_-aHYqOj63fUu9kSQTjI,227
85
86
  flowerpower/settings/hamilton.py,sha256=GVzWKz3B-wy07etY1mNUstEa4DFrQ_lM2cjE0qG_6qw,623
86
87
  flowerpower/settings/job_queue.py,sha256=wrehB9Suwks9y5Gvfg-cex6SQOoeFfSwZDxdKNmKhcA,2972
87
88
  flowerpower/settings/logging.py,sha256=xzoiM-87WGduC-RJu8RfyeweHGTM7SnHXHQPbxKweLE,67
@@ -93,9 +94,9 @@ flowerpower/utils/monkey.py,sha256=VPl3yimoWhwD9kI05BFsjNvtyQiDyLfY4Q85Bb6Ma0w,2
93
94
  flowerpower/utils/open_telemetry.py,sha256=fQWJWbIQFtKIxMBjAWeF12NGnqT0isO3A3j-DSOv_vE,949
94
95
  flowerpower/utils/scheduler.py,sha256=2zJ_xmLXpvXUQNF1XS2Gqm3Ogo907ctZ50GtvQB_rhE,9354
95
96
  flowerpower/utils/templates.py,sha256=ouyEeSDqa9PjW8c32fGpcINlpC0WToawRFZkMPtwsLE,1591
96
- flowerpower-0.11.5.8.dist-info/licenses/LICENSE,sha256=9AkLexxrmr0aBgSHiqxpJk9wgazpP1CTJyiDyr56J9k,1063
97
- flowerpower-0.11.5.8.dist-info/METADATA,sha256=ADH8WX3HkFVTcdB-WmwvC1yN0mRcj_zp5EjyQYV3Zfk,21612
98
- flowerpower-0.11.5.8.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
99
- flowerpower-0.11.5.8.dist-info/entry_points.txt,sha256=61X11i5a2IwC9LBiP20XCDl5zMOigGCjMCx17B7bDbQ,52
100
- flowerpower-0.11.5.8.dist-info/top_level.txt,sha256=VraH4WtEUfSxs5L-rXwDQhzQb9eLHTUtgvmFZ2dAYnA,12
101
- flowerpower-0.11.5.8.dist-info/RECORD,,
97
+ flowerpower-0.11.6.dist-info/licenses/LICENSE,sha256=9AkLexxrmr0aBgSHiqxpJk9wgazpP1CTJyiDyr56J9k,1063
98
+ flowerpower-0.11.6.dist-info/METADATA,sha256=5Lg0RYLDvqAzJw05z2Qp-OmldA5Cy_FA7XTXlOa2Oos,21610
99
+ flowerpower-0.11.6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
100
+ flowerpower-0.11.6.dist-info/entry_points.txt,sha256=61X11i5a2IwC9LBiP20XCDl5zMOigGCjMCx17B7bDbQ,52
101
+ flowerpower-0.11.6.dist-info/top_level.txt,sha256=VraH4WtEUfSxs5L-rXwDQhzQb9eLHTUtgvmFZ2dAYnA,12
102
+ flowerpower-0.11.6.dist-info/RECORD,,