xulbux 1.9.5__cp311-cp311-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- 455848faf89d8974b22a__mypyc.cpython-311-darwin.so +0 -0
- xulbux/__init__.cpython-311-darwin.so +0 -0
- xulbux/__init__.py +46 -0
- xulbux/base/consts.cpython-311-darwin.so +0 -0
- xulbux/base/consts.py +172 -0
- xulbux/base/decorators.cpython-311-darwin.so +0 -0
- xulbux/base/decorators.py +28 -0
- xulbux/base/exceptions.cpython-311-darwin.so +0 -0
- xulbux/base/exceptions.py +23 -0
- xulbux/base/types.cpython-311-darwin.so +0 -0
- xulbux/base/types.py +118 -0
- xulbux/cli/help.cpython-311-darwin.so +0 -0
- xulbux/cli/help.py +77 -0
- xulbux/code.cpython-311-darwin.so +0 -0
- xulbux/code.py +137 -0
- xulbux/color.cpython-311-darwin.so +0 -0
- xulbux/color.py +1331 -0
- xulbux/console.cpython-311-darwin.so +0 -0
- xulbux/console.py +2069 -0
- xulbux/data.cpython-311-darwin.so +0 -0
- xulbux/data.py +798 -0
- xulbux/env_path.cpython-311-darwin.so +0 -0
- xulbux/env_path.py +123 -0
- xulbux/file.cpython-311-darwin.so +0 -0
- xulbux/file.py +74 -0
- xulbux/file_sys.cpython-311-darwin.so +0 -0
- xulbux/file_sys.py +266 -0
- xulbux/format_codes.cpython-311-darwin.so +0 -0
- xulbux/format_codes.py +722 -0
- xulbux/json.cpython-311-darwin.so +0 -0
- xulbux/json.py +200 -0
- xulbux/regex.cpython-311-darwin.so +0 -0
- xulbux/regex.py +247 -0
- xulbux/string.cpython-311-darwin.so +0 -0
- xulbux/string.py +161 -0
- xulbux/system.cpython-311-darwin.so +0 -0
- xulbux/system.py +313 -0
- xulbux-1.9.5.dist-info/METADATA +271 -0
- xulbux-1.9.5.dist-info/RECORD +43 -0
- xulbux-1.9.5.dist-info/WHEEL +6 -0
- xulbux-1.9.5.dist-info/entry_points.txt +2 -0
- xulbux-1.9.5.dist-info/licenses/LICENSE +21 -0
- xulbux-1.9.5.dist-info/top_level.txt +2 -0
xulbux/data.py
ADDED
|
@@ -0,0 +1,798 @@
|
|
|
1
|
+
"""
|
|
2
|
+
This module provides the `Data` class, which offers
|
|
3
|
+
methods to work with nested data structures.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from .base.types import DataStructureTypes, IndexIterableTypes, DataStructure, IndexIterable
|
|
7
|
+
|
|
8
|
+
from .format_codes import FormatCodes
|
|
9
|
+
from .string import String
|
|
10
|
+
from .regex import Regex
|
|
11
|
+
|
|
12
|
+
from typing import Optional, Literal, Final, Any, cast
|
|
13
|
+
import base64 as _base64
|
|
14
|
+
import math as _math
|
|
15
|
+
import re as _re
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
# Maps each renderable data part to its (opening-format-code, closing/reset-code) pair.
# The codes are interpreted by the `format_codes` module when rendering data structures.
_DEFAULT_SYNTAX_HL: Final[dict[str, tuple[str, str]]] = {
    "str": ("[br:blue]", "[_c]"),
    "number": ("[br:magenta]", "[_c]"),
    "literal": ("[magenta]", "[_c]"),
    "type": ("[i|green]", "[_i|_c]"),
    "punctuation": ("[br:black]", "[_c]"),
}
"""Default syntax highlighting styles for data structure rendering."""
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class Data:
    """This class includes methods to work with nested data structures (dictionaries and lists)."""

    @classmethod
    def serialize_bytes(cls, data: bytes | bytearray) -> dict[str, str]:
        """Converts bytes or bytearray to a JSON-compatible format (dictionary) with explicit keys.\n
        ----------------------------------------------------------------------------------------------
        - `data` -⠀the bytes or bytearray to serialize"""
        key = "bytearray" if isinstance(data, bytearray) else "bytes"

        # Prefer a lossless UTF-8 round-trip; fall back to base64 for arbitrary binary data.
        try:
            return {key: cast(bytes | bytearray, data).decode("utf-8"), "encoding": "utf-8"}
        except UnicodeDecodeError:
            pass

        return {key: _base64.b64encode(data).decode("utf-8"), "encoding": "base64"}

    @classmethod
    def deserialize_bytes(cls, obj: dict[str, str]) -> bytes | bytearray:
        """Tries to convert a JSON-compatible bytes/bytearray format (dictionary) back to its original type.\n
        --------------------------------------------------------------------------------------------------------
        - `obj` -⠀the dictionary to deserialize\n
        --------------------------------------------------------------------------------------------------------
        If the serialized object was created with `Data.serialize_bytes()`, it will work.
        If it fails to decode the data, it will raise a `ValueError`."""
        # The data key doubles as the original-type marker ("bytes" vs "bytearray").
        for key in ("bytes", "bytearray"):
            if key in obj and "encoding" in obj:
                if obj["encoding"] == "utf-8":
                    data = obj[key].encode("utf-8")
                elif obj["encoding"] == "base64":
                    data = _base64.b64decode(obj[key].encode("utf-8"))
                else:
                    raise ValueError(f"Unknown encoding method '{obj['encoding']}'")

                return bytearray(data) if key == "bytearray" else data

        raise ValueError(f"Invalid serialized data:\n {obj}")

    @classmethod
    def chars_count(cls, data: DataStructure) -> int:
        """The sum of all the characters amount including the keys in dictionaries.\n
        ------------------------------------------------------------------------------
        - `data` -⠀the data structure to count the characters from"""
        chars_count = 0

        if isinstance(data, dict):
            for k, v in data.items():
                # Keys always count as their string length; nested structures recurse.
                chars_count += len(str(k)) + (cls.chars_count(v) if isinstance(v, DataStructureTypes) else len(str(v)))

        elif isinstance(data, IndexIterableTypes):
            for item in data:
                chars_count += cls.chars_count(item) if isinstance(item, DataStructureTypes) else len(str(item))

        return chars_count

    @classmethod
    def strip(cls, data: DataStructure) -> DataStructure:
        """Removes leading and trailing whitespaces from the data structure's items.\n
        -------------------------------------------------------------------------------
        - `data` -⠀the data structure to strip the items from"""
        # NOTE(review): non-structure leaves are assumed to be strings — a non-str leaf
        # (e.g. an int) would raise AttributeError on `.strip()`; confirm callers' data.
        if isinstance(data, dict):
            return {k.strip(): cls.strip(v) if isinstance(v, DataStructureTypes) else v.strip() for k, v in data.items()}

        if isinstance(data, IndexIterableTypes):
            return type(data)(cls.strip(item) if isinstance(item, DataStructureTypes) else item.strip() for item in data)

        raise TypeError(f"Unsupported data structure type: {type(data)}")

    @classmethod
    def remove_empty_items(cls, data: DataStructure, spaces_are_empty: bool = False) -> DataStructure:
        """Removes empty items from the data structure.\n
        ---------------------------------------------------------------------------------
        - `data` -⠀the data structure to remove empty items from.
        - `spaces_are_empty` -⠀if true, it will count items with only spaces as empty"""
        if isinstance(data, dict):
            return {
                k: (v if not isinstance(v, DataStructureTypes) else cls.remove_empty_items(v, spaces_are_empty))
                for k, v in data.items() if not String.is_empty(v, spaces_are_empty)
            }

        if isinstance(data, IndexIterableTypes):
            # Two passes in one expression: the inner generator drops empty strings/None
            # (recursing into nested structures), the outer filter then drops containers
            # that became empty after that recursion.
            return type(data)(
                item for item in
                (
                    (item if not isinstance(item, DataStructureTypes) else cls.remove_empty_items(item, spaces_are_empty)) \
                    for item in data if not (isinstance(item, (str, type(None))) and String.is_empty(item, spaces_are_empty))
                )
                if item not in ([], (), {}, set(), frozenset())
            )

        raise TypeError(f"Unsupported data structure type: {type(data)}")

    @classmethod
    def remove_duplicates(cls, data: DataStructure) -> DataStructure:
        """Removes all duplicates from the data structure.\n
        -----------------------------------------------------------
        - `data` -⠀the data structure to remove duplicates from"""
        if isinstance(data, dict):
            return {k: cls.remove_duplicates(v) if isinstance(v, DataStructureTypes) else v for k, v in data.items()}

        if isinstance(data, (list, tuple)):
            # Linear scan instead of a set so unhashable items (lists, dicts) can be
            # deduplicated by equality while preserving the original order.
            result: list[Any] = []
            for item in data:
                processed_item = cls.remove_duplicates(item) if isinstance(item, DataStructureTypes) else item
                is_duplicate: bool = False

                for existing_item in result:
                    if processed_item == existing_item:
                        is_duplicate = True
                        break

                if not is_duplicate:
                    result.append(processed_item)

            return type(data)(result)

        if isinstance(data, (set, frozenset)):
            # Sets are already duplicate-free; only nested structures need recursion.
            processed_elements = set()
            for item in data:
                processed_item = cls.remove_duplicates(item) if isinstance(item, DataStructureTypes) else item
                processed_elements.add(processed_item)
            return type(data)(processed_elements)

        raise TypeError(f"Unsupported data structure type: {type(data)}")

    @classmethod
    def remove_comments(
        cls,
        data: DataStructure,
        comment_start: str = ">>",
        comment_end: str = "<<",
        comment_sep: str = "",
    ) -> DataStructure:
        """Remove comments from a list, tuple or dictionary.\n
        ---------------------------------------------------------------------------------------------------------------
        - `data` -⠀list, tuple or dictionary, where the comments should get removed from
        - `comment_start` -⠀the string that marks the start of a comment inside `data`
        - `comment_end` -⠀the string that marks the end of a comment inside `data`
        - `comment_sep` -⠀the string with which a comment will be replaced, if it is in the middle of a value\n
        ---------------------------------------------------------------------------------------------------------------
        #### Examples:
        ```python
        data = {
            "key1": [
                ">> COMMENT IN THE BEGINNING OF THE STRING << value1",
                "value2 >> COMMENT IN THE END OF THE STRING",
                "val>> COMMENT IN THE MIDDLE OF THE STRING <<ue3",
                ">> FULL VALUE IS A COMMENT value4",
            ],
            ">> FULL KEY + ALL ITS VALUES ARE A COMMENT key2": [
                "value",
                "value",
                "value",
            ],
            "key3": ">> ALL THE KEYS VALUES ARE COMMENTS value",
        }

        processed_data = Data.remove_comments(
            data,
            comment_start=">>",
            comment_end="<<",
            comment_sep="__",
        )
        ```\n
        ---------------------------------------------------------------------------------------------------------------
        For this example, `processed_data` will be:
        ```python
        {
            "key1": [
                "value1",
                "value2",
                "val__ue3",
            ],
            "key3": None,
        }
        ```\n
        - For `key1`, all the comments will just be removed, except at `value3` and `value4`:
          * `value3` The comment is removed and the parts left and right are joined through `comment_sep`.
          * `value4` The whole value is removed, since the whole value was a comment.
        - For `key2`, the key, including its whole values will be removed.
        - For `key3`, since all its values are just comments, the key will still exist, but with a value of `None`."""
        if len(comment_start) == 0:
            raise ValueError("The 'comment_start' parameter string must not be empty.")

        # Delegates to the callable helper, which compiles the comment regex once.
        return _DataRemoveCommentsHelper(
            data=data,
            comment_start=comment_start,
            comment_end=comment_end,
            comment_sep=comment_sep,
        )()

    @classmethod
    def is_equal(
        cls,
        data1: DataStructure,
        data2: DataStructure,
        ignore_paths: str | list[str] = "",
        path_sep: str = "->",
        comment_start: str = ">>",
        comment_end: str = "<<",
    ) -> bool:
        """Compares two structures and returns `True` if they are equal and `False` otherwise.\n
        ⇾ Will not detect, if a key-name has changed, only if removed or added.\n
        ------------------------------------------------------------------------------------------------
        - `data1` -⠀the first data structure to compare
        - `data2` -⠀the second data structure to compare
        - `ignore_paths` -⠀a path or list of paths to key/s and item/s to ignore during comparison:<br>
          Comments are not ignored when comparing. `comment_start` and `comment_end` are only used
          to correctly recognize the keys in the `ignore_paths`.
        - `path_sep` -⠀the separator between the keys/indexes in the `ignore_paths`
        - `comment_start` -⠀the string that marks the start of a comment inside `data1` and `data2`
        - `comment_end` -⠀the string that marks the end of a comment inside `data1` and `data2`\n
        ------------------------------------------------------------------------------------------------
        The paths from `ignore_paths` and the `path_sep` parameter work exactly the same way as for
        the method `Data.get_path_id()`. See its documentation for more details."""
        if len(path_sep) == 0:
            raise ValueError("The 'path_sep' parameter string must not be empty.")

        if isinstance(ignore_paths, str):
            ignore_paths = [ignore_paths]

        # Comments are stripped from both sides first, then compared structurally.
        # Empty path strings are dropped by the `if path` filter.
        return cls._compare_nested(
            data1=cls.remove_comments(data1, comment_start, comment_end),
            data2=cls.remove_comments(data2, comment_start, comment_end),
            ignore_paths=[str(path).split(path_sep) for path in ignore_paths if path],
        )

    @classmethod
    def get_path_id(
        cls,
        data: DataStructure,
        value_paths: str | list[str],
        path_sep: str = "->",
        comment_start: str = ">>",
        comment_end: str = "<<",
        ignore_not_found: bool = False,
    ) -> Optional[str | list[Optional[str]]]:
        """Generates a unique ID based on the path to a specific value within a nested data structure.\n
        --------------------------------------------------------------------------------------------------
        - `data` -⠀the list, tuple, or dictionary, which the id should be generated for
        - `value_paths` -⠀a path or list of paths to the value/s to generate the id for (explained below)
        - `path_sep` -⠀the separator between the keys/indexes in the `value_paths`
        - `comment_start` -⠀the string that marks the start of a comment inside `data`
        - `comment_end` -⠀the string that marks the end of a comment inside `data`
        - `ignore_not_found` -⠀if true, the function will return `None` if the value is not found
          instead of raising an error\n
        --------------------------------------------------------------------------------------------------
        The param `value_path` is a sort of path (or a list of paths) to the value/s to be updated.
        #### In this example:
        ```python
        {
            "healthy": {
                "fruit": ["apples", "bananas", "oranges"],
                "vegetables": ["carrots", "broccoli", "celery"]
            }
        }
        ```
        … if you want to change the value of `"apples"` to `"strawberries"`, the value path would be
        `healthy->fruit->apples` or if you don't know that the value is `"apples"` you can also use the
        index of the value, so `healthy->fruit->0`."""
        if len(path_sep) == 0:
            raise ValueError("The 'path_sep' parameter string must not be empty.")

        data = cls.remove_comments(data, comment_start, comment_end)
        if isinstance(value_paths, str):
            return _DataGetPathIdHelper(value_paths, path_sep, data, ignore_not_found)()

        # NOTE(review): a single-element list collapses to a scalar result here
        # (`results[0]`), not a one-element list — confirm callers expect that.
        results = [_DataGetPathIdHelper(path, path_sep, data, ignore_not_found)() for path in value_paths]
        return results if len(results) > 1 else results[0] if results else None

    @classmethod
    def get_value_by_path_id(cls, data: DataStructure, path_id: str, get_key: bool = False) -> Any:
        """Retrieves the value from `data` using the provided `path_id`, as long as the data structure
        hasn't changed since creating the path ID.\n
        --------------------------------------------------------------------------------------------------
        - `data` -⠀the list, tuple, or dictionary to retrieve the value from
        - `path_id` -⠀the path ID to the value to retrieve, created before using `Data.get_path_id()`
        - `get_key` -⠀if true and the final item is in a dict, it returns the key instead of the value"""
        parent: Optional[DataStructure] = None  # container one level above `current_data`
        path = cls._sep_path_id(path_id)
        current_data: Any = data

        # Walk the decoded index path one level at a time.
        for i, path_idx in enumerate(path):
            if isinstance(current_data, dict):
                keys = list(current_data.keys())
                if i == len(path) - 1 and get_key:
                    return keys[path_idx]
                parent = current_data
                current_data = current_data[keys[path_idx]]

            elif isinstance(current_data, IndexIterableTypes):
                if i == len(path) - 1 and get_key:
                    # The "key" of an iterable item is the dict key its container is stored under.
                    if parent is None or not isinstance(parent, dict):
                        raise ValueError(f"Cannot get key from a non-dict parent at path '{path[:i + 1]}'")
                    return next(key for key, value in parent.items() if value is current_data)
                parent = current_data
                current_data = list(current_data)[path_idx]  # CONVERT TO LIST FOR INDEXING

            else:
                raise TypeError(f"Unsupported type '{type(current_data)}' at path '{path[:i + 1]}'")

        return current_data

    @classmethod
    def set_value_by_path_id(cls, data: DataStructure, update_values: dict[str, Any]) -> DataStructure:
        """Updates the value/s from `update_values` in the `data`, as long as the data structure
        hasn't changed since creating the path ID to that value.\n
        -----------------------------------------------------------------------------------------
        - `data` -⠀the list, tuple, or dictionary to update the value/s in
        - `update_values` -⠀a dictionary where keys are path IDs and values are the new values
          to insert, for example:
          ```python
          { "1>012": "new value", "1>31": ["new value 1", "new value 2"], … }
          ```
          The path IDs should have been created using `Data.get_path_id()`."""
        # NOTE(review): the comprehension keeps every item, so this check only
        # rejects an empty `update_values` dict — the "valid" name overstates it.
        if not (valid_update_values := [(path_id, new_val) for path_id, new_val in update_values.items()]):
            raise ValueError(f"No valid 'update_values' found in dictionary:\n{update_values!r}")

        # Each update rebuilds the affected containers; `data` is rebound, not mutated in place.
        for path_id, new_val in valid_update_values:
            data = cls._set_nested_val(data, id_path=cls._sep_path_id(path_id), value=new_val)

        return data

    @classmethod
    def render(
        cls,
        data: DataStructure,
        indent: int = 4,
        compactness: Literal[0, 1, 2] = 1,
        max_width: int = 127,
        sep: str = ", ",
        as_json: bool = False,
        syntax_highlighting: dict[str, str] | bool = False,
    ) -> str:
        """Get nicely formatted data structure-strings.\n
        ---------------------------------------------------------------------------------------------------------------
        - `data` -⠀the data structure to format
        - `indent` -⠀the amount of spaces to use for indentation
        - `compactness` -⠀the level of compactness for the output (explained below – section 1)
        - `max_width` -⠀the maximum width of a line before expanding (only used if `compactness` is `1`)
        - `sep` -⠀the separator between items in the data structure
        - `as_json` -⠀if true, the output will be in valid JSON format
        - `syntax_highlighting` -⠀a dictionary defining the syntax highlighting styles (explained below – section 2)
          or `True` to apply default syntax highlighting styles or `False`/`None` to disable syntax highlighting\n
        ---------------------------------------------------------------------------------------------------------------
        There are three different levels of `compactness`:
        - `0` expands everything possible
        - `1` only expands if there's other lists, tuples or dicts inside of data or,
          if the data's content is longer than `max_width`
        - `2` keeps everything collapsed (all on one line)\n
        ---------------------------------------------------------------------------------------------------------------
        The `syntax_highlighting` dictionary has 5 keys for each part of the data.<br>
        The key's values are the formatting codes to apply to this data part.<br>
        The formatting can be changed by simply adding the key with the new value
        inside the `syntax_highlighting` dictionary.\n
        The keys with their default values are:
        - `str: "br:blue"`
        - `number: "br:magenta"`
        - `literal: "magenta"`
        - `type: "i|green"`
        - `punctuation: "br:black"`\n
        ---------------------------------------------------------------------------------------------------------------
        For more detailed information about formatting codes, see the `format_codes` module documentation."""
        if indent < 0:
            raise ValueError("The 'indent' parameter must be a non-negative integer.")
        if max_width <= 0:
            raise ValueError("The 'max_width' parameter must be a positive integer.")

        # All rendering logic lives in the callable helper (defined later in this module).
        return _DataRenderHelper(
            cls,
            data=data,
            indent=indent,
            compactness=compactness,
            max_width=max_width,
            sep=sep,
            as_json=as_json,
            syntax_highlighting=syntax_highlighting,
        )()

    @classmethod
    def print(
        cls,
        data: DataStructure,
        indent: int = 4,
        compactness: Literal[0, 1, 2] = 1,
        max_width: int = 127,
        sep: str = ", ",
        end: str = "\n",
        as_json: bool = False,
        # NOTE(review): mutable default `{}` — unlike `render`'s `False`, this enables
        # default highlighting (empty dict = no overrides). It appears to be passed
        # through unmodified, but confirm _DataRenderHelper never mutates it.
        syntax_highlighting: dict[str, str] | bool = {},
    ) -> None:
        """Print nicely formatted data structures.\n
        ---------------------------------------------------------------------------------------------------------------
        - `data` -⠀the data structure to format and print
        - `indent` -⠀the amount of spaces to use for indentation
        - `compactness` -⠀the level of compactness for the output (explained below – section 1)
        - `max_width` -⠀the maximum width of a line before expanding (only used if `compactness` is `1`)
        - `sep` -⠀the separator between items in the data structure
        - `end` -⠀the string appended after the last value, default a newline `\\n`
        - `as_json` -⠀if true, the output will be in valid JSON format
        - `syntax_highlighting` -⠀a dictionary defining the syntax highlighting styles (explained below – section 2)\n
        ---------------------------------------------------------------------------------------------------------------
        There are three different levels of `compactness`:
        - `0` expands everything possible
        - `1` only expands if there's other lists, tuples or dicts inside of data or,
          if the data's content is longer than `max_width`
        - `2` keeps everything collapsed (all on one line)\n
        ---------------------------------------------------------------------------------------------------------------
        The `syntax_highlighting` parameter is a dictionary with 5 keys for each part of the data.<br>
        The key's values are the formatting codes to apply to this data part.<br>
        The formatting can be changed by simply adding the key with the new value inside the
        `syntax_highlighting` dictionary.\n
        The keys with their default values are:
        - `str: "br:blue"`
        - `number: "br:magenta"`
        - `literal: "magenta"`
        - `type: "i|green"`
        - `punctuation: "br:black"`\n
        For no syntax highlighting, set `syntax_highlighting` to `False` or `None`.\n
        ---------------------------------------------------------------------------------------------------------------
        For more detailed information about formatting codes, see the `format_codes` module documentation."""
        FormatCodes.print(
            cls.render(
                data=data,
                indent=indent,
                compactness=compactness,
                max_width=max_width,
                sep=sep,
                as_json=as_json,
                syntax_highlighting=syntax_highlighting,
            ),
            end=end,
        )

    @classmethod
    def _compare_nested(
        cls,
        data1: Any,
        data2: Any,
        ignore_paths: list[list[str]],
        # Mutable default is safe here: the list is never mutated, only re-created
        # via `current_path + [key]` on each recursive call.
        current_path: list[str] = [],
    ) -> bool:
        """Internal recursive comparison used by `Data.is_equal()`. Any subtree whose
        path matches a prefix in `ignore_paths` is treated as equal."""
        if any(current_path == path[:len(current_path)] for path in ignore_paths):
            return True

        if type(data1) is not type(data2):
            return False

        if isinstance(data1, dict) and isinstance(data2, dict):
            if set(data1.keys()) != set(data2.keys()):
                return False
            return all(cls._compare_nested( \
                data1=data1[key],
                data2=data2[key],
                ignore_paths=ignore_paths,
                current_path=current_path + [key],
            ) for key in data1)

        elif isinstance(data1, (list, tuple)):
            if len(data1) != len(data2):
                return False
            return all(cls._compare_nested( \
                data1=item1,
                data2=item2,
                ignore_paths=ignore_paths,
                current_path=current_path + [str(i)],
            ) for i, (item1, item2) in enumerate(zip(data1, data2)))

        elif isinstance(data1, (set, frozenset)):
            # Sets are unordered, so per-item paths are meaningless — compare directly.
            return data1 == data2

        return data1 == data2

    @staticmethod
    def _sep_path_id(path_id: str) -> list[int]:
        """Internal method to separate a path-ID string into its ID parts as a list of integers."""
        # Format is "<width>><digits>", where <digits> is a concatenation of
        # zero-padded indexes, each exactly <width> characters long.
        if len(split_id := path_id.split(">")) == 2:
            id_part_len, path_id_parts = split_id

            if (id_part_len.isdigit() and path_id_parts.isdigit()):
                id_part_len_int = int(id_part_len)

                if id_part_len_int > 0 and (len(path_id_parts) % id_part_len_int == 0):
                    return [int(path_id_parts[i:i + id_part_len_int]) for i in range(0, len(path_id_parts), id_part_len_int)]

        raise ValueError(f"Path ID '{path_id}' is an invalid format.")

    @classmethod
    def _set_nested_val(cls, data: DataStructure, id_path: list[int], value: Any) -> Any:
        """Internal method to set a value in a nested data structure based on the provided ID path."""
        # Containers are copied and rebuilt on the way down (immutable-style update),
        # so tuples/frozensets keep their original type via `was_t(...)`.
        current_data: Any = data

        if len(id_path) == 1:
            # Base case: set the value at the final index.
            if isinstance(current_data, dict):
                keys, data_dict = list(current_data.keys()), dict(current_data)
                data_dict[keys[id_path[0]]] = value
                return data_dict
            elif isinstance(current_data, IndexIterableTypes):
                was_t, data_list = type(current_data), list(current_data)
                data_list[id_path[0]] = value
                return was_t(data_list)

        else:
            # Recursive case: rebuild this level with the updated child spliced in.
            if isinstance(current_data, dict):
                keys, data_dict = list(current_data.keys()), dict(current_data)
                data_dict[keys[id_path[0]]] = cls._set_nested_val(data_dict[keys[id_path[0]]], id_path[1:], value)
                return data_dict
            elif isinstance(current_data, IndexIterableTypes):
                was_t, data_list = type(current_data), list(current_data)
                data_list[id_path[0]] = cls._set_nested_val(data_list[id_path[0]], id_path[1:], value)
                return was_t(data_list)

        # Unsupported container type at this level: returned unchanged.
        return current_data
+
|
|
542
|
+
|
|
543
|
+
class _DataRemoveCommentsHelper:
    """Internal, callable helper class to remove all comments from nested data structures."""

    def __init__(self, data: DataStructure, comment_start: str, comment_end: str, comment_sep: str):
        # data: the structure to process when the instance is called
        # comment_start / comment_end: delimiter strings marking a comment
        # comment_sep: joiner inserted where a comment sat in the middle of a value
        self.data = data
        self.comment_start = comment_start
        self.comment_end = comment_end
        self.comment_sep = comment_sep

        # Compiled once per instance. Group 1 captures everything before the first
        # `comment_start`; the comment body and an optional `comment_end` are consumed
        # without capturing; group 2 captures the remainder after the comment.
        # When `comment_end` is empty there is no end marker, so no pattern is built
        # and strings are handled by the simpler prefix check below.
        self.pattern = _re.compile(Regex._clean( \
            rf"""^(
                (?:(?!{_re.escape(comment_start)}).)*
            )
            {_re.escape(comment_start)}
            (?:(?:(?!{_re.escape(comment_end)}).)*)
            (?:{_re.escape(comment_end)})?
            (.*?)$"""
        )) if len(comment_end) > 0 else None

    def __call__(self) -> DataStructure:
        return self.remove_nested_comments(self.data)

    def remove_nested_comments(self, item: Any) -> Any:
        """Recursively strips comments; returns `None` for values that were entirely a comment."""
        if isinstance(item, dict):
            # Keys that turn out to be pure comments (processed key is None) drop the
            # whole entry; values may legitimately become None ("all values were comments").
            return {
                key: val
                for key, val in ( \
                    (self.remove_nested_comments(k), self.remove_nested_comments(v)) for k, v in item.items()
                ) if key is not None
            }

        if isinstance(item, IndexIterableTypes):
            # Fully-comment items are dropped; the original container type is preserved.
            processed = (v for v in map(self.remove_nested_comments, item) if v is not None)
            return type(item)(processed)

        if isinstance(item, str):
            if self.pattern:
                if (match := self.pattern.match(item)):
                    start, end = match.group(1).strip(), match.group(2).strip()
                    # Join surviving halves with the separator; empty result becomes None.
                    return f"{start}{self.comment_sep if start and end else ''}{end}" or None
                return item.strip() or None
            else:
                # No end marker configured: a comment-start prefix voids the whole string.
                return None if item.lstrip().startswith(self.comment_start) else item.strip() or None

        # Non-string, non-container leaves (numbers, bools, …) pass through untouched.
        return item
|
|
588
|
+
|
|
589
|
+
|
|
590
|
+
class _DataGetPathIdHelper:
|
|
591
|
+
"""Internal, callable helper class to process a data path and generate its unique path ID."""
|
|
592
|
+
|
|
593
|
+
def __init__(self, path: str, path_sep: str, data_obj: DataStructure, ignore_not_found: bool):
|
|
594
|
+
self.keys = path.split(path_sep)
|
|
595
|
+
self.data_obj = data_obj
|
|
596
|
+
self.ignore_not_found = ignore_not_found
|
|
597
|
+
|
|
598
|
+
self.path_ids: list[str] = []
|
|
599
|
+
self.max_id_length = 0
|
|
600
|
+
self.current_data: Any = data_obj
|
|
601
|
+
|
|
602
|
+
def __call__(self) -> Optional[str]:
|
|
603
|
+
for key in self.keys:
|
|
604
|
+
if not self.process_key(key):
|
|
605
|
+
break
|
|
606
|
+
|
|
607
|
+
if not self.path_ids:
|
|
608
|
+
return None
|
|
609
|
+
return f"{self.max_id_length}>{''.join(id.zfill(self.max_id_length) for id in self.path_ids)}"
|
|
610
|
+
|
|
611
|
+
def process_key(self, key: str) -> bool:
|
|
612
|
+
"""Process a single key and update `path_ids`. Returns `False` if processing should stop."""
|
|
613
|
+
idx: Optional[int] = None
|
|
614
|
+
|
|
615
|
+
if isinstance(self.current_data, dict):
|
|
616
|
+
if (idx := self.process_dict_key(key)) is None:
|
|
617
|
+
return False
|
|
618
|
+
elif isinstance(self.current_data, IndexIterableTypes):
|
|
619
|
+
if (idx := self.process_iterable_key(key)) is None:
|
|
620
|
+
return False
|
|
621
|
+
else:
|
|
622
|
+
return False
|
|
623
|
+
|
|
624
|
+
self.path_ids.append(str(idx))
|
|
625
|
+
self.max_id_length = max(self.max_id_length, len(str(idx)))
|
|
626
|
+
return True
|
|
627
|
+
|
|
628
|
+
def process_dict_key(self, key: str) -> Optional[int]:
|
|
629
|
+
"""Process a key for dictionary data. Returns the index or `None` if not found."""
|
|
630
|
+
if key.isdigit():
|
|
631
|
+
if self.ignore_not_found:
|
|
632
|
+
return None
|
|
633
|
+
raise TypeError(f"Key '{key}' is invalid for a dict type.")
|
|
634
|
+
|
|
635
|
+
try:
|
|
636
|
+
idx = list(self.current_data.keys()).index(key)
|
|
637
|
+
self.current_data = self.current_data[key]
|
|
638
|
+
return idx
|
|
639
|
+
except (ValueError, KeyError):
|
|
640
|
+
if self.ignore_not_found:
|
|
641
|
+
return None
|
|
642
|
+
raise KeyError(f"Key '{key}' not found in dict.")
|
|
643
|
+
|
|
644
|
+
def process_iterable_key(self, key: str) -> Optional[int]:
|
|
645
|
+
"""Process a key for iterable data. Returns the index or `None` if not found."""
|
|
646
|
+
try:
|
|
647
|
+
idx = int(key)
|
|
648
|
+
self.current_data = list(self.current_data)[idx]
|
|
649
|
+
return idx
|
|
650
|
+
except ValueError:
|
|
651
|
+
try:
|
|
652
|
+
idx = list(self.current_data).index(key)
|
|
653
|
+
self.current_data = list(self.current_data)[idx]
|
|
654
|
+
return idx
|
|
655
|
+
except ValueError:
|
|
656
|
+
if self.ignore_not_found:
|
|
657
|
+
return None
|
|
658
|
+
raise ValueError(f"Value '{key}' not found in '{type(self.current_data).__name__}'")
|
|
659
|
+
|
|
660
|
+
|
|
661
|
+
class _DataRenderHelper:
|
|
662
|
+
"""Internal, callable helper class to format data structures as strings."""
|
|
663
|
+
|
|
664
|
+
def __init__(
    self,
    cls: type[Data],
    data: DataStructure,
    indent: int,
    compactness: Literal[0, 1, 2],
    max_width: int,
    sep: str,
    as_json: bool,
    syntax_highlighting: dict[str, str] | bool,
):
    """Prepare rendering state: syntax-highlight codes, separator and punctuation map.

    :param cls: the `Data` class, used for its static helpers (e.g. `chars_count`,
        `serialize_bytes`)
    :param data: the structure to render
    :param indent: spaces added per nesting level
    :param compactness: 0 = always expand, 1 = heuristic, 2 = always inline
    :param max_width: line-width threshold used by the expansion heuristic
    :param sep: item separator string
    :param as_json: render with JSON conventions (quotes, literals, null)
    :param syntax_highlighting: False/None disables; True uses defaults; a dict
        overrides individual highlight codes
    """
    self.cls = cls
    self.data = data
    self.indent = indent
    self.compactness = compactness
    self.max_width = max_width
    self.as_json = as_json

    # Start from library defaults; user overrides are merged in below.
    self.syntax_hl: dict[str, tuple[str, str]] = _DEFAULT_SYNTAX_HL.copy()
    self.do_syntax_hl = syntax_highlighting not in {None, False}

    if self.do_syntax_hl:
        if syntax_highlighting is True:
            syntax_highlighting = {}
        elif not isinstance(syntax_highlighting, dict):
            raise TypeError(f"Expected 'syntax_highlighting' to be a dict or bool. Got: {type(syntax_highlighting)}")

        # Known keys with a non-empty value become ("[code]", "[_]") wrap pairs;
        # unknown keys or empty values disable highlighting for that key.
        self.syntax_hl.update({
            k: (f"[{v}]", "[_]") if k in self.syntax_hl and v not in {"", None} else ("", "")
            for k, v in syntax_highlighting.items()
        })

        # NOTE(review): indentation reconstructed — this wrap is placed inside the
        # `do_syntax_hl` branch so the separator only carries punctuation codes
        # when highlighting is active (consistent with `self.punct` below); confirm.
        sep = f"{self.syntax_hl['punctuation'][0]}{sep}{self.syntax_hl['punctuation'][1]}"

    self.sep = sep

    # "(" needs an escaped highlighted form ("/(") so it is not parsed as a
    # format code; every other punctuation char maps to itself.
    punct_map: dict[str, str | tuple[str, str]] = {"(": ("/(", "("), **{c: c for c in "'\":)[]{}"}}
    self.punct: dict[str, str] = {
        k: ((f"{self.syntax_hl['punctuation'][0]}{v[0]}{self.syntax_hl['punctuation'][1]}" if self.do_syntax_hl else v[1])
            if isinstance(v, (list, tuple)) else
            (f"{self.syntax_hl['punctuation'][0]}{v}{self.syntax_hl['punctuation'][1]}" if self.do_syntax_hl else v))
        for k, v in punct_map.items()
    }
|
|
707
|
+
|
|
708
|
+
def __call__(self) -> str:
    """Render the stored data structure and drop trailing whitespace before newlines."""
    if isinstance(self.data, dict):
        rendered = self.format_dict(self.data, 0)
    else:
        rendered = self.format_sequence(self.data, 0)
    return _re.sub(r"\s+(?=\n)", "", rendered)
|
|
713
|
+
|
|
714
|
+
def format_value(self, value: Any, current_indent: Optional[int] = None) -> str:
    """Format a single value as a (possibly highlighted) string fragment.

    `current_indent=None` marks a context where nested containers must not be
    recursed into (e.g. dict keys); container branches are then skipped and
    the value falls through to the scalar/string handling below.
    """
    if current_indent is not None and isinstance(value, dict):
        return self.format_dict(value, current_indent + self.indent)
    elif current_indent is not None and hasattr(value, "__dict__"):
        # Arbitrary objects are rendered via their attribute dict.
        return self.format_dict(value.__dict__, current_indent + self.indent)
    elif current_indent is not None and isinstance(value, IndexIterableTypes):
        return self.format_sequence(value, current_indent + self.indent)
    elif current_indent is not None and isinstance(value, (bytes, bytearray)):
        # Bytes become a serialized dict (JSON) or a "<type>(data, encoding)"
        # call-style rendering (non-JSON), optionally type-highlighted.
        obj_dict = self.cls.serialize_bytes(value)
        return (
            self.format_dict(obj_dict, current_indent + self.indent) if self.as_json else (
                f"{self.syntax_hl['type'][0]}{(k := next(iter(obj_dict)))}{self.syntax_hl['type'][1]}"
                + self.format_sequence((obj_dict[k], obj_dict["encoding"]), current_indent + self.indent)
                if self.do_syntax_hl else (k := next(iter(obj_dict)))
                + self.format_sequence((obj_dict[k], obj_dict["encoding"]), current_indent + self.indent)
            )
        )
    elif isinstance(value, bool):
        # JSON booleans are lowercase; checked before int (bool is an int subclass).
        val = str(value).lower() if self.as_json else str(value)
        return f"{self.syntax_hl['literal'][0]}{val}{self.syntax_hl['literal'][1]}" if self.do_syntax_hl else val
    elif isinstance(value, (int, float)):
        # JSON has no inf/nan literals, so those serialize as null.
        val = "null" if self.as_json and (_math.isinf(value) or _math.isnan(value)) else str(value)
        return f"{self.syntax_hl['number'][0]}{val}{self.syntax_hl['number'][1]}" if self.do_syntax_hl else val
    elif current_indent is not None and isinstance(value, complex):
        # JSON: plain "a+bj" string; otherwise "complex(real, imag)" call style.
        return (
            self.format_value(str(value).strip("()")) if self.as_json else (
                f"{self.syntax_hl['type'][0]}complex{self.syntax_hl['type'][1]}"
                + self.format_sequence((value.real, value.imag), current_indent + self.indent) if self.do_syntax_hl else
                f"complex{self.format_sequence((value.real, value.imag), current_indent + self.indent)}"
            )
        )
    elif value is None:
        val = "null" if self.as_json else "None"
        return f"{self.syntax_hl['literal'][0]}{val}{self.syntax_hl['literal'][1]}" if self.do_syntax_hl else val
    else:
        # Fallback: everything else is stringified, escaped for the quote
        # character, and quoted (double quotes for JSON, single otherwise).
        return ((
            self.punct['"'] + self.syntax_hl["str"][0] + String.escape(str(value), '"') + self.syntax_hl["str"][1]
            + self.punct['"'] if self.do_syntax_hl else self.punct['"'] + String.escape(str(value), '"') + self.punct['"']
        ) if self.as_json else (
            self.punct["'"] + self.syntax_hl["str"][0] + String.escape(str(value), "'") + self.syntax_hl["str"][1]
            + self.punct["'"] if self.do_syntax_hl else self.punct["'"] + String.escape(str(value), "'") + self.punct["'"]
        ))
|
|
756
|
+
|
|
757
|
+
def should_expand(self, seq: IndexIterable) -> bool:
    """Decide whether `seq` should be rendered across multiple lines.

    Compactness 0 always expands and 2 never does; otherwise expand when the
    sequence holds more than one nested container, one container alongside
    other items, or its inline rendering would exceed `max_width`.
    """
    if self.compactness == 0:
        return True
    if self.compactness == 2:
        return False

    nested_types: tuple[type, ...] = (list, tuple, dict, set, frozenset)
    if self.as_json:
        # Bytes render as a serialized dict in JSON mode, so they count as nested.
        nested_types += (bytes, bytearray)

    nested_count = sum(isinstance(entry, nested_types) for entry in seq)

    if nested_count > 1:
        return True
    if nested_count == 1 and len(seq) > 1:
        return True
    return self.cls.chars_count(seq) + (len(seq) * len(self.sep)) > self.max_width
|
|
772
|
+
|
|
773
|
+
def format_dict(self, d: dict, current_indent: int) -> str:
    """Render a dict inline (`{k: v, ...}`) or expanded over multiple lines."""
    inline = self.compactness == 2 or not d or not self.should_expand(list(d.values()))
    if inline:
        body = self.sep.join(
            f"{self.format_value(k)}{self.punct[':']} {self.format_value(v, current_indent)}" for k, v in d.items()
        )
        return self.punct["{"] + body + self.punct["}"]

    inner_pad = " " * (current_indent + self.indent)
    entries = []
    for key, val in d.items():
        rendered = self.format_value(val, current_indent)
        entries.append(f"{inner_pad}{self.format_value(key)}{self.punct[':']} {rendered}")

    closing = f"\n{' ' * current_indent}" + self.punct["}"]
    return self.punct["{"] + "\n" + f"{self.sep}\n".join(entries) + closing
|
|
785
|
+
|
|
786
|
+
def format_sequence(self, seq, current_indent: int) -> str:
    """Render a sequence inline (`[a, b]` / `(a, b)`) or expanded over multiple lines."""
    if self.as_json:
        # JSON only has arrays, so tuples/sets are normalized to lists.
        seq = list(seq)

    if isinstance(seq, list):
        open_b, close_b = self.punct["["], self.punct["]"]
    else:
        open_b, close_b = self.punct["("], self.punct[")"]

    if self.compactness == 2 or not seq or not self.should_expand(seq):
        inline = self.sep.join(self.format_value(entry, current_indent) for entry in seq)
        return f"{open_b}{inline}{close_b}"

    pad = " " * (current_indent + self.indent)
    rendered = [self.format_value(entry, current_indent) for entry in seq]
    body = f"{self.sep}\n".join(f"{pad}{entry}" for entry in rendered)

    return f"{open_b}\n{body}\n{' ' * current_indent}{close_b}"
|