xulbux 1.6.8-py3-none-any.whl → 1.7.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of xulbux might be problematic.

xulbux/xx_format_codes.py CHANGED
@@ -152,10 +152,10 @@ Per default, you can also use `+` and `-` to get lighter and darker `default_col
 
 from ._consts_ import ANSI
 from .xx_string import String
-from .xx_regex import Regex
-from .xx_color import Color, rgba, hexa
+from .xx_regex import Regex, Match, Pattern
+from .xx_color import Color, rgba, Rgba, Hexa
 
-from typing import Optional, Pattern
+from typing import Optional
 
 import ctypes as _ctypes
 import regex as _rx
 import sys as _sys
@@ -205,7 +205,7 @@ class FormatCodes:
     @staticmethod
     def print(
         *values: object,
-        default_color: rgba | hexa = None,
+        default_color: Optional[Rgba | Hexa] = None,
         brightness_steps: int = 20,
         sep: str = " ",
         end: str = "\n",
@@ -223,7 +223,7 @@ class FormatCodes:
     @staticmethod
     def input(
         prompt: object = "",
-        default_color: rgba | hexa = None,
+        default_color: Optional[Rgba | Hexa] = None,
         brightness_steps: int = 20,
         reset_ansi: bool = False,
     ) -> str:
@@ -243,7 +243,7 @@ class FormatCodes:
     @staticmethod
     def to_ansi(
         string: str,
-        default_color: rgba | hexa = None,
+        default_color: Optional[Rgba | Hexa] = None,
         brightness_steps: int = 20,
         _default_start: bool = True,
     ) -> str:
@@ -253,9 +253,9 @@ class FormatCodes:
         `xx_format_codes` module documentation."""
         if not isinstance(string, str):
             string = str(string)
-        if Color.is_valid_rgba(default_color, False):
+        if default_color and Color.is_valid_rgba(default_color, False): # type: ignore[assignment]
             use_default = True
-        elif Color.is_valid_hexa(default_color, False):
+        elif default_color and Color.is_valid_hexa(default_color, False): # type: ignore[assignment]
             use_default, default_color = True, Color.to_rgba(default_color)
         else:
             use_default = False
@@ -264,9 +264,9 @@ class FormatCodes:
         string = _COMPILED["*color"].sub(r"[\1default\2]", string) # REPLACE `[…|*color|…]` WITH `[…|default|…]`
 
         def is_valid_color(color: str) -> bool:
-            return color in ANSI.color_map or Color.is_valid_rgba(color) or Color.is_valid_hexa(color)
+            return bool((color in ANSI.color_map) or Color.is_valid_rgba(color) or Color.is_valid_hexa(color))
 
-        def replace_keys(match: _re.Match) -> str:
+        def replace_keys(match: Match) -> str:
             _formats = formats = match.group(1)
             auto_reset_escaped = match.group(2)
             auto_reset_txt = match.group(3)
@@ -280,8 +280,8 @@ class FormatCodes:
             formats = FormatCodes.to_ansi(formats, default_color, brightness_steps, False)
             format_keys = [k.strip() for k in formats.split("|") if k.strip()]
             ansi_formats = [
-                r if (r := FormatCodes.__get_replacement(k, default_color, brightness_steps)) != k else f"[{k}]"
-                for k in format_keys
+                r if (r := FormatCodes.__get_replacement(k, default_color, brightness_steps)) != k # type: ignore[assignment]
+                else f"[{k}]" for k in format_keys
             ]
             if auto_reset_txt and not auto_reset_escaped:
                 reset_keys = []
@@ -306,8 +306,9 @@ class FormatCodes:
                     else:
                         reset_keys.append(f"_{k}")
                 ansi_resets = [
-                    r for k in reset_keys if (r := FormatCodes.__get_replacement(k, default_color, brightness_steps)
-                    ).startswith(f"{ANSI.char}{ANSI.start}")
+                    r for k in reset_keys
+                    if (r := FormatCodes.__get_replacement(k, default_color, brightness_steps) # type: ignore[assignment]
+                    ).startswith(f"{ANSI.char}{ANSI.start}")
                 ]
             else:
                 ansi_resets = []
@@ -325,7 +326,8 @@ class FormatCodes:
             )
 
         string = "\n".join(_COMPILED["formatting"].sub(replace_keys, line) for line in string.split("\n"))
-        return ((FormatCodes.__get_default_ansi(default_color) if _default_start else "") + string) if use_default else string
+        return (((FormatCodes.__get_default_ansi(default_color) or "") if _default_start else "") # type: ignore[assignment]
+                + string) if use_default else string
 
     @staticmethod
     def escape_ansi(ansi_string: str) -> str:
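The three public entry points above (`print()`, `input()`, `to_ansi()`) now type `default_color` as `Optional[Rgba | Hexa]` instead of an untyped `rgba | hexa` defaulting to `None`. A minimal usage sketch, assuming the module paths follow the file names in this diff; the format-key string and the color value are illustrative, and `rgba(...)` is assumed to accept red/green/blue channels:

```python
# Sketch only: the format string and color value are illustrative examples.
from xulbux.xx_format_codes import FormatCodes
from xulbux.xx_color import rgba

# Passing no default_color (None) is now an explicitly typed, valid call.
FormatCodes.print("plain text, no default color")

# With a default color, the `default` key resolves to it.
theme = rgba(112, 117, 255)  # assumed constructor: red, green, blue
FormatCodes.print("[default]themed text", default_color=theme)

# to_ansi() performs the same conversion without printing.
ansi_string = FormatCodes.to_ansi("[default]themed text", default_color=theme)
print(repr(ansi_string))
```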
@@ -333,33 +335,50 @@ class FormatCodes:
         return ansi_string.replace(ANSI.char, ANSI.escaped_char)
 
     @staticmethod
-    def remove_ansi(ansi_string: str, get_removals: bool = False) -> str | tuple[str, tuple[tuple[int, str], ...]]:
+    def remove_ansi(
+        ansi_string: str,
+        get_removals: bool = False,
+        _ignore_linebreaks: bool = False,
+    ) -> str | tuple[str, tuple[tuple[int, str], ...]]:
         """Removes all ANSI codes from the string.\n
         --------------------------------------------------------------------------------------------------
         If `get_removals` is true, additionally to the cleaned string, a list of tuples will be returned.
-        Each tuple contains the position of the removed ansi code and the removed ansi code."""
+        Each tuple contains the position of the removed ansi code and the removed ansi code.\n
+        If `_ignore_linebreaks` is true, linebreaks will be ignored for the removal positions."""
         if get_removals:
             removals = []
 
-            def replacement(match: _re.Match) -> str:
+            def replacement(match: Match) -> str:
                 start_pos = match.start() - sum(len(removed) for _, removed in removals)
                 if removals and removals[-1][0] == start_pos:
                     start_pos = removals[-1][0]
                 removals.append((start_pos, match.group()))
                 return ""
 
-            clean_string = _COMPILED["ansi_seq"].sub(replacement, ansi_string)
-            return clean_string, tuple(removals)
+            clean_string = _COMPILED["ansi_seq"].sub(
+                replacement,
+                ansi_string.replace("\n", "") if _ignore_linebreaks else ansi_string
+            )
+            return _COMPILED["ansi_seq"].sub("", ansi_string) if _ignore_linebreaks else clean_string, tuple(removals)
         else:
             return _COMPILED["ansi_seq"].sub("", ansi_string)
 
     @staticmethod
-    def remove_formatting(string: str, get_removals: bool = False) -> str | tuple[str, tuple[tuple[int, str], ...]]:
+    def remove_formatting(
+        string: str,
+        get_removals: bool = False,
+        _ignore_linebreaks: bool = False,
+    ) -> str | tuple[str, tuple[tuple[int, str], ...]]:
         """Removes all formatting codes from the string.\n
-        ----------------------------------------------------------------------------------------------------
+        ---------------------------------------------------------------------------------------------------
         If `get_removals` is true, additionally to the cleaned string, a list of tuples will be returned.
-        Each tuple contains the position of the removed formatting code and the removed formatting code."""
-        return FormatCodes.remove_ansi(FormatCodes.to_ansi(string), get_removals=get_removals)
+        Each tuple contains the position of the removed formatting code and the removed formatting code.\n
+        If `_ignore_linebreaks` is true, linebreaks will be ignored for the removal positions."""
+        return FormatCodes.remove_ansi(
+            FormatCodes.to_ansi(string),
+            get_removals=get_removals,
+            _ignore_linebreaks=_ignore_linebreaks,
+        )
 
     @staticmethod
     def __config_console() -> None:
@@ -377,8 +396,8 @@ class FormatCodes:
     @staticmethod
     def __get_default_ansi(
         default_color: tuple,
-        format_key: str = None,
-        brightness_steps: int = None,
+        format_key: Optional[str] = None,
+        brightness_steps: Optional[int] = None,
         _modifiers: tuple[str, str] = (ANSI.default_color_modifiers["lighten"], ANSI.default_color_modifiers["darken"]),
     ) -> Optional[str]:
         """Get the `default_color` and lighter/darker versions of it as ANSI code."""
@@ -386,8 +405,9 @@ class FormatCodes:
             return (ANSI.seq_bg_color if format_key and _COMPILED["bg_default"].search(format_key) else ANSI.seq_color).format(
                 *default_color[:3]
             )
-        if not (format_key in _modifiers[0] or format_key in _modifiers[1]):
+        if format_key is None or not (format_key in _modifiers[0] or format_key in _modifiers[1]):
            return None
+        assert format_key is not None
         match = _COMPILED["modifier"].match(format_key)
         if not match:
             return None
@@ -398,23 +418,25 @@ class FormatCodes:
             if adjust and adjust > 0:
                 modifiers = mod
                 break
+        new_rgb = default_color
         if adjust == 0:
             return None
         elif modifiers in _modifiers[0]:
-            new_rgb = Color.adjust_lightness(default_color, (brightness_steps / 100) * adjust)
+            new_rgb = tuple(Color.adjust_lightness(default_color, (brightness_steps / 100) * adjust))
         elif modifiers in _modifiers[1]:
-            new_rgb = Color.adjust_lightness(default_color, -(brightness_steps / 100) * adjust)
+            new_rgb = tuple(Color.adjust_lightness(default_color, -(brightness_steps / 100) * adjust))
         return (ANSI.seq_bg_color if is_bg else ANSI.seq_color).format(*new_rgb[:3])
 
     @staticmethod
-    def __get_replacement(format_key: str, default_color: rgba = None, brightness_steps: int = 20) -> str:
+    def __get_replacement(format_key: str, default_color: Optional[Rgba] = None, brightness_steps: int = 20) -> str:
         """Gives you the corresponding ANSI code for the given format key.
         If `default_color` is not `None`, the text color will be `default_color` if all formats
         are reset or you can get lighter or darker version of `default_color` (also as BG)"""
         use_default = default_color and Color.is_valid_rgba(default_color, False)
+        _default_color = tuple(Color.to_rgba(default_color)) if use_default else () # type: ignore[assignment]
         _format_key, format_key = format_key, FormatCodes.__normalize_key(format_key) # NORMALIZE KEY AND SAVE ORIGINAL
         if use_default:
-            if new_default_color := FormatCodes.__get_default_ansi(default_color, format_key, brightness_steps):
+            if new_default_color := FormatCodes.__get_default_ansi(_default_color, format_key, brightness_steps):
                 return new_default_color
         for map_key in ANSI.codes_map:
             if (isinstance(map_key, tuple) and format_key in map_key) or format_key == map_key:
@@ -423,8 +445,8 @@ class FormatCodes:
                     v for k, v in ANSI.codes_map.items() if format_key == k or (isinstance(k, tuple) and format_key in k)
                 ), None)
             )
-        rgb_match = _re.match(_COMPILED["rgb"], format_key)
-        hex_match = _re.match(_COMPILED["hex"], format_key)
+        rgb_match = _COMPILED["rgb"].match(format_key)
+        hex_match = _COMPILED["hex"].match(format_key)
         try:
             if rgb_match:
                 is_bg = rgb_match.group(1)
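A small sketch of the extended removal helpers shown above, assuming the same import path; the sample string with a raw escape sequence is illustrative:

```python
# Sketch only: the embedded escape sequence is an illustrative ANSI color code.
from xulbux.xx_format_codes import FormatCodes

colored = "\x1b[31mred\x1b[0m text\nsecond line"

# Without get_removals, only the cleaned string is returned.
clean = FormatCodes.remove_ansi(colored)

# With get_removals=True, a tuple of (position, removed_code) pairs comes back too.
clean, removals = FormatCodes.remove_ansi(colored, get_removals=True)

# The new _ignore_linebreaks flag computes those positions as if "\n" were not present;
# remove_formatting() exposes the same flag, since it delegates to remove_ansi().
clean, removals = FormatCodes.remove_ansi(colored, get_removals=True, _ignore_linebreaks=True)
print(clean)
print(removals)
```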
xulbux/xx_json.py CHANGED
@@ -1,8 +1,9 @@
 from .xx_data import Data
 from .xx_file import File
+from .xx_path import Path
 
+from typing import Any
 import json as _json
-import os as _os
 
 
 class Json:
@@ -15,89 +16,144 @@ class Json:
         return_original: bool = False,
     ) -> dict | tuple[dict, dict]:
         """Read JSON files, ignoring comments.\n
-        -------------------------------------------------------------------------
+        ------------------------------------------------------------------
         If only `comment_start` is found at the beginning of an item,
         the whole item is counted as a comment and therefore ignored.
         If `comment_start` and `comment_end` are found inside an item,
         the the section from `comment_start` to `comment_end` is ignored.
-        If `return_original` is set to `True`, the original JSON is returned
+        If `return_original` is true, the original JSON is returned
         additionally. (returns: `[processed_json, original_json]`)"""
         if not json_file.endswith(".json"):
             json_file += ".json"
-        file_path = File.extend_or_make_path(json_file, prefer_base_dir=True)
+        file_path = Path.extend_or_make(json_file, prefer_script_dir=True)
+        if file_path is None:
+            raise FileNotFoundError(f"Could not find JSON file: {json_file}")
         with open(file_path, "r") as f:
             content = f.read()
         try:
             data = _json.loads(content)
         except _json.JSONDecodeError as e:
             raise ValueError(f"Error parsing JSON in '{file_path}': {str(e)}")
-        processed_data = Data.remove_comments(data, comment_start, comment_end)
+        processed_data = dict(Data.remove_comments(data, comment_start, comment_end))
         if not processed_data:
             raise ValueError(f"The JSON file '{file_path}' is empty or contains only comments.")
         return (processed_data, data) if return_original else processed_data
 
     @staticmethod
     def create(
-        content: dict,
-        new_file: str = "config",
+        json_file: str,
+        data: dict,
         indent: int = 2,
         compactness: int = 1,
         force: bool = False,
     ) -> str:
-        if not new_file.endswith(".json"):
-            new_file += ".json"
-        file_path = File.extend_or_make_path(new_file, prefer_base_dir=True)
-        if _os.path.exists(file_path) and not force:
-            with open(file_path, "r", encoding="utf-8") as existing_f:
-                existing_content = _json.load(existing_f)
-            if existing_content == content:
-                raise FileExistsError("Already created this file. (nothing changed)")
-            raise FileExistsError("File already exists.")
-        with open(file_path, "w", encoding="utf-8") as f:
-            f.write(Data.to_str(content, indent, compactness, as_json=True))
-        full_path = _os.path.abspath(file_path)
-        return full_path
+        """Create a nicely formatted JSON file from a dictionary.\n
+        ----------------------------------------------------------------------
+        The `indent` is the amount of spaces to use for indentation.\n
+        The `compactness` can be `0`, `1` or `2` and indicates how compact
+        the data should be formatted (see `Data.to_str()`).\n
+        The function will throw a `FileExistsError` if a file with the same
+        name already exists and a `SameContentFileExistsError` if a file with
+        the same name and content already exists.
+        To always overwrite the file, set the `force` parameter to `True`."""
+        if not json_file.endswith(".json"):
+            json_file += ".json"
+        file_path = Path.extend_or_make(json_file, prefer_script_dir=True)
+        File.create(
+            file=file_path,
+            content=Data.to_str(data, indent, compactness, as_json=True),
+            force=force,
+        )
+        return file_path
 
     @staticmethod
     def update(
         json_file: str,
-        update_values: str | list[str],
+        update_values: dict[str, Any],
         comment_start: str = ">>",
         comment_end: str = "<<",
-        sep: tuple[str, str] = ("->", "::"),
+        path_sep: str = "->",
     ) -> None:
-        """Function to easily update single/multiple values inside JSON files.\n
-        ------------------------------------------------------------------------------------------------------
-        The param `json_file` is the path to the JSON file or just the name of the JSON file to be updated.\n
-        ------------------------------------------------------------------------------------------------------
-        The param `update_values` is a sort of path (or a list of paths) to the value/s to be updated, with
-        the new value at the end of the path.\n
-        In this example:
+        """Update single/multiple values inside JSON files, without needing to know the rest of the data.\n
+        ----------------------------------------------------------------------------------------------------
+        The `update_values` parameter is a dictionary, where the keys are the paths to the data to update,
+        and the values are the new values to set.\n
+        Example: For this JSON data:
         ```python
         {
-            'healthy': {
-                'fruit': ['apples', 'bananas', 'oranges'],
-                'vegetables': ['carrots', 'broccoli', 'celery']
-            }
+            "healthy": {
+                "fruit": ["apples", "bananas", "oranges"],
+                "vegetables": ["carrots", "broccoli", "celery"]
+            }
         }
         ```
-        ... if you want to change the value of `'apples'` to `'strawberries'`, `update_values` would be
-        `healthy->fruit->apples::strawberries` or if you don't know that the value to update is `apples` you
-        can also use the position of the value, so `healthy->fruit->0::strawberries`.\n
-        ⇾ If the path from `update_values` doesn't exist, it will be created.\n
-        ------------------------------------------------------------------------------------------------------
+        ... the `update_values` dictionary could look like this:
+        ```python
+        {
+            # CHANGE VALUE "apples" TO "strawberries"
+            "healthy->fruit->0": "strawberries",
+            # CHANGE VALUE UNDER KEY "vegetables" TO [1, 2, 3]
+            "healthy->vegetables": [1, 2, 3]
+        }
+        ```
+        In this example, if you want to change the value of `"apples"`, you can use `healthy->fruit->apples`
+        as the value-path. If you don't know that the first list item is `"apples"`, you can use the items
+        list index inside the value-path, so `healthy->fruit->0`.\n
+        ⇾ If the given value-path doesn't exist, it will be created.\n
+        -----------------------------------------------------------------------------------------------------
         If only `comment_start` is found at the beginning of an item, the whole item is counted as a comment
-        and therefore ignored. If `comment_start` and `comment_end` are found inside an item, the the section
-        from `comment_start` to `comment_end` is ignored."""
-        if isinstance(update_values, str):
-            update_values = [update_values]
-        valid_entries = [(parts[0].strip(), parts[1]) for update_value in update_values
-                         if len(parts := update_value.split(str(sep[1]).strip())) == 2]
-        value_paths, new_values = zip(*valid_entries) if valid_entries else ([], [])
+        and therefore completely ignored. If `comment_start` and `comment_end` are found inside an item, the
+        section from `comment_start` to `comment_end` is counted as a comment and ignored."""
         processed_data, data = Json.read(json_file, comment_start, comment_end, return_original=True)
-        update = []
-        for value_path, new_value in zip(value_paths, new_values):
-            path_id = Data.get_path_id(processed_data, value_path)
-            update.append(f"{path_id}::{new_value}")
-        updated = Data.set_value_by_path_id(data, update)
-        Json.create(updated, json_file, force=True)
+
+        def create_nested_path(data_obj: dict, path_keys: list[str], value: Any) -> dict:
+            current = data_obj
+            last_idx = len(path_keys) - 1
+            for i, key in enumerate(path_keys):
+                if i == last_idx:
+                    if isinstance(current, dict):
+                        current[key] = value
+                    elif isinstance(current, list) and key.isdigit():
+                        idx = int(key)
+                        while len(current) <= idx:
+                            current.append(None)
+                        current[idx] = value
+                    else:
+                        raise TypeError(f"Cannot set key '{key}' on {type(current).__name__}")
+                else:
+                    next_key = path_keys[i + 1]
+                    if isinstance(current, dict):
+                        if key not in current:
+                            current[key] = [] if next_key.isdigit() else {}
+                        current = current[key]
+                    elif isinstance(current, list) and key.isdigit():
+                        idx = int(key)
+                        while len(current) <= idx:
+                            current.append(None)
+                        if current[idx] is None:
+                            current[idx] = [] if next_key.isdigit() else {}
+                        current = current[idx]
+                    else:
+                        raise TypeError(f"Cannot navigate through {type(current).__name__}")
+            return data_obj
+
+        update = {}
+        for value_path, new_value in update_values.items():
+            try:
+                path_id = Data.get_path_id(
+                    data=processed_data,
+                    value_paths=value_path,
+                    path_sep=path_sep,
+                )
+                if path_id is not None:
+                    update[path_id] = new_value
+                else:
+                    keys = value_path.split(path_sep)
+                    keys = value_path.split(path_sep)
+                    data = create_nested_path(data, keys, new_value)
+            except Exception:
+                keys = value_path.split(path_sep)
+                data = create_nested_path(data, keys, new_value)
+        if "update" in locals() and update:
+            data = Data.set_value_by_path_id(data, update)
+        Json.create(json_file=json_file, data=dict(data), force=True)
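A short sketch of the reworked `Json` API: `create()` now takes the file name first and the data as a dictionary, and `update()` takes a `{value_path: new_value}` mapping (path components separated by `path_sep`, `"->"` by default) instead of `"path::value"` strings. The file name and contents below are illustrative:

```python
# Sketch only: "example_config" and its contents are made up for illustration.
from xulbux.xx_json import Json

Json.create("example_config", {"healthy": {"fruit": ["apples", "bananas", "oranges"]}}, force=True)

# Keys are value-paths, values are the new values; missing paths are created.
Json.update("example_config", {
    "healthy->fruit->0": "strawberries",
    "healthy->vegetables": ["carrots", "broccoli"],
})

print(Json.read("example_config"))
```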
xulbux/xx_path.py CHANGED
@@ -6,42 +6,60 @@ import sys as _sys
 import os as _os
 
 
-# YAPF: disable
-class ProcessNotFoundError(Exception):
-    pass
+class PathNotFoundError(FileNotFoundError):
+    ...
+
 
 class _Cwd:
+
     def __get__(self, obj, owner=None):
         return _os.getcwd()
 
+
 class _ScriptDir:
+
     def __get__(self, obj, owner=None):
         if getattr(_sys, "frozen", False):
             base_path = _os.path.dirname(_sys.executable)
         else:
             main_module = _sys.modules["__main__"]
-            if hasattr(main_module, "__file__"):
+            if hasattr(main_module, "__file__") and main_module.__file__ is not None:
                 base_path = _os.path.dirname(_os.path.abspath(main_module.__file__))
-            elif (hasattr(main_module, "__spec__") and main_module.__spec__
-                  and getattr(main_module.__spec__, "origin", None)):
+            elif (hasattr(main_module, "__spec__") and main_module.__spec__ and main_module.__spec__.origin is not None):
                 base_path = _os.path.dirname(_os.path.abspath(main_module.__spec__.origin))
             else:
                 raise RuntimeError("Can only get base directory if accessed from a file.")
         return base_path
-# YAPF: enable
 
 
 class Path:
 
-    cwd: str = _Cwd()
+    cwd: str = _Cwd() # type: ignore[assignment]
     """The path to the current working directory."""
-    script_dir: str = _ScriptDir()
+    script_dir: str = _ScriptDir() # type: ignore[assignment]
     """The path to the directory of the current script."""
 
     @staticmethod
-    def extend(path: str, search_in: str | list[str] = None, raise_error: bool = False, correct_path: bool = False) -> str:
-        if path in (None, ""):
-            return path
+    def extend(
+        rel_path: str,
+        search_in: Optional[str | list[str]] = None,
+        raise_error: bool = False,
+        use_closest_match: bool = False,
+    ) -> Optional[str]:
+        """Tries to locate and extend a relative path to an absolute path.\n
+        --------------------------------------------------------------------------------
+        If the `rel_path` couldn't be located in predefined directories, it will be
+        searched in the `search_in` directory/s. If the `rel_path` is still not found,
+        it returns `None` or raises a `PathNotFoundError` if `raise_error` is true.\n
+        --------------------------------------------------------------------------------
+        If `use_closest_match` is true, it is possible to have typos in the `search_in`
+        path/s and it will still find the file if it is under one of those paths."""
+        if rel_path in (None, ""):
+            if raise_error:
+                raise PathNotFoundError("Path is empty.")
+            return None
+        elif _os.path.isabs(rel_path):
+            return rel_path
 
         def get_closest_match(dir: str, part: str) -> Optional[str]:
             try:
@@ -56,7 +74,7 @@ class Path:
             for part in parts:
                 if _os.path.isfile(current):
                     return current
-                closest_match = get_closest_match(current, part) if correct_path else part
+                closest_match = get_closest_match(current, part) if use_closest_match else part
                 current = _os.path.join(current, closest_match) if closest_match else None
                 if current is None:
                     return None
@@ -71,20 +89,20 @@ class Path:
                     parts[i] = _os.environ[parts[i].upper()]
             return "".join(parts)
 
-        path = _os.path.normpath(expand_env_path(path))
-        if _os.path.isabs(path):
-            drive, rel_path = _os.path.splitdrive(path)
+        rel_path = _os.path.normpath(expand_env_path(rel_path))
+        if _os.path.isabs(rel_path):
+            drive, rel_path = _os.path.splitdrive(rel_path)
             rel_path = rel_path.lstrip(_os.sep)
-            search_dirs = (drive + _os.sep) if drive else [_os.sep]
+            search_dirs = [(drive + _os.sep) if drive else _os.sep]
         else:
-            rel_path = path.lstrip(_os.sep)
+            rel_path = rel_path.lstrip(_os.sep)
             base_dir = Path.script_dir
-            search_dirs = (
+            search_dirs = [
                 _os.getcwd(),
                 base_dir,
                 _os.path.expanduser("~"),
                 _tempfile.gettempdir(),
-            )
+            ]
 
         if search_in:
             search_dirs.extend([search_in] if isinstance(search_in, str) else search_in)
@@ -92,19 +110,51 @@ class Path:
             full_path = _os.path.join(search_dir, rel_path)
             if _os.path.exists(full_path):
                 return full_path
-            match = find_path(search_dir, path_parts) if correct_path else None
+            match = find_path(search_dir, path_parts) if use_closest_match else None
             if match:
                 return match
         if raise_error:
-            raise FileNotFoundError(f"Path '{path}' not found in specified directories.")
-        return _os.path.join(search_dirs[0], rel_path)
+            raise PathNotFoundError(f"Path '{rel_path}' not found in specified directories.")
+        return None
+
+    @staticmethod
+    def extend_or_make(
+        rel_path: str,
+        search_in: Optional[str | list[str]] = None,
+        prefer_script_dir: bool = True,
+        use_closest_match: bool = False,
+    ) -> str:
+        """Tries to locate and extend a relative path to an absolute path, and if the `rel_path`
+        couldn't be located, it generates a path, as if it was located.\n
+        -----------------------------------------------------------------------------------------
+        If the `rel_path` couldn't be located in predefined directories, it will be searched in
+        the `search_in` directory/s. If the `rel_path` is still not found, it will makes a path
+        that points to where the `rel_path` would be in the script directory, even though the
+        `rel_path` doesn't exist there. If `prefer_script_dir` is false, it will instead make a
+        path that points to where the `rel_path` would be in the CWD.\n
+        -----------------------------------------------------------------------------------------
+        If `use_closest_match` is true, it is possible to have typos in the `search_in` path/s
+        and it will still find the file if it is under one of those paths."""
+        try:
+            return str(Path.extend(rel_path, search_in, raise_error=True, use_closest_match=use_closest_match))
+        except PathNotFoundError:
+            normalized_rel_path = _os.path.normpath(rel_path)
+            base = Path.script_dir if prefer_script_dir else _os.getcwd()
+            return _os.path.join(base, normalized_rel_path)
 
     @staticmethod
     def remove(path: str, only_content: bool = False) -> None:
+        """Removes the directory or the directory's content at the specified path.\n
+        -----------------------------------------------------------------------------
+        Normally it removes the directory and its content, but if `only_content` is
+        true, the directory is kept and only its contents are removed."""
         if not _os.path.exists(path):
             return None
         if not only_content:
-            _shutil.rmtree(path)
+            if _os.path.isfile(path) or _os.path.islink(path):
+                _os.unlink(path)
+            elif _os.path.isdir(path):
+                _shutil.rmtree(path)
         elif _os.path.isdir(path):
             for filename in _os.listdir(path):
                 file_path = _os.path.join(path, filename)
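And a sketch of the companion helpers `Path.extend_or_make()` and the extended `Path.remove()`, again with illustrative paths:

```python
# Sketch only: the paths are illustrative.
import os
from xulbux.xx_path import Path

# Always returns a usable absolute path: the located one, or a generated path under
# the script directory (or under the CWD when prefer_script_dir=False).
config_path = Path.extend_or_make("config/settings.json", prefer_script_dir=True)
print(config_path)

# remove() now also unlinks single files and symlinks, not only directories;
# only_content=True keeps the directory itself and clears what is inside it.
cache_dir = os.path.join(Path.script_dir, "cache")
Path.remove(cache_dir, only_content=True)
```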
xulbux/xx_regex.py CHANGED
@@ -1,11 +1,12 @@
-"""
-Very useful and complicated (generated) regex patterns.
-"""
-
+from typing import TypeAlias, Optional
 import regex as _rx
 import re as _re
 
 
+Pattern: TypeAlias = _re.Pattern[str] | _rx.Pattern[str]
+Match: TypeAlias = _re.Match[str] | _rx.Match[str]
+
+
 class Regex:
 
     @staticmethod
@@ -67,7 +68,7 @@ class Regex:
         return rf'({"" if is_group else "?:"}(?:(?!{ignore_pattern}).)*(?:(?!{Regex.outside_strings(disallowed_pattern)}).)*)'
 
     @staticmethod
-    def func_call(func_name: str = None) -> str:
+    def func_call(func_name: Optional[str] = None) -> str:
         """Match a function call, and get back two groups:
         1. function name
         2. the function's arguments\n
@@ -93,7 +94,7 @@ class Regex:
         - `r` 0-255 (int: red)
         - `g` 0-255 (int: green)
         - `b` 0-255 (int: blue)
-        - `a` 0-1 (float: opacity)\n
+        - `a` 0.0-1.0 (float: opacity)\n
         ----------------------------------------------------------------------------
         If the `fix_sep` is set to nothing, any char that is not a letter or number
         can be used to separate the RGBA values, including just a space.
@@ -126,7 +127,7 @@ class Regex:
         - `h` 0-360 (int: hue)
         - `s` 0-100 (int: saturation)
         - `l` 0-100 (int: lightness)
-        - `a` 0-1 (float: opacity)\n
+        - `a` 0.0-1.0 (float: opacity)\n
         ----------------------------------------------------------------------------
         If the `fix_sep` is set to nothing, any char that is not a letter or number
         can be used to separate the HSLA values, including just a space.
@@ -134,9 +135,9 @@ class Regex:
             fix_sep = r"[^0-9A-Z]"
         else:
             fix_sep = _re.escape(fix_sep)
-        hsl_part = rf"""((?:0*(?:360|3[0-5][0-9]|[12][0-9][0-9]|[1-9]?[0-9])))
-        (?:\s*{fix_sep}\s*)((?:0*(?:100|[1-9][0-9]|[0-9])))
-        (?:\s*{fix_sep}\s*)((?:0*(?:100|[1-9][0-9]|[0-9])))"""
+        hsl_part = rf"""((?:0*(?:360|3[0-5][0-9]|[12][0-9][0-9]|[1-9]?[0-9]))(?:\s*°)?)
+        (?:\s*{fix_sep}\s*)((?:0*(?:100|[1-9][0-9]|[0-9]))(?:\s*%)?)
+        (?:\s*{fix_sep}\s*)((?:0*(?:100|[1-9][0-9]|[0-9]))(?:\s*%)?)"""
         return (
             rf"""(?ix)
             (?:hsl|hsla)?\s*(?:\(?\s*{hsl_part}
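The `Pattern` and `Match` aliases introduced at the top of this module are what `xx_format_codes` now imports in place of `typing.Pattern`. A minimal sketch of using them for annotations that accept both the standard `re` engine and the third-party `regex` engine:

```python
# Sketch only: demonstrates the new Pattern / Match type aliases.
import re
import regex
from xulbux.xx_regex import Pattern, Match

def first_token(pattern: Pattern, text: str) -> str | None:
    # Works with patterns compiled by either `re` or `regex`.
    match: Match | None = pattern.search(text)
    return match.group(0) if match else None

print(first_token(re.compile(r"\w+"), "hello world"))
print(first_token(regex.compile(r"\w+"), "hello world"))
```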