intersphinx-registry 0.2511.7__py2.py3-none-any.whl → 0.2601.5__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,720 @@
+ import re
+ from pathlib import Path
+ from typing import Iterable, NamedTuple, Optional, Tuple, Union
+
+ from .reverse_lookup import ReverseLookupResult, _do_reverse_lookup
+ from .utils import _are_dependencies_available, _compress_user_path
+
+ # ANSI escape sequences
+ RED = "\033[31m"
+ RED_BG = "\033[41;37m"
+ GREEN = "\033[32m"
+ GREEN_BG = "\033[42;30m"
+ BLUE = "\033[34m"
+ CYAN = "\033[36m"
+ YELLOW = "\033[33m"
+ YELLOW_BG = "\033[43;30m"
+ RESET = "\033[0m"
+
+
+ class Unchanged(str):
+     """Token representing unchanged text."""
+
+
+ class Removed(str):
+     """Token representing removed text."""
+
+
+ class Added(str):
+     """Token representing added text."""
+
+
+ Token = Union[Unchanged, Removed, Added]
+
+ # OutputReplacementContext: tuple of three token sequences
+ # (context_before_tokens, target_line_tokens, context_after_tokens)
+ OutputReplacementContext = Tuple[
+     Tuple[Token, ...],
+     Tuple[Token, ...],
+     Tuple[Token, ...],
+ ]
+
+
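Because the three token classes are plain `str` subclasses, a token stream is just a tuple of typed string fragments. A minimal sketch of a one-line diff in this representation (values chosen for illustration):

    # The line "See os docs" with "os" replaced by ":py:mod:`os`",
    # expressed as old/new token streams (illustrative values only):
    old = (Unchanged("See "), Removed("os"), Unchanged(" docs"))
    new = (Unchanged("See "), Added(":py:mod:`os`"), Unchanged(" docs"))
    # Note: str equality ignores the subclass, so Removed("x") == Added("x");
    # comparisons that care about token kind must check type() explicitly.
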
+ def normalise_token_stream(tokens: Tuple[Token, ...]) -> Tuple[Token, ...]:
+     """
+     Normalize a token stream by:
+     1. Filtering out empty tokens
+     2. Merging consecutive tokens of the same type
+
+     This is useful for comparing token streams that are semantically
+     equivalent but may have different tokenization.
+     """
+     if not tokens:
+         return (Unchanged(""),)
+
+     normalized = []
+     current_type = None
+     current_content = ""
+
+     for token in tokens:
+         if not str(token):
+             continue
+
+         token_type = type(token)
+
+         if token_type == current_type:
+             current_content += str(token)
+         else:
+             if current_type is not None:
+                 normalized.append(current_type(current_content))
+             current_type = token_type
+             current_content = str(token)
+
+     if current_type is not None:
+         normalized.append(current_type(current_content))
+
+     if not normalized:
+         return (Unchanged(""),)
+
+     return tuple(normalized)
+
+
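A quick sketch of the normalization described above, assuming the definitions in this file are in scope: empty tokens are dropped and adjacent tokens of the same type are merged.

    tokens = (Unchanged(""), Removed("https://a"), Removed("/b"), Unchanged(" end"))
    out = normalise_token_stream(tokens)
    assert [type(t) for t in out] == [Removed, Unchanged]
    assert [str(t) for t in out] == ["https://a/b", " end"]
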
+ def _make_line_tokens(
+     line: str,
+     start: int,
+     end: int,
+     old_text: str,
+     new_text: str,
+ ) -> tuple[tuple[Token, ...], tuple[Token, ...]]:
+     """
+     Build old and new token lists for a line where text is replaced.
+
+     Returns (old_tokens, new_tokens) where:
+     - old_tokens: [Unchanged(prefix), Removed(old_text), Unchanged(suffix)]
+     - new_tokens: [Unchanged(prefix), Added(new_text), Unchanged(suffix)]
+     """
+     old_tokens: list[Token] = []
+     new_tokens: list[Token] = []
+
+     if start > 0:
+         prefix = Unchanged(line[:start])
+         old_tokens.append(prefix)
+         new_tokens.append(prefix)
+
+     old_tokens.append(Removed(old_text))
+     new_tokens.append(Added(new_text))
+
+     if end < len(line):
+         suffix = Unchanged(line[end:])
+         old_tokens.append(suffix)
+         new_tokens.append(suffix)
+
+     return tuple(old_tokens), tuple(new_tokens)
+
+
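For example (a sketch; the replacement text is invented for illustration):

    line = "See https://docs.python.org/3/library/os.html for details"
    url = "https://docs.python.org/3/library/os.html"
    start = line.index(url)
    old, new = _make_line_tokens(line, start, start + len(url), url, ":py:mod:`os`")
    # old == (Unchanged("See "), Removed(url), Unchanged(" for details"))
    # new == (Unchanged("See "), Added(":py:mod:`os`"), Unchanged(" for details"))
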
+ class UrlReplacement(NamedTuple):
+     """
+     Information about a URL replacement in an RST file.
+
+     Attributes
+     ----------
+     line_num : int
+         Line number where the URL was found
+     matched_url : str
+         The URL that was matched in the text and replaced
+     context_old : OutputReplacementContext
+         The old context (before replacement), as a tokenized
+         (context_before_tokens, target_line_tokens, context_after_tokens) triple
+     context_new : OutputReplacementContext
+         The new context (after replacement), as a tokenized
+         (context_before_tokens, target_line_tokens, context_after_tokens) triple
+     inventory_url : Optional[str]
+         The inventory URL used for the lookup, or None
+     """
+
+     line_num: int
+     matched_url: str
+     context_old: OutputReplacementContext
+     context_new: OutputReplacementContext
+     inventory_url: Optional[str]
+
+
+ class ReplacementContext(NamedTuple):
+     """
+     Context for a replacement operation.
+
+     Attributes
+     ----------
+     context_before : str
+         The context line before the target line. Empty string if there is no context before.
+     target_line : str
+         The target line to be replaced.
+     context_after : str
+         The context line after the target line. Empty string if there is no context after.
+     """
+
+     context_before: str
+     target_line: str
+     context_after: str
+
+
+ def _normalize_replacement(
+     context_old: OutputReplacementContext,
+     context_new: OutputReplacementContext,
+ ) -> tuple[OutputReplacementContext, OutputReplacementContext]:
+     """Normalize token streams in both contexts."""
+     ctx_before_old, target_old, ctx_after_old = context_old
+     ctx_before_new, target_new, ctx_after_new = context_new
+
+     normalized_old = (
+         normalise_token_stream(ctx_before_old),
+         normalise_token_stream(target_old),
+         normalise_token_stream(ctx_after_old),
+     )
+     normalized_new = (
+         normalise_token_stream(ctx_before_new),
+         normalise_token_stream(target_new),
+         normalise_token_stream(ctx_after_new),
+     )
+     return normalized_old, normalized_new
+
+
+ def _make_replacement(
+     context_before: str,
+     context_after: str,
+     target_tokens_old: tuple[Token, ...],
+     target_tokens_new: tuple[Token, ...],
+ ) -> tuple[OutputReplacementContext, OutputReplacementContext]:
+     """Build normalized replacement contexts with unchanged context lines."""
+     ctx = (Unchanged(context_before),), (Unchanged(context_after),)
+     context_old: OutputReplacementContext = (ctx[0], target_tokens_old, ctx[1])
+     context_new: OutputReplacementContext = (ctx[0], target_tokens_new, ctx[1])
+     return _normalize_replacement(context_old, context_new)
+
+
+ # TODO:
+ # Instead of having a before[Unchanged|Removed] and an after[Unchanged|Added]
+ # stream, we could probably use a single diff[Unchanged|Removed|Added] stream,
+ # but that is more complicated to code; if you think you can do it, please
+ # feel free. We can likely find the indices of URL(s), backticks and so on,
+ # then split and do the replacement on the token stream.
+ def _compute_full_link_replacement(
+     original_line: str,
+     context_before_str: str,
+     context_after_str: str,
+     lookup_result: ReverseLookupResult,
+     target: str,
+ ) -> Optional[tuple[OutputReplacementContext, OutputReplacementContext]]:
+     """
+     Handle full RST link replacement.
+
+     Handles cases like:
+     - `` `setuptools documentation <https://setuptools.pypa.io/en/latest/setuptools.html>`__ ``
+     - `` `link text <URL>`_ ``
+     - `` See `link text <URL>`__ for details ``
+
+     Returns None if the pattern doesn't match.
+     """
+     full_link_match = re.search(
+         r"`([^`<>]+)\s*<" + re.escape(lookup_result.url) + r"[.,;:!?)]*>`__?",
+         original_line,
+     )
+     if not full_link_match:
+         return None
+
+     link_text = full_link_match.group(1).strip()
+     original_text = full_link_match.group(0)
+     start_idx = full_link_match.start()
+     end_idx = full_link_match.end()
+     domain_prefix = f":{lookup_result.domain}:"
+
+     # Find where the URL is within the matched link text
+     url_match_in_link = re.search(
+         r"<" + re.escape(lookup_result.url) + r"[.,;:!?)]*>", original_text
+     )
+
+     old_tokens: list[Token] = []
+     new_tokens: list[Token] = []
+
+     # Add prefix (text before the link)
+     if start_idx > 0:
+         prefix = Unchanged(original_line[:start_idx])
+         old_tokens.append(prefix)
+         new_tokens.append(prefix)
+
+     if url_match_in_link:
+         # Fine-grained diff: show URL replacement within the link structure
+         url_start = url_match_in_link.start()
+         url_end = url_match_in_link.end()
+
+         space_before = (
+             " " if url_start > 0 and original_text[url_start - 1] == " " else ""
+         )
+
+         # Old: `link text <URL>`_ (show parts around URL as unchanged)
+         old_tokens.append(
+             Unchanged(original_text[: url_start + 1])
+         )  # up to and including <
+         old_tokens.append(
+             Removed(original_text[url_start + 1 : url_end - 1])
+         )  # URL only
+         old_tokens.append(Unchanged(original_text[url_end - 1 : url_end]))  # >
+
+         # Handle trailing backtick and underscores
+         after_angle = original_text[url_end:]
+         if after_angle.startswith("`"):
+             old_tokens.append(Unchanged(after_angle[0]))  # `
+             if len(after_angle) > 1:
+                 old_tokens.append(Removed(after_angle[1:]))  # underscores
+         else:
+             old_tokens.append(Removed(after_angle))
+
+         # New: :domain:`link text <target>`
+         new_tokens.append(Added(domain_prefix))
+         new_tokens.append(Unchanged("`" + link_text + space_before + "<"))
+         new_tokens.append(Added(target))
+         new_tokens.append(Unchanged(">`"))
+     else:
+         # Simple replacement: whole link becomes role reference
+         old_tokens.append(Removed(original_text))
+         new_tokens.append(Added(domain_prefix))
+         new_tokens.append(Unchanged("`" + link_text))
+         new_tokens.append(Added(f" <{target}>`"))
+
+     # Add suffix (text after the link)
+     if end_idx < len(original_line):
+         suffix = Unchanged(original_line[end_idx:])
+         old_tokens.append(suffix)
+         new_tokens.append(suffix)
+
+     return _make_replacement(
+         context_before_str, context_after_str, tuple(old_tokens), tuple(new_tokens)
+     )
+
+
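The pattern above anchors on the exact looked-up URL (plus optional trailing punctuation) inside an inline RST link. A small demonstration of what it accepts, using an invented URL and line:

    url = "https://setuptools.pypa.io/en/latest/setuptools.html"
    line = f"See the `setuptools documentation <{url}>`__ for details."
    m = re.search(r"`([^`<>]+)\s*<" + re.escape(url) + r"[.,;:!?)]*>`__?", line)
    assert m is not None and m.group(1).strip() == "setuptools documentation"
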
+ def _compute_simple_link_replacement(
+     original_line: str,
+     context_before_str: str,
+     context_after_str: str,
+     lookup_result: ReverseLookupResult,
+     target: str,
+     rst_ref: str,
+ ) -> Optional[tuple[OutputReplacementContext, OutputReplacementContext]]:
+     """
+     Handle simple RST link replacement (may span multiple lines).
+
+     Handles cases like:
+     - `` `<https://docs.python.org/3/library/os.html>`_ ``
+     - `` `<https://docs.python.org/3/library/os.html>`__ ``
+     - Multi-line: context_before has `` `link text ``, target_line has `` <URL>`_ ``
+
+     Returns None if the pattern doesn't match.
+     """
+     simple_link_match = re.search(
+         r"`?<" + re.escape(lookup_result.url) + r"[.,;:!?)]*>`__?", original_line
+     )
+     if not simple_link_match:
+         return None
+
+     original_text = simple_link_match.group(0)
+     start_idx = simple_link_match.start()
+     end_idx = simple_link_match.end()
+
+     # Check if link text is on the previous line (multi-line case)
+     link_text_match = re.search(r"`([^`]+)$", context_before_str)
+     if link_text_match:
+         link_text = link_text_match.group(1).strip()
+
+         # Build context_before tokens (with changes)
+         ctx_old, ctx_new = _make_line_tokens(
+             context_before_str,
+             link_text_match.start(),
+             link_text_match.end(),
+             context_before_str[link_text_match.start() : link_text_match.end()],
+             f":{lookup_result.domain}:`{link_text}",
+         )
+
+         # Build target line tokens
+         target_old, target_new = _make_line_tokens(
+             original_line, start_idx, end_idx, original_text, f"<{target}>`"
+         )
+
+         ctx_after = (Unchanged(context_after_str),)
+         context_old: OutputReplacementContext = (ctx_old, target_old, ctx_after)
+         context_new: OutputReplacementContext = (ctx_new, target_new, ctx_after)
+         return _normalize_replacement(context_old, context_new)
+
+     # Simple case: just replace the link on the target line
+     old_tokens, new_tokens = _make_line_tokens(
+         original_line, start_idx, end_idx, original_text, rst_ref
+     )
+     return _make_replacement(
+         context_before_str, context_after_str, old_tokens, new_tokens
+     )
+
+
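The multi-line detection above keys on an unclosed backtick at the end of the previous line; a sketch of both halves of that case (URL and text invented):

    url = "https://docs.python.org/3/library/os.html"
    before = "as described in the `Python os docs"
    target_line = f"<{url}>`_ and elsewhere"
    m = re.search(r"`([^`]+)$", before)
    assert m is not None and m.group(1) == "Python os docs"
    m2 = re.search(r"`?<" + re.escape(url) + r"[.,;:!?)]*>`__?", target_line)
    assert m2 is not None
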
+ def _compute_url_replacement(
+     original_line: str,
+     context_before_str: str,
+     context_after_str: str,
+     lookup_result: ReverseLookupResult,
+     rst_ref: str,
+ ) -> tuple[OutputReplacementContext, OutputReplacementContext]:
+     """
+     Handle plain URL replacement in text.
+
+     Handles cases like:
+     - `` See https://docs.python.org/3/library/os.html for details ``
+     - `` Check https://docs.python.org/3/library/os.html. ``
+     - `` https://docs.python.org/3/library/os.html is the documentation ``
+
+     This is the fallback case when no RST link pattern matches.
+     """
+     url_match = re.search(re.escape(lookup_result.url) + r"[.,;:!?)]*", original_line)
+     if url_match:
+         old_tokens, new_tokens = _make_line_tokens(
+             original_line,
+             url_match.start(),
+             url_match.end(),
+             lookup_result.url,
+             rst_ref,
+         )
+     else:
+         old_tokens = (Removed(original_line),)
+         new_tokens = (Added(rst_ref),)
+
+     return _make_replacement(
+         context_before_str, context_after_str, old_tokens, new_tokens
+     )
+
+
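Note that the trailing-punctuation class makes the match extend past the URL over any sentence punctuation the URL scanner may have captured, e.g.:

    url = "https://docs.python.org/3/library/os.html"
    m = re.search(re.escape(url) + r"[.,;:!?)]*", f"Check {url}.")
    assert m is not None and m.group(0) == url + "."
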
+ def _compute_replacement(
+     original: ReplacementContext,
+     lookup_result: ReverseLookupResult,
+ ) -> tuple[OutputReplacementContext, OutputReplacementContext]:
+     """
+     Compute the replacement line(s) for a URL in an RST file.
+
+     Tries patterns in order: full RST link, simple link, plain URL.
+     Returns a (context_old, context_new) tuple of normalized token streams.
+     """
+     target = f"{lookup_result.package}:{lookup_result.rst_entry}"
+     rst_ref = f":{lookup_result.domain}:`{target}`"
+     ctx_before, line, ctx_after = original
+
+     result = _compute_full_link_replacement(
+         line, ctx_before, ctx_after, lookup_result, target
+     )
+     if result is not None:
+         return result
+
+     result = _compute_simple_link_replacement(
+         line, ctx_before, ctx_after, lookup_result, target, rst_ref
+     )
+     if result is not None:
+         return result
+
+     return _compute_url_replacement(line, ctx_before, ctx_after, lookup_result, rst_ref)
+
+
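An end-to-end sketch, assuming ReverseLookupResult can be constructed by keyword from the fields used above (url, domain, package, rst_entry, inventory_url); all values are hypothetical:

    lookup = ReverseLookupResult(
        url="https://docs.python.org/3/library/os.html",
        domain="py",
        package="python",
        rst_entry="os",
        inventory_url="https://docs.python.org/3/objects.inv",
    )
    ctx = ReplacementContext("", "See https://docs.python.org/3/library/os.html.", "")
    context_old, context_new = _compute_replacement(ctx, lookup)
    # No backtick link in the line, so this falls through to the plain-URL
    # case and the new target line carries an Added(":py:`python:os`") token.
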
+ def process_one_file(rst_file: Path):
+     """
+     Process a single RST file to find URLs that can be replaced with Sphinx references.
+
+     Yields UrlReplacement objects for each URL found that has a corresponding
+     inventory entry. The file is read, URLs are extracted and looked up, and
+     replacements are computed with token-based diffs.
+
+     Parameters
+     ----------
+     rst_file : Path
+         Path to the RST file to process
+
+     Yields
+     ------
+     UrlReplacement
+         Information about each URL replacement found in the file
+     """
+     url_pattern = re.compile(r'https?://[^\s<>"{}|\\^`\[\]]+')
+     url_locations: dict[str, list[tuple[int, str]]] = {}
+     all_lines = []
+
+     try:
+         with open(rst_file, "r", encoding="utf-8") as f:
+             all_lines = f.readlines()
+         for line_num, line in enumerate(all_lines, start=1):
+             urls = url_pattern.findall(line)
+             for url in urls:
+                 url = url.rstrip(".,;:!?)")
+                 url_locations.setdefault(url, []).append((line_num, line.rstrip()))
+     except Exception:
+         return
+
+     if not url_locations:
+         return
+
+     urls = list(url_locations.keys())
+     results = _do_reverse_lookup(urls)
+
+     replaceable = [
+         (result, url_locations[result.url])
+         for result in results
+         if result.package is not None
+     ]
+
+     if not replaceable:
+         return
+
+     for lookup_result, line_infos in replaceable:
+         for line_num, original_line in line_infos:
+             context_before = all_lines[line_num - 2].rstrip() if line_num > 1 else ""
+             context_after = (
+                 all_lines[line_num].rstrip() if line_num < len(all_lines) else ""
+             )
+
+             if lookup_result.rst_entry is not None:
+                 context_old, context_new = _compute_replacement(
+                     ReplacementContext(context_before, original_line, context_after),
+                     lookup_result,
+                 )
+                 yield UrlReplacement(
+                     line_num,
+                     lookup_result.url,
+                     context_old,
+                     context_new,
+                     lookup_result.inventory_url,
+                 )
+             else:
+                 # No replacement available - create empty contexts
+                 empty_context: OutputReplacementContext = (
+                     (Unchanged(context_before),),
+                     (Unchanged(original_line),),
+                     (Unchanged(context_after),),
+                 )
+                 yield UrlReplacement(
+                     line_num,
+                     lookup_result.url,
+                     empty_context,
+                     empty_context,
+                     lookup_result.inventory_url,
+                 )
+
+
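Since this is a generator, typical consumption is plain iteration; a sketch with a hypothetical path:

    for rep in process_one_file(Path("docs/index.rst")):  # hypothetical file
        print(rep.line_num, rep.matched_url, rep.inventory_url)
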
+ def format_tokens(
+     tokens: Tuple[Token, ...],
+     prefix: str = "",
+     defaultFG: str = "",
+     AddedHighlight: str = "",
+     RemovedHighlight: str = "",
+ ) -> str:
+     """
+     Format tokens with appropriate colors.
+
+     Parameters
+     ----------
+     tokens : Tuple[Token, ...]
+         Sequence of tokens to format
+     prefix : str
+         Prefix to add before the tokens (e.g., " - " or " ")
+     defaultFG : str
+         Default foreground color for the line (e.g., RED, GREEN, BLUE).
+         Empty string for no color.
+     AddedHighlight : str
+         Highlight style for Added tokens (e.g., GREEN_BG for background).
+         Empty string for no highlight.
+     RemovedHighlight : str
+         Highlight style for Removed tokens (e.g., RED_BG for background).
+         Empty string for no highlight.
+
+     Returns
+     -------
+     str
+         Formatted string with ANSI color codes
+     """
+     if not tokens:
+         return ""
+
+     output = ""
+     # Add prefix with defaultFG color
+     output += defaultFG + prefix + RESET + defaultFG
+
+     for token in tokens:
+         if isinstance(token, Added):
+             output += f"{AddedHighlight}{str(token)}{RESET}{defaultFG}"
+         elif isinstance(token, Removed):
+             output += f"{RemovedHighlight}{str(token)}{RESET}{defaultFG}"
+         else:  # Unchanged
+             output += str(token)
+
+     output += RESET
+
+     return output
+
+
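For instance, rendering an added-line token stream in the same style search_one_file uses below (a sketch; the resulting string carries ANSI escapes):

    line = format_tokens(
        (Unchanged("See "), Added(":py:mod:`os`"), Unchanged(" docs")),
        prefix=" + ",
        defaultFG=GREEN,
        AddedHighlight=GREEN_BG,
    )
    print(line)  # " + See :py:mod:`os` docs" in green, role highlighted on GREEN_BG
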
+ def rev_search(directory: str) -> None:
+     """
+     Search for URLs in .rst files that can be replaced with Sphinx references.
+
+     Parameters
+     ----------
+     directory : str
+         Path to a directory to search for .rst files, or a single .rst file path
+     """
+     if not _are_dependencies_available():
+         return
+
+     directory_path = Path(directory)
+     if directory_path.is_file():
+         rst_files: Iterable[Path] = (
+             [directory_path] if directory_path.suffix == ".rst" else []
+         )
+     else:
+         rst_files = directory_path.rglob("*.rst")
+
+     for rst_file in rst_files:
+         search_one_file(rst_file)
+
+
+ def search_one_file(rst_file: Path) -> None:
+     """
+     Search a single RST file and print formatted diffs for replaceable URLs.
+
+     Processes the file to find replaceable URLs and prints a formatted diff
+     showing the original and replacement text with color-coded token highlights.
+
+     Parameters
+     ----------
+     rst_file : Path
+         Path to the RST file to search and display results for
+     """
+     display_path = _compress_user_path(str(rst_file))
+     for replacement in process_one_file(rst_file):
+         print(f"{CYAN}{display_path}:{replacement.line_num}{RESET}")
+
+         if replacement.context_old == replacement.context_new:
+             ctx_before_tokens_old, target_tokens_old, ctx_after_tokens_old = (
+                 replacement.context_old
+             )
+
+             if ctx_before_tokens_old:
+                 print(format_tokens(ctx_before_tokens_old, " "))
+
+             print(format_tokens(target_tokens_old, " ? ", defaultFG=BLUE))
+
+             if ctx_after_tokens_old:
+                 print(format_tokens(ctx_after_tokens_old, " "))
+
+             print()
+             continue
+
+         ctx_before_tokens_old, target_tokens_old, ctx_after_tokens_old = (
+             replacement.context_old
+         )
+         ctx_before_tokens_new, target_tokens_new, ctx_after_tokens_new = (
+             replacement.context_new
+         )
+
+         if ctx_before_tokens_old or ctx_before_tokens_new:
+             if ctx_before_tokens_old != ctx_before_tokens_new:
+                 print(
+                     format_tokens(
+                         ctx_before_tokens_old,
+                         " - ",
+                         defaultFG=RED,
+                         RemovedHighlight=RED_BG,
+                     )
+                 )
+             else:
+                 print(format_tokens(ctx_before_tokens_old, " "))
+         print(
+             format_tokens(
+                 target_tokens_old, " - ", defaultFG=RED, RemovedHighlight=RED_BG
+             )
+         )
+
+         if replacement.inventory_url:
+             old_text = "".join(
+                 str(token)
+                 for token in target_tokens_old
+                 if not isinstance(token, Added)
+             )
+             if replacement.inventory_url not in old_text:
+                 https_pos = old_text.find("https://")
+                 if https_pos >= 0:
+                     spaces = " " * (7 + https_pos)
+                 else:
+                     spaces = " "
+
+                 matched_url = replacement.matched_url
+                 inventory_url = replacement.inventory_url
+
+                 prefix_len = 0
+                 while (
+                     prefix_len < len(matched_url)
+                     and prefix_len < len(inventory_url)
+                     and matched_url[prefix_len] == inventory_url[prefix_len]
+                 ):
+                     prefix_len += 1
+
+                 suffix_len = 0
+                 while (
+                     suffix_len < len(matched_url) - prefix_len
+                     and suffix_len < len(inventory_url) - prefix_len
+                     and matched_url[-(suffix_len + 1)]
+                     == inventory_url[-(suffix_len + 1)]
+                 ):
+                     suffix_len += 1
+
+                 if prefix_len > 0 or suffix_len > 0:
+                     prefix = inventory_url[:prefix_len]
+                     middle = (
+                         inventory_url[prefix_len : len(inventory_url) - suffix_len]
+                         if suffix_len > 0
+                         else inventory_url[prefix_len:]
+                     )
+                     suffix = inventory_url[-suffix_len:] if suffix_len > 0 else ""
+                     highlighted_url = f"{YELLOW}{prefix}{YELLOW_BG}{middle}{RESET}{YELLOW}{suffix}{RESET}"
+                 else:
+                     highlighted_url = f"{YELLOW_BG}{inventory_url}{RESET}"
+
+                 print(f"{spaces}{highlighted_url}")
+
+         if ctx_before_tokens_old or ctx_before_tokens_new:
+             if ctx_before_tokens_old != ctx_before_tokens_new:
+                 print(
+                     format_tokens(
+                         ctx_before_tokens_new,
+                         " + ",
+                         defaultFG=GREEN,
+                         AddedHighlight=GREEN_BG,
+                         RemovedHighlight=RED_BG,
+                     )
+                 )
+         print(
+             format_tokens(
+                 target_tokens_new,
+                 " + ",
+                 defaultFG=GREEN,
+                 AddedHighlight=GREEN_BG,
+                 RemovedHighlight=RED_BG,
+             )
+         )
+
+         if ctx_after_tokens_old or ctx_after_tokens_new:
+             if ctx_after_tokens_old != ctx_after_tokens_new:
+                 print(
+                     format_tokens(
+                         ctx_after_tokens_old,
+                         " - ",
+                         defaultFG=RED,
+                         RemovedHighlight=RED_BG,
+                     )
+                 )
+                 print(
+                     format_tokens(
+                         ctx_after_tokens_new,
+                         " + ",
+                         defaultFG=GREEN,
+                         AddedHighlight=GREEN_BG,
+                         RemovedHighlight=RED_BG,
+                     )
+                 )
+             else:
+                 print(format_tokens(ctx_after_tokens_old, " "))
+
+         print()
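
Putting it together, a minimal driver for the public entry point (paths hypothetical):

    rev_search("docs/")        # recurse over every .rst under docs/
    rev_search("README.rst")   # or check a single .rst file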