pyDiffTools 0.1.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,188 @@
+ """Rearrange TeX files according to a plan file."""
+
+ from __future__ import annotations
+
+ import argparse
+ import pathlib
+ import re
+ import sys
+ from typing import List, Sequence
+
+
+ S_CMD = re.compile(
+     r"""^s
+     /((?:\\.|[^/])*)   # pattern
+     /((?:\\.|[^/])*)   # replacement
+     /([gi]{0,2})$      # flags
+     """,
+     re.VERBOSE,
+ )
+
+
+ def unescape_slashes(s: str) -> str:
+     return s.replace(r"\/", "/")
+
+
+ def apply_s_cmd(line: str, cmd: str):
+     m = S_CMD.match(cmd)
+     if not m:
+         raise ValueError(f"Bad s/// command: {cmd!r}")
+     pat_raw, rep_raw, flags = m.groups()
+     pat = unescape_slashes(pat_raw)
+     rep = unescape_slashes(rep_raw)
+     reflags = re.IGNORECASE if "i" in flags else 0
+     count = 0 if "g" in flags else 1
+     # Use re.subn so callers can tell whether the substitution matched any text.
+     result, replaced = re.subn(pat, rep, line, count=count, flags=reflags)
+     return result, replaced
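For orientation, the s/// grammar above is Perl-flavored, and apply_s_cmd returns the rewritten line plus a match count. A quick sketch of its behavior (input values illustrative, not from the package):

    apply_s_cmd("hello world", "s/o/0/g")   # -> ("hell0 w0rld", 2): "g" replaces every match
    apply_s_cmd("hello world", "s/o/0/")    # -> ("hell0 world", 1): first match only
    apply_s_cmd("hello", "s/x/y/")          # -> ("hello", 0): callers treat 0 as "no match"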
+
+
+ def parse_plan_line(s: str):
+     s = s.rstrip("\n")
+     if not s.strip():
+         return ("comment", "")
+     stripped = s.lstrip()
+     if stripped.startswith("#"):
+         return ("comment", stripped[1:].strip())
+     line = stripped
+     idx = 0
+     while idx < len(line) and not line[idx].isspace():
+         idx += 1
+     line_token = line[:idx]
+     # Allow plans to specify either a single source line or an inclusive range like "10-20".
+     if "-" in line_token:
+         range_parts = line_token.split("-")
+         if len(range_parts) != 2:
+             raise ValueError(f"Bad line range in plan: {line_token!r}")
+         start = int(range_parts[0])
+         end = int(range_parts[1])
+         if end < start:
+             raise ValueError(f"Line range out of order: {line_token!r}")
+         line_numbers = list(range(start, end + 1))
+     else:
+         line_numbers = [int(line_token)]
+     scratch = False
+     s_cmds = []
+     pos = idx
+     # Walk the remainder of the line manually so spaces inside s/// survive tokenization.
+     while pos < len(line):
+         while pos < len(line) and line[pos].isspace():
+             pos += 1
+         if pos >= len(line):
+             break
+         if line[pos : pos + 7].lower() == "scratch" and (
+             pos + 7 == len(line) or line[pos + 7].isspace()
+         ):
+             scratch = True
+             pos += 7
+             continue
+         if line.startswith("s/", pos):
+             start = pos
+             pos += 2
+             slash_count = 0
+             while pos < len(line):
+                 ch = line[pos]
+                 if ch == "\\":
+                     pos += 2
+                     continue
+                 if ch == "/":
+                     slash_count += 1
+                     pos += 1
+                     if slash_count == 2:
+                         while pos < len(line) and line[pos] in "gi":
+                             pos += 1
+                         cmd = line[start:pos]
+                         if not S_CMD.match(cmd):
+                             raise ValueError(f"Bad token in plan: {cmd!r}")
+                         s_cmds.append(cmd)
+                         break
+                     continue
+                 pos += 1
+             else:
+                 raise ValueError(f"Unterminated s/// command in plan: {line!r}")
+             continue
+         end = pos
+         while end < len(line) and not line[end].isspace():
+             end += 1
+         bad = line[pos:end]
+         raise ValueError(f"Bad token in plan: {bad!r}")
+     return ("directive", line_numbers, scratch, s_cmds)
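So a plan line is a line number or an inclusive range, an optional scratch keyword, and any number of s/// tokens. A sketch of typical inputs and the tuples they parse to (inputs illustrative):

    parse_plan_line("# move intro up")   # -> ("comment", "move intro up")
    parse_plan_line("7 s/foo/bar/g")     # -> ("directive", [7], False, ["s/foo/bar/g"])
    parse_plan_line("10-12 scratch")     # -> ("directive", [10, 11, 12], True, [])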
+
+
+ def run(argv: Sequence[str] | None = None) -> None:
+     parser = argparse.ArgumentParser(
+         prog="pydifftools rearrange", description="Rearrange TeX file lines"
+     )
+     parser.add_argument(
+         "tex_path", type=pathlib.Path, help="TeX file (modified in place)"
+     )
+     parser.add_argument(
+         "plan_path", type=pathlib.Path, help="Rearrangement plan file (.rrng)"
+     )
+     args = parser.parse_args(argv)
+
+     tex_lines = args.tex_path.read_text(encoding="utf-8").splitlines(keepends=False)
+     n = len(tex_lines)
+
+     items = []
+     used: List[int] = []
+     with args.plan_path.open("r", encoding="utf-8") as f:
+         for raw in f:
+             kind, *rest = parse_plan_line(raw)
+             if kind == "comment":
+                 items.append(("comment", rest[0]))
+             else:
+                 line_numbers, scratch, s_cmds = rest
+                 for ln in line_numbers:
+                     if not (1 <= ln <= n):
+                         raise ValueError(f"Line number {ln} out of range 1..{n}")
+                 items.append(("directive", line_numbers, scratch, s_cmds))
+                 used.extend(line_numbers)
+
+     missing = sorted(set(range(1, n + 1)) - set(used))
+     dupes = sorted([x for x in set(used) if used.count(x) > 1])
+     if missing:
+         sys.exit(f"ERROR: Plan missing lines: {missing}")
+     if dupes:
+         sys.exit(f"ERROR: Plan duplicated lines: {dupes}")
+
+     out_main: List[str] = []
+     out_scratch: List[str] = []
+     for it in items:
+         if it[0] == "comment":
+             out_main.append("% " + it[1])
+             continue
+         _, line_numbers, scratch, s_cmds = it
+         # Track how many replacements each substitution performs across the
+         # referenced lines so we can surface errors when a pattern never
+         # matches. This mirrors the Perl s/// behavior the tool emulates.
+         replaced_counts = [0] * len(s_cmds)
+         for ln in line_numbers:
+             mod = tex_lines[ln - 1]
+             for idx, cmd in enumerate(s_cmds):
+                 mod, replaced = apply_s_cmd(mod, cmd)
+                 replaced_counts[idx] += replaced
+             if scratch:
+                 out_scratch.append("% " + mod)
+             else:
+                 out_main.append(mod)
+         for idx, replaced in enumerate(replaced_counts):
+             if replaced == 0:
+                 raise ValueError(
+                     f"Pattern {s_cmds[idx]!r} not found in lines {line_numbers}"
+                 )
+
+     if out_scratch:
+         out_main.append("% --- SCRATCH ---")
+         out_main.extend(out_scratch)
+
+     with args.tex_path.open("w", encoding="utf-8") as f:
+         f.write("\n".join(out_main) + "\n")
+
+
+ def main() -> None:
+     run()
+
+
+ if __name__ == "__main__":
+     main()
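Putting the pieces together: run requires the plan to mention every source line exactly once, emits kept lines in plan order, and parks scratch lines as comments under a trailing marker. A hypothetical plan for a three-line chapter.tex (file names assumed for illustration):

    # chapter.rrng
    3
    1 s/draft/final/
    2 scratch

Invoked as `pydifftools rearrange chapter.tex chapter.rrng` (entry-point name inferred from the argparse prog above), this writes line 3 first, then line 1 with "draft" replaced by "final", and finally line 2 as a commented line after "% --- SCRATCH ---".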
@@ -0,0 +1,80 @@
+ import re
+ import os
+ from pathlib import Path
+
+
+ def replace_acros(pathtofile):
+     clean_unused = False
+     if pathtofile.parent.resolve() == Path.cwd():
+         clean_unused = True
+         print(
+             "I'm going to clean out the unused acronyms, since"
+             " myacronyms.sty lives in the current directory"
+         )
+     else:
+         print(pathtofile.parent, "not equal to", Path.cwd())
+     acro_restr = r"\\newacronym(?:\[[^\[\]]*\])?{(\w+)}{(\w+)}{.*}"
+     acro_re = re.compile(acro_restr)
+     regex_replacements = []
+     with open(pathtofile, "r", encoding="utf-8") as fp:
+         for line in fp:
+             m = acro_re.match(line)
+             if m:
+                 inside, toreplace = m.groups()
+                 regex_replacements.append(
+                     (r"\b" + toreplace + r"\b", r"\\gls{" + inside + "}")
+                 )
+                 regex_replacements.append(
+                     (r"\b" + toreplace + r"s\b", r"\\glspl{" + inside + "}")
+                 )
+
+     # compare by file name below, since os.listdir yields plain strings
+     def replace_in_files(regex_replacements, exclude=[pathtofile.name, "ms.tex"]):
+         """Replace all occurrences of the given regex patterns with their
+         corresponding replacements in every .tex file in the current
+         directory, except the files named in ``exclude``. Returns a list of
+         the patterns that were never used.
+         """
+         # Get the current working directory
+         directory = os.getcwd()
+
+         unused_patterns = set(regex for regex, _ in regex_replacements)
+
+         # Loop over all files in the directory
+         for filename in os.listdir(directory):
+             # Only operate on .tex files and exclude the specified files
+             if filename.endswith(".tex") and filename not in exclude:
+                 filepath = os.path.join(directory, filename)
+                 with open(filepath, "r", encoding="utf-8") as file:
+                     filedata = file.read()
+
+                 # Perform the replacements
+                 for regex, replacement in regex_replacements:
+                     if re.search(regex, filedata):
+                         filedata = re.sub(regex, replacement, filedata)
+                         unused_patterns.discard(regex)
+                     elif re.search(replacement, filedata):
+                         unused_patterns.discard(regex)
+                     elif re.search(replacement.replace("gls", "Gls"), filedata):
+                         unused_patterns.discard(regex)
+                 # capitalize \gls when it starts a sentence
+                 filedata = re.sub(r"(\.\s*\n\s*)\\gls", r"\1\\Gls", filedata)
+                 # Write the file out again
+                 with open(filepath, "w", encoding="utf-8") as file:
+                     file.write(filedata)
+         return list(unused_patterns)
+
+     unused = replace_in_files(regex_replacements)
+     print("unused acronyms:", unused)
+     if clean_unused:
+         # {{{ get rid of unused
+         with open(pathtofile, "r", encoding="utf-8") as fp:
+             filedata = fp.read()
+         for regex in unused:
+             fullreg = r"\\newacronym(?:\[[^\[\]]*\])?{(\w+)}{" + regex + "}{.*}"
+             if re.search(fullreg, filedata):
+                 filedata = re.sub(fullreg, "", filedata)
+             elif regex.endswith(r"s\b"):
+                 # the plural pattern has no \newacronym line of its own; the
+                 # singular form already removed (or will remove) the definition
+                 continue
+             else:
+                 raise ValueError("couldn't find " + fullreg)
+         filedata = re.sub("\n+", "\n", filedata)
+         with open(pathtofile, "w", encoding="utf-8") as file:
+             file.write(filedata)
+         # }}}
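Concretely: assuming myacronyms.sty contains a definition like this hypothetical entry,

    \newacronym{nmr}{NMR}{nuclear magnetic resonance}

the sibling .tex files get "NMR" rewritten to \gls{nmr} and "NMRs" to \glspl{nmr}, with \gls promoted to \Gls after a sentence-ending period; abbreviations that never matched are reported as unused, and their definitions are dropped from the .sty file when it lives in the current directory.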
@@ -0,0 +1,73 @@
+ import re
+ from .comment_functions import (
+     generate_alphabetnumber,
+     matchingbrackets,
+     comment_definition,
+ )
+
+
+ def tex_sepcomments(texfile):
+     if texfile[-4:] == ".tex":
+         base_filename = texfile[:-4]
+         print("yes, a tex file called", base_filename + ".tex")
+     else:
+         raise RuntimeError("not a tex file??")
+     with open(base_filename + ".tex", "r", encoding="utf-8") as fp:
+         content = fp.read()
+     comment_string = "%%NUMBER OF COMMENTS"
+     a = content.find(comment_string)
+     if a >= 0:  # find returns -1 when the marker is absent
+         b = content.find("\n", a + len(comment_string))
+         num_matches = int(content[a + len(comment_string) : b])
+         print("found %d comments already!" % num_matches)
+     else:
+         num_matches = 0
+         content = content.replace(
+             r"\begin{document}",
+             "\\include{%s_comments}\n\\begin{document}" % base_filename,
+         )
+     comment_collection = ""
+     names = ["pdfcommentAG", "pdfcommentAB", "pdfcommentJF", "pdfcommentG"]
+     name_list = "(" + "|".join(names) + ")"
+     comment_re = re.compile(r"\\%s([\[\{])" % (name_list))
+     # use search rather than match: the next comment can sit anywhere in the body
+     thismatch = comment_re.search(content)
+     while thismatch:
+         before = content[: thismatch.start()]
+         thisname, bracket_type = thismatch.groups()
+         a, b = matchingbrackets(content, thismatch.start(), bracket_type)
+         if bracket_type == "[":
+             highlight = content[a + 1 : b]
+             a, b = matchingbrackets(content, b, "{")
+             print("found comment:", content[a : b + 1])
+             comment = content[a + 1 : b]
+             endpoint = b
+         else:
+             highlight = ""
+             comment = content[a + 1 : b]
+             endpoint = b
+         after = content[endpoint + 1 :]
+         # replace and search again
+         print("type of num_matches", num_matches, type(num_matches))
+         envstring = thisname + generate_alphabetnumber(num_matches)
+         print("%s--------------------" % envstring)
+         print("highlight:\n", highlight)
+         print("comment:\n", comment)
+         print("--------------------")
+         print("before replace:\n", content[thismatch.start() : endpoint])
+         content = before + r"\%s" % envstring + "{" + highlight + "}" + after
+         print("--------------------")
+         print("after replace:\n", content[thismatch.start() : endpoint])
+         print("--------------------")
+         comment_collection += comment_definition(envstring, thisname, comment)
+         thismatch = comment_re.search(content)
+         num_matches += 1
+     with open(base_filename + ".tex", "w", encoding="utf-8") as fp:
+         comment_string = "%%%%NUMBER OF COMMENTS %d\n" % num_matches
+         content = content.replace(
+             r"\begin{document}", comment_string + "\\begin{document}"
+         )
+         fp.write(content)
+     with open(base_filename + "_comments.tex", "w", encoding="utf-8") as fp:
+         fp.write(comment_collection)
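To see the rewrite concretely, and assuming generate_alphabetnumber(0) yields "a": an inline annotation such as this hypothetical one,

    \pdfcommentAG[awkward phrase]{rewrite this sentence}

collapses to \pdfcommentAGa{awkward phrase} in the .tex source, while the definition of \pdfcommentAGa (built by comment_definition from the comment body) is collected into basename_comments.tex, which the \include added before \begin{document} pulls back in.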
@@ -0,0 +1,213 @@
+ import os
+ import re
+
+
+ def run(arguments):
+     # compiled here but currently unused in this routine
+     needsspace_re = re.compile(r'(\w[):;"-\.,!}]*) +([^%}~])')
+     # the trailing "\n*" keeps newlines attached to the captured text so the
+     # markers don't gobble them up
+     allmarkers_re = re.compile(r"(.*?)%(\[ORIG|ORIG\]\[NEW|NEW\])%(.*\n*)")
+
+     def parse_line(
+         orig_log_text, new_log_text, log_in_orig, log_in_new, text_to_parse
+     ):
+         match = allmarkers_re.match(text_to_parse)
+         if match:
+             textone, typeofmarker, texttwo = match.groups()
+             if typeofmarker == "NEW]":
+                 print(
+                     "found a matching line, current status is (",
+                     log_in_orig,
+                     ",",
+                     log_in_new,
+                     ")",
+                 )
+                 if log_in_new and not log_in_orig:
+                     switchto = (True, True)  # log in orig, log in new
+                     print(
+                         "in text:\n",
+                         text_to_parse,
+                         "\n--> encountered an end marker, switching to",
+                         switchto,
+                     )
+                 else:
+                     raise ValueError(
+                         "I encountered a %NEW]% marker, but I was not leaving"
+                         " orig alone and logging only in new (False, True),"
+                         " but rather "
+                         + repr(log_in_orig)
+                         + ","
+                         + repr(log_in_new)
+                         + ":\n"
+                         + text_to_parse
+                     )
+             elif typeofmarker == "ORIG][NEW":
+                 print(
+                     "found a matching line, current status is (",
+                     log_in_orig,
+                     ",",
+                     log_in_new,
+                     ")",
+                 )
+                 if log_in_orig and not log_in_new:
+                     switchto = (False, True)  # log in orig, log in new
+                     print(
+                         "in text:\n",
+                         text_to_parse,
+                         "\n--> encountered a middle marker, switching to",
+                         switchto,
+                     )
+                 else:
+                     raise ValueError(
+                         "I encountered an %ORIG][NEW% marker, but I was not"
+                         " logging only in orig, but rather "
+                         + repr(log_in_orig)
+                         + ","
+                         + repr(log_in_new)
+                         + ":\n"
+                         + text_to_parse
+                     )
+             elif typeofmarker == "[ORIG":
+                 print(
+                     "found a matching line, current status is (",
+                     log_in_orig,
+                     ",",
+                     log_in_new,
+                     ")",
+                 )
+                 if log_in_new and log_in_orig:
+                     switchto = (True, False)  # log in orig, log in new
+                     print(
+                         "in text:\n",
+                         text_to_parse,
+                         "\n--> encountered an %[ORIG% marker, switching to",
+                         switchto,
+                     )
+                 else:
+                     raise ValueError(
+                         "I encountered an %[ORIG% marker, but I was not"
+                         " logging in both orig and new, but rather "
+                         + repr(log_in_orig)
+                         + ","
+                         + repr(log_in_new)
+                         + ":\n"
+                         + text_to_parse
+                     )
+         else:
+             textone = text_to_parse
+             texttwo = None
+         # regardless, dump the first group into the current bin
+         if log_in_orig:
+             orig_log_text += textone
+         if log_in_new:
+             new_log_text += textone
+         if match:
+             log_in_orig, log_in_new = switchto
+             print("yes, I am actually switching the binning")
+             print("so that status is (", log_in_orig, ",", log_in_new, ")")
+         # if there is a second group (a separator was found), parse the rest
+         # of the line in the new state
+         if texttwo is not None:
+             orig_log_text, new_log_text, log_in_orig, log_in_new = parse_line(
+                 orig_log_text, new_log_text, log_in_orig, log_in_new, texttwo
+             )
+         return orig_log_text, new_log_text, log_in_orig, log_in_new
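The markers walk a two-flag state machine: %[ORIG% is only legal while both sides are logging and switches to orig-only, %ORIG][NEW% switches from orig-only to new-only, and %NEW]% restores both. For a hypothetical one-word-diff line such as

    the result was %[ORIG%good%ORIG][NEW%excellent%NEW]% overall

the orig side accumulates "the result was good overall" and the new side "the result was excellent overall".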
+     fp = open(arguments[0], "r")
+     text_list = []
+     print("opened", arguments[0])
+     log_in_orig = True
+     log_in_new = True
+     head_title = None
+     new_title = None
+     # {{{ pull out just the part between the document text
+     orig_textlist = []
+     new_textlist = []
+     j = 0
+     for thisline in fp:
+         if j == 0:
+             if thisline[:12] == "%ONEWORDDIFF":
+                 print("found %ONEWORDDIFF marker, title is:")
+                 head_title = "HEAD\n"
+                 new_title = thisline[14:]
+                 print(new_title)
+                 this_is_a_onewordfile = True
+             else:
+                 this_is_a_onewordfile = False
+             if this_is_a_onewordfile:
+                 print("I found this to be a oneword format file")
+             else:
+                 print("I did not find this to be a oneword format file")
+         if this_is_a_onewordfile:  # this path is only taken for oneword files
+             # new processing for oneworddiff: check for separators and switch
+             # bins as parse_line dictates
+             orig_log_text, new_log_text, log_in_orig, log_in_new = parse_line(
+                 "", "", log_in_orig, log_in_new, thisline
+             )
+             if len(orig_log_text) > 0:
+                 orig_textlist.append(orig_log_text)
+             if len(new_log_text) > 0:
+                 new_textlist.append(new_log_text)
+         else:
+             # standard processing
+             if thisline[-11:] == "%FIRSTSET%\n":
+                 # lines tagged %FIRSTSET% are ordinary text, never conflict markers
+                 if log_in_orig:
+                     orig_textlist.append(thisline)
+                 if log_in_new:
+                     new_textlist.append(thisline)
+             else:
+                 if thisline[:7] == "<<<<<<<":
+                     log_in_orig = True
+                     log_in_new = False
+                     if head_title is None:
+                         # for the first marker, store the title
+                         head_title = thisline[7:]
+                     elif thisline[7:] == head_title:
+                         pass
+                     else:
+                         raise ValueError(
+                             "I don't understand line %d, which seems to give"
+                             " an inconsistent head title. It gave:\n%s\nvs"
+                             " expected:\n%s" % (j, thisline[7:], head_title)
+                         )
+                 elif thisline[:7] == ">>>>>>>":
+                     log_in_orig = True
+                     log_in_new = True
+                     if new_title is None:
+                         # for the first marker, store the title
+                         new_title = thisline[7:]
+                     elif thisline[7:] == new_title:
+                         pass
+                     else:
+                         raise ValueError(
+                             "I don't understand line %d, which seems to give"
+                             " an inconsistent new title. It gave:\n%s\nvs"
+                             " expected:\n%s" % (j, thisline[7:], new_title)
+                         )
+                 elif thisline[:7] == "=======":
+                     log_in_orig = False
+                     log_in_new = True
+                 else:
+                     if log_in_orig:
+                         orig_textlist.append(thisline)
+                     if log_in_new:
+                         new_textlist.append(thisline)
+         j += 1
+     if this_is_a_onewordfile:
+         print("I found this to be a oneword format file")
+     else:
+         print("I did not find this to be a oneword format file")
+     fp.close()
+     # {{{ write out the result
+     # build output names by plain concatenation: re.sub(r"(.*)", ...) would
+     # also substitute the empty match at end-of-string on Python 3.7+,
+     # doubling the suffix
+     newfile = arguments[0] + ".merge_new"
+     fp = open(newfile, "w")
+     new_textlist = [
+         "#%%%%%BRANCH TITLE (This side is saved): " + new_title
+     ] + new_textlist
+     fp.write("".join(new_textlist))
+     fp.close()
+     newfile = arguments[0] + ".merge_head"
+     fp = open(newfile, "w")
+     orig_textlist = ["#%%%%%BRANCH TITLE: " + head_title] + orig_textlist
+     fp.write("".join(orig_textlist))
+     fp.close()
+     # }}}
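In the non-oneword path the routine splits ordinary git conflict markers instead. Given a hypothetical input notes.txt containing

    <<<<<<< HEAD
    old sentence
    =======
    new sentence
    >>>>>>> feature
    shared text

it writes notes.txt.merge_head holding "old sentence" plus "shared text", and notes.txt.merge_new holding "new sentence" plus "shared text", each prefixed with the BRANCH TITLE header built from the marker titles.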
@@ -0,0 +1,69 @@
+ import re
+ from .comment_functions import matchingbrackets
+
+
+ def tex_unsepcomments(texfile):
+     if texfile[-12:] == "_sepcomm.tex":
+         base_filename = texfile[:-12]
+         print("yes, a _sepcomm.tex file called", base_filename + "_sepcomm.tex")
+     elif texfile[-4:] == ".tex":
+         base_filename = texfile[:-4]
+         print("yes, a .tex file called", base_filename + ".tex")
+     else:
+         raise RuntimeError("not a tex file??")
+     with open(base_filename + "_comments.tex", "r", encoding="utf-8") as fp:
+         content = fp.read()
+     names = ["pdfcommentAG", "pdfcommentAB", "pdfcommentJF", "pdfcommentG"]
+     list_of_names = []
+     list_of_commands = []
+     list_of_content = []
+     for j in range(0, len(names)):
+         comment_def_re = re.compile(
+             r"\\newcommand\{\\(%s[a-z]+)\}\{" % (names[j])
+         )
+         for m in comment_def_re.finditer(content):
+             print("found %d:%d" % (m.start(), m.end()), m.groups()[0])
+             print("text:", content[m.start() : m.end()])
+             a, b = matchingbrackets(content, m.end() - 1, "{")
+             print("found from %d to %d" % (a, b))
+             print("-----content------")
+             print(content[a : b + 1])
+             print("------------------")
+             list_of_names.append(names[j])
+             list_of_commands.append(m.groups()[0])
+             list_of_content.append(content[a + 1 : b])
+     with open(texfile, "r", encoding="utf-8") as fp:
+         content = fp.read()
+     for j in range(0, len(list_of_names)):
+         a = content.find("\\%s" % list_of_commands[j])
+         if a < 0:
+             raise RuntimeError(
+                 "couldn't find command \\%s" % list_of_commands[j]
+             )
+         else:
+             starthighlight, b = matchingbrackets(content, a, "{")
+             highlight = content[starthighlight + 1 : b]
+             print(
+                 "found command \\%s with highlight {%s} and going to add"
+                 " content {%s}"
+                 % (list_of_commands[j], highlight, list_of_content[j])
+             )
+             if len(highlight) > 0:
+                 content = (
+                     content[:a]
+                     + "\\%s[%s]{%s}"
+                     % (list_of_names[j], highlight, list_of_content[j])
+                     + content[b + 1 :]
+                 )
+             else:
+                 content = (
+                     content[:a]
+                     + "\\%s{%s}" % (list_of_names[j], list_of_content[j])
+                     + content[b + 1 :]
+                 )
+     content = re.sub("\\\\include{%s_comments}\n" % base_filename, "", content)
+     content = re.sub("%%NUMBER OF COMMENTS [0-9]+ *\n", "", content)
+     with open(base_filename + ".tex", "w", encoding="utf-8") as fp:
+         fp.write(content)
+     print("wrote output to", base_filename + ".tex")
@@ -0,0 +1,31 @@
+ import importlib.metadata
+ import json
+ import urllib.request
+ import urllib.error
+
+
+ def check_update(package_name, timeout=1):
+     """Return the installed version, the latest version, and whether an
+     update exists. Network errors and malformed responses are ignored so
+     this never blocks the CLI when the network is down.
+     """
+     current_version = None
+     try:
+         current_version = importlib.metadata.version(package_name)
+     except importlib.metadata.PackageNotFoundError:
+         return None, None, False
+
+     url = f"https://pypi.org/pypi/{package_name}/json"
+     try:
+         with urllib.request.urlopen(url, timeout=timeout) as response:
+             data = json.load(response)
+     except (urllib.error.URLError, TimeoutError, OSError, ValueError):
+         return current_version, None, False
+
+     if "info" not in data or "version" not in data["info"]:
+         return current_version, None, False
+
+     return (
+         current_version,
+         data["info"]["version"],
+         current_version != data["info"]["version"],
+     )
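A sketch of call-site usage, following the (current, latest, update_available) return shape above (the package name is illustrative):

    current, latest, stale = check_update("pydifftools")
    if stale and latest is not None:
        print(f"update available: {current} -> {latest}")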