chgksuite 0.25.1__py3-none-any.whl → 0.26.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -9,6 +9,7 @@ from chgksuite.composer.composer_common import BaseExporter, backtick_replace, p
9
9
  from pptx import Presentation
10
10
  from pptx.dml.color import RGBColor
11
11
  from pptx.enum.text import MSO_AUTO_SIZE, MSO_VERTICAL_ANCHOR, PP_ALIGN
12
+ from pptx.enum.lang import MSO_LANGUAGE_ID
12
13
  from pptx.util import Inches as PptxInches
13
14
  from pptx.util import Pt as PptxPt
14
15
 
@@ -55,6 +56,8 @@ class PptxExporter(BaseExporter):
55
56
  color = self.c["textbox"].get("color")
56
57
  if color:
57
58
  r.font.color.rgb = RGBColor(*color)
59
+ if self.args.language == "ru":
60
+ r.font.language_id = MSO_LANGUAGE_ID.RUSSIAN
58
61
  return r
59
62
 
60
63
  def pptx_format(self, el, para, tf, slide, replace_spaces=True):
@@ -179,9 +182,8 @@ class PptxExporter(BaseExporter):
179
182
  r = self.add_run(
180
183
  p,
181
184
  self._replace_no_break(
182
- ("\n" if add_line_break else "")
185
+ ("\n\n" if add_line_break else "")
183
186
  + self.pptx_process_text(editor[0][1])
184
- + "\n"
185
187
  ),
186
188
  )
187
189
  add_line_break = True
@@ -190,9 +192,8 @@ class PptxExporter(BaseExporter):
190
192
  r = self.add_run(
191
193
  p,
192
194
  self._replace_no_break(
193
- ("\n" if add_line_break else "")
195
+ ("\n\n" if add_line_break else "")
194
196
  + self.pptx_process_text(element[1])
195
- + "\n"
196
197
  ),
197
198
  )
198
199
  add_line_break = True
@@ -437,6 +438,8 @@ class PptxExporter(BaseExporter):
437
438
  fields = ["answer"]
438
439
  if q.get("zachet") and self.c.get("add_zachet"):
439
440
  fields.append("zachet")
441
+ if q.get("nezachet") and self.c.get("add_zachet"):
442
+ fields.append("nezachet")
440
443
  if self.c["add_comment"] and "comment" in q:
441
444
  fields.append("comment")
442
445
  if self.c.get("add_source") and "source" in q:
@@ -461,6 +464,10 @@ class PptxExporter(BaseExporter):
461
464
  text_for_size += "\n" + self.recursive_join(
462
465
  self.pptx_process_text(q["zachet"], strip_brackets=False)
463
466
  )
467
+ if q.get("nezachet") and self.c.get("add_zachet"):
468
+ text_for_size += "\n" + self.recursive_join(
469
+ self.pptx_process_text(q["nezachet"], strip_brackets=False)
470
+ )
464
471
  if q.get("comment") and self.c.get("add_comment"):
465
472
  text_for_size += "\n" + self.recursive_join(
466
473
  self.pptx_process_text(q["comment"])
@@ -487,6 +494,11 @@ class PptxExporter(BaseExporter):
487
494
  r = self.add_run(p, f"\n{self.get_label(q, 'zachet')}: ")
488
495
  r.font.bold = True
489
496
  self.pptx_format(zachet_text, p, tf, slide)
497
+ if q.get("nezachet") and self.c.get("add_zachet"):
498
+ nezachet_text = self.pptx_process_text(q["nezachet"], strip_brackets=False)
499
+ r = self.add_run(p, f"\n{self.get_label(q, 'nezachet')}: ")
500
+ r.font.bold = True
501
+ self.pptx_format(nezachet_text, p, tf, slide)
490
502
  if self.c["add_comment"] and "comment" in q:
491
503
  comment_text = self.pptx_process_text(q["comment"])
492
504
  r = self.add_run(p, f"\n{self.get_label(q, 'comment')}: ")
@@ -16,6 +16,11 @@ from chgksuite.composer.composer_common import BaseExporter, parseimg
16
16
  from chgksuite.composer.telegram_bot import run_bot_in_thread
17
17
 
18
18
 
19
def get_text(msg_data):
    """Return the message text from a Telegram update payload, or None if absent."""
    try:
        return msg_data["message"]["text"]
    except KeyError:
        return None
22
+
23
+
19
24
  class TelegramExporter(BaseExporter):
20
25
  def __init__(self, *args, **kwargs):
21
26
  super().__init__(*args, **kwargs)
@@ -37,6 +42,7 @@ class TelegramExporter(BaseExporter):
37
42
  self.channel_id = None # Target channel ID
38
43
  self.chat_id = None # Discussion group ID linked to the channel
39
44
  self.auth_uuid = uuid.uuid4().hex[:8]
45
+ self.chat_auth_uuid = uuid.uuid4().hex[:8]
40
46
  self.init_telegram()
41
47
 
42
48
  def check_connectivity(self):
@@ -119,6 +125,9 @@ class TelegramExporter(BaseExporter):
119
125
 
120
126
  if result:
121
127
  msg_data = json.loads(result["raw_data"])
128
+ if msg_data["message"]["chat"]["type"] != "private":
129
+ print("You should post to the PRIVATE chat, not to the channel/group")
130
+ continue
122
131
  self.control_chat_id = msg_data["message"]["chat"]["id"]
123
132
  self.send_api_request(
124
133
  "sendMessage",
@@ -860,6 +869,7 @@ class TelegramExporter(BaseExporter):
860
869
  raise Exception("Failed to get channel ID from forwarded message")
861
870
  else:
862
871
  raise Exception("Channel ID is undefined")
872
+
863
873
  # Handle chat resolution
864
874
  if isinstance(chat_result, int):
865
875
  chat_id = chat_result
@@ -868,9 +878,10 @@ class TelegramExporter(BaseExporter):
868
878
  if not chat_id:
869
879
  print("\n" + "=" * 50)
870
880
  print(
871
- "Please forward any message from the discussion group to the bot."
881
+ f"Please write a message in the discussion group with text: {self.chat_auth_uuid}"
872
882
  )
873
883
  print("This will allow me to extract the group ID automatically.")
884
+ print("The bot MUST be added do the group and made admin, else it won't work!")
874
885
  print("=" * 50 + "\n")
875
886
 
876
887
  # Wait for a forwarded message with chat information
@@ -883,8 +894,7 @@ class TelegramExporter(BaseExporter):
883
894
  while chat_id == channel_id:
884
895
  error_msg = (
885
896
  "Chat ID and channel ID are the same. The problem may be that "
886
- "you forwarded a message from discussion group that itself was automatically forwarded "
887
- "from the channel by Telegram. Please forward a message that was sent directly in the discussion group."
897
+ "you posted a message in the channel, not in the discussion group."
888
898
  )
889
899
  self.logger.error(error_msg)
890
900
  chat_id = self.wait_for_forwarded_message(
@@ -903,7 +913,10 @@ class TelegramExporter(BaseExporter):
903
913
  raise Exception("Chat ID is undefined")
904
914
 
905
915
  self.channel_id = f"-100{channel_id}"
906
- self.chat_id = f"-100{chat_id}"
916
+ if not str(chat_id).startswith("-100"):
917
+ self.chat_id = f"-100{chat_id}"
918
+ else:
919
+ self.chat_id = chat_id
907
920
 
908
921
  self.logger.info(
909
922
  f"Using channel ID {self.channel_id} and discussion group ID {self.chat_id}"
@@ -1087,7 +1100,10 @@ class TelegramExporter(BaseExporter):
1087
1100
  failure_message = "❌ Failed to extract channel ID."
1088
1101
  else:
1089
1102
  entity_name = "discussion group"
1090
- instruction_message = "🔄 Please forward any message from the discussion group\n\n⚠️ IMPORTANT: Do NOT forward messages that were automatically posted from the channel. Forward messages that were sent directly in the discussion group."
1103
+ instruction_message = (
1104
+ f"🔄 Please post to the discussion group a message with text: {self.chat_auth_uuid}\n\n"
1105
+ "⚠️ IMPORTANT: Bot should be added to the discussion group and have ADMIN rights!"
1106
+ )
1091
1107
  success_message = "✅ Successfully extracted discussion group ID: {}"
1092
1108
  failure_message = "❌ Failed to extract discussion group ID."
1093
1109
 
@@ -1104,6 +1120,7 @@ class TelegramExporter(BaseExporter):
1104
1120
  resolved = False
1105
1121
  retry_count = 0
1106
1122
  max_retries = 30 # 5 minutes (10 seconds per retry)
1123
+ extracted_id = None
1107
1124
 
1108
1125
  # Extract channel ID for comparison if we're looking for a discussion group
1109
1126
  channel_numeric_id = None
@@ -1132,58 +1149,63 @@ class TelegramExporter(BaseExporter):
1132
1149
  messages = cursor.fetchall()
1133
1150
 
1134
1151
  for row in messages:
1152
+ if self.args.debug:
1153
+ self.logger.info(row["raw_data"])
1135
1154
  if self.created_at and row["created_at"] < self.created_at:
1136
1155
  break
1137
1156
  msg_data = json.loads(row["raw_data"])
1138
- if msg_data["message"]["chat"]["id"] != self.control_chat_id:
1139
- continue
1140
- if "message" in msg_data and "forward_from_chat" in msg_data["message"]:
1141
- forward_info = msg_data["message"]["forward_from_chat"]
1142
-
1143
- # Extract chat ID from the message
1144
- chat_id = forward_info.get("id")
1145
- # Remove -100 prefix if present
1146
- if str(chat_id).startswith("-100"):
1147
- extracted_id = int(str(chat_id)[4:])
1148
- else:
1149
- extracted_id = chat_id
1150
-
1151
- # If we're looking for a discussion group, verify it's not the same as the channel ID
1152
- if entity_type == "chat" and channel_numeric_id:
1153
- if extracted_id == channel_numeric_id:
1154
- self.logger.warning(
1155
- "User forwarded a message from the channel, not the discussion group"
1156
- )
1157
- self.send_api_request(
1158
- "sendMessage",
1159
- {
1160
- "chat_id": self.control_chat_id,
1161
- "text": "⚠️ You forwarded a message from the channel, not from the discussion group.\n\nPlease forward a message that was originally sent IN the discussion group, not an automatic repost from the channel.",
1162
- },
1163
- )
1164
- # Skip this message and continue waiting
1165
- continue
1166
-
1167
- # For channels, check the type; for chats, accept any type except "channel" if check_type is False
1168
- if (check_type and forward_info.get("type") == "channel") or (
1169
- not check_type
1170
- ):
1171
- resolved = True
1172
- self.created_at = row["created_at"]
1173
- self.logger.info(
1174
- f"Extracted {entity_name} ID: {extracted_id} from forwarded message"
1157
+ if entity_type == "chat":
1158
+ if get_text(msg_data) != self.chat_auth_uuid:
1159
+ continue
1160
+ extracted_id = msg_data["message"]["chat"]["id"]
1161
+ if extracted_id == channel_numeric_id or extracted_id == self.control_chat_id:
1162
+ self.logger.warning(
1163
+ "User posted a message in the channel, not the discussion group"
1175
1164
  )
1176
-
1177
- # Send confirmation message
1178
1165
  self.send_api_request(
1179
1166
  "sendMessage",
1180
1167
  {
1181
1168
  "chat_id": self.control_chat_id,
1182
- "text": success_message.format(extracted_id),
1169
+ "text": (
1170
+ "⚠️ You posted a message in the channel, not in the discussion group."
1171
+ )
1183
1172
  },
1184
1173
  )
1174
+ # Skip this message and continue waiting
1175
+ continue
1176
+ elif entity_type == "channel":
1177
+ if msg_data["message"]["chat"]["id"] != self.control_chat_id:
1178
+ continue
1179
+ if "message" in msg_data and "forward_from_chat" in msg_data["message"]:
1180
+ forward_info = msg_data["message"]["forward_from_chat"]
1181
+
1182
+ # Extract chat ID from the message
1183
+ chat_id = forward_info.get("id")
1184
+ # Remove -100 prefix if present
1185
+ if str(chat_id).startswith("-100"):
1186
+ extracted_id = int(str(chat_id)[4:])
1187
+ else:
1188
+ extracted_id = chat_id
1189
+ # For channels, check the type; for chats, accept any type except "channel" if check_type is False
1190
+ if extracted_id and ((check_type and forward_info.get("type") == "channel") or (
1191
+ not check_type
1192
+ )):
1193
+ resolved = True
1194
+ self.created_at = row["created_at"]
1195
+ self.logger.info(
1196
+ f"Extracted {entity_name} ID: {extracted_id} from forwarded message"
1197
+ )
1198
+
1199
+ # Send confirmation message
1200
+ self.send_api_request(
1201
+ "sendMessage",
1202
+ {
1203
+ "chat_id": self.control_chat_id,
1204
+ "text": success_message.format(extracted_id),
1205
+ },
1206
+ )
1185
1207
 
1186
- return extracted_id
1208
+ return extracted_id
1187
1209
 
1188
1210
  retry_count += 1
1189
1211
 
File without changes
@@ -0,0 +1,143 @@
1
+ #!/usr/bin/env python
2
+ # -*- coding: utf-8 -*-
3
+ import itertools
4
+ import os
5
+ import re
6
+ from collections import defaultdict
7
+
8
+ import toml
9
+
10
+ from chgksuite.common import get_source_dirs
11
+ from chgksuite.composer.chgksuite_parser import parse_4s
12
+ from chgksuite.composer.composer_common import _parse_4s_elem, parseimg
13
+ from chgksuite.handouter.utils import read_file, write_file
14
+
15
+
16
+ def postprocess(s):
17
+ return s.replace("\\_", "_")
18
+
19
+
20
+ def generate_handouts_list(handouts, output_dir, base_name, parsed):
21
+ """Generate a human-readable file with question numbers that have handouts."""
22
+ question_numbers = sorted([int(h["for_question"]) for h in handouts])
23
+
24
+ content = "ВОПРОСЫ С РАЗДАТОЧНЫМ МАТЕРИАЛОМ:\n\n"
25
+ content += f"Сквозная нумерация:\n{', '.join(map(str, question_numbers))}\n\n"
26
+
27
+ content += "По турам:\n"
28
+ tour = 0
29
+ by_tour = {}
30
+ for tup in parsed:
31
+ if tup[0] == "section":
32
+ tour += 1
33
+ by_tour[tour] = []
34
+ if tup[0] == "Question":
35
+ if tour == 0:
36
+ tour = 1
37
+ by_tour[tour] = []
38
+ if tup[1]["number"] in question_numbers:
39
+ by_tour[tour].append(tup[1]["number"])
40
+
41
+ for tour in sorted(by_tour):
42
+ tour_handouts = by_tour[tour]
43
+ if tour_handouts:
44
+ content += f"Тур {tour}: {', '.join(map(str, tour_handouts))}\n"
45
+ else:
46
+ content += f"Тур {tour}: нет раздаток\n"
47
+
48
+ output_fn = os.path.join(output_dir, base_name + "_handouts_list.txt")
49
+ write_file(output_fn, content)
50
+ print(f"File with list of handouts: {output_fn}")
51
+ print(content)
52
+
53
+
54
+ def generate_handouts(args):
55
+ _, resourcedir = get_source_dirs()
56
+ labels = toml.loads(
57
+ read_file(os.path.join(resourcedir, f"labels_{args.lang}.toml"))
58
+ )
59
+ handout_re = re.compile(
60
+ "\\["
61
+ + labels["question_labels"]["handout_short"]
62
+ + ".+?:( |\n)(?P<handout_text>.+?)\\]",
63
+ flags=re.DOTALL,
64
+ )
65
+
66
+ cnt = read_file(args.filename)
67
+ parsed = parse_4s(cnt)
68
+
69
+ questions = [q[1] for q in parsed if q[0] == "Question"]
70
+ handouts = []
71
+ for q in questions:
72
+ if isinstance(q["question"], list):
73
+ question_text = "\n".join(itertools.chain.from_iterable(q["question"]))
74
+ else:
75
+ question_text = q["question"]
76
+ question_text_lower = question_text.lower()
77
+ srch = handout_re.search(question_text)
78
+ if srch:
79
+ text = postprocess(srch.group("handout_text"))
80
+ elems = _parse_4s_elem(text)
81
+ img = [el for el in elems if el[0] == "img"]
82
+ if img:
83
+ try:
84
+ parsed_img = parseimg(img[0][1])
85
+ except:
86
+ print(
87
+ f"Image file for question {q['number']} not found, add it by hand"
88
+ )
89
+ continue
90
+ else:
91
+ parsed_img = None
92
+ res = {"for_question": q["number"]}
93
+ if parsed_img:
94
+ res["image"] = parsed_img["imgfile"]
95
+ else:
96
+ res["text"] = text
97
+ handouts.append(res)
98
+ elif (
99
+ "раздат" in question_text_lower
100
+ or "роздан" in question_text_lower
101
+ or "(img" in question_text_lower
102
+ ):
103
+ print(f"probably badly formatted handout for question {q['number']}")
104
+ res = {"for_question": q["number"], "text": postprocess(question_text)}
105
+ handouts.append(res)
106
+ result = []
107
+ result_by_question = defaultdict(list)
108
+ for handout in handouts:
109
+ if "image" in handout:
110
+ key = "image"
111
+ prefix = "image: "
112
+ else:
113
+ key = "text"
114
+ prefix = ""
115
+ value = handout[key]
116
+ formatted = (
117
+ f"for_question: {handout['for_question']}\n" if not args.separate else ""
118
+ ) + f"columns: 3\n\n{prefix}{value}"
119
+ result.append(formatted)
120
+ result_by_question[handout["for_question"]].append(formatted)
121
+ output_dir = os.path.dirname(os.path.abspath(args.filename))
122
+ bn, _ = os.path.splitext(os.path.basename(args.filename))
123
+
124
+ if args.separate:
125
+ for k, v in result_by_question.items():
126
+ if len(v) > 1:
127
+ for i, cnt in enumerate(v):
128
+ output_fn = os.path.join(
129
+ output_dir, f"{bn}_q{k.zfill(2)}_{i + 1}.txt"
130
+ )
131
+ print(output_fn)
132
+ write_file(output_fn, cnt)
133
+ else:
134
+ output_fn = os.path.join(output_dir, f"{bn}_q{str(k).zfill(2)}.txt")
135
+ print(output_fn)
136
+ write_file(output_fn, v[0])
137
+ else:
138
+ output_fn = os.path.join(output_dir, bn + "_handouts.txt")
139
+ print(f"output filename: {output_fn}")
140
+ write_file(output_fn, "\n---\n".join(result))
141
+
142
+ if args.list_handouts:
143
+ generate_handouts_list(handouts, output_dir, bn, parsed)
@@ -0,0 +1,245 @@
1
+ import functools
2
+ import os
3
+ import platform
4
+ import re
5
+ import shutil
6
+ import subprocess
7
+ import tarfile
8
+ import zipfile
9
+
10
+ import requests
11
+
12
+
13
+ def get_utils_dir():
14
+ path = os.path.join(os.path.expanduser("~"), ".pecheny_utils")
15
+ if not os.path.exists(path):
16
+ os.mkdir(path)
17
+ return path
18
+
19
+
20
+ def escape_latex(text):
21
+ text = text.replace("\\", "\\textbackslash")
22
+ text = text.replace("~", "\\textasciitilde")
23
+ text = text.replace("^", "\\textasciicircum")
24
+ for char in ("%", "&", "$", "#", "{", "}", "_"):
25
+ text = text.replace(char, "\\" + char)
26
+ text = text.replace("\n", "\\linebreak\n")
27
+ return text
28
+
29
+
30
+ def check_tectonic_path(tectonic_path):
31
+ proc = subprocess.run([tectonic_path, "--help"], capture_output=True, check=True)
32
+ return proc.returncode == 0
33
+
34
+
35
+ def get_tectonic_path():
36
+ errors = []
37
+ system = platform.system()
38
+
39
+ cpdir = get_utils_dir()
40
+ if system == "Windows":
41
+ binary_name = "tectonic.exe"
42
+ tectonic_path = os.path.join(cpdir, binary_name)
43
+ else:
44
+ binary_name = "tectonic"
45
+ tectonic_path = os.path.join(cpdir, binary_name)
46
+
47
+ tectonic_ok = False
48
+ try:
49
+ tectonic_ok = check_tectonic_path(binary_name)
50
+ except FileNotFoundError:
51
+ pass # tectonic not found in PATH
52
+ except subprocess.CalledProcessError as e:
53
+ errors.append(f"tectonic --version failed: {type(e)} {e}")
54
+ if tectonic_ok:
55
+ return binary_name
56
+ if os.path.isfile(tectonic_path):
57
+ try:
58
+ tectonic_ok = check_tectonic_path(tectonic_path)
59
+ except subprocess.CalledProcessError as e:
60
+ errors.append(f"tectonic --version failed: {type(e)} {e}")
61
+ if tectonic_ok:
62
+ return tectonic_path
63
+
64
+
65
+ def github_get_latest_release(repo):
66
+ url = f"https://api.github.com/repos/{repo}/releases/latest"
67
+ req = requests.get(url)
68
+ assets_url = req.json()["assets_url"]
69
+ assets_req = requests.get(assets_url)
70
+ return {asset["name"]: asset["browser_download_url"] for asset in assets_req.json()}
71
+
72
+
73
+ def darwin_is_emulated():
74
+ try:
75
+ sub = subprocess.run(
76
+ ["sysctl", "-n", "sysctl.proc_translated"], capture_output=True, check=True
77
+ )
78
+ out = sub.stdout.decode("utf8").strip()
79
+ return int(out)
80
+ except subprocess.CalledProcessError:
81
+ print("couldn't tell if emulated, returning 0")
82
+ return 0
83
+
84
+
85
+ def parse_tectonic_archive_name(archive_name):
86
+ if archive_name.endswith(".tar.gz"):
87
+ archive_name = archive_name[: -len(".tar.gz")]
88
+ elif archive_name.endswith(".zip"):
89
+ archive_name = archive_name[: -len(".zip")]
90
+ else:
91
+ return
92
+ sp = archive_name.split("-")
93
+ result = {
94
+ "version": sp[1],
95
+ "arch": sp[2],
96
+ "manufacturer": sp[3],
97
+ "system": sp[4],
98
+ }
99
+ if len(sp) > 5:
100
+ result["toolchain"] = sp[5]
101
+ return result
102
+
103
+
104
+ # download_file function taken from https://stackoverflow.com/a/39217788
105
+ def download_file(url):
106
+ print(f"downloading from {url}...")
107
+ local_filename = url.split("/")[-1]
108
+ with requests.get(url, stream=True) as resp:
109
+ resp.raw.read = functools.partial(resp.raw.read, decode_content=True)
110
+ with open(local_filename, "wb") as f:
111
+ shutil.copyfileobj(resp.raw, f, length=16 * 1024 * 1024)
112
+ return local_filename
113
+
114
+
115
+ def extract_zip(zip_file, dirname=None):
116
+ if dirname is None:
117
+ dirname = zip_file[:-4]
118
+ with zipfile.ZipFile(zip_file, "r") as zip_ref:
119
+ zip_ref.extractall(dirname)
120
+ os.remove(zip_file)
121
+
122
+
123
+ def extract_tar(tar_file, dirname=None):
124
+ if dirname is None:
125
+ dirname = tar_file[: tar_file.lower().index(".tar")]
126
+ tf = tarfile.open(tar_file)
127
+ tf.extractall(dirname)
128
+ os.remove(tar_file)
129
+
130
+
131
+ def extract_archive(filename, dirname=None):
132
+ if filename.lower().endswith((".tar", ".tar.gz")):
133
+ extract_tar(filename, dirname=dirname)
134
+ elif filename.lower().endswith(".zip"):
135
+ extract_zip(filename, dirname=dirname)
136
+
137
+
138
+ def guess_archive_url(assets):
139
+ system = platform.system()
140
+ proc = platform.processor()
141
+ if system == "Darwin":
142
+ if proc == "arm" or (proc == "i386" and darwin_is_emulated()):
143
+ arch = "aarch64"
144
+ else:
145
+ arch = "x86_64"
146
+ for k, v in assets.items():
147
+ parsed = parse_tectonic_archive_name(k)
148
+ if not parsed:
149
+ continue
150
+ if parsed["arch"] == arch and parsed["system"] == "darwin":
151
+ return v
152
+ elif system == "Windows":
153
+ for k, v in assets.items():
154
+ parsed = parse_tectonic_archive_name(k)
155
+ if not parsed:
156
+ continue
157
+ if (
158
+ parsed["arch"] == "x86_64"
159
+ and parsed["system"] == "windows"
160
+ and parsed["toolchain"] == "msvc"
161
+ ):
162
+ return v
163
+ elif system == "Linux":
164
+ for k, v in assets.items():
165
+ parsed = parse_tectonic_archive_name(k)
166
+ if not parsed:
167
+ continue
168
+ if (
169
+ (not proc or (proc and parsed["arch"] == proc))
170
+ and parsed["system"] == "linux"
171
+ and parsed["toolchain"] == "musl"
172
+ ):
173
+ return v
174
+ raise Exception(f"Archive for system {system} proc {proc} not found")
175
+
176
+
177
+ def archive_url_from_regex(assets, regex):
178
+ for k, v in assets.items():
179
+ if re.match(regex, k):
180
+ return v
181
+ raise Exception(f"Archive for regex {regex} not found")
182
+
183
+
184
+ def install_tectonic(args):
185
+ system = platform.system()
186
+ assets = github_get_latest_release("tectonic-typesetting/tectonic")
187
+ if args.tectonic_package_regex:
188
+ archive_url = archive_url_from_regex(assets, args.tectonic_package_regex)
189
+ else:
190
+ archive_url = guess_archive_url(assets)
191
+ downloaded = download_file(archive_url)
192
+ dirname = "tectonic_folder"
193
+ extract_archive(downloaded, dirname=dirname)
194
+ if system == "Windows":
195
+ filename = "tectonic.exe"
196
+ else:
197
+ filename = "tectonic"
198
+ target_path = os.path.join(get_utils_dir(), filename)
199
+ shutil.move(os.path.join(dirname, filename), target_path)
200
+ shutil.rmtree(dirname)
201
+ return target_path
202
+
203
+
204
+ def install_font(url):
205
+ fn = url.split("/")[-1].split("?")[0]
206
+ bn, ext = os.path.splitext(fn)
207
+ if "." in bn:
208
+ new_fn = bn.replace(".", "_") + ext
209
+ else:
210
+ new_fn = fn
211
+ dir_name = new_fn[:-4]
212
+ dir_name_base = dir_name.split(os.pathsep)[-1]
213
+ fonts_dir = os.path.join(get_utils_dir(), "fonts")
214
+ if not os.path.exists(fonts_dir):
215
+ os.makedirs(fonts_dir)
216
+ target_dir = os.path.join(fonts_dir, dir_name_base)
217
+ if os.path.isdir(target_dir):
218
+ print(f"{target_dir} already exists")
219
+ return
220
+ download_file(url)
221
+ if fn != new_fn:
222
+ os.rename(fn, new_fn)
223
+ extract_archive(new_fn, dirname=dir_name)
224
+ if not os.path.isdir(target_dir):
225
+ shutil.copytree(dir_name, target_dir)
226
+ shutil.rmtree(dir_name)
227
+
228
+
229
+ def find_font(file_name, root_dir=None):
230
+ root_dir = root_dir or os.path.join(get_utils_dir(), "fonts")
231
+ if not os.path.isdir(root_dir):
232
+ os.makedirs(root_dir, exist_ok=True)
233
+ for dir_, _, files in os.walk(root_dir):
234
+ for fn in files:
235
+ if fn == file_name:
236
+ return os.path.join(dir_, fn)
237
+ raise Exception(f"{file_name} not found")
238
+
239
+
240
+ def install_font_from_github_wrapper(repo):
241
+ latest = github_get_latest_release(repo)
242
+ for k, v in latest.items():
243
+ if k.endswith(".zip"):
244
+ install_font(v)
245
+ break