chgksuite 0.27.0b5__py3-none-any.whl → 0.27.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
chgksuite/_html2md.py ADDED
@@ -0,0 +1,94 @@
1
+ """Simple HTML table to Markdown converter.
2
+
3
+ Replaces dashtable.html2md with a minimal implementation that avoids
4
+ deprecated BeautifulSoup methods.
5
+ """
6
+
7
+ from bs4 import BeautifulSoup
8
+
9
+
10
+ def html2md(html_string: str) -> str:
11
+ """Convert an HTML table to a Markdown table string.
12
+
13
+ Parameters
14
+ ----------
15
+ html_string : str
16
+ HTML string containing a table
17
+
18
+ Returns
19
+ -------
20
+ str
21
+ The table formatted as Markdown
22
+ """
23
+ soup = BeautifulSoup(html_string, "html.parser")
24
+ table = soup.find("table")
25
+
26
+ if not table:
27
+ return ""
28
+
29
+ rows = table.find_all("tr")
30
+ if not rows:
31
+ return ""
32
+
33
+ # Extract all rows as lists of cell texts
34
+ data = []
35
+ for row in rows:
36
+ # Check for header cells first, then data cells
37
+ cells = row.find_all("th")
38
+ if not cells:
39
+ cells = row.find_all("td")
40
+
41
+ row_data = []
42
+ for cell in cells:
43
+ # Get text, normalize whitespace
44
+ text = " ".join(cell.get_text().split())
45
+ row_data.append(text)
46
+ if row_data:
47
+ data.append(row_data)
48
+
49
+ if not data:
50
+ return ""
51
+
52
+ # Normalize row lengths (pad shorter rows)
53
+ max_cols = max(len(row) for row in data)
54
+ for row in data:
55
+ while len(row) < max_cols:
56
+ row.append("")
57
+
58
+ # Calculate column widths (minimum 3 for markdown separator)
59
+ # Add 2 for space cushions on each side
60
+ widths = []
61
+ for col in range(max_cols):
62
+ width = max(len(row[col]) for row in data)
63
+ widths.append(max(width + 2, 3))
64
+
65
+ # Build markdown table
66
+ lines = []
67
+
68
+ # Header row (centered, with padding)
69
+ header = (
70
+ "|" + "|".join(_center(cell, widths[i]) for i, cell in enumerate(data[0])) + "|"
71
+ )
72
+ lines.append(header)
73
+
74
+ # Separator row (no spaces to avoid typotools converting --- to em-dash)
75
+ separator = "|" + "|".join("-" * w for w in widths) + "|"
76
+ lines.append(separator)
77
+
78
+ # Data rows (centered, with padding)
79
+ for row in data[1:]:
80
+ line = (
81
+ "|" + "|".join(_center(cell, widths[i]) for i, cell in enumerate(row)) + "|"
82
+ )
83
+ lines.append(line)
84
+
85
+ return "\n".join(lines)
86
+
87
+
88
+ def _center(text: str, width: int) -> str:
89
+ """Center text within width, with space padding."""
90
+ text = text.strip()
91
+ padding = width - len(text)
92
+ left = padding // 2
93
+ right = padding - left
94
+ return " " * left + text + " " * right
chgksuite/cli.py CHANGED
@@ -596,6 +596,15 @@ class ArgparseBuilder:
596
596
  caption="Имя 4s-файла",
597
597
  filetypes=[("chgksuite markup files", "*.4s")],
598
598
  )
599
+ cmdcompose_markdown = cmdcompose_filetype.add_parser("markdown")
600
+ self.add_argument(
601
+ cmdcompose_markdown,
602
+ "filename",
603
+ nargs="*",
604
+ help="file(s) to compose from.",
605
+ caption="Имя 4s-файла",
606
+ filetypes=[("chgksuite markup files", "*.4s")],
607
+ )
599
608
  cmdcompose_pptx = cmdcompose_filetype.add_parser("pptx")
600
609
  self.add_argument(
601
610
  cmdcompose_pptx,
chgksuite/common.py CHANGED
@@ -1,7 +1,6 @@
1
1
  #!/usr/bin/env python
2
2
  # -*- coding: utf-8 -*-
3
3
  import argparse
4
- import codecs
5
4
  import csv
6
5
  import itertools
7
6
  import json
@@ -28,7 +27,7 @@ QUESTION_LABELS = [
28
27
  "number",
29
28
  "setcounter",
30
29
  ]
31
- SEP = os.linesep
30
+ SEP = "\n"
32
31
  try:
33
32
  ENC = sys.stdout.encoding or "utf8"
34
33
  except AttributeError:
@@ -114,7 +113,7 @@ class DefaultArgs:
114
113
  def set_lastdir(path):
115
114
  chgksuite_dir = get_chgksuite_dir()
116
115
  lastdir = os.path.join(chgksuite_dir, "lastdir")
117
- with codecs.open(lastdir, "w", "utf8") as f:
116
+ with open(lastdir, "w", encoding="utf-8") as f:
118
117
  f.write(path)
119
118
 
120
119
 
@@ -122,7 +121,7 @@ def get_lastdir():
122
121
  chgksuite_dir = get_chgksuite_dir()
123
122
  lastdir = os.path.join(chgksuite_dir, "lastdir")
124
123
  if os.path.isfile(lastdir):
125
- with codecs.open(lastdir, "r", "utf8") as f:
124
+ with open(lastdir, "r", encoding="utf-8") as f:
126
125
  return f.read().rstrip()
127
126
  return "."
128
127
 
@@ -153,6 +152,19 @@ def ensure_utf8(s):
153
152
  return s
154
153
 
155
154
 
155
+ def read_text_file(filepath, encoding="utf-8"):
156
+ """Read a text file, fixing corrupted line endings (\r\r\n -> \n) if present."""
157
+ with open(filepath, "rb") as f:
158
+ raw = f.read()
159
+ # Fix corrupted line endings at byte level before decoding
160
+ if b"\r\r\n" in raw:
161
+ raw = raw.replace(b"\r\r\n", b"\n")
162
+ text = raw.decode(encoding)
163
+ # Normalize any remaining line endings
164
+ text = text.replace("\r\n", "\n").replace("\r", "\n")
165
+ return text
166
+
167
+
156
168
  class DummyLogger(object):
157
169
  def info(self, *args, **kwargs):
158
170
  pass
@@ -371,6 +383,5 @@ def compose_4s(structure, args=None):
371
383
  + SEP
372
384
  )
373
385
  tmp = re.sub(r"{}+".format(SEP), SEP, tmp)
374
- tmp = tmp.replace("\r\r", "\r")
375
386
  result += tmp + SEP
376
387
  return result
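
A short, hedged sketch of how the new read_text_file helper behaves (the temporary file here is purely illustrative):

import os
import tempfile
from chgksuite.common import read_text_file

# Write a file with the corrupted \r\r\n endings the helper is designed to repair
with tempfile.NamedTemporaryFile("wb", suffix=".4s", delete=False) as tmp:
    tmp.write("Вопрос 1.\r\r\nТекст вопроса.\r\n".encode("utf-8"))
    path = tmp.name

text = read_text_file(path)
assert text == "Вопрос 1.\nТекст вопроса.\n"  # \r\r\n and \r\n both end up as \n
os.remove(path)
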
chgksuite/composer/__init__.py CHANGED
@@ -1,6 +1,5 @@
1
1
  #!usr/bin/env python
2
2
  # -*- coding: utf-8 -*-
3
- import codecs
4
3
  import json
5
4
  import os
6
5
  import shutil
@@ -13,6 +12,7 @@ from chgksuite.common import (
13
12
  get_source_dirs,
14
13
  init_logger,
15
14
  log_wrap,
15
+ read_text_file,
16
16
  set_lastdir,
17
17
  )
18
18
  from chgksuite.composer.chgksuite_parser import parse_4s
@@ -22,7 +22,7 @@ from chgksuite.composer.docx import DocxExporter
22
22
  from chgksuite.composer.latex import LatexExporter
23
23
  from chgksuite.composer.lj import LjExporter
24
24
  from chgksuite.composer.pptx import PptxExporter
25
- from chgksuite.composer.reddit import RedditExporter
25
+ from chgksuite.composer.markdown import MarkdownExporter
26
26
  from chgksuite.composer.stats import StatsAdder
27
27
  from chgksuite.composer.telegram import TelegramExporter
28
28
  from chgksuite.composer.openquiz import OpenquizExporter
@@ -75,11 +75,11 @@ def process_file_wrapper(filename, sourcedir, targetdir, args):
75
75
 
76
76
  def parse_filepath(filepath, args=None):
77
77
  args = args or DefaultArgs()
78
- with codecs.open(filepath, "r", "utf8") as input_file:
79
- input_text = input_file.read()
80
- input_text = input_text.replace("\r", "")
78
+ input_text = read_text_file(filepath)
81
79
  debug_dir = os.path.dirname(os.path.abspath(filepath))
82
- return parse_4s(input_text, randomize=args.randomize, debug=args.debug, debug_dir=debug_dir)
80
+ return parse_4s(
81
+ input_text, randomize=args.randomize, debug=args.debug, debug_dir=debug_dir
82
+ )
83
83
 
84
84
 
85
85
  def make_merged_filename(filelist):
@@ -106,7 +106,7 @@ def process_file(filename, tmp_dir, targetdir, args=None, logger=None):
106
106
  targetdir,
107
107
  make_filename(os.path.basename(filename), "dbg", args),
108
108
  )
109
- with codecs.open(debug_fn, "w", "utf8") as output_file:
109
+ with open(debug_fn, "w", encoding="utf-8") as output_file:
110
110
  output_file.write(json.dumps(structure, indent=2, ensure_ascii=False))
111
111
 
112
112
  if not args.filetype:
@@ -147,8 +147,8 @@ def process_file(filename, tmp_dir, targetdir, args=None, logger=None):
147
147
  outfilename = os.path.join(targetdir, make_filename(filename, "txt", args))
148
148
  exporter.export(outfilename)
149
149
 
150
- if args.filetype == "redditmd":
151
- exporter = RedditExporter(structure, args, dir_kwargs)
150
+ if args.filetype in ("redditmd", "markdown"):
151
+ exporter = MarkdownExporter(structure, args, dir_kwargs)
152
152
  outfilename = os.path.join(targetdir, make_filename(filename, "md", args))
153
153
  exporter.export(outfilename)
154
154
 
chgksuite/composer/chgksuite_parser.py CHANGED
@@ -1,4 +1,3 @@
1
- import codecs
2
1
  import os
3
2
  import random
4
3
  import re
@@ -125,7 +124,7 @@ def parse_4s(s, randomize=False, debug=False, logger=None, debug_dir=None):
125
124
  if debug:
126
125
  debug_dir = debug_dir or get_chgksuite_dir()
127
126
  debug_path = os.path.join(debug_dir, "raw.debug")
128
- with codecs.open(debug_path, "w", "utf8") as debugf:
127
+ with open(debug_path, "w", encoding="utf-8") as debugf:
129
128
  debugf.write(log_wrap(s.split("\n")))
130
129
 
131
130
  s = replace_counters(s)
@@ -147,7 +146,7 @@ def parse_4s(s, randomize=False, debug=False, logger=None, debug_dir=None):
147
146
  counter = 1
148
147
 
149
148
  if debug:
150
- with codecs.open("debug1st.debug", "w", "utf8") as debugf:
149
+ with open("debug1st.debug", "w", encoding="utf-8") as debugf:
151
150
  debugf.write(log_wrap(structure))
152
151
 
153
152
  for element in structure:
@@ -240,7 +239,7 @@ def parse_4s(s, randomize=False, debug=False, logger=None, debug_dir=None):
240
239
  i += 1
241
240
 
242
241
  if debug:
243
- with codecs.open("debug.debug", "w", "utf8") as debugf:
242
+ with open("debug.debug", "w", encoding="utf-8") as debugf:
244
243
  debugf.write(log_wrap(final_structure))
245
244
 
246
245
  for element in final_structure:
chgksuite/composer/db.py CHANGED
@@ -1,4 +1,3 @@
1
- import codecs
2
1
  import datetime
3
2
  import os
4
3
  import re
@@ -211,7 +210,7 @@ class DbExporter(BaseExporter):
211
210
  if res:
212
211
  result.append(res)
213
212
  text = "".join(result)
214
- with codecs.open(outfilename, "w", "utf8") as f:
213
+ with open(outfilename, "w", encoding="utf-8") as f:
215
214
  f.write(text)
216
215
  self.logger.info("Output: {}".format(outfilename))
217
216
  if self.args.clipboard:
chgksuite/composer/latex.py CHANGED
@@ -1,4 +1,3 @@
1
- import codecs
2
1
  import hashlib
3
2
  import os
4
3
  import re
@@ -221,7 +220,7 @@ class LatexExporter(BaseExporter):
221
220
 
222
221
  tex += "\\end{document}"
223
222
 
224
- with codecs.open(outfilename, "w", "utf8") as outfile:
223
+ with open(outfilename, "w", encoding="utf-8") as outfile:
225
224
  outfile.write(tex)
226
225
  cwd = os.getcwd()
227
226
  os.chdir(self.dir_kwargs["tmp_dir"])
chgksuite/composer/lj.py CHANGED
@@ -1,4 +1,3 @@
1
- import codecs
2
1
  import datetime
3
2
  import os
4
3
  import random
@@ -240,7 +239,7 @@ class LjExporter(BaseExporter):
240
239
  "general_impressions_text"
241
240
  ]
242
241
  if self.args.debug:
243
- with codecs.open("lj.debug", "w", "utf8") as f:
242
+ with open("lj.debug", "w", encoding="utf-8") as f:
244
243
  f.write(log_wrap(final_structure))
245
244
  return final_structure
246
245
 
chgksuite/composer/reddit.py → chgksuite/composer/markdown.py RENAMED
@@ -1,4 +1,3 @@
1
- import codecs
2
1
  import os
3
2
 
4
3
  from chgksuite.composer.composer_common import (
@@ -9,20 +8,20 @@ from chgksuite.composer.composer_common import (
9
8
  )
10
9
 
11
10
 
12
- class RedditExporter(BaseExporter):
11
+ class MarkdownExporter(BaseExporter):
13
12
  def __init__(self, *args, **kwargs):
14
13
  super().__init__(*args, **kwargs)
15
14
  self.im = Imgur(self.args.imgur_client_id or IMGUR_CLIENT_ID)
16
15
  self.qcount = 1
17
16
 
18
- def reddityapper(self, e):
17
+ def markdownyapper(self, e):
19
18
  if isinstance(e, str):
20
- return self.reddit_element_layout(e)
19
+ return self.markdown_element_layout(e)
21
20
  elif isinstance(e, list):
22
21
  if not any(isinstance(x, list) for x in e):
23
- return self.reddit_element_layout(e)
22
+ return self.markdown_element_layout(e)
24
23
  else:
25
- return " \n".join([self.reddit_element_layout(x) for x in e])
24
+ return " \n".join([self.markdown_element_layout(x) for x in e])
26
25
 
27
26
  def parse_and_upload_image(self, path):
28
27
  parsed_image = parseimg(
@@ -37,11 +36,13 @@ class RedditExporter(BaseExporter):
37
36
  imglink = uploaded_image["data"]["link"]
38
37
  return imglink
39
38
 
40
- def redditformat(self, s):
39
+ def markdownformat(self, s):
41
40
  res = ""
42
41
  for run in self.parse_4s_elem(s):
43
- if run[0] in ("", "hyperlink"):
42
+ if run[0] == "":
44
43
  res += run[1]
44
+ if run[0] == "hyperlink":
45
+ res += "<{}>".format(run[1])
45
46
  if run[0] == "screen":
46
47
  res += run[1]["for_screen"]
47
48
  if run[0] == "italic":
@@ -51,61 +52,70 @@ class RedditExporter(BaseExporter):
51
52
  imglink = run[1]
52
53
  else:
53
54
  imglink = self.parse_and_upload_image(run[1])
54
- res += "[картинка]({})".format(imglink)
55
+ if self.args.filetype == "redditmd":
56
+ res += "[картинка]({})".format(imglink)
57
+ else:
58
+ res += "![]({})".format(imglink)
55
59
  while res.endswith("\n"):
56
60
  res = res[:-1]
57
61
  res = res.replace("\n", " \n")
58
62
  return res
59
63
 
60
- def reddit_element_layout(self, e):
64
+ def markdown_element_layout(self, e):
61
65
  res = ""
62
66
  if isinstance(e, str):
63
- res = self.redditformat(e)
67
+ res = self.markdownformat(e)
64
68
  return res
65
69
  if isinstance(e, list):
66
70
  res = " \n".join(
67
71
  [
68
- "{}\\. {}".format(i + 1, self.reddit_element_layout(x))
72
+ "{}\\. {}".format(i + 1, self.markdown_element_layout(x))
69
73
  for i, x in enumerate(e)
70
74
  ]
71
75
  )
72
76
  return res
73
77
 
74
- def reddit_format_element(self, pair):
78
+ def markdown_format_element(self, pair):
75
79
  if pair[0] == "Question":
76
- return self.reddit_format_question(pair[1])
80
+ return self.markdown_format_question(pair[1])
77
81
 
78
- def reddit_format_question(self, q):
82
+ def markdown_format_question(self, q):
79
83
  if "setcounter" in q:
80
84
  self.qcount = int(q["setcounter"])
81
85
  res = "__Вопрос {}__: {} \n".format(
82
86
  self.qcount if "number" not in q else q["number"],
83
- self.reddityapper(q["question"]),
87
+ self.markdownyapper(q["question"]),
84
88
  )
85
89
  if "number" not in q:
86
90
  self.qcount += 1
87
- res += "__Ответ:__ >!{} \n".format(self.reddityapper(q["answer"]))
91
+ spoiler_start = ">!" if self.args.filetype == "redditmd" else ""
92
+ spoiler_end = "!<" if self.args.filetype == "redditmd" else ""
93
+ res += "__Ответ:__ {}{} \n".format(
94
+ spoiler_start, self.markdownyapper(q["answer"])
95
+ )
88
96
  if "zachet" in q:
89
- res += "__Зачёт:__ {} \n".format(self.reddityapper(q["zachet"]))
97
+ res += "__Зачёт:__ {} \n".format(self.markdownyapper(q["zachet"]))
90
98
  if "nezachet" in q:
91
- res += "__Незачёт:__ {} \n".format(self.reddityapper(q["nezachet"]))
99
+ res += "__Незачёт:__ {} \n".format(self.markdownyapper(q["nezachet"]))
92
100
  if "comment" in q:
93
- res += "__Комментарий:__ {} \n".format(self.reddityapper(q["comment"]))
101
+ res += "__Комментарий:__ {} \n".format(self.markdownyapper(q["comment"]))
94
102
  if "source" in q:
95
- res += "__Источник:__ {} \n".format(self.reddityapper(q["source"]))
103
+ res += "__Источник:__ {} \n".format(self.markdownyapper(q["source"]))
96
104
  if "author" in q:
97
- res += "!<\n__Автор:__ {} \n".format(self.reddityapper(q["author"]))
105
+ res += "{}\n__Автор:__ {} \n".format(
106
+ spoiler_end, self.markdownyapper(q["author"])
107
+ )
98
108
  else:
99
- res += "!<\n"
109
+ res += spoiler_end + "\n"
100
110
  return res
101
111
 
102
112
  def export(self, outfile):
103
113
  result = []
104
114
  for pair in self.structure:
105
- res = self.reddit_format_element(pair)
115
+ res = self.markdown_format_element(pair)
106
116
  if res:
107
117
  result.append(res)
108
118
  text = "\n\n".join(result)
109
- with codecs.open(outfile, "w", "utf8") as f:
119
+ with open(outfile, "w", encoding="utf-8") as f:
110
120
  f.write(text)
111
121
  self.logger.info("Output: {}".format(outfile))
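
To make the branching above concrete, a standalone illustration of the two answer layouts (this does not call into MarkdownExporter; the answer and author strings are invented placeholders):

# redditmd keeps Reddit's >!…!< spoiler markers around the answer block;
# plain markdown output drops them.
for filetype in ("redditmd", "markdown"):
    spoiler_start = ">!" if filetype == "redditmd" else ""
    spoiler_end = "!<" if filetype == "redditmd" else ""
    res = "__Ответ:__ {}{} \n".format(spoiler_start, "пример ответа")
    res += "{}\n__Автор:__ {} \n".format(spoiler_end, "Имя Фамилия")
    print(filetype, repr(res))
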
chgksuite/composer/openquiz.py CHANGED
@@ -1,4 +1,3 @@
1
- import codecs
2
1
  import copy
3
2
  import re
4
3
  import json
@@ -175,5 +174,5 @@ class OpenquizExporter(BaseExporter):
175
174
  result = []
176
175
  for q in questions:
177
176
  result.append(self.oq_format_question(q))
178
- with codecs.open(outfilename, "w", "utf8") as f:
177
+ with open(outfilename, "w", encoding="utf-8") as f:
179
178
  f.write(json.dumps(result, indent=2, ensure_ascii=False))
chgksuite/handouter/runner.py CHANGED
@@ -14,6 +14,9 @@ from chgksuite.handouter.gen import generate_handouts
14
14
  from chgksuite.handouter.pack import pack_handouts
15
15
  from chgksuite.handouter.installer import get_tectonic_path, install_tectonic
16
16
  from chgksuite.handouter.tex_internals import (
17
+ EDGE_DASHED,
18
+ EDGE_NONE,
19
+ EDGE_SOLID,
17
20
  GREYTEXT,
18
21
  HEADER,
19
22
  IMG,
@@ -61,7 +64,28 @@ class HandoutGenerator:
61
64
  )
62
65
  return GREYTEXT.replace("<GREYTEXT>", handout_text)
63
66
 
64
- def make_tikzbox(self, block):
67
+ def make_tikzbox(self, block, edges=None, ext=None):
68
+ """
69
+ Create a TikZ box with configurable edge styles and extensions.
70
+ edges is a dict with keys 'top', 'bottom', 'left', 'right'
71
+ values are EDGE_DASHED or EDGE_SOLID
72
+ ext is a dict with edge extensions to close gaps at boundaries
73
+ """
74
+ if edges is None:
75
+ edges = {
76
+ "top": EDGE_DASHED,
77
+ "bottom": EDGE_DASHED,
78
+ "left": EDGE_DASHED,
79
+ "right": EDGE_DASHED,
80
+ }
81
+ if ext is None:
82
+ ext = {
83
+ "top": ("0pt", "0pt"),
84
+ "bottom": ("0pt", "0pt"),
85
+ "left": ("0pt", "0pt"),
86
+ "right": ("0pt", "0pt"),
87
+ }
88
+
65
89
  if block.get("no_center"):
66
90
  align = ""
67
91
  else:
@@ -79,19 +103,218 @@ class HandoutGenerator:
79
103
  .replace("<ALIGN>", align)
80
104
  .replace("<TEXTWIDTH>", textwidth)
81
105
  .replace("<FONTSIZE>", fontsize)
106
+ .replace("<TOP>", edges["top"])
107
+ .replace("<BOTTOM>", edges["bottom"])
108
+ .replace("<LEFT>", edges["left"])
109
+ .replace("<RIGHT>", edges["right"])
110
+ .replace("<TOP_EXT_L>", ext["top"][0])
111
+ .replace("<TOP_EXT_R>", ext["top"][1])
112
+ .replace("<BOTTOM_EXT_L>", ext["bottom"][0])
113
+ .replace("<BOTTOM_EXT_R>", ext["bottom"][1])
114
+ .replace("<LEFT_EXT_T>", ext["left"][0])
115
+ .replace("<LEFT_EXT_B>", ext["left"][1])
116
+ .replace("<RIGHT_EXT_T>", ext["right"][0])
117
+ .replace("<RIGHT_EXT_B>", ext["right"][1])
82
118
  )
83
119
 
84
120
  def get_page_width(self):
85
121
  return self.args.paperwidth - self.args.margin_left - self.args.margin_right - 2
86
122
 
123
+ def get_cut_direction(
124
+ self, columns, num_rows, handouts_per_team, grouping="horizontal"
125
+ ):
126
+ """
127
+ Determine team rectangle dimensions.
128
+ Returns (team_cols, team_rows) where each team is a team_cols × team_rows block.
129
+
130
+ Falls back to (None, None) if handouts can't be evenly divided into teams.
131
+
132
+ Args:
133
+ grouping: "horizontal" (default) prefers wider teams (smaller team_rows),
134
+ "vertical" prefers taller teams (smaller team_cols).
135
+ """
136
+ total = columns * num_rows
137
+
138
+ # Check if total handouts can be evenly divided
139
+ if total % handouts_per_team != 0:
140
+ return None, None
141
+
142
+ num_teams = total // handouts_per_team
143
+ if num_teams < 1:
144
+ return None, None # Invalid configuration
145
+
146
+ # Find all valid team rectangle sizes (team_cols × team_rows = handouts_per_team)
147
+ valid_layouts = []
148
+ for team_rows in range(1, handouts_per_team + 1):
149
+ if handouts_per_team % team_rows == 0:
150
+ team_cols = handouts_per_team // team_rows
151
+ if columns % team_cols == 0 and num_rows % team_rows == 0:
152
+ valid_layouts.append((team_cols, team_rows))
153
+
154
+ if not valid_layouts:
155
+ return None, None
156
+
157
+ # Sort based on grouping preference
158
+ if grouping == "vertical":
159
+ # Prefer vertical grouping (smaller team_cols = taller teams)
160
+ valid_layouts.sort(key=lambda x: x[0])
161
+ else:
162
+ # Prefer horizontal grouping (smaller team_rows = wider teams)
163
+ valid_layouts.sort(key=lambda x: x[1])
164
+
165
+ return valid_layouts[0]
166
+
167
+ def get_edge_styles(
168
+ self, row_idx, col_idx, num_rows, columns, team_cols, team_rows
169
+ ):
170
+ """
171
+ Determine edge styles and extensions for a box at position (row_idx, col_idx).
172
+ Outer edges of team rectangles are solid (thicker), inner edges are dashed.
173
+ Extensions are used to close gaps in ALL solid lines.
174
+ Duplicate dashed edges are skipped to avoid double lines.
175
+
176
+ team_cols and team_rows define the dimensions of each team rectangle.
177
+ """
178
+ # Default: all dashed, no extension
179
+ edges = {
180
+ "top": EDGE_DASHED,
181
+ "bottom": EDGE_DASHED,
182
+ "left": EDGE_DASHED,
183
+ "right": EDGE_DASHED,
184
+ }
185
+ ext = {
186
+ "top": ("0pt", "0pt"),
187
+ "bottom": ("0pt", "0pt"),
188
+ "left": ("0pt", "0pt"),
189
+ "right": ("0pt", "0pt"),
190
+ }
191
+
192
+ # Gap sizes (half of spacing to extend into)
193
+ h_gap = "0.75mm" # half of SPACE (1.5mm)
194
+ v_gap = "0.5mm" # half of vspace (1mm)
195
+
196
+ # Helper functions to check if position is at a team boundary
197
+ def is_at_right_team_boundary():
198
+ """Is this box at the right edge of its team (but not at grid edge)?"""
199
+ if not team_cols:
200
+ return False
201
+ return (col_idx + 1) % team_cols == 0 and col_idx < columns - 1
202
+
203
+ def is_at_left_team_boundary():
204
+ """Is this box at the left edge of its team (but not at grid edge)?"""
205
+ if not team_cols:
206
+ return False
207
+ return col_idx % team_cols == 0 and col_idx > 0
208
+
209
+ def is_at_bottom_team_boundary():
210
+ """Is this box at the bottom edge of its team (but not at grid edge)?"""
211
+ if not team_rows:
212
+ return False
213
+ return (row_idx + 1) % team_rows == 0 and row_idx < num_rows - 1
214
+
215
+ def is_at_top_team_boundary():
216
+ """Is this box at the top edge of its team (but not at grid edge)?"""
217
+ if not team_rows:
218
+ return False
219
+ return row_idx % team_rows == 0 and row_idx > 0
220
+
221
+ # Determine which edges are solid
222
+ # Only apply solid edges if we have valid team dimensions
223
+ # Otherwise fall back to all-dashed (default)
224
+ if team_cols is not None and team_rows is not None:
225
+ # Outer edges of the entire grid
226
+ if row_idx == 0:
227
+ edges["top"] = EDGE_SOLID
228
+ if row_idx == num_rows - 1:
229
+ edges["bottom"] = EDGE_SOLID
230
+ if col_idx == 0:
231
+ edges["left"] = EDGE_SOLID
232
+ if col_idx == columns - 1:
233
+ edges["right"] = EDGE_SOLID
234
+
235
+ # Team boundary edges
236
+ if is_at_right_team_boundary():
237
+ edges["right"] = EDGE_SOLID
238
+ if is_at_left_team_boundary():
239
+ edges["left"] = EDGE_SOLID
240
+ if is_at_bottom_team_boundary():
241
+ edges["bottom"] = EDGE_SOLID
242
+ if is_at_top_team_boundary():
243
+ edges["top"] = EDGE_SOLID
244
+
245
+ # Skip duplicate dashed edges (to avoid double lines between adjacent boxes)
246
+ if edges["left"] == EDGE_DASHED and col_idx > 0:
247
+ edges["left"] = EDGE_NONE
248
+
249
+ if edges["top"] == EDGE_DASHED and row_idx > 0:
250
+ edges["top"] = EDGE_NONE
251
+
252
+ # Calculate extensions for solid edges to close gaps
253
+ # But don't extend into team boundary gaps!
254
+
255
+ if edges["top"] == EDGE_SOLID:
256
+ at_left_boundary = is_at_left_team_boundary()
257
+ ext_left = "-" + h_gap if col_idx > 0 and not at_left_boundary else "0pt"
258
+ at_right_boundary = is_at_right_team_boundary()
259
+ ext_right = (
260
+ h_gap if col_idx < columns - 1 and not at_right_boundary else "0pt"
261
+ )
262
+ ext["top"] = (ext_left, ext_right)
263
+
264
+ if edges["bottom"] == EDGE_SOLID:
265
+ at_left_boundary = is_at_left_team_boundary()
266
+ ext_left = "-" + h_gap if col_idx > 0 and not at_left_boundary else "0pt"
267
+ at_right_boundary = is_at_right_team_boundary()
268
+ ext_right = (
269
+ h_gap if col_idx < columns - 1 and not at_right_boundary else "0pt"
270
+ )
271
+ ext["bottom"] = (ext_left, ext_right)
272
+
273
+ if edges["left"] == EDGE_SOLID:
274
+ at_top_boundary = is_at_top_team_boundary()
275
+ ext_top = v_gap if row_idx > 0 and not at_top_boundary else "0pt"
276
+ at_bottom_boundary = is_at_bottom_team_boundary()
277
+ ext_bottom = (
278
+ "-" + v_gap
279
+ if row_idx < num_rows - 1 and not at_bottom_boundary
280
+ else "0pt"
281
+ )
282
+ ext["left"] = (ext_top, ext_bottom)
283
+
284
+ if edges["right"] == EDGE_SOLID:
285
+ at_top_boundary = is_at_top_team_boundary()
286
+ ext_top = v_gap if row_idx > 0 and not at_top_boundary else "0pt"
287
+ at_bottom_boundary = is_at_bottom_team_boundary()
288
+ ext_bottom = (
289
+ "-" + v_gap
290
+ if row_idx < num_rows - 1 and not at_bottom_boundary
291
+ else "0pt"
292
+ )
293
+ ext["right"] = (ext_top, ext_bottom)
294
+
295
+ return edges, ext
296
+
87
297
  def generate_regular_block(self, block_):
88
298
  block = block_.copy()
89
299
  if not (block.get("image") or block.get("text")):
90
300
  return
91
301
  columns = block["columns"]
92
- spaces = block["columns"] - 1
302
+ num_rows = block.get("rows") or 1
303
+ handouts_per_team = block.get("handouts_per_team") or 3
304
+ grouping = block.get("grouping") or "horizontal"
305
+
306
+ # Determine team rectangle dimensions
307
+ team_cols, team_rows = self.get_cut_direction(
308
+ columns, num_rows, handouts_per_team, grouping
309
+ )
310
+ if self.args.debug:
311
+ print(
312
+ f"team_cols: {team_cols}, team_rows: {team_rows}, grouping: {grouping}"
313
+ )
314
+
315
+ spaces = columns - 1
93
316
  boxwidth = self.args.boxwidth or round(
94
- (self.get_page_width() - spaces * self.SPACE) / block["columns"],
317
+ (self.get_page_width() - spaces * self.SPACE) / columns,
95
318
  3,
96
319
  )
97
320
  total_width = boxwidth * columns + spaces * self.SPACE
@@ -104,7 +327,6 @@ class HandoutGenerator:
104
327
  r"\setlength{\boxwidth}{<Q>mm}%".replace("<Q>", str(boxwidth)),
105
328
  r"\setlength{\boxwidthinner}{<Q>mm}%".replace("<Q>", str(boxwidthinner)),
106
329
  ]
107
- rows = []
108
330
  contents = []
109
331
  if block.get("image"):
110
332
  img_qwidth = block.get("resize_image") or 1.0
@@ -119,10 +341,18 @@ class HandoutGenerator:
119
341
  block["centering"] = ""
120
342
  else:
121
343
  block["centering"] = "\\centering"
122
- for _ in range(block.get("rows") or 1):
344
+
345
+ rows = []
346
+ for row_idx in range(num_rows):
347
+ row_boxes = []
348
+ for col_idx in range(columns):
349
+ edges, ext = self.get_edge_styles(
350
+ row_idx, col_idx, num_rows, columns, team_cols, team_rows
351
+ )
352
+ row_boxes.append(self.make_tikzbox(block, edges, ext))
123
353
  row = (
124
354
  TIKZBOX_START.replace("<CENTERING>", block["centering"])
125
- + "\n".join([self.make_tikzbox(block)] * block["columns"])
355
+ + "\n".join(row_boxes)
126
356
  + TIKZBOX_END
127
357
  )
128
358
  rows.append(row)
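
Some worked examples of the team-rectangle logic above (results traced by hand from get_cut_direction; `gen` stands for a hypothetical HandoutGenerator instance, so treat these as expected values rather than a runnable script):

gen.get_cut_direction(columns=3, num_rows=4, handouts_per_team=3)                         # -> (3, 1): each team is one full row
gen.get_cut_direction(columns=2, num_rows=4, handouts_per_team=4, grouping="horizontal")  # -> (2, 2)
gen.get_cut_direction(columns=2, num_rows=4, handouts_per_team=4, grouping="vertical")    # -> (1, 4)
gen.get_cut_direction(columns=3, num_rows=4, handouts_per_team=5)                         # -> (None, None): 12 boxes not divisible by 5
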
@@ -13,7 +13,7 @@ HEADER = r"""
13
13
  \begin{document}
14
14
  \fontsize{14pt}{16pt}\selectfont
15
15
  \setlength\parindent{0pt}
16
- \tikzstyle{box}=[draw, dashed, rectangle, inner sep=<TIKZ_MM>mm]
16
+ \tikzstyle{box}=[rectangle, inner sep=<TIKZ_MM>mm]
17
17
  \raggedright
18
18
  \raggedbottom
19
19
  """.strip()
@@ -25,10 +25,20 @@ TIKZBOX_START = r"""{<CENTERING>
25
25
 
26
26
  TIKZBOX_INNER = r"""
27
27
  \begin{tikzpicture}
28
- \node[box, minimum width=\boxwidth<TEXTWIDTH><ALIGN>] {<FONTSIZE><CONTENTS>\par};
28
+ \node[box, minimum width=\boxwidth<TEXTWIDTH><ALIGN>] (b) {<FONTSIZE><CONTENTS>\par};
29
+ \useasboundingbox (b.south west) rectangle (b.north east);
30
+ \draw[<TOP>] ([xshift=<TOP_EXT_L>]b.north west) -- ([xshift=<TOP_EXT_R>]b.north east);
31
+ \draw[<BOTTOM>] ([xshift=<BOTTOM_EXT_L>]b.south west) -- ([xshift=<BOTTOM_EXT_R>]b.south east);
32
+ \draw[<LEFT>] ([yshift=<LEFT_EXT_T>]b.north west) -- ([yshift=<LEFT_EXT_B>]b.south west);
33
+ \draw[<RIGHT>] ([yshift=<RIGHT_EXT_T>]b.north east) -- ([yshift=<RIGHT_EXT_B>]b.south east);
29
34
  \end{tikzpicture}
30
35
  """.strip()
31
36
 
37
+ # Line styles for box edges
38
+ EDGE_SOLID = "line width=0.8pt"
39
+ EDGE_DASHED = "dashed"
40
+ EDGE_NONE = "draw=none" # Don't draw this edge (to avoid double dashed lines)
41
+
32
42
  TIKZBOX_END = "\n}"
33
43
 
34
44
  IMG = r"""\includegraphics<IMGWIDTH>{<IMGPATH>}"""
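
A small sketch of how one placeholder line of TIKZBOX_INNER ends up after make_tikzbox's .replace() chain, using the constants above (the concrete extension values are just an example):

top_line = r"\draw[<TOP>] ([xshift=<TOP_EXT_L>]b.north west) -- ([xshift=<TOP_EXT_R>]b.north east);"
filled = (
    top_line.replace("<TOP>", "line width=0.8pt")  # EDGE_SOLID
    .replace("<TOP_EXT_L>", "-0.75mm")
    .replace("<TOP_EXT_R>", "0.75mm")
)
# filled == r"\draw[line width=0.8pt] ([xshift=-0.75mm]b.north west) -- ([xshift=0.75mm]b.north east);"
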
chgksuite/handouter/utils.py CHANGED
@@ -14,6 +14,7 @@ RESERVED_WORDS = [
14
14
  "raw_tex",
15
15
  "color",
16
16
  "handouts_per_team",
17
+ "grouping",
17
18
  ]
18
19
 
19
20
 
@@ -42,6 +43,13 @@ def wrap_val(key, val):
42
43
  return int(val.strip())
43
44
  if key in ("resize_image", "font_size"):
44
45
  return float(val.strip())
46
+ if key == "grouping":
47
+ val = val.strip().lower()
48
+ if val not in ("horizontal", "vertical"):
49
+ raise ValueError(
50
+ f"Invalid grouping value: {val}. Must be 'horizontal' or 'vertical'."
51
+ )
52
+ return val
45
53
  return val.strip()
46
54
 
47
55
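
Hedged sketch of the new "grouping" handling (assuming this hunk belongs to chgksuite/handouter/utils.py, as the RECORD changes further below suggest):

from chgksuite.handouter.utils import wrap_val

wrap_val("grouping", " Vertical ")  # -> "vertical": stripped and lower-cased
wrap_val("grouping", "diagonal")    # raises ValueError: must be 'horizontal' or 'vertical'
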
 
chgksuite/lastdir ADDED
@@ -0,0 +1 @@
1
+ /Users/pecheny/chgksuite1/tmpz_2mf3o8/tmptke0lqfv
chgksuite/parser.py CHANGED
@@ -1,7 +1,6 @@
1
1
  #!/usr/bin/env python
2
2
  # -*- coding: utf-8 -*-
3
3
  import base64
4
- import codecs
5
4
  import datetime
6
5
  import hashlib
7
6
  import itertools
@@ -18,8 +17,9 @@ import time
18
17
 
19
18
  import bs4
20
19
  import chardet
21
- import dashtable
22
20
  import mammoth
21
+
22
+ from chgksuite._html2md import html2md
23
23
  import pypandoc
24
24
  import requests
25
25
  import toml
@@ -38,6 +38,7 @@ from chgksuite.common import (
38
38
  init_logger,
39
39
  load_settings,
40
40
  log_wrap,
41
+ read_text_file,
41
42
  set_lastdir,
42
43
  )
43
44
  from chgksuite.composer import gui_compose
@@ -47,7 +48,7 @@ from chgksuite.typotools import re_url
47
48
  from chgksuite.typotools import remove_excessive_whitespace as rew
48
49
 
49
50
 
50
- SEP = os.linesep
51
+ SEP = "\n"
51
52
  EDITORS = {
52
53
  "win32": "notepad",
53
54
  "linux2": "xdg-open", # python2
@@ -61,7 +62,7 @@ def partition(alist, indices):
61
62
 
62
63
 
63
64
  def load_regexes(regexfile):
64
- with codecs.open(regexfile, "r", "utf8") as f:
65
+ with open(regexfile, "r", encoding="utf-8") as f:
65
66
  regexes = json.loads(f.read())
66
67
  return {k: re.compile(v) for k, v in regexes.items()}
67
68
 
@@ -526,7 +527,7 @@ class ChgkParser:
526
527
  )
527
528
 
528
529
  if debug:
529
- with codecs.open("debug_0.txt", "w", "utf8") as f:
530
+ with open("debug_0.txt", "w", encoding="utf-8") as f:
530
531
  f.write(text)
531
532
 
532
533
  # 1.
@@ -558,7 +559,7 @@ class ChgkParser:
558
559
  i = 0
559
560
 
560
561
  if debug:
561
- with codecs.open("debug_1.json", "w", "utf8") as f:
562
+ with open("debug_1.json", "w", encoding="utf-8") as f:
562
563
  f.write(json.dumps(self.structure, ensure_ascii=False, indent=4))
563
564
 
564
565
  self.process_single_number_lines()
@@ -574,7 +575,7 @@ class ChgkParser:
574
575
  element[0] = "question"
575
576
 
576
577
  if debug:
577
- with codecs.open("debug_1a.json", "w", "utf8") as f:
578
+ with open("debug_1a.json", "w", encoding="utf-8") as f:
578
579
  f.write(json.dumps(self.structure, ensure_ascii=False, indent=4))
579
580
 
580
581
  # 2.
@@ -584,7 +585,7 @@ class ChgkParser:
584
585
  self.merge_to_x_until_nextfield("comment")
585
586
 
586
587
  if debug:
587
- with codecs.open("debug_2.json", "w", "utf8") as f:
588
+ with open("debug_2.json", "w", encoding="utf-8") as f:
588
589
  f.write(json.dumps(self.structure, ensure_ascii=False, indent=4))
589
590
 
590
591
  # 3.
@@ -647,7 +648,7 @@ class ChgkParser:
647
648
  self.merge_to_x_until_nextfield("nezachet")
648
649
 
649
650
  if debug:
650
- with codecs.open("debug_3.json", "w", "utf8") as f:
651
+ with open("debug_3.json", "w", encoding="utf-8") as f:
651
652
  f.write(json.dumps(self.structure, ensure_ascii=False, indent=4))
652
653
 
653
654
  # 4.
@@ -660,7 +661,7 @@ class ChgkParser:
660
661
  self.merge_to_next(0)
661
662
 
662
663
  if debug:
663
- with codecs.open("debug_3a.json", "w", "utf8") as f:
664
+ with open("debug_3a.json", "w", encoding="utf-8") as f:
664
665
  f.write(
665
666
  json.dumps(
666
667
  list(enumerate(self.structure)), ensure_ascii=False, indent=4
@@ -718,7 +719,7 @@ class ChgkParser:
718
719
  idx += 1
719
720
 
720
721
  if debug:
721
- with codecs.open("debug_4.json", "w", "utf8") as f:
722
+ with open("debug_4.json", "w", encoding="utf-8") as f:
722
723
  f.write(json.dumps(self.structure, ensure_ascii=False, indent=4))
723
724
 
724
725
  # 5.
@@ -798,7 +799,7 @@ class ChgkParser:
798
799
  )
799
800
 
800
801
  if debug:
801
- with codecs.open("debug_5.json", "w", "utf8") as f:
802
+ with open("debug_5.json", "w", encoding="utf-8") as f:
802
803
  f.write(json.dumps(self.structure, ensure_ascii=False, indent=4))
803
804
 
804
805
  # 6.
@@ -853,7 +854,7 @@ class ChgkParser:
853
854
  final_structure.append(["Question", current_question])
854
855
 
855
856
  if debug:
856
- with codecs.open("debug_6.json", "w", "utf8") as f:
857
+ with open("debug_6.json", "w", encoding="utf-8") as f:
857
858
  f.write(json.dumps(final_structure, ensure_ascii=False, indent=4))
858
859
 
859
860
  # 7.
@@ -899,7 +900,7 @@ class ChgkParser:
899
900
  element[1] = self._process_images_in_text(element[1])
900
901
 
901
902
  if debug:
902
- with codecs.open("debug_final.json", "w", "utf8") as f:
903
+ with open("debug_final.json", "w", encoding="utf-8") as f:
903
904
  f.write(json.dumps(final_structure, ensure_ascii=False, indent=4))
904
905
  return final_structure
905
906
 
@@ -916,6 +917,9 @@ class UnknownEncodingException(Exception):
916
917
 
917
918
  def chgk_parse_txt(txtfile, encoding=None, defaultauthor="", args=None, logger=None):
918
919
  raw = open(txtfile, "rb").read()
920
+ # Fix corrupted line endings at byte level before decoding
921
+ if b"\r\r\n" in raw:
922
+ raw = raw.replace(b"\r\r\n", b"\n")
919
923
  if not encoding:
920
924
  if chardet.detect(raw)["confidence"] > 0.7:
921
925
  encoding = chardet.detect(raw)["encoding"]
@@ -926,9 +930,10 @@ def chgk_parse_txt(txtfile, encoding=None, defaultauthor="", args=None, logger=N
926
930
  "or resave with a less exotic encoding".format(txtfile)
927
931
  )
928
932
  text = raw.decode(encoding)
929
- text = text.replace("\r", "")
933
+ # Normalize any remaining line endings
934
+ text = text.replace("\r\n", "\n").replace("\r", "\n")
930
935
  if text[0:10] == "Чемпионат:":
931
- return chgk_parse_db(text.replace("\r", ""), debug=args.debug, logger=logger)
936
+ return chgk_parse_db(text, debug=args.debug, logger=logger)
932
937
  return chgk_parse(text.replace("_", "\\_"), defaultauthor=defaultauthor, args=args)
933
938
 
934
939
 
@@ -982,8 +987,8 @@ def chgk_parse_docx(docxfile, defaultauthor="", args=None, logger=None):
982
987
  with open(docxfile, "rb") as docx_file:
983
988
  html = mammoth.convert_to_html(docx_file).value
984
989
  if args.debug:
985
- with codecs.open(
986
- os.path.join(target_dir, "debugdebug.pydocx"), "w", "utf8"
990
+ with open(
991
+ os.path.join(target_dir, "debugdebug.pydocx"), "w", encoding="utf-8"
987
992
  ) as dbg:
988
993
  dbg.write(html)
989
994
  input_docx = (
@@ -994,8 +999,8 @@ def chgk_parse_docx(docxfile, defaultauthor="", args=None, logger=None):
994
999
  bsoup = BeautifulSoup(input_docx, "html.parser")
995
1000
 
996
1001
  if args.debug:
997
- with codecs.open(
998
- os.path.join(target_dir, "debug.pydocx"), "w", "utf8"
1002
+ with open(
1003
+ os.path.join(target_dir, "debug.pydocx"), "w", encoding="utf-8"
999
1004
  ) as dbg:
1000
1005
  dbg.write(input_docx)
1001
1006
 
@@ -1065,7 +1070,7 @@ def chgk_parse_docx(docxfile, defaultauthor="", args=None, logger=None):
1065
1070
  ensure_line_breaks(tag)
1066
1071
  for tag in bsoup.find_all("table"):
1067
1072
  try:
1068
- table = dashtable.html2md(str(tag))
1073
+ table = html2md(str(tag))
1069
1074
  tag.insert_before(table)
1070
1075
  except (TypeError, ValueError):
1071
1076
  logger.error(f"couldn't parse html table: {str(tag)}")
@@ -1096,12 +1101,12 @@ def chgk_parse_docx(docxfile, defaultauthor="", args=None, logger=None):
1096
1101
  tag.unwrap()
1097
1102
 
1098
1103
  if args.debug:
1099
- with codecs.open(
1100
- os.path.join(target_dir, "debug_raw.html"), "w", "utf8"
1104
+ with open(
1105
+ os.path.join(target_dir, "debug_raw.html"), "w", encoding="utf-8"
1101
1106
  ) as dbg:
1102
1107
  dbg.write(str(bsoup))
1103
- with codecs.open(
1104
- os.path.join(target_dir, "debug.html"), "w", "utf8"
1108
+ with open(
1109
+ os.path.join(target_dir, "debug.html"), "w", encoding="utf-8"
1105
1110
  ) as dbg:
1106
1111
  dbg.write(bsoup.prettify())
1107
1112
 
@@ -1139,7 +1144,9 @@ def chgk_parse_docx(docxfile, defaultauthor="", args=None, logger=None):
1139
1144
  txt = txt.replace(f"IMGPATH({i})", elem)
1140
1145
 
1141
1146
  if args.debug:
1142
- with codecs.open(os.path.join(target_dir, "debug.debug"), "w", "utf8") as dbg:
1147
+ with open(
1148
+ os.path.join(target_dir, "debug.debug"), "w", encoding="utf-8"
1149
+ ) as dbg:
1143
1150
  dbg.write(txt)
1144
1151
 
1145
1152
  final_structure = chgk_parse(txt, defaultauthor=defaultauthor, args=args)
@@ -1173,7 +1180,7 @@ def chgk_parse_wrapper(path, args, logger=None):
1173
1180
  sys.exit()
1174
1181
  outfilename = os.path.join(target_dir, make_filename(abspath, "4s", args))
1175
1182
  logger.info("Output: {}".format(os.path.abspath(outfilename)))
1176
- with codecs.open(outfilename, "w", "utf8") as output_file:
1183
+ with open(outfilename, "w", encoding="utf-8") as output_file:
1177
1184
  output_file.write(compose_4s(final_structure, args=args))
1178
1185
  return outfilename
1179
1186
 
chgksuite/parser_db.py CHANGED
@@ -1,7 +1,6 @@
1
1
  #!/usr/bin/env python
2
2
  # -*- coding: utf-8 -*-
3
3
 
4
- import codecs
5
4
  import json
6
5
  import os
7
6
  import re
@@ -436,7 +435,7 @@ def chgk_parse_db(text, debug=False, logger=False):
436
435
  append_question(lexer)
437
436
 
438
437
  if debug:
439
- with codecs.open("debug_final.json", "w", "utf8") as f:
438
+ with open("debug_final.json", "w", encoding="utf-8") as f:
440
439
  f.write(json.dumps(lexer.structure, ensure_ascii=False, indent=4))
441
440
 
442
441
  return lexer.structure
chgksuite/trello.py CHANGED
@@ -1,6 +1,5 @@
1
1
  #!/usr/bin/env python
2
2
  # -*- coding: utf-8 -*-
3
- import codecs
4
3
  import json
5
4
  import os
6
5
  import pdb
@@ -16,6 +15,7 @@ from chgksuite.common import (
16
15
  get_lastdir,
17
16
  get_source_dirs,
18
17
  log_wrap,
18
+ read_text_file,
19
19
  set_lastdir,
20
20
  )
21
21
 
@@ -51,9 +51,7 @@ def upload_file(filepath, trello, list_name=None):
51
51
  raise Exception(f"list '{list_name}' not found")
52
52
  assert lid is not None
53
53
  print(f"uploading to list '{list_['name']}'")
54
- content = ""
55
- with codecs.open(filepath, "r", "utf8") as f:
56
- content = f.read()
54
+ content = read_text_file(filepath)
57
55
  cards = re.split(r"(\r?\n){2,}", content)
58
56
  cards = [x for x in cards if x != "" and x != "\n" and x != "\r\n"]
59
57
  for card in cards:
@@ -255,7 +253,7 @@ def gui_trello_download(args):
255
253
 
256
254
  board_id_path = os.path.join(args.folder, ".board_id")
257
255
  if os.path.isfile(board_id_path):
258
- with codecs.open(board_id_path, "r", "utf8") as f:
256
+ with open(board_id_path, "r", encoding="utf-8") as f:
259
257
  board_id = f.read().rstrip()
260
258
  else:
261
259
  board_id = get_board_id(path=args.folder)
@@ -401,14 +399,14 @@ def gui_trello_download(args):
401
399
  result.extend(_lists[_list["name"]])
402
400
  filename = "singlefile.4s"
403
401
  print("outputting {}".format(filename))
404
- with codecs.open(filename, "w", "utf8") as f:
402
+ with open(filename, "w", encoding="utf-8") as f:
405
403
  for item in result:
406
404
  f.write("\n" + item + "\n")
407
405
  else:
408
406
  for _list in _lists:
409
407
  filename = "{}.4s".format(_list)
410
408
  print("outputting {}".format(filename))
411
- with codecs.open(filename, "w", "utf8") as f:
409
+ with open(filename, "w", encoding="utf-8") as f:
412
410
  for item in _lists[_list]:
413
411
  f.write("\n" + item + "\n")
414
412
 
@@ -426,7 +424,7 @@ def get_board_id(path=None):
426
424
  if "trello.com" in board_id:
427
425
  board_id = re_bi.search(board_id).group(1)
428
426
  if path:
429
- with codecs.open(os.path.join(path, ".board_id"), "w", "utf8") as f:
427
+ with open(os.path.join(path, ".board_id"), "w", encoding="utf-8") as f:
430
428
  f.write(board_id)
431
429
  return board_id
432
430
 
@@ -437,7 +435,7 @@ def get_token(tokenpath, args):
437
435
  else:
438
436
  webbrowser.open(TRELLO_URL)
439
437
  token = input("Please paste the obtained token: ").rstrip()
440
- with codecs.open(tokenpath, "w", "utf8") as f:
438
+ with open(tokenpath, "w", encoding="utf-8") as f:
441
439
  f.write(token)
442
440
  return token
443
441
 
@@ -452,7 +450,7 @@ def gui_trello(args):
452
450
  if not os.path.isfile(tokenpath):
453
451
  token = get_token(tokenpath, args)
454
452
  else:
455
- with codecs.open(tokenpath, "r", "utf8") as f:
453
+ with open(tokenpath, "r", encoding="utf-8") as f:
456
454
  token = f.read().rstrip()
457
455
 
458
456
  with open(os.path.join(resourcedir, "trello.json")) as f:
chgksuite/typotools.py CHANGED
@@ -92,13 +92,13 @@ def uni_normalize(k):
92
92
 
93
93
 
94
94
  def cyr_lat_check_char(i, char, word):
95
- if char in CYRILLIC_CHARS:
95
+ if char.lower() in CYRILLIC_CHARS:
96
96
  return
97
97
  if not (
98
- (i == 0 or word[i - 1] in CYRILLIC_CHARS or not word[i - 1].isalpha())
98
+ (i == 0 or word[i - 1].lower() in CYRILLIC_CHARS or not word[i - 1].isalpha())
99
99
  and (
100
100
  i == len(word) - 1
101
- or word[i + 1] in CYRILLIC_CHARS
101
+ or word[i + 1].lower() in CYRILLIC_CHARS
102
102
  or not word[i + 1].isalpha()
103
103
  )
104
104
  ):
@@ -121,7 +121,7 @@ def cyr_lat_check_word(word):
121
121
  if check_result:
122
122
  replacements[char] = check_result
123
123
  elif (
124
- char in CYRILLIC_CHARS
124
+ char.lower() in CYRILLIC_CHARS
125
125
  and i < len(word) - 1
126
126
  and word[i + 1] in ACCENTS_TO_FIX
127
127
  ):
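
An illustration of why the .lower() calls matter (this assumes CYRILLIC_CHARS holds only lower-case letters, which is what the change implies; the set literal below is a stand-in, not the module's actual constant):

CYRILLIC_CHARS = set("абвгдеёжзийклмнопрстуфхцчшщъыьэюя")  # stand-in for illustration
char = "С"  # upper-case Cyrillic Es, easily confused with Latin "C"
char in CYRILLIC_CHARS           # False: the old check could flag it as a Latin lookalike
char.lower() in CYRILLIC_CHARS   # True: the new check recognizes it as Cyrillic
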
chgksuite/version.py CHANGED
@@ -1 +1 @@
1
- __version__ = "0.27.0b5"
1
+ __version__ = "0.27.2"
chgksuite-0.27.0b5.dist-info/METADATA → chgksuite-0.27.2.dist-info/METADATA RENAMED
@@ -1,23 +1,21 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: chgksuite
3
- Version: 0.27.0b5
3
+ Version: 0.27.2
4
4
  Summary: A package for chgk automation
5
+ Project-URL: Homepage, https://gitlab.com/peczony/chgksuite
5
6
  Author-email: Alexander Pecheny <ap@pecheny.me>
6
7
  License-Expression: MIT
7
- Project-URL: Homepage, https://gitlab.com/peczony/chgksuite
8
- Classifier: Programming Language :: Python :: 3
8
+ License-File: LICENSE
9
9
  Classifier: Operating System :: OS Independent
10
+ Classifier: Programming Language :: Python :: 3
10
11
  Requires-Python: >=3.9
11
- Description-Content-Type: text/markdown
12
- License-File: LICENSE
13
12
  Requires-Dist: beautifulsoup4
14
13
  Requires-Dist: chardet
15
- Requires-Dist: dashtable
16
14
  Requires-Dist: dateparser
17
15
  Requires-Dist: mammoth
18
16
  Requires-Dist: openpyxl
19
17
  Requires-Dist: parse
20
- Requires-Dist: Pillow
18
+ Requires-Dist: pillow
21
19
  Requires-Dist: ply
22
20
  Requires-Dist: pypandoc
23
21
  Requires-Dist: pypdf
@@ -29,7 +27,7 @@ Requires-Dist: requests
29
27
  Requires-Dist: toml
30
28
  Requires-Dist: urllib3>=2.6.2
31
29
  Requires-Dist: watchdog
32
- Dynamic: license-file
30
+ Description-Content-Type: text/markdown
33
31
 
34
32
  **chgksuite** is a utility that helps chgk editors.
35
33
 
chgksuite-0.27.0b5.dist-info/RECORD → chgksuite-0.27.2.dist-info/RECORD RENAMED
@@ -1,23 +1,25 @@
1
1
  chgksuite/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
2
  chgksuite/__main__.py,sha256=0-_jfloveTW3SZYW5XEagbyaHKGCiDhGNgcLxsT_dMs,140
3
- chgksuite/cli.py,sha256=95Xj1jrnybtso9qSCAwgv6utCrhJF43oFXlH5LWyHKA,40918
4
- chgksuite/common.py,sha256=M4mYjVAzF7uk9ElF0XmO3eggxSFf2bPlKinT2nEZPXA,11350
5
- chgksuite/parser.py,sha256=2UL-2dJtCFYEjakUfeh3n1E4epMKPo0pjdaC_cBZplM,45775
6
- chgksuite/parser_db.py,sha256=gh5AM80UR_hQ0r_-AwWLbqZCl2V0j3HAgj6BilImXyc,11116
7
- chgksuite/trello.py,sha256=I1JmIbZVJjJuuRO0g26fNt8deRo9hDlk1xBIf8w31Rs,14724
8
- chgksuite/typotools.py,sha256=0fQOjt5H3NRl0JeGTp2x-k-2ZN68E9Z9WX8D7lCwXmA,12765
9
- chgksuite/version.py,sha256=WOGW6JeN53_4Rrn5xAY_lTn9BFzuA5RLVZ8RjXiwSec,25
3
+ chgksuite/_html2md.py,sha256=IzPlRo4dVZNVdlnoCQFjSEfvpFZ0KdvVzfKSeIt15lM,2454
4
+ chgksuite/cli.py,sha256=fHa7HNJeQeUNXpbqnMnSXHOMpbxpaph6d3KdVNx9uNg,41257
5
+ chgksuite/common.py,sha256=27HEx5Us0xXywUAWtjlkhsXs67-Cx7ylJSlUViUsOgU,11778
6
+ chgksuite/lastdir,sha256=BbZVRYZnXBQzJUrl7a2e4xuUcfmq6asNI705pTxBfD4,49
7
+ chgksuite/parser.py,sha256=JJ_koqLDHAl-IHUe4sTyOK2gySL6uVW5HJmO1xcqD8Q,46042
8
+ chgksuite/parser_db.py,sha256=Ngh2ZYhAyetb6Sa-5xC9aX8quX9Ar1WheSfhSy-JADw,11105
9
+ chgksuite/trello.py,sha256=hc_RMlWrNrPzg9AsgC8KE9ow3JUbetMJM_HuiJlBSIk,14693
10
+ chgksuite/typotools.py,sha256=J2AEQbfcR0HHSu5WCALpj4Ya_ngOMXRh7esJgcybWQM,12797
11
+ chgksuite/version.py,sha256=_CerHyxnUJ2hU0sB7noT3JBjjI0ohEM5boAPBoFxOg4,23
10
12
  chgksuite/vulture_whitelist.py,sha256=P__p_X0zt10ivddIf81uyxsobV14vFg8uS2lt4foYpc,3582
11
- chgksuite/composer/__init__.py,sha256=k6bnuB8ftPsBY-Q-iLm42S10jWXAJebmYTdW_BZiKAM,6570
12
- chgksuite/composer/chgksuite_parser.py,sha256=g1KP0Jm4Zt4I06WGbFfKz6qIv4x1bLuPozu4QOV2wfw,9049
13
+ chgksuite/composer/__init__.py,sha256=a02nOz1QqPi84621-v3JD0Nn1_M0_fHyzKqG5Wp2ahI,6511
14
+ chgksuite/composer/chgksuite_parser.py,sha256=ItlTenviFDuqP-f1960nzD-gRPFDQy4RdOL39PswTvg,9044
13
15
  chgksuite/composer/composer_common.py,sha256=kc-_Tc9NjevfXGj4fXoa9fye9AO0EuMSnEPJnS0n-aQ,16281
14
- chgksuite/composer/db.py,sha256=71cINE_V8s6YidvqpmBmmlWbcXraUEGZA1xpVFAUENw,8173
16
+ chgksuite/composer/db.py,sha256=DI-goR4V69S8bufNfW5smTFG9puiyjcxC1u1TFuhfYs,8162
15
17
  chgksuite/composer/docx.py,sha256=cxPgjykAlU3XxKCtM3K13Wm6IcKqAHo0ngLqa8-2opM,23716
16
- chgksuite/composer/latex.py,sha256=_IKylzdDcokgXYvvxsVSiq-Ba5fVirWcfCp6eOyx6zQ,9242
17
- chgksuite/composer/lj.py,sha256=nty3Zs3N1H0gNK378U04aAHo71_5cABhCM1Mm9jiUEA,15213
18
- chgksuite/composer/openquiz.py,sha256=D9q7lcEgUGwR1UF6Qp3I-wBJucy9vMnrxoSOst4V45Q,7001
18
+ chgksuite/composer/latex.py,sha256=Ouq3OkUA0oS_zhCOiPZ2tY2sMe3OVg84Cead2GgJi2c,9231
19
+ chgksuite/composer/lj.py,sha256=7zrLrvbgYEoiF-NC7zyDaJTvPMhwt8tQT_KWgsAeG4A,15202
20
+ chgksuite/composer/markdown.py,sha256=hWajBAvop_BTyBI6rzeIwAaj7p_6p2kQre4WDMalD90,4343
21
+ chgksuite/composer/openquiz.py,sha256=BF506tH6b1IoocC61l5xBU969l38S1IjqMi6mqhG_HI,6990
19
22
  chgksuite/composer/pptx.py,sha256=9O0tfx2xpyx9Y4ceatVIXdiwvslnj8gliZIlxsDe5Ow,23971
20
- chgksuite/composer/reddit.py,sha256=-Eg4CqMHhyGGfCteVwdQdtE1pfUXQ42XcP5OYUrBXmo,3878
21
23
  chgksuite/composer/stats.py,sha256=GbraSrjaZ8Mc2URs5aGAsI4ekboAKzlJJOqsbe96ELA,3995
22
24
  chgksuite/composer/telegram.py,sha256=KM9Bnkf2bxdJNMrhjCCw2xx-sWcIQioBc1FdSY4OX-g,47431
23
25
  chgksuite/composer/telegram_bot.py,sha256=xT5D39m4zGmIbHV_ZfyQ9Rc8PAmG2V5FGUeDKpkgyTw,3767
@@ -25,9 +27,9 @@ chgksuite/handouter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSu
25
27
  chgksuite/handouter/gen.py,sha256=CJfGl8R2vnElnsjBo0n4u-y8KD-l2qxrIuUHj5WZBuI,5241
26
28
  chgksuite/handouter/installer.py,sha256=u4jQKeCn0VjOnaDFezx35g8oRjji5edvYGj5xSHCEW4,7574
27
29
  chgksuite/handouter/pack.py,sha256=H-Ln1JqKK2u3jFI5wwsh7pQdJBpQJ-s8gV9iECQ3kgU,2504
28
- chgksuite/handouter/runner.py,sha256=QPqLKbfNDL0ucJ365lvKCwRcPKrhxRBJ-YFSbQLU9_8,8641
29
- chgksuite/handouter/tex_internals.py,sha256=pxvMXkqlMfzBbAlTkFCJBlcT7t1XS0Z9XOzVboc2f2Y,966
30
- chgksuite/handouter/utils.py,sha256=0RoECvHzfmwWnRL2jj4WKh22oTCPh2MXid_a9ShplDA,2243
30
+ chgksuite/handouter/runner.py,sha256=dnDHgKPaa4WuxcmZf0E8BAJzDdTbFRksrt-gK0lEkiA,17674
31
+ chgksuite/handouter/tex_internals.py,sha256=j9zvai6zmaEg8osXnJ4yaXAx-89ufcpypySjyoxGFM0,1544
32
+ chgksuite/handouter/utils.py,sha256=hfRlBhOYFCPqYrof2yi4NlrQRty5bhM-Xy7jgGc60o8,2518
31
33
  chgksuite/resources/cheader.tex,sha256=Jfe3LESk0VIV0HCObbajSQpEMljaIDAIEGSs6YY9rTk,3454
32
34
  chgksuite/resources/fix-unnumbered-sections.sty,sha256=FN6ZSWC6MvoRoThPm5AxCF98DdgcxbxyBYG6YImM05s,1409
33
35
  chgksuite/resources/labels_az.toml,sha256=hiXt-54nMQ4Ie0RUVbQhXxlj47NEc2qNwe7O1NIMe-8,662
@@ -54,9 +56,8 @@ chgksuite/resources/regexes_uz_cyr.json,sha256=D4AyaEPEY753I47Ky2Fwol_4kxQsl-Yu9
54
56
  chgksuite/resources/template.docx,sha256=Do29TAsg3YbH0rRSaXhVzKEoh4pwXkklW_idWA34HVE,11189
55
57
  chgksuite/resources/template.pptx,sha256=hEFWqE-yYpwZ8ejrMCJIPEyoMT3eDqaqtiEeQ7I4fyk,29777
56
58
  chgksuite/resources/trello.json,sha256=M5Q9JR-AAJF1u16YtNAxDX-7c7VoVTXuq4POTqYvq8o,555
57
- chgksuite-0.27.0b5.dist-info/licenses/LICENSE,sha256=_a1yfntuPmctLsuiE_08xMSORuCfGS8X5hQph2U_PUw,1081
58
- chgksuite-0.27.0b5.dist-info/METADATA,sha256=34FY_Q8wUwKtlnUmcXvXEdRR4RRWJusn1sZ734sgX0w,1201
59
- chgksuite-0.27.0b5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
60
- chgksuite-0.27.0b5.dist-info/entry_points.txt,sha256=lqjX6ULQZGDt0rgouTXBuwEPiwKkDQkSiNsT877A_Jg,54
61
- chgksuite-0.27.0b5.dist-info/top_level.txt,sha256=cSWiRBOGZW9nIO6Rv1IrEfwPgV2ZWs87QV9wPXeBGqM,10
62
- chgksuite-0.27.0b5.dist-info/RECORD,,
59
+ chgksuite-0.27.2.dist-info/METADATA,sha256=zGPoXuzOtUiNMdE7NDeSJE25K6XHaoQ4pPLBkfBLz_c,1152
60
+ chgksuite-0.27.2.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
61
+ chgksuite-0.27.2.dist-info/entry_points.txt,sha256=lqjX6ULQZGDt0rgouTXBuwEPiwKkDQkSiNsT877A_Jg,54
62
+ chgksuite-0.27.2.dist-info/licenses/LICENSE,sha256=_a1yfntuPmctLsuiE_08xMSORuCfGS8X5hQph2U_PUw,1081
63
+ chgksuite-0.27.2.dist-info/RECORD,,
chgksuite-0.27.0b5.dist-info/WHEEL → chgksuite-0.27.2.dist-info/WHEEL RENAMED
@@ -1,5 +1,4 @@
1
1
  Wheel-Version: 1.0
2
- Generator: setuptools (80.9.0)
2
+ Generator: hatchling 1.28.0
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
-
@@ -1 +0,0 @@
1
- chgksuite