chgksuite 0.27.0b5__py3-none-any.whl → 0.27.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
chgksuite/_html2md.py ADDED
@@ -0,0 +1,94 @@
1
+ """Simple HTML table to Markdown converter.
2
+
3
+ Replaces dashtable.html2md with a minimal implementation that avoids
4
+ deprecated BeautifulSoup methods.
5
+ """
6
+
7
+ from bs4 import BeautifulSoup
8
+
9
+
10
def html2md(html_string: str) -> str:
    """Convert the first HTML table in *html_string* to a Markdown table.

    Parameters
    ----------
    html_string : str
        HTML string containing a table

    Returns
    -------
    str
        The table formatted as Markdown; an empty string if no table,
        no rows, or no cells are found.
    """
    soup = BeautifulSoup(html_string, "html.parser")
    table = soup.find("table")

    if not table:
        return ""

    rows = table.find_all("tr")
    if not rows:
        return ""

    # Extract all rows as lists of normalized cell texts.
    data = []
    for row in rows:
        # Collect header AND data cells in document order. The previous
        # approach ("th" with a "td" fallback) silently dropped every
        # <td> in a row that also contained a <th> (e.g. a row-header
        # column), truncating the table.
        cells = row.find_all(["th", "td"])

        # Normalize whitespace inside each cell to single spaces.
        row_data = [" ".join(cell.get_text().split()) for cell in cells]
        if row_data:
            data.append(row_data)

    if not data:
        return ""

    # Normalize row lengths (pad shorter rows with empty cells).
    max_cols = max(len(row) for row in data)
    for row in data:
        row.extend([""] * (max_cols - len(row)))

    # Column widths: widest content + 2 for space cushions, minimum 3 so
    # the Markdown separator row ("---") stays valid.
    widths = [
        max(max(len(row[col]) for row in data) + 2, 3) for col in range(max_cols)
    ]

    # Build markdown table.
    lines = []

    # Header row (centered, with padding).
    header = (
        "|" + "|".join(_center(cell, widths[i]) for i, cell in enumerate(data[0])) + "|"
    )
    lines.append(header)

    # Separator row (no spaces to avoid typotools converting --- to em-dash).
    separator = "|" + "|".join("-" * w for w in widths) + "|"
    lines.append(separator)

    # Data rows (centered, with padding).
    for row in data[1:]:
        line = (
            "|" + "|".join(_center(cell, widths[i]) for i, cell in enumerate(row)) + "|"
        )
        lines.append(line)

    return "\n".join(lines)
86
+
87
+
88
+ def _center(text: str, width: int) -> str:
89
+ """Center text within width, with space padding."""
90
+ text = text.strip()
91
+ padding = width - len(text)
92
+ left = padding // 2
93
+ right = padding - left
94
+ return " " * left + text + " " * right
chgksuite/cli.py CHANGED
@@ -596,6 +596,15 @@ class ArgparseBuilder:
596
596
  caption="Имя 4s-файла",
597
597
  filetypes=[("chgksuite markup files", "*.4s")],
598
598
  )
599
+ cmdcompose_markdown = cmdcompose_filetype.add_parser("markdown")
600
+ self.add_argument(
601
+ cmdcompose_markdown,
602
+ "filename",
603
+ nargs="*",
604
+ help="file(s) to compose from.",
605
+ caption="Имя 4s-файла",
606
+ filetypes=[("chgksuite markup files", "*.4s")],
607
+ )
599
608
  cmdcompose_pptx = cmdcompose_filetype.add_parser("pptx")
600
609
  self.add_argument(
601
610
  cmdcompose_pptx,
chgksuite/common.py CHANGED
@@ -1,7 +1,6 @@
1
1
  #!/usr/bin/env python
2
2
  # -*- coding: utf-8 -*-
3
3
  import argparse
4
- import codecs
5
4
  import csv
6
5
  import itertools
7
6
  import json
@@ -114,7 +113,7 @@ class DefaultArgs:
114
113
  def set_lastdir(path):
115
114
  chgksuite_dir = get_chgksuite_dir()
116
115
  lastdir = os.path.join(chgksuite_dir, "lastdir")
117
- with codecs.open(lastdir, "w", "utf8") as f:
116
+ with open(lastdir, "w", encoding="utf-8") as f:
118
117
  f.write(path)
119
118
 
120
119
 
@@ -122,7 +121,7 @@ def get_lastdir():
122
121
  chgksuite_dir = get_chgksuite_dir()
123
122
  lastdir = os.path.join(chgksuite_dir, "lastdir")
124
123
  if os.path.isfile(lastdir):
125
- with codecs.open(lastdir, "r", "utf8") as f:
124
+ with open(lastdir, "r", encoding="utf-8") as f:
126
125
  return f.read().rstrip()
127
126
  return "."
128
127
 
@@ -1,6 +1,5 @@
1
1
  #!usr/bin/env python
2
2
  # -*- coding: utf-8 -*-
3
- import codecs
4
3
  import json
5
4
  import os
6
5
  import shutil
@@ -22,7 +21,7 @@ from chgksuite.composer.docx import DocxExporter
22
21
  from chgksuite.composer.latex import LatexExporter
23
22
  from chgksuite.composer.lj import LjExporter
24
23
  from chgksuite.composer.pptx import PptxExporter
25
- from chgksuite.composer.reddit import RedditExporter
24
+ from chgksuite.composer.markdown import MarkdownExporter
26
25
  from chgksuite.composer.stats import StatsAdder
27
26
  from chgksuite.composer.telegram import TelegramExporter
28
27
  from chgksuite.composer.openquiz import OpenquizExporter
@@ -75,11 +74,13 @@ def process_file_wrapper(filename, sourcedir, targetdir, args):
75
74
 
76
75
  def parse_filepath(filepath, args=None):
77
76
  args = args or DefaultArgs()
78
- with codecs.open(filepath, "r", "utf8") as input_file:
77
+ with open(filepath, "r", encoding="utf-8") as input_file:
79
78
  input_text = input_file.read()
80
79
  input_text = input_text.replace("\r", "")
81
80
  debug_dir = os.path.dirname(os.path.abspath(filepath))
82
- return parse_4s(input_text, randomize=args.randomize, debug=args.debug, debug_dir=debug_dir)
81
+ return parse_4s(
82
+ input_text, randomize=args.randomize, debug=args.debug, debug_dir=debug_dir
83
+ )
83
84
 
84
85
 
85
86
  def make_merged_filename(filelist):
@@ -106,7 +107,7 @@ def process_file(filename, tmp_dir, targetdir, args=None, logger=None):
106
107
  targetdir,
107
108
  make_filename(os.path.basename(filename), "dbg", args),
108
109
  )
109
- with codecs.open(debug_fn, "w", "utf8") as output_file:
110
+ with open(debug_fn, "w", encoding="utf-8") as output_file:
110
111
  output_file.write(json.dumps(structure, indent=2, ensure_ascii=False))
111
112
 
112
113
  if not args.filetype:
@@ -147,8 +148,8 @@ def process_file(filename, tmp_dir, targetdir, args=None, logger=None):
147
148
  outfilename = os.path.join(targetdir, make_filename(filename, "txt", args))
148
149
  exporter.export(outfilename)
149
150
 
150
- if args.filetype == "redditmd":
151
- exporter = RedditExporter(structure, args, dir_kwargs)
151
+ if args.filetype in ("redditmd", "markdown"):
152
+ exporter = MarkdownExporter(structure, args, dir_kwargs)
152
153
  outfilename = os.path.join(targetdir, make_filename(filename, "md", args))
153
154
  exporter.export(outfilename)
154
155
 
@@ -1,4 +1,3 @@
1
- import codecs
2
1
  import os
3
2
  import random
4
3
  import re
@@ -125,7 +124,7 @@ def parse_4s(s, randomize=False, debug=False, logger=None, debug_dir=None):
125
124
  if debug:
126
125
  debug_dir = debug_dir or get_chgksuite_dir()
127
126
  debug_path = os.path.join(debug_dir, "raw.debug")
128
- with codecs.open(debug_path, "w", "utf8") as debugf:
127
+ with open(debug_path, "w", encoding="utf-8") as debugf:
129
128
  debugf.write(log_wrap(s.split("\n")))
130
129
 
131
130
  s = replace_counters(s)
@@ -147,7 +146,7 @@ def parse_4s(s, randomize=False, debug=False, logger=None, debug_dir=None):
147
146
  counter = 1
148
147
 
149
148
  if debug:
150
- with codecs.open("debug1st.debug", "w", "utf8") as debugf:
149
+ with open("debug1st.debug", "w", encoding="utf-8") as debugf:
151
150
  debugf.write(log_wrap(structure))
152
151
 
153
152
  for element in structure:
@@ -240,7 +239,7 @@ def parse_4s(s, randomize=False, debug=False, logger=None, debug_dir=None):
240
239
  i += 1
241
240
 
242
241
  if debug:
243
- with codecs.open("debug.debug", "w", "utf8") as debugf:
242
+ with open("debug.debug", "w", encoding="utf-8") as debugf:
244
243
  debugf.write(log_wrap(final_structure))
245
244
 
246
245
  for element in final_structure:
chgksuite/composer/db.py CHANGED
@@ -1,4 +1,3 @@
1
- import codecs
2
1
  import datetime
3
2
  import os
4
3
  import re
@@ -211,7 +210,7 @@ class DbExporter(BaseExporter):
211
210
  if res:
212
211
  result.append(res)
213
212
  text = "".join(result)
214
- with codecs.open(outfilename, "w", "utf8") as f:
213
+ with open(outfilename, "w", encoding="utf-8") as f:
215
214
  f.write(text)
216
215
  self.logger.info("Output: {}".format(outfilename))
217
216
  if self.args.clipboard:
@@ -1,4 +1,3 @@
1
- import codecs
2
1
  import hashlib
3
2
  import os
4
3
  import re
@@ -221,7 +220,7 @@ class LatexExporter(BaseExporter):
221
220
 
222
221
  tex += "\\end{document}"
223
222
 
224
- with codecs.open(outfilename, "w", "utf8") as outfile:
223
+ with open(outfilename, "w", encoding="utf-8") as outfile:
225
224
  outfile.write(tex)
226
225
  cwd = os.getcwd()
227
226
  os.chdir(self.dir_kwargs["tmp_dir"])
chgksuite/composer/lj.py CHANGED
@@ -1,4 +1,3 @@
1
- import codecs
2
1
  import datetime
3
2
  import os
4
3
  import random
@@ -240,7 +239,7 @@ class LjExporter(BaseExporter):
240
239
  "general_impressions_text"
241
240
  ]
242
241
  if self.args.debug:
243
- with codecs.open("lj.debug", "w", "utf8") as f:
242
+ with open("lj.debug", "w", encoding="utf-8") as f:
244
243
  f.write(log_wrap(final_structure))
245
244
  return final_structure
246
245
 
@@ -1,4 +1,3 @@
1
- import codecs
2
1
  import os
3
2
 
4
3
  from chgksuite.composer.composer_common import (
@@ -9,20 +8,20 @@ from chgksuite.composer.composer_common import (
9
8
  )
10
9
 
11
10
 
12
- class RedditExporter(BaseExporter):
11
+ class MarkdownExporter(BaseExporter):
13
12
  def __init__(self, *args, **kwargs):
14
13
  super().__init__(*args, **kwargs)
15
14
  self.im = Imgur(self.args.imgur_client_id or IMGUR_CLIENT_ID)
16
15
  self.qcount = 1
17
16
 
18
- def reddityapper(self, e):
17
+ def markdownyapper(self, e):
19
18
  if isinstance(e, str):
20
- return self.reddit_element_layout(e)
19
+ return self.markdown_element_layout(e)
21
20
  elif isinstance(e, list):
22
21
  if not any(isinstance(x, list) for x in e):
23
- return self.reddit_element_layout(e)
22
+ return self.markdown_element_layout(e)
24
23
  else:
25
- return " \n".join([self.reddit_element_layout(x) for x in e])
24
+ return " \n".join([self.markdown_element_layout(x) for x in e])
26
25
 
27
26
  def parse_and_upload_image(self, path):
28
27
  parsed_image = parseimg(
@@ -37,11 +36,13 @@ class RedditExporter(BaseExporter):
37
36
  imglink = uploaded_image["data"]["link"]
38
37
  return imglink
39
38
 
40
- def redditformat(self, s):
39
+ def markdownformat(self, s):
41
40
  res = ""
42
41
  for run in self.parse_4s_elem(s):
43
- if run[0] in ("", "hyperlink"):
42
+ if run[0] == "":
44
43
  res += run[1]
44
+ if run[0] == "hyperlink":
45
+ res += "<{}>".format(run[1])
45
46
  if run[0] == "screen":
46
47
  res += run[1]["for_screen"]
47
48
  if run[0] == "italic":
@@ -51,61 +52,70 @@ class RedditExporter(BaseExporter):
51
52
  imglink = run[1]
52
53
  else:
53
54
  imglink = self.parse_and_upload_image(run[1])
54
- res += "[картинка]({})".format(imglink)
55
+ if self.args.filetype == "redditmd":
56
+ res += "[картинка]({})".format(imglink)
57
+ else:
58
+ res += "![]({})".format(imglink)
55
59
  while res.endswith("\n"):
56
60
  res = res[:-1]
57
61
  res = res.replace("\n", " \n")
58
62
  return res
59
63
 
60
- def reddit_element_layout(self, e):
64
+ def markdown_element_layout(self, e):
61
65
  res = ""
62
66
  if isinstance(e, str):
63
- res = self.redditformat(e)
67
+ res = self.markdownformat(e)
64
68
  return res
65
69
  if isinstance(e, list):
66
70
  res = " \n".join(
67
71
  [
68
- "{}\\. {}".format(i + 1, self.reddit_element_layout(x))
72
+ "{}\\. {}".format(i + 1, self.markdown_element_layout(x))
69
73
  for i, x in enumerate(e)
70
74
  ]
71
75
  )
72
76
  return res
73
77
 
74
- def reddit_format_element(self, pair):
78
+ def markdown_format_element(self, pair):
75
79
  if pair[0] == "Question":
76
- return self.reddit_format_question(pair[1])
80
+ return self.markdown_format_question(pair[1])
77
81
 
78
- def reddit_format_question(self, q):
82
+ def markdown_format_question(self, q):
79
83
  if "setcounter" in q:
80
84
  self.qcount = int(q["setcounter"])
81
85
  res = "__Вопрос {}__: {} \n".format(
82
86
  self.qcount if "number" not in q else q["number"],
83
- self.reddityapper(q["question"]),
87
+ self.markdownyapper(q["question"]),
84
88
  )
85
89
  if "number" not in q:
86
90
  self.qcount += 1
87
- res += "__Ответ:__ >!{} \n".format(self.reddityapper(q["answer"]))
91
+ spoiler_start = ">!" if self.args.filetype == "redditmd" else ""
92
+ spoiler_end = "!<" if self.args.filetype == "redditmd" else ""
93
+ res += "__Ответ:__ {}{} \n".format(
94
+ spoiler_start, self.markdownyapper(q["answer"])
95
+ )
88
96
  if "zachet" in q:
89
- res += "__Зачёт:__ {} \n".format(self.reddityapper(q["zachet"]))
97
+ res += "__Зачёт:__ {} \n".format(self.markdownyapper(q["zachet"]))
90
98
  if "nezachet" in q:
91
- res += "__Незачёт:__ {} \n".format(self.reddityapper(q["nezachet"]))
99
+ res += "__Незачёт:__ {} \n".format(self.markdownyapper(q["nezachet"]))
92
100
  if "comment" in q:
93
- res += "__Комментарий:__ {} \n".format(self.reddityapper(q["comment"]))
101
+ res += "__Комментарий:__ {} \n".format(self.markdownyapper(q["comment"]))
94
102
  if "source" in q:
95
- res += "__Источник:__ {} \n".format(self.reddityapper(q["source"]))
103
+ res += "__Источник:__ {} \n".format(self.markdownyapper(q["source"]))
96
104
  if "author" in q:
97
- res += "!<\n__Автор:__ {} \n".format(self.reddityapper(q["author"]))
105
+ res += "{}\n__Автор:__ {} \n".format(
106
+ spoiler_end, self.markdownyapper(q["author"])
107
+ )
98
108
  else:
99
- res += "!<\n"
109
+ res += spoiler_end + "\n"
100
110
  return res
101
111
 
102
112
  def export(self, outfile):
103
113
  result = []
104
114
  for pair in self.structure:
105
- res = self.reddit_format_element(pair)
115
+ res = self.markdown_format_element(pair)
106
116
  if res:
107
117
  result.append(res)
108
118
  text = "\n\n".join(result)
109
- with codecs.open(outfile, "w", "utf8") as f:
119
+ with open(outfile, "w", encoding="utf-8") as f:
110
120
  f.write(text)
111
121
  self.logger.info("Output: {}".format(outfile))
@@ -1,4 +1,3 @@
1
- import codecs
2
1
  import copy
3
2
  import re
4
3
  import json
@@ -175,5 +174,5 @@ class OpenquizExporter(BaseExporter):
175
174
  result = []
176
175
  for q in questions:
177
176
  result.append(self.oq_format_question(q))
178
- with codecs.open(outfilename, "w", "utf8") as f:
177
+ with open(outfilename, "w", encoding="utf-8") as f:
179
178
  f.write(json.dumps(result, indent=2, ensure_ascii=False))
File without changes
@@ -14,6 +14,9 @@ from chgksuite.handouter.gen import generate_handouts
14
14
  from chgksuite.handouter.pack import pack_handouts
15
15
  from chgksuite.handouter.installer import get_tectonic_path, install_tectonic
16
16
  from chgksuite.handouter.tex_internals import (
17
+ EDGE_DASHED,
18
+ EDGE_NONE,
19
+ EDGE_SOLID,
17
20
  GREYTEXT,
18
21
  HEADER,
19
22
  IMG,
@@ -61,7 +64,28 @@ class HandoutGenerator:
61
64
  )
62
65
  return GREYTEXT.replace("<GREYTEXT>", handout_text)
63
66
 
64
- def make_tikzbox(self, block):
67
+ def make_tikzbox(self, block, edges=None, ext=None):
68
+ """
69
+ Create a TikZ box with configurable edge styles and extensions.
70
+ edges is a dict with keys 'top', 'bottom', 'left', 'right'
71
+ values are EDGE_DASHED or EDGE_SOLID
72
+ ext is a dict with edge extensions to close gaps at boundaries
73
+ """
74
+ if edges is None:
75
+ edges = {
76
+ "top": EDGE_DASHED,
77
+ "bottom": EDGE_DASHED,
78
+ "left": EDGE_DASHED,
79
+ "right": EDGE_DASHED,
80
+ }
81
+ if ext is None:
82
+ ext = {
83
+ "top": ("0pt", "0pt"),
84
+ "bottom": ("0pt", "0pt"),
85
+ "left": ("0pt", "0pt"),
86
+ "right": ("0pt", "0pt"),
87
+ }
88
+
65
89
  if block.get("no_center"):
66
90
  align = ""
67
91
  else:
@@ -79,19 +103,218 @@ class HandoutGenerator:
79
103
  .replace("<ALIGN>", align)
80
104
  .replace("<TEXTWIDTH>", textwidth)
81
105
  .replace("<FONTSIZE>", fontsize)
106
+ .replace("<TOP>", edges["top"])
107
+ .replace("<BOTTOM>", edges["bottom"])
108
+ .replace("<LEFT>", edges["left"])
109
+ .replace("<RIGHT>", edges["right"])
110
+ .replace("<TOP_EXT_L>", ext["top"][0])
111
+ .replace("<TOP_EXT_R>", ext["top"][1])
112
+ .replace("<BOTTOM_EXT_L>", ext["bottom"][0])
113
+ .replace("<BOTTOM_EXT_R>", ext["bottom"][1])
114
+ .replace("<LEFT_EXT_T>", ext["left"][0])
115
+ .replace("<LEFT_EXT_B>", ext["left"][1])
116
+ .replace("<RIGHT_EXT_T>", ext["right"][0])
117
+ .replace("<RIGHT_EXT_B>", ext["right"][1])
82
118
  )
83
119
 
84
120
  def get_page_width(self):
85
121
  return self.args.paperwidth - self.args.margin_left - self.args.margin_right - 2
86
122
 
123
+ def get_cut_direction(
124
+ self, columns, num_rows, handouts_per_team, grouping="horizontal"
125
+ ):
126
+ """
127
+ Determine team rectangle dimensions.
128
+ Returns (team_cols, team_rows) where each team is a team_cols × team_rows block.
129
+
130
+ Falls back to (None, None) if handouts can't be evenly divided into teams.
131
+
132
+ Args:
133
+ grouping: "horizontal" (default) prefers wider teams (smaller team_rows),
134
+ "vertical" prefers taller teams (smaller team_cols).
135
+ """
136
+ total = columns * num_rows
137
+
138
+ # Check if total handouts can be evenly divided
139
+ if total % handouts_per_team != 0:
140
+ return None, None
141
+
142
+ num_teams = total // handouts_per_team
143
+ if num_teams < 1:
144
+ return None, None # Invalid configuration
145
+
146
+ # Find all valid team rectangle sizes (team_cols × team_rows = handouts_per_team)
147
+ valid_layouts = []
148
+ for team_rows in range(1, handouts_per_team + 1):
149
+ if handouts_per_team % team_rows == 0:
150
+ team_cols = handouts_per_team // team_rows
151
+ if columns % team_cols == 0 and num_rows % team_rows == 0:
152
+ valid_layouts.append((team_cols, team_rows))
153
+
154
+ if not valid_layouts:
155
+ return None, None
156
+
157
+ # Sort based on grouping preference
158
+ if grouping == "vertical":
159
+ # Prefer vertical grouping (smaller team_cols = taller teams)
160
+ valid_layouts.sort(key=lambda x: x[0])
161
+ else:
162
+ # Prefer horizontal grouping (smaller team_rows = wider teams)
163
+ valid_layouts.sort(key=lambda x: x[1])
164
+
165
+ return valid_layouts[0]
166
+
167
+ def get_edge_styles(
168
+ self, row_idx, col_idx, num_rows, columns, team_cols, team_rows
169
+ ):
170
+ """
171
+ Determine edge styles and extensions for a box at position (row_idx, col_idx).
172
+ Outer edges of team rectangles are solid (thicker), inner edges are dashed.
173
+ Extensions are used to close gaps in ALL solid lines.
174
+ Duplicate dashed edges are skipped to avoid double lines.
175
+
176
+ team_cols and team_rows define the dimensions of each team rectangle.
177
+ """
178
+ # Default: all dashed, no extension
179
+ edges = {
180
+ "top": EDGE_DASHED,
181
+ "bottom": EDGE_DASHED,
182
+ "left": EDGE_DASHED,
183
+ "right": EDGE_DASHED,
184
+ }
185
+ ext = {
186
+ "top": ("0pt", "0pt"),
187
+ "bottom": ("0pt", "0pt"),
188
+ "left": ("0pt", "0pt"),
189
+ "right": ("0pt", "0pt"),
190
+ }
191
+
192
+ # Gap sizes (half of spacing to extend into)
193
+ h_gap = "0.75mm" # half of SPACE (1.5mm)
194
+ v_gap = "0.5mm" # half of vspace (1mm)
195
+
196
+ # Helper functions to check if position is at a team boundary
197
+ def is_at_right_team_boundary():
198
+ """Is this box at the right edge of its team (but not at grid edge)?"""
199
+ if not team_cols:
200
+ return False
201
+ return (col_idx + 1) % team_cols == 0 and col_idx < columns - 1
202
+
203
+ def is_at_left_team_boundary():
204
+ """Is this box at the left edge of its team (but not at grid edge)?"""
205
+ if not team_cols:
206
+ return False
207
+ return col_idx % team_cols == 0 and col_idx > 0
208
+
209
+ def is_at_bottom_team_boundary():
210
+ """Is this box at the bottom edge of its team (but not at grid edge)?"""
211
+ if not team_rows:
212
+ return False
213
+ return (row_idx + 1) % team_rows == 0 and row_idx < num_rows - 1
214
+
215
+ def is_at_top_team_boundary():
216
+ """Is this box at the top edge of its team (but not at grid edge)?"""
217
+ if not team_rows:
218
+ return False
219
+ return row_idx % team_rows == 0 and row_idx > 0
220
+
221
+ # Determine which edges are solid
222
+ # Only apply solid edges if we have valid team dimensions
223
+ # Otherwise fall back to all-dashed (default)
224
+ if team_cols is not None and team_rows is not None:
225
+ # Outer edges of the entire grid
226
+ if row_idx == 0:
227
+ edges["top"] = EDGE_SOLID
228
+ if row_idx == num_rows - 1:
229
+ edges["bottom"] = EDGE_SOLID
230
+ if col_idx == 0:
231
+ edges["left"] = EDGE_SOLID
232
+ if col_idx == columns - 1:
233
+ edges["right"] = EDGE_SOLID
234
+
235
+ # Team boundary edges
236
+ if is_at_right_team_boundary():
237
+ edges["right"] = EDGE_SOLID
238
+ if is_at_left_team_boundary():
239
+ edges["left"] = EDGE_SOLID
240
+ if is_at_bottom_team_boundary():
241
+ edges["bottom"] = EDGE_SOLID
242
+ if is_at_top_team_boundary():
243
+ edges["top"] = EDGE_SOLID
244
+
245
+ # Skip duplicate dashed edges (to avoid double lines between adjacent boxes)
246
+ if edges["left"] == EDGE_DASHED and col_idx > 0:
247
+ edges["left"] = EDGE_NONE
248
+
249
+ if edges["top"] == EDGE_DASHED and row_idx > 0:
250
+ edges["top"] = EDGE_NONE
251
+
252
+ # Calculate extensions for solid edges to close gaps
253
+ # But don't extend into team boundary gaps!
254
+
255
+ if edges["top"] == EDGE_SOLID:
256
+ at_left_boundary = is_at_left_team_boundary()
257
+ ext_left = "-" + h_gap if col_idx > 0 and not at_left_boundary else "0pt"
258
+ at_right_boundary = is_at_right_team_boundary()
259
+ ext_right = (
260
+ h_gap if col_idx < columns - 1 and not at_right_boundary else "0pt"
261
+ )
262
+ ext["top"] = (ext_left, ext_right)
263
+
264
+ if edges["bottom"] == EDGE_SOLID:
265
+ at_left_boundary = is_at_left_team_boundary()
266
+ ext_left = "-" + h_gap if col_idx > 0 and not at_left_boundary else "0pt"
267
+ at_right_boundary = is_at_right_team_boundary()
268
+ ext_right = (
269
+ h_gap if col_idx < columns - 1 and not at_right_boundary else "0pt"
270
+ )
271
+ ext["bottom"] = (ext_left, ext_right)
272
+
273
+ if edges["left"] == EDGE_SOLID:
274
+ at_top_boundary = is_at_top_team_boundary()
275
+ ext_top = v_gap if row_idx > 0 and not at_top_boundary else "0pt"
276
+ at_bottom_boundary = is_at_bottom_team_boundary()
277
+ ext_bottom = (
278
+ "-" + v_gap
279
+ if row_idx < num_rows - 1 and not at_bottom_boundary
280
+ else "0pt"
281
+ )
282
+ ext["left"] = (ext_top, ext_bottom)
283
+
284
+ if edges["right"] == EDGE_SOLID:
285
+ at_top_boundary = is_at_top_team_boundary()
286
+ ext_top = v_gap if row_idx > 0 and not at_top_boundary else "0pt"
287
+ at_bottom_boundary = is_at_bottom_team_boundary()
288
+ ext_bottom = (
289
+ "-" + v_gap
290
+ if row_idx < num_rows - 1 and not at_bottom_boundary
291
+ else "0pt"
292
+ )
293
+ ext["right"] = (ext_top, ext_bottom)
294
+
295
+ return edges, ext
296
+
87
297
  def generate_regular_block(self, block_):
88
298
  block = block_.copy()
89
299
  if not (block.get("image") or block.get("text")):
90
300
  return
91
301
  columns = block["columns"]
92
- spaces = block["columns"] - 1
302
+ num_rows = block.get("rows") or 1
303
+ handouts_per_team = block.get("handouts_per_team") or 3
304
+ grouping = block.get("grouping") or "horizontal"
305
+
306
+ # Determine team rectangle dimensions
307
+ team_cols, team_rows = self.get_cut_direction(
308
+ columns, num_rows, handouts_per_team, grouping
309
+ )
310
+ if self.args.debug:
311
+ print(
312
+ f"team_cols: {team_cols}, team_rows: {team_rows}, grouping: {grouping}"
313
+ )
314
+
315
+ spaces = columns - 1
93
316
  boxwidth = self.args.boxwidth or round(
94
- (self.get_page_width() - spaces * self.SPACE) / block["columns"],
317
+ (self.get_page_width() - spaces * self.SPACE) / columns,
95
318
  3,
96
319
  )
97
320
  total_width = boxwidth * columns + spaces * self.SPACE
@@ -104,7 +327,6 @@ class HandoutGenerator:
104
327
  r"\setlength{\boxwidth}{<Q>mm}%".replace("<Q>", str(boxwidth)),
105
328
  r"\setlength{\boxwidthinner}{<Q>mm}%".replace("<Q>", str(boxwidthinner)),
106
329
  ]
107
- rows = []
108
330
  contents = []
109
331
  if block.get("image"):
110
332
  img_qwidth = block.get("resize_image") or 1.0
@@ -119,10 +341,18 @@ class HandoutGenerator:
119
341
  block["centering"] = ""
120
342
  else:
121
343
  block["centering"] = "\\centering"
122
- for _ in range(block.get("rows") or 1):
344
+
345
+ rows = []
346
+ for row_idx in range(num_rows):
347
+ row_boxes = []
348
+ for col_idx in range(columns):
349
+ edges, ext = self.get_edge_styles(
350
+ row_idx, col_idx, num_rows, columns, team_cols, team_rows
351
+ )
352
+ row_boxes.append(self.make_tikzbox(block, edges, ext))
123
353
  row = (
124
354
  TIKZBOX_START.replace("<CENTERING>", block["centering"])
125
- + "\n".join([self.make_tikzbox(block)] * block["columns"])
355
+ + "\n".join(row_boxes)
126
356
  + TIKZBOX_END
127
357
  )
128
358
  rows.append(row)
@@ -13,7 +13,7 @@ HEADER = r"""
13
13
  \begin{document}
14
14
  \fontsize{14pt}{16pt}\selectfont
15
15
  \setlength\parindent{0pt}
16
- \tikzstyle{box}=[draw, dashed, rectangle, inner sep=<TIKZ_MM>mm]
16
+ \tikzstyle{box}=[rectangle, inner sep=<TIKZ_MM>mm]
17
17
  \raggedright
18
18
  \raggedbottom
19
19
  """.strip()
@@ -25,10 +25,20 @@ TIKZBOX_START = r"""{<CENTERING>
25
25
 
26
26
  TIKZBOX_INNER = r"""
27
27
  \begin{tikzpicture}
28
- \node[box, minimum width=\boxwidth<TEXTWIDTH><ALIGN>] {<FONTSIZE><CONTENTS>\par};
28
+ \node[box, minimum width=\boxwidth<TEXTWIDTH><ALIGN>] (b) {<FONTSIZE><CONTENTS>\par};
29
+ \useasboundingbox (b.south west) rectangle (b.north east);
30
+ \draw[<TOP>] ([xshift=<TOP_EXT_L>]b.north west) -- ([xshift=<TOP_EXT_R>]b.north east);
31
+ \draw[<BOTTOM>] ([xshift=<BOTTOM_EXT_L>]b.south west) -- ([xshift=<BOTTOM_EXT_R>]b.south east);
32
+ \draw[<LEFT>] ([yshift=<LEFT_EXT_T>]b.north west) -- ([yshift=<LEFT_EXT_B>]b.south west);
33
+ \draw[<RIGHT>] ([yshift=<RIGHT_EXT_T>]b.north east) -- ([yshift=<RIGHT_EXT_B>]b.south east);
29
34
  \end{tikzpicture}
30
35
  """.strip()
31
36
 
37
+ # Line styles for box edges
38
+ EDGE_SOLID = "line width=0.8pt"
39
+ EDGE_DASHED = "dashed"
40
+ EDGE_NONE = "draw=none" # Don't draw this edge (to avoid double dashed lines)
41
+
32
42
  TIKZBOX_END = "\n}"
33
43
 
34
44
  IMG = r"""\includegraphics<IMGWIDTH>{<IMGPATH>}"""
@@ -14,6 +14,7 @@ RESERVED_WORDS = [
14
14
  "raw_tex",
15
15
  "color",
16
16
  "handouts_per_team",
17
+ "grouping",
17
18
  ]
18
19
 
19
20
 
@@ -42,6 +43,13 @@ def wrap_val(key, val):
42
43
  return int(val.strip())
43
44
  if key in ("resize_image", "font_size"):
44
45
  return float(val.strip())
46
+ if key == "grouping":
47
+ val = val.strip().lower()
48
+ if val not in ("horizontal", "vertical"):
49
+ raise ValueError(
50
+ f"Invalid grouping value: {val}. Must be 'horizontal' or 'vertical'."
51
+ )
52
+ return val
45
53
  return val.strip()
46
54
 
47
55
 
chgksuite/lastdir ADDED
@@ -0,0 +1 @@
1
+ /Users/pecheny/chgksuite1/tmpz_2mf3o8/tmptke0lqfv
chgksuite/parser.py CHANGED
@@ -1,7 +1,6 @@
1
1
  #!/usr/bin/env python
2
2
  # -*- coding: utf-8 -*-
3
3
  import base64
4
- import codecs
5
4
  import datetime
6
5
  import hashlib
7
6
  import itertools
@@ -18,8 +17,9 @@ import time
18
17
 
19
18
  import bs4
20
19
  import chardet
21
- import dashtable
22
20
  import mammoth
21
+
22
+ from chgksuite._html2md import html2md
23
23
  import pypandoc
24
24
  import requests
25
25
  import toml
@@ -61,7 +61,7 @@ def partition(alist, indices):
61
61
 
62
62
 
63
63
  def load_regexes(regexfile):
64
- with codecs.open(regexfile, "r", "utf8") as f:
64
+ with open(regexfile, "r", encoding="utf-8") as f:
65
65
  regexes = json.loads(f.read())
66
66
  return {k: re.compile(v) for k, v in regexes.items()}
67
67
 
@@ -526,7 +526,7 @@ class ChgkParser:
526
526
  )
527
527
 
528
528
  if debug:
529
- with codecs.open("debug_0.txt", "w", "utf8") as f:
529
+ with open("debug_0.txt", "w", encoding="utf-8") as f:
530
530
  f.write(text)
531
531
 
532
532
  # 1.
@@ -558,7 +558,7 @@ class ChgkParser:
558
558
  i = 0
559
559
 
560
560
  if debug:
561
- with codecs.open("debug_1.json", "w", "utf8") as f:
561
+ with open("debug_1.json", "w", encoding="utf-8") as f:
562
562
  f.write(json.dumps(self.structure, ensure_ascii=False, indent=4))
563
563
 
564
564
  self.process_single_number_lines()
@@ -574,7 +574,7 @@ class ChgkParser:
574
574
  element[0] = "question"
575
575
 
576
576
  if debug:
577
- with codecs.open("debug_1a.json", "w", "utf8") as f:
577
+ with open("debug_1a.json", "w", encoding="utf-8") as f:
578
578
  f.write(json.dumps(self.structure, ensure_ascii=False, indent=4))
579
579
 
580
580
  # 2.
@@ -584,7 +584,7 @@ class ChgkParser:
584
584
  self.merge_to_x_until_nextfield("comment")
585
585
 
586
586
  if debug:
587
- with codecs.open("debug_2.json", "w", "utf8") as f:
587
+ with open("debug_2.json", "w", encoding="utf-8") as f:
588
588
  f.write(json.dumps(self.structure, ensure_ascii=False, indent=4))
589
589
 
590
590
  # 3.
@@ -647,7 +647,7 @@ class ChgkParser:
647
647
  self.merge_to_x_until_nextfield("nezachet")
648
648
 
649
649
  if debug:
650
- with codecs.open("debug_3.json", "w", "utf8") as f:
650
+ with open("debug_3.json", "w", encoding="utf-8") as f:
651
651
  f.write(json.dumps(self.structure, ensure_ascii=False, indent=4))
652
652
 
653
653
  # 4.
@@ -660,7 +660,7 @@ class ChgkParser:
660
660
  self.merge_to_next(0)
661
661
 
662
662
  if debug:
663
- with codecs.open("debug_3a.json", "w", "utf8") as f:
663
+ with open("debug_3a.json", "w", encoding="utf-8") as f:
664
664
  f.write(
665
665
  json.dumps(
666
666
  list(enumerate(self.structure)), ensure_ascii=False, indent=4
@@ -718,7 +718,7 @@ class ChgkParser:
718
718
  idx += 1
719
719
 
720
720
  if debug:
721
- with codecs.open("debug_4.json", "w", "utf8") as f:
721
+ with open("debug_4.json", "w", encoding="utf-8") as f:
722
722
  f.write(json.dumps(self.structure, ensure_ascii=False, indent=4))
723
723
 
724
724
  # 5.
@@ -798,7 +798,7 @@ class ChgkParser:
798
798
  )
799
799
 
800
800
  if debug:
801
- with codecs.open("debug_5.json", "w", "utf8") as f:
801
+ with open("debug_5.json", "w", encoding="utf-8") as f:
802
802
  f.write(json.dumps(self.structure, ensure_ascii=False, indent=4))
803
803
 
804
804
  # 6.
@@ -853,7 +853,7 @@ class ChgkParser:
853
853
  final_structure.append(["Question", current_question])
854
854
 
855
855
  if debug:
856
- with codecs.open("debug_6.json", "w", "utf8") as f:
856
+ with open("debug_6.json", "w", encoding="utf-8") as f:
857
857
  f.write(json.dumps(final_structure, ensure_ascii=False, indent=4))
858
858
 
859
859
  # 7.
@@ -899,7 +899,7 @@ class ChgkParser:
899
899
  element[1] = self._process_images_in_text(element[1])
900
900
 
901
901
  if debug:
902
- with codecs.open("debug_final.json", "w", "utf8") as f:
902
+ with open("debug_final.json", "w", encoding="utf-8") as f:
903
903
  f.write(json.dumps(final_structure, ensure_ascii=False, indent=4))
904
904
  return final_structure
905
905
 
@@ -982,8 +982,8 @@ def chgk_parse_docx(docxfile, defaultauthor="", args=None, logger=None):
982
982
  with open(docxfile, "rb") as docx_file:
983
983
  html = mammoth.convert_to_html(docx_file).value
984
984
  if args.debug:
985
- with codecs.open(
986
- os.path.join(target_dir, "debugdebug.pydocx"), "w", "utf8"
985
+ with open(
986
+ os.path.join(target_dir, "debugdebug.pydocx"), "w", encoding="utf-8"
987
987
  ) as dbg:
988
988
  dbg.write(html)
989
989
  input_docx = (
@@ -994,8 +994,8 @@ def chgk_parse_docx(docxfile, defaultauthor="", args=None, logger=None):
994
994
  bsoup = BeautifulSoup(input_docx, "html.parser")
995
995
 
996
996
  if args.debug:
997
- with codecs.open(
998
- os.path.join(target_dir, "debug.pydocx"), "w", "utf8"
997
+ with open(
998
+ os.path.join(target_dir, "debug.pydocx"), "w", encoding="utf-8"
999
999
  ) as dbg:
1000
1000
  dbg.write(input_docx)
1001
1001
 
@@ -1065,7 +1065,7 @@ def chgk_parse_docx(docxfile, defaultauthor="", args=None, logger=None):
1065
1065
  ensure_line_breaks(tag)
1066
1066
  for tag in bsoup.find_all("table"):
1067
1067
  try:
1068
- table = dashtable.html2md(str(tag))
1068
+ table = html2md(str(tag))
1069
1069
  tag.insert_before(table)
1070
1070
  except (TypeError, ValueError):
1071
1071
  logger.error(f"couldn't parse html table: {str(tag)}")
@@ -1096,12 +1096,12 @@ def chgk_parse_docx(docxfile, defaultauthor="", args=None, logger=None):
1096
1096
  tag.unwrap()
1097
1097
 
1098
1098
  if args.debug:
1099
- with codecs.open(
1100
- os.path.join(target_dir, "debug_raw.html"), "w", "utf8"
1099
+ with open(
1100
+ os.path.join(target_dir, "debug_raw.html"), "w", encoding="utf-8"
1101
1101
  ) as dbg:
1102
1102
  dbg.write(str(bsoup))
1103
- with codecs.open(
1104
- os.path.join(target_dir, "debug.html"), "w", "utf8"
1103
+ with open(
1104
+ os.path.join(target_dir, "debug.html"), "w", encoding="utf-8"
1105
1105
  ) as dbg:
1106
1106
  dbg.write(bsoup.prettify())
1107
1107
 
@@ -1139,7 +1139,9 @@ def chgk_parse_docx(docxfile, defaultauthor="", args=None, logger=None):
1139
1139
  txt = txt.replace(f"IMGPATH({i})", elem)
1140
1140
 
1141
1141
  if args.debug:
1142
- with codecs.open(os.path.join(target_dir, "debug.debug"), "w", "utf8") as dbg:
1142
+ with open(
1143
+ os.path.join(target_dir, "debug.debug"), "w", encoding="utf-8"
1144
+ ) as dbg:
1143
1145
  dbg.write(txt)
1144
1146
 
1145
1147
  final_structure = chgk_parse(txt, defaultauthor=defaultauthor, args=args)
@@ -1173,7 +1175,7 @@ def chgk_parse_wrapper(path, args, logger=None):
1173
1175
  sys.exit()
1174
1176
  outfilename = os.path.join(target_dir, make_filename(abspath, "4s", args))
1175
1177
  logger.info("Output: {}".format(os.path.abspath(outfilename)))
1176
- with codecs.open(outfilename, "w", "utf8") as output_file:
1178
+ with open(outfilename, "w", encoding="utf-8") as output_file:
1177
1179
  output_file.write(compose_4s(final_structure, args=args))
1178
1180
  return outfilename
1179
1181
 
chgksuite/parser_db.py CHANGED
@@ -1,7 +1,6 @@
1
1
  #!/usr/bin/env python
2
2
  # -*- coding: utf-8 -*-
3
3
 
4
- import codecs
5
4
  import json
6
5
  import os
7
6
  import re
@@ -436,7 +435,7 @@ def chgk_parse_db(text, debug=False, logger=False):
436
435
  append_question(lexer)
437
436
 
438
437
  if debug:
439
- with codecs.open("debug_final.json", "w", "utf8") as f:
438
+ with open("debug_final.json", "w", encoding="utf-8") as f:
440
439
  f.write(json.dumps(lexer.structure, ensure_ascii=False, indent=4))
441
440
 
442
441
  return lexer.structure
chgksuite/trello.py CHANGED
@@ -1,6 +1,5 @@
1
1
  #!/usr/bin/env python
2
2
  # -*- coding: utf-8 -*-
3
- import codecs
4
3
  import json
5
4
  import os
6
5
  import pdb
@@ -52,7 +51,7 @@ def upload_file(filepath, trello, list_name=None):
52
51
  assert lid is not None
53
52
  print(f"uploading to list '{list_['name']}'")
54
53
  content = ""
55
- with codecs.open(filepath, "r", "utf8") as f:
54
+ with open(filepath, "r", encoding="utf-8") as f:
56
55
  content = f.read()
57
56
  cards = re.split(r"(\r?\n){2,}", content)
58
57
  cards = [x for x in cards if x != "" and x != "\n" and x != "\r\n"]
@@ -255,7 +254,7 @@ def gui_trello_download(args):
255
254
 
256
255
  board_id_path = os.path.join(args.folder, ".board_id")
257
256
  if os.path.isfile(board_id_path):
258
- with codecs.open(board_id_path, "r", "utf8") as f:
257
+ with open(board_id_path, "r", encoding="utf-8") as f:
259
258
  board_id = f.read().rstrip()
260
259
  else:
261
260
  board_id = get_board_id(path=args.folder)
@@ -401,14 +400,14 @@ def gui_trello_download(args):
401
400
  result.extend(_lists[_list["name"]])
402
401
  filename = "singlefile.4s"
403
402
  print("outputting {}".format(filename))
404
- with codecs.open(filename, "w", "utf8") as f:
403
+ with open(filename, "w", encoding="utf-8") as f:
405
404
  for item in result:
406
405
  f.write("\n" + item + "\n")
407
406
  else:
408
407
  for _list in _lists:
409
408
  filename = "{}.4s".format(_list)
410
409
  print("outputting {}".format(filename))
411
- with codecs.open(filename, "w", "utf8") as f:
410
+ with open(filename, "w", encoding="utf-8") as f:
412
411
  for item in _lists[_list]:
413
412
  f.write("\n" + item + "\n")
414
413
 
@@ -426,7 +425,7 @@ def get_board_id(path=None):
426
425
  if "trello.com" in board_id:
427
426
  board_id = re_bi.search(board_id).group(1)
428
427
  if path:
429
- with codecs.open(os.path.join(path, ".board_id"), "w", "utf8") as f:
428
+ with open(os.path.join(path, ".board_id"), "w", encoding="utf-8") as f:
430
429
  f.write(board_id)
431
430
  return board_id
432
431
 
@@ -437,7 +436,7 @@ def get_token(tokenpath, args):
437
436
  else:
438
437
  webbrowser.open(TRELLO_URL)
439
438
  token = input("Please paste the obtained token: ").rstrip()
440
- with codecs.open(tokenpath, "w", "utf8") as f:
439
+ with open(tokenpath, "w", encoding="utf-8") as f:
441
440
  f.write(token)
442
441
  return token
443
442
 
@@ -452,7 +451,7 @@ def gui_trello(args):
452
451
  if not os.path.isfile(tokenpath):
453
452
  token = get_token(tokenpath, args)
454
453
  else:
455
- with codecs.open(tokenpath, "r", "utf8") as f:
454
+ with open(tokenpath, "r", encoding="utf-8") as f:
456
455
  token = f.read().rstrip()
457
456
 
458
457
  with open(os.path.join(resourcedir, "trello.json")) as f:
chgksuite/typotools.py CHANGED
@@ -92,13 +92,13 @@ def uni_normalize(k):
92
92
 
93
93
 
94
94
  def cyr_lat_check_char(i, char, word):
95
- if char in CYRILLIC_CHARS:
95
+ if char.lower() in CYRILLIC_CHARS:
96
96
  return
97
97
  if not (
98
- (i == 0 or word[i - 1] in CYRILLIC_CHARS or not word[i - 1].isalpha())
98
+ (i == 0 or word[i - 1].lower() in CYRILLIC_CHARS or not word[i - 1].isalpha())
99
99
  and (
100
100
  i == len(word) - 1
101
- or word[i + 1] in CYRILLIC_CHARS
101
+ or word[i + 1].lower() in CYRILLIC_CHARS
102
102
  or not word[i + 1].isalpha()
103
103
  )
104
104
  ):
@@ -121,7 +121,7 @@ def cyr_lat_check_word(word):
121
121
  if check_result:
122
122
  replacements[char] = check_result
123
123
  elif (
124
- char in CYRILLIC_CHARS
124
+ char.lower() in CYRILLIC_CHARS
125
125
  and i < len(word) - 1
126
126
  and word[i + 1] in ACCENTS_TO_FIX
127
127
  ):
chgksuite/version.py CHANGED
@@ -1 +1 @@
1
- __version__ = "0.27.0b5"
1
+ __version__ = "0.27.1"
@@ -1,23 +1,21 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: chgksuite
3
- Version: 0.27.0b5
3
+ Version: 0.27.1
4
4
  Summary: A package for chgk automation
5
+ Project-URL: Homepage, https://gitlab.com/peczony/chgksuite
5
6
  Author-email: Alexander Pecheny <ap@pecheny.me>
6
7
  License-Expression: MIT
7
- Project-URL: Homepage, https://gitlab.com/peczony/chgksuite
8
- Classifier: Programming Language :: Python :: 3
8
+ License-File: LICENSE
9
9
  Classifier: Operating System :: OS Independent
10
+ Classifier: Programming Language :: Python :: 3
10
11
  Requires-Python: >=3.9
11
- Description-Content-Type: text/markdown
12
- License-File: LICENSE
13
12
  Requires-Dist: beautifulsoup4
14
13
  Requires-Dist: chardet
15
- Requires-Dist: dashtable
16
14
  Requires-Dist: dateparser
17
15
  Requires-Dist: mammoth
18
16
  Requires-Dist: openpyxl
19
17
  Requires-Dist: parse
20
- Requires-Dist: Pillow
18
+ Requires-Dist: pillow
21
19
  Requires-Dist: ply
22
20
  Requires-Dist: pypandoc
23
21
  Requires-Dist: pypdf
@@ -29,7 +27,7 @@ Requires-Dist: requests
29
27
  Requires-Dist: toml
30
28
  Requires-Dist: urllib3>=2.6.2
31
29
  Requires-Dist: watchdog
32
- Dynamic: license-file
30
+ Description-Content-Type: text/markdown
33
31
 
34
32
  **chgksuite** is an utility that helps chgk editors.
35
33
 
@@ -1,23 +1,25 @@
1
1
  chgksuite/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
2
  chgksuite/__main__.py,sha256=0-_jfloveTW3SZYW5XEagbyaHKGCiDhGNgcLxsT_dMs,140
3
- chgksuite/cli.py,sha256=95Xj1jrnybtso9qSCAwgv6utCrhJF43oFXlH5LWyHKA,40918
4
- chgksuite/common.py,sha256=M4mYjVAzF7uk9ElF0XmO3eggxSFf2bPlKinT2nEZPXA,11350
5
- chgksuite/parser.py,sha256=2UL-2dJtCFYEjakUfeh3n1E4epMKPo0pjdaC_cBZplM,45775
6
- chgksuite/parser_db.py,sha256=gh5AM80UR_hQ0r_-AwWLbqZCl2V0j3HAgj6BilImXyc,11116
7
- chgksuite/trello.py,sha256=I1JmIbZVJjJuuRO0g26fNt8deRo9hDlk1xBIf8w31Rs,14724
8
- chgksuite/typotools.py,sha256=0fQOjt5H3NRl0JeGTp2x-k-2ZN68E9Z9WX8D7lCwXmA,12765
9
- chgksuite/version.py,sha256=WOGW6JeN53_4Rrn5xAY_lTn9BFzuA5RLVZ8RjXiwSec,25
3
+ chgksuite/_html2md.py,sha256=IzPlRo4dVZNVdlnoCQFjSEfvpFZ0KdvVzfKSeIt15lM,2454
4
+ chgksuite/cli.py,sha256=fHa7HNJeQeUNXpbqnMnSXHOMpbxpaph6d3KdVNx9uNg,41257
5
+ chgksuite/common.py,sha256=TGXXWjdCS8Fy5Gkxuw7ZN3xh9pTS84ZtvRQj7IYU1E4,11341
6
+ chgksuite/lastdir,sha256=BbZVRYZnXBQzJUrl7a2e4xuUcfmq6asNI705pTxBfD4,49
7
+ chgksuite/parser.py,sha256=6T6o3F_oJVe9sl96gCaACCwtuF7lcn2V-24qEMN27zw,45847
8
+ chgksuite/parser_db.py,sha256=Ngh2ZYhAyetb6Sa-5xC9aX8quX9Ar1WheSfhSy-JADw,11105
9
+ chgksuite/trello.py,sha256=UYWemtf6z0jKW3tY-aof7_w2hWqcljYNd891vXFZ8mw,14731
10
+ chgksuite/typotools.py,sha256=J2AEQbfcR0HHSu5WCALpj4Ya_ngOMXRh7esJgcybWQM,12797
11
+ chgksuite/version.py,sha256=lnka9HWRxSmlQAffgSpaSitns-Djhy2OArtj9IVwxrY,23
10
12
  chgksuite/vulture_whitelist.py,sha256=P__p_X0zt10ivddIf81uyxsobV14vFg8uS2lt4foYpc,3582
11
- chgksuite/composer/__init__.py,sha256=k6bnuB8ftPsBY-Q-iLm42S10jWXAJebmYTdW_BZiKAM,6570
12
- chgksuite/composer/chgksuite_parser.py,sha256=g1KP0Jm4Zt4I06WGbFfKz6qIv4x1bLuPozu4QOV2wfw,9049
13
+ chgksuite/composer/__init__.py,sha256=kTQsxgn_ED18DgZjdvh7QuwC36FVbaiv_eYTjj3zdIk,6596
14
+ chgksuite/composer/chgksuite_parser.py,sha256=ItlTenviFDuqP-f1960nzD-gRPFDQy4RdOL39PswTvg,9044
13
15
  chgksuite/composer/composer_common.py,sha256=kc-_Tc9NjevfXGj4fXoa9fye9AO0EuMSnEPJnS0n-aQ,16281
14
- chgksuite/composer/db.py,sha256=71cINE_V8s6YidvqpmBmmlWbcXraUEGZA1xpVFAUENw,8173
16
+ chgksuite/composer/db.py,sha256=DI-goR4V69S8bufNfW5smTFG9puiyjcxC1u1TFuhfYs,8162
15
17
  chgksuite/composer/docx.py,sha256=cxPgjykAlU3XxKCtM3K13Wm6IcKqAHo0ngLqa8-2opM,23716
16
- chgksuite/composer/latex.py,sha256=_IKylzdDcokgXYvvxsVSiq-Ba5fVirWcfCp6eOyx6zQ,9242
17
- chgksuite/composer/lj.py,sha256=nty3Zs3N1H0gNK378U04aAHo71_5cABhCM1Mm9jiUEA,15213
18
- chgksuite/composer/openquiz.py,sha256=D9q7lcEgUGwR1UF6Qp3I-wBJucy9vMnrxoSOst4V45Q,7001
18
+ chgksuite/composer/latex.py,sha256=Ouq3OkUA0oS_zhCOiPZ2tY2sMe3OVg84Cead2GgJi2c,9231
19
+ chgksuite/composer/lj.py,sha256=7zrLrvbgYEoiF-NC7zyDaJTvPMhwt8tQT_KWgsAeG4A,15202
20
+ chgksuite/composer/markdown.py,sha256=hWajBAvop_BTyBI6rzeIwAaj7p_6p2kQre4WDMalD90,4343
21
+ chgksuite/composer/openquiz.py,sha256=BF506tH6b1IoocC61l5xBU969l38S1IjqMi6mqhG_HI,6990
19
22
  chgksuite/composer/pptx.py,sha256=9O0tfx2xpyx9Y4ceatVIXdiwvslnj8gliZIlxsDe5Ow,23971
20
- chgksuite/composer/reddit.py,sha256=-Eg4CqMHhyGGfCteVwdQdtE1pfUXQ42XcP5OYUrBXmo,3878
21
23
  chgksuite/composer/stats.py,sha256=GbraSrjaZ8Mc2URs5aGAsI4ekboAKzlJJOqsbe96ELA,3995
22
24
  chgksuite/composer/telegram.py,sha256=KM9Bnkf2bxdJNMrhjCCw2xx-sWcIQioBc1FdSY4OX-g,47431
23
25
  chgksuite/composer/telegram_bot.py,sha256=xT5D39m4zGmIbHV_ZfyQ9Rc8PAmG2V5FGUeDKpkgyTw,3767
@@ -25,9 +27,9 @@ chgksuite/handouter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSu
25
27
  chgksuite/handouter/gen.py,sha256=CJfGl8R2vnElnsjBo0n4u-y8KD-l2qxrIuUHj5WZBuI,5241
26
28
  chgksuite/handouter/installer.py,sha256=u4jQKeCn0VjOnaDFezx35g8oRjji5edvYGj5xSHCEW4,7574
27
29
  chgksuite/handouter/pack.py,sha256=H-Ln1JqKK2u3jFI5wwsh7pQdJBpQJ-s8gV9iECQ3kgU,2504
28
- chgksuite/handouter/runner.py,sha256=QPqLKbfNDL0ucJ365lvKCwRcPKrhxRBJ-YFSbQLU9_8,8641
29
- chgksuite/handouter/tex_internals.py,sha256=pxvMXkqlMfzBbAlTkFCJBlcT7t1XS0Z9XOzVboc2f2Y,966
30
- chgksuite/handouter/utils.py,sha256=0RoECvHzfmwWnRL2jj4WKh22oTCPh2MXid_a9ShplDA,2243
30
+ chgksuite/handouter/runner.py,sha256=dnDHgKPaa4WuxcmZf0E8BAJzDdTbFRksrt-gK0lEkiA,17674
31
+ chgksuite/handouter/tex_internals.py,sha256=j9zvai6zmaEg8osXnJ4yaXAx-89ufcpypySjyoxGFM0,1544
32
+ chgksuite/handouter/utils.py,sha256=hfRlBhOYFCPqYrof2yi4NlrQRty5bhM-Xy7jgGc60o8,2518
31
33
  chgksuite/resources/cheader.tex,sha256=Jfe3LESk0VIV0HCObbajSQpEMljaIDAIEGSs6YY9rTk,3454
32
34
  chgksuite/resources/fix-unnumbered-sections.sty,sha256=FN6ZSWC6MvoRoThPm5AxCF98DdgcxbxyBYG6YImM05s,1409
33
35
  chgksuite/resources/labels_az.toml,sha256=hiXt-54nMQ4Ie0RUVbQhXxlj47NEc2qNwe7O1NIMe-8,662
@@ -54,9 +56,8 @@ chgksuite/resources/regexes_uz_cyr.json,sha256=D4AyaEPEY753I47Ky2Fwol_4kxQsl-Yu9
54
56
  chgksuite/resources/template.docx,sha256=Do29TAsg3YbH0rRSaXhVzKEoh4pwXkklW_idWA34HVE,11189
55
57
  chgksuite/resources/template.pptx,sha256=hEFWqE-yYpwZ8ejrMCJIPEyoMT3eDqaqtiEeQ7I4fyk,29777
56
58
  chgksuite/resources/trello.json,sha256=M5Q9JR-AAJF1u16YtNAxDX-7c7VoVTXuq4POTqYvq8o,555
57
- chgksuite-0.27.0b5.dist-info/licenses/LICENSE,sha256=_a1yfntuPmctLsuiE_08xMSORuCfGS8X5hQph2U_PUw,1081
58
- chgksuite-0.27.0b5.dist-info/METADATA,sha256=34FY_Q8wUwKtlnUmcXvXEdRR4RRWJusn1sZ734sgX0w,1201
59
- chgksuite-0.27.0b5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
60
- chgksuite-0.27.0b5.dist-info/entry_points.txt,sha256=lqjX6ULQZGDt0rgouTXBuwEPiwKkDQkSiNsT877A_Jg,54
61
- chgksuite-0.27.0b5.dist-info/top_level.txt,sha256=cSWiRBOGZW9nIO6Rv1IrEfwPgV2ZWs87QV9wPXeBGqM,10
62
- chgksuite-0.27.0b5.dist-info/RECORD,,
59
+ chgksuite-0.27.1.dist-info/METADATA,sha256=kYoAhnCWvqOmq_4-8DCDE2KOQR2-47_GIYEkk1l3buY,1152
60
+ chgksuite-0.27.1.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
61
+ chgksuite-0.27.1.dist-info/entry_points.txt,sha256=lqjX6ULQZGDt0rgouTXBuwEPiwKkDQkSiNsT877A_Jg,54
62
+ chgksuite-0.27.1.dist-info/licenses/LICENSE,sha256=_a1yfntuPmctLsuiE_08xMSORuCfGS8X5hQph2U_PUw,1081
63
+ chgksuite-0.27.1.dist-info/RECORD,,
@@ -1,5 +1,4 @@
1
1
  Wheel-Version: 1.0
2
- Generator: setuptools (80.9.0)
2
+ Generator: hatchling 1.28.0
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
-
@@ -1 +0,0 @@
1
- chgksuite