mkv-episode-matcher 0.3.3__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38) hide show
  1. mkv_episode_matcher/__init__.py +8 -0
  2. mkv_episode_matcher/__main__.py +2 -177
  3. mkv_episode_matcher/asr_models.py +506 -0
  4. mkv_episode_matcher/cli.py +558 -0
  5. mkv_episode_matcher/core/config_manager.py +100 -0
  6. mkv_episode_matcher/core/engine.py +577 -0
  7. mkv_episode_matcher/core/matcher.py +214 -0
  8. mkv_episode_matcher/core/models.py +91 -0
  9. mkv_episode_matcher/core/providers/asr.py +85 -0
  10. mkv_episode_matcher/core/providers/subtitles.py +341 -0
  11. mkv_episode_matcher/core/utils.py +148 -0
  12. mkv_episode_matcher/episode_identification.py +550 -118
  13. mkv_episode_matcher/subtitle_utils.py +82 -0
  14. mkv_episode_matcher/tmdb_client.py +56 -14
  15. mkv_episode_matcher/ui/flet_app.py +708 -0
  16. mkv_episode_matcher/utils.py +262 -139
  17. mkv_episode_matcher-1.0.0.dist-info/METADATA +242 -0
  18. mkv_episode_matcher-1.0.0.dist-info/RECORD +23 -0
  19. {mkv_episode_matcher-0.3.3.dist-info → mkv_episode_matcher-1.0.0.dist-info}/WHEEL +1 -1
  20. mkv_episode_matcher-1.0.0.dist-info/licenses/LICENSE +21 -0
  21. mkv_episode_matcher/config.py +0 -82
  22. mkv_episode_matcher/episode_matcher.py +0 -100
  23. mkv_episode_matcher/libraries/pgs2srt/.gitignore +0 -2
  24. mkv_episode_matcher/libraries/pgs2srt/Libraries/SubZero/SubZero.py +0 -321
  25. mkv_episode_matcher/libraries/pgs2srt/Libraries/SubZero/dictionaries/data.py +0 -16700
  26. mkv_episode_matcher/libraries/pgs2srt/Libraries/SubZero/post_processing.py +0 -260
  27. mkv_episode_matcher/libraries/pgs2srt/README.md +0 -26
  28. mkv_episode_matcher/libraries/pgs2srt/__init__.py +0 -0
  29. mkv_episode_matcher/libraries/pgs2srt/imagemaker.py +0 -89
  30. mkv_episode_matcher/libraries/pgs2srt/pgs2srt.py +0 -150
  31. mkv_episode_matcher/libraries/pgs2srt/pgsreader.py +0 -225
  32. mkv_episode_matcher/libraries/pgs2srt/requirements.txt +0 -4
  33. mkv_episode_matcher/mkv_to_srt.py +0 -302
  34. mkv_episode_matcher/speech_to_text.py +0 -90
  35. mkv_episode_matcher-0.3.3.dist-info/METADATA +0 -125
  36. mkv_episode_matcher-0.3.3.dist-info/RECORD +0 -25
  37. {mkv_episode_matcher-0.3.3.dist-info → mkv_episode_matcher-1.0.0.dist-info}/entry_points.txt +0 -0
  38. {mkv_episode_matcher-0.3.3.dist-info → mkv_episode_matcher-1.0.0.dist-info}/top_level.txt +0 -0
@@ -1,260 +0,0 @@
1
- # MIT License
2
- #
3
- # Copyright (c) 2018 Hannes Tismer
4
- #
5
- # Permission is hereby granted, free of charge, to any person obtaining a copy
6
- # of this software and associated documentation files (the "Software"), to deal
7
- # in the Software without restriction, including without limitation the rights
8
- # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
- # copies of the Software, and to permit persons to whom the Software is
10
- # furnished to do so, subject to the following conditions:
11
- #
12
- # The above copyright notice and this permission notice shall be included in all
13
- # copies or substantial portions of the Software.
14
- #
15
- # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
- # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
- # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
- # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
- # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
- # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
- # SOFTWARE.
22
- #
23
- #
24
- # Copyright for portions of project Sub-Zero are held by Bram Walet, 2014 as part of project Subliminal.bundle.
25
- # The original license is supplied below.
26
- #
27
- # The MIT License (MIT)
28
- #
29
- # Copyright (c) 2014 Bram Walet
30
- #
31
- # Permission is hereby granted, free of charge, to any person obtaining a copy
32
- # of this software and associated documentation files (the "Software"), to deal
33
- # in the Software without restriction, including without limitation the rights
34
- # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
35
- # copies of the Software, and to permit persons to whom the Software is
36
- # furnished to do so, subject to the following conditions:
37
- #
38
- # The above copyright notice and this permission notice shall be included in all
39
- # copies or substantial portions of the Software.
40
- #
41
- # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
42
- # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
43
- # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
44
- # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
45
- # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
46
- # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
47
- # SOFTWARE.
48
-
49
-
50
- import re
51
-
52
- from Libraries.SubZero.dictionaries.data import data
53
- from Libraries.SubZero.SubZero import (
54
- MultipleLineProcessor,
55
- MultipleWordReProcessor,
56
- NReProcessor,
57
- ReProcessor,
58
- SubtitleTextModification,
59
- WholeLineProcessor,
60
- )
61
- from tld import get_tld
62
-
63
-
64
- class CommonFixes(SubtitleTextModification):
65
- identifier = "common"
66
- description = "Basic common fixes"
67
- exclusive = True
68
- order = 40
69
-
70
- long_description = "Fix common and whitespace/punctuation issues in subtitles"
71
-
72
- processors = [
73
- # normalize hyphens
74
- NReProcessor(re.compile(r"(?u)([‑‐﹘﹣])"), "-", name="CM_hyphens"),
75
- # -- = em dash
76
- NReProcessor(
77
- re.compile(r"(?u)(\w|\b|\s|^)(-\s?-{1,2})"), r"\1—", name="CM_multidash"
78
- ),
79
- # line = _/-/\s
80
- NReProcessor(
81
- re.compile(r'(?u)(^\W*[-_.:<>~"\']+\W*$)'), "", name="CM_non_word_only"
82
- ),
83
- # remove >>
84
- NReProcessor(re.compile(r"(?u)^\s?>>\s*"), "", name="CM_leading_crocodiles"),
85
- # line = : text
86
- NReProcessor(
87
- re.compile(r"(?u)(^\W*:\s*(?=\w+))"), "", name="CM_empty_colon_start"
88
- ),
89
- # fix music symbols
90
- NReProcessor(
91
- re.compile(r"(?u)(^[-\s>~]*[*#¶]+\s+)|(\s*[*#¶]+\s*$)"),
92
- lambda x: "♪ " if x.group(1) else " ♪",
93
- name="CM_music_symbols",
94
- ),
95
- # '' = "
96
- NReProcessor(
97
- re.compile(r"(?u)([\'’ʼ❜‘‛][\'’ʼ❜‘‛]+)"), '"', name="CM_double_apostrophe"
98
- ),
99
- # double quotes instead of single quotes inside words
100
- NReProcessor(
101
- re.compile(r'(?u)([A-zÀ-ž])"([A-zÀ-ž])'),
102
- r"\1'\2",
103
- name="CM_double_as_single",
104
- ),
105
- # normalize quotes
106
- NReProcessor(
107
- re.compile(r'(?u)(\s*["”“‟„])\s*(["”“‟„]["”“‟„\s]*)'),
108
- lambda match: '"' + (" " if match.group(2).endswith(" ") else ""),
109
- name="CM_normalize_quotes",
110
- ),
111
- # normalize single quotes
112
- NReProcessor(re.compile(r"(?u)([\'’ʼ❜‘‛])"), "'", name="CM_normalize_squotes"),
113
- # remove leading ...
114
- NReProcessor(re.compile(r"(?u)^\.\.\.[\s]*"), "", name="CM_leading_ellipsis"),
115
- # remove "downloaded from" tags
116
- NReProcessor(re.compile(r"(?ui).+downloaded\s+from.+"), "", name="CM_crap"),
117
- # no space after ellipsis
118
- NReProcessor(
119
- re.compile(r'(?u)\.\.\.(?![\s.,!?\'"])(?!$)'),
120
- "... ",
121
- name="CM_ellipsis_no_space",
122
- ),
123
- # no space before spaced ellipsis
124
- NReProcessor(
125
- re.compile(r"(?u)(?<=[^\s])(?<!\s)\. \. \."),
126
- " . . .",
127
- name="CM_ellipsis_no_space2",
128
- ),
129
- # multiple spaces
130
- NReProcessor(re.compile(r"(?u)[\s]{2,}"), " ", name="CM_multiple_spaces"),
131
- # more than 3 dots
132
- NReProcessor(re.compile(r"(?u)\.{3,}"), "...", name="CM_dots"),
133
- # no space after starting dash
134
- NReProcessor(re.compile(r"(?u)^-(?![\s-])"), "- ", name="CM_dash_space"),
135
- # remove starting spaced dots (not matching ellipses)
136
- NReProcessor(
137
- re.compile(r"(?u)^(?!\s?(\.\s\.\s\.)|(\s?\.{3}))(?=\.+\s+)[\s.]*"),
138
- "",
139
- name="CM_starting_spacedots",
140
- ),
141
- # space missing before doublequote
142
- ReProcessor(
143
- re.compile(r'(?u)(?<!^)(?<![\s(\["])("[^"]+")'),
144
- r" \1",
145
- name="CM_space_before_dblquote",
146
- ),
147
- # space missing after doublequote
148
- ReProcessor(
149
- re.compile(r'(?u)("[^"\s][^"]+")([^\s.,!?)\]]+)'),
150
- r"\1 \2",
151
- name="CM_space_after_dblquote",
152
- ),
153
- # space before ending doublequote?
154
- # replace uppercase I with lowercase L in words
155
- NReProcessor(
156
- re.compile(r"(?u)([a-zà-ž]+)(I+)"),
157
- lambda match: r"{}{}".format(match.group(1), "l" * len(match.group(2))),
158
- name="CM_uppercase_i_in_word",
159
- ),
160
- # fix spaces in numbers (allows for punctuation: ,.:' (comma/dot only fixed if after space, those may be
161
- # countdowns otherwise); don't break up ellipses
162
- NReProcessor(
163
- re.compile(
164
- r"(?u)(\b[0-9]+[0-9:\']*(?<!\.\.)\s+(?!\.\.)[0-9,.:\'\s]*(?=[0-9]+)[0-9,.:\'])"
165
- ),
166
- lambda match: match.group(1).replace(" ", "")
167
- if match.group(1).count(" ") == 1
168
- else match.group(1),
169
- name="CM_spaces_in_numbers",
170
- ),
171
- # uppercase after dot
172
- # NReProcessor(re.compile(r'(?u)((?<!(?=\s*[A-ZÀ-Ž-_0-9.]\s*))(?:[^.\s])+\.\s+)([a-zà-ž])'),
173
- # lambda match: r'%s%s' % (match.group(1), match.group(2).upper()), name="CM_uppercase_after_dot"),
174
- # remove double interpunction
175
- NReProcessor(
176
- re.compile(r"(?u)(\s*[,!?])\s*([,.!?][,.!?\s]*)"),
177
- lambda match: match.group(1).strip()
178
- + (" " if match.group(2).endswith(" ") else ""),
179
- name="CM_double_interpunct",
180
- ),
181
- # remove spaces before punctuation; don't break spaced ellipses
182
- NReProcessor(
183
- re.compile(r"(?u)(?:(?<=^)|(?<=\w)) +([!?.,](?![!?.,]| \.))"),
184
- r"\1",
185
- name="CM_punctuation_space",
186
- ),
187
- # add space after punctuation
188
- NReProcessor(
189
- re.compile(r"(?u)(([^\s]*)([!?.,:])([A-zÀ-ž]{2,}))"),
190
- lambda match: f"{match.group(2)}{match.group(3)} {match.group(4)}"
191
- if not get_tld(match.group(1), fail_silently=True, fix_protocol=True)
192
- else match.group(1),
193
- name="CM_punctuation_space2",
194
- ),
195
- # fix lowercase I in english
196
- NReProcessor(
197
- re.compile(r"(?u)(\b)i(\b)"),
198
- r"\1I\2",
199
- name="CM_EN_lowercase_i",
200
- # supported=lambda p: p.language == ENGLISH),
201
- ),
202
- ]
203
-
204
-
205
- class FixOCR(SubtitleTextModification):
206
- identifier = "OCR_fixes"
207
- description = "Fix common OCR issues"
208
- exclusive = True
209
- order = 10
210
- data_dict = None
211
-
212
- long_description = "Fix issues that happen when a subtitle gets converted from bitmap to text through OCR"
213
-
214
- def __init__(self, language):
215
- super(FixOCR, self).__init__()
216
- data_dict = data.get(language)
217
- if not data_dict:
218
- # logger.debug("No SnR-data available for language %s", parent.language)
219
- return
220
-
221
- self.data_dict = data_dict
222
- self.processors = self.get_processors()
223
-
224
- def get_processors(self):
225
- if not self.data_dict:
226
- return []
227
-
228
- return [
229
- # remove broken HI tag colons (ANNOUNCER'., ". instead of :) after at least 3 uppercase chars
230
- # don't modify stuff inside quotes
231
- NReProcessor(
232
- re.compile(
233
- r'(?u)(^[^"\'’ʼ❜‘‛”“‟„]*(?<=[A-ZÀ-Ž]{3})[A-ZÀ-Ž-_\s0-9]+)'
234
- r'(["\'’ʼ❜‘‛”“‟„]*[.,‚،⹁、;]+)(\s*)(?!["\'’ʼ❜‘‛”“‟„])'
235
- ),
236
- r"\1:\3",
237
- name="OCR_fix_HI_colons",
238
- ),
239
- # fix F'bla
240
- NReProcessor(
241
- re.compile(r"(?u)(\bF)(\')([A-zÀ-ž]*\b)"), r"\1\3", name="OCR_fix_F"
242
- ),
243
- WholeLineProcessor(self.data_dict["WholeLines"], name="OCR_replace_line"),
244
- MultipleWordReProcessor(
245
- self.data_dict["WholeWords"], name="OCR_replace_word"
246
- ),
247
- MultipleWordReProcessor(
248
- self.data_dict["BeginLines"], name="OCR_replace_beginline"
249
- ),
250
- MultipleWordReProcessor(
251
- self.data_dict["EndLines"], name="OCR_replace_endline"
252
- ),
253
- MultipleWordReProcessor(
254
- self.data_dict["PartialLines"], name="OCR_replace_partialline"
255
- ),
256
- MultipleLineProcessor(
257
- self.data_dict["PartialWordsAlways"],
258
- name="OCR_replace_partialwordsalways",
259
- ),
260
- ]
@@ -1,26 +0,0 @@
1
- # pgs2srt
2
-
3
- Uses [pgsreader](https://github.com/EzraBC/pgsreader) and [pytesseract](https://pypi.org/project/pytesseract/) to convert image based pgs subtitles files (.sup) to text based subrip (.srt) files.
4
-
5
- ## Requirements
6
- Python3, pip3, and Tesseract
7
-
8
- ## Installation
9
- * Run ```git clone https://github.com/PimvanderLoos/pgs2srt.git```
10
- * Inside the repo folder, run ```pip3 install -r requirements.txt```
11
- * In your .bashrc or .zshrc add ```alias pgs2srt='<absolute path to repo>/pgs2srt.py'```
12
-
13
- ## How to run
14
-
15
- pgs2srt <pgs filename>.sup
16
-
17
- ## Improving accuracy
18
- On Debian and Ubuntu, the default trained models files for Tesseract are from the [fast](https://github.com/tesseract-ocr/tessdata_fast) set. While these are a bit faster than other options, this comes at the cost of accuracy. If you want higher accuracy, I'd recommend using either the [legacy](https://github.com/tesseract-ocr/tessdata) or the [best](https://github.com/tesseract-ocr/tessdata_best) trained models. Note that the fast and best options only support the LSTM OCR Engine Mode (oem 1).
19
-
20
- ## Caveats
21
-
22
- This is in no way a perfect converter, and tesseract will make incorrect interpretations of characters. Extremely alpha, issues, pull requests and suggestions welcome!
23
-
24
-
25
- ## Credits
26
- This project uses the common + OCR fixes developed by [Sub-Zero.bundle](https://github.com/pannal/Sub-Zero.bundle).
File without changes
@@ -1,89 +0,0 @@
1
- import numpy as np
2
- from PIL import Image
3
-
4
-
5
- def read_rle_bytes(ods_bytes):
6
- pixels = []
7
- line_builder = []
8
-
9
- i = 0
10
- while i < len(ods_bytes):
11
- if ods_bytes[i]:
12
- incr = 1
13
- color = ods_bytes[i]
14
- length = 1
15
- else:
16
- check = ods_bytes[i + 1]
17
- if check == 0:
18
- incr = 2
19
- color = 0
20
- length = 0
21
- pixels.append(line_builder)
22
- line_builder = []
23
- elif check < 64:
24
- incr = 2
25
- color = 0
26
- length = check
27
- elif check < 128:
28
- incr = 3
29
- color = 0
30
- length = ((check - 64) << 8) + ods_bytes[i + 2]
31
- elif check < 192:
32
- incr = 3
33
- color = ods_bytes[i + 2]
34
- length = check - 128
35
- else:
36
- incr = 4
37
- color = ods_bytes[i + 3]
38
- length = ((check - 192) << 8) + ods_bytes[i + 2]
39
- line_builder.extend([color] * length)
40
- i += incr
41
-
42
- if line_builder:
43
- print(f"Probably an error; hanging pixels: {line_builder}")
44
-
45
- return pixels
46
-
47
-
48
- def ycbcr2rgb(ar):
49
- xform = np.array([[1, 0, 1.402], [1, -0.34414, -0.71414], [1, 1.772, 0]])
50
- rgb = ar.astype(float)
51
- # Subtracting by 128 the R and G channels
52
- rgb[:, [1, 2]] -= 128
53
- # .dot is multiplication of the matrices and xform.T is a transpose of the array axes
54
- rgb = rgb.dot(xform.T)
55
- # Makes any pixel value greater than 255 just be 255 (Max for RGB colorspace)
56
- np.putmask(rgb, rgb > 255, 255)
57
- # Sets any pixel value less than 0 to 0 (Min for RGB colorspace)
58
- np.putmask(rgb, rgb < 0, 0)
59
- return np.uint8(rgb)
60
-
61
-
62
- def px_rgb_a(ods, pds, swap):
63
- px = read_rle_bytes(ods.img_data)
64
- px = np.array([[255] * (ods.width - len(l)) + l for l in px], dtype=np.uint8)
65
-
66
- # Extract the YCbCrA palette data, swapping channels if requested.
67
- if swap:
68
- ycbcr = np.array([(entry.Y, entry.Cb, entry.Cr) for entry in pds.palette])
69
- else:
70
- ycbcr = np.array([(entry.Y, entry.Cr, entry.Cb) for entry in pds.palette])
71
- try:
72
- rgb = ycbcr2rgb(ycbcr)
73
- except AttributeError:
74
- print("Error: The image is not in YCbCr format.")
75
- exit(1)
76
- # Separate the Alpha channel from the YCbCr palette data
77
- a = [entry.Alpha for entry in pds.palette]
78
- a = np.array([[a[x] for x in l] for l in px], dtype=np.uint8)
79
-
80
- return px, rgb, a
81
-
82
-
83
- def make_image(ods, pds, swap=False):
84
- px, rgb, a = px_rgb_a(ods, pds, swap)
85
- alpha = Image.fromarray(a, mode="L")
86
- img = Image.fromarray(px, mode="P")
87
- img.putalpha(alpha)
88
- img.putpalette(rgb)
89
- return img
@@ -1,150 +0,0 @@
1
- #!/usr/bin/env python3
2
-
3
- import argparse
4
- import re
5
- from datetime import datetime, timedelta
6
-
7
- import pytesseract
8
- from imagemaker import make_image
9
- from Libraries.SubZero.post_processing import CommonFixes, FixOCR
10
- from pgsreader import PGSReader
11
- from PIL import Image, ImageOps
12
-
13
- parser = argparse.ArgumentParser(description="Convert PGS subtitles to SubRip format.")
14
-
15
- parser.add_argument("input", type=str, help="The input file (a .sup file).")
16
- parser.add_argument("--output", type=str, help="The output file (a .srt file).")
17
- parser.add_argument(
18
- "--oem",
19
- type=int,
20
- help="The OCR Engine Mode to use (Default: 1).",
21
- default=1,
22
- choices=range(4),
23
- )
24
- parser.add_argument(
25
- "--language", type=str, help="The language to use (Default: eng).", default="eng"
26
- )
27
- parser.add_argument(
28
- "--fix_common",
29
- help="Fixes common whitespace/punctuation issues.",
30
- dest="fix_common",
31
- action="store_true",
32
- )
33
- parser.add_argument(
34
- "--fix_common_ocr",
35
- help="Fixes common OCR issues for supported languages.",
36
- dest="fix_ocr",
37
- action="store_true",
38
- )
39
-
40
- args = parser.parse_args()
41
-
42
- assert args.input is not None
43
-
44
- # Unescape escaped spaces
45
- file = args.input.replace("\\ ", " ")
46
-
47
- print(f"Parsing: {file}")
48
-
49
- # Load a PGS/SUP file.
50
- pgs = PGSReader(file)
51
-
52
- # Set index
53
- i = 0
54
-
55
- # Complete subtitle track index
56
- si = 0
57
-
58
- tesseract_lang = args.language
59
- tesseract_config = f"-c tessedit_char_blacklist=[] --psm 6 --oem {args.oem}"
60
-
61
- # If an output file for the subrip output is provided, use that.
62
- # Otherwise remove the ".sup" extension from the input and append
63
- # ".srt".
64
- output_file = (
65
- args.output
66
- if args.output is not None
67
- else (args.input.replace(".sup", "") + ".srt")
68
- )
69
-
70
- # SubRip output
71
- output = ""
72
-
73
- fix_common = CommonFixes() if args.fix_common else None
74
- fix_ocr = FixOCR(args.language) if args.fix_ocr else None
75
-
76
- # Iterate the pgs generator
77
- for ds in pgs.iter_displaysets():
78
- try:
79
- # If set has image, parse the image
80
- if ds.has_image:
81
- # Get Palette Display Segment
82
- pds = ds.pds[0]
83
- # Get Object Display Segment
84
- ods = ds.ods[0]
85
-
86
- if pds and ods:
87
- # Create and show the bitmap image and convert it to RGBA
88
- src = make_image(ods, pds).convert("RGBA")
89
-
90
- # Create grayscale image with black background
91
- img = Image.new("L", src.size, "BLACK")
92
- # Paste the subtitle bitmap
93
- img.paste(src, (0, 0), src)
94
- # Invert images so the text is readable by Tesseract
95
- img = ImageOps.invert(img)
96
-
97
- # Parse the image with tesseract
98
- text = pytesseract.image_to_string(
99
- img, lang=tesseract_lang, config=tesseract_config
100
- ).strip()
101
-
102
- # Replace "|" with "I"
103
- # Works better than blacklisting "|" in Tesseract,
104
- # which results in I becoming "!" "i" and "1"
105
- text = re.sub(r"[|/\\]", "I", text)
106
- text = re.sub(r"[_]", "L", text)
107
-
108
- if args.fix_common:
109
- text = fix_common.process(text)
110
- if args.fix_ocr:
111
- text = fix_ocr.modify(text)
112
-
113
- start = datetime.fromtimestamp(ods.presentation_timestamp / 1000)
114
- start = start + timedelta(hours=-1)
115
-
116
- else:
117
- # Get Presentation Composition Segment
118
- pcs = ds.pcs[0]
119
-
120
- if pcs:
121
- end = datetime.fromtimestamp(pcs.presentation_timestamp / 1000)
122
- end = end + timedelta(hours=-1)
123
-
124
- if (
125
- isinstance(start, datetime)
126
- and isinstance(end, datetime)
127
- and len(text)
128
- ):
129
- si = si + 1
130
- sub_output = str(si) + "\n"
131
- sub_output += (
132
- start.strftime("%H:%M:%S,%f")[0:12]
133
- + " --> "
134
- + end.strftime("%H:%M:%S,%f")[0:12]
135
- + "\n"
136
- )
137
- sub_output += text + "\n\n"
138
-
139
- output += sub_output
140
- start = end = text = None
141
- i = i + 1
142
-
143
- except Exception as e:
144
- print(e)
145
- exit(1)
146
-
147
- f = open(output_file, "w")
148
- f.write(output)
149
- f.close()
150
- print(f"Saved to: {output_file}")