GameSentenceMiner 2.14.20__py3-none-any.whl → 2.15.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- GameSentenceMiner/config_gui.py +3 -0
- GameSentenceMiner/gsm.py +92 -81
- GameSentenceMiner/obs.py +18 -5
- GameSentenceMiner/ocr/gsm_ocr_config.py +0 -1
- GameSentenceMiner/ocr/owocr_helper.py +0 -1
- GameSentenceMiner/owocr/owocr/ocr.py +24 -22
- GameSentenceMiner/owocr/owocr/run.py +102 -22
- GameSentenceMiner/tools/furigana_filter_preview.py +330 -0
- GameSentenceMiner/util/get_overlay_coords.py +81 -48
- GameSentenceMiner/vad.py +2 -2
- {gamesentenceminer-2.14.20.dist-info → gamesentenceminer-2.15.0.dist-info}/METADATA +2 -2
- {gamesentenceminer-2.14.20.dist-info → gamesentenceminer-2.15.0.dist-info}/RECORD +16 -15
- {gamesentenceminer-2.14.20.dist-info → gamesentenceminer-2.15.0.dist-info}/WHEEL +0 -0
- {gamesentenceminer-2.14.20.dist-info → gamesentenceminer-2.15.0.dist-info}/entry_points.txt +0 -0
- {gamesentenceminer-2.14.20.dist-info → gamesentenceminer-2.15.0.dist-info}/licenses/LICENSE +0 -0
- {gamesentenceminer-2.14.20.dist-info → gamesentenceminer-2.15.0.dist-info}/top_level.txt +0 -0
GameSentenceMiner/config_gui.py
CHANGED
@@ -2192,6 +2192,9 @@ class ConfigApp:
                              row=self.current_row, column=0)
         self.overlay_monitor = ttk.Combobox(overlay_frame, values=self.monitors, state="readonly")
         self.overlay_monitor.grid(row=self.current_row, column=1, sticky='EW', pady=2)
+        # disable selection for now, default to value 1
+        self.overlay_monitor.current(1)
+        self.overlay_monitor.config(state="disabled")
         self.current_row += 1

         if self.monitors:
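The new lines pin the overlay monitor picker to one entry while leaving the widget visible. A minimal standalone sketch of that ttk pattern (the widget names here are hypothetical, not GSM's):

    import tkinter as tk
    from tkinter import ttk

    root = tk.Tk()
    monitors = ["Monitor 0", "Monitor 1", "Monitor 2"]  # hypothetical values

    combo = ttk.Combobox(root, values=monitors, state="readonly")
    combo.grid(row=0, column=0)
    combo.current(1)                # pre-select the second entry (index 1)
    combo.config(state="disabled")  # user can see, but not change, the selection

    root.mainloop()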
GameSentenceMiner/gsm.py
CHANGED
@@ -123,8 +123,6 @@ if is_windows():
 procs_to_close = []
 settings_window: config_gui.ConfigApp = None
 obs_paused = False
-icon: Icon
-menu: Menu
 root = None
 warnings.simplefilter("ignore", DeprecationWarning)

@@ -401,83 +399,94 @@ def open_log():
     logger.info("Log opened.")


-def exit_program(passed_icon, item):
-    if not passed_icon:
-        passed_icon = icon
-    logger.info("Exiting...")
-    passed_icon.stop()
-    cleanup()
+def open_multimine(icon, item):
+    texthooking_page.open_texthooker()


-def
-
-
-
+def exit_program(passed_icon, item):
+    """Exit the application."""
+    if not passed_icon:
+        passed_icon = icon
+    logger.info("Exiting...")
+    passed_icon.stop()
+    cleanup()

+class GSMTray(threading.Thread):
+    def __init__(self):
+        super().__init__()
+        self.daemon = True
+        self.menu = None
+        self.icon = None
+
+    def run(self):
+        self.run_tray()
+
+    def run_tray(self):
+        self.profile_menu = Menu(
+            *[MenuItem(("Active: " if profile == get_master_config().current_profile else "") + profile, self.switch_profile) for
+              profile in
+              get_master_config().get_all_profile_names()]
+        )
+
+        menu = Menu(
+            MenuItem("Open Settings", open_settings, default=True),
+            MenuItem("Open Texthooker", texthooking_page.open_texthooker),
+            MenuItem("Open Log", open_log),
+            MenuItem("Toggle Replay Buffer", self.play_pause),
+            MenuItem("Restart OBS", restart_obs),
+            MenuItem("Switch Profile", self.profile_menu),
+            MenuItem("Exit", exit_program)
+        )
+
+        self.icon = Icon("TrayApp", create_image(), "GameSentenceMiner", menu)
+        self.icon.run()
+
+    def update_icon(self, profile=None):
+        global menu, icon
+        # Recreate the menu with the updated button text
+        profile_menu = Menu(
+            *[MenuItem(("Active: " if profile == get_master_config().current_profile else "") + profile, self.switch_profile) for
+              profile in
+              get_master_config().get_all_profile_names()]
+        )
+
+        menu = Menu(
+            MenuItem("Open Settings", open_settings, default=True),
+            MenuItem("Open Multi-Mine GUI", open_multimine),
+            MenuItem("Open Log", open_log),
+            MenuItem("Toggle Replay Buffer", self.play_pause),
+            MenuItem("Restart OBS", restart_obs),
+            MenuItem("Switch Profile", profile_menu),
+            MenuItem("Exit", exit_program)
+        )
+
+        self.icon.menu = menu
+        self.icon.update_menu()
+
+    def switch_profile(self, icon, item):
+        if "Active:" in item.text:
+            logger.error("You cannot switch to the currently active profile!")
+            return
+        logger.info(f"Switching to profile: {item.text}")
+        prev_config = get_config()
+        get_master_config().current_profile = item.text
+        switch_profile_and_save(item.text)
+        settings_window.reload_settings()
+        self.update_icon()
+        if get_config().restart_required(prev_config):
+            send_restart_signal()

-def
-
+    def play_pause(self, icon, item):
+        global obs_paused, menu
+        obs.toggle_replay_buffer()
+        self.update_icon()

+    def stop(self):
+        if self.icon:
+            self.icon.stop()

-
-    global menu, icon
-    # Recreate the menu with the updated button text
-    profile_menu = Menu(
-        *[MenuItem(("Active: " if profile == get_master_config().current_profile else "") + profile, switch_profile) for
-          profile in
-          get_master_config().get_all_profile_names()]
-    )
-
-    menu = Menu(
-        MenuItem("Open Settings", open_settings, default=True),
-        MenuItem("Open Multi-Mine GUI", open_multimine),
-        MenuItem("Open Log", open_log),
-        MenuItem("Toggle Replay Buffer", play_pause),
-        MenuItem("Restart OBS", restart_obs),
-        MenuItem("Switch Profile", profile_menu),
-        MenuItem("Exit", exit_program)
-    )
-
-    icon.menu = menu
-    icon.update_menu()
-
-
-def switch_profile(icon, item):
-    if "Active:" in item.text:
-        logger.error("You cannot switch to the currently active profile!")
-        return
-    logger.info(f"Switching to profile: {item.text}")
-    prev_config = get_config()
-    get_master_config().current_profile = item.text
-    switch_profile_and_save(item.text)
-    settings_window.reload_settings()
-    update_icon()
-    if get_config().restart_required(prev_config):
-        send_restart_signal()
-
-
-def run_tray():
-    global menu, icon
-
-    profile_menu = Menu(
-        *[MenuItem(("Active: " if profile == get_master_config().current_profile else "") + profile, switch_profile) for
-          profile in
-          get_master_config().get_all_profile_names()]
-    )
-
-    menu = Menu(
-        MenuItem("Open Settings", open_settings, default=True),
-        MenuItem("Open Texthooker", texthooking_page.open_texthooker),
-        MenuItem("Open Log", open_log),
-        MenuItem("Toggle Replay Buffer", play_pause),
-        MenuItem("Restart OBS", restart_obs),
-        MenuItem("Switch Profile", profile_menu),
-        MenuItem("Exit", exit_program)
-    )
-
-    icon = Icon("TrayApp", create_image(), "GameSentenceMiner", menu)
-    icon.run()
+gsm_tray = GSMTray()


 # def close_obs():
@@ -551,8 +560,8 @@ def cleanup():
                 proc.kill()
                 logger.error(f"Error terminating process {proc}: {e}")

-    if
-
+    if gsm_tray:
+        gsm_tray.stop()

     for video in gsm_state.videos_to_remove:
         try:
@@ -608,7 +617,7 @@ def initialize(reloading=False):


 def initialize_async():
-    tasks = [connect_websocket
+    tasks = [connect_websocket]
     threads = []
     tasks.append(anki.start_monitoring_anki)
     for task in tasks:
@@ -633,11 +642,12 @@ def handle_websocket_message(message: Message):
         case FunctionName.OPEN_LOG:
             open_log()
         case FunctionName.TOGGLE_REPLAY_BUFFER:
-
+            obs.toggle_replay_buffer()
         case FunctionName.RESTART_OBS:
             restart_obs()
         case FunctionName.EXIT:
-
+            cleanup()
+            sys.exit(0)
         case FunctionName.CONNECT:
             logger.debug("Electron WSS connected")
         case _:
@@ -671,7 +681,7 @@ async def register_scene_switcher_callback():
         all_configured_scenes = [
             config.scenes for config in get_master_config().configs.values()]
         print(all_configured_scenes)
-        matching_configs = [name.strip() for name, config in
+        matching_configs = [name.strip() for name, config in get_master_config().configs.items(
         ) if scene.strip() in config.scenes]
         switch_to = None

@@ -692,7 +702,7 @@ async def register_scene_switcher_callback():
             get_master_config().current_profile = switch_to
             switch_profile_and_save(switch_to)
             settings_window.reload_settings()
-            update_icon()
+            gsm_tray.update_icon()

     await obs.register_scene_change_callback(scene_switcher_callback)

@@ -763,7 +773,8 @@ async def async_main(reloading=False):
     try:
         if get_config().general.open_config_on_startup:
             root.after(50, settings_window.show)
-
+        root.after(50, gsm_tray.start)
+        settings_window.add_save_hook(gsm_tray.update_icon)
         settings_window.on_exit = exit_program
         root.mainloop()
     except KeyboardInterrupt:
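The tray refactor replaces the module-level `icon`/`menu` globals with a `GSMTray` thread that owns the pystray `Icon`, so the blocking `icon.run()` no longer ties up the main thread. A minimal sketch of the same ownership pattern, independent of GSM's actual menus (works as shown on Windows; some platforms require the tray on the main thread):

    import threading
    from pystray import Icon, Menu, MenuItem
    from PIL import Image

    class TrayThread(threading.Thread):
        """Owns the pystray Icon so the blocking icon.run() stays off the main thread."""
        def __init__(self):
            super().__init__(daemon=True)  # daemon: don't block interpreter exit
            self.icon = None

        def run(self):
            menu = Menu(MenuItem("Quit", lambda icon, item: icon.stop()))
            self.icon = Icon("demo", Image.new("RGB", (16, 16), "red"), "Demo", menu)
            self.icon.run()  # blocks this thread until stop() is called

        def stop(self):
            if self.icon:
                self.icon.stop()

    tray = TrayThread()
    tray.start()  # main thread stays free, e.g. for a Tk mainloop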
GameSentenceMiner/obs.py
CHANGED
@@ -1,16 +1,17 @@
 import asyncio
+import json
 import os.path
 import subprocess
 import threading
 import time
-
+import logging

 import psutil

 import obsws_python as obs

 from GameSentenceMiner.util import configuration
-from GameSentenceMiner.util.configuration import
+from GameSentenceMiner.util.configuration import get_app_directory, get_config, get_master_config, is_windows, save_full_config, reload_config, logger, gsm_status, gsm_state
 from GameSentenceMiner.util.gsm_utils import sanitize_filename, make_unique_file_name
 import tkinter as tk
 from tkinter import messagebox
@@ -54,7 +55,7 @@ class OBSConnectionManager(threading.Thread):
                 if gsm_status.obs_connected and not replay_buffer_status and not self.said_no_to_replay_buffer:
                     try:
                         self.check_output()
-                    except Exception
+                    except Exception:
                         pass
             except Exception as e:
                 logger.error(f"Error when running Extra Utils in OBS Health Check, Keeping ConnectionManager Alive: {e}")
@@ -251,7 +252,6 @@ def connect_to_obs_sync(retry=2, check_output=True):
             logger.error(f"Failed to connect to OBS WebSocket: {e}")
             client = None
             event_client = None
-            connecting = False
             break
         time.sleep(1)
         retry -= 1
@@ -483,6 +483,9 @@ def set_fit_to_screen_for_scene_items(scene_name: str):
     try:
         # 1. Get the canvas (base) resolution from OBS video settings
         video_settings = client.get_video_settings()
+        if not hasattr(video_settings, 'base_width') or not hasattr(video_settings, 'base_height'):
+            logger.debug("Video settings do not have base_width or base_height attributes, probably weird websocket error issue? Idk what causes it..")
+            return
         canvas_width = video_settings.base_width
         canvas_height = video_settings.base_height

@@ -499,12 +502,23 @@ def set_fit_to_screen_for_scene_items(scene_name: str):
         item_id = item['sceneItemId']
         source_name = item['sourceName']

+        scene_item_transform = item.get('sceneItemTransform', {})
+
+        source_width = scene_item_transform.get('sourceWidth', None)
+        source_height = scene_item_transform.get('sourceHeight', None)
+
         # This transform object is the equivalent of "Fit to Screen"
         fit_to_screen_transform = {
             'boundsType': 'OBS_BOUNDS_SCALE_INNER',
             'alignment': 5,  # 5 = Center alignment (horizontal and vertical)
             'boundsWidth': canvas_width,
             'boundsHeight': canvas_height,
+            'positionX': 0,
+            'positionY': 0,
+            'cropLeft': 0 if canvas_width >= source_width else (source_width - canvas_width) // 2,
+            'cropRight': 0 if canvas_width >= source_width else (source_width - canvas_width) // 2,
+            'cropTop': 0 if canvas_height >= source_height else (source_height - canvas_height) // 2,
+            'cropBottom': 0 if canvas_height >= source_height else (source_height - canvas_height) // 2,
         }

         try:
@@ -559,7 +573,6 @@ def main():
     disconnect_from_obs()

 if __name__ == '__main__':
-    from mss import mss
     logging.basicConfig(level=logging.INFO)
     connect_to_obs_sync()
     set_fit_to_screen_for_scene_items(get_current_scene())
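The new crop fields center-crop any source that is larger than the OBS canvas, trimming the overflow equally from both sides of each axis. A small sketch of that arithmetic with hypothetical resolutions:

    def center_crop(canvas_w, canvas_h, source_w, source_h):
        """Crop equally from both sides of any axis where the source exceeds the canvas."""
        crop_x = 0 if canvas_w >= source_w else (source_w - canvas_w) // 2
        crop_y = 0 if canvas_h >= source_h else (source_h - canvas_h) // 2
        return {'cropLeft': crop_x, 'cropRight': crop_x,
                'cropTop': crop_y, 'cropBottom': crop_y}

    # e.g. a 2560x1440 source on a 1920x1080 canvas:
    # (2560 - 1920) // 2 = 320 px off each side, (1440 - 1080) // 2 = 180 px top/bottom
    print(center_crop(1920, 1080, 2560, 1440))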
GameSentenceMiner/ocr/gsm_ocr_config.py
CHANGED
@@ -83,7 +83,6 @@ class OCRConfig:
     ]

     def scale_to_custom_size(self, width, height):
-        print(self.pre_scale_rectangles)
         self.rectangles = self.pre_scale_rectangles.copy()
         if self.coordinate_system and self.coordinate_system == "percentage":
             for rectangle in self.rectangles:
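For context, `scale_to_custom_size` converts percentage-based rectangles into pixels for the current frame size. A minimal sketch of that conversion, assuming rectangles stored as (left, top, width, height) fractions of the frame (the actual `Rectangle` model may differ):

    def scale_rect_from_percentage(rect, width, height):
        """Scale a (left, top, w, h) rectangle given as fractions of the frame into pixels."""
        left_pct, top_pct, w_pct, h_pct = rect
        return (left_pct * width, top_pct * height, w_pct * width, h_pct * height)

    # A dialogue box occupying the bottom 15% of a 1920x1080 frame:
    print(scale_rect_from_percentage((0.1, 0.8, 0.8, 0.15), 1920, 1080))
    # -> (192.0, 864.0, 1536.0, 162.0)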
GameSentenceMiner/owocr/owocr/ocr.py
CHANGED
@@ -106,6 +106,7 @@ def empty_post_process(text):

 def post_process(text, keep_blank_lines=False):
     import jaconv
+    text = text.replace("\"", "")
     if keep_blank_lines:
         text = '\n'.join([''.join(i.split()) for i in text.splitlines()])
     else:
@@ -354,8 +355,8 @@ class GoogleLens:
         response_proto = LensOverlayServerResponse().FromString(res.content)
         response_dict = response_proto.to_dict(betterproto.Casing.SNAKE)

-        if os.path.exists(r"C:\Users\Beangate\GSM\
-            with open(os.path.join(r"C:\Users\Beangate\GSM\
+        if os.path.exists(r"C:\Users\Beangate\GSM\test"):
+            with open(os.path.join(r"C:\Users\Beangate\GSM\test", 'glens_response.json'), 'w', encoding='utf-8') as f:
                 json.dump(response_dict, f, indent=4, ensure_ascii=False)
         res = ''
         text = response_dict['objects_response']['text']
@@ -377,16 +378,13 @@ class GoogleLens:
                     res += 'BLANK_LINE'
                 for line in paragraph['lines']:
                     if furigana_filter_sensitivity:
+                        line_width = line['geometry']['bounding_box']['width'] * img.width
+                        line_height = line['geometry']['bounding_box']['height'] * img.height
                         for word in line['words']:
                             if self.punctuation_regex.findall(word['plain_text']):
                                 res += word['plain_text'] + word['text_separator']
                                 continue
-                            if
-                                res += word['plain_text'] + word['text_separator']
-                                continue
-                            word_width = word['geometry']['bounding_box']['width'] * img.width
-                            word_height = word['geometry']['bounding_box']['height'] * img.height
-                            if word_width > furigana_filter_sensitivity and word_height > furigana_filter_sensitivity:
+                            if line_width > furigana_filter_sensitivity and line_height > furigana_filter_sensitivity:
                                 res += word['plain_text'] + word['text_separator']
                             else:
                                 skipped.extend(word['plain_text'])
@@ -394,7 +392,8 @@ class GoogleLens:
                     else:
                         for word in line['words']:
                             res += word['plain_text'] + word['text_separator']
-
+                    res += '\n'
+
                 previous_line = paragraph
             res += '\n'
         # logger.info(
@@ -920,7 +919,7 @@ class OneOCR:
         self.regex = re.compile(
             r'[a-zA-Z\u00C0-\u00FF\u0100-\u017F\u0180-\u024F\u0250-\u02AF\u1D00-\u1D7F\u1D80-\u1DBF\u1E00-\u1EFF\u2C60-\u2C7F\uA720-\uA7FF\uAB30-\uAB6F]')

-    def __call__(self, img, furigana_filter_sensitivity=0, return_coords=False, multiple_crop_coords=False, return_one_box=True):
+    def __call__(self, img, furigana_filter_sensitivity=0, return_coords=False, multiple_crop_coords=False, return_one_box=True, return_dict=False):
         lang = get_ocr_language()
         if furigana_filter_sensitivity != None:
             furigana_filter_sensitivity = get_furigana_filter_sensitivity()
@@ -940,6 +939,7 @@ class OneOCR:
             return (False, 'Invalid image provided')
         crop_coords = None
         crop_coords_list = []
+        ocr_resp = ''
         if sys.platform == 'win32':
             try:
                 ocr_resp = self.model.recognize_pil(img)
@@ -959,17 +959,17 @@ class OneOCR:
         boxes = []
         if furigana_filter_sensitivity > 0:
             for line in filtered_lines:
+                line_x1, line_x2, line_x3, line_x4 = line['bounding_rect']['x1'], line['bounding_rect']['x2'], \
+                    line['bounding_rect']['x3'], line['bounding_rect']['x4']
+                line_y1, line_y2, line_y3, line_y4 = line['bounding_rect']['y1'], line['bounding_rect']['y2'], \
+                    line['bounding_rect']['y3'], line['bounding_rect']['y4']
+                line_width = max(line_x2 - line_x1, line_x3 - line_x4)
+                line_height = max(line_y3 - line_y1, line_y4 - line_y2)
                 for char in line['words']:
                     if self.punctuation_regex.findall(char['text']):
                         res += char['text']
                         continue
-                    x1, x2, x3, x4 = char['bounding_rect']['x1'], char['bounding_rect']['x2'], \
-                        char['bounding_rect']['x3'], char['bounding_rect']['x4']
-                    y1, y2, y3, y4 = char['bounding_rect']['y1'], char['bounding_rect']['y2'], \
-                        char['bounding_rect']['y3'], char['bounding_rect']['y4']
-                    width = max(x2 - x1, x3 - x4)
-                    height = max(y3 - y1, y4 - y2)
-                    if width > furigana_filter_sensitivity and height > furigana_filter_sensitivity:
+                    if line_width > furigana_filter_sensitivity and line_height > furigana_filter_sensitivity:
                         res += char['text']
                     else:
                         skipped.extend(char for char in line['text'])
@@ -1042,6 +1042,8 @@ class OneOCR:
             x.append(crop_coords_list)
         if return_one_box:
             x.append(crop_coords)
+        if return_dict:
+            x.append(ocr_resp)
         if is_path:
             img.close()
         return x
@@ -1432,10 +1434,10 @@ class localLLMOCR:
         self.keep_warm = config.get('keep_warm', True)
         self.custom_prompt = config.get('prompt', None)
         self.available = True
-
-
-
-
+        if any(x in self.api_url for x in ['localhost', '127.0.0.1']):
+            if not self.check_connection(self.api_url):
+                logger.warning('Local LLM OCR API is not reachable')
+                return
         self.client = openai.OpenAI(
             base_url=self.api_url.replace('/v1/chat/completions', '/v1'),
             api_key=self.api_key
@@ -1492,7 +1494,7 @@ class localLLMOCR:
             prompt = self.custom_prompt.strip()
         else:
             prompt = f"""
-            Extract all {CommonLanguages.from_code(get_ocr_language()).name} Text from Image. Ignore all Furigana. Do not return any commentary, just the text in the image. If there is no text in the image, return "" (Empty String).
+            Extract all {CommonLanguages.from_code(get_ocr_language()).name} Text from Image. Ignore all Furigana. Do not return any commentary, just the text in the image. Do not Translate. If there is no text in the image, return "" (Empty String).
             """

         response = self.client.chat.completions.create(
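Both engines now apply the furigana filter per line rather than per word: a line's bounding box is measured once, and every word in it is kept or skipped together. A self-contained sketch of the OneOCR-style quad-to-size test used above (the corner layout is assumed clockwise from top-left; coordinates are hypothetical):

    def line_passes_filter(bounding_rect: dict, sensitivity: float) -> bool:
        """True if the line's box exceeds `sensitivity` px in both dimensions.

        Corners assumed clockwise from top-left: (x1,y1) TL, (x2,y2) TR,
        (x3,y3) BR, (x4,y4) BL, so width/height take the larger of the two edges.
        """
        r = bounding_rect
        width = max(r['x2'] - r['x1'], r['x3'] - r['x4'])
        height = max(r['y3'] - r['y1'], r['y4'] - r['y2'])
        return width > sensitivity and height > sensitivity

    # Furigana lines are much smaller than body text, so a threshold between the
    # two (hypothetical numbers below) keeps the body line and drops the ruby line.
    body = {'x1': 100, 'y1': 200, 'x2': 500, 'y2': 200, 'x3': 500, 'y3': 240, 'x4': 100, 'y4': 240}
    ruby = {'x1': 100, 'y1': 186, 'x2': 220, 'y2': 186, 'x3': 220, 'y3': 198, 'x4': 100, 'y4': 198}
    print(line_passes_filter(body, 20), line_passes_filter(ruby, 20))  # True False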
GameSentenceMiner/owocr/owocr/run.py
CHANGED
@@ -1,5 +1,6 @@
-from
-from
+from GameSentenceMiner.ocr.gsm_ocr_config import set_dpi_awareness, get_scene_ocr_config
+from GameSentenceMiner.util.electron_config import *  # noqa: F403
+from GameSentenceMiner.util.gsm_utils import do_text_replacements, OCR_REPLACEMENTS_FILE

 try:
     import win32gui
@@ -28,7 +29,6 @@ import signal
 import threading
 from pathlib import Path
 import queue
-import io
 import re
 import logging
 import inspect
@@ -39,23 +39,22 @@ import mss
 import asyncio
 import websockets
 import socketserver
-import
+import cv2
+import numpy as np

 from datetime import datetime, timedelta
-from PIL import Image, ImageDraw
+from PIL import Image, ImageDraw
 from loguru import logger
 from desktop_notifier import DesktopNotifierSync
 import psutil

-import
-from .ocr import *
-try:
-    from .secret import *
-except ImportError:
-    pass
+from .ocr import *  # noqa: F403
 from .config import Config
 from .screen_coordinate_picker import get_screen_selection
-from GameSentenceMiner.util.configuration import get_temporary_directory
+from GameSentenceMiner.util.configuration import get_temporary_directory
+
+from skimage.metrics import structural_similarity as ssim
+from typing import Union

 config = None
 last_image = None
@@ -799,8 +798,6 @@ class ScreenshotThread(threading.Thread):
         self.windows_window_tracker_instance.join()


-import cv2
-import numpy as np

 def apply_adaptive_threshold_filter(img):
     img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
@@ -865,11 +862,6 @@ def are_images_identical(img1, img2, img2_np=None):
     return (img1_np.shape == img2_np.shape) and np.array_equal(img1_np, img2_np)


-import cv2
-import numpy as np
-from skimage.metrics import structural_similarity as ssim
-from typing import Union
-
 ImageType = Union[np.ndarray, Image.Image]

 def _prepare_image(image: ImageType) -> np.ndarray:
@@ -1319,6 +1311,10 @@ def on_screenshot_combo():
 def on_window_minimized(minimized):
     global screencapture_window_visible
     screencapture_window_visible = not minimized
+
+
+def do_configured_ocr_replacements(text: str) -> str:
+    return do_text_replacements(text, OCR_REPLACEMENTS_FILE)


 def process_and_write_results(img_or_path, write_to=None, last_result=None, filtering=None, notify=None, engine=None, ocr_start_time=None, furigana_filter_sensitivity=0):
@@ -1364,14 +1360,21 @@ def process_and_write_results(img_or_path, write_to=None, last_result=None, filtering=None, notify=None, engine=None, ocr_start_time=None, furigana_filter_sensitivity=0):
     # print(engine_index)

     if res:
+        text = do_configured_ocr_replacements(text)
         if filtering:
             text, orig_text = filtering(text, last_result)
         if get_ocr_language() == "ja" or get_ocr_language() == "zh":
             text = post_process(text, keep_blank_lines=get_ocr_keep_newline())
-        logger.opt(ansi=True).info(
-            f'Text recognized in {end_time - start_time:0.03f}s using <{engine_color}>{engine_instance.readable_name}</{engine_color}>: {text}')
         if notify and config.get_general('notifications'):
             notifier.send(title='owocr', message='Text recognized: ' + text)
+
+        if text and write_to is not None:
+            if check_text_is_all_menu(text, crop_coords):
+                logger.opt(ansi=True).info('Text is identified as all menu items, skipping further processing.')
+                return orig_text, ''
+
+        logger.opt(ansi=True).info(
+            f'Text recognized in {end_time - start_time:0.03f}s using <{engine_color}>{engine_instance.readable_name}</{engine_color}>: {text}')

         if write_to == 'websocket':
             websocket_server_thread.send_text(text)
@@ -1395,6 +1398,83 @@ def process_and_write_results(img_or_path, write_to=None, last_result=None, filtering=None, notify=None, engine=None, ocr_start_time=None, furigana_filter_sensitivity=0):

     return orig_text, text

+def check_text_is_all_menu(text: str, crop_coords: tuple) -> bool:
+    """
+    Checks if the recognized text consists entirely of menu items.
+    This function checks if the detected text area falls entirely within secondary rectangles (menu areas).
+
+    :param text: The recognized text from OCR.
+    :param crop_coords: Tuple containing (x, y, width, height) of the detected text area relative to the cropped image.
+    :return: True if the text is all menu items (within secondary rectangles), False otherwise.
+    """
+    if not text or not crop_coords:
+        return False
+
+    original_width = obs_screenshot_thread.width
+    original_height = obs_screenshot_thread.height
+    crop_x, crop_y, crop_w, crop_h = crop_coords
+
+    ocr_config = get_scene_ocr_config()
+
+    if not any(rect.is_secondary for rect in ocr_config.rectangles):
+        return False
+
+    ocr_config.scale_to_custom_size(original_width, original_height)
+    if not ocr_config or not ocr_config.rectangles:
+        return False
+
+    primary_rectangles = [rect for rect in ocr_config.rectangles if not rect.is_excluded and not rect.is_secondary]
+    menu_rectangles = [rect for rect in ocr_config.rectangles if rect.is_secondary and not rect.is_excluded]
+
+    if not menu_rectangles:
+        return False
+
+    if not primary_rectangles:
+        if crop_x < 0 or crop_y < 0 or crop_x + crop_w > original_width or crop_y + crop_h > original_height:
+            return False
+        for menu_rect in menu_rectangles:
+            rect_left, rect_top, rect_width, rect_height = menu_rect.coordinates
+            rect_right = rect_left + rect_width
+            rect_bottom = rect_top + rect_height
+            if (crop_x >= rect_left and crop_y >= rect_top and
+                    crop_x + crop_w <= rect_right and crop_y + crop_h <= rect_bottom):
+                return True
+        return False
+
+    primary_rectangles.sort(key=lambda r: r.coordinates[1])
+
+    if len(primary_rectangles) == 1:
+        primary_rect = primary_rectangles[0]
+        primary_left, primary_top = primary_rect.coordinates[0], primary_rect.coordinates[1]
+        original_x = crop_x + primary_left
+        original_y = crop_y + primary_top
+    else:
+        current_y_offset = 0
+        original_x = None
+        original_y = None
+        for i, primary_rect in enumerate(primary_rectangles):
+            primary_left, primary_top, primary_width, primary_height = primary_rect.coordinates
+            section_height = primary_height
+            if crop_y >= current_y_offset and crop_y < current_y_offset + section_height:
+                original_x = crop_x + primary_left
+                original_y = (crop_y - current_y_offset) + primary_top
+                break
+            current_y_offset += section_height + 50
+        if original_x is None or original_y is None:
+            return False
+
+    if original_x < 0 or original_y < 0 or original_x > original_width or original_y > original_height:
+        return False
+
+    for menu_rect in menu_rectangles:
+        rect_left, rect_top, rect_width, rect_height = menu_rect.coordinates
+        rect_right = rect_left + rect_width
+        rect_bottom = rect_top + rect_height
+        if (original_x >= rect_left and original_y >= rect_top and
+                original_x <= rect_right and original_y <= rect_bottom):
+            return True
+
+    return False

 def get_path_key(path):
     return path, path.lstat().st_mtime
@@ -1785,7 +1865,7 @@ def run(read_from=None,
             continue

         res, text = process_and_write_results(img, write_to, last_result, filtering, notify,
-                                              ocr_start_time=ocr_start_time, furigana_filter_sensitivity=
+                                              ocr_start_time=ocr_start_time, furigana_filter_sensitivity=None if get_ocr_two_pass_ocr() else get_furigana_filter_sensitivity())
         if not text and not previous_text and time.time() - last_result_time > 10:
             sleep_time_to_add += .005
             logger.info(f"No text detected again, sleeping.")
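`check_text_is_all_menu` ultimately reduces to axis-aligned rectangle containment once the OCR crop is mapped back into screen space. A stripped-down sketch of that core test with hypothetical rectangles:

    def rect_contains(outer, point):
        """Axis-aligned containment test: is `point` inside the (left, top, w, h) rect?"""
        left, top, width, height = outer
        x, y = point
        return left <= x <= left + width and top <= y <= top + height

    menu_rects = [(0, 900, 1920, 180)]   # hypothetical bottom menu strip
    text_origin = (250, 960)             # detected text mapped to screen space
    is_menu = any(rect_contains(r, text_origin) for r in menu_rects)
    print(is_menu)  # True: the text sits inside a secondary (menu) rectangle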
GameSentenceMiner/tools/furigana_filter_preview.py
ADDED
@@ -0,0 +1,330 @@
+import tkinter as tk
+from tkinter import ttk
+from PIL import Image, ImageTk
+import threading
+
+import regex
+
+from GameSentenceMiner import obs
+from GameSentenceMiner.util.configuration import logger
+from GameSentenceMiner.owocr.owocr.ocr import GoogleLens, OneOCR
+
+def get_ocr_results_from_image(image_obj: Image.Image) -> tuple:
+    """
+    This is the function where you will plug in your OCR logic.
+
+    Args:
+        image_obj: A PIL Image object of the screenshot (used by your actual OCR call).
+
+    Returns:
+        A tuple containing the OCR results from both engines.
+    """
+    lens = GoogleLens()
+    oneocr = OneOCR()
+    oneocr_res = oneocr(image_obj, return_dict=True)
+    res = lens(image_obj, return_coords=True)
+
+    return res[2], oneocr_res[3]
+
+
+class FuriganaFilterVisualizer:
+    def __init__(self, master, image: Image.Image, current_furigana_sensitivity: int = 0):
+        self.master = master
+        self.image = image
+        self.ocr1_result = None
+        self.ocr2_result = None
+        self.current_ocr = 1
+        self.master.title("Furigana Filter Visualizer - Lens")
+
+        self.words_data = []
+        self.lines_data = []
+        self.drawn_rects = []
+
+        main_frame = tk.Frame(master)
+        main_frame.pack(fill=tk.BOTH, expand=True)
+
+        self.photo_image = ImageTk.PhotoImage(self.image)
+        self.canvas = tk.Canvas(main_frame, width=self.image.width, height=self.image.height)
+        self.canvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
+        self.canvas.create_image(0, 0, image=self.photo_image, anchor=tk.NW)
+
+        self.loading_bg = self.canvas.create_rectangle(
+            self.image.width/2 - 100, self.image.height/2 - 25,
+            self.image.width/2 + 100, self.image.height/2 + 25,
+            fill="black", outline="white", width=2
+        )
+        self.loading_text = self.canvas.create_text(
+            self.image.width / 2, self.image.height / 2,
+            text="Loading OCR data...", fill="white", font=("Helvetica", 16)
+        )
+
+        self.control_frame = tk.Frame(main_frame, padx=10, pady=10)
+        self.control_frame.pack(side=tk.BOTTOM, fill=tk.X)
+
+        ttk.Label(self.control_frame, text="Furigana Filter Sensitivity:").pack(side=tk.LEFT, padx=(0, 10))
+
+        self.slider = ttk.Scale(
+            self.control_frame, from_=0, to=100, orient=tk.HORIZONTAL, command=self.update_filter_visualization
+        )
+        self.slider.set(current_furigana_sensitivity)
+        self.slider.pack(side=tk.LEFT, fill=tk.X, expand=True)
+
+        self.slider_value_label = ttk.Label(self.control_frame, text=f"{self.slider.get():.0f} px", width=6)
+        self.slider_value_label.pack(side=tk.LEFT, padx=(10, 0))
+
+        self.swap_button = ttk.Button(self.control_frame, text="Switch to OneOCR", command=self.swap_ocr)
+        self.swap_button.pack(side=tk.LEFT, padx=(10, 0))
+
+        self.ok_button = ttk.Button(self.control_frame, text="OK", command=self.on_ok)
+        self.ok_button.pack(side=tk.LEFT, padx=(10, 0))
+
+        self.slider.config(state=tk.DISABLED)
+        self.swap_button.config(state=tk.DISABLED)
+        self.ok_button.config(state=tk.DISABLED)
+
+        self.punctuation_regex = regex.compile(r'[\p{P}\p{S}]')
+        self.master.protocol("WM_DELETE_WINDOW", self.on_ok)
+
+    def update_with_ocr_data(self, ocr1_result, ocr2_result):
+        """Called by the background thread to populate the GUI with OCR data."""
+        self.ocr1_result = ocr1_result
+        self.ocr2_result = ocr2_result
+
+        # Remove loading message
+        self.canvas.delete(self.loading_bg)
+        self.canvas.delete(self.loading_text)
+
+        if not self.ocr1_result:
+            logger.error("OCR processing failed or returned no data.")
+            self.canvas.create_text(
+                self.image.width / 2, self.image.height / 2,
+                text="OCR Failed!", fill="red", font=("Helvetica", 16)
+            )
+            # Still enable OK button to allow closing
+            self.ok_button.config(state=tk.NORMAL)
+            return
+
+        # Enable controls
+        self.slider.config(state=tk.NORMAL)
+        self.ok_button.config(state=tk.NORMAL)
+        if self.ocr2_result:
+            self.swap_button.config(state=tk.NORMAL)
+
+        # Process and display initial data
+        self.pre_process_word_geometries()
+        self.update_filter_visualization(self.slider.get())
+
+    def on_ok(self):
+        print(f"RESULT:[{self.slider.get():.0f}]")
+        self.master.destroy()
+
+    def swap_ocr(self):
+        self.current_ocr = 2 if self.current_ocr == 1 else 1
+        # Change to oneocr or lens, in title too
+        if self.current_ocr == 1:
+            self.swap_button.config(text="Switch to OneOCR")
+            self.master.title("Furigana Filter Visualizer - Lens")
+        else:
+            self.swap_button.config(text="Switch to Lens")
+            self.master.title("Furigana Filter Visualizer - OneOCR")
+        self.pre_process_word_geometries()
+        self.update_filter_visualization(self.slider.get())
+
+    def pre_process_word_geometries(self):
+        """
+        Parses the OCR result structure (supports both original and new JSON formats),
+        calculates absolute pixel values, and stores them for high-performance updates.
+        """
+        img_w, img_h = self.image.size
+        logger.info(f"Processing word geometries for image size {img_w}x{img_h}...")
+
+        # Select the current OCR result
+        ocr_result = self.ocr1_result if self.current_ocr == 1 else self.ocr2_result
+        if not ocr_result:
+            return
+        self.words_data.clear()
+        self.lines_data.clear()
+
+        # Try to detect the format: oneocr has 'lines' as a top-level key
+        if 'lines' in ocr_result:
+            for line in ocr_result.get('lines', []):
+                for word in line.get('words', []):
+                    try:
+                        bbox = word['bounding_rect']
+                        x1 = bbox['x1']
+                        y1 = bbox['y1']
+                        x2 = bbox['x3']
+                        y2 = bbox['y3']
+                        px_w = abs(x2 - x1)
+                        px_h = abs(y2 - y1)
+                        self.words_data.append({
+                            'text': word.get('text', ''),
+                            'px_w': px_w,
+                            'px_h': px_h,
+                            'coords': (x1, y1, x2, y2)
+                        })
+                    except Exception as e:
+                        logger.warning(f"Skipping malformed word data (new format): {e}. Data: {word}")
+                        continue
+                try:
+                    bbox = line['bounding_rect']
+                    x1 = bbox['x1']
+                    y1 = bbox['y1']
+                    x2 = bbox['x3']
+                    y2 = bbox['y3']
+                    px_w = abs(x2 - x1)
+                    px_h = abs(y2 - y1)
+                    self.lines_data.append({
+                        'text': line.get('text', ''),
+                        'px_w': px_w,
+                        'px_h': px_h,
+                        'coords': (x1, y1, x2, y2)
+                    })
+                except Exception as e:
+                    logger.warning(f"Skipping malformed line data (new format): {e}. Data: {line}")
+                    continue
+        else:
+            # Lens format (nested paragraphs/lines/words)
+            text_layout = ocr_result.get('objects_response', {}).get('text', {}).get('text_layout', {})
+            if not text_layout:
+                logger.error("Could not find 'text_layout' in the OCR response.")
+                return
+            for paragraph in text_layout.get('paragraphs', []):
+                for line in paragraph.get('lines', []):
+                    for word in line.get('words', []):
+                        try:
+                            bbox_pct = word['geometry']['bounding_box']
+                            width_pct = bbox_pct['width']
+                            height_pct = bbox_pct['height']
+                            top_left_x_pct = bbox_pct['center_x'] - (width_pct / 2)
+                            top_left_y_pct = bbox_pct['center_y'] - (height_pct / 2)
+                            px_w = width_pct * img_w
+                            px_h = height_pct * img_h
+                            x1 = top_left_x_pct * img_w
+                            y1 = top_left_y_pct * img_h
+                            x2 = x1 + px_w
+                            y2 = y1 + px_h
+                            self.words_data.append({
+                                'text': word.get('plain_text', ''),
+                                'px_w': px_w,
+                                'px_h': px_h,
+                                'coords': (x1, y1, x2, y2)
+                            })
+                        except (KeyError, TypeError) as e:
+                            logger.warning(f"Skipping malformed word data (orig format): {e}. Data: {word}")
+                            continue
+                    try:
+                        line_bbox = line['geometry']['bounding_box']
+                        width_pct = line_bbox['width']
+                        height_pct = line_bbox['height']
+                        top_left_x_pct = line_bbox['center_x'] - (width_pct / 2)
+                        top_left_y_pct = line_bbox['center_y'] - (height_pct / 2)
+                        px_w = width_pct * img_w
+                        px_h = height_pct * img_h
+                        x1 = top_left_x_pct * img_w
+                        y1 = top_left_y_pct * img_h
+                        x2 = x1 + px_w
+                        y2 = y1 + px_h
+                        self.lines_data.append({
+                            'text': ''.join([w.get('plain_text', '') for w in line.get('words', [])]),
+                            'px_w': px_w,
+                            'px_h': px_h,
+                            'coords': (x1, y1, x2, y2)
+                        })
+                    except (KeyError, TypeError) as e:
+                        logger.warning(f"Skipping malformed line data (orig format): {e}. Data: {line}")
+                        continue
+        logger.info(f"Successfully pre-processed {len(self.lines_data)} lines.")
+
+
+    def update_filter_visualization(self, slider_value):
+        """
+        Called on every slider move. Clears old rectangles and draws new ones
+        for words that pass the sensitivity filter.
+        """
+        sensitivity = float(slider_value)
+        self.slider_value_label.config(text=f"{sensitivity:.0f} px")
+
+        for rect_id in self.drawn_rects:
+            self.canvas.delete(rect_id)
+        self.drawn_rects.clear()
+
+        for line_data in self.lines_data:
+            if line_data['px_w'] > sensitivity and line_data['px_h'] > sensitivity:
+                x1, y1, x2, y2 = line_data['coords']
+                rect_id = self.canvas.create_rectangle(
+                    x1, y1, x2, y2, outline='blue', width=2
+                )
+                self.drawn_rects.append(rect_id)
+
+def scale_down_width_height(width, height):
+    if width == 0 or height == 0:
+        return width, height
+    aspect_ratio = width / height
+    if aspect_ratio > 2.66:
+        # Ultra-wide (32:9) - use 1920x540
+        return 1920, 540
+    elif aspect_ratio > 2.33:
+        # 21:9 - use 1920x800
+        return 1920, 800
+    elif aspect_ratio > 1.77:
+        # 16:9 - use 1280x720
+        return 1280, 720
+    elif aspect_ratio > 1.6:
+        # 16:10 - use 1280x800
+        return 1280, 800
+    elif aspect_ratio > 1.33:
+        # 4:3 - use 960x720
+        return 960, 720
+    elif aspect_ratio > 1.25:
+        # 5:4 - use 900x720
+        return 900, 720
+    elif aspect_ratio > 1.5:
+        # 3:2 - use 1080x720
+        return 1080, 720
+    else:
+        # Default/fallback - use original resolution
+        print(f"Unrecognized aspect ratio {aspect_ratio}. Using original resolution.")
+        return width, height
+
+def main():
+    import sys
+    current_furigana_sensitivity = int(sys.argv[1]) if len(sys.argv) > 1 else 0
+
+    """Main execution function."""
+    try:
+        logger.info("Connecting to OBS...")
+        obs.connect_to_obs_sync()
+    except Exception as e:
+        logger.error(f"Failed to connect to OBS. Please ensure OBS is running and the WebSocket server is enabled. Error: {e}")
+        return
+
+    logger.info("Taking OBS screenshot...")
+    screenshot_img = obs.get_screenshot_PIL(compression=90, img_format='jpg')
+
+    screenshot_img = screenshot_img.resize(scale_down_width_height(screenshot_img.width, screenshot_img.height), Image.LANCZOS)
+
+    if not screenshot_img:
+        logger.error("Failed to get screenshot from OBS.")
+        return
+
+    logger.info(f"Screenshot received ({screenshot_img.width}x{screenshot_img.height}).")
+
+    root = tk.Tk()
+    app = FuriganaFilterVisualizer(root, screenshot_img, current_furigana_sensitivity)
+
+    def ocr_worker():
+        logger.info("Starting OCR process in background thread...")
+        try:
+            ocr1_data, ocr2_data = get_ocr_results_from_image(screenshot_img)
+            root.after(0, app.update_with_ocr_data, ocr1_data, ocr2_data)
+        except Exception as e:
+            logger.error(f"Error in OCR background thread: {e}")
+            root.after(0, app.update_with_ocr_data, None, None)
+
+    threading.Thread(target=ocr_worker, daemon=True).start()
+
+    root.mainloop()
+
+if __name__ == "__main__":
+    main()
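The tool reports the chosen sensitivity by printing `RESULT:[N]` on stdout before exiting, which lets a parent process launch it and read the value back. A hedged sketch of one way a caller could do that (invoking the module with `-m` and this regex are assumptions, not necessarily GSM's actual wiring):

    import re
    import subprocess
    import sys

    # Launch the preview tool with the current sensitivity as argv[1]
    proc = subprocess.run(
        [sys.executable, "-m", "GameSentenceMiner.tools.furigana_filter_preview", "20"],
        capture_output=True, text=True,
    )

    # The tool prints "RESULT:[<value>]" when the user clicks OK or closes the window
    match = re.search(r"RESULT:\[(\d+)\]", proc.stdout)
    if match:
        sensitivity = int(match.group(1))  # chosen value, in pixels
        print(f"User picked a furigana filter sensitivity of {sensitivity}px")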
GameSentenceMiner/util/get_overlay_coords.py
CHANGED
@@ -94,26 +94,59 @@ class OverlayProcessor:
         except Exception as e:
             logger.error(f"Error during OCR processing: {e}", exc_info=True)
             return []
+
+    @staticmethod
+    def get_monitor_workarea(monitor_index=0):
+        """
+        Return MSS-style dict for monitor area.
+        For primary monitor, excludes taskbar. For others, returns full monitor area.
+        monitor_index: 0 = primary monitor, 1+ = others (as in mss.monitors).
+        """
+        with mss.mss() as sct:
+            monitors = sct.monitors[1:]
+        print(monitors)
+        if is_windows() and monitor_index == 0:
+            from ctypes import wintypes
+            import ctypes
+            # Get work area for primary monitor (ignores taskbar)
+            SPI_GETWORKAREA = 0x0030
+            rect = wintypes.RECT()
+            res = ctypes.windll.user32.SystemParametersInfoW(
+                SPI_GETWORKAREA, 0, ctypes.byref(rect), 0
+            )
+            if not res:
+                raise ctypes.WinError()
+
+            return {
+                "left": rect.left,
+                "top": rect.top,
+                "width": rect.right - rect.left,
+                "height": rect.bottom - rect.top,
+            }
+        elif is_windows() and monitor_index > 0:
+            # Secondary monitors: just return with a guess of how tall the taskbar is
+            taskbar_height_guess = 48  # A common taskbar height, may vary
+            mon = monitors[monitor_index]
+            return {
+                "left": mon["left"],
+                "top": mon["top"],
+                "width": mon["width"],
+                "height": mon["height"] - taskbar_height_guess
+            }
+        else:
+            # For non-Windows systems or unspecified monitors, return the monitor area as-is
+            return monitors[monitor_index] if 0 <= monitor_index < len(monitors) else monitors[0]
+

     def _get_full_screenshot(self) -> Tuple[Image.Image | None, int, int]:
         """Captures a screenshot of the configured monitor."""
         if not mss:
             raise RuntimeError("MSS screenshot library is not installed.")
-
         with mss.mss() as sct:
-            monitors = sct.monitors
-            # Index 0 is the 'all monitors' virtual screen, so we skip it.
-            monitor_list = monitors[1:] if len(monitors) > 1 else [monitors[0]]
-
-            monitor_index = self.config.overlay.monitor_to_capture
-            if monitor_index >= len(monitor_list):
-                logger.error(f"Monitor index {monitor_index} is out of bounds. Found {len(monitor_list)} monitors.")
-                return None, 0, 0
-
-            monitor = monitor_list[monitor_index]
+            monitor = self.get_monitor_workarea(0)  # Get primary monitor work area
             sct_img = sct.grab(monitor)
             img = Image.frombytes('RGB', sct_img.size, sct_img.bgra, 'raw', 'BGRX')
-
+
             return img, monitor['width'], monitor['height']

     def _create_composite_image(
@@ -210,7 +243,8 @@ class OverlayProcessor:
             crop_x=0,
             crop_y=0,
             crop_width=composite_image.width,
-            crop_height=composite_image.height
+            crop_height=composite_image.height,
+            use_percentages=True
         )

         return extracted_data
@@ -223,7 +257,8 @@ class OverlayProcessor:
         crop_x: int,
         crop_y: int,
         crop_width: int,
-        crop_height: int
+        crop_height: int,
+        use_percentages: bool
     ) -> List[Dict[str, Any]]:
         """
         Parses Google Lens API response and converts normalized coordinates
@@ -244,9 +279,10 @@ class OverlayProcessor:
                 word_text = word.get("plain_text", "")
                 line_text_parts.append(word_text)

-                word_box = self.
+                word_box = self._convert_box_to_overlay_coords(
                     word["geometry"]["bounding_box"],
-                    crop_x, crop_y, crop_width, crop_height
+                    crop_x, crop_y, crop_width, crop_height,
+                    use_percentage=use_percentages
                 )

                 word_list.append({
@@ -258,9 +294,9 @@ class OverlayProcessor:
                 continue

             full_line_text = "".join(line_text_parts)
-            line_box = self.
+            line_box = self._convert_box_to_overlay_coords(
                 line["geometry"]["bounding_box"],
-                crop_x, crop_y, crop_width, crop_height
+                crop_x, crop_y, crop_width, crop_height, use_percentage=use_percentages
             )

             results.append({
@@ -270,36 +306,45 @@ class OverlayProcessor:
             })
         return results

-    def
+    def _convert_box_to_overlay_coords(
         self,
         bbox_data: Dict[str, float],
         crop_x: int,
         crop_y: int,
         crop_width: int,
-        crop_height: int
+        crop_height: int,
+        use_percentage: bool
     ) -> Dict[str, float]:
         """
         Simplified conversion: scales normalized bbox to pixel coordinates within
         the cropped region, then offsets by the crop position. Ignores rotation.
+        If use_percentage is True, returns coordinates as percentages of the crop dimensions.
         """
         cx, cy = bbox_data['center_x'], bbox_data['center_y']
         w, h = bbox_data['width'], bbox_data['height']

-        # Scale normalized coordinates to pixel coordinates relative to the crop area
-        box_width = w * crop_width
-        box_height = h * crop_height
-
-        # Calculate center within the cropped area and then add the crop offset
-        center_x = (cx * crop_width) + crop_x
-        center_y = (cy * crop_height) + crop_y
+        if use_percentage:
+            # Return coordinates as percentages of the crop dimensions
+            box_width = w
+            box_height = h
+            center_x = cx
+            center_y = cy
+        else:
+            # Scale normalized coordinates to pixel coordinates relative to the crop area
+            box_width = w * crop_width
+            box_height = h * crop_height
+
+            # Calculate center within the cropped area and then add the crop offset
+            center_x = (cx * crop_width) + crop_x
+            center_y = (cy * crop_height) + crop_y

         # Calculate corners (unrotated)
-        half_w, half_h = box_width / 2, box_height / 2
+        half_w, half_h = box_width / 2, box_height / 2
         return {
-            "x1": center_x - half_w, "y1": center_y - half_h,
-            "x2": center_x + half_w, "y2": center_y - half_h,
-            "x3": center_x + half_w, "y3": center_y + half_h,
-            "x4": center_x - half_w, "y4": center_y + half_h,
+            "x1": center_x - half_w, "y1": center_y - half_h,
+            "x2": center_x + half_w, "y2": center_y - half_h,
+            "x3": center_x + half_w, "y3": center_y + half_h,
+            "x4": center_x - half_w, "y4": center_y + half_h,
         }

     async def main_test_screenshot():
@@ -334,22 +379,10 @@ async def main_run_ocr():
     """
     Main function to demonstrate running the full OCR process.
     """
-
-
-
-
-    print("OCR process completed successfully.")
-    # print(json.dumps(results, indent=2, ensure_ascii=False))
-    # Find first result with some text
-    for result in results:
-        if result.get("text"):
-            print(f"Found line: '{result['text']}'")
-            print(f"  - Line BBox: {result['bounding_rect']}")
-            if result.get("words"):
-                print(f"  - First word: '{result['words'][0]['text']}' BBox: {result['words'][0]['bounding_rect']}")
-            break
-    else:
-        print("OCR process did not find any text.")
+    overlay_processor = OverlayProcessor()
+    while True:
+        await overlay_processor.find_box_and_send_to_overlay('')
+        await asyncio.sleep(10)


 if __name__ == '__main__':
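`_convert_box_to_overlay_coords` turns a Lens-style center/size box into four corners, either kept in normalized (percentage) space or scaled into crop-offset pixels. A standalone sketch of the same math with a hypothetical box:

    def box_to_corners(bbox, crop_x=0, crop_y=0, crop_w=1, crop_h=1, use_percentage=True):
        """Convert a center/size bounding box into x1..y4 corners, clockwise from top-left."""
        cx, cy, w, h = bbox['center_x'], bbox['center_y'], bbox['width'], bbox['height']
        if not use_percentage:
            # Scale normalized coords into pixels inside the crop, then offset by the crop origin
            w, h = w * crop_w, h * crop_h
            cx, cy = cx * crop_w + crop_x, cy * crop_h + crop_y
        half_w, half_h = w / 2, h / 2
        return {'x1': cx - half_w, 'y1': cy - half_h, 'x2': cx + half_w, 'y2': cy - half_h,
                'x3': cx + half_w, 'y3': cy + half_h, 'x4': cx - half_w, 'y4': cy + half_h}

    # A box centered at 50%/50% covering 20% x 10% of a 1280x720 crop:
    print(box_to_corners({'center_x': 0.5, 'center_y': 0.5, 'width': 0.2, 'height': 0.1},
                         crop_w=1280, crop_h=720, use_percentage=False))
    # {'x1': 512.0, 'y1': 324.0, 'x2': 768.0, 'y2': 324.0, 'x3': 768.0, 'y3': 396.0, ...}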
GameSentenceMiner/vad.py
CHANGED
@@ -173,7 +173,7 @@ class WhisperVADProcessor(VADProcessor):

     def _detect_voice_activity(self, input_audio):
         from stable_whisper import WhisperResult
-        # Convert the audio to 16kHz mono WAV
+        # Convert the audio to 16kHz mono WAV, evidence https://discord.com/channels/1286409772383342664/1286518821913362445/1407017127529152533
         temp_wav = tempfile.NamedTemporaryFile(dir=configuration.get_temporary_directory(), suffix='.wav').name
         ffmpeg.convert_audio_to_wav(input_audio, temp_wav)

@@ -376,7 +376,7 @@ vad_processor = VADSystem()
 # Test cases for all VADProcessors
 def test_vad_processors():
     logger.setLevel(logging.DEBUG)
-    test_audio = r"C:\Users\Beangate\GSM\GameSentenceMiner\GameSentenceMiner\test\NEKOPARAvol.1_2025-08-18-
+    test_audio = r"C:\Users\Beangate\GSM\GameSentenceMiner\GameSentenceMiner\test\NEKOPARAvol.1_2025-08-18-17-20-43-614.opus"
     output_dir = r"C:\Users\Beangate\GSM\GameSentenceMiner\GameSentenceMiner\test\output"
     os.makedirs(output_dir, exist_ok=True)
     processors = [
{gamesentenceminer-2.14.20.dist-info → gamesentenceminer-2.15.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: GameSentenceMiner
-Version: 2.14.20
+Version: 2.15.0
 Summary: A tool for mining sentences from games. Update: Overlay?
 Author-email: Beangate <bpwhelan95@gmail.com>
 License: MIT License
@@ -57,7 +57,7 @@ An application designed to assist with language learning through games.

 Short Demo (Watch this first): https://www.youtube.com/watch?v=FeFBL7py6HY

-Installation: https://
+Installation: https://www.youtube.com/watch?v=sVL9omRbGc4

 Discord: https://discord.gg/yP8Qse6bb8
{gamesentenceminer-2.14.20.dist-info → gamesentenceminer-2.15.0.dist-info}/RECORD
CHANGED
@@ -1,10 +1,10 @@
 GameSentenceMiner/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 GameSentenceMiner/anki.py,sha256=4Tq6OGjfN-5tYorYRWiih7FZjSKMG6amrLv6DFKkFQc,25344
-GameSentenceMiner/config_gui.py,sha256=
+GameSentenceMiner/config_gui.py,sha256=Ynr7P0xkhqeOiO-7aiM9OzB3aTLs5JSlEsia5cHyyd4,139669
 GameSentenceMiner/gametext.py,sha256=fgBgLchezpauWELE9Y5G3kVCLfAneD0X4lJFoI3FYbs,10351
-GameSentenceMiner/gsm.py,sha256=
-GameSentenceMiner/obs.py,sha256=
-GameSentenceMiner/vad.py,sha256=
+GameSentenceMiner/gsm.py,sha256=4mJn5v4WKqKAJEtph5e0v4YPVDOpvFN1ylV2vQvf_Dg,31913
+GameSentenceMiner/obs.py,sha256=U7ooVhf3k0vx-T8noSLVQirgCbS27fNnuZmpfM6alnc,24580
+GameSentenceMiner/vad.py,sha256=YCn4ZIc6_Q3IGOr5QNMiheVT3Ma5nisn8-V8xD53Mw4,19236
 GameSentenceMiner/ai/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 GameSentenceMiner/ai/ai_prompting.py,sha256=41xdBzE88Jlt12A0D-T_cMfLO5j6MSxfniOptpwNZm0,24068
 GameSentenceMiner/assets/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -19,20 +19,21 @@ GameSentenceMiner/locales/en_us.json,sha256=FAnvsCfsFzWyxYKZKh8HKHsAahi3Oa4wGVek
 GameSentenceMiner/locales/ja_jp.json,sha256=-v0ng0psD88-C4XjYazJL0Rn0gwQU7b2VYspvdatDO4,28326
 GameSentenceMiner/locales/zh_cn.json,sha256=X5nw6tsu7ACaZIuSUDSUUjG8qPUwmqyG3TKcPbWSIYw,24654
 GameSentenceMiner/ocr/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-GameSentenceMiner/ocr/gsm_ocr_config.py,sha256=
+GameSentenceMiner/ocr/gsm_ocr_config.py,sha256=DfcR3bHTu26JJerLzqfW_KpdgUBSrRV4hqSy_LYclps,5967
 GameSentenceMiner/ocr/ocrconfig.py,sha256=_tY8mjnzHMJrLS8E5pHqYXZjMuLoGKYgJwdhYgN-ny4,6466
 GameSentenceMiner/ocr/owocr_area_selector.py,sha256=Rm1_nuZotJhfOfoJ_3mesh9udtOBjYqKhnAvSief6fo,29181
-GameSentenceMiner/ocr/owocr_helper.py,sha256=
+GameSentenceMiner/ocr/owocr_helper.py,sha256=8aIpAHSPPByox5qcU4SX-_ECDgQZVEwrXdj4A8AQZ6U,28437
 GameSentenceMiner/ocr/ss_picker.py,sha256=0IhxUdaKruFpZyBL-8SpxWg7bPrlGpy3lhTcMMZ5rwo,5224
 GameSentenceMiner/owocr/owocr/__init__.py,sha256=87hfN5u_PbL_onLfMACbc0F5j4KyIK9lKnRCj6oZgR0,49
 GameSentenceMiner/owocr/owocr/__main__.py,sha256=XQaqZY99EKoCpU-gWQjNbTs7Kg17HvBVE7JY8LqIE0o,157
 GameSentenceMiner/owocr/owocr/config.py,sha256=qM7kISHdUhuygGXOxmgU6Ef2nwBShrZtdqu4InDCViE,8103
 GameSentenceMiner/owocr/owocr/lens_betterproto.py,sha256=oNoISsPilVVRBBPVDtb4-roJtAhp8ZAuFTci3TGXtMc,39141
-GameSentenceMiner/owocr/owocr/ocr.py,sha256=
-GameSentenceMiner/owocr/owocr/run.py,sha256=
+GameSentenceMiner/owocr/owocr/ocr.py,sha256=rJKmB8xtSVrayq8XcZ4JKBULyPKDqfJDGKgLjICbUX8,71021
+GameSentenceMiner/owocr/owocr/run.py,sha256=xbBpyFCVfITZDztsRLT8_sX6BGf1o5LxOPxE9zUWfQc,79975
 GameSentenceMiner/owocr/owocr/screen_coordinate_picker.py,sha256=Na6XStbQBtpQUSdbN3QhEswtKuU1JjReFk_K8t5ezQE,3395
 GameSentenceMiner/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 GameSentenceMiner/tools/audio_offset_selector.py,sha256=8Stk3BP-XVIuzRv9nl9Eqd2D-1yD3JrgU-CamBywJmY,8542
+GameSentenceMiner/tools/furigana_filter_preview.py,sha256=BXv7FChPEJW_VeG5XYt6suAsMKVArsjr3cEduE9KhYg,13642
 GameSentenceMiner/tools/ss_selector.py,sha256=cbjMxiKOCuOfbRvLR_PCRlykBrGtm1LXd6u5czPqkmc,4793
 GameSentenceMiner/tools/window_transparency.py,sha256=GtbxbmZg0-UYPXhfHff-7IKZyY2DKe4B9GdyovfmpeM,8166
 GameSentenceMiner/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -40,7 +41,7 @@ GameSentenceMiner/util/configuration.py,sha256=JVwaqvfrUrOUiA0kZcznDsCo9hJkJqBpV
 GameSentenceMiner/util/db.py,sha256=2bO0rD4i8A1hhsRBER-wgZy9IK17ibRbI8DHxdKvYsI,16598
 GameSentenceMiner/util/electron_config.py,sha256=KfeJToeFFVw0IR5MKa-gBzpzaGrU-lyJbR9z-sDEHYU,8767
 GameSentenceMiner/util/ffmpeg.py,sha256=jA-cFtCmdCWrUSPpdtFSLr-GSoqs4qNUzW20v4HPHf0,28715
-GameSentenceMiner/util/get_overlay_coords.py,sha256=
+GameSentenceMiner/util/get_overlay_coords.py,sha256=bSvBSvLFmABogPp-quXcQrN-meWvl5NRB6gFEluoNNg,15142
 GameSentenceMiner/util/gsm_utils.py,sha256=Piwv88Q9av2LBeN7M6QDi0Mp0_R2lNbkcI6ekK5hd2o,11851
 GameSentenceMiner/util/model.py,sha256=R-_RYTYLSDNgBoVTPuPBcIHeOznIqi_vBzQ7VQ20WYk,6727
 GameSentenceMiner/util/notification.py,sha256=YBhf_mSo_i3cjBz-pmeTPx3wchKiG9BK2VBdZSa2prQ,4597
@@ -71,9 +72,9 @@ GameSentenceMiner/web/templates/index.html,sha256=LqXZx7-NE42pXSpHNZ3To680rD-vt9
 GameSentenceMiner/web/templates/text_replacements.html,sha256=tV5c8mCaWSt_vKuUpbdbLAzXZ3ATZeDvQ9PnnAfqY0M,8598
 GameSentenceMiner/web/templates/utility.html,sha256=3flZinKNqUJ7pvrZk6xu__v67z44rXnaK7UTZ303R-8,16946
 GameSentenceMiner/wip/__init___.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-gamesentenceminer-2.14.20.dist-info/licenses/LICENSE,sha256=
-gamesentenceminer-2.14.20.dist-info/METADATA,sha256=
-gamesentenceminer-2.14.20.dist-info/WHEEL,sha256=
-gamesentenceminer-2.14.20.dist-info/entry_points.txt,sha256=
-gamesentenceminer-2.14.20.dist-info/top_level.txt,sha256=
-gamesentenceminer-2.14.20.dist-info/RECORD,,
+gamesentenceminer-2.15.0.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+gamesentenceminer-2.15.0.dist-info/METADATA,sha256=Fd7VWnRI-2fkihajfxLaGnHXWOF2h7couT11IqrsJoc,7317
+gamesentenceminer-2.15.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+gamesentenceminer-2.15.0.dist-info/entry_points.txt,sha256=2APEP25DbfjSxGeHtwBstMH8mulVhLkqF_b9bqzU6vQ,65
+gamesentenceminer-2.15.0.dist-info/top_level.txt,sha256=V1hUY6xVSyUEohb0uDoN4UIE6rUZ_JYx8yMyPGX4PgQ,18
+gamesentenceminer-2.15.0.dist-info/RECORD,,
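For reference, the `sha256=...` values in RECORD follow the wheel format: the urlsafe base64 encoding of the file's SHA-256 digest with `=` padding stripped. A small sketch for verifying an entry:

    import base64
    import hashlib

    def record_hash(path):
        """RECORD-style hash: urlsafe base64 of the SHA-256 digest, '=' padding stripped."""
        with open(path, 'rb') as f:
            digest = hashlib.sha256(f.read()).digest()
        return 'sha256=' + base64.urlsafe_b64encode(digest).rstrip(b'=').decode()

    # e.g. record_hash('GameSentenceMiner/gsm.py') should match the RECORD entry above.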