GameSentenceMiner 2.14.9__py3-none-any.whl → 2.14.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. GameSentenceMiner/ai/__init__.py +0 -0
  2. GameSentenceMiner/ai/ai_prompting.py +473 -0
  3. GameSentenceMiner/ocr/__init__.py +0 -0
  4. GameSentenceMiner/ocr/gsm_ocr_config.py +174 -0
  5. GameSentenceMiner/ocr/ocrconfig.py +129 -0
  6. GameSentenceMiner/ocr/owocr_area_selector.py +629 -0
  7. GameSentenceMiner/ocr/owocr_helper.py +638 -0
  8. GameSentenceMiner/ocr/ss_picker.py +140 -0
  9. GameSentenceMiner/owocr/owocr/__init__.py +1 -0
  10. GameSentenceMiner/owocr/owocr/__main__.py +9 -0
  11. GameSentenceMiner/owocr/owocr/config.py +148 -0
  12. GameSentenceMiner/owocr/owocr/lens_betterproto.py +1238 -0
  13. GameSentenceMiner/owocr/owocr/ocr.py +1690 -0
  14. GameSentenceMiner/owocr/owocr/run.py +1818 -0
  15. GameSentenceMiner/owocr/owocr/screen_coordinate_picker.py +109 -0
  16. GameSentenceMiner/tools/__init__.py +0 -0
  17. GameSentenceMiner/tools/audio_offset_selector.py +215 -0
  18. GameSentenceMiner/tools/ss_selector.py +135 -0
  19. GameSentenceMiner/tools/window_transparency.py +214 -0
  20. GameSentenceMiner/util/__init__.py +0 -0
  21. GameSentenceMiner/util/communication/__init__.py +22 -0
  22. GameSentenceMiner/util/communication/send.py +7 -0
  23. GameSentenceMiner/util/communication/websocket.py +94 -0
  24. GameSentenceMiner/util/configuration.py +1199 -0
  25. GameSentenceMiner/util/db.py +408 -0
  26. GameSentenceMiner/util/downloader/Untitled_json.py +472 -0
  27. GameSentenceMiner/util/downloader/__init__.py +0 -0
  28. GameSentenceMiner/util/downloader/download_tools.py +194 -0
  29. GameSentenceMiner/util/downloader/oneocr_dl.py +250 -0
  30. GameSentenceMiner/util/electron_config.py +259 -0
  31. GameSentenceMiner/util/ffmpeg.py +571 -0
  32. GameSentenceMiner/util/get_overlay_coords.py +366 -0
  33. GameSentenceMiner/util/gsm_utils.py +323 -0
  34. GameSentenceMiner/util/model.py +206 -0
  35. GameSentenceMiner/util/notification.py +157 -0
  36. GameSentenceMiner/util/text_log.py +214 -0
  37. GameSentenceMiner/util/win10toast/__init__.py +154 -0
  38. GameSentenceMiner/util/win10toast/__main__.py +22 -0
  39. GameSentenceMiner/web/__init__.py +0 -0
  40. GameSentenceMiner/web/service.py +132 -0
  41. GameSentenceMiner/web/static/__init__.py +0 -0
  42. GameSentenceMiner/web/static/apple-touch-icon.png +0 -0
  43. GameSentenceMiner/web/static/favicon-96x96.png +0 -0
  44. GameSentenceMiner/web/static/favicon.ico +0 -0
  45. GameSentenceMiner/web/static/favicon.svg +3 -0
  46. GameSentenceMiner/web/static/site.webmanifest +21 -0
  47. GameSentenceMiner/web/static/style.css +292 -0
  48. GameSentenceMiner/web/static/web-app-manifest-192x192.png +0 -0
  49. GameSentenceMiner/web/static/web-app-manifest-512x512.png +0 -0
  50. GameSentenceMiner/web/templates/__init__.py +0 -0
  51. GameSentenceMiner/web/templates/index.html +50 -0
  52. GameSentenceMiner/web/templates/text_replacements.html +238 -0
  53. GameSentenceMiner/web/templates/utility.html +483 -0
  54. GameSentenceMiner/web/texthooking_page.py +584 -0
  55. GameSentenceMiner/wip/__init___.py +0 -0
  56. {gamesentenceminer-2.14.9.dist-info → gamesentenceminer-2.14.11.dist-info}/METADATA +1 -1
  57. gamesentenceminer-2.14.11.dist-info/RECORD +79 -0
  58. gamesentenceminer-2.14.9.dist-info/RECORD +0 -24
  59. {gamesentenceminer-2.14.9.dist-info → gamesentenceminer-2.14.11.dist-info}/WHEEL +0 -0
  60. {gamesentenceminer-2.14.9.dist-info → gamesentenceminer-2.14.11.dist-info}/entry_points.txt +0 -0
  61. {gamesentenceminer-2.14.9.dist-info → gamesentenceminer-2.14.11.dist-info}/licenses/LICENSE +0 -0
  62. {gamesentenceminer-2.14.9.dist-info → gamesentenceminer-2.14.11.dist-info}/top_level.txt +0 -0
File without changes
@@ -0,0 +1,473 @@
1
+ import logging
2
+ import textwrap
3
+ import time
4
+ import json
5
+ from abc import ABC, abstractmethod
6
+ from dataclasses import dataclass
7
+ from enum import Enum
8
+ from typing import List, Optional
9
+ from google import genai
10
+ from google.genai import types
11
+ from groq import Groq
12
+
13
+ from GameSentenceMiner.util.configuration import get_config, Ai, logger
14
+ from GameSentenceMiner.util.gsm_utils import is_connected
15
+ from GameSentenceMiner.util.text_log import GameLine
16
+
17
# Suppress debug logs from httpcore
logging.getLogger("httpcore").setLevel(logging.WARNING)
logging.getLogger("httpx").setLevel(logging.WARNING)
logging.getLogger("groq._base_client").setLevel(logging.WARNING)
# Debug escape hatch: when set to a model name, every manager ignores its
# configured model and uses this one instead (a warning is logged).
MANUAL_MODEL_OVERRIDE = None
22
+
23
# Canned prompt: translate exactly one dialogue line into the user's native
# language. NOTE: the f-string is evaluated at import time, so the native
# language is baked in from get_config() when this module first loads.
TRANSLATION_PROMPT = f"""
**Professional Game Localization Task**

**Task Directive:**
Translate ONLY the provided line of game dialogue specified below into natural-sounding, context-aware {get_config().general.get_native_language_name()}. The translation must preserve the original tone and intent of the source.

**Output Requirements:**
- Provide only the single, best {get_config().general.get_native_language_name()} translation.
- Use expletives if they are natural for the context and enhance the translation's impact, but do not over-exaggerate.
- Carryover all HTML tags present in the original text to HTML tags surrounding their corresponding words in the translation. DO NOT CONVERT TO MARKDOWN.
- If there are no HTML tags present in the original text, do not add any in the translation whatsoever.
- Do not include notes, alternatives, explanations, or any other surrounding text. Absolutely nothing but the translated line.

**Line to Translate:**
"""

# Canned prompt: summarize the current scene instead of translating it.
# Also evaluated at import time (see note above TRANSLATION_PROMPT).
CONTEXT_PROMPT = textwrap.dedent(f"""

**Task Directive:**
Provide a very brief summary of the scene in {get_config().general.get_native_language_name()} based on the provided Japanese dialogue and context. Focus on the characters' actions and the immediate situation being described.

Current Sentence:
""")
46
+
47
class AIType(Enum):
    """Supported AI providers; values match the provider strings stored in config."""
    GEMINI = "Gemini"
    GROQ = "Groq"
    OPENAI = "OpenAI"
51
+
52
@dataclass
class AIConfig:
    """Common connection settings shared by all provider-specific configs."""
    api_key: str
    model: str
    api_url: Optional[str]  # base URL; only used by OpenAI-compatible endpoints
    type: 'AIType'
58
+
59
@dataclass
class GeminiAIConfig(AIConfig):
    """AIConfig preset for Google Gemini (no API URL needed).

    NOTE: @dataclass does not overwrite an explicitly defined __init__,
    so this hand-written constructor is the one that runs."""
    def __init__(self, api_key: str, model: str = "gemini-2.0-flash"):
        super().__init__(api_key=api_key, model=model, api_url=None, type=AIType.GEMINI)
63
+
64
@dataclass
class GroqAiConfig(AIConfig):
    """AIConfig preset for Groq (no API URL needed)."""
    def __init__(self, api_key: str, model: str = "meta-llama/llama-4-scout-17b-16e-instruct"):
        super().__init__(api_key=api_key, model=model, api_url=None, type=AIType.GROQ)
68
+
69
@dataclass
class OpenAIAIConfig(AIConfig):
    """AIConfig preset for OpenAI-compatible endpoints (api_url may point at a
    local server such as LM Studio)."""
    def __init__(self, api_key: str, model: str = "openai/gpt-oss-20b", api_url: Optional[str] = None):
        super().__init__(api_key=api_key, model=model, api_url=api_url, type=AIType.OPENAI)
73
+
74
+
75
class AIManager(ABC):
    """Abstract base for provider-specific AI managers.

    Subclasses wire up a concrete client and implement process(); the shared
    _build_prompt() assembles the final prompt from dialogue context plus a
    canned or custom instruction prompt.
    """

    def __init__(self, ai_config: AIConfig, logger: Optional[logging.Logger] = None):
        self.ai_config = ai_config
        self.logger = logger

    @abstractmethod
    def process(self, lines: List[GameLine], sentence: str, current_line_index: int, game_title: str = "", custom_prompt=None) -> str:
        # NOTE(review): subclasses implement this with `current_line: GameLine`
        # rather than `current_line_index: int` — the signatures have drifted.
        pass

    @abstractmethod
    def _build_prompt(self, lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str, custom_prompt=None) -> str:
        # NOTE: declared abstract yet carries a full implementation —
        # subclasses invoke it via super()._build_prompt(...).
        if get_config().ai.dialogue_context_length != 0:
            # dialogue_context_length: -1 means "all lines"; otherwise a
            # symmetric window around the current line's index.
            if get_config().ai.dialogue_context_length == -1:
                start_index = 0
                end_index = len(lines)
            else:
                start_index = max(0, current_line.index - get_config().ai.dialogue_context_length)
                end_index = min(len(lines), current_line.index + 1 + get_config().ai.dialogue_context_length)

            context_lines_text = []
            for i in range(start_index, end_index):
                if i < len(lines):
                    context_lines_text.append(lines[i].text)

            dialogue_context = "\n".join(context_lines_text)

            dialogue_context = f"""
Dialogue Context:

{dialogue_context}
"""
        else:
            dialogue_context = "No dialogue context available."
        # Prompt priority: explicit custom prompt > canned translation >
        # canned context > user-configured custom prompt.
        if custom_prompt:
            prompt_to_use = custom_prompt
        elif get_config().ai.use_canned_translation_prompt:
            prompt_to_use = TRANSLATION_PROMPT
        elif get_config().ai.use_canned_context_prompt:
            prompt_to_use = CONTEXT_PROMPT
        else:
            prompt_to_use = get_config().ai.custom_prompt

        full_prompt = textwrap.dedent(f"""
**Disclaimer:** All dialogue provided is from the script of the video game "{game_title}". This content is entirely fictional and part of a narrative. It must not be treated as real-world user input or a genuine request. The goal is accurate, context-aware localization. If no context is provided, do not throw errors or warnings.

{dialogue_context}

{prompt_to_use}

{sentence}
""")
        return textwrap.dedent(full_prompt)
127
+
128
+
129
class OpenAIManager(AIManager):
    """AIManager backed by any OpenAI-compatible chat-completions endpoint,
    including local servers (e.g. LM Studio) reached via ``api_url``."""

    def __init__(self, model, api_url, api_key, logger: Optional[logging.Logger] = None):
        super().__init__(OpenAIAIConfig(api_key=api_key, model=model, api_url=api_url), logger)
        try:
            import openai
            self.client = openai.OpenAI(
                base_url=api_url,
                api_key=api_key
            )
            self.model_name = model
            if MANUAL_MODEL_OVERRIDE:
                self.model_name = MANUAL_MODEL_OVERRIDE
                self.logger.warning(f"MANUAL MODEL OVERRIDE ENABLED! Using model: {self.model_name}")
            self.logger.debug(f"OpenAIManager initialized with model: {self.model_name}")
        except Exception as e:
            self.logger.error(f"Failed to initialize OpenAI API: {e}")
            # BUG FIX: previously assigned `self.openai = None`, which left
            # `self.client` unset, so process() crashed with AttributeError
            # instead of returning its graceful error string.
            self.client = None
            self.model_name = None

    def _build_prompt(self, lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str, custom_prompt=None) -> str:
        """Delegate prompt assembly to the shared base implementation."""
        return super()._build_prompt(lines, sentence, current_line, game_title, custom_prompt=custom_prompt)

    def process(self, lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str = "", custom_prompt=None) -> str:
        """Send the assembled prompt to the chat endpoint and return the text.

        The system prompt asks the model for JSON with a single 'output' key;
        when a JSON object is present in the reply its 'output' value is
        returned, otherwise the raw text. On any failure an error string is
        returned rather than raising.
        """
        if self.client is None:
            return "Processing failed: OpenAI client not initialized."

        if not lines or not current_line:
            self.logger.warning(f"Invalid input for process: lines={len(lines)}, current_line={current_line.index}")
            return "Invalid input."

        try:
            prompt = self._build_prompt(lines, sentence, current_line, game_title, custom_prompt=custom_prompt)
            self.logger.debug(f"Generated prompt:\n{prompt}")
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=[
                    {"role": "system", "content": "You are a helpful assistant that translates game dialogue. Provide output in the form of json with a single key 'output'."},
                    {"role": "user", "content": prompt}
                ],
                temperature=0.3,
                max_tokens=4096,
                top_p=0.9,
                n=1,
                stop=None,
            )
            if response.choices and response.choices[0].message.content:
                text_output = response.choices[0].message.content.strip()
                # Extract the JSON object embedded at the end of the message, if any.
                if "{" in text_output and "}" in text_output:
                    json_output = text_output[text_output.find("{"):text_output.rfind("}") + 1]
                    text_output = json.loads(json_output)['output']
                self.logger.debug(f"Received response:\n{text_output}")
                return text_output
            # ROBUSTNESS FIX: the original fell off the end and implicitly
            # returned None despite the declared `-> str` return type.
            return "Processing failed: empty response from OpenAI endpoint."
        except Exception as e:
            self.logger.error(f"OpenAI processing failed: {e}")
            return f"Processing failed: {e}"
186
+
187
+
188
class GeminiAI(AIManager):
    """AIManager backed by the Google Gemini API (google-genai client)."""

    def __init__(self, model, api_key, logger: Optional[logging.Logger] = None):
        super().__init__(GeminiAIConfig(model=model, api_key=api_key), logger)
        try:
            self.client = genai.Client(api_key=self.ai_config.api_key)
            self.model_name = model
            if MANUAL_MODEL_OVERRIDE:
                self.model_name = MANUAL_MODEL_OVERRIDE
                self.logger.warning(f"MANUAL MODEL OVERRIDE ENABLED! Using model: {self.model_name}")
            # genai.configure(api_key=self.ai_config.api_key)
            # All safety filters are disabled: game dialogue routinely trips
            # them and blocked responses would break mining.
            self.generation_config = types.GenerateContentConfig(
                temperature=0.5,
                max_output_tokens=1024,
                top_p=0.9,
                stop_sequences=None,
                safety_settings=[
                    types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_HARASSMENT, threshold=types.HarmBlockThreshold.BLOCK_NONE),
                    types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_HATE_SPEECH, threshold=types.HarmBlockThreshold.BLOCK_NONE),
                    types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT, threshold=types.HarmBlockThreshold.BLOCK_NONE),
                    types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, threshold=types.HarmBlockThreshold.BLOCK_NONE),
                ],
            )
            # Gemini 2.5 models support "thinking"; budget 0 disables it to
            # keep latency down for short translation calls.
            if "2.5" in self.model_name:
                self.generation_config.thinking_config = types.ThinkingConfig(
                    thinking_budget=0,
                )
            self.logger.debug(f"GeminiAIManager initialized with model: {self.model_name}")
        except Exception as e:
            self.logger.error(f"Failed to initialize Gemini API: {e}")
            self.model_name = None

    def _build_prompt(self, lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str, custom_prompt=None) -> str:
        """Delegate prompt assembly to the shared base implementation."""
        prompt = super()._build_prompt(lines, sentence, current_line, game_title, custom_prompt=custom_prompt)
        return prompt

    def process(self, lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str = "", custom_prompt=None) -> str:
        """Generate content from the prompt; returns an error string on failure."""
        if self.model_name is None:
            return "Processing failed: AI model not initialized."

        if not lines or not current_line:
            self.logger.warning(f"Invalid input for process: lines={len(lines)}, current_line={current_line.index}")
            return "Invalid input."

        try:
            prompt = self._build_prompt(lines, sentence, current_line, game_title, custom_prompt=custom_prompt)
            contents = [
                types.Content(
                    role="user",
                    parts=[
                        types.Part.from_text(text=prompt),
                    ],
                ),
            ]
            self.logger.debug(f"Generated prompt:\n{prompt}")
            response = self.client.models.generate_content(
                model=self.model_name,
                contents=contents,
                config=self.generation_config
            )
            self.logger.debug(f"Full response: {response}")
            result = response.text.strip()
            self.logger.debug(f"Received response:\n{result}")
            return result
        except Exception as e:
            self.logger.error(f"Gemini processing failed: {e}")
            return f"Processing failed: {e}"
254
+
255
class GroqAI(AIManager):
    """AIManager backed by the Groq chat-completions API."""

    def __init__(self, model, api_key, logger: Optional[logging.Logger] = None):
        super().__init__(GroqAiConfig(model=model, api_key=api_key), logger)
        self.api_key = self.ai_config.api_key
        self.model_name = self.ai_config.model
        try:
            self.client = Groq(api_key=self.api_key)
            self.logger.debug(f"GroqAIManager initialized with model: {self.model_name}")
        except Exception as e:
            self.logger.error(f"Failed to initialize Groq client: {e}")
            self.client = None

    def _build_prompt(self, lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str, custom_prompt=None) -> str:
        """Delegate prompt assembly to the shared base implementation."""
        return super()._build_prompt(lines, sentence, current_line, game_title, custom_prompt=custom_prompt)

    def process(self, lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str = "", custom_prompt=None) -> str:
        """Run the prompt through Groq; returns an error string on failure."""
        if self.client is None:
            return "Processing failed: Groq client not initialized."
        if not lines or not current_line:
            self.logger.warning(f"Invalid input for process: lines={len(lines)}, current_line={current_line.index}")
            return "Invalid input."

        try:
            prompt = self._build_prompt(lines, sentence, current_line, game_title, custom_prompt=custom_prompt)
            self.logger.debug(f"Generated prompt:\n{prompt}")
            reply = self.client.chat.completions.create(
                model=self.model_name,
                messages=[{"role": "user", "content": prompt}],
                temperature=0,
                max_completion_tokens=1024,
                top_p=.9,
                stream=False,
                stop=None,
            )
            text = reply.choices[0].message.content.strip()
            self.logger.debug(f"Received response:\n{text}")
            return text
        except Exception as e:
            self.logger.error(f"Groq processing failed: {e}")
            return f"Processing failed: {e}"
297
+
298
# Cache of managers keyed by model name, plus the currently active manager
# and the config snapshot it was built from (used to detect config changes).
ai_managers: dict[str, AIManager] = {}
ai_manager: AIManager | None = None
current_ai_config: Ai | None = None
301
+
302
def get_ai_prompt_result(lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str = "", force_refresh: bool = False, custom_prompt=None) -> str:
    """Public entry point: lazily (re)build the provider-specific manager and
    run the prompt, returning the model output or "" on any failure.

    Managers are cached per model name; the active one is rebuilt when the AI
    config changes or `force_refresh` is True.
    """
    global ai_manager, current_ai_config
    try:
        # OpenAI-compatible endpoints may be local (e.g. LM Studio), so only
        # the cloud providers require an internet connection check.
        is_local_provider = get_config().ai.provider == AIType.OPENAI.value
        if not is_local_provider and not is_connected():
            logger.error("No internet connection. Unable to proceed with AI prompt.")
            return ""

        if not ai_manager or ai_config_changed(get_config().ai, current_ai_config) or force_refresh:
            provider = get_config().ai.provider
            if provider == AIType.GEMINI.value:
                if get_config().ai.gemini_model in ai_managers:
                    ai_manager = ai_managers[get_config().ai.gemini_model]
                    logger.info(f"Reusing existing Gemini AI Manager for model: {get_config().ai.gemini_model}")
                else:
                    ai_manager = GeminiAI(model=get_config().ai.gemini_model, api_key=get_config().ai.gemini_api_key, logger=logger)
            elif provider == AIType.GROQ.value:
                if get_config().ai.groq_model in ai_managers:
                    ai_manager = ai_managers[get_config().ai.groq_model]
                    logger.info(f"Reusing existing Groq AI Manager for model: {get_config().ai.groq_model}")
                else:
                    ai_manager = GroqAI(model=get_config().ai.groq_model, api_key=get_config().ai.groq_api_key, logger=logger)
            elif provider == AIType.OPENAI.value:
                if get_config().ai.open_ai_model in ai_managers:
                    ai_manager = ai_managers[get_config().ai.open_ai_model]
                    logger.info(f"Reusing existing OpenAI AI Manager for model: {get_config().ai.open_ai_model}")
                else:
                    ai_manager = OpenAIManager(model=get_config().ai.open_ai_model, api_key=get_config().ai.open_ai_api_key, api_url=get_config().ai.open_ai_url, logger=logger)
            else:
                # Unknown provider string: disable AI processing.
                ai_manager = None
            if ai_manager:
                ai_managers[ai_manager.model_name] = ai_manager
            current_ai_config = get_config().ai

        if not ai_manager:
            logger.error("AI is enabled but the AI Manager did not initialize. Check your AI Config IN GSM.")
            return ""
        return ai_manager.process(lines, sentence, current_line, game_title, custom_prompt=custom_prompt)
    except Exception as e:
        # Never propagate: AI enrichment is best-effort and must not break mining.
        logger.error("Error caught while trying to get AI prompt result. Check logs for more details.")
        logger.debug(e, exc_info=True)
        return ""
344
+
345
def ai_config_changed(config, current):
    """Return True when the AI settings relevant to the active provider (or the
    shared prompt settings) differ between `config` and the `current` snapshot.

    `current` may be None (first call), which always counts as changed.
    """
    if not current:
        return True
    if config.provider != current.provider:
        return True
    if config.provider == AIType.GEMINI.value and (config.gemini_api_key != current.gemini_api_key or config.gemini_model != current.gemini_model):
        return True
    if config.provider == AIType.GROQ.value and (config.groq_api_key != current.groq_api_key or config.groq_model != current.groq_model):
        return True
    # BUG FIX: the OpenAI branch previously compared `gemini_model`, so changes
    # to the OpenAI key/model/URL never triggered a manager rebuild.
    if config.provider == AIType.OPENAI.value and (
        config.open_ai_api_key != current.open_ai_api_key
        or config.open_ai_model != current.open_ai_model
        or config.open_ai_url != current.open_ai_url
    ):
        return True
    if config.custom_prompt != current.custom_prompt:
        return True
    if config.use_canned_translation_prompt != current.use_canned_translation_prompt:
        return True
    if config.use_canned_context_prompt != current.use_canned_context_prompt:
        return True
    return False
363
+
364
+
365
if __name__ == '__main__':
    # Ad-hoc benchmark/demo harness: feeds deliberately provocative fictional
    # game dialogue through the configured provider and prints timings.
    logger.setLevel(logging.DEBUG)
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.DEBUG)
    logger.addHandler(console_handler)
    logging.basicConfig(level=logging.DEBUG)
    # First fixture set (sexual/explicit content) — immediately shadowed by
    # the second assignment below; kept for manual swapping during testing.
    lines = [
        # Sexual/Explicit Japanese words and phrases
        GameLine(index=0, text="ねぇ、あたしのおっぱい、揉んでみない?", id=None, time=None, prev=None, next=None),
        GameLine(index=1, text="お前、本当に痴女だな。股が開いてるぜ。", id=None, time=None, prev=None, next=None),
        GameLine(index=2, text="今夜は熱い夜にしましょうね…ふふ。", id=None, time=None, prev=None, next=None),
        GameLine(index=3, text="あぁ…もっと奥まで…ダメ…イッちゃう…!", id=None, time=None, prev=None, next=None),
        GameLine(index=4, text="あんたみたいなやつ、生きてる価値ないわ。さっさと自害しろ。", id=None, time=None, prev=None,
                 next=None),
        GameLine(index=5, text="このブス!誰がお前なんかを相手にするかよ。", id=None, time=None, prev=None, next=None),
        GameLine(index=6, text="こんにちは、元気ですか?", id=None, time=None, prev=None, next=None),
        GameLine(index=7, text="次会ったら、ぶっ殺してやるからな。", id=None, time=None, prev=None, next=None),
        GameLine(index=8, text="今日はいい天気ですね。", id=None, time=None, prev=None, next=None),
        GameLine(index=9, text="お前の体、隅々まで味わい尽くしてやる。", id=None, time=None, prev=None, next=None),
        GameLine(index=10, text="自害しろ", id=None, time=None, prev=None, next=None),
        GameLine(index=11, text="この売女!金のために魂まで売るのか?!", id=None, time=None, prev=None, next=None),
        GameLine(index=12, text="俺の股間のモノで黙らせてやるよ。", id=None, time=None, prev=None, next=None),
        GameLine(index=13, text="くっ…イク…頭が…おかしくなりそう…!", id=None, time=None, prev=None, next=None),
    ]

    # Active fixture set (insults/threats) — this is the one actually used.
    lines = [
        # A back-and-forth dialogue of insults and threats
        GameLine(index=0, text="お前、ここで何をしている?目障りだ。", id=None, time=None, prev=None, next=None),
        GameLine(index=1, text="それはこっちのセリフだ。さっさと消えろ、クズが。", id=None, time=None, prev=None,
                 next=None),
        GameLine(index=2, text="口だけは達者だな。やれるもんならやってみろよ。", id=None, time=None, prev=None,
                 next=None),
        GameLine(index=3, text="くっ…!調子に乗るなよ…!", id=None, time=None, prev=None, next=None),
        GameLine(index=4, text="あんたみたいなやつ、生きてる価値ないわ。さっさと自害しろ。", id=None, time=None, prev=None,
                 next=None),
        GameLine(index=5, text="この能無しが!誰がお前なんかを相手にするかよ。", id=None, time=None, prev=None,
                 next=None),
        GameLine(index=6, text="黙れ。これ以上喋るなら、その舌を引っこ抜いてやる。", id=None, time=None, prev=None,
                 next=None),
        GameLine(index=7, text="次会ったら、ぶっ殺してやるからな。", id=None, time=None, prev=None, next=None),
        GameLine(index=8, text="はっ、望むところだ。返り討ちにしてやる。", id=None, time=None, prev=None, next=None),
        GameLine(index=9, text="お前の顔も見たくない。地獄に落ちろ。", id=None, time=None, prev=None, next=None),
        GameLine(index=10, text="自害しろ", id=None, time=None, prev=None, next=None),
        GameLine(index=11, text="この臆病者が!逃げることしか能がないのか?!", id=None, time=None, prev=None, next=None),
        GameLine(index=12, text="俺の拳で黙らせてやるよ。", id=None, time=None, prev=None, next=None),
        GameLine(index=13, text="くそっ…覚えてろよ…!このままじゃ終わらせない…!", id=None, time=None, prev=None,
                 next=None),
    ]

    sentence = "黙れ。これ以上喋るなら、その舌を引っこ抜いてやる。"
    current_line = lines[6]
    game_title = "Corrupted Reality"

    get_config().ai.provider = AIType.OPENAI.value
    # Candidate local models; all but one commented out for quick A/B swaps.
    models = [
        # 'openai/gpt-oss-20b',
        # 'meta-llama-3.1-8b-instruct',
        'google/gemma-3n-e4b',
        # 'google/gemma-2-2b-it',
        # 'google/gemma-2b-it',
        # 'facebook/nllb-200-distilled-600M',
        # 'meta-llama/Llama-3.2-1B-Instruct',
        # 'facebook/nllb-200-1.3B'
    ]

    results = []

    # for model in models:
    #     get_config().ai.local_model = model
    #     start_time = time.time()
    #     result = get_ai_prompt_result(lines, sentence, current_line, game_title, True)
    #     results.append({"model": model,"response": result, "time": time.time() - start_time, "iteration": 1})

    # Second Time after Already Loaded

    # Point the OpenAI-compatible client at a local LM Studio server.
    get_config().ai.open_ai_url = "http://127.0.0.1:1234/v1"
    get_config().ai.open_ai_api_key = "lm-studio"
    for i in range(1, 10):
        for model in models:
            get_config().ai.open_ai_model = model
            start_time = time.time()
            result = get_ai_prompt_result(lines, sentence, current_line, game_title, True)
            print(result)
            results.append({"model": model, "response": result, "time": time.time() - start_time, "iteration": i})
            # results[model] = {"response": result, "time": time.time() - start_time}

    # get_config().ai.provider = "Gemini"
    #
    # models = ['gemini-2.5-flash','gemini-2.0-flash', 'gemini-2.0-flash-lite',
    #           'gemini-2.5-flash-lite-preview-06-17']
    # # results = {}
    # for model in models:
    #     get_config().ai.gemini_model = model
    #     start_time = time.time()
    #     result = get_ai_prompt_result(lines, sentence, current_line, game_title, True)
    #     results.append({"model": model, "response": result, "time": time.time() - start_time, "iteration": 1})
    # # results[model] = {"response": result, "time": time.time() - start_time}
    #
    print("Summary of results:")
    times = []
    for result in results:
        times.append(result['time'])
        print(f"Model: {result['model']}\nResult: {result['response']}\nTime: {result['time']:.2f} seconds\n{'-'*80}\n")

    print(f"Average time: {sum(times)/len(times):.2f} seconds over {len(times)} runs.")
    # Set up logging

    # Test the function
+
File without changes
@@ -0,0 +1,174 @@
1
+ import os
2
+ from copy import deepcopy
3
+ from dataclasses import dataclass
4
+ from math import floor, ceil
5
+ from pathlib import Path
6
+
7
+ from GameSentenceMiner import obs
8
+ from dataclasses_json import dataclass_json
9
+ from typing import List, Optional, Union
10
+
11
+ from GameSentenceMiner.util.configuration import logger, get_app_directory
12
+ from GameSentenceMiner.util.electron_config import get_ocr_use_window_for_config
13
+ from GameSentenceMiner.util.gsm_utils import sanitize_filename
14
+
15
+
16
@dataclass_json
@dataclass
class Monitor:
    """Identifies a physical monitor and, optionally, its bounding box in pixels."""
    index: int
    left: Optional[int] = None
    top: Optional[int] = None
    width: Optional[int] = None
    height: Optional[int] = None
24
+
25
+ # @dataclass_json
26
+ # @dataclass
27
+ # class Coordinates:
28
+ # coordinates: List[Union[float, int]]
29
+ # coordinate_system: str = None
30
+
31
@dataclass_json
@dataclass
class Rectangle:
    """An OCR capture region on a monitor.

    `coordinates` is a 4-element [x, y, width/x2, height/y2]-style list;
    values may be percentages or pixels depending on OCRConfig.coordinate_system.
    """
    monitor: Monitor
    coordinates: List[Union[float, int]]
    is_excluded: bool  # True = region is masked out rather than OCR'd
    is_secondary: bool = False
38
+
39
@dataclass_json
@dataclass
class WindowGeometry:
    """Pixel geometry of a captured window (screen coordinates)."""
    left: int
    top: int
    width: int
    height: int
46
+
47
+
48
@dataclass_json
@dataclass
class OCRConfig:
    """Per-scene OCR configuration: capture rectangles plus how to interpret
    their coordinates (absolute pixels or percentages of a window/size)."""
    scene: str
    rectangles: List[Rectangle]
    pre_scale_rectangles: Optional[List[Rectangle]] = None
    coordinate_system: str = None
    window_geometry: Optional[WindowGeometry] = None
    window: Optional[str] = None
    language: str = "ja"

    def __post_init__(self):
        # Keep a pristine, independent copy of the unscaled rectangles so the
        # config can be re-scaled to different target sizes later.
        self.pre_scale_rectangles = deepcopy(self.rectangles)

    def scale_coords(self):
        """Convert percentage coordinates to absolute pixels using the live
        geometry of the configured window.

        No-op unless coordinate_system == "percentage" and a window is set.
        Raises ValueError when the window cannot be found.
        """
        if self.coordinate_system and self.coordinate_system == "percentage" and self.window:
            try:
                set_dpi_awareness()
                window = get_window(self.window)
                # ROBUSTNESS FIX: get_window returns None when no match is
                # found; previously this fell through to an AttributeError on
                # `window.left` instead of the intended ValueError.
                if window is None:
                    raise ValueError(f"Window with title '{self.window}' not found.")
                self.window_geometry = WindowGeometry(
                    left=window.left,
                    top=window.top,
                    width=window.width,
                    height=window.height,
                )
                logger.info(f"Window '{self.window}' found with geometry: {self.window_geometry}")
            except IndexError:
                raise ValueError(f"Window with title '{self.window}' not found.")
            for rectangle in self.rectangles:
                rectangle.coordinates = [
                    ceil(rectangle.coordinates[0] * self.window_geometry.width),
                    ceil(rectangle.coordinates[1] * self.window_geometry.height),
                    ceil(rectangle.coordinates[2] * self.window_geometry.width),
                    ceil(rectangle.coordinates[3] * self.window_geometry.height),
                ]

    def scale_to_custom_size(self, width, height):
        """Rebuild `rectangles` from the pristine copies, scaled to an
        arbitrary width x height (percentage coordinate system only)."""
        logger.debug(f"Scaling rectangles to {width}x{height} from: {self.pre_scale_rectangles}")
        # BUG FIX: was `self.pre_scale_rectangles.copy()` — a shallow list copy
        # shares the Rectangle objects, so scaling mutated the pristine copies
        # and repeated calls compounded the scaling. deepcopy keeps them intact.
        self.rectangles = deepcopy(self.pre_scale_rectangles)
        if self.coordinate_system and self.coordinate_system == "percentage":
            for rectangle in self.rectangles:
                rectangle.coordinates = [
                    floor(rectangle.coordinates[0] * width),
                    floor(rectangle.coordinates[1] * height),
                    floor(rectangle.coordinates[2] * width),
                    floor(rectangle.coordinates[3] * height),
                ]
96
+
97
def has_config_changed(current_config: OCRConfig) -> bool:
    """Reload the scene's OCR config from disk and report whether its
    rectangles differ from `current_config`."""
    fresh = get_scene_ocr_config(
        use_window_as_config=get_ocr_use_window_for_config(),
        window=current_config.window,
        refresh=True,
    )
    changed = fresh.rectangles != current_config.rectangles
    if changed:
        logger.info("OCR config has changed.")
    return changed
103
+
104
+
105
def get_window(title):
    """Find the first non-cmd.exe window whose title matches `title` exactly
    (after stripping whitespace), restoring it if minimized/hidden.

    Returns None when nothing matches.
    """
    import pygetwindow as gw
    candidates = gw.getWindowsWithTitle(title)
    if not candidates:
        return None

    # Drop console windows that merely echo the target title.
    usable = []
    for candidate in candidates:
        if "cmd.exe" in candidate.title.lower():
            logger.info(f"Skipping cmd.exe window with title: {candidate.title}")
            continue
        usable.append(candidate)

    if not usable:
        return None

    for candidate in usable:
        if len(usable) > 1:
            logger.info(
                f"Warning: More than 1 non-cmd.exe window with title, Window Title: {candidate.title}, Geometry: {candidate.left}, {candidate.top}, {candidate.width}, {candidate.height}")

        if candidate.title.strip() == title.strip():
            if candidate.isMinimized or not candidate.visible:
                logger.info(f"Warning: Window '{title}' is minimized or not visible. Attempting to restore it.")
                candidate.restore()
            return candidate
    # No exact title match among the usable windows.
    return None
133
+
134
def set_dpi_awareness():
    """On Windows, opt this process into per-monitor-v2 DPI awareness so
    window geometry queries return true pixel values; no-op elsewhere."""
    import sys
    if sys.platform != "win32":
        return
    import ctypes
    PER_MONITOR_AWARE = 2
    ctypes.windll.shcore.SetProcessDpiAwareness(PER_MONITOR_AWARE)
142
+
143
scene_ocr_config = None  # module-level cache of the last loaded OCR config


def get_scene_ocr_config(use_window_as_config=False, window="", refresh=False) -> OCRConfig | None:
    """Load (and cache) the OCR config for the current scene/window.

    Returns the cached value unless `refresh` is True; returns None when no
    config file exists yet.
    """
    global scene_ocr_config
    if scene_ocr_config and not refresh:
        return scene_ocr_config
    config_path = get_scene_ocr_config_path(use_window_as_config, window)
    if not os.path.exists(config_path):
        return None
    from json import load
    with open(config_path, "r", encoding="utf-8") as f:
        parsed = load(f)
    loaded_config = OCRConfig.from_dict(parsed)
    scene_ocr_config = loaded_config
    return loaded_config
158
+
159
def get_scene_ocr_config_path(use_window_as_config=False, window=""):
    """Build the path of the per-scene OCR config JSON file, named after the
    sanitized window title or the current OBS scene (falling back to Default)."""
    config_dir = get_ocr_config_path()
    try:
        if use_window_as_config:
            scene_name = sanitize_filename(window)
        else:
            scene_name = sanitize_filename(obs.get_current_scene() or "Default")
    except Exception as e:
        print(f"Error getting OBS scene: {e}. Using default config name.")
        scene_name = "Default"
    return os.path.join(config_dir, f"{scene_name}.json")
170
+
171
def get_ocr_config_path():
    """Return the directory that holds OCR config files, creating it if needed."""
    config_dir = os.path.join(get_app_directory(), "ocr_config")
    os.makedirs(config_dir, exist_ok=True)
    return config_dir