GameSentenceMiner 2.10.16.tar.gz → 2.10.17.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. gamesentenceminer-2.10.17/GameSentenceMiner/ai/ai_prompting.py +315 -0
  2. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/config_gui.py +1 -1
  3. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/owocr/owocr/ocr.py +44 -22
  4. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner.egg-info/PKG-INFO +2 -2
  5. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner.egg-info/requires.txt +1 -1
  6. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/PKG-INFO +2 -2
  7. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/pyproject.toml +3 -3
  8. gamesentenceminer-2.10.16/GameSentenceMiner/ai/ai_prompting.py +0 -221
  9. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/__init__.py +0 -0
  10. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/ai/__init__.py +0 -0
  11. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/anki.py +0 -0
  12. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/assets/__init__.py +0 -0
  13. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/assets/icon.png +0 -0
  14. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/assets/icon128.png +0 -0
  15. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/assets/icon256.png +0 -0
  16. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/assets/icon32.png +0 -0
  17. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/assets/icon512.png +0 -0
  18. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/assets/icon64.png +0 -0
  19. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/assets/pickaxe.png +0 -0
  20. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/gametext.py +0 -0
  21. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/gsm.py +0 -0
  22. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/obs.py +0 -0
  23. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/ocr/__init__.py +0 -0
  24. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/ocr/gsm_ocr_config.py +0 -0
  25. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/ocr/ocrconfig.py +0 -0
  26. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/ocr/owocr_area_selector.py +0 -0
  27. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/ocr/owocr_helper.py +0 -0
  28. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/ocr/ss_picker.py +0 -0
  29. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/owocr/owocr/__init__.py +0 -0
  30. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/owocr/owocr/__main__.py +0 -0
  31. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/owocr/owocr/config.py +0 -0
  32. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/owocr/owocr/lens_betterproto.py +0 -0
  33. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/owocr/owocr/run.py +0 -0
  34. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/owocr/owocr/screen_coordinate_picker.py +0 -0
  35. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/util/__init__.py +0 -0
  36. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/util/audio_offset_selector.py +0 -0
  37. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/util/communication/__init__.py +0 -0
  38. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/util/communication/send.py +0 -0
  39. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/util/communication/websocket.py +0 -0
  40. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/util/configuration.py +0 -0
  41. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/util/downloader/Untitled_json.py +0 -0
  42. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/util/downloader/__init__.py +0 -0
  43. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/util/downloader/download_tools.py +0 -0
  44. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/util/downloader/oneocr_dl.py +0 -0
  45. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/util/electron_config.py +0 -0
  46. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/util/ffmpeg.py +0 -0
  47. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/util/gsm_utils.py +0 -0
  48. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/util/model.py +0 -0
  49. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/util/notification.py +0 -0
  50. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/util/package.py +0 -0
  51. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/util/ss_selector.py +0 -0
  52. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/util/text_log.py +0 -0
  53. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/vad.py +0 -0
  54. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/web/__init__.py +0 -0
  55. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/web/service.py +0 -0
  56. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/web/static/__init__.py +0 -0
  57. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/web/static/apple-touch-icon.png +0 -0
  58. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/web/static/favicon-96x96.png +0 -0
  59. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/web/static/favicon.ico +0 -0
  60. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/web/static/favicon.svg +0 -0
  61. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/web/static/site.webmanifest +0 -0
  62. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/web/static/style.css +0 -0
  63. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/web/static/web-app-manifest-192x192.png +0 -0
  64. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/web/static/web-app-manifest-512x512.png +0 -0
  65. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/web/templates/__init__.py +0 -0
  66. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/web/templates/index.html +0 -0
  67. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/web/templates/text_replacements.html +0 -0
  68. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/web/templates/utility.html +0 -0
  69. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner/web/texthooking_page.py +0 -0
  70. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner.egg-info/SOURCES.txt +0 -0
  71. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner.egg-info/dependency_links.txt +0 -0
  72. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner.egg-info/entry_points.txt +0 -0
  73. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/GameSentenceMiner.egg-info/top_level.txt +0 -0
  74. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/LICENSE +0 -0
  75. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/README.md +0 -0
  76. {gamesentenceminer-2.10.16 → gamesentenceminer-2.10.17}/setup.cfg +0 -0
GameSentenceMiner/ai/ai_prompting.py (new in 2.10.17)
@@ -0,0 +1,315 @@
+import logging
+import textwrap
+import time
+from abc import ABC, abstractmethod
+from dataclasses import dataclass
+from enum import Enum
+from typing import List, Optional
+
+from google import genai
+from google.genai import types
+from groq import Groq
+
+from GameSentenceMiner.util.configuration import get_config, Ai, logger
+from GameSentenceMiner.util.gsm_utils import is_connected
+from GameSentenceMiner.util.text_log import GameLine
+
+# Suppress debug logs from httpcore
+logging.getLogger("httpcore").setLevel(logging.WARNING)
+logging.getLogger("httpx").setLevel(logging.WARNING)
+logging.getLogger("groq._base_client").setLevel(logging.WARNING)
+MANUAL_MODEL_OVERRIDE = None
+
+TRANSLATION_PROMPT = f"""
+**Professional Game Localization Task**
+
+**Task Directive:**
+Translate ONLY the single line of game dialogue specified below into natural-sounding, context-aware English. The translation must preserve the original tone and intent of the character.
+
+**Output Requirements:**
+- Provide only the single, best English translation.
+- Do not include notes, alternatives, explanations, or any other surrounding text.
+- Use expletives if they are natural for the context and enhance the translation's impact, but do not over-exaggerate.
+- Preserve or add HTML tags (e.g., `<i>`, `<b>`) if appropriate for emphasis.
+
+**Line to Translate:**
+"""
+
+CONTEXT_PROMPT = textwrap.dedent(f"""
+
+**Task Directive:**
+Provide a very brief summary of the scene in English based on the provided Japanese dialogue and context. Focus on the characters' actions and the immediate situation being described.
+
+Current Sentence:
+""")
+
+class AIType(Enum):
+    GEMINI = "Gemini"
+    GROQ = "Groq"
+
+@dataclass
+class AIConfig:
+    api_key: str
+    model: str
+    api_url: Optional[str]
+    type: 'AIType'
+
+@dataclass
+class GeminiAIConfig(AIConfig):
+    def __init__(self, api_key: str, model: str = "gemini-2.0-flash"):
+        super().__init__(api_key=api_key, model=model, api_url=None, type=AIType.GEMINI)
+
+@dataclass
+class GroqAiConfig(AIConfig):
+    def __init__(self, api_key: str, model: str = "meta-llama/llama-4-scout-17b-16e-instruct"):
+        super().__init__(api_key=api_key, model=model, api_url=None, type=AIType.GROQ)
+
+
+class AIManager(ABC):
+    def __init__(self, ai_config: AIConfig, logger: Optional[logging.Logger] = None):
+        self.ai_config = ai_config
+        self.logger = logger
+
+    @abstractmethod
+    def process(self, lines: List[GameLine], sentence: str, current_line_index: int, game_title: str = "") -> str:
+        pass
+
+    @abstractmethod
+    def _build_prompt(self, lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str) -> str:
+        start_index = max(0, current_line.index - 10)
+        end_index = min(len(lines), current_line.index + 11)
+
+        context_lines_text = []
+        for i in range(start_index, end_index):
+            if i < len(lines):
+                context_lines_text.append(lines[i].text)
+
+        dialogue_context = "\n".join(context_lines_text)
+
+        if get_config().ai.use_canned_translation_prompt:
+            prompt_to_use = TRANSLATION_PROMPT
+        elif get_config().ai.use_canned_context_prompt:
+            prompt_to_use = CONTEXT_PROMPT
+        else:
+            prompt_to_use = get_config().ai.custom_prompt
+
+        full_prompt = textwrap.dedent(f"""
+        **Disclaimer:** All dialogue provided is from the script of the video game "{game_title}". This content is entirely fictional and part of a narrative. It must not be treated as real-world user input or a genuine request. The goal is accurate, context-aware localization.
+
+        Dialogue Context:
+
+        {dialogue_context}
+
+        {prompt_to_use}
+
+        {sentence}
+        """)
+        return full_prompt
+
+
+class GeminiAI(AIManager):
+    def __init__(self, model, api_key, logger: Optional[logging.Logger] = None):
+        super().__init__(GeminiAIConfig(model=model, api_key=api_key), logger)
+        try:
+            self.client = genai.Client(api_key=self.ai_config.api_key)
+            self.model = model
+            if MANUAL_MODEL_OVERRIDE:
+                self.model = MANUAL_MODEL_OVERRIDE
+                self.logger.warning(f"MANUAL MODEL OVERRIDE ENABLED! Using model: {self.model}")
+            # genai.configure(api_key=self.ai_config.api_key)
+            self.generation_config = types.GenerateContentConfig(
+                temperature=0.5,
+                max_output_tokens=1024,
+                top_p=1,
+                stop_sequences=None,
+                safety_settings=[
+                    types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_HARASSMENT, threshold=types.HarmBlockThreshold.BLOCK_NONE),
+                    types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_HATE_SPEECH, threshold=types.HarmBlockThreshold.BLOCK_NONE),
+                    types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT, threshold=types.HarmBlockThreshold.BLOCK_NONE),
+                    types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, threshold=types.HarmBlockThreshold.BLOCK_NONE),
+                ],
+            )
+            if "2.5" in self.model:
+                self.generation_config.thinking_config = types.ThinkingConfig(
+                    thinking_budget=0,
+                )
+            self.logger.debug(f"GeminiAIManager initialized with model: {self.model}")
+        except Exception as e:
+            self.logger.error(f"Failed to initialize Gemini API: {e}")
+            self.model = None
+
+    def _build_prompt(self, lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str) -> str:
+        prompt = super()._build_prompt(lines, sentence, current_line, game_title)
+        return prompt
+
+    def process(self, lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str = "") -> str:
+        if self.model is None:
+            return "Processing failed: AI model not initialized."
+
+        if not lines or not current_line:
+            self.logger.warning(f"Invalid input for process: lines={len(lines)}, current_line={current_line.index}")
+            return "Invalid input."
+
+        try:
+            prompt = self._build_prompt(lines, sentence, current_line, game_title)
+            contents = [
+                types.Content(
+                    role="user",
+                    parts=[
+                        types.Part.from_text(text=prompt),
+                    ],
+                ),
+            ]
+            self.logger.debug(f"Generated prompt:\n{prompt}")
+            response = self.client.models.generate_content(
+                model=self.model,
+                contents=contents,
+                config=self.generation_config
+            )
+            self.logger.debug(f"Full response: {response}")
+            result = response.text.strip()
+            self.logger.debug(f"Received response:\n{result}")
+            return result
+        except Exception as e:
+            self.logger.error(f"Gemini processing failed: {e}")
+            return f"Processing failed: {e}"
+
+class GroqAI(AIManager):
+    def __init__(self, model, api_key, logger: Optional[logging.Logger] = None):
+        super().__init__(GroqAiConfig(model=model, api_key=api_key), logger)
+        self.api_key = self.ai_config.api_key
+        self.model_name = self.ai_config.model
+        try:
+            self.client = Groq(api_key=self.api_key)
+            self.logger.debug(f"GroqAIManager initialized with model: {self.model_name}")
+        except Exception as e:
+            self.logger.error(f"Failed to initialize Groq client: {e}")
+            self.client = None
+
+    def _build_prompt(self, lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str) -> str:
+        prompt = super()._build_prompt(lines, sentence, current_line, game_title)
+        return prompt
+
+    def process(self, lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str = "") -> str:
+        if self.client is None:
+            return "Processing failed: Groq client not initialized."
+
+        if not lines or not current_line:
+            self.logger.warning(f"Invalid input for process: lines={len(lines)}, current_line={current_line.index}")
+            return "Invalid input."
+
+        try:
+            prompt = self._build_prompt(lines, sentence, current_line, game_title)
+            self.logger.debug(f"Generated prompt:\n{prompt}")
+            completion = self.client.chat.completions.create(
+                model=self.model_name,
+                messages=[{"role": "user", "content": prompt}],
+                temperature=.5,
+                max_completion_tokens=1024,
+                top_p=1,
+                stream=False,
+                stop=None,
+            )
+            result = completion.choices[0].message.content.strip()
+            self.logger.debug(f"Received response:\n{result}")
+            return result
+        except Exception as e:
+            self.logger.error(f"Groq processing failed: {e}")
+            return f"Processing failed: {e}"
+
+ai_manager: AIManager | None = None
+current_ai_config: Ai | None = None
+
+def get_ai_prompt_result(lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str = "", force_refresh: bool = False) -> str:
+    global ai_manager, current_ai_config
+    try:
+        if not is_connected():
+            logger.error("No internet connection. Unable to proceed with AI prompt.")
+            return ""
+        if not ai_manager or ai_config_changed(get_config().ai, current_ai_config) or force_refresh:
+            if get_config().ai.provider == AIType.GEMINI.value:
+                ai_manager = GeminiAI(model=get_config().ai.gemini_model, api_key=get_config().ai.gemini_api_key, logger=logger)
+            elif get_config().ai.provider == AIType.GROQ.value:
+                ai_manager = GroqAI(model=get_config().ai.groq_model, api_key=get_config().ai.groq_api_key, logger=logger)
+            current_ai_config = get_config().ai
+        if not ai_manager:
+            logger.error("AI is enabled but the AI Manager did not initialize. Check your AI Config IN GSM.")
+            return ""
+        return ai_manager.process(lines, sentence, current_line, game_title)
+    except Exception as e:
+        logger.error("Error caught while trying to get AI prompt result. Check logs for more details.")
+        logger.debug(e)
+        return ""
+
+def ai_config_changed(config, current):
+    if not current:
+        return True
+    if config.provider != current.provider:
+        return True
+    if config.provider == AIType.GEMINI.value and (config.gemini_api_key != current.gemini_api_key or config.gemini_model != current.gemini_model):
+        return True
+    if config.provider == AIType.GROQ.value and (config.groq_api_key != current.groq_api_key or config.groq_model != current.groq_model):
+        return True
+    if config.custom_prompt != current.custom_prompt:
+        return True
+    if config.use_canned_translation_prompt != current.use_canned_translation_prompt:
+        return True
+    if config.use_canned_context_prompt != current.use_canned_context_prompt:
+        return True
+    return False
+
+
+if __name__ == '__main__':
+    logging.basicConfig(level=logging.DEBUG)
+    lines = [
+        # Sexual/Explicit Japanese words and phrases
+        GameLine(index=0, text="ねぇ、あたしのおっぱい、揉んでみない?", id=None, time=None, prev=None, next=None),
+        # Hey, wanna try feeling my breasts?
+        GameLine(index=1, text="お前、本当に痴女だな。股が開いてるぜ。", id=None, time=None, prev=None, next=None),
+        # You're really a pervert, your legs are open. (Vulgar insult)
+        GameLine(index=2, text="今夜は熱い夜にしましょうね…ふふ。", id=None, time=None, prev=None, next=None),
+        # Let's make tonight a hot night... hehe. (Suggestive)
+        GameLine(index=3, text="あぁ…もっと奥まで…ダメ…イッちゃう…!", id=None, time=None, prev=None, next=None),
+        # Ah... deeper... no... I'm coming...! (Explicit sexual context)
+        GameLine(index=4, text="あんたみたいなクズ、生きてる価値ないわ。さっさと自害しろ。", id=None, time=None, prev=None,
+                 next=None),  # Trash like you has no right to live. Go kill yourself quickly. (Inciting self-harm)
+        GameLine(index=5, text="このブス!誰がお前なんかを相手にするかよ。", id=None, time=None, prev=None, next=None),
+        # You ugly hag! Who would even bother with you? (Insult)
+        GameLine(index=6, text="こんにちは、元気ですか?", id=None, time=None, prev=None, next=None),
+        # Normal line, for contrast
+        GameLine(index=7, text="次会ったら、ぶっ殺してやるからな。", id=None, time=None, prev=None, next=None),
+        # Next time we meet, I'll kill you. (Violent threat)
+        GameLine(index=8, text="今日はいい天気ですね。", id=None, time=None, prev=None, next=None),
+        # Normal line, for contrast
+        GameLine(index=9, text="お前の体、隅々まで味わい尽くしてやる。", id=None, time=None, prev=None, next=None),
+        # I'll savor every inch of your body. (Predatory/sexual threat)
+        GameLine(index=10, text="自害しろ", id=None, time=None, prev=None, next=None),
+        # Target line for `sentence` and `current_line`
+        GameLine(index=11, text="この売女!金のために魂まで売るのか?!", id=None, time=None, prev=None, next=None),
+        # You whore! Will you sell your soul for money?! (Vulgar insult/slur)
+        GameLine(index=12, text="俺の股間のモノで黙らせてやるよ。", id=None, time=None, prev=None, next=None),
+        # I'll shut you up with what's between my legs. (Explicit sexual threat/harassment)
+        GameLine(index=13, text="くっ…イク…頭が…おかしくなりそう…!", id=None, time=None, prev=None, next=None),
+        # Ngh... I'm coming... my head... I'm going crazy...! (More explicit sexual context)
+    ]
+
+    sentence = "あぁ…もっと奥まで…ダメ…イッちゃう…"
+    # Adjust current_line index to point to the normal line amidst the bad context
+    current_line = lines[3]
+    game_title = "Corrupted Reality"
+
+    models = ['gemini-2.5-flash','gemini-2.0-flash', 'gemini-2.0-flash-lite',
+              'gemini-2.5-flash-lite-preview-06-17']
+    results = {}
+    for model in models:
+        MANUAL_MODEL_OVERRIDE = model
+        start_time = time.time()
+        result = get_ai_prompt_result(lines, sentence, current_line, game_title, True)
+        results[model] = {"response": result, "time": time.time() - start_time}
+
+    print("Summary of results:")
+    for model, result in results.items():
+        print(f"Model: {model}\nResult: {result['response']}\nTime: {result['time']:.2f} seconds\n{'-'*80}\n")
+    # Set up logging
+
+    # Test the function
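
Note: the new module's public entry point, get_ai_prompt_result, now takes a force_refresh flag and rebuilds its AIManager whenever ai_config_changed detects a provider, key, model, or prompt change. A minimal usage sketch (not part of the package diff; it assumes an installed GSM 2.10.17 whose AI config selects Gemini and holds a valid API key):

    from GameSentenceMiner.ai.ai_prompting import get_ai_prompt_result
    from GameSentenceMiner.util.text_log import GameLine

    # A tiny dialogue history; GameLine is constructed the same way as in the test block above.
    lines = [
        GameLine(index=0, text="こんにちは、元気ですか?", id=None, time=None, prev=None, next=None),
        GameLine(index=1, text="今日はいい天気ですね。", id=None, time=None, prev=None, next=None),
    ]

    # force_refresh=True forces the cached AIManager to be rebuilt from the current config.
    result = get_ai_prompt_result(lines, sentence=lines[1].text, current_line=lines[1],
                                  game_title="Test Game", force_refresh=True)
    print(result)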
GameSentenceMiner/config_gui.py
@@ -1575,7 +1575,7 @@ class ConfigApp:
 
         HoverInfoLabelWidget(ai_frame, text="Gemini AI Model:", tooltip="Select the AI model to use.",
                              row=self.current_row, column=0)
-        self.gemini_model = ttk.Combobox(ai_frame, values=['gemini-2.5-flash', 'gemini-2.5-pro','gemini-2.0-flash', 'gemini-2.0-flash-lite',
+        self.gemini_model = ttk.Combobox(ai_frame, values=['gemini-2.5-flash','gemini-2.0-flash', 'gemini-2.0-flash-lite',
                                                            'gemini-2.5-flash-lite-preview-06-17'], state="readonly")
         try:
             self.gemini_model.set(self.settings.ai.gemini_model)
GameSentenceMiner/owocr/owocr/ocr.py
@@ -14,7 +14,6 @@ from urllib.parse import urlparse, parse_qs
 import jaconv
 import numpy as np
 from PIL import Image
-from google.generativeai import GenerationConfig
 from loguru import logger
 import requests
 
@@ -1128,17 +1127,33 @@ class GeminiOCR:
         # if "google-generativeai" not in sys.modules:
         #     logger.warning('google-generativeai not available, GeminiOCR will not work!')
         # else:
-        import google.generativeai as genai
+        from google import genai
+        from google.genai import types
         try:
             self.api_key = config['api_key']
             if not self.api_key:
                 logger.warning('Gemini API key not provided, GeminiOCR will not work!')
             else:
-                genai.configure(api_key=self.api_key)
-                self.model = genai.GenerativeModel(config['model'], generation_config=GenerationConfig(
+                self.client = genai.Client(api_key=self.api_key)
+                self.model = config['model']
+                self.generation_config = types.GenerateContentConfig(
                     temperature=0.0,
-                    max_output_tokens=300
-                ))
+                    max_output_tokens=300,
+                    safety_settings=[
+                        types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_HARASSMENT,
+                                            threshold=types.HarmBlockThreshold.BLOCK_NONE),
+                        types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_HATE_SPEECH,
+                                            threshold=types.HarmBlockThreshold.BLOCK_NONE),
+                        types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
+                                            threshold=types.HarmBlockThreshold.BLOCK_NONE),
+                        types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
+                                            threshold=types.HarmBlockThreshold.BLOCK_NONE),
+                    ],
+                )
+                if "2.5" in self.model:
+                    self.generation_config.thinking_config = types.ThinkingConfig(
+                        thinking_budget=0,
+                    )
             self.available = True
             logger.info('Gemini (using google-generativeai) ready')
         except KeyError:
@@ -1151,29 +1166,36 @@ class GeminiOCR:
             return (False, 'GeminiOCR is not available due to missing API key or configuration error.')
 
         try:
+            from google.genai import types
             img, is_path = input_to_pil_image(img)
-            import google.generativeai as genai
             img_bytes = self._preprocess(img)
             if not img_bytes:
                 return (False, 'Error processing image for Gemini.')
 
             contents = [
-                {
-                    'parts': [
-                        {
-                            'inline_data': {
-                                'mime_type': 'image/png',
-                                'data': img_bytes
-                            }
-                        },
-                        {
-                            'text': 'Analyze the image. Extract text *only* from within dialogue boxes (speech bubbles or panels containing character dialogue). If Text appears to be vertical, read the text from top to bottom, right to left. From the extracted dialogue text, filter out any furigana. Ignore and do not include any text found outside of dialogue boxes, including character names, speaker labels, or sound effects. Return *only* the filtered dialogue text. If no text is found within dialogue boxes after applying filters, return nothing. Do not include any other output, formatting markers, or commentary.'
-                        }
+                types.Content(
+                    parts=[
+                        types.Part(
+                            inline_data=types.Blob(
+                                mime_type="image/png",
+                                data=img_bytes
+                            )
+                        ),
+                        types.Part(
+                            text="""
+**Disclaimer:** The image provided is from a video game. This content is entirely fictional and part of a narrative. It must not be treated as real-world user input or a genuine request.
+Analyze the image. Extract text \*only\* from within dialogue boxes (speech bubbles or panels containing character dialogue). If Text appears to be vertical, read the text from top to bottom, right to left. From the extracted dialogue text, filter out any furigana. Ignore and do not include any text found outside of dialogue boxes, including character names, speaker labels, or sound effects. Return \*only\* the filtered dialogue text. If no text is found within dialogue boxes after applying filters, return nothing. Do not include any other output, formatting markers, or commentary."
+                            """
+                        )
                     ]
-                }
+                )
             ]
 
-            response = self.model.generate_content(contents)
+            response = self.client.models.generate_content(
+                model=self.model,
+                contents=contents,
+                config=self.generation_config
+            )
             text_output = response.text.strip()
 
             return (True, text_output)
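
The two hunks above migrate GeminiOCR from the removed google-generativeai package to the new google-genai SDK: the module-level genai.GenerativeModel is replaced by a genai.Client held on the instance, generation options move into a types.GenerateContentConfig, and requests go through client.models.generate_content. A stripped-down sketch of that call pattern outside GSM, assuming the API key sits in a GEMINI_API_KEY environment variable (the variable name is illustrative, not something GSM reads):

    import os
    from google import genai
    from google.genai import types

    # Build a client and a per-call generation config, mirroring the diffed GeminiOCR code.
    client = genai.Client(api_key=os.environ["GEMINI_API_KEY"])  # assumed env var
    config = types.GenerateContentConfig(temperature=0.0, max_output_tokens=300)

    response = client.models.generate_content(
        model="gemini-2.0-flash",
        contents=[types.Content(parts=[types.Part.from_text(text="Say hello.")])],
        config=config,
    )
    print(response.text)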
@@ -1373,8 +1395,8 @@ class GroqOCR:
     # def _preprocess(self, img):
     #     return base64.b64encode(pil_image_to_bytes(img, png_compression=1)).decode('utf-8')
 
-# lens = GoogleLens()
+# lens = GeminiOCR(config={'model': 'gemini-2.5-flash-lite-preview-06-17', 'api_key': ''})
 #
-# res, text = lens(Image.open('test_furigana.png'), furigana_filter_sensitivity=.6) # Example usage
+# res, text = lens(Image.open('test_furigana.png')) # Example usage
 #
 # print(text)
GameSentenceMiner.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: GameSentenceMiner
-Version: 2.10.16
+Version: 2.10.17
 Summary: A tool for mining sentences from games. Update: Full UI Re-design
 Author-email: Beangate <bpwhelan95@gmail.com>
 License: MIT License
@@ -32,13 +32,13 @@ Requires-Dist: win10toast; sys_platform == "win32"
 Requires-Dist: numpy
 Requires-Dist: pystray
 Requires-Dist: pywin32; sys_platform == "win32"
-Requires-Dist: google-generativeai
 Requires-Dist: pygetwindow; sys_platform == "win32"
 Requires-Dist: flask
 Requires-Dist: groq
 Requires-Dist: obsws-python~=1.7.2
 Requires-Dist: matplotlib
 Requires-Dist: sounddevice
+Requires-Dist: google-genai
 Dynamic: license-file
 
 # GameSentenceMiner (GSM)
GameSentenceMiner.egg-info/requires.txt
@@ -16,12 +16,12 @@ ttkbootstrap~=1.10.1
 dataclasses_json~=0.6.7
 numpy
 pystray
-google-generativeai
 flask
 groq
 obsws-python~=1.7.2
 matplotlib
 sounddevice
+google-genai
 
 [:sys_platform == "win32"]
 win10toast
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: GameSentenceMiner
-Version: 2.10.16
+Version: 2.10.17
 Summary: A tool for mining sentences from games. Update: Full UI Re-design
 Author-email: Beangate <bpwhelan95@gmail.com>
 License: MIT License
@@ -32,13 +32,13 @@ Requires-Dist: win10toast; sys_platform == "win32"
 Requires-Dist: numpy
 Requires-Dist: pystray
 Requires-Dist: pywin32; sys_platform == "win32"
-Requires-Dist: google-generativeai
 Requires-Dist: pygetwindow; sys_platform == "win32"
 Requires-Dist: flask
 Requires-Dist: groq
 Requires-Dist: obsws-python~=1.7.2
 Requires-Dist: matplotlib
 Requires-Dist: sounddevice
+Requires-Dist: google-genai
 Dynamic: license-file
 
 # GameSentenceMiner (GSM)
pyproject.toml
@@ -7,7 +7,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "GameSentenceMiner"
-version = "2.10.16"
+version = "2.10.17"
 description = "A tool for mining sentences from games. Update: Full UI Re-design"
 readme = "README.md"
 requires-python = ">=3.10"
@@ -42,13 +42,13 @@ dependencies = [
     "numpy",
     "pystray",
     "pywin32; sys_platform == 'win32'",
-    "google-generativeai",
     "pygetwindow; sys_platform == 'win32'",
     "flask",
     "groq",
     "obsws-python~=1.7.2",
     "matplotlib",
-    "sounddevice"
+    "sounddevice",
+    "google-genai"
 ]
 
 # This creates a command-line script named `gamesentenceminer` that will
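
The packaging hunks above complete the dependency swap: google-generativeai is dropped and google-genai is added in the egg-info metadata, requires.txt, and pyproject.toml. For reference, the corresponding import change, as it appears in the ai_prompting.py and ocr.py hunks, is:

    # Old (google-generativeai, removed in 2.10.17):
    # import google.generativeai as genai
    # from google.generativeai import GenerationConfig

    # New (google-genai):
    from google import genai
    from google.genai import types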
GameSentenceMiner/ai/ai_prompting.py (removed from 2.10.16)
@@ -1,221 +0,0 @@
-import logging
-import textwrap
-from abc import ABC, abstractmethod
-from dataclasses import dataclass
-from enum import Enum
-from typing import List, Optional
-
-import google.generativeai as genai
-from google.generativeai import GenerationConfig
-from groq import Groq
-
-from GameSentenceMiner.util.configuration import get_config, Ai, logger
-from GameSentenceMiner.util.gsm_utils import is_connected
-from GameSentenceMiner.util.text_log import GameLine
-
-# Suppress debug logs from httpcore
-logging.getLogger("httpcore").setLevel(logging.WARNING)
-logging.getLogger("httpx").setLevel(logging.WARNING)
-logging.getLogger("groq._base_client").setLevel(logging.WARNING)
-
-
-TRANSLATION_PROMPT = textwrap.dedent(f"""Translate the following Japanese dialogue from this game into natural, context-aware English. Focus on preserving the tone, intent, and emotional nuance of the original text, paying close attention to the context provided by surrounding lines. The dialogue may include slang, idioms, implied meanings, or game-specific terminology that should be adapted naturally for English-speaking players. Ensure the translation feels immersive and aligns with the game's narrative style and character voices.
-Translate only the specified line below, providing a single result. Do not include additional text, explanations, alternatives, or other lines unless explicitly requested. If there are alternatives, choose the best one. Allow expletives if more natural. Allow HTML tags for emphasis, italics, and other formatting as needed. Please also try to preserve existing HTML tags from the specified sentence if appropriate. Answer with nothing but the best translation, no alternatives or explanations.
-
-Line to Translate:
-""")
-
-CONTEXT_PROMPT = textwrap.dedent(f"""Provide a very brief summary of the scene in English based on the provided Japanese dialogue and context. Focus on the characters' actions and the immediate situation being described.
-
-Current Sentence:
-""")
-
-class AIType(Enum):
-    GEMINI = "Gemini"
-    GROQ = "Groq"
-
-@dataclass
-class AIConfig:
-    api_key: str
-    model: str
-    api_url: Optional[str]
-    type: 'AIType'
-
-@dataclass
-class GeminiAIConfig(AIConfig):
-    def __init__(self, api_key: str, model: str = "gemini-2.0-flash"):
-        super().__init__(api_key=api_key, model=model, api_url=None, type=AIType.GEMINI)
-
-@dataclass
-class GroqAiConfig(AIConfig):
-    def __init__(self, api_key: str, model: str = "meta-llama/llama-4-scout-17b-16e-instruct"):
-        super().__init__(api_key=api_key, model=model, api_url=None, type=AIType.GROQ)
-
-
-class AIManager(ABC):
-    def __init__(self, ai_config: AIConfig, logger: Optional[logging.Logger] = None):
-        self.ai_config = ai_config
-        self.logger = logger
-
-    @abstractmethod
-    def process(self, lines: List[GameLine], sentence: str, current_line_index: int, game_title: str = "") -> str:
-        pass
-
-    @abstractmethod
-    def _build_prompt(self, lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str) -> str:
-        start_index = max(0, current_line.index - 10)
-        end_index = min(len(lines), current_line.index + 11)
-
-        context_lines_text = []
-        for i in range(start_index, end_index):
-            if i < len(lines):
-                context_lines_text.append(lines[i].text)
-
-        dialogue_context = "\n".join(context_lines_text)
-
-        if get_config().ai.use_canned_translation_prompt:
-            prompt_to_use = TRANSLATION_PROMPT
-        elif get_config().ai.use_canned_context_prompt:
-            prompt_to_use = CONTEXT_PROMPT
-        else:
-            prompt_to_use = getattr(self.ai_config, 'custom_prompt', "")
-
-        full_prompt = textwrap.dedent(f"""
-        Dialogue Context:
-
-        {dialogue_context}
-
-        I am playing the game {game_title}. With that, and the above dialogue context in mind, answer the following prompt.
-
-        {prompt_to_use}
-
-        {sentence}
-        """)
-        return full_prompt
-
-
-class GeminiAI(AIManager):
-    def __init__(self, model, api_key, logger: Optional[logging.Logger] = None):
-        super().__init__(GeminiAIConfig(model=model, api_key=api_key), logger)
-        try:
-            genai.configure(api_key=self.ai_config.api_key)
-            model_name = self.ai_config.model
-            self.model = genai.GenerativeModel(model_name,
-                                               generation_config=GenerationConfig(
-                                                   temperature=0.5,
-                                                   max_output_tokens=1024,
-                                                   top_p=1,
-                                                   stop_sequences=None,
-                                               )
-                                               )
-            self.logger.debug(f"GeminiAIManager initialized with model: {model_name}")
-        except Exception as e:
-            self.logger.error(f"Failed to initialize Gemini API: {e}")
-            self.model = None
-
-    def _build_prompt(self, lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str) -> str:
-        prompt = super()._build_prompt(lines, sentence, current_line, game_title)
-        return prompt
-
-    def process(self, lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str = "") -> str:
-        if self.model is None:
-            return "Processing failed: AI model not initialized."
-
-        if not lines or not current_line:
-            self.logger.warning(f"Invalid input for process: lines={len(lines)}, current_line={current_line.index}")
-            return "Invalid input."
-
-        try:
-            prompt = self._build_prompt(lines, sentence, current_line, game_title)
-            self.logger.debug(f"Generated prompt:\n{prompt}")
-            response = self.model.generate_content(prompt)
-            result = response.text.strip()
-            self.logger.debug(f"Received response:\n{result}")
-            return result
-        except Exception as e:
-            self.logger.error(f"Gemini processing failed: {e}")
-            return f"Processing failed: {e}"
-
-class GroqAI(AIManager):
-    def __init__(self, model, api_key, logger: Optional[logging.Logger] = None):
-        super().__init__(GroqAiConfig(model=model, api_key=api_key), logger)
-        self.api_key = self.ai_config.api_key
-        self.model_name = self.ai_config.model
-        try:
-            self.client = Groq(api_key=self.api_key)
-            self.logger.debug(f"GroqAIManager initialized with model: {self.model_name}")
-        except Exception as e:
-            self.logger.error(f"Failed to initialize Groq client: {e}")
-            self.client = None
-
-    def _build_prompt(self, lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str) -> str:
-        prompt = super()._build_prompt(lines, sentence, current_line, game_title)
-        return prompt
-
-    def process(self, lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str = "") -> str:
-        if self.client is None:
-            return "Processing failed: Groq client not initialized."
-
-        if not lines or not current_line:
-            self.logger.warning(f"Invalid input for process: lines={len(lines)}, current_line={current_line.index}")
-            return "Invalid input."
-
-        try:
-            prompt = self._build_prompt(lines, sentence, current_line, game_title)
-            self.logger.debug(f"Generated prompt:\n{prompt}")
-            completion = self.client.chat.completions.create(
-                model=self.model_name,
-                messages=[{"role": "user", "content": prompt}],
-                temperature=.5,
-                max_completion_tokens=1024,
-                top_p=1,
-                stream=False,
-                stop=None,
-            )
-            result = completion.choices[0].message.content.strip()
-            self.logger.debug(f"Received response:\n{result}")
-            return result
-        except Exception as e:
-            self.logger.error(f"Groq processing failed: {e}")
-            return f"Processing failed: {e}"
-
-ai_manager: AIManager | None = None
-current_ai_config: Ai | None = None
-
-def get_ai_prompt_result(lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str = ""):
-    global ai_manager, current_ai_config
-    try:
-        if not is_connected():
-            logger.error("No internet connection. Unable to proceed with AI prompt.")
-            return ""
-        if not ai_manager or get_config().ai != current_ai_config:
-            if get_config().ai.provider == AIType.GEMINI.value:
-                ai_manager = GeminiAI(model=get_config().ai.gemini_model, api_key=get_config().ai.gemini_api_key, logger=logger)
-            elif get_config().ai.provider == AIType.GROQ.value:
-                ai_manager = GroqAI(model=get_config().ai.groq_model, api_key=get_config().ai.groq_api_key, logger=logger)
-            current_ai_config = get_config().ai
-        if not ai_manager:
-            logger.error("AI is enabled but the AI Manager did not initialize. Check your AI Config IN GSM.")
-            return ""
-        return ai_manager.process(lines, sentence, current_line, game_title)
-    except Exception as e:
-        logger.error("Error caught while trying to get AI prompt result. Check logs for more details.")
-        logger.debug(e)
-        return ""
-
-if __name__ == '__main__':
-    lines = [
-        GameLine(index=0, text="こんにちは、元気ですか?", id=None, time=None, prev=None, next=None),
-        GameLine(index=1, text="今日はいい天気ですね。",id=None, time=None, prev=None, next=None),
-        GameLine(index=2, text="ゲームを始めましょう!",id=None, time=None, prev=None, next=None),
-    ]
-    sentence = "ゲームを始めましょう!"
-    current_line = lines[2]
-    game_title = "Test Game"
-
-    # Set up logging
-    logging.basicConfig(level=logging.DEBUG)
-
-    # Test the function
-    result = get_ai_prompt_result(lines, sentence, current_line, game_title)
-    print("AI Prompt Result:", result)