GameSentenceMiner 2.10.16__py3-none-any.whl → 2.11.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,12 +1,22 @@
 import logging
 import textwrap
+import time
 from abc import ABC, abstractmethod
 from dataclasses import dataclass
 from enum import Enum
 from typing import List, Optional
 
-import google.generativeai as genai
-from google.generativeai import GenerationConfig
+
+try:
+    import torch
+    from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModelForSeq2SeqLM, pipeline
+
+    TRANSFORMERS_AVAILABLE = True
+except ImportError:
+    TRANSFORMERS_AVAILABLE = False
+
+from google import genai
+from google.genai import types
 from groq import Groq
 
 from GameSentenceMiner.util.configuration import get_config, Ai, logger
@@ -17,22 +27,35 @@ from GameSentenceMiner.util.text_log import GameLine
 logging.getLogger("httpcore").setLevel(logging.WARNING)
 logging.getLogger("httpx").setLevel(logging.WARNING)
 logging.getLogger("groq._base_client").setLevel(logging.WARNING)
+MANUAL_MODEL_OVERRIDE = None
 
+TRANSLATION_PROMPT = f"""
+**Professional Game Localization Task**
 
-TRANSLATION_PROMPT = textwrap.dedent(f"""Translate the following Japanese dialogue from this game into natural, context-aware English. Focus on preserving the tone, intent, and emotional nuance of the original text, paying close attention to the context provided by surrounding lines. The dialogue may include slang, idioms, implied meanings, or game-specific terminology that should be adapted naturally for English-speaking players. Ensure the translation feels immersive and aligns with the game's narrative style and character voices.
-Translate only the specified line below, providing a single result. Do not include additional text, explanations, alternatives, or other lines unless explicitly requested. If there are alternatives, choose the best one. Allow expletives if more natural. Allow HTML tags for emphasis, italics, and other formatting as needed. Please also try to preserve existing HTML tags from the specified sentence if appropriate. Answer with nothing but the best translation, no alternatives or explanations.
+**Task Directive:**
+Translate ONLY the single line of game dialogue specified below into natural-sounding, context-aware English. The translation must preserve the original tone and intent of the character.
 
-Line to Translate:
-""")
+**Output Requirements:**
+- Provide only the single, best English translation.
+- Use expletives if they are natural for the context and enhance the translation's impact, but do not over-exaggerate.
+- Preserve or add HTML tags (e.g., `<i>`, `<b>`) if appropriate for emphasis.
+- Do not include notes, alternatives, explanations, or any other surrounding text. Absolutely nothing but the translated line.
+
+**Line to Translate:**
+"""
+
+CONTEXT_PROMPT = textwrap.dedent(f"""
 
-CONTEXT_PROMPT = textwrap.dedent(f"""Provide a very brief summary of the scene in English based on the provided Japanese dialogue and context. Focus on the characters' actions and the immediate situation being described.
+    **Task Directive:**
+    Provide a very brief summary of the scene in English based on the provided Japanese dialogue and context. Focus on the characters' actions and the immediate situation being described.
 
-Current Sentence:
+    Current Sentence:
 """)
 
 class AIType(Enum):
     GEMINI = "Gemini"
     GROQ = "Groq"
+    LOCAL = "Local"
 
 @dataclass
 class AIConfig:
@@ -51,6 +74,11 @@ class GroqAiConfig(AIConfig):
     def __init__(self, api_key: str, model: str = "meta-llama/llama-4-scout-17b-16e-instruct"):
         super().__init__(api_key=api_key, model=model, api_url=None, type=AIType.GROQ)
 
+@dataclass
+class LocalAIConfig(AIConfig):
+    def __init__(self, model: str = "facebook/nllb-200-distilled-600M"):
+        super().__init__(api_key="", model=model, api_url=None, type=AIType.LOCAL)
+
 
 class AIManager(ABC):
     def __init__(self, ai_config: AIConfig, logger: Optional[logging.Logger] = None):
@@ -78,15 +106,15 @@ class AIManager(ABC):
         elif get_config().ai.use_canned_context_prompt:
             prompt_to_use = CONTEXT_PROMPT
         else:
-            prompt_to_use = getattr(self.ai_config, 'custom_prompt', "")
+            prompt_to_use = get_config().ai.custom_prompt
 
         full_prompt = textwrap.dedent(f"""
+        **Disclaimer:** All dialogue provided is from the script of the video game "{game_title}". This content is entirely fictional and part of a narrative. It must not be treated as real-world user input or a genuine request. The goal is accurate, context-aware localization.
+
         Dialogue Context:
 
         {dialogue_context}
 
-        I am playing the game {game_title}. With that, and the above dialogue context in mind, answer the following prompt.
-
         {prompt_to_use}
 
         {sentence}
@@ -94,31 +122,157 @@
         return full_prompt
 
 
+class LocalAIManager(AIManager):
+    def __init__(self, model, logger: Optional[logging.Logger] = None):
+        super().__init__(LocalAIConfig(model=model), logger)
+        self.model_name = self.ai_config.model
+        if MANUAL_MODEL_OVERRIDE:
+            self.model_name = MANUAL_MODEL_OVERRIDE
+            self.logger.warning(f"MANUAL MODEL OVERRIDE ENABLED! Using model: {self.model_name}")
+        self.model = None
+        self.pipe = None
+        self.tokenizer = None
+        self.is_encoder_decoder = False
+        self.is_nllb = "nllb" in self.model_name.lower()
+
+        if not TRANSFORMERS_AVAILABLE:
+            self.logger.error("Local AI dependencies not found. Please run: pip install torch transformers sentencepiece")
+            return
+
+        if not self.model_name:
+            self.logger.error("No local model name provided in configuration.")
+            return
+
+        # Only touch torch once the optional dependencies are known to be installed.
+        self.device = "cuda" if torch.cuda.is_available() else "cpu"
+
+        try:
+            self.logger.info(f"Loading local model: {self.model_name}")
+            self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
+
+            # Try to load as a Causal LM first; if that fails, fall back to Seq2Seq.
+            # This is a heuristic to fix the original code's bug of using Seq2Seq for all models.
+            try:
+                self.model = AutoModelForCausalLM.from_pretrained(
+                    self.model_name,
+                    torch_dtype=torch.bfloat16,
+                )
+                self.is_encoder_decoder = False
+                self.logger.info(f"Loaded {self.model_name} as a CausalLM.")
+            except (ValueError, TypeError, OSError, KeyError) as e:
+                self.logger.debug(f"CausalLM load failed ({e}); retrying as Seq2SeqLM.")
+                self.model = AutoModelForSeq2SeqLM.from_pretrained(
+                    self.model_name,
+                    torch_dtype=torch.bfloat16,
+                )
+                self.is_encoder_decoder = True
+                self.logger.info(f"Loaded {self.model_name} as a Seq2SeqLM.")
+            if self.device == "cuda":
+                self.model.to(self.device)
+
+            self.logger.info(f"Local model '{self.model_name}' loaded on {self.device}.")
+        except Exception as e:
+            self.logger.error(f"Failed to load local model '{self.model_name}': {e}", exc_info=True)
+            self.model = None
+            self.tokenizer = None
+
+    def _build_prompt(self, lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str) -> str:
+        return super()._build_prompt(lines, sentence, current_line, game_title)
+
+    def process(self, lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str = "") -> str:
+        if (not self.model or not self.tokenizer) and not self.pipe:
+            return "Processing failed: Local AI model not initialized."
+
+        text_to_process = self._build_prompt(lines, sentence, current_line, game_title)
+        self.logger.debug(f"Generated prompt for local model:\n{text_to_process}")
+
+        try:
+            if self.is_encoder_decoder:
+                if self.is_nllb:
+                    # NLLB-specific handling: translate only the raw line, Japanese -> English.
+                    self.tokenizer.src_lang = "jpn_Jpan"
+                    inputs = self.tokenizer(current_line.text, return_tensors="pt").to(self.device)
+                    generated_tokens = self.model.generate(
+                        **inputs,
+                        forced_bos_token_id=self.tokenizer.convert_tokens_to_ids("eng_Latn"),
+                        max_new_tokens=256
+                    )
+                    result = self.tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]
+                else:
+                    # Generic Seq2Seq
+                    inputs = self.tokenizer(text_to_process, return_tensors="pt").to(self.device)
+                    outputs = self.model.generate(**inputs, max_new_tokens=256)
+                    result = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
+            else:
+                # Causal LM with chat template
+                messages = [
+                    {"role": "user", "content": text_to_process}
+                ]
+                tokenized_chat = self.tokenizer.apply_chat_template(
+                    messages, tokenize=True, add_generation_prompt=True, return_tensors="pt"
+                ).to(self.device)
+                outputs = self.model.generate(tokenized_chat, max_new_tokens=256)
+                # Decode only the newly generated tokens, not the echoed prompt.
+                result = self.tokenizer.decode(outputs[0][tokenized_chat.shape[-1]:], skip_special_tokens=True)
+
+            result = result.strip()
+            self.logger.debug(f"Received response from local model:\n{result}")
+            return result
+        except Exception as e:
+            self.logger.error(f"Local model processing failed: {e}", exc_info=True)
+            return f"Processing failed: {e}"
+
+
 class GeminiAI(AIManager):
     def __init__(self, model, api_key, logger: Optional[logging.Logger] = None):
         super().__init__(GeminiAIConfig(model=model, api_key=api_key), logger)
         try:
-            genai.configure(api_key=self.ai_config.api_key)
-            model_name = self.ai_config.model
-            self.model = genai.GenerativeModel(model_name,
-                                               generation_config=GenerationConfig(
-                                                   temperature=0.5,
-                                                   max_output_tokens=1024,
-                                                   top_p=1,
-                                                   stop_sequences=None,
-                                               )
-                                               )
-            self.logger.debug(f"GeminiAIManager initialized with model: {model_name}")
+            self.client = genai.Client(api_key=self.ai_config.api_key)
+            self.model_name = model
+            if MANUAL_MODEL_OVERRIDE:
+                self.model_name = MANUAL_MODEL_OVERRIDE
+                self.logger.warning(f"MANUAL MODEL OVERRIDE ENABLED! Using model: {self.model_name}")
+            self.generation_config = types.GenerateContentConfig(
+                temperature=0.5,
+                max_output_tokens=1024,
+                top_p=1,
+                stop_sequences=None,
+                safety_settings=[
+                    types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_HARASSMENT, threshold=types.HarmBlockThreshold.BLOCK_NONE),
+                    types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_HATE_SPEECH, threshold=types.HarmBlockThreshold.BLOCK_NONE),
+                    types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT, threshold=types.HarmBlockThreshold.BLOCK_NONE),
+                    types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, threshold=types.HarmBlockThreshold.BLOCK_NONE),
+                ],
+            )
+            if "2.5" in self.model_name:
+                self.generation_config.thinking_config = types.ThinkingConfig(
+                    thinking_budget=0,
+                )
+            self.logger.debug(f"GeminiAIManager initialized with model: {self.model_name}")
         except Exception as e:
             self.logger.error(f"Failed to initialize Gemini API: {e}")
-            self.model = None
+            self.model_name = None
 
     def _build_prompt(self, lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str) -> str:
         prompt = super()._build_prompt(lines, sentence, current_line, game_title)
         return prompt
 
     def process(self, lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str = "") -> str:
-        if self.model is None:
+        if self.model_name is None:
             return "Processing failed: AI model not initialized."
 
         if not lines or not current_line:
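The NLLB branch in LocalAIManager.process is easy to sanity-check outside GSM. A minimal standalone sketch mirroring the exact calls above (the facebook/nllb-200-distilled-600M weights download on first run; the diff additionally loads in bfloat16 and moves the model to CUDA when available):

    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

    model_name = "facebook/nllb-200-distilled-600M"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

    tokenizer.src_lang = "jpn_Jpan"  # source text is Japanese
    inputs = tokenizer("お前は何をしている!?", return_tensors="pt")
    tokens = model.generate(
        **inputs,
        # Force English as the first generated token, per the NLLB recipe above.
        forced_bos_token_id=tokenizer.convert_tokens_to_ids("eng_Latn"),
        max_new_tokens=256,
    )
    print(tokenizer.batch_decode(tokens, skip_special_tokens=True)[0])

Note that only current_line.text is fed to NLLB; the assembled instruction prompt is ignored on this path, since a plain machine-translation model cannot follow instructions.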
@@ -127,8 +281,21 @@ class GeminiAI(AIManager):
 
         try:
             prompt = self._build_prompt(lines, sentence, current_line, game_title)
+            contents = [
+                types.Content(
+                    role="user",
+                    parts=[
+                        types.Part.from_text(text=prompt),
+                    ],
+                ),
+            ]
             self.logger.debug(f"Generated prompt:\n{prompt}")
-            response = self.model.generate_content(prompt)
+            response = self.client.models.generate_content(
+                model=self.model_name,
+                contents=contents,
+                config=self.generation_config
+            )
+            self.logger.debug(f"Full response: {response}")
             result = response.text.strip()
             self.logger.debug(f"Received response:\n{result}")
             return result
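This hunk completes the migration from the legacy google-generativeai package to the google-genai SDK: a shared genai.Client replaces the per-model GenerativeModel object, and generation options move into types.GenerateContentConfig. A minimal sketch of the new call shape used above (API key and model name are placeholders):

    from google import genai
    from google.genai import types

    client = genai.Client(api_key="YOUR_API_KEY")  # placeholder
    response = client.models.generate_content(
        model="gemini-2.0-flash",  # placeholder; GSM passes the configured gemini_model
        contents=[types.Content(role="user", parts=[types.Part.from_text(text="Translate: こんにちは")])],
        config=types.GenerateContentConfig(temperature=0.5, max_output_tokens=1024),
    )
    print(response.text)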
@@ -179,43 +346,173 @@ class GroqAI(AIManager):
             self.logger.error(f"Groq processing failed: {e}")
             return f"Processing failed: {e}"
 
+ai_managers: dict[str, AIManager] = {}
 ai_manager: AIManager | None = None
 current_ai_config: Ai | None = None
 
-def get_ai_prompt_result(lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str = ""):
+def get_ai_prompt_result(lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str = "", force_refresh: bool = False) -> str:
     global ai_manager, current_ai_config
     try:
-        if not is_connected():
+        is_local_provider = get_config().ai.provider == AIType.LOCAL.value
+        if not is_local_provider and not is_connected():
             logger.error("No internet connection. Unable to proceed with AI prompt.")
             return ""
-        if not ai_manager or get_config().ai != current_ai_config:
-            if get_config().ai.provider == AIType.GEMINI.value:
-                ai_manager = GeminiAI(model=get_config().ai.gemini_model, api_key=get_config().ai.gemini_api_key, logger=logger)
-            elif get_config().ai.provider == AIType.GROQ.value:
-                ai_manager = GroqAI(model=get_config().ai.groq_model, api_key=get_config().ai.groq_api_key, logger=logger)
+
+        if not ai_manager or ai_config_changed(get_config().ai, current_ai_config) or force_refresh:
+            provider = get_config().ai.provider
+            if provider == AIType.GEMINI.value:
+                if get_config().ai.gemini_model in ai_managers:
+                    ai_manager = ai_managers[get_config().ai.gemini_model]
+                    logger.info(f"Reusing existing Gemini AI Manager for model: {get_config().ai.gemini_model}")
+                else:
+                    ai_manager = GeminiAI(model=get_config().ai.gemini_model, api_key=get_config().ai.gemini_api_key, logger=logger)
+            elif provider == AIType.GROQ.value:
+                if get_config().ai.groq_model in ai_managers:
+                    ai_manager = ai_managers[get_config().ai.groq_model]
+                    logger.info(f"Reusing existing Groq AI Manager for model: {get_config().ai.groq_model}")
+                else:
+                    ai_manager = GroqAI(model=get_config().ai.groq_model, api_key=get_config().ai.groq_api_key, logger=logger)
+            elif provider == AIType.LOCAL.value:
+                if get_config().ai.local_model in ai_managers:
+                    ai_manager = ai_managers[get_config().ai.local_model]
+                    logger.info(f"Reusing existing Local AI Manager for model: {get_config().ai.local_model}")
+                else:
+                    ai_manager = LocalAIManager(model=get_config().ai.local_model, logger=logger)
+            else:
+                ai_manager = None
+            if ai_manager:
+                ai_managers[ai_manager.model_name] = ai_manager
             current_ai_config = get_config().ai
+
         if not ai_manager:
             logger.error("AI is enabled but the AI Manager did not initialize. Check your AI Config IN GSM.")
             return ""
         return ai_manager.process(lines, sentence, current_line, game_title)
     except Exception as e:
         logger.error("Error caught while trying to get AI prompt result. Check logs for more details.")
-        logger.debug(e)
+        logger.debug(e, exc_info=True)
         return ""
 
+def ai_config_changed(config, current):
+    if not current:
+        return True
+    if config.provider != current.provider:
+        return True
+    if config.provider == AIType.GEMINI.value and (config.gemini_api_key != current.gemini_api_key or config.gemini_model != current.gemini_model):
+        return True
+    if config.provider == AIType.GROQ.value and (config.groq_api_key != current.groq_api_key or config.groq_model != current.groq_model):
+        return True
+    if config.provider == AIType.LOCAL.value and config.local_model != current.local_model:
+        return True
+    if config.custom_prompt != current.custom_prompt:
+        return True
+    if config.use_canned_translation_prompt != current.use_canned_translation_prompt:
+        return True
+    if config.use_canned_context_prompt != current.use_canned_context_prompt:
+        return True
+    return False
+
+
 if __name__ == '__main__':
+    # logger.setLevel(logging.DEBUG)
+    # console_handler = logging.StreamHandler()
+    # console_handler.setLevel(logging.DEBUG)
+    # logger.addHandler(console_handler)
+    # logging.basicConfig(level=logging.DEBUG)
+    lines = [
+        # Sexual/explicit Japanese words and phrases (safety-filter test data)
+        GameLine(index=0, text="ねぇ、あたしのおっぱい、揉んでみない?", id=None, time=None, prev=None, next=None),
+        GameLine(index=1, text="お前、本当に痴女だな。股が開いてるぜ。", id=None, time=None, prev=None, next=None),
+        GameLine(index=2, text="今夜は熱い夜にしましょうね…ふふ。", id=None, time=None, prev=None, next=None),
+        GameLine(index=3, text="あぁ…もっと奥まで…ダメ…イッちゃう…!", id=None, time=None, prev=None, next=None),
+        GameLine(index=4, text="あんたみたいなやつ、生きてる価値ないわ。さっさと自害しろ。", id=None, time=None, prev=None,
+                 next=None),
+        GameLine(index=5, text="このブス!誰がお前なんかを相手にするかよ。", id=None, time=None, prev=None, next=None),
+        GameLine(index=6, text="こんにちは、元気ですか?", id=None, time=None, prev=None, next=None),
+        GameLine(index=7, text="次会ったら、ぶっ殺してやるからな。", id=None, time=None, prev=None, next=None),
+        GameLine(index=8, text="今日はいい天気ですね。", id=None, time=None, prev=None, next=None),
+        GameLine(index=9, text="お前の体、隅々まで味わい尽くしてやる。", id=None, time=None, prev=None, next=None),
+        GameLine(index=10, text="自害しろ", id=None, time=None, prev=None, next=None),
+        GameLine(index=11, text="この売女!金のために魂まで売るのか?!", id=None, time=None, prev=None, next=None),
+        GameLine(index=12, text="俺の股間のモノで黙らせてやるよ。", id=None, time=None, prev=None, next=None),
+        GameLine(index=13, text="くっ…イク…頭が…おかしくなりそう…!", id=None, time=None, prev=None, next=None),
+    ]
+
     lines = [
-        GameLine(index=0, text="こんにちは、元気ですか?", id=None, time=None, prev=None, next=None),
-        GameLine(index=1, text="今日はいい天気ですね。", id=None, time=None, prev=None, next=None),
-        GameLine(index=2, text="ゲームを始めましょう!", id=None, time=None, prev=None, next=None),
+        # A back-and-forth dialogue of insults and threats
+        GameLine(index=0, text="お前、ここで何をしている?目障りだ。", id=None, time=None, prev=None, next=None),
+        GameLine(index=1, text="それはこっちのセリフだ。さっさと消えろ、クズが。", id=None, time=None, prev=None,
+                 next=None),
+        GameLine(index=2, text="口だけは達者だな。やれるもんならやってみろよ。", id=None, time=None, prev=None,
+                 next=None),
+        GameLine(index=3, text="くっ…!調子に乗るなよ…!", id=None, time=None, prev=None, next=None),
+        GameLine(index=4, text="あんたみたいなやつ、生きてる価値ないわ。さっさと自害しろ。", id=None, time=None, prev=None,
+                 next=None),
+        GameLine(index=5, text="この能無しが!誰がお前なんかを相手にするかよ。", id=None, time=None, prev=None,
+                 next=None),
+        GameLine(index=6, text="黙れ。これ以上喋るなら、その舌を引っこ抜いてやる。", id=None, time=None, prev=None,
+                 next=None),
+        GameLine(index=7, text="次会ったら、ぶっ殺してやるからな。", id=None, time=None, prev=None, next=None),
+        GameLine(index=8, text="はっ、望むところだ。返り討ちにしてやる。", id=None, time=None, prev=None, next=None),
+        GameLine(index=9, text="お前の顔も見たくない。地獄に落ちろ。", id=None, time=None, prev=None, next=None),
+        GameLine(index=10, text="自害しろ", id=None, time=None, prev=None, next=None),
+        GameLine(index=11, text="この臆病者が!逃げることしか能がないのか?!", id=None, time=None, prev=None, next=None),
+        GameLine(index=12, text="俺の拳で黙らせてやるよ。", id=None, time=None, prev=None, next=None),
+        GameLine(index=13, text="くそっ…覚えてろよ…!このままじゃ終わらせない…!", id=None, time=None, prev=None,
+                 next=None),
     ]
-    sentence = "ゲームを始めましょう!"
-    current_line = lines[2]
-    game_title = "Test Game"
 
+    sentence = "黙れ。これ以上喋るなら、その舌を引っこ抜いてやる。"
+    current_line = lines[6]
+    game_title = "Corrupted Reality"
+
+    get_config().ai.provider = "Local"
+    models = [
+        # 'google/gemma-2-2b-it',
+        # 'google/gemma-2b-it',
+        'facebook/nllb-200-distilled-600M',
+        # 'meta-llama/Llama-3.2-1B-Instruct',
+        # 'facebook/nllb-200-1.3B'
+    ]
+
+    results = []
+
+    # First pass, including model load time:
+    # for model in models:
+    #     get_config().ai.local_model = model
+    #     start_time = time.time()
+    #     result = get_ai_prompt_result(lines, sentence, current_line, game_title, True)
+    #     results.append({"model": model, "response": result, "time": time.time() - start_time, "iteration": 1})
+
+    # Repeated runs, after the models are already loaded
+    for i in range(1, 500):
+        for model in models:
+            get_config().ai.local_model = model
+            start_time = time.time()
+            result = get_ai_prompt_result(lines, sentence, current_line, game_title, True)
+            print(result)
+            results.append({"model": model, "response": result, "time": time.time() - start_time, "iteration": i})
+            # results[model] = {"response": result, "time": time.time() - start_time}
+
+    # get_config().ai.provider = "Gemini"
+    #
+    # models = ['gemini-2.5-flash', 'gemini-2.0-flash', 'gemini-2.0-flash-lite',
+    #           'gemini-2.5-flash-lite-preview-06-17']
+    # # results = {}
+    # for model in models:
+    #     get_config().ai.gemini_model = model
+    #     start_time = time.time()
+    #     result = get_ai_prompt_result(lines, sentence, current_line, game_title, True)
+    #     results.append({"model": model, "response": result, "time": time.time() - start_time, "iteration": 1})
+    #     # results[model] = {"response": result, "time": time.time() - start_time}
+    #
+    print("Summary of results:")
+    times = []
+    for result in results:
+        times.append(result['time'])
+        print(f"Model: {result['model']}\nResult: {result['response']}\nTime: {result['time']:.2f} seconds\n{'-'*80}\n")
+
+    print(f"Average time: {sum(times)/len(times):.2f} seconds over {len(times)} runs.")
     # Set up logging
-    logging.basicConfig(level=logging.DEBUG)
 
     # Test the function
-    result = get_ai_prompt_result(lines, sentence, current_line, game_title)
-    print("AI Prompt Result:", result)
+
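The module now keeps one manager per model name in ai_managers, so flipping between providers or models reuses already-initialized objects instead of rebuilding them; this matters most for the Local provider, where a rebuild means reloading the model weights from disk. A usage sketch against the new signature (the lines, sentence, and current_line variables reuse the benchmark data above):

    # force_refresh=True re-resolves the manager immediately after a provider
    # or model change instead of waiting for ai_config_changed() to notice.
    translation = get_ai_prompt_result(lines, sentence, current_line,
                                       game_title="Corrupted Reality", force_refresh=True)
    print(translation)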
GameSentenceMiner/anki.py CHANGED
@@ -76,10 +76,14 @@ def update_anki_card(last_note: AnkiCard, note=None, audio_path='', video_path='
 
     if note and 'fields' in note and get_config().ai.enabled:
         sentence_field = note['fields'].get(get_config().anki.sentence_field, {})
-        sentence_to_translate = sentence_field if sentence_field else last_note.get_field(
-            get_config().anki.sentence_field)
-        translation = get_ai_prompt_result(get_all_lines(), sentence_to_translate,
-                                           game_line, get_current_game())
+        if not selected_lines and game_line.TL:
+            logger.info("Using TL from texthooker for AI Prompt Result")
+            translation = game_line.TL
+        else:
+            sentence_to_translate = sentence_field if sentence_field else last_note.get_field(
+                get_config().anki.sentence_field)
+            translation = get_ai_prompt_result(get_all_lines(), sentence_to_translate,
+                                               game_line, get_current_game())
         logger.info(f"AI prompt Result: {translation}")
         note['fields'][get_config().ai.anki_field] = translation
 
@@ -328,7 +328,7 @@ class ConfigApp:
             vad=VAD(
                 whisper_model=self.whisper_model.get(),
                 do_vad_postprocessing=self.do_vad_postprocessing.get(),
-                vosk_url='https://alphacephei.com/vosk/models/vosk-model-ja-0.22.zip' if self.vosk_url.get() == VOSK_BASE else "https://alphacephei.com/vosk/models/vosk-model-small-ja-0.22.zip",
+                # vosk_url='https://alphacephei.com/vosk/models/vosk-model-ja-0.22.zip' if self.vosk_url.get() == VOSK_BASE else "https://alphacephei.com/vosk/models/vosk-model-small-ja-0.22.zip",
                 selected_vad_model=self.selected_vad_model.get(),
                 backup_vad_model=self.backup_vad_model.get(),
                 trim_beginning=self.vad_trim_beginning.get(),
@@ -356,6 +356,7 @@ class ConfigApp:
                 gemini_api_key=self.gemini_api_key.get(),
                 api_key=self.gemini_api_key.get(),
                 groq_api_key=self.groq_api_key.get(),
+                local_model=self.local_ai_model.get(),
                 anki_field=self.ai_anki_field.get(),
                 use_canned_translation_prompt=self.use_canned_translation_prompt.get(),
                 use_canned_context_prompt=self.use_canned_context_prompt.get(),
@@ -413,13 +414,13 @@ class ConfigApp:
         for func in on_save:
             func()
 
-    def reload_settings(self):
+    def reload_settings(self, force_refresh=False):
         new_config = configuration.load_config()
         current_config = new_config.get_config()
 
         self.window.title("GameSentenceMiner Configuration - " + current_config.name)
 
-        if current_config.name != self.settings.name or self.settings.config_changed(current_config):
+        if current_config.name != self.settings.name or self.settings.config_changed(current_config) or force_refresh:
             logger.info("Config changed, reloading settings.")
             self.master_config = new_config
             self.settings = current_config
@@ -597,7 +598,7 @@ class ConfigApp:
         self.current_row += 1
 
         HoverInfoLabelWidget(vad_frame, text="Language:",
-                             tooltip="Select the language for VAD. This is used for Whisper and Groq (if i implemented it)",
+                             tooltip="Select the language for VAD. This is used for Whisper only.",
                              row=self.current_row, column=0)
         self.language = ttk.Combobox(vad_frame, values=AVAILABLE_LANGUAGES, state="readonly")
         self.language.set(self.settings.vad.language)
@@ -614,7 +615,7 @@ class ConfigApp:
 
         HoverInfoLabelWidget(vad_frame, text="Select VAD Model:", tooltip="Select which VAD model to use.",
                              foreground="dark orange", font=("Helvetica", 10, "bold"), row=self.current_row, column=0)
-        self.selected_vad_model = ttk.Combobox(vad_frame, values=[VOSK, SILERO, WHISPER, GROQ], state="readonly")
+        self.selected_vad_model = ttk.Combobox(vad_frame, values=[SILERO, WHISPER], state="readonly")
         self.selected_vad_model.set(self.settings.vad.selected_vad_model)
         self.selected_vad_model.grid(row=self.current_row, column=1, sticky='EW', pady=2)
         self.current_row += 1
@@ -622,7 +623,7 @@ class ConfigApp:
         HoverInfoLabelWidget(vad_frame, text="Backup VAD Model:",
                              tooltip="Select which model to use as a backup if no audio is found.",
                              row=self.current_row, column=0)
-        self.backup_vad_model = ttk.Combobox(vad_frame, values=[OFF, VOSK, SILERO, WHISPER, GROQ], state="readonly")
+        self.backup_vad_model = ttk.Combobox(vad_frame, values=[OFF, SILERO, WHISPER], state="readonly")
        self.backup_vad_model.set(self.settings.vad.backup_vad_model)
         self.backup_vad_model.grid(row=self.current_row, column=1, sticky='EW', pady=2)
         self.current_row += 1
@@ -1530,13 +1531,13 @@ class ConfigApp:
         self.polling_rate.grid(row=self.current_row, column=1, sticky='EW', pady=2)
         self.current_row += 1
 
-        HoverInfoLabelWidget(advanced_frame, text="Vosk URL:", tooltip="URL for connecting to the Vosk server.",
-                             row=self.current_row, column=0)
-        self.vosk_url = ttk.Combobox(advanced_frame, values=[VOSK_BASE, VOSK_SMALL], state="readonly")
-        self.vosk_url.set(
-            VOSK_BASE if self.settings.vad.vosk_url == 'https://alphacephei.com/vosk/models/vosk-model-ja-0.22.zip' else VOSK_SMALL)
-        self.vosk_url.grid(row=self.current_row, column=1, sticky='EW', pady=2)
-        self.current_row += 1
+        # HoverInfoLabelWidget(advanced_frame, text="Vosk URL:", tooltip="URL for connecting to the Vosk server.",
+        #                      row=self.current_row, column=0)
+        # self.vosk_url = ttk.Combobox(advanced_frame, values=[VOSK_BASE, VOSK_SMALL], state="readonly")
+        # self.vosk_url.set(
+        #     VOSK_BASE if self.settings.vad.vosk_url == 'https://alphacephei.com/vosk/models/vosk-model-ja-0.22.zip' else VOSK_SMALL)
+        # self.vosk_url.grid(row=self.current_row, column=1, sticky='EW', pady=2)
+        # self.current_row += 1
 
         self.add_reset_button(advanced_frame, "advanced", self.current_row, 0, self.create_advanced_tab)
 
@@ -1568,15 +1569,14 @@ class ConfigApp:
 
         HoverInfoLabelWidget(ai_frame, text="Provider:", tooltip="Select the AI provider.", row=self.current_row,
                              column=0)
-        self.ai_provider = ttk.Combobox(ai_frame, values=['Gemini', 'Groq'], state="readonly")
+        self.ai_provider = ttk.Combobox(ai_frame, values=[AI_GEMINI, AI_GROQ, AI_LOCAL], state="readonly")
         self.ai_provider.set(self.settings.ai.provider)
         self.ai_provider.grid(row=self.current_row, column=1, sticky='EW', pady=2)
         self.current_row += 1
 
         HoverInfoLabelWidget(ai_frame, text="Gemini AI Model:", tooltip="Select the AI model to use.",
                              row=self.current_row, column=0)
-        self.gemini_model = ttk.Combobox(ai_frame, values=['gemini-2.5-flash', 'gemini-2.5-pro', 'gemini-2.0-flash', 'gemini-2.0-flash-lite',
-                                                           'gemini-2.5-flash-lite-preview-06-17'], state="readonly")
+        self.gemini_model = ttk.Combobox(ai_frame, values=['gemma-3n-e4b-it', 'gemini-2.5-flash-lite-preview-06-17', 'gemini-2.5-flash', 'gemini-2.0-flash', 'gemini-2.0-flash-lite'], state="readonly")
         try:
             self.gemini_model.set(self.settings.ai.gemini_model)
         except Exception:
@@ -1608,6 +1608,14 @@ class ConfigApp:
         self.groq_api_key.grid(row=self.current_row, column=1, sticky='EW', pady=2)
         self.current_row += 1
 
+        HoverInfoLabelWidget(ai_frame, text="Local AI Model:",
+                             tooltip="Local AI model to use. Only very basic translation is supported at the moment and may require additional setup.",
+                             foreground="red", font=("Helvetica", 10, "bold"), row=self.current_row, column=0)
+        self.local_ai_model = ttk.Combobox(ai_frame, values=[OFF, 'facebook/nllb-200-distilled-600M', 'facebook/nllb-200-1.3B', 'facebook/nllb-200-3.3B'])
+        self.local_ai_model.set(self.settings.ai.local_model)
+        self.local_ai_model.grid(row=self.current_row, column=1, sticky='EW', pady=2)
+        self.current_row += 1
+
         HoverInfoLabelWidget(ai_frame, text="Anki Field:", tooltip="Field in Anki for AI-generated content.",
                              row=self.current_row, column=0)
         self.ai_anki_field = ttk.Entry(ai_frame)
@@ -1661,7 +1669,7 @@ class ConfigApp:
 
     def on_profile_change(self, event):
         self.save_settings(profile_change=True)
-        self.reload_settings()
+        self.reload_settings(force_refresh=True)
         self.refresh_obs_scenes()
         if self.master_config.current_profile != DEFAULT_CONFIG:
             self.delete_profile_button.grid(row=1, column=2, pady=5)