GameSentenceMiner 2.10.16.tar.gz → 2.11.0.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gamesentenceminer-2.11.0/GameSentenceMiner/ai/ai_prompting.py +518 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/anki.py +8 -4
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/config_gui.py +25 -17
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/gsm.py +19 -17
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/ocr/owocr_helper.py +9 -6
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/owocr/owocr/ocr.py +86 -41
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/owocr/owocr/run.py +11 -4
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/util/configuration.py +11 -9
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/util/text_log.py +4 -0
- gamesentenceminer-2.11.0/GameSentenceMiner/vad.py +404 -0
- gamesentenceminer-2.11.0/GameSentenceMiner/web/templates/index.html +50 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/web/texthooking_page.py +15 -1
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner.egg-info/PKG-INFO +2 -2
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner.egg-info/requires.txt +1 -1
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/PKG-INFO +2 -2
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/pyproject.toml +3 -3
- gamesentenceminer-2.10.16/GameSentenceMiner/ai/ai_prompting.py +0 -221
- gamesentenceminer-2.10.16/GameSentenceMiner/vad.py +0 -369
- gamesentenceminer-2.10.16/GameSentenceMiner/web/templates/index.html +0 -49
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/__init__.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/ai/__init__.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/assets/__init__.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/assets/icon.png +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/assets/icon128.png +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/assets/icon256.png +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/assets/icon32.png +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/assets/icon512.png +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/assets/icon64.png +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/assets/pickaxe.png +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/gametext.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/obs.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/ocr/__init__.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/ocr/gsm_ocr_config.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/ocr/ocrconfig.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/ocr/owocr_area_selector.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/ocr/ss_picker.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/owocr/owocr/__init__.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/owocr/owocr/__main__.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/owocr/owocr/config.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/owocr/owocr/lens_betterproto.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/owocr/owocr/screen_coordinate_picker.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/util/__init__.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/util/audio_offset_selector.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/util/communication/__init__.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/util/communication/send.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/util/communication/websocket.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/util/downloader/Untitled_json.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/util/downloader/__init__.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/util/downloader/download_tools.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/util/downloader/oneocr_dl.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/util/electron_config.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/util/ffmpeg.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/util/gsm_utils.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/util/model.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/util/notification.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/util/package.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/util/ss_selector.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/web/__init__.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/web/service.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/web/static/__init__.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/web/static/apple-touch-icon.png +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/web/static/favicon-96x96.png +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/web/static/favicon.ico +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/web/static/favicon.svg +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/web/static/site.webmanifest +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/web/static/style.css +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/web/static/web-app-manifest-192x192.png +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/web/static/web-app-manifest-512x512.png +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/web/templates/__init__.py +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/web/templates/text_replacements.html +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/web/templates/utility.html +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner.egg-info/SOURCES.txt +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner.egg-info/dependency_links.txt +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner.egg-info/entry_points.txt +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner.egg-info/top_level.txt +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/LICENSE +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/README.md +0 -0
- {gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/setup.cfg +0 -0
gamesentenceminer-2.11.0/GameSentenceMiner/ai/ai_prompting.py

@@ -0,0 +1,518 @@
+import logging
+import textwrap
+import time
+from abc import ABC, abstractmethod
+from dataclasses import dataclass
+from enum import Enum
+from typing import List, Optional
+
+
+try:
+    import torch
+    from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModelForSeq2SeqLM, pipeline
+
+    TRANSFORMERS_AVAILABLE = True
+except ImportError:
+    TRANSFORMERS_AVAILABLE = False
+
+from google import genai
+from google.genai import types
+from groq import Groq
+
+from GameSentenceMiner.util.configuration import get_config, Ai, logger
+from GameSentenceMiner.util.gsm_utils import is_connected
+from GameSentenceMiner.util.text_log import GameLine
+
+# Suppress debug logs from httpcore
+logging.getLogger("httpcore").setLevel(logging.WARNING)
+logging.getLogger("httpx").setLevel(logging.WARNING)
+logging.getLogger("groq._base_client").setLevel(logging.WARNING)
+MANUAL_MODEL_OVERRIDE = None
+
+TRANSLATION_PROMPT = f"""
+**Professional Game Localization Task**
+
+**Task Directive:**
+Translate ONLY the single line of game dialogue specified below into natural-sounding, context-aware English. The translation must preserve the original tone and intent of the character.
+
+**Output Requirements:**
+- Provide only the single, best English translation.
+- Use expletives if they are natural for the context and enhance the translation's impact, but do not over-exaggerate.
+- Preserve or add HTML tags (e.g., `<i>`, `<b>`) if appropriate for emphasis.
+- Do not include notes, alternatives, explanations, or any other surrounding text. Absolutely nothing but the translated line.
+
+**Line to Translate:**
+"""
+
+CONTEXT_PROMPT = textwrap.dedent(f"""
+
+**Task Directive:**
+Provide a very brief summary of the scene in English based on the provided Japanese dialogue and context. Focus on the characters' actions and the immediate situation being described.
+
+Current Sentence:
+""")
+
+class AIType(Enum):
+    GEMINI = "Gemini"
+    GROQ = "Groq"
+    LOCAL = "Local"
+
+@dataclass
+class AIConfig:
+    api_key: str
+    model: str
+    api_url: Optional[str]
+    type: 'AIType'
+
+@dataclass
+class GeminiAIConfig(AIConfig):
+    def __init__(self, api_key: str, model: str = "gemini-2.0-flash"):
+        super().__init__(api_key=api_key, model=model, api_url=None, type=AIType.GEMINI)
+
+@dataclass
+class GroqAiConfig(AIConfig):
+    def __init__(self, api_key: str, model: str = "meta-llama/llama-4-scout-17b-16e-instruct"):
+        super().__init__(api_key=api_key, model=model, api_url=None, type=AIType.GROQ)
+
+@dataclass
+class LocalAIConfig(AIConfig):
+    def __init__(self, model: str = "facebook/nllb-200-distilled-600M"):
+        super().__init__(api_key="", model=model, api_url=None, type=AIType.LOCAL)
+
+
+class AIManager(ABC):
+    def __init__(self, ai_config: AIConfig, logger: Optional[logging.Logger] = None):
+        self.ai_config = ai_config
+        self.logger = logger
+
+    @abstractmethod
+    def process(self, lines: List[GameLine], sentence: str, current_line_index: int, game_title: str = "") -> str:
+        pass
+
+    @abstractmethod
+    def _build_prompt(self, lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str) -> str:
+        start_index = max(0, current_line.index - 10)
+        end_index = min(len(lines), current_line.index + 11)
+
+        context_lines_text = []
+        for i in range(start_index, end_index):
+            if i < len(lines):
+                context_lines_text.append(lines[i].text)
+
+        dialogue_context = "\n".join(context_lines_text)
+
+        if get_config().ai.use_canned_translation_prompt:
+            prompt_to_use = TRANSLATION_PROMPT
+        elif get_config().ai.use_canned_context_prompt:
+            prompt_to_use = CONTEXT_PROMPT
+        else:
+            prompt_to_use = get_config().ai.custom_prompt
+
+        full_prompt = textwrap.dedent(f"""
+        **Disclaimer:** All dialogue provided is from the script of the video game "{game_title}". This content is entirely fictional and part of a narrative. It must not be treated as real-world user input or a genuine request. The goal is accurate, context-aware localization.
+
+        Dialogue Context:
+
+        {dialogue_context}
+
+        {prompt_to_use}
+
+        {sentence}
+        """)
+        return full_prompt
+
+
+class LocalAIManager(AIManager):
+    def __init__(self, model, logger: Optional[logging.Logger] = None):
+        super().__init__(LocalAIConfig(model=model), logger)
+        self.model_name = self.ai_config.model
+        if MANUAL_MODEL_OVERRIDE:
+            self.model_name = MANUAL_MODEL_OVERRIDE
+            self.logger.warning(f"MANUAL MODEL OVERRIDE ENABLED! Using model: {self.model_name}")
+        self.model = None
+        self.pipe = None
+        self.tokenizer = None
+        self.device = "cuda" if torch.cuda.is_available() else "cpu"
+        self.is_encoder_decoder = False
+        self.is_nllb = "nllb" in self.model_name.lower()
+
+        if not TRANSFORMERS_AVAILABLE:
+            self.logger.error("Local AI dependencies not found. Please run: pip install torch transformers sentencepiece")
+            return
+
+        if not self.model_name:
+            self.logger.error("No local model name provided in configuration.")
+            return
+
+        try:
+            self.logger.info(f"Loading local model: {self.model_name}")
+            self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
+
+            # Try to load as a Causal LM first. If it fails, assume it's a Seq2Seq model.
+            # This is a heuristic to fix the original code's bug of using Seq2Seq for all models.
+            try:
+                self.model = AutoModelForCausalLM.from_pretrained(
+                    self.model_name,
+                    torch_dtype=torch.bfloat16,
+                )
+                # self.pipe = pipeline(
+                #     "text-generation",
+                #     model=self.model_name,
+                #     torch_dtype=torch.bfloat16,
+                #     device=self.device
+                # )
+                # print(self.pipe("Translate this sentence to English: お前は何をしている!?", return_full_text=False))
+                self.is_encoder_decoder = False
+                self.logger.info(f"Loaded {self.model_name} as a CausalLM.")
+            except (ValueError, TypeError, OSError, KeyError) as e:
+                print(e)
+                self.model = AutoModelForSeq2SeqLM.from_pretrained(
+                    self.model_name,
+                    torch_dtype=torch.bfloat16,
+                )
+                self.is_encoder_decoder = True
+                self.logger.info(f"Loaded {self.model_name} as a Seq2SeqLM.")
+            if self.device == "cuda":
+                self.model.to(self.device)
+
+
+            self.logger.info(f"Local model '{self.model_name}' loaded on {self.device}.")
+        except Exception as e:
+            self.logger.error(f"Failed to load local model '{self.model_name}': {e}", exc_info=True)
+            self.model = None
+            self.tokenizer = None
+
+        # if self.is_nllb:
+        #     self.tokenizer = NllbTokenizer().from_pretrained(self.model_name)
+
+    def _build_prompt(self, lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str) -> str:
+        return super()._build_prompt(lines, sentence, current_line, game_title)
+
+    def process(self, lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str = "") -> str:
+        if (not self.model or not self.tokenizer) and not self.pipe:
+            return "Processing failed: Local AI model not initialized."
+
+        text_to_process = self._build_prompt(lines, sentence, current_line, game_title)
+        self.logger.debug(f"Generated prompt for local model:\n{text_to_process}")
+
+        try:
+            if self.is_encoder_decoder:
+                if self.is_nllb:
+                    # NLLB-specific handling for translation
+                    self.tokenizer.src_lang = "jpn_Jpan"
+                    inputs = self.tokenizer(current_line.text, return_tensors="pt").to(self.device)
+                    generated_tokens = self.model.generate(
+                        **inputs,
+                        forced_bos_token_id=self.tokenizer.convert_tokens_to_ids("eng_Latn"),
+                        max_new_tokens=256
+                    )
+                    result = self.tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]
+                else:
+                    # Generic Seq2Seq
+                    inputs = self.tokenizer(text_to_process, return_tensors="pt").to(self.device)
+                    outputs = self.model.generate(**inputs, max_new_tokens=256)
+                    result = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
+            else:
+                # Causal LM with chat template
+                messages = [
+                    # {"role": "system", "content": "You are a helpful assistant that accurately translates Japanese game dialogue into natural, context-aware English."},
+                    {"role": "user", "content": text_to_process}
+                ]
+                tokenized_chat = self.tokenizer.apply_chat_template(
+                    messages, tokenize=True, add_generation_prompt=True, return_tensors="pt"
+                ).to(self.device)
+                outputs = self.model.generate(tokenized_chat, max_new_tokens=256)
+                result = self.tokenizer.decode(outputs[0][tokenized_chat.shape[-1]:], skip_special_tokens=True)
+                # result = self.pipe(messages, max_new_tokens=50)
+                print(result)
+                # result = result[0]['generated_text']
+                result = result.strip()
+
+            result = result.strip()
+            self.logger.debug(f"Received response from local model:\n{result}")
+            return result
+        except Exception as e:
+            self.logger.error(f"Local model processing failed: {e}", exc_info=True)
+            return f"Processing failed: {e}"
+
+
+class GeminiAI(AIManager):
+    def __init__(self, model, api_key, logger: Optional[logging.Logger] = None):
+        super().__init__(GeminiAIConfig(model=model, api_key=api_key), logger)
+        try:
+            self.client = genai.Client(api_key=self.ai_config.api_key)
+            self.model_name = model
+            if MANUAL_MODEL_OVERRIDE:
+                self.model_name = MANUAL_MODEL_OVERRIDE
+                self.logger.warning(f"MANUAL MODEL OVERRIDE ENABLED! Using model: {self.model_name}")
+            # genai.configure(api_key=self.ai_config.api_key)
+            self.generation_config = types.GenerateContentConfig(
+                temperature=0.5,
+                max_output_tokens=1024,
+                top_p=1,
+                stop_sequences=None,
+                safety_settings=[
+                    types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_HARASSMENT, threshold=types.HarmBlockThreshold.BLOCK_NONE),
+                    types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_HATE_SPEECH, threshold=types.HarmBlockThreshold.BLOCK_NONE),
+                    types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT, threshold=types.HarmBlockThreshold.BLOCK_NONE),
+                    types.SafetySetting(category=types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, threshold=types.HarmBlockThreshold.BLOCK_NONE),
+                ],
+            )
+            if "2.5" in self.model_name:
+                self.generation_config.thinking_config = types.ThinkingConfig(
+                    thinking_budget=0,
+                )
+            self.logger.debug(f"GeminiAIManager initialized with model: {self.model_name}")
+        except Exception as e:
+            self.logger.error(f"Failed to initialize Gemini API: {e}")
+            self.model_name = None
+
+    def _build_prompt(self, lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str) -> str:
+        prompt = super()._build_prompt(lines, sentence, current_line, game_title)
+        return prompt
+
+    def process(self, lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str = "") -> str:
+        if self.model_name is None:
+            return "Processing failed: AI model not initialized."
+
+        if not lines or not current_line:
+            self.logger.warning(f"Invalid input for process: lines={len(lines)}, current_line={current_line.index}")
+            return "Invalid input."
+
+        try:
+            prompt = self._build_prompt(lines, sentence, current_line, game_title)
+            contents = [
+                types.Content(
+                    role="user",
+                    parts=[
+                        types.Part.from_text(text=prompt),
+                    ],
+                ),
+            ]
+            self.logger.debug(f"Generated prompt:\n{prompt}")
+            response = self.client.models.generate_content(
+                model=self.model_name,
+                contents=contents,
+                config=self.generation_config
+            )
+            self.logger.debug(f"Full response: {response}")
+            result = response.text.strip()
+            self.logger.debug(f"Received response:\n{result}")
+            return result
+        except Exception as e:
+            self.logger.error(f"Gemini processing failed: {e}")
+            return f"Processing failed: {e}"
+
+class GroqAI(AIManager):
+    def __init__(self, model, api_key, logger: Optional[logging.Logger] = None):
+        super().__init__(GroqAiConfig(model=model, api_key=api_key), logger)
+        self.api_key = self.ai_config.api_key
+        self.model_name = self.ai_config.model
+        try:
+            self.client = Groq(api_key=self.api_key)
+            self.logger.debug(f"GroqAIManager initialized with model: {self.model_name}")
+        except Exception as e:
+            self.logger.error(f"Failed to initialize Groq client: {e}")
+            self.client = None
+
+    def _build_prompt(self, lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str) -> str:
+        prompt = super()._build_prompt(lines, sentence, current_line, game_title)
+        return prompt
+
+    def process(self, lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str = "") -> str:
+        if self.client is None:
+            return "Processing failed: Groq client not initialized."
+
+        if not lines or not current_line:
+            self.logger.warning(f"Invalid input for process: lines={len(lines)}, current_line={current_line.index}")
+            return "Invalid input."
+
+        try:
+            prompt = self._build_prompt(lines, sentence, current_line, game_title)
+            self.logger.debug(f"Generated prompt:\n{prompt}")
+            completion = self.client.chat.completions.create(
+                model=self.model_name,
+                messages=[{"role": "user", "content": prompt}],
+                temperature=.5,
+                max_completion_tokens=1024,
+                top_p=1,
+                stream=False,
+                stop=None,
+            )
+            result = completion.choices[0].message.content.strip()
+            self.logger.debug(f"Received response:\n{result}")
+            return result
+        except Exception as e:
+            self.logger.error(f"Groq processing failed: {e}")
+            return f"Processing failed: {e}"
+
+ai_managers: dict[str, AIManager] = {}
+ai_manager: AIManager | None = None
+current_ai_config: Ai | None = None
+
+def get_ai_prompt_result(lines: List[GameLine], sentence: str, current_line: GameLine, game_title: str = "", force_refresh: bool = False) -> str:
+    global ai_manager, current_ai_config
+    try:
+        is_local_provider = get_config().ai.provider == AIType.LOCAL.value
+        if not is_local_provider and not is_connected():
+            logger.error("No internet connection. Unable to proceed with AI prompt.")
+            return ""
+
+        if not ai_manager or ai_config_changed(get_config().ai, current_ai_config) or force_refresh:
+            provider = get_config().ai.provider
+            if provider == AIType.GEMINI.value:
+                if get_config().ai.gemini_model in ai_managers:
+                    ai_manager = ai_managers[get_config().ai.gemini_model]
+                    logger.info(f"Reusing existing Gemini AI Manager for model: {get_config().ai.gemini_model}")
+                else:
+                    ai_manager = GeminiAI(model=get_config().ai.gemini_model, api_key=get_config().ai.gemini_api_key, logger=logger)
+            elif provider == AIType.GROQ.value:
+                if get_config().ai.groq_model in ai_managers:
+                    ai_manager = ai_managers[get_config().ai.groq_model]
+                    logger.info(f"Reusing existing Groq AI Manager for model: {get_config().ai.groq_model}")
+                else:
+                    ai_manager = GroqAI(model=get_config().ai.groq_model, api_key=get_config().ai.groq_api_key, logger=logger)
+            elif provider == AIType.LOCAL.value:
+                if get_config().ai.local_model in ai_managers:
+                    ai_manager = ai_managers[get_config().ai.local_model]
+                    logger.info(f"Reusing existing Local AI Manager for model: {get_config().ai.local_model}")
+                else:
+                    ai_manager = LocalAIManager(model=get_config().ai.local_model, logger=logger)
+            else:
+                ai_manager = None
+            if ai_manager:
+                ai_managers[ai_manager.model_name] = ai_manager
+            current_ai_config = get_config().ai
+
+        if not ai_manager:
+            logger.error("AI is enabled but the AI Manager did not initialize. Check your AI Config IN GSM.")
+            return ""
+        return ai_manager.process(lines, sentence, current_line, game_title)
+    except Exception as e:
+        logger.error("Error caught while trying to get AI prompt result. Check logs for more details.")
+        logger.debug(e, exc_info=True)
+        return ""
+
+def ai_config_changed(config, current):
+    if not current:
+        return True
+    if config.provider != current.provider:
+        return True
+    if config.provider == AIType.GEMINI.value and (config.gemini_api_key != current.gemini_api_key or config.gemini_model != current.gemini_model):
+        return True
+    if config.provider == AIType.GROQ.value and (config.groq_api_key != current.groq_api_key or config.groq_model != current.groq_model):
+        return True
+    if config.provider == AIType.LOCAL.value and config.gemini_model != current.gemini_model:
+        return True
+    if config.custom_prompt != current.custom_prompt:
+        return True
+    if config.use_canned_translation_prompt != current.use_canned_translation_prompt:
+        return True
+    if config.use_canned_context_prompt != current.use_canned_context_prompt:
+        return True
+    return False
+
+
+if __name__ == '__main__':
+    # logger.setLevel(logging.DEBUG)
+    # console_handler = logging.StreamHandler()
+    # console_handler.setLevel(logging.DEBUG)
+    # logger.addHandler(console_handler)
+    # logging.basicConfig(level=logging.DEBUG)
+    lines = [
+        # Sexual/Explicit Japanese words and phrases
+        GameLine(index=0, text="ねぇ、あたしのおっぱい、揉んでみない?", id=None, time=None, prev=None, next=None),
+        GameLine(index=1, text="お前、本当に痴女だな。股が開いてるぜ。", id=None, time=None, prev=None, next=None),
+        GameLine(index=2, text="今夜は熱い夜にしましょうね…ふふ。", id=None, time=None, prev=None, next=None),
+        GameLine(index=3, text="あぁ…もっと奥まで…ダメ…イッちゃう…!", id=None, time=None, prev=None, next=None),
+        GameLine(index=4, text="あんたみたいなやつ、生きてる価値ないわ。さっさと自害しろ。", id=None, time=None, prev=None,
+                 next=None),
+        GameLine(index=5, text="このブス!誰がお前なんかを相手にするかよ。", id=None, time=None, prev=None, next=None),
+        GameLine(index=6, text="こんにちは、元気ですか?", id=None, time=None, prev=None, next=None),
+        GameLine(index=7, text="次会ったら、ぶっ殺してやるからな。", id=None, time=None, prev=None, next=None),
+        GameLine(index=8, text="今日はいい天気ですね。", id=None, time=None, prev=None, next=None),
+        GameLine(index=9, text="お前の体、隅々まで味わい尽くしてやる。", id=None, time=None, prev=None, next=None),
+        GameLine(index=10, text="自害しろ", id=None, time=None, prev=None, next=None),
+        GameLine(index=11, text="この売女!金のために魂まで売るのか?!", id=None, time=None, prev=None, next=None),
+        GameLine(index=12, text="俺の股間のモノで黙らせてやるよ。", id=None, time=None, prev=None, next=None),
+        GameLine(index=13, text="くっ…イク…頭が…おかしくなりそう…!", id=None, time=None, prev=None, next=None),
+    ]
+
+    lines = [
+        # A back-and-forth dialogue of insults and threats
+        GameLine(index=0, text="お前、ここで何をしている?目障りだ。", id=None, time=None, prev=None, next=None),
+        GameLine(index=1, text="それはこっちのセリフだ。さっさと消えろ、クズが。", id=None, time=None, prev=None,
+                 next=None),
+        GameLine(index=2, text="口だけは達者だな。やれるもんならやってみろよ。", id=None, time=None, prev=None,
+                 next=None),
+        GameLine(index=3, text="くっ…!調子に乗るなよ…!", id=None, time=None, prev=None, next=None),
+        GameLine(index=4, text="あんたみたいなやつ、生きてる価値ないわ。さっさと自害しろ。", id=None, time=None, prev=None,
+                 next=None),
+        GameLine(index=5, text="この能無しが!誰がお前なんかを相手にするかよ。", id=None, time=None, prev=None,
+                 next=None),
+        GameLine(index=6, text="黙れ。これ以上喋るなら、その舌を引っこ抜いてやる。", id=None, time=None, prev=None,
+                 next=None),
+        GameLine(index=7, text="次会ったら、ぶっ殺してやるからな。", id=None, time=None, prev=None, next=None),
+        GameLine(index=8, text="はっ、望むところだ。返り討ちにしてやる。", id=None, time=None, prev=None, next=None),
+        GameLine(index=9, text="お前の顔も見たくない。地獄に落ちろ。", id=None, time=None, prev=None, next=None),
+        GameLine(index=10, text="自害しろ", id=None, time=None, prev=None, next=None),
+        GameLine(index=11, text="この臆病者が!逃げることしか能がないのか?!", id=None, time=None, prev=None, next=None),
+        GameLine(index=12, text="俺の拳で黙らせてやるよ。", id=None, time=None, prev=None, next=None),
+        GameLine(index=13, text="くそっ…覚えてろよ…!このままじゃ終わらせない…!", id=None, time=None, prev=None,
+                 next=None),
+    ]
+
+    sentence = "黙れ。これ以上喋るなら、その舌を引っこ抜いてやる。"
+    current_line = lines[6]
+    game_title = "Corrupted Reality"
+
+    get_config().ai.provider = "Local"
+    models = [
+        # 'google/gemma-2-2b-it',
+        # 'google/gemma-2b-it',
+        'facebook/nllb-200-distilled-600M',
+        # 'meta-llama/Llama-3.2-1B-Instruct',
+        # 'facebook/nllb-200-1.3B'
+    ]
+
+    results = []
+
+    # for model in models:
+    #     get_config().ai.local_model = model
+    #     start_time = time.time()
+    #     result = get_ai_prompt_result(lines, sentence, current_line, game_title, True)
+    #     results.append({"model": model,"response": result, "time": time.time() - start_time, "iteration": 1})
+
+    # Second Time after Already Loaded
+    for i in range(1, 500):
+        for model in models:
+            get_config().ai.local_model = model
+            start_time = time.time()
+            result = get_ai_prompt_result(lines, sentence, current_line, game_title, True)
+            print(result)
+            results.append({"model": model, "response": result, "time": time.time() - start_time, "iteration": i})
+            # results[model] = {"response": result, "time": time.time() - start_time}
+
+    # get_config().ai.provider = "Gemini"
+    #
+    # models = ['gemini-2.5-flash','gemini-2.0-flash', 'gemini-2.0-flash-lite',
+    #           'gemini-2.5-flash-lite-preview-06-17']
+    # # results = {}
+    # for model in models:
+    #     get_config().ai.gemini_model = model
+    #     start_time = time.time()
+    #     result = get_ai_prompt_result(lines, sentence, current_line, game_title, True)
+    #     results.append({"model": model, "response": result, "time": time.time() - start_time, "iteration": 1})
+    #     # results[model] = {"response": result, "time": time.time() - start_time}
+    #
+    print("Summary of results:")
+    times = []
+    for result in results:
+        times.append(result['time'])
+        print(f"Model: {result['model']}\nResult: {result['response']}\nTime: {result['time']:.2f} seconds\n{'-'*80}\n")
+
+    print(f"Average time: {sum(times)/len(times):.2f} seconds over {len(times)} runs.")
+    # Set up logging
+
+    # Test the function
+
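Annotation: the headline addition above is the new Local provider. LocalAIManager loads a Hugging Face checkpoint and translates entirely offline, defaulting to facebook/nllb-200-distilled-600M. Below is its NLLB branch reduced to a standalone sketch; this is editorial illustration rather than package code, and it assumes torch, transformers, and sentencepiece are installed (the same dependencies the diff's error message asks for).

# Standalone sketch of LocalAIManager's NLLB path (editorial, not package code).
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_name = "facebook/nllb-200-distilled-600M"  # LocalAIConfig's default model
device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)

tokenizer.src_lang = "jpn_Jpan"  # source language, exactly as set in process()
inputs = tokenizer("今日はいい天気ですね。", return_tensors="pt").to(device)
generated = model.generate(
    **inputs,
    # Force English as the target language, as the diff does.
    forced_bos_token_id=tokenizer.convert_tokens_to_ids("eng_Latn"),
    max_new_tokens=256,
)
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])

Note that in the NLLB branch only current_line.text is translated; the full dialogue-context prompt built by _build_prompt is reserved for the Seq2Seq and causal-LM branches.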
{gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/anki.py

@@ -76,10 +76,14 @@ def update_anki_card(last_note: AnkiCard, note=None, audio_path='', video_path='
 
     if note and 'fields' in note and get_config().ai.enabled:
         sentence_field = note['fields'].get(get_config().anki.sentence_field, {})
-
-
-
-
+        if not selected_lines and game_line.TL:
+            logger.info("Using TL from texthooker for AI Prompt Result")
+            translation = game_line.TL
+        else:
+            sentence_to_translate = sentence_field if sentence_field else last_note.get_field(
+                get_config().anki.sentence_field)
+            translation = get_ai_prompt_result(get_all_lines(), sentence_to_translate,
+                                               game_line, get_current_game())
        logger.info(f"AI prompt Result: {translation}")
        note['fields'][get_config().ai.anki_field] = translation
 
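Annotation: the net effect of this hunk is that update_anki_card now prefers a translation that arrived with the line itself. When no lines were manually selected and the texthooker already attached a translation (game_line.TL), that text is written to the AI field directly; get_ai_prompt_result is only called as the fallback, saving one AI round trip per card. (The four removed lines are not rendered in the source diff.)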
{gamesentenceminer-2.10.16 → gamesentenceminer-2.11.0}/GameSentenceMiner/config_gui.py

@@ -328,7 +328,7 @@ class ConfigApp:
             vad=VAD(
                 whisper_model=self.whisper_model.get(),
                 do_vad_postprocessing=self.do_vad_postprocessing.get(),
-                vosk_url='https://alphacephei.com/vosk/models/vosk-model-ja-0.22.zip' if self.vosk_url.get() == VOSK_BASE else "https://alphacephei.com/vosk/models/vosk-model-small-ja-0.22.zip",
+                # vosk_url='https://alphacephei.com/vosk/models/vosk-model-ja-0.22.zip' if self.vosk_url.get() == VOSK_BASE else "https://alphacephei.com/vosk/models/vosk-model-small-ja-0.22.zip",
                 selected_vad_model=self.selected_vad_model.get(),
                 backup_vad_model=self.backup_vad_model.get(),
                 trim_beginning=self.vad_trim_beginning.get(),

@@ -356,6 +356,7 @@ class ConfigApp:
                 gemini_api_key=self.gemini_api_key.get(),
                 api_key=self.gemini_api_key.get(),
                 groq_api_key=self.groq_api_key.get(),
+                local_model=self.local_ai_model.get(),
                 anki_field=self.ai_anki_field.get(),
                 use_canned_translation_prompt=self.use_canned_translation_prompt.get(),
                 use_canned_context_prompt=self.use_canned_context_prompt.get(),

@@ -413,13 +414,13 @@ class ConfigApp:
         for func in on_save:
             func()
 
-    def reload_settings(self):
+    def reload_settings(self, force_refresh=False):
         new_config = configuration.load_config()
         current_config = new_config.get_config()
 
         self.window.title("GameSentenceMiner Configuration - " + current_config.name)
 
-        if current_config.name != self.settings.name or self.settings.config_changed(current_config):
+        if current_config.name != self.settings.name or self.settings.config_changed(current_config) or force_refresh:
            logger.info("Config changed, reloading settings.")
            self.master_config = new_config
            self.settings = current_config

@@ -597,7 +598,7 @@ class ConfigApp:
         self.current_row += 1
 
         HoverInfoLabelWidget(vad_frame, text="Language:",
-                             tooltip="Select the language for VAD. This is used for Whisper
+                             tooltip="Select the language for VAD. This is used for Whisper Only.",
                              row=self.current_row, column=0)
         self.language = ttk.Combobox(vad_frame, values=AVAILABLE_LANGUAGES, state="readonly")
         self.language.set(self.settings.vad.language)

@@ -614,7 +615,7 @@ class ConfigApp:
 
         HoverInfoLabelWidget(vad_frame, text="Select VAD Model:", tooltip="Select which VAD model to use.",
                              foreground="dark orange", font=("Helvetica", 10, "bold"), row=self.current_row, column=0)
-        self.selected_vad_model = ttk.Combobox(vad_frame, values=[
+        self.selected_vad_model = ttk.Combobox(vad_frame, values=[SILERO, WHISPER], state="readonly")
         self.selected_vad_model.set(self.settings.vad.selected_vad_model)
         self.selected_vad_model.grid(row=self.current_row, column=1, sticky='EW', pady=2)
         self.current_row += 1

@@ -622,7 +623,7 @@ class ConfigApp:
         HoverInfoLabelWidget(vad_frame, text="Backup VAD Model:",
                              tooltip="Select which model to use as a backup if no audio is found.",
                              row=self.current_row, column=0)
-        self.backup_vad_model = ttk.Combobox(vad_frame, values=[OFF,
+        self.backup_vad_model = ttk.Combobox(vad_frame, values=[OFF, SILERO, WHISPER], state="readonly")
         self.backup_vad_model.set(self.settings.vad.backup_vad_model)
         self.backup_vad_model.grid(row=self.current_row, column=1, sticky='EW', pady=2)
         self.current_row += 1

@@ -1530,13 +1531,13 @@ class ConfigApp:
         self.polling_rate.grid(row=self.current_row, column=1, sticky='EW', pady=2)
         self.current_row += 1
 
-        HoverInfoLabelWidget(advanced_frame, text="Vosk URL:", tooltip="URL for connecting to the Vosk server.",
-
-        self.vosk_url = ttk.Combobox(advanced_frame, values=[VOSK_BASE, VOSK_SMALL], state="readonly")
-        self.vosk_url.set(
-
-        self.vosk_url.grid(row=self.current_row, column=1, sticky='EW', pady=2)
-        self.current_row += 1
+        # HoverInfoLabelWidget(advanced_frame, text="Vosk URL:", tooltip="URL for connecting to the Vosk server.",
+        #                      row=self.current_row, column=0)
+        # self.vosk_url = ttk.Combobox(advanced_frame, values=[VOSK_BASE, VOSK_SMALL], state="readonly")
+        # self.vosk_url.set(
+        #     VOSK_BASE if self.settings.vad.vosk_url == 'https://alphacephei.com/vosk/models/vosk-model-ja-0.22.zip' else VOSK_SMALL)
+        # self.vosk_url.grid(row=self.current_row, column=1, sticky='EW', pady=2)
+        # self.current_row += 1
 
         self.add_reset_button(advanced_frame, "advanced", self.current_row, 0, self.create_advanced_tab)

@@ -1568,15 +1569,14 @@ class ConfigApp:
 
         HoverInfoLabelWidget(ai_frame, text="Provider:", tooltip="Select the AI provider.", row=self.current_row,
                              column=0)
-        self.ai_provider = ttk.Combobox(ai_frame, values=[
+        self.ai_provider = ttk.Combobox(ai_frame, values=[AI_GEMINI, AI_GROQ, AI_LOCAL], state="readonly")
         self.ai_provider.set(self.settings.ai.provider)
         self.ai_provider.grid(row=self.current_row, column=1, sticky='EW', pady=2)
         self.current_row += 1
 
         HoverInfoLabelWidget(ai_frame, text="Gemini AI Model:", tooltip="Select the AI model to use.",
                              row=self.current_row, column=0)
-        self.gemini_model = ttk.Combobox(ai_frame, values=['gemini-2.5-flash', 'gemini-2.5-
-                                                           'gemini-2.5-flash-lite-preview-06-17'], state="readonly")
+        self.gemini_model = ttk.Combobox(ai_frame, values=['gemma-3n-e4b-it', 'gemini-2.5-flash-lite-preview-06-17', 'gemini-2.5-flash','gemini-2.0-flash', 'gemini-2.0-flash-lite'], state="readonly")
         try:
             self.gemini_model.set(self.settings.ai.gemini_model)
         except Exception:

@@ -1608,6 +1608,14 @@ class ConfigApp:
         self.groq_api_key.grid(row=self.current_row, column=1, sticky='EW', pady=2)
         self.current_row += 1
 
+        # red
+        HoverInfoLabelWidget(ai_frame, text="Local AI Model:", tooltip="Local AI Model to Use, Only very basic Translation is supported atm. May require some other setup, but idk."
+                             , foreground="red", font=("Helvetica", 10, "bold"), row=self.current_row, column=0)
+        self.local_ai_model = ttk.Combobox(ai_frame, values=[OFF, 'facebook/nllb-200-distilled-600M', 'facebook/nllb-200-1.3B', 'facebook/nllb-200-3.3B'])
+        self.local_ai_model.set(self.settings.ai.local_model)
+        self.local_ai_model.grid(row=self.current_row, column=1, sticky='EW', pady=2)
+        self.current_row += 1
+
         HoverInfoLabelWidget(ai_frame, text="Anki Field:", tooltip="Field in Anki for AI-generated content.",
                              row=self.current_row, column=0)
         self.ai_anki_field = ttk.Entry(ai_frame)

@@ -1661,7 +1669,7 @@ class ConfigApp:
 
     def on_profile_change(self, event):
         self.save_settings(profile_change=True)
-        self.reload_settings()
+        self.reload_settings(force_refresh=True)
         self.refresh_obs_scenes()
         if self.master_config.current_profile != DEFAULT_CONFIG:
             self.delete_profile_button.grid(row=1, column=2, pady=5)
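Annotation: with the new "Local AI Model" combobox saved into the config as local_model, the offline provider can be exercised end to end. The sketch below mirrors the __main__ test block of ai_prompting.py; it is an editorial example that assumes GSM is installed with a config on disk plus the torch/transformers dependencies.

# Editorial example, mirroring ai_prompting.py's __main__ test block.
from GameSentenceMiner.ai.ai_prompting import get_ai_prompt_result
from GameSentenceMiner.util.configuration import get_config
from GameSentenceMiner.util.text_log import GameLine

# Select the new offline provider, as the test block does.
get_config().ai.provider = "Local"  # AIType.LOCAL.value
get_config().ai.local_model = "facebook/nllb-200-distilled-600M"

# Minimal one-line log, built the same way the test block builds its GameLines.
lines = [GameLine(index=0, text="今日はいい天気ですね。", id=None, time=None, prev=None, next=None)]

# force_refresh=True rebuilds the manager even if one is already cached.
print(get_ai_prompt_result(lines, lines[0].text, lines[0], "Example Game", force_refresh=True))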
|