ol-openedx-course-translations 0.1.0__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ol-openedx-course-translations might be problematic. Click here for more details.

Files changed (35) hide show
  1. ol_openedx_course_translations/apps.py +12 -2
  2. ol_openedx_course_translations/glossaries/machine_learning/ar.txt +175 -0
  3. ol_openedx_course_translations/glossaries/machine_learning/de.txt +175 -0
  4. ol_openedx_course_translations/glossaries/machine_learning/el.txt +988 -0
  5. ol_openedx_course_translations/glossaries/machine_learning/es.txt +175 -0
  6. ol_openedx_course_translations/glossaries/machine_learning/fr.txt +175 -0
  7. ol_openedx_course_translations/glossaries/machine_learning/ja.txt +175 -0
  8. ol_openedx_course_translations/glossaries/machine_learning/pt-br.txt +175 -0
  9. ol_openedx_course_translations/glossaries/machine_learning/ru.txt +213 -0
  10. ol_openedx_course_translations/management/commands/sync_and_translate_language.py +1866 -0
  11. ol_openedx_course_translations/management/commands/translate_course.py +419 -470
  12. ol_openedx_course_translations/middleware.py +143 -0
  13. ol_openedx_course_translations/providers/__init__.py +1 -0
  14. ol_openedx_course_translations/providers/base.py +278 -0
  15. ol_openedx_course_translations/providers/deepl_provider.py +292 -0
  16. ol_openedx_course_translations/providers/llm_providers.py +565 -0
  17. ol_openedx_course_translations/settings/cms.py +17 -0
  18. ol_openedx_course_translations/settings/common.py +57 -30
  19. ol_openedx_course_translations/settings/lms.py +15 -0
  20. ol_openedx_course_translations/tasks.py +222 -0
  21. ol_openedx_course_translations/urls.py +16 -0
  22. ol_openedx_course_translations/utils/__init__.py +0 -0
  23. ol_openedx_course_translations/utils/command_utils.py +197 -0
  24. ol_openedx_course_translations/utils/constants.py +216 -0
  25. ol_openedx_course_translations/utils/course_translations.py +581 -0
  26. ol_openedx_course_translations/utils/translation_sync.py +808 -0
  27. ol_openedx_course_translations/views.py +73 -0
  28. ol_openedx_course_translations-0.3.0.dist-info/METADATA +407 -0
  29. ol_openedx_course_translations-0.3.0.dist-info/RECORD +35 -0
  30. ol_openedx_course_translations-0.3.0.dist-info/entry_points.txt +5 -0
  31. ol_openedx_course_translations-0.1.0.dist-info/METADATA +0 -63
  32. ol_openedx_course_translations-0.1.0.dist-info/RECORD +0 -11
  33. ol_openedx_course_translations-0.1.0.dist-info/entry_points.txt +0 -2
  34. {ol_openedx_course_translations-0.1.0.dist-info → ol_openedx_course_translations-0.3.0.dist-info}/WHEEL +0 -0
  35. {ol_openedx_course_translations-0.1.0.dist-info → ol_openedx_course_translations-0.3.0.dist-info}/licenses/LICENSE.txt +0 -0
@@ -0,0 +1,565 @@
1
+ """LLM-based translation providers."""
2
+
3
+ import logging
4
+ import re
5
+ from pathlib import Path
6
+ from typing import Any
7
+
8
+ import srt
9
+ from litellm import completion
10
+
11
+ from .base import TranslationProvider, load_glossary
12
+
13
# Module-level logger; all providers in this module log through it.
logger = logging.getLogger(__name__)

# Human-readable language names for LLM prompts.
# Used to render the target language inside the system prompt; unknown codes
# fall back to the raw code (see the .get() calls in the prompt builders).
# NOTE(review): "kr" is used as the Korean key here, but the ISO 639-1 code
# is "ko" — confirm the rest of the pipeline really keys Korean as "kr".
LANGUAGE_DISPLAY_NAMES = {
    "en": "English",
    "de": "Deutsch",
    "es": "Español",
    "fr": "Français",
    "pt-br": "Português - Brasil",
    "ru": "Русский",
    "hi": "हिंदी",
    "el": "ελληνικά",
    "ja": "日本語",
    "ar": "العربية",
    "zh": "中文",
    "tr": "Türkçe",
    "sq": "Shqip",
    "kr": "한국어",
    "id": "Bahasa Indonesia",
}

# LLM error detection keywords.
# Substrings matched (case-insensitively) against exception text in
# translate_subtitles to decide whether an error is size/quota related and
# therefore worth retrying with a smaller batch.
LLM_ERROR_KEYWORDS = [
    "token",
    "quota",
    "limit",
    "too large",
    "context_length_exceeded",
    "503",
    "timeout",
]

# LLM explanation phrases to filter from responses.
# Lines containing any of these (lowercased) are treated as commentary and
# dropped by the fallback parser in _parse_text_response.
LLM_EXPLANATION_KEYWORDS = [
    "here is",
    "here's",
    "translation:",
    "translated text:",
    "note:",
    "explanation:",
    "i have translated",
    "i've translated",
]
# Translation markers for structured responses.
# The text prompt instructs the model to wrap its output in these markers;
# _parse_text_response extracts the content between them.
TRANSLATION_MARKER_START = ":::TRANSLATION_START:::"
TRANSLATION_MARKER_END = ":::TRANSLATION_END:::"
60
+
61
class LLMProvider(TranslationProvider):
    """
    Base class for LLM-based providers (OpenAI, Gemini, Mistral) that use
    structured prompting through litellm's ``completion`` API.
    """

    def __init__(
        self,
        primary_api_key: str,
        repair_api_key: str | None = None,
        model_name: str | None = None,
    ):
        """
        Initialize LLM provider with API keys and model name.

        Args:
            primary_api_key: API key for the LLM service
            repair_api_key: API key for repair service (optional)
            model_name: Name of the LLM model to use (litellm model string,
                e.g. "openai/gpt-5.2")
        """
        super().__init__(primary_api_key, repair_api_key)
        self.model_name = model_name

    def _append_glossary(
        self,
        system_prompt: str,
        target_language: str,
        glossary_directory: str | None,
    ) -> str:
        """
        Append glossary terms to a system prompt when a glossary is available.

        Shared by the subtitle and text prompt builders so the glossary
        section is formatted identically in both.

        Args:
            system_prompt: Prompt text built so far
            target_language: Target language code
            glossary_directory: Path to glossary directory (optional)

        Returns:
            The prompt, extended with glossary terms when any were loaded
        """
        if glossary_directory:
            glossary_terms = load_glossary(target_language, glossary_directory)
            if glossary_terms:
                system_prompt += (
                    f"\nGLOSSARY TERMS (use these translations):\n{glossary_terms}\n"
                )
        return system_prompt

    def _get_subtitle_system_prompt(
        self,
        target_language: str,
        glossary_directory: str | None = None,
    ) -> str:
        """
        Generate system prompt for subtitle translation.

        Creates detailed prompts with rules for subtitle translation,
        including glossary terms if provided.

        Args:
            target_language: Target language code
            glossary_directory: Path to glossary directory (optional)

        Returns:
            System prompt string for subtitle translation
        """
        # Unknown codes fall back to the raw language code in the prompt.
        target_language_display_name = LANGUAGE_DISPLAY_NAMES.get(
            target_language, target_language
        )

        system_prompt = (
            f"You are a professional subtitle translator. "
            f"Translate the following English subtitles to "
            f"{target_language_display_name}.\n\n"
            "INPUT FORMAT:\n"
            ":::ID:::\n"
            "Text to translate\n\n"
            "OUTPUT FORMAT (exactly):\n"
            ":::ID:::\n"
            "Translated text\n\n"
            "RULES:\n"
            "1. Preserve ALL :::ID::: markers exactly as given.\n"
            "2. Every input ID MUST appear in output with its translation.\n"
            "3. One ID = one translation. "
            "NEVER merge or split content across IDs.\n"
            "4. Keep proper nouns, brand names, and acronyms unchanged.\n"
            "5. Use natural phrasing appropriate for subtitles.\n"
        )

        return self._append_glossary(
            system_prompt, target_language, glossary_directory
        )

    def _get_text_system_prompt(
        self,
        target_language: str,
        glossary_directory: str | None = None,
    ) -> str:
        """
        Generate system prompt for text/HTML/XML translation.

        Creates detailed prompts with rules for text translation,
        including glossary terms if provided.

        Args:
            target_language: Target language code
            glossary_directory: Path to glossary directory (optional)

        Returns:
            System prompt string for text translation
        """
        target_language_display_name = LANGUAGE_DISPLAY_NAMES.get(
            target_language, target_language
        )

        system_prompt = (
            f"You are a professional translator. "
            f"Translate the following English text to "
            f"{target_language_display_name}.\n\n"
            f"OUTPUT FORMAT (exactly):\n"
            f"{TRANSLATION_MARKER_START}\n"
            "Your translated text here\n"
            f"{TRANSLATION_MARKER_END}\n\n"
            "CRITICAL RULES FOR XML/HTML TAGS:\n"
            "1. NEVER translate or modify XML/HTML tags, tag names, or attributes except display_name.\n"  # noqa: E501
            "2. XML/HTML tags include anything within angle brackets: < >.\n"
            '3. Tag attributes (name="value") must remain in English.\n'
            "4. Only translate the TEXT CONTENT between tags.\n"
            "5. Preserve ALL tags exactly as they appear in the input.\n"
            "6. DO NOT add display_name attribute if it is missing.\n"
            "7. Examples of what NOT to translate:\n"
            "   - <video>, <problem>, <html>, <div>, <p>, etc.\n"
            "   - Attributes: url_name, filename, src, etc.\n"
            "   - Self-closing tags: <vertical />, <sequential />\n\n"
            "GENERAL TRANSLATION RULES:\n"
            "1. Output ONLY the translation between the markers.\n"
            "2. Maintain the original formatting and structure.\n"
            "3. Keep proper nouns, brand names, and acronyms unchanged.\n"
            "4. Do NOT include explanations, notes, or commentary.\n"
            "5. Preserve spacing, line breaks, and indentation.\n"
        )

        return self._append_glossary(
            system_prompt, target_language, glossary_directory
        )

    def _parse_structured_response(
        self, llm_response_text: str, original_subtitle_batch: list[srt.Subtitle]
    ) -> list[srt.Subtitle]:
        """
        Parse the structured response and map back to original blocks.

        Extracts translated content from LLM response using ID markers and maps
        back to original subtitle objects preserving timestamps.

        Args:
            llm_response_text: Raw response text from LLM
            original_subtitle_batch: Original subtitle batch for reference

        Returns:
            List of parsed subtitle objects with translations
        """
        parsed_subtitle_list = []

        # Capture each ":::<id>:::" marker and everything up to the next
        # marker (or end of text). DOTALL lets translations span lines.
        subtitle_id_pattern = re.compile(
            r":::(\d+):::\s*(.*?)(?=(?::::\d+:::|$))", re.DOTALL
        )
        subtitle_matches = subtitle_id_pattern.findall(llm_response_text)

        translation_map = {
            subtitle_id_str.strip(): translated_text.strip()
            for subtitle_id_str, translated_text in subtitle_matches
        }

        for original_subtitle in original_subtitle_batch:
            subtitle_id_key = str(original_subtitle.index)
            translated_content = translation_map.get(subtitle_id_key)
            if translated_content is None:
                # Keep the cue (with original timing) but leave it empty so
                # downstream validation can detect the gap.
                logger.warning(
                    "Block %s missing in translation response. Leaving empty.",
                    subtitle_id_key,
                )
                translated_content = ""
            parsed_subtitle_list.append(
                srt.Subtitle(
                    index=original_subtitle.index,
                    start=original_subtitle.start,
                    end=original_subtitle.end,
                    content=translated_content,
                )
            )

        return parsed_subtitle_list

    def _parse_text_response(self, llm_response_text: str) -> str:
        """
        Parse the structured text translation response.

        Extracts translated content between translation markers, filtering out
        explanations and metadata.

        Args:
            llm_response_text: Raw response text from LLM

        Returns:
            Cleaned translated text
        """
        # Try to extract content between markers
        start_idx = llm_response_text.find(TRANSLATION_MARKER_START)
        end_idx = llm_response_text.find(TRANSLATION_MARKER_END)

        if start_idx != -1 and end_idx != -1 and start_idx < end_idx:
            # Extract content between markers
            translated_content = llm_response_text[
                start_idx + len(TRANSLATION_MARKER_START) : end_idx
            ]
            return translated_content.strip()

        # Fallback: if markers not found, try to extract without explanation
        lines = llm_response_text.split("\n")
        filtered_lines = []
        skip_explanation = False

        for line in lines:
            lower_line = line.lower().strip()
            # Skip lines that look like explanations
            if any(phrase in lower_line for phrase in LLM_EXPLANATION_KEYWORDS):
                skip_explanation = True
                continue

            # If we hit the translation markers in any form, start including
            if TRANSLATION_MARKER_START.lower() in lower_line:
                skip_explanation = False
                continue

            if TRANSLATION_MARKER_END.lower() in lower_line:
                break

            if not skip_explanation and line.strip():
                filtered_lines.append(line)

        result = "\n".join(filtered_lines).strip()

        # If we still have no result, return the original response
        return result if result else llm_response_text.strip()

    def _call_llm(
        self, system_prompt: str, user_content: str, **additional_kwargs: Any
    ) -> str:
        """
        Call the LLM API with system and user prompts.

        Args:
            system_prompt: System prompt defining LLM behavior
            user_content: User content to translate
            **additional_kwargs: Additional arguments for the API call

        Returns:
            LLM response content as string (empty string when the provider
            returns no content)
        """
        llm_messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_content},
        ]
        llm_response = completion(
            model=self.model_name,
            messages=llm_messages,
            api_key=self.primary_api_key,
            **additional_kwargs,
        )
        response_content = llm_response.choices[0].message.content
        # Providers can return null content (e.g. on a refusal); guard against
        # AttributeError on .strip() and report an empty response instead.
        return response_content.strip() if response_content else ""

    def translate_subtitles(
        self,
        subtitle_list: list[srt.Subtitle],
        target_language: str,
        glossary_directory: str | None = None,
    ) -> list[srt.Subtitle]:
        """
        Translate subtitles using LLM.

        Processes subtitles in batches with dynamic batch sizing to handle API
        limits: starts with a single batch of everything, then halves the batch
        size whenever a size/quota-related error is detected.

        Args:
            subtitle_list: List of subtitle objects to translate
            target_language: Target language code
            glossary_directory: Path to glossary directory (optional)

        Returns:
            List of translated subtitle objects

        Raises:
            Exception: Re-raises API errors that are not size-related, or any
                error that persists at batch size 1.
        """
        system_prompt = self._get_subtitle_system_prompt(
            target_language, glossary_directory
        )

        translated_subtitle_list = []
        current_batch_size = len(subtitle_list)

        current_index = 0
        while current_index < len(subtitle_list):
            subtitle_batch = subtitle_list[
                current_index : current_index + current_batch_size
            ]

            # Each cue becomes ":::<id>:::", its text, then a blank separator.
            user_payload_parts = []
            for subtitle_item in subtitle_batch:
                user_payload_parts.extend(
                    (f":::{subtitle_item.index}:::", subtitle_item.content, "")
                )
            user_payload = "\n".join(user_payload_parts)

            logger.info(
                "  Translating batch starting at ID %s (%s blocks)...",
                subtitle_batch[0].index,
                len(subtitle_batch),
            )

            try:
                llm_response_text = self._call_llm(system_prompt, user_payload)
                translated_batch = self._parse_structured_response(
                    llm_response_text, subtitle_batch
                )
                translated_subtitle_list.extend(translated_batch)
                current_index += current_batch_size

            except Exception as llm_error:
                error_message = str(llm_error).lower()
                # Unknown failure modes are not retried.
                if not any(
                    error_term in error_message for error_term in LLM_ERROR_KEYWORDS
                ):
                    raise
                if current_batch_size <= 1:
                    logger.exception("Failed even with batch size 1")
                    raise
                # Size/quota-style error: halve the batch and retry the same
                # slice (current_index is deliberately not advanced).
                logger.warning("Error: %s. Reducing batch size...", llm_error)
                current_batch_size = max(1, current_batch_size // 2)

        return translated_subtitle_list

    def translate_text(
        self,
        source_text: str,
        target_language: str,
        tag_handling: str | None = None,  # noqa: ARG002
        glossary_directory: str | None = None,
    ) -> str:
        """
        Translate text using LLM.

        Handles plain text, HTML, and XML content with appropriate prompting.
        On known transport/value errors the original text is returned
        unchanged (best-effort behavior).

        Args:
            source_text: Text to translate
            target_language: Target language code
            tag_handling: How to handle XML/HTML tags (not used for LLM)
            glossary_directory: Path to glossary directory (optional)

        Returns:
            Translated text, or the original text when empty/whitespace-only
            or when the LLM call fails with a handled error
        """
        if not source_text or not source_text.strip():
            return source_text

        system_prompt = self._get_text_system_prompt(
            target_language, glossary_directory
        )

        try:
            llm_response = self._call_llm(system_prompt, source_text)
            # NOTE(review): logs the full source and response at INFO level —
            # confirm this verbosity is intended outside of debugging.
            logger.info(
                "\n\n\nSource Text:\n%s\n LLM Response:\n%s\n\n",
                source_text,
                llm_response,
            )
            return self._parse_text_response(llm_response)
        except (ValueError, ConnectionError) as llm_error:
            logger.warning("LLM translation failed: %s", llm_error)
            return source_text

    def translate_document(
        self,
        input_file_path: Path,
        output_file_path: Path,
        source_language: str,  # noqa: ARG002
        target_language: str,
        glossary_directory: str | None = None,
    ) -> None:
        """
        Translate document by reading and translating content.

        Handles SRT files using subtitle translation and other files as text.
        Files are read and written as UTF-8.

        Args:
            input_file_path: Path to input file
            output_file_path: Path to output file
            source_language: Source language code (not used)
            target_language: Target language code
            glossary_directory: Path to glossary directory (optional)
        """
        # For SRT files, use subtitle translation
        if input_file_path.suffix == ".srt":
            srt_content = input_file_path.read_text(encoding="utf-8")
            subtitle_list = list(srt.parse(srt_content))

            # translate_srt_with_validation is provided by TranslationProvider.
            translated_subtitle_list = self.translate_srt_with_validation(
                subtitle_list, target_language, glossary_directory
            )

            translated_srt_content = srt.compose(translated_subtitle_list)
            output_file_path.write_text(translated_srt_content, encoding="utf-8")
        else:
            # For other files, treat as text
            file_content = input_file_path.read_text(encoding="utf-8")
            translated_file_content = self.translate_text(
                file_content, target_language, glossary_directory=glossary_directory
            )
            output_file_path.write_text(translated_file_content, encoding="utf-8")
472
+
473
+
474
class OpenAIProvider(LLMProvider):
    """OpenAI translation provider."""

    def __init__(
        self,
        primary_api_key: str,
        repair_api_key: str | None = None,
        model_name: str | None = None,
    ):
        """
        Initialize OpenAI provider.

        Prefixes the model name with "openai/" so litellm routes the call to
        the OpenAI backend.

        Args:
            primary_api_key: OpenAI API key
            repair_api_key: API key for repair service (optional)
            model_name: OpenAI model name (e.g., "gpt-5.2")

        Raises:
            ValueError: If model_name is not provided
        """
        if not model_name:
            msg = "model_name is required for OpenAIProvider"
            raise ValueError(msg)
        super().__init__(primary_api_key, repair_api_key, f"openai/{model_name}")

    # NOTE: the previous _call_llm override only delegated to super() with an
    # identical signature; it was removed as dead code. Behavior is unchanged
    # because LLMProvider._call_llm is inherited directly.
514
+
515
+
516
class GeminiProvider(LLMProvider):
    """Gemini translation provider."""

    def __init__(
        self,
        primary_api_key: str,
        repair_api_key: str | None = None,
        model_name: str | None = None,
    ):
        """
        Initialize Gemini provider.

        The model name is namespaced as "gemini/<model_name>" so litellm
        dispatches requests to the Gemini backend.

        Args:
            primary_api_key: Gemini API key
            repair_api_key: API key for repair service (optional)
            model_name: Gemini model name (e.g., "gemini-3-pro-preview")

        Raises:
            ValueError: If model_name is not provided
        """
        if model_name:
            super().__init__(primary_api_key, repair_api_key, f"gemini/{model_name}")
        else:
            msg = "model_name is required for GeminiProvider"
            raise ValueError(msg)
540
+
541
+
542
class MistralProvider(LLMProvider):
    """Mistral translation provider."""

    def __init__(
        self,
        primary_api_key: str,
        repair_api_key: str | None = None,
        model_name: str | None = None,
    ):
        """
        Initialize Mistral provider.

        The model name is namespaced as "mistral/<model_name>" so litellm
        dispatches requests to the Mistral backend.

        Args:
            primary_api_key: Mistral API key
            repair_api_key: API key for repair service (optional)
            model_name: Mistral model name (e.g., "mistral-large-latest")

        Raises:
            ValueError: If model_name is not provided
        """
        if model_name:
            super().__init__(primary_api_key, repair_api_key, f"mistral/{model_name}")
        else:
            msg = "model_name is required for MistralProvider"
            raise ValueError(msg)
@@ -0,0 +1,17 @@
1
+ # noqa: INP001
2
+
3
+ """Settings to provide to edX"""
4
+
5
+ from ol_openedx_course_translations.settings.common import apply_common_settings
6
+
7
+
8
def plugin_settings(settings):
    """
    Populate cms settings.

    Applies the shared translation defaults and registers the CMS-specific
    cookie-reset middleware on the settings object.
    """
    apply_common_settings(settings)
    settings.MIDDLEWARE.append(
        "ol_openedx_course_translations.middleware.CourseLanguageCookieResetMiddleware"
    )
@@ -1,37 +1,64 @@
1
1
  # noqa: INP001
2
2
 
3
- """Settings to provide to edX"""
3
+ """Common settings for LMS and CMS to provide to edX"""
4
4
 
5
5
 
6
def apply_common_settings(settings):
    """
    Apply custom settings function for LMS and CMS settings.

    Configures translation-related settings including language selection,
    supported file types, translation providers, and repository settings.

    Args:
        settings: Django settings object to modify
    """
    # Declare every default once; each key becomes an attribute on the
    # provided settings object below.
    default_settings = {
        "ENABLE_AUTO_LANGUAGE_SELECTION": False,
        "AUTO_LANGUAGE_SELECTION_EXEMPT_PATHS": ["admin", "sysadmin", "instructor"],
        "COURSE_TRANSLATIONS_TARGET_DIRECTORIES": [
            "about",
            "course",
            "chapter",
            "html",
            "info",
            "problem",
            "sequential",
            "vertical",
            "video",
            "static",
            "tabs",
        ],
        "COURSE_TRANSLATIONS_SUPPORTED_ARCHIVE_EXTENSIONS": [
            ".tar.gz",
            ".tgz",
            ".tar",
        ],
        "COURSE_TRANSLATIONS_TRANSLATABLE_EXTENSIONS": [".html", ".xml", ".srt"],
        "TRANSLATIONS_PROVIDERS": {
            "default_provider": "mistral",
            "deepl": {"api_key": ""},
            "openai": {"api_key": "", "default_model": "gpt-5.2"},
            "gemini": {"api_key": "", "default_model": "gemini-3-pro-preview"},
            "mistral": {"api_key": "", "default_model": "mistral-large-latest"},
        },
        "TRANSLATIONS_GITHUB_TOKEN": "",
        # Translation repository settings
        "TRANSLATIONS_REPO_PATH": "",
        "TRANSLATIONS_REPO_URL": (
            "https://github.com/mitodl/mitxonline-translations.git"
        ),
    }
    for setting_name, setting_value in default_settings.items():
        setattr(settings, setting_name, setting_value)
@@ -0,0 +1,15 @@
1
+ # noqa: INP001
2
+
3
+ """Settings to provide to edX"""
4
+
5
+ from ol_openedx_course_translations.settings.common import apply_common_settings
6
+
7
+
8
def plugin_settings(settings):
    """
    Populate lms settings.

    Applies the shared translation defaults and registers the LMS course
    language cookie middleware on the settings object.
    """
    apply_common_settings(settings)
    settings.MIDDLEWARE.append(
        "ol_openedx_course_translations.middleware.CourseLanguageCookieMiddleware"
    )