lattifai 0.4.5__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lattifai/__init__.py +61 -47
- lattifai/alignment/__init__.py +6 -0
- lattifai/alignment/lattice1_aligner.py +119 -0
- lattifai/alignment/lattice1_worker.py +185 -0
- lattifai/{tokenizer → alignment}/phonemizer.py +4 -4
- lattifai/alignment/segmenter.py +166 -0
- lattifai/{tokenizer → alignment}/tokenizer.py +244 -169
- lattifai/audio2.py +211 -0
- lattifai/caption/__init__.py +20 -0
- lattifai/caption/caption.py +1275 -0
- lattifai/{io → caption}/gemini_reader.py +30 -30
- lattifai/{io → caption}/gemini_writer.py +17 -17
- lattifai/{io → caption}/supervision.py +4 -3
- lattifai/caption/text_parser.py +145 -0
- lattifai/cli/__init__.py +17 -0
- lattifai/cli/alignment.py +153 -0
- lattifai/cli/caption.py +204 -0
- lattifai/cli/server.py +19 -0
- lattifai/cli/transcribe.py +197 -0
- lattifai/cli/youtube.py +128 -0
- lattifai/client.py +460 -251
- lattifai/config/__init__.py +20 -0
- lattifai/config/alignment.py +73 -0
- lattifai/config/caption.py +178 -0
- lattifai/config/client.py +46 -0
- lattifai/config/diarization.py +67 -0
- lattifai/config/media.py +335 -0
- lattifai/config/transcription.py +84 -0
- lattifai/diarization/__init__.py +5 -0
- lattifai/diarization/lattifai.py +89 -0
- lattifai/errors.py +98 -91
- lattifai/logging.py +116 -0
- lattifai/mixin.py +552 -0
- lattifai/server/app.py +420 -0
- lattifai/transcription/__init__.py +76 -0
- lattifai/transcription/base.py +108 -0
- lattifai/transcription/gemini.py +219 -0
- lattifai/transcription/lattifai.py +103 -0
- lattifai/{workflows → transcription}/prompts/__init__.py +4 -4
- lattifai/types.py +30 -0
- lattifai/utils.py +16 -44
- lattifai/workflow/__init__.py +22 -0
- lattifai/workflow/agents.py +6 -0
- lattifai/{workflows → workflow}/base.py +22 -22
- lattifai/{workflows → workflow}/file_manager.py +239 -215
- lattifai/workflow/youtube.py +564 -0
- lattifai-1.0.0.dist-info/METADATA +736 -0
- lattifai-1.0.0.dist-info/RECORD +52 -0
- {lattifai-0.4.5.dist-info → lattifai-1.0.0.dist-info}/WHEEL +1 -1
- lattifai-1.0.0.dist-info/entry_points.txt +13 -0
- {lattifai-0.4.5.dist-info → lattifai-1.0.0.dist-info}/licenses/LICENSE +1 -1
- lattifai/base_client.py +0 -126
- lattifai/bin/__init__.py +0 -3
- lattifai/bin/agent.py +0 -325
- lattifai/bin/align.py +0 -296
- lattifai/bin/cli_base.py +0 -25
- lattifai/bin/subtitle.py +0 -210
- lattifai/io/__init__.py +0 -42
- lattifai/io/reader.py +0 -85
- lattifai/io/text_parser.py +0 -75
- lattifai/io/utils.py +0 -15
- lattifai/io/writer.py +0 -90
- lattifai/tokenizer/__init__.py +0 -3
- lattifai/workers/__init__.py +0 -3
- lattifai/workers/lattice1_alpha.py +0 -284
- lattifai/workflows/__init__.py +0 -34
- lattifai/workflows/agents.py +0 -10
- lattifai/workflows/gemini.py +0 -167
- lattifai/workflows/prompts/README.md +0 -22
- lattifai/workflows/prompts/gemini/README.md +0 -24
- lattifai/workflows/prompts/gemini/transcription_gem.txt +0 -81
- lattifai/workflows/youtube.py +0 -931
- lattifai-0.4.5.dist-info/METADATA +0 -808
- lattifai-0.4.5.dist-info/RECORD +0 -39
- lattifai-0.4.5.dist-info/entry_points.txt +0 -3
- {lattifai-0.4.5.dist-info → lattifai-1.0.0.dist-info}/top_level.txt +0 -0
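Several packages were renamed in 1.0.0 (tokenizer → alignment, io → caption, workflows → workflow) and the old bin scripts were replaced by a cli package. For code that imported these modules by their file paths, the imports would have to follow the moves. A hypothetical sketch based only on the renames listed above; the real public entry points may be re-exported from lattifai/__init__.py, and importing these modules directly is an assumption:

    # 0.4.5 module paths (removed in 1.0.0):
    #   lattifai.tokenizer.tokenizer
    #   lattifai.io.gemini_reader
    #   lattifai.workflows.file_manager

    # 1.0.0 locations implied by the file moves above
    import lattifai.alignment.tokenizer
    import lattifai.caption.gemini_reader
    import lattifai.workflow.file_manager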
lattifai/errors.py
CHANGED
@@ -7,18 +7,18 @@ import colorful
 
 # Error help messages
 LATTICE_DECODING_FAILURE_HELP = (
-
-
-
-
-
-
-
-
-
-
-
-
+    "Failed to decode lattice alignment. Possible reasons:\n\n"
+    "1) Media(Audio/Video) and text content mismatch:\n"
+    " - The transcript/caption does not accurately match the media content\n"
+    " - Text may be from a different version or section of the media\n"
+    " ⚠️ Note: Gemini transcription may occasionally skip large segments of media, causing alignment failures.\n"
+    " We will detect and fix this issue in the next version.\n\n"
+    "2) Unsupported media type:\n"
+    " - Singing is not yet supported, this will be optimized in future versions\n\n"
+    "💡 Troubleshooting tips:\n"
+    " • Verify the transcript matches the media by listening to a few segments\n"
+    " • For YouTube videos, manually check if auto-generated transcript are accurate\n"
+    " • Consider using a different transcription source if Gemini results are incomplete"
 )
 
 
@@ -43,19 +43,19 @@ class LattifAIError(Exception):
         return (
             f'\n{colorful.green("🔧 Need help? Here are two ways to get support:")}\n'
             f' 1. 📝 Create a GitHub issue: {colorful.green("https://github.com/lattifai/lattifai-python/issues")}\n'
-
-
-            " - The text/
-
+            " Please include:\n"
+            " - Your audio file format and duration\n"
+            " - The text/caption content you're trying to align\n"
+            " - This error message and stack trace\n"
             f' 2. 💬 Join our Discord community: {colorful.green("https://discord.gg/vzmTzzZgNu")}\n'
-
+            " Our team and community can help you troubleshoot\n"
         )
 
     def get_message(self) -> str:
         """Return formatted error message without support information."""
         base_message = f'{colorful.red(f"[{self.error_code}] {self.message}")}'
         if self.context:
-            context_str = f'\n{colorful.yellow("Context:")} ' +
+            context_str = f'\n{colorful.yellow("Context:")} ' + ", ".join(f"{k}={v}" for k, v in self.context.items())
             base_message += context_str
         return base_message
 
@@ -71,72 +71,72 @@ class LattifAIError(Exception):
 class AudioProcessingError(LattifAIError):
     """Error during audio processing operations."""
 
-    def __init__(self, message: str,
-        context = kwargs.get(
-        if
-            context[
-        kwargs[
+    def __init__(self, message: str, media_path: Optional[str] = None, **kwargs):
+        context = kwargs.get("context", {})
+        if media_path:
+            context["media_path"] = media_path
+        kwargs["context"] = context
         super().__init__(message, **kwargs)
 
 
 class AudioLoadError(AudioProcessingError):
     """Error loading or reading audio file."""
 
-    def __init__(self,
-        message = f
+    def __init__(self, media_path: str, original_error: Optional[Exception] = None, **kwargs):
+        message = f"Failed to load audio file: {colorful.red(media_path)}"
         if original_error:
-            message += f
+            message += f" - {colorful.red(str(original_error))}"
 
-        context = kwargs.get(
-        context.update({
-        kwargs[
+        context = kwargs.get("context", {})
+        context.update({"media_path": media_path, "original_error": str(original_error) if original_error else None})
+        kwargs["context"] = context
 
-        super().__init__(message,
+        super().__init__(message, media_path=media_path, **kwargs)
 
 
 class AudioFormatError(AudioProcessingError):
     """Error with audio format or codec."""
 
-    def __init__(self,
-        message = f
-        context = kwargs.get(
-        context.update({
-        kwargs[
-        super().__init__(message,
+    def __init__(self, media_path: str, format_issue: str, **kwargs):
+        message = f"Audio format error for {colorful.red(media_path)}: {colorful.red(format_issue)}"
+        context = kwargs.get("context", {})
+        context.update({"media_path": media_path, "format_issue": format_issue})
+        kwargs["context"] = context
+        super().__init__(message, media_path=media_path, **kwargs)
 
 
-class
-    """Error during
+class CaptionProcessingError(LattifAIError):
+    """Error during caption/text processing operations."""
 
-    def __init__(self, message: str,
-        context = kwargs.get(
-        if
-            context[
-        kwargs[
+    def __init__(self, message: str, caption_path: Optional[str] = None, **kwargs):
+        context = kwargs.get("context", {})
+        if caption_path:
+            context["caption_path"] = caption_path
+        kwargs["context"] = context
         super().__init__(message, **kwargs)
 
 
-class
-    """Error parsing
+class CaptionParseError(CaptionProcessingError):
+    """Error parsing caption or text file."""
 
-    def __init__(self,
-        message = f
-        context = kwargs.get(
-        context.update({
-        kwargs[
-        super().__init__(message,
+    def __init__(self, caption_path: str, parse_issue: str, **kwargs):
+        message = f"Failed to parse caption file {caption_path}: {parse_issue}"
+        context = kwargs.get("context", {})
+        context.update({"caption_path": caption_path, "parse_issue": parse_issue})
+        kwargs["context"] = context
+        super().__init__(message, caption_path=caption_path, **kwargs)
 
 
 class AlignmentError(LattifAIError):
     """Error during audio-text alignment process."""
 
-    def __init__(self, message: str,
-        context = kwargs.get(
-        if
-            context[
-        if
-            context[
-        kwargs[
+    def __init__(self, message: str, media_path: Optional[str] = None, caption_path: Optional[str] = None, **kwargs):
+        context = kwargs.get("context", {})
+        if media_path:
+            context["media_path"] = media_path
+        if caption_path:
+            context["caption_path"] = caption_path
+        kwargs["context"] = context
         super().__init__(message, **kwargs)
 
 
@@ -144,19 +144,19 @@ class LatticeEncodingError(AlignmentError):
     """Error generating lattice graph from text."""
 
    def __init__(self, text_content: str, original_error: Optional[Exception] = None, **kwargs):
-        message =
+        message = "Failed to generate lattice graph from text"
         if original_error:
-            message += f
+            message += f": {colorful.red(str(original_error))}"
 
-        context = kwargs.get(
+        context = kwargs.get("context", {})
         context.update(
             {
-
-
-
+                "text_content_length": len(text_content),
+                "text_preview": text_content[:100] + "..." if len(text_content) > 100 else text_content,
+                "original_error": str(original_error) if original_error else None,
             }
         )
-        kwargs[
+        kwargs["context"] = context
         super().__init__(message, **kwargs)
 
 
@@ -164,28 +164,28 @@ class LatticeDecodingError(AlignmentError):
     """Error decoding lattice alignment results."""
 
     def __init__(self, lattice_id: str, original_error: Optional[Exception] = None, **kwargs):
-        message = f
+        message = f"Failed to decode lattice alignment results for lattice ID: {colorful.red(lattice_id)}"
 
         # Don't duplicate the help message if it's already in original_error
         if original_error and str(original_error) != LATTICE_DECODING_FAILURE_HELP:
-            message += f
+            message += f" - {colorful.red(str(original_error))}"
 
-        context = kwargs.get(
+        context = kwargs.get("context", {})
         # Don't store the entire help message in context to avoid duplication
         if original_error and str(original_error) != LATTICE_DECODING_FAILURE_HELP:
-            context[
-        context[
-        kwargs[
+            context["original_error"] = str(original_error)
+        context["lattice_id"] = lattice_id
+        kwargs["context"] = context
         super().__init__(message, **kwargs)
 
     def get_message(self) -> str:
         """Return formatted error message with help text."""
         base_message = f'{colorful.red(f"[{self.error_code}]")} {self.message}'
-        if self.context and self.context.get(
+        if self.context and self.context.get("lattice_id"):
             # Only show essential context (lattice_id), not the duplicated help message
             base_message += f'\n{colorful.yellow("Lattice ID:")} {self.context["lattice_id"]}'
         # Append help message once at the end
-        base_message += f
+        base_message += f"\n\n{colorful.yellow(LATTICE_DECODING_FAILURE_HELP)}"
         return base_message
 
 
@@ -193,13 +193,13 @@ class ModelLoadError(LattifAIError):
     """Error loading AI model."""
 
     def __init__(self, model_name: str, original_error: Optional[Exception] = None, **kwargs):
-        message = f
+        message = f"Failed to load model: {colorful.red(model_name)}"
         if original_error:
-            message += f
+            message += f" - {colorful.red(str(original_error))}"
 
-        context = kwargs.get(
-        context.update({
-        kwargs[
+        context = kwargs.get("context", {})
+        context.update({"model_name": model_name, "original_error": str(original_error) if original_error else None})
+        kwargs["context"] = context
         super().__init__(message, **kwargs)
 
 
@@ -207,13 +207,13 @@ class DependencyError(LattifAIError):
     """Error with required dependencies."""
 
     def __init__(self, dependency_name: str, install_command: Optional[str] = None, **kwargs):
-        message = f
+        message = f"Missing required dependency: {colorful.red(dependency_name)}"
         if install_command:
-            message += f
+            message += f"\nPlease install it using: {colorful.yellow(install_command)}"
 
-        context = kwargs.get(
-        context.update({
-        kwargs[
+        context = kwargs.get("context", {})
+        context.update({"dependency_name": dependency_name, "install_command": install_command})
+        kwargs["context"] = context
         super().__init__(message, **kwargs)
 
 
@@ -221,9 +221,9 @@ class APIError(LattifAIError):
     """Error communicating with LattifAI API."""
 
     def __init__(self, message: str, status_code: Optional[int] = None, response_text: Optional[str] = None, **kwargs):
-        context = kwargs.get(
-        context.update({
-        kwargs[
+        context = kwargs.get("context", {})
+        context.update({"status_code": status_code, "response_text": response_text})
+        kwargs["context"] = context
         super().__init__(message, **kwargs)
 
 
@@ -231,10 +231,17 @@ class ConfigurationError(LattifAIError):
     """Error with client configuration."""
 
     def __init__(self, config_issue: str, **kwargs):
-        message = f
+        message = f"Configuration error: {config_issue}"
         super().__init__(message, **kwargs)
 
 
+class QuotaExceededError(APIError):
+    """Error when user quota or API key limit is exceeded."""
+
+    def __init__(self, message: str, **kwargs):
+        super().__init__(message, status_code=402, **kwargs)
+
+
 def handle_exception(func):
     """Decorator to handle exceptions and convert them to LattifAI errors."""
 
@@ -246,11 +253,11 @@ def handle_exception(func):
             raise
         except Exception as e:
             # Convert other exceptions to LattifAI errors
-            error_msg = f
+            error_msg = f"Unexpected error in {func.__name__}: {str(e)}"
            context = {
-
-
-
+                "function": func.__name__,
+                "original_exception": e.__class__.__name__,
+                "traceback": traceback.format_exc(),
            }
            raise LattifAIError(error_msg, context=context) from e
 
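The reworked constructors above all collect their details into a context dict, and the base class exposes get_message() for formatted output. A minimal usage sketch based only on the signatures visible in this diff; the file path and the surrounding load logic are illustrative, not taken from the package:

    from lattifai.errors import AudioLoadError, LattifAIError

    def read_media_bytes(media_path: str) -> bytes:
        try:
            with open(media_path, "rb") as f:  # stand-in for real audio loading
                return f.read()
        except OSError as exc:
            # media_path and the original error land in .context via the new kwargs
            raise AudioLoadError(media_path, original_error=exc) from exc

    try:
        read_media_bytes("missing.wav")  # illustrative path
    except LattifAIError as err:
        # Prints the "[ERROR_CODE] message" line plus any context key=value pairs
        print(err.get_message())
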
lattifai/logging.py
ADDED
@@ -0,0 +1,116 @@
+"""Unified logging configuration for LattifAI."""
+
+import logging
+import sys
+from typing import Optional
+
+# Default log format
+DEFAULT_FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+SIMPLE_FORMAT = "%(levelname)s: %(message)s"
+
+
+def setup_logger(
+    name: str,
+    level: Optional[int] = None,
+    format_string: Optional[str] = None,
+    handler: Optional[logging.Handler] = None,
+) -> logging.Logger:
+    """
+    Setup logger with consistent formatting for LattifAI modules.
+
+    Args:
+        name: Logger name (will be prefixed with 'lattifai.')
+        level: Logging level (defaults to INFO)
+        format_string: Custom format string (defaults to SIMPLE_FORMAT)
+        handler: Custom handler (defaults to StreamHandler)
+
+    Returns:
+        Configured logger instance
+
+    Examples:
+        >>> logger = setup_logger(__name__)
+        >>> logger.info("Processing started")
+
+        >>> logger = setup_logger("alignment", level=logging.DEBUG)
+        >>> logger.debug("Debug information")
+    """
+    # Ensure name is prefixed with 'lattifai.'
+    if not name.startswith("lattifai."):
+        logger_name = f"lattifai.{name}"
+    else:
+        logger_name = name
+
+    logger = logging.getLogger(logger_name)
+
+    # Set level
+    if level is None:
+        level = logging.INFO
+    logger.setLevel(level)
+
+    # Avoid duplicate handlers
+    if logger.handlers:
+        return logger
+
+    # Setup handler
+    if handler is None:
+        handler = logging.StreamHandler(sys.stderr)
+
+    # Setup formatter
+    if format_string is None:
+        format_string = SIMPLE_FORMAT
+    formatter = logging.Formatter(format_string)
+    handler.setFormatter(formatter)
+
+    logger.addHandler(handler)
+    return logger
+
+
+def get_logger(name: str) -> logging.Logger:
+    """
+    Get existing logger or create new one with default settings.
+
+    Args:
+        name: Logger name (will be prefixed with 'lattifai.')
+
+    Returns:
+        Logger instance
+    """
+    if not name.startswith("lattifai."):
+        logger_name = f"lattifai.{name}"
+    else:
+        logger_name = name
+
+    logger = logging.getLogger(logger_name)
+
+    # If logger has no handlers, set it up with defaults
+    if not logger.handlers:
+        return setup_logger(name)
+
+    return logger
+
+
+def set_log_level(level: int) -> None:
+    """
+    Set log level for all LattifAI loggers.
+
+    Args:
+        level: Logging level (e.g., logging.DEBUG, logging.INFO)
+
+    Examples:
+        >>> from lattifai.logging import set_log_level
+        >>> import logging
+        >>> set_log_level(logging.DEBUG)
+    """
+    root_logger = logging.getLogger("lattifai")
+    root_logger.setLevel(level)
+    for handler in root_logger.handlers:
+        handler.setLevel(level)
+
+
+__all__ = [
+    "setup_logger",
+    "get_logger",
+    "set_log_level",
+    "DEFAULT_FORMAT",
+    "SIMPLE_FORMAT",
+]
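The new module keeps every logger under the lattifai. namespace, attaches a single stderr handler, and skips reconfiguration when handlers already exist. A short usage sketch combining the three helpers shown above (module names and messages are illustrative):

    import logging

    from lattifai.logging import DEFAULT_FORMAT, get_logger, set_log_level, setup_logger

    # Resolves to the "lattifai.alignment" logger and configures it on first use
    logger = get_logger("alignment")
    logger.info("alignment started")  # emitted to stderr using SIMPLE_FORMAT

    # Calling again returns the already-configured logger without adding a second handler
    assert get_logger("alignment") is logger

    # A per-module logger with the timestamped format and DEBUG level
    worker_logger = setup_logger("alignment.worker", level=logging.DEBUG, format_string=DEFAULT_FORMAT)
    worker_logger.debug("worker details")

    # Adjusts the shared "lattifai" parent logger; per-module loggers keep their own levels
    set_log_level(logging.DEBUG)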