nemo_evaluator_launcher-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of nemo-evaluator-launcher has been flagged as potentially problematic.

Files changed (57)
  1. nemo_evaluator_launcher/__init__.py +65 -0
  2. nemo_evaluator_launcher/api/__init__.py +24 -0
  3. nemo_evaluator_launcher/api/functional.py +678 -0
  4. nemo_evaluator_launcher/api/types.py +89 -0
  5. nemo_evaluator_launcher/api/utils.py +19 -0
  6. nemo_evaluator_launcher/cli/__init__.py +15 -0
  7. nemo_evaluator_launcher/cli/export.py +148 -0
  8. nemo_evaluator_launcher/cli/info.py +117 -0
  9. nemo_evaluator_launcher/cli/kill.py +39 -0
  10. nemo_evaluator_launcher/cli/ls_runs.py +113 -0
  11. nemo_evaluator_launcher/cli/ls_tasks.py +134 -0
  12. nemo_evaluator_launcher/cli/main.py +143 -0
  13. nemo_evaluator_launcher/cli/run.py +135 -0
  14. nemo_evaluator_launcher/cli/status.py +120 -0
  15. nemo_evaluator_launcher/cli/version.py +52 -0
  16. nemo_evaluator_launcher/common/__init__.py +16 -0
  17. nemo_evaluator_launcher/common/execdb.py +189 -0
  18. nemo_evaluator_launcher/common/helpers.py +194 -0
  19. nemo_evaluator_launcher/common/logging_utils.py +349 -0
  20. nemo_evaluator_launcher/common/mapping.py +295 -0
  21. nemo_evaluator_launcher/configs/__init__.py +15 -0
  22. nemo_evaluator_launcher/configs/default.yaml +28 -0
  23. nemo_evaluator_launcher/configs/deployment/nim.yaml +32 -0
  24. nemo_evaluator_launcher/configs/deployment/none.yaml +16 -0
  25. nemo_evaluator_launcher/configs/deployment/sglang.yaml +38 -0
  26. nemo_evaluator_launcher/configs/deployment/vllm.yaml +41 -0
  27. nemo_evaluator_launcher/configs/execution/lepton/default.yaml +92 -0
  28. nemo_evaluator_launcher/configs/execution/local.yaml +17 -0
  29. nemo_evaluator_launcher/configs/execution/slurm/default.yaml +33 -0
  30. nemo_evaluator_launcher/executors/__init__.py +22 -0
  31. nemo_evaluator_launcher/executors/base.py +97 -0
  32. nemo_evaluator_launcher/executors/lepton/__init__.py +16 -0
  33. nemo_evaluator_launcher/executors/lepton/deployment_helpers.py +589 -0
  34. nemo_evaluator_launcher/executors/lepton/executor.py +905 -0
  35. nemo_evaluator_launcher/executors/lepton/job_helpers.py +394 -0
  36. nemo_evaluator_launcher/executors/local/__init__.py +15 -0
  37. nemo_evaluator_launcher/executors/local/executor.py +491 -0
  38. nemo_evaluator_launcher/executors/local/run.template.sh +88 -0
  39. nemo_evaluator_launcher/executors/registry.py +38 -0
  40. nemo_evaluator_launcher/executors/slurm/__init__.py +15 -0
  41. nemo_evaluator_launcher/executors/slurm/executor.py +996 -0
  42. nemo_evaluator_launcher/exporters/__init__.py +36 -0
  43. nemo_evaluator_launcher/exporters/base.py +112 -0
  44. nemo_evaluator_launcher/exporters/gsheets.py +391 -0
  45. nemo_evaluator_launcher/exporters/local.py +488 -0
  46. nemo_evaluator_launcher/exporters/mlflow.py +448 -0
  47. nemo_evaluator_launcher/exporters/registry.py +40 -0
  48. nemo_evaluator_launcher/exporters/utils.py +669 -0
  49. nemo_evaluator_launcher/exporters/wandb.py +376 -0
  50. nemo_evaluator_launcher/package_info.py +38 -0
  51. nemo_evaluator_launcher/resources/mapping.toml +344 -0
  52. nemo_evaluator_launcher-0.1.0.dist-info/METADATA +494 -0
  53. nemo_evaluator_launcher-0.1.0.dist-info/RECORD +57 -0
  54. nemo_evaluator_launcher-0.1.0.dist-info/WHEEL +5 -0
  55. nemo_evaluator_launcher-0.1.0.dist-info/entry_points.txt +3 -0
  56. nemo_evaluator_launcher-0.1.0.dist-info/licenses/LICENSE +451 -0
  57. nemo_evaluator_launcher-0.1.0.dist-info/top_level.txt +1 -0
nemo_evaluator_launcher/common/helpers.py
@@ -0,0 +1,194 @@
+ # SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ # SPDX-License-Identifier: Apache-2.0
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #
+ import base64
+ import copy
+ import datetime
+ from typing import Optional
+
+ import yaml
+ from omegaconf import DictConfig, OmegaConf
+
+ from nemo_evaluator_launcher.common.logging_utils import logger
+
+
+ def _yaml_to_echo_command(yaml_str: str, filename: str = "config_ef.yaml") -> str:
+     yaml_str_b64 = base64.b64encode(yaml_str.encode("utf-8")).decode("utf-8")
+     return f'echo "{yaml_str_b64}" | base64 -d > {filename}'
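The base64 round trip above exists so that the generated config survives shell quoting and embedded newlines when it is inlined into a single command. A minimal standalone sketch of the same idea, with an invented payload:

import base64

yaml_str = "config:\n  params:\n    limit_samples: 10\n"  # invented payload
b64 = base64.b64encode(yaml_str.encode("utf-8")).decode("utf-8")
cmd = f'echo "{b64}" | base64 -d > config_ef.yaml'  # same shape as the command returned above
# Decoding the embedded payload recovers the YAML byte for byte:
assert base64.b64decode(b64).decode("utf-8") == yaml_str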
+
+
+ def get_eval_factory_config(
+     cfg: DictConfig, user_task_config: DictConfig, task_definition: dict
+ ) -> dict:
+     """Extract config fields for eval factory.
+
+     This function extracts the config field similar to how overrides are handled.
+     """
+     # Extract config fields similar to overrides - convert to basic Python types first
+     cfg_config = cfg.evaluation.get("config", {})
+     user_config = user_task_config.get("config", {})
+
+     # Convert OmegaConf objects to basic Python types
+     if cfg_config:
+         cfg_config = OmegaConf.to_container(cfg_config, resolve=True)
+     if user_config:
+         user_config = OmegaConf.to_container(user_config, resolve=True)
+
+     # Merge the configs
+     config_fields = copy.deepcopy(cfg_config or {})
+     config_fields.update(user_config or {})
+
+     return config_fields
+
+
+ def get_eval_factory_command(
+     cfg: DictConfig, user_task_config: DictConfig, task_definition: dict
+ ) -> str:
+     config_fields = get_eval_factory_config(cfg, user_task_config, task_definition)
+
+     overrides = copy.deepcopy(dict(cfg.evaluation.get("overrides", {})))
+     overrides.update(dict(user_task_config.get("overrides", {})))
+     # NOTE(dfridman): Temporary fix to make sure that the overrides arg is not split into multiple lines.
+     # Consider passing a JSON object on Eval Factory side
+     overrides = {
+         k: (v.strip("\n") if isinstance(v, str) else v) for k, v in overrides.items()
+     }
+     overrides_str = ",".join([f"{k}={v}" for k, v in overrides.items()])
+     model_url = get_endpoint_url(cfg, user_task_config, task_definition)
+
+     model_id = get_served_model_name(cfg)
+     model_type = task_definition["endpoint_type"]
+     eval_type = task_definition["task"]
+
+     create_file_cmd = _yaml_to_echo_command(
+         yaml.safe_dump(config_fields), "config_ef.yaml"
+     )
+     nv_eval_command = f"""nv_eval run_eval --model_id {model_id} --model_type {model_type} --eval_type {eval_type} --model_url {model_url} --api_key_name API_KEY --output_dir /results --run_config config_ef.yaml"""
+
+     if overrides:
+         nv_eval_command = f"{nv_eval_command} --overrides {overrides_str}"
+
+     return create_file_cmd + " && " + "cat config_ef.yaml && " + nv_eval_command
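For a hypothetical local run (model name, port, task, and override value below are all invented), the returned string chains three steps: materialize config_ef.yaml from the base64 payload, cat it into the job log, then invoke nv_eval with the flags assembled above. Its shape is roughly:

echo "<base64 of config_ef.yaml>" | base64 -d > config_ef.yaml && cat config_ef.yaml && nv_eval run_eval --model_id my-served-model --model_type chat --eval_type ifeval --model_url http://127.0.0.1:8000/v1/chat/completions --api_key_name API_KEY --output_dir /results --run_config config_ef.yaml --overrides config.params.limit_samples=10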
+
+
+ def get_endpoint_url(
+     cfg: DictConfig, user_task_config: DictConfig, task_definition: dict
+ ) -> str:
+     def apply_url_override(url: str) -> str:
+         """Apply user URL override if provided."""
+         override_url = user_task_config.get("overrides", {}).get(
+             "config.target.api_endpoint.url"
+         )
+         return override_url if override_url is not None else url
+
+     if cfg.deployment.type == "none":
+         # For deployment: none, use target URL regardless of executor type
+         if OmegaConf.is_missing(cfg.target.api_endpoint, "url"):
+             raise ValueError(
+                 "API endpoint URL is not set. Add `target.api_endpoint.url` to your config "
+                 "OR override via CLI"
+             )
+         return apply_url_override(cfg.target.api_endpoint.url)
+
+     elif (
+         hasattr(cfg, "target")
+         and hasattr(cfg.target, "api_endpoint")
+         and hasattr(cfg.target.api_endpoint, "url")
+         and not OmegaConf.is_missing(cfg.target.api_endpoint, "url")
+     ):
+         # For Lepton executor with dynamically set target URL
+         return apply_url_override(cfg.target.api_endpoint.url)
+
+     else:
+         # Local executor - use localhost
+         task_endpoint_type = task_definition["endpoint_type"]
+         endpoint_uri = cfg.deployment.endpoints[task_endpoint_type]
+         endpoint_url = f"http://127.0.0.1:{cfg.deployment.port}{endpoint_uri}"
+         return endpoint_url
+
+
+ def get_health_url(cfg: DictConfig, endpoint_url: str) -> str:
+     if cfg.deployment.type == "none":
+         logger.warning("Using endpoint URL as health URL", will_be_used=endpoint_url)
+         return endpoint_url  # TODO(public release) is using model url as health url OK?
+     health_uri = cfg.deployment.endpoints["health"]
+     health_url = f"http://127.0.0.1:{cfg.deployment.port}{health_uri}"
+     return health_url
+
+
+ def get_served_model_name(cfg: DictConfig) -> str:
+     if cfg.deployment.type == "none":
+         return str(cfg.target.api_endpoint.model_id)
+     else:
+         return str(cfg.deployment.served_model_name)
+
+
+ def get_api_key_name(cfg: DictConfig) -> str | None:
+     res = cfg.get("target", {}).get("api_endpoint", {}).get("api_key_name", None)
+     return str(res) if res else None
+
+
+ def get_timestamp_string(include_microseconds: bool = True) -> str:
+     """Get timestamp in format YYYYmmdd_HHMMSS_ffffff."""
+     dt = datetime.datetime.now()
+     fmt = "%Y%m%d_%H%M%S"
+     if include_microseconds:
+         fmt += "_%f"
+     dts = datetime.datetime.strftime(dt, fmt)
+     return dts
+
+
+ def get_eval_factory_dataset_size_from_run_config(run_config: dict) -> Optional[int]:
+     config = run_config["config"]
+     limit_samples = config["params"].get("limit_samples", None)
+     if limit_samples is not None:
+         return int(limit_samples)
+
+     # TODO(dfridman): Move `dataset_size` values to the corresponding `framework.yaml` in Eval Factory.
+     dataset_sizes = {
+         ("lm-evaluation-harness", "ifeval"): 541,
+         ("simple_evals", "gpqa_diamond"): 198,
+         ("simple_evals", "gpqa_diamond_nemo"): 198,
+         ("simple_evals", "AA_math_test_500"): 500,
+         ("simple_evals", "math_test_500_nemo"): 500,
+         ("simple_evals", "aime_2024_nemo"): 30,
+         ("simple_evals", "AA_AIME_2024"): 30,
+         ("simple_evals", "aime_2025_nemo"): 30,
+         ("simple_evals", "AIME_2025"): 30,
+         ("simple_evals", "humaneval"): 164,
+         ("simple_evals", "mmlu"): 14042,
+         ("simple_evals", "mmlu_pro"): 12032,
+         ("bigcode-evaluation-harness", "mbpp"): 500,
+         ("bigcode-evaluation-harness", "humaneval"): 164,
+         ("livecodebench", "livecodebench_0724_0125"): 315,
+         ("livecodebench", "livecodebench_0824_0225"): 279,
+         ("hle", "hle"): 2684,
+         ("scicode", "aa_scicode"): 338,
+     }
+     dataset_size = dataset_sizes.get((run_config["framework_name"], config["type"]))
+     if dataset_size is None:
+         return None
+     else:
+         dataset_size = int(dataset_size)
+
+     downsampling_ratio = (
+         config["params"].get("extra", {}).get("downsampling_ratio", None)
+     )
+     if downsampling_ratio is not None:
+         dataset_size = int(round(dataset_size * downsampling_ratio))
+
+     n_samples = int(config["params"].get("extra", {}).get("n_samples", 1))
+     dataset_size *= n_samples
+     return dataset_size
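A worked example of the fallback arithmetic above, using an invented run config: the ("simple_evals", "gpqa_diamond") base size of 198 is first downsampled, then multiplied by n_samples.

from nemo_evaluator_launcher.common.helpers import (
    get_eval_factory_dataset_size_from_run_config,
)

run_config = {  # hypothetical run config, shaped like the ones consumed above
    "framework_name": "simple_evals",
    "config": {
        "type": "gpqa_diamond",
        "params": {"extra": {"downsampling_ratio": 0.5, "n_samples": 4}},
    },
}
# base size 198 -> round(198 * 0.5) = 99 -> 99 * 4 = 396
assert get_eval_factory_dataset_size_from_run_config(run_config) == 396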
nemo_evaluator_launcher/common/logging_utils.py
@@ -0,0 +1,349 @@
+ # SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+ # SPDX-License-Identifier: Apache-2.0
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #
+ """Logging configuration module for nemo-evaluator-launcher.
+
+ This module provides a centralized logging configuration using structlog that outputs
+ to both stderr and a log file. All modules should import and use the logger from this
+ module to ensure consistent logging behavior across the application.
+
+ LOGGING POLICY:
+ ==============
+ All logging in this project MUST go through this module. This is enforced by a pre-commit
+ hook that checks for violations.
+
+ DO NOT:
+ - import structlog directly
+ - import logging directly
+ - call structlog.get_logger() directly
+ - call logging.getLogger() directly
+
+ DO:
+ - from nemo_evaluator_launcher.common.logging_utils import logger
+ - from nemo_evaluator_launcher.common.logging_utils import get_logger
+
+ Examples:
+     # Correct
+     from nemo_evaluator_launcher.common.logging_utils import logger
+     logger.info("User logged in", user_id="12345")
+
+     # Incorrect
+     import structlog
+     logger = structlog.get_logger()
+     logger.info("User logged in")
+ """
+
+ import json
+ import logging
+ import logging.config
+ import os
+ import pathlib
+ import sys
+ from datetime import datetime
+ from pprint import pformat
+ from typing import Any, Dict
+
+ import structlog
+
+ # If this env var is set, it will override a more standard "LOG_LEVEL". If
+ # both are unset, default would be used.
+ _LOG_LEVEL_ENV_VAR = "NEMO_EVALUATOR_LOG_LEVEL"
+ _DEFAULT_LOG_LEVEL = "WARNING"
+ _SENSITIVE_KEY_SUBSTRINGS = {
+     # Keep minimal, broad substrings (normalized: lowercased, no spaces/_/-)
+     "authorization",  # covers proxy-authorization, etc.
+     "apikey",  # covers api_key, api-key, x-api-key, nvidia_api_key, ...
+     "accesskey",  # covers access_key / access-key
+     "privatekey",
+     "token",  # covers access_token, id_token, refresh_token, *_token
+     "secret",  # covers openai_client_secret, aws_secret_access_key, *_secret
+     "password",
+     "pwd",  # common shorthand
+     "passwd",  # common variant
+ }
+
+
+ def _mask(val: object) -> str:
+     s = str(val)
+     if len(s) <= 10:
+         return "[REDACTED]"
+     return f"{s[:2]}…{s[-2:]}"
+
+
+ def _normalize(name: object) -> str:
+     if not isinstance(name, str):
+         return ""
+     s = name.lower()
+     # drop spaces, hyphens, underscores
+     return s.replace(" ", "").replace("-", "").replace("_", "")
+
+
+ def _is_sensitive_key(key: object) -> bool:
+     k = _normalize(key)
+     return any(substr in k for substr in _SENSITIVE_KEY_SUBSTRINGS)
+
+
+ def _redact_mapping(m: dict) -> dict:
+     red = {}
+     for k, v in m.items():
+         if _is_sensitive_key(k):
+             red[k] = _mask(v)
+         elif isinstance(v, dict):
+             red[k] = _redact_mapping(v)
+         else:
+             red[k] = v
+     return red
+
+
+ def redact_processor(_: Any, __: str, event_dict: Dict[str, Any]) -> Dict[str, Any]:
+     if os.getenv("LOG_DISABLE_REDACTION", "").lower() in {"1", "true", "yes"}:
+         return event_dict
+     return _redact_mapping(event_dict)
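A short sketch of what the redaction pass does to an event dict (the keys and values below are invented); keys are matched after normalization, so header-style and snake_case spellings are both caught:

from nemo_evaluator_launcher.common.logging_utils import _redact_mapping

event = {
    "event": "calling endpoint",
    "headers": {"Authorization": "Bearer sk-1234567890", "Accept": "application/json"},
    "api_key": "short",
}
redacted = _redact_mapping(event)
# Long sensitive values keep only the first and last two characters, short ones are
# fully replaced:
#   {"event": "calling endpoint",
#    "headers": {"Authorization": "Be…90", "Accept": "application/json"},
#    "api_key": "[REDACTED]"}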
+
+
+ def _ensure_log_dir() -> pathlib.Path:
+     """Ensure the log directory exists and return its path."""
+     log_dir = pathlib.Path.home() / ".nemo-evaluator" / "logs"
+     log_dir.mkdir(parents=True, exist_ok=True)
+     return log_dir
+
+
+ def _get_env_log_level() -> str:
+     """Get log level from environment variable, translating single letters to full names.
+
+     Translates:
+     - D -> DEBUG
+     - I -> INFO
+     - W -> WARNING
+     - E -> ERROR
+     - F -> CRITICAL
+
+     Returns:
+         Uppercase log level string, defaults to WARNING if not set or invalid.
+     """
+     env_level = os.getenv(_LOG_LEVEL_ENV_VAR, os.getenv("LOG_LEVEL"))
+     # If empty or unset, default
+     if not env_level:
+         env_level = _DEFAULT_LOG_LEVEL
+     env_level = env_level.upper()
+
+     # Translate single letters to full level names
+     level_map = {
+         "D": "DEBUG",
+         "I": "INFO",
+         "W": "WARNING",
+         "E": "ERROR",
+         "F": "CRITICAL",
+     }
+
+     return level_map.get(env_level, env_level)
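Either the single-letter or the full level name works. A sketch, assuming the variable is set before the module is first imported (handlers are configured at import time):

import os

os.environ["NEMO_EVALUATOR_LOG_LEVEL"] = "D"  # same as "DEBUG"; overrides LOG_LEVEL

from nemo_evaluator_launcher.common.logging_utils import logger

logger.debug("now visible on stderr as well as in the log files")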
+
+
+ def custom_timestamper(_: Any, __: Any, event_dict: Dict[str, Any]) -> Dict[str, Any]:
+     """Add ISO UTC timestamp with milliseconds to event_dict['timestamp']."""
+     now = datetime.now()
+     event_dict["timestamp"] = now.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3]
+     return event_dict
+
+
+ class MainConsoleRenderer:
+     """Custom console renderer for [L TIMESTAMP] message with color by level."""
+
+     LEVEL_MAP = {
+         "debug": ("D", "\033[90m"),  # grey
+         "info": ("I", "\033[32m"),  # green
+         "warning": ("W", "\033[33m"),  # yellow
+         "warn": ("W", "\033[33m"),  # yellow
+         "error": ("E", "\033[31m"),  # red
+         "critical": ("F", "\033[41m"),  # red background
+         "fatal": ("F", "\033[41m"),  # alias for critical
+     }
+     RESET = "\033[0m"
+
+     def __init__(self, colors: bool = True):
+         self.colors = colors
+
+     def __call__(
+         self, logger: Any, method_name: str, event_dict: Dict[str, Any]
+     ) -> str:
+         timestamp = event_dict.get("timestamp", "")
+         message = event_dict.get("event", "")
+         level = event_dict.get("level", method_name).lower()
+         letter, color = self.LEVEL_MAP.get(level, ("?", ""))
+         prefix = f"[{letter} {timestamp}]"
+         if self.colors and color:
+             prefix = f"{color}{prefix}{self.RESET}"
+
+         # Build the output with message and key-value pairs
+         output_parts = [prefix]
+
+         # Make the main message bold
+         if self.colors:
+             message = f"\033[1m{message}\033[0m"  # bold
+         output_parts.append(message)
+
+         # Add key-value pairs (excluding internal structlog keys)
+         kv_pairs = []
+         for key, value in event_dict.items():
+             if key not in ["timestamp", "event", "level"]:
+                 # Pretty-format complex values (dict/list) as JSON on new lines
+                 pretty_value = None
+                 if isinstance(value, (dict, list)):
+                     try:
+                         pretty_value = json.dumps(
+                             value, ensure_ascii=False, sort_keys=True, indent=2
+                         )
+                     except Exception:
+                         pretty_value = pformat(value, width=100, compact=False)
+                 elif not isinstance(value, (str, int, float, bool, type(None))):
+                     # Fall back to reasonably readable representation for other complex types
+                     pretty_value = pformat(value, width=100, compact=False)
+
+                 rendered_value = (
+                     pretty_value if pretty_value is not None else str(value)
+                 )
+
+                 # If multiline, place value on a new line for readability
+                 if "\n" in rendered_value:
+                     if self.colors:
+                         kv_pairs.append(
+                             f"\033[35m{key}\033[0m=\n\033[36m{rendered_value}\033[0m"
+                         )
+                     else:
+                         kv_pairs.append(f"{key}=\n{rendered_value}")
+                 else:
+                     if self.colors:
+                         # Format: magenta key + equals + cyan value
+                         kv_pairs.append(
+                             f"\033[35m{key}\033[0m=\033[36m{rendered_value}\033[0m"
+                         )
+                     else:
+                         # No colors for plain output
+                         kv_pairs.append(f"{key}={rendered_value}")
+
+         if kv_pairs:
+             # If any kv is multiline, join with newlines; otherwise keep single line
+             if any("\n" in kv for kv in kv_pairs):
+                 kv_text = "\n".join(kv_pairs)
+             else:
+                 kv_text = " ".join(kv_pairs)
+             if self.colors:
+                 kv_text = f"\033[35m{kv_text}{self.RESET}"  # magenta
+             output_parts.append(kv_text)
+
+         return " ".join(output_parts)
+
+
+ def _configure_structlog() -> None:
+     """Configure structlog for both console and file output."""
+     log_dir = _ensure_log_dir()
+     log_file = log_dir / "main.log"
+     json_log_file = log_dir / "main.log.json"
+
+     shared_processors = [
+         structlog.stdlib.ProcessorFormatter.remove_processors_meta,
+         redact_processor,
+         custom_timestamper,
+         structlog.stdlib.add_log_level,
+         structlog.processors.StackInfoRenderer(),
+         structlog.dev.set_exc_info,
+         structlog.processors.format_exc_info,
+         structlog.processors.UnicodeDecoder(),
+     ]
+
+     logging.config.dictConfig(
+         {
+             "version": 1,
+             "disable_existing_loggers": False,
+             "formatters": {
+                 # Formatter for colored console output
+                 "colored": {
+                     "()": "structlog.stdlib.ProcessorFormatter",
+                     "processors": [
+                         *shared_processors,
+                         MainConsoleRenderer(colors=True),
+                     ],
+                 },
+                 # Formatter for plain file output
+                 "plain": {
+                     "()": "structlog.stdlib.ProcessorFormatter",
+                     "processors": [
+                         *shared_processors,
+                         MainConsoleRenderer(colors=False),
+                     ],
+                 },
+                 # Formatter for JSON file output
+                 "json": {
+                     "()": "structlog.stdlib.ProcessorFormatter",
+                     "processors": [
+                         *shared_processors,
+                         structlog.processors.JSONRenderer(),
+                     ],
+                 },
+             },
+             "handlers": {
+                 "console": {
+                     "class": "logging.StreamHandler",
+                     "level": _get_env_log_level(),
+                     "formatter": "colored",
+                     "stream": sys.stderr,
+                 },
+                 "file": {
+                     "class": "logging.handlers.WatchedFileHandler",
+                     "level": "DEBUG",
+                     "filename": log_file,
+                     "formatter": "plain",
+                 },
+                 "json_file": {
+                     "class": "logging.handlers.WatchedFileHandler",
+                     "level": "DEBUG",
+                     "filename": json_log_file,
+                     "formatter": "json",
+                 },
+             },
+             "loggers": {
+                 "": {
+                     "handlers": ["console", "file", "json_file"],
+                     "level": "DEBUG",
+                     "propagate": True,
+                 },
+             },
+         }
+     )
+
+     structlog.configure(
+         processors=[
+             structlog.stdlib.filter_by_level,
+             structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
+         ],
+         logger_factory=structlog.stdlib.LoggerFactory(),
+         wrapper_class=structlog.stdlib.BoundLogger,
+         cache_logger_on_first_use=True,
+     )
+
+     structlog.get_logger().debug("Logger configured", config=structlog.get_config())
+
+
+ # Configure logging on module import
+ _configure_structlog()
+
+
+ def get_logger(name: str | None = None) -> Any:
+     """Get a configured structlog logger."""
+     return structlog.get_logger(name)
+
+
+ # Export the root logger for convenience
+ logger = get_logger()
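Typical usage from other launcher modules follows the policy in the module docstring: import the exported logger (or a named one via get_logger) and pass context as keyword arguments. Whatever the console level, records are also written at DEBUG level to ~/.nemo-evaluator/logs/main.log and main.log.json. A sketch with invented values:

from nemo_evaluator_launcher.common.logging_utils import get_logger, logger

logger.info("Run submitted", invocation_id="abc123")  # "abc123" is an invented value
log = get_logger(__name__)  # a named logger; the same processors and handlers apply
log.warning("Endpoint not healthy yet", retry_in_s=5)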