nemo-evaluator-launcher 0.1.0rc2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of nemo-evaluator-launcher might be problematic. Click here for more details.
- nemo_evaluator_launcher/__init__.py +65 -0
- nemo_evaluator_launcher/api/__init__.py +24 -0
- nemo_evaluator_launcher/api/functional.py +641 -0
- nemo_evaluator_launcher/api/types.py +89 -0
- nemo_evaluator_launcher/api/utils.py +19 -0
- nemo_evaluator_launcher/cli/__init__.py +15 -0
- nemo_evaluator_launcher/cli/export.py +148 -0
- nemo_evaluator_launcher/cli/info.py +117 -0
- nemo_evaluator_launcher/cli/kill.py +39 -0
- nemo_evaluator_launcher/cli/ls_runs.py +113 -0
- nemo_evaluator_launcher/cli/ls_tasks.py +34 -0
- nemo_evaluator_launcher/cli/main.py +136 -0
- nemo_evaluator_launcher/cli/run.py +135 -0
- nemo_evaluator_launcher/cli/status.py +118 -0
- nemo_evaluator_launcher/cli/version.py +52 -0
- nemo_evaluator_launcher/common/__init__.py +16 -0
- nemo_evaluator_launcher/common/execdb.py +189 -0
- nemo_evaluator_launcher/common/helpers.py +157 -0
- nemo_evaluator_launcher/common/logging_utils.py +349 -0
- nemo_evaluator_launcher/common/mapping.py +310 -0
- nemo_evaluator_launcher/configs/__init__.py +15 -0
- nemo_evaluator_launcher/configs/default.yaml +28 -0
- nemo_evaluator_launcher/configs/deployment/nim.yaml +32 -0
- nemo_evaluator_launcher/configs/deployment/none.yaml +16 -0
- nemo_evaluator_launcher/configs/deployment/sglang.yaml +38 -0
- nemo_evaluator_launcher/configs/deployment/vllm.yaml +41 -0
- nemo_evaluator_launcher/configs/execution/lepton/default.yaml +92 -0
- nemo_evaluator_launcher/configs/execution/local.yaml +17 -0
- nemo_evaluator_launcher/configs/execution/slurm/default.yaml +33 -0
- nemo_evaluator_launcher/executors/__init__.py +22 -0
- nemo_evaluator_launcher/executors/base.py +97 -0
- nemo_evaluator_launcher/executors/lepton/__init__.py +16 -0
- nemo_evaluator_launcher/executors/lepton/deployment_helpers.py +589 -0
- nemo_evaluator_launcher/executors/lepton/executor.py +905 -0
- nemo_evaluator_launcher/executors/lepton/job_helpers.py +394 -0
- nemo_evaluator_launcher/executors/local/__init__.py +15 -0
- nemo_evaluator_launcher/executors/local/executor.py +491 -0
- nemo_evaluator_launcher/executors/local/run.template.sh +88 -0
- nemo_evaluator_launcher/executors/registry.py +38 -0
- nemo_evaluator_launcher/executors/slurm/__init__.py +15 -0
- nemo_evaluator_launcher/executors/slurm/executor.py +982 -0
- nemo_evaluator_launcher/exporters/__init__.py +36 -0
- nemo_evaluator_launcher/exporters/base.py +112 -0
- nemo_evaluator_launcher/exporters/gsheets.py +391 -0
- nemo_evaluator_launcher/exporters/local.py +488 -0
- nemo_evaluator_launcher/exporters/mlflow.py +448 -0
- nemo_evaluator_launcher/exporters/registry.py +40 -0
- nemo_evaluator_launcher/exporters/utils.py +669 -0
- nemo_evaluator_launcher/exporters/wandb.py +376 -0
- nemo_evaluator_launcher/package_info.py +35 -0
- nemo_evaluator_launcher/resources/mapping.toml +344 -0
- nemo_evaluator_launcher-0.1.0rc2.dist-info/METADATA +35 -0
- nemo_evaluator_launcher-0.1.0rc2.dist-info/RECORD +57 -0
- nemo_evaluator_launcher-0.1.0rc2.dist-info/WHEEL +5 -0
- nemo_evaluator_launcher-0.1.0rc2.dist-info/entry_points.txt +3 -0
- nemo_evaluator_launcher-0.1.0rc2.dist-info/licenses/LICENSE +451 -0
- nemo_evaluator_launcher-0.1.0rc2.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,157 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
15
|
+
#
|
|
16
|
+
import copy
|
|
17
|
+
import datetime
|
|
18
|
+
from typing import Optional
|
|
19
|
+
|
|
20
|
+
from omegaconf import DictConfig, OmegaConf
|
|
21
|
+
|
|
22
|
+
from nemo_evaluator_launcher.common.logging_utils import logger
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def get_eval_factory_command(
    cfg: DictConfig, user_task_config: DictConfig, task_definition: dict
) -> str:
    """Build the `nv_eval run_eval` shell command for a single evaluation task.

    Global overrides from ``cfg.evaluation.overrides`` are merged with per-task
    overrides from ``user_task_config`` (per-task wins), flattened to a single
    ``k=v,...`` string and appended as ``--overrides`` when non-empty.
    """
    merged_overrides = copy.deepcopy(dict(cfg.evaluation.get("overrides", {})))
    merged_overrides.update(dict(user_task_config.get("overrides", {})))
    # NOTE(dfridman): Temporary fix to make sure that the overrides arg is not
    # split into multiple lines. Consider passing a JSON object on Eval Factory side.
    merged_overrides = {
        key: (val.strip("\n") if isinstance(val, str) else val)
        for key, val in merged_overrides.items()
    }
    overrides_str = ",".join(f"{key}={val}" for key, val in merged_overrides.items())
    model_url = get_endpoint_url(cfg, user_task_config, task_definition)
    command = f"""nv_eval run_eval \
--model_id {get_served_model_name(cfg)} \
--model_type {task_definition["endpoint_type"]} \
--eval_type {task_definition["task"]} \
--model_url {model_url} \
--api_key_name API_KEY \
--output_dir /results"""
    if overrides_str:
        command = f"{command} --overrides {overrides_str}"
    return command
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
def get_endpoint_url(
    cfg: DictConfig, user_task_config: DictConfig, task_definition: dict
) -> str:
    """Resolve the model endpoint URL for a task, honoring user overrides."""

    def _with_user_override(url: str) -> str:
        # A per-task override of the target URL always takes precedence.
        override = user_task_config.get("overrides", {}).get(
            "config.target.api_endpoint.url"
        )
        return url if override is None else override

    if cfg.deployment.type == "none":
        # For deployment: none, use target URL regardless of executor type.
        if OmegaConf.is_missing(cfg.target.api_endpoint, "url"):
            raise ValueError(
                "API endpoint URL is not set. Add `target.api_endpoint.url` to your config "
                "OR override via CLI"
            )
        return _with_user_override(cfg.target.api_endpoint.url)

    has_explicit_target_url = (
        hasattr(cfg, "target")
        and hasattr(cfg.target, "api_endpoint")
        and hasattr(cfg.target.api_endpoint, "url")
        and not OmegaConf.is_missing(cfg.target.api_endpoint, "url")
    )
    if has_explicit_target_url:
        # e.g. Lepton executor with a dynamically set target URL.
        return _with_user_override(cfg.target.api_endpoint.url)

    # Local executor - the deployment serves on localhost.
    task_endpoint_type = task_definition["endpoint_type"]
    endpoint_uri = cfg.deployment.endpoints[task_endpoint_type]
    return f"http://127.0.0.1:{cfg.deployment.port}{endpoint_uri}"
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
def get_health_url(cfg: DictConfig, endpoint_url: str) -> str:
    """Return the URL used for deployment health checks.

    With ``deployment.type == "none"`` nothing is deployed locally, so the
    model endpoint URL itself is used as a best-effort health probe.
    """
    if cfg.deployment.type == "none":
        logger.warning("Using endpoint URL as health URL", will_be_used=endpoint_url)
        return endpoint_url  # TODO(public release) is using model url as health url OK?
    health_uri = cfg.deployment.endpoints["health"]
    return f"http://127.0.0.1:{cfg.deployment.port}{health_uri}"
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
def get_served_model_name(cfg: DictConfig) -> str:
    """Return the name the evaluated model is served under.

    Without a managed deployment the user-supplied model id is used;
    otherwise the deployment's configured served model name.
    """
    source = (
        cfg.target.api_endpoint.model_id
        if cfg.deployment.type == "none"
        else cfg.deployment.served_model_name
    )
    return str(source)
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
def get_api_key_name(cfg: DictConfig) -> str | None:
    """Name of the env var holding the endpoint API key, or None when unset/empty."""
    target = cfg.get("target", {})
    api_endpoint = target.get("api_endpoint", {})
    key_name = api_endpoint.get("api_key_name", None)
    return str(key_name) if key_name else None
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
def get_timestamp_string(include_microseconds: bool = True) -> str:
    """Get timestamp in format YYYYmmdd_HHMMSS_ffffff.

    Args:
        include_microseconds: When False, drop the trailing ``_ffffff`` part.
    """
    pattern = "%Y%m%d_%H%M%S_%f" if include_microseconds else "%Y%m%d_%H%M%S"
    return datetime.datetime.now().strftime(pattern)
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
def get_eval_factory_dataset_size_from_run_config(run_config: dict) -> Optional[int]:
    """Best-effort number of evaluated samples implied by a run config.

    An explicit ``limit_samples`` wins outright. Otherwise a hard-coded
    per-(framework, task) table is consulted, scaled by the optional
    ``extra.downsampling_ratio`` and multiplied by ``extra.n_samples``.
    Returns None for tasks not in the table.
    """
    config = run_config["config"]
    limit_samples = config["params"].get("limit_samples", None)
    if limit_samples is not None:
        return int(limit_samples)

    # TODO(dfridman): Move `dataset_size` values to the corresponding `framework.yaml` in Eval Factory.
    dataset_sizes = {
        ("lm-evaluation-harness", "ifeval"): 541,
        ("simple_evals", "gpqa_diamond"): 198,
        ("simple_evals", "gpqa_diamond_nemo"): 198,
        ("simple_evals", "AA_math_test_500"): 500,
        ("simple_evals", "math_test_500_nemo"): 500,
        ("simple_evals", "aime_2024_nemo"): 30,
        ("simple_evals", "AA_AIME_2024"): 30,
        ("simple_evals", "aime_2025_nemo"): 30,
        ("simple_evals", "AIME_2025"): 30,
        ("simple_evals", "humaneval"): 164,
        ("simple_evals", "mmlu"): 14042,
        ("simple_evals", "mmlu_pro"): 12032,
        ("bigcode-evaluation-harness", "mbpp"): 500,
        ("bigcode-evaluation-harness", "humaneval"): 164,
        ("livecodebench", "livecodebench_0724_0125"): 315,
        ("livecodebench", "livecodebench_0824_0225"): 279,
        ("hle", "hle"): 2684,
        ("scicode", "aa_scicode"): 338,
    }
    size = dataset_sizes.get((run_config["framework_name"], config["type"]))
    if size is None:
        return None
    size = int(size)

    extra = config["params"].get("extra", {})
    downsampling_ratio = extra.get("downsampling_ratio", None)
    if downsampling_ratio is not None:
        size = int(round(size * downsampling_ratio))

    n_samples = int(extra.get("n_samples", 1))
    return size * n_samples
|
|
@@ -0,0 +1,349 @@
|
|
|
1
|
+
# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
#
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
#
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
#
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
15
|
+
#
|
|
16
|
+
"""Logging configuration module for nemo-evaluator-launcher.
|
|
17
|
+
|
|
18
|
+
This module provides a centralized logging configuration using structlog that outputs
|
|
19
|
+
to both stderr and a log file. All modules should import and use the logger from this
|
|
20
|
+
module to ensure consistent logging behavior across the application.
|
|
21
|
+
|
|
22
|
+
LOGGING POLICY:
|
|
23
|
+
==============
|
|
24
|
+
All logging in this project MUST go through this module. This is enforced by a pre-commit
|
|
25
|
+
hook that checks for violations.
|
|
26
|
+
|
|
27
|
+
DO NOT:
|
|
28
|
+
- import structlog directly
|
|
29
|
+
- import logging directly
|
|
30
|
+
- call structlog.get_logger() directly
|
|
31
|
+
- call logging.getLogger() directly
|
|
32
|
+
|
|
33
|
+
DO:
|
|
34
|
+
- from nemo_evaluator_launcher.common.logging_utils import logger
|
|
35
|
+
- from nemo_evaluator_launcher.common.logging_utils import get_logger
|
|
36
|
+
|
|
37
|
+
Examples:
|
|
38
|
+
# Correct
|
|
39
|
+
from nemo_evaluator_launcher.common.logging_utils import logger
|
|
40
|
+
logger.info("User logged in", user_id="12345")
|
|
41
|
+
|
|
42
|
+
# Incorrect
|
|
43
|
+
import structlog
|
|
44
|
+
logger = structlog.get_logger()
|
|
45
|
+
logger.info("User logged in")
|
|
46
|
+
"""
|
|
47
|
+
|
|
48
|
+
import json
|
|
49
|
+
import logging
|
|
50
|
+
import logging.config
|
|
51
|
+
import os
|
|
52
|
+
import pathlib
|
|
53
|
+
import sys
|
|
54
|
+
from datetime import datetime
|
|
55
|
+
from pprint import pformat
|
|
56
|
+
from typing import Any, Dict
|
|
57
|
+
|
|
58
|
+
import structlog
|
|
59
|
+
|
|
60
|
+
# If this env var is set, it will override a more standard "LOG_LEVEL". If
# both are unset, default would be used.
_LOG_LEVEL_ENV_VAR = "NEMO_EVALUATOR_LOG_LEVEL"
_DEFAULT_LOG_LEVEL = "WARNING"
# Substrings matched against normalized keys (see _normalize/_is_sensitive_key)
# to decide whether a logged value must be masked before it reaches any sink.
_SENSITIVE_KEY_SUBSTRINGS = {
    # Keep minimal, broad substrings (normalized: lowercased, no spaces/_/-)
    "authorization",  # covers proxy-authorization, etc.
    "apikey",  # covers api_key, api-key, x-api-key, nvidia_api_key, ...
    "accesskey",  # covers access_key / access-key
    "privatekey",
    "token",  # covers access_token, id_token, refresh_token, *_token
    "secret",  # covers openai_client_secret, aws_secret_access_key, *_secret
    "password",
    "pwd",  # common shorthand
    "passwd",  # common variant
}
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
def _mask(val: object) -> str:
|
|
79
|
+
s = str(val)
|
|
80
|
+
if len(s) <= 10:
|
|
81
|
+
return "[REDACTED]"
|
|
82
|
+
return f"{s[:2]}…{s[-2:]}"
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
def _normalize(name: object) -> str:
|
|
86
|
+
if not isinstance(name, str):
|
|
87
|
+
return ""
|
|
88
|
+
s = name.lower()
|
|
89
|
+
# drop spaces, hyphens, underscores
|
|
90
|
+
return s.replace(" ", "").replace("-", "").replace("_", "")
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
def _is_sensitive_key(key: object) -> bool:
    """True when the normalized key contains any known sensitive substring."""
    normalized = _normalize(key)
    for fragment in _SENSITIVE_KEY_SUBSTRINGS:
        if fragment in normalized:
            return True
    return False
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
def _redact_mapping(m: dict) -> dict:
    """Return a copy of *m* with values under sensitive keys masked.

    Recurses into nested dicts; non-dict values under non-sensitive keys
    are passed through unchanged.
    """
    return {
        key: (
            _mask(value)
            if _is_sensitive_key(key)
            else _redact_mapping(value) if isinstance(value, dict) else value
        )
        for key, value in m.items()
    }
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
def redact_processor(_: Any, __: str, event_dict: Dict[str, Any]) -> Dict[str, Any]:
    """structlog processor that masks sensitive values in the event dict.

    Setting LOG_DISABLE_REDACTION to 1/true/yes bypasses masking entirely
    (useful for local debugging only).
    """
    disabled = os.getenv("LOG_DISABLE_REDACTION", "").lower()
    if disabled in {"1", "true", "yes"}:
        return event_dict
    return _redact_mapping(event_dict)
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
def _ensure_log_dir() -> pathlib.Path:
|
|
117
|
+
"""Ensure the log directory exists and return its path."""
|
|
118
|
+
log_dir = pathlib.Path.home() / ".nemo-evaluator" / "logs"
|
|
119
|
+
log_dir.mkdir(parents=True, exist_ok=True)
|
|
120
|
+
return log_dir
|
|
121
|
+
|
|
122
|
+
|
|
123
|
+
def _get_env_log_level() -> str:
    """Resolve the console log level from the environment.

    ``NEMO_EVALUATOR_LOG_LEVEL`` takes precedence over ``LOG_LEVEL``.
    Single letters are expanded:
        D -> DEBUG, I -> INFO, W -> WARNING, E -> ERROR, F -> CRITICAL

    Returns:
        Uppercase level string; WARNING when neither variable is set.
        Unrecognized values are returned as-is (uppercased).
    """
    raw = os.getenv(_LOG_LEVEL_ENV_VAR, os.getenv("LOG_LEVEL")) or _DEFAULT_LOG_LEVEL
    raw = raw.upper()

    shorthand = {
        "D": "DEBUG",
        "I": "INFO",
        "W": "WARNING",
        "E": "ERROR",
        "F": "CRITICAL",
    }
    return shorthand.get(raw, raw)
|
|
152
|
+
|
|
153
|
+
|
|
154
|
+
def custom_timestamper(_: Any, __: Any, event_dict: Dict[str, Any]) -> Dict[str, Any]:
    """Add a local-time ISO timestamp with millisecond precision to event_dict['timestamp'].

    NOTE(review): the original docstring said "UTC", but ``datetime.now()``
    returns local time; switching to UTC would change every emitted log
    timestamp, so the documentation is corrected instead of the behavior.
    """
    now = datetime.now()
    # %f yields microseconds; trim the last three digits down to milliseconds.
    event_dict["timestamp"] = now.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3]
    return event_dict
|
|
159
|
+
|
|
160
|
+
|
|
161
|
+
class MainConsoleRenderer:
    """Custom console renderer for [L TIMESTAMP] message with color by level.

    Used as the final processor of a structlog ProcessorFormatter chain:
    it must return the fully rendered log line as a single string.
    """

    # level name -> (single-letter tag, ANSI color prefix); unknown levels
    # fall back to ("?", "") in __call__.
    LEVEL_MAP = {
        "debug": ("D", "\033[90m"),  # grey
        "info": ("I", "\033[32m"),  # green
        "warning": ("W", "\033[33m"),  # yellow
        "warn": ("W", "\033[33m"),  # yellow
        "error": ("E", "\033[31m"),  # red
        "critical": ("F", "\033[41m"),  # red background
        "fatal": ("F", "\033[41m"),  # alias for critical
    }
    RESET = "\033[0m"

    def __init__(self, colors: bool = True):
        # colors=True emits ANSI escapes (console); False yields plain text (log files).
        self.colors = colors

    def __call__(
        self, logger: Any, method_name: str, event_dict: Dict[str, Any]
    ) -> str:
        """Render one structlog event as a single display string."""
        timestamp = event_dict.get("timestamp", "")
        message = event_dict.get("event", "")
        # Prefer the explicit "level" key; fall back to the log method name.
        level = event_dict.get("level", method_name).lower()
        letter, color = self.LEVEL_MAP.get(level, ("?", ""))
        prefix = f"[{letter} {timestamp}]"
        if self.colors and color:
            prefix = f"{color}{prefix}{self.RESET}"

        # Build the output with message and key-value pairs
        output_parts = [prefix]

        # Make the main message bold
        if self.colors:
            message = f"\033[1m{message}\033[0m"  # bold
        output_parts.append(message)

        # Add key-value pairs (excluding internal structlog keys)
        kv_pairs = []
        for key, value in event_dict.items():
            if key not in ["timestamp", "event", "level"]:
                # Pretty-format complex values (dict/list) as JSON on new lines
                pretty_value = None
                if isinstance(value, (dict, list)):
                    try:
                        pretty_value = json.dumps(
                            value, ensure_ascii=False, sort_keys=True, indent=2
                        )
                    except Exception:
                        # Non-JSON-serializable members: fall back to pprint.
                        pretty_value = pformat(value, width=100, compact=False)
                elif not isinstance(value, (str, int, float, bool, type(None))):
                    # Fall back to reasonably readable representation for other complex types
                    pretty_value = pformat(value, width=100, compact=False)

                rendered_value = (
                    pretty_value if pretty_value is not None else str(value)
                )

                # If multiline, place value on a new line for readability
                if "\n" in rendered_value:
                    if self.colors:
                        # magenta key, cyan value on its own line(s)
                        kv_pairs.append(
                            f"\033[35m{key}\033[0m=\n\033[36m{rendered_value}\033[0m"
                        )
                    else:
                        kv_pairs.append(f"{key}=\n{rendered_value}")
                else:
                    if self.colors:
                        # Format: magenta key + equals + cyan value
                        kv_pairs.append(
                            f"\033[35m{key}\033[0m=\033[36m{rendered_value}\033[0m"
                        )
                    else:
                        # No colors for plain output
                        kv_pairs.append(f"{key}={rendered_value}")

        if kv_pairs:
            # If any kv is multiline, join with newlines; otherwise keep single line
            if any("\n" in kv for kv in kv_pairs):
                kv_text = "\n".join(kv_pairs)
            else:
                kv_text = " ".join(kv_pairs)
            if self.colors:
                kv_text = f"\033[35m{kv_text}{self.RESET}"  # magenta
            output_parts.append(kv_text)

        return " ".join(output_parts)
|
|
247
|
+
|
|
248
|
+
|
|
249
|
+
def _configure_structlog() -> None:
    """Configure structlog for both console and file output.

    Side effects: creates ~/.nemo-evaluator/logs (via _ensure_log_dir),
    installs a stdlib logging dictConfig with three handlers (colored
    stderr console, plain-text file, JSON file), and configures structlog
    to route events through stdlib logging.
    """
    log_dir = _ensure_log_dir()
    log_file = log_dir / "main.log"
    json_log_file = log_dir / "main.log.json"

    # Processor chain shared by every formatter: redact secrets first, then
    # timestamp/level/exception rendering. Runs inside ProcessorFormatter.
    shared_processors = [
        structlog.stdlib.ProcessorFormatter.remove_processors_meta,
        redact_processor,
        custom_timestamper,
        structlog.stdlib.add_log_level,
        structlog.processors.StackInfoRenderer(),
        structlog.dev.set_exc_info,
        structlog.processors.format_exc_info,
        structlog.processors.UnicodeDecoder(),
    ]

    logging.config.dictConfig(
        {
            "version": 1,
            "disable_existing_loggers": False,
            "formatters": {
                # Formatter for colored console output
                "colored": {
                    "()": "structlog.stdlib.ProcessorFormatter",
                    "processors": [
                        *shared_processors,
                        MainConsoleRenderer(colors=True),
                    ],
                },
                # Formatter for plain file output
                "plain": {
                    "()": "structlog.stdlib.ProcessorFormatter",
                    "processors": [
                        *shared_processors,
                        MainConsoleRenderer(colors=False),
                    ],
                },
                # Formatter for JSON file output
                "json": {
                    "()": "structlog.stdlib.ProcessorFormatter",
                    "processors": [
                        *shared_processors,
                        structlog.processors.JSONRenderer(),
                    ],
                },
            },
            "handlers": {
                # Console level is env-configurable; files always capture DEBUG.
                "console": {
                    "class": "logging.StreamHandler",
                    "level": _get_env_log_level(),
                    "formatter": "colored",
                    "stream": sys.stderr,
                },
                # WatchedFileHandler reopens the file if it is rotated externally.
                "file": {
                    "class": "logging.handlers.WatchedFileHandler",
                    "level": "DEBUG",
                    "filename": log_file,
                    "formatter": "plain",
                },
                "json_file": {
                    "class": "logging.handlers.WatchedFileHandler",
                    "level": "DEBUG",
                    "filename": json_log_file,
                    "formatter": "json",
                },
            },
            "loggers": {
                # Root logger fans out to all three handlers.
                "": {
                    "handlers": ["console", "file", "json_file"],
                    "level": "DEBUG",
                    "propagate": True,
                },
            },
        }
    )

    # Route structlog events into stdlib logging so the dictConfig above
    # controls filtering and formatting for both worlds.
    structlog.configure(
        processors=[
            structlog.stdlib.filter_by_level,
            structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
        ],
        logger_factory=structlog.stdlib.LoggerFactory(),
        wrapper_class=structlog.stdlib.BoundLogger,
        cache_logger_on_first_use=True,
    )

    structlog.get_logger().debug("Logger configured", config=structlog.get_config())
|
|
337
|
+
|
|
338
|
+
|
|
339
|
+
# Configure logging on module import so every importer gets consistent
# console/file handlers without any extra setup step.
_configure_structlog()
|
|
341
|
+
|
|
342
|
+
|
|
343
|
+
def get_logger(name: str | None = None) -> Any:
    """Get a configured structlog logger.

    Args:
        name: Optional logger name; None returns the root logger.
    """
    return structlog.get_logger(name)


# Export the root logger for convenience — per the module-level policy, this
# (or get_logger) is the only sanctioned way to log in this project.
logger = get_logger()
|