otel-log 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- otel_log-0.1.0/PKG-INFO +12 -0
- otel_log-0.1.0/pyproject.toml +24 -0
- otel_log-0.1.0/setup.cfg +4 -0
- otel_log-0.1.0/src/otel_log/__init__.py +22 -0
- otel_log-0.1.0/src/otel_log/handler.py +181 -0
- otel_log-0.1.0/src/otel_log/log_formatter.py +239 -0
- otel_log-0.1.0/src/otel_log/log_record.py +34 -0
- otel_log-0.1.0/src/otel_log/otlp_exporter.py +204 -0
- otel_log-0.1.0/src/otel_log/resource_provider.py +72 -0
- otel_log-0.1.0/src/otel_log/severity_mapper.py +127 -0
- otel_log-0.1.0/src/otel_log/trace_context.py +73 -0
- otel_log-0.1.0/src/otel_log.egg-info/PKG-INFO +12 -0
- otel_log-0.1.0/src/otel_log.egg-info/SOURCES.txt +14 -0
- otel_log-0.1.0/src/otel_log.egg-info/dependency_links.txt +1 -0
- otel_log-0.1.0/src/otel_log.egg-info/requires.txt +8 -0
- otel_log-0.1.0/src/otel_log.egg-info/top_level.txt +1 -0
otel_log-0.1.0/PKG-INFO
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: otel-log
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: OpenTelemetry Standard Log Format library for Python
|
|
5
|
+
Requires-Python: >=3.9
|
|
6
|
+
Requires-Dist: opentelemetry-api>=1.20.0
|
|
7
|
+
Requires-Dist: opentelemetry-sdk>=1.20.0
|
|
8
|
+
Requires-Dist: opentelemetry-exporter-otlp>=1.20.0
|
|
9
|
+
Provides-Extra: dev
|
|
10
|
+
Requires-Dist: pytest>=7.0; extra == "dev"
|
|
11
|
+
Requires-Dist: hypothesis>=6.0; extra == "dev"
|
|
12
|
+
Requires-Dist: jsonschema>=4.0; extra == "dev"
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools>=68.0", "wheel"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "otel-log"
|
|
7
|
+
version = "0.1.0"
|
|
8
|
+
description = "OpenTelemetry Standard Log Format library for Python"
|
|
9
|
+
requires-python = ">=3.9"
|
|
10
|
+
dependencies = [
|
|
11
|
+
"opentelemetry-api>=1.20.0",
|
|
12
|
+
"opentelemetry-sdk>=1.20.0",
|
|
13
|
+
"opentelemetry-exporter-otlp>=1.20.0",
|
|
14
|
+
]
|
|
15
|
+
|
|
16
|
+
[project.optional-dependencies]
|
|
17
|
+
dev = [
|
|
18
|
+
"pytest>=7.0",
|
|
19
|
+
"hypothesis>=6.0",
|
|
20
|
+
"jsonschema>=4.0",
|
|
21
|
+
]
|
|
22
|
+
|
|
23
|
+
[tool.setuptools.packages.find]
|
|
24
|
+
where = ["src"]
|
otel_log-0.1.0/src/otel_log/__init__.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
"""OpenTelemetry Standard Log Format library for Python."""
|
|
2
|
+
|
|
3
|
+
from .handler import OTelLoggingHandler
|
|
4
|
+
from .log_formatter import LogFormatter
|
|
5
|
+
from .log_record import LogRecord
|
|
6
|
+
from .otlp_exporter import ExporterConfig, OTLPExporter
|
|
7
|
+
from .resource_provider import ResourceConfig, ResourceProvider
|
|
8
|
+
from .severity_mapper import SeverityMapper
|
|
9
|
+
from .trace_context import TraceContext, TraceContextExtractor
|
|
10
|
+
|
|
11
|
+
__all__ = [
|
|
12
|
+
"LogRecord",
|
|
13
|
+
"LogFormatter",
|
|
14
|
+
"OTelLoggingHandler",
|
|
15
|
+
"OTLPExporter",
|
|
16
|
+
"ExporterConfig",
|
|
17
|
+
"SeverityMapper",
|
|
18
|
+
"TraceContext",
|
|
19
|
+
"TraceContextExtractor",
|
|
20
|
+
"ResourceConfig",
|
|
21
|
+
"ResourceProvider",
|
|
22
|
+
]
|
|
@@ -0,0 +1,181 @@
|
|
|
1
|
+
"""OTelLoggingHandler: Python logging.Handler producing OTel-compliant JSON logs.
|
|
2
|
+
|
|
3
|
+
Subclasses ``logging.Handler`` and wires together LogFormatter, SeverityMapper,
|
|
4
|
+
TraceContextExtractor, and ResourceProvider to produce structured JSON log
|
|
5
|
+
output conforming to the Standard Log Format schema.
|
|
6
|
+
|
|
7
|
+
Supports configuration via ResourceConfig or environment variables:
|
|
8
|
+
OTEL_SERVICE_NAME, OTEL_SERVICE_VERSION, OTEL_DEPLOYMENT_ENVIRONMENT
|
|
9
|
+
|
|
10
|
+
When an OTLPExporter is provided (or auto-configured via OTEL_EXPORTER_OTLP_ENDPOINT),
|
|
11
|
+
each emitted record is forwarded to the exporter. Otherwise, JSON is written to
|
|
12
|
+
the configured stream (console-only mode).
|
|
13
|
+
|
|
14
|
+
Requirements: 5.1-5.5, 9.1
|
|
15
|
+
"""
|
|
16
|
+
|
|
17
|
+
from __future__ import annotations
|
|
18
|
+
|
|
19
|
+
import logging
|
|
20
|
+
import os
|
|
21
|
+
import sys
|
|
22
|
+
import traceback
|
|
23
|
+
from datetime import datetime, timezone
|
|
24
|
+
from typing import Any, Dict, IO, Optional
|
|
25
|
+
|
|
26
|
+
from .log_formatter import LogFormatter
|
|
27
|
+
from .log_record import LogRecord
|
|
28
|
+
from .otlp_exporter import ExporterConfig, OTLPExporter
|
|
29
|
+
from .resource_provider import ResourceConfig, ResourceProvider
|
|
30
|
+
from .severity_mapper import SeverityMapper
|
|
31
|
+
from .trace_context import TraceContextExtractor
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class OTelLoggingHandler(logging.Handler):
    """Python logging.Handler that produces OTel-compliant JSON logs.

    Each emitted ``logging.LogRecord`` is mapped to an OTel ``LogRecord``,
    serialized to JSON via ``LogFormatter``, and either:
    - forwarded to the configured ``OTLPExporter`` (when set), or
    - written as JSON to the configured stream (console-only mode).

    Environment variable overrides (take precedence over ResourceConfig):
    - ``OTEL_SERVICE_NAME``          -> ``service.name``
    - ``OTEL_SERVICE_VERSION``       -> ``service.version``
    - ``OTEL_DEPLOYMENT_ENVIRONMENT``-> ``deployment.environment``
    - ``OTEL_EXPORTER_OTLP_ENDPOINT``-> auto-configure OTLPExporter when
      no explicit exporter is provided
    """

    # Standard attribute names present on every logging.LogRecord; anything
    # else found in a record's __dict__ is treated as a user-supplied
    # "extra" attribute. Hoisted to a class-level constant so the set is
    # not rebuilt on every emitted record.
    _STANDARD_ATTRS = frozenset({
        "name", "msg", "args", "created", "relativeCreated", "exc_info",
        "exc_text", "stack_info", "lineno", "funcName", "pathname",
        "filename", "module", "thread", "threadName", "process",
        "processName", "levelname", "levelno", "msecs", "message",
        "taskName",
    })

    def __init__(
        self,
        resource_config: Optional[ResourceConfig] = None,
        stream: Optional[IO] = None,
        level: int = logging.NOTSET,
        exporter: Optional[OTLPExporter] = None,
    ) -> None:
        """Initialise the handler.

        Args:
            resource_config: Base resource attributes; OTEL_* env vars
                override individual fields.
            stream: Output stream for console-only mode (default sys.stderr).
            level: Minimum logging level (default logging.NOTSET).
            exporter: Explicit exporter; when None, one is auto-created if
                OTEL_EXPORTER_OTLP_ENDPOINT is set.
        """
        super().__init__(level)
        self._stream = stream or sys.stderr
        self._formatter_component = LogFormatter()
        self._severity_mapper = SeverityMapper()
        self._trace_extractor = TraceContextExtractor()

        # Build resource config with env var overrides
        config = resource_config or ResourceConfig()
        config = self._apply_env_overrides(config)
        self._resource_provider = ResourceProvider(config)

        # Exporter: use explicit exporter, or auto-configure from env var.
        if exporter is not None:
            self._exporter: Optional[OTLPExporter] = exporter
        else:
            endpoint = os.environ.get("OTEL_EXPORTER_OTLP_ENDPOINT", "")
            if endpoint:
                # Env-configured exporters default to the HTTP protocol.
                self._exporter = OTLPExporter(ExporterConfig(endpoint=endpoint, protocol="http"))
            else:
                self._exporter = None

    @staticmethod
    def _apply_env_overrides(config: ResourceConfig) -> ResourceConfig:
        """Return a new ResourceConfig with OTEL_* env vars applied on top.

        Empty-string environment values are treated the same as unset.
        """
        service_name = os.environ.get("OTEL_SERVICE_NAME")
        service_version = os.environ.get("OTEL_SERVICE_VERSION")
        deployment_env = os.environ.get("OTEL_DEPLOYMENT_ENVIRONMENT")

        return ResourceConfig(
            service_name=service_name if service_name else config.service_name,
            service_version=service_version if service_version else config.service_version,
            deployment_environment=deployment_env if deployment_env else config.deployment_environment,
            custom_attributes=config.custom_attributes,
        )

    def emit(self, record: logging.LogRecord) -> None:
        """Emit a logging.LogRecord as OTel-compliant JSON.

        Steps:
        1. Map Python log level to SeverityNumber/SeverityText
        2. Extract trace context from active OTel span
        3. Build LogRecord with resource attributes
        4. Handle exc_info (exception type, message, stacktrace)
        5. Serialize to JSON via LogFormatter
        6. Export via OTLPExporter (if configured) or write to stream

        Never raises: any failure is routed to ``self.handleError`` per
        the logging.Handler contract.
        """
        try:
            # 1. Map severity
            severity_number = self._severity_mapper.to_severity_number(record.levelno)
            severity_text = self._severity_mapper.to_severity_text(record.levelno)

            # 2. Extract trace context
            trace_ctx = self._trace_extractor.extract()

            # 3. Build resource
            resource = self._resource_provider.get_resource()

            # 4. Build attributes from extra fields and exc_info
            attributes = self._build_attributes(record)

            # 5. Build OTel LogRecord. record.created is the event time;
            # it is reused as the observed timestamp as well.
            timestamp = datetime.fromtimestamp(record.created, tz=timezone.utc)
            otel_record = LogRecord(
                timestamp=timestamp,
                observed_timestamp=timestamp,
                severity_text=severity_text,
                severity_number=severity_number,
                body=record.getMessage(),
                resource=resource,
                attributes=attributes if attributes else None,
                trace_id=trace_ctx.trace_id if trace_ctx else None,
                span_id=trace_ctx.span_id if trace_ctx else None,
                trace_flags=trace_ctx.trace_flags if trace_ctx else None,
            )

            # 6. Export or write to stream
            if self._exporter is not None:
                self._exporter.export([otel_record])
            else:
                json_line = self._formatter_component.serialize(otel_record)
                self._stream.write(json_line + "\n")
                self._stream.flush()
        except Exception:
            self.handleError(record)

    def _build_attributes(self, record: logging.LogRecord) -> Optional[Dict[str, Any]]:
        """Build attributes dict from logging.LogRecord extras and exc_info.

        Extracts extra attributes set on the record (beyond standard fields)
        and, when exc_info is present, adds exception.type, exception.message,
        and exception.stacktrace per OTel semantic conventions.

        Returns None when there is nothing to attach.
        """
        attrs: Dict[str, Any] = {}

        # Collect extra attributes (non-standard fields on the LogRecord)
        for key, value in record.__dict__.items():
            if key.startswith("_") or key in self._STANDARD_ATTRS:
                continue
            # Skip empty string keys per Req 11.4
            if not key:
                continue
            attrs[key] = value

        # Handle exc_info -> exception attributes (Req 5.5)
        if record.exc_info and record.exc_info[0] is not None:
            exc_type, exc_value, exc_tb = record.exc_info
            attrs["exception.type"] = (
                exc_type.__name__ if hasattr(exc_type, "__name__") else str(exc_type)
            )
            attrs["exception.message"] = str(exc_value) if exc_value else ""
            if exc_tb is not None:
                attrs["exception.stacktrace"] = "".join(
                    traceback.format_exception(exc_type, exc_value, exc_tb)
                )
            else:
                attrs["exception.stacktrace"] = ""

        return attrs if attrs else None
|
|
@@ -0,0 +1,239 @@
|
|
|
1
|
+
"""LogFormatter: serialize LogRecord to JSON and parse JSON back to LogRecord."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
from datetime import datetime, timezone
|
|
7
|
+
from typing import Any, Dict, Optional
|
|
8
|
+
|
|
9
|
+
from .log_record import LogRecord
|
|
10
|
+
|
|
11
|
+
# JSON schema field name mapping: Python attribute -> JSON key.
# The Standard Log Format uses PascalCase keys on the wire.
_FIELD_TO_KEY = {
    "timestamp": "Timestamp",
    "observed_timestamp": "ObservedTimestamp",
    "severity_text": "SeverityText",
    "severity_number": "SeverityNumber",
    "body": "Body",
    "resource": "Resource",
    "instrumentation_scope": "InstrumentationScope",
    "attributes": "Attributes",
    "trace_id": "TraceId",
    "span_id": "SpanId",
    "trace_flags": "TraceFlags",
    "event_name": "EventName",
}

# Inverse mapping, used by parse() both to translate keys and as the
# whitelist of allowed JSON keys.
_KEY_TO_FIELD = {v: k for k, v in _FIELD_TO_KEY.items()}

# Required JSON keys per schema
_REQUIRED_KEYS = {"Timestamp", "SeverityText", "SeverityNumber", "Body", "Resource"}

# Allowed SeverityText short names accepted by parse().
_VALID_SEVERITY_TEXTS = {"TRACE", "DEBUG", "INFO", "WARN", "ERROR", "FATAL"}
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def _encode_value(value: Any) -> Any:
|
|
36
|
+
"""Encode a value for JSON serialization, handling AnyValue types.
|
|
37
|
+
|
|
38
|
+
If a value cannot be serialized (e.g. circular reference),
|
|
39
|
+
return "[unserializable]".
|
|
40
|
+
"""
|
|
41
|
+
if value is None:
|
|
42
|
+
return None
|
|
43
|
+
if isinstance(value, (str, int, float, bool)):
|
|
44
|
+
return value
|
|
45
|
+
if isinstance(value, dict):
|
|
46
|
+
return {k: _encode_value(v) for k, v in value.items()}
|
|
47
|
+
if isinstance(value, (list, tuple)):
|
|
48
|
+
return [_encode_value(item) for item in value]
|
|
49
|
+
# Fallback for unserializable types
|
|
50
|
+
try:
|
|
51
|
+
json.dumps(value)
|
|
52
|
+
return value
|
|
53
|
+
except (TypeError, ValueError, OverflowError):
|
|
54
|
+
return "[unserializable]"
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
def _format_timestamp(dt: datetime) -> str:
|
|
58
|
+
"""Format a datetime as ISO 8601 UTC string."""
|
|
59
|
+
# Ensure UTC
|
|
60
|
+
if dt.tzinfo is None:
|
|
61
|
+
dt = dt.replace(tzinfo=timezone.utc)
|
|
62
|
+
else:
|
|
63
|
+
dt = dt.astimezone(timezone.utc)
|
|
64
|
+
return dt.isoformat().replace("+00:00", "Z")
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
def _parse_timestamp(value: str) -> datetime:
|
|
68
|
+
"""Parse an ISO 8601 timestamp string to a UTC datetime."""
|
|
69
|
+
# Handle 'Z' suffix
|
|
70
|
+
s = value.replace("Z", "+00:00")
|
|
71
|
+
return datetime.fromisoformat(s).astimezone(timezone.utc)
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
class LogFormatter:
    """Serializes LogRecord to JSON and parses JSON back to LogRecord.

    Conforms to the Standard Log Format JSON schema. Both operations are
    stateless, hence the staticmethods.
    """

    @staticmethod
    def serialize(record: LogRecord) -> str:
        """Serialize a LogRecord to a JSON string.

        - Omits fields that are None/null.
        - Encodes timestamps as ISO 8601 UTC.
        - Handles AnyValue types in Body and Attributes.
        - Falls back to "[unserializable]" for values that can't be serialized.

        Note: serialize() does not enforce the schema's required keys — a
        record with timestamp=None simply omits Timestamp from the output.
        """
        data: Dict[str, Any] = {}

        # Timestamp (required by schema)
        if record.timestamp is not None:
            data["Timestamp"] = _format_timestamp(record.timestamp)

        # ObservedTimestamp (optional)
        if record.observed_timestamp is not None:
            data["ObservedTimestamp"] = _format_timestamp(record.observed_timestamp)

        # SeverityText (required)
        data["SeverityText"] = record.severity_text

        # SeverityNumber (required)
        data["SeverityNumber"] = record.severity_number

        # Body (required)
        data["Body"] = _encode_value(record.body)

        # Resource (required)
        data["Resource"] = _encode_value(record.resource)

        # InstrumentationScope (optional)
        if record.instrumentation_scope is not None:
            data["InstrumentationScope"] = _encode_value(record.instrumentation_scope)

        # Attributes (optional)
        if record.attributes is not None:
            data["Attributes"] = _encode_value(record.attributes)

        # Trace context — omit if absent (Req 1.4)
        if record.trace_id is not None:
            data["TraceId"] = record.trace_id
        if record.span_id is not None:
            data["SpanId"] = record.span_id
        if record.trace_flags is not None:
            data["TraceFlags"] = record.trace_flags

        # EventName (optional)
        if record.event_name is not None:
            data["EventName"] = record.event_name

        # Compact separators keep one-record-per-line output small.
        return json.dumps(data, separators=(",", ":"))

    @staticmethod
    def parse(json_str: str) -> LogRecord:
        """Parse a JSON string into a LogRecord.

        Validation is performed in a fixed sequence (JSON shape, required
        keys, unknown keys, per-field checks); the first failure wins.
        Raises ValueError with a descriptive message for invalid input.
        """
        # Parse JSON
        try:
            data = json.loads(json_str)
        except json.JSONDecodeError as e:
            raise ValueError(f"Invalid JSON: {e}") from e

        if not isinstance(data, dict):
            raise ValueError("JSON root must be an object")

        # Validate required fields.
        # NOTE: _REQUIRED_KEYS is a set, so when several required keys are
        # missing, which one is reported first is unspecified.
        for key in _REQUIRED_KEYS:
            if key not in data:
                raise ValueError(f"Missing required field: {key}")

        # Validate no unknown fields
        allowed_keys = set(_KEY_TO_FIELD.keys())
        unknown = set(data.keys()) - allowed_keys
        if unknown:
            raise ValueError(f"Unknown fields: {', '.join(sorted(unknown))}")

        # Validate SeverityText
        severity_text = data["SeverityText"]
        if not isinstance(severity_text, str):
            raise ValueError(f"SeverityText must be a string, got {type(severity_text).__name__}")
        if severity_text not in _VALID_SEVERITY_TEXTS:
            raise ValueError(
                f"Invalid SeverityText: '{severity_text}'. "
                f"Must be one of: {', '.join(sorted(_VALID_SEVERITY_TEXTS))}"
            )

        # Validate SeverityNumber.
        # bool is a subclass of int, so it must be excluded explicitly.
        severity_number = data["SeverityNumber"]
        if not isinstance(severity_number, int) or isinstance(severity_number, bool):
            raise ValueError(f"SeverityNumber must be an integer, got {type(severity_number).__name__}")
        if not (0 <= severity_number <= 24):
            raise ValueError(f"SeverityNumber must be 0-24, got {severity_number}")

        # Validate Resource
        resource = data["Resource"]
        if not isinstance(resource, dict):
            raise ValueError(f"Resource must be an object, got {type(resource).__name__}")
        if "service.name" not in resource:
            raise ValueError("Resource must contain 'service.name'")

        # Parse timestamps
        timestamp: Optional[datetime] = None
        if "Timestamp" in data:
            try:
                timestamp = _parse_timestamp(data["Timestamp"])
            except (ValueError, TypeError) as e:
                raise ValueError(f"Invalid Timestamp format: {e}") from e

        observed_timestamp: Optional[datetime] = None
        if "ObservedTimestamp" in data:
            try:
                observed_timestamp = _parse_timestamp(data["ObservedTimestamp"])
            except (ValueError, TypeError) as e:
                raise ValueError(f"Invalid ObservedTimestamp format: {e}") from e

        # Validate trace context formats if present.
        # NOTE(review): these checks verify type and length only; the error
        # messages say "hex string" but hex content is not actually checked.
        trace_id = data.get("TraceId")
        if trace_id is not None:
            if not isinstance(trace_id, str) or len(trace_id) != 32:
                raise ValueError(f"TraceId must be a 32-character hex string, got '{trace_id}'")

        span_id = data.get("SpanId")
        if span_id is not None:
            if not isinstance(span_id, str) or len(span_id) != 16:
                raise ValueError(f"SpanId must be a 16-character hex string, got '{span_id}'")

        trace_flags = data.get("TraceFlags")
        if trace_flags is not None:
            if not isinstance(trace_flags, str) or len(trace_flags) != 2:
                raise ValueError(f"TraceFlags must be a 2-character hex string, got '{trace_flags}'")

        # Validate InstrumentationScope
        instrumentation_scope = data.get("InstrumentationScope")
        if instrumentation_scope is not None and not isinstance(instrumentation_scope, dict):
            raise ValueError(
                f"InstrumentationScope must be an object, got {type(instrumentation_scope).__name__}"
            )

        # Validate Attributes
        attributes = data.get("Attributes")
        if attributes is not None and not isinstance(attributes, dict):
            raise ValueError(f"Attributes must be an object, got {type(attributes).__name__}")

        return LogRecord(
            timestamp=timestamp,
            observed_timestamp=observed_timestamp,
            severity_text=severity_text,
            severity_number=severity_number,
            body=data["Body"],
            resource=resource,
            instrumentation_scope=instrumentation_scope,
            attributes=attributes,
            trace_id=trace_id,
            span_id=span_id,
            trace_flags=trace_flags,
            event_name=data.get("EventName"),
        )
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
"""LogRecord dataclass representing an OpenTelemetry log entry."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from dataclasses import dataclass, field
|
|
6
|
+
from datetime import datetime
|
|
7
|
+
from typing import Any, Dict, List, Optional, Union
|
|
8
|
+
|
|
9
|
+
# AnyValue type: string, int, float, bool, list, dict, or None
# (mirrors the "any" value type of the OTel Log Data Model).
AnyValue = Union[str, int, float, bool, List[Any], Dict[str, Any], None]

# Allowed SeverityText short names.
VALID_SEVERITY_TEXTS = frozenset({"TRACE", "DEBUG", "INFO", "WARN", "ERROR", "FATAL"})


@dataclass
class LogRecord:
    """Canonical in-memory representation of an OTel log entry.

    Fields align with the OpenTelemetry Log Data Model and the
    Standard Log Format JSON schema.
    """

    # Severity short name; expected to be one of VALID_SEVERITY_TEXTS.
    severity_text: str = "INFO"
    # Numeric severity; 9 corresponds to INFO.
    severity_number: int = 9
    # Log body; any AnyValue, not necessarily a string.
    body: Any = ""
    # Resource attributes; always carries at least "service.name".
    resource: Dict[str, AnyValue] = field(default_factory=lambda: {"service.name": "unknown_service"})
    # Event timestamp; None means the field is omitted when serialized.
    timestamp: Optional[datetime] = None
    # Time the log was observed, when tracked separately from the event time.
    observed_timestamp: Optional[datetime] = None
    # Optional instrumentation scope metadata (string-to-string mapping).
    instrumentation_scope: Optional[Dict[str, str]] = None
    # Optional free-form attributes attached to this record.
    attributes: Optional[Dict[str, AnyValue]] = None
    # W3C trace context, when a span was active: 32-char hex trace id,
    # 16-char hex span id, 2-char hex flags. All None when absent.
    trace_id: Optional[str] = None
    span_id: Optional[str] = None
    trace_flags: Optional[str] = None
    # Optional event name.
    event_name: Optional[str] = None
|
|
@@ -0,0 +1,204 @@
|
|
|
1
|
+
"""OTLPExporter: exports LogRecords to an OpenTelemetry Collector.
|
|
2
|
+
|
|
3
|
+
Supports gRPC and HTTP protocols with batch export, exponential backoff retry,
|
|
4
|
+
and graceful fallback to stdout when the SDK is unavailable or no endpoint is
|
|
5
|
+
configured.
|
|
6
|
+
|
|
7
|
+
Requirements: 9.1-9.6
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from __future__ import annotations
|
|
11
|
+
|
|
12
|
+
import json
|
|
13
|
+
import sys
|
|
14
|
+
import time
|
|
15
|
+
from dataclasses import dataclass, field
|
|
16
|
+
from typing import Dict, List, Optional
|
|
17
|
+
|
|
18
|
+
from .log_formatter import LogFormatter
|
|
19
|
+
from .log_record import LogRecord
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
@dataclass
class ExporterConfig:
    """Configuration for OTLPExporter.

    Attributes:
        endpoint: OTLP collector endpoint URL (e.g. "http://localhost:4317").
            When empty, falls back to console (stdout) mode.
        protocol: Transport protocol — "grpc" or "http" (default "grpc").
        headers: Optional dict of HTTP/gRPC headers (e.g. auth tokens).
        tls_cert_path: Optional path to a TLS certificate file.
        batch_size: Maximum number of records per export batch (default 512).
        flush_interval_ms: Interval in milliseconds between automatic flushes (default 5000).
        max_retries: Maximum number of retry attempts on failure (default 3).
        retry_backoff_ms: Base backoff in milliseconds for exponential retry (default 1000).
    """

    endpoint: str = ""
    protocol: str = "grpc"  # lowercased by OTLPExporter; anything != "grpc" selects HTTP
    headers: Optional[Dict[str, str]] = None
    # NOTE(review): tls_cert_path is not read anywhere in this module's
    # OTLPExporter — confirm where (or whether) it is consumed.
    tls_cert_path: Optional[str] = None
    batch_size: int = 512
    # NOTE(review): no timer in OTLPExporter reads flush_interval_ms —
    # confirm the automatic-flush behaviour it documents actually exists.
    flush_interval_ms: int = 5000
    max_retries: int = 3
    retry_backoff_ms: int = 1000
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
class OTLPExporter:
    """Exports LogRecords to an OpenTelemetry Collector via OTLP.

    When an endpoint is configured, attempts to use the OTel SDK exporters
    (opentelemetry-exporter-otlp-proto-grpc for gRPC,
    opentelemetry-exporter-otlp-proto-http for HTTP).

    If the SDK is unavailable or no endpoint is configured, falls back to
    writing JSON-serialized records to stdout (console-only mode).

    Retry behaviour:
        On export failure, retries up to ``config.max_retries`` times with
        exponential backoff starting at ``config.retry_backoff_ms`` ms.
        After exhausting retries, records are dropped and a warning is written
        to stderr. The exporter never raises exceptions to the caller.
    """

    def __init__(self, config: Optional[ExporterConfig] = None) -> None:
        """Create an exporter; initialises an SDK exporter when an endpoint is set."""
        self._config = config or ExporterConfig()
        self._formatter = LogFormatter()
        # NOTE(review): nothing in this class ever appends to _pending, so
        # force_flush() is currently a no-op unless some external caller
        # queues records here — verify the intended producer.
        self._pending: List[LogRecord] = []
        # SDK exporter instance, or None for console-only fallback;
        # set below when an endpoint is configured.
        self._sdk_exporter = None  # lazy-initialised on first export

        # Attempt to initialise the SDK exporter if an endpoint is provided.
        if self._config.endpoint:
            self._sdk_exporter = self._init_sdk_exporter()

    # ------------------------------------------------------------------
    # Public API
    # ------------------------------------------------------------------

    def export(self, records: List[LogRecord]) -> bool:
        """Export a list of LogRecords, batching by configured batch_size.

        Returns True when all batches were exported successfully, False if any
        batch failed after exhausting retries (records are dropped in that case).
        An empty input is trivially successful.
        """
        if not records:
            return True

        all_ok = True
        # Split into batches of at most batch_size.
        for i in range(0, len(records), self._config.batch_size):
            batch = records[i : i + self._config.batch_size]
            ok = self._export_batch(batch)
            if not ok:
                all_ok = False
        return all_ok

    def shutdown(self) -> None:
        """Flush pending records and shut down the exporter.

        SDK shutdown errors are deliberately swallowed — shutdown is
        best-effort and must never raise.
        """
        self.force_flush()
        if self._sdk_exporter is not None:
            try:
                self._sdk_exporter.shutdown()
            except Exception:
                pass

    def force_flush(self) -> None:
        """Immediately flush any pending records."""
        if self._pending:
            # Copy-then-clear so a failing export cannot leave duplicates queued.
            pending = list(self._pending)
            self._pending.clear()
            self.export(pending)

    # ------------------------------------------------------------------
    # Internal helpers
    # ------------------------------------------------------------------

    def _export_batch(self, batch: List[LogRecord]) -> bool:
        """Export a single batch with retry/backoff. Returns True on success."""
        if self._sdk_exporter is not None:
            return self._export_via_sdk(batch)
        # Console fallback: write JSON to stdout.
        self._console_export(batch)
        return True

    def _export_via_sdk(self, batch: List[LogRecord]) -> bool:
        """Try to export via the SDK exporter with exponential backoff.

        NOTE(review): ``batch`` contains this package's LogRecord objects;
        the OTel SDK exporter's export() is documented for SDK log data
        types — confirm the SDK accepts these records as-is.
        """
        backoff_ms = self._config.retry_backoff_ms
        for attempt in range(self._config.max_retries + 1):
            try:
                result = self._sdk_exporter.export(batch)
                # OTel SDK returns ExportResult enum; SUCCESS == 0.
                if hasattr(result, "value"):
                    if result.value == 0:
                        return True
                    # Non-success result — fall through to the backoff below
                    # and retry.
                else:
                    # If no value attribute, treat as success.
                    return True
            except Exception as exc:
                if attempt == self._config.max_retries:
                    sys.stderr.write(
                        f"[OTLPExporter] Export failed after {self._config.max_retries} retries, "
                        f"dropping {len(batch)} records. Last error: {exc}\n"
                    )
                    return False
            if attempt < self._config.max_retries:
                time.sleep(backoff_ms / 1000.0)
                backoff_ms *= 2  # exponential backoff
        # Exhausted retries (all attempts returned a non-success result).
        sys.stderr.write(
            f"[OTLPExporter] Export failed after {self._config.max_retries} retries, "
            f"dropping {len(batch)} records.\n"
        )
        return False

    def _console_export(self, batch: List[LogRecord]) -> None:
        """Write JSON-serialized records to stdout (console-only mode).

        Per-record serialization errors are reported to stderr and the
        remaining records are still written.
        """
        for record in batch:
            try:
                json_line = self._formatter.serialize(record)
                sys.stdout.write(json_line + "\n")
            except Exception as exc:
                sys.stderr.write(f"[OTLPExporter] Serialization error: {exc}\n")

    def _init_sdk_exporter(self):
        """Attempt to initialise an OTel SDK exporter.

        Returns the exporter instance on success, or None if the SDK is not
        available (console-only fallback).
        """
        protocol = self._config.protocol.lower()
        endpoint = self._config.endpoint
        headers = self._config.headers or {}

        # Any protocol value other than "grpc" is routed to the HTTP exporter.
        if protocol == "grpc":
            return self._init_grpc_exporter(endpoint, headers)
        else:
            return self._init_http_exporter(endpoint, headers)

    def _init_grpc_exporter(self, endpoint: str, headers: Dict[str, str]):
        """Build the gRPC SDK log exporter, or None if the package is absent."""
        try:
            from opentelemetry.exporter.otlp.proto.grpc._log_exporter import (
                OTLPLogExporter,
            )
            return OTLPLogExporter(endpoint=endpoint, headers=headers)
        except ImportError:
            sys.stderr.write(
                "[OTLPExporter] opentelemetry-exporter-otlp-proto-grpc not available; "
                "falling back to console mode.\n"
            )
            return None

    def _init_http_exporter(self, endpoint: str, headers: Dict[str, str]):
        """Build the HTTP SDK log exporter, or None if the package is absent."""
        try:
            from opentelemetry.exporter.otlp.proto.http._log_exporter import (
                OTLPLogExporter,
            )
            return OTLPLogExporter(endpoint=endpoint, headers=headers)
        except ImportError:
            sys.stderr.write(
                "[OTLPExporter] opentelemetry-exporter-otlp-proto-http not available; "
                "falling back to console mode.\n"
            )
            return None
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
"""ResourceProvider for managing OTel resource attributes.
|
|
2
|
+
|
|
3
|
+
Provides resource attributes (service.name, service.version,
|
|
4
|
+
deployment.environment, and custom attributes) that are attached to every
|
|
5
|
+
LogRecord produced by the logging bridge.
|
|
6
|
+
|
|
7
|
+
If ``service.name`` is not supplied, it defaults to ``"unknown_service"``.
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from __future__ import annotations
|
|
11
|
+
|
|
12
|
+
from dataclasses import dataclass, field
|
|
13
|
+
from typing import Any, Dict, Optional
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
@dataclass
class ResourceConfig:
    """Configuration for OTel resource attributes.

    Attributes:
        service_name: Logical service name; defaults to ``"unknown_service"``.
        service_version: Version string for the service, if known.
        deployment_environment: Deployment environment name (e.g. ``"production"``).
        custom_attributes: Extra resource key-value pairs merged into the output.
    """

    service_name: str = "unknown_service"
    service_version: Optional[str] = None
    deployment_environment: Optional[str] = None
    custom_attributes: Optional[Dict[str, Any]] = None
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
class ResourceProvider:
    """Produces resource attribute dicts from a :class:`ResourceConfig`.

    ``service.name`` is always present in the output. Optional well-known
    fields appear only when configured, and any custom attributes are
    merged over the result afterwards.
    """

    def __init__(self, config: Optional[ResourceConfig] = None) -> None:
        self._config = config or ResourceConfig()

    @property
    def config(self) -> ResourceConfig:
        """The resource configuration currently in use."""
        return self._config

    def get_resource(self) -> Dict[str, Any]:
        """Build the resource attributes dict.

        The result always holds ``"service.name"``; ``"service.version"``
        and ``"deployment.environment"`` are added when configured, and
        any ``custom_attributes`` are merged in last.

        Returns:
            A dict of resource attribute key-value pairs.
        """
        cfg = self._config
        attrs: Dict[str, Any] = {"service.name": cfg.service_name}

        # Well-known optional attributes, emitted only when set.
        optional_pairs = (
            ("service.version", cfg.service_version),
            ("deployment.environment", cfg.deployment_environment),
        )
        attrs.update({key: value for key, value in optional_pairs if value is not None})

        if cfg.custom_attributes:
            attrs.update(cfg.custom_attributes)

        return attrs
|
|
@@ -0,0 +1,127 @@
|
|
|
1
|
+
"""SeverityMapper for mapping Python logging levels to OTel severity values.
|
|
2
|
+
|
|
3
|
+
Maps Python's standard logging levels to OpenTelemetry SeverityNumber ranges
|
|
4
|
+
and SeverityText values per the OTel Log Data Model specification.
|
|
5
|
+
|
|
6
|
+
Mapping table:
|
|
7
|
+
Python Level | SeverityText | SeverityNumber
|
|
8
|
+
NOTSET (0) | TRACE | 1
|
|
9
|
+
DEBUG (10) | DEBUG | 5
|
|
10
|
+
INFO (20) | INFO | 9
|
|
11
|
+
WARNING (30) | WARN | 13
|
|
12
|
+
ERROR (40) | ERROR | 17
|
|
13
|
+
CRITICAL (50) | FATAL | 21
|
|
14
|
+
"""
|
|
15
|
+
|
|
16
|
+
from __future__ import annotations
|
|
17
|
+
|
|
18
|
+
import logging
|
|
19
|
+
from typing import Tuple
|
|
20
|
+
|
|
21
|
+
# Standard Python logging levels mapped to (SeverityNumber, SeverityText).
|
|
22
|
+
# Each maps to the lowest value in the OTel range for that severity band.
|
|
23
|
+
_PYTHON_LEVEL_MAP: dict[int, tuple[int, str]] = {
|
|
24
|
+
logging.NOTSET: (1, "TRACE"), # NOTSET=0 -> TRACE range 1-4
|
|
25
|
+
logging.DEBUG: (5, "DEBUG"), # DEBUG=10 -> DEBUG range 5-8
|
|
26
|
+
logging.INFO: (9, "INFO"), # INFO=20 -> INFO range 9-12
|
|
27
|
+
logging.WARNING: (13, "WARN"), # WARNING=30 -> WARN range 13-16
|
|
28
|
+
logging.ERROR: (17, "ERROR"), # ERROR=40 -> ERROR range 17-20
|
|
29
|
+
logging.CRITICAL: (21, "FATAL"), # CRITICAL=50 -> FATAL range 21-24
|
|
30
|
+
}
|
|
31
|
+
|
|
32
|
+
# Sorted standard levels for closest-match lookup.
|
|
33
|
+
_SORTED_LEVELS = sorted(_PYTHON_LEVEL_MAP.keys())
|
|
34
|
+
|
|
35
|
+
# Recognized severity strings (case-insensitive) to (SeverityNumber, SeverityText).
|
|
36
|
+
_SEVERITY_STRING_MAP: dict[str, tuple[int, str]] = {
|
|
37
|
+
"TRACE": (1, "TRACE"),
|
|
38
|
+
"DEBUG": (5, "DEBUG"),
|
|
39
|
+
"INFO": (9, "INFO"),
|
|
40
|
+
"WARN": (13, "WARN"),
|
|
41
|
+
"WARNING": (13, "WARN"),
|
|
42
|
+
"ERROR": (17, "ERROR"),
|
|
43
|
+
"FATAL": (21, "FATAL"),
|
|
44
|
+
"CRITICAL": (21, "FATAL"),
|
|
45
|
+
# Python-specific level names
|
|
46
|
+
"NOTSET": (1, "TRACE"),
|
|
47
|
+
}
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def _find_closest_level(python_level: int) -> int:
|
|
51
|
+
"""Find the closest standard Python logging level to the given value."""
|
|
52
|
+
closest = _SORTED_LEVELS[0]
|
|
53
|
+
min_distance = abs(python_level - closest)
|
|
54
|
+
for level in _SORTED_LEVELS[1:]:
|
|
55
|
+
distance = abs(python_level - level)
|
|
56
|
+
if distance < min_distance:
|
|
57
|
+
min_distance = distance
|
|
58
|
+
closest = level
|
|
59
|
+
elif distance == min_distance and level < closest:
|
|
60
|
+
# Tie-break: prefer the lower standard level
|
|
61
|
+
closest = level
|
|
62
|
+
return closest
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
class SeverityMapper:
    """Translates between Python logging levels and OTel severity values."""

    @staticmethod
    def to_severity_number(python_level: int) -> int:
        """Map a Python logging level (int) to an OTel SeverityNumber.

        Standard levels (NOTSET, DEBUG, INFO, WARNING, ERROR, CRITICAL)
        map to the lowest SeverityNumber of their OTel band; any other
        integer is first snapped to the closest standard level.

        Args:
            python_level: A Python logging level integer.

        Returns:
            An OTel SeverityNumber (1-24).
        """
        key = (
            python_level
            if python_level in _PYTHON_LEVEL_MAP
            else _find_closest_level(python_level)
        )
        return _PYTHON_LEVEL_MAP[key][0]

    @staticmethod
    def to_severity_text(python_level: int) -> str:
        """Map a Python logging level (int) to an OTel SeverityText.

        Standard levels map directly; any other integer is first snapped
        to the closest standard level.

        Args:
            python_level: A Python logging level integer.

        Returns:
            An OTel SeverityText string (TRACE, DEBUG, INFO, WARN, ERROR, FATAL).
        """
        key = (
            python_level
            if python_level in _PYTHON_LEVEL_MAP
            else _find_closest_level(python_level)
        )
        return _PYTHON_LEVEL_MAP[key][1]

    @staticmethod
    def from_string(severity_str: str) -> Tuple[int, str]:
        """Map a severity string to (SeverityNumber, SeverityText).

        Recognizes standard OTel severity names (TRACE, DEBUG, INFO, WARN,
        ERROR, FATAL) and Python-specific names (WARNING, CRITICAL, NOTSET),
        case-insensitively. Unrecognized strings yield SeverityNumber 0
        (UNSPECIFIED) with the original string preserved as SeverityText.

        Args:
            severity_str: A severity level name string.

        Returns:
            A tuple of (SeverityNumber, SeverityText).
        """
        lookup_key = severity_str.strip().upper()
        try:
            return _SEVERITY_STRING_MAP[lookup_key]
        except KeyError:
            return (0, severity_str)
|
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
"""TraceContextExtractor for extracting trace context from the active OTel span.
|
|
2
|
+
|
|
3
|
+
Extracts TraceId, SpanId, and TraceFlags from the current OpenTelemetry span
|
|
4
|
+
context. Returns None when no active span exists or the OTel SDK is unavailable.
|
|
5
|
+
|
|
6
|
+
Format requirements (per OTel spec):
|
|
7
|
+
TraceId: 32-character lowercase hexadecimal string
|
|
8
|
+
SpanId: 16-character lowercase hexadecimal string
|
|
9
|
+
TraceFlags: 2-character zero-padded hexadecimal string
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
from __future__ import annotations
|
|
13
|
+
|
|
14
|
+
from dataclasses import dataclass
|
|
15
|
+
from typing import Optional
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
@dataclass
class TraceContext:
    """Trace context captured from an active OpenTelemetry span.

    Attributes:
        trace_id: 32-character lowercase hexadecimal string.
        span_id: 16-character lowercase hexadecimal string.
        trace_flags: 2-character zero-padded hexadecimal string.
    """

    trace_id: str
    span_id: str
    trace_flags: str
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
class TraceContextExtractor:
    """Extracts trace context from the active OpenTelemetry span."""

    @staticmethod
    def extract() -> Optional[TraceContext]:
        """Extract trace context from the active OTel span.

        Uses ``opentelemetry.trace.get_current_span()`` to obtain the active
        span, then reads its SpanContext for TraceId, SpanId, and TraceFlags.

        The deciding check is ``SpanContext.is_valid``, not
        ``span.is_recording()``: a non-recording span (e.g. one carrying a
        remotely propagated, sampled-out context) still has a valid
        TraceId/SpanId that should be attached to logs for correlation.
        The previous ``is_recording()`` gate silently dropped that context.

        Returns:
            A :class:`TraceContext` if a span with a valid context is
            active, or ``None`` if no active span exists, the span context
            is invalid, or the OpenTelemetry SDK is not installed.
        """
        try:
            from opentelemetry import trace

            span = trace.get_current_span()
            if span is None:
                return None

            ctx = span.get_span_context()
            # INVALID_SPAN yields an all-zero (invalid) context.
            if ctx is None or not ctx.is_valid:
                return None

            # Format per OTel spec: lowercase hex, fixed width.
            return TraceContext(
                trace_id=format(ctx.trace_id, "032x"),
                span_id=format(ctx.span_id, "016x"),
                trace_flags=format(ctx.trace_flags, "02x"),
            )
        except Exception:
            # Gracefully handle a missing SDK or any unexpected errors.
            return None
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: otel-log
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: OpenTelemetry Standard Log Format library for Python
|
|
5
|
+
Requires-Python: >=3.9
|
|
6
|
+
Requires-Dist: opentelemetry-api>=1.20.0
|
|
7
|
+
Requires-Dist: opentelemetry-sdk>=1.20.0
|
|
8
|
+
Requires-Dist: opentelemetry-exporter-otlp>=1.20.0
|
|
9
|
+
Provides-Extra: dev
|
|
10
|
+
Requires-Dist: pytest>=7.0; extra == "dev"
|
|
11
|
+
Requires-Dist: hypothesis>=6.0; extra == "dev"
|
|
12
|
+
Requires-Dist: jsonschema>=4.0; extra == "dev"
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
pyproject.toml
|
|
2
|
+
src/otel_log/__init__.py
|
|
3
|
+
src/otel_log/handler.py
|
|
4
|
+
src/otel_log/log_formatter.py
|
|
5
|
+
src/otel_log/log_record.py
|
|
6
|
+
src/otel_log/otlp_exporter.py
|
|
7
|
+
src/otel_log/resource_provider.py
|
|
8
|
+
src/otel_log/severity_mapper.py
|
|
9
|
+
src/otel_log/trace_context.py
|
|
10
|
+
src/otel_log.egg-info/PKG-INFO
|
|
11
|
+
src/otel_log.egg-info/SOURCES.txt
|
|
12
|
+
src/otel_log.egg-info/dependency_links.txt
|
|
13
|
+
src/otel_log.egg-info/requires.txt
|
|
14
|
+
src/otel_log.egg-info/top_level.txt
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
otel_log
|