provide-foundation 0.0.0.dev0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- provide/__init__.py +15 -0
- provide/foundation/__init__.py +155 -0
- provide/foundation/_version.py +58 -0
- provide/foundation/cli/__init__.py +67 -0
- provide/foundation/cli/commands/__init__.py +3 -0
- provide/foundation/cli/commands/deps.py +71 -0
- provide/foundation/cli/commands/logs/__init__.py +63 -0
- provide/foundation/cli/commands/logs/generate.py +357 -0
- provide/foundation/cli/commands/logs/generate_old.py +569 -0
- provide/foundation/cli/commands/logs/query.py +174 -0
- provide/foundation/cli/commands/logs/send.py +166 -0
- provide/foundation/cli/commands/logs/tail.py +112 -0
- provide/foundation/cli/decorators.py +262 -0
- provide/foundation/cli/main.py +65 -0
- provide/foundation/cli/testing.py +220 -0
- provide/foundation/cli/utils.py +210 -0
- provide/foundation/config/__init__.py +106 -0
- provide/foundation/config/base.py +295 -0
- provide/foundation/config/env.py +369 -0
- provide/foundation/config/loader.py +311 -0
- provide/foundation/config/manager.py +387 -0
- provide/foundation/config/schema.py +284 -0
- provide/foundation/config/sync.py +281 -0
- provide/foundation/config/types.py +78 -0
- provide/foundation/config/validators.py +80 -0
- provide/foundation/console/__init__.py +29 -0
- provide/foundation/console/input.py +364 -0
- provide/foundation/console/output.py +178 -0
- provide/foundation/context/__init__.py +12 -0
- provide/foundation/context/core.py +356 -0
- provide/foundation/core.py +20 -0
- provide/foundation/crypto/__init__.py +182 -0
- provide/foundation/crypto/algorithms.py +111 -0
- provide/foundation/crypto/certificates.py +896 -0
- provide/foundation/crypto/checksums.py +301 -0
- provide/foundation/crypto/constants.py +57 -0
- provide/foundation/crypto/hashing.py +265 -0
- provide/foundation/crypto/keys.py +188 -0
- provide/foundation/crypto/signatures.py +144 -0
- provide/foundation/crypto/utils.py +164 -0
- provide/foundation/errors/__init__.py +96 -0
- provide/foundation/errors/auth.py +73 -0
- provide/foundation/errors/base.py +81 -0
- provide/foundation/errors/config.py +103 -0
- provide/foundation/errors/context.py +299 -0
- provide/foundation/errors/decorators.py +484 -0
- provide/foundation/errors/handlers.py +360 -0
- provide/foundation/errors/integration.py +105 -0
- provide/foundation/errors/platform.py +37 -0
- provide/foundation/errors/process.py +140 -0
- provide/foundation/errors/resources.py +133 -0
- provide/foundation/errors/runtime.py +160 -0
- provide/foundation/errors/safe_decorators.py +133 -0
- provide/foundation/errors/types.py +276 -0
- provide/foundation/file/__init__.py +79 -0
- provide/foundation/file/atomic.py +157 -0
- provide/foundation/file/directory.py +134 -0
- provide/foundation/file/formats.py +236 -0
- provide/foundation/file/lock.py +175 -0
- provide/foundation/file/safe.py +179 -0
- provide/foundation/file/utils.py +170 -0
- provide/foundation/hub/__init__.py +88 -0
- provide/foundation/hub/click_builder.py +310 -0
- provide/foundation/hub/commands.py +42 -0
- provide/foundation/hub/components.py +640 -0
- provide/foundation/hub/decorators.py +244 -0
- provide/foundation/hub/info.py +32 -0
- provide/foundation/hub/manager.py +446 -0
- provide/foundation/hub/registry.py +279 -0
- provide/foundation/hub/type_mapping.py +54 -0
- provide/foundation/hub/types.py +28 -0
- provide/foundation/logger/__init__.py +41 -0
- provide/foundation/logger/base.py +22 -0
- provide/foundation/logger/config/__init__.py +16 -0
- provide/foundation/logger/config/base.py +40 -0
- provide/foundation/logger/config/logging.py +394 -0
- provide/foundation/logger/config/telemetry.py +188 -0
- provide/foundation/logger/core.py +239 -0
- provide/foundation/logger/custom_processors.py +172 -0
- provide/foundation/logger/emoji/__init__.py +44 -0
- provide/foundation/logger/emoji/matrix.py +209 -0
- provide/foundation/logger/emoji/sets.py +458 -0
- provide/foundation/logger/emoji/types.py +56 -0
- provide/foundation/logger/factories.py +56 -0
- provide/foundation/logger/processors/__init__.py +13 -0
- provide/foundation/logger/processors/main.py +254 -0
- provide/foundation/logger/processors/trace.py +113 -0
- provide/foundation/logger/ratelimit/__init__.py +31 -0
- provide/foundation/logger/ratelimit/limiters.py +294 -0
- provide/foundation/logger/ratelimit/processor.py +203 -0
- provide/foundation/logger/ratelimit/queue_limiter.py +305 -0
- provide/foundation/logger/setup/__init__.py +29 -0
- provide/foundation/logger/setup/coordinator.py +138 -0
- provide/foundation/logger/setup/emoji_resolver.py +64 -0
- provide/foundation/logger/setup/processors.py +85 -0
- provide/foundation/logger/setup/testing.py +39 -0
- provide/foundation/logger/trace.py +38 -0
- provide/foundation/metrics/__init__.py +119 -0
- provide/foundation/metrics/otel.py +122 -0
- provide/foundation/metrics/simple.py +165 -0
- provide/foundation/observability/__init__.py +53 -0
- provide/foundation/observability/openobserve/__init__.py +79 -0
- provide/foundation/observability/openobserve/auth.py +72 -0
- provide/foundation/observability/openobserve/client.py +307 -0
- provide/foundation/observability/openobserve/commands.py +357 -0
- provide/foundation/observability/openobserve/exceptions.py +41 -0
- provide/foundation/observability/openobserve/formatters.py +298 -0
- provide/foundation/observability/openobserve/models.py +134 -0
- provide/foundation/observability/openobserve/otlp.py +320 -0
- provide/foundation/observability/openobserve/search.py +222 -0
- provide/foundation/observability/openobserve/streaming.py +235 -0
- provide/foundation/platform/__init__.py +44 -0
- provide/foundation/platform/detection.py +193 -0
- provide/foundation/platform/info.py +157 -0
- provide/foundation/process/__init__.py +39 -0
- provide/foundation/process/async_runner.py +373 -0
- provide/foundation/process/lifecycle.py +406 -0
- provide/foundation/process/runner.py +390 -0
- provide/foundation/setup/__init__.py +101 -0
- provide/foundation/streams/__init__.py +44 -0
- provide/foundation/streams/console.py +57 -0
- provide/foundation/streams/core.py +65 -0
- provide/foundation/streams/file.py +104 -0
- provide/foundation/testing/__init__.py +166 -0
- provide/foundation/testing/cli.py +227 -0
- provide/foundation/testing/crypto.py +163 -0
- provide/foundation/testing/fixtures.py +49 -0
- provide/foundation/testing/hub.py +23 -0
- provide/foundation/testing/logger.py +106 -0
- provide/foundation/testing/streams.py +54 -0
- provide/foundation/tracer/__init__.py +49 -0
- provide/foundation/tracer/context.py +115 -0
- provide/foundation/tracer/otel.py +135 -0
- provide/foundation/tracer/spans.py +174 -0
- provide/foundation/types.py +32 -0
- provide/foundation/utils/__init__.py +97 -0
- provide/foundation/utils/deps.py +195 -0
- provide/foundation/utils/env.py +491 -0
- provide/foundation/utils/formatting.py +483 -0
- provide/foundation/utils/parsing.py +235 -0
- provide/foundation/utils/rate_limiting.py +112 -0
- provide/foundation/utils/streams.py +67 -0
- provide/foundation/utils/timing.py +93 -0
- provide_foundation-0.0.0.dev0.dist-info/METADATA +469 -0
- provide_foundation-0.0.0.dev0.dist-info/RECORD +149 -0
- provide_foundation-0.0.0.dev0.dist-info/WHEEL +5 -0
- provide_foundation-0.0.0.dev0.dist-info/entry_points.txt +2 -0
- provide_foundation-0.0.0.dev0.dist-info/licenses/LICENSE +201 -0
- provide_foundation-0.0.0.dev0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,203 @@
|
|
1
|
+
#
|
2
|
+
# processor.py
|
3
|
+
#
|
4
|
+
"""
|
5
|
+
Structlog processor for rate limiting log messages.
|
6
|
+
"""
|
7
|
+
|
8
|
+
import time
|
9
|
+
from typing import Any
|
10
|
+
|
11
|
+
import structlog
|
12
|
+
|
13
|
+
from provide.foundation.logger.ratelimit.limiters import GlobalRateLimiter
|
14
|
+
|
15
|
+
|
16
|
+
class RateLimiterProcessor:
    """
    Structlog processor that applies rate limiting to log messages.

    Delegates the allow/deny decision to a ``GlobalRateLimiter`` and keeps
    per-logger bookkeeping so it can emit throttled "rate limited" warnings
    and periodic summaries of how many messages were suppressed.
    Can be configured with global and per-logger rate limits.
    """

    def __init__(
        self,
        emit_warning_on_limit: bool = True,
        warning_interval_seconds: float = 60.0,
        summary_interval_seconds: float = 5.0,
    ) -> None:
        """
        Initialize the rate limiter processor.

        Args:
            emit_warning_on_limit: Whether to emit a warning when rate limited
            warning_interval_seconds: Minimum seconds between rate limit warnings
            summary_interval_seconds: Interval for rate limit summary reports
        """
        self.rate_limiter = GlobalRateLimiter()
        self.emit_warning_on_limit = emit_warning_on_limit
        self.warning_interval_seconds = warning_interval_seconds

        # Track last warning time per logger (monotonic seconds), so the
        # "rate limited" warning itself cannot flood the output.
        self.last_warning_times: dict[str, float] = {}

        # Track suppressed message counts per logger name; cleared after
        # each summary is emitted.
        self.suppressed_counts: dict[str, int] = {}
        self.last_summary_time = time.monotonic()
        self.summary_interval = summary_interval_seconds  # Emit summary periodically

    def __call__(
        self, logger: Any, method_name: str, event_dict: structlog.types.EventDict
    ) -> structlog.types.EventDict:
        """
        Process a log event, applying rate limiting.

        Args:
            logger: The logger instance
            method_name: The log method name (debug, info, etc.)
            event_dict: The event dictionary

        Returns:
            The event dictionary if allowed, or raises DropEvent if rate limited

        Raises:
            structlog.DropEvent: When the event is rate limited and no
                warning is due for this logger.
        """
        logger_name = event_dict.get("logger_name", "unknown")

        # Check if this log is allowed (pass event_dict for tracking)
        allowed, reason = self.rate_limiter.is_allowed(logger_name, event_dict)

        if not allowed:
            # Track suppressed count
            if logger_name not in self.suppressed_counts:
                self.suppressed_counts[logger_name] = 0
            self.suppressed_counts[logger_name] += 1

            # Optionally emit a warning about rate limiting
            if self.emit_warning_on_limit:
                now = time.monotonic()
                last_warning = self.last_warning_times.get(logger_name, 0)

                if now - last_warning >= self.warning_interval_seconds:
                    # Create a rate limit warning event
                    self.last_warning_times[logger_name] = now

                    # Return a modified event indicating rate limiting.
                    # NOTE: this replaces the suppressed event entirely; the
                    # original payload is dropped and only the counts survive.
                    return {
                        "event": f"⚠️ Rate limit: {reason}",
                        "level": "warning",
                        "logger_name": "provide.foundation.ratelimit",
                        "suppressed_count": self.suppressed_counts[logger_name],
                        "original_logger": logger_name,
                        "_rate_limit_warning": True,
                    }

            # Drop the event
            raise structlog.DropEvent

        # Check if we should emit a summary (only evaluated on the allowed
        # path, so summaries piggyback on normal logging traffic).
        now = time.monotonic()
        if now - self.last_summary_time >= self.summary_interval:
            # Always check and emit summary if there's been any rate limiting
            self._emit_summary()
            self.last_summary_time = now

        return event_dict

    def _emit_summary(self) -> None:
        """Emit a summary of rate-limited messages and reset the counts."""
        # Get current stats first to check if any rate limiting has occurred
        stats = self.rate_limiter.get_stats()

        # Check if there's been any rate limiting activity
        global_stats = stats.get("global", {})
        total_denied = global_stats.get("total_denied", 0)

        if not self.suppressed_counts and total_denied == 0:
            return  # No rate limiting activity to report

        total_suppressed = sum(self.suppressed_counts.values())

        # Get a logger for rate limit summaries. Imported lazily to avoid a
        # circular import with the logger package at module load time.
        try:
            from provide.foundation.logger import get_logger

            summary_logger = get_logger("provide.foundation.ratelimit.summary")

            # Calculate rate limiting percentage
            total_allowed = global_stats.get("total_allowed", 0)
            total_attempts = total_allowed + total_denied
            if total_attempts > 0:
                denial_rate = (total_denied / total_attempts) * 100
            else:
                denial_rate = 0

            # Format the summary message.
            # NOTE(review): the "in last {interval}s" wording assumes summaries
            # fire on schedule; if no log passes through, counts can span
            # longer than one interval — confirm acceptable.
            summary_logger.warning(
                f"⚠️ Rate limiting active: {total_suppressed:,} logs dropped in last {self.summary_interval}s | "
                f"Denial rate: {denial_rate:.1f}% | "
                f"Tokens: {global_stats.get('tokens_available', 0):.0f}/{global_stats.get('capacity', 0):.0f}",
                suppressed_by_logger=dict(self.suppressed_counts)
                if self.suppressed_counts
                else {},
                total_suppressed=total_suppressed,
                total_denied_overall=total_denied,
                total_allowed_overall=total_allowed,
                denial_rate_percent=denial_rate,
                tokens_available=global_stats.get("tokens_available", 0),
                capacity=global_stats.get("capacity", 0),
                refill_rate=global_stats.get("refill_rate", 0),
            )

            # Reset counts after summary
            self.suppressed_counts.clear()
        except Exception:
            # If we can't log the summary, just clear counts — deliberately
            # best-effort so a broken summary path never breaks logging itself.
            self.suppressed_counts.clear()
|
154
|
+
|
155
|
+
|
156
|
+
def create_rate_limiter_processor(
    global_rate: float | None = None,
    global_capacity: float | None = None,
    per_logger_rates: dict[str, tuple[float, float]] | None = None,
    emit_warnings: bool = True,
    summary_interval: float = 5.0,
    max_queue_size: int = 1000,
    max_memory_mb: float | None = None,
    overflow_policy: str = "drop_oldest",
) -> RateLimiterProcessor:
    """
    Build a ``RateLimiterProcessor`` and configure its global rate limiter.

    Args:
        global_rate: Global logs per second limit
        global_capacity: Global burst capacity
        per_logger_rates: Dict of logger_name -> (rate, capacity) tuples
        emit_warnings: Whether to emit warnings when rate limited
        summary_interval: Seconds between rate limit summary reports
        max_queue_size: Maximum queue size when buffering
        max_memory_mb: Maximum memory for buffered logs
        overflow_policy: Policy when queue is full

    Returns:
        Configured RateLimiterProcessor instance
    """
    # Buffered rate limiting only applies with a positive queue size and a
    # drop-based overflow policy ("block" is not supported by the buffer).
    buffering_enabled = overflow_policy in ("drop_oldest", "drop_newest") and (
        max_queue_size > 0
    )

    processor = RateLimiterProcessor(
        emit_warning_on_limit=emit_warnings,
        summary_interval_seconds=summary_interval,
    )

    # Push all limits into the shared global rate limiter.
    processor.rate_limiter.configure(
        global_rate=global_rate,
        global_capacity=global_capacity,
        per_logger_rates=per_logger_rates,
        use_buffered=buffering_enabled,
        max_queue_size=max_queue_size,
        max_memory_mb=max_memory_mb,
        overflow_policy=overflow_policy,
    )

    return processor
|
@@ -0,0 +1,305 @@
|
|
1
|
+
#
|
2
|
+
# queue_limiter.py
|
3
|
+
#
|
4
|
+
"""
|
5
|
+
Queue-based rate limiter with overflow protection for Foundation's logging system.
|
6
|
+
"""
|
7
|
+
|
8
|
+
from collections import deque
|
9
|
+
import sys
|
10
|
+
import threading
|
11
|
+
import time
|
12
|
+
from typing import Any, Literal
|
13
|
+
|
14
|
+
|
15
|
+
class QueuedRateLimiter:
|
16
|
+
"""
|
17
|
+
Rate limiter with a queue for buffering logs.
|
18
|
+
Drops oldest messages when queue is full (FIFO overflow).
|
19
|
+
"""
|
20
|
+
|
21
|
+
def __init__(
|
22
|
+
self,
|
23
|
+
capacity: float,
|
24
|
+
refill_rate: float,
|
25
|
+
max_queue_size: int = 1000,
|
26
|
+
max_memory_mb: float | None = None,
|
27
|
+
overflow_policy: Literal["drop_oldest", "drop_newest", "block"] = "drop_oldest",
|
28
|
+
):
|
29
|
+
"""
|
30
|
+
Initialize the queued rate limiter.
|
31
|
+
|
32
|
+
Args:
|
33
|
+
capacity: Maximum tokens (burst capacity)
|
34
|
+
refill_rate: Tokens per second
|
35
|
+
max_queue_size: Maximum number of items in queue
|
36
|
+
max_memory_mb: Maximum memory usage in MB (estimated)
|
37
|
+
overflow_policy: What to do when queue is full
|
38
|
+
"""
|
39
|
+
if capacity <= 0:
|
40
|
+
raise ValueError("Capacity must be positive")
|
41
|
+
if refill_rate <= 0:
|
42
|
+
raise ValueError("Refill rate must be positive")
|
43
|
+
if max_queue_size <= 0:
|
44
|
+
raise ValueError("Max queue size must be positive")
|
45
|
+
|
46
|
+
self.capacity = float(capacity)
|
47
|
+
self.refill_rate = float(refill_rate)
|
48
|
+
self.tokens = float(capacity)
|
49
|
+
self.last_refill = time.monotonic()
|
50
|
+
|
51
|
+
# Queue management
|
52
|
+
self.max_queue_size = max_queue_size
|
53
|
+
self.max_memory_bytes = (
|
54
|
+
int(max_memory_mb * 1024 * 1024) if max_memory_mb else None
|
55
|
+
)
|
56
|
+
self.overflow_policy = overflow_policy
|
57
|
+
|
58
|
+
# Use deque for efficient FIFO operations
|
59
|
+
self.pending_queue = deque(
|
60
|
+
maxlen=max_queue_size if overflow_policy == "drop_oldest" else None
|
61
|
+
)
|
62
|
+
self.queue_lock = threading.Lock()
|
63
|
+
|
64
|
+
# Track statistics
|
65
|
+
self.total_queued = 0
|
66
|
+
self.total_dropped = 0
|
67
|
+
self.total_processed = 0
|
68
|
+
self.estimated_memory = 0
|
69
|
+
|
70
|
+
# Worker thread for processing queue
|
71
|
+
self.running = True
|
72
|
+
self.worker_thread = threading.Thread(target=self._process_queue, daemon=True)
|
73
|
+
self.worker_thread.start()
|
74
|
+
|
75
|
+
def _estimate_size(self, item: Any) -> int:
|
76
|
+
"""Estimate memory size of an item."""
|
77
|
+
# Simple estimation - can be made more sophisticated
|
78
|
+
return sys.getsizeof(item)
|
79
|
+
|
80
|
+
def _refill_tokens(self) -> None:
|
81
|
+
"""Refill tokens based on elapsed time."""
|
82
|
+
now = time.monotonic()
|
83
|
+
elapsed = now - self.last_refill
|
84
|
+
|
85
|
+
if elapsed > 0:
|
86
|
+
tokens_to_add = elapsed * self.refill_rate
|
87
|
+
self.tokens = min(self.capacity, self.tokens + tokens_to_add)
|
88
|
+
self.last_refill = now
|
89
|
+
|
90
|
+
def enqueue(self, item: Any) -> tuple[bool, str | None]:
|
91
|
+
"""
|
92
|
+
Add item to queue for rate-limited processing.
|
93
|
+
|
94
|
+
Returns:
|
95
|
+
Tuple of (accepted, reason) where reason is set if rejected
|
96
|
+
"""
|
97
|
+
with self.queue_lock:
|
98
|
+
# Check memory limit
|
99
|
+
if self.max_memory_bytes:
|
100
|
+
item_size = self._estimate_size(item)
|
101
|
+
if self.estimated_memory + item_size > self.max_memory_bytes:
|
102
|
+
self.total_dropped += 1
|
103
|
+
return (
|
104
|
+
False,
|
105
|
+
f"Memory limit exceeded ({self.estimated_memory / 1024 / 1024:.1f}MB)",
|
106
|
+
)
|
107
|
+
|
108
|
+
# Check queue size
|
109
|
+
if len(self.pending_queue) >= self.max_queue_size:
|
110
|
+
if self.overflow_policy == "drop_newest":
|
111
|
+
self.total_dropped += 1
|
112
|
+
return False, f"Queue full ({self.max_queue_size} items)"
|
113
|
+
elif self.overflow_policy == "drop_oldest":
|
114
|
+
# deque with maxlen automatically drops oldest
|
115
|
+
if len(self.pending_queue) > 0:
|
116
|
+
old_item = (
|
117
|
+
self.pending_queue[0]
|
118
|
+
if len(self.pending_queue) == self.max_queue_size
|
119
|
+
else None
|
120
|
+
)
|
121
|
+
if old_item and self.max_memory_bytes:
|
122
|
+
self.estimated_memory -= self._estimate_size(old_item)
|
123
|
+
self.total_dropped += 1
|
124
|
+
elif self.overflow_policy == "block":
|
125
|
+
# In block mode, we would need to wait
|
126
|
+
# For now, just reject
|
127
|
+
return False, "Queue full (blocking not implemented)"
|
128
|
+
|
129
|
+
# Add to queue
|
130
|
+
self.pending_queue.append(item)
|
131
|
+
self.total_queued += 1
|
132
|
+
|
133
|
+
if self.max_memory_bytes:
|
134
|
+
self.estimated_memory += self._estimate_size(item)
|
135
|
+
|
136
|
+
return True, None
|
137
|
+
|
138
|
+
def _process_queue(self):
|
139
|
+
"""Worker thread that processes queued items."""
|
140
|
+
while self.running:
|
141
|
+
with self.queue_lock:
|
142
|
+
self._refill_tokens()
|
143
|
+
|
144
|
+
# Process items while we have tokens
|
145
|
+
while self.tokens >= 1.0 and self.pending_queue:
|
146
|
+
item = self.pending_queue.popleft()
|
147
|
+
self.tokens -= 1.0
|
148
|
+
self.total_processed += 1
|
149
|
+
|
150
|
+
if self.max_memory_bytes:
|
151
|
+
self.estimated_memory -= self._estimate_size(item)
|
152
|
+
|
153
|
+
# Here we would actually process the item
|
154
|
+
# For logging, this would mean emitting the log
|
155
|
+
self._process_item(item)
|
156
|
+
|
157
|
+
# Sleep briefly to avoid busy waiting
|
158
|
+
time.sleep(0.01)
|
159
|
+
|
160
|
+
def _process_item(self, item: Any):
|
161
|
+
"""Process a single item from the queue."""
|
162
|
+
# This would be overridden to actually emit the log
|
163
|
+
pass
|
164
|
+
|
165
|
+
def get_stats(self) -> dict[str, Any]:
|
166
|
+
"""Get queue statistics."""
|
167
|
+
with self.queue_lock:
|
168
|
+
return {
|
169
|
+
"queue_size": len(self.pending_queue),
|
170
|
+
"max_queue_size": self.max_queue_size,
|
171
|
+
"tokens_available": self.tokens,
|
172
|
+
"capacity": self.capacity,
|
173
|
+
"refill_rate": self.refill_rate,
|
174
|
+
"total_queued": self.total_queued,
|
175
|
+
"total_dropped": self.total_dropped,
|
176
|
+
"total_processed": self.total_processed,
|
177
|
+
"estimated_memory_mb": self.estimated_memory / 1024 / 1024
|
178
|
+
if self.max_memory_bytes
|
179
|
+
else None,
|
180
|
+
"max_memory_mb": self.max_memory_bytes / 1024 / 1024
|
181
|
+
if self.max_memory_bytes
|
182
|
+
else None,
|
183
|
+
"overflow_policy": self.overflow_policy,
|
184
|
+
}
|
185
|
+
|
186
|
+
def shutdown(self):
|
187
|
+
"""Shutdown the worker thread."""
|
188
|
+
self.running = False
|
189
|
+
if self.worker_thread.is_alive():
|
190
|
+
self.worker_thread.join(timeout=1.0)
|
191
|
+
|
192
|
+
|
193
|
+
class BufferedRateLimiter:
|
194
|
+
"""
|
195
|
+
Simple synchronous rate limiter with overflow buffer.
|
196
|
+
Does not use a worker thread - processes inline.
|
197
|
+
"""
|
198
|
+
|
199
|
+
def __init__(
|
200
|
+
self,
|
201
|
+
capacity: float,
|
202
|
+
refill_rate: float,
|
203
|
+
buffer_size: int = 100,
|
204
|
+
track_dropped: bool = True,
|
205
|
+
):
|
206
|
+
"""
|
207
|
+
Initialize buffered rate limiter.
|
208
|
+
|
209
|
+
Args:
|
210
|
+
capacity: Maximum tokens (burst capacity)
|
211
|
+
refill_rate: Tokens per second
|
212
|
+
buffer_size: Number of recently dropped items to track
|
213
|
+
track_dropped: Whether to keep dropped items for debugging
|
214
|
+
"""
|
215
|
+
if capacity <= 0:
|
216
|
+
raise ValueError("Capacity must be positive")
|
217
|
+
if refill_rate <= 0:
|
218
|
+
raise ValueError("Refill rate must be positive")
|
219
|
+
|
220
|
+
self.capacity = float(capacity)
|
221
|
+
self.refill_rate = float(refill_rate)
|
222
|
+
self.tokens = float(capacity)
|
223
|
+
self.last_refill = time.monotonic()
|
224
|
+
self.lock = threading.Lock()
|
225
|
+
|
226
|
+
# Track dropped items
|
227
|
+
self.buffer_size = buffer_size
|
228
|
+
self.track_dropped = track_dropped
|
229
|
+
self.dropped_buffer = deque(maxlen=buffer_size) if track_dropped else None
|
230
|
+
|
231
|
+
# Statistics
|
232
|
+
self.total_allowed = 0
|
233
|
+
self.total_denied = 0
|
234
|
+
self.total_bytes_dropped = 0
|
235
|
+
|
236
|
+
def is_allowed(self, item: Any | None = None) -> tuple[bool, str | None]:
|
237
|
+
"""
|
238
|
+
Check if item is allowed based on rate limit.
|
239
|
+
|
240
|
+
Args:
|
241
|
+
item: Optional item to track if dropped
|
242
|
+
|
243
|
+
Returns:
|
244
|
+
Tuple of (allowed, reason)
|
245
|
+
"""
|
246
|
+
with self.lock:
|
247
|
+
now = time.monotonic()
|
248
|
+
elapsed = now - self.last_refill
|
249
|
+
|
250
|
+
# Refill tokens
|
251
|
+
if elapsed > 0:
|
252
|
+
tokens_to_add = elapsed * self.refill_rate
|
253
|
+
self.tokens = min(self.capacity, self.tokens + tokens_to_add)
|
254
|
+
self.last_refill = now
|
255
|
+
|
256
|
+
# Try to consume token
|
257
|
+
if self.tokens >= 1.0:
|
258
|
+
self.tokens -= 1.0
|
259
|
+
self.total_allowed += 1
|
260
|
+
return True, None
|
261
|
+
else:
|
262
|
+
self.total_denied += 1
|
263
|
+
|
264
|
+
# Track dropped item
|
265
|
+
if self.track_dropped and item is not None:
|
266
|
+
self.dropped_buffer.append(
|
267
|
+
{
|
268
|
+
"time": now,
|
269
|
+
"item": item,
|
270
|
+
"size": sys.getsizeof(item),
|
271
|
+
}
|
272
|
+
)
|
273
|
+
self.total_bytes_dropped += sys.getsizeof(item)
|
274
|
+
|
275
|
+
return False, f"Rate limit exceeded (tokens: {self.tokens:.1f})"
|
276
|
+
|
277
|
+
def get_dropped_samples(self, count: int = 10) -> list[Any]:
|
278
|
+
"""Get recent dropped items for debugging."""
|
279
|
+
if not self.track_dropped or not self.dropped_buffer:
|
280
|
+
return []
|
281
|
+
|
282
|
+
with self.lock:
|
283
|
+
return list(self.dropped_buffer)[-count:]
|
284
|
+
|
285
|
+
def get_stats(self) -> dict[str, Any]:
|
286
|
+
"""Get statistics."""
|
287
|
+
with self.lock:
|
288
|
+
stats = {
|
289
|
+
"tokens_available": self.tokens,
|
290
|
+
"capacity": self.capacity,
|
291
|
+
"refill_rate": self.refill_rate,
|
292
|
+
"total_allowed": self.total_allowed,
|
293
|
+
"total_denied": self.total_denied,
|
294
|
+
"total_bytes_dropped": self.total_bytes_dropped,
|
295
|
+
}
|
296
|
+
|
297
|
+
if self.track_dropped and self.dropped_buffer:
|
298
|
+
stats["dropped_buffer_size"] = len(self.dropped_buffer)
|
299
|
+
stats["oldest_dropped_age"] = (
|
300
|
+
time.monotonic() - self.dropped_buffer[0]["time"]
|
301
|
+
if self.dropped_buffer
|
302
|
+
else 0
|
303
|
+
)
|
304
|
+
|
305
|
+
return stats
|
@@ -0,0 +1,29 @@
|
|
1
|
+
#
# __init__.py
#
"""
Foundation Logger Setup Module.

Handles structured logging configuration, processor setup, and emoji resolution.
Provides the core setup functionality for the Foundation logging system.

``reset_for_testing`` is exported only when the optional testing submodule
is importable; otherwise the name is bound to ``None`` and omitted from
``__all__``.
"""

from provide.foundation.logger.setup.coordinator import internal_setup

# Import testing utilities conditionally — the testing submodule may be
# absent in stripped-down installs, so its absence must not break imports.
try:
    from provide.foundation.logger.setup.testing import (
        reset_foundation_setup_for_testing as reset_for_testing,
    )

    _has_testing = True
except ImportError:
    _has_testing = False
    # Keep the name importable so `from ... import reset_for_testing`
    # does not raise; callers must check for None before use.
    reset_for_testing = None

__all__ = [
    "internal_setup",
]

# Only advertise the testing helper when it actually resolved above.
if _has_testing:
    __all__.append("reset_for_testing")
|
@@ -0,0 +1,138 @@
|
|
1
|
+
#
|
2
|
+
# coordinator.py
|
3
|
+
#
|
4
|
+
"""
|
5
|
+
Main setup coordination for Foundation Telemetry.
|
6
|
+
Handles the core setup logic, state management, and setup logger creation.
|
7
|
+
"""
|
8
|
+
|
9
|
+
import logging as stdlib_logging
|
10
|
+
import threading
|
11
|
+
from typing import Any
|
12
|
+
|
13
|
+
import structlog
|
14
|
+
|
15
|
+
from provide.foundation.logger.config import LoggingConfig, TelemetryConfig
|
16
|
+
from provide.foundation.logger.core import (
|
17
|
+
_LAZY_SETUP_STATE,
|
18
|
+
logger as foundation_logger,
|
19
|
+
)
|
20
|
+
from provide.foundation.logger.emoji.sets import BUILTIN_EMOJI_SETS
|
21
|
+
from provide.foundation.logger.setup.emoji_resolver import resolve_active_emoji_config
|
22
|
+
from provide.foundation.logger.setup.processors import (
|
23
|
+
configure_structlog_output,
|
24
|
+
handle_globally_disabled_setup,
|
25
|
+
)
|
26
|
+
from provide.foundation.streams import get_log_stream
|
27
|
+
from provide.foundation.utils.streams import get_foundation_log_stream, get_safe_stderr
|
28
|
+
|
29
|
+
# Serializes setup; internal_setup() assumes its callers hold this lock.
_PROVIDE_SETUP_LOCK = threading.Lock()
# Logger name used for Foundation's own setup diagnostics.
_CORE_SETUP_LOGGER_NAME = "provide.foundation.core_setup"
# NOTE(review): never written in this module chunk — presumably toggled by
# the explicit-setup entry point elsewhere; verify before relying on it.
_EXPLICIT_SETUP_DONE = False
# Cached stdlib log level for setup messages; resolved lazily exactly once
# by get_foundation_log_level().
_FOUNDATION_LOG_LEVEL: int | None = None
|
33
|
+
|
34
|
+
|
35
|
+
def get_foundation_log_level() -> int:
    """Get the Foundation log level from LoggingConfig, checking only once.

    The resolved stdlib level is cached in the module-level
    ``_FOUNDATION_LOG_LEVEL`` global; subsequent calls return the cache.
    """
    global _FOUNDATION_LOG_LEVEL

    # Cached from a previous call — return immediately.
    if _FOUNDATION_LOG_LEVEL is not None:
        return _FOUNDATION_LOG_LEVEL

    # Resolve once via the config system; unknown names fall back to INFO.
    env_config = LoggingConfig.from_env(strict=False)
    level_name = env_config.foundation_setup_log_level.upper()
    _FOUNDATION_LOG_LEVEL = getattr(stdlib_logging, level_name, stdlib_logging.INFO)
    return _FOUNDATION_LOG_LEVEL
|
48
|
+
|
49
|
+
|
50
|
+
def create_core_setup_logger(globally_disabled: bool = False) -> Any:
    """Create a structlog logger for core setup messages.

    Args:
        globally_disabled: When True, structlog is configured as a no-op so
            setup messages are silently discarded.

    Returns:
        A structlog logger bound to the core-setup logger name.

    NOTE: this calls ``structlog.configure`` and therefore replaces the
    process-wide structlog configuration as a side effect; internal_setup()
    reconfigures structlog again afterwards for normal logging.
    """
    if globally_disabled:
        # Configure structlog to be a no-op for core setup logger
        structlog.configure(
            processors=[],
            logger_factory=structlog.ReturnLoggerFactory(),
            wrapper_class=structlog.BoundLogger,
            cache_logger_on_first_use=True,
        )
        return structlog.get_logger(_CORE_SETUP_LOGGER_NAME)
    else:
        # Get the foundation log output stream
        try:
            logging_config = LoggingConfig.from_env(strict=False)
            foundation_stream = get_foundation_log_stream(
                logging_config.foundation_log_output
            )
        except Exception:
            # Fallback to stderr if config loading fails — setup logging
            # must never itself prevent setup from completing.
            foundation_stream = get_safe_stderr()

        # Configure structlog for core setup logger: level + ISO timestamp,
        # rendered for human consumption on the chosen stream.
        structlog.configure(
            processors=[
                structlog.processors.add_log_level,
                structlog.processors.TimeStamper(fmt="iso"),
                structlog.dev.ConsoleRenderer(),
            ],
            logger_factory=structlog.PrintLoggerFactory(file=foundation_stream),
            wrapper_class=structlog.BoundLogger,
            cache_logger_on_first_use=True,
        )

        return structlog.get_logger(_CORE_SETUP_LOGGER_NAME)
|
85
|
+
|
86
|
+
|
87
|
+
def internal_setup(
    config: TelemetryConfig | None = None, is_explicit_call: bool = False
) -> None:
    """
    The single, internal setup function that both explicit and lazy setup call.
    It is protected by the _PROVIDE_SETUP_LOCK in its callers.

    Args:
        config: Telemetry configuration to apply; when None, configuration is
            loaded from the environment via TelemetryConfig.from_env().
        is_explicit_call: True when invoked from the public setup API (recorded
            on the logger as _is_configured_by_setup), False for lazy setup.
    """
    # This function assumes the lock is already held.
    # Tear down any previous configuration so re-setup starts clean.
    structlog.reset_defaults()
    foundation_logger._is_configured_by_setup = False
    foundation_logger._active_config = None
    foundation_logger._active_resolved_emoji_config = None
    _LAZY_SETUP_STATE.update({"done": False, "error": None, "in_progress": False})

    current_config = config if config is not None else TelemetryConfig.from_env()
    # NOTE: this reconfigures structlog globally for setup diagnostics; the
    # final logging configuration is applied further below.
    core_setup_logger = create_core_setup_logger(
        globally_disabled=current_config.globally_disabled
    )

    if not current_config.globally_disabled:
        core_setup_logger.debug(
            "⚙️➡️🚀 Starting Foundation (structlog) setup",
            service_name=current_config.service_name,
            log_level=current_config.logging.default_level,
            formatter=current_config.logging.console_formatter,
        )

    resolved_emoji_config = resolve_active_emoji_config(
        current_config.logging, BUILTIN_EMOJI_SETS
    )

    # Apply the real structlog configuration (or the disabled no-op form).
    if current_config.globally_disabled:
        handle_globally_disabled_setup()
    else:
        configure_structlog_output(
            current_config, resolved_emoji_config, get_log_stream()
        )

    # Publish the active configuration on the shared logger object and mark
    # lazy setup as complete so subsequent lazy calls become no-ops.
    foundation_logger._is_configured_by_setup = is_explicit_call
    foundation_logger._active_config = current_config
    foundation_logger._active_resolved_emoji_config = resolved_emoji_config
    _LAZY_SETUP_STATE["done"] = True

    if not current_config.globally_disabled:
        # resolved_emoji_config unpacks as (field_definitions, emoji_sets) —
        # presumably the contract of resolve_active_emoji_config; verify there.
        field_definitions, emoji_sets = resolved_emoji_config
        core_setup_logger.debug(
            "⚙️➡️✅ Foundation (structlog) setup completed",
            emoji_sets_enabled=len(field_definitions) > 0,
            emoji_sets_count=len(emoji_sets),
            processors_configured=True,
            log_file_enabled=current_config.logging.log_file is not None,
        )