provide-foundation 0.0.0.dev0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (149)
  1. provide/__init__.py +15 -0
  2. provide/foundation/__init__.py +155 -0
  3. provide/foundation/_version.py +58 -0
  4. provide/foundation/cli/__init__.py +67 -0
  5. provide/foundation/cli/commands/__init__.py +3 -0
  6. provide/foundation/cli/commands/deps.py +71 -0
  7. provide/foundation/cli/commands/logs/__init__.py +63 -0
  8. provide/foundation/cli/commands/logs/generate.py +357 -0
  9. provide/foundation/cli/commands/logs/generate_old.py +569 -0
  10. provide/foundation/cli/commands/logs/query.py +174 -0
  11. provide/foundation/cli/commands/logs/send.py +166 -0
  12. provide/foundation/cli/commands/logs/tail.py +112 -0
  13. provide/foundation/cli/decorators.py +262 -0
  14. provide/foundation/cli/main.py +65 -0
  15. provide/foundation/cli/testing.py +220 -0
  16. provide/foundation/cli/utils.py +210 -0
  17. provide/foundation/config/__init__.py +106 -0
  18. provide/foundation/config/base.py +295 -0
  19. provide/foundation/config/env.py +369 -0
  20. provide/foundation/config/loader.py +311 -0
  21. provide/foundation/config/manager.py +387 -0
  22. provide/foundation/config/schema.py +284 -0
  23. provide/foundation/config/sync.py +281 -0
  24. provide/foundation/config/types.py +78 -0
  25. provide/foundation/config/validators.py +80 -0
  26. provide/foundation/console/__init__.py +29 -0
  27. provide/foundation/console/input.py +364 -0
  28. provide/foundation/console/output.py +178 -0
  29. provide/foundation/context/__init__.py +12 -0
  30. provide/foundation/context/core.py +356 -0
  31. provide/foundation/core.py +20 -0
  32. provide/foundation/crypto/__init__.py +182 -0
  33. provide/foundation/crypto/algorithms.py +111 -0
  34. provide/foundation/crypto/certificates.py +896 -0
  35. provide/foundation/crypto/checksums.py +301 -0
  36. provide/foundation/crypto/constants.py +57 -0
  37. provide/foundation/crypto/hashing.py +265 -0
  38. provide/foundation/crypto/keys.py +188 -0
  39. provide/foundation/crypto/signatures.py +144 -0
  40. provide/foundation/crypto/utils.py +164 -0
  41. provide/foundation/errors/__init__.py +96 -0
  42. provide/foundation/errors/auth.py +73 -0
  43. provide/foundation/errors/base.py +81 -0
  44. provide/foundation/errors/config.py +103 -0
  45. provide/foundation/errors/context.py +299 -0
  46. provide/foundation/errors/decorators.py +484 -0
  47. provide/foundation/errors/handlers.py +360 -0
  48. provide/foundation/errors/integration.py +105 -0
  49. provide/foundation/errors/platform.py +37 -0
  50. provide/foundation/errors/process.py +140 -0
  51. provide/foundation/errors/resources.py +133 -0
  52. provide/foundation/errors/runtime.py +160 -0
  53. provide/foundation/errors/safe_decorators.py +133 -0
  54. provide/foundation/errors/types.py +276 -0
  55. provide/foundation/file/__init__.py +79 -0
  56. provide/foundation/file/atomic.py +157 -0
  57. provide/foundation/file/directory.py +134 -0
  58. provide/foundation/file/formats.py +236 -0
  59. provide/foundation/file/lock.py +175 -0
  60. provide/foundation/file/safe.py +179 -0
  61. provide/foundation/file/utils.py +170 -0
  62. provide/foundation/hub/__init__.py +88 -0
  63. provide/foundation/hub/click_builder.py +310 -0
  64. provide/foundation/hub/commands.py +42 -0
  65. provide/foundation/hub/components.py +640 -0
  66. provide/foundation/hub/decorators.py +244 -0
  67. provide/foundation/hub/info.py +32 -0
  68. provide/foundation/hub/manager.py +446 -0
  69. provide/foundation/hub/registry.py +279 -0
  70. provide/foundation/hub/type_mapping.py +54 -0
  71. provide/foundation/hub/types.py +28 -0
  72. provide/foundation/logger/__init__.py +41 -0
  73. provide/foundation/logger/base.py +22 -0
  74. provide/foundation/logger/config/__init__.py +16 -0
  75. provide/foundation/logger/config/base.py +40 -0
  76. provide/foundation/logger/config/logging.py +394 -0
  77. provide/foundation/logger/config/telemetry.py +188 -0
  78. provide/foundation/logger/core.py +239 -0
  79. provide/foundation/logger/custom_processors.py +172 -0
  80. provide/foundation/logger/emoji/__init__.py +44 -0
  81. provide/foundation/logger/emoji/matrix.py +209 -0
  82. provide/foundation/logger/emoji/sets.py +458 -0
  83. provide/foundation/logger/emoji/types.py +56 -0
  84. provide/foundation/logger/factories.py +56 -0
  85. provide/foundation/logger/processors/__init__.py +13 -0
  86. provide/foundation/logger/processors/main.py +254 -0
  87. provide/foundation/logger/processors/trace.py +113 -0
  88. provide/foundation/logger/ratelimit/__init__.py +31 -0
  89. provide/foundation/logger/ratelimit/limiters.py +294 -0
  90. provide/foundation/logger/ratelimit/processor.py +203 -0
  91. provide/foundation/logger/ratelimit/queue_limiter.py +305 -0
  92. provide/foundation/logger/setup/__init__.py +29 -0
  93. provide/foundation/logger/setup/coordinator.py +138 -0
  94. provide/foundation/logger/setup/emoji_resolver.py +64 -0
  95. provide/foundation/logger/setup/processors.py +85 -0
  96. provide/foundation/logger/setup/testing.py +39 -0
  97. provide/foundation/logger/trace.py +38 -0
  98. provide/foundation/metrics/__init__.py +119 -0
  99. provide/foundation/metrics/otel.py +122 -0
  100. provide/foundation/metrics/simple.py +165 -0
  101. provide/foundation/observability/__init__.py +53 -0
  102. provide/foundation/observability/openobserve/__init__.py +79 -0
  103. provide/foundation/observability/openobserve/auth.py +72 -0
  104. provide/foundation/observability/openobserve/client.py +307 -0
  105. provide/foundation/observability/openobserve/commands.py +357 -0
  106. provide/foundation/observability/openobserve/exceptions.py +41 -0
  107. provide/foundation/observability/openobserve/formatters.py +298 -0
  108. provide/foundation/observability/openobserve/models.py +134 -0
  109. provide/foundation/observability/openobserve/otlp.py +320 -0
  110. provide/foundation/observability/openobserve/search.py +222 -0
  111. provide/foundation/observability/openobserve/streaming.py +235 -0
  112. provide/foundation/platform/__init__.py +44 -0
  113. provide/foundation/platform/detection.py +193 -0
  114. provide/foundation/platform/info.py +157 -0
  115. provide/foundation/process/__init__.py +39 -0
  116. provide/foundation/process/async_runner.py +373 -0
  117. provide/foundation/process/lifecycle.py +406 -0
  118. provide/foundation/process/runner.py +390 -0
  119. provide/foundation/setup/__init__.py +101 -0
  120. provide/foundation/streams/__init__.py +44 -0
  121. provide/foundation/streams/console.py +57 -0
  122. provide/foundation/streams/core.py +65 -0
  123. provide/foundation/streams/file.py +104 -0
  124. provide/foundation/testing/__init__.py +166 -0
  125. provide/foundation/testing/cli.py +227 -0
  126. provide/foundation/testing/crypto.py +163 -0
  127. provide/foundation/testing/fixtures.py +49 -0
  128. provide/foundation/testing/hub.py +23 -0
  129. provide/foundation/testing/logger.py +106 -0
  130. provide/foundation/testing/streams.py +54 -0
  131. provide/foundation/tracer/__init__.py +49 -0
  132. provide/foundation/tracer/context.py +115 -0
  133. provide/foundation/tracer/otel.py +135 -0
  134. provide/foundation/tracer/spans.py +174 -0
  135. provide/foundation/types.py +32 -0
  136. provide/foundation/utils/__init__.py +97 -0
  137. provide/foundation/utils/deps.py +195 -0
  138. provide/foundation/utils/env.py +491 -0
  139. provide/foundation/utils/formatting.py +483 -0
  140. provide/foundation/utils/parsing.py +235 -0
  141. provide/foundation/utils/rate_limiting.py +112 -0
  142. provide/foundation/utils/streams.py +67 -0
  143. provide/foundation/utils/timing.py +93 -0
  144. provide_foundation-0.0.0.dev0.dist-info/METADATA +469 -0
  145. provide_foundation-0.0.0.dev0.dist-info/RECORD +149 -0
  146. provide_foundation-0.0.0.dev0.dist-info/WHEEL +5 -0
  147. provide_foundation-0.0.0.dev0.dist-info/entry_points.txt +2 -0
  148. provide_foundation-0.0.0.dev0.dist-info/licenses/LICENSE +201 -0
  149. provide_foundation-0.0.0.dev0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,254 @@
1
+ #
2
+ # processors.py
3
+ #
4
+ """
5
+ Structlog processors for Foundation Telemetry.
6
+ """
7
+
8
+ import json
9
+ import logging as stdlib_logging
10
+ from typing import TYPE_CHECKING, Any, TextIO, cast
11
+
12
+ import structlog
13
+
14
+ from provide.foundation.logger.config import LoggingConfig, TelemetryConfig
15
+ from provide.foundation.logger.custom_processors import (
16
+ StructlogProcessor,
17
+ add_log_level_custom,
18
+ add_logger_name_emoji_prefix,
19
+ filter_by_level_custom,
20
+ )
21
+
22
+ # Import trace context processor
23
+ from provide.foundation.logger.processors.trace import inject_trace_context
24
+ from provide.foundation.types import (
25
+ TRACE_LEVEL_NUM,
26
+ LogLevelStr,
27
+ )
28
+
29
+ if TYPE_CHECKING:
30
+ from provide.foundation.logger.setup.emoji_resolver import ResolvedEmojiConfig
31
+
32
# Maps Foundation log-level names to numeric severities so processors can
# compare levels numerically. Mirrors the stdlib values, plus the
# Foundation-specific "TRACE" level (TRACE_LEVEL_NUM, defined in
# provide.foundation.types).
_LEVEL_TO_NUMERIC: dict[LogLevelStr, int] = {
    "CRITICAL": stdlib_logging.CRITICAL,
    "ERROR": stdlib_logging.ERROR,
    "WARNING": stdlib_logging.WARNING,
    "INFO": stdlib_logging.INFO,
    "DEBUG": stdlib_logging.DEBUG,
    "TRACE": TRACE_LEVEL_NUM,
    "NOTSET": stdlib_logging.NOTSET,
}
41
+
42
+
43
def _config_create_service_name_processor(
    service_name: str | None,
) -> StructlogProcessor:
    """Build a processor that stamps each event with the configured service name.

    Args:
        service_name: Value for the ``service_name`` key; when None the
            returned processor leaves events untouched.

    Returns:
        A structlog processor closing over ``service_name``.
    """

    def _stamp_service_name(
        _logger: Any, _method_name: str, event_dict: structlog.types.EventDict
    ) -> structlog.types.EventDict:
        if service_name is None:
            return event_dict
        event_dict["service_name"] = service_name
        return event_dict

    return cast(StructlogProcessor, _stamp_service_name)
54
+
55
+
56
def _config_create_timestamp_processors(
    omit_timestamp: bool,
) -> list[StructlogProcessor]:
    """Return the timestamp-related portion of the processor chain.

    A local-time ``TimeStamper`` is always installed; when ``omit_timestamp``
    is true, a follow-up processor immediately strips the ``timestamp`` key
    again.

    Args:
        omit_timestamp: Drop the ``timestamp`` key from every event.

    Returns:
        One or two structlog processors, in application order.
    """
    stamper = structlog.processors.TimeStamper(fmt="%Y-%m-%d %H:%M:%S.%f", utc=False)
    chain: list[StructlogProcessor] = [stamper]

    if omit_timestamp:

        def _drop_timestamp(
            _logger: Any, _method_name: str, event_dict: structlog.types.EventDict
        ) -> structlog.types.EventDict:
            event_dict.pop("timestamp", None)
            return event_dict

        chain.append(cast(StructlogProcessor, _drop_timestamp))

    return chain
72
+
73
+
74
def _config_create_emoji_processors(
    logging_config: LoggingConfig, resolved_emoji_config: "ResolvedEmojiConfig"
) -> list[StructlogProcessor]:
    """Build the emoji-prefix processors enabled in ``logging_config``.

    Two independent prefixes may be installed:
      * a logger-name emoji prefix (shared ``add_logger_name_emoji_prefix``), and
      * a Domain-Action-Status (DAS) emoji prefix, built here as a closure
        over the already-resolved emoji configuration.

    Args:
        logging_config: Logging settings with the two enable flags.
        resolved_emoji_config: Tuple of (field definitions, emoji-set lookup)
            produced by the emoji resolver.

    Returns:
        Zero, one, or two structlog processors, in application order.
    """
    processors: list[StructlogProcessor] = []
    if logging_config.logger_name_emoji_prefix_enabled:
        processors.append(cast(StructlogProcessor, add_logger_name_emoji_prefix))
    if logging_config.das_emoji_prefix_enabled:
        # FIX: Create the processor as a closure with the resolved config
        resolved_field_definitions, resolved_emoji_sets_lookup = resolved_emoji_config

        def add_das_emoji_prefix_closure(
            _logger: Any, _method_name: str, event_dict: structlog.types.EventDict
        ) -> structlog.types.EventDict:
            # This inner function now has access to the resolved config from its closure scope
            # Lazy import avoids paying for the emoji matrix on import of this module.
            from provide.foundation.logger.emoji.matrix import (
                PRIMARY_EMOJI,
                SECONDARY_EMOJI,
                TERTIARY_EMOJI,
            )

            final_das_prefix_parts: list[str] = []

            if resolved_field_definitions:  # New Layered Emoji System is active
                for field_def in resolved_field_definitions:
                    value_from_event = event_dict.get(field_def.log_key)
                    if value_from_event is not None and field_def.emoji_set_name:
                        # Consume the field so it does not also render as key=value.
                        event_dict.pop(field_def.log_key, None)
                        emoji_set = resolved_emoji_sets_lookup.get(
                            field_def.emoji_set_name
                        )
                        if emoji_set:
                            value_str_lower = str(value_from_event).lower()
                            specific_emoji = emoji_set.emojis.get(value_str_lower)
                            # Per-field override wins over the set's own default key.
                            default_key = (
                                field_def.default_emoji_override_key
                                or emoji_set.default_emoji_key
                            )
                            default_emoji = emoji_set.emojis.get(default_key, "❓")
                            chosen_emoji = (
                                specific_emoji
                                if specific_emoji is not None
                                else default_emoji
                            )
                            final_das_prefix_parts.append(f"[{chosen_emoji}]")
                        else:
                            # Named emoji set missing from the resolved lookup.
                            final_das_prefix_parts.append("[❓]")
            else:  # Fallback to Core DAS System
                # domain/action/status keys are consumed from the event.
                domain = event_dict.pop("domain", None)
                action = event_dict.pop("action", None)
                status = event_dict.pop("status", None)
                if domain or action or status:
                    domain_emoji = (
                        PRIMARY_EMOJI.get(str(domain).lower(), PRIMARY_EMOJI["default"])
                        if domain
                        else PRIMARY_EMOJI["default"]
                    )
                    action_emoji = (
                        SECONDARY_EMOJI.get(
                            str(action).lower(), SECONDARY_EMOJI["default"]
                        )
                        if action
                        else SECONDARY_EMOJI["default"]
                    )
                    status_emoji = (
                        TERTIARY_EMOJI.get(
                            str(status).lower(), TERTIARY_EMOJI["default"]
                        )
                        if status
                        else TERTIARY_EMOJI["default"]
                    )
                    final_das_prefix_parts.extend(
                        [f"[{domain_emoji}]", f"[{action_emoji}]", f"[{status_emoji}]"]
                    )

            if final_das_prefix_parts:
                # Prepend the assembled prefix to the message, or use it alone
                # when the event has no message.
                final_das_prefix_str = "".join(final_das_prefix_parts)
                event_msg = event_dict.get("event")
                event_dict["event"] = (
                    f"{final_das_prefix_str} {event_msg}"
                    if event_msg is not None
                    else final_das_prefix_str
                )
            return event_dict

        processors.append(cast(StructlogProcessor, add_das_emoji_prefix_closure))
    return processors
160
+
161
+
162
def _build_core_processors_list(
    config: TelemetryConfig, resolved_emoji_config: "ResolvedEmojiConfig"
) -> list[StructlogProcessor]:
    """Assemble the core processor chain (everything before output formatting).

    List order is the order structlog applies the processors: context merge,
    level annotation/filtering, optional rate limiting, stack/exc handling,
    timestamps, service name, trace context, emoji prefixes.

    Args:
        config: Full telemetry configuration.
        resolved_emoji_config: Pre-resolved emoji configuration passed through
            to the emoji processors.

    Returns:
        The ordered list of core structlog processors.
    """
    log_cfg = config.logging
    processors: list[StructlogProcessor] = [
        structlog.contextvars.merge_contextvars,
        cast(StructlogProcessor, add_log_level_custom),
        cast(
            StructlogProcessor,
            filter_by_level_custom(
                default_level_str=log_cfg.default_level,
                module_levels=log_cfg.module_levels,
                level_to_numeric_map=_LEVEL_TO_NUMERIC,
            ),
        ),
        structlog.processors.StackInfoRenderer(),
        structlog.dev.set_exc_info,
    ]

    # Add rate limiting processor if enabled
    if log_cfg.rate_limit_enabled:
        # Lazy import: only load the ratelimit subpackage when in use.
        from provide.foundation.logger.ratelimit import create_rate_limiter_processor

        rate_limiter_processor = create_rate_limiter_processor(
            global_rate=log_cfg.rate_limit_global,
            global_capacity=log_cfg.rate_limit_global_capacity,
            per_logger_rates=log_cfg.rate_limit_per_logger,
            emit_warnings=log_cfg.rate_limit_emit_warnings,
            summary_interval=log_cfg.rate_limit_summary_interval,
            max_queue_size=log_cfg.rate_limit_max_queue_size,
            max_memory_mb=log_cfg.rate_limit_max_memory_mb,
            overflow_policy=log_cfg.rate_limit_overflow_policy,
        )
        processors.append(cast(StructlogProcessor, rate_limiter_processor))

    processors.extend(_config_create_timestamp_processors(log_cfg.omit_timestamp))
    if config.service_name is not None:
        processors.append(_config_create_service_name_processor(config.service_name))

    # Add trace context injection if tracing is enabled
    if config.tracing_enabled and not config.globally_disabled:
        processors.append(cast(StructlogProcessor, inject_trace_context))

    processors.extend(_config_create_emoji_processors(log_cfg, resolved_emoji_config))
    return processors
207
+
208
+
209
def _config_create_json_formatter_processors() -> list[StructlogProcessor]:
    """Return the processors that render the final event as one JSON object.

    Exception info is formatted into the event before JSON rendering; key
    order is preserved (``sort_keys=False``).
    """
    renderer = structlog.processors.JSONRenderer(
        serializer=json.dumps, sort_keys=False
    )
    return [structlog.processors.format_exc_info, renderer]
214
+
215
+
216
def _config_create_keyvalue_formatter_processors(
    output_stream: TextIO,
) -> list[StructlogProcessor]:
    """Return the processors that render events as key=value console output.

    Colors are enabled only when ``output_stream`` is an interactive TTY.
    The internal ``logger_name`` key is stripped before rendering.

    Args:
        output_stream: Stream the console renderer will ultimately target.

    Returns:
        The final rendering processors, in application order.
    """

    def _strip_logger_name(
        _logger: object, _method_name: str, event_dict: structlog.types.EventDict
    ) -> structlog.types.EventDict:
        event_dict.pop("logger_name", None)
        return event_dict

    use_colors = output_stream.isatty() if hasattr(output_stream, "isatty") else False
    renderer = structlog.dev.ConsoleRenderer(
        colors=use_colors, exception_formatter=structlog.dev.plain_traceback
    )
    return [cast(StructlogProcessor, _strip_logger_name), renderer]
232
+
233
+
234
def _build_formatter_processors_list(
    logging_config: LoggingConfig, output_stream: TextIO
) -> list[StructlogProcessor]:
    """Select output-formatter processors from ``console_formatter``.

    Args:
        logging_config: Logging settings; ``console_formatter`` selects the
            renderer ("json" or "key_value").
        output_stream: Stream passed to the key=value renderer for TTY
            detection.

    Returns:
        The formatter processors; unknown formatter names are reported via
        the setup logger and fall back to ``key_value``.
    """
    formatter = logging_config.console_formatter
    if formatter == "json":
        return _config_create_json_formatter_processors()
    if formatter == "key_value":
        return _config_create_keyvalue_formatter_processors(output_stream)

    # Unknown formatter, warn and default to key_value
    # Use setup coordinator logger
    from provide.foundation.logger.setup.coordinator import (
        create_core_setup_logger,
    )

    setup_logger = create_core_setup_logger()
    setup_logger.warning(
        f"Unknown formatter '{logging_config.console_formatter}', using default 'key_value'. "
        f"Valid formatters: ['json', 'key_value']"
    )
    return _config_create_keyvalue_formatter_processors(output_stream)
@@ -0,0 +1,113 @@
1
+ """Trace context processor for injecting trace/span IDs into logs."""
2
+
3
+ from typing import Any
4
+
5
+ # Note: Cannot import get_logger here due to circular dependency during setup
6
+ # Use structlog directly for foundation-internal logging
7
+ import structlog
8
+
9
+ log = structlog.get_logger(__name__)
10
+
11
+ # Note: Internal trace injection logging removed to avoid circular dependencies
12
+ # and level registration issues during logger setup
13
+
14
# OpenTelemetry feature detection
# OpenTelemetry is an optional dependency: probe for it once at import time
# so the per-event processor only pays a flag check, not an import attempt.
try:
    from opentelemetry import trace as otel_trace

    _HAS_OTEL = True
except ImportError:
    # Keep the name bound so later code can reference otel_trace safely.
    otel_trace = None
    _HAS_OTEL = False
22
+
23
+
24
def inject_trace_context(
    logger: Any, method_name: str, event_dict: dict[str, Any]
) -> dict[str, Any]:
    """Processor that adds trace/span identifiers to a log event.

    An active, recording OpenTelemetry span wins; otherwise Foundation's own
    tracer context is consulted. Pre-existing ``trace_id``/``span_id`` keys
    in the event are never overwritten, and any failure leaves the event
    unchanged.

    Args:
        logger: Logger instance (unused).
        method_name: Name of the log method being invoked (unused).
        event_dict: Current event dictionary.

    Returns:
        The event dictionary, possibly augmented with trace context.
    """
    # Preferred source: OpenTelemetry, when installed and actively recording.
    if _HAS_OTEL:
        try:
            otel_span = otel_trace.get_current_span()
            if otel_span and otel_span.is_recording():
                ctx = otel_span.get_span_context()
                event_dict.setdefault("trace_id", f"{ctx.trace_id:032x}")
                event_dict.setdefault("span_id", f"{ctx.span_id:016x}")
                # Only surface trace flags when they carry information.
                if ctx.trace_flags:
                    event_dict["trace_flags"] = ctx.trace_flags
                return event_dict
        except Exception:
            # OTel context unavailable — fall through to the Foundation tracer.
            pass

    # Fallback source: Foundation's lightweight tracer context.
    try:
        from provide.foundation.tracer.context import (
            get_current_span,
            get_current_trace_id,
        )

        span = get_current_span()
        trace_id = get_current_trace_id()

        if span:
            event_dict.setdefault("trace_id", span.trace_id)
            event_dict.setdefault("span_id", span.span_id)
        elif trace_id and "trace_id" not in event_dict:
            event_dict["trace_id"] = trace_id
    except Exception:
        # Foundation trace context unavailable — emit the event unmodified.
        pass

    return event_dict
85
+
86
+
87
def should_inject_trace_context() -> bool:
    """Report whether any trace context is currently available to inject.

    Returns:
        True when an active, recording OpenTelemetry span exists, or when
        Foundation's tracer exposes a current span or trace id.
    """
    # OpenTelemetry: an active recording span is sufficient.
    if _HAS_OTEL:
        try:
            span = otel_trace.get_current_span()
            if span and span.is_recording():
                return True
        except Exception:
            pass

    # Foundation tracer: any current span or trace id counts.
    try:
        from provide.foundation.tracer.context import (
            get_current_span,
            get_current_trace_id,
        )

        return get_current_span() is not None or get_current_trace_id() is not None
    except Exception:
        return False
@@ -0,0 +1,31 @@
1
+ #
2
+ # __init__.py
3
+ #
4
+ """
5
+ Rate limiting subcomponent for Foundation's logging system.
6
+ Provides rate limiters and processors for controlling log output rates.
7
+ """
8
+
9
+ from provide.foundation.logger.ratelimit.limiters import (
10
+ AsyncRateLimiter,
11
+ GlobalRateLimiter,
12
+ SyncRateLimiter,
13
+ )
14
+ from provide.foundation.logger.ratelimit.processor import (
15
+ RateLimiterProcessor,
16
+ create_rate_limiter_processor,
17
+ )
18
+ from provide.foundation.logger.ratelimit.queue_limiter import (
19
+ BufferedRateLimiter,
20
+ QueuedRateLimiter,
21
+ )
22
+
23
# Public API of the ratelimit subpackage (sorted alphabetically).
__all__ = [
    "AsyncRateLimiter",
    "BufferedRateLimiter",
    "GlobalRateLimiter",
    "QueuedRateLimiter",
    "RateLimiterProcessor",
    "SyncRateLimiter",
    "create_rate_limiter_processor",
]
@@ -0,0 +1,294 @@
1
+ #
2
+ # limiters.py
3
+ #
4
+ """
5
+ Rate limiter implementations for Foundation's logging system.
6
+ """
7
+
8
+ import asyncio
9
+ import threading
10
+ import time
11
+ from typing import Any
12
+
13
+
14
class SyncRateLimiter:
    """
    Synchronous token bucket rate limiter for controlling log output rates.
    Thread-safe implementation suitable for synchronous logging operations.
    """

    def __init__(self, capacity: float, refill_rate: float):
        """
        Initialize the rate limiter.

        Args:
            capacity: Maximum number of tokens (burst capacity)
            refill_rate: Tokens refilled per second

        Raises:
            ValueError: If capacity or refill_rate is not positive.
        """
        if capacity <= 0:
            raise ValueError("Capacity must be positive")
        if refill_rate <= 0:
            raise ValueError("Refill rate must be positive")

        self.capacity = float(capacity)
        self.refill_rate = float(refill_rate)
        # Bucket starts full so an initial burst up to `capacity` is allowed.
        self.tokens = float(capacity)
        self.last_refill = time.monotonic()
        self.lock = threading.Lock()

        # Statistics counters.
        self.total_allowed = 0
        self.total_denied = 0
        self.last_denied_time = None

    def _refill(self, now: float) -> None:
        """Top up the bucket for time elapsed since the last refill (lock held)."""
        elapsed = now - self.last_refill
        if elapsed > 0:
            self.tokens = min(self.capacity, self.tokens + elapsed * self.refill_rate)
            self.last_refill = now

    def is_allowed(self) -> bool:
        """
        Check if a log message is allowed based on available tokens.

        Returns:
            True if the log should be allowed, False if rate limited
        """
        with self.lock:
            now = time.monotonic()
            self._refill(now)

            if self.tokens < 1.0:
                self.total_denied += 1
                self.last_denied_time = now
                return False

            self.tokens -= 1.0
            self.total_allowed += 1
            return True

    def get_stats(self) -> dict[str, Any]:
        """Return a snapshot of limiter state and counters."""
        with self.lock:
            return {
                "tokens_available": self.tokens,
                "capacity": self.capacity,
                "refill_rate": self.refill_rate,
                "total_allowed": self.total_allowed,
                "total_denied": self.total_denied,
                "last_denied_time": self.last_denied_time,
            }
82
+
83
+
84
class AsyncRateLimiter:
    """
    Asynchronous token bucket rate limiter.
    Uses asyncio.Lock for thread safety in async contexts.
    """

    def __init__(self, capacity: float, refill_rate: float):
        """
        Initialize the async rate limiter.

        Args:
            capacity: Maximum number of tokens (burst capacity)
            refill_rate: Tokens refilled per second

        Raises:
            ValueError: If capacity or refill_rate is not positive.
        """
        if capacity <= 0:
            raise ValueError("Capacity must be positive")
        if refill_rate <= 0:
            raise ValueError("Refill rate must be positive")

        self.capacity = float(capacity)
        self.refill_rate = float(refill_rate)
        # Bucket starts full so an initial burst up to `capacity` is allowed.
        self.tokens = float(capacity)
        self.last_refill = time.monotonic()
        self.lock = asyncio.Lock()

        # Statistics counters.
        self.total_allowed = 0
        self.total_denied = 0
        self.last_denied_time = None

    async def is_allowed(self) -> bool:
        """
        Check if a log message is allowed based on available tokens.

        Returns:
            True if the log should be allowed, False if rate limited
        """
        async with self.lock:
            now = time.monotonic()
            elapsed = now - self.last_refill

            # Credit tokens for time elapsed since the last refill.
            if elapsed > 0:
                self.tokens = min(
                    self.capacity, self.tokens + elapsed * self.refill_rate
                )
                self.last_refill = now

            allowed = self.tokens >= 1.0
            if allowed:
                self.tokens -= 1.0
                self.total_allowed += 1
            else:
                self.total_denied += 1
                self.last_denied_time = now
            return allowed

    async def get_stats(self) -> dict[str, Any]:
        """Return a snapshot of limiter state and counters."""
        async with self.lock:
            return {
                "tokens_available": self.tokens,
                "capacity": self.capacity,
                "refill_rate": self.refill_rate,
                "total_allowed": self.total_allowed,
                "total_denied": self.total_denied,
                "last_denied_time": self.last_denied_time,
            }
152
+
153
+
154
class GlobalRateLimiter:
    """
    Global rate limiter singleton for Foundation's logging system.
    Manages per-logger and global rate limits.
    """

    # Singleton storage and the class-level lock guarding its creation.
    _instance = None
    _lock = threading.Lock()

    def __new__(cls):
        # Double-checked locking: cheap unlocked test first, then re-check
        # under the lock so exactly one instance is ever created.
        if cls._instance is None:
            with cls._lock:
                if cls._instance is None:
                    cls._instance = super().__new__(cls)
                    cls._instance._initialized = False
        return cls._instance

    def __init__(self):
        # __init__ runs on every GlobalRateLimiter() call; the flag set in
        # __new__ ensures state is initialized only once.
        if self._initialized:
            return

        self._initialized = True
        # Becomes a SyncRateLimiter or BufferedRateLimiter after configure().
        self.global_limiter = None
        self.logger_limiters: dict[str, SyncRateLimiter] = {}
        self.lock = threading.Lock()

        # Default configuration (can be overridden)
        self.global_rate = None
        self.global_capacity = None
        self.per_logger_rates: dict[str, tuple[float, float]] = {}

        # Queue configuration
        self.use_buffered = False
        self.max_queue_size = 1000
        self.max_memory_mb = None
        self.overflow_policy = "drop_oldest"

    def configure(
        self,
        global_rate: float | None = None,
        global_capacity: float | None = None,
        per_logger_rates: dict[str, tuple[float, float]] | None = None,
        use_buffered: bool = False,
        max_queue_size: int = 1000,
        max_memory_mb: float | None = None,
        overflow_policy: str = "drop_oldest",
    ):
        """
        Configure the global rate limiter.

        Args:
            global_rate: Global logs per second limit
            global_capacity: Global burst capacity
            per_logger_rates: Dict of logger_name -> (rate, capacity) tuples
            use_buffered: Use buffered rate limiter with tracking
            max_queue_size: Maximum queue size for buffered limiter
            max_memory_mb: Maximum memory for buffered limiter
            overflow_policy: What to do when queue is full

        Note:
            The global limiter is (re)built only when both ``global_rate``
            and ``global_capacity`` are provided.
        """
        with self.lock:
            self.use_buffered = use_buffered
            self.max_queue_size = max_queue_size
            self.max_memory_mb = max_memory_mb
            self.overflow_policy = overflow_policy

            if global_rate is not None and global_capacity is not None:
                self.global_rate = global_rate
                self.global_capacity = global_capacity

                if use_buffered:
                    # Lazy import keeps the queue_limiter module optional.
                    from provide.foundation.logger.ratelimit.queue_limiter import (
                        BufferedRateLimiter,
                    )

                    self.global_limiter = BufferedRateLimiter(
                        capacity=global_capacity,
                        refill_rate=global_rate,
                        buffer_size=max_queue_size,
                        track_dropped=True,
                    )
                else:
                    self.global_limiter = SyncRateLimiter(global_capacity, global_rate)

            if per_logger_rates:
                self.per_logger_rates = per_logger_rates
                # Create rate limiters for configured loggers
                for logger_name, (rate, capacity) in per_logger_rates.items():
                    self.logger_limiters[logger_name] = SyncRateLimiter(capacity, rate)

    def is_allowed(
        self, logger_name: str, item: Any | None = None
    ) -> tuple[bool, str | None]:
        """
        Check if a log from a specific logger is allowed.

        Args:
            logger_name: Name of the logger
            item: Optional item for buffered tracking

        Returns:
            Tuple of (allowed, reason) where reason is set if denied
        """
        with self.lock:
            # Check per-logger limit first
            # NOTE(review): a per-logger denial still consumed that logger's
            # token; the global bucket is only consulted after this passes.
            if logger_name in self.logger_limiters:
                if not self.logger_limiters[logger_name].is_allowed():
                    return False, f"Logger '{logger_name}' rate limit exceeded"

            # Check global limit
            if self.global_limiter:
                if self.use_buffered:
                    # BufferedRateLimiter returns tuple
                    from provide.foundation.logger.ratelimit.queue_limiter import (
                        BufferedRateLimiter,
                    )

                    if isinstance(self.global_limiter, BufferedRateLimiter):
                        allowed, reason = self.global_limiter.is_allowed(item)
                        if not allowed:
                            return False, reason or "Global rate limit exceeded"
                else:
                    # SyncRateLimiter returns bool
                    if not self.global_limiter.is_allowed():
                        return False, "Global rate limit exceeded"

            return True, None

    def get_stats(self) -> dict[str, Any]:
        """Get comprehensive rate limiting statistics."""
        with self.lock:
            # Snapshot global stats (None when never configured) plus one
            # entry per configured logger limiter.
            stats = {
                "global": self.global_limiter.get_stats()
                if self.global_limiter
                else None,
                "per_logger": {},
            }

            for logger_name, limiter in self.logger_limiters.items():
                stats["per_logger"][logger_name] = limiter.get_stats()

            return stats