provide-foundation 0.0.0.dev1-py3-none-any.whl → 0.0.0.dev2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93)
  1. provide/foundation/__init__.py +29 -3
  2. provide/foundation/archive/operations.py +4 -6
  3. provide/foundation/cli/__init__.py +2 -2
  4. provide/foundation/cli/commands/deps.py +13 -7
  5. provide/foundation/cli/commands/logs/__init__.py +1 -1
  6. provide/foundation/cli/commands/logs/query.py +1 -1
  7. provide/foundation/cli/commands/logs/send.py +1 -1
  8. provide/foundation/cli/commands/logs/tail.py +1 -1
  9. provide/foundation/cli/decorators.py +11 -10
  10. provide/foundation/cli/main.py +1 -1
  11. provide/foundation/cli/testing.py +2 -35
  12. provide/foundation/cli/utils.py +21 -17
  13. provide/foundation/config/__init__.py +35 -2
  14. provide/foundation/config/converters.py +479 -0
  15. provide/foundation/config/defaults.py +67 -0
  16. provide/foundation/config/env.py +4 -19
  17. provide/foundation/config/loader.py +9 -3
  18. provide/foundation/console/input.py +5 -5
  19. provide/foundation/console/output.py +35 -13
  20. provide/foundation/context/__init__.py +8 -4
  21. provide/foundation/context/core.py +85 -109
  22. provide/foundation/crypto/certificates/operations.py +1 -1
  23. provide/foundation/errors/__init__.py +2 -3
  24. provide/foundation/errors/decorators.py +0 -231
  25. provide/foundation/errors/types.py +0 -97
  26. provide/foundation/file/directory.py +13 -22
  27. provide/foundation/file/lock.py +3 -1
  28. provide/foundation/hub/components.py +72 -384
  29. provide/foundation/hub/config.py +151 -0
  30. provide/foundation/hub/discovery.py +62 -0
  31. provide/foundation/hub/handlers.py +81 -0
  32. provide/foundation/hub/lifecycle.py +194 -0
  33. provide/foundation/hub/manager.py +4 -4
  34. provide/foundation/hub/processors.py +44 -0
  35. provide/foundation/integrations/__init__.py +11 -0
  36. provide/foundation/{observability → integrations}/openobserve/__init__.py +10 -7
  37. provide/foundation/{observability → integrations}/openobserve/auth.py +1 -1
  38. provide/foundation/{observability → integrations}/openobserve/client.py +12 -12
  39. provide/foundation/{observability → integrations}/openobserve/commands.py +3 -3
  40. provide/foundation/integrations/openobserve/config.py +37 -0
  41. provide/foundation/{observability → integrations}/openobserve/formatters.py +1 -1
  42. provide/foundation/{observability → integrations}/openobserve/otlp.py +1 -1
  43. provide/foundation/{observability → integrations}/openobserve/search.py +2 -2
  44. provide/foundation/{observability → integrations}/openobserve/streaming.py +4 -4
  45. provide/foundation/logger/config/logging.py +68 -298
  46. provide/foundation/logger/config/telemetry.py +41 -121
  47. provide/foundation/logger/setup/coordinator.py +1 -1
  48. provide/foundation/observability/__init__.py +2 -2
  49. provide/foundation/process/__init__.py +9 -0
  50. provide/foundation/process/exit.py +47 -0
  51. provide/foundation/process/lifecycle.py +33 -33
  52. provide/foundation/resilience/__init__.py +35 -0
  53. provide/foundation/resilience/circuit.py +164 -0
  54. provide/foundation/resilience/decorators.py +220 -0
  55. provide/foundation/resilience/fallback.py +193 -0
  56. provide/foundation/resilience/retry.py +325 -0
  57. provide/foundation/streams/config.py +79 -0
  58. provide/foundation/streams/console.py +7 -8
  59. provide/foundation/streams/core.py +6 -3
  60. provide/foundation/streams/file.py +12 -2
  61. provide/foundation/testing/__init__.py +7 -2
  62. provide/foundation/testing/cli.py +30 -17
  63. provide/foundation/testing/common/__init__.py +0 -2
  64. provide/foundation/testing/common/fixtures.py +0 -27
  65. provide/foundation/testing/file/content_fixtures.py +316 -0
  66. provide/foundation/testing/file/directory_fixtures.py +107 -0
  67. provide/foundation/testing/file/fixtures.py +45 -516
  68. provide/foundation/testing/file/special_fixtures.py +153 -0
  69. provide/foundation/testing/logger.py +76 -0
  70. provide/foundation/testing/process/async_fixtures.py +405 -0
  71. provide/foundation/testing/process/fixtures.py +50 -571
  72. provide/foundation/testing/process/subprocess_fixtures.py +209 -0
  73. provide/foundation/testing/threading/basic_fixtures.py +101 -0
  74. provide/foundation/testing/threading/data_fixtures.py +99 -0
  75. provide/foundation/testing/threading/execution_fixtures.py +263 -0
  76. provide/foundation/testing/threading/fixtures.py +34 -500
  77. provide/foundation/testing/threading/sync_fixtures.py +97 -0
  78. provide/foundation/testing/time/fixtures.py +4 -4
  79. provide/foundation/tools/cache.py +8 -6
  80. provide/foundation/tools/downloader.py +23 -12
  81. provide/foundation/tracer/spans.py +2 -2
  82. provide/foundation/transport/config.py +26 -95
  83. provide/foundation/transport/middleware.py +30 -36
  84. provide/foundation/utils/deps.py +14 -12
  85. provide/foundation/utils/parsing.py +49 -4
  86. {provide_foundation-0.0.0.dev1.dist-info → provide_foundation-0.0.0.dev2.dist-info}/METADATA +1 -1
  87. {provide_foundation-0.0.0.dev1.dist-info → provide_foundation-0.0.0.dev2.dist-info}/RECORD +93 -68
  88. /provide/foundation/{observability → integrations}/openobserve/exceptions.py +0 -0
  89. /provide/foundation/{observability → integrations}/openobserve/models.py +0 -0
  90. {provide_foundation-0.0.0.dev1.dist-info → provide_foundation-0.0.0.dev2.dist-info}/WHEEL +0 -0
  91. {provide_foundation-0.0.0.dev1.dist-info → provide_foundation-0.0.0.dev2.dist-info}/entry_points.txt +0 -0
  92. {provide_foundation-0.0.0.dev1.dist-info → provide_foundation-0.0.0.dev2.dist-info}/licenses/LICENSE +0 -0
  93. {provide_foundation-0.0.0.dev1.dist-info → provide_foundation-0.0.0.dev2.dist-info}/top_level.txt +0 -0
provide/foundation/resilience/fallback.py (new file)
@@ -0,0 +1,193 @@
+"""
+Fallback implementation for graceful degradation.
+"""
+
+import asyncio
+import functools
+from typing import Any, Callable, TypeVar
+
+from attrs import define, field
+
+from provide.foundation.logger import logger
+
+T = TypeVar("T")
+
+
+@define(kw_only=True, slots=True)
+class FallbackChain:
+    """Chain of fallback strategies for graceful degradation.
+
+    Executes fallback functions in order when primary function fails.
+    """
+
+    fallbacks: list[Callable[..., T]] = field(factory=list)
+    expected_exceptions: tuple[type[Exception], ...] = field(
+        factory=lambda: (Exception,)
+    )
+
+    def add_fallback(self, fallback_func: Callable[..., T]) -> None:
+        """Add a fallback function to the chain."""
+        self.fallbacks.append(fallback_func)
+        logger.debug(
+            "Added fallback to chain",
+            fallback_count=len(self.fallbacks),
+            fallback_name=getattr(fallback_func, '__name__', 'anonymous')
+        )
+
+    def execute(self, primary_func: Callable[..., T], *args, **kwargs) -> T:
+        """Execute primary function with fallback chain (sync)."""
+        # Try primary function first
+        primary_exception = None
+        try:
+            result = primary_func(*args, **kwargs)
+            logger.trace("Primary function succeeded", func=getattr(primary_func, '__name__', 'anonymous'))
+            return result
+        except Exception as e:
+            primary_exception = e
+            if not isinstance(e, self.expected_exceptions):
+                # Unexpected exception type, don't use fallbacks
+                logger.debug(
+                    "Primary function failed with unexpected exception type",
+                    exception_type=type(e).__name__,
+                    expected_types=[t.__name__ for t in self.expected_exceptions]
+                )
+                raise
+
+            logger.warning(
+                "Primary function failed, trying fallbacks",
+                func=getattr(primary_func, '__name__', 'anonymous'),
+                error=str(e),
+                fallback_count=len(self.fallbacks)
+            )
+
+        # Try fallbacks in order
+        last_exception = None
+        for i, fallback_func in enumerate(self.fallbacks):
+            try:
+                result = fallback_func(*args, **kwargs)
+                logger.info(
+                    "Fallback succeeded",
+                    fallback_index=i,
+                    fallback_name=getattr(fallback_func, '__name__', 'anonymous')
+                )
+                return result
+            except Exception as e:
+                last_exception = e
+                logger.warning(
+                    "Fallback failed",
+                    fallback_index=i,
+                    fallback_name=getattr(fallback_func, '__name__', 'anonymous'),
+                    error=str(e)
+                )
+                continue
+
+        # All fallbacks failed
+        logger.error(
+            "All fallbacks exhausted",
+            primary_func=getattr(primary_func, '__name__', 'anonymous'),
+            fallback_count=len(self.fallbacks)
+        )
+
+        # Raise the last exception from fallbacks, or original if no fallbacks
+        if last_exception is not None:
+            raise last_exception
+        if primary_exception is not None:
+            raise primary_exception
+        # This should never happen but provide fallback
+        raise RuntimeError("Fallback chain execution failed with no recorded exceptions")
+
+    async def execute_async(self, primary_func: Callable[..., T], *args, **kwargs) -> T:
+        """Execute primary function with fallback chain (async)."""
+        # Try primary function first
+        primary_exception = None
+        try:
+            if asyncio.iscoroutinefunction(primary_func):
+                result = await primary_func(*args, **kwargs)
+            else:
+                result = primary_func(*args, **kwargs)
+            logger.trace("Primary function succeeded", func=getattr(primary_func, '__name__', 'anonymous'))
+            return result
+        except Exception as e:
+            primary_exception = e
+            if not isinstance(e, self.expected_exceptions):
+                # Unexpected exception type, don't use fallbacks
+                logger.debug(
+                    "Primary function failed with unexpected exception type",
+                    exception_type=type(e).__name__,
+                    expected_types=[t.__name__ for t in self.expected_exceptions]
+                )
+                raise
+
+            logger.warning(
+                "Primary function failed, trying fallbacks",
+                func=getattr(primary_func, '__name__', 'anonymous'),
+                error=str(e),
+                fallback_count=len(self.fallbacks)
+            )
+
+        # Try fallbacks in order
+        last_exception = None
+        for i, fallback_func in enumerate(self.fallbacks):
+            try:
+                if asyncio.iscoroutinefunction(fallback_func):
+                    result = await fallback_func(*args, **kwargs)
+                else:
+                    result = fallback_func(*args, **kwargs)
+                logger.info(
+                    "Fallback succeeded",
+                    fallback_index=i,
+                    fallback_name=getattr(fallback_func, '__name__', 'anonymous')
+                )
+                return result
+            except Exception as e:
+                last_exception = e
+                logger.warning(
+                    "Fallback failed",
+                    fallback_index=i,
+                    fallback_name=getattr(fallback_func, '__name__', 'anonymous'),
+                    error=str(e)
+                )
+                continue
+
+        # All fallbacks failed
+        logger.error(
+            "All fallbacks exhausted",
+            primary_func=getattr(primary_func, '__name__', 'anonymous'),
+            fallback_count=len(self.fallbacks)
+        )
+
+        # Raise the last exception from fallbacks, or original if no fallbacks
+        if last_exception is not None:
+            raise last_exception
+        if primary_exception is not None:
+            raise primary_exception
+        # This should never happen but provide fallback
+        raise RuntimeError("Fallback chain execution failed with no recorded exceptions")
+
+
+def fallback(*fallback_funcs: Callable[..., T]) -> Callable:
+    """Decorator to add fallback functions to a primary function.
+
+    Args:
+        *fallback_funcs: Functions to use as fallbacks, in order of preference
+
+    Returns:
+        Decorated function that uses fallback chain
+    """
+    def decorator(primary_func: Callable[..., T]) -> Callable[..., T]:
+        chain = FallbackChain()
+        for fallback_func in fallback_funcs:
+            chain.add_fallback(fallback_func)
+
+        if asyncio.iscoroutinefunction(primary_func):
+            @functools.wraps(primary_func)
+            async def async_wrapper(*args, **kwargs):
+                return await chain.execute_async(primary_func, *args, **kwargs)
+            return async_wrapper
+        else:
+            @functools.wraps(primary_func)
+            def sync_wrapper(*args, **kwargs):
+                return chain.execute(primary_func, *args, **kwargs)
+            return sync_wrapper
+
+    return decorator
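For orientation, a minimal usage sketch of the new fallback decorator follows (it is not part of the diff). It assumes the decorator is importable from provide.foundation.resilience.fallback as added above; fetch_from_cache and fetch_default are hypothetical stand-ins for real degradation paths.

    # Hypothetical degradation paths; only the decorator comes from the package.
    from provide.foundation.resilience.fallback import fallback

    def fetch_from_cache(key: str) -> str:
        return f"cached:{key}"

    def fetch_default(key: str) -> str:
        return "default"

    @fallback(fetch_from_cache, fetch_default)
    def fetch_remote(key: str) -> str:
        raise ConnectionError("remote unavailable")

    # The primary raises an expected exception, so the chain tries
    # fetch_from_cache next and returns "cached:user:1".
    print(fetch_remote("user:1"))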
provide/foundation/resilience/retry.py (new file)
@@ -0,0 +1,325 @@
+"""
+Unified retry execution engine and policy configuration.
+
+This module provides the core retry functionality used throughout foundation,
+eliminating duplication between decorators and middleware.
+"""
+
+import asyncio
+import random
+import time
+from enum import Enum
+from typing import Any, Callable, TypeVar
+
+from attrs import define, field, validators
+
+from provide.foundation.logger import get_logger
+
+logger = get_logger(__name__)
+
+T = TypeVar("T")
+
+
+class BackoffStrategy(str, Enum):
+    """Backoff strategies for retry delays."""
+
+    FIXED = "fixed"  # Same delay every time
+    LINEAR = "linear"  # Linear increase (delay * attempt)
+    EXPONENTIAL = "exponential"  # Exponential increase (delay * 2^attempt)
+    FIBONACCI = "fibonacci"  # Fibonacci sequence delays
+
+
+@define(frozen=True, kw_only=True)
+class RetryPolicy:
+    """
+    Configuration for retry behavior.
+
+    This policy can be used with both the @retry decorator and transport middleware,
+    providing a unified configuration model for all retry scenarios.
+
+    Attributes:
+        max_attempts: Maximum number of retry attempts (must be >= 1)
+        backoff: Backoff strategy to use for delays
+        base_delay: Base delay in seconds between retries
+        max_delay: Maximum delay in seconds (caps exponential growth)
+        jitter: Whether to add random jitter to delays (±25%)
+        retryable_errors: Tuple of exception types to retry (None = all)
+        retryable_status_codes: Set of HTTP status codes to retry (for middleware)
+    """
+
+    max_attempts: int = field(default=3, validator=validators.instance_of(int))
+    backoff: BackoffStrategy = field(default=BackoffStrategy.EXPONENTIAL)
+    base_delay: float = field(default=1.0, validator=validators.instance_of((int, float)))
+    max_delay: float = field(default=60.0, validator=validators.instance_of((int, float)))
+    jitter: bool = field(default=True)
+    retryable_errors: tuple[type[Exception], ...] | None = field(default=None)
+    retryable_status_codes: set[int] | None = field(default=None)
+
+    @max_attempts.validator
+    def _validate_max_attempts(self, attribute, value):
+        """Validate max_attempts is at least 1."""
+        if value < 1:
+            raise ValueError("max_attempts must be at least 1")
+
+    @base_delay.validator
+    def _validate_base_delay(self, attribute, value):
+        """Validate base_delay is positive."""
+        if value < 0:
+            raise ValueError("base_delay must be positive")
+
+    @max_delay.validator
+    def _validate_max_delay(self, attribute, value):
+        """Validate max_delay is positive and >= base_delay."""
+        if value < 0:
+            raise ValueError("max_delay must be positive")
+        if value < self.base_delay:
+            raise ValueError("max_delay must be >= base_delay")
+
+    def calculate_delay(self, attempt: int) -> float:
+        """
+        Calculate delay for a given attempt number.
+
+        Args:
+            attempt: Attempt number (1-based)
+
+        Returns:
+            Delay in seconds
+        """
+        if attempt <= 0:
+            return 0
+
+        if self.backoff == BackoffStrategy.FIXED:
+            delay = self.base_delay
+        elif self.backoff == BackoffStrategy.LINEAR:
+            delay = self.base_delay * attempt
+        elif self.backoff == BackoffStrategy.EXPONENTIAL:
+            delay = self.base_delay * (2 ** (attempt - 1))
+        elif self.backoff == BackoffStrategy.FIBONACCI:
+            # Calculate fibonacci number for attempt
+            a, b = 0, 1
+            for _ in range(attempt):
+                a, b = b, a + b
+            delay = self.base_delay * a
+        else:
+            delay = self.base_delay
+
+        # Cap at max delay
+        delay = min(delay, self.max_delay)
+
+        # Add jitter if configured (±25% random variation)
+        if self.jitter:
+            jitter_factor = 0.75 + (random.random() * 0.5)
+            delay *= jitter_factor
+
+        return delay
+
+    def should_retry(self, error: Exception, attempt: int) -> bool:
+        """
+        Determine if an error should be retried.
+
+        Args:
+            error: The exception that occurred
+            attempt: Current attempt number (1-based)
+
+        Returns:
+            True if should retry, False otherwise
+        """
+        # Check attempt limit
+        if attempt >= self.max_attempts:
+            return False
+
+        # Check error type if filter is configured
+        if self.retryable_errors is not None:
+            return isinstance(error, self.retryable_errors)
+
+        # Default to retry for any error
+        return True
+
+    def should_retry_response(self, response: Any, attempt: int) -> bool:
+        """
+        Check if HTTP response should be retried.
+
+        Args:
+            response: Response object with status attribute
+            attempt: Current attempt number (1-based)
+
+        Returns:
+            True if should retry, False otherwise
+        """
+        # Check attempt limit
+        if attempt >= self.max_attempts:
+            return False
+
+        # Check status code if configured
+        if self.retryable_status_codes is not None:
+            return getattr(response, 'status', None) in self.retryable_status_codes
+
+        # Default to no retry for responses
+        return False
+
+    def __str__(self) -> str:
+        """Human-readable string representation."""
+        return (
+            f"RetryPolicy(max_attempts={self.max_attempts}, "
+            f"backoff={self.backoff.value}, base_delay={self.base_delay}s)"
+        )
+
+
+class RetryExecutor:
+    """
+    Unified retry execution engine.
+
+    This executor handles the actual retry loop logic for both sync and async
+    functions, using a RetryPolicy for configuration. It's used internally by
+    both the @retry decorator and RetryMiddleware.
+    """
+
+    def __init__(
+        self,
+        policy: RetryPolicy,
+        on_retry: Callable[[int, Exception], None] | None = None
+    ):
+        """
+        Initialize retry executor.
+
+        Args:
+            policy: Retry policy configuration
+            on_retry: Optional callback for retry events (attempt, error)
+        """
+        self.policy = policy
+        self.on_retry = on_retry
+
+    def execute_sync(self, func: Callable[..., T], *args, **kwargs) -> T:
+        """
+        Execute synchronous function with retry logic.
+
+        Args:
+            func: Function to execute
+            *args: Positional arguments for func
+            **kwargs: Keyword arguments for func
+
+        Returns:
+            Result from successful execution
+
+        Raises:
+            Last exception if all retries are exhausted
+        """
+        last_exception = None
+
+        for attempt in range(1, self.policy.max_attempts + 1):
+            try:
+                return func(*args, **kwargs)
+            except Exception as e:
+                last_exception = e
+
+                # Don't retry on last attempt - log and raise
+                if attempt >= self.policy.max_attempts:
+                    logger.error(
+                        f"All {self.policy.max_attempts} retry attempts failed",
+                        attempts=self.policy.max_attempts,
+                        error=str(e),
+                        error_type=type(e).__name__,
+                    )
+                    raise
+
+                # Check if we should retry this error
+                if not self.policy.should_retry(e, attempt):
+                    raise
+
+                # Calculate delay
+                delay = self.policy.calculate_delay(attempt)
+
+                # Log retry attempt
+                logger.info(
+                    f"Retry {attempt}/{self.policy.max_attempts} after {delay:.2f}s",
+                    attempt=attempt,
+                    max_attempts=self.policy.max_attempts,
+                    delay=delay,
+                    error=str(e),
+                    error_type=type(e).__name__,
+                )
+
+                # Call retry callback if provided
+                if self.on_retry:
+                    try:
+                        self.on_retry(attempt, e)
+                    except Exception as callback_error:
+                        logger.warning(
+                            "Retry callback failed",
+                            error=str(callback_error)
+                        )
+
+                # Wait before retry
+                time.sleep(delay)
+
+        # Should never reach here, but for safety
+        raise last_exception
+
+    async def execute_async(self, func: Callable[..., T], *args, **kwargs) -> T:
+        """
+        Execute asynchronous function with retry logic.
+
+        Args:
+            func: Async function to execute
+            *args: Positional arguments for func
+            **kwargs: Keyword arguments for func
+
+        Returns:
+            Result from successful execution
+
+        Raises:
+            Last exception if all retries are exhausted
+        """
+        last_exception = None
+
+        for attempt in range(1, self.policy.max_attempts + 1):
+            try:
+                return await func(*args, **kwargs)
+            except Exception as e:
+                last_exception = e
+
+                # Don't retry on last attempt - log and raise
+                if attempt >= self.policy.max_attempts:
+                    logger.error(
+                        f"All {self.policy.max_attempts} retry attempts failed",
+                        attempts=self.policy.max_attempts,
+                        error=str(e),
+                        error_type=type(e).__name__,
+                    )
+                    raise
+
+                # Check if we should retry this error
+                if not self.policy.should_retry(e, attempt):
+                    raise
+
+                # Calculate delay
+                delay = self.policy.calculate_delay(attempt)
+
+                # Log retry attempt
+                logger.info(
+                    f"Retry {attempt}/{self.policy.max_attempts} after {delay:.2f}s",
+                    attempt=attempt,
+                    max_attempts=self.policy.max_attempts,
+                    delay=delay,
+                    error=str(e),
+                    error_type=type(e).__name__,
+                )
+
+                # Call retry callback if provided
+                if self.on_retry:
+                    try:
+                        if asyncio.iscoroutinefunction(self.on_retry):
+                            await self.on_retry(attempt, e)
+                        else:
+                            self.on_retry(attempt, e)
+                    except Exception as callback_error:
+                        logger.warning(
+                            "Retry callback failed",
+                            error=str(callback_error)
+                        )
+
+                # Wait before retry
+                await asyncio.sleep(delay)
+
+        # Should never reach here, but for safety
+        raise last_exception
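A short sketch of how the policy and executor compose (not part of the diff); flaky_call and the on_retry callback are hypothetical, and the keyword-only constructor follows the attrs definition above.

    from provide.foundation.resilience.retry import BackoffStrategy, RetryExecutor, RetryPolicy

    policy = RetryPolicy(
        max_attempts=4,
        backoff=BackoffStrategy.EXPONENTIAL,
        base_delay=0.5,
        max_delay=5.0,
        retryable_errors=(ConnectionError, TimeoutError),
    )

    calls = {"n": 0}

    def flaky_call() -> str:
        # Fails twice, then succeeds; exercises the exponential backoff path.
        calls["n"] += 1
        if calls["n"] < 3:
            raise ConnectionError("transient failure")
        return "ok"

    executor = RetryExecutor(policy, on_retry=lambda attempt, err: print(f"retry {attempt}: {err}"))
    print(executor.execute_sync(flaky_call))  # prints "ok" after two retried failures

Per the docstring above, the same RetryPolicy instance is meant to be shared with the transport middleware, which is what "unified configuration model" refers to.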
provide/foundation/streams/config.py (new file)
@@ -0,0 +1,79 @@
+"""
+Stream configuration for console output settings.
+
+This module provides configuration for console stream behavior,
+including color support and testing mode detection.
+"""
+
+from attrs import define
+
+from provide.foundation.config.env import RuntimeConfig
+from provide.foundation.config.base import field
+from provide.foundation.config.converters import parse_bool_extended
+
+
+@define(slots=True, repr=False)
+class StreamConfig(RuntimeConfig):
+    """Configuration for console stream output behavior."""
+
+    no_color: bool = field(
+        default=False,
+        env_var="NO_COLOR",
+        converter=parse_bool_extended,
+        description="Disable color output in console",
+    )
+
+    force_color: bool = field(
+        default=False,
+        env_var="FORCE_COLOR",
+        converter=parse_bool_extended,
+        description="Force color output even when not in TTY",
+    )
+
+    click_testing: bool = field(
+        default=False,
+        env_var="CLICK_TESTING",
+        converter=parse_bool_extended,
+        description="Indicates if running inside Click testing framework",
+    )
+
+
+    def supports_color(self) -> bool:
+        """
+        Determine if the console supports color output.
+
+        Returns:
+            True if color is supported, False otherwise
+        """
+        if self.no_color:
+            return False
+
+        if self.force_color:
+            return True
+
+        # Additional logic for TTY detection would go here
+        # For now, just return based on the flags
+        return not self.no_color
+
+
+# Global instance for easy access
+_stream_config: StreamConfig | None = None
+
+
+def get_stream_config() -> StreamConfig:
+    """
+    Get the global stream configuration instance.
+
+    Returns:
+        StreamConfig instance loaded from environment
+    """
+    global _stream_config
+    if _stream_config is None:
+        _stream_config = StreamConfig.from_env()
+    return _stream_config
+
+
+def reset_stream_config() -> None:
+    """Reset the global stream configuration (mainly for testing)."""
+    global _stream_config
+    _stream_config = None
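A minimal sketch of the intended call pattern (not part of the diff), assuming parse_bool_extended treats "1" as true and that the module paths match the file list above; the console helper referenced at the end is the one changed in the next two hunks.

    import os

    from provide.foundation.streams.config import get_stream_config, reset_stream_config
    from provide.foundation.streams.console import supports_color

    os.environ["NO_COLOR"] = "1"
    reset_stream_config()             # drop the cached instance so from_env() re-reads the variables
    config = get_stream_config()      # lazily builds and caches StreamConfig.from_env()
    print(config.no_color)            # True
    print(config.supports_color())    # False: NO_COLOR wins over FORCE_COLOR and TTY state
    print(supports_color())           # the console helper now delegates to the same cached config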
provide/foundation/streams/console.py
@@ -9,6 +9,7 @@ Handles console-specific stream operations and formatting.
 import sys
 from typing import TextIO
 
+from provide.foundation.streams.config import get_stream_config
 from provide.foundation.streams.core import get_log_stream
 
 
@@ -25,16 +26,14 @@ def is_tty() -> bool:
 
 def supports_color() -> bool:
     """Check if the current stream supports color output."""
-    import os
-
-    # Check NO_COLOR environment variable
-    if os.getenv("NO_COLOR"):
+    config = get_stream_config()
+
+    if config.no_color:
         return False
-
-    # Check FORCE_COLOR environment variable
-    if os.getenv("FORCE_COLOR"):
+
+    if config.force_color:
         return True
-
+
     # Check if we're in a TTY
     return is_tty()
 
provide/foundation/streams/core.py
@@ -10,6 +10,8 @@ import sys
 import threading
 from typing import TextIO
 
+from provide.foundation.streams.config import get_stream_config
+
 _PROVIDE_LOG_STREAM: TextIO = sys.stderr
 _LOG_FILE_HANDLE: TextIO | None = None
 _STREAM_LOCK = threading.Lock()
@@ -18,10 +20,11 @@ _STREAM_LOCK = threading.Lock()
 def _is_in_click_testing() -> bool:
     """Check if we're running inside Click's testing framework."""
     import inspect
-    import os
-
+
+    config = get_stream_config()
+
     # Check environment variables for Click testing
-    if os.getenv("CLICK_TESTING"):
+    if config.click_testing:
         return True
 
     # Check the call stack for Click's testing module or CLI integration tests
provide/foundation/streams/file.py
@@ -18,6 +18,16 @@ from provide.foundation.streams.core import (
 from provide.foundation.utils.streams import get_safe_stderr
 
 
+def _safe_error_output(message: str) -> None:
+    """
+    Output error message to stderr using basic print to avoid circular dependencies.
+
+    This function intentionally uses print() instead of Foundation's perr() to prevent
+    circular import issues during stream initialization and teardown phases.
+    """
+    print(message, file=sys.stderr)
+
+
 def configure_file_logging(log_file_path: str | None) -> None:
     """
     Configure file logging if a path is provided.
@@ -56,7 +66,7 @@ def configure_file_logging(log_file_path: str | None) -> None:
             _PROVIDE_LOG_STREAM = _LOG_FILE_HANDLE
         except Exception as e:
             # Log error to stderr and fall back
-            print(f"Failed to open log file {log_file_path}: {e}", file=sys.stderr)
+            _safe_error_output(f"Failed to open log file {log_file_path}: {e}")
             _PROVIDE_LOG_STREAM = get_safe_stderr()
     elif not is_test_stream:
         _PROVIDE_LOG_STREAM = get_safe_stderr()
@@ -71,7 +81,7 @@ def flush_log_streams() -> None:
         try:
             _LOG_FILE_HANDLE.flush()
         except Exception as e:
-            print(f"Failed to flush log file handle: {e}", file=sys.stderr)
+            _safe_error_output(f"Failed to flush log file handle: {e}")
 
 
 def close_log_streams() -> None: