genxai-framework 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (156) hide show
  1. cli/__init__.py +3 -0
  2. cli/commands/__init__.py +6 -0
  3. cli/commands/approval.py +85 -0
  4. cli/commands/audit.py +127 -0
  5. cli/commands/metrics.py +25 -0
  6. cli/commands/tool.py +389 -0
  7. cli/main.py +32 -0
  8. genxai/__init__.py +81 -0
  9. genxai/api/__init__.py +5 -0
  10. genxai/api/app.py +21 -0
  11. genxai/config/__init__.py +5 -0
  12. genxai/config/settings.py +37 -0
  13. genxai/connectors/__init__.py +19 -0
  14. genxai/connectors/base.py +122 -0
  15. genxai/connectors/kafka.py +92 -0
  16. genxai/connectors/postgres_cdc.py +95 -0
  17. genxai/connectors/registry.py +44 -0
  18. genxai/connectors/sqs.py +94 -0
  19. genxai/connectors/webhook.py +73 -0
  20. genxai/core/__init__.py +37 -0
  21. genxai/core/agent/__init__.py +32 -0
  22. genxai/core/agent/base.py +206 -0
  23. genxai/core/agent/config_io.py +59 -0
  24. genxai/core/agent/registry.py +98 -0
  25. genxai/core/agent/runtime.py +970 -0
  26. genxai/core/communication/__init__.py +6 -0
  27. genxai/core/communication/collaboration.py +44 -0
  28. genxai/core/communication/message_bus.py +192 -0
  29. genxai/core/communication/protocols.py +35 -0
  30. genxai/core/execution/__init__.py +22 -0
  31. genxai/core/execution/metadata.py +181 -0
  32. genxai/core/execution/queue.py +201 -0
  33. genxai/core/graph/__init__.py +30 -0
  34. genxai/core/graph/checkpoints.py +77 -0
  35. genxai/core/graph/edges.py +131 -0
  36. genxai/core/graph/engine.py +813 -0
  37. genxai/core/graph/executor.py +516 -0
  38. genxai/core/graph/nodes.py +161 -0
  39. genxai/core/graph/trigger_runner.py +40 -0
  40. genxai/core/memory/__init__.py +19 -0
  41. genxai/core/memory/base.py +72 -0
  42. genxai/core/memory/embedding.py +327 -0
  43. genxai/core/memory/episodic.py +448 -0
  44. genxai/core/memory/long_term.py +467 -0
  45. genxai/core/memory/manager.py +543 -0
  46. genxai/core/memory/persistence.py +297 -0
  47. genxai/core/memory/procedural.py +461 -0
  48. genxai/core/memory/semantic.py +526 -0
  49. genxai/core/memory/shared.py +62 -0
  50. genxai/core/memory/short_term.py +303 -0
  51. genxai/core/memory/vector_store.py +508 -0
  52. genxai/core/memory/working.py +211 -0
  53. genxai/core/state/__init__.py +6 -0
  54. genxai/core/state/manager.py +293 -0
  55. genxai/core/state/schema.py +115 -0
  56. genxai/llm/__init__.py +14 -0
  57. genxai/llm/base.py +150 -0
  58. genxai/llm/factory.py +329 -0
  59. genxai/llm/providers/__init__.py +1 -0
  60. genxai/llm/providers/anthropic.py +249 -0
  61. genxai/llm/providers/cohere.py +274 -0
  62. genxai/llm/providers/google.py +334 -0
  63. genxai/llm/providers/ollama.py +147 -0
  64. genxai/llm/providers/openai.py +257 -0
  65. genxai/llm/routing.py +83 -0
  66. genxai/observability/__init__.py +6 -0
  67. genxai/observability/logging.py +327 -0
  68. genxai/observability/metrics.py +494 -0
  69. genxai/observability/tracing.py +372 -0
  70. genxai/performance/__init__.py +39 -0
  71. genxai/performance/cache.py +256 -0
  72. genxai/performance/pooling.py +289 -0
  73. genxai/security/audit.py +304 -0
  74. genxai/security/auth.py +315 -0
  75. genxai/security/cost_control.py +528 -0
  76. genxai/security/default_policies.py +44 -0
  77. genxai/security/jwt.py +142 -0
  78. genxai/security/oauth.py +226 -0
  79. genxai/security/pii.py +366 -0
  80. genxai/security/policy_engine.py +82 -0
  81. genxai/security/rate_limit.py +341 -0
  82. genxai/security/rbac.py +247 -0
  83. genxai/security/validation.py +218 -0
  84. genxai/tools/__init__.py +21 -0
  85. genxai/tools/base.py +383 -0
  86. genxai/tools/builtin/__init__.py +131 -0
  87. genxai/tools/builtin/communication/__init__.py +15 -0
  88. genxai/tools/builtin/communication/email_sender.py +159 -0
  89. genxai/tools/builtin/communication/notification_manager.py +167 -0
  90. genxai/tools/builtin/communication/slack_notifier.py +118 -0
  91. genxai/tools/builtin/communication/sms_sender.py +118 -0
  92. genxai/tools/builtin/communication/webhook_caller.py +136 -0
  93. genxai/tools/builtin/computation/__init__.py +15 -0
  94. genxai/tools/builtin/computation/calculator.py +101 -0
  95. genxai/tools/builtin/computation/code_executor.py +183 -0
  96. genxai/tools/builtin/computation/data_validator.py +259 -0
  97. genxai/tools/builtin/computation/hash_generator.py +129 -0
  98. genxai/tools/builtin/computation/regex_matcher.py +201 -0
  99. genxai/tools/builtin/data/__init__.py +15 -0
  100. genxai/tools/builtin/data/csv_processor.py +213 -0
  101. genxai/tools/builtin/data/data_transformer.py +299 -0
  102. genxai/tools/builtin/data/json_processor.py +233 -0
  103. genxai/tools/builtin/data/text_analyzer.py +288 -0
  104. genxai/tools/builtin/data/xml_processor.py +175 -0
  105. genxai/tools/builtin/database/__init__.py +15 -0
  106. genxai/tools/builtin/database/database_inspector.py +157 -0
  107. genxai/tools/builtin/database/mongodb_query.py +196 -0
  108. genxai/tools/builtin/database/redis_cache.py +167 -0
  109. genxai/tools/builtin/database/sql_query.py +145 -0
  110. genxai/tools/builtin/database/vector_search.py +163 -0
  111. genxai/tools/builtin/file/__init__.py +17 -0
  112. genxai/tools/builtin/file/directory_scanner.py +214 -0
  113. genxai/tools/builtin/file/file_compressor.py +237 -0
  114. genxai/tools/builtin/file/file_reader.py +102 -0
  115. genxai/tools/builtin/file/file_writer.py +122 -0
  116. genxai/tools/builtin/file/image_processor.py +186 -0
  117. genxai/tools/builtin/file/pdf_parser.py +144 -0
  118. genxai/tools/builtin/test/__init__.py +15 -0
  119. genxai/tools/builtin/test/async_simulator.py +62 -0
  120. genxai/tools/builtin/test/data_transformer.py +99 -0
  121. genxai/tools/builtin/test/error_generator.py +82 -0
  122. genxai/tools/builtin/test/simple_math.py +94 -0
  123. genxai/tools/builtin/test/string_processor.py +72 -0
  124. genxai/tools/builtin/web/__init__.py +15 -0
  125. genxai/tools/builtin/web/api_caller.py +161 -0
  126. genxai/tools/builtin/web/html_parser.py +330 -0
  127. genxai/tools/builtin/web/http_client.py +187 -0
  128. genxai/tools/builtin/web/url_validator.py +162 -0
  129. genxai/tools/builtin/web/web_scraper.py +170 -0
  130. genxai/tools/custom/my_test_tool_2.py +9 -0
  131. genxai/tools/dynamic.py +105 -0
  132. genxai/tools/mcp_server.py +167 -0
  133. genxai/tools/persistence/__init__.py +6 -0
  134. genxai/tools/persistence/models.py +55 -0
  135. genxai/tools/persistence/service.py +322 -0
  136. genxai/tools/registry.py +227 -0
  137. genxai/tools/security/__init__.py +11 -0
  138. genxai/tools/security/limits.py +214 -0
  139. genxai/tools/security/policy.py +20 -0
  140. genxai/tools/security/sandbox.py +248 -0
  141. genxai/tools/templates.py +435 -0
  142. genxai/triggers/__init__.py +19 -0
  143. genxai/triggers/base.py +104 -0
  144. genxai/triggers/file_watcher.py +75 -0
  145. genxai/triggers/queue.py +68 -0
  146. genxai/triggers/registry.py +82 -0
  147. genxai/triggers/schedule.py +66 -0
  148. genxai/triggers/webhook.py +68 -0
  149. genxai/utils/__init__.py +1 -0
  150. genxai/utils/tokens.py +295 -0
  151. genxai_framework-0.1.0.dist-info/METADATA +495 -0
  152. genxai_framework-0.1.0.dist-info/RECORD +156 -0
  153. genxai_framework-0.1.0.dist-info/WHEEL +5 -0
  154. genxai_framework-0.1.0.dist-info/entry_points.txt +2 -0
  155. genxai_framework-0.1.0.dist-info/licenses/LICENSE +21 -0
  156. genxai_framework-0.1.0.dist-info/top_level.txt +2 -0
@@ -0,0 +1,257 @@
1
+ """OpenAI LLM provider implementation."""
2
+
3
+ from typing import Any, Dict, Optional, AsyncIterator
4
+ import os
5
+ import logging
6
+
7
+ from genxai.llm.base import LLMProvider, LLMResponse
8
+
9
+ logger = logging.getLogger(__name__)
10
+
11
+
12
class OpenAIProvider(LLMProvider):
    """OpenAI LLM provider backed by the async ``openai`` client."""

    def __init__(
        self,
        model: str = "gpt-4",
        api_key: Optional[str] = None,
        temperature: float = 0.7,
        max_tokens: Optional[int] = None,
        **kwargs: Any,
    ) -> None:
        """Initialize OpenAI provider.

        Args:
            model: Model name (gpt-4, gpt-3.5-turbo, etc.)
            api_key: OpenAI API key. Required: this provider deliberately
                does NOT fall back to the OPENAI_API_KEY env var, because
                the test suite expects ``OpenAIProvider()`` with no args to
                raise even when a global key is present in the environment.
            temperature: Sampling temperature
            max_tokens: Maximum tokens to generate
            **kwargs: Additional OpenAI-specific parameters

        Raises:
            ValueError: If ``api_key`` is not provided.
        """
        super().__init__(model, temperature, max_tokens, **kwargs)

        if api_key is None:
            raise ValueError(
                "api_key is required when instantiating OpenAIProvider directly"
            )

        self.api_key = api_key

        self._client: Optional[Any] = None
        self._initialize_client()

    def _initialize_client(self) -> None:
        """Initialize OpenAI client; leaves ``self._client`` as None on failure."""
        try:
            from openai import AsyncOpenAI
            self._client = AsyncOpenAI(api_key=self.api_key)
            logger.info(f"OpenAI client initialized with model: {self.model}")
        except ImportError:
            logger.error(
                "OpenAI package not installed. Install with: pip install openai"
            )
            self._client = None
        except Exception as e:
            logger.error(f"Failed to initialize OpenAI client: {e}")
            self._client = None

    @staticmethod
    def _build_messages(prompt: str, system_prompt: Optional[str]) -> list:
        """Build the chat message list from a prompt and optional system prompt."""
        messages = []
        if system_prompt:
            messages.append({"role": "system", "content": system_prompt})
        messages.append({"role": "user", "content": prompt})
        return messages

    def _chat_params(self, messages: Any, kwargs: Dict[str, Any]) -> Dict[str, Any]:
        """Build the common chat-completion parameter dict.

        Args:
            messages: Chat messages to send.
            kwargs: Caller overrides (temperature, max_tokens, tools, ...).

        Returns:
            Parameter dict for ``chat.completions.create``.
        """
        params: Dict[str, Any] = {
            "model": self.model,
            "messages": messages,
            "temperature": kwargs.get("temperature", self.temperature),
        }

        # Bugfix: honour a caller-supplied max_tokens even when the provider
        # itself was configured without one (previously the kwarg was dropped
        # whenever self.max_tokens was None).
        max_tokens = kwargs.get("max_tokens", self.max_tokens)
        if max_tokens:
            params["max_tokens"] = max_tokens

        # Tool calling parameters
        if "tools" in kwargs:
            params["tools"] = kwargs["tools"]
        if "tool_choice" in kwargs:
            params["tool_choice"] = kwargs["tool_choice"]

        return params

    def _response_from(self, response: Any) -> LLMResponse:
        """Convert a raw OpenAI chat completion into an LLMResponse.

        Also records token usage in the provider's running stats.
        """
        choice = response.choices[0]
        message = choice.message

        usage = {
            "prompt_tokens": response.usage.prompt_tokens if response.usage else 0,
            "completion_tokens": response.usage.completion_tokens if response.usage else 0,
            "total_tokens": response.usage.total_tokens if response.usage else 0,
        }
        self._update_stats(usage)

        return LLMResponse(
            content=message.content or "",
            model=response.model,
            usage=usage,
            finish_reason=choice.finish_reason,
            metadata={
                "response_id": response.id,
                # getattr is safer than hasattr + attribute access and reads
                # identically when tool_calls is absent.
                "tool_calls": getattr(message, "tool_calls", None),
            },
        )

    async def generate(
        self,
        prompt: str,
        system_prompt: Optional[str] = None,
        **kwargs: Any,
    ) -> LLMResponse:
        """Generate completion using OpenAI.

        Args:
            prompt: User prompt
            system_prompt: System prompt
            **kwargs: Additional generation parameters

        Returns:
            LLM response

        Raises:
            RuntimeError: If client not initialized
            Exception: If API call fails
        """
        if not self._client:
            raise RuntimeError("OpenAI client not initialized")

        messages = self._build_messages(prompt, system_prompt)
        params = self._chat_params(messages, kwargs)

        # Additional sampling parameters supported on the non-streaming path.
        for key in ("top_p", "frequency_penalty", "presence_penalty", "stop"):
            if key in kwargs:
                params[key] = kwargs[key]

        try:
            logger.debug(f"Calling OpenAI API with model: {self.model}")
            response = await self._client.chat.completions.create(**params)
            return self._response_from(response)
        except Exception as e:
            logger.error(f"OpenAI API call failed: {e}")
            raise

    async def generate_stream(
        self,
        prompt: str,
        system_prompt: Optional[str] = None,
        **kwargs: Any,
    ) -> AsyncIterator[str]:
        """Generate completion with streaming.

        Args:
            prompt: User prompt
            system_prompt: System prompt
            **kwargs: Additional generation parameters

        Yields:
            Content chunks

        Raises:
            RuntimeError: If client not initialized
        """
        if not self._client:
            raise RuntimeError("OpenAI client not initialized")

        messages = self._build_messages(prompt, system_prompt)

        params: Dict[str, Any] = {
            "model": self.model,
            "messages": messages,
            "temperature": kwargs.get("temperature", self.temperature),
            "stream": True,
        }
        max_tokens = kwargs.get("max_tokens", self.max_tokens)
        if max_tokens:
            params["max_tokens"] = max_tokens

        try:
            logger.debug(f"Streaming from OpenAI API with model: {self.model}")
            stream = await self._client.chat.completions.create(**params)

            async for chunk in stream:
                # Some stream chunks carry no choices (e.g. a trailing usage
                # frame); guard before indexing to avoid IndexError.
                if chunk.choices and chunk.choices[0].delta.content:
                    yield chunk.choices[0].delta.content

        except Exception as e:
            logger.error(f"OpenAI streaming failed: {e}")
            raise

    async def generate_chat(
        self,
        messages: list[Dict[str, str]],
        **kwargs: Any,
    ) -> LLMResponse:
        """Generate completion for chat messages.

        Args:
            messages: List of message dictionaries
            **kwargs: Additional generation parameters

        Returns:
            LLM response

        Raises:
            RuntimeError: If client not initialized
        """
        if not self._client:
            raise RuntimeError("OpenAI client not initialized")

        params = self._chat_params(messages, kwargs)

        try:
            response = await self._client.chat.completions.create(**params)
            return self._response_from(response)
        except Exception as e:
            logger.error(f"OpenAI chat API call failed: {e}")
            raise
genxai/llm/routing.py ADDED
@@ -0,0 +1,83 @@
1
+ """Routing wrapper for LLM providers with fallback support."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Any, AsyncIterator, Iterable, List, Optional
6
+ import logging
7
+
8
+ from genxai.llm.base import LLMProvider, LLMResponse
9
+
10
+ logger = logging.getLogger(__name__)
11
+
12
+
13
class RoutedLLMProvider(LLMProvider):
    """LLM provider wrapper that routes requests through fallback providers."""

    def __init__(
        self,
        primary: LLMProvider,
        fallbacks: Optional[Iterable[LLMProvider]] = None,
    ) -> None:
        """Wrap *primary* with an ordered list of fallback providers.

        The wrapper advertises the primary provider's configuration
        (model, temperature, max_tokens) to the base class.
        """
        self._primary = primary
        self._fallbacks = list(fallbacks or [])
        super().__init__(
            model=primary.model,
            temperature=primary.temperature,
            max_tokens=primary.max_tokens,
        )

    @property
    def providers(self) -> List[LLMProvider]:
        """Primary provider followed by its fallbacks, in routing order."""
        return [self._primary, *self._fallbacks]

    async def generate(
        self,
        prompt: str,
        system_prompt: Optional[str] = None,
        **kwargs: Any,
    ) -> LLMResponse:
        """Return the first successful response, trying providers in order.

        Raises:
            RuntimeError: When every provider in the chain fails; chained
                from the last underlying error.
        """
        failure: Optional[Exception] = None
        for candidate in self.providers:
            try:
                result = await candidate.generate(
                    prompt=prompt,
                    system_prompt=system_prompt,
                    **kwargs,
                )
                self._update_stats(result.usage)
                return result
            except Exception as exc:
                failure = exc
                logger.warning(
                    "Provider %s failed; attempting fallback: %s",
                    candidate,
                    exc,
                )
        raise RuntimeError("All LLM providers failed") from failure

    async def generate_stream(
        self,
        prompt: str,
        system_prompt: Optional[str] = None,
        **kwargs: Any,
    ) -> AsyncIterator[str]:
        """Yield chunks from the first provider whose stream completes.

        NOTE(review): if a provider fails *mid-stream*, chunks already
        yielded are not retracted and the fallback restarts from scratch,
        so consumers may see duplicated output — confirm callers tolerate
        this.

        Raises:
            RuntimeError: When every provider in the chain fails; chained
                from the last underlying error.
        """
        failure: Optional[Exception] = None
        for candidate in self.providers:
            try:
                async for piece in candidate.generate_stream(
                    prompt=prompt,
                    system_prompt=system_prompt,
                    **kwargs,
                ):
                    yield piece
                return
            except Exception as exc:
                failure = exc
                logger.warning(
                    "Streaming provider %s failed; attempting fallback: %s",
                    candidate,
                    exc,
                )
        raise RuntimeError("All LLM providers failed for streaming") from failure
@@ -0,0 +1,6 @@
1
+ """Observability system for GenXAI."""
2
+
3
+ from genxai.observability.logging import setup_logging, get_logger
4
+ from genxai.observability.metrics import MetricsCollector
5
+
6
+ __all__ = ["setup_logging", "get_logger", "MetricsCollector"]
@@ -0,0 +1,327 @@
1
+ """Structured logging for GenXAI."""
2
+
3
import json
import logging
import re
import sys
from contextvars import ContextVar
from datetime import datetime, timezone
from typing import Any, Dict, Optional
9
+
10
+
11
_request_id_ctx: ContextVar[Optional[str]] = ContextVar("genxai_request_id", default=None)
_workflow_id_ctx: ContextVar[Optional[str]] = ContextVar("genxai_workflow_id", default=None)
_agent_id_ctx: ContextVar[Optional[str]] = ContextVar("genxai_agent_id", default=None)

# Routing table: public context-dict key -> backing ContextVar.
_CONTEXT_VARS: Dict[str, ContextVar[Optional[str]]] = {
    "request_id": _request_id_ctx,
    "workflow_id": _workflow_id_ctx,
    "agent_id": _agent_id_ctx,
}


def set_log_context(
    *,
    request_id: Optional[str] = None,
    workflow_id: Optional[str] = None,
    agent_id: Optional[str] = None,
) -> None:
    """Set context values for structured logging.

    Only identifiers passed as non-None are updated; the others keep
    their current values.

    Args:
        request_id: Request identifier
        workflow_id: Workflow identifier
        agent_id: Agent identifier
    """
    supplied = {
        "request_id": request_id,
        "workflow_id": workflow_id,
        "agent_id": agent_id,
    }
    for key, value in supplied.items():
        if value is not None:
            _CONTEXT_VARS[key].set(value)


def clear_log_context() -> None:
    """Reset every logging context identifier to None."""
    for var in _CONTEXT_VARS.values():
        var.set(None)


def get_log_context() -> Dict[str, Optional[str]]:
    """Return a snapshot of the current logging context values."""
    return {key: var.get() for key, var in _CONTEXT_VARS.items()}
51
+
52
+
53
class StructuredFormatter(logging.Formatter):
    """Formatter that renders each log record as a single JSON object."""

    def format(self, record: logging.LogRecord) -> str:
        """Serialize *record* to a JSON string.

        Args:
            record: Log record

        Returns:
            JSON formatted log string
        """
        # NOTE(review): fromtimestamp() produces a naive local-time stamp —
        # confirm downstream consumers expect local rather than UTC time.
        payload = {
            "timestamp": datetime.fromtimestamp(record.created).isoformat(),
            "level": record.levelname,
            "logger": record.name,
            "message": record.getMessage(),
            "module": record.module,
            "function": record.funcName,
            "line": record.lineno,
        }

        # Merge request/workflow/agent identifiers from the context vars.
        payload.update(get_log_context())

        # Attach formatted traceback when the record carries exception info.
        if record.exc_info:
            payload["exception"] = self.formatException(record.exc_info)

        # Pass through any extra fields stamped onto the record (LogContext).
        if hasattr(record, "extra"):
            payload["extra"] = record.extra

        return json.dumps(payload)
87
+
88
+
89
def setup_logging(
    level: str = "INFO",
    structured: bool = False,
    log_file: Optional[str] = None,
    redact_sensitive: bool = True,
) -> None:
    """Set up logging configuration.

    Args:
        level: Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
        structured: Use structured JSON logging
        log_file: Optional log file path
        redact_sensitive: Attach a SensitiveDataFilter to every handler so
            API keys, passwords, and tokens are redacted from output

    Raises:
        AttributeError: If ``level`` is not a valid logging level name.
    """
    # Create the formatter shared by console and file handlers.
    if structured:
        formatter: logging.Formatter = StructuredFormatter()
    else:
        formatter = logging.Formatter(
            "%(asctime)s - %(name)s - %(levelname)s - %(message)s",
            datefmt="%Y-%m-%d %H:%M:%S",
        )

    # Console handler
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setFormatter(formatter)
    if redact_sensitive:
        console_handler.addFilter(SensitiveDataFilter())

    # Configure root logger.
    # NOTE(review): handlers accumulate if setup_logging() is called more
    # than once per process (nothing removes previous handlers) — callers
    # are expected to invoke this exactly once at startup.
    root_logger = logging.getLogger()
    root_logger.setLevel(getattr(logging, level.upper()))
    root_logger.addHandler(console_handler)

    # File handler if specified
    if log_file:
        file_handler = logging.FileHandler(log_file)
        file_handler.setFormatter(formatter)
        if redact_sensitive:
            file_handler.addFilter(SensitiveDataFilter())
        root_logger.addHandler(file_handler)

    # Lazy %-style args: message identical to the old f-string, but the
    # formatting cost is skipped when INFO is disabled.
    logging.info("Logging configured: level=%s, structured=%s", level, structured)
131
+
132
+
133
def get_logger(name: str, extra: Optional[Dict[str, Any]] = None) -> "logging.Logger | logging.LoggerAdapter":
    """Get a logger with optional extra context.

    Args:
        name: Logger name
        extra: Extra context to include in logs

    Returns:
        The named ``logging.Logger``, or a ``logging.LoggerAdapter``
        wrapping it when ``extra`` is supplied. (The original annotation
        claimed ``logging.Logger`` unconditionally, but LoggerAdapter is
        not a Logger subclass — the annotation now reflects reality.)
    """
    base_logger = logging.getLogger(name)

    if extra:
        # The adapter merges ``extra`` into every record it emits.
        return logging.LoggerAdapter(base_logger, extra)

    return base_logger
150
+
151
+
152
class LogContext:
    """Context manager that injects extra context into log records.

    While active, every LogRecord created anywhere in the process gets an
    ``extra`` attribute holding the supplied key/value pairs (picked up by
    StructuredFormatter's ``extra`` handling).

    NOTE(review): the record factory is process-global state, so this is
    not safe for concurrent use across threads or async tasks, and nested
    contexts restore correctly only when exited in LIFO order — confirm
    callers respect both constraints.
    """

    def __init__(self, logger: logging.Logger, **context: Any) -> None:
        """Initialize log context.

        Args:
            logger: Logger instance returned unchanged from ``__enter__``
            **context: Context key-value pairs attached to every record
        """
        self.logger = logger
        self.context = context
        # Previous record factory; saved on __enter__ so __exit__ can restore it.
        self.old_factory = None

    def __enter__(self) -> logging.Logger:
        """Install a record factory that tags new records with the context."""
        self.old_factory = logging.getLogRecordFactory()

        def record_factory(*args: Any, **kwargs: Any) -> logging.LogRecord:
            # Delegate record creation, then stamp on the context. This
            # overwrites any existing ``extra`` attribute on the record.
            record = self.old_factory(*args, **kwargs)
            record.extra = self.context
            return record

        logging.setLogRecordFactory(record_factory)
        return self.logger

    def __exit__(self, *args: Any) -> None:
        """Restore the record factory that was active before entry."""
        if self.old_factory:
            logging.setLogRecordFactory(self.old_factory)
183
+
184
class SensitiveDataFilter(logging.Filter):
    """Filter to redact sensitive data from logs."""

    # (pattern, replacement) pairs applied case-insensitively to each message.
    # Bugfix: the character classes previously used ``\\s`` inside raw
    # strings, which excluded the literal characters backslash and ``s``
    # instead of whitespace — so any secret containing an ``s`` escaped
    # redaction entirely. ``\s`` correctly excludes whitespace.
    SENSITIVE_PATTERNS = [
        (r'api[_-]?key["\']?\s*[:=]\s*["\']?([^"\'\s]+)', 'api_key=***REDACTED***'),
        (r'password["\']?\s*[:=]\s*["\']?([^"\'\s]+)', 'password=***REDACTED***'),
        (r'token["\']?\s*[:=]\s*["\']?([^"\'\s]+)', 'token=***REDACTED***'),
        (r'secret["\']?\s*[:=]\s*["\']?([^"\'\s]+)', 'secret=***REDACTED***'),
        (r'authorization["\']?\s*[:=]\s*["\']?([^"\'\s]+)', 'authorization=***REDACTED***'),
    ]

    def filter(self, record: logging.LogRecord) -> bool:
        """Redact sensitive substrings in the record's message.

        The fully formatted message replaces ``record.msg`` and
        ``record.args`` is cleared, so %-interpolation is already resolved
        by the time handlers see the record.

        Args:
            record: Log record

        Returns:
            True to keep the record
        """
        import re

        message = record.getMessage()

        for pattern, replacement in self.SENSITIVE_PATTERNS:
            message = re.sub(pattern, replacement, message, flags=re.IGNORECASE)

        record.msg = message
        record.args = ()

        return True
215
+
216
+
217
class StructuredLogger:
    """Structured JSON logger with per-instance context."""

    def __init__(self, name: str):
        """Initialize structured logger.

        Args:
            name: Logger name
        """
        self.logger = logging.getLogger(name)
        # Per-instance context merged into every emitted entry.
        self.context: Dict[str, Any] = {}

    def add_context(self, **kwargs: Any) -> None:
        """Add context to all subsequent log messages.

        Args:
            **kwargs: Context key-value pairs
        """
        self.context.update(kwargs)

    def clear_context(self) -> None:
        """Clear all per-instance context."""
        self.context.clear()

    def _format_message(self, level: str, message: str, **kwargs: Any) -> str:
        """Format a log entry as JSON.

        Args:
            level: Log level name
            message: Log message
            **kwargs: Additional top-level fields

        Returns:
            JSON formatted log string
        """
        log_entry = {
            # Bugfix: datetime.utcnow() is deprecated (3.12+) and produced a
            # naive timestamp; now(timezone.utc) is explicit and offset-aware.
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "level": level,
            "message": message,
            # Instance context overrides the ambient contextvar values on
            # key collisions (later unpack wins).
            "context": {**get_log_context(), **self.context},
            **kwargs,
        }
        return json.dumps(log_entry)

    def debug(self, message: str, **kwargs: Any) -> None:
        """Log a debug-level JSON entry."""
        self.logger.debug(self._format_message("DEBUG", message, **kwargs))

    def info(self, message: str, **kwargs: Any) -> None:
        """Log an info-level JSON entry."""
        self.logger.info(self._format_message("INFO", message, **kwargs))

    def warning(self, message: str, **kwargs: Any) -> None:
        """Log a warning-level JSON entry."""
        self.logger.warning(self._format_message("WARNING", message, **kwargs))

    def error(self, message: str, **kwargs: Any) -> None:
        """Log an error-level JSON entry."""
        self.logger.error(self._format_message("ERROR", message, **kwargs))

    def critical(self, message: str, **kwargs: Any) -> None:
        """Log a critical-level JSON entry."""
        self.logger.critical(self._format_message("CRITICAL", message, **kwargs))

    def exception(self, message: str, **kwargs: Any) -> None:
        """Log an error-level entry with the active exception's traceback.

        Args:
            message: Log message
            **kwargs: Additional fields; a ``traceback`` field is added
        """
        import traceback
        kwargs["traceback"] = traceback.format_exc()
        self.logger.error(self._format_message("ERROR", message, **kwargs))
316
+
317
+
318
def get_structured_logger(name: str) -> StructuredLogger:
    """Build and return a StructuredLogger bound to *name*.

    Args:
        name: Logger name passed through to StructuredLogger.

    Returns:
        A new StructuredLogger instance for ``name``.
    """
    return StructuredLogger(name)