levelapp-0.1.15-py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the differences between package versions exactly as they appear in their public registries.
Files changed (70)
  1. levelapp/__init__.py +0 -0
  2. levelapp/aspects/__init__.py +8 -0
  3. levelapp/aspects/loader.py +253 -0
  4. levelapp/aspects/logger.py +59 -0
  5. levelapp/aspects/monitor.py +617 -0
  6. levelapp/aspects/sanitizer.py +168 -0
  7. levelapp/clients/__init__.py +122 -0
  8. levelapp/clients/anthropic.py +112 -0
  9. levelapp/clients/gemini.py +130 -0
  10. levelapp/clients/groq.py +101 -0
  11. levelapp/clients/huggingface.py +162 -0
  12. levelapp/clients/ionos.py +126 -0
  13. levelapp/clients/mistral.py +106 -0
  14. levelapp/clients/openai.py +116 -0
  15. levelapp/comparator/__init__.py +5 -0
  16. levelapp/comparator/comparator.py +232 -0
  17. levelapp/comparator/extractor.py +108 -0
  18. levelapp/comparator/schemas.py +61 -0
  19. levelapp/comparator/scorer.py +269 -0
  20. levelapp/comparator/utils.py +136 -0
  21. levelapp/config/__init__.py +5 -0
  22. levelapp/config/endpoint.py +199 -0
  23. levelapp/config/prompts.py +57 -0
  24. levelapp/core/__init__.py +0 -0
  25. levelapp/core/base.py +386 -0
  26. levelapp/core/schemas.py +24 -0
  27. levelapp/core/session.py +336 -0
  28. levelapp/endpoint/__init__.py +0 -0
  29. levelapp/endpoint/client.py +188 -0
  30. levelapp/endpoint/client_test.py +41 -0
  31. levelapp/endpoint/manager.py +114 -0
  32. levelapp/endpoint/parsers.py +119 -0
  33. levelapp/endpoint/schemas.py +38 -0
  34. levelapp/endpoint/tester.py +52 -0
  35. levelapp/evaluator/__init__.py +3 -0
  36. levelapp/evaluator/evaluator.py +307 -0
  37. levelapp/metrics/__init__.py +63 -0
  38. levelapp/metrics/embedding.py +56 -0
  39. levelapp/metrics/embeddings/__init__.py +0 -0
  40. levelapp/metrics/embeddings/sentence_transformer.py +30 -0
  41. levelapp/metrics/embeddings/torch_based.py +56 -0
  42. levelapp/metrics/exact.py +182 -0
  43. levelapp/metrics/fuzzy.py +80 -0
  44. levelapp/metrics/token.py +103 -0
  45. levelapp/plugins/__init__.py +0 -0
  46. levelapp/repository/__init__.py +3 -0
  47. levelapp/repository/filesystem.py +203 -0
  48. levelapp/repository/firestore.py +291 -0
  49. levelapp/simulator/__init__.py +3 -0
  50. levelapp/simulator/schemas.py +116 -0
  51. levelapp/simulator/simulator.py +531 -0
  52. levelapp/simulator/utils.py +134 -0
  53. levelapp/visualization/__init__.py +7 -0
  54. levelapp/visualization/charts.py +358 -0
  55. levelapp/visualization/dashboard.py +240 -0
  56. levelapp/visualization/exporter.py +167 -0
  57. levelapp/visualization/templates/base.html +158 -0
  58. levelapp/visualization/templates/comparator_dashboard.html +57 -0
  59. levelapp/visualization/templates/simulator_dashboard.html +111 -0
  60. levelapp/workflow/__init__.py +6 -0
  61. levelapp/workflow/base.py +192 -0
  62. levelapp/workflow/config.py +96 -0
  63. levelapp/workflow/context.py +64 -0
  64. levelapp/workflow/factory.py +42 -0
  65. levelapp/workflow/registration.py +6 -0
  66. levelapp/workflow/runtime.py +19 -0
  67. levelapp-0.1.15.dist-info/METADATA +571 -0
  68. levelapp-0.1.15.dist-info/RECORD +70 -0
  69. levelapp-0.1.15.dist-info/WHEEL +4 -0
  70. levelapp-0.1.15.dist-info/licenses/LICENSE +0 -0
levelapp/core/session.py
@@ -0,0 +1,336 @@
+ """levelapp/core/session.py"""
+
+ import asyncio
+ import threading
+
+ from abc import ABC
+
+ from dataclasses import dataclass, field
+ from typing import Dict, List, Any
+
+ from datetime import datetime
+ from humanize import precisedelta
+
+ from levelapp.workflow import MainFactory, WorkflowConfig
+ from levelapp.workflow.base import BaseWorkflow
+ from levelapp.aspects import MetricType, ExecutionMetrics, MonitoringAspect, logger
+ from levelapp.workflow.context import WorkflowContextBuilder
+
+
+ class TemporalStatusMixin(ABC):
+     started_at: datetime | None
+     ended_at: datetime | None
+
+     @property
+     def is_active(self) -> bool:
+         """Check if the session is currently active."""
+         return self.ended_at is None
+
+     @property
+     def duration(self) -> float | None:
+         """Calculate the duration of the session in seconds."""
+         if not self.is_active:
+             return (self.ended_at - self.started_at).total_seconds()
+         return None
+
+
+ @dataclass
+ class SessionMetadata(TemporalStatusMixin):
+     """Metadata for an evaluation session."""
+
+     session_name: str
+     started_at: datetime | None = None
+     ended_at: datetime | None = None
+     total_executions: int = 0
+     total_duration: float = 0.0
+     steps: Dict[str, "StepMetadata"] = field(default_factory=dict)
+
+
+ @dataclass
+ class StepMetadata(TemporalStatusMixin):
+     """Metadata for a specific step within an evaluation session."""
+
+     step_name: str
+     session_name: str
+     started_at: datetime | None = None
+     ended_at: datetime | None = None
+     memory_peak_mb: float | None = None
+     error_count: int = 0
+     procedures_stats: List[ExecutionMetrics] | None = None
+
+
+ class StepContext:
+     """Context manager for an evaluation step within an EvaluationSession."""
+
+     def __init__(
+         self,
+         session: "EvaluationSession",
+         step_name: str,
+         category: MetricType,
+     ):
+         """
+         Initialize StepContext.
+
+         Args:
+             session (EvaluationSession): Evaluation session.
+             step_name (str): Step name.
+             category (MetricType): Metric type.
+         """
+         self.session = session
+         self.step_name = step_name
+         self.category = category
+
+         self.step_meta: StepMetadata | None = None
+         self.full_step_name = f"<{session.session_name}:{step_name}>"
+         self._monitored_func = None
+         self._func_gen = None
+
+     def __enter__(self):
+         with self.session.lock:
+             self.step_meta = StepMetadata(
+                 step_name=self.step_name,
+                 session_name=self.session.session_name,
+                 started_at=datetime.now(),
+             )
+             self.session.session_metadata.steps[self.step_name] = self.step_meta
+
+         if self.session.enable_monitoring:
+             # Wrap with FunctionMonitor
+             self._monitored_func = self.session.monitor.monitor(
+                 name=self.full_step_name,
+                 category=self.category,
+                 enable_timing=True,
+                 track_memory=True,
+                 verbose=self.session.verbose,
+             )(self._step_wrapper)
+
+             # Start monitoring
+             try:
+                 self._func_gen = self._monitored_func()
+                 next(self._func_gen)  # Enter monitoring
+             except Exception as e:
+                 logger.error(
+                     f"[StepContext] Failed to initialize monitoring for {self.full_step_name}:\n{e}"
+                 )
+                 raise
+
+         return self  # returning self allows nested instrumentation
+
+     # noinspection PyMethodMayBeStatic
+     def _step_wrapper(self):
+         yield  # Actual user step execution happens here
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         if self.session.enable_monitoring:
+             try:
+                 next(self._func_gen)  # Exit monitoring
+             except StopIteration:
+                 pass
+
+         with self.session.lock:
+             self.step_meta.ended_at = datetime.now()
+
+             if exc_type:
+                 self.step_meta.error_count += 1
+
+             self.session.session_metadata.total_executions += 1
+
+             if self.session.enable_monitoring and self.step_meta.duration:
+                 self.session.monitor.update_procedure_duration(
+                     name=self.full_step_name, value=self.step_meta.duration
+                 )
+                 self.session.session_metadata.total_duration += self.step_meta.duration
+
+         return False
+
+
+ class EvaluationSession:
+     """Context manager for LLM evaluation sessions with integrated monitoring."""
+
+     def __init__(
+         self,
+         session_name: str = "test-session",
+         workflow_config: WorkflowConfig | None = None,
+         enable_monitoring: bool = True,
+         verbose: bool = False,
+     ):
+         """
+         Initialize Evaluation Session.
+
+         Args:
+             session_name (str): Name of the session
+             workflow_config (WorkflowConfig): Workflow configuration.
+             enable_monitoring (bool): Switch monitoring on. Defaults to True.
+             verbose (bool): Verbose mode. Defaults to False.
+         """
+         self._NAME = self.__class__.__name__
+
+         self.session_name = session_name
+         self.workflow_config = workflow_config
+         self.enable_monitoring = enable_monitoring
+         self.verbose = verbose
+
+         self.workflow: BaseWorkflow | None = None
+
+         self.session_metadata = SessionMetadata(session_name=session_name)
+         self.monitor = MonitoringAspect if enable_monitoring else None
+         self._lock = threading.RLock()
+
+         logger.info("[EvaluationSession] Evaluation session initialized.")
+
+     @property
+     def lock(self):
+         return self._lock
+
+     def __enter__(self):
+         self.session_metadata.started_at = datetime.now()
+
+         # Instantiate workflow if not already
+         if not self.workflow:
+             if not self.workflow_config:
+                 raise ValueError(f"{self._NAME}: Workflow configuration must be provided")
+
+             context_builder = WorkflowContextBuilder(self.workflow_config)
+             context = context_builder.build()
+
+             self.workflow = MainFactory.create_workflow(context=context)
+
+         logger.info(
+             f"[{self._NAME}] Starting evaluation session: {self.session_name} - "
+             f"Workflow: '{self.workflow.name}'"
+         )
+         return self
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         self.session_metadata.ended_at = datetime.now()
+         logger.info(
+             f"[{self._NAME}] Completed session '{self.session_name}' "
+             f"in {self.session_metadata.duration:.2f}s"
+         )
+
+         if exc_type:
+             logger.error(
+                 f"[{self._NAME}] Session ended with error: {exc_val}", exc_info=True
+             )
+
+         return False
+
+     def step(self, step_name: str, category: MetricType = MetricType.CUSTOM) -> StepContext:
+         """Create a monitored evaluation step."""
+         return StepContext(self, step_name, category)
+
+     def run(self):
+         if not self.workflow:
+             raise RuntimeError(f"{self._NAME} Workflow not initialized")
+
+         with self.step(step_name="setup", category=MetricType.SETUP):
+             self.workflow.setup()
+
+         with self.step(step_name="load_data", category=MetricType.DATA_LOADING):
+             self.workflow.load_data()
+
+         with self.step(step_name="execute", category=MetricType.EXECUTION):
+             self.workflow.execute()
+
+         with self.step(
+             step_name=f"{self.session_name}.collect_results",
+             category=MetricType.RESULTS_COLLECTION,
+         ):
+             self.workflow.collect_results()
+
+     def run_connectivity_test(self, context: Dict[str, Any]) -> Dict[str, Any]:
+         if not self.workflow:
+             raise RuntimeError(f"{self._NAME} Workflow not initialized")
+
+         results = asyncio.run(self.workflow.test_connection(context=context))
+         return results
+
+     def get_stats(self) -> Dict[str, Any]:
+         if self.enable_monitoring:
+             return {
+                 "session": {
+                     "name": self.session_name,
+                     "duration": precisedelta(
+                         self.session_metadata.duration, suppress=["minutes"]
+                     ),
+                     "start_time": self.session_metadata.started_at.isoformat(),
+                     "end_time": self.session_metadata.ended_at.isoformat(),
+                     "steps": len(self.session_metadata.steps),
+                     "errors": sum(
+                         s.error_count for s in self.session_metadata.steps.values()
+                     ),
+                 },
+                 "stats": self.monitor.get_all_stats(),
+             }
+
+         return {
+             "session": {
+                 "name": self.session_name,
+                 "duration": precisedelta(
+                     self.session_metadata.duration, suppress=["minutes"]
+                 ),
+                 "start_time": self.session_metadata.started_at.isoformat(),
+                 "end_time": self.session_metadata.ended_at.isoformat(),
+                 "steps": len(self.session_metadata.steps),
+                 "errors": sum(
+                     s.error_count for s in self.session_metadata.steps.values()
+                 ),
+             },
+         }
+
+     def visualize_results(
+         self, output_dir: str = "./visualizations", formats: List[str] = None
+     ) -> Dict[str, str]:
+         """
+         Generate visualizations for evaluation results.
+
+         Args:
+             output_dir: Directory to save visualizations (default: ./visualizations)
+             formats: List of export formats (html, png, pdf). Default: ["html"]
+
+         Returns:
+             Dictionary mapping format to file path
+
+         Example:
+             with EvaluationSession("my-eval", config) as session:
+                 session.run()
+                 files = session.visualize_results(
+                     output_dir="./reports",
+                     formats=["html", "png"]
+                 )
+                 print(f"Dashboard: {files['html']}")
+         """
+         if formats is None:
+             formats = ["html"]
+
+         logger.info(f"[{self.session_name}] Generating visualizations to: {output_dir}")
+
+         # Import here to avoid circular dependency
+         from levelapp.visualization import ResultsExporter
+
+         # Collect results from workflow
+         results = self.workflow.collect_results()
+
+         if not results:
+             logger.warning(
+                 f"[{self.session_name}] No results available for visualization"
+             )
+             return {}
+
+         # Parse results if they're JSON string
+         if isinstance(results, str):
+             import json
+             from levelapp.simulator.schemas import SimulationResults
+
+             results_dict = json.loads(results)
+             results = SimulationResults.model_validate(results_dict)
+
+         # Export visualizations
+         exporter = ResultsExporter(output_dir=output_dir)
+         exported_files = exporter.export_dashboard(results=results, formats=formats)
+
+         logger.info(
+             f"[{self.session_name}] Visualizations generated: {list(exported_files.keys())}"
+         )
+
+         return exported_files
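Usage sketch (editorial, not part of the wheel): the flow below is adapted from the Example block in visualize_results() above. The WorkflowConfig construction is a placeholder, since its fields are defined in levelapp/workflow/config.py and are not shown in this diff.

    # Hedged usage sketch of EvaluationSession; WorkflowConfig arguments are placeholders.
    from levelapp.core.session import EvaluationSession
    from levelapp.workflow import WorkflowConfig

    config = WorkflowConfig()  # placeholder: real fields live in levelapp/workflow/config.py

    with EvaluationSession(session_name="my-eval", workflow_config=config) as session:
        session.run()                      # setup -> load_data -> execute -> collect_results
        files = session.visualize_results(output_dir="./reports", formats=["html"])
        stats = session.get_stats()        # session duration, step count, error totals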
levelapp/endpoint/client.py
@@ -0,0 +1,188 @@
+ """levelapp/endpoint/client.py"""
+ import os
+ import httpx
+ import asyncio
+ import backoff
+ import logging
+
+ from dataclasses import dataclass, field
+ from typing import List, Dict, Any
+ from pydantic import BaseModel, Field
+
+ from levelapp.endpoint.schemas import HttpMethod, HeaderConfig, RequestSchemaConfig, ResponseMappingConfig
+
+
+ class EndpointConfig(BaseModel):
+     """Complete endpoint configuration."""
+     name: str
+     base_url: str
+     path: str
+     method: HttpMethod
+
+     headers: List[HeaderConfig] = Field(default_factory=list)
+     request_schema: List[RequestSchemaConfig] = Field(default_factory=list)
+     response_mapping: List[ResponseMappingConfig] = Field(default_factory=list)
+
+     # Timeouts (seconds)
+     connect_timeout: int = 10
+     read_timeout: int = 60
+     write_timeout: int = 10
+     pool_timeout: int = 10
+
+     # Concurrency
+     max_parallel_requests: int = 50
+     max_connections: int = 50
+     max_keepalive_connections: int = 50
+
+     # Retries
+     retry_count: int = 5
+     retry_backoff_base: float = 2.0
+     retry_backoff_max: float = 60.0
+
+     @classmethod
+     def validate_path(cls, v: str) -> str:
+         if not v.startswith('/'):
+             return f"/{v}"
+         return v
+
+
+ @dataclass
+ class ClientResult:
+     success: bool
+     response: httpx.Response | None = None
+     error: Exception | None = None
+
+
+ @dataclass
+ class APIClient:
+     """HTTP client for REST API interactions"""
+     config: EndpointConfig
+     client: httpx.AsyncClient = field(init=False)
+     semaphore: asyncio.Semaphore = field(init=False)
+     logger: logging.Logger = field(init=False)
+
+     RETRYABLE_ERRORS = (
+         httpx.ConnectTimeout,
+         httpx.WriteTimeout,
+         httpx.ReadTimeout,
+         httpx.NetworkError
+     )
+
+     def __post_init__(self):
+         self.logger = logging.getLogger(f"AsyncAPIClient.{self.config.name}")
+
+         self.client = httpx.AsyncClient(
+             base_url=self.config.base_url,
+             timeout=httpx.Timeout(
+                 connect=self.config.connect_timeout,
+                 read=self.config.read_timeout,
+                 write=self.config.write_timeout,
+                 pool=self.config.pool_timeout,
+             ),
+             limits=httpx.Limits(
+                 max_connections=self.config.max_connections,
+                 max_keepalive_connections=self.config.max_keepalive_connections,
+             ),
+             follow_redirects=True,
+         )
+
+         self.semaphore = asyncio.Semaphore(self.config.max_parallel_requests)
+
+     async def __aenter__(self) -> "APIClient":
+         return self
+
+     async def __aexit__(self, *args) -> None:
+         try:
+             if hasattr(self, 'client') and not self.client.is_closed:
+                 self.logger.warning("[APIClient] Client not properly closed, forcing cleanup.")
+                 asyncio.create_task(self.client.aclose())
+         except Exception as e:
+             self.logger.error(f"[APIClient] Error closing client: {e}")
+
+     def _build_headers(self) -> Dict[str, str]:
+         """Build headers with secure value resolution."""
+         headers = {}
+
+         for header in self.config.headers:
+             if header.secure:
+                 value = os.getenv(header.value)
+                 if value is None:
+                     self.logger.warning(f"Secure header '{header.name}' env var '{header.value}' not found")
+                     continue
+                 headers[header.name] = value
+             else:
+                 headers[header.name] = header.value
+
+         return headers
+
+     def _on_backoff(self, details):
+         """Callback for backoff logging"""
+         self.logger.warning(
+             f"[APIClient] Retry {details['tries']}/{self.config.retry_count} "
+             f"after {details['wait']:.2f}s (error: {details['exception'].__class__.__name__})"
+         )
+
+     def _on_giveup(self, details):
+         """Callback when all retries exhausted"""
+         self.logger.error(
+             f"[APIClient] Gave up after {details['tries']} tries, "
+             f"elapsed: {details['elapsed']:.2f}s"
+         )
+
+     async def send_request(
+         self,
+         payload: Dict[str, Any] | None = None,
+         query_params: Dict[str, Any] | None = None,
+     ) -> httpx.Response:
+         headers = self._build_headers()
+
+         async with self.semaphore:
+             response = await self.client.request(
+                 method=self.config.method.value,
+                 url=self.config.path,
+                 json=payload,
+                 params=query_params,
+                 headers=headers,
+             )
+
+             if response.is_error:
+                 response.raise_for_status()
+
+             return response
+
+     async def execute(
+         self,
+         payload: Dict[str, Any] | None = None,
+         query_params: Dict[str, Any] | None = None,
+     ) -> ClientResult:
+         """
+         Execute asynchronous REST API request with retry logic using backoff.
+
+         Retries on transient errors with exponential backoff and jitter.
+         Non-retryable errors (pool exhaustion, HTTP errors) are raised immediately.
+         """
+         @backoff.on_exception(
+             backoff.expo,
+             self.RETRYABLE_ERRORS,
+             max_tries=self.config.retry_count,
+             max_time=self.config.retry_backoff_max,
+             jitter=backoff.full_jitter,
+             on_backoff=self._on_backoff,
+             on_giveup=self._on_giveup,
+             raise_on_giveup=True,
+         )
+         async def _execute_with_retry() -> httpx.Response:
+             return await self.send_request(payload=payload, query_params=query_params)
+
+         try:
+             response = await _execute_with_retry()
+             response.raise_for_status()
+             return ClientResult(success=True, response=response)
+
+         except httpx.HTTPStatusError as exc:
+             exc_response = exc.response if hasattr(exc, "response") else None
+             return ClientResult(success=False, response=exc_response, error=exc)
+
+         except Exception as exc:
+             exc_response = exc.response if hasattr(exc, "response") else None
+             return ClientResult(success=False, response=exc_response, error=exc)
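For reference, a minimal sketch (editorial, not from the package) of consuming the ClientResult that APIClient.execute() returns instead of raising on HTTP errors; the endpoint name, URL, and path below are placeholders.

    # Hedged consumption sketch for APIClient.execute(); endpoint values are placeholders.
    import asyncio
    from levelapp.endpoint.client import APIClient, EndpointConfig
    from levelapp.endpoint.schemas import HttpMethod

    async def main():
        config = EndpointConfig(
            name="demo",
            base_url="http://127.0.0.1:8000",  # placeholder target
            path="/chat",                      # placeholder route
            method=HttpMethod.POST,
        )
        async with APIClient(config) as client:
            result = await client.execute(payload={"message": "ping"})
            if result.success:
                print(result.response.status_code)   # successful httpx.Response
            else:
                print("request failed:", result.error)

    asyncio.run(main())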
levelapp/endpoint/client_test.py
@@ -0,0 +1,41 @@
+ import asyncio
+ import logging
+ from levelapp.endpoint.client import APIClient, EndpointConfig
+ from levelapp.endpoint.schemas import HttpMethod
+
+ logging.basicConfig(level=logging.INFO)
+
+
+ async def stress_test():
+     config = EndpointConfig(
+         name="stress-test",
+         base_url="http://127.0.0.1:8000",
+         path="chat",
+         method=HttpMethod.POST,
+         max_parallel_requests=20,
+         max_connections=20,
+         retry_count=3,
+         retry_backoff_max=30,
+     )
+
+     async with APIClient(config) as client:
+
+         async def single_call(i: int):
+             try:
+                 response = await client.execute(
+                     payload={"message": f"test-{i}"}
+                 )
+                 return response.status_code
+             except Exception as e:
+                 return f"ERROR: {type(e).__name__}"
+
+         tasks = [single_call(i) for i in range(100)]
+         results = await asyncio.gather(*tasks, return_exceptions=False)
+
+         print("Results summary:")
+         from collections import Counter
+         print(Counter(results))
+
+
+ if __name__ == "__main__":
+     asyncio.run(stress_test())
levelapp/endpoint/manager.py
@@ -0,0 +1,114 @@
+ """levelapp/endpoint/manager.py"""
+ import httpx
+ import yaml
+ import logging
+
+ from pathlib import Path
+ from typing import Dict, List, Any
+ from pydantic import ValidationError
+
+ from levelapp.endpoint.schemas import ResponseMappingConfig
+ from levelapp.endpoint.tester import ConnectivityTester
+ from levelapp.endpoint.client import EndpointConfig, APIClient, ClientResult
+ from levelapp.endpoint.parsers import RequestPayloadBuilder, ResponseDataExtractor
+
+
+ class EndpointConfigManager:
+     """Manages endpoint configurations and creates testers."""
+     def __init__(self, config_path: Path | None = None):
+         self.config_path = config_path
+         self.endpoints: Dict[str, EndpointConfig] = {}
+         self.logger = logging.getLogger("ConfigurationManager")
+
+         if config_path:
+             self._load_config()
+
+     def _load_config(self) -> None:
+         """Load and validate YAML configuration file."""
+         try:
+             with open(self.config_path, "r") as f:
+                 data = yaml.safe_load(f)
+
+             for endpoint_data in data.get("endpoints", []):
+                 config = EndpointConfig.model_validate(endpoint_data)
+                 self.endpoints[config.name] = config
+                 self.logger.info(f"Loaded endpoint config: {config.name}")
+
+         except ValidationError as e:
+             self.logger.error(f"Failed to load endpoint config: {e}")
+
+         except Exception as e:
+             self.logger.error(f"Failed to load endpoint config: {e}", exc_info=e)
+             raise RuntimeError("Failed to extract endpoints data from YAML file:\n{e}")
+
+     def set_endpoints(self, endpoints_config: List[EndpointConfig]):
+         for endpoint in endpoints_config:
+             try:
+                 config = EndpointConfig.model_validate(endpoint)
+                 self.endpoints[config.name] = config
+
+             except ValidationError as e:
+                 self.logger.error(f"Failed to load endpoint config: {e}", exc_info=e)
+                 continue
+
+     def build_response_mapping(self, content: List[Dict[str, Any]]) -> List[ResponseMappingConfig]:
+         mappings = []
+         for el in content:
+             try:
+                 mappings.append(ResponseMappingConfig.model_validate(el))
+             except ValidationError as e:
+                 self.logger.error(f"Failed to validate response mapping: {e}", exc_info=e)
+
+         return mappings
+
+     async def send_request(
+         self,
+         endpoint_config: EndpointConfig,
+         context: Dict[str, Any],
+         contextual_mode: bool = False
+     ) -> ClientResult:
+         payload_builder = RequestPayloadBuilder()
+         client = APIClient(config=endpoint_config)
+
+         if not contextual_mode:
+             context = payload_builder.build(
+                 schema=endpoint_config.request_schema,
+                 context=context,
+             )
+
+         async with client:
+             response = await client.execute(payload=context)
+
+         self.logger.info(f"Response status: {response.error}")
+
+         return response
+
+     @staticmethod
+     def extract_response_data(
+         response: httpx.Response,
+         mappings: List[ResponseMappingConfig],
+     ) -> Dict[str, Any]:
+         extractor = ResponseDataExtractor()
+         response_data = response.json() if response.text else {}
+         extracted = extractor.extract(
+             response_data=response_data,
+             mappings=mappings
+         )
+
+         return extracted
+
+     def get_tester(self, endpoint_name: str) -> ConnectivityTester:
+         """Factory method: create connectivity tester for endpoint."""
+         if endpoint_name not in self.endpoints:
+             raise KeyError(f"Endpoint '{endpoint_name}' not found in configuration")
+
+         return ConnectivityTester(self.endpoints[endpoint_name])
+
+     def test_all(self, context: Dict[str, Any] | None = None) -> Dict[str, Dict[str, Any]]:
+         """Test all configured endpoints."""
+         results = {}
+         for name in self.endpoints:
+             tester = self.get_tester(name)
+             results[name] = tester.test(context)
+
+         return results
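A hedged sketch (editorial, not from the package) of the YAML layout that _load_config() appears to expect: a top-level endpoints list whose entries validate as EndpointConfig. The file name, field values, and the HeaderConfig field names (name, value, secure, inferred from _build_headers() in client.py) are illustrative assumptions.

    # Illustrative only: inferred from _load_config(), which iterates data["endpoints"]
    # and validates each entry with EndpointConfig.model_validate().
    from pathlib import Path
    from levelapp.endpoint.manager import EndpointConfigManager

    yaml_doc = """\
    endpoints:
      - name: chat-api
        base_url: http://127.0.0.1:8000
        path: /chat
        method: POST             # assumes this matches a levelapp HttpMethod value
        headers:
          - name: Authorization
            value: CHAT_API_KEY  # treated as an env var name when secure is true
            secure: true
    """

    config_file = Path("endpoints.yaml")   # hypothetical path
    config_file.write_text(yaml_doc)

    manager = EndpointConfigManager(config_path=config_file)
    print(list(manager.endpoints))         # -> ['chat-api'] if validation succeeds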