garf-executors 1.0.2__py3-none-any.whl → 1.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -15,46 +15,11 @@
15
15
 
16
16
  from __future__ import annotations
17
17
 
18
- import importlib
19
-
20
- from garf.executors import executor, fetchers
21
18
  from garf.executors.api_executor import ApiExecutionContext, ApiQueryExecutor
22
- from garf.executors.telemetry import tracer
23
-
24
-
25
- @tracer.start_as_current_span('setup_executor')
26
- def setup_executor(
27
- source: str,
28
- fetcher_parameters: dict[str, str | int | bool],
29
- enable_cache: bool = False,
30
- cache_ttl_seconds: int = 3600,
31
- ) -> type[executor.Executor]:
32
- """Initializes executors based on a source and parameters."""
33
- if source == 'bq':
34
- bq_executor = importlib.import_module('garf.executors.bq_executor')
35
- query_executor = bq_executor.BigQueryExecutor(**fetcher_parameters)
36
- elif source == 'sqldb':
37
- sql_executor = importlib.import_module('garf.executors.sql_executor')
38
- query_executor = (
39
- sql_executor.SqlAlchemyQueryExecutor.from_connection_string(
40
- fetcher_parameters.get('connection_string')
41
- )
42
- )
43
- else:
44
- concrete_api_fetcher = fetchers.get_report_fetcher(source)
45
- query_executor = ApiQueryExecutor(
46
- fetcher=concrete_api_fetcher(
47
- **fetcher_parameters,
48
- enable_cache=enable_cache,
49
- cache_ttl_seconds=cache_ttl_seconds,
50
- )
51
- )
52
- return query_executor
53
-
54
19
 
55
20
  __all__ = [
56
21
  'ApiQueryExecutor',
57
22
  'ApiExecutionContext',
58
23
  ]
59
24
 
60
- __version__ = '1.0.2'
25
+ __version__ = '1.1.3'
@@ -21,8 +21,9 @@ GarfReport and saving it to local/remote storage.
21
21
  from __future__ import annotations
22
22
 
23
23
  import logging
24
+ import pathlib
24
25
 
25
- from garf.core import report_fetcher
26
+ from garf.core import report_fetcher, simulator
26
27
  from garf.executors import (
27
28
  exceptions,
28
29
  execution_context,
@@ -31,9 +32,16 @@ from garf.executors import (
31
32
  query_processor,
32
33
  )
33
34
  from garf.executors.telemetry import tracer
34
- from opentelemetry import trace
35
+ from opentelemetry import metrics, trace
35
36
 
36
37
  logger = logging.getLogger(__name__)
38
+ meter = metrics.get_meter('garf.executors')
39
+
40
+ api_counter = meter.create_counter(
41
+ 'garf_api_execute_total',
42
+ unit='1',
43
+ description='Counts number of API executions',
44
+ )
37
45
 
38
46
 
39
47
  class ApiExecutionContext(execution_context.ExecutionContext):
@@ -49,13 +57,19 @@ class ApiQueryExecutor(executor.Executor):
49
57
  api_client: a client used for connecting to the API.
50
58
  """
51
59
 
52
- def __init__(self, fetcher: report_fetcher.ApiReportFetcher) -> None:
60
+ def __init__(
61
+ self,
62
+ fetcher: report_fetcher.ApiReportFetcher,
63
+ report_simulator: simulator.ApiReportSimulator | None = None,
64
+ ) -> None:
53
65
  """Initializes ApiQueryExecutor.
54
66
 
55
67
  Args:
56
- fetcher: Instantiated report fetcher.
68
+ fetcher: Instantiated report fetcher.
69
+ report_simulator: Instantiated simulator.
57
70
  """
58
71
  self.fetcher = fetcher
72
+ self.simulator = report_simulator
59
73
  super().__init__(
60
74
  preprocessors=self.fetcher.preprocessors,
61
75
  postprocessors=self.fetcher.postprocessors,
@@ -100,6 +114,8 @@ class ApiQueryExecutor(executor.Executor):
100
114
  Raises:
101
115
  GarfExecutorError: When failed to execute query.
102
116
  """
117
+ if self.simulator:
118
+ return self.simulate(query=query, title=title, context=context)
103
119
  context = query_processor.process_gquery(context)
104
120
  span = trace.get_current_span()
105
121
  span.set_attribute('fetcher.class', self.fetcher.__class__.__name__)
@@ -110,9 +126,78 @@ class ApiQueryExecutor(executor.Executor):
110
126
  span.set_attribute('query.title', title)
111
127
  span.set_attribute('query.text', query)
112
128
  logger.debug('starting query %s', query)
129
+ title = pathlib.Path(title).name.split('.')[0]
130
+ api_counter.add(
131
+ 1, {'api.client.class': self.fetcher.api_client.__class__.__name__}
132
+ )
113
133
  results = self.fetcher.fetch(
114
134
  query_specification=query,
115
135
  args=context.query_parameters,
136
+ title=title,
137
+ **context.fetcher_parameters,
138
+ )
139
+ writer_clients = context.writer_clients
140
+ if not writer_clients:
141
+ logger.warning('No writers configured, skipping write operation')
142
+ return None
143
+ writing_results = []
144
+ for writer_client in writer_clients:
145
+ logger.debug(
146
+ 'Start writing data for query %s via %s writer',
147
+ title,
148
+ type(writer_client),
149
+ )
150
+ result = writer_client.write(results, title)
151
+ logger.debug(
152
+ 'Finish writing data for query %s via %s writer',
153
+ title,
154
+ type(writer_client),
155
+ )
156
+ writing_results.append(result)
157
+ logger.info('%s executed successfully', title)
158
+ # Return the last writer's result for backward compatibility
159
+ return writing_results[-1] if writing_results else None
160
+ except Exception as e:
161
+ logger.error('%s generated an exception: %s', title, str(e))
162
+ raise exceptions.GarfExecutorError(
163
+ '%s generated an exception: %s', title, str(e)
164
+ ) from e
165
+
166
+ @tracer.start_as_current_span('api.simulate')
167
+ def simulate(
168
+ self,
169
+ query: str,
170
+ title: str,
171
+ context: ApiExecutionContext,
172
+ ) -> str:
173
+ """Reads query, simulates results and stores them in a specified location.
174
+
175
+ Args:
176
+ query: Location of the query.
177
+ title: Name of the query.
178
+ context: Query execution context.
179
+
180
+ Returns:
181
+ Result of writing the report.
182
+
183
+ Raises:
184
+ GarfExecutorError: When failed to execute query.
185
+ """
186
+ context = query_processor.process_gquery(context)
187
+ span = trace.get_current_span()
188
+ span.set_attribute('fetcher.class', self.fetcher.__class__.__name__)
189
+ span.set_attribute(
190
+ 'api.client.class', self.fetcher.api_client.__class__.__name__
191
+ )
192
+ try:
193
+ span.set_attribute('query.title', title)
194
+ span.set_attribute('query.text', query)
195
+ logger.debug('starting query %s', query)
196
+ title = pathlib.Path(title).name.split('.')[0]
197
+ results = self.simulator.simulate(
198
+ query_specification=query,
199
+ args=context.query_parameters,
200
+ title=title,
116
201
  **context.fetcher_parameters,
117
202
  )
118
203
  writer_clients = context.writer_clients
@@ -96,6 +96,8 @@ class BigQueryExecutor(executor.Executor, query_editor.TemplateProcessorMixin):
96
96
  Report with data if query returns some data otherwise empty Report.
97
97
  """
98
98
  span = trace.get_current_span()
99
+ span.set_attribute('query.title', title)
100
+ span.set_attribute('query.text', query)
99
101
  logger.info('Executing script: %s', title)
100
102
  query_text = self.replace_params_template(query, context.query_parameters)
101
103
  self.create_datasets(context.query_parameters.macro)
@@ -25,14 +25,19 @@ import pathlib
25
25
  import sys
26
26
 
27
27
  import garf.executors
28
- from garf.executors import config, exceptions, workflow
28
+ from garf.executors import config, exceptions, setup
29
29
  from garf.executors.entrypoints import utils
30
- from garf.executors.entrypoints.tracer import initialize_tracer
30
+ from garf.executors.entrypoints.tracer import (
31
+ initialize_meter,
32
+ initialize_tracer,
33
+ )
31
34
  from garf.executors.telemetry import tracer
35
+ from garf.executors.workflows import workflow, workflow_runner
32
36
  from garf.io import reader
33
37
  from opentelemetry import trace
34
38
 
35
39
  initialize_tracer()
40
+ meter_provider = initialize_meter()
36
41
 
37
42
 
38
43
  @tracer.start_as_current_span('garf.entrypoints.cli')
@@ -53,6 +58,7 @@ def main():
53
58
  parser.add_argument(
54
59
  '--no-parallel-queries', dest='parallel_queries', action='store_false'
55
60
  )
61
+ parser.add_argument('--simulate', dest='simulate', action='store_true')
56
62
  parser.add_argument('--dry-run', dest='dry_run', action='store_true')
57
63
  parser.add_argument('-v', '--version', dest='version', action='store_true')
58
64
  parser.add_argument(
@@ -67,7 +73,12 @@ def main():
67
73
  default=3600,
68
74
  type=int,
69
75
  )
76
+ parser.add_argument('--workflow-skip', dest='workflow_skip', default=None)
77
+ parser.add_argument(
78
+ '--workflow-include', dest='workflow_include', default=None
79
+ )
70
80
  parser.set_defaults(parallel_queries=True)
81
+ parser.set_defaults(simulate=False)
71
82
  parser.set_defaults(enable_cache=False)
72
83
  parser.set_defaults(dry_run=False)
73
84
  args, kwargs = parser.parse_known_args()
@@ -82,43 +93,37 @@ def main():
82
93
  loglevel=args.loglevel.upper(), logger_type=args.logger, name=args.log_name
83
94
  )
84
95
  reader_client = reader.create_reader(args.input)
96
+ param_types = ['source', 'macro', 'template']
97
+ outputs = args.output.split(',')
98
+ extra_parameters = utils.ParamsParser([*param_types, *outputs]).parse(kwargs)
99
+ source_parameters = extra_parameters.get('source', {})
100
+ writer_parameters = {}
101
+ for output in outputs:
102
+ writer_parameters.update(extra_parameters.get(output))
103
+
104
+ context = garf.executors.api_executor.ApiExecutionContext(
105
+ query_parameters={
106
+ 'macro': extra_parameters.get('macro'),
107
+ 'template': extra_parameters.get('template'),
108
+ },
109
+ writer=outputs,
110
+ writer_parameters=writer_parameters,
111
+ fetcher_parameters=source_parameters,
112
+ )
85
113
  if workflow_file := args.workflow:
86
114
  wf_parent = pathlib.Path.cwd() / pathlib.Path(workflow_file).parent
87
- execution_workflow = workflow.Workflow.from_file(workflow_file)
88
- for i, step in enumerate(execution_workflow.steps, 1):
89
- with tracer.start_as_current_span(f'{i}-{step.fetcher}'):
90
- query_executor = garf.executors.setup_executor(
91
- source=step.fetcher,
92
- fetcher_parameters=step.fetcher_parameters,
93
- enable_cache=args.enable_cache,
94
- cache_ttl_seconds=args.cache_ttl_seconds,
95
- )
96
- batch = {}
97
- if not (queries := step.queries):
98
- logger.error('Please provide one or more queries to run')
99
- raise exceptions.GarfExecutorError(
100
- 'Please provide one or more queries to run'
101
- )
102
- for query in queries:
103
- if isinstance(query, garf.executors.workflow.QueryPath):
104
- query_path = wf_parent / pathlib.Path(query.path)
105
- if not query_path.exists():
106
- raise workflow.GarfWorkflowError(f'Query: {query_path} not found')
107
- batch[query.path] = reader_client.read(query_path)
108
- elif isinstance(query, garf.executors.workflow.QueryFolder):
109
- query_path = wf_parent / pathlib.Path(query.folder)
110
- if not query_path.exists():
111
- raise workflow.GarfWorkflowError(
112
- f'Folder: {query_path} not found'
113
- )
114
- for p in query_path.rglob('*'):
115
- if p.suffix == '.sql':
116
- batch[p.stem] = reader_client.read(p)
117
- else:
118
- batch[query.query.title] = query.query.text
119
- query_executor.execute_batch(
120
- batch, step.context, args.parallel_threshold
121
- )
115
+ execution_workflow = workflow.Workflow.from_file(workflow_file, context)
116
+ workflow_skip = args.workflow_skip if args.workflow_skip else None
117
+ workflow_include = args.workflow_include if args.workflow_include else None
118
+ workflow_runner.WorkflowRunner(
119
+ execution_workflow=execution_workflow, wf_parent=wf_parent
120
+ ).run(
121
+ enable_cache=args.enable_cache,
122
+ cache_ttl_seconds=args.cache_ttl_seconds,
123
+ selected_aliases=workflow_include,
124
+ skipped_aliases=workflow_skip,
125
+ )
126
+ meter_provider.shutdown()
122
127
  sys.exit()
123
128
 
124
129
  if not args.query:
@@ -132,31 +137,12 @@ def main():
132
137
  raise exceptions.GarfExecutorError(
133
138
  f'No execution context found for source {args.source} in {config_file}'
134
139
  )
135
- else:
136
- param_types = ['source', 'macro', 'template']
137
- outputs = args.output.split(',')
138
- extra_parameters = utils.ParamsParser([*param_types, *outputs]).parse(
139
- kwargs
140
- )
141
- source_parameters = extra_parameters.get('source', {})
142
- writer_parameters = {}
143
- for output in outputs:
144
- writer_parameters.update(extra_parameters.get(output))
145
-
146
- context = garf.executors.api_executor.ApiExecutionContext(
147
- query_parameters={
148
- 'macro': extra_parameters.get('macro'),
149
- 'template': extra_parameters.get('template'),
150
- },
151
- writer=outputs,
152
- writer_parameters=writer_parameters,
153
- fetcher_parameters=source_parameters,
154
- )
155
- query_executor = garf.executors.setup_executor(
140
+ query_executor = setup.setup_executor(
156
141
  source=args.source,
157
142
  fetcher_parameters=context.fetcher_parameters,
158
143
  enable_cache=args.enable_cache,
159
144
  cache_ttl_seconds=args.cache_ttl_seconds,
145
+ simulate=args.simulate,
160
146
  )
161
147
  batch = {query: reader_client.read(query) for query in args.query}
162
148
  if args.parallel_queries and len(args.query) > 1:
@@ -171,6 +157,7 @@ def main():
171
157
  query=reader_client.read(query), title=query, context=context
172
158
  )
173
159
  logging.shutdown()
160
+ meter_provider.shutdown()
174
161
 
175
162
 
176
163
  if __name__ == '__main__':
@@ -18,9 +18,8 @@ import argparse
18
18
  import logging
19
19
  from concurrent import futures
20
20
 
21
- import garf.executors
22
21
  import grpc
23
- from garf.executors import garf_pb2, garf_pb2_grpc
22
+ from garf.executors import execution_context, garf_pb2, garf_pb2_grpc, setup
24
23
  from garf.executors.entrypoints.tracer import initialize_tracer
25
24
  from google.protobuf.json_format import MessageToDict
26
25
  from grpc_reflection.v1alpha import reflection
@@ -28,19 +27,34 @@ from grpc_reflection.v1alpha import reflection
28
27
 
29
28
  class GarfService(garf_pb2_grpc.GarfService):
30
29
  def Execute(self, request, context):
31
- query_executor = garf.executors.setup_executor(
30
+ query_executor = setup.setup_executor(
32
31
  request.source, request.context.fetcher_parameters
33
32
  )
34
- execution_context = garf.executors.execution_context.ExecutionContext(
35
- **MessageToDict(request.context, preserving_proto_field_name=True)
36
- )
37
33
  result = query_executor.execute(
38
34
  query=request.query,
39
35
  title=request.title,
40
- context=execution_context,
36
+ context=execution_context.ExecutionContext(
37
+ **MessageToDict(request.context, preserving_proto_field_name=True)
38
+ ),
41
39
  )
42
40
  return garf_pb2.ExecuteResponse(results=[result])
43
41
 
42
+ def Fetch(self, request, context):
43
+ query_executor = setup.setup_executor(
44
+ request.source, request.context.fetcher_parameters
45
+ )
46
+ query_args = execution_context.ExecutionContext(
47
+ **MessageToDict(request.context, preserving_proto_field_name=True)
48
+ ).query_parameters
49
+ result = query_executor.fetcher.fetch(
50
+ query_specification=request.query,
51
+ title=request.title,
52
+ args=query_args,
53
+ )
54
+ return garf_pb2.FetchResponse(
55
+ columns=result.column_names, rows=result.to_list(row_type='dict')
56
+ )
57
+
44
58
 
45
59
  if __name__ == '__main__':
46
60
  parser = argparse.ArgumentParser()
@@ -21,18 +21,55 @@ import garf.executors
21
21
  import pydantic
22
22
  import typer
23
23
  import uvicorn
24
- from garf.executors import exceptions
25
- from garf.executors.entrypoints.tracer import initialize_tracer
24
+ from garf.executors import exceptions, setup
25
+ from garf.executors.entrypoints import utils
26
+ from garf.executors.entrypoints.tracer import (
27
+ initialize_meter,
28
+ initialize_tracer,
29
+ )
30
+ from garf.executors.workflows import workflow_runner
26
31
  from garf.io import reader
27
32
  from opentelemetry.instrumentation.fastapi import FastAPIInstrumentor
33
+ from pydantic_settings import BaseSettings, SettingsConfigDict
28
34
  from typing_extensions import Annotated
29
35
 
30
36
  initialize_tracer()
37
+ initialize_meter()
31
38
  app = fastapi.FastAPI()
32
39
  FastAPIInstrumentor.instrument_app(app)
33
40
  typer_app = typer.Typer()
34
41
 
35
42
 
43
+ class GarfSettings(BaseSettings):
44
+ """Specifies environmental variables for garf.
45
+
46
+ Ensure that mandatory variables are exposed via
47
+ export ENV_VARIABLE_NAME=VALUE.
48
+
49
+ Attributes:
50
+ loglevel: Level of logging.
51
+ log_name: Name of log.
52
+ logger_type: Type of logger.
53
+ """
54
+
55
+ model_config = SettingsConfigDict(env_prefix='garf_')
56
+
57
+ loglevel: str = 'INFO'
58
+ log_name: str = 'garf'
59
+ logger_type: str = 'local'
60
+
61
+
62
+ class GarfDependencies:
63
+ def __init__(self) -> None:
64
+ """Initializes GarfDependencies."""
65
+ settings = GarfSettings()
66
+ self.logger = utils.init_logging(
67
+ loglevel=settings.loglevel,
68
+ logger_type=settings.logger_type,
69
+ name=settings.log_name,
70
+ )
71
+
72
+
36
73
  class ApiExecutorRequest(pydantic.BaseModel):
37
74
  """Request for executing a query.
38
75
 
@@ -81,14 +118,19 @@ async def version() -> str:
81
118
 
82
119
 
83
120
  @app.get('/api/fetchers')
84
- async def get_fetchers() -> list[str]:
121
+ async def get_fetchers(
122
+ dependencies: Annotated[GarfDependencies, fastapi.Depends(GarfDependencies)],
123
+ ) -> list[str]:
85
124
  """Shows all available API sources."""
86
125
  return list(garf.executors.fetchers.find_fetchers())
87
126
 
88
127
 
89
128
  @app.post('/api/execute')
90
- async def execute(request: ApiExecutorRequest) -> ApiExecutorResponse:
91
- query_executor = garf.executors.setup_executor(
129
+ def execute(
130
+ request: ApiExecutorRequest,
131
+ dependencies: Annotated[GarfDependencies, fastapi.Depends(GarfDependencies)],
132
+ ) -> ApiExecutorResponse:
133
+ query_executor = setup.setup_executor(
92
134
  request.source, request.context.fetcher_parameters
93
135
  )
94
136
  result = query_executor.execute(request.query, request.title, request.context)
@@ -96,8 +138,11 @@ async def execute(request: ApiExecutorRequest) -> ApiExecutorResponse:
96
138
 
97
139
 
98
140
  @app.post('/api/execute:batch')
99
- def execute_batch(request: ApiExecutorRequest) -> ApiExecutorResponse:
100
- query_executor = garf.executors.setup_executor(
141
+ def execute_batch(
142
+ request: ApiExecutorRequest,
143
+ dependencies: Annotated[GarfDependencies, fastapi.Depends(GarfDependencies)],
144
+ ) -> ApiExecutorResponse:
145
+ query_executor = setup.setup_executor(
101
146
  request.source, request.context.fetcher_parameters
102
147
  )
103
148
  reader_client = reader.FileReader()
@@ -106,6 +151,18 @@ def execute_batch(request: ApiExecutorRequest) -> ApiExecutorResponse:
106
151
  return ApiExecutorResponse(results=results)
107
152
 
108
153
 
154
+ @app.post('/api/execute:workflow')
155
+ def execute_workflow(
156
+ workflow_file: str,
157
+ dependencies: Annotated[GarfDependencies, fastapi.Depends(GarfDependencies)],
158
+ enable_cache: bool = False,
159
+ cache_ttl_seconds: int = 3600,
160
+ ) -> list[str]:
161
+ return workflow_runner.WorkflowRunner.from_file(workflow_file).run(
162
+ enable_cache=enable_cache, cache_ttl_seconds=cache_ttl_seconds
163
+ )
164
+
165
+
109
166
  @typer_app.command()
110
167
  def main(
111
168
  port: Annotated[int, typer.Option(help='Port to start the server')] = 8000,
@@ -14,15 +14,20 @@
14
14
 
15
15
  import os
16
16
 
17
- from opentelemetry import trace
17
+ from opentelemetry import metrics, trace
18
+ from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import (
19
+ OTLPMetricExporter,
20
+ )
18
21
  from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (
19
22
  OTLPSpanExporter,
20
23
  )
24
+ from opentelemetry.sdk.metrics import MeterProvider
25
+ from opentelemetry.sdk.metrics.export import (
26
+ PeriodicExportingMetricReader,
27
+ )
21
28
  from opentelemetry.sdk.resources import Resource
22
29
  from opentelemetry.sdk.trace import TracerProvider
23
- from opentelemetry.sdk.trace.export import (
24
- BatchSpanProcessor,
25
- )
30
+ from opentelemetry.sdk.trace.export import BatchSpanProcessor
26
31
 
27
32
  DEFAULT_SERVICE_NAME = 'garf'
28
33
 
@@ -55,3 +60,23 @@ def initialize_tracer():
55
60
  tracer_provider.add_span_processor(otlp_processor)
56
61
 
57
62
  trace.set_tracer_provider(tracer_provider)
63
+
64
+
65
+ def initialize_meter():
66
+ resource = Resource.create(
67
+ {'service.name': os.getenv('OTLP_SERVICE_NAME', DEFAULT_SERVICE_NAME)}
68
+ )
69
+ meter_provider = MeterProvider(resource=resource)
70
+
71
+ if otel_endpoint := os.getenv('OTEL_EXPORTER_OTLP_ENDPOINT'):
72
+ otlp_metric_exporter = OTLPMetricExporter(
73
+ endpoint=f'{otel_endpoint}/v1/metrics'
74
+ )
75
+ metric_reader = PeriodicExportingMetricReader(otlp_metric_exporter)
76
+ meter_provider = MeterProvider(
77
+ resource=resource, metric_readers=[metric_reader]
78
+ )
79
+ else:
80
+ meter_provider = MeterProvider(resource=resource)
81
+ metrics.set_meter_provider(meter_provider)
82
+ return meter_provider
@@ -17,7 +17,7 @@ import logging
17
17
  import sys
18
18
  from importlib.metadata import entry_points
19
19
 
20
- from garf.core import report_fetcher
20
+ from garf.core import report_fetcher, simulator
21
21
  from garf.executors.telemetry import tracer
22
22
 
23
23
  logger = logging.getLogger(name='garf.executors.fetchers')
@@ -31,6 +31,14 @@ def find_fetchers() -> set[str]:
31
31
  return set()
32
32
 
33
33
 
34
+ @tracer.start_as_current_span('find_simulators')
35
+ def find_simulators() -> set[str]:
36
 + """Identifies all available report simulators."""
37
+ if entrypoints := _get_entrypoints('garf_simulator'):
38
+ return {simulator.name for simulator in entrypoints}
39
+ return set()
40
+
41
+
34
42
  @tracer.start_as_current_span('get_report_fetcher')
35
43
  def get_report_fetcher(source: str) -> type[report_fetcher.ApiReportFetcher]:
36
44
  """Loads report fetcher for a given source.
@@ -57,7 +65,10 @@ def get_report_fetcher(source: str) -> type[report_fetcher.ApiReportFetcher]:
57
65
  if inspect.isclass(obj) and issubclass(
58
66
  obj, report_fetcher.ApiReportFetcher
59
67
  ):
60
- return getattr(fetcher_module, name)
68
+ if not hasattr(obj, 'alias'):
69
+ return getattr(fetcher_module, name)
70
+ if obj.alias == fetcher.name:
71
+ return getattr(fetcher_module, name)
61
72
  except ModuleNotFoundError as e:
62
73
  raise report_fetcher.ApiReportFetcherError(
63
74
  f'Failed to load fetcher for source {source}, reason: {e}'
@@ -67,6 +78,45 @@ def get_report_fetcher(source: str) -> type[report_fetcher.ApiReportFetcher]:
67
78
  )
68
79
 
69
80
 
81
+ @tracer.start_as_current_span('get_report_simulator')
82
+ def get_report_simulator(source: str) -> type[simulator.ApiReportSimulator]:
83
+ """Loads report simulator for a given source.
84
+
85
+ Args:
86
+ source: Alias for a source associated with a simulator.
87
+
88
+ Returns:
89
+ Class for a found report simulator.
90
+
91
+ Raises:
92
+ GarfApiReportSimulatorError: When simulator cannot be loaded.
93
+ MissingApiReportSimulatorError: When simulator not found.
94
+ """
95
+ if source not in find_simulators():
96
+ raise simulator.MissingApiReportSimulatorError(source)
97
+ for sim in _get_entrypoints('garf_simulator'):
98
+ if sim.name == source:
99
+ try:
100
+ with tracer.start_as_current_span('load_simulator_module') as span:
101
+ simulator_module = sim.load()
102
+ span.set_attribute('loaded_module', simulator_module.__name__)
103
+ for name, obj in inspect.getmembers(simulator_module):
104
+ if inspect.isclass(obj) and issubclass(
105
+ obj, simulator.ApiReportSimulator
106
+ ):
107
+ if not hasattr(obj, 'alias'):
108
+ return getattr(simulator_module, name)
109
+ if obj.alias == sim.name:
110
+ return getattr(simulator_module, name)
111
+ except ModuleNotFoundError as e:
112
+ raise simulator.GarfApiReportSimulatorError(
113
+ f'Failed to load simulator for source {source}, reason: {e}'
114
+ )
115
+ raise simulator.GarfApiReportSimulatorError(
116
+ f'No simulator available for the source "{source}"'
117
+ )
118
+
119
+
70
120
  def _get_entrypoints(group='garf'):
71
121
  if sys.version_info.major == 3 and sys.version_info.minor == 9:
72
122
  try: