garf-executors 0.1.4__py3-none-any.whl → 0.2.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
garf_executors/__init__.py CHANGED
@@ -43,7 +43,7 @@ def setup_executor(
   else:
     concrete_api_fetcher = fetchers.get_report_fetcher(source)
     query_executor = ApiQueryExecutor(
-      concrete_api_fetcher(
+      fetcher=concrete_api_fetcher(
        **fetcher_parameters,
        enable_cache=enable_cache,
        cache_ttl_seconds=cache_ttl_seconds,
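
For reference, a minimal usage sketch of the corrected call path above (the 'fake' source alias and empty fetcher parameters are placeholders, not part of this diff):

import garf_executors

# Caching arguments are forwarded into the fetcher constructor via the
# keyword-only `fetcher=` call shown in the hunk above.
query_executor = garf_executors.setup_executor(
  source='fake',
  fetcher_parameters={},
  enable_cache=True,
  cache_ttl_seconds=3600,
)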
@@ -57,4 +57,4 @@ __all__ = [
   'ApiExecutionContext',
 ]
 
-__version__ = '0.1.4'
+__version__ = '0.2.3'

garf_executors/api_executor.py CHANGED
@@ -20,7 +20,6 @@ GarfReport and saving it to local/remote storage.
 
 from __future__ import annotations
 
-import asyncio
 import logging
 
 from garf_core import report_fetcher
@@ -35,7 +34,7 @@ logger = logging.getLogger(__name__)
 class ApiExecutionContext(execution_context.ExecutionContext):
   """Common context for executing one or more queries."""
 
-  writer: str = 'console'
+  writer: str | list[str] = 'console'
 
 
 class ApiQueryExecutor(executor.Executor):
@@ -52,15 +51,29 @@ class ApiQueryExecutor(executor.Executor):
       fetcher: Instantiated report fetcher.
     """
     self.fetcher = fetcher
+    super().__init__(
+      preprocessors=self.fetcher.preprocessors,
+      postprocessors=self.fetcher.postprocessors,
+    )
 
   @classmethod
   def from_fetcher_alias(
-    cls, source: str, fetcher_parameters: dict[str, str] | None = None
+    cls,
+    source: str,
+    fetcher_parameters: dict[str, str] | None = None,
+    enable_cache: bool = False,
+    cache_ttl_seconds: int = 3600,
   ) -> ApiQueryExecutor:
     if not fetcher_parameters:
       fetcher_parameters = {}
     concrete_api_fetcher = fetchers.get_report_fetcher(source)
-    return ApiQueryExecutor(concrete_api_fetcher(**fetcher_parameters))
+    return ApiQueryExecutor(
+      fetcher=concrete_api_fetcher(
+        **fetcher_parameters,
+        enable_cache=enable_cache,
+        cache_ttl_seconds=cache_ttl_seconds,
+      )
+    )
 
   @tracer.start_as_current_span('api.execute')
   def execute(
@@ -83,31 +96,40 @@ class ApiQueryExecutor(executor.Executor):
       GarfExecutorError: When failed to execute query.
     """
     span = trace.get_current_span()
-    span.set_attribute('fetcher', self.fetcher.__class__.__name__)
-    span.set_attribute('api_client', self.fetcher.api_client.__class__.__name__)
+    span.set_attribute('fetcher.class', self.fetcher.__class__.__name__)
+    span.set_attribute(
+      'api.client.class', self.fetcher.api_client.__class__.__name__
+    )
     try:
-      span.set_attribute('query_title', title)
-      span.set_attribute('query_text', query)
+      span.set_attribute('query.title', title)
+      span.set_attribute('query.text', query)
       logger.debug('starting query %s', query)
       results = self.fetcher.fetch(
         query_specification=query,
         args=context.query_parameters,
         **context.fetcher_parameters,
       )
-      writer_client = context.writer_client
-      logger.debug(
-        'Start writing data for query %s via %s writer',
-        title,
-        type(writer_client),
-      )
-      result = writer_client.write(results, title)
-      logger.debug(
-        'Finish writing data for query %s via %s writer',
-        title,
-        type(writer_client),
-      )
+      writer_clients = context.writer_clients
+      if not writer_clients:
+        logger.warning('No writers configured, skipping write operation')
+        return None
+      writing_results = []
+      for writer_client in writer_clients:
+        logger.debug(
+          'Start writing data for query %s via %s writer',
+          title,
+          type(writer_client),
+        )
+        result = writer_client.write(results, title)
+        logger.debug(
+          'Finish writing data for query %s via %s writer',
+          title,
+          type(writer_client),
+        )
+        writing_results.append(result)
      logger.info('%s executed successfully', title)
-      return result
+      # Return the last writer's result for backward compatibility
+      return writing_results[-1] if writing_results else None
     except Exception as e:
       logger.error('%s generated an exception: %s', title, str(e))
       raise exceptions.GarfExecutorError(
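
For reference, a minimal sketch of the new fan-out behavior (the fetcher alias, query text, and the 'console'/'csv' writer aliases are assumptions based on garf_io's registered writers, not part of this diff):

from garf_executors.api_executor import ApiExecutionContext, ApiQueryExecutor

# from_fetcher_alias now accepts caching arguments directly (see above).
executor = ApiQueryExecutor.from_fetcher_alias(
  source='fake', enable_cache=True, cache_ttl_seconds=600
)
# writer accepts a list; execute() writes the report via every client and
# returns the last writer's result, or None when no writers are configured.
context = ApiExecutionContext(writer=['console', 'csv'])
result = executor.execute(
  query='SELECT field FROM resource', title='sample', context=context
)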

garf_executors/bq_executor.py CHANGED
@@ -15,6 +15,7 @@
 
 from __future__ import annotations
 
+import contextlib
 import os
 
 try:
@@ -29,6 +30,7 @@ import logging
 
 from garf_core import query_editor, report
 from google.cloud import exceptions as google_cloud_exceptions
+from opentelemetry import trace
 
 from garf_executors import exceptions, execution_context, executor
 from garf_executors.telemetry import tracer
@@ -67,6 +69,7 @@ class BigQueryExecutor(executor.Executor, query_editor.TemplateProcessorMixin):
     )
     self.project_id = project_id
     self.location = location
+    super().__init__()
 
   @property
   def client(self) -> bigquery.Client:
@@ -92,34 +95,47 @@ class BigQueryExecutor(executor.Executor, query_editor.TemplateProcessorMixin):
     Returns:
       Report with data if query returns some data otherwise empty Report.
     """
+    span = trace.get_current_span()
+    logger.info('Executing script: %s', title)
     query_text = self.replace_params_template(query, context.query_parameters)
     self.create_datasets(context.query_parameters.macro)
     job = self.client.query(query_text)
     try:
       result = job.result()
+    except google_cloud_exceptions.GoogleCloudError as e:
+      raise BigQueryExecutorError(
+        f'Failed to execute query {title}: Reason: {e}'
+      ) from e
     logger.debug('%s launched successfully', title)
-      if result.total_rows:
-        results = report.GarfReport.from_pandas(result.to_dataframe())
+    if result.total_rows:
+      results = report.GarfReport.from_pandas(result.to_dataframe())
+    else:
+      results = report.GarfReport()
+    if context.writer and results:
+      writer_clients = context.writer_clients
+      if not writer_clients:
+        logger.warning('No writers configured, skipping write operation')
       else:
-        results = report.GarfReport()
-      if context.writer and results:
-        writer_client = context.writer_client
-        logger.debug(
-          'Start writing data for query %s via %s writer',
-          title,
-          type(writer_client),
-        )
-        writing_result = writer_client.write(results, title)
-        logger.debug(
-          'Finish writing data for query %s via %s writer',
-          title,
-          type(writer_client),
-        )
+        writing_results = []
+        for writer_client in writer_clients:
+          logger.debug(
+            'Start writing data for query %s via %s writer',
+            title,
+            type(writer_client),
+          )
+          writing_result = writer_client.write(results, title)
+          logger.debug(
+            'Finish writing data for query %s via %s writer',
+            title,
+            type(writer_client),
+          )
+          writing_results.append(writing_result)
+        # Return the last writer's result for backward compatibility
        logger.info('%s executed successfully', title)
-        return writing_result
-      return results
-    except google_cloud_exceptions.GoogleCloudError as e:
-      raise BigQueryExecutorError(e) from e
+        return writing_results[-1] if writing_results else None
+    logger.info('%s executed successfully', title)
+    span.set_attribute('execute.num_results', len(results))
+    return results
 
   @tracer.start_as_current_span('bq.create_datasets')
   def create_datasets(self, macros: dict | None) -> None:
@@ -139,8 +155,9 @@ class BigQueryExecutor(executor.Executor, query_editor.TemplateProcessorMixin):
     except google_cloud_exceptions.NotFound:
       bq_dataset = bigquery.Dataset(dataset_id)
       bq_dataset.location = self.location
-      self.client.create_dataset(bq_dataset, timeout=30)
-      logger.info('Created new dataset %s', dataset_id)
+      with contextlib.suppress(google_cloud_exceptions.Conflict):
+        self.client.create_dataset(bq_dataset, timeout=30)
+        logger.info('Created new dataset %s', dataset_id)
 
 
 def extract_datasets(macros: dict | None) -> list[str]:

garf_executors/config.py CHANGED
@@ -47,5 +47,7 @@ class Config(pydantic.BaseModel):
   def save(self, path: str | pathlib.Path | os.PathLike[str]) -> str:
     """Saves config to local or remote yaml file."""
     with smart_open.open(path, 'w', encoding='utf-8') as f:
-      yaml.dump(self.model_dump().get('sources'), f, encoding='utf-8')
+      yaml.dump(
+        self.model_dump(exclude_none=True).get('sources'), f, encoding='utf-8'
+      )
     return f'Config is saved to {str(path)}'

garf_executors/entrypoints/cli.py CHANGED
@@ -24,9 +24,10 @@ import logging
 import sys
 
 from garf_io import reader
+from opentelemetry import trace
 
 import garf_executors
-from garf_executors import config, exceptions
+from garf_executors import config, exceptions, workflow
 from garf_executors.entrypoints import utils
 from garf_executors.entrypoints.tracer import initialize_tracer
 from garf_executors.telemetry import tracer
@@ -39,6 +40,7 @@ def main():
   parser = argparse.ArgumentParser()
   parser.add_argument('query', nargs='*')
   parser.add_argument('-c', '--config', dest='config', default=None)
+  parser.add_argument('-w', '--workflow', dest='workflow', default=None)
   parser.add_argument('--source', dest='source', default=None)
   parser.add_argument('--output', dest='output', default='console')
   parser.add_argument('--input', dest='input', default='file')
@@ -70,61 +72,91 @@ def main():
   parser.set_defaults(dry_run=False)
   args, kwargs = parser.parse_known_args()
 
+  span = trace.get_current_span()
+  command_args = ' '.join(sys.argv[1:])
+  span.set_attribute('cli.command', f'garf {command_args}')
   if args.version:
     print(garf_executors.__version__)
     sys.exit()
   logger = utils.init_logging(
     loglevel=args.loglevel.upper(), logger_type=args.logger, name=args.log_name
   )
+  reader_client = reader.create_reader(args.input)
+  if workflow_file := args.workflow:
+    execution_workflow = workflow.Workflow.from_file(workflow_file)
+    for i, step in enumerate(execution_workflow.steps, 1):
+      with tracer.start_as_current_span(f'{i}-{step.fetcher}'):
+        query_executor = garf_executors.setup_executor(
+          source=step.fetcher,
+          fetcher_parameters=step.fetcher_parameters,
+          enable_cache=args.enable_cache,
+          cache_ttl_seconds=args.cache_ttl_seconds,
+        )
+        batch = {}
+        if not (queries := step.queries):
+          logger.error('Please provide one or more queries to run')
+          raise exceptions.GarfExecutorError(
+            'Please provide one or more queries to run'
+          )
+        for query in queries:
+          if isinstance(query, garf_executors.workflow.QueryPath):
+            batch[query.path] = reader_client.read(query.path)
+          else:
+            batch[query.query.title] = query.query.text
+        query_executor.execute_batch(
+          batch, step.context, args.parallel_threshold
+        )
+    sys.exit()
+
   if not args.query:
     logger.error('Please provide one or more queries to run')
     raise exceptions.GarfExecutorError(
       'Please provide one or more queries to run'
     )
-  reader_client = reader.create_reader(args.input)
   if config_file := args.config:
     execution_config = config.Config.from_file(config_file)
     if not (context := execution_config.sources.get(args.source)):
       raise exceptions.GarfExecutorError(
         f'No execution context found for source {args.source} in {config_file}'
       )
-    query_executor = garf_executors.setup_executor(
-      source=args.source,
-      fetcher_parameters=context.fetcher_parameters,
-      enable_cache=args.enable_cache,
-      cache_ttl_seconds=args.cache_ttl_seconds,
-    )
-    batch = {query: reader_client.read(query) for query in args.query}
-    query_executor.execute_batch(batch, context, args.parallel_threshold)
   else:
-    extra_parameters = utils.ParamsParser(
-      ['source', args.output, 'macro', 'template']
-    ).parse(kwargs)
+    param_types = ['source', 'macro', 'template']
+    outputs = args.output.split(',')
+    extra_parameters = utils.ParamsParser([*param_types, *outputs]).parse(
+      kwargs
+    )
     source_parameters = extra_parameters.get('source', {})
+    writer_parameters = {}
+    for output in outputs:
+      writer_parameters.update(extra_parameters.get(output))
 
     context = garf_executors.api_executor.ApiExecutionContext(
       query_parameters={
         'macro': extra_parameters.get('macro'),
         'template': extra_parameters.get('template'),
       },
-      writer=args.output,
-      writer_parameters=extra_parameters.get(args.output),
+      writer=outputs,
+      writer_parameters=writer_parameters,
       fetcher_parameters=source_parameters,
     )
-    query_executor = garf_executors.setup_executor(
-      source=args.source,
-      fetcher_parameters=context.fetcher_parameters,
-      enable_cache=args.enable_cache,
-      cache_ttl_seconds=args.cache_ttl_seconds,
-    )
-    if args.parallel_queries:
-      logger.info('Running queries in parallel')
-      batch = {query: reader_client.read(query) for query in args.query}
-      query_executor.execute_batch(batch, context, args.parallel_threshold)
-    else:
+  query_executor = garf_executors.setup_executor(
+    source=args.source,
+    fetcher_parameters=context.fetcher_parameters,
+    enable_cache=args.enable_cache,
+    cache_ttl_seconds=args.cache_ttl_seconds,
+  )
+  batch = {query: reader_client.read(query) for query in args.query}
+  if args.parallel_queries and len(args.query) > 1:
+    logger.info('Running queries in parallel')
+    batch = {query: reader_client.read(query) for query in args.query}
+    query_executor.execute_batch(batch, context, args.parallel_threshold)
+  else:
+    if len(args.query) > 1:
      logger.info('Running queries sequentially')
-    for query in args.query:
-      query_executor.execute(reader_client.read(query), query, context)
+    for query in args.query:
+      query_executor.execute(
+        query=reader_client.read(query), title=query, context=context
+      )
   logging.shutdown()
 

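In practice, the reworked CLI accepts a comma-separated writer list, e.g. `garf query.sql --source <source> --output console,csv`, parsing per-writer parameters for each alias, while `garf -w workflow.yaml` executes every workflow step and exits before the single-source path is reached (the file names here are illustrative).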

garf_executors/entrypoints/grpc_server.py ADDED
@@ -0,0 +1,68 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""gRPC endpoint for garf."""
+
+import argparse
+import logging
+from concurrent import futures
+
+import grpc
+from google.protobuf.json_format import MessageToDict
+from grpc_reflection.v1alpha import reflection
+
+import garf_executors
+from garf_executors import garf_pb2, garf_pb2_grpc
+from garf_executors.entrypoints.tracer import initialize_tracer
+
+
+class GarfService(garf_pb2_grpc.GarfService):
+  def Execute(self, request, context):
+    query_executor = garf_executors.setup_executor(
+      request.source, request.context.fetcher_parameters
+    )
+    execution_context = garf_executors.execution_context.ExecutionContext(
+      **MessageToDict(request.context, preserving_proto_field_name=True)
+    )
+    result = query_executor.execute(
+      query=request.query,
+      title=request.title,
+      context=execution_context,
+    )
+    return garf_pb2.ExecuteResponse(results=[result])
+
+
+if __name__ == '__main__':
+  parser = argparse.ArgumentParser()
+  parser.add_argument('--port', dest='port', default=50051, type=int)
+  parser.add_argument(
+    '--parallel-threshold', dest='parallel_threshold', default=10, type=int
+  )
+  args, _ = parser.parse_known_args()
+  initialize_tracer()
+  server = grpc.server(
+    futures.ThreadPoolExecutor(max_workers=args.parallel_threshold)
+  )
+
+  service = GarfService()
+  garf_pb2_grpc.add_GarfServiceServicer_to_server(service, server)
+  SERVICE_NAMES = (
+    garf_pb2.DESCRIPTOR.services_by_name['GarfService'].full_name,
+    reflection.SERVICE_NAME,
+  )
+  reflection.enable_server_reflection(SERVICE_NAMES, server)
+  server.add_insecure_port(f'[::]:{args.port}')
+  server.start()
+  logging.info('Garf service started, listening on port %d', 50051)
+  server.wait_for_termination()
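
For reference, a minimal client sketch against this endpoint (it assumes the server above is running on localhost:50051 and that a fetcher for the chosen source is installed; source, title, and query are placeholders):

import grpc

from garf_executors import garf_pb2, garf_pb2_grpc

with grpc.insecure_channel('localhost:50051') as channel:
  stub = garf_pb2_grpc.GarfServiceStub(channel)
  response = stub.Execute(
    garf_pb2.ExecuteRequest(
      source='fake',
      title='sample',
      query='SELECT field FROM resource',
      context=garf_pb2.ExecutionContext(writer='console'),
    )
  )
  print(response.results)  # repeated string, per the ExecuteResponse message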

garf_executors/entrypoints/server.py CHANGED
@@ -18,9 +18,11 @@ from typing import Optional, Union
 
 import fastapi
 import pydantic
+import typer
 import uvicorn
 from garf_io import reader
 from opentelemetry.instrumentation.fastapi import FastAPIInstrumentor
+from typing_extensions import Annotated
 
 import garf_executors
 from garf_executors import exceptions
@@ -29,6 +31,7 @@ from garf_executors.entrypoints.tracer import initialize_tracer
 initialize_tracer()
 app = fastapi.FastAPI()
 FastAPIInstrumentor.instrument_app(app)
+typer_app = typer.Typer()
 
 
 class ApiExecutorRequest(pydantic.BaseModel):
@@ -104,5 +107,12 @@ def execute_batch(request: ApiExecutorRequest) -> ApiExecutorResponse:
   return ApiExecutorResponse(results=results)
 
 
+@typer_app.command()
+def main(
+  port: Annotated[int, typer.Option(help='Port to start the server')] = 8000,
+):
+  uvicorn.run(app, port=port)
+
+
 if __name__ == '__main__':
-  uvicorn.run(app)
+  typer_app()

garf_executors/entrypoints/tracer.py CHANGED
@@ -1,4 +1,4 @@
-# Copyright 2025 Google LLC
+# Copyright 2026 Google LLC
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -35,8 +35,23 @@ def initialize_tracer():
   tracer_provider = TracerProvider(resource=resource)
 
   if otel_endpoint := os.getenv('OTEL_EXPORTER_OTLP_ENDPOINT'):
-    otlp_processor = BatchSpanProcessor(
-      OTLPSpanExporter(endpoint=otel_endpoint, insecure=True)
-    )
-    tracer_provider.add_span_processor(otlp_processor)
+    if gcp_project_id := os.getenv('OTEL_EXPORTER_GCP_PROJECT_ID'):
+      try:
+        from opentelemetry.exporter.cloud_trace import CloudTraceSpanExporter
+      except ImportError as e:
+        raise ImportError(
+          'Please install garf_executors with GCP support '
+          '- `pip install garf_executors[gcp]`'
+        ) from e
+
+      cloud_span_processor = BatchSpanProcessor(
+        CloudTraceSpanExporter(project_id=gcp_project_id)
+      )
+      tracer_provider.add_span_processor(cloud_span_processor)
+    else:
+      otlp_processor = BatchSpanProcessor(
+        OTLPSpanExporter(endpoint=otel_endpoint, insecure=True)
+      )
+      tracer_provider.add_span_processor(otlp_processor)
+
   trace.set_tracer_provider(tracer_provider)
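
For reference, a minimal sketch of exporter selection (the endpoint and project id are placeholders; note that, per the nesting above, the Cloud Trace branch is only reached when OTEL_EXPORTER_OTLP_ENDPOINT is also set):

import os

os.environ['OTEL_EXPORTER_OTLP_ENDPOINT'] = 'http://localhost:4317'
# Uncomment to route spans to Cloud Trace instead (requires the gcp extra):
# os.environ['OTEL_EXPORTER_GCP_PROJECT_ID'] = 'my-project'

from garf_executors.entrypoints.tracer import initialize_tracer

initialize_tracer()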

garf_executors/execution_context.py CHANGED
@@ -35,7 +35,7 @@ class ExecutionContext(pydantic.BaseModel):
   Attributes:
     query_parameters: Parameters to dynamically change query text.
     fetcher_parameters: Parameters to specify fetching setup.
-    writer: Type of writer to use.
+    writer: Type of writer to use. Can be a single writer string or list of writers.
     writer_parameters: Optional parameters to setup writer.
   """
 
@@ -45,7 +45,7 @@ class ExecutionContext(pydantic.BaseModel):
   fetcher_parameters: dict[str, str | bool | int | list[str | int]] | None = (
     pydantic.Field(default_factory=dict)
   )
-  writer: str | None = None
+  writer: str | list[str] | None = None
   writer_parameters: dict[str, str] | None = pydantic.Field(
     default_factory=dict
   )
@@ -75,9 +75,42 @@ class ExecutionContext(pydantic.BaseModel):
 
   @property
   def writer_client(self) -> abs_writer.AbsWriter:
-    writer_client = writer.create_writer(self.writer, **self.writer_parameters)
-    if self.writer == 'bq':
+    """Returns single writer client."""
+    if isinstance(self.writer, list) and len(self.writer) > 0:
+      writer_type = self.writer[0]
+    else:
+      writer_type = self.writer
+
+    writer_params = self.writer_parameters or {}
+
+    if not writer_type:
+      raise ValueError('No writer specified')
+
+    writer_client = writer.create_writer(writer_type, **writer_params)
+    if writer_type == 'bq':
       _ = writer_client.create_or_get_dataset()
-    if self.writer == 'sheet':
+    if writer_type == 'sheet':
       writer_client.init_client()
     return writer_client
+
+  @property
+  def writer_clients(self) -> list[abs_writer.AbsWriter]:
+    """Returns list of writer clients."""
+    if not self.writer:
+      return []
+
+    # Convert single writer to list for uniform processing
+    writers_to_use = (
+      self.writer if isinstance(self.writer, list) else [self.writer]
+    )
+    writer_params = self.writer_parameters or {}
+
+    clients = []
+    for writer_type in writers_to_use:
+      writer_client = writer.create_writer(writer_type, **writer_params)
+      if writer_type == 'bq':
+        _ = writer_client.create_or_get_dataset()
+      if writer_type == 'sheet':
+        writer_client.init_client()
+      clients.append(writer_client)
+    return clients
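
For reference, a minimal sketch of the two properties above (the 'console' and 'csv' aliases are assumptions based on garf_io's writer registry):

from garf_executors.execution_context import ExecutionContext

context = ExecutionContext(writer=['console', 'csv'])
print(type(context.writer_client).__name__)  # first alias only
for client in context.writer_clients:  # one instantiated writer per alias
  print(type(client).__name__)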

garf_executors/executor.py CHANGED
@@ -15,7 +15,10 @@
 """Defines common functionality between executors."""
 
 import asyncio
+import inspect
+from typing import Optional
 
+from garf_core import report_fetcher
 from opentelemetry import trace
 
 from garf_executors import execution_context
@@ -25,6 +28,14 @@ from garf_executors.telemetry import tracer
 class Executor:
   """Defines common functionality between executors."""
 
+  def __init__(
+    self,
+    preprocessors: Optional[dict[str, report_fetcher.Processor]] = None,
+    postprocessors: Optional[dict[str, report_fetcher.Processor]] = None,
+  ) -> None:
+    self.preprocessors = preprocessors or {}
+    self.postprocessors = postprocessors or {}
+
   @tracer.start_as_current_span('api.execute_batch')
   def execute_batch(
     self,
@@ -34,6 +45,9 @@ class Executor:
   ) -> list[str]:
     """Executes batch of queries for a common context.
 
+    If an executor has any pre/post processors, executes them first while
+    modifying the context.
+
     Args:
       batch: Mapping between query_title and its text.
       context: Execution context.
@@ -44,11 +58,19 @@ class Executor:
     """
     span = trace.get_current_span()
     span.set_attribute('api.parallel_threshold', parallel_threshold)
-    return asyncio.run(
+    _handle_processors(processors=self.preprocessors, context=context)
+    results = asyncio.run(
       self._run(
         batch=batch, context=context, parallel_threshold=parallel_threshold
       )
     )
+    _handle_processors(processors=self.postprocessors, context=context)
+    return results
+
+  def add_preprocessor(
+    self, preprocessors: dict[str, report_fetcher.Processor]
+  ) -> None:
+    self.preprocessors.update(preprocessors)
 
   async def aexecute(
     self,
@@ -85,3 +107,18 @@ class Executor:
       for title, query in batch.items()
     ]
     return await asyncio.gather(*(run_with_semaphore(task) for task in tasks))
+
+
+def _handle_processors(
+  processors: dict[str, report_fetcher.Processor],
+  context: execution_context.ExecutionContext,
+) -> None:
+  for k, processor in processors.items():
+    processor_signature = list(inspect.signature(processor).parameters.keys())
+    if k in context.fetcher_parameters:
+      processor_parameters = {
+        k: v
+        for k, v in context.fetcher_parameters.items()
+        if k in processor_signature
+      }
+      context.fetcher_parameters[k] = processor(**processor_parameters)
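
For reference, a minimal sketch of how a processor plugs into this mechanism (the 'accounts' parameter name and the expansion logic are illustrative assumptions):

from garf_executors import execution_context, executor as executor_lib

def expand_accounts(accounts):
  # Pretend each seed account expands into two child accounts.
  return [f'{accounts}-01', f'{accounts}-02']

query_executor = executor_lib.Executor()
query_executor.add_preprocessor({'accounts': expand_accounts})
context = execution_context.ExecutionContext(
  fetcher_parameters={'accounts': 'seed'}
)
# During execute_batch, _handle_processors inspects expand_accounts' signature,
# passes only the matching fetcher_parameters entries, and stores the result
# back, so the batch runs with {'accounts': ['seed-01', 'seed-02']}.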

garf_executors/fetchers.py CHANGED
@@ -13,14 +13,16 @@
 # limitations under the License.
 
 import inspect
+import logging
 import sys
 from importlib.metadata import entry_points
 
 from garf_core import report_fetcher
-from opentelemetry import trace
 
 from garf_executors.telemetry import tracer
 
+logger = logging.getLogger(name='garf_executors.fetchers')
+
 
 @tracer.start_as_current_span('find_fetchers')
 def find_fetchers() -> set[str]:
@@ -57,8 +59,10 @@ def get_report_fetcher(source: str) -> type[report_fetcher.ApiReportFetcher]:
         obj, report_fetcher.ApiReportFetcher
       ):
         return getattr(fetcher_module, name)
-    except ModuleNotFoundError:
-      continue
+    except ModuleNotFoundError as e:
+      raise report_fetcher.ApiReportFetcherError(
+        f'Failed to load fetcher for source {source}, reason: {e}'
+      )
   raise report_fetcher.ApiReportFetcherError(
     f'No fetcher available for the source "{source}"'
   )

garf_executors/garf_pb2.py ADDED
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
+# source: garf.proto
+# Protobuf Python Version: 6.31.1
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+_runtime_version.ValidateProtobufRuntimeVersion(
+    _runtime_version.Domain.PUBLIC,
+    6,
+    31,
+    1,
+    '',
+    'garf.proto'
+)
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\ngarf.proto\x12\x04garf\x1a\x1cgoogle/protobuf/struct.proto\"g\n\x0e\x45xecuteRequest\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\r\n\x05title\x18\x02 \x01(\t\x12\r\n\x05query\x18\x03 \x01(\t\x12\'\n\x07\x63ontext\x18\x04 \x01(\x0b\x32\x16.garf.ExecutionContext\"\xbc\x01\n\x10\x45xecutionContext\x12/\n\x10query_parameters\x18\x01 \x01(\x0b\x32\x15.garf.QueryParameters\x12\x33\n\x12\x66\x65tcher_parameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x0e\n\x06writer\x18\x03 \x01(\t\x12\x32\n\x11writer_parameters\x18\x04 \x01(\x0b\x32\x17.google.protobuf.Struct\"d\n\x0fQueryParameters\x12&\n\x05macro\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\x12)\n\x08template\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\"\"\n\x0f\x45xecuteResponse\x12\x0f\n\x07results\x18\x01 \x03(\t2G\n\x0bGarfService\x12\x38\n\x07\x45xecute\x12\x14.garf.ExecuteRequest\x1a\x15.garf.ExecuteResponse\"\x00\x62\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'garf_pb2', _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+  DESCRIPTOR._loaded_options = None
+  _globals['_EXECUTEREQUEST']._serialized_start=50
+  _globals['_EXECUTEREQUEST']._serialized_end=153
+  _globals['_EXECUTIONCONTEXT']._serialized_start=156
+  _globals['_EXECUTIONCONTEXT']._serialized_end=344
+  _globals['_QUERYPARAMETERS']._serialized_start=346
+  _globals['_QUERYPARAMETERS']._serialized_end=446
+  _globals['_EXECUTERESPONSE']._serialized_start=448
+  _globals['_EXECUTERESPONSE']._serialized_end=482
+  _globals['_GARFSERVICE']._serialized_start=484
+  _globals['_GARFSERVICE']._serialized_end=555
+# @@protoc_insertion_point(module_scope)

garf_executors/garf_pb2_grpc.py ADDED
@@ -0,0 +1,97 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
+import grpc
+import warnings
+
+from . import garf_pb2 as garf__pb2
+
+GRPC_GENERATED_VERSION = '1.75.0'
+GRPC_VERSION = grpc.__version__
+_version_not_supported = False
+
+try:
+    from grpc._utilities import first_version_is_lower
+    _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
+except ImportError:
+    _version_not_supported = True
+
+if _version_not_supported:
+    raise RuntimeError(
+        f'The grpc package installed is at version {GRPC_VERSION},'
+        + f' but the generated code in garf_pb2_grpc.py depends on'
+        + f' grpcio>={GRPC_GENERATED_VERSION}.'
+        + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
+        + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
+    )
+
+
+class GarfServiceStub(object):
+    """Missing associated documentation comment in .proto file."""
+
+    def __init__(self, channel):
+        """Constructor.
+
+        Args:
+            channel: A grpc.Channel.
+        """
+        self.Execute = channel.unary_unary(
+                '/garf.GarfService/Execute',
+                request_serializer=garf__pb2.ExecuteRequest.SerializeToString,
+                response_deserializer=garf__pb2.ExecuteResponse.FromString,
+                _registered_method=True)
+
+
+class GarfServiceServicer(object):
+    """Missing associated documentation comment in .proto file."""
+
+    def Execute(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+
+def add_GarfServiceServicer_to_server(servicer, server):
+    rpc_method_handlers = {
+            'Execute': grpc.unary_unary_rpc_method_handler(
+                    servicer.Execute,
+                    request_deserializer=garf__pb2.ExecuteRequest.FromString,
+                    response_serializer=garf__pb2.ExecuteResponse.SerializeToString,
+            ),
+    }
+    generic_handler = grpc.method_handlers_generic_handler(
+            'garf.GarfService', rpc_method_handlers)
+    server.add_generic_rpc_handlers((generic_handler,))
+    server.add_registered_method_handlers('garf.GarfService', rpc_method_handlers)
+
+
+# This class is part of an EXPERIMENTAL API.
+class GarfService(object):
+    """Missing associated documentation comment in .proto file."""
+
+    @staticmethod
+    def Execute(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(
+            request,
+            target,
+            '/garf.GarfService/Execute',
+            garf__pb2.ExecuteRequest.SerializeToString,
+            garf__pb2.ExecuteResponse.FromString,
+            options,
+            channel_credentials,
+            insecure,
+            call_credentials,
+            compression,
+            wait_for_ready,
+            timeout,
+            metadata,
+            _registered_method=True)

garf_executors/sql_executor.py CHANGED
@@ -25,6 +25,7 @@ except ImportError as e:
 
 import logging
 import re
+import uuid
 
 import pandas as pd
 from garf_core import query_editor, report
@@ -56,6 +57,7 @@ class SqlAlchemyQueryExecutor(
       engine: Initialized Engine object to operated on a given database.
     """
     self.engine = engine
+    super().__init__()
 
   @classmethod
   def from_connection_string(
@@ -88,36 +90,52 @@ class SqlAlchemyQueryExecutor(
       Report with data if query returns some data otherwise empty Report.
     """
     span = trace.get_current_span()
-    logging.info('Executing script: %s', title)
+    logger.info('Executing script: %s', title)
     query_text = self.replace_params_template(query, context.query_parameters)
     with self.engine.begin() as conn:
       if re.findall(r'(create|update) ', query_text.lower()):
-        conn.connection.executescript(query_text)
-        results = report.GarfReport()
+        try:
+          conn.connection.executescript(query_text)
+          results = report.GarfReport()
+        except Exception as e:
+          raise SqlAlchemyQueryExecutorError(
+            f'Failed to execute query {title}: Reason: {e}'
+          ) from e
       else:
-        temp_table_name = f'temp_{title}'.replace('.', '_')
+        temp_table_name = f'temp_{uuid.uuid4().hex}'
         query_text = f'CREATE TABLE {temp_table_name} AS {query_text}'
         conn.connection.executescript(query_text)
         try:
           results = report.GarfReport.from_pandas(
             pd.read_sql(f'SELECT * FROM {temp_table_name}', conn)
           )
+        except Exception as e:
+          raise SqlAlchemyQueryExecutorError(
+            f'Failed to execute query {title}: Reason: {e}'
+          ) from e
         finally:
           conn.connection.execute(f'DROP TABLE {temp_table_name}')
     if context.writer and results:
-      writer_client = context.writer_client
-      logger.debug(
-        'Start writing data for query %s via %s writer',
-        title,
-        type(writer_client),
-      )
-      writing_result = writer_client.write(results, title)
-      logger.debug(
-        'Finish writing data for query %s via %s writer',
-        title,
-        type(writer_client),
-      )
-      logger.info('%s executed successfully', title)
-      return writing_result
+      writer_clients = context.writer_clients
+      if not writer_clients:
+        logger.warning('No writers configured, skipping write operation')
+      else:
+        writing_results = []
+        for writer_client in writer_clients:
+          logger.debug(
+            'Start writing data for query %s via %s writer',
+            title,
+            type(writer_client),
+          )
+          writing_result = writer_client.write(results, title)
+          logger.debug(
+            'Finish writing data for query %s via %s writer',
+            title,
+            type(writer_client),
+          )
+          writing_results.append(writing_result)
+        logger.info('%s executed successfully', title)
+        # Return the last writer's result for backward compatibility
+        return writing_results[-1] if writing_results else None
     span.set_attribute('execute.num_results', len(results))
     return results

garf_executors/workflow.py ADDED
@@ -0,0 +1,96 @@
+# Copyright 2026 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import annotations
+
+import os
+import pathlib
+
+import pydantic
+import smart_open
+import yaml
+
+from garf_executors.execution_context import ExecutionContext
+
+
+class QueryPath(pydantic.BaseModel):
+  """Path file with query."""
+
+  path: str
+
+
+class QueryDefinition(pydantic.BaseModel):
+  """Definition of a query."""
+
+  query: Query
+
+
+class Query(pydantic.BaseModel):
+  """Query elements.
+
+  Attributes:
+    text: Query text.
+    title: Name of the query.
+  """
+
+  text: str
+  title: str
+
+
+class ExecutionStep(ExecutionContext):
+  """Common context for executing one or more queries.
+
+  Attributes:
+    fetcher: Name of a fetcher to get data from API.
+    alias: Optional alias to identify execution step.
+    queries: Queries to run for a particular fetcher.
+    context: Execution context for queries and fetcher.
+  """
+
+  fetcher: str | None = None
+  alias: str | None = None
+  queries: list[QueryPath | QueryDefinition] | None = None
+
+  @property
+  def context(self) -> ExecutionContext:
+    return ExecutionContext(
+      writer=self.writer,
+      writer_parameters=self.writer_parameters,
+      query_parameters=self.query_parameters,
+      fetcher_parameters=self.fetcher_parameters,
+    )
+
+
+class Workflow(pydantic.BaseModel):
+  """Orchestrates execution of queries for multiple fetchers.
+
+  Attributes:
+    steps: Contains one or several fetcher executions.
+  """
+
+  steps: list[ExecutionStep]
+
+  @classmethod
+  def from_file(cls, path: str | pathlib.Path | os.PathLike[str]) -> Workflow:
+    """Builds workflow from local or remote yaml file."""
+    with smart_open.open(path, 'r', encoding='utf-8') as f:
+      data = yaml.safe_load(f)
+    return Workflow(steps=data.get('steps'))
+
+  def save(self, path: str | pathlib.Path | os.PathLike[str]) -> str:
+    """Saves workflow to local or remote yaml file."""
+    with smart_open.open(path, 'w', encoding='utf-8') as f:
+      yaml.dump(
+        self.model_dump(exclude_none=True).get('steps'), f, encoding='utf-8'
+      )
+    return f'Workflow is saved to {str(path)}'
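
For reference, a minimal sketch that builds and saves a workflow of the shape consumed by `garf -w` (the fetcher alias, file path, and query text are placeholders):

from garf_executors.workflow import (
  ExecutionStep,
  Query,
  QueryDefinition,
  QueryPath,
  Workflow,
)

flow = Workflow(
  steps=[
    ExecutionStep(
      fetcher='fake',
      writer='console',
      queries=[
        QueryPath(path='queries/report.sql'),
        QueryDefinition(
          query=Query(title='inline', text='SELECT field FROM resource')
        ),
      ],
    )
  ]
)
print(flow.save('workflow.yaml'))  # 'Workflow is saved to workflow.yaml'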

garf_executors-0.2.3.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: garf-executors
-Version: 0.1.4
+Version: 0.2.3
 Summary: Executes queries against API and writes data to local/remote storage.
 Author-email: "Google Inc. (gTech gPS CSE team)" <no-reply@google.com>, Andrei Markin <andrey.markin.ppc@gmail.com>
 License: Apache 2.0
@@ -23,19 +23,23 @@ Requires-Dist: pyyaml
 Requires-Dist: pydantic
 Requires-Dist: opentelemetry-api
 Requires-Dist: opentelemetry-sdk
+Requires-Dist: opentelemetry-exporter-otlp
 Provides-Extra: bq
 Requires-Dist: garf-io[bq]; extra == "bq"
 Requires-Dist: pandas; extra == "bq"
 Requires-Dist: google-cloud-logging; extra == "bq"
+Requires-Dist: smart_open[gcs]; extra == "bq"
 Provides-Extra: sql
 Requires-Dist: garf-io[sqlalchemy]; extra == "sql"
 Requires-Dist: pandas; extra == "sql"
+Provides-Extra: gcp
+Requires-Dist: opentelemetry-exporter-gcp-trace; extra == "gcp"
 Provides-Extra: server
 Requires-Dist: fastapi[standard]; extra == "server"
 Requires-Dist: opentelemetry-instrumentation-fastapi; extra == "server"
-Requires-Dist: opentelemetry-exporter-otlp; extra == "server"
+Requires-Dist: typer; extra == "server"
 Provides-Extra: all
-Requires-Dist: garf-executors[bq,server,sql]; extra == "all"
+Requires-Dist: garf-executors[bq,gcp,server,sql]; extra == "all"
 
 # `garf-executors` - One stop-shop for interacting with Reporting APIs.

garf_executors-0.2.3.dist-info/RECORD ADDED
@@ -0,0 +1,24 @@
+garf_executors/__init__.py,sha256=wZdLw0WyAGEK1y0Fdagvdd5xOWNKaPvL95yuaYziIWE,1941
+garf_executors/api_executor.py,sha256=IKYI1TK2HI2njxw7_X9n78wAQ1briAXxbA15Ybmt6nA,4295
+garf_executors/bq_executor.py,sha256=HKFBg4PhIaKM_SvjQy-ZbP7AsrsAF1FIj_w9gRqdICA,5756
+garf_executors/config.py,sha256=rZTAuBEa-Loi3DSamXFTjFQXHdeYJv71WOEbLLeo3l4,1721
+garf_executors/exceptions.py,sha256=U_7Q2ZMOUf89gzZd2pw7y3g7i1NeByPPKfpZ3q7p3ZU,662
+garf_executors/execution_context.py,sha256=WhHoN60vyeBUJbdjtOEZC1vUEyLwnIzBHbhT8co3yhs,3850
+garf_executors/executor.py,sha256=tobjdlOaAsc-nKLFSW-3qib5-ca6aHs5iw3Gn0sD72Y,3762
+garf_executors/fetchers.py,sha256=0bYurZs5jzxfGP9BgDnifdM6yRFvyCtKO-i3hFb5T5A,2605
+garf_executors/garf_pb2.py,sha256=mYvBYcAnZtyDflXGN2GZLM2KM0Nv9hoJs55zfQU_l1o,2564
+garf_executors/garf_pb2_grpc.py,sha256=w8D_r3wpj1ZZstkIFogY679-lSCcL2iZQ4QLO8IfToY,3359
+garf_executors/sql_executor.py,sha256=80WiuNBBWQz1y19LmWrzSk6auFFqh6YHBPTkFAGIhMs,4681
+garf_executors/telemetry.py,sha256=P75klGEoYgJ_-pR-izUIQ7B88ufskQ4vmW1rETg63Nc,747
+garf_executors/workflow.py,sha256=9Hkv0NgNyV5_xkkCTS6nsDVqtCmHfbqLQvdaIzFBrLU,2614
+garf_executors/entrypoints/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+garf_executors/entrypoints/cli.py,sha256=Kei2Tqdw1syPKhbeK5u-1G72hXgmo1arXmxio150jPE,6006
+garf_executors/entrypoints/grpc_server.py,sha256=zP9C-dStbElWkb0T_IcIAcBxmA9Wl4GTWytUcrC_7Xg,2296
+garf_executors/entrypoints/server.py,sha256=FbemRjrGDgpr51iAMXdvTXlP1OG7Rc5i5M55Prw0wXg,3473
+garf_executors/entrypoints/tracer.py,sha256=VylQMIXOsRLuT3UlFwjRy8GJiPUI6zohUXiGX_DcE4g,1912
+garf_executors/entrypoints/utils.py,sha256=5XiGR2IOxdzAOY0lEWUeUV7tIpKBGRnQaIwBYvzQB7c,4337
+garf_executors-0.2.3.dist-info/METADATA,sha256=96n_J13NrBFbfz2-fVYo0KlD5p9r7qO8AQ3R_K2V710,3055
+garf_executors-0.2.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+garf_executors-0.2.3.dist-info/entry_points.txt,sha256=LskWNFIw8j0WJuI18-32OZrlASXAMg1XtrRYwsKBz2E,61
+garf_executors-0.2.3.dist-info/top_level.txt,sha256=sP4dCXOENPn1hDFAunjMV8Js4NND_KGeO_gQWuaT0EY,15
+garf_executors-0.2.3.dist-info/RECORD,,

garf_executors-0.1.4.dist-info/RECORD REMOVED
@@ -1,20 +0,0 @@
-garf_executors/__init__.py,sha256=4BZv9zb3tjlpF4kQSdTj1L5IdR-BiNQwtejg5dPTTcY,1933
-garf_executors/api_executor.py,sha256=TxHtdnXjXjfBDU0z13yCulqF0XcEqAoOdVeGczdTSXs,3590
-garf_executors/bq_executor.py,sha256=LOKNitigaMk4U-UjBZTHy4vG092nw6suEbgo2rrHCTI,5002
-garf_executors/config.py,sha256=TqCzijm1PRvL4p-9Zl-kPkcC1SFKjhgTfKMJFmJW3fQ,1688
-garf_executors/exceptions.py,sha256=U_7Q2ZMOUf89gzZd2pw7y3g7i1NeByPPKfpZ3q7p3ZU,662
-garf_executors/execution_context.py,sha256=X4Wm_rE1mnnN2FuC_9bL05a8h8ko7qraeGY955ijNJc,2800
-garf_executors/executor.py,sha256=_Nj6CKgyhzwFOxneODDhV1bvLjrMEvIu93W8YF9-sXo,2481
-garf_executors/fetchers.py,sha256=HQqnMb0wlasVfXmAA7cnsd73POXPEGPxaC5mpEOnQk4,2443
-garf_executors/sql_executor.py,sha256=_4oVPZKTd3lrDE0SM6uQ_bl13Ay9uhQuD-PHO9247WM,3920
-garf_executors/telemetry.py,sha256=P75klGEoYgJ_-pR-izUIQ7B88ufskQ4vmW1rETg63Nc,747
-garf_executors/entrypoints/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-garf_executors/entrypoints/cli.py,sha256=Qbg10LLWHEMBjjsOfEMDZQtjWpUwh6WJKSnqiXOzF6A,4765
-garf_executors/entrypoints/server.py,sha256=b9blyBvN774RiTHUCZkfE5kNVnrTaANrETI4WMDHJeQ,3255
-garf_executors/entrypoints/tracer.py,sha256=A_nolmGuMT3wOZJsoPORjfdtPO2lXdbr6CZt5BW0RTY,1374
-garf_executors/entrypoints/utils.py,sha256=5XiGR2IOxdzAOY0lEWUeUV7tIpKBGRnQaIwBYvzQB7c,4337
-garf_executors-0.1.4.dist-info/METADATA,sha256=3Z0plyqxqwCKYOm2PlXIfvxGo0lAVkdIaLD0s0pgZzQ,2900
-garf_executors-0.1.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-garf_executors-0.1.4.dist-info/entry_points.txt,sha256=LskWNFIw8j0WJuI18-32OZrlASXAMg1XtrRYwsKBz2E,61
-garf_executors-0.1.4.dist-info/top_level.txt,sha256=sP4dCXOENPn1hDFAunjMV8Js4NND_KGeO_gQWuaT0EY,15
-garf_executors-0.1.4.dist-info/RECORD,,