garf-executors 0.1.5__tar.gz → 0.2.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. {garf_executors-0.1.5 → garf_executors-0.2.0}/PKG-INFO +1 -1
  2. {garf_executors-0.1.5 → garf_executors-0.2.0}/garf_executors/__init__.py +2 -2
  3. {garf_executors-0.1.5 → garf_executors-0.2.0}/garf_executors/api_executor.py +26 -16
  4. {garf_executors-0.1.5 → garf_executors-0.2.0}/garf_executors/bq_executor.py +25 -16
  5. {garf_executors-0.1.5 → garf_executors-0.2.0}/garf_executors/entrypoints/cli.py +28 -30
  6. garf_executors-0.2.0/garf_executors/entrypoints/grpc_server.py +68 -0
  7. {garf_executors-0.1.5 → garf_executors-0.2.0}/garf_executors/execution_context.py +38 -5
  8. {garf_executors-0.1.5 → garf_executors-0.2.0}/garf_executors/executor.py +38 -1
  9. {garf_executors-0.1.5 → garf_executors-0.2.0}/garf_executors/fetchers.py +7 -3
  10. garf_executors-0.2.0/garf_executors/garf_pb2.py +45 -0
  11. garf_executors-0.2.0/garf_executors/garf_pb2_grpc.py +97 -0
  12. {garf_executors-0.1.5 → garf_executors-0.2.0}/garf_executors/sql_executor.py +21 -14
  13. {garf_executors-0.1.5 → garf_executors-0.2.0}/garf_executors.egg-info/PKG-INFO +1 -1
  14. {garf_executors-0.1.5 → garf_executors-0.2.0}/garf_executors.egg-info/SOURCES.txt +3 -0
  15. {garf_executors-0.1.5 → garf_executors-0.2.0}/README.md +0 -0
  16. {garf_executors-0.1.5 → garf_executors-0.2.0}/garf_executors/config.py +0 -0
  17. {garf_executors-0.1.5 → garf_executors-0.2.0}/garf_executors/entrypoints/__init__.py +0 -0
  18. {garf_executors-0.1.5 → garf_executors-0.2.0}/garf_executors/entrypoints/server.py +0 -0
  19. {garf_executors-0.1.5 → garf_executors-0.2.0}/garf_executors/entrypoints/tracer.py +0 -0
  20. {garf_executors-0.1.5 → garf_executors-0.2.0}/garf_executors/entrypoints/utils.py +0 -0
  21. {garf_executors-0.1.5 → garf_executors-0.2.0}/garf_executors/exceptions.py +0 -0
  22. {garf_executors-0.1.5 → garf_executors-0.2.0}/garf_executors/telemetry.py +0 -0
  23. {garf_executors-0.1.5 → garf_executors-0.2.0}/garf_executors.egg-info/dependency_links.txt +0 -0
  24. {garf_executors-0.1.5 → garf_executors-0.2.0}/garf_executors.egg-info/entry_points.txt +0 -0
  25. {garf_executors-0.1.5 → garf_executors-0.2.0}/garf_executors.egg-info/requires.txt +0 -0
  26. {garf_executors-0.1.5 → garf_executors-0.2.0}/garf_executors.egg-info/top_level.txt +0 -0
  27. {garf_executors-0.1.5 → garf_executors-0.2.0}/pyproject.toml +0 -0
  28. {garf_executors-0.1.5 → garf_executors-0.2.0}/setup.cfg +0 -0

{garf_executors-0.1.5 → garf_executors-0.2.0}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: garf-executors
-Version: 0.1.5
+Version: 0.2.0
 Summary: Executes queries against API and writes data to local/remote storage.
 Author-email: "Google Inc. (gTech gPS CSE team)" <no-reply@google.com>, Andrei Markin <andrey.markin.ppc@gmail.com>
 License: Apache 2.0

{garf_executors-0.1.5 → garf_executors-0.2.0}/garf_executors/__init__.py
@@ -43,7 +43,7 @@ def setup_executor(
   else:
     concrete_api_fetcher = fetchers.get_report_fetcher(source)
   query_executor = ApiQueryExecutor(
-    concrete_api_fetcher(
+    fetcher=concrete_api_fetcher(
       **fetcher_parameters,
       enable_cache=enable_cache,
       cache_ttl_seconds=cache_ttl_seconds,
@@ -57,4 +57,4 @@ __all__ = [
   'ApiExecutionContext',
 ]
 
-__version__ = '0.1.5'
+__version__ = '0.2.0'
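
For orientation, a minimal sketch of how the updated factory is typically called; the 'fake' source alias and parameter values are illustrative, and the keyword arguments mirror the CLI usage further down in this diff:

import garf_executors

query_executor = garf_executors.setup_executor(
  source='fake',  # hypothetical fetcher alias; any installed plugin works
  fetcher_parameters={},
  enable_cache=False,
  cache_ttl_seconds=3600,
)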

{garf_executors-0.1.5 → garf_executors-0.2.0}/garf_executors/api_executor.py
@@ -20,7 +20,6 @@ GarfReport and saving it to local/remote storage.
 
 from __future__ import annotations
 
-import asyncio
 import logging
 
 from garf_core import report_fetcher
@@ -35,7 +34,7 @@ logger = logging.getLogger(__name__)
 class ApiExecutionContext(execution_context.ExecutionContext):
   """Common context for executing one or more queries."""
 
-  writer: str = 'console'
+  writer: str | list[str] = 'console'
 
 
 class ApiQueryExecutor(executor.Executor):
@@ -52,6 +51,10 @@ class ApiQueryExecutor(executor.Executor):
       fetcher: Instantiated report fetcher.
     """
     self.fetcher = fetcher
+    super().__init__(
+      preprocessors=self.fetcher.preprocessors,
+      postprocessors=self.fetcher.postprocessors,
+    )
 
   @classmethod
   def from_fetcher_alias(
@@ -60,7 +63,7 @@ class ApiQueryExecutor(executor.Executor):
     if not fetcher_parameters:
       fetcher_parameters = {}
     concrete_api_fetcher = fetchers.get_report_fetcher(source)
-    return ApiQueryExecutor(concrete_api_fetcher(**fetcher_parameters))
+    return ApiQueryExecutor(fetcher=concrete_api_fetcher(**fetcher_parameters))
 
   @tracer.start_as_current_span('api.execute')
   def execute(
@@ -94,20 +97,27 @@ class ApiQueryExecutor(executor.Executor):
         args=context.query_parameters,
         **context.fetcher_parameters,
       )
-      writer_client = context.writer_client
-      logger.debug(
-        'Start writing data for query %s via %s writer',
-        title,
-        type(writer_client),
-      )
-      result = writer_client.write(results, title)
-      logger.debug(
-        'Finish writing data for query %s via %s writer',
-        title,
-        type(writer_client),
-      )
+      writer_clients = context.writer_clients
+      if not writer_clients:
+        logger.warning('No writers configured, skipping write operation')
+        return None
+      writing_results = []
+      for writer_client in writer_clients:
+        logger.debug(
+          'Start writing data for query %s via %s writer',
+          title,
+          type(writer_client),
+        )
+        result = writer_client.write(results, title)
+        logger.debug(
+          'Finish writing data for query %s via %s writer',
+          title,
+          type(writer_client),
+        )
+        writing_results.append(result)
       logger.info('%s executed successfully', title)
-      return result
+      # Return the last writer's result for backward compatibility
+      return writing_results[-1] if writing_results else None
     except Exception as e:
       logger.error('%s generated an exception: %s', title, str(e))
       raise exceptions.GarfExecutorError(
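
Taken together, these changes let a single execution fan out to several writers. A minimal usage sketch, assuming a hypothetical 'fake' fetcher alias and that 'console' and 'csv' writer plugins are installed; the query text is illustrative:

from garf_executors import api_executor

context = api_executor.ApiExecutionContext(
  writer=['console', 'csv'],  # assumed writer aliases
)
query_executor = api_executor.ApiQueryExecutor.from_fetcher_alias(source='fake')
# The report is written once per configured writer; the last writer's result
# is returned for backward compatibility.
result = query_executor.execute(
  query='SELECT campaign.id AS campaign_id FROM campaign',
  title='campaigns',
  context=context,
)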

{garf_executors-0.1.5 → garf_executors-0.2.0}/garf_executors/bq_executor.py
@@ -15,6 +15,7 @@
 
 from __future__ import annotations
 
+import contextlib
 import os
 
 try:
@@ -103,20 +104,27 @@ class BigQueryExecutor(executor.Executor, query_editor.TemplateProcessorMixin):
       else:
         results = report.GarfReport()
       if context.writer and results:
-        writer_client = context.writer_client
-        logger.debug(
-          'Start writing data for query %s via %s writer',
-          title,
-          type(writer_client),
-        )
-        writing_result = writer_client.write(results, title)
-        logger.debug(
-          'Finish writing data for query %s via %s writer',
-          title,
-          type(writer_client),
-        )
-        logger.info('%s executed successfully', title)
-        return writing_result
+        writer_clients = context.writer_clients
+        if not writer_clients:
+          logger.warning('No writers configured, skipping write operation')
+        else:
+          writing_results = []
+          for writer_client in writer_clients:
+            logger.debug(
+              'Start writing data for query %s via %s writer',
+              title,
+              type(writer_client),
+            )
+            writing_result = writer_client.write(results, title)
+            logger.debug(
+              'Finish writing data for query %s via %s writer',
+              title,
+              type(writer_client),
+            )
+            writing_results.append(writing_result)
+          logger.info('%s executed successfully', title)
+          # Return the last writer's result for backward compatibility
+          return writing_results[-1] if writing_results else None
       return results
     except google_cloud_exceptions.GoogleCloudError as e:
       raise BigQueryExecutorError(e) from e
@@ -139,8 +147,9 @@ class BigQueryExecutor(executor.Executor, query_editor.TemplateProcessorMixin):
     except google_cloud_exceptions.NotFound:
       bq_dataset = bigquery.Dataset(dataset_id)
       bq_dataset.location = self.location
-      self.client.create_dataset(bq_dataset, timeout=30)
-      logger.info('Created new dataset %s', dataset_id)
+      with contextlib.suppress(google_cloud_exceptions.Conflict):
+        self.client.create_dataset(bq_dataset, timeout=30)
+        logger.info('Created new dataset %s', dataset_id)
 
 
 def extract_datasets(macros: dict | None) -> list[str]:
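
The dataset bootstrap now tolerates a race where another worker creates the dataset between the NotFound check and create_dataset. The same pattern in isolation, as a sketch with hypothetical project and dataset ids:

import contextlib

from google.cloud import bigquery
from google.cloud import exceptions as google_cloud_exceptions

client = bigquery.Client()
bq_dataset = bigquery.Dataset('my-project.my_dataset')  # hypothetical ids

# A concurrent 'already exists' Conflict is ignored instead of failing the run.
with contextlib.suppress(google_cloud_exceptions.Conflict):
  client.create_dataset(bq_dataset, timeout=30)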

{garf_executors-0.1.5 → garf_executors-0.2.0}/garf_executors/entrypoints/cli.py
@@ -88,46 +88,44 @@ def main():
       raise exceptions.GarfExecutorError(
         f'No execution context found for source {args.source} in {config_file}'
       )
-    query_executor = garf_executors.setup_executor(
-      source=args.source,
-      fetcher_parameters=context.fetcher_parameters,
-      enable_cache=args.enable_cache,
-      cache_ttl_seconds=args.cache_ttl_seconds,
-    )
-    batch = {query: reader_client.read(query) for query in args.query}
-    query_executor.execute_batch(batch, context, args.parallel_threshold)
   else:
-    extra_parameters = utils.ParamsParser(
-      ['source', args.output, 'macro', 'template']
-    ).parse(kwargs)
+    param_types = ['source', 'macro', 'template']
+    outputs = args.output.split(',')
+    extra_parameters = utils.ParamsParser([*param_types, *outputs]).parse(
+      kwargs
+    )
     source_parameters = extra_parameters.get('source', {})
+    writer_parameters = {}
+    for output in outputs:
+      writer_parameters.update(extra_parameters.get(output))
 
     context = garf_executors.api_executor.ApiExecutionContext(
       query_parameters={
         'macro': extra_parameters.get('macro'),
         'template': extra_parameters.get('template'),
       },
-      writer=args.output,
-      writer_parameters=extra_parameters.get(args.output),
+      writer=outputs,
+      writer_parameters=writer_parameters,
       fetcher_parameters=source_parameters,
     )
-    query_executor = garf_executors.setup_executor(
-      source=args.source,
-      fetcher_parameters=context.fetcher_parameters,
-      enable_cache=args.enable_cache,
-      cache_ttl_seconds=args.cache_ttl_seconds,
-    )
-    if args.parallel_queries and len(args.query) > 1:
-      logger.info('Running queries in parallel')
-      batch = {query: reader_client.read(query) for query in args.query}
-      query_executor.execute_batch(batch, context, args.parallel_threshold)
-    else:
-      if len(args.query) > 1:
-        logger.info('Running queries sequentially')
-      for query in args.query:
-        query_executor.execute(
-          query=reader_client.read(query), title=query, context=context
-        )
+  query_executor = garf_executors.setup_executor(
+    source=args.source,
+    fetcher_parameters=context.fetcher_parameters,
+    enable_cache=args.enable_cache,
+    cache_ttl_seconds=args.cache_ttl_seconds,
+  )
+  batch = {query: reader_client.read(query) for query in args.query}
+  if args.parallel_queries and len(args.query) > 1:
+    logger.info('Running queries in parallel')
+    batch = {query: reader_client.read(query) for query in args.query}
+    query_executor.execute_batch(batch, context, args.parallel_threshold)
+  else:
+    if len(args.query) > 1:
+      logger.info('Running queries sequentially')
+    for query in args.query:
+      query_executor.execute(
+        query=reader_client.read(query), title=query, context=context
+      )
   logging.shutdown()
 
 
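
With this change --output accepts a comma-separated list, and per-writer options are parsed under each writer's own prefix before being merged into one writer_parameters dict. A sketch of the context the CLI ends up building for a hypothetical console,csv run (parameter values illustrative):

import garf_executors

outputs = 'console,csv'.split(',')  # what a comma-separated --output produces
writer_parameters = {}  # merged from each writer's parsed CLI options

context = garf_executors.api_executor.ApiExecutionContext(
  query_parameters={'macro': {}, 'template': {}},
  writer=outputs,
  writer_parameters=writer_parameters,
  fetcher_parameters={},
)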

garf_executors-0.2.0/garf_executors/entrypoints/grpc_server.py (new file)
@@ -0,0 +1,68 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""gRPC endpoint for garf."""
+
+import argparse
+import logging
+from concurrent import futures
+
+import grpc
+from google.protobuf.json_format import MessageToDict
+from grpc_reflection.v1alpha import reflection
+
+import garf_executors
+from garf_executors import garf_pb2, garf_pb2_grpc
+from garf_executors.entrypoints.tracer import initialize_tracer
+
+
+class GarfService(garf_pb2_grpc.GarfService):
+  def Execute(self, request, context):
+    query_executor = garf_executors.setup_executor(
+      request.source, request.context.fetcher_parameters
+    )
+    execution_context = garf_executors.execution_context.ExecutionContext(
+      **MessageToDict(request.context, preserving_proto_field_name=True)
+    )
+    result = query_executor.execute(
+      query=request.query,
+      title=request.title,
+      context=execution_context,
+    )
+    return garf_pb2.ExecuteResponse(results=[result])
+
+
+if __name__ == '__main__':
+  parser = argparse.ArgumentParser()
+  parser.add_argument('--port', dest='port', default=50051, type=int)
+  parser.add_argument(
+    '--parallel-threshold', dest='parallel_threshold', default=10, type=int
+  )
+  args, _ = parser.parse_known_args()
+  initialize_tracer()
+  server = grpc.server(
+    futures.ThreadPoolExecutor(max_workers=args.parallel_threshold)
+  )
+
+  service = GarfService()
+  garf_pb2_grpc.add_GarfServiceServicer_to_server(service, server)
+  SERVICE_NAMES = (
+    garf_pb2.DESCRIPTOR.services_by_name['GarfService'].full_name,
+    reflection.SERVICE_NAME,
+  )
+  reflection.enable_server_reflection(SERVICE_NAMES, server)
+  server.add_insecure_port(f'[::]:{args.port}')
+  server.start()
+  logging.info('Garf service started, listening on port %d', 50051)
+  server.wait_for_termination()
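
A minimal client-side sketch against the new endpoint, assuming the server above runs locally on the default port and a fetcher for the hypothetical 'fake' alias is installed; the query text is illustrative:

import grpc

from garf_executors import garf_pb2, garf_pb2_grpc

channel = grpc.insecure_channel('localhost:50051')
stub = garf_pb2_grpc.GarfServiceStub(channel)

request = garf_pb2.ExecuteRequest(
  source='fake',  # hypothetical fetcher alias
  title='campaigns',
  query='SELECT campaign.id AS campaign_id FROM campaign',
  context=garf_pb2.ExecutionContext(writer='console'),
)
response = stub.Execute(request)
print(response.results)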

{garf_executors-0.1.5 → garf_executors-0.2.0}/garf_executors/execution_context.py
@@ -35,7 +35,7 @@ class ExecutionContext(pydantic.BaseModel):
   Attributes:
     query_parameters: Parameters to dynamically change query text.
    fetcher_parameters: Parameters to specify fetching setup.
-    writer: Type of writer to use.
+    writer: Type of writer to use. Can be a single writer string or list of writers.
     writer_parameters: Optional parameters to setup writer.
   """
 
@@ -45,7 +45,7 @@
   fetcher_parameters: dict[str, str | bool | int | list[str | int]] | None = (
     pydantic.Field(default_factory=dict)
   )
-  writer: str | None = None
+  writer: str | list[str] | None = None
   writer_parameters: dict[str, str] | None = pydantic.Field(
     default_factory=dict
   )
@@ -75,9 +75,42 @@
 
   @property
   def writer_client(self) -> abs_writer.AbsWriter:
-    writer_client = writer.create_writer(self.writer, **self.writer_parameters)
-    if self.writer == 'bq':
+    """Returns single writer client."""
+    if isinstance(self.writer, list) and len(self.writer) > 0:
+      writer_type = self.writer[0]
+    else:
+      writer_type = self.writer
+
+    writer_params = self.writer_parameters or {}
+
+    if not writer_type:
+      raise ValueError('No writer specified')
+
+    writer_client = writer.create_writer(writer_type, **writer_params)
+    if writer_type == 'bq':
       _ = writer_client.create_or_get_dataset()
-    if self.writer == 'sheet':
+    if writer_type == 'sheet':
       writer_client.init_client()
     return writer_client
+
+  @property
+  def writer_clients(self) -> list[abs_writer.AbsWriter]:
+    """Returns list of writer clients."""
+    if not self.writer:
+      return []
+
+    # Convert single writer to list for uniform processing
+    writers_to_use = (
+      self.writer if isinstance(self.writer, list) else [self.writer]
+    )
+    writer_params = self.writer_parameters or {}
+
+    clients = []
+    for writer_type in writers_to_use:
+      writer_client = writer.create_writer(writer_type, **writer_params)
+      if writer_type == 'bq':
+        _ = writer_client.create_or_get_dataset()
+      if writer_type == 'sheet':
+        writer_client.init_client()
+      clients.append(writer_client)
+    return clients
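
In short, writer_clients instantiates one client per configured writer, while the reworked writer_client keeps the old single-client contract by falling back to the first list entry. A sketch, assuming 'console' and 'csv' writer plugins are available:

from garf_executors import execution_context

context = execution_context.ExecutionContext(
  writer=['console', 'csv'],  # assumed writer aliases
)
clients = context.writer_clients       # one AbsWriter per configured writer
legacy_client = context.writer_client  # first writer only, for older callers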

{garf_executors-0.1.5 → garf_executors-0.2.0}/garf_executors/executor.py
@@ -15,7 +15,10 @@
 """Defines common functionality between executors."""
 
 import asyncio
+import inspect
+from typing import Optional
 
+from garf_core import report_fetcher
 from opentelemetry import trace
 
 from garf_executors import execution_context
@@ -25,6 +28,14 @@ from garf_executors.telemetry import tracer
 class Executor:
   """Defines common functionality between executors."""
 
+  def __init__(
+    self,
+    preprocessors: Optional[dict[str, report_fetcher.Processor]] = None,
+    postprocessors: Optional[dict[str, report_fetcher.Processor]] = None,
+  ) -> None:
+    self.preprocessors = preprocessors or {}
+    self.postprocessors = postprocessors or {}
+
   @tracer.start_as_current_span('api.execute_batch')
   def execute_batch(
     self,
@@ -34,6 +45,9 @@ class Executor:
   ) -> list[str]:
     """Executes batch of queries for a common context.
 
+    If an executor has any pre/post processors, executes them first while
+    modifying the context.
+
     Args:
       batch: Mapping between query_title and its text.
       context: Execution context.
@@ -44,11 +58,19 @@
     """
     span = trace.get_current_span()
     span.set_attribute('api.parallel_threshold', parallel_threshold)
-    return asyncio.run(
+    _handle_processors(processors=self.preprocessors, context=context)
+    results = asyncio.run(
       self._run(
         batch=batch, context=context, parallel_threshold=parallel_threshold
       )
     )
+    _handle_processors(processors=self.postprocessors, context=context)
+    return results
+
+  def add_preprocessor(
+    self, preprocessors: dict[str, report_fetcher.Processor]
+  ) -> None:
+    self.preprocessors.update(preprocessors)
 
   async def aexecute(
     self,
@@ -85,3 +107,18 @@
       for title, query in batch.items()
     ]
     return await asyncio.gather(*(run_with_semaphore(task) for task in tasks))
+
+
+def _handle_processors(
+  processors: dict[str, report_fetcher.Processor],
+  context: execution_context.ExecutionContext,
+) -> None:
+  for k, processor in processors.items():
+    processor_signature = list(inspect.signature(processor).parameters.keys())
+    if k in context.fetcher_parameters:
+      processor_parameters = {
+        k: v
+        for k, v in context.fetcher_parameters.items()
+        if k in processor_signature
+      }
+      context.fetcher_parameters[k] = processor(**processor_parameters)
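
Pre/post processors are callables keyed by a fetcher-parameter name; around a batch run, _handle_processors calls each one with the matching entries of context.fetcher_parameters and stores the result back under the same key. A hedged sketch with a made-up 'account' preprocessor (in practice this is wired through a concrete executor such as ApiQueryExecutor):

from garf_executors import execution_context, executor

def expand_accounts(account: str) -> list[str]:
  # Hypothetical preprocessor: expand a seed account id into child ids.
  return [f'{account}-1', f'{account}-2']

batch_executor = executor.Executor()
batch_executor.add_preprocessor({'account': expand_accounts})

context = execution_context.ExecutionContext(
  fetcher_parameters={'account': '123'},
)
# During execute_batch, context.fetcher_parameters['account'] would be
# replaced by expand_accounts(account='123') before any query runs.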

{garf_executors-0.1.5 → garf_executors-0.2.0}/garf_executors/fetchers.py
@@ -13,14 +13,16 @@
 # limitations under the License.
 
 import inspect
+import logging
 import sys
 from importlib.metadata import entry_points
 
 from garf_core import report_fetcher
-from opentelemetry import trace
 
 from garf_executors.telemetry import tracer
 
+logger = logging.getLogger(name='garf_executors.fetchers')
+
 
 @tracer.start_as_current_span('find_fetchers')
 def find_fetchers() -> set[str]:
@@ -57,8 +59,10 @@ def get_report_fetcher(source: str) -> type[report_fetcher.ApiReportFetcher]:
         obj, report_fetcher.ApiReportFetcher
       ):
         return getattr(fetcher_module, name)
-    except ModuleNotFoundError:
-      continue
+    except ModuleNotFoundError as e:
+      raise report_fetcher.ApiReportFetcherError(
+        f'Failed to load fetcher for source {source}, reason: {e}'
+      )
   raise report_fetcher.ApiReportFetcherError(
     f'No fetcher available for the source "{source}"'
   )
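
The behavioural change here: a fetcher whose module is registered but fails to import (for example, a missing optional dependency) now raises immediately instead of being silently skipped. A small handling sketch with a hypothetical source alias:

from garf_core import report_fetcher

from garf_executors import fetchers

try:
  fetcher_class = fetchers.get_report_fetcher('fake')  # hypothetical alias
except report_fetcher.ApiReportFetcherError as e:
  # Raised both when no fetcher is registered for the alias and when the
  # registered fetcher module cannot be imported.
  print(f'Cannot load fetcher: {e}')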

garf_executors-0.2.0/garf_executors/garf_pb2.py (new file)
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
+# source: garf.proto
+# Protobuf Python Version: 6.31.1
+"""Generated protocol buffer code."""
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+_runtime_version.ValidateProtobufRuntimeVersion(
+    _runtime_version.Domain.PUBLIC,
+    6,
+    31,
+    1,
+    '',
+    'garf.proto'
+)
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\ngarf.proto\x12\x04garf\x1a\x1cgoogle/protobuf/struct.proto\"g\n\x0e\x45xecuteRequest\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\r\n\x05title\x18\x02 \x01(\t\x12\r\n\x05query\x18\x03 \x01(\t\x12\'\n\x07\x63ontext\x18\x04 \x01(\x0b\x32\x16.garf.ExecutionContext\"\xbc\x01\n\x10\x45xecutionContext\x12/\n\x10query_parameters\x18\x01 \x01(\x0b\x32\x15.garf.QueryParameters\x12\x33\n\x12\x66\x65tcher_parameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x0e\n\x06writer\x18\x03 \x01(\t\x12\x32\n\x11writer_parameters\x18\x04 \x01(\x0b\x32\x17.google.protobuf.Struct\"d\n\x0fQueryParameters\x12&\n\x05macro\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\x12)\n\x08template\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\"\"\n\x0f\x45xecuteResponse\x12\x0f\n\x07results\x18\x01 \x03(\t2G\n\x0bGarfService\x12\x38\n\x07\x45xecute\x12\x14.garf.ExecuteRequest\x1a\x15.garf.ExecuteResponse\"\x00\x62\x06proto3')
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'garf_pb2', _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+  DESCRIPTOR._loaded_options = None
+  _globals['_EXECUTEREQUEST']._serialized_start=50
+  _globals['_EXECUTEREQUEST']._serialized_end=153
+  _globals['_EXECUTIONCONTEXT']._serialized_start=156
+  _globals['_EXECUTIONCONTEXT']._serialized_end=344
+  _globals['_QUERYPARAMETERS']._serialized_start=346
+  _globals['_QUERYPARAMETERS']._serialized_end=446
+  _globals['_EXECUTERESPONSE']._serialized_start=448
+  _globals['_EXECUTERESPONSE']._serialized_end=482
+  _globals['_GARFSERVICE']._serialized_start=484
+  _globals['_GARFSERVICE']._serialized_end=555
+# @@protoc_insertion_point(module_scope)

garf_executors-0.2.0/garf_executors/garf_pb2_grpc.py (new file)
@@ -0,0 +1,97 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+"""Client and server classes corresponding to protobuf-defined services."""
+import grpc
+import warnings
+
+from . import garf_pb2 as garf__pb2
+
+GRPC_GENERATED_VERSION = '1.75.0'
+GRPC_VERSION = grpc.__version__
+_version_not_supported = False
+
+try:
+    from grpc._utilities import first_version_is_lower
+    _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION)
+except ImportError:
+    _version_not_supported = True
+
+if _version_not_supported:
+    raise RuntimeError(
+        f'The grpc package installed is at version {GRPC_VERSION},'
+        + f' but the generated code in garf_pb2_grpc.py depends on'
+        + f' grpcio>={GRPC_GENERATED_VERSION}.'
+        + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}'
+        + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.'
+    )
+
+
+class GarfServiceStub(object):
+    """Missing associated documentation comment in .proto file."""
+
+    def __init__(self, channel):
+        """Constructor.
+
+        Args:
+            channel: A grpc.Channel.
+        """
+        self.Execute = channel.unary_unary(
+                '/garf.GarfService/Execute',
+                request_serializer=garf__pb2.ExecuteRequest.SerializeToString,
+                response_deserializer=garf__pb2.ExecuteResponse.FromString,
+                _registered_method=True)
+
+
+class GarfServiceServicer(object):
+    """Missing associated documentation comment in .proto file."""
+
+    def Execute(self, request, context):
+        """Missing associated documentation comment in .proto file."""
+        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+        context.set_details('Method not implemented!')
+        raise NotImplementedError('Method not implemented!')
+
+
+def add_GarfServiceServicer_to_server(servicer, server):
+    rpc_method_handlers = {
+            'Execute': grpc.unary_unary_rpc_method_handler(
+                    servicer.Execute,
+                    request_deserializer=garf__pb2.ExecuteRequest.FromString,
+                    response_serializer=garf__pb2.ExecuteResponse.SerializeToString,
+            ),
+    }
+    generic_handler = grpc.method_handlers_generic_handler(
+            'garf.GarfService', rpc_method_handlers)
+    server.add_generic_rpc_handlers((generic_handler,))
+    server.add_registered_method_handlers('garf.GarfService', rpc_method_handlers)
+
+
+# This class is part of an EXPERIMENTAL API.
+class GarfService(object):
+    """Missing associated documentation comment in .proto file."""
+
+    @staticmethod
+    def Execute(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(
+            request,
+            target,
+            '/garf.GarfService/Execute',
+            garf__pb2.ExecuteRequest.SerializeToString,
+            garf__pb2.ExecuteResponse.FromString,
+            options,
+            channel_credentials,
+            insecure,
+            call_credentials,
+            compression,
+            wait_for_ready,
+            timeout,
+            metadata,
+            _registered_method=True)

{garf_executors-0.1.5 → garf_executors-0.2.0}/garf_executors/sql_executor.py
@@ -106,19 +106,26 @@ class SqlAlchemyQueryExecutor(
     finally:
       conn.connection.execute(f'DROP TABLE {temp_table_name}')
     if context.writer and results:
-      writer_client = context.writer_client
-      logger.debug(
-        'Start writing data for query %s via %s writer',
-        title,
-        type(writer_client),
-      )
-      writing_result = writer_client.write(results, title)
-      logger.debug(
-        'Finish writing data for query %s via %s writer',
-        title,
-        type(writer_client),
-      )
-      logger.info('%s executed successfully', title)
-      return writing_result
+      writer_clients = context.writer_clients
+      if not writer_clients:
+        logger.warning('No writers configured, skipping write operation')
+      else:
+        writing_results = []
+        for writer_client in writer_clients:
+          logger.debug(
+            'Start writing data for query %s via %s writer',
+            title,
+            type(writer_client),
+          )
+          writing_result = writer_client.write(results, title)
+          logger.debug(
+            'Finish writing data for query %s via %s writer',
+            title,
+            type(writer_client),
+          )
+          writing_results.append(writing_result)
+        logger.info('%s executed successfully', title)
+        # Return the last writer's result for backward compatibility
+        return writing_results[-1] if writing_results else None
     span.set_attribute('execute.num_results', len(results))
     return results

{garf_executors-0.1.5 → garf_executors-0.2.0}/garf_executors.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: garf-executors
-Version: 0.1.5
+Version: 0.2.0
 Summary: Executes queries against API and writes data to local/remote storage.
 Author-email: "Google Inc. (gTech gPS CSE team)" <no-reply@google.com>, Andrei Markin <andrey.markin.ppc@gmail.com>
 License: Apache 2.0

{garf_executors-0.1.5 → garf_executors-0.2.0}/garf_executors.egg-info/SOURCES.txt
@@ -8,6 +8,8 @@ garf_executors/exceptions.py
 garf_executors/execution_context.py
 garf_executors/executor.py
 garf_executors/fetchers.py
+garf_executors/garf_pb2.py
+garf_executors/garf_pb2_grpc.py
 garf_executors/sql_executor.py
 garf_executors/telemetry.py
 garf_executors.egg-info/PKG-INFO
@@ -18,6 +20,7 @@ garf_executors.egg-info/requires.txt
 garf_executors.egg-info/top_level.txt
 garf_executors/entrypoints/__init__.py
 garf_executors/entrypoints/cli.py
+garf_executors/entrypoints/grpc_server.py
 garf_executors/entrypoints/server.py
 garf_executors/entrypoints/tracer.py
 garf_executors/entrypoints/utils.py