tilebox-datasets 0.36.0__py3-none-any.whl → 0.37.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package versions exactly as they appear in the public registry.
@@ -18,14 +18,14 @@ from tilebox.datasets.aio.pagination import (
  )
  from tilebox.datasets.data.collection import CollectionInfo
  from tilebox.datasets.data.data_access import QueryFilters, SpatialFilter, SpatialFilterLike
- from tilebox.datasets.data.datapoint import DatapointInterval, DatapointIntervalLike, DatapointPage, QueryResultPage
+ from tilebox.datasets.data.datapoint import DatapointInterval, DatapointIntervalLike, QueryResultPage
  from tilebox.datasets.data.datasets import Dataset
  from tilebox.datasets.data.pagination import Pagination
  from tilebox.datasets.data.time_interval import TimeInterval, TimeIntervalLike
  from tilebox.datasets.data.uuid import as_uuid
  from tilebox.datasets.message_pool import get_message_type
  from tilebox.datasets.progress import ProgressCallback
- from tilebox.datasets.protobuf_conversion.protobuf_xarray import MessageToXarrayConverter, TimeseriesToXarrayConverter
+ from tilebox.datasets.protobuf_conversion.protobuf_xarray import MessageToXarrayConverter
  from tilebox.datasets.protobuf_conversion.to_protobuf import (
      DatapointIDs,
      IngestionData,
@@ -145,7 +145,6 @@ class CollectionClient:
          info: CollectionInfo,
      ) -> None:
          self._dataset = dataset
-         self._use_legacy_api = dataset._dataset.is_legacy_type
          self._collection = info.collection
          self._info: CollectionInfo | None = info

@@ -208,9 +207,6 @@ class CollectionClient:
          Returns:
              The datapoint as an xarray dataset
          """
-         if self._use_legacy_api:  # remove this once all datasets are fully migrated to the new endpoints
-             return await self._find_legacy(str(datapoint_id), skip_data)
-
          try:
              datapoint = await self._dataset._service.query_by_id(
                  [self._collection.id], as_uuid(datapoint_id), skip_data
@@ -225,21 +221,7 @@ class CollectionClient:

          converter = MessageToXarrayConverter(initial_capacity=1)
          converter.convert(data)
-         return converter.finalize("time").isel(time=0)
-
-     async def _find_legacy(self, datapoint_id: str, skip_data: bool = False) -> xr.Dataset:
-         try:
-             datapoint = await self._dataset._service.get_datapoint_by_id(
-                 str(self._collection.id), datapoint_id, skip_data
-             )
-         except ArgumentError:
-             raise ValueError(f"Invalid datapoint id: {datapoint_id} is not a valid UUID") from None
-         except NotFoundError:
-             raise NotFoundError(f"No such datapoint {datapoint_id}") from None
-
-         converter = TimeseriesToXarrayConverter(initial_capacity=1)
-         converter.convert(datapoint)
-         return converter.finalize().isel(time=0)
+         return converter.finalize("time", skip_empty_fields=skip_data).isel(time=0)

      async def _find_interval(
          self,
@@ -262,11 +244,6 @@ class CollectionClient:
          Returns:
              The datapoints in the given interval as an xarray dataset
          """
-         if self._use_legacy_api:  # remove this once all datasets are fully migrated to the new endpoints
-             return await self._find_interval_legacy(
-                 datapoint_id_interval, end_inclusive, skip_data=skip_data, show_progress=show_progress
-             )
-
          filters = QueryFilters(
              temporal_extent=DatapointInterval.parse(datapoint_id_interval, end_inclusive=end_inclusive)
          )
@@ -280,30 +257,7 @@ class CollectionClient:
          if show_progress:
              pages = with_progressbar(pages, f"Fetching {self._dataset.name}")

-         return await _convert_to_dataset(pages)
-
-     async def _find_interval_legacy(
-         self,
-         datapoint_id_interval: DatapointIntervalLike,
-         end_inclusive: bool = True,
-         *,
-         skip_data: bool = False,
-         show_progress: bool = False,
-     ) -> xr.Dataset:
-         datapoint_interval = DatapointInterval.parse(datapoint_id_interval, end_inclusive=end_inclusive)
-
-         async def request(page: PaginationProtocol) -> DatapointPage:
-             query_page = Pagination(page.limit, page.starting_after)
-             return await self._dataset._service.get_dataset_for_datapoint_interval(
-                 str(self._collection.id), datapoint_interval, skip_data, False, query_page
-             )
-
-         initial_page = Pagination()
-         pages = paginated_request(request, initial_page)
-         if show_progress:
-             pages = with_progressbar(pages, f"Fetching {self._dataset.name}")
-
-         return await _convert_to_dataset_legacy(pages)
+         return await _convert_to_dataset(pages, skip_empty_fields=skip_data)

      async def load(
          self,
@@ -336,8 +290,11 @@ class CollectionClient:
          Returns:
              Matching datapoints in the given temporal extent as an xarray dataset
          """
-         if self._use_legacy_api:  # remove this once all datasets are fully migrated to the new endpoints
-             return await self._load_legacy(temporal_extent, skip_data=skip_data, show_progress=show_progress)
+         warn(
+             "collection.load(interval) is deprecated. Please use collection.query(temporal_extent=interval) instead.",
+             DeprecationWarning,
+             stacklevel=2,
+         )

          return await self.query(temporal_extent=temporal_extent, skip_data=skip_data, show_progress=show_progress)

@@ -380,14 +337,11 @@ class CollectionClient:
          Returns:
              Matching datapoints in the given temporal and spatial extent as an xarray dataset
          """
-         if self._use_legacy_api:
-             raise ValueError("Querying is not supported for this dataset. Please use load() instead.")
-
          if temporal_extent is None:
              raise ValueError("A temporal_extent for your query must be specified")

          pages = self._iter_pages(temporal_extent, spatial_extent, skip_data, show_progress=show_progress)
-         return await _convert_to_dataset(pages)
+         return await _convert_to_dataset(pages, skip_empty_fields=skip_data)

      async def _iter_pages(
          self,
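
The two hunks above complete the client-side migration from `load()` to `query()`: `load()` now emits a `DeprecationWarning` and delegates, and `query()` no longer rejects datasets that previously fell back to the legacy API. A minimal migration sketch; how `collection` is obtained is elided, and the time interval is a hypothetical example value:

```python
# Migration sketch for the load() -> query() deprecation in 0.37.0.
# `collection` stands for any CollectionClient from the async client;
# the time interval below is a hypothetical example value.

async def fetch(collection):
    # 0.36.0 style: still works in 0.37.0, but now raises a DeprecationWarning
    data = await collection.load(("2024-01-01", "2024-02-01"), show_progress=True)

    # 0.37.0 style: the equivalent query() call
    data = await collection.query(
        temporal_extent=("2024-01-01", "2024-02-01"),
        show_progress=True,
    )
    return data
```
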
@@ -400,7 +354,7 @@ class CollectionClient:
          time_interval = TimeInterval.parse(temporal_extent)
          filters = QueryFilters(time_interval, SpatialFilter.parse(spatial_extent) if spatial_extent else None)

-         request = partial(self._load_page, filters, skip_data)
+         request = partial(self._query_page, filters, skip_data)

          initial_page = Pagination(limit=page_size)
          pages = paginated_request(request, initial_page)
@@ -414,60 +368,12 @@ class CollectionClient:
          async for page in pages:
              yield page

-     async def _load_page(
+     async def _query_page(
          self, filters: QueryFilters, skip_data: bool, page: PaginationProtocol | None = None
      ) -> QueryResultPage:
          query_page = Pagination(page.limit, page.starting_after) if page else Pagination()
          return await self._dataset._service.query([self._collection.id], filters, skip_data, query_page)

-     async def _load_legacy(
-         self,
-         time_or_interval: TimeIntervalLike,
-         *,
-         skip_data: bool = False,
-         show_progress: bool | ProgressCallback = False,
-     ) -> xr.Dataset:
-         pages = self._iter_pages_legacy(time_or_interval, skip_data, show_progress=show_progress)
-         return await _convert_to_dataset_legacy(pages)
-
-     async def _iter_pages_legacy(
-         self,
-         time_or_interval: TimeIntervalLike,
-         skip_data: bool = False,
-         skip_meta: bool = False,
-         show_progress: bool | ProgressCallback = False,
-         page_size: int | None = None,
-     ) -> AsyncIterator[DatapointPage]:
-         time_interval = TimeInterval.parse(time_or_interval)
-
-         request = partial(self._load_page_legacy, time_interval, skip_data, skip_meta)
-
-         initial_page = Pagination(limit=page_size)
-         pages = paginated_request(request, initial_page)
-
-         if callable(show_progress):
-             if skip_meta:
-                 raise ValueError("Progress callback requires datapoint metadata, but skip_meta is True")
-             else:
-                 pages = with_time_progress_callback(pages, time_interval, show_progress)
-         elif show_progress:
-             message = f"Fetching {self._dataset.name}"
-             if skip_meta:  # without metadata we can't estimate progress based on event time (since it is not returned)
-                 pages = with_progressbar(pages, message)
-             else:
-                 pages = with_time_progressbar(pages, time_interval, message)
-
-         async for page in pages:
-             yield page
-
-     async def _load_page_legacy(
-         self, time_interval: TimeInterval, skip_data: bool, skip_meta: bool, page: PaginationProtocol | None = None
-     ) -> DatapointPage:
-         query_page = Pagination(page.limit, page.starting_after) if page else Pagination()
-         return await self._dataset._service.get_dataset_for_time_interval(
-             str(self._collection.id), time_interval, skip_data, skip_meta, query_page
-         )
-
      async def ingest(
          self,
          data: IngestionData,
@@ -493,9 +399,6 @@ class CollectionClient:
          Returns:
              List of datapoint ids that were ingested.
          """
-         if self._use_legacy_api:  # remove this once all datasets are fully migrated to the new endpoints
-             raise ValueError("Ingestion is not supported for this dataset. Please create a new dataset.")
-
          message_type = get_message_type(self._dataset._dataset.type.type_url)
          messages = marshal_messages(
              to_messages(data, message_type, required_fields=["time"], ignore_fields=["id", "ingestion_time"])
@@ -564,7 +467,7 @@ class CollectionClient:
          return num_deleted


- async def _convert_to_dataset(pages: AsyncIterator[QueryResultPage]) -> xr.Dataset:
+ async def _convert_to_dataset(pages: AsyncIterator[QueryResultPage], skip_empty_fields: bool = False) -> xr.Dataset:
      """
      Convert an async iterator of QueryResultPages into a single xarray Dataset

@@ -572,6 +475,7 @@ async def _convert_to_dataset(pages: AsyncIterator[QueryResultPage]) -> xr.Datas

      Args:
          pages: Async iterator of QueryResultPages to convert
+         skip_empty_fields: Whether to omit fields from the output dataset in case no values are set

      Returns:
          The datapoints from the individual pages converted and combined into a single xarray dataset
@@ -588,26 +492,4 @@ async def _convert_to_dataset(pages: AsyncIterator[QueryResultPage]) -> xr.Datas
      # this would also account for the case where the server sends pages faster than we are converting
      # them to xarray
      await async_producer_consumer(pages, convert_page)
-     return converter.finalize("time")
-
-
- async def _convert_to_dataset_legacy(pages: AsyncIterator[DatapointPage]) -> xr.Dataset:
-     """
-     Convert an async iterator of DatasetIntervals (pages) into a single xarray Dataset
-
-     Parses each incoming page while in parallel already requesting and waiting for the next page from the server.
-
-     Args:
-         pages: Async iterator of DatasetIntervals (pages) to convert
-
-     Returns:
-         The datapoints from the individual pages converted and combined into a single xarray dataset
-     """
-
-     converter = TimeseriesToXarrayConverter()
-     # lets parse the incoming pages already while we wait for the next page from the server
-     # we solve this using a classic producer/consumer with a queue of pages for communication
-     # this would also account for the case where the server sends pages faster than we are converting
-     # them to xarray
-     await async_producer_consumer(pages, lambda page: converter.convert_all(page))
-     return converter.finalize()
+     return converter.finalize("time", skip_empty_fields=skip_empty_fields)
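
These hunks thread the `skip_data` flag through to the xarray conversion as `skip_empty_fields`, so fields for which no values were set are dropped from the result instead of being materialized. A behavior sketch; `collection` is any CollectionClient, and the field name is hypothetical:

```python
# Sketch of the skip_data behavior after 0.37.0; "payload_field" is a
# hypothetical field name used only for illustration.

async def ids_and_times_only(collection):
    meta = await collection.query(
        temporal_extent=("2024-01-01", "2024-02-01"),
        skip_data=True,  # now also forwarded as skip_empty_fields to finalize()
    )
    # Previously, skipped fields could still show up as empty variables in the
    # returned xarray.Dataset; now they are omitted entirely:
    assert "payload_field" not in meta.variables
    return meta
```
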
@@ -8,10 +8,10 @@ from tqdm.auto import tqdm
  from tilebox.datasets.data import (
      TimeInterval,
  )
- from tilebox.datasets.data.datapoint import DatapointPage, QueryResultPage
+ from tilebox.datasets.data.datapoint import QueryResultPage
  from tilebox.datasets.progress import ProgressCallback, TimeIntervalProgressBar

- ResultPage = TypeVar("ResultPage", bound=DatapointPage | QueryResultPage)
+ ResultPage = TypeVar("ResultPage", bound=QueryResultPage)


  async def with_progressbar(
@@ -81,14 +81,14 @@ async def with_time_progressbar(

      # we have more pages, so lets set up a progress bar
      actual_interval = TimeInterval(
-         start=max(interval.start, first_page.min_time()),
+         start=max(interval.start, first_page.min_time),
          end=min(interval.end, datetime.now(tz=timezone.utc)),
      )

      with TimeIntervalProgressBar(
          interval=actual_interval,
          description=progress_description,
-         initial_time=first_page.max_time(),
+         initial_time=first_page.max_time,
          actual_start_time=actual_start_time,
      ) as progress_bar:
          # provide download information for the first page
@@ -99,7 +99,7 @@ async def with_time_progressbar(
          async for page in paginated_request:  # now loop over the remaining pages
              now = time.time()
              if page.n_datapoints > 0:
-                 progress_bar.set_progress(page.max_time())
+                 progress_bar.set_progress(page.max_time)
              progress_bar.set_download_info(page.n_datapoints, page.byte_size, now - before)
              yield page
              before = now
@@ -134,17 +134,17 @@ async def with_time_progress_callback(

      # we have more pages, so lets set up a progress bar
      actual_interval = TimeInterval(
-         start=max(interval.start, first_page.min_time()),
+         start=max(interval.start, first_page.min_time),
          end=min(interval.end, datetime.now(tz=timezone.utc)),
      )

      total = (actual_interval.end - actual_interval.start).total_seconds()
      if first_page.n_datapoints > 0:
-         current = (first_page.max_time() - actual_interval.start).total_seconds()
+         current = (first_page.max_time - actual_interval.start).total_seconds()
          progress_callback(current / total)
      async for page in paginated_request:  # now loop over the remaining pages
          if page.n_datapoints > 0:
-             current = (page.max_time() - actual_interval.start).total_seconds()
+             current = (page.max_time - actual_interval.start).total_seconds()
              progress_callback(current / total)
          yield page

@@ -110,83 +110,6 @@ class RepeatedAny:
          return core_pb2.RepeatedAny(type_url=self.type_url, value=self.value)


- @dataclass(frozen=True)
- class Datapoint:
-     """Datapoint contains the metadata for a single data point."""
-
-     meta: core_pb2.DatapointMetadata  # we keep this as protobuf message to easily convert to/from xarray
-     data: AnyMessage
-
-     @classmethod
-     def from_message(
-         cls, datapoint: core_pb2.Datapoint
-     ) -> "Datapoint":  # lets use typing.Self once we require python >= 3.11
-         """Convert a Datapoint protobuf message to a Datapoint object."""
-         return cls(
-             meta=datapoint.meta,
-             data=AnyMessage.from_message(datapoint.data),
-         )
-
-     def to_message(self) -> core_pb2.Datapoint:
-         return core_pb2.Datapoint(
-             meta=self.meta,
-             data=self.data.to_message(),
-         )
-
-
- @dataclass(frozen=True)
- class Datapoints:
-     meta: list[core_pb2.DatapointMetadata]  # we keep this as protobuf message to easily convert to/from xarray
-     data: RepeatedAny
-
-     @classmethod
-     def from_message(cls, datapoints: core_pb2.Datapoints) -> "Datapoints":
-         return cls(meta=list(datapoints.meta), data=RepeatedAny.from_message(datapoints.data))
-
-     def to_message(self) -> core_pb2.Datapoints:
-         return core_pb2.Datapoints(meta=self.meta, data=self.data.to_message())
-
-
- @dataclass(frozen=True)
- class DatapointPage:
-     meta: list[core_pb2.DatapointMetadata]  # we keep this as protobuf message to easily convert to/from xarray
-     data: RepeatedAny
-     next_page: Pagination
-     byte_size: int = field(compare=False)
-
-     @classmethod
-     def from_message(cls, datapoints: core_pb2.DatapointPage) -> "DatapointPage":
-         return cls(
-             meta=list(datapoints.meta),
-             data=RepeatedAny.from_message(datapoints.data),
-             next_page=Pagination.from_legacy_message(datapoints.next_page),
-             byte_size=datapoints.ByteSize(),  # useful for progress bars
-         )
-
-     def to_message(self) -> core_pb2.DatapointPage:
-         return core_pb2.DatapointPage(
-             meta=self.meta,
-             data=self.data.to_message(),
-             next_page=self.next_page.to_legacy_message() if self.next_page else None,
-         )
-
-     @property
-     def n_datapoints(self) -> int:
-         return len(self.data.value)
-
-     def min_id(self) -> UUID:
-         return UUID(self.meta[0].id)
-
-     def max_id(self) -> UUID:
-         return UUID(self.meta[-1].id)
-
-     def min_time(self) -> datetime:
-         return timestamp_to_datetime(self.meta[0].event_time)
-
-     def max_time(self) -> datetime:
-         return timestamp_to_datetime(self.meta[-1].event_time)
-
-
  @dataclass(frozen=True)
  class QueryResultPage:
      data: RepeatedAny
@@ -211,15 +134,19 @@ class QueryResultPage:
      def n_datapoints(self) -> int:
          return len(self.data.value)

+     @property
      def min_id(self) -> UUID:
          return uuid_message_to_uuid(self._parse_message(0).id)

+     @property
      def max_id(self) -> UUID:
          return uuid_message_to_uuid(self._parse_message(-1).id)

+     @property
      def min_time(self) -> datetime:
          return timestamp_to_datetime(self._parse_message(0).time)

+     @property
      def max_time(self) -> datetime:
          return timestamp_to_datetime(self._parse_message(-1).time)

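
With the legacy `DatapointPage` dataclass removed, the `QueryResultPage` accessors above become properties, which is why the progress-bar call sites earlier in this diff change from `page.min_time()` to `page.min_time`. A call-site sketch (`page` stands for any QueryResultPage):

```python
# Call-site sketch for the 0.37.0 method -> property change on QueryResultPage;
# `page` stands for any QueryResultPage yielded by a paginated query.

def describe(page) -> str:
    # 0.36.0: page.min_time() / page.max_time() were method calls
    # 0.37.0: they are properties, accessed without parentheses
    return f"{page.n_datapoints} datapoints between {page.min_time} and {page.max_time}"
```
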
@@ -80,29 +80,6 @@ class Dataset:
              description=self.description,
          )

-     @property
-     def is_legacy_type(self) -> bool:
-         """
-         Check if the dataset type is a legacy type (without the meta fields in the proto message).
-
-         For those types, we use the legacy query API, until all datasets are fully migrated to the new endpoints.
-         """
-         # helper function to check if the type is a legacy type (without the meta fields in the proto message)
-         files = self.type.descriptor_set.file
-         if not files or len(files) != 1:
-             return False
-         file = files[0]
-         messages = file.message_type
-         if not messages or len(messages) != 1:
-             return False
-         message = messages[0]
-         fields = message.field
-         if not fields or len(fields) < 3:
-             return True  # new style types have at least three fields (time, id, ingestion_time)
-
-         has_new_type_fields = fields[0].name == "time" and fields[1].name == "id" and fields[2].name == "ingestion_time"
-         return not has_new_type_fields
-

  @dataclass
  class DatasetGroup:
@@ -19,15 +19,3 @@ class Pagination:

      def to_message(self) -> core_pb2.Pagination:
          return core_pb2.Pagination(limit=self.limit, starting_after=uuid_to_uuid_message(self.starting_after))
-
-     @classmethod
-     def from_legacy_message(cls, page: core_pb2.LegacyPagination | None) -> "Pagination":
-         if page is None:
-             return cls()
-         # convert falsish values (0 or empty string) to None
-         return cls(limit=page.limit or None, starting_after=UUID(page.starting_after) if page.starting_after else None)
-
-     def to_legacy_message(self) -> core_pb2.LegacyPagination:
-         return core_pb2.LegacyPagination(
-             limit=self.limit, starting_after=str(self.starting_after) if self.starting_after else None
-         )
@@ -1,18 +1,18 @@
  # allow the uuid module name which shadows the builtin:
  from uuid import UUID

- from tilebox.datasets.datasetsv1 import core_pb2
+ from tilebox.datasets.datasetsv1 import core_pb2, well_known_types_pb2

  _NIL_UUID = UUID(int=0)


- def uuid_message_to_uuid(uuid_message: core_pb2.ID) -> UUID:
+ def uuid_message_to_uuid(uuid_message: core_pb2.ID | well_known_types_pb2.UUID) -> UUID:
      if uuid_message.uuid == b"":
          return _NIL_UUID
      return UUID(bytes=uuid_message.uuid)


- def uuid_message_to_optional_uuid(uuid_message: core_pb2.ID | None) -> UUID | None:
+ def uuid_message_to_optional_uuid(uuid_message: core_pb2.ID | well_known_types_pb2.UUID | None) -> UUID | None:
      if uuid_message is None:
          return None
      if uuid_message.uuid == b"":
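
The UUID helpers are widened to accept either ID message type. A sketch of both accepted inputs; the module path and the example value are assumptions, and the diff only guarantees that both message types expose a `uuid` bytes field:

```python
# Sketch: uuid_message_to_uuid accepts both message types in 0.37.0.
# The import path of the helper and the example UUID value are assumptions.
from uuid import UUID

from tilebox.datasets.data.uuid import uuid_message_to_uuid
from tilebox.datasets.datasetsv1 import core_pb2, well_known_types_pb2

value = UUID("01902c37-5432-7a06-8ba4-9383b6969b96")  # hypothetical example value
assert uuid_message_to_uuid(core_pb2.ID(uuid=value.bytes)) == value
assert uuid_message_to_uuid(well_known_types_pb2.UUID(uuid=value.bytes)) == value
```
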
@@ -22,11 +22,11 @@ _runtime_version.ValidateProtobufRuntimeVersion(
  _sym_db = _symbol_database.Default()


- from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
  from tilebox.datasets.datasetsv1 import dataset_type_pb2 as datasets_dot_v1_dot_dataset__type__pb2
+ from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2


- DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16\x64\x61tasets/v1/core.proto\x12\x0b\x64\x61tasets.v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1e\x64\x61tasets/v1/dataset_type.proto\"\x18\n\x02ID\x12\x12\n\x04uuid\x18\x01 \x01(\x0cR\x04uuid\"\xce\x01\n\x0cTimeInterval\x12\x39\n\nstart_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\tstartTime\x12\x35\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\x07\x65ndTime\x12\'\n\x0fstart_exclusive\x18\x03 \x01(\x08R\x0estartExclusive\x12#\n\rend_inclusive\x18\x04 \x01(\x08R\x0c\x65ndInclusive\"\xb5\x01\n\x11\x44\x61tapointInterval\x12*\n\x08start_id\x18\x01 \x01(\x0b\x32\x0f.datasets.v1.IDR\x07startId\x12&\n\x06\x65nd_id\x18\x02 \x01(\x0b\x32\x0f.datasets.v1.IDR\x05\x65ndId\x12\'\n\x0fstart_exclusive\x18\x03 \x01(\x08R\x0estartExclusive\x12#\n\rend_inclusive\x18\x04 \x01(\x08R\x0c\x65ndInclusive\"v\n\x10LegacyPagination\x12\x19\n\x05limit\x18\x01 \x01(\x03H\x00R\x05limit\x88\x01\x01\x12*\n\x0estarting_after\x18\x02 \x01(\tH\x01R\rstartingAfter\x88\x01\x01\x42\x08\n\x06_limitB\x11\n\x0f_starting_after\"\x81\x01\n\nPagination\x12\x19\n\x05limit\x18\x01 \x01(\x03H\x00R\x05limit\x88\x01\x01\x12;\n\x0estarting_after\x18\x02 \x01(\x0b\x32\x0f.datasets.v1.IDH\x01R\rstartingAfter\x88\x01\x01\x42\x08\n\x06_limitB\x11\n\x0f_starting_after\"6\n\x03\x41ny\x12\x19\n\x08type_url\x18\x01 \x01(\tR\x07typeUrl\x12\x14\n\x05value\x18\x02 \x01(\x0cR\x05value\">\n\x0bRepeatedAny\x12\x19\n\x08type_url\x18\x01 \x01(\tR\x07typeUrl\x12\x14\n\x05value\x18\x02 \x03(\x0cR\x05value\"\xad\x01\n\x11\x44\x61tapointMetadata\x12\x39\n\nevent_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\teventTime\x12\x41\n\x0eingestion_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\ringestionTime\x12\x13\n\x02id\x18\x03 \x01(\tH\x00R\x02id\x88\x01\x01\x42\x05\n\x03_id\"n\n\nDatapoints\x12\x32\n\x04meta\x18\x01 \x03(\x0b\x32\x1e.datasets.v1.DatapointMetadataR\x04meta\x12,\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x18.datasets.v1.RepeatedAnyR\x04\x64\x61ta\"\xc0\x01\n\rDatapointPage\x12\x32\n\x04meta\x18\x01 \x03(\x0b\x32\x1e.datasets.v1.DatapointMetadataR\x04meta\x12,\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x18.datasets.v1.RepeatedAnyR\x04\x64\x61ta\x12?\n\tnext_page\x18\x03 \x01(\x0b\x32\x1d.datasets.v1.LegacyPaginationH\x00R\x08nextPage\x88\x01\x01\x42\x0c\n\n_next_page\"e\n\tDatapoint\x12\x32\n\x04meta\x18\x01 \x01(\x0b\x32\x1e.datasets.v1.DatapointMetadataR\x04meta\x12$\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x10.datasets.v1.AnyR\x04\x64\x61ta\"^\n\nCollection\x12\x1b\n\tlegacy_id\x18\x01 \x01(\tR\x08legacyId\x12\x12\n\x04name\x18\x02 \x01(\tR\x04name\x12\x1f\n\x02id\x18\x03 \x01(\x0b\x32\x0f.datasets.v1.IDR\x02id\"\xc3\x01\n\x0e\x43ollectionInfo\x12\x37\n\ncollection\x18\x01 \x01(\x0b\x32\x17.datasets.v1.CollectionR\ncollection\x12\x42\n\x0c\x61vailability\x18\x02 \x01(\x0b\x32\x19.datasets.v1.TimeIntervalH\x00R\x0c\x61vailability\x88\x01\x01\x12\x19\n\x05\x63ount\x18\x03 \x01(\x04H\x01R\x05\x63ount\x88\x01\x01\x42\x0f\n\r_availabilityB\x08\n\x06_count\"B\n\x0f\x43ollectionInfos\x12/\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32\x1b.datasets.v1.CollectionInfoR\x04\x64\x61ta\"\x96\x03\n\x07\x44\x61taset\x12\x1f\n\x02id\x18\x01 \x01(\x0b\x32\x0f.datasets.v1.IDR\x02id\x12*\n\x08group_id\x18\x02 \x01(\x0b\x32\x0f.datasets.v1.IDR\x07groupId\x12.\n\x04type\x18\x03 \x01(\x0b\x32\x1a.datasets.v1.AnnotatedTypeR\x04type\x12\x1b\n\tcode_name\x18\x04 \x01(\tR\x08\x63odeName\x12\x12\n\x04name\x18\x05 \x01(\tR\x04name\x12\x18\n\x07summary\x18\x06 
\x01(\tR\x07summary\x12\x12\n\x04icon\x18\x07 \x01(\tR\x04icon\x12 \n\x0b\x64\x65scription\x18\x08 \x01(\tR\x0b\x64\x65scription\x12@\n\x0bpermissions\x18\n \x03(\x0e\x32\x1e.datasets.v1.DatasetPermissionR\x0bpermissions\x12\x37\n\nvisibility\x18\x0b \x01(\x0e\x32\x17.datasets.v1.VisibilityR\nvisibility\x12\x12\n\x04slug\x18\x0c \x01(\tR\x04slug\"\xa2\x01\n\x0c\x44\x61tasetGroup\x12\x1f\n\x02id\x18\x01 \x01(\x0b\x32\x0f.datasets.v1.IDR\x02id\x12,\n\tparent_id\x18\x02 \x01(\x0b\x32\x0f.datasets.v1.IDR\x08parentId\x12\x1b\n\tcode_name\x18\x03 \x01(\tR\x08\x63odeName\x12\x12\n\x04name\x18\x04 \x01(\tR\x04name\x12\x12\n\x04icon\x18\x05 \x01(\tR\x04icon*\x9b\x01\n\x11\x44\x61tasetPermission\x12\"\n\x1e\x44\x41TASET_PERMISSION_UNSPECIFIED\x10\x00\x12\"\n\x1e\x44\x41TASET_PERMISSION_ACCESS_DATA\x10\x01\x12!\n\x1d\x44\x41TASET_PERMISSION_WRITE_DATA\x10\x02\x12\x1b\n\x17\x44\x41TASET_PERMISSION_EDIT\x10\x03*v\n\nVisibility\x12\x1a\n\x16VISIBILITY_UNSPECIFIED\x10\x00\x12\x16\n\x12VISIBILITY_PRIVATE\x10\x01\x12\x1d\n\x19VISIBILITY_SHARED_WITH_ME\x10\x02\x12\x15\n\x11VISIBILITY_PUBLIC\x10\x03\x42\xab\x01\n\x0f\x63om.datasets.v1B\tCoreProtoP\x01Z@github.com/tilebox/tilebox-go/protogen/go/datasets/v1;datasetsv1\xa2\x02\x03\x44XX\xaa\x02\x0b\x44\x61tasets.V1\xca\x02\x0b\x44\x61tasets\\V1\xe2\x02\x17\x44\x61tasets\\V1\\GPBMetadata\xea\x02\x0c\x44\x61tasets::V1b\x06proto3')
+ DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16\x64\x61tasets/v1/core.proto\x12\x0b\x64\x61tasets.v1\x1a\x1e\x64\x61tasets/v1/dataset_type.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\x18\n\x02ID\x12\x12\n\x04uuid\x18\x01 \x01(\x0cR\x04uuid\"\xce\x01\n\x0cTimeInterval\x12\x39\n\nstart_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\tstartTime\x12\x35\n\x08\x65nd_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\x07\x65ndTime\x12\'\n\x0fstart_exclusive\x18\x03 \x01(\x08R\x0estartExclusive\x12#\n\rend_inclusive\x18\x04 \x01(\x08R\x0c\x65ndInclusive\"\xb5\x01\n\x11\x44\x61tapointInterval\x12*\n\x08start_id\x18\x01 \x01(\x0b\x32\x0f.datasets.v1.IDR\x07startId\x12&\n\x06\x65nd_id\x18\x02 \x01(\x0b\x32\x0f.datasets.v1.IDR\x05\x65ndId\x12\'\n\x0fstart_exclusive\x18\x03 \x01(\x08R\x0estartExclusive\x12#\n\rend_inclusive\x18\x04 \x01(\x08R\x0c\x65ndInclusive\"v\n\x10LegacyPagination\x12\x19\n\x05limit\x18\x01 \x01(\x03H\x00R\x05limit\x88\x01\x01\x12*\n\x0estarting_after\x18\x02 \x01(\tH\x01R\rstartingAfter\x88\x01\x01\x42\x08\n\x06_limitB\x11\n\x0f_starting_after\"\x81\x01\n\nPagination\x12\x19\n\x05limit\x18\x01 \x01(\x03H\x00R\x05limit\x88\x01\x01\x12;\n\x0estarting_after\x18\x02 \x01(\x0b\x32\x0f.datasets.v1.IDH\x01R\rstartingAfter\x88\x01\x01\x42\x08\n\x06_limitB\x11\n\x0f_starting_after\"6\n\x03\x41ny\x12\x19\n\x08type_url\x18\x01 \x01(\tR\x07typeUrl\x12\x14\n\x05value\x18\x02 \x01(\x0cR\x05value\">\n\x0bRepeatedAny\x12\x19\n\x08type_url\x18\x01 \x01(\tR\x07typeUrl\x12\x14\n\x05value\x18\x02 \x03(\x0cR\x05value\"\xad\x01\n\x11\x44\x61tapointMetadata\x12\x39\n\nevent_time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\teventTime\x12\x41\n\x0eingestion_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\ringestionTime\x12\x13\n\x02id\x18\x03 \x01(\tH\x00R\x02id\x88\x01\x01\x42\x05\n\x03_id\"n\n\nDatapoints\x12\x32\n\x04meta\x18\x01 \x03(\x0b\x32\x1e.datasets.v1.DatapointMetadataR\x04meta\x12,\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x18.datasets.v1.RepeatedAnyR\x04\x64\x61ta\"\xc0\x01\n\rDatapointPage\x12\x32\n\x04meta\x18\x01 \x03(\x0b\x32\x1e.datasets.v1.DatapointMetadataR\x04meta\x12,\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x18.datasets.v1.RepeatedAnyR\x04\x64\x61ta\x12?\n\tnext_page\x18\x03 \x01(\x0b\x32\x1d.datasets.v1.LegacyPaginationH\x00R\x08nextPage\x88\x01\x01\x42\x0c\n\n_next_page\"e\n\tDatapoint\x12\x32\n\x04meta\x18\x01 \x01(\x0b\x32\x1e.datasets.v1.DatapointMetadataR\x04meta\x12$\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x10.datasets.v1.AnyR\x04\x64\x61ta\"^\n\nCollection\x12\x1b\n\tlegacy_id\x18\x01 \x01(\tR\x08legacyId\x12\x12\n\x04name\x18\x02 \x01(\tR\x04name\x12\x1f\n\x02id\x18\x03 \x01(\x0b\x32\x0f.datasets.v1.IDR\x02id\"\xc3\x01\n\x0e\x43ollectionInfo\x12\x37\n\ncollection\x18\x01 \x01(\x0b\x32\x17.datasets.v1.CollectionR\ncollection\x12\x42\n\x0c\x61vailability\x18\x02 \x01(\x0b\x32\x19.datasets.v1.TimeIntervalH\x00R\x0c\x61vailability\x88\x01\x01\x12\x19\n\x05\x63ount\x18\x03 \x01(\x04H\x01R\x05\x63ount\x88\x01\x01\x42\x0f\n\r_availabilityB\x08\n\x06_count\"B\n\x0f\x43ollectionInfos\x12/\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32\x1b.datasets.v1.CollectionInfoR\x04\x64\x61ta\"\x96\x03\n\x07\x44\x61taset\x12\x1f\n\x02id\x18\x01 \x01(\x0b\x32\x0f.datasets.v1.IDR\x02id\x12*\n\x08group_id\x18\x02 \x01(\x0b\x32\x0f.datasets.v1.IDR\x07groupId\x12.\n\x04type\x18\x03 \x01(\x0b\x32\x1a.datasets.v1.AnnotatedTypeR\x04type\x12\x1b\n\tcode_name\x18\x04 \x01(\tR\x08\x63odeName\x12\x12\n\x04name\x18\x05 \x01(\tR\x04name\x12\x18\n\x07summary\x18\x06 
\x01(\tR\x07summary\x12\x12\n\x04icon\x18\x07 \x01(\tR\x04icon\x12 \n\x0b\x64\x65scription\x18\x08 \x01(\tR\x0b\x64\x65scription\x12@\n\x0bpermissions\x18\n \x03(\x0e\x32\x1e.datasets.v1.DatasetPermissionR\x0bpermissions\x12\x37\n\nvisibility\x18\x0b \x01(\x0e\x32\x17.datasets.v1.VisibilityR\nvisibility\x12\x12\n\x04slug\x18\x0c \x01(\tR\x04slug\"\xa2\x01\n\x0c\x44\x61tasetGroup\x12\x1f\n\x02id\x18\x01 \x01(\x0b\x32\x0f.datasets.v1.IDR\x02id\x12,\n\tparent_id\x18\x02 \x01(\x0b\x32\x0f.datasets.v1.IDR\x08parentId\x12\x1b\n\tcode_name\x18\x03 \x01(\tR\x08\x63odeName\x12\x12\n\x04name\x18\x04 \x01(\tR\x04name\x12\x12\n\x04icon\x18\x05 \x01(\tR\x04icon*\x9b\x01\n\x11\x44\x61tasetPermission\x12\"\n\x1e\x44\x41TASET_PERMISSION_UNSPECIFIED\x10\x00\x12\"\n\x1e\x44\x41TASET_PERMISSION_ACCESS_DATA\x10\x01\x12!\n\x1d\x44\x41TASET_PERMISSION_WRITE_DATA\x10\x02\x12\x1b\n\x17\x44\x41TASET_PERMISSION_EDIT\x10\x03*v\n\nVisibility\x12\x1a\n\x16VISIBILITY_UNSPECIFIED\x10\x00\x12\x16\n\x12VISIBILITY_PRIVATE\x10\x01\x12\x1d\n\x19VISIBILITY_SHARED_WITH_ME\x10\x02\x12\x15\n\x11VISIBILITY_PUBLIC\x10\x03\x42\xab\x01\n\x0f\x63om.datasets.v1B\tCoreProtoP\x01Z@github.com/tilebox/tilebox-go/protogen/go/datasets/v1;datasetsv1\xa2\x02\x03\x44XX\xaa\x02\x0b\x44\x61tasets.V1\xca\x02\x0b\x44\x61tasets\\V1\xe2\x02\x17\x44\x61tasets\\V1\\GPBMetadata\xea\x02\x0c\x44\x61tasets::V1b\x06proto3')

  _globals = globals()
  _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
@@ -1,5 +1,5 @@
- from google.protobuf import timestamp_pb2 as _timestamp_pb2
  from tilebox.datasets.datasetsv1 import dataset_type_pb2 as _dataset_type_pb2
+ from google.protobuf import timestamp_pb2 as _timestamp_pb2
  from google.protobuf.internal import containers as _containers
  from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper
  from google.protobuf import descriptor as _descriptor
@@ -43,14 +43,14 @@ class DataAccessServiceServicer(object):
      """

      def GetDatasetForInterval(self, request, context):
-         """GetDatasetForInterval returns a list of data points for a given time interval and collection.
+         """legacy endpoint, kept around for backwards compatibility with older python clients for now
          """
          context.set_code(grpc.StatusCode.UNIMPLEMENTED)
          context.set_details('Method not implemented!')
          raise NotImplementedError('Method not implemented!')

      def GetDatapointByID(self, request, context):
-         """GetDatapointByID returns a single datapoint by its ID.
+         """legacy endpoint, kept around for backwards compatibility with older python clients for now
          """
          context.set_code(grpc.StatusCode.UNIMPLEMENTED)
          context.set_details('Method not implemented!')
@@ -25,7 +25,7 @@ _sym_db = _symbol_database.Default()
  from tilebox.datasets.datasetsv1 import core_pb2 as datasets_dot_v1_dot_core__pb2


- DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n datasets/v1/data_ingestion.proto\x12\x0b\x64\x61tasets.v1\x1a\x16\x64\x61tasets/v1/core.proto\"\xaf\x01\n\x17IngestDatapointsRequest\x12\x34\n\rcollection_id\x18\x01 \x01(\x0b\x32\x0f.datasets.v1.IDR\x0c\x63ollectionId\x12\x37\n\ndatapoints\x18\x02 \x01(\x0b\x32\x17.datasets.v1.DatapointsR\ndatapoints\x12%\n\x0e\x61llow_existing\x18\x03 \x01(\x08R\rallowExisting\"\x84\x01\n\rIngestRequest\x12\x34\n\rcollection_id\x18\x01 \x01(\x0b\x32\x0f.datasets.v1.IDR\x0c\x63ollectionId\x12\x16\n\x06values\x18\x02 \x03(\x0cR\x06values\x12%\n\x0e\x61llow_existing\x18\x03 \x01(\x08R\rallowExisting\"\x8a\x01\n\x0eIngestResponse\x12\x1f\n\x0bnum_created\x18\x01 \x01(\x03R\nnumCreated\x12!\n\x0cnum_existing\x18\x02 \x01(\x03R\x0bnumExisting\x12\x34\n\rdatapoint_ids\x18\x03 \x03(\x0b\x32\x0f.datasets.v1.IDR\x0c\x64\x61tapointIds\"{\n\rDeleteRequest\x12\x34\n\rcollection_id\x18\x01 \x01(\x0b\x32\x0f.datasets.v1.IDR\x0c\x63ollectionId\x12\x34\n\rdatapoint_ids\x18\x02 \x03(\x0b\x32\x0f.datasets.v1.IDR\x0c\x64\x61tapointIds\"1\n\x0e\x44\x65leteResponse\x12\x1f\n\x0bnum_deleted\x18\x01 \x01(\x03R\nnumDeleted2\xf9\x01\n\x14\x44\x61taIngestionService\x12W\n\x10IngestDatapoints\x12$.datasets.v1.IngestDatapointsRequest\x1a\x1b.datasets.v1.IngestResponse\"\x00\x12\x43\n\x06Ingest\x12\x1a.datasets.v1.IngestRequest\x1a\x1b.datasets.v1.IngestResponse\"\x00\x12\x43\n\x06\x44\x65lete\x12\x1a.datasets.v1.DeleteRequest\x1a\x1b.datasets.v1.DeleteResponse\"\x00\x42\xb4\x01\n\x0f\x63om.datasets.v1B\x12\x44\x61taIngestionProtoP\x01Z@github.com/tilebox/tilebox-go/protogen/go/datasets/v1;datasetsv1\xa2\x02\x03\x44XX\xaa\x02\x0b\x44\x61tasets.V1\xca\x02\x0b\x44\x61tasets\\V1\xe2\x02\x17\x44\x61tasets\\V1\\GPBMetadata\xea\x02\x0c\x44\x61tasets::V1b\x06proto3')
+ DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n datasets/v1/data_ingestion.proto\x12\x0b\x64\x61tasets.v1\x1a\x16\x64\x61tasets/v1/core.proto\"\xaf\x01\n\x17IngestDatapointsRequest\x12\x34\n\rcollection_id\x18\x01 \x01(\x0b\x32\x0f.datasets.v1.IDR\x0c\x63ollectionId\x12\x37\n\ndatapoints\x18\x02 \x01(\x0b\x32\x17.datasets.v1.DatapointsR\ndatapoints\x12%\n\x0e\x61llow_existing\x18\x03 \x01(\x08R\rallowExisting\"\x84\x01\n\rIngestRequest\x12\x34\n\rcollection_id\x18\x01 \x01(\x0b\x32\x0f.datasets.v1.IDR\x0c\x63ollectionId\x12\x16\n\x06values\x18\x02 \x03(\x0cR\x06values\x12%\n\x0e\x61llow_existing\x18\x03 \x01(\x08R\rallowExisting\"\x8a\x01\n\x0eIngestResponse\x12\x1f\n\x0bnum_created\x18\x01 \x01(\x03R\nnumCreated\x12!\n\x0cnum_existing\x18\x02 \x01(\x03R\x0bnumExisting\x12\x34\n\rdatapoint_ids\x18\x03 \x03(\x0b\x32\x0f.datasets.v1.IDR\x0c\x64\x61tapointIds\"{\n\rDeleteRequest\x12\x34\n\rcollection_id\x18\x01 \x01(\x0b\x32\x0f.datasets.v1.IDR\x0c\x63ollectionId\x12\x34\n\rdatapoint_ids\x18\x02 \x03(\x0b\x32\x0f.datasets.v1.IDR\x0c\x64\x61tapointIds\"1\n\x0e\x44\x65leteResponse\x12\x1f\n\x0bnum_deleted\x18\x01 \x01(\x03R\nnumDeleted2\xa0\x01\n\x14\x44\x61taIngestionService\x12\x43\n\x06Ingest\x12\x1a.datasets.v1.IngestRequest\x1a\x1b.datasets.v1.IngestResponse\"\x00\x12\x43\n\x06\x44\x65lete\x12\x1a.datasets.v1.DeleteRequest\x1a\x1b.datasets.v1.DeleteResponse\"\x00\x42\xb4\x01\n\x0f\x63om.datasets.v1B\x12\x44\x61taIngestionProtoP\x01Z@github.com/tilebox/tilebox-go/protogen/go/datasets/v1;datasetsv1\xa2\x02\x03\x44XX\xaa\x02\x0b\x44\x61tasets.V1\xca\x02\x0b\x44\x61tasets\\V1\xe2\x02\x17\x44\x61tasets\\V1\\GPBMetadata\xea\x02\x0c\x44\x61tasets::V1b\x06proto3')

  _globals = globals()
  _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
@@ -44,5 +44,5 @@ if not _descriptor._USE_C_DESCRIPTORS:
    _globals['_DELETERESPONSE']._serialized_start=652
    _globals['_DELETERESPONSE']._serialized_end=701
    _globals['_DATAINGESTIONSERVICE']._serialized_start=704
-   _globals['_DATAINGESTIONSERVICE']._serialized_end=953
+   _globals['_DATAINGESTIONSERVICE']._serialized_end=864
  # @@protoc_insertion_point(module_scope)
@@ -15,11 +15,6 @@ class DataIngestionServiceStub(object):
          Args:
              channel: A grpc.Channel.
          """
-         self.IngestDatapoints = channel.unary_unary(
-                 '/datasets.v1.DataIngestionService/IngestDatapoints',
-                 request_serializer=datasets_dot_v1_dot_data__ingestion__pb2.IngestDatapointsRequest.SerializeToString,
-                 response_deserializer=datasets_dot_v1_dot_data__ingestion__pb2.IngestResponse.FromString,
-                 _registered_method=True)
          self.Ingest = channel.unary_unary(
                  '/datasets.v1.DataIngestionService/Ingest',
                  request_serializer=datasets_dot_v1_dot_data__ingestion__pb2.IngestRequest.SerializeToString,
@@ -36,13 +31,6 @@ class DataIngestionServiceServicer(object):
      """DataIngestionService provides data ingestion and deletion capabilities for Tilebox datasets.
      """

-     def IngestDatapoints(self, request, context):
-         """legacy ingest endpoint, that separates datapoints into meta and data. Will be removed in the future.
-         """
-         context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-         context.set_details('Method not implemented!')
-         raise NotImplementedError('Method not implemented!')
-
      def Ingest(self, request, context):
          """Missing associated documentation comment in .proto file."""
          context.set_code(grpc.StatusCode.UNIMPLEMENTED)
@@ -58,11 +46,6 @@ class DataIngestionServiceServicer(object):

  def add_DataIngestionServiceServicer_to_server(servicer, server):
      rpc_method_handlers = {
-             'IngestDatapoints': grpc.unary_unary_rpc_method_handler(
-                     servicer.IngestDatapoints,
-                     request_deserializer=datasets_dot_v1_dot_data__ingestion__pb2.IngestDatapointsRequest.FromString,
-                     response_serializer=datasets_dot_v1_dot_data__ingestion__pb2.IngestResponse.SerializeToString,
-             ),
              'Ingest': grpc.unary_unary_rpc_method_handler(
                      servicer.Ingest,
                      request_deserializer=datasets_dot_v1_dot_data__ingestion__pb2.IngestRequest.FromString,
@@ -85,33 +68,6 @@ class DataIngestionService(object):
      """DataIngestionService provides data ingestion and deletion capabilities for Tilebox datasets.
      """

-     @staticmethod
-     def IngestDatapoints(request,
-             target,
-             options=(),
-             channel_credentials=None,
-             call_credentials=None,
-             insecure=False,
-             compression=None,
-             wait_for_ready=None,
-             timeout=None,
-             metadata=None):
-         return grpc.experimental.unary_unary(
-             request,
-             target,
-             '/datasets.v1.DataIngestionService/IngestDatapoints',
-             datasets_dot_v1_dot_data__ingestion__pb2.IngestDatapointsRequest.SerializeToString,
-             datasets_dot_v1_dot_data__ingestion__pb2.IngestResponse.FromString,
-             options,
-             channel_credentials,
-             insecure,
-             call_credentials,
-             compression,
-             wait_for_ready,
-             timeout,
-             metadata,
-             _registered_method=True)
-
      @staticmethod
      def Ingest(request,
              target,