airbyte-cdk 6.8.2.dev1-py3-none-any.whl → 6.8.2rc1-py3-none-any.whl

This diff compares the contents of two publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
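For reference, the rename from 6.8.2.dev1 to 6.8.2rc1 is a PEP 440 bump from a developmental release to a release candidate. A minimal check, assuming the third-party `packaging` distribution is installed:

    from packaging.version import Version

    # Dev releases sort before pre-releases of the same version,
    # which in turn sort before the final release.
    assert Version("6.8.2.dev1") < Version("6.8.2rc1") < Version("6.8.2")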
@@ -20,9 +20,6 @@ from airbyte_cdk.sources.declarative.extractors.record_filter import (
     ClientSideIncrementalRecordFilterDecorator,
 )
 from airbyte_cdk.sources.declarative.incremental.datetime_based_cursor import DatetimeBasedCursor
-from airbyte_cdk.sources.declarative.incremental.per_partition_with_global import (
-    PerPartitionWithGlobalCursor,
-)
 from airbyte_cdk.sources.declarative.interpolation import InterpolatedString
 from airbyte_cdk.sources.declarative.manifest_declarative_source import ManifestDeclarativeSource
 from airbyte_cdk.sources.declarative.models.declarative_component_schema import (
@@ -89,23 +86,10 @@ class ConcurrentDeclarativeSource(ManifestDeclarativeSource, Generic[TState]):
             component_factory=component_factory,
         )

+        # todo: We could remove state from initialization. Now that streams are grouped during the read(), a source
+        # no longer needs to store the original incoming state. But maybe there's an edge case?
         self._state = state

-        self._concurrent_streams: Optional[List[AbstractStream]]
-        self._synchronous_streams: Optional[List[Stream]]
-
-        # If the connector command was SPEC, there is no incoming config, and we cannot instantiate streams because
-        # they might depend on it. Ideally we want to have a static method on this class to get the spec without
-        # any other arguments, but the existing entrypoint.py isn't designed to support this. Just noting this
-        # for our future improvements to the CDK.
-        if config:
-            self._concurrent_streams, self._synchronous_streams = self._group_streams(
-                config=config or {}
-            )
-        else:
-            self._concurrent_streams = None
-            self._synchronous_streams = None
-
         concurrency_level_from_manifest = self._source_config.get("concurrency_level")
         if concurrency_level_from_manifest:
             concurrency_level_component = self._constructor.create_component(
@@ -139,17 +123,20 @@ class ConcurrentDeclarativeSource(ManifestDeclarativeSource, Generic[TState]):
         logger: logging.Logger,
         config: Mapping[str, Any],
         catalog: ConfiguredAirbyteCatalog,
-        state: Optional[Union[List[AirbyteStateMessage]]] = None,
+        state: Optional[List[AirbyteStateMessage]] = None,
     ) -> Iterator[AirbyteMessage]:
-        # ConcurrentReadProcessor pops streams that are finished being read so before syncing, the names of the concurrent
-        # streams must be saved so that they can be removed from the catalog before starting synchronous streams
-        if self._concurrent_streams:
+        concurrent_streams, _ = self._group_streams(config=config)
+
+        # ConcurrentReadProcessor pops streams that are finished being read so before syncing, the names of
+        # the concurrent streams must be saved so that they can be removed from the catalog before starting
+        # synchronous streams
+        if len(concurrent_streams) > 0:
             concurrent_stream_names = set(
-                [concurrent_stream.name for concurrent_stream in self._concurrent_streams]
+                [concurrent_stream.name for concurrent_stream in concurrent_streams]
             )

             selected_concurrent_streams = self._select_streams(
-                streams=self._concurrent_streams, configured_catalog=catalog
+                streams=concurrent_streams, configured_catalog=catalog
             )
             # It would appear that passing in an empty set of streams causes an infinite loop in ConcurrentReadProcessor.
             # This is also evident in concurrent_source_adapter.py so I'll leave this out of scope to fix for now
@@ -168,8 +155,7 @@ class ConcurrentDeclarativeSource(ManifestDeclarativeSource, Generic[TState]):
         yield from super().read(logger, config, filtered_catalog, state)

     def discover(self, logger: logging.Logger, config: Mapping[str, Any]) -> AirbyteCatalog:
-        concurrent_streams = self._concurrent_streams or []
-        synchronous_streams = self._synchronous_streams or []
+        concurrent_streams, synchronous_streams = self._group_streams(config=config)
         return AirbyteCatalog(
             streams=[
                 stream.as_airbyte_stream() for stream in concurrent_streams + synchronous_streams
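The two hunks above replace the cached `self._concurrent_streams` / `self._synchronous_streams` attributes with per-call grouping. A reduced sketch of the pattern (names other than `_group_streams` are illustrative, not the CDK's API):

    from typing import Any, List, Mapping, Tuple

    class LazyGroupingSource:
        """Stand-in source: derives stream groups from config at call time."""

        def _group_streams(self, config: Mapping[str, Any]) -> Tuple[List[str], List[str]]:
            # Stand-in for the real split into concurrent vs. synchronous streams.
            return (["concurrent_stream"], ["synchronous_stream"])

        def discover(self, config: Mapping[str, Any]) -> List[str]:
            concurrent_streams, synchronous_streams = self._group_streams(config=config)
            return concurrent_streams + synchronous_streams

    print(LazyGroupingSource().discover(config={"api_key": "..."}))

Because nothing is grouped in `__init__`, a config-less command such as SPEC never has to instantiate streams.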
@@ -309,59 +295,6 @@ class ConcurrentDeclarativeSource(ManifestDeclarativeSource, Generic[TState]):
                         cursor=final_state_cursor,
                     )
                 )
-            elif (
-                incremental_sync_component_definition
-                and incremental_sync_component_definition.get("type", "")
-                == DatetimeBasedCursorModel.__name__
-                and self._stream_supports_concurrent_partition_processing(
-                    declarative_stream=declarative_stream
-                )
-                and hasattr(declarative_stream.retriever, "stream_slicer")
-                and isinstance(declarative_stream.retriever.stream_slicer, PerPartitionWithGlobalCursor)
-            ):
-                stream_state = state_manager.get_stream_state(
-                    stream_name=declarative_stream.name, namespace=declarative_stream.namespace
-                )
-                partition_router = declarative_stream.retriever.stream_slicer._partition_router
-
-                cursor = self._constructor.create_concurrent_cursor_from_perpartition_cursor(
-                    state_manager=state_manager,
-                    model_type=DatetimeBasedCursorModel,
-                    component_definition=incremental_sync_component_definition,
-                    stream_name=declarative_stream.name,
-                    stream_namespace=declarative_stream.namespace,
-                    config=config or {},
-                    stream_state=stream_state,
-                    partition_router=partition_router,
-                )
-
-
-                partition_generator = StreamSlicerPartitionGenerator(
-                    DeclarativePartitionFactory(
-                        declarative_stream.name,
-                        declarative_stream.get_json_schema(),
-                        self._retriever_factory(
-                            name_to_stream_mapping[declarative_stream.name],
-                            config,
-                            stream_state,
-                        ),
-                        self.message_repository,
-                    ),
-                    cursor,
-                )
-
-                concurrent_streams.append(
-                    DefaultStream(
-                        partition_generator=partition_generator,
-                        name=declarative_stream.name,
-                        json_schema=declarative_stream.get_json_schema(),
-                        availability_strategy=AlwaysAvailableAvailabilityStrategy(),
-                        primary_key=get_primary_key_from_stream(declarative_stream.primary_key),
-                        cursor_field=cursor.cursor_field.cursor_field_key,
-                        logger=self.logger,
-                        cursor=cursor,
-                    )
-                )
             else:
                 synchronous_streams.append(declarative_stream)
         else:
@@ -59,11 +59,13 @@ class ClientSideIncrementalRecordFilterDecorator(RecordFilter):

     def __init__(
         self,
-        cursor: Union[DatetimeBasedCursor, PerPartitionWithGlobalCursor, GlobalSubstreamCursor],
+        date_time_based_cursor: DatetimeBasedCursor,
+        substream_cursor: Optional[Union[PerPartitionWithGlobalCursor, GlobalSubstreamCursor]],
         **kwargs: Any,
     ):
         super().__init__(**kwargs)
-        self._cursor = cursor
+        self._date_time_based_cursor = date_time_based_cursor
+        self._substream_cursor = substream_cursor

     def filter_records(
         self,
@@ -75,7 +77,7 @@ class ClientSideIncrementalRecordFilterDecorator(RecordFilter):
         records = (
             record
             for record in records
-            if self._cursor.should_be_synced(
+            if (self._substream_cursor or self._date_time_based_cursor).should_be_synced(
                 # Record is created on the fly to align with cursors interface; stream name is ignored as we don't need it here
                 # Record stream name is empty cause it is not used durig the filtering
                 Record(data=record, associated_slice=stream_slice, stream_name="")
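The decorator now receives the two cursors separately and prefers the substream cursor when one exists. A self-contained sketch of that fallback (the Stub* classes are stand-ins, not CDK types; only `should_be_synced` matters here):

    from typing import Optional

    class StubDatetimeCursor:
        def should_be_synced(self, record: dict) -> bool:
            return record["updated_at"] >= "2024-01-01"

    class StubSubstreamCursor:
        def should_be_synced(self, record: dict) -> bool:
            return True  # per-partition/global logic would live here

    def filter_records(records: list, substream_cursor: Optional[StubSubstreamCursor]) -> list:
        date_time_based_cursor = StubDatetimeCursor()
        # Same precedence as the hunk above: the substream cursor wins when present.
        cursor = substream_cursor or date_time_based_cursor
        return [r for r in records if cursor.should_be_synced(r)]

    records = [{"updated_at": "2023-12-31"}, {"updated_at": "2024-02-01"}]
    print(filter_records(records, substream_cursor=None))  # [{'updated_at': '2024-02-01'}]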
@@ -2,7 +2,6 @@
 # Copyright (c) 2022 Airbyte, Inc., all rights reserved.
 #

-from airbyte_cdk.sources.declarative.incremental.concurrent_partition_cursor import ConcurrentCursorFactory, ConcurrentPerPartitionCursor
 from airbyte_cdk.sources.declarative.incremental.datetime_based_cursor import DatetimeBasedCursor
 from airbyte_cdk.sources.declarative.incremental.declarative_cursor import DeclarativeCursor
 from airbyte_cdk.sources.declarative.incremental.global_substream_cursor import GlobalSubstreamCursor
@@ -15,8 +14,6 @@ from airbyte_cdk.sources.declarative.incremental.resumable_full_refresh_cursor i

 __all__ = [
     "CursorFactory",
-    "ConcurrentCursorFactory"
-    "ConcurrentPerPartitionCursor",
     "DatetimeBasedCursor",
     "DeclarativeCursor",
     "GlobalSubstreamCursor",
@@ -303,15 +303,6 @@ class PerPartitionCursor(DeclarativeCursor):
         raise ValueError("A partition needs to be provided in order to get request body json")

     def should_be_synced(self, record: Record) -> bool:
-        if self._to_partition_key(record.associated_slice.partition) not in self._cursor_per_partition:
-            partition_state = (
-                self._state_to_migrate_from
-                if self._state_to_migrate_from
-                else self._NO_CURSOR_STATE
-            )
-            cursor = self._create_cursor(partition_state)
-
-            self._cursor_per_partition[self._to_partition_key(record.associated_slice.partition)] = cursor
         return self._get_cursor(record).should_be_synced(
             self._convert_record_to_cursor_record(record)
         )
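After this removal, `should_be_synced` no longer creates a cursor on the fly for an unseen partition; the `_get_cursor` lookup is the only path, presumably mirroring the guard visible in the deleted concurrent cursor at the end of this diff. A reduced sketch of the resulting invariant (hypothetical names):

    cursor_per_partition: dict = {"partition-a": "cursor-a"}

    def get_cursor(partition_key: str) -> str:
        # Slices that are emitted must refer to an already-registered cursor.
        if partition_key not in cursor_per_partition:
            raise ValueError("stream slices that are emitted should refer to an existing cursor")
        return cursor_per_partition[partition_key]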
@@ -81,8 +81,6 @@ from airbyte_cdk.sources.declarative.extractors.record_selector import (
 )
 from airbyte_cdk.sources.declarative.incremental import (
     ChildPartitionResumableFullRefreshCursor,
-    ConcurrentCursorFactory,
-    ConcurrentPerPartitionCursor,
     CursorFactory,
     DatetimeBasedCursor,
     DeclarativeCursor,
@@ -907,62 +905,6 @@ class ModelToComponentFactory:
             cursor_granularity=cursor_granularity,
         )

-    def create_concurrent_cursor_from_perpartition_cursor(
-        self,
-        state_manager: ConnectorStateManager,
-        model_type: Type[BaseModel],
-        component_definition: ComponentDefinition,
-        stream_name: str,
-        stream_namespace: Optional[str],
-        config: Config,
-        stream_state: MutableMapping[str, Any],
-        partition_router,
-        **kwargs: Any,
-    ) -> ConcurrentPerPartitionCursor:
-        component_type = component_definition.get("type")
-        if component_definition.get("type") != model_type.__name__:
-            raise ValueError(
-                f"Expected manifest component of type {model_type.__name__}, but received {component_type} instead"
-            )
-
-        datetime_based_cursor_model = model_type.parse_obj(component_definition)
-
-        if not isinstance(datetime_based_cursor_model, DatetimeBasedCursorModel):
-            raise ValueError(
-                f"Expected {model_type.__name__} component, but received {datetime_based_cursor_model.__class__.__name__}"
-            )
-
-        interpolated_cursor_field = InterpolatedString.create(
-            datetime_based_cursor_model.cursor_field,
-            parameters=datetime_based_cursor_model.parameters or {},
-        )
-        cursor_field = CursorField(interpolated_cursor_field.eval(config=config))
-
-        # Create the cursor factory
-        cursor_factory = ConcurrentCursorFactory(
-            partial(
-                self.create_concurrent_cursor_from_datetime_based_cursor,
-                state_manager=state_manager,
-                model_type=model_type,
-                component_definition=component_definition,
-                stream_name=stream_name,
-                stream_namespace=stream_namespace,
-                config=config,
-            )
-        )
-
-        # Return the concurrent cursor and state converter
-        return ConcurrentPerPartitionCursor(
-            cursor_factory=cursor_factory,
-            partition_router=partition_router,
-            stream_name=stream_name,
-            stream_namespace=stream_namespace,
-            stream_state=stream_state,
-            message_repository=self._message_repository,  # type: ignore
-            connector_state_manager=state_manager,
-            cursor_field=cursor_field,
-        )
-
     @staticmethod
     def create_constant_backoff_strategy(
         model: ConstantBackoffStrategyModel, config: Config, **kwargs: Any
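The deleted factory method bound every shared argument with `functools.partial` and let the per-partition factory supply only `stream_state`. A generic, runnable sketch of that pattern (names are illustrative, not the CDK's API):

    from functools import partial

    def create_cursor(stream_name: str, stream_state: dict) -> dict:
        return {"stream": stream_name, "state": stream_state}

    class CursorFactorySketch:
        def __init__(self, create_function):
            self._create_function = create_function

        def create(self, stream_state: dict) -> dict:
            # Only stream_state varies per partition; everything else was bound up front.
            return self._create_function(stream_state=stream_state)

    factory = CursorFactorySketch(partial(create_cursor, stream_name="orders"))
    print(factory.create(stream_state={"updated_at": "2024-01-01"}))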
@@ -1245,14 +1187,17 @@ class ModelToComponentFactory:
                raise ValueError(
                    "Unsupported Slicer is used. PerPartitionWithGlobalCursor should be used here instead"
                )
-            cursor = combined_slicers if isinstance(
-                combined_slicers, (PerPartitionWithGlobalCursor, GlobalSubstreamCursor)
-            ) else self._create_component_from_model(
-                model=model.incremental_sync, config=config
-            )
-
         client_side_incremental_sync = {
-            "cursor": cursor
+            "date_time_based_cursor": self._create_component_from_model(
+                model=model.incremental_sync, config=config
+            ),
+            "substream_cursor": (
+                combined_slicers
+                if isinstance(
+                    combined_slicers, (PerPartitionWithGlobalCursor, GlobalSubstreamCursor)
+                )
+                else None
+            ),
         }

         if model.incremental_sync and isinstance(model.incremental_sync, DatetimeBasedCursorModel):
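Read together with the `ClientSideIncrementalRecordFilterDecorator` change earlier in this diff, the dict built here feeds the decorator's new keyword arguments: `date_time_based_cursor` is always built from the incremental_sync model, while `substream_cursor` is the combined slicer only when it is a `PerPartitionWithGlobalCursor` or `GlobalSubstreamCursor`, and `None` otherwise. A minimal sketch of how such kwargs splat into the constructor (stand-in class and placeholder values, not the CDK's):

    class FilterSketch:
        def __init__(self, date_time_based_cursor, substream_cursor, **kwargs):
            self._date_time_based_cursor = date_time_based_cursor
            self._substream_cursor = substream_cursor

    client_side_incremental_sync = {
        "date_time_based_cursor": "datetime-cursor",  # placeholder value
        "substream_cursor": None,  # None when the slicer is not per-partition/global
    }
    FilterSketch(**client_side_incremental_sync)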
@@ -1966,7 +1911,7 @@ class ModelToComponentFactory:
         if (
             not isinstance(stream_slicer, DatetimeBasedCursor)
             or type(stream_slicer) is not DatetimeBasedCursor
-        ) and not isinstance(stream_slicer, PerPartitionWithGlobalCursor):
+        ):
             # Many of the custom component implementations of DatetimeBasedCursor override get_request_params() (or other methods).
             # Because we're decoupling RequestOptionsProvider from the Cursor, custom components will eventually need to reimplement
             # their own RequestOptionsProvider. However, right now the existing StreamSlicer/Cursor still can act as the SimpleRetriever's
@@ -178,7 +178,7 @@ class SimpleRetriever(Retriever):
             stream_slice,
             next_page_token,
             self._paginator.get_request_headers,
-            self.request_option_provider.get_request_headers,
+            self.stream_slicer.get_request_headers,
         )
         if isinstance(headers, str):
             raise ValueError("Request headers cannot be a string")
@@ -38,6 +38,7 @@ class DeclarativePartitionFactory:
             stream_slice,
         )

+
 class DeclarativePartition(Partition):
     def __init__(
         self,
@@ -240,15 +240,6 @@ class ConcurrentCursor(Cursor):
     def _extract_cursor_value(self, record: Record) -> Any:
         return self._connector_state_converter.parse_value(self._cursor_field.extract_value(record))

-    def close_partition_without_emit(self, partition: Partition) -> None:
-        slice_count_before = len(self.state.get("slices", []))
-        self._add_slice_to_state(partition)
-        if slice_count_before < len(
-            self.state["slices"]
-        ):  # only emit if at least one slice has been processed
-            self._merge_partitions()
-        self._has_closed_at_least_one_slice = True
-
     def close_partition(self, partition: Partition) -> None:
         slice_count_before = len(self.state.get("slices", []))
         self._add_slice_to_state(partition)
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: airbyte-cdk
-Version: 6.8.2.dev1
+Version: 6.8.2rc1
 Summary: A framework for writing Airbyte Connectors.
 Home-page: https://airbyte.com
 License: MIT
@@ -62,7 +62,7 @@ airbyte_cdk/sources/declarative/checks/check_stream.py,sha256=dAA-UhmMj0WLXCkRQr
 airbyte_cdk/sources/declarative/checks/connection_checker.py,sha256=MBRJo6WJlZQHpIfOGaNOkkHUmgUl_4wDM6VPo41z5Ss,1383
 airbyte_cdk/sources/declarative/concurrency_level/__init__.py,sha256=5XUqrmlstYlMM0j6crktlKQwALek0uiz2D3WdM46MyA,191
 airbyte_cdk/sources/declarative/concurrency_level/concurrency_level.py,sha256=YIwCTCpOr_QSNW4ltQK0yUGWInI8PKNY216HOOegYLk,2101
-airbyte_cdk/sources/declarative/concurrent_declarative_source.py,sha256=PWqtQ6xzRZiM0XrMO_zCJjl9tvbFMJMwSec0nN2ZekA,26434
+airbyte_cdk/sources/declarative/concurrent_declarative_source.py,sha256=UAkFzFJ62tq7qWfudLLt-Sj-EhOJquYPd-FrwMBSA9I,22928
 airbyte_cdk/sources/declarative/datetime/__init__.py,sha256=l9LG7Qm6e5r_qgqfVKnx3mXYtg1I9MmMjomVIPfU4XA,177
 airbyte_cdk/sources/declarative/datetime/datetime_parser.py,sha256=SX9JjdesN1edN2WVUVMzU_ptqp2QB1OnsnjZ4mwcX7w,2579
 airbyte_cdk/sources/declarative/datetime/min_max_datetime.py,sha256=0BHBtDNQZfvwM45-tY5pNlTcKAFSGGNxemoi0Jic-0E,5785
@@ -80,15 +80,14 @@ airbyte_cdk/sources/declarative/extractors/__init__.py,sha256=YFuL4D4RuuB8E1DNSb
 airbyte_cdk/sources/declarative/extractors/dpath_extractor.py,sha256=wR4Ol4MG2lt5UlqXF5EU_k7qa5cN4_-luu3PJ1PlO3A,3131
 airbyte_cdk/sources/declarative/extractors/http_selector.py,sha256=2zWZ4ewTqQC8VwkjS0xD_u350Km3SiYP7hpOOgiLg5o,1169
 airbyte_cdk/sources/declarative/extractors/record_extractor.py,sha256=XJELMjahAsaomlvQgN2zrNO0DJX0G0fr9r682gUz7Pg,691
-airbyte_cdk/sources/declarative/extractors/record_filter.py,sha256=yTdEkyDUSW2KbFkEwJJMlS963C955LgCCOVfTmmScpQ,3367
+airbyte_cdk/sources/declarative/extractors/record_filter.py,sha256=OJ9xmhNWNwwzxYOeIrDy1GINb1zH9MBy6suC5tm2LSk,3545
 airbyte_cdk/sources/declarative/extractors/record_selector.py,sha256=AkXPOWyp741cpYLBl9AbmVmOQmQ2BzZ2XjgsMEB6gGc,6583
 airbyte_cdk/sources/declarative/extractors/response_to_file_extractor.py,sha256=LhqGDfX06_dDYLKsIVnwQ_nAWCln-v8PV7Wgt_QVeTI,6533
-airbyte_cdk/sources/declarative/incremental/__init__.py,sha256=zEERPIXz1WxCJypqlSXZCFIpT4-mIsjzRdmFlX2-nMg,1210
-airbyte_cdk/sources/declarative/incremental/concurrent_partition_cursor.py,sha256=-ECXZbDh3nw7G4mBncsTT_68LWQvS8TySIgckBTZZQQ,11899
+airbyte_cdk/sources/declarative/incremental/__init__.py,sha256=CmZl9ddwMZFo8L7mEl_OFHN3ahIFRSYrJjMbR_cJaFA,1006
 airbyte_cdk/sources/declarative/incremental/datetime_based_cursor.py,sha256=_UzUnSIUsDbRgbFTXgSyZEFb4ws-KdhdQPWO8mFbV7U,22028
 airbyte_cdk/sources/declarative/incremental/declarative_cursor.py,sha256=5Bhw9VRPyIuCaD0wmmq_L3DZsa-rJgtKSEUzSd8YYD0,536
 airbyte_cdk/sources/declarative/incremental/global_substream_cursor.py,sha256=3_EEZop94bMitZaJd2PF5Q2Xt9v94tYg7p7YJz8tAFc,15869
-airbyte_cdk/sources/declarative/incremental/per_partition_cursor.py,sha256=cdk4gSuYQmqcxxIOclhms6cnI1qm-FrSu7lmZULxOPM,16199
+airbyte_cdk/sources/declarative/incremental/per_partition_cursor.py,sha256=hElcYijbOHjdLKOMA7W7aizEbf22r7OSApXALP875uI,15749
 airbyte_cdk/sources/declarative/incremental/per_partition_with_global.py,sha256=2YBOA2NnwAeIKlIhSwUB_W-FaGnPcmrG_liY7b4mV2Y,8365
 airbyte_cdk/sources/declarative/incremental/resumable_full_refresh_cursor.py,sha256=10LFv1QPM-agVKl6eaANmEBOfd7gZgBrkoTcMggsieQ,4809
 airbyte_cdk/sources/declarative/interpolation/__init__.py,sha256=tjUJkn3B-iZ-p7RP2c3dVZejrGiQeooGmS5ibWTuUL4,437
@@ -110,7 +109,7 @@ airbyte_cdk/sources/declarative/parsers/__init__.py,sha256=ZnqYNxHsKCgO38IwB34RQ
 airbyte_cdk/sources/declarative/parsers/custom_exceptions.py,sha256=Rir9_z3Kcd5Es0-LChrzk-0qubAsiK_RSEnLmK2OXm8,553
 airbyte_cdk/sources/declarative/parsers/manifest_component_transformer.py,sha256=jVZ3ZV5YZrmDNIX5cM2mugXmnbH27zHRcD22_3oatpo,8454
 airbyte_cdk/sources/declarative/parsers/manifest_reference_resolver.py,sha256=IWUOdF03o-aQn0Occo1BJCxU0Pz-QILk5L67nzw2thw,6803
-airbyte_cdk/sources/declarative/parsers/model_to_component_factory.py,sha256=mOO0HahnHP0yv5LHFCayIx98R-yYHw6qkY9T5BxSlBg,98683
+airbyte_cdk/sources/declarative/parsers/model_to_component_factory.py,sha256=tO7xkv4y5iH6wGkj5As1T5ItUQxlw6cLflHAH48PKwc,96355
 airbyte_cdk/sources/declarative/partition_routers/__init__.py,sha256=8uGos2u7TFTx_EJBdcjdUGn3Eyx6jUuEa1_VB8UP_dI,631
 airbyte_cdk/sources/declarative/partition_routers/cartesian_product_stream_slicer.py,sha256=c5cuVFM6NFkuQqG8Z5IwkBuwDrvXZN1CunUOM_L0ezg,6892
 airbyte_cdk/sources/declarative/partition_routers/list_partition_router.py,sha256=t7pRdFWfFWJtQQG19c9PVeMODyO2BknRTakpM5U9N-8,4844
@@ -156,7 +155,7 @@ airbyte_cdk/sources/declarative/requesters/requester.py,sha256=iVVpXQ4KEd9OyZNwm
 airbyte_cdk/sources/declarative/retrievers/__init__.py,sha256=FVQpUGVwp2Gibk4gp07VmLKX5AafUlsZWFSrDpUDuJM,443
 airbyte_cdk/sources/declarative/retrievers/async_retriever.py,sha256=WDFnjrXLz3-YEjFhmlMkWAn9AJvnZ0mk9FyC8DAhEYk,4976
 airbyte_cdk/sources/declarative/retrievers/retriever.py,sha256=XPLs593Xv8c5cKMc37XzUAYmzlXd1a7eSsspM-CMuWA,1696
-airbyte_cdk/sources/declarative/retrievers/simple_retriever.py,sha256=6IP6e9cjGEU2y77lcOKj1bqn3bYGBAsP8vJU4Skzp30,24182
+airbyte_cdk/sources/declarative/retrievers/simple_retriever.py,sha256=N4swGw5mfuTXJ2R7AKX18CHzizsr69pXwt5uSHLPi48,24172
 airbyte_cdk/sources/declarative/schema/__init__.py,sha256=ul8L9S0-__AMEdbCLHBq-PMEeA928NVp8BB83BMotfU,517
 airbyte_cdk/sources/declarative/schema/default_schema_loader.py,sha256=KTACrIE23a83wsm3Rd9Eb4K6-20lrGqYxTHNp9yxsso,1820
 airbyte_cdk/sources/declarative/schema/inline_schema_loader.py,sha256=bVETE10hRsatRJq3R3BeyRR0wIoK3gcP1gcpVRQ_P5U,464
@@ -165,7 +164,7 @@ airbyte_cdk/sources/declarative/schema/schema_loader.py,sha256=kjt8v0N5wWKA5zyLn
 airbyte_cdk/sources/declarative/spec/__init__.py,sha256=H0UwoRhgucbKBIzg85AXrifybVmfpwWpPdy22vZKVuo,141
 airbyte_cdk/sources/declarative/spec/spec.py,sha256=ODSNUgkDOhnLQnwLjgSaME6R3kNeywjROvbNrWEnsgU,1876
 airbyte_cdk/sources/declarative/stream_slicers/__init__.py,sha256=sI9vhc95RwJYOnA0VKjcbtKgFcmAbWjhdWBXFbAijOs,176
-airbyte_cdk/sources/declarative/stream_slicers/declarative_partition_generator.py,sha256=7KE_qBBP3QYA7qQdOE42u3fwUM5S1FD5rowf7gtu3qk,3462
+airbyte_cdk/sources/declarative/stream_slicers/declarative_partition_generator.py,sha256=E7feZ5xkHwFHODq8FSjwdGe291RZoCMCRHT1rWnQ1lI,3463
 airbyte_cdk/sources/declarative/stream_slicers/stream_slicer.py,sha256=SOkIPBi2Wu7yxIvA15yFzUAB95a3IzA8LPq5DEqHQQc,725
 airbyte_cdk/sources/declarative/transformations/__init__.py,sha256=CPJ8TlMpiUmvG3624VYu_NfTzxwKcfBjM2Q2wJ7fkSA,919
 airbyte_cdk/sources/declarative/transformations/add_fields.py,sha256=r4YdAuAk2bQtNWJMztIIy2CC-NglD9NeK1s1TeO9wkw,5027
@@ -246,7 +245,7 @@ airbyte_cdk/sources/streams/concurrent/abstract_stream.py,sha256=3OB5VsvOkJmCxIM
 airbyte_cdk/sources/streams/concurrent/abstract_stream_facade.py,sha256=QTry1QCBUwJDw1QSCEvz23s7zIEx_7QMxkPq9j-oPIQ,1358
 airbyte_cdk/sources/streams/concurrent/adapters.py,sha256=QP_64kQo-b3sRNHZA5aqrgCJqAhIVegRM3vJ8jGyuSY,15213
 airbyte_cdk/sources/streams/concurrent/availability_strategy.py,sha256=4La5v2UffSjGnhmF4kwNIKt_g3RXk2ux1mSHA1ejgYM,2898
-airbyte_cdk/sources/streams/concurrent/cursor.py,sha256=SbkWn2t5uxVhT6W657zrENWnxC74oyp_WU9ol-_w5so,21215
+airbyte_cdk/sources/streams/concurrent/cursor.py,sha256=Hke6CpD8Sq1FS4g1Xuht39UN7hKkGy1mvOxvQrm1lLM,20810
 airbyte_cdk/sources/streams/concurrent/default_stream.py,sha256=K3rLMpYhS7nnmvwQ52lqBy7DQdFMJpvvT7sgBg_ckA8,3207
 airbyte_cdk/sources/streams/concurrent/exceptions.py,sha256=JOZ446MCLpmF26r9KfS6OO_6rGjcjgJNZdcw6jccjEI,468
 airbyte_cdk/sources/streams/concurrent/helpers.py,sha256=S6AW8TgIASCZ2UuUcQLE8OzgYUHWt2-KPOvNPwnQf-Q,1596
@@ -331,8 +330,8 @@ airbyte_cdk/utils/slice_hasher.py,sha256=-pHexlNYoWYPnXNH-M7HEbjmeJe9Zk7SJijdQ7d
 airbyte_cdk/utils/spec_schema_transformations.py,sha256=-5HTuNsnDBAhj-oLeQXwpTGA0HdcjFOf2zTEMUTTg_Y,816
 airbyte_cdk/utils/stream_status_utils.py,sha256=ZmBoiy5HVbUEHAMrUONxZvxnvfV9CesmQJLDTAIWnWw,1171
 airbyte_cdk/utils/traced_exception.py,sha256=C8uIBuCL_E4WnBAOPSxBicD06JAldoN9fGsQDp463OY,6292
-airbyte_cdk-6.8.2.dev1.dist-info/LICENSE.txt,sha256=Wfe61S4BaGPj404v8lrAbvhjYR68SHlkzeYrg3_bbuM,1051
-airbyte_cdk-6.8.2.dev1.dist-info/METADATA,sha256=yATvM83Zo6tZfb5wnnP-1YGmBjL1ZR2zWzZFr09J1R8,6112
-airbyte_cdk-6.8.2.dev1.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
-airbyte_cdk-6.8.2.dev1.dist-info/entry_points.txt,sha256=fj-e3PAQvsxsQzyyq8UkG1k8spunWnD4BAH2AwlR6NM,95
-airbyte_cdk-6.8.2.dev1.dist-info/RECORD,,
+airbyte_cdk-6.8.2rc1.dist-info/LICENSE.txt,sha256=Wfe61S4BaGPj404v8lrAbvhjYR68SHlkzeYrg3_bbuM,1051
+airbyte_cdk-6.8.2rc1.dist-info/METADATA,sha256=u5k6gz5XLQCQmR3CLeP-StHB8_iV6QhEqLuuBvh1G1w,6110
+airbyte_cdk-6.8.2rc1.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+airbyte_cdk-6.8.2rc1.dist-info/entry_points.txt,sha256=fj-e3PAQvsxsQzyyq8UkG1k8spunWnD4BAH2AwlR6NM,95
+airbyte_cdk-6.8.2rc1.dist-info/RECORD,,
@@ -1,270 +0,0 @@
-import copy
-
-#
-# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
-#
-import logging
-from collections import OrderedDict
-from typing import Any, Callable, Iterable, Mapping, MutableMapping, Optional
-
-from airbyte_cdk.sources.connector_state_manager import ConnectorStateManager
-from airbyte_cdk.sources.declarative.incremental.declarative_cursor import DeclarativeCursor
-from airbyte_cdk.sources.declarative.partition_routers.partition_router import PartitionRouter
-from airbyte_cdk.sources.message import MessageRepository
-from airbyte_cdk.sources.streams.checkpoint.per_partition_key_serializer import (
-    PerPartitionKeySerializer,
-)
-from airbyte_cdk.sources.streams.concurrent.cursor import Cursor, CursorField
-from airbyte_cdk.sources.streams.concurrent.partitions.partition import Partition
-from airbyte_cdk.sources.types import Record, StreamSlice, StreamState
-
-logger = logging.getLogger("airbyte")
-
-
-class ConcurrentCursorFactory:
-    def __init__(self, create_function: Callable[..., Cursor]):
-        self._create_function = create_function
-
-    def create(self, stream_state: Mapping[str, Any]) -> Cursor:
-        return self._create_function(stream_state=stream_state)
-
-
-class ConcurrentPerPartitionCursor(Cursor):
-    """
-    Manages state per partition when a stream has many partitions, to prevent data loss or duplication.
-
-    **Partition Limitation and Limit Reached Logic**
-
-    - **DEFAULT_MAX_PARTITIONS_NUMBER**: The maximum number of partitions to keep in memory (default is 10,000).
-    - **_cursor_per_partition**: An ordered dictionary that stores cursors for each partition.
-    - **_over_limit**: A counter that increments each time an oldest partition is removed when the limit is exceeded.
-
-    The class ensures that the number of partitions tracked does not exceed the `DEFAULT_MAX_PARTITIONS_NUMBER` to prevent excessive memory usage.
-
-    - When the number of partitions exceeds the limit, the oldest partitions are removed from `_cursor_per_partition`, and `_over_limit` is incremented accordingly.
-    - The `limit_reached` method returns `True` when `_over_limit` exceeds `DEFAULT_MAX_PARTITIONS_NUMBER`, indicating that the global cursor should be used instead of per-partition cursors.
-
-    This approach avoids unnecessary switching to a global cursor due to temporary spikes in partition counts, ensuring that switching is only done when a sustained high number of partitions is observed.
-    """
-
-    DEFAULT_MAX_PARTITIONS_NUMBER = 10000
-    _NO_STATE: Mapping[str, Any] = {}
-    _NO_CURSOR_STATE: Mapping[str, Any] = {}
-    _KEY = 0
-    _VALUE = 1
-    _state_to_migrate_from: Mapping[str, Any] = {}
-
-    def __init__(
-        self,
-        cursor_factory: ConcurrentCursorFactory,
-        partition_router: PartitionRouter,
-        stream_name: str,
-        stream_namespace: Optional[str],
-        stream_state: Any,
-        message_repository: MessageRepository,
-        connector_state_manager: ConnectorStateManager,
-        cursor_field: CursorField,
-    ) -> None:
-        self._stream_name = stream_name
-        self._stream_namespace = stream_namespace
-        self._message_repository = message_repository
-        self._connector_state_manager = connector_state_manager
-        self._cursor_field = cursor_field
-
-        self._cursor_factory = cursor_factory
-        self._partition_router = partition_router
-
-        # The dict is ordered to ensure that once the maximum number of partitions is reached,
-        # the oldest partitions can be efficiently removed, maintaining the most recent partitions.
-        self._cursor_per_partition: OrderedDict[str, Cursor] = OrderedDict()
-        self._over_limit = 0
-        self._partition_serializer = PerPartitionKeySerializer()
-
-        self._set_initial_state(stream_state)
-
-    @property
-    def cursor_field(self) -> CursorField:
-        return self._cursor_field
-
-    @property
-    def state(self) -> MutableMapping[str, Any]:
-        states = []
-        for partition_tuple, cursor in self._cursor_per_partition.items():
-            cursor_state = cursor._connector_state_converter.convert_to_state_message(
-                cursor._cursor_field, cursor.state
-            )
-            if cursor_state:
-                states.append(
-                    {
-                        "partition": self._to_dict(partition_tuple),
-                        "cursor": copy.deepcopy(cursor_state),
-                    }
-                )
-        state: dict[str, Any] = {"states": states}
-        return state
-
-    def close_partition(self, partition: Partition) -> None:
-        self._cursor_per_partition[self._to_partition_key(partition._stream_slice.partition)].close_partition_without_emit(partition=partition)
-
-    def ensure_at_least_one_state_emitted(self) -> None:
-        """
-        The platform expect to have at least one state message on successful syncs. Hence, whatever happens, we expect this method to be
-        called.
-        """
-        self._emit_state_message()
-
-    def _emit_state_message(self) -> None:
-        self._connector_state_manager.update_state_for_stream(
-            self._stream_name,
-            self._stream_namespace,
-            self.state,
-        )
-        state_message = self._connector_state_manager.create_state_message(
-            self._stream_name, self._stream_namespace
-        )
-        self._message_repository.emit_message(state_message)
-
-
-    def stream_slices(self) -> Iterable[StreamSlice]:
-        slices = self._partition_router.stream_slices()
-        for partition in slices:
-            yield from self.generate_slices_from_partition(partition)
-
-    def generate_slices_from_partition(self, partition: StreamSlice) -> Iterable[StreamSlice]:
-        # Ensure the maximum number of partitions is not exceeded
-        self._ensure_partition_limit()
-
-        cursor = self._cursor_per_partition.get(self._to_partition_key(partition.partition))
-        if not cursor:
-            partition_state = (
-                self._state_to_migrate_from
-                if self._state_to_migrate_from
-                else self._NO_CURSOR_STATE
-            )
-            cursor = self._create_cursor(partition_state)
-            self._cursor_per_partition[self._to_partition_key(partition.partition)] = cursor
-
-        for cursor_slice in cursor.stream_slices():
-            yield StreamSlice(
-                partition=partition, cursor_slice=cursor_slice, extra_fields=partition.extra_fields
-            )
-
-    def _ensure_partition_limit(self) -> None:
-        """
-        Ensure the maximum number of partitions is not exceeded. If so, the oldest added partition will be dropped.
-        """
-        while len(self._cursor_per_partition) > self.DEFAULT_MAX_PARTITIONS_NUMBER - 1:
-            self._over_limit += 1
-            oldest_partition = self._cursor_per_partition.popitem(last=False)[
-                0
-            ]  # Remove the oldest partition
-            logger.warning(
-                f"The maximum number of partitions has been reached. Dropping the oldest partition: {oldest_partition}. Over limit: {self._over_limit}."
-            )
-
-    def limit_reached(self) -> bool:
-        return self._over_limit > self.DEFAULT_MAX_PARTITIONS_NUMBER
-
-    def _set_initial_state(self, stream_state: StreamState) -> None:
-        """
-        Set the initial state for the cursors.
-
-        This method initializes the state for each partition cursor using the provided stream state.
-        If a partition state is provided in the stream state, it will update the corresponding partition cursor with this state.
-
-        Additionally, it sets the parent state for partition routers that are based on parent streams. If a partition router
-        does not have parent streams, this step will be skipped due to the default PartitionRouter implementation.
-
-        Args:
-            stream_state (StreamState): The state of the streams to be set. The format of the stream state should be:
-                {
-                    "states": [
-                        {
-                            "partition": {
-                                "partition_key": "value"
-                            },
-                            "cursor": {
-                                "last_updated": "2023-05-27T00:00:00Z"
-                            }
-                        }
-                    ],
-                    "parent_state": {
-                        "parent_stream_name": {
-                            "last_updated": "2023-05-27T00:00:00Z"
-                        }
-                    }
-                }
-        """
-        if not stream_state:
-            return
-
-        if "states" not in stream_state:
-            # We assume that `stream_state` is in a global format that can be applied to all partitions.
-            # Example: {"global_state_format_key": "global_state_format_value"}
-            self._state_to_migrate_from = stream_state
-
-        else:
-            for state in stream_state["states"]:
-                self._cursor_per_partition[self._to_partition_key(state["partition"])] = (
-                    self._create_cursor(state["cursor"])
-                )
-
-            # set default state for missing partitions if it is per partition with fallback to global
-            if "state" in stream_state:
-                self._state_to_migrate_from = stream_state["state"]
-
-        # Set parent state for partition routers based on parent streams
-        self._partition_router.set_initial_state(stream_state)
-
-    def observe(self, record: Record) -> None:
-        self._cursor_per_partition[self._to_partition_key(record.associated_slice.partition)].observe(record)
-
-    def _to_partition_key(self, partition: Mapping[str, Any]) -> str:
-        return self._partition_serializer.to_partition_key(partition)
-
-    def _to_dict(self, partition_key: str) -> Mapping[str, Any]:
-        return self._partition_serializer.to_partition(partition_key)
-
-    def _create_cursor(self, cursor_state: Any) -> DeclarativeCursor:
-        cursor = self._cursor_factory.create(stream_state=cursor_state)
-        return cursor
-
-    def should_be_synced(self, record: Record) -> bool:
-        return self._get_cursor(record).should_be_synced(record)
-
-    def is_greater_than_or_equal(self, first: Record, second: Record) -> bool:
-        if not first.associated_slice or not second.associated_slice:
-            raise ValueError(
-                f"Both records should have an associated slice but got {first.associated_slice} and {second.associated_slice}"
-            )
-        if first.associated_slice.partition != second.associated_slice.partition:
-            raise ValueError(
-                f"To compare records, partition should be the same but got {first.associated_slice.partition} and {second.associated_slice.partition}"
-            )
-
-        return self._get_cursor(first).is_greater_than_or_equal(
-            self._convert_record_to_cursor_record(first),
-            self._convert_record_to_cursor_record(second),
-        )
-
-    @staticmethod
-    def _convert_record_to_cursor_record(record: Record) -> Record:
-        return Record(
-            record.data,
-            StreamSlice(partition={}, cursor_slice=record.associated_slice.cursor_slice)
-            if record.associated_slice
-            else None,
-        )
-
-    def _get_cursor(self, record: Record) -> Cursor:
-        if not record.associated_slice:
-            raise ValueError(
-                "Invalid state as stream slices that are emitted should refer to an existing cursor"
-            )
-        partition_key = self._to_partition_key(record.associated_slice.partition)
-        if partition_key not in self._cursor_per_partition:
-            raise ValueError(
-                "Invalid state as stream slices that are emitted should refer to an existing cursor"
-            )
-        cursor = self._cursor_per_partition[partition_key]
-        return cursor
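For context on the deleted class's partition cap: `_ensure_partition_limit` relied on `OrderedDict.popitem(last=False)` to evict the oldest-inserted partition once the cap was hit. A standalone sketch of that eviction pattern with a hypothetical limit of 3:

    from collections import OrderedDict

    cursors: OrderedDict = OrderedDict()
    LIMIT = 3
    for key in ["p1", "p2", "p3", "p4", "p5"]:
        while len(cursors) > LIMIT - 1:
            oldest, _ = cursors.popitem(last=False)  # drop the oldest partition
            print(f"dropped {oldest}")
        cursors[key] = {}
    print(list(cursors))  # ['p3', 'p4', 'p5']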