airbyte-cdk 6.60.5__py3-none-any.whl → 6.60.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3,8 +3,8 @@
  #


- from dataclasses import asdict, dataclass, field
- from typing import Any, ClassVar, Dict, List, Mapping
+ from dataclasses import asdict
+ from typing import Any, Dict, List, Mapping, Optional

  from airbyte_cdk.connector_builder.test_reader import TestReader
  from airbyte_cdk.models import (
@@ -15,45 +15,32 @@ from airbyte_cdk.models import (
      Type,
  )
  from airbyte_cdk.models import Type as MessageType
+ from airbyte_cdk.sources.declarative.concurrent_declarative_source import (
+     ConcurrentDeclarativeSource,
+     TestLimits,
+ )
  from airbyte_cdk.sources.declarative.declarative_source import DeclarativeSource
  from airbyte_cdk.sources.declarative.manifest_declarative_source import ManifestDeclarativeSource
- from airbyte_cdk.sources.declarative.parsers.model_to_component_factory import (
-     ModelToComponentFactory,
- )
  from airbyte_cdk.utils.airbyte_secrets_utils import filter_secrets
  from airbyte_cdk.utils.datetime_helpers import ab_datetime_now
  from airbyte_cdk.utils.traced_exception import AirbyteTracedException

- DEFAULT_MAXIMUM_NUMBER_OF_PAGES_PER_SLICE = 5
- DEFAULT_MAXIMUM_NUMBER_OF_SLICES = 5
- DEFAULT_MAXIMUM_RECORDS = 100
- DEFAULT_MAXIMUM_STREAMS = 100
-
  MAX_PAGES_PER_SLICE_KEY = "max_pages_per_slice"
  MAX_SLICES_KEY = "max_slices"
  MAX_RECORDS_KEY = "max_records"
  MAX_STREAMS_KEY = "max_streams"


- @dataclass
- class TestLimits:
-     __test__: ClassVar[bool] = False  # Tell Pytest this is not a Pytest class, despite its name
-
-     max_records: int = field(default=DEFAULT_MAXIMUM_RECORDS)
-     max_pages_per_slice: int = field(default=DEFAULT_MAXIMUM_NUMBER_OF_PAGES_PER_SLICE)
-     max_slices: int = field(default=DEFAULT_MAXIMUM_NUMBER_OF_SLICES)
-     max_streams: int = field(default=DEFAULT_MAXIMUM_STREAMS)
-
-
  def get_limits(config: Mapping[str, Any]) -> TestLimits:
      command_config = config.get("__test_read_config", {})
-     max_pages_per_slice = (
-         command_config.get(MAX_PAGES_PER_SLICE_KEY) or DEFAULT_MAXIMUM_NUMBER_OF_PAGES_PER_SLICE
+     return TestLimits(
+         max_records=command_config.get(MAX_RECORDS_KEY, TestLimits.DEFAULT_MAX_RECORDS),
+         max_pages_per_slice=command_config.get(
+             MAX_PAGES_PER_SLICE_KEY, TestLimits.DEFAULT_MAX_PAGES_PER_SLICE
+         ),
+         max_slices=command_config.get(MAX_SLICES_KEY, TestLimits.DEFAULT_MAX_SLICES),
+         max_streams=command_config.get(MAX_STREAMS_KEY, TestLimits.DEFAULT_MAX_STREAMS),
      )
-     max_slices = command_config.get(MAX_SLICES_KEY) or DEFAULT_MAXIMUM_NUMBER_OF_SLICES
-     max_records = command_config.get(MAX_RECORDS_KEY) or DEFAULT_MAXIMUM_RECORDS
-     max_streams = command_config.get(MAX_STREAMS_KEY) or DEFAULT_MAXIMUM_STREAMS
-     return TestLimits(max_records, max_pages_per_slice, max_slices, max_streams)


  def should_migrate_manifest(config: Mapping[str, Any]) -> bool:
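Note on the get_limits() rewrite above: dict.get(key, default) falls back only when the key is absent, whereas the old `command_config.get(KEY) or DEFAULT` pattern also replaced explicit falsy values. A minimal standalone sketch of the difference (plain dicts, illustrative values only):

    command_config = {"max_records": 0}

    # old pattern: an explicit falsy value such as 0 is coerced to the default
    command_config.get("max_records") or 100   # -> 100

    # new pattern: only a missing key falls back to the default
    command_config.get("max_records", 100)     # -> 0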
@@ -75,21 +62,30 @@ def should_normalize_manifest(config: Mapping[str, Any]) -> bool:
      return config.get("__should_normalize", False)


- def create_source(config: Mapping[str, Any], limits: TestLimits) -> ManifestDeclarativeSource:
+ def create_source(
+     config: Mapping[str, Any],
+     limits: TestLimits,
+     catalog: Optional[ConfiguredAirbyteCatalog],
+     state: Optional[List[AirbyteStateMessage]],
+ ) -> ConcurrentDeclarativeSource[Optional[List[AirbyteStateMessage]]]:
      manifest = config["__injected_declarative_manifest"]
-     return ManifestDeclarativeSource(
+
+     # We enforce a concurrency level of 1 so that the stream is processed on a single thread
+     # to retain ordering for the grouping of the builder message responses.
+     if "concurrency_level" in manifest:
+         manifest["concurrency_level"]["default_concurrency"] = 1
+     else:
+         manifest["concurrency_level"] = {"type": "ConcurrencyLevel", "default_concurrency": 1}
+
+     return ConcurrentDeclarativeSource(
+         catalog=catalog,
          config=config,
-         emit_connector_builder_messages=True,
+         state=state,
          source_config=manifest,
+         emit_connector_builder_messages=True,
          migrate_manifest=should_migrate_manifest(config),
          normalize_manifest=should_normalize_manifest(config),
-         component_factory=ModelToComponentFactory(
-             emit_connector_builder_messages=True,
-             limit_pages_fetched_per_slice=limits.max_pages_per_slice,
-             limit_slices_fetched=limits.max_slices,
-             disable_retries=True,
-             disable_cache=True,
-         ),
+         limits=limits,
      )

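A minimal invocation sketch for the new create_source() signature, assuming a config dict shaped the way this module reads it (the manifest value itself is elided):

    config = {
        "__injected_declarative_manifest": manifest_dict,  # declarative manifest (dict)
        "__test_read_config": {"max_records": 10},
    }
    limits = get_limits(config)
    source = create_source(config=config, limits=limits, catalog=None, state=None)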
@@ -91,12 +91,12 @@ def handle_connector_builder_request(
  def handle_request(args: List[str]) -> str:
      command, config, catalog, state = get_config_and_catalog_from_args(args)
      limits = get_limits(config)
-     source = create_source(config, limits)
-     return orjson.dumps(
+     source = create_source(config=config, limits=limits, catalog=catalog, state=state)
+     return orjson.dumps(  # type: ignore[no-any-return] # Serializer.dump() always returns AirbyteMessage
          AirbyteMessageSerializer.dump(
              handle_connector_builder_request(source, command, config, catalog, state, limits)
          )
-     ).decode()  # type: ignore[no-any-return] # Serializer.dump() always returns AirbyteMessage
+     ).decode()


  if __name__ == "__main__":
@@ -5,7 +5,7 @@
  import json
  from copy import deepcopy
  from json import JSONDecodeError
- from typing import Any, Dict, List, Mapping, Optional
+ from typing import Any, Dict, List, Mapping, Optional, Union

  from airbyte_cdk.connector_builder.models import (
      AuxiliaryRequest,
@@ -17,6 +17,8 @@ from airbyte_cdk.connector_builder.models import (
  from airbyte_cdk.models import (
      AirbyteLogMessage,
      AirbyteMessage,
+     AirbyteStateBlob,
+     AirbyteStateMessage,
      OrchestratorType,
      TraceType,
  )
@@ -466,7 +468,7 @@ def handle_current_slice(
      return StreamReadSlices(
          pages=current_slice_pages,
          slice_descriptor=current_slice_descriptor,
-         state=[latest_state_message] if latest_state_message else [],
+         state=[convert_state_blob_to_mapping(latest_state_message)] if latest_state_message else [],
          auxiliary_requests=auxiliary_requests if auxiliary_requests else [],
      )

@@ -718,3 +720,23 @@ def get_auxiliary_request_type(stream: dict, http: dict) -> str: # type: ignore
      Determines the type of the auxiliary request based on the stream and HTTP properties.
      """
      return "PARENT_STREAM" if stream.get("is_substream", False) else str(http.get("type", None))
+
+
+ def convert_state_blob_to_mapping(
+     state_message: Union[AirbyteStateMessage, Dict[str, Any]],
+ ) -> Dict[str, Any]:
+     """
+     The AirbyteStreamState stores state as an AirbyteStateBlob which deceivingly is not
+     a dictionary, but rather a list of kwargs fields. This in turn causes it to not be
+     properly turned into a dictionary when translating this back into response output
+     by the connector_builder_handler using asdict()
+     """
+
+     if isinstance(state_message, AirbyteStateMessage) and state_message.stream:
+         state_value = state_message.stream.stream_state
+         if isinstance(state_value, AirbyteStateBlob):
+             state_value_mapping = {k: v for k, v in state_value.__dict__.items()}
+             state_message.stream.stream_state = state_value_mapping  # type: ignore # we intentionally set this as a Dict so that StreamReadSlices is translated properly in the resulting HTTP response
+         return state_message  # type: ignore # See above, but when this is an AirbyteStateMessage we must convert AirbyteStateBlob to a Dict
+     else:
+         return state_message  # type: ignore # This is guaranteed to be a Dict since we check isinstance AirbyteStateMessage above
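The root cause convert_state_blob_to_mapping() works around: AirbyteStateBlob holds arbitrary kwargs as instance attributes rather than dict entries, so dataclasses.asdict() does not flatten it. A standalone sketch of the same conversion, with types.SimpleNamespace standing in for the blob:

    from types import SimpleNamespace

    blob = SimpleNamespace(cursor="2024-01-01", partition="a")  # kwargs-style object
    state_value_mapping = {k: v for k, v in blob.__dict__.items()}
    assert state_value_mapping == {"cursor": "2024-01-01", "partition": "a"}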
@@ -95,11 +95,14 @@ class ConcurrentReadProcessor:
          """
          stream_name = partition.stream_name()
          self._streams_to_running_partitions[stream_name].add(partition)
+         cursor = self._stream_name_to_instance[stream_name].cursor
          if self._slice_logger.should_log_slice_message(self._logger):
              self._message_repository.emit_message(
                  self._slice_logger.create_slice_log_message(partition.to_slice())
              )
-         self._thread_pool_manager.submit(self._partition_reader.process_partition, partition)
+         self._thread_pool_manager.submit(
+             self._partition_reader.process_partition, partition, cursor
+         )

      def on_partition_complete_sentinel(
          self, sentinel: PartitionCompleteSentinel
@@ -112,26 +115,16 @@ class ConcurrentReadProcessor:
          """
          partition = sentinel.partition

-         try:
-             if sentinel.is_successful:
-                 stream = self._stream_name_to_instance[partition.stream_name()]
-                 stream.cursor.close_partition(partition)
-         except Exception as exception:
-             self._flag_exception(partition.stream_name(), exception)
-             yield AirbyteTracedException.from_exception(
-                 exception, stream_descriptor=StreamDescriptor(name=partition.stream_name())
-             ).as_sanitized_airbyte_message()
-         finally:
-             partitions_running = self._streams_to_running_partitions[partition.stream_name()]
-             if partition in partitions_running:
-                 partitions_running.remove(partition)
-                 # If all partitions were generated and this was the last one, the stream is done
-                 if (
-                     partition.stream_name() not in self._streams_currently_generating_partitions
-                     and len(partitions_running) == 0
-                 ):
-                     yield from self._on_stream_is_done(partition.stream_name())
-             yield from self._message_repository.consume_queue()
+         partitions_running = self._streams_to_running_partitions[partition.stream_name()]
+         if partition in partitions_running:
+             partitions_running.remove(partition)
+             # If all partitions were generated and this was the last one, the stream is done
+             if (
+                 partition.stream_name() not in self._streams_currently_generating_partitions
+                 and len(partitions_running) == 0
+             ):
+                 yield from self._on_stream_is_done(partition.stream_name())
+         yield from self._message_repository.consume_queue()

      def on_record(self, record: Record) -> Iterable[AirbyteMessage]:
          """
@@ -4,7 +4,7 @@
  import concurrent
  import logging
  from queue import Queue
- from typing import Iterable, Iterator, List
+ from typing import Iterable, Iterator, List, Optional

  from airbyte_cdk.models import AirbyteMessage
  from airbyte_cdk.sources.concurrent_source.concurrent_read_processor import ConcurrentReadProcessor
@@ -16,7 +16,7 @@ from airbyte_cdk.sources.concurrent_source.thread_pool_manager import ThreadPool
  from airbyte_cdk.sources.message import InMemoryMessageRepository, MessageRepository
  from airbyte_cdk.sources.streams.concurrent.abstract_stream import AbstractStream
  from airbyte_cdk.sources.streams.concurrent.partition_enqueuer import PartitionEnqueuer
- from airbyte_cdk.sources.streams.concurrent.partition_reader import PartitionReader
+ from airbyte_cdk.sources.streams.concurrent.partition_reader import PartitionLogger, PartitionReader
  from airbyte_cdk.sources.streams.concurrent.partitions.partition import Partition
  from airbyte_cdk.sources.streams.concurrent.partitions.types import (
      PartitionCompleteSentinel,
@@ -43,6 +43,7 @@ class ConcurrentSource:
          logger: logging.Logger,
          slice_logger: SliceLogger,
          message_repository: MessageRepository,
+         queue: Optional[Queue[QueueItem]] = None,
          timeout_seconds: int = DEFAULT_TIMEOUT_SECONDS,
      ) -> "ConcurrentSource":
          is_single_threaded = initial_number_of_partitions_to_generate == 1 and num_workers == 1
@@ -59,12 +60,13 @@ class ConcurrentSource:
              logger,
          )
          return ConcurrentSource(
-             threadpool,
-             logger,
-             slice_logger,
-             message_repository,
-             initial_number_of_partitions_to_generate,
-             timeout_seconds,
+             threadpool=threadpool,
+             logger=logger,
+             slice_logger=slice_logger,
+             queue=queue,
+             message_repository=message_repository,
+             initial_number_partitions_to_generate=initial_number_of_partitions_to_generate,
+             timeout_seconds=timeout_seconds,
          )

      def __init__(
@@ -72,6 +74,7 @@ class ConcurrentSource:
          threadpool: ThreadPoolManager,
          logger: logging.Logger,
          slice_logger: SliceLogger = DebugSliceLogger(),
+         queue: Optional[Queue[QueueItem]] = None,
          message_repository: MessageRepository = InMemoryMessageRepository(),
          initial_number_partitions_to_generate: int = 1,
          timeout_seconds: int = DEFAULT_TIMEOUT_SECONDS,
@@ -91,25 +94,28 @@ class ConcurrentSource:
          self._initial_number_partitions_to_generate = initial_number_partitions_to_generate
          self._timeout_seconds = timeout_seconds

+         # We set a maxsize to for the main thread to process record items when the queue size grows. This assumes that there are less
+         # threads generating partitions that than are max number of workers. If it weren't the case, we could have threads only generating
+         # partitions which would fill the queue. This number is arbitrarily set to 10_000 but will probably need to be changed given more
+         # information and might even need to be configurable depending on the source
+         self._queue = queue or Queue(maxsize=10_000)
+
      def read(
          self,
          streams: List[AbstractStream],
      ) -> Iterator[AirbyteMessage]:
          self._logger.info("Starting syncing")
-
-         # We set a maxsize to for the main thread to process record items when the queue size grows. This assumes that there are less
-         # threads generating partitions that than are max number of workers. If it weren't the case, we could have threads only generating
-         # partitions which would fill the queue. This number is arbitrarily set to 10_000 but will probably need to be changed given more
-         # information and might even need to be configurable depending on the source
-         queue: Queue[QueueItem] = Queue(maxsize=10_000)
          concurrent_stream_processor = ConcurrentReadProcessor(
              streams,
-             PartitionEnqueuer(queue, self._threadpool),
+             PartitionEnqueuer(self._queue, self._threadpool),
              self._threadpool,
              self._logger,
              self._slice_logger,
              self._message_repository,
-             PartitionReader(queue),
+             PartitionReader(
+                 self._queue,
+                 PartitionLogger(self._slice_logger, self._logger, self._message_repository),
+             ),
          )

          # Enqueue initial partition generation tasks
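The shared Queue(maxsize=10_000) above gives the source backpressure: worker threads block on put() once the queue is full, which lets the main thread drain items before more are produced. A generic illustration with plain queue.Queue (CDK types omitted):

    from queue import Queue

    q: Queue[int] = Queue(maxsize=2)
    q.put(1)
    q.put(2)
    # a third q.put(3) would now block until a consumer calls q.get()
    assert q.get() == 1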
@@ -117,7 +123,7 @@
          # Read from the queue until all partitions were generated and read
          yield from self._consume_from_queue(
-             queue,
+             self._queue,
              concurrent_stream_processor,
          )
          self._threadpool.check_for_errors_and_shutdown()
@@ -141,7 +147,10 @@
                  airbyte_message_or_record_or_exception,
                  concurrent_stream_processor,
              )
-             if concurrent_stream_processor.is_done() and queue.empty():
+             # In the event that a partition raises an exception, anything remaining in
+             # the queue will be missed because is_done() can raise an exception and exit
+             # out of this loop before remaining items are consumed
+             if queue.empty() and concurrent_stream_processor.is_done():
                  # all partitions were generated and processed. we're done here
                  break

@@ -161,5 +170,7 @@
              yield from concurrent_stream_processor.on_partition_complete_sentinel(queue_item)
          elif isinstance(queue_item, Record):
              yield from concurrent_stream_processor.on_record(queue_item)
+         elif isinstance(queue_item, AirbyteMessage):
+             yield queue_item
          else:
              raise ValueError(f"Unknown queue item type: {type(queue_item)}")
@@ -3,7 +3,11 @@
  #

  import logging
- from typing import Any, Generic, Iterator, List, Mapping, MutableMapping, Optional, Tuple
+ from dataclasses import dataclass, field
+ from queue import Queue
+ from typing import Any, ClassVar, Generic, Iterator, List, Mapping, MutableMapping, Optional, Tuple
+
+ from airbyte_protocol_dataclasses.models import Level

  from airbyte_cdk.models import (
      AirbyteCatalog,
@@ -48,6 +52,8 @@ from airbyte_cdk.sources.declarative.stream_slicers.declarative_partition_genera
      StreamSlicerPartitionGenerator,
  )
  from airbyte_cdk.sources.declarative.types import ConnectionDefinition
+ from airbyte_cdk.sources.message.concurrent_repository import ConcurrentMessageRepository
+ from airbyte_cdk.sources.message.repository import InMemoryMessageRepository, MessageRepository
  from airbyte_cdk.sources.source import TState
  from airbyte_cdk.sources.streams import Stream
  from airbyte_cdk.sources.streams.concurrent.abstract_stream import AbstractStream
@@ -58,6 +64,22 @@ from airbyte_cdk.sources.streams.concurrent.availability_strategy import (
  from airbyte_cdk.sources.streams.concurrent.cursor import ConcurrentCursor, FinalStateCursor
  from airbyte_cdk.sources.streams.concurrent.default_stream import DefaultStream
  from airbyte_cdk.sources.streams.concurrent.helpers import get_primary_key_from_stream
+ from airbyte_cdk.sources.streams.concurrent.partitions.types import QueueItem
+
+
+ @dataclass
+ class TestLimits:
+     __test__: ClassVar[bool] = False  # Tell Pytest this is not a Pytest class, despite its name
+
+     DEFAULT_MAX_PAGES_PER_SLICE: ClassVar[int] = 5
+     DEFAULT_MAX_SLICES: ClassVar[int] = 5
+     DEFAULT_MAX_RECORDS: ClassVar[int] = 100
+     DEFAULT_MAX_STREAMS: ClassVar[int] = 100
+
+     max_records: int = field(default=DEFAULT_MAX_RECORDS)
+     max_pages_per_slice: int = field(default=DEFAULT_MAX_PAGES_PER_SLICE)
+     max_slices: int = field(default=DEFAULT_MAX_SLICES)
+     max_streams: int = field(default=DEFAULT_MAX_STREAMS)


  class ConcurrentDeclarativeSource(ManifestDeclarativeSource, Generic[TState]):
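Because the DEFAULT_* attributes are ClassVars, they are not dataclass fields, so TestLimits can be built with partial overrides; a small usage sketch:

    limits = TestLimits(max_records=10)
    assert limits.max_records == 10
    assert limits.max_slices == TestLimits.DEFAULT_MAX_SLICES  # 5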
@@ -73,7 +95,9 @@ class ConcurrentDeclarativeSource(ManifestDeclarativeSource, Generic[TState]):
          source_config: ConnectionDefinition,
          debug: bool = False,
          emit_connector_builder_messages: bool = False,
-         component_factory: Optional[ModelToComponentFactory] = None,
+         migrate_manifest: bool = False,
+         normalize_manifest: bool = False,
+         limits: Optional[TestLimits] = None,
          config_path: Optional[str] = None,
          **kwargs: Any,
      ) -> None:
@@ -81,22 +105,40 @@ class ConcurrentDeclarativeSource(ManifestDeclarativeSource, Generic[TState]):
          # no longer needs to store the original incoming state. But maybe there's an edge case?
          self._connector_state_manager = ConnectorStateManager(state=state)  # type: ignore # state is always in the form of List[AirbyteStateMessage]. The ConnectorStateManager should use generics, but this can be done later

+         # We set a maxsize to for the main thread to process record items when the queue size grows. This assumes that there are less
+         # threads generating partitions that than are max number of workers. If it weren't the case, we could have threads only generating
+         # partitions which would fill the queue. This number is arbitrarily set to 10_000 but will probably need to be changed given more
+         # information and might even need to be configurable depending on the source
+         queue: Queue[QueueItem] = Queue(maxsize=10_000)
+         message_repository = InMemoryMessageRepository(
+             Level.DEBUG if emit_connector_builder_messages else Level.INFO
+         )
+
          # To reduce the complexity of the concurrent framework, we are not enabling RFR with synthetic
          # cursors. We do this by no longer automatically instantiating RFR cursors when converting
          # the declarative models into runtime components. Concurrent sources will continue to checkpoint
          # incremental streams running in full refresh.
-         component_factory = component_factory or ModelToComponentFactory(
+         component_factory = ModelToComponentFactory(
              emit_connector_builder_messages=emit_connector_builder_messages,
              disable_resumable_full_refresh=True,
+             message_repository=ConcurrentMessageRepository(queue, message_repository),
              connector_state_manager=self._connector_state_manager,
              max_concurrent_async_job_count=source_config.get("max_concurrent_async_job_count"),
+             limit_pages_fetched_per_slice=limits.max_pages_per_slice if limits else None,
+             limit_slices_fetched=limits.max_slices if limits else None,
+             disable_retries=True if limits else False,
+             disable_cache=True if limits else False,
          )

+         self._limits = limits
+
          super().__init__(
              source_config=source_config,
              config=config,
              debug=debug,
              emit_connector_builder_messages=emit_connector_builder_messages,
+             migrate_manifest=migrate_manifest,
+             normalize_manifest=normalize_manifest,
              component_factory=component_factory,
              config_path=config_path,
          )
@@ -126,6 +168,7 @@ class ConcurrentDeclarativeSource(ManifestDeclarativeSource, Generic[TState]):
              initial_number_of_partitions_to_generate=initial_number_of_partitions_to_generate,
              logger=self.logger,
              slice_logger=self._slice_logger,
+             queue=queue,
              message_repository=self.message_repository,
          )

@@ -287,6 +330,9 @@ class ConcurrentDeclarativeSource(ManifestDeclarativeSource, Generic[TState]):
                              self.message_repository,
                          ),
                          stream_slicer=declarative_stream.retriever.stream_slicer,
+                         slice_limit=self._limits.max_slices
+                         if self._limits
+                         else None,  # technically not needed because create_declarative_stream() -> create_simple_retriever() will apply the decorator. But for consistency and depending how we build create_default_stream, this may be needed later
                      )
                  else:
                      if (
@@ -318,6 +364,7 @@ class ConcurrentDeclarativeSource(ManifestDeclarativeSource, Generic[TState]):
                              self.message_repository,
                          ),
                          stream_slicer=cursor,
+                         slice_limit=self._limits.max_slices if self._limits else None,
                      )

                      concurrent_streams.append(
@@ -349,6 +396,9 @@ class ConcurrentDeclarativeSource(ManifestDeclarativeSource, Generic[TState]):
                          self.message_repository,
                      ),
                      declarative_stream.retriever.stream_slicer,
+                     slice_limit=self._limits.max_slices
+                     if self._limits
+                     else None,  # technically not needed because create_declarative_stream() -> create_simple_retriever() will apply the decorator. But for consistency and depending how we build create_default_stream, this may be needed later
                  )

                  final_state_cursor = FinalStateCursor(
@@ -410,6 +460,7 @@ class ConcurrentDeclarativeSource(ManifestDeclarativeSource, Generic[TState]):
                  self.message_repository,
              ),
              perpartition_cursor,
+             slice_limit=self._limits.max_slices if self._limits else None,
          )

          concurrent_streams.append(
@@ -622,6 +622,10 @@ SCHEMA_TRANSFORMER_TYPE_MAPPING = {
      SchemaNormalizationModel.Default: TransformConfig.DefaultSchemaNormalization,
  }

+ # Ideally this should use the value defined in ConcurrentDeclarativeSource, but
+ # this would be a circular import
+ MAX_SLICES = 5
+

  class ModelToComponentFactory:
      EPOCH_DATETIME_FORMAT = "%s"
@@ -2112,6 +2116,15 @@ class ModelToComponentFactory:
          stream_slicer: Optional[PartitionRouter],
          config: Config,
      ) -> Optional[StreamSlicer]:
+         state_transformations = (
+             [
+                 self._create_component_from_model(state_migration, config, declarative_stream=model)
+                 for state_migration in model.state_migrations
+             ]
+             if model.state_migrations
+             else []
+         )
+
          if model.incremental_sync and stream_slicer:
              if model.retriever.type == "AsyncRetriever":
                  stream_name = model.name or ""
@@ -2119,16 +2132,6 @@
                  stream_state = self._connector_state_manager.get_stream_state(
                      stream_name, stream_namespace
                  )
-                 state_transformations = (
-                     [
-                         self._create_component_from_model(
-                             state_migration, config, declarative_stream=model
-                         )
-                         for state_migration in model.state_migrations
-                     ]
-                     if model.state_migrations
-                     else []
-                 )

                  return self.create_concurrent_cursor_from_perpartition_cursor(  # type: ignore # This is a known issue that we are creating and returning a ConcurrentCursor which does not technically implement the (low-code) StreamSlicer. However, (low-code) StreamSlicer and ConcurrentCursor both implement StreamSlicer.stream_slices() which is the primary method needed for checkpointing
                      state_manager=self._connector_state_manager,
@@ -2172,7 +2175,7 @@
                      stream_name=model.name or "",
                      stream_namespace=None,
                      config=config or {},
-                     stream_state_migrations=model.state_migrations,
+                     stream_state_migrations=state_transformations,
                  )
              return self._create_component_from_model(model=model.incremental_sync, config=config)  # type: ignore[no-any-return] # Will be created Cursor as stream_slicer_model is model.incremental_sync
          return None
@@ -2187,19 +2190,15 @@
              stream_name=model.name or "", namespace=None
          )

-         if model.incremental_sync and stream_slicer:
-             # FIXME there is a discrepancy where this logic is applied on the create_*_cursor methods for
-             # ConcurrentCursor but it is applied outside of create_concurrent_cursor_from_perpartition_cursor
-             if model.state_migrations:
-                 state_transformations = [
-                     self._create_component_from_model(
-                         state_migration, config, declarative_stream=model
-                     )
-                     for state_migration in model.state_migrations
-                 ]
-             else:
-                 state_transformations = []
+         if model.state_migrations:
+             state_transformations = [
+                 self._create_component_from_model(state_migration, config, declarative_stream=model)
+                 for state_migration in model.state_migrations
+             ]
+         else:
+             state_transformations = []

+         if model.incremental_sync and stream_slicer:
              return self.create_concurrent_cursor_from_perpartition_cursor(  # type: ignore # This is a known issue that we are creating and returning a ConcurrentCursor which does not technically implement the (low-code) StreamSlicer. However, (low-code) StreamSlicer and ConcurrentCursor both implement StreamSlicer.stream_slices() which is the primary method needed for checkpointing
                  state_manager=self._connector_state_manager,
                  model_type=DatetimeBasedCursorModel,
@@ -2220,7 +2219,7 @@
                  stream_name=model.name or "",
                  stream_namespace=None,
                  config=config or {},
-                 stream_state_migrations=model.state_migrations,
+                 stream_state_migrations=state_transformations,
              )
          elif type(model.incremental_sync) == DatetimeBasedCursorModel:
              return self.create_concurrent_cursor_from_datetime_based_cursor(  # type: ignore # This is a known issue that we are creating and returning a ConcurrentCursor which does not technically implement the (low-code) StreamSlicer. However, (low-code) StreamSlicer and ConcurrentCursor both implement StreamSlicer.stream_slices() which is the primary method needed for checkpointing
@@ -2229,7 +2228,7 @@
                  stream_name=model.name or "",
                  stream_namespace=None,
                  config=config or {},
-                 stream_state_migrations=model.state_migrations,
+                 stream_state_migrations=state_transformations,
                  attempt_to_create_cursor_if_not_provided=True,
              )
          else:
@@ -168,7 +168,13 @@ class HttpRequester(Requester):
              next_page_token=next_page_token,
          )

-         full_url = self._join_url(url_base, path) if url_base else url + path if path else url
+         full_url = (
+             self._join_url(url_base, path)
+             if url_base
+             else self._join_url(url, path)
+             if path
+             else url
+         )

          return full_url

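Chained conditional expressions group to the right, so the old one-liner already evaluated url_base first; the parenthesized form only makes that grouping explicit, and the substantive change is routing the no-url_base case through self._join_url(url, path) instead of raw `url + path`. The grouping rule in isolation:

    x = "A" if False else "B" if True else "C"
    assert x == "B"  # i.e. "A" if False else ("B" if True else "C")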
@@ -1,8 +1,11 @@
- # Copyright (c) 2024 Airbyte, Inc., all rights reserved.
+ # Copyright (c) 2025 Airbyte, Inc., all rights reserved.

- from typing import Any, Iterable, Mapping, Optional
+ from typing import Any, Iterable, Mapping, Optional, cast

  from airbyte_cdk.sources.declarative.retrievers import Retriever
+ from airbyte_cdk.sources.declarative.stream_slicers.stream_slicer_test_read_decorator import (
+     StreamSlicerTestReadDecorator,
+ )
  from airbyte_cdk.sources.message import MessageRepository
  from airbyte_cdk.sources.streams.concurrent.partitions.partition import Partition
  from airbyte_cdk.sources.streams.concurrent.partitions.partition_generator import PartitionGenerator
@@ -83,10 +86,23 @@ class DeclarativePartition(Partition):

  class StreamSlicerPartitionGenerator(PartitionGenerator):
      def __init__(
-         self, partition_factory: DeclarativePartitionFactory, stream_slicer: StreamSlicer
+         self,
+         partition_factory: DeclarativePartitionFactory,
+         stream_slicer: StreamSlicer,
+         slice_limit: Optional[int] = None,
      ) -> None:
          self._partition_factory = partition_factory
-         self._stream_slicer = stream_slicer
+
+         if slice_limit:
+             self._stream_slicer = cast(
+                 StreamSlicer,
+                 StreamSlicerTestReadDecorator(
+                     wrapped_slicer=stream_slicer,
+                     maximum_number_of_slices=slice_limit,
+                 ),
+             )
+         else:
+             self._stream_slicer = stream_slicer

      def generate(self) -> Iterable[Partition]:
          for stream_slice in self._stream_slicer.stream_slices():
@@ -4,10 +4,10 @@

  from dataclasses import dataclass
  from itertools import islice
- from typing import Any, Iterable, Mapping, Optional, Union
+ from typing import Any, Iterable

  from airbyte_cdk.sources.streams.concurrent.partitions.stream_slicer import StreamSlicer
- from airbyte_cdk.sources.types import StreamSlice, StreamState
+ from airbyte_cdk.sources.types import StreamSlice


  @dataclass
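StreamSlicerTestReadDecorator, which StreamSlicerPartitionGenerator wraps above, caps the number of emitted slices with itertools.islice, which is why that import is kept; the core mechanic in isolation:

    from itertools import islice

    limited = list(islice(iter(range(100)), 5))
    assert limited == [0, 1, 2, 3, 4]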
@@ -0,0 +1,43 @@
+ # Copyright (c) 2025 Airbyte, Inc., all rights reserved.
+
+ from queue import Queue
+ from typing import Callable, Iterable
+
+ from airbyte_cdk.models import AirbyteMessage, Level
+ from airbyte_cdk.sources.message.repository import LogMessage, MessageRepository
+ from airbyte_cdk.sources.streams.concurrent.partitions.types import QueueItem
+
+
+ class ConcurrentMessageRepository(MessageRepository):
+     """
+     Message repository that immediately loads messages onto the queue processed on the
+     main thread. This ensures that messages are processed in the correct order they are
+     received. The InMemoryMessageRepository implementation does not have guaranteed
+     ordering since whether to process the main thread vs. partitions is non-deterministic
+     and there can be a lag between reading the main-thread and consuming messages on the
+     MessageRepository.
+
+     This is particularly important for the connector builder which relies on grouping
+     of messages to organize request/response, pages, and partitions.
+     """
+
+     def __init__(self, queue: Queue[QueueItem], message_repository: MessageRepository):
+         self._queue = queue
+         self._decorated_message_repository = message_repository
+
+     def emit_message(self, message: AirbyteMessage) -> None:
+         self._decorated_message_repository.emit_message(message)
+         for message in self._decorated_message_repository.consume_queue():
+             self._queue.put(message)
+
+     def log_message(self, level: Level, message_provider: Callable[[], LogMessage]) -> None:
+         self._decorated_message_repository.log_message(level, message_provider)
+         for message in self._decorated_message_repository.consume_queue():
+             self._queue.put(message)
+
+     def consume_queue(self) -> Iterable[AirbyteMessage]:
+         """
+         This method shouldn't need to be called because as part of emit_message() we are already
+         loading messages onto the queue processed on the main thread.
+         """
+         yield from []
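A usage sketch for the decorator above, pairing it with the InMemoryMessageRepository it wraps in ConcurrentDeclarativeSource (message construction elided):

    from queue import Queue

    from airbyte_cdk.sources.message.concurrent_repository import ConcurrentMessageRepository
    from airbyte_cdk.sources.message.repository import InMemoryMessageRepository

    queue: Queue = Queue(maxsize=10_000)
    repo = ConcurrentMessageRepository(queue, InMemoryMessageRepository())
    # repo.emit_message(message) now lands on `queue` immediately, preserving ordering
    # for the main thread instead of waiting for a later consume_queue() call.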
@@ -1,14 +1,45 @@
- #
- # Copyright (c) 2023 Airbyte, Inc., all rights reserved.
- #
+ # Copyright (c) 2025 Airbyte, Inc., all rights reserved.
+
+ import logging
  from queue import Queue
+ from typing import Optional

  from airbyte_cdk.sources.concurrent_source.stream_thread_exception import StreamThreadException
+ from airbyte_cdk.sources.message.repository import MessageRepository
+ from airbyte_cdk.sources.streams.concurrent.cursor import Cursor
  from airbyte_cdk.sources.streams.concurrent.partitions.partition import Partition
  from airbyte_cdk.sources.streams.concurrent.partitions.types import (
      PartitionCompleteSentinel,
      QueueItem,
  )
+ from airbyte_cdk.sources.utils.slice_logger import SliceLogger
+
+
+ # Since moving all the connector builder workflow to the concurrent CDK which required correct ordering
+ # of grouping log messages onto the main write thread using the ConcurrentMessageRepository, this
+ # separate flow and class that was used to log slices onto this partition's message_repository
+ # should just be replaced by emitting messages directly onto the repository instead of an intermediary.
+ class PartitionLogger:
+     """
+     Helper class that provides a mechanism for passing a log message onto the current
+     partitions message repository
+     """
+
+     def __init__(
+         self,
+         slice_logger: SliceLogger,
+         logger: logging.Logger,
+         message_repository: MessageRepository,
+     ):
+         self._slice_logger = slice_logger
+         self._logger = logger
+         self._message_repository = message_repository
+
+     def log(self, partition: Partition) -> None:
+         if self._slice_logger.should_log_slice_message(self._logger):
+             self._message_repository.emit_message(
+                 self._slice_logger.create_slice_log_message(partition.to_slice())
+             )


  class PartitionReader:
@@ -18,13 +49,16 @@ class PartitionReader:

      _IS_SUCCESSFUL = True

-     def __init__(self, queue: Queue[QueueItem]) -> None:
+     def __init__(
+         self, queue: Queue[QueueItem], partition_logger: Optional[PartitionLogger] = None
+     ) -> None:
          """
          :param queue: The queue to put the records in.
          """
          self._queue = queue
+         self._partition_logger = partition_logger

-     def process_partition(self, partition: Partition) -> None:
+     def process_partition(self, partition: Partition, cursor: Cursor) -> None:
          """
          Process a partition and put the records in the output queue.
          When all the partitions are added to the queue, a sentinel is added to the queue to indicate that all the partitions have been generated.
@@ -37,8 +71,13 @@ class PartitionReader:
          :return: None
          """
          try:
+             if self._partition_logger:
+                 self._partition_logger.log(partition)
+
              for record in partition.read():
                  self._queue.put(record)
+                 cursor.observe(record)
+             cursor.close_partition(partition)
              self._queue.put(PartitionCompleteSentinel(partition, self._IS_SUCCESSFUL))
          except Exception as e:
              self._queue.put(StreamThreadException(e, partition.stream_name()))
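process_partition() now owns the cursor lifecycle on the worker thread: each record is observed as it is enqueued, the partition is closed only after a fully successful read, and any failure surfaces as a StreamThreadException on the queue (previously close_partition() happened later, in ConcurrentReadProcessor.on_partition_complete_sentinel(), as removed above). A condensed, self-contained sketch of that ordering with a stub cursor:

    from queue import Queue

    class StubCursor:
        def __init__(self) -> None:
            self.observed: list = []
            self.closed = False

        def observe(self, record) -> None:
            self.observed.append(record)

        def close_partition(self, partition) -> None:
            self.closed = True

    q: Queue = Queue()
    cursor = StubCursor()
    for record in ["r1", "r2"]:  # stands in for partition.read()
        q.put(record)
        cursor.observe(record)
    cursor.close_partition(None)  # only reached if every record was read successfully
    assert cursor.closed and cursor.observed == ["r1", "r2"]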
@@ -4,6 +4,7 @@

  from typing import Any, Union

+ from airbyte_cdk.models import AirbyteMessage
  from airbyte_cdk.sources.concurrent_source.partition_generation_completed_sentinel import (
      PartitionGenerationCompletedSentinel,
  )
@@ -34,5 +35,10 @@ class PartitionCompleteSentinel:
  Typedef representing the items that can be added to the ThreadBasedConcurrentStream
  """
  QueueItem = Union[
-     Record, Partition, PartitionCompleteSentinel, PartitionGenerationCompletedSentinel, Exception
+     Record,
+     Partition,
+     PartitionCompleteSentinel,
+     PartitionGenerationCompletedSentinel,
+     Exception,
+     AirbyteMessage,
  ]
@@ -11,6 +11,10 @@ from airbyte_cdk.models import AirbyteLogMessage, AirbyteMessage, Level
  from airbyte_cdk.models import Type as MessageType


+ # Once everything runs on the concurrent CDK and we've cleaned up the legacy flows, we should try to remove
+ # this class and write messages directly to the message_repository instead of through the logger because for
+ # cases like the connector builder where ordering of messages is important, using the logger can cause
+ # messages to be grouped out of order. Alas work for a different day.
  class SliceLogger(ABC):
      """
      SliceLogger is an interface that allows us to log slices of data in a uniform way.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: airbyte-cdk
- Version: 6.60.5
+ Version: 6.60.7
  Summary: A framework for writing Airbyte Connectors.
  Home-page: https://airbyte.com
  License: MIT
@@ -15,11 +15,11 @@ airbyte_cdk/config_observation.py,sha256=7SSPxtN0nXPkm4euGNcTTr1iLbwUL01jy-24V1H
  airbyte_cdk/connector.py,sha256=N6TUlrZOMjLAI85JrNAKkfyTqnO5xfBCw4oEfgjJd9o,4254
  airbyte_cdk/connector_builder/README.md,sha256=Hw3wvVewuHG9-QgsAq1jDiKuLlStDxKBz52ftyNRnBw,1665
  airbyte_cdk/connector_builder/__init__.py,sha256=4Hw-PX1-VgESLF16cDdvuYCzGJtHntThLF4qIiULWeo,61
- airbyte_cdk/connector_builder/connector_builder_handler.py,sha256=ySszXKleG7IGtxkwu2q9jczcwAAhZziLVzNAKtUvGY8,6664
- airbyte_cdk/connector_builder/main.py,sha256=j1pP5N8RsnvQZ4iYxhLdLEHsJ5Ui7IVFBUi6wYMGBkM,3839
+ airbyte_cdk/connector_builder/connector_builder_handler.py,sha256=jRtSfj3aca006-01Hax-THJpuoysd8QR6JPGnr8q1Xg,6371
+ airbyte_cdk/connector_builder/main.py,sha256=F9bmdz252pvGXAdDgPwIOPw3fl5fwTU41uG49BQyItI,3883
  airbyte_cdk/connector_builder/models.py,sha256=9pIZ98LW_d6fRS39VdnUOf3cxGt4TkC5MJ0_OrzcCRk,1578
  airbyte_cdk/connector_builder/test_reader/__init__.py,sha256=iTwBMoI9vaJotEgpqZbFjlxRcbxXYypSVJ9YxeHk7wc,120
- airbyte_cdk/connector_builder/test_reader/helpers.py,sha256=vqoHpZeQ0BLIw2NiTNGXr0euA8gI_X0pcNRcHOv8sHM,27942
+ airbyte_cdk/connector_builder/test_reader/helpers.py,sha256=5GSrK9EVBDm5dwtudVbA-73EHh53-niRA-oj8eQVFHI,29236
  airbyte_cdk/connector_builder/test_reader/message_grouper.py,sha256=LDNl-xFQwA4RsUpn7684KbWaVH-SWWBIwhHvIgduLTE,7090
  airbyte_cdk/connector_builder/test_reader/reader.py,sha256=3jLy3tUUHkG1rmGWrZuo4SmPYNVD9oiAqy8mdaUwzvo,21301
  airbyte_cdk/connector_builder/test_reader/types.py,sha256=hPZG3jO03kBaPyW94NI3JHRS1jxXGSNBcN1HFzOxo5Y,2528
@@ -57,8 +57,8 @@ airbyte_cdk/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  airbyte_cdk/sources/__init__.py,sha256=45J83QsFH3Wky3sVapZWg4C58R_i1thm61M06t2c1AQ,1156
  airbyte_cdk/sources/abstract_source.py,sha256=50vxEBRByiNhT4WJkiFvgM-C6PWqKSJgvuNC_aeg2cw,15547
  airbyte_cdk/sources/concurrent_source/__init__.py,sha256=3D_RJsxQfiLboSCDdNei1Iv-msRp3DXsas6E9kl7dXc,386
- airbyte_cdk/sources/concurrent_source/concurrent_read_processor.py,sha256=P_GA5QayzehCf0ksUbEbGoNixBnauzsepv-0ICzhH4w,12691
- airbyte_cdk/sources/concurrent_source/concurrent_source.py,sha256=P8B6EcLKaSstfAD9kDZsTJ0q8vRmdFrxLt-zOA5_By0,7737
+ airbyte_cdk/sources/concurrent_source/concurrent_read_processor.py,sha256=iEm3st8ds4n6_Bn8J2hLK3bQuE5J6ihUitIexX4ADGQ,12240
+ airbyte_cdk/sources/concurrent_source/concurrent_source.py,sha256=Npc18v1ERfQyTjBwIDErTtoBtY11GMml5o_jU3ppRA0,8457
  airbyte_cdk/sources/concurrent_source/concurrent_source_adapter.py,sha256=f9PIRPWn2tXu0-bxVeYHL2vYdqCzZ_kgpHg5_Ep-cfQ,6103
  airbyte_cdk/sources/concurrent_source/partition_generation_completed_sentinel.py,sha256=z1t-rAZBsqVidv2fpUlPHE9JgyXsITuGk4AMu96mXSQ,696
  airbyte_cdk/sources/concurrent_source/stream_thread_exception.py,sha256=-q6mG2145HKQ28rZGD1bUmjPlIZ1S7-Yhewl8Ntu6xI,764
@@ -86,7 +86,7 @@ airbyte_cdk/sources/declarative/checks/check_stream.py,sha256=QeExVmpSYjr_CnghHu
  airbyte_cdk/sources/declarative/checks/connection_checker.py,sha256=MBRJo6WJlZQHpIfOGaNOkkHUmgUl_4wDM6VPo41z5Ss,1383
  airbyte_cdk/sources/declarative/concurrency_level/__init__.py,sha256=5XUqrmlstYlMM0j6crktlKQwALek0uiz2D3WdM46MyA,191
  airbyte_cdk/sources/declarative/concurrency_level/concurrency_level.py,sha256=YIwCTCpOr_QSNW4ltQK0yUGWInI8PKNY216HOOegYLk,2101
- airbyte_cdk/sources/declarative/concurrent_declarative_source.py,sha256=IwKlf20G5C4j-am9FrLhRN0qv61A5rU097xPnnFmt5U,27022
+ airbyte_cdk/sources/declarative/concurrent_declarative_source.py,sha256=ntVyaOmYt3b7rFUjmloDwE2Vwr9kAWMr1HryZicEnwY,30101
  airbyte_cdk/sources/declarative/datetime/__init__.py,sha256=4Hw-PX1-VgESLF16cDdvuYCzGJtHntThLF4qIiULWeo,61
  airbyte_cdk/sources/declarative/datetime/datetime_parser.py,sha256=_zGNGq31RNy_0QBLt_EcTvgPyhj7urPdx6oA3M5-r3o,3150
  airbyte_cdk/sources/declarative/datetime/min_max_datetime.py,sha256=0BHBtDNQZfvwM45-tY5pNlTcKAFSGGNxemoi0Jic-0E,5785
@@ -141,7 +141,7 @@ airbyte_cdk/sources/declarative/parsers/custom_exceptions.py,sha256=wnRUP0Xeru9R
  airbyte_cdk/sources/declarative/parsers/manifest_component_transformer.py,sha256=2UdpCz3yi7ISZTyqkQXSSy3dMxeyOWqV7OlAS5b9GVg,11568
  airbyte_cdk/sources/declarative/parsers/manifest_normalizer.py,sha256=EtKjS9c94yNp3AwQC8KUCQaAYW5T3zvFYxoWYjc_buI,19729
  airbyte_cdk/sources/declarative/parsers/manifest_reference_resolver.py,sha256=pJmg78vqE5VfUrF_KJnWjucQ4k9IWFULeAxHCowrHXE,6806
- airbyte_cdk/sources/declarative/parsers/model_to_component_factory.py,sha256=rKp3chyB0iil4j-ekGmBA0Y2T7XbVM34pqpN3jNdvYc,181864
+ airbyte_cdk/sources/declarative/parsers/model_to_component_factory.py,sha256=ONwdHGA5j4g-Iv2LRzcpXXpHf1Zm9ofgLcXYDyDadYk,181582
  airbyte_cdk/sources/declarative/partition_routers/__init__.py,sha256=TBC9AkGaUqHm2IKHMPN6punBIcY5tWGULowcLoAVkfw,1109
  airbyte_cdk/sources/declarative/partition_routers/async_job_partition_router.py,sha256=VelO7zKqKtzMJ35jyFeg0ypJLQC0plqqIBNXoBW1G2E,3001
  airbyte_cdk/sources/declarative/partition_routers/cartesian_product_stream_slicer.py,sha256=c5cuVFM6NFkuQqG8Z5IwkBuwDrvXZN1CunUOM_L0ezg,6892
@@ -166,7 +166,7 @@ airbyte_cdk/sources/declarative/requesters/error_handlers/default_http_response_
  airbyte_cdk/sources/declarative/requesters/error_handlers/error_handler.py,sha256=Tan66odx8VHzfdyyXMQkXz2pJYksllGqvxmpoajgcK4,669
  airbyte_cdk/sources/declarative/requesters/error_handlers/http_response_filter.py,sha256=E-fQbt4ShfxZVoqfnmOx69C6FUPWZz8BIqI3DN9Kcjs,7935
  airbyte_cdk/sources/declarative/requesters/http_job_repository.py,sha256=pVzIDdfGs1eAZo9F6zeFYKlEmEqanhNvZLKFCHkdmNo,14348
- airbyte_cdk/sources/declarative/requesters/http_requester.py,sha256=1qUqNxJ6I_4uSkW4KYXEtygVioURIEmiaDU8GMl_Jcs,18833
+ airbyte_cdk/sources/declarative/requesters/http_requester.py,sha256=cktdjnOu-o98smdCdrWC361iWNamPTZ-csT32OFh00c,18920
  airbyte_cdk/sources/declarative/requesters/paginators/__init__.py,sha256=uArbKs9JKNCt7t9tZoeWwjDpyI1HoPp29FNW0JzvaEM,644
  airbyte_cdk/sources/declarative/requesters/paginators/default_paginator.py,sha256=SB-Af3CRb4mJwhm4EKNxzl_PK2w5QS4tqrSNNMO2IV4,12760
  airbyte_cdk/sources/declarative/requesters/paginators/no_pagination.py,sha256=b1-zKxYOUMHn7ahdWpzKEzfG4A7s_WQWy-vzRqZWzME,2152
@@ -220,9 +220,9 @@ airbyte_cdk/sources/declarative/schema/schema_loader.py,sha256=kjt8v0N5wWKA5zyLn
  airbyte_cdk/sources/declarative/spec/__init__.py,sha256=9FYO-fVOclrwjAW4qwRTbZRVopTc9rOaauAJfThdNCQ,177
  airbyte_cdk/sources/declarative/spec/spec.py,sha256=SwL_pfXZgcLYLJY-MAeFMHug9oYh2tOWjgG0C3DoLOY,3602
  airbyte_cdk/sources/declarative/stream_slicers/__init__.py,sha256=UX-cP_C-9FIFFPL9z8nuxu_rglssRsMOqQmQHN8FLB8,341
- airbyte_cdk/sources/declarative/stream_slicers/declarative_partition_generator.py,sha256=cjKGm4r438dd1GxrFHJ4aYrdzG2bkncnwaWxAwlXR3M,3585
+ airbyte_cdk/sources/declarative/stream_slicers/declarative_partition_generator.py,sha256=GTG1nqXVxbT7wnU-Q9hftlcWAqj-y4zR2KCnLWNIcv4,4084
  airbyte_cdk/sources/declarative/stream_slicers/stream_slicer.py,sha256=SOkIPBi2Wu7yxIvA15yFzUAB95a3IzA8LPq5DEqHQQc,725
- airbyte_cdk/sources/declarative/stream_slicers/stream_slicer_test_read_decorator.py,sha256=aUSleOw9elq3-5TaDUvp7H8W-2qUKqpr__kaJd8-ZFA,983
+ airbyte_cdk/sources/declarative/stream_slicers/stream_slicer_test_read_decorator.py,sha256=4vit5ADyhoZnd1psRVeM5jdySYzhjwspLVXxh8vt1M8,944
  airbyte_cdk/sources/declarative/transformations/__init__.py,sha256=CPJ8TlMpiUmvG3624VYu_NfTzxwKcfBjM2Q2wJ7fkSA,919
  airbyte_cdk/sources/declarative/transformations/add_fields.py,sha256=Eg1jQtRObgzxbtySTQs5uEZIjEklsoHFxYSPf78x6Ng,5420
  airbyte_cdk/sources/declarative/transformations/config_transformations/__init__.py,sha256=GaU3ezFa5opeDgdlNohX6TXsWJlOD2jOfJXQWeQCh7E,263
@@ -300,6 +300,7 @@ airbyte_cdk/sources/file_based/types.py,sha256=INxG7OPnkdUP69oYNKMAbwhvV1AGvLRHs
  airbyte_cdk/sources/http_config.py,sha256=OBZeuyFilm6NlDlBhFQvHhTWabEvZww6OHDIlZujIS0,730
  airbyte_cdk/sources/http_logger.py,sha256=H93kPAujHhPmXNX0JSFG3D-SL6yEFA5PtKot9Hu3TYA,1690
  airbyte_cdk/sources/message/__init__.py,sha256=y98fzHsQBwXwp2zEa4K5mxGFqjnx9lDn9O0pTk-VS4U,395
+ airbyte_cdk/sources/message/concurrent_repository.py,sha256=axSiL2Rm1z6ENtx_ZZ7B_UfLNesGX-lBMQLJvIlmCN4,1999
  airbyte_cdk/sources/message/repository.py,sha256=SG7avgti_-dj8FcRHTTrhgLLGJbElv14_zIB0SH8AIc,4763
  airbyte_cdk/sources/source.py,sha256=KIBBH5VLEb8BZ8B9aROlfaI6OLoJqKDPMJ10jkAR7nk,3611
  airbyte_cdk/sources/specs/transfer_modes.py,sha256=sfSVO0yT6SaGKN5_TP0Nl_ftG0yPhecaBv0WkhAEXA8,932
@@ -325,12 +326,12 @@ airbyte_cdk/sources/streams/concurrent/default_stream.py,sha256=3SBjFa1z955pSE_2
  airbyte_cdk/sources/streams/concurrent/exceptions.py,sha256=JOZ446MCLpmF26r9KfS6OO_6rGjcjgJNZdcw6jccjEI,468
  airbyte_cdk/sources/streams/concurrent/helpers.py,sha256=S6AW8TgIASCZ2UuUcQLE8OzgYUHWt2-KPOvNPwnQf-Q,1596
  airbyte_cdk/sources/streams/concurrent/partition_enqueuer.py,sha256=2t64b_z9cEPmlHZnjSiMTO8PEtEdiAJDG0JcYOtUqAE,3363
- airbyte_cdk/sources/streams/concurrent/partition_reader.py,sha256=0TIrjbTzYJGdA0AZUzbeIKr0iHbawnoEKVl7bWxOFZY,1760
+ airbyte_cdk/sources/streams/concurrent/partition_reader.py,sha256=0QKCJYBuCqiuHp0GqcOTvzsJS1Sp7qTNfjXESvs71Vk,3417
  airbyte_cdk/sources/streams/concurrent/partitions/__init__.py,sha256=4Hw-PX1-VgESLF16cDdvuYCzGJtHntThLF4qIiULWeo,61
  airbyte_cdk/sources/streams/concurrent/partitions/partition.py,sha256=CmaRcKn8y118No3qvbRV9DBeAUKv17lrVgloR4Y9TwU,1490
  airbyte_cdk/sources/streams/concurrent/partitions/partition_generator.py,sha256=_ymkkBr71_qt1fW0_MUqw96OfNBkeJngXQ09yolEDHw,441
  airbyte_cdk/sources/streams/concurrent/partitions/stream_slicer.py,sha256=zQPikLIt0yhP9EwZaPglRTIqFCauo4pSsJk_7kYq9Aw,1406
- airbyte_cdk/sources/streams/concurrent/partitions/types.py,sha256=frPVvHtY7vLxpGEbMQzNvF1Y52ZVyct9f1DDhGoRjwY,1166
+ airbyte_cdk/sources/streams/concurrent/partitions/types.py,sha256=EdPHmMcyZhRYtO2cniIQAQpzPfGCBpzmAJ3NTVS4qbo,1249
  airbyte_cdk/sources/streams/concurrent/state_converters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  airbyte_cdk/sources/streams/concurrent/state_converters/abstract_stream_state_converter.py,sha256=CCxCbgvUugxiWpHX8-dkkJHWKDjL5iwiIbOUj8KIJ9c,7079
  airbyte_cdk/sources/streams/concurrent/state_converters/datetime_stream_state_converter.py,sha256=x8MLm1pTMfLNHvMF3P1ixYkYt_xjpbaIwnvhY_ofdBo,8076
@@ -364,7 +365,7 @@ airbyte_cdk/sources/utils/casing.py,sha256=QC-gV1O4e8DR4-bhdXieUPKm_JamzslVyfABL
  airbyte_cdk/sources/utils/files_directory.py,sha256=z8Dmr-wkL1sAqdwCST4MBUFAyMHPD2cJIzVdAuCynp8,391
  airbyte_cdk/sources/utils/record_helper.py,sha256=7wL-pDYrBpcmZHa8ORtiSOqBZJEZI5hdl2dA1RYiatk,2029
  airbyte_cdk/sources/utils/schema_helpers.py,sha256=bR3I70-e11S6B8r6VK-pthQXtcYrXojgXFvuK7lRrpg,8545
- airbyte_cdk/sources/utils/slice_logger.py,sha256=qWWeFLAvigFz0b4O1_O3QDM1cy8PqZAMMgVPR2hEeb8,1778
+ airbyte_cdk/sources/utils/slice_logger.py,sha256=M1TvcYGMftXR841XdJmeEpKpQqrdOD5X-qsspfAMKMs,2168
  airbyte_cdk/sources/utils/transform.py,sha256=0LOvIJg1vmg_70AiAVe-YHMr-LHrqEuxg9cm1BnYPDM,11725
  airbyte_cdk/sources/utils/types.py,sha256=41ZQR681t5TUnOScij58d088sb99klH_ZENFcaYro_g,175
  airbyte_cdk/sql/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -424,9 +425,9 @@ airbyte_cdk/utils/slice_hasher.py,sha256=EDxgROHDbfG-QKQb59m7h_7crN1tRiawdf5uU7G
  airbyte_cdk/utils/spec_schema_transformations.py,sha256=-5HTuNsnDBAhj-oLeQXwpTGA0HdcjFOf2zTEMUTTg_Y,816
  airbyte_cdk/utils/stream_status_utils.py,sha256=ZmBoiy5HVbUEHAMrUONxZvxnvfV9CesmQJLDTAIWnWw,1171
  airbyte_cdk/utils/traced_exception.py,sha256=C8uIBuCL_E4WnBAOPSxBicD06JAldoN9fGsQDp463OY,6292
- airbyte_cdk-6.60.5.dist-info/LICENSE.txt,sha256=Wfe61S4BaGPj404v8lrAbvhjYR68SHlkzeYrg3_bbuM,1051
- airbyte_cdk-6.60.5.dist-info/LICENSE_SHORT,sha256=aqF6D1NcESmpn-cqsxBtszTEnHKnlsp8L4x9wAh3Nxg,55
- airbyte_cdk-6.60.5.dist-info/METADATA,sha256=yahbZHg8wSRJzuHofj2niQmhXaW5dxJftOi6tchY3F8,6477
- airbyte_cdk-6.60.5.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
- airbyte_cdk-6.60.5.dist-info/entry_points.txt,sha256=AKWbEkHfpzzk9nF9tqBUaw1MbvTM4mGtEzmZQm0ZWvM,139
- airbyte_cdk-6.60.5.dist-info/RECORD,,
+ airbyte_cdk-6.60.7.dist-info/LICENSE.txt,sha256=Wfe61S4BaGPj404v8lrAbvhjYR68SHlkzeYrg3_bbuM,1051
+ airbyte_cdk-6.60.7.dist-info/LICENSE_SHORT,sha256=aqF6D1NcESmpn-cqsxBtszTEnHKnlsp8L4x9wAh3Nxg,55
+ airbyte_cdk-6.60.7.dist-info/METADATA,sha256=8UFDCTLKAEkz9_HGFQhGgKVW3I0w-oPneO4SHQIHU24,6477
+ airbyte_cdk-6.60.7.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+ airbyte_cdk-6.60.7.dist-info/entry_points.txt,sha256=AKWbEkHfpzzk9nF9tqBUaw1MbvTM4mGtEzmZQm0ZWvM,139
+ airbyte_cdk-6.60.7.dist-info/RECORD,,