airbyte-cdk 6.41.4__py3-none-any.whl → 6.41.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- airbyte_cdk/sources/declarative/async_job/job.py +6 -0
- airbyte_cdk/sources/declarative/async_job/job_orchestrator.py +18 -18
- airbyte_cdk/sources/declarative/async_job/job_tracker.py +22 -6
- airbyte_cdk/sources/declarative/declarative_component_schema.yaml +22 -12
- airbyte_cdk/sources/declarative/models/declarative_component_schema.py +4 -2
- airbyte_cdk/sources/declarative/parsers/model_to_component_factory.py +4 -1
- {airbyte_cdk-6.41.4.dist-info → airbyte_cdk-6.41.6.dist-info}/METADATA +1 -1
- {airbyte_cdk-6.41.4.dist-info → airbyte_cdk-6.41.6.dist-info}/RECORD +12 -12
- {airbyte_cdk-6.41.4.dist-info → airbyte_cdk-6.41.6.dist-info}/LICENSE.txt +0 -0
- {airbyte_cdk-6.41.4.dist-info → airbyte_cdk-6.41.6.dist-info}/LICENSE_SHORT +0 -0
- {airbyte_cdk-6.41.4.dist-info → airbyte_cdk-6.41.6.dist-info}/WHEEL +0 -0
- {airbyte_cdk-6.41.4.dist-info → airbyte_cdk-6.41.6.dist-info}/entry_points.txt +0 -0
airbyte_cdk/sources/declarative/async_job/job.py

@@ -34,6 +34,12 @@ class AsyncJob:
 
     def status(self) -> AsyncJobStatus:
         if self._timer.has_timed_out():
+            # TODO: we should account the fact that,
+            # certain APIs could send the `Timeout` status,
+            # thus we should not return `Timeout` in that case,
+            # but act based on the scenario.
+
+            # the default behavior is to return `Timeout` status and retry.
             return AsyncJobStatus.TIMED_OUT
         return self._status
 
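In effect, `TIMED_OUT` becomes a retryable status surfaced by `status()` once the job's timer expires; the orchestrator changes below act on it. A minimal consumer-side sketch (`job` is a placeholder `AsyncJob`, not a value from this diff):

    from airbyte_cdk.sources.declarative.async_job.status import AsyncJobStatus

    if job.status() == AsyncJobStatus.TIMED_OUT:  # `job` is a placeholder
        # hand the job back to the orchestrator for a retry instead of failing the sync
        ...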
airbyte_cdk/sources/declarative/async_job/job_orchestrator.py

@@ -44,16 +44,21 @@ class AsyncPartition:
     This bucket of api_jobs is a bit useless for this iteration but should become interesting when we will be able to split jobs
     """
 
-
+    _DEFAULT_MAX_JOB_RETRY = 3
 
-    def __init__(
+    def __init__(
+        self, jobs: List[AsyncJob], stream_slice: StreamSlice, job_max_retry: Optional[int] = None
+    ) -> None:
         self._attempts_per_job = {job: 1 for job in jobs}
         self._stream_slice = stream_slice
+        self._job_max_retry = (
+            job_max_retry if job_max_retry is not None else self._DEFAULT_MAX_JOB_RETRY
+        )
 
     def has_reached_max_attempt(self) -> bool:
         return any(
             map(
-                lambda attempt_count: attempt_count >= self.
+                lambda attempt_count: attempt_count >= self._job_max_retry,
                 self._attempts_per_job.values(),
             )
         )
@@ -62,7 +67,7 @@ class AsyncPartition:
         current_attempt_count = self._attempts_per_job.pop(job_to_replace, None)
         if current_attempt_count is None:
             raise ValueError("Could not find job to replace")
-        elif current_attempt_count >= self.
+        elif current_attempt_count >= self._job_max_retry:
             raise ValueError(f"Max attempt reached for job in partition {self._stream_slice}")
 
         new_attempt_count = current_attempt_count + 1
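The retry budget is now configurable per partition; omitting `job_max_retry` keeps the previous behavior of 3 attempts. A construction sketch (`jobs` and `_slice` are placeholders for the `List[AsyncJob]` and `StreamSlice` the orchestrator supplies):

    partition = AsyncPartition(jobs, _slice)                   # defaults to 3 attempts per job
    partition = AsyncPartition(jobs, _slice, job_max_retry=1)  # give up after a single attempt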
@@ -155,6 +160,7 @@ class AsyncJobOrchestrator:
         message_repository: MessageRepository,
         exceptions_to_break_on: Iterable[Type[Exception]] = tuple(),
         has_bulk_parent: bool = False,
+        job_max_retry: Optional[int] = None,
     ) -> None:
         """
         If the stream slices provided as a parameters relies on a async job streams that relies on the same JobTracker, `has_bulk_parent`
@@ -175,11 +181,12 @@ class AsyncJobOrchestrator:
         self._message_repository = message_repository
         self._exceptions_to_break_on: Tuple[Type[Exception], ...] = tuple(exceptions_to_break_on)
         self._has_bulk_parent = has_bulk_parent
+        self._job_max_retry = job_max_retry
 
         self._non_breaking_exceptions: List[Exception] = []
 
     def _replace_failed_jobs(self, partition: AsyncPartition) -> None:
-        failed_status_jobs = (AsyncJobStatus.FAILED,)
+        failed_status_jobs = (AsyncJobStatus.FAILED, AsyncJobStatus.TIMED_OUT)
         jobs_to_replace = [job for job in partition.jobs if job.status() in failed_status_jobs]
         for job in jobs_to_replace:
             new_job = self._start_job(job.job_parameters(), job.api_job_id())
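The net effect is that a timed-out job is re-submitted just like a failed one, bounded by the partition's `job_max_retry`. A self-contained sketch of the new selection rule (`RETRYABLE` and `should_replace` are illustrative names, not CDK API):

    from airbyte_cdk.sources.declarative.async_job.status import AsyncJobStatus

    RETRYABLE = (AsyncJobStatus.FAILED, AsyncJobStatus.TIMED_OUT)

    def should_replace(status: AsyncJobStatus) -> bool:
        # mirrors the `failed_status_jobs` check in _replace_failed_jobs above
        return status in RETRYABLE

    assert should_replace(AsyncJobStatus.TIMED_OUT)      # was not retried before 6.41.6
    assert not should_replace(AsyncJobStatus.COMPLETED)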
@@ -214,7 +221,7 @@ class AsyncJobOrchestrator:
             for _slice in self._slice_iterator:
                 at_least_one_slice_consumed_from_slice_iterator_during_current_iteration = True
                 job = self._start_job(_slice)
-                self._running_partitions.append(AsyncPartition([job], _slice))
+                self._running_partitions.append(AsyncPartition([job], _slice, self._job_max_retry))
                 if self._has_bulk_parent and self._slice_iterator.has_next():
                     break
         except ConcurrentJobLimitReached:
@@ -363,7 +370,7 @@ class AsyncJobOrchestrator:
                 self._reallocate_partition(current_running_partitions, partition)
 
             # We only remove completed / timeout jobs jobs as we want failed jobs to be re-allocated in priority
-            self.
+            self._remove_completed_jobs(partition)
 
         # update the referenced list with running partitions
         self._running_partitions = current_running_partitions
@@ -378,11 +385,7 @@ class AsyncJobOrchestrator:
     def _stop_timed_out_jobs(self, partition: AsyncPartition) -> None:
         for job in partition.jobs:
             if job.status() == AsyncJobStatus.TIMED_OUT:
-                self._abort_job(job, free_job_allocation=
-                raise AirbyteTracedException(
-                    internal_message=f"Job {job.api_job_id()} has timed out. Try increasing the `polling job timeout`.",
-                    failure_type=FailureType.config_error,
-                )
+                self._abort_job(job, free_job_allocation=False)
 
     def _abort_job(self, job: AsyncJob, free_job_allocation: bool = True) -> None:
         try:
@@ -392,7 +395,7 @@ class AsyncJobOrchestrator:
         except Exception as exception:
             LOGGER.warning(f"Could not free budget for job {job.api_job_id()}: {exception}")
 
-    def
+    def _remove_completed_jobs(self, partition: AsyncPartition) -> None:
         """
         Remove completed or timed out jobs from the partition.
 
@@ -400,7 +403,7 @@ class AsyncJobOrchestrator:
             partition (AsyncPartition): The partition to process.
         """
         for job in partition.jobs:
-            if job.status()
+            if job.status() == AsyncJobStatus.COMPLETED:
                 self._job_tracker.remove_job(job.api_job_id())
 
     def _reallocate_partition(
@@ -415,10 +418,7 @@ class AsyncJobOrchestrator:
             current_running_partitions (list): The list of currently running partitions.
             partition (AsyncPartition): The partition to reallocate.
         """
-
-        if job.status() != AsyncJobStatus.TIMED_OUT:
-            # allow the FAILED jobs to be re-allocated for partition
-            current_running_partitions.insert(0, partition)
+        current_running_partitions.insert(0, partition)
 
     def _process_partitions_with_errors(self, partition: AsyncPartition) -> None:
         """
airbyte_cdk/sources/declarative/async_job/job_tracker.py

@@ -3,9 +3,11 @@
 import logging
 import threading
 import uuid
-from
+from dataclasses import dataclass, field
+from typing import Any, Mapping, Set, Union
 
 from airbyte_cdk.logger import lazy_log
+from airbyte_cdk.sources.declarative.interpolation import InterpolatedString
 
 LOGGER = logging.getLogger("airbyte")
 
@@ -14,15 +16,29 @@ class ConcurrentJobLimitReached(Exception):
     pass
 
 
+@dataclass
 class JobTracker:
-
+    limit: Union[int, str]
+    config: Mapping[str, Any] = field(default_factory=dict)
+
+    def __post_init__(self) -> None:
         self._jobs: Set[str] = set()
-
+        self._lock = threading.Lock()
+        if isinstance(self.limit, str):
+            try:
+                self.limit = int(
+                    InterpolatedString(self.limit, parameters={}).eval(config=self.config)
+                )
+            except Exception as e:
+                LOGGER.warning(
+                    f"Error interpolating max job count: {self.limit}. Setting to 1. {e}"
+                )
+                self.limit = 1
+        if self.limit < 1:
             LOGGER.warning(
-                f"The `max_concurrent_async_job_count` property is less than 1: {limit}. Setting to 1. Please update the source manifest to set a valid value."
+                f"The `max_concurrent_async_job_count` property is less than 1: {self.limit}. Setting to 1. Please update the source manifest to set a valid value."
             )
-        self._limit =
-        self._lock = threading.Lock()
+        self._limit = self.limit if self.limit >= 1 else 1
 
     def try_to_get_intent(self) -> str:
         lazy_log(
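Since `JobTracker` is now a dataclass whose `limit` may be an interpolated string, both construction styles work; interpolation failures or values below 1 fall back to a limit of 1. A minimal usage sketch based on the new fields:

    from airbyte_cdk.sources.declarative.async_job.job_tracker import JobTracker

    tracker = JobTracker(limit=3)  # static limit, as before
    tracker = JobTracker(
        limit="{{ config['max_concurrent_async_job_count'] }}",
        config={"max_concurrent_async_job_count": 5},  # resolves to 5 at construction
    )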
airbyte_cdk/sources/declarative/declarative_component_schema.yaml

@@ -47,7 +47,12 @@ properties:
   max_concurrent_async_job_count:
     title: Maximum Concurrent Asynchronous Jobs
     description: Maximum number of concurrent asynchronous jobs to run. This property is only relevant for sources/streams that support asynchronous job execution through the AsyncRetriever (e.g. a report-based stream that initiates a job, polls the job status, and then fetches the job results). This is often set by the API's maximum number of concurrent jobs on the account level. Refer to the API's documentation for this information.
-    type:
+    type:
+      - integer
+      - string
+    examples:
+      - 3
+      - "{{ config['max_concurrent_async_job_count'] }}"
   metadata:
     type: object
     description: For internal Airbyte use only - DO NOT modify manually. Used by consumers of declarative manifests for storing related metadata.
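Either form is now valid in a manifest. A hypothetical fragment (shown as a Python dict, as a parsed manifest would appear; not a complete source definition):

    manifest_fragment = {
        # a fixed limit, as before
        "max_concurrent_async_job_count": 3,
    }
    # or, resolved from the connector config at runtime:
    manifest_fragment = {
        "max_concurrent_async_job_count": "{{ config['max_concurrent_async_job_count'] }}",
    }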
@@ -2192,7 +2197,8 @@ definitions:
       type: object
       additionalProperties: true
   JsonDecoder:
-    title:
+    title: JSON
+    description: Select 'JSON' if the response is formatted as a JSON object.
     type: object
     required:
      - type
@@ -2201,8 +2207,8 @@ definitions:
         type: string
         enum: [JsonDecoder]
   JsonlDecoder:
-    title:
-    description:
+    title: JSON Lines
+    description: Select 'JSON Lines' if the response consists of JSON objects separated by new lines ('\n') in JSONL format.
     type: object
     required:
      - type
@@ -2327,8 +2333,8 @@ definitions:
       type: object
       additionalProperties: true
   IterableDecoder:
-    title: Iterable
-    description:
+    title: Iterable
+    description: Select 'Iterable' if the response consists of strings separated by new lines (`\n`). The string will then be wrapped into a JSON object with the `record` key.
     type: object
     required:
      - type
@@ -2337,8 +2343,8 @@ definitions:
         type: string
         enum: [IterableDecoder]
   XmlDecoder:
-    title: XML
-    description:
+    title: XML
+    description: Select 'XML' if the response consists of XML-formatted data.
     type: object
     required:
      - type
@@ -2369,8 +2375,8 @@ definitions:
       type: object
       additionalProperties: true
   ZipfileDecoder:
-    title:
-    description:
+    title: ZIP File
+    description: Select 'ZIP file' for response data that is returned as a zipfile. Requires specifying an inner data type/decoder to parse the unzipped data.
     type: object
     additionalProperties: true
     required:
@@ -2894,7 +2900,7 @@ definitions:
         title: Lazy Read Pointer
         description: If set, this will enable lazy reading, using the initial read of parent records to extract child records.
         type: array
-        default: [
+        default: []
         items:
          - type: string
         interpolation_context:
@@ -3199,7 +3205,7 @@ definitions:
     properties:
       type:
         type: string
-        enum: [
+        enum: [StateDelegatingStream]
       name:
         title: Name
         description: The stream name.
@@ -3276,6 +3282,8 @@ definitions:
       type: object
       additionalProperties: true
   GzipDecoder:
+    title: gzip
+    description: Select 'gzip' for response data that is compressed with gzip. Requires specifying an inner data type/decoder to parse the decompressed data.
     type: object
     required:
      - type
@@ -3291,6 +3299,8 @@ definitions:
         - "$ref": "#/definitions/JsonDecoder"
         - "$ref": "#/definitions/JsonlDecoder"
   CsvDecoder:
+    title: CSV
+    description: "Select 'CSV' for response data that is formatted as CSV (comma-separated values). Can specify an encoding (default: 'utf-8') and a delimiter (default: ',')."
     type: object
     required:
      - type
airbyte_cdk/sources/declarative/models/declarative_component_schema.py

@@ -1890,9 +1890,10 @@ class DeclarativeSource1(BaseModel):
     spec: Optional[Spec] = None
     concurrency_level: Optional[ConcurrencyLevel] = None
     api_budget: Optional[HTTPAPIBudget] = None
-    max_concurrent_async_job_count: Optional[int] = Field(
+    max_concurrent_async_job_count: Optional[Union[int, str]] = Field(
         None,
         description="Maximum number of concurrent asynchronous jobs to run. This property is only relevant for sources/streams that support asynchronous job execution through the AsyncRetriever (e.g. a report-based stream that initiates a job, polls the job status, and then fetches the job results). This is often set by the API's maximum number of concurrent jobs on the account level. Refer to the API's documentation for this information.",
+        examples=[3, "{{ config['max_concurrent_async_job_count'] }}"],
         title="Maximum Concurrent Asynchronous Jobs",
     )
     metadata: Optional[Dict[str, Any]] = Field(
@@ -1922,9 +1923,10 @@ class DeclarativeSource2(BaseModel):
     spec: Optional[Spec] = None
     concurrency_level: Optional[ConcurrencyLevel] = None
     api_budget: Optional[HTTPAPIBudget] = None
-    max_concurrent_async_job_count: Optional[int] = Field(
+    max_concurrent_async_job_count: Optional[Union[int, str]] = Field(
        None,
        description="Maximum number of concurrent asynchronous jobs to run. This property is only relevant for sources/streams that support asynchronous job execution through the AsyncRetriever (e.g. a report-based stream that initiates a job, polls the job status, and then fetches the job results). This is often set by the API's maximum number of concurrent jobs on the account level. Refer to the API's documentation for this information.",
+        examples=[3, "{{ config['max_concurrent_async_job_count'] }}"],
        title="Maximum Concurrent Asynchronous Jobs",
     )
     metadata: Optional[Dict[str, Any]] = Field(
airbyte_cdk/sources/declarative/parsers/model_to_component_factory.py

@@ -3073,8 +3073,11 @@ class ModelToComponentFactory:
                     stream_slices,
                     self._job_tracker,
                     self._message_repository,
-                    has_bulk_parent=False,
                     # FIXME work would need to be done here in order to detect if a stream as a parent stream that is bulk
+                    has_bulk_parent=False,
+                    # set the `job_max_retry` to 1 for the `Connector Builder`` use-case.
+                    # `None` == default retry is set to 3 attempts, under the hood.
+                    job_max_retry=1 if self._emit_connector_builder_messages else None,
                 ),
                 stream_slicer=stream_slicer,
                 config=config,
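A short sketch of the resulting retry budget per execution context (illustrative variables, not CDK API):

    # In the Connector Builder, a failing or timed-out job is attempted only once;
    # elsewhere, `None` lets AsyncPartition fall back to _DEFAULT_MAX_JOB_RETRY (3).
    emit_connector_builder_messages = False
    job_max_retry = 1 if emit_connector_builder_messages else None  # -> 3 attempts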
{airbyte_cdk-6.41.4.dist-info → airbyte_cdk-6.41.6.dist-info}/RECORD

@@ -48,9 +48,9 @@ airbyte_cdk/sources/config.py,sha256=wtwFF_7G_S2KB0IE2W5LBs7RO5e7EbgCAMzJpTcYTKo
 airbyte_cdk/sources/connector_state_manager.py,sha256=hw3TJJWl3UJKSDsH-PypFQU7mD0ifffh1Noz-t_THr8,7486
 airbyte_cdk/sources/declarative/__init__.py,sha256=ZnqYNxHsKCgO38IwB34RQyRMXTs4GTvlRi3ImKnIioo,61
 airbyte_cdk/sources/declarative/async_job/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-airbyte_cdk/sources/declarative/async_job/job.py,sha256=
-airbyte_cdk/sources/declarative/async_job/job_orchestrator.py,sha256=
-airbyte_cdk/sources/declarative/async_job/job_tracker.py,sha256=
+airbyte_cdk/sources/declarative/async_job/job.py,sha256=aR5UZAkNUYA1I1zjUMAcvdzCFL3lXXOllkFmlhEKgkc,2001
+airbyte_cdk/sources/declarative/async_job/job_orchestrator.py,sha256=tcHvB5QdBnx4XQmFvr4Swdq2DLRPst5w5M-OIJHnp5c,22034
+airbyte_cdk/sources/declarative/async_job/job_tracker.py,sha256=JowKzdT4E6IeE1cYIf4mOtB6sVEJoCeSsfzaFi9ghQ8,3231
 airbyte_cdk/sources/declarative/async_job/repository.py,sha256=2OkWiZp5IKTOi_SIpP1U-Rw3gH36LBy_a8CgXoENTtg,1044
 airbyte_cdk/sources/declarative/async_job/status.py,sha256=mkExR-uOAO1ckUnclaUOa74l2N9CdhLbVFM6KDoBgBM,715
 airbyte_cdk/sources/declarative/async_job/timer.py,sha256=Fb8P72CQ7jIzJyzMSSNuBf2vt8bmrg9SrfmNxKwph2A,1242
@@ -71,7 +71,7 @@ airbyte_cdk/sources/declarative/concurrent_declarative_source.py,sha256=uhy0dRkA
 airbyte_cdk/sources/declarative/datetime/__init__.py,sha256=4Hw-PX1-VgESLF16cDdvuYCzGJtHntThLF4qIiULWeo,61
 airbyte_cdk/sources/declarative/datetime/datetime_parser.py,sha256=_zGNGq31RNy_0QBLt_EcTvgPyhj7urPdx6oA3M5-r3o,3150
 airbyte_cdk/sources/declarative/datetime/min_max_datetime.py,sha256=0BHBtDNQZfvwM45-tY5pNlTcKAFSGGNxemoi0Jic-0E,5785
-airbyte_cdk/sources/declarative/declarative_component_schema.yaml,sha256=
+airbyte_cdk/sources/declarative/declarative_component_schema.yaml,sha256=7yP2RxR6a0atvRiqB52J5k0kWlX-O6XEWdrCVFt1sCU,151082
 airbyte_cdk/sources/declarative/declarative_source.py,sha256=nF7wBqFd3AQmEKAm4CnIo29CJoQL562cJGSCeL8U8bA,1531
 airbyte_cdk/sources/declarative/declarative_stream.py,sha256=dCRlddBUSaJmBNBz1pSO1r2rTw8AP5d2_vlmIeGs2gg,10767
 airbyte_cdk/sources/declarative/decoders/__init__.py,sha256=JHb_0d3SE6kNY10mxA5YBEKPeSbsWYjByq1gUQxepoE,953
@@ -114,13 +114,13 @@ airbyte_cdk/sources/declarative/migrations/__init__.py,sha256=47DEQpj8HBSa-_TImW
 airbyte_cdk/sources/declarative/migrations/legacy_to_per_partition_state_migration.py,sha256=iemy3fKLczcU0-Aor7tx5jcT6DRedKMqyK7kCOp01hg,3924
 airbyte_cdk/sources/declarative/migrations/state_migration.py,sha256=KWPjealMLKSMtajXgkdGgKg7EmTLR-CqqD7UIh0-eDU,794
 airbyte_cdk/sources/declarative/models/__init__.py,sha256=nUFxNCiKeYRVXuZEKA7GD-lTHxsiKcQ8FitZjKhPIvE,100
-airbyte_cdk/sources/declarative/models/declarative_component_schema.py,sha256=
+airbyte_cdk/sources/declarative/models/declarative_component_schema.py,sha256=p0-xyRzsPBfTpJy1B8I87J0dIyWVRu9y2v9McSkY6Js,106885
 airbyte_cdk/sources/declarative/parsers/__init__.py,sha256=ZnqYNxHsKCgO38IwB34RQyRMXTs4GTvlRi3ImKnIioo,61
 airbyte_cdk/sources/declarative/parsers/custom_code_compiler.py,sha256=nlVvHC511NUyDEEIRBkoeDTAvLqKNp-hRy8D19z8tdk,5941
 airbyte_cdk/sources/declarative/parsers/custom_exceptions.py,sha256=Rir9_z3Kcd5Es0-LChrzk-0qubAsiK_RSEnLmK2OXm8,553
 airbyte_cdk/sources/declarative/parsers/manifest_component_transformer.py,sha256=CXwTfD3wSQq3okcqwigpprbHhSURUokh4GK2OmOyKC8,9132
 airbyte_cdk/sources/declarative/parsers/manifest_reference_resolver.py,sha256=IWUOdF03o-aQn0Occo1BJCxU0Pz-QILk5L67nzw2thw,6803
-airbyte_cdk/sources/declarative/parsers/model_to_component_factory.py,sha256=
+airbyte_cdk/sources/declarative/parsers/model_to_component_factory.py,sha256=72haNs6JXWSbe9Vwya2mJo3GFBvzYwjLlReWmvO2uPo,147623
 airbyte_cdk/sources/declarative/partition_routers/__init__.py,sha256=HJ-Syp3p7RpyR_OK0X_a2kSyISfu3W-PKrRI16iY0a8,957
 airbyte_cdk/sources/declarative/partition_routers/async_job_partition_router.py,sha256=VelO7zKqKtzMJ35jyFeg0ypJLQC0plqqIBNXoBW1G2E,3001
 airbyte_cdk/sources/declarative/partition_routers/cartesian_product_stream_slicer.py,sha256=c5cuVFM6NFkuQqG8Z5IwkBuwDrvXZN1CunUOM_L0ezg,6892
@@ -358,9 +358,9 @@ airbyte_cdk/utils/slice_hasher.py,sha256=EDxgROHDbfG-QKQb59m7h_7crN1tRiawdf5uU7G
 airbyte_cdk/utils/spec_schema_transformations.py,sha256=-5HTuNsnDBAhj-oLeQXwpTGA0HdcjFOf2zTEMUTTg_Y,816
 airbyte_cdk/utils/stream_status_utils.py,sha256=ZmBoiy5HVbUEHAMrUONxZvxnvfV9CesmQJLDTAIWnWw,1171
 airbyte_cdk/utils/traced_exception.py,sha256=C8uIBuCL_E4WnBAOPSxBicD06JAldoN9fGsQDp463OY,6292
-airbyte_cdk-6.41.
-airbyte_cdk-6.41.
-airbyte_cdk-6.41.
-airbyte_cdk-6.41.
-airbyte_cdk-6.41.
-airbyte_cdk-6.41.
+airbyte_cdk-6.41.6.dist-info/LICENSE.txt,sha256=Wfe61S4BaGPj404v8lrAbvhjYR68SHlkzeYrg3_bbuM,1051
+airbyte_cdk-6.41.6.dist-info/LICENSE_SHORT,sha256=aqF6D1NcESmpn-cqsxBtszTEnHKnlsp8L4x9wAh3Nxg,55
+airbyte_cdk-6.41.6.dist-info/METADATA,sha256=Xk23ce8RxVdxAlwvLwJpDyXn1pYHJPNhRpL0W98ChZw,6071
+airbyte_cdk-6.41.6.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+airbyte_cdk-6.41.6.dist-info/entry_points.txt,sha256=fj-e3PAQvsxsQzyyq8UkG1k8spunWnD4BAH2AwlR6NM,95
+airbyte_cdk-6.41.6.dist-info/RECORD,,