port-ocean 0.26.2__py3-none-any.whl → 0.27.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
integrations/_infra/Dockerfile.Deb

@@ -27,6 +27,7 @@ RUN mkdir -p /tmp/ocean
  ARG INTEGRATION_VERSION
  ARG BUILD_CONTEXT
  ARG PROMETHEUS_MULTIPROC_DIR=/tmp/ocean/prometheus/metrics
+ ARG OAUTH_CONFIG_DIR=/app/.config

  ENV LIBRDKAFKA_VERSION=2.8.2 \
      PROMETHEUS_MULTIPROC_DIR=${PROMETHEUS_MULTIPROC_DIR}
@@ -34,6 +35,9 @@ ENV LIBRDKAFKA_VERSION=2.8.2 \
  RUN mkdir -p ${PROMETHEUS_MULTIPROC_DIR}
  RUN chown -R ocean:appgroup /tmp/ocean && chmod -R 755 /tmp/ocean

+ RUN mkdir -p ${OAUTH_CONFIG_DIR}
+ RUN chown -R ocean:appgroup ${OAUTH_CONFIG_DIR}
+
  RUN apt-get update \
      && apt-get install -y \
      ca-certificates \
port_ocean/config/settings.py

@@ -9,6 +9,7 @@ from pydantic.main import BaseModel

  from port_ocean.config.base import BaseOceanModel, BaseOceanSettings
  from port_ocean.core.event_listener import EventListenerSettingsType
+
  from port_ocean.core.models import (
      CachingStorageMode,
      CreatePortResourcesOrigin,
@@ -88,6 +89,7 @@ class IntegrationConfiguration(BaseOceanSettings, extra=Extra.allow):
      event_listener: EventListenerSettingsType = Field(
          default=cast(EventListenerSettingsType, {"type": "POLLING"})
      )
+     event_workers_count: int = 1
      # If an identifier or type is not provided, it will be generated based on the integration name
      integration: IntegrationSettings = Field(
          default_factory=lambda: IntegrationSettings(type="", identifier="")
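The new `event_workers_count` setting is consumed by `LiveEventsProcessorManager.start_processing_event_messages` further down in this diff: every registered webhook path gets that many independent worker tasks. A minimal sketch of that fan-out pattern, for orientation only; the helper names here are illustrative and not part of the package:

```python
import asyncio


async def process_events(path: str, worker_id: int) -> None:
    # Stand-in for the real worker loop, which pulls events from the path's queue.
    await asyncio.sleep(0)


async def start_workers(paths: list[str], event_workers_count: int) -> None:
    # One task per (path, worker) pair, mirroring the manager's fan-out below.
    tasks = [
        asyncio.create_task(process_events(path, worker_id))
        for path in paths
        for worker_id in range(event_workers_count)
    ]
    await asyncio.gather(*tasks)


asyncio.run(start_workers(["/integration/webhook"], event_workers_count=2))
```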
port_ocean/core/handlers/queue/__init__.py

@@ -1,4 +1,5 @@
  from .abstract_queue import AbstractQueue
  from .local_queue import LocalQueue
+ from .group_queue import GroupQueue

- __all__ = ["AbstractQueue", "LocalQueue"]
+ __all__ = ["AbstractQueue", "LocalQueue", "GroupQueue"]
port_ocean/core/handlers/queue/abstract_queue.py

@@ -7,6 +7,9 @@ T = TypeVar("T")
  class AbstractQueue(ABC, Generic[T]):
      """Abstract interface for queues"""

+     def __init__(self, name: str | None = None):
+         pass
+
      @abstractmethod
      async def put(self, item: T) -> None:
          """Put an item into the queue"""
@@ -22,6 +25,11 @@ class AbstractQueue(ABC, Generic[T]):
          """Wait for all items to be processed"""
          pass

+     @abstractmethod
+     async def size(self) -> int:
+         """Size of the queue"""
+         pass
+
      @abstractmethod
      async def commit(self) -> None:
          """Mark item as processed"""
port_ocean/core/handlers/queue/group_queue.py (new file)

@@ -0,0 +1,138 @@
+ import asyncio
+ from collections import defaultdict, deque
+ import time
+ from typing import Deque, Dict, Optional, Set, TypeVar, Any
+ from contextvars import ContextVar
+
+ from loguru import logger
+
+ from .abstract_queue import AbstractQueue
+
+ T = TypeVar("T")
+ MaybeStr = str | None
+
+ _NO_GROUP = object()
+ _current_group: ContextVar[Any] = ContextVar("current_group", default=_NO_GROUP)
+
+
+ class GroupQueue(AbstractQueue[T]):
+     """Queue with exclusive processing per group."""
+
+     def __init__(
+         self,
+         group_key: MaybeStr = None,
+         name: MaybeStr = None,
+         lock_timeout: float = 300,
+     ):
+         super().__init__(name)
+         self.group_key = group_key
+         self._queues: Dict[MaybeStr, Deque[T]] = defaultdict(deque)
+         self._locked: Set[MaybeStr] = set()
+         self._queue_not_empty = asyncio.Condition()
+         self.lock_timeout = lock_timeout
+         self._lock_timestamps: Dict[MaybeStr, float] = {}
+         self._timeout_task: Optional[asyncio.Task[None]] = None
+
+     async def _background_timeout_check(self) -> None:
+         """Periodically release locks that have timed out."""
+         while True:
+             try:
+                 await asyncio.sleep(self.lock_timeout / 4)
+                 async with self._queue_not_empty:
+                     await self._release_expired_locks()
+             except asyncio.CancelledError:
+                 break
+
+     def _extract_group_key(self, item: T) -> MaybeStr:
+         """Extract the group key from an item."""
+         if self.group_key is None:
+             return None
+         if not hasattr(item, self.group_key):
+             raise ValueError(
+                 f"Item {item!r} lacks attribute '{self.group_key}' required for grouping"
+             )
+         return getattr(item, self.group_key)
+
+     async def put(self, item: T) -> None:
+         """Add item to its group's queue."""
+         group_key = self._extract_group_key(item)
+         async with self._queue_not_empty:
+             self._queues[group_key].append(item)
+             self._queue_not_empty.notify_all()
+
+     async def _release_expired_locks(self) -> None:
+         """Release locks that have exceeded the timeout."""
+         now = time.time()
+         expired_groups = []
+
+         for group, timestamp in list(self._lock_timestamps.items()):
+             if now - timestamp > self.lock_timeout:
+                 expired_groups.append(group)
+                 logger.warning(f"Releasing expired lock for group {group}")
+                 self._locked.discard(group)
+                 del self._lock_timestamps[group]
+
+         if expired_groups:
+             self._queue_not_empty.notify_all()
+
+     async def get(self) -> T:
+         """Get the next item from an unlocked group, locking that group."""
+         if self._timeout_task is None or self._timeout_task.done():
+             self._timeout_task = asyncio.create_task(self._background_timeout_check())
+
+         async with self._queue_not_empty:
+             while True:
+                 await self._release_expired_locks()
+
+                 for group, queue in self._queues.items():
+                     if queue and group not in self._locked:
+                         self._locked.add(group)
+                         self._lock_timestamps[group] = time.time()
+                         _current_group.set(group)
+                         return queue[0]
+
+                 await self._queue_not_empty.wait()
+
+     async def commit(self) -> None:
+         """Remove the current item and unlock its group."""
+         group = _current_group.get()
+         if group is _NO_GROUP:
+             logger.warning("commit() called without active get()")
+             return
+
+         async with self._queue_not_empty:
+             queue = self._queues.get(group)
+             if queue:
+                 queue.popleft()
+                 if not queue:
+                     del self._queues[group]
+
+             self._locked.discard(group)
+             self._lock_timestamps.pop(group, None)
+             _current_group.set(_NO_GROUP)
+             self._queue_not_empty.notify_all()
+
+     async def teardown(self) -> None:
+         """Wait until all queues are empty and no groups are locked."""
+         async with self._queue_not_empty:
+             while any(self._queues.values()) or self._locked:
+                 await self._queue_not_empty.wait()
+
+         if self._timeout_task and not self._timeout_task.done():
+             self._timeout_task.cancel()
+             try:
+                 await self._timeout_task
+             except asyncio.CancelledError:
+                 pass
+
+     async def size(self) -> int:
+         """Return total number of items across all groups."""
+         async with self._queue_not_empty:
+             return sum(len(queue) for queue in self._queues.values())
+
+     async def force_unlock_all(self) -> None:
+         """Force unlock all groups."""
+         async with self._queue_not_empty:
+             self._locked.clear()
+             self._lock_timestamps.clear()
+             self._queue_not_empty.notify_all()
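For orientation, a minimal usage sketch of the GroupQueue shown above. The `Event` dataclass here is illustrative, not part of the package; in the package itself the `group_id` field added to `WebhookEvent` later in this diff plays that role. Items from the same group are handed out one at a time, and `commit()` removes the in-flight item and unlocks its group:

```python
import asyncio
from dataclasses import dataclass

from port_ocean.core.handlers.queue import GroupQueue


@dataclass
class Event:
    group_id: str
    value: int


async def main() -> None:
    queue: GroupQueue[Event] = GroupQueue(group_key="group_id")
    await queue.put(Event(group_id="repo-a", value=1))
    await queue.put(Event(group_id="repo-a", value=2))
    await queue.put(Event(group_id="repo-b", value=3))

    first = await queue.get()   # returns value=1 and locks "repo-a"
    print(await queue.size())   # 3: the in-flight head stays counted until commit()
    await queue.commit()        # removes value=1 and unlocks "repo-a"

    second = await queue.get()  # FIFO within the group: value=2 from "repo-a"
    await queue.commit()

    third = await queue.get()   # value=3 from "repo-b"
    await queue.commit()
    print(first.value, second.value, third.value)  # 1 2 3


asyncio.run(main())
```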
port_ocean/core/handlers/queue/local_queue.py

@@ -23,3 +23,6 @@ class LocalQueue(AbstractQueue[T]):

      async def commit(self) -> None:
          self._queue.task_done()
+
+     async def size(self) -> int:
+         return self._queue.qsize()
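LocalQueue wraps an asyncio.Queue, so the new `size()` simply reports `qsize()`. A quick sketch of the contract, assuming LocalQueue's existing no-argument constructor and its put/get/commit methods:

```python
import asyncio

from port_ocean.core.handlers.queue import LocalQueue


async def main() -> None:
    queue: LocalQueue[str] = LocalQueue()
    await queue.put("event-1")
    print(await queue.size())  # 1: the item is still waiting in the underlying asyncio.Queue

    item = await queue.get()
    print(await queue.size())  # 0: qsize() drops as soon as the item is handed out

    await queue.commit()       # task_done() marks the in-flight item as processed
    print(item)


asyncio.run(main())
```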
port_ocean/core/handlers/webhook/processor_manager.py

@@ -1,4 +1,5 @@
- from typing import Dict, Type, Set
+ from typing import Dict, Tuple, Type, Set, List
+
  from fastapi import APIRouter, Request
  from loguru import logger
  import asyncio
@@ -6,6 +7,7 @@ import asyncio
  from port_ocean.context.ocean import ocean
  from port_ocean.context.event import EventType, event_context
  from port_ocean.core.handlers.port_app_config.models import ResourceConfig
+ from port_ocean.core.handlers.queue.abstract_queue import AbstractQueue
  from port_ocean.core.integrations.mixins.events import EventsMixin
  from port_ocean.core.integrations.mixins.live_events import LiveEventsMixin
  from port_ocean.exceptions.webhook_processor import WebhookEventNotSupportedError
@@ -15,7 +17,7 @@ from port_ocean.context.event import event

  from .abstract_webhook_processor import AbstractWebhookProcessor
  from port_ocean.utils.signal import SignalHandler
- from port_ocean.core.handlers.queue import AbstractQueue, LocalQueue
+ from port_ocean.core.handlers.queue import LocalQueue


  class LiveEventsProcessorManager(LiveEventsMixin, EventsMixin):
@@ -31,22 +33,103 @@ class LiveEventsProcessorManager(LiveEventsMixin, EventsMixin):
          self._router = router
          self._processors_classes: Dict[str, list[Type[AbstractWebhookProcessor]]] = {}
          self._event_queues: Dict[str, AbstractQueue[WebhookEvent]] = {}
-         self._webhook_processor_tasks: Set[asyncio.Task[None]] = set()
+         self._event_processor_tasks: Set[asyncio.Task[None]] = set()
          self._max_event_processing_seconds = max_event_processing_seconds
          self._max_wait_seconds_before_shutdown = max_wait_seconds_before_shutdown
          signal_handler.register(self.shutdown)

      async def start_processing_event_messages(self) -> None:
-         """Start processing events for all registered paths"""
+         """Start processing events for all registered paths with N workers each."""
          await self.initialize_handlers()
          loop = asyncio.get_event_loop()
+         config = ocean.integration.context.config
+
          for path in self._event_queues.keys():
+             for worker_id in range(0, config.event_workers_count):
+                 task = loop.create_task(self._process_webhook_events(path, worker_id))
+                 self._event_processor_tasks.add(task)
+                 task.add_done_callback(self._event_processor_tasks.discard)
+
+     async def _process_webhook_events(self, path: str, worker_id: int) -> None:
+         """Process webhook events from the queue for a given path."""
+         queue = self._event_queues[path]
+         while True:
+             event = None
+             matching_processors: List[
+                 Tuple[ResourceConfig, AbstractWebhookProcessor]
+             ] = []
              try:
-                 task = loop.create_task(self.process_queue(path))
-                 self._webhook_processor_tasks.add(task)
-                 task.add_done_callback(self._webhook_processor_tasks.discard)
+                 event = await queue.get()
+                 with logger.contextualize(
+                     worker=worker_id,
+                     webhook_path=path,
+                     trace_id=event.trace_id,
+                 ):
+                     async with event_context(
+                         EventType.HTTP_REQUEST,
+                         trigger_type="machine",
+                     ):
+
+                         await ocean.integration.port_app_config_handler.get_port_app_config(
+                             use_cache=False
+                         )
+                         matching_processors = await self._extract_matching_processors(
+                             event, path
+                         )
+
+                         processing_results = await asyncio.gather(
+                             *(
+                                 self._process_single_event(proc, path, res)
+                                 for res, proc in matching_processors
+                             ),
+                             return_exceptions=True,
+                         )
+
+                         successful_results: List[WebhookEventRawResults] = []
+                         failed_exceptions: List[Exception] = []
+
+                         for result in processing_results:
+                             if isinstance(result, WebhookEventRawResults):
+                                 successful_results.append(result)
+                             elif isinstance(result, Exception):
+                                 failed_exceptions.append(result)
+
+                         if successful_results:
+                             logger.info(
+                                 "Successfully processed webhook events",
+                                 success_count=len(successful_results),
+                                 failure_count=len(failed_exceptions),
+                             )
+
+                         if failed_exceptions:
+                             logger.warning(
+                                 "Some webhook events failed processing",
+                                 failures=[str(e) for e in failed_exceptions],
+                             )
+
+                         await self.sync_raw_results(successful_results)
+
+             except asyncio.CancelledError:
+                 logger.info(f"Worker {worker_id} for {path} shutting down")
+                 for _, proc in matching_processors:
+                     await proc.cancel()
+                     self._timestamp_event_error(proc.event)
+                 break
              except Exception as e:
-                 logger.exception(f"Error starting queue processor for {path}: {str(e)}")
+                 logger.exception(
+                     f"Unexpected error in worker {worker_id} for {path}: {e}"
+                 )
+                 for _, proc in matching_processors:
+                     self._timestamp_event_error(proc.event)
+             finally:
+                 try:
+                     if event is not None:
+                         await queue.commit()
+
+                 except Exception as e:
+                     logger.exception(
+                         f"Unexpected error in queue commit in worker {worker_id} for {path}: {e}"
+                     )

      async def _extract_matching_processors(
          self, webhook_event: WebhookEvent, path: str
@@ -91,70 +174,6 @@ class LiveEventsProcessorManager(LiveEventsMixin, EventsMixin):
          )
          return created_processors

-     async def process_queue(self, path: str) -> None:
-         """Process events for a specific path in order"""
-         while True:
-             matching_processors_with_resource: list[
-                 tuple[ResourceConfig, AbstractWebhookProcessor]
-             ] = []
-             webhook_event: WebhookEvent | None = None
-             try:
-                 queue = self._event_queues[path]
-                 webhook_event = await queue.get()
-                 with logger.contextualize(
-                     webhook_path=path, trace_id=webhook_event.trace_id
-                 ):
-                     async with event_context(
-                         EventType.HTTP_REQUEST,
-                         trigger_type="machine",
-                     ):
-                         # This forces the Processor manager to fetch the latest port app config for each event
-                         await ocean.integration.port_app_config_handler.get_port_app_config(
-                             use_cache=False
-                         )
-                         matching_processors_with_resource = (
-                             await self._extract_matching_processors(webhook_event, path)
-                         )
-                         webhook_event_raw_results_for_all_resources = await asyncio.gather(
-                             *(
-                                 self._process_single_event(processor, path, resource)
-                                 for resource, processor in matching_processors_with_resource
-                             ),
-                             return_exceptions=True,
-                         )
-
-                         successful_raw_results: list[WebhookEventRawResults] = [
-                             result
-                             for result in webhook_event_raw_results_for_all_resources
-                             if isinstance(result, WebhookEventRawResults)
-                         ]
-
-                         if successful_raw_results:
-                             logger.info(
-                                 "Exporting raw event results to entities",
-                                 webhook_event_raw_results_for_all_resources_length=len(
-                                     successful_raw_results
-                                 ),
-                             )
-                             await self.sync_raw_results(successful_raw_results)
-             except asyncio.CancelledError:
-                 logger.info(f"Queue processor for {path} is shutting down")
-                 for _, processor in matching_processors_with_resource:
-                     await processor.cancel()
-                     self._timestamp_event_error(processor.event)
-                 break
-             except Exception as e:
-                 logger.exception(
-                     f"Unexpected error in queue processor for {path}: {str(e)}"
-                 )
-                 for _, processor in matching_processors_with_resource:
-                     self._timestamp_event_error(processor.event)
-             finally:
-                 if webhook_event:
-                     await self._event_queues[path].commit()
-                     # Prevents committing empty events for cases where we shutdown while processing
-                     webhook_event = None
-
      def _timestamp_event_error(self, event: WebhookEvent) -> None:
          """Timestamp an event as having an error"""
          event.set_timestamp(LiveEventTimestamp.FinishedProcessingWithError)
@@ -279,12 +298,14 @@ class LiveEventsProcessorManager(LiveEventsMixin, EventsMixin):
              methods=["POST"],
          )

-     async def _cancel_all_tasks(self) -> None:
-         """Cancel all webhook processor tasks"""
-         for task in self._webhook_processor_tasks:
+     async def _cancel_all_event_processors(
+         self,
+     ) -> None:
+         """Cancel all event processor tasks"""
+         for task in self._event_processor_tasks:
              task.cancel()

-         await asyncio.gather(*self._webhook_processor_tasks, return_exceptions=True)
+         await asyncio.gather(*self._event_processor_tasks, return_exceptions=True)

      async def shutdown(self) -> None:
          """Gracefully shutdown all queue processors"""
@@ -299,5 +320,3 @@ class LiveEventsProcessorManager(LiveEventsMixin, EventsMixin):
              )
          except asyncio.TimeoutError:
              logger.warning("Shutdown timed out waiting for queues to empty")
-
-         await self._cancel_all_tasks()
port_ocean/core/handlers/webhook/webhook_event.py

@@ -51,11 +51,13 @@ class WebhookEvent(LiveEvent):
          payload: EventPayload,
          headers: EventHeaders,
          original_request: Request | None = None,
+         group_id: str | None = None,
      ) -> None:
          self.trace_id = trace_id
          self.payload = payload
          self.headers = headers
          self._original_request = original_request
+         self.group_id = group_id

      @classmethod
      async def from_request(
port_ocean/tests/core/handlers/queue/test_group_queue.py (new file)

@@ -0,0 +1,681 @@
+ import asyncio
+ import pytest
+ from dataclasses import dataclass
+ from port_ocean.core.handlers.queue.group_queue import GroupQueue
+ from typing import Any
+
+
+ @dataclass
+ class TestItem:
+     group_id: str
+     value: int
+
+
+ @dataclass
+ class TestItemNoGroup:
+     value: int
+
+
+ class TestGroupQueue:
+     """Test suite for GroupQueue lock mechanism"""
+
+     @pytest.fixture
+     def queue_with_group_key(self) -> GroupQueue[Any]:
+         """Create a GroupQueue with group_key='group_id'"""
+         return GroupQueue(group_key="group_id", name="test_queue")
+
+     @pytest.fixture
+     def queue_no_group_key(self) -> GroupQueue[Any]:
+         """Create a GroupQueue without group_key (all items in same group)"""
+         return GroupQueue(group_key=None, name="test_queue_no_group")
+
+     @pytest.mark.asyncio
+     async def test_basic_lock_mechanism(
+         self, queue_with_group_key: GroupQueue[Any]
+     ) -> None:
+         """Test that getting an item locks the group"""
+         queue: GroupQueue[TestItem] = queue_with_group_key
+
+         item1 = TestItem(group_id="group_a", value=1)
+         item2 = TestItem(group_id="group_a", value=2)
+
+         await queue.put(item1)
+         await queue.put(item2)
+
+         retrieved_item = await queue.get()
+         assert retrieved_item == item1
+         assert "group_a" in queue._locked
+
+     @pytest.mark.asyncio
+     async def test_locked_group_blocks_retrieval(
+         self, queue_with_group_key: GroupQueue[Any]
+     ) -> None:
+         """Test that locked groups cannot have items retrieved"""
+         queue: GroupQueue[TestItem] = queue_with_group_key
+
+         item1 = TestItem(group_id="group_a", value=1)
+         item2 = TestItem(group_id="group_a", value=2)
+         item3 = TestItem(group_id="group_b", value=3)
+
+         await queue.put(item1)
+         await queue.put(item2)
+         await queue.put(item3)
+
+         retrieved_item1 = await queue.get()
+         assert retrieved_item1 == item1
+         assert "group_a" in queue._locked
+
+         retrieved_item2 = await queue.get()
+         assert retrieved_item2 == item3
+         assert "group_b" in queue._locked
+         assert "group_a" in queue._locked
+
+     @pytest.mark.asyncio
+     async def test_commit_unlocks_group(
+         self, queue_with_group_key: GroupQueue[Any]
+     ) -> None:
+         """Test that commit() unlocks the group and allows next item retrieval"""
+         queue: GroupQueue[TestItem] = queue_with_group_key
+
+         item1 = TestItem(group_id="group_a", value=1)
+         item2 = TestItem(group_id="group_a", value=2)
+
+         await queue.put(item1)
+         await queue.put(item2)
+
+         retrieved_item1 = await queue.get()
+         assert retrieved_item1 == item1
+         assert "group_a" in queue._locked
+
+         await queue.commit()
+         assert "group_a" not in queue._locked
+
+         retrieved_item2 = await queue.get()
+         assert retrieved_item2 == item2
+         assert "group_a" in queue._locked
+
+     @pytest.mark.asyncio
+     async def test_multiple_groups_concurrent_processing(
+         self, queue_with_group_key: GroupQueue[Any]
+     ) -> None:
+         """Test that different groups can be processed concurrently"""
+         queue: GroupQueue[TestItem] = queue_with_group_key
+
+         item_a1 = TestItem(group_id="group_a", value=1)
+         item_a2 = TestItem(group_id="group_a", value=2)
+         item_b1 = TestItem(group_id="group_b", value=3)
+         item_c1 = TestItem(group_id="group_c", value=4)
+
+         await queue.put(item_a1)
+         await queue.put(item_b1)
+         await queue.put(item_c1)
+         await queue.put(item_a2)
+
+         retrieved_items = []
+         for _ in range(3):
+             item = await queue.get()
+             retrieved_items.append(item)
+
+         assert len(queue._locked) == 3
+         assert "group_a" in queue._locked
+         assert "group_b" in queue._locked
+         assert "group_c" in queue._locked
+
+         group_ids = [queue._extract_group_key(item) for item in retrieved_items]
+         assert set(group_ids) == {"group_a", "group_b", "group_c"}
+
+     @pytest.mark.asyncio
+     async def test_get_blocks_when_all_groups_locked(
+         self, queue_with_group_key: GroupQueue[Any]
+     ) -> None:
+         """Test that get() blocks when all available groups are locked"""
+         queue: GroupQueue[TestItem] = queue_with_group_key
+
+         item1 = TestItem(group_id="group_a", value=1)
+         item2 = TestItem(group_id="group_a", value=2)
+
+         await queue.put(item1)
+         await queue.put(item2)
+
+         await queue.get()
+         assert "group_a" in queue._locked
+
+         with pytest.raises(asyncio.TimeoutError):
+             await asyncio.wait_for(queue.get(), timeout=0.1)
+
+     @pytest.mark.asyncio
+     async def test_no_group_key_single_group_behavior(
+         self, queue_no_group_key: GroupQueue[Any]
+     ) -> None:
+         """Test behavior when group_key is None (all items in same group)"""
+         queue: GroupQueue[TestItemNoGroup] = queue_no_group_key
+
+         item1 = TestItemNoGroup(value=1)
+         item2 = TestItemNoGroup(value=2)
+
+         await queue.put(item1)
+         await queue.put(item2)
+
+         retrieved_item1 = await queue.get()
+         assert retrieved_item1 == item1
+         assert None in queue._locked
+
+         with pytest.raises(asyncio.TimeoutError):
+             await asyncio.wait_for(queue.get(), timeout=0.1)
+
+         await queue.commit()
+         assert None not in queue._locked
+
+         retrieved_item2 = await queue.get()
+         assert retrieved_item2 == item2
+
+     @pytest.mark.asyncio
+     async def test_commit_without_get_is_safe(
+         self, queue_with_group_key: GroupQueue[Any]
+     ) -> None:
+         """Test that calling commit() without get() doesn't break anything"""
+         queue: GroupQueue[TestItem] = queue_with_group_key
+
+         await queue.commit()
+         assert len(queue._locked) == 0
+
+     @pytest.mark.asyncio
+     async def test_multiple_commits_are_safe(
+         self, queue_with_group_key: GroupQueue[Any]
+     ) -> None:
+         """Test that multiple commits after a single get are safe"""
+         queue: GroupQueue[TestItem] = queue_with_group_key
+
+         item = TestItem(group_id="group_a", value=1)
+         await queue.put(item)
+
+         retrieved_item = await queue.get()
+         assert retrieved_item == item
+
+         await queue.commit()
+         assert "group_a" not in queue._locked
+
+         await queue.commit()
+         assert "group_a" not in queue._locked
+
+     @pytest.mark.asyncio
+     async def test_fifo_within_group(
+         self, queue_with_group_key: GroupQueue[Any]
+     ) -> None:
+         """Test that items within a group are processed in FIFO order"""
+         queue: GroupQueue[TestItem] = queue_with_group_key
+
+         items = [TestItem(group_id="group_a", value=i) for i in range(5)]
+         for item in items:
+             await queue.put(item)
+
+         processed_items = []
+         for _ in range(5):
+             item = await queue.get()
+             processed_items.append(item)
+             await queue.commit()
+
+         assert processed_items == items
+
+     @pytest.mark.asyncio
+     async def test_lock_prevents_queue_cleanup(
+         self, queue_with_group_key: GroupQueue[Any]
+     ) -> None:
+         """Test that locked groups prevent queue cleanup until unlocked"""
+         queue: GroupQueue[TestItem] = queue_with_group_key
+
+         item = TestItem(group_id="group_a", value=1)
+         await queue.put(item)
+
+         await queue.get()
+         assert "group_a" in queue._queues
+         assert "group_a" in queue._locked
+
+         await queue.commit()
+         assert "group_a" not in queue._queues
+         assert "group_a" not in queue._locked
+
+     @pytest.mark.asyncio
+     async def test_extract_group_key_missing_attribute(
+         self, queue_with_group_key: GroupQueue[Any]
+     ) -> None:
+         """Test that missing group key attribute raises ValueError"""
+         queue: GroupQueue[TestItemNoGroup] = queue_with_group_key
+
+         bad_item = TestItemNoGroup(value=1)
+
+         with pytest.raises(ValueError, match="lacks attribute 'group_id'"):
+             await queue.put(bad_item)
+
+     @pytest.mark.asyncio
+     async def test_size_excludes_current_item(
+         self, queue_with_group_key: GroupQueue[Any]
+     ) -> None:
+         """Test that size() excludes the currently processed item"""
+         queue: GroupQueue[TestItem] = queue_with_group_key
+
+         items = [TestItem(group_id="group_a", value=i) for i in range(3)]
+         for item in items:
+             await queue.put(item)
+
+         assert await queue.size() == 3
+
+         await queue.get()
+         assert await queue.size() == 3
+
+         await queue.commit()
+         assert await queue.size() == 2
+
+     @pytest.mark.asyncio
+     async def test_multiple_workers_different_groups(
+         self, queue_with_group_key: GroupQueue[Any]
+     ) -> None:
+         """Test multiple workers processing items from different groups concurrently"""
+         queue: GroupQueue[TestItem] = queue_with_group_key
+         processed_items = []
+
+         async def worker(worker_id: int, process_time: float = 0.1) -> Any:
+             """Simulate a worker that processes items"""
+             try:
+                 item = await queue.get()
+                 processed_items.append((worker_id, item))
+                 await asyncio.sleep(process_time)
+                 await queue.commit()
+                 return item
+             except Exception as e:
+                 return f"Worker {worker_id} error: {e}"
+
+         items = [
+             TestItem(group_id="group_a", value=1),
+             TestItem(group_id="group_b", value=2),
+             TestItem(group_id="group_c", value=3),
+             TestItem(group_id="group_d", value=4),
+         ]
+
+         for item in items:
+             await queue.put(item)
+
+         results = await asyncio.gather(
+             worker(1), worker(2), worker(3), worker(4), return_exceptions=True
+         )
+
+         assert len([r for r in results if isinstance(r, TestItem)]) == 4
+         assert len(processed_items) == 4
+
+         processed_values = {item.value for _, item in processed_items}
+         assert processed_values == {1, 2, 3, 4}
+
+         assert len(queue._locked) == 0
+
+     @pytest.mark.asyncio
+     async def test_multiple_workers_same_group_exclusivity(
+         self, queue_with_group_key: GroupQueue[Any]
+     ) -> None:
+         """Test that multiple workers cannot process items from same group concurrently"""
+         queue: GroupQueue[TestItem] = queue_with_group_key
+         processing_log = []
+
+         async def worker(worker_id: int, process_time: float = 0.2) -> Any:
+             """Worker that logs processing start and end times"""
+             try:
+                 item = await queue.get()
+                 start_time = asyncio.get_event_loop().time()
+                 processing_log.append(("start", worker_id, item.value, start_time))
+
+                 await asyncio.sleep(process_time)
+
+                 end_time = asyncio.get_event_loop().time()
+                 processing_log.append(("end", worker_id, item.value, end_time))
+
+                 await queue.commit()
+                 return item
+             except Exception as e:
+                 return f"Worker {worker_id} error: {e}"
+
+         items = [TestItem(group_id="group_a", value=i) for i in range(4)]
+         for item in items:
+             await queue.put(item)
+
+         results = await asyncio.gather(
+             worker(1), worker(2), worker(3), worker(4), return_exceptions=True
+         )
+
+         assert len([r for r in results if isinstance(r, TestItem)]) == 4
+
+         start_times = {}
+         end_times = {}
+
+         for event, worker_id, value, timestamp in processing_log:
+             if event == "start":
+                 start_times[value] = timestamp
+             else:
+                 end_times[value] = timestamp
+
+         sorted_items = sorted(start_times.items(), key=lambda x: x[1])
+
+         for i in range(1, len(sorted_items)):
+             current_value = sorted_items[i][0]
+             previous_value = sorted_items[i - 1][0]
+
+             current_start = start_times[current_value]
+             previous_end = end_times[previous_value]
+
+             assert (
+                 current_start >= previous_end - 0.01
+             ), f"Item {current_value} started before item {previous_value} finished"
+
+         assert len(queue._locked) == 0
+
+     @pytest.mark.asyncio
+     async def test_mixed_groups_with_multiple_workers(
+         self, queue_with_group_key: GroupQueue[Any]
+     ) -> None:
+         """Test workers processing mixed groups - some concurrent, some sequential"""
+         queue: GroupQueue[TestItem] = queue_with_group_key
+         processing_events = []
+
+         async def worker(worker_id: int) -> Any:
+             """Worker that tracks processing events"""
+             try:
+                 item = await queue.get()
+                 group = queue._extract_group_key(item)
+
+                 start_time = asyncio.get_event_loop().time()
+                 processing_events.append(
+                     ("start", worker_id, group, item.value, start_time)
+                 )
+
+                 process_time = 0.1 if group == "fast_group" else 0.2
+                 await asyncio.sleep(process_time)
+
+                 end_time = asyncio.get_event_loop().time()
+                 processing_events.append(
+                     ("end", worker_id, group, item.value, end_time)
+                 )
+
+                 await queue.commit()
+                 return item
+             except Exception as e:
+                 return f"Worker {worker_id} error: {e}"
+
+         items = [
+             TestItem(group_id="same_group", value=1),
+             TestItem(group_id="same_group", value=2),
+             TestItem(group_id="same_group", value=3),
+             TestItem(group_id="fast_group", value=4),
+             TestItem(group_id="other_group", value=5),
+             TestItem(group_id="another_group", value=6),
+         ]
+
+         for item in items:
+             await queue.put(item)
+
+         results = await asyncio.gather(
+             *[worker(i) for i in range(1, 7)], return_exceptions=True
+         )
+
+         successful_results = [r for r in results if isinstance(r, TestItem)]
+         assert len(successful_results) == 6
+
+         group_events: dict[Any, Any] = {}
+         for event in processing_events:
+             _, worker_id, group, value, timestamp = event
+             if group not in group_events:
+                 group_events[group] = []
+             group_events[group].append(event)
+
+         same_group_events = sorted(group_events["same_group"], key=lambda x: x[4])
+         starts = [e for e in same_group_events if e[0] == "start"]
+         ends = [e for e in same_group_events if e[0] == "end"]
+
+         for i in range(1, len(starts)):
+             assert starts[i][4] >= ends[i - 1][4] - 0.01
+
+         assert len(queue._locked) == 0
+
+     @pytest.mark.asyncio
+     async def test_high_concurrency_stress_test(
+         self, queue_with_group_key: GroupQueue[Any]
+     ) -> None:
+         """Stress test with many workers and items"""
+         queue: GroupQueue[TestItem] = queue_with_group_key
+
+         async def worker(worker_id: int) -> Any:
+             """Simple worker"""
+             results = []
+             while True:
+                 try:
+
+                     item = await asyncio.wait_for(queue.get(), timeout=1.0)
+                     results.append(item)
+
+                     await asyncio.sleep(0.01 + (worker_id % 3) * 0.01)
+
+                     await queue.commit()
+                 except asyncio.TimeoutError:
+                     break
+                 except Exception as e:
+                     print(f"Worker {worker_id} error: {e}")
+                     break
+             return results
+
+         num_groups = 5
+         items_per_group = 4
+
+         for group_id in range(num_groups):
+             for item_id in range(items_per_group):
+                 item = TestItem(
+                     group_id=f"group_{group_id}", value=group_id * 100 + item_id
+                 )
+                 await queue.put(item)
+
+         num_workers = 10
+         results = await asyncio.gather(
+             *[worker(i) for i in range(num_workers)], return_exceptions=True
+         )
+
+         all_processed = []
+         for result in results:
+             if isinstance(result, list):
+                 all_processed.extend(result)
+
+         assert len(all_processed) == num_groups * items_per_group
+
+         processed_values = [item.value for item in all_processed]
+         expected_values = [
+             g * 100 + i for g in range(num_groups) for i in range(items_per_group)
+         ]
+         assert sorted(processed_values) == sorted(expected_values)
+
+         assert len(queue._locked) == 0
+         assert await queue.size() == 0
+
+     @pytest.mark.asyncio
+     async def test_frozen_lock_timeout_recovery(
+         self, queue_with_group_key: GroupQueue[Any]
+     ) -> None:
+         """Test that frozen locks are released after timeout and processing can resume"""
+
+         queue: GroupQueue[TestItem] = GroupQueue(
+             group_key="group_id", name="test_queue", lock_timeout=0.3
+         )
+
+         processed_items = []
+
+         async def normal_worker(worker_id: int) -> Any:
+             """Worker that processes items normally"""
+             try:
+                 item = await queue.get()
+                 processed_items.append((worker_id, item))
+                 await asyncio.sleep(0.1)
+                 await queue.commit()
+                 return item
+             except Exception as e:
+                 return f"Worker {worker_id} error: {e}"
+
+         async def hanging_worker(worker_id: int) -> Any:
+             """Worker that gets item but never commits (simulates hung worker)"""
+             try:
+                 item = await queue.get()
+                 processed_items.append((worker_id, item))
+
+                 await asyncio.sleep(1.0)
+
+                 return f"Worker {worker_id} hung"
+             except Exception as e:
+                 return f"Worker {worker_id} error: {e}"
+
+         items = [TestItem(group_id="group_a", value=i) for i in range(3)]
+         for item in items:
+             await queue.put(item)
+
+         hanging_task = asyncio.create_task(hanging_worker(999))
+
+         await asyncio.sleep(0.1)
+
+         assert "group_a" in queue._locked
+
+         normal_task = asyncio.create_task(normal_worker(1))
+
+         await asyncio.sleep(0.2)
+         assert not normal_task.done()
+
+         await asyncio.sleep(0.4)
+
+         await asyncio.wait_for(normal_task, timeout=2.0)
+
+         normal_result = await normal_task
+         assert isinstance(normal_result, TestItem)
+
+         hanging_task.cancel()
+         try:
+             await hanging_task
+         except asyncio.CancelledError:
+             pass
+
+         assert len(queue._locked) <= 1
+
+         remaining_worker = asyncio.create_task(normal_worker(2))
+         remaining_result = await asyncio.wait_for(remaining_worker, timeout=1.0)
+         assert isinstance(remaining_result, TestItem)
+
+         await asyncio.sleep(0.1)
+
+         processed_values = {
+             item.value for _, item in processed_items if isinstance(item, TestItem)
+         }
+         assert len(processed_values) >= 2
+
+     @pytest.mark.asyncio
+     async def test_lock_timeout_doesnt_affect_normal_processing(
+         self, queue_with_group_key: GroupQueue[Any]
+     ) -> None:
+         """Test that lock timeout doesn't interfere with normal fast processing"""
+
+         queue: GroupQueue[TestItem] = GroupQueue(
+             group_key="group_id", name="test_queue", lock_timeout=2.0
+         )
+
+         processed_items = []
+
+         async def fast_worker(worker_id: int) -> Any:
+             """Worker that processes quickly (well under timeout)"""
+             try:
+                 item = await queue.get()
+                 processed_items.append((worker_id, item))
+                 await asyncio.sleep(0.1)
+                 await queue.commit()
+                 return item
+             except Exception as e:
+                 return f"Worker {worker_id} error: {e}"
+
+         items = [TestItem(group_id="group_a", value=i) for i in range(5)]
+         for item in items:
+             await queue.put(item)
+
+         results = []
+         for i in range(5):
+             task = asyncio.create_task(fast_worker(i))
+             result = await asyncio.wait_for(task, timeout=1.0)
+             results.append(result)
+
+         assert len([r for r in results if isinstance(r, TestItem)]) == 5
+
+         processed_values = {item.value for _, item in processed_items}
+         assert processed_values == {0, 1, 2, 3, 4}
+
+         assert len(queue._locked) == 0
+
+     @pytest.mark.asyncio
+     async def test_multiple_frozen_locks_recovery(
+         self, queue_with_group_key: GroupQueue[Any]
+     ) -> None:
+         """Test recovery when multiple groups have frozen locks"""
+         queue: GroupQueue[TestItem] = GroupQueue(
+             group_key="group_id", name="test_queue", lock_timeout=0.3
+         )
+
+         async def hanging_worker(worker_id: int, group: str) -> Any:
+             """Worker that grabs item from specific group and hangs"""
+             try:
+
+                 while True:
+                     item = await queue.get()
+                     if item.group_id == group:
+
+                         await asyncio.sleep(1.0)
+                         return f"Worker {worker_id} hung with {group}"
+                     else:
+
+                         await queue.commit()
+             except Exception as e:
+                 return f"Worker {worker_id} error: {e}"
+
+         async def recovery_worker(worker_id: int) -> Any:
+             """Worker that should be able to process after timeout"""
+             try:
+                 item = await queue.get()
+                 await asyncio.sleep(0.05)
+                 await queue.commit()
+                 return item
+             except Exception as e:
+                 return f"Worker {worker_id} error: {e}"
+
+         for group in ["group_a", "group_b", "group_c"]:
+             for i in range(2):
+                 await queue.put(TestItem(group_id=group, value=i))
+
+         hanging_tasks = [
+             asyncio.create_task(hanging_worker(i, f"group_{chr(97+i)}"))
+             for i in range(3)
+         ]
+
+         await asyncio.sleep(0.1)
+
+         assert len(queue._locked) == 3
+
+         recovery_tasks = [
+             asyncio.create_task(recovery_worker(100 + i)) for i in range(3)
+         ]
+
+         await asyncio.sleep(0.1)
+
+         for task in recovery_tasks:
+             assert not task.done()
+
+         await asyncio.sleep(0.4)
+
+         results = await asyncio.gather(*recovery_tasks, return_exceptions=True)
+
+         successful_results = [r for r in results if isinstance(r, TestItem)]
+         assert len(successful_results) == 3
+
+         for task in hanging_tasks:
+             task.cancel()
+             try:
+                 await task
+             except asyncio.CancelledError:
+                 pass
+
+         await asyncio.sleep(0.1)
+         assert len(queue._locked) == 0
METADATA

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: port-ocean
- Version: 0.26.2
+ Version: 0.27.0
  Summary: Port Ocean is a CLI tool for managing your Port projects.
  Home-page: https://app.getport.io
  Keywords: ocean,port-ocean,port
RECORD

@@ -1,4 +1,4 @@
- integrations/_infra/Dockerfile.Deb,sha256=RW-BC8NaVmw3ASF7a3XNsjFN3ZN5gTVxbFrOmhBXq30,2251
+ integrations/_infra/Dockerfile.Deb,sha256=ovmwNBRNrblTW6K9ru2FNTiT9qDgI7_zY28O__VMW6I,2367
  integrations/_infra/Dockerfile.alpine,sha256=7E4Sb-8supsCcseerHwTkuzjHZoYcaHIyxiBZ-wewo0,3482
  integrations/_infra/Dockerfile.base.builder,sha256=ESe1PKC6itp_AuXawbLI75k1Kruny6NTANaTinxOgVs,743
  integrations/_infra/Dockerfile.base.runner,sha256=uAcs2IsxrAAUHGXt_qULA5INr-HFguf5a5fCKiqEzbY,384
@@ -70,7 +70,7 @@ port_ocean/clients/port/utils.py,sha256=osFyAjw7Y5Qf2uVSqC7_RTCQfijiL1zS74JJM0go
  port_ocean/config/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  port_ocean/config/base.py,sha256=x1gFbzujrxn7EJudRT81C6eN9WsYAb3vOHwcpcpX8Tc,6370
  port_ocean/config/dynamic.py,sha256=Lrk4JRGtR-0YKQ9DDGexX5NGFE7EJ6VoHya19YYhssM,2687
- port_ocean/config/settings.py,sha256=SaCophXTKj_MgJRw5vrIXaNds1NEfewt0jzPQ0ytM2o,7192
+ port_ocean/config/settings.py,sha256=keRT2FJyzQ2G0LNlNP4mqYSwwTjbybL7Ire0oggwScw,7226
  port_ocean/consumers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  port_ocean/consumers/kafka_consumer.py,sha256=N8KocjBi9aR0BOPG8hgKovg-ns_ggpEjrSxqSqF_BSo,4710
  port_ocean/context/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -106,15 +106,16 @@ port_ocean/core/handlers/port_app_config/__init__.py,sha256=8AAT5OthiVM7KCcM34iE
  port_ocean/core/handlers/port_app_config/api.py,sha256=r_Th66NEw38IpRdnXZcRvI8ACfvxW_A6V62WLwjWXlQ,1044
  port_ocean/core/handlers/port_app_config/base.py,sha256=Sup4-X_a7JGa27rMy_OgqGIjFHMlKBpKevicaK3AeHU,2919
  port_ocean/core/handlers/port_app_config/models.py,sha256=pO7oI7GIYZ9c2ZxLu8EQ97U2IPqzsbJf3gRQxlizEjE,2933
- port_ocean/core/handlers/queue/__init__.py,sha256=1fICM0ZLATmmj6f7cdq_eV2kmw0_jy7y2INuLQIpzIE,121
- port_ocean/core/handlers/queue/abstract_queue.py,sha256=q_gpaWFFZHxM3XovEbgsDn8jEOLM45iAZWVC81Paxto,620
- port_ocean/core/handlers/queue/local_queue.py,sha256=EzqsGIX43xbVAcePwTcCg5QDrXATQpy-VzWxxN_OyAM,574
+ port_ocean/core/handlers/queue/__init__.py,sha256=yzgicE_jAR1wtljFKxgyG6j-HbLcG_Zze5qw1kkALUI,171
+ port_ocean/core/handlers/queue/abstract_queue.py,sha256=SaivrYbqg8qsX6wtQlJZyxgcbdMD5B9NZG3byN9AvrI,782
+ port_ocean/core/handlers/queue/group_queue.py,sha256=JvvJOwz9z_aI4CjPr7yQX-0rOgqLI5wMdxWk2x5x-34,4989
+ port_ocean/core/handlers/queue/local_queue.py,sha256=Y6qabDbrQ8aOPTN6Ct3lnMU7JnT8O8iTpoxMoVt6lFs,643
  port_ocean/core/handlers/resync_state_updater/__init__.py,sha256=kG6y-JQGpPfuTHh912L_bctIDCzAK4DN-d00S7rguWU,81
  port_ocean/core/handlers/resync_state_updater/updater.py,sha256=TRYq6QnTtPlJg6MvgZPtQdZPvkAhkvpcmWjtkxCnkg4,3764
  port_ocean/core/handlers/webhook/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  port_ocean/core/handlers/webhook/abstract_webhook_processor.py,sha256=5KwZkdkDd5HdVkXPzKiqabodZKl-hOtMypkTKd8Hq3M,3891
- port_ocean/core/handlers/webhook/processor_manager.py,sha256=4u9Q_djZAzxgwGHlHBmVBG26svEigeSka6GajcETd20,12976
- port_ocean/core/handlers/webhook/webhook_event.py,sha256=9wHXLY6IGgjuqrwXXvZm_RbYEd-a9qIFNxWnGbfPv6o,3877
+ port_ocean/core/handlers/webhook/processor_manager.py,sha256=0KRPD1ae-7w0na2AZY-rq9_gY0IaMv9LdwEh6y4_OiQ,13282
+ port_ocean/core/handlers/webhook/webhook_event.py,sha256=o-REML80GxN7jKonO-vlRnycN_8NAymbykQSUjVp5FI,3947
  port_ocean/core/integrations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  port_ocean/core/integrations/base.py,sha256=dUhytVM9uUbcDRzG1QWyvBvEJOWZY0dPVV3hXuukOfg,3587
  port_ocean/core/integrations/mixins/__init__.py,sha256=FA1FEKMM6P-L2_m7Q4L20mFa4_RgZnwSRmTCreKcBVM,220
@@ -172,6 +173,7 @@ port_ocean/tests/core/handlers/mixins/test_live_events.py,sha256=6yUsYooBYchiZP_
  port_ocean/tests/core/handlers/mixins/test_sync_raw.py,sha256=-Jd2rUG63fZM8LuyKtCp1tt4WEqO2m5woESjs1c91sU,44428
  port_ocean/tests/core/handlers/port_app_config/test_api.py,sha256=eJZ6SuFBLz71y4ca3DNqKag6d6HUjNJS0aqQPwiLMTI,1999
  port_ocean/tests/core/handlers/port_app_config/test_base.py,sha256=hSh556bJM9zuELwhwnyKSfd9z06WqWXIfe-6hCl5iKI,9799
+ port_ocean/tests/core/handlers/queue/test_group_queue.py,sha256=Y1BrQi5xwhk5bYDlKRWw9PenF5cqxIF2TIU_hldqji0,22801
  port_ocean/tests/core/handlers/queue/test_local_queue.py,sha256=9Ly0HzZXbs6Rbl_bstsIdInC3h2bgABU3roP9S_PnJM,2582
  port_ocean/tests/core/handlers/webhook/test_abstract_webhook_processor.py,sha256=zKwHhPAYEZoZ5Z2UETp1t--mbkS8uyvlXThB0obZTTc,3340
  port_ocean/tests/core/handlers/webhook/test_processor_manager.py,sha256=rqNFc-S_ZnPyDTSFTdiGcRFKbeDGfWQCH_f2UPbfcAA,52310
@@ -203,8 +205,8 @@ port_ocean/utils/repeat.py,sha256=U2OeCkHPWXmRTVoPV-VcJRlQhcYqPWI5NfmPlb1JIbc,32
  port_ocean/utils/signal.py,sha256=mMVq-1Ab5YpNiqN4PkiyTGlV_G0wkUDMMjTZp5z3pb0,1514
  port_ocean/utils/time.py,sha256=pufAOH5ZQI7gXvOvJoQXZXZJV-Dqktoj9Qp9eiRwmJ4,1939
  port_ocean/version.py,sha256=UsuJdvdQlazzKGD3Hd5-U7N69STh8Dq9ggJzQFnu9fU,177
- port_ocean-0.26.2.dist-info/LICENSE.md,sha256=WNHhf_5RCaeuKWyq_K39vmp9F28LxKsB4SpomwSZ2L0,11357
- port_ocean-0.26.2.dist-info/METADATA,sha256=vKDMPtrSkiVpDBs1OdmO3RdRYqp88fLfKuQdXPL_no0,6887
- port_ocean-0.26.2.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
- port_ocean-0.26.2.dist-info/entry_points.txt,sha256=F_DNUmGZU2Kme-8NsWM5LLE8piGMafYZygRYhOVtcjA,54
- port_ocean-0.26.2.dist-info/RECORD,,
+ port_ocean-0.27.0.dist-info/LICENSE.md,sha256=WNHhf_5RCaeuKWyq_K39vmp9F28LxKsB4SpomwSZ2L0,11357
+ port_ocean-0.27.0.dist-info/METADATA,sha256=dWJb3IOgu_hp6Q1I5G4VxB3b31owoTzF7xSSgSpsGAM,6887
+ port_ocean-0.27.0.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+ port_ocean-0.27.0.dist-info/entry_points.txt,sha256=F_DNUmGZU2Kme-8NsWM5LLE8piGMafYZygRYhOVtcjA,54
+ port_ocean-0.27.0.dist-info/RECORD,,