port-ocean 0.4.2rc2__tar.gz → 0.4.4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of port-ocean might be problematic. Click here for more details.
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/PKG-INFO +1 -1
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/clients/port/authentication.py +8 -4
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/clients/port/client.py +5 -2
- port_ocean-0.4.4/port_ocean/clients/port/retry_transport.py +51 -0
- port_ocean-0.4.4/port_ocean/clients/port/utils.py +51 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/consumers/kafka_consumer.py +4 -23
- port_ocean-0.4.4/port_ocean/context/utils.py +24 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/event_listener/kafka.py +41 -20
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/handlers/entities_state_applier/port/order_by_entities_dependencies.py +10 -2
- port_ocean-0.4.4/port_ocean/helpers/retry.py +267 -0
- port_ocean-0.4.4/port_ocean/py.typed +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/utils.py +35 -2
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/pyproject.toml +1 -1
- port_ocean-0.4.2rc2/port_ocean/clients/port/utils.py +0 -28
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/LICENSE.md +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/README.md +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/__init__.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/bootstrap.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/__init__.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/cli.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/commands/__init__.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/commands/defaults/__init___.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/commands/defaults/clean.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/commands/defaults/dock.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/commands/defaults/group.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/commands/list_integrations.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/commands/main.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/commands/new.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/commands/pull.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/commands/sail.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/commands/version.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/cookiecutter/__init__.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/cookiecutter/cookiecutter.json +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/cookiecutter/extensions.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/cookiecutter/{{cookiecutter.integration_slug}}/.dockerignore +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/cookiecutter/{{cookiecutter.integration_slug}}/.gitignore +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/cookiecutter/{{cookiecutter.integration_slug}}/.port/resources/.gitignore +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/cookiecutter/{{cookiecutter.integration_slug}}/.port/spec.yaml +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/cookiecutter/{{cookiecutter.integration_slug}}/CHANGELOG.md +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/cookiecutter/{{cookiecutter.integration_slug}}/Dockerfile +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/cookiecutter/{{cookiecutter.integration_slug}}/Makefile +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/cookiecutter/{{cookiecutter.integration_slug}}/README.md +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/cookiecutter/{{cookiecutter.integration_slug}}/changelog/.gitignore +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/cookiecutter/{{cookiecutter.integration_slug}}/config.yaml +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/cookiecutter/{{cookiecutter.integration_slug}}/debug.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/cookiecutter/{{cookiecutter.integration_slug}}/main.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/cookiecutter/{{cookiecutter.integration_slug}}/poetry.toml +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/cookiecutter/{{cookiecutter.integration_slug}}/pyproject.toml +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/cookiecutter/{{cookiecutter.integration_slug}}/tests/__init__.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/cli/utils.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/clients/__init__.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/clients/port/__init__.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/clients/port/mixins/__init__.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/clients/port/mixins/blueprints.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/clients/port/mixins/entities.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/clients/port/mixins/integrations.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/clients/port/mixins/migrations.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/clients/port/types.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/config/__init__.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/config/base.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/config/dynamic.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/config/settings.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/consumers/__init__.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/consumers/base_consumer.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/context/__init__.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/context/event.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/context/ocean.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/context/resource.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/__init__.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/defaults/__init__.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/defaults/clean.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/defaults/common.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/defaults/initialize.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/event_listener/__init__.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/event_listener/base.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/event_listener/factory.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/event_listener/http.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/event_listener/once.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/event_listener/polling.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/handlers/__init__.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/handlers/base.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/handlers/entities_state_applier/__init__.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/handlers/entities_state_applier/base.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/handlers/entities_state_applier/port/__init__.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/handlers/entities_state_applier/port/applier.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/handlers/entities_state_applier/port/get_related_entities.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/handlers/entities_state_applier/port/validate_entity_relations.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/handlers/entity_processor/__init__.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/handlers/entity_processor/base.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/handlers/entity_processor/jq_entity_processor.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/handlers/port_app_config/__init__.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/handlers/port_app_config/api.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/handlers/port_app_config/base.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/handlers/port_app_config/models.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/integrations/__init__.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/integrations/base.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/integrations/mixins/__init__.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/integrations/mixins/events.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/integrations/mixins/handler.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/integrations/mixins/sync.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/integrations/mixins/sync_raw.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/integrations/mixins/utils.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/models.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/ocean_types.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/utils.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/exceptions/__init__.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/exceptions/api.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/exceptions/base.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/exceptions/clients.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/exceptions/context.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/exceptions/core.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/exceptions/port_defaults.py +0 -0
- /port_ocean-0.4.2rc2/port_ocean/py.typed → /port_ocean-0.4.4/port_ocean/helpers/__init__.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/logger_setup.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/middlewares.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/ocean.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/run.py +0 -0
- {port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/version.py +0 -0
|
@@ -42,7 +42,7 @@ class PortAuthentication:
|
|
|
42
42
|
self.integration_identifier = integration_identifier
|
|
43
43
|
self.integration_type = integration_type
|
|
44
44
|
self.integration_version = integration_version
|
|
45
|
-
self.
|
|
45
|
+
self.last_token_object: TokenResponse | None = None
|
|
46
46
|
|
|
47
47
|
async def _get_token(self, client_id: str, client_secret: str) -> TokenResponse:
|
|
48
48
|
logger.info(f"Fetching access token for clientId: {client_id}")
|
|
@@ -71,9 +71,13 @@ class PortAuthentication:
|
|
|
71
71
|
|
|
72
72
|
@property
|
|
73
73
|
async def token(self) -> str:
|
|
74
|
-
if not self.
|
|
75
|
-
|
|
74
|
+
if not self.last_token_object or self.last_token_object.expired:
|
|
75
|
+
msg = "Token expired, fetching new token"
|
|
76
|
+
if not self.last_token_object:
|
|
77
|
+
msg = "No token found, fetching new token"
|
|
78
|
+
logger.info(msg)
|
|
79
|
+
self.last_token_object = await self._get_token(
|
|
76
80
|
self.client_id, self.client_secret
|
|
77
81
|
)
|
|
78
82
|
|
|
79
|
-
return self.
|
|
83
|
+
return self.last_token_object.full_token
|
|
@@ -8,7 +8,10 @@ from port_ocean.clients.port.mixins.migrations import MigrationClientMixin
|
|
|
8
8
|
from port_ocean.clients.port.types import (
|
|
9
9
|
KafkaCreds,
|
|
10
10
|
)
|
|
11
|
-
from port_ocean.clients.port.utils import
|
|
11
|
+
from port_ocean.clients.port.utils import (
|
|
12
|
+
handle_status_code,
|
|
13
|
+
get_internal_http_client,
|
|
14
|
+
)
|
|
12
15
|
from port_ocean.exceptions.clients import KafkaCredentialsNotFound
|
|
13
16
|
|
|
14
17
|
|
|
@@ -28,7 +31,7 @@ class PortClient(
|
|
|
28
31
|
integration_version: str,
|
|
29
32
|
):
|
|
30
33
|
self.api_url = f"{base_url}/v1"
|
|
31
|
-
self.client =
|
|
34
|
+
self.client = get_internal_http_client(self)
|
|
32
35
|
self.auth = PortAuthentication(
|
|
33
36
|
self.client,
|
|
34
37
|
client_id,
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
from http import HTTPStatus
|
|
3
|
+
from typing import TYPE_CHECKING, Any
|
|
4
|
+
|
|
5
|
+
import httpx
|
|
6
|
+
|
|
7
|
+
from port_ocean.helpers.retry import RetryTransport
|
|
8
|
+
|
|
9
|
+
if TYPE_CHECKING:
|
|
10
|
+
from port_ocean.clients.port.client import PortClient
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class TokenRetryTransport(RetryTransport):
|
|
14
|
+
def __init__(self, port_client: "PortClient", *args: Any, **kwargs: Any) -> None:
|
|
15
|
+
super().__init__(*args, **kwargs)
|
|
16
|
+
self.port_client = port_client
|
|
17
|
+
|
|
18
|
+
async def _handle_unauthorized(self, response: httpx.Response) -> None:
|
|
19
|
+
token = await self.port_client.auth.token
|
|
20
|
+
response.headers["Authorization"] = f"Bearer {token}"
|
|
21
|
+
|
|
22
|
+
def is_token_error(self, response: httpx.Response) -> bool:
|
|
23
|
+
return (
|
|
24
|
+
response.status_code == HTTPStatus.UNAUTHORIZED
|
|
25
|
+
and "/auth/access_token" not in str(response.request.url)
|
|
26
|
+
and self.port_client.auth.last_token_object is not None
|
|
27
|
+
and self.port_client.auth.last_token_object.expired
|
|
28
|
+
)
|
|
29
|
+
|
|
30
|
+
async def _should_retry_async(self, response: httpx.Response) -> bool:
|
|
31
|
+
if self.is_token_error(response):
|
|
32
|
+
if self._logger:
|
|
33
|
+
self._logger.info(
|
|
34
|
+
"Got unauthorized response, trying to refresh token before retrying"
|
|
35
|
+
)
|
|
36
|
+
await self._handle_unauthorized(response)
|
|
37
|
+
return True
|
|
38
|
+
return await super()._should_retry_async(response)
|
|
39
|
+
|
|
40
|
+
def _should_retry(self, response: httpx.Response) -> bool:
|
|
41
|
+
if self.is_token_error(response):
|
|
42
|
+
if self._logger:
|
|
43
|
+
self._logger.info(
|
|
44
|
+
"Got unauthorized response, trying to refresh token before retrying"
|
|
45
|
+
)
|
|
46
|
+
asyncio.get_running_loop().run_until_complete(
|
|
47
|
+
self._handle_unauthorized(response)
|
|
48
|
+
)
|
|
49
|
+
|
|
50
|
+
return True
|
|
51
|
+
return super()._should_retry(response)
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
from typing import TYPE_CHECKING
|
|
2
|
+
|
|
3
|
+
import httpx
|
|
4
|
+
from loguru import logger
|
|
5
|
+
from werkzeug.local import LocalStack, LocalProxy
|
|
6
|
+
|
|
7
|
+
from port_ocean.clients.port.retry_transport import TokenRetryTransport
|
|
8
|
+
|
|
9
|
+
if TYPE_CHECKING:
|
|
10
|
+
from port_ocean.clients.port.client import PortClient
|
|
11
|
+
|
|
12
|
+
_http_client: LocalStack[httpx.AsyncClient] = LocalStack()
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def _get_http_client_context(port_client: "PortClient") -> httpx.AsyncClient:
|
|
16
|
+
client = _http_client.top
|
|
17
|
+
if client is None:
|
|
18
|
+
client = httpx.AsyncClient(
|
|
19
|
+
transport=TokenRetryTransport(
|
|
20
|
+
port_client,
|
|
21
|
+
httpx.AsyncHTTPTransport(),
|
|
22
|
+
logger=logger,
|
|
23
|
+
)
|
|
24
|
+
)
|
|
25
|
+
_http_client.push(client)
|
|
26
|
+
|
|
27
|
+
return client
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
_port_internal_async_client: httpx.AsyncClient = None # type: ignore
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def get_internal_http_client(port_client: "PortClient") -> httpx.AsyncClient:
|
|
34
|
+
global _port_internal_async_client
|
|
35
|
+
if _port_internal_async_client is None:
|
|
36
|
+
_port_internal_async_client = LocalProxy(
|
|
37
|
+
lambda: _get_http_client_context(port_client)
|
|
38
|
+
)
|
|
39
|
+
|
|
40
|
+
return _port_internal_async_client
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
def handle_status_code(
|
|
44
|
+
response: httpx.Response, should_raise: bool = True, should_log: bool = True
|
|
45
|
+
) -> None:
|
|
46
|
+
if should_log and response.is_error:
|
|
47
|
+
logger.error(
|
|
48
|
+
f"Request failed with status code: {response.status_code}, Error: {response.text}"
|
|
49
|
+
)
|
|
50
|
+
if should_raise:
|
|
51
|
+
response.raise_for_status()
|
|
@@ -1,8 +1,5 @@
|
|
|
1
|
-
import asyncio
|
|
2
|
-
import json
|
|
3
1
|
import signal
|
|
4
|
-
import
|
|
5
|
-
from typing import Any, Callable, Awaitable
|
|
2
|
+
from typing import Any, Callable
|
|
6
3
|
|
|
7
4
|
from confluent_kafka import Consumer, KafkaException, Message # type: ignore
|
|
8
5
|
from loguru import logger
|
|
@@ -25,7 +22,7 @@ class KafkaConsumerConfig(BaseModel):
|
|
|
25
22
|
class KafkaConsumer(BaseConsumer):
|
|
26
23
|
def __init__(
|
|
27
24
|
self,
|
|
28
|
-
msg_process: Callable[[
|
|
25
|
+
msg_process: Callable[[Message], None],
|
|
29
26
|
config: KafkaConsumerConfig,
|
|
30
27
|
org_id: str,
|
|
31
28
|
) -> None:
|
|
@@ -56,23 +53,6 @@ class KafkaConsumer(BaseConsumer):
|
|
|
56
53
|
|
|
57
54
|
self.consumer = Consumer(kafka_config)
|
|
58
55
|
|
|
59
|
-
def _handle_message(self, raw_msg: Message) -> None:
|
|
60
|
-
message = json.loads(raw_msg.value().decode())
|
|
61
|
-
topic = raw_msg.topic()
|
|
62
|
-
|
|
63
|
-
async def try_wrapper() -> None:
|
|
64
|
-
try:
|
|
65
|
-
await self.msg_process(message, topic)
|
|
66
|
-
except Exception as e:
|
|
67
|
-
_type, _, tb = sys.exc_info()
|
|
68
|
-
logger.opt(exception=(_type, None, tb)).error(
|
|
69
|
-
f"Failed to process message: {str(e)}"
|
|
70
|
-
)
|
|
71
|
-
|
|
72
|
-
loop = asyncio.new_event_loop()
|
|
73
|
-
asyncio.set_event_loop(loop)
|
|
74
|
-
loop.run_until_complete(try_wrapper())
|
|
75
|
-
|
|
76
56
|
def start(self) -> None:
|
|
77
57
|
try:
|
|
78
58
|
logger.info("Start consumer...")
|
|
@@ -98,7 +78,8 @@ class KafkaConsumer(BaseConsumer):
|
|
|
98
78
|
"Process message "
|
|
99
79
|
f"from topic {msg.topic()}, partition {msg.partition()}, offset {msg.offset()}"
|
|
100
80
|
)
|
|
101
|
-
self.
|
|
81
|
+
self.msg_process(msg)
|
|
82
|
+
|
|
102
83
|
except Exception as process_error:
|
|
103
84
|
logger.exception(
|
|
104
85
|
"Failed process message"
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
from typing import Callable
|
|
2
|
+
|
|
3
|
+
from port_ocean.context.ocean import (
|
|
4
|
+
initialize_port_ocean_context,
|
|
5
|
+
ocean,
|
|
6
|
+
)
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def wrap_method_with_context(
|
|
10
|
+
func: Callable[..., None],
|
|
11
|
+
) -> Callable[..., None]:
|
|
12
|
+
"""
|
|
13
|
+
A method that wraps a method and initializing the PortOceanContext and invoking the given function.
|
|
14
|
+
|
|
15
|
+
:param func: The function to be wrapped.
|
|
16
|
+
"""
|
|
17
|
+
# assign the current ocean app to a variable
|
|
18
|
+
ocean_app = ocean.app
|
|
19
|
+
|
|
20
|
+
def wrapper(*args, **kwargs) -> None: # type: ignore
|
|
21
|
+
initialize_port_ocean_context(ocean_app=ocean_app)
|
|
22
|
+
func(*args, **kwargs)
|
|
23
|
+
|
|
24
|
+
return wrapper
|
|
@@ -1,14 +1,17 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
import json
|
|
3
|
+
import sys
|
|
1
4
|
import threading
|
|
2
|
-
from typing import Any,
|
|
5
|
+
from typing import Any, Literal
|
|
3
6
|
|
|
7
|
+
from confluent_kafka import Message # type: ignore
|
|
4
8
|
from loguru import logger
|
|
5
9
|
|
|
6
10
|
from port_ocean.consumers.kafka_consumer import KafkaConsumer, KafkaConsumerConfig
|
|
7
11
|
from port_ocean.context.ocean import (
|
|
8
|
-
PortOceanContext,
|
|
9
|
-
initialize_port_ocean_context,
|
|
10
12
|
ocean,
|
|
11
13
|
)
|
|
14
|
+
from port_ocean.context.utils import wrap_method_with_context
|
|
12
15
|
from port_ocean.core.event_listener.base import (
|
|
13
16
|
BaseEventListener,
|
|
14
17
|
EventListenerEvents,
|
|
@@ -100,30 +103,48 @@ class KafkaEventListener(BaseEventListener):
|
|
|
100
103
|
|
|
101
104
|
return False
|
|
102
105
|
|
|
103
|
-
|
|
106
|
+
def _resync_in_new_event_loop(self, message: dict[Any, Any]) -> None:
|
|
104
107
|
"""
|
|
105
|
-
A private method that handles incoming Kafka messages.
|
|
106
|
-
|
|
108
|
+
A private method that handles incoming Kafka messages in a separate thread.
|
|
109
|
+
It triggers the `on_resync` event handler.
|
|
107
110
|
"""
|
|
108
|
-
if not self._should_be_processed(message, topic):
|
|
109
|
-
return
|
|
110
111
|
|
|
111
|
-
|
|
112
|
-
|
|
112
|
+
async def try_wrapper() -> None:
|
|
113
|
+
try:
|
|
114
|
+
await self.events["on_resync"](message)
|
|
115
|
+
except Exception as e:
|
|
116
|
+
_type, _, tb = sys.exc_info()
|
|
117
|
+
logger.opt(exception=(_type, None, tb)).error(
|
|
118
|
+
f"Failed to process message: {str(e)}"
|
|
119
|
+
)
|
|
120
|
+
|
|
121
|
+
loop = asyncio.new_event_loop()
|
|
122
|
+
asyncio.set_event_loop(loop)
|
|
123
|
+
loop.run_until_complete(try_wrapper())
|
|
113
124
|
|
|
114
|
-
def
|
|
115
|
-
self, context: PortOceanContext, func: Callable[[], None]
|
|
116
|
-
) -> Callable[[], None]:
|
|
125
|
+
def _handle_message(self, raw_msg: Message) -> None:
|
|
117
126
|
"""
|
|
118
|
-
A method that
|
|
127
|
+
A private method that handles incoming Kafka messages.
|
|
128
|
+
If the message should be processed (determined by `_should_be_processed`), it triggers the corresponding event handler.
|
|
129
|
+
|
|
130
|
+
Spawning a thread to handle the message allows the Kafka consumer to continue polling for new messages.
|
|
131
|
+
Using wrap_method_with_context ensures that the thread has access to the current context.
|
|
119
132
|
"""
|
|
120
|
-
|
|
133
|
+
message = json.loads(raw_msg.value().decode())
|
|
134
|
+
topic = raw_msg.topic()
|
|
121
135
|
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
func()
|
|
136
|
+
if not self._should_be_processed(message, topic):
|
|
137
|
+
return
|
|
125
138
|
|
|
126
|
-
|
|
139
|
+
if "change.log" in topic and message is not None:
|
|
140
|
+
thread_name = f"ocean_event_handler_{raw_msg.offset()}"
|
|
141
|
+
logger.info(f"spawning thread {thread_name} to start resync")
|
|
142
|
+
threading.Thread(
|
|
143
|
+
name=thread_name,
|
|
144
|
+
target=wrap_method_with_context(self._resync_in_new_event_loop),
|
|
145
|
+
args=(message,),
|
|
146
|
+
).start()
|
|
147
|
+
logger.info(f"thread {thread_name} started")
|
|
127
148
|
|
|
128
149
|
async def start(self) -> None:
|
|
129
150
|
"""
|
|
@@ -138,5 +159,5 @@ class KafkaEventListener(BaseEventListener):
|
|
|
138
159
|
logger.info("Starting Kafka consumer")
|
|
139
160
|
threading.Thread(
|
|
140
161
|
name="ocean_kafka_consumer",
|
|
141
|
-
target=
|
|
162
|
+
target=wrap_method_with_context(func=consumer.start),
|
|
142
163
|
).start()
|
|
@@ -1,7 +1,8 @@
|
|
|
1
|
-
from graphlib import TopologicalSorter
|
|
1
|
+
from graphlib import TopologicalSorter, CycleError
|
|
2
2
|
from typing import Set
|
|
3
3
|
|
|
4
4
|
from port_ocean.core.models import Entity
|
|
5
|
+
from port_ocean.exceptions.core import OceanAbortException
|
|
5
6
|
|
|
6
7
|
Node = tuple[str, str]
|
|
7
8
|
|
|
@@ -35,4 +36,11 @@ def order_by_entities_dependencies(entities: list[Entity]) -> list[Entity]:
|
|
|
35
36
|
nodes[node(entity)].add(node(related_entity))
|
|
36
37
|
|
|
37
38
|
sort_op = TopologicalSorter(nodes)
|
|
38
|
-
|
|
39
|
+
try:
|
|
40
|
+
return [entities_map[item] for item in sort_op.static_order()]
|
|
41
|
+
except CycleError as ex:
|
|
42
|
+
raise OceanAbortException(
|
|
43
|
+
"Cannot order entities due to cyclic dependencies. \n"
|
|
44
|
+
"If you do want to have cyclic dependencies, please make sure to set the keys"
|
|
45
|
+
" 'createMissingRelatedEntities' and 'deleteDependentEntities' in the integration config in Port."
|
|
46
|
+
) from ex
|
|
@@ -0,0 +1,267 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
import random
|
|
3
|
+
import time
|
|
4
|
+
from datetime import datetime
|
|
5
|
+
from functools import partial
|
|
6
|
+
from http import HTTPStatus
|
|
7
|
+
from typing import Any, Callable, Coroutine, Iterable, Mapping, Union
|
|
8
|
+
|
|
9
|
+
import httpx
|
|
10
|
+
from dateutil.parser import isoparse # type: ignore
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
# Adapted from https://github.com/encode/httpx/issues/108#issuecomment-1434439481
|
|
14
|
+
class RetryTransport(httpx.AsyncBaseTransport, httpx.BaseTransport):
|
|
15
|
+
"""
|
|
16
|
+
A custom HTTP transport that automatically retries requests using an exponential backoff strategy
|
|
17
|
+
for specific HTTP status codes and request methods.
|
|
18
|
+
|
|
19
|
+
Args:
|
|
20
|
+
wrapped_transport (Union[httpx.BaseTransport, httpx.AsyncBaseTransport]): The underlying HTTP transport
|
|
21
|
+
to wrap and use for making requests.
|
|
22
|
+
max_attempts (int, optional): The maximum number of times to retry a request before giving up. Defaults to 10.
|
|
23
|
+
max_backoff_wait (float, optional): The maximum time to wait between retries in seconds. Defaults to 60.
|
|
24
|
+
backoff_factor (float, optional): The factor by which the wait time increases with each retry attempt.
|
|
25
|
+
Defaults to 0.1.
|
|
26
|
+
jitter_ratio (float, optional): The amount of jitter to add to the backoff time. Jitter is a random
|
|
27
|
+
value added to the backoff time to avoid a "thundering herd" effect. The value should be between 0 and 0.5.
|
|
28
|
+
Defaults to 0.1.
|
|
29
|
+
respect_retry_after_header (bool, optional): Whether to respect the Retry-After header in HTTP responses
|
|
30
|
+
when deciding how long to wait before retrying. Defaults to True.
|
|
31
|
+
retryable_methods (Iterable[str], optional): The HTTP methods that can be retried. Defaults to
|
|
32
|
+
["HEAD", "GET", "PUT", "DELETE", "OPTIONS", "TRACE"].
|
|
33
|
+
retry_status_codes (Iterable[int], optional): The HTTP status codes that can be retried. Defaults to
|
|
34
|
+
[429, 502, 503, 504].
|
|
35
|
+
|
|
36
|
+
Attributes:
|
|
37
|
+
_wrapped_transport (Union[httpx.BaseTransport, httpx.AsyncBaseTransport]): The underlying HTTP transport
|
|
38
|
+
being wrapped.
|
|
39
|
+
_max_attempts (int): The maximum number of times to retry a request.
|
|
40
|
+
_backoff_factor (float): The factor by which the wait time increases with each retry attempt.
|
|
41
|
+
_respect_retry_after_header (bool): Whether to respect the Retry-After header in HTTP responses.
|
|
42
|
+
_retryable_methods (frozenset): The HTTP methods that can be retried.
|
|
43
|
+
_retry_status_codes (frozenset): The HTTP status codes that can be retried.
|
|
44
|
+
_jitter_ratio (float): The amount of jitter to add to the backoff time.
|
|
45
|
+
_max_backoff_wait (float): The maximum time to wait between retries in seconds.
|
|
46
|
+
|
|
47
|
+
"""
|
|
48
|
+
|
|
49
|
+
RETRYABLE_METHODS = frozenset(["HEAD", "GET", "PUT", "DELETE", "OPTIONS", "TRACE"])
|
|
50
|
+
RETRYABLE_STATUS_CODES = frozenset(
|
|
51
|
+
[
|
|
52
|
+
HTTPStatus.TOO_MANY_REQUESTS,
|
|
53
|
+
HTTPStatus.BAD_GATEWAY,
|
|
54
|
+
HTTPStatus.SERVICE_UNAVAILABLE,
|
|
55
|
+
HTTPStatus.GATEWAY_TIMEOUT,
|
|
56
|
+
]
|
|
57
|
+
)
|
|
58
|
+
MAX_BACKOFF_WAIT = 60
|
|
59
|
+
|
|
60
|
+
def __init__(
|
|
61
|
+
self,
|
|
62
|
+
wrapped_transport: Union[httpx.BaseTransport, httpx.AsyncBaseTransport],
|
|
63
|
+
max_attempts: int = 10,
|
|
64
|
+
max_backoff_wait: float = MAX_BACKOFF_WAIT,
|
|
65
|
+
backoff_factor: float = 0.1,
|
|
66
|
+
jitter_ratio: float = 0.1,
|
|
67
|
+
respect_retry_after_header: bool = True,
|
|
68
|
+
retryable_methods: Iterable[str] | None = None,
|
|
69
|
+
retry_status_codes: Iterable[int] | None = None,
|
|
70
|
+
logger: Any | None = None,
|
|
71
|
+
) -> None:
|
|
72
|
+
"""
|
|
73
|
+
Initializes the instance of RetryTransport class with the given parameters.
|
|
74
|
+
|
|
75
|
+
Args:
|
|
76
|
+
wrapped_transport (Union[httpx.BaseTransport, httpx.AsyncBaseTransport]):
|
|
77
|
+
The transport layer that will be wrapped and retried upon failure.
|
|
78
|
+
max_attempts (int, optional):
|
|
79
|
+
The maximum number of times the request can be retried in case of failure.
|
|
80
|
+
Defaults to 10.
|
|
81
|
+
max_backoff_wait (float, optional):
|
|
82
|
+
The maximum amount of time (in seconds) to wait before retrying a request.
|
|
83
|
+
Defaults to 60.
|
|
84
|
+
backoff_factor (float, optional):
|
|
85
|
+
The factor by which the waiting time will be multiplied in each retry attempt.
|
|
86
|
+
Defaults to 0.1.
|
|
87
|
+
jitter_ratio (float, optional):
|
|
88
|
+
The ratio of randomness added to the waiting time to prevent simultaneous retries.
|
|
89
|
+
Should be between 0 and 0.5. Defaults to 0.1.
|
|
90
|
+
respect_retry_after_header (bool, optional):
|
|
91
|
+
A flag to indicate if the Retry-After header should be respected.
|
|
92
|
+
If True, the waiting time specified in Retry-After header is used for the waiting time.
|
|
93
|
+
Defaults to True.
|
|
94
|
+
retryable_methods (Iterable[str], optional):
|
|
95
|
+
The HTTP methods that can be retried. Defaults to ['HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'].
|
|
96
|
+
retry_status_codes (Iterable[int], optional):
|
|
97
|
+
The HTTP status codes that can be retried.
|
|
98
|
+
Defaults to [429, 502, 503, 504].
|
|
99
|
+
logger (Any): The logger to use for logging retries.
|
|
100
|
+
"""
|
|
101
|
+
self._wrapped_transport = wrapped_transport
|
|
102
|
+
if jitter_ratio < 0 or jitter_ratio > 0.5:
|
|
103
|
+
raise ValueError(
|
|
104
|
+
f"Jitter ratio should be between 0 and 0.5, actual {jitter_ratio}"
|
|
105
|
+
)
|
|
106
|
+
|
|
107
|
+
self._max_attempts = max_attempts
|
|
108
|
+
self._backoff_factor = backoff_factor
|
|
109
|
+
self._respect_retry_after_header = respect_retry_after_header
|
|
110
|
+
self._retryable_methods = (
|
|
111
|
+
frozenset(retryable_methods)
|
|
112
|
+
if retryable_methods
|
|
113
|
+
else self.RETRYABLE_METHODS
|
|
114
|
+
)
|
|
115
|
+
self._retry_status_codes = (
|
|
116
|
+
frozenset(retry_status_codes)
|
|
117
|
+
if retry_status_codes
|
|
118
|
+
else self.RETRYABLE_STATUS_CODES
|
|
119
|
+
)
|
|
120
|
+
self._jitter_ratio = jitter_ratio
|
|
121
|
+
self._max_backoff_wait = max_backoff_wait
|
|
122
|
+
self._logger = logger
|
|
123
|
+
|
|
124
|
+
def handle_request(self, request: httpx.Request) -> httpx.Response:
|
|
125
|
+
"""
|
|
126
|
+
Sends an HTTP request, possibly with retries.
|
|
127
|
+
|
|
128
|
+
Args:
|
|
129
|
+
request (httpx.Request): The request to send.
|
|
130
|
+
|
|
131
|
+
Returns:
|
|
132
|
+
httpx.Response: The response received.
|
|
133
|
+
|
|
134
|
+
"""
|
|
135
|
+
transport: httpx.BaseTransport = self._wrapped_transport # type: ignore
|
|
136
|
+
if request.method in self._retryable_methods:
|
|
137
|
+
send_method = partial(transport.handle_request)
|
|
138
|
+
response = self._retry_operation(request, send_method)
|
|
139
|
+
else:
|
|
140
|
+
response = transport.handle_request(request)
|
|
141
|
+
return response
|
|
142
|
+
|
|
143
|
+
async def handle_async_request(self, request: httpx.Request) -> httpx.Response:
|
|
144
|
+
"""Sends an HTTP request, possibly with retries.
|
|
145
|
+
|
|
146
|
+
Args:
|
|
147
|
+
request: The request to perform.
|
|
148
|
+
|
|
149
|
+
Returns:
|
|
150
|
+
The response.
|
|
151
|
+
|
|
152
|
+
"""
|
|
153
|
+
transport: httpx.AsyncBaseTransport = self._wrapped_transport # type: ignore
|
|
154
|
+
if request.method in self._retryable_methods:
|
|
155
|
+
send_method = partial(transport.handle_async_request)
|
|
156
|
+
response = await self._retry_operation_async(request, send_method)
|
|
157
|
+
else:
|
|
158
|
+
response = await transport.handle_async_request(request)
|
|
159
|
+
return response
|
|
160
|
+
|
|
161
|
+
async def aclose(self) -> None:
    """Close the wrapped async transport.

    Terminates all outstanding connections and rejects any further
    requests. Call this before dropping the last reference so that
    connections are cleaned up properly.
    """
    wrapped: httpx.AsyncBaseTransport = self._wrapped_transport  # type: ignore
    await wrapped.aclose()
|
|
170
|
+
|
|
171
|
+
def close(self) -> None:
    """Close the wrapped sync transport.

    Terminates all outstanding connections and rejects any further
    requests. Call this before dropping the last reference so that
    connections are cleaned up properly.
    """
    wrapped: httpx.BaseTransport = self._wrapped_transport  # type: ignore
    wrapped.close()
|
|
180
|
+
|
|
181
|
+
def _should_retry(self, response: httpx.Response) -> bool:
    """Return True when the response status code is configured as retryable."""
    status = response.status_code
    return status in self._retry_status_codes
|
|
183
|
+
|
|
184
|
+
async def _should_retry_async(self, response: httpx.Response) -> bool:
    """Async variant: return True when the status code is configured as retryable."""
    status = response.status_code
    return status in self._retry_status_codes
|
|
186
|
+
|
|
187
|
+
def _calculate_sleep(
    self, attempts_made: int, headers: Union[httpx.Headers, Mapping[str, str]]
) -> float:
    """Compute how long to sleep before the next retry attempt.

    Precedence: a server-supplied Retry-After header (when respected and
    parseable) wins; otherwise exponential backoff with jitter is used.
    The result is always capped at self._max_backoff_wait, except for a
    plain integer Retry-After, which is returned uncapped (as written).
    """
    # Retry-After
    # The Retry-After response HTTP header indicates how long the user agent should wait before
    # making a follow-up request. There are three main cases this header is used:
    # - When sent with a 503 (Service Unavailable) response, this indicates how long the service
    #   is expected to be unavailable.
    # - When sent with a 429 (Too Many Requests) response, this indicates how long to wait before
    #   making a new request.
    # - When sent with a redirect response, such as 301 (Moved Permanently), this indicates the
    #   minimum time that the user agent is asked to wait before issuing the redirected request.
    retry_after_header = (headers.get("Retry-After") or "").strip()
    if self._respect_retry_after_header and retry_after_header:
        # Case 1: delay given in whole seconds.
        if retry_after_header.isdigit():
            return float(retry_after_header)

        # Case 2: delay given as an HTTP date; fall through to backoff on
        # parse failure or a date that is already in the past.
        try:
            parsed_date = isoparse(
                retry_after_header
            ).astimezone()  # converts to local time
            diff = (parsed_date - datetime.now().astimezone()).total_seconds()
            if diff > 0:
                return min(diff, self._max_backoff_wait)
        except ValueError:
            pass

    # Exponential backoff: factor * 2^(attempt-1), with +/- jitter_ratio noise.
    backoff = self._backoff_factor * (2 ** (attempts_made - 1))
    jitter = (backoff * self._jitter_ratio) * random.choice([1, -1])
    total_backoff = backoff + jitter
    return min(total_backoff, self._max_backoff_wait)
|
|
218
|
+
|
|
219
|
+
async def _retry_operation_async(
    self,
    request: httpx.Request,
    send_method: Callable[..., Coroutine[Any, Any, httpx.Response]],
) -> httpx.Response:
    """Send *request* via *send_method*, retrying retryable responses.

    Between attempts this sleeps for a duration computed by
    ``_calculate_sleep``; the headers of the previous failed response are
    forwarded so a server-provided ``Retry-After`` value is honored
    (previously an empty mapping was passed, so the header was ignored).

    Args:
        request: The request to perform.
        send_method: Coroutine that performs the actual transport send.

    Returns:
        The last response received — either a non-retryable one or the
        final attempt's response once attempts are exhausted.
    """
    remaining_attempts = self._max_attempts
    attempts_made = 0
    last_response = None  # previous failed response, kept for headers/status
    while True:
        if attempts_made > 0 and last_response is not None:
            # Forward the failed response's headers so Retry-After is respected.
            sleep_time = self._calculate_sleep(attempts_made, last_response.headers)
            if self._logger:
                self._logger.warning(
                    f"Request {request.method} {request.url} failed with status code:"
                    f" {last_response.status_code}, retrying in {sleep_time} seconds."
                )
            await asyncio.sleep(sleep_time)
        response = await send_method(request)
        response.request = request
        if remaining_attempts < 1 or not (await self._should_retry_async(response)):
            return response
        # Status and headers stay readable after the body is closed.
        last_response = response
        await response.aclose()
        attempts_made += 1
        remaining_attempts -= 1
|
|
243
|
+
|
|
244
|
+
def _retry_operation(
    self,
    request: httpx.Request,
    send_method: Callable[..., httpx.Response],
) -> httpx.Response:
    """Send *request* via *send_method*, retrying retryable responses (sync).

    Between attempts this sleeps for a duration computed by
    ``_calculate_sleep``; the headers of the previous failed response are
    forwarded so a server-provided ``Retry-After`` value is honored
    (previously an empty mapping was passed, so the header was ignored).

    Args:
        request: The request to perform.
        send_method: Callable that performs the actual transport send.

    Returns:
        The last response received — either a non-retryable one or the
        final attempt's response once attempts are exhausted.
    """
    remaining_attempts = self._max_attempts
    attempts_made = 0
    last_response = None  # previous failed response, kept for headers/status
    while True:
        if attempts_made > 0 and last_response is not None:
            # Forward the failed response's headers so Retry-After is respected.
            sleep_time = self._calculate_sleep(attempts_made, last_response.headers)
            if self._logger:
                self._logger.warning(
                    f"Request {request.method} {request.url} failed with status code:"
                    f" {last_response.status_code}, retrying in {sleep_time} seconds."
                )
            time.sleep(sleep_time)
        response = send_method(request)
        response.request = request
        if remaining_attempts < 1 or not self._should_retry(response):
            return response
        # Status and headers stay readable after the body is closed.
        last_response = response
        response.close()
        attempts_made += 1
        remaining_attempts -= 1
|
|
File without changes
|
|
@@ -3,17 +3,49 @@ import inspect
|
|
|
3
3
|
from asyncio import ensure_future
|
|
4
4
|
from functools import wraps
|
|
5
5
|
from importlib.util import module_from_spec, spec_from_file_location
|
|
6
|
-
from pathlib import Path
|
|
7
6
|
from time import time
|
|
8
7
|
from traceback import format_exception
|
|
9
8
|
from types import ModuleType
|
|
10
9
|
from typing import Callable, Any, Coroutine
|
|
11
10
|
from uuid import uuid4
|
|
12
11
|
|
|
12
|
+
import httpx
|
|
13
13
|
import tomli
|
|
14
14
|
import yaml
|
|
15
15
|
from loguru import logger
|
|
16
|
+
from pathlib import Path
|
|
16
17
|
from starlette.concurrency import run_in_threadpool
|
|
18
|
+
from werkzeug.local import LocalStack, LocalProxy
|
|
19
|
+
|
|
20
|
+
from port_ocean.helpers.retry import RetryTransport
|
|
21
|
+
|
|
22
|
+
_http_client: LocalStack[httpx.AsyncClient] = LocalStack()
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def _get_http_client_context() -> httpx.AsyncClient:
    """Return the context-local AsyncClient, creating it lazily on first use.

    The client lives on a werkzeug LocalStack, so each execution context
    gets its own instance; it is built with RetryTransport so outbound
    requests retry automatically at the transport layer.
    """
    existing = _http_client.top
    if existing is not None:
        return existing

    retrying_transport = RetryTransport(
        httpx.AsyncHTTPTransport(),
        logger=logger,
    )
    new_client = httpx.AsyncClient(transport=retrying_transport)
    _http_client.push(new_client)
    return new_client
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
"""
|
|
40
|
+
Utilize this client for all outbound integration requests to the third-party application. It functions as a wrapper
|
|
41
|
+
around the httpx.AsyncClient, incorporating retry logic at the transport layer for handling retries on 5xx errors and
|
|
42
|
+
connection errors.
|
|
43
|
+
|
|
44
|
+
The client is instantiated lazily, only coming into existence upon its initial access. It should not be closed when in
|
|
45
|
+
use, as it operates as a singleton shared across all events in the thread. It also takes care of recreating the client
|
|
46
|
+
in scenarios such as the creation of a new event loop, such as when initiating a new thread.
|
|
47
|
+
"""
|
|
48
|
+
http_async_client: httpx.AsyncClient = LocalProxy(lambda: _get_http_client_context()) # type: ignore
|
|
17
49
|
|
|
18
50
|
|
|
19
51
|
def get_time(seconds_precision: bool = True) -> float:
|
|
@@ -114,12 +146,13 @@ def repeat_every(
|
|
|
114
146
|
if wait_first:
|
|
115
147
|
await asyncio.sleep(seconds)
|
|
116
148
|
while max_repetitions is None or repetitions < max_repetitions:
|
|
149
|
+
# count the repetition even if an exception is raised
|
|
150
|
+
repetitions += 1
|
|
117
151
|
try:
|
|
118
152
|
if is_coroutine:
|
|
119
153
|
await func() # type: ignore
|
|
120
154
|
else:
|
|
121
155
|
await run_in_threadpool(func)
|
|
122
|
-
repetitions += 1
|
|
123
156
|
except Exception as exc:
|
|
124
157
|
formatted_exception = "".join(
|
|
125
158
|
format_exception(type(exc), exc, exc.__traceback__)
|
|
@@ -1,28 +0,0 @@
|
|
|
1
|
-
import httpx
|
|
2
|
-
from loguru import logger
|
|
3
|
-
from werkzeug.local import LocalStack, LocalProxy
|
|
4
|
-
|
|
5
|
-
_http_client: LocalStack[httpx.AsyncClient] = LocalStack()
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
def _get_http_client_context() -> httpx.AsyncClient:
|
|
9
|
-
client = _http_client.top
|
|
10
|
-
if client is None:
|
|
11
|
-
client = httpx.AsyncClient()
|
|
12
|
-
_http_client.push(client)
|
|
13
|
-
|
|
14
|
-
return client
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
async_client: httpx.AsyncClient = LocalProxy(lambda: _get_http_client_context()) # type: ignore
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
def handle_status_code(
|
|
21
|
-
response: httpx.Response, should_raise: bool = True, should_log: bool = True
|
|
22
|
-
) -> None:
|
|
23
|
-
if should_log and response.is_error:
|
|
24
|
-
logger.error(
|
|
25
|
-
f"Request failed with status code: {response.status_code}, Error: {response.text}"
|
|
26
|
-
)
|
|
27
|
-
if should_raise:
|
|
28
|
-
response.raise_for_status()
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/handlers/entities_state_applier/__init__.py
RENAMED
|
File without changes
|
{port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/handlers/entities_state_applier/base.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/handlers/entity_processor/__init__.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
{port_ocean-0.4.2rc2 → port_ocean-0.4.4}/port_ocean/core/handlers/port_app_config/__init__.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|