nebu-0.1.99-py3-none-any.whl → nebu-0.1.100-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nebu/processors/decorate.py +62 -0
- nebu/processors/processor.py +71 -1
- {nebu-0.1.99.dist-info → nebu-0.1.100.dist-info}/METADATA +1 -1
- {nebu-0.1.99.dist-info → nebu-0.1.100.dist-info}/RECORD +7 -7
- {nebu-0.1.99.dist-info → nebu-0.1.100.dist-info}/WHEEL +0 -0
- {nebu-0.1.99.dist-info → nebu-0.1.100.dist-info}/licenses/LICENSE +0 -0
- {nebu-0.1.99.dist-info → nebu-0.1.100.dist-info}/top_level.txt +0 -0
nebu/processors/decorate.py
CHANGED
````diff
@@ -396,7 +396,68 @@ def processor(
     hot_reload: bool = True,
     debug: bool = False,
     name: Optional[str] = None,
+    wait_for_healthy: bool = False,
 ):
+    """
+    Decorator that creates a processor from a function.
+
+    This decorator transforms a function into a distributed processor that can be
+    deployed and executed remotely. The function code is uploaded to S3 and a
+    containerized processor is created.
+
+    Args:
+        image: Container image to use for the processor
+        setup_script: Optional script to run during container setup
+        scale: Scaling configuration for the processor
+        min_replicas: Minimum number of processor replicas
+        max_replicas: Maximum number of processor replicas
+        platform: Target platform for container deployment
+        accelerators: List of accelerators (e.g., GPUs) to request
+        namespace: Kubernetes namespace for the processor
+        labels: Additional labels to apply to the processor
+        env: Environment variables to set in the container
+        volumes: Volume mounts for the container
+        resources: Resource requests and limits
+        meters: Billing/metering configuration
+        authz: Authorization configuration
+        python_cmd: Python command to use (default: "python")
+        no_delete: Whether to prevent deletion of the processor
+        include: List of objects to include in the processor environment
+        init_func: Optional initialization function to run once
+        queue: Message queue configuration
+        timeout: Processor timeout configuration
+        ssh_keys: SSH keys for secure access
+        ports: Port configurations for the container
+        proxy_port: Proxy port configuration
+        health_check: Container health check configuration
+        execution_mode: Execution mode ("inline" or "subprocess")
+        config: Global configuration override
+        hot_reload: Enable/disable hot code reloading (default: True)
+        debug: Enable debug mode
+        name: Override processor name (defaults to function name)
+        wait_for_healthy: If True, wait for the processor to become healthy
+            before the decorator returns (default: False)
+
+    Returns:
+        Processor: A Processor instance wrapping the decorated function
+
+    Example:
+        ```python
+        @processor(
+            image="python:3.11",
+            setup_script="pip install numpy",
+            wait_for_healthy=True
+        )
+        def my_processor(data: Message[InputModel]) -> OutputModel:
+            return OutputModel(result=data.content.value * 2)
+        ```
+
+    Note:
+        When wait_for_healthy=True, the decorator will send health check messages
+        to the processor and wait for successful responses before returning. This
+        ensures the processor is ready to accept requests but may add startup time.
+    """
+
     def decorator(
         func: Callable[[Any], Any],
     ) -> Processor:
@@ -1208,6 +1269,7 @@ def processor(
             max_replicas=max_replicas,
             scale_config=scale,
             no_delete=no_delete,
+            wait_for_healthy=wait_for_healthy,
         )
         logger.debug(
             f"Decorator: Processor instance '{processor_name}' created successfully."
````
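For context, the docstring example above can be written out as a self-contained script roughly as follows. This is a minimal sketch, not taken from the package: it assumes `processor` and `Message` are importable from the top-level `nebu` package, and `InputModel`/`OutputModel` are illustrative stand-in models.

```python
# Minimal sketch of the new wait_for_healthy flag (assumed import path;
# InputModel/OutputModel are illustrative models, not shipped with nebu).
from pydantic import BaseModel

from nebu import Message, processor  # assumed import path


class InputModel(BaseModel):
    value: int


class OutputModel(BaseModel):
    result: int


@processor(
    image="python:3.11",
    setup_script="pip install numpy",
    wait_for_healthy=True,  # block until the remote processor answers health checks
)
def my_processor(data: Message[InputModel]) -> OutputModel:
    return OutputModel(result=data.content.value * 2)


# Because wait_for_healthy=True, the decorator only returns once the deployed
# processor has responded to a HealthCheck message, so a first call like
# my_processor(InputModel(value=21)) should not hit a still-booting container.
```

Per the docstring's Note, the flag trades extra startup time for the guarantee that the processor is ready before any requests are sent.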
nebu/processors/processor.py
CHANGED
````diff
@@ -1,6 +1,8 @@
 import json
 import threading
-
+import time
+import uuid
+from typing import Any, Dict, Generic, List, Optional, TypeVar, cast
 
 import requests
 from pydantic import BaseModel
@@ -107,6 +109,7 @@ class Processor(Generic[InputType, OutputType]):
         config: Optional[GlobalConfig] = None,
         api_key: Optional[str] = None,
         no_delete: bool = False,
+        wait_for_healthy: bool = False,
     ):
         self.config = config or GlobalConfig.read()
         if not self.config:
@@ -208,6 +211,10 @@ class Processor(Generic[InputType, OutputType]):
             self.processor = V1Processor.model_validate(patch_response.json())
             logger.info(f"Updated Processor {self.processor.metadata.name}")
 
+        # --- Wait for health check if requested ---
+        if wait_for_healthy:
+            self._wait_for_health_check()
+
     def __call__(
         self,
         data: InputType,
@@ -230,6 +237,7 @@ class Processor(Generic[InputType, OutputType]):
         logs: bool = False,
         api_key: Optional[str] = None,
         user_key: Optional[str] = None,
+        timeout: Optional[float] = 600.0,
     ) -> OutputType | Dict[str, Any] | None:
         """
         Send data to the processor and optionally stream logs in the background.
@@ -260,6 +268,7 @@ class Processor(Generic[InputType, OutputType]):
             messages_url,
             json=stream_data.model_dump(mode="json", exclude_none=True),
             headers={"Authorization": f"Bearer {api_key}"},
+            timeout=timeout,
         )
         response.raise_for_status()
         send_response_json = response.json()
@@ -432,3 +441,64 @@ class Processor(Generic[InputType, OutputType]):
             self._log_thread = None
         else:
             logger.info(f"No active log stream to stop for {self.name}.")
+
+    def _wait_for_health_check(
+        self, timeout: float = 300.0, retry_interval: float = 5.0
+    ) -> None:
+        """
+        Wait for the processor to respond to health checks.
+
+        Args:
+            timeout: Maximum time to wait for health check in seconds
+            retry_interval: Time between health check attempts in seconds
+        """
+        if not self.processor or not self.processor.metadata.name:
+            raise ValueError("Processor not found, cannot perform health check")
+
+        logger.info(
+            f"Waiting for processor {self.processor.metadata.name} to be healthy..."
+        )
+
+        start_time = time.time()
+        while time.time() - start_time < timeout:
+            try:
+                # Create a health check message
+                health_check_data = {
+                    "kind": "HealthCheck",
+                    "id": str(uuid.uuid4()),
+                    "content": {},
+                    "created_at": time.time(),
+                }
+
+                # Send health check and wait for response
+                response = self.send(
+                    data=cast(InputType, health_check_data),
+                    wait=True,
+                    timeout=30.0,  # Short timeout for individual health check
+                )
+
+                # Check if the response indicates health
+                if response and isinstance(response, dict):
+                    status = response.get("status")
+                    content = response.get("content", {})
+                    if status == "success" and content.get("status") == "healthy":
+                        logger.info(
+                            f"Processor {self.processor.metadata.name} is healthy!"
+                        )
+                        return
+
+                logger.debug(
+                    f"Health check attempt failed, retrying in {retry_interval}s..."
+                )
+
+            except Exception as e:
+                logger.debug(
+                    f"Health check failed with error: {e}, retrying in {retry_interval}s..."
+                )
+
+            time.sleep(retry_interval)
+
+        # If we get here, we timed out
+        raise TimeoutError(
+            f"Processor {self.processor.metadata.name} failed to become healthy within {timeout} seconds"
+        )
````
{nebu-0.1.99.dist-info → nebu-0.1.100.dist-info}/RECORD
CHANGED
````diff
@@ -15,14 +15,14 @@ nebu/namespaces/models.py,sha256=EqUOpzhVBhvJw2P92ONDUbIgC31M9jMmcaG5vyOrsWg,497
 nebu/namespaces/namespace.py,sha256=oeZyGqsIGIrppyjif1ZONsdTmqRgd9oSLFE1BChXTTE,5247
 nebu/processors/consumer.py,sha256=j6iKF_wc8RUNKrFqjB5keUX-Gj9hGZUbmAjEyTm-Oj0,55367
 nebu/processors/consumer_process_worker.py,sha256=h--eNFKaLbUayxn88mB8oGGdrU2liE1dnwm_TPlewX8,36960
-nebu/processors/decorate.py,sha256=
+nebu/processors/decorate.py,sha256=AfHVCoNbW7RymccF5ewleEL-GlMiqVH1-t9bCmD60rk,58654
 nebu/processors/default.py,sha256=cy4ETMdbdRGkrvbYec1o60h7mGDlGN5JsuUph0ENtDU,364
 nebu/processors/models.py,sha256=g4B1t6Rgoy-NUEHBLeQc0EENzHXLDlWSio8Muv7cTDU,4093
-nebu/processors/processor.py,sha256=
+nebu/processors/processor.py,sha256=Njy32S0r77K4ww1D5U0dhD0vgTyJSMQztKwpkzHwLwg,19250
 nebu/redis/models.py,sha256=coPovAcVXnOU1Xh_fpJL4PO3QctgK9nBe5QYoqEcnxg,1230
 nebu/services/service.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-nebu-0.1.
-nebu-0.1.
-nebu-0.1.
-nebu-0.1.
-nebu-0.1.
+nebu-0.1.100.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+nebu-0.1.100.dist-info/METADATA,sha256=5Kp2Ce9vAVCX7ugzOzanWWHZHErMvmntkH1nYfml6WU,1798
+nebu-0.1.100.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
+nebu-0.1.100.dist-info/top_level.txt,sha256=uLIbEKJeGSHWOAJN5S0i5XBGwybALlF9bYoB1UhdEgQ,5
+nebu-0.1.100.dist-info/RECORD,,
````
{nebu-0.1.99.dist-info → nebu-0.1.100.dist-info}/WHEEL
File without changes

{nebu-0.1.99.dist-info → nebu-0.1.100.dist-info}/licenses/LICENSE
File without changes

{nebu-0.1.99.dist-info → nebu-0.1.100.dist-info}/top_level.txt
File without changes