agentic-notify 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43) hide show
  1. agentic_notify-0.1.0/LICENSE +21 -0
  2. agentic_notify-0.1.0/PKG-INFO +55 -0
  3. agentic_notify-0.1.0/README.md +33 -0
  4. agentic_notify-0.1.0/agentic_notify/__init__.py +4 -0
  5. agentic_notify-0.1.0/agentic_notify/adapters/__init__.py +3 -0
  6. agentic_notify-0.1.0/agentic_notify/adapters/base.py +23 -0
  7. agentic_notify-0.1.0/agentic_notify/adapters/mcp.py +36 -0
  8. agentic_notify-0.1.0/agentic_notify/adapters/mock_reminder.py +17 -0
  9. agentic_notify-0.1.0/agentic_notify/adapters/notes.py +25 -0
  10. agentic_notify-0.1.0/agentic_notify/adapters/webhook.py +44 -0
  11. agentic_notify-0.1.0/agentic_notify/examples/__init__.py +1 -0
  12. agentic_notify-0.1.0/agentic_notify/examples/app.py +49 -0
  13. agentic_notify-0.1.0/agentic_notify/handlers/__init__.py +3 -0
  14. agentic_notify-0.1.0/agentic_notify/handlers/approval.py +32 -0
  15. agentic_notify-0.1.0/agentic_notify/handlers/base.py +25 -0
  16. agentic_notify-0.1.0/agentic_notify/integrations/__init__.py +3 -0
  17. agentic_notify-0.1.0/agentic_notify/integrations/base.py +19 -0
  18. agentic_notify-0.1.0/agentic_notify/integrations/fastapi_integration.py +30 -0
  19. agentic_notify-0.1.0/agentic_notify/integrations/queue.py +29 -0
  20. agentic_notify-0.1.0/agentic_notify/observability/__init__.py +3 -0
  21. agentic_notify-0.1.0/agentic_notify/observability/logger.py +45 -0
  22. agentic_notify-0.1.0/agentic_notify/orchestrator.py +96 -0
  23. agentic_notify-0.1.0/agentic_notify/planning/__init__.py +1 -0
  24. agentic_notify-0.1.0/agentic_notify/planning/openai_client.py +49 -0
  25. agentic_notify-0.1.0/agentic_notify/planning/planner.py +63 -0
  26. agentic_notify-0.1.0/agentic_notify/planning/prompts.py +15 -0
  27. agentic_notify-0.1.0/agentic_notify/policies/__init__.py +3 -0
  28. agentic_notify-0.1.0/agentic_notify/policies/engine.py +35 -0
  29. agentic_notify-0.1.0/agentic_notify/routing/__init__.py +3 -0
  30. agentic_notify-0.1.0/agentic_notify/routing/engine.py +21 -0
  31. agentic_notify-0.1.0/agentic_notify/schemas/__init__.py +12 -0
  32. agentic_notify-0.1.0/agentic_notify/schemas/notification.py +18 -0
  33. agentic_notify-0.1.0/agentic_notify/schemas/result.py +18 -0
  34. agentic_notify-0.1.0/agentic_notify/schemas/workflow.py +36 -0
  35. agentic_notify-0.1.0/agentic_notify/storage/__init__.py +4 -0
  36. agentic_notify-0.1.0/agentic_notify/storage/base.py +19 -0
  37. agentic_notify-0.1.0/agentic_notify/storage/memory.py +19 -0
  38. agentic_notify-0.1.0/agentic_notify/storage/postgres.py +36 -0
  39. agentic_notify-0.1.0/agentic_notify/tools/__init__.py +1 -0
  40. agentic_notify-0.1.0/agentic_notify/tools/registry.py +24 -0
  41. agentic_notify-0.1.0/agentic_notify/workflows/__init__.py +3 -0
  42. agentic_notify-0.1.0/agentic_notify/workflows/engine.py +128 -0
  43. agentic_notify-0.1.0/pyproject.toml +36 -0
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Nikhil Kanamadi
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,55 @@
1
+ Metadata-Version: 2.4
2
+ Name: agentic-notify
3
+ Version: 0.1.0
4
+ Summary: A Python library for building agentic, notification-driven workflows with pluggable adapters, policy-aware handlers, and scalable integration interfaces.
5
+ License-File: LICENSE
6
+ Author: Your Name
7
+ Author-email: you@example.com
8
+ Requires-Python: >=3.10,<4.0
9
+ Classifier: Programming Language :: Python :: 3
10
+ Classifier: Programming Language :: Python :: 3.10
11
+ Classifier: Programming Language :: Python :: 3.11
12
+ Classifier: Programming Language :: Python :: 3.12
13
+ Classifier: Programming Language :: Python :: 3.13
14
+ Classifier: Programming Language :: Python :: 3.14
15
+ Provides-Extra: fastapi
16
+ Provides-Extra: mcp
17
+ Provides-Extra: observability
18
+ Provides-Extra: redis
19
+ Requires-Dist: pydantic (>=2.0.0,<3.0.0)
20
+ Description-Content-Type: text/markdown
21
+
22
+ # agentic-notify
23
+
24
+ A Python library for building **agentic, notification-driven workflows** with pluggable adapters, policy-aware handlers, and scalable integration interfaces for cross-platform native applications.
25
+
26
+ ## Overview
27
+
28
+ `agentic-notify` gives you a **backend-first orchestration SDK** with clear extension points:
29
+
30
+ - **Ingestion & Normalization:** Accept notifications from Android/React Native bridges and convert them to a canonical schema.
31
+ - **Agentic Routing:** Send notifications to workflows based on LLM decisions or rules.
32
+ - **Workflow Orchestration:** Execute steps safely, handle retries, and check execution policies.
33
+ - **Adapters:** Connect your workflow engine to device boundaries (Reminders, Summaries, Local DBs).
34
+
35
+ ## Architecture Approach
36
+
37
+ - **Schemas define contracts** (via Pydantic)
38
+ - **Normalizers unify platforms**
39
+ - **Routers decide**
40
+ - **Workflows orchestrate**
41
+ - **Handlers transform**
42
+ - **Adapters act**
43
+ - **Integrations connect**
44
+ - **Policies constrain**
45
+
46
+ ## Installation
47
+
48
+ ```bash
49
+ # Core package
50
+ pip install agentic-notify
51
+
52
+ # Install with FastAPI and Redis support
53
+ pip install agentic-notify[fastapi,redis]
54
+ ```
55
+
@@ -0,0 +1,33 @@
1
+ # agentic-notify
2
+
3
+ A Python library for building **agentic, notification-driven workflows** with pluggable adapters, policy-aware handlers, and scalable integration interfaces for cross-platform native applications.
4
+
5
+ ## Overview
6
+
7
+ `agentic-notify` gives you a **backend-first orchestration SDK** with clear extension points:
8
+
9
+ - **Ingestion & Normalization:** Accept notifications from Android/React Native bridges and convert them to a canonical schema.
10
+ - **Agentic Routing:** Send notifications to workflows based on LLM decisions or rules.
11
+ - **Workflow Orchestration:** Execute steps safely, handle retries, and check execution policies.
12
+ - **Adapters:** Connect your workflow engine to device boundaries (Reminders, Summaries, Local DBs).
13
+
14
+ ## Architecture Approach
15
+
16
+ - **Schemas define contracts** (via Pydantic)
17
+ - **Normalizers unify platforms**
18
+ - **Routers decide**
19
+ - **Workflows orchestrate**
20
+ - **Handlers transform**
21
+ - **Adapters act**
22
+ - **Integrations connect**
23
+ - **Policies constrain**
24
+
25
+ ## Installation
26
+
27
+ ```bash
28
+ # Core package
29
+ pip install agentic-notify
30
+
31
+ # Install with FastAPI and Redis support
32
+ pip install agentic-notify[fastapi,redis]
33
+ ```
@@ -0,0 +1,4 @@
1
+ """
2
+ agentic_notify - A Python library for building agentic, notification-driven workflows
3
+ """
4
+ __version__ = "0.1.0"
@@ -0,0 +1,3 @@
1
+ from agentic_notify.adapters.base import BaseAdapter
2
+
3
+ __all__ = ["BaseAdapter"]
@@ -0,0 +1,23 @@
1
+ from abc import ABC, abstractmethod
2
+ from typing import Any, Dict
3
+
4
class BaseAdapter(ABC):
    """Abstract contract for downstream action executors.

    An adapter performs the side-effecting work of a workflow step
    (API calls, DB writes, native device triggers) and is fully
    decoupled from the inbound integrations that deliver events.
    """

    @property
    @abstractmethod
    def name(self) -> str:
        """Canonical adapter name referenced by workflow definitions."""
        ...

    @abstractmethod
    async def execute(self, payload: Dict[str, Any]) -> Dict[str, Any]:
        """Run the business action with *payload*; return its output dict."""
        ...
@@ -0,0 +1,36 @@
1
+ from typing import Dict, Any
2
+ import logging
3
+ from agentic_notify.adapters.base import BaseAdapter
4
+
5
+ logger = logging.getLogger(__name__)
6
+
7
class MCPClientAdapter(BaseAdapter):
    """Boundary adapter that delegates work to an external MCP server.

    Rather than hardcoding third-party APIs, a workflow may invoke any
    tool exposed by a Model Context Protocol (MCP) server through this
    single standardized adapter.
    """

    @property
    def name(self) -> str:
        return "call_mcp_tool"

    async def execute(self, payload: Dict[str, Any]) -> Dict[str, Any]:
        """Validate the payload and (mock-)invoke the requested MCP tool.

        Raises:
            ValueError: when 'server_name' or 'tool_name' is missing.
        """
        server_name = payload.get("server_name")
        tool_name = payload.get("tool_name")
        tool_args = payload.get("tool_args", {})

        if not (server_name and tool_name):
            raise ValueError("MCPClientAdapter requires 'server_name' and 'tool_name'")

        logger.info(f"MCP Action: Delegating to server '{server_name}' to run tool '{tool_name}' with args {tool_args}")

        # Placeholder for actual MCP SDK invocation:
        # e.g., result = await mcp_client_sessions[server_name].call_tool(tool_name, tool_args)
        simulated = {"tool_status": "executed", "echo": tool_args}

        return {
            "status": "success",
            "server_id": server_name,
            "tool_id": tool_name,
            "mcp_result": simulated,
        }
@@ -0,0 +1,17 @@
1
+ from typing import Dict, Any
2
+ import logging
3
+ from agentic_notify.adapters.base import BaseAdapter
4
+
5
+ logger = logging.getLogger(__name__)
6
+
7
class MockReminderAdapter(BaseAdapter):
    """Test double that pretends to create a reminder on the user's device.

    Logs the payload and echoes it back so callers can assert on it.
    """

    @property
    def name(self) -> str:
        return "create_reminder"

    async def execute(self, payload: Dict[str, Any]) -> Dict[str, Any]:
        logger.info(f"*** Action Executed: MockReminderAdapter received {payload} ***")
        # Fixed fake id plus an echo of the input — enough for end-to-end demos.
        return {"status": "success", "reminder_id": "rem_test_001", "echo_payload": payload}
@@ -0,0 +1,25 @@
1
+ from typing import Dict, Any
2
+ import logging
3
+ from agentic_notify.adapters.base import BaseAdapter
4
+
5
+ logger = logging.getLogger(__name__)
6
+
7
class NotesAdapter(BaseAdapter):
    """Pretends to persist content into a local notes app or document store."""

    @property
    def name(self) -> str:
        return "save_note"

    async def execute(self, payload: Dict[str, Any]) -> Dict[str, Any]:
        """Read title/content from *payload* and return a fake save receipt."""
        title = payload.get("title", "Untitled Note")
        content = payload.get("content", "")

        logger.info(f"Action Executed: Saved Note => Title: '{title}', Content Length: {len(content)}")

        # Fixed fake note id — this adapter only simulates persistence.
        return {"status": "success", "note_id": "note_1001", "saved_title": title}
@@ -0,0 +1,44 @@
1
+ from typing import Dict, Any
2
+ import logging
3
+ import json
4
+ import urllib.request
5
+ import urllib.error
6
+ from agentic_notify.adapters.base import BaseAdapter
7
+
8
+ logger = logging.getLogger(__name__)
9
+
10
class WebhookAdapter(BaseAdapter):
    """
    Adapter that fires an HTTP POST to an external webhook URL.

    The request uses the blocking ``urllib`` stack, but it is executed in a
    worker thread via ``asyncio.to_thread`` so the event loop is never
    blocked while the webhook call is in flight.
    """
    @property
    def name(self) -> str:
        return "trigger_webhook"

    async def execute(self, payload: Dict[str, Any]) -> Dict[str, Any]:
        """
        POST ``payload["data"]`` as JSON to ``payload["url"]``.

        Raises:
            ValueError: if no 'url' is supplied.
            RuntimeError: if the HTTP call fails (chained from URLError).
        """
        import asyncio  # local import: adds no module-level dependency

        url = payload.get("url")
        data = payload.get("data", {})

        if not url:
            raise ValueError("WebhookAdapter requires a 'url' in the payload.")

        logger.info(f"Action Executed: Firing Webhook to {url}")

        req = urllib.request.Request(
            url,
            data=json.dumps(data).encode('utf-8'),
            headers={'Content-Type': 'application/json'},
            method='POST'
        )

        def _post() -> int:
            # Blocking I/O, run off the event loop by asyncio.to_thread below.
            with urllib.request.urlopen(req, timeout=10) as response:
                return response.getcode()

        try:
            http_code = await asyncio.to_thread(_post)
            return {"status": "success", "http_code": http_code}
        except urllib.error.URLError as e:
            logger.error(f"Webhook failed: {e}")
            raise RuntimeError(f"Webhook call failed: {e}") from e
@@ -0,0 +1 @@
1
+ """Examples directory"""
@@ -0,0 +1,49 @@
1
# Example application: wires the orchestrator behind a FastAPI ingest endpoint.
# POST JSON NotificationEvent payloads to /events/ingest once running.
import logging
from fastapi import FastAPI

from agentic_notify.orchestrator import NotificationOrchestrator
from agentic_notify.storage.memory import InMemoryStore
from agentic_notify.integrations.fastapi_integration import FastAPIIntegration
from agentic_notify.adapters.mock_reminder import MockReminderAdapter
from agentic_notify.schemas.workflow import WorkflowDefinition, WorkflowTrigger, WorkflowStep

logging.basicConfig(level=logging.INFO)

# 1. Setup Core dependencies
storage = InMemoryStore()

# 2. Setup Orchestrator
orchestrator = NotificationOrchestrator(storage=storage)

# 3. Register our mock adapter
orchestrator.register_adapter(MockReminderAdapter())

# 4. Define and Register Workflow
# Single-step workflow: every routed notification creates a (mock) reminder.
workflow = WorkflowDefinition(
    workflow_id="wf_important_reminders",
    workflow_name="Important Notification Reminders",
    trigger=WorkflowTrigger(type="notification"),
    steps=[
        WorkflowStep(
            id="step_create_reminder",
            kind="adapter",
            name="create_reminder",
            # We map the inputs using standard dot notation to pull from the normalized event
            input_from={
                "task_text": "event.body",
                "task_title": "event.title"
            }
        )
    ]
)
orchestrator.register_workflow(workflow)

# 5. Add deterministic routing rule (Route all events to our test workflow)
orchestrator.router.add_rule(lambda e: True, "wf_important_reminders")

# 6. Initialize FastAPI Integration
app = FastAPI()
integration = FastAPIIntegration(orchestrator, app)

# To run this example locally:
# python -m uvicorn agentic_notify.examples.app:app --reload
@@ -0,0 +1,3 @@
1
+ from agentic_notify.handlers.base import BaseHandler
2
+
3
+ __all__ = ["BaseHandler"]
@@ -0,0 +1,32 @@
1
+ from typing import Dict, Any, List
2
+ import logging
3
+ from agentic_notify.handlers.base import BaseHandler
4
+ from agentic_notify.schemas.notification import NotificationEvent
5
+
6
+ logger = logging.getLogger(__name__)
7
+
8
class ApprovalHandler(BaseHandler):
    """
    Human-in-the-Loop handler.

    Executing this step halts the workflow by returning an
    'awaiting_approval' status; the orchestrator keeps the run paused
    until a human explicitly resumes it.
    """

    @property
    def name(self) -> str:
        return "request_human_approval"

    async def handle(self, event: NotificationEvent, context: Dict[str, Any]) -> Dict[str, Any]:
        """Build and return the pause/approval ticket for this event."""
        reason = context.get("reason", "Action requires explicit user approval.")
        action_payload = context.get("action_payload", {})

        logger.info(f"⏸️ WORKFLOW HALTED: Requesting Human Approval for: {reason}")

        # A real deployment would push an outbound notification or email
        # here with an "Approve" call-to-action.
        ticket = {
            "status": "awaiting_approval",
            "approval_ticket_id": f"ticket_{event.event_id}",
            "reason": reason,
            "pending_action": action_payload,
        }
        return ticket
@@ -0,0 +1,25 @@
1
+ from abc import ABC, abstractmethod
2
+ from typing import Any, Dict
3
+
4
+ from agentic_notify.schemas.notification import NotificationEvent
5
+
6
class BaseHandler(ABC):
    """
    In-memory step-logic processor.

    Unlike an Adapter, a Handler does not necessarily perform a permanent
    external action; it typically manipulates, routes, or transforms data
    (e.g., classification, summarization).
    """

    @property
    @abstractmethod
    def name(self) -> str:
        """Canonical handler name used in workflow definitions."""
        ...

    @abstractmethod
    async def handle(self, event: "NotificationEvent", context: Dict[str, Any]) -> Dict[str, Any]:
        """Process *event* with *context*; return the mutated context or result."""
        ...
@@ -0,0 +1,3 @@
1
+ from agentic_notify.integrations.base import BaseIntegration
2
+
3
+ __all__ = ["BaseIntegration"]
@@ -0,0 +1,19 @@
1
+ from typing import Any, Dict
2
+ from abc import ABC, abstractmethod
3
+ from agentic_notify.schemas.notification import NotificationEvent
4
+
5
class BaseIntegration(ABC):
    """
    Inbound boundary: receives raw events from an external host system
    (FastAPI route, background worker, webhook, React Native bridge, ...).
    """

    @abstractmethod
    async def start(self) -> None:
        """Begin the listener or service."""
        ...

    @abstractmethod
    def stop(self) -> None:
        """Shut the integration down."""
        ...
@@ -0,0 +1,30 @@
1
+ from fastapi import FastAPI, Request
2
+ from typing import Any, Dict
3
+ from agentic_notify.integrations.base import BaseIntegration
4
+ import logging
5
+
6
+ logger = logging.getLogger(__name__)
7
+
8
class FastAPIIntegration(BaseIntegration):
    """
    REST ingestion surface for notification events.

    Requires a FastAPI app instance supplied by the host; this class only
    attaches routes — serving is owned by the external ASGI server.
    """
    def __init__(self, orchestrator, app: FastAPI):
        self.orchestrator = orchestrator
        self.app = app
        self._register_routes()

    def _register_routes(self):
        # POST /events/ingest — raw JSON body goes straight to the orchestrator.
        @self.app.post("/events/ingest")
        async def ingest_event(request: Request):
            payload = await request.json()
            logger.info("FastAPI received event payload")
            return await self.orchestrator.process_event(payload)

    async def start(self) -> None:
        logger.info("FastAPIIntegration started (managed by external uvicorn/asgi)")

    def stop(self) -> None:
        # Route teardown is owned by the host app; nothing to release here.
        pass
@@ -0,0 +1,29 @@
1
+ from typing import Any, Dict
2
+ from agentic_notify.integrations.base import BaseIntegration
3
+ import logging
4
+ import asyncio
5
+
6
+ logger = logging.getLogger(__name__)
7
+
8
class RedisQueueIntegration(BaseIntegration):
    """
    Continuous event ingestion from a Redis list / pub-sub queue.
    (Stubbed: the polling loop is illustrated but not wired to a client.)
    """
    def __init__(self, orchestrator, queue_name: str = "agentic_events"):
        self.orchestrator = orchestrator
        self.queue_name = queue_name
        self._running = False

    async def start(self) -> None:
        """Mark the listener as running; the real polling loop is stubbed out."""
        self._running = True
        logger.info(f"Started RedisQueueIntegration listening on '{self.queue_name}'")

        # Simulated async polling loop
        # while self._running:
        #     raw_event = await redis_client.blpop(self.queue_name)
        #     asyncio.create_task(self.orchestrator.process_event(raw_event))

    def stop(self) -> None:
        """Flag the polling loop to exit."""
        logger.info(f"Stopping RedisQueueIntegration '{self.queue_name}'")
        self._running = False
@@ -0,0 +1,3 @@
1
+ from agentic_notify.observability.logger import setup_json_logger, trace_span
2
+
3
+ __all__ = ["setup_json_logger", "trace_span"]
@@ -0,0 +1,45 @@
1
+ import logging
2
+ import json
3
+ from contextlib import contextmanager
4
+ from datetime import datetime
5
+ import time
6
+ from typing import Dict, Any, Optional
7
+
8
def setup_json_logger(name: str) -> logging.Logger:
    """
    Return a logger named *name* that emits single-line JSON records.

    Idempotent: the JSON handler is constructed and attached only when the
    logger has no handlers yet, so repeated calls neither duplicate output
    nor waste allocations (the original built a handler on every call and
    then discarded it).
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)

    # Simple JSON formatter
    class JsonFormatter(logging.Formatter):
        """Render each record as a JSON object with timestamp/level/name/message."""
        def format(self, record: logging.LogRecord) -> str:
            # NOTE(review): datetime.utcnow() is naive and deprecated in 3.12+;
            # switch to datetime.now(timezone.utc) once a tz-suffixed timestamp
            # is acceptable to log consumers.
            log_record = {
                "timestamp": datetime.utcnow().isoformat(),
                "level": record.levelname,
                "name": record.name,
                "message": record.getMessage()
            }
            if hasattr(record, "trace_id"):
                log_record["trace_id"] = record.trace_id  # type: ignore
            return json.dumps(log_record)

    # Avoid duplicate handlers (and pointless construction) if setup is
    # called multiple times for the same logger name.
    if not logger.handlers:
        handler = logging.StreamHandler()
        handler.setFormatter(JsonFormatter())
        logger.addHandler(handler)

    return logger
34
+
35
@contextmanager
def trace_span(logger: logging.Logger, span_name: str, trace_id: Optional[str] = None):
    """
    Context manager that logs structured SPAN_START / SPAN_END records with
    the wall-clock duration of the wrapped block.

    Bug fix: SPAN_END is now emitted in a ``finally`` clause, so a block
    that raises still reports its latency (the exception propagates
    unchanged). The original skipped SPAN_END entirely on failure.
    """
    extra = {"trace_id": trace_id} if trace_id else {}
    start_time = time.time()
    logger.info(f"SPAN_START: {span_name}", extra=extra)
    try:
        yield
    finally:
        latency_ms = int((time.time() - start_time) * 1000)
        logger.info(f"SPAN_END: {span_name} ({latency_ms}ms)", extra=extra)
@@ -0,0 +1,96 @@
1
+ import uuid
2
+ import logging
3
+ from typing import Any, Dict, Optional
4
+
5
+ from agentic_notify.adapters.base import BaseAdapter
6
+ from agentic_notify.handlers.base import BaseHandler
7
+ from agentic_notify.schemas.notification import NotificationEvent
8
+ from agentic_notify.schemas.workflow import WorkflowDefinition
9
+ from agentic_notify.schemas.result import WorkflowRunResult
10
+ from agentic_notify.storage.base import BaseStorage
11
+ from agentic_notify.routing.engine import RoutingEngine
12
+ from agentic_notify.workflows.engine import WorkflowExecutor
13
+ from agentic_notify.policies.engine import PolicyEngine
14
+
15
+ logger = logging.getLogger(__name__)
16
+
17
class NotificationOrchestrator:
    """
    The main coordinator. Receives events, routes them to workflows,
    evaluates policies, and delegates execution.

    Collaborators (router, policy engine, executor) are injectable; default
    instances are constructed when omitted, with the executor sharing this
    orchestrator's storage backend.
    """
    def __init__(
        self,
        storage: BaseStorage,
        router: Optional[RoutingEngine] = None,
        policy_engine: Optional[PolicyEngine] = None,
        executor: Optional[WorkflowExecutor] = None,
    ):
        self.storage = storage
        self.router = router or RoutingEngine()
        self.policy_engine = policy_engine or PolicyEngine()
        self.executor = executor or WorkflowExecutor(storage=self.storage)

        # Registries keyed by each component's canonical name / workflow id.
        self.adapters: Dict[str, BaseAdapter] = {}
        self.handlers: Dict[str, BaseHandler] = {}
        self.workflows: Dict[str, WorkflowDefinition] = {}

    def register_adapter(self, adapter: BaseAdapter) -> None:
        """Register *adapter* under its canonical name (re-registering replaces)."""
        self.adapters[adapter.name] = adapter
        logger.info(f"Registered adapter: {adapter.name}")

    def register_handler(self, handler: BaseHandler) -> None:
        """Register *handler* under its canonical name (re-registering replaces)."""
        self.handlers[handler.name] = handler
        logger.info(f"Registered handler: {handler.name}")

    def register_workflow(self, workflow: WorkflowDefinition) -> None:
        """Make *workflow* routable by its workflow_id."""
        self.workflows[workflow.workflow_id] = workflow
        logger.info(f"Registered workflow: {workflow.workflow_id} ({workflow.workflow_name})")

    async def process_event(self, raw_event_payload: Dict[str, Any]) -> Dict[str, Any]:
        """
        1. Normalize raw event
        2. Route
        3. Execute workflow steps sequentially

        Returns a summary dict whose "status" is one of: "error" (schema
        validation failed), "ignored" (no workflow matched), "denied"
        (policy rejected), or "processed" (executor ran).
        """
        # 1. Normalize — validation failures are reported as a result dict,
        # not raised, so a malformed payload cannot take down ingestion.
        try:
            event = NotificationEvent(**raw_event_payload)
        except Exception as e:
            logger.error(f"Event normalization failed: {e}")
            return {"status": "error", "message": "Invalid event schema.", "details": str(e)}

        logger.info(f"Normalized event: {event.event_id}")

        # 2. Route — the router may return an id for a workflow that was
        # never registered here, so both conditions are checked.
        workflow_id = self.router.route(event)
        if not workflow_id or workflow_id not in self.workflows:
            msg = f"No workflow matched or found for event {event.event_id}"
            logger.info(msg)
            return {"status": "ignored", "message": msg}

        workflow = self.workflows[workflow_id]

        # 3. Policy Execution Check — evaluated BEFORE any step runs.
        if not self.policy_engine.evaluate("start_workflow", {"workflow_id": workflow_id}, {"event": event.model_dump()}):
            logger.warning(f"Policy denied workflow {workflow_id}")
            return {"status": "denied", "message": "Policy check failed."}

        # 4. Execute — a fresh run id per event; the executor owns step
        # sequencing, retries, and persistence via the shared storage.
        run_id = f"run_{uuid.uuid4().hex}"
        logger.info(f"Delegating execution for {workflow_id} as run {run_id}")

        result: WorkflowRunResult = await self.executor.execute(
            run_id=run_id,
            workflow=workflow,
            event=event,
            adapters=self.adapters,
            handlers=self.handlers
        )

        return {
            "status": "processed",
            "run_id": run_id,
            "workflow_id": workflow_id,
            "run_status": result.status
        }
@@ -0,0 +1 @@
1
+ """Planning module"""
@@ -0,0 +1,49 @@
1
+ import json
2
+ import logging
3
+ from typing import Any, Dict, Optional
4
+
5
+ logger = logging.getLogger(__name__)
6
+
7
class OpenAIPlannerClient:
    """
    Concrete OpenAI API wrapper used by the AgenticPlanner.

    The optional `openai` package is imported lazily so the library stays
    importable without it; a helpful ImportError is raised when absent.
    """
    def __init__(self, api_key: str, model: str = "gpt-4o"):
        try:
            from openai import AsyncOpenAI
        except ImportError:
            raise ImportError("Please install openai package: `pip install openai`")
        self.client = AsyncOpenAI(api_key=api_key)
        self.model = model
        logger.info(f"Initialized OpenAIPlannerClient with model: {self.model}")

    async def chat(self, system_prompt: str, user_intent: str) -> Dict[str, Any]:
        """
        Send the prompts to OpenAI and decode the JSON-object response.

        Returns the parsed dict; any API or parse failure is logged and
        re-raised unchanged.
        """
        logger.info("Calling OpenAI API for workflow generation...")

        try:
            # response_format={"type": "json_object"} aligns the model output
            # with the downstream Pydantic validation boundary.
            response = await self.client.chat.completions.create(
                model=self.model,
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_intent},
                ],
                response_format={"type": "json_object"},
                temperature=0.2,  # low temperature => deterministic planning
            )
            content = response.choices[0].message.content
            if not content:
                raise ValueError("OpenAI returned an empty response.")
            return json.loads(content)
        except Exception as e:
            logger.error(f"OpenAI API call failed: {e}")
            raise
@@ -0,0 +1,63 @@
1
+ import json
2
+ import uuid
3
+ import logging
4
+ from typing import Dict, Any, Optional
5
+
6
+ from agentic_notify.schemas.workflow import WorkflowDefinition, WorkflowStep
7
+ from agentic_notify.tools.registry import ToolRegistry
8
+ from agentic_notify.planning.prompts import SYSTEM_PLANNER_PROMPT
9
+
10
+ logger = logging.getLogger(__name__)
11
+
12
class AgenticPlanner:
    """
    Converts natural language user intents into structured WorkflowDefinition schemas.

    NOTE(review): the LLM call is currently mocked — a deterministic plan is
    returned regardless of llm_client; the commented-out call shows the
    intended integration point.
    """
    def __init__(self, tool_registry: ToolRegistry, llm_client: Optional[Any] = None):
        # llm_client is optional precisely because the current implementation
        # never consults it (see the mock in plan_workflow).
        self.tool_registry = tool_registry
        self.llm_client = llm_client

    async def plan_workflow(self, intent: str, trigger_type: str = "notification") -> WorkflowDefinition:
        """
        Executes a prompt to an LLM to generate the JSON graph, then strictly validates it
        through the Pydantic definition.

        Raises:
            ValueError: when the generated plan fails WorkflowDefinition validation.
        """
        # Advertise the registered adapter/tool schemas inside the system prompt.
        tools = json.dumps(self.tool_registry.get_all_tool_schemas(), indent=2)
        system_prompt = SYSTEM_PLANNER_PROMPT.format(tools=tools)

        logger.info(f"Generating plan for intent: '{intent}' utilizing registered tools.")

        # Real-world behavior: Wait for LLM
        # response_text = await self.llm_client.chat(messages=[{"role": "system", "content": system_prompt}, {"role": "user", "content": intent}])

        # Mocked deterministic response for Phase 4 proof-of-concept
        mock_generated_plan = {
            "workflow_id": f"wf_llm_{uuid.uuid4().hex[:8]}",
            "workflow_name": "Generated LLM Plan",
            "description": f"Automated workflow for: {intent}",
            "enabled": True,
            "trigger": {
                "type": trigger_type
            },
            "steps": [
                {
                    "id": "step_create_reminder",
                    "kind": "adapter",
                    "name": "create_reminder",
                    # Dot-notation references resolved against the normalized event.
                    "input_from": {
                        "task_text": "event.body",
                        "task_title": "event.title"
                    }
                }
            ]
        }

        try:
            # The most crucial step of the backend design:
            # Force the untrusted LLM payload through the strict Pydantic `WorkflowDefinition` validator.
            workflow = WorkflowDefinition(**mock_generated_plan)
            logger.info(f"Successfully validated LLM plan into WorkflowDefinition: {workflow.workflow_id}")
            return workflow
        except Exception as e:
            logger.error(f"LLM produced invalid workflow schema! Planning failed: {e}")
            raise ValueError(f"Planner failed schema validation: {e}")
@@ -0,0 +1,15 @@
1
# Base System Prompts for Agentic Planning

# NOTE: this template is rendered with str.format(tools=...); the doubled
# braces in rule 4 ({{ }}) are escapes that render as literal braces, while
# the single-brace {tools} placeholder is substituted.
SYSTEM_PLANNER_PROMPT = """
You are the Agentic Workflow Planner for a mobile companion app.
Your job is to read user intents and convert them into deterministic JSON workflow graphs.

RULES:
1. Output MUST be perfectly valid JSON with NO markdown formatting.
2. The JSON must align with the WorkflowDefinition Pydantic Schema.
3. You may ONLY use 'steps' that rely on the tools listed below.
4. Try to construct steps that chain outputs (e.g. input_from: {{ "text": "prev_step_id.content" }}) if multiple steps are needed.

AVAILABLE TOOLS (Adapter schemas):
{tools}
"""
@@ -0,0 +1,3 @@
1
+ from agentic_notify.policies.engine import PolicyEngine
2
+
3
+ __all__ = ["PolicyEngine"]
@@ -0,0 +1,35 @@
1
+ from typing import Dict, Any, List
2
+ import logging
3
+
4
+ logger = logging.getLogger(__name__)
5
+
6
class PolicyEngine:
    """
    Evaluates risk and permission boundaries before an action or workflow fires.

    Restrictions are looked up per user id; users without an entry get no
    restrictions, and events without a user id fall back to 'default_user'.
    """
    def __init__(self):
        # In production, this would query a DB or a Vector Redis cache per user.
        self.user_restrictions: Dict[str, List[str]] = {
            "default_user": ["never_auto_delete", "never_silence_bank"],
            "vip_mode": ["disable_all_summarization"],
        }

    def evaluate(self, action: str, payload: Dict[str, Any], context: Dict[str, Any]) -> bool:
        """
        Return True when *action* is permitted for the event's user.

        The user id is read from context["event"]["metadata"]["user_id"],
        defaulting to "default_user" when any level is missing.
        """
        event_meta = context.get("event", {}).get("metadata", {})
        user_id = event_meta.get("user_id", "default_user")
        restrictions = self.user_restrictions.get(user_id, [])

        logger.info(f"Evaluating Policy: action '{action}' for user '{user_id}'")

        if action == "summarize" and "disable_all_summarization" in restrictions:
            logger.warning(f"POLICY BLOCK: User {user_id} has disabled AI summarization.")
            return False

        if action == "delete_email" and "never_auto_delete" in restrictions:
            logger.warning(f"POLICY BLOCK: User {user_id} has forbidden auto-deletion.")
            return False

        # No matching restriction — allow by default.
        return True
@@ -0,0 +1,3 @@
1
+ from agentic_notify.routing.engine import RoutingEngine
2
+
3
+ __all__ = ["RoutingEngine"]
@@ -0,0 +1,21 @@
1
+ from typing import Dict, Any, Optional, Callable, Tuple, List
2
+ from agentic_notify.schemas.notification import NotificationEvent
3
+
4
class RoutingEngine:
    """
    Maps an incoming NotificationEvent to a workflow id.

    Rules are tried in registration order; the first predicate that
    returns True wins.
    """
    def __init__(self):
        # Ordered list of (predicate, workflow_id) pairs.
        self.rules: "List[Tuple[Callable[[NotificationEvent], bool], str]]" = []

    def add_rule(self, rule_fn: "Callable[[NotificationEvent], bool]", workflow_id: str):
        """Register a deterministic predicate for *workflow_id*."""
        self.rules.append((rule_fn, workflow_id))

    def route(self, event: "NotificationEvent") -> "Optional[str]":
        """Return the first matching workflow_id, or None when nothing matches."""
        matched = (wf_id for predicate, wf_id in self.rules if predicate(event))
        return next(matched, None)
@@ -0,0 +1,12 @@
1
+ from agentic_notify.schemas.notification import NotificationEvent
2
+ from agentic_notify.schemas.workflow import WorkflowDefinition, WorkflowStep, WorkflowTrigger
3
+ from agentic_notify.schemas.result import StepResult, WorkflowRunResult
4
+
5
+ __all__ = [
6
+ "NotificationEvent",
7
+ "WorkflowDefinition",
8
+ "WorkflowStep",
9
+ "WorkflowTrigger",
10
+ "StepResult",
11
+ "WorkflowRunResult"
12
+ ]
@@ -0,0 +1,18 @@
1
from datetime import datetime, timezone
from typing import Any, Dict, Optional

from pydantic import BaseModel, Field
4
+
5
class NotificationEvent(BaseModel):
    """
    The canonical schema for an incoming platform event (e.g., from Android/iOS/Web).
    All integrations must parse their raw events into this format.
    """
    # Unique identifier assigned by the producing integration.
    event_id: str
    kind: str = "notification"
    # Originating platform (Android/iOS/Web per the docstring) and source app.
    source_platform: str
    source_app: str
    title: Optional[str] = None
    body: Optional[str] = None
    # Ingestion timestamp.  datetime.utcnow is deprecated (Python 3.12+) and
    # produced naive datetimes; use a timezone-aware UTC "now" instead.
    received_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
    priority_hint: str = "unknown"
    # Free-form extras; the policy layer reads metadata["user_id"] from here.
    metadata: Dict[str, Any] = Field(default_factory=dict)
@@ -0,0 +1,18 @@
1
+ from pydantic import BaseModel
2
+ from typing import Any, Dict, Optional
3
+
4
class StepResult(BaseModel):
    """Result of a single workflow step execution."""
    status: str # "success", "failed", "awaiting_approval"
    # Raw dict produced by the adapter/handler; None when the step failed.
    output: Optional[Dict[str, Any]] = None
    # Failure details, e.g. {"message": "<exception text>"}; None on success.
    error: Optional[Dict[str, Any]] = None
    # ISO-8601 timestamp strings (the executor stores datetime.utcnow().isoformat()).
    started_at: str
    ended_at: str
    # Wall-clock duration of the step in milliseconds.
    latency_ms: int
12
+
13
class WorkflowRunResult(BaseModel):
    """Result of an entire workflow execution."""
    run_id: str
    workflow_id: str
    # Overall run state: the executor sets "success", "failed", or
    # "suspended" (when a step reported awaiting_approval).
    status: str
    # Per-step outcomes keyed by step id; steps never reached are absent.
    step_results: Dict[str, StepResult]
@@ -0,0 +1,36 @@
1
+ from typing import List, Dict, Any, Optional
2
+ from pydantic import BaseModel, ConfigDict
3
+
4
class WorkflowStep(BaseModel):
    """
    A single step in the workflow graph.
    Can either trigger an internal handler or call an external tool adapter.
    """
    # Unique step id within the workflow; other steps may reference this
    # step's output via dotted input_from values ("<step_id>.<key>").
    id: str
    kind: str # "adapter" or "handler" -- WorkflowExecutor raises on anything else
    name: str # e.g., "summarize_text", "create_reminder"
    # Pydantic deep-copies mutable defaults per instance, so the usual
    # shared-mutable-default pitfall does not apply to these two fields.
    # NOTE(review): depends_on is not consulted by WorkflowExecutor, which
    # runs steps strictly in list order -- confirm intended semantics.
    depends_on: List[str] = []
    input_from: Dict[str, Any] = {}
    # NOTE(review): timeout_ms and retry_limit are not enforced by the
    # executor in this package -- presumably reserved for future use.
    timeout_ms: int = 15000
    retry_limit: int = 2
16
+
17
class WorkflowTrigger(BaseModel):
    """
    Conditions describing how a workflow should be triggered.
    """
    type: str # e.g., "schedule", "notification", "event"
    # NOTE(review): format unvalidated here (cron string?) -- presumably
    # only meaningful when type == "schedule"; confirm with the scheduler.
    schedule: Optional[str] = None
    # Matching pattern for event-driven triggers; structure not enforced here.
    event_pattern: Optional[Dict[str, Any]] = None
24
+
25
class WorkflowDefinition(BaseModel):
    """
    A deterministic, structured execution graph for an agentic workflow.
    """
    # Tolerate unknown keys in incoming definitions (e.g. LLM-planned JSON
    # with extra fields) instead of failing validation.
    model_config = ConfigDict(extra="allow")

    workflow_id: str
    workflow_name: str
    description: Optional[str] = None
    # NOTE(review): nothing in this package checks the enabled flag --
    # presumably the router/orchestrator is expected to honor it; confirm.
    enabled: bool = True
    trigger: WorkflowTrigger
    steps: List[WorkflowStep] # executed in list order by WorkflowExecutor
@@ -0,0 +1,4 @@
1
+ from agentic_notify.storage.base import BaseStorage
2
+ from agentic_notify.storage.memory import InMemoryStore
3
+
4
+ __all__ = ["BaseStorage", "InMemoryStore"]
@@ -0,0 +1,19 @@
1
+ from abc import ABC, abstractmethod
2
+ from typing import Any, Dict, List, Optional
3
+
4
class BaseStorage(ABC):
    """
    Storage layer abstraction for runs, step cache, audit trails, and configurations.

    Concrete backends implement these async hooks; the workflow engine
    persists run state through save_workflow_run after each execution.
    """

    @abstractmethod
    async def save_workflow_run(self, run_id: str, run_data: Dict[str, Any]) -> None:
        """Persist (create or overwrite) the serialized state of a workflow run."""
        pass

    @abstractmethod
    async def get_workflow_run(self, run_id: str) -> Optional[Dict[str, Any]]:
        """Return the stored run state, or None when run_id is unknown."""
        pass

    @abstractmethod
    async def log_audit_event(self, event_type: str, data: Dict[str, Any]) -> None:
        """Append an audit record describing a system event."""
        pass
@@ -0,0 +1,19 @@
1
+ from typing import Any, Dict, List, Optional
2
+ from agentic_notify.storage.base import BaseStorage
3
+
4
class InMemoryStore(BaseStorage):
    """
    A simple dictionary-backed storage for rapid prototyping and testing.

    All state lives in process memory: run snapshots are keyed by run_id,
    audit records are appended in arrival order.
    """
    def __init__(self):
        self.runs: Dict[str, Dict[str, Any]] = {}
        self.audits: List[Dict[str, Any]] = []

    async def save_workflow_run(self, run_id: str, run_data: Dict[str, Any]) -> None:
        """Store (or overwrite) the snapshot for run_id."""
        self.runs[run_id] = run_data

    async def get_workflow_run(self, run_id: str) -> Optional[Dict[str, Any]]:
        """Return the stored snapshot, or None for an unknown run."""
        try:
            return self.runs[run_id]
        except KeyError:
            return None

    async def log_audit_event(self, event_type: str, data: Dict[str, Any]) -> None:
        """Append one audit record."""
        record = {"event_type": event_type, "data": data}
        self.audits.append(record)
@@ -0,0 +1,36 @@
1
+ from typing import Dict, Any, List
2
+ import logging
3
+ from agentic_notify.storage.base import BaseStorage
4
+
5
+ logger = logging.getLogger(__name__)
6
+
7
class PostgresStore(BaseStorage):
    """
    Durable database storage for production multi-tenancy.
    Requires asyncpg or equivalent async SQL driver.

    NOTE: the SQL paths below are still stubbed out; methods only log.
    """
    def __init__(self, connection_pool: Any):
        # connection_pool would be an asyncpg pool in production
        self.pool = connection_pool
        logger.info("Initialized PostgresStore for durable partitioned execution tracking.")

    async def save_workflow_run(self, run_id: str, run_data: Dict[str, Any]) -> None:
        """Upserts a workflow run into the runs table."""
        # async with self.pool.acquire() as conn:
        #     await conn.execute(
        #         "INSERT INTO workflow_runs (run_id, data) VALUES ($1, $2) ON CONFLICT (run_id) DO UPDATE SET data = $2",
        #         run_id, json.dumps(run_data)
        #     )
        # Lazy %-formatting: the message is only built when INFO is enabled.
        logger.info("DS_WRITE: Saved Run %s to Stateful Postgres Database.", run_id)

    async def get_workflow_run(self, run_id: str) -> Optional[Dict[str, Any]]:
        """Fetch a run snapshot; None when the run is unknown (stubbed)."""
        # async with self.pool.acquire() as conn:
        #     record = await conn.fetchrow("SELECT data FROM workflow_runs WHERE run_id = $1", run_id)
        #     return json.loads(record['data']) if record else None
        # Return None (not {}) to honor BaseStorage's Optional contract and
        # match the documented query behavior above.
        return None

    async def log_audit_event(self, event_type: str, data: Dict[str, Any]) -> None:
        """Append an audit record (stubbed no-op)."""
        # async with self.pool.acquire() as conn:
        #     await conn.execute("INSERT INTO audit_logs (type, payload) VALUES ($1, $2)", event_type, json.dumps(data))
        pass
@@ -0,0 +1 @@
1
+ """Tools registry module"""
@@ -0,0 +1,24 @@
1
+ from typing import Dict, Any, List
2
+ from pydantic import BaseModel
3
+
4
class ToolSchema(BaseModel):
    """
    Metadata representation of an Adapter or Handler.
    This is what the LLM planner sees to know what actions it can take.
    """
    # Unique tool name; used as the registry key (last registration wins).
    name: str
    # Human/LLM-readable summary of what the tool does.
    description: str
    # Describes accepted inputs -- presumably JSON-Schema-like; the exact
    # dialect is not enforced here, confirm with the planner.
    input_schema: Dict[str, Any]
    # Whether re-running the tool with the same input is safe -- NOTE(review):
    # not consulted anywhere in this module; confirm retry semantics.
    idempotent: bool
    # Risk tier label; defaults to "low" when unspecified.
    sensitivity: str = "low"
14
+
15
class ToolRegistry:
    """Holds metadata for all registered actions globally."""
    def __init__(self):
        # Tool name -> schema; registering the same name again replaces
        # the earlier entry.
        self._tools: Dict[str, ToolSchema] = {}

    def register_tool(self, schema: ToolSchema):
        """Register (or replace) a tool under its declared name."""
        self._tools[schema.name] = schema

    def get_all_tool_schemas(self) -> List[Dict[str, Any]]:
        """Serialize every registered schema for the LLM planner."""
        return [schema.model_dump() for schema in self._tools.values()]
@@ -0,0 +1,3 @@
1
+ from agentic_notify.workflows.engine import WorkflowExecutor
2
+
3
+ __all__ = ["WorkflowExecutor"]
@@ -0,0 +1,128 @@
1
+ import logging
2
+ from typing import Dict, Any
3
+ from datetime import datetime
4
+ import time
5
+
6
+ from agentic_notify.schemas.notification import NotificationEvent
7
+ from agentic_notify.schemas.workflow import WorkflowDefinition
8
+ from agentic_notify.schemas.result import WorkflowRunResult, StepResult
9
+ from agentic_notify.adapters.base import BaseAdapter
10
+ from agentic_notify.handlers.base import BaseHandler
11
+ from agentic_notify.storage.base import BaseStorage
12
+
13
+ logger = logging.getLogger(__name__)
14
+
15
class WorkflowExecutor:
    """
    Iterates over workflow steps and invokes the appropriate handler or adapter,
    capable of suspending state for Human-in-the-Loop approvals.
    """
    def __init__(self, storage: BaseStorage):
        # Backend used to persist the run result after every execution,
        # so suspended runs can later be resumed.
        self.storage = storage

    async def execute(
        self,
        run_id: str,
        workflow: WorkflowDefinition,
        event: NotificationEvent,
        adapters: Dict[str, BaseAdapter],
        handlers: Dict[str, BaseHandler],
        resume_from_step: str | None = None
    ) -> WorkflowRunResult:
        """
        Run (or resume) `workflow` against `event`, step by step.

        Steps execute strictly in list order (depends_on is not consulted).
        A step whose output dict carries status "awaiting_approval" suspends
        the run; calling execute again with resume_from_step set to that
        step's id skips the earlier steps and re-executes that step.

        Args:
            run_id: Unique identifier for this run; also the storage key.
            workflow: Definition whose steps are executed.
            event: Triggering event, exposed to steps as "event.<field>" refs.
            adapters: Registered adapters keyed by name (for kind "adapter").
            handlers: Registered handlers keyed by name (for kind "handler").
            resume_from_step: Step id to resume from, or None for a fresh run.

        Returns:
            WorkflowRunResult with status "success", "failed" or "suspended";
            persisted to storage before returning.
        """
        logger.info(f"Starting/Resuming run {run_id} for workflow {workflow.workflow_id}")
        # In a real durable engine, we would fetch existing step_results from DB here if resuming.
        # NOTE(review): because context["steps"] starts empty, a resumed run
        # cannot resolve input_from references to pre-suspension steps; such
        # refs fall through to the literal-value branch below.  Confirm.
        step_results: Dict[str, StepResult] = {}
        context = {"event": event.model_dump(), "steps": {}}

        run_status = "success"

        for step in workflow.steps:
            # Skip steps already executed if we are resuming from an approval
            if resume_from_step and step.id != resume_from_step:
                continue
            elif resume_from_step and step.id == resume_from_step:
                resume_from_step = None # We caught up; this step re-executes

            start_time = time.time()
            started_at = datetime.utcnow().isoformat()

            try:
                # Resolve inputs mapping: a string containing "." is treated
                # as a reference -- "<step_id>.<key>" into a prior step's
                # output, or "event.<field>" into the triggering event;
                # anything else is passed through as a literal.
                step_input = {}
                for in_key, in_val in step.input_from.items():
                    if isinstance(in_val, str) and "." in in_val:
                        parts = in_val.split(".", 1)
                        if parts[0] in context["steps"]:
                            step_input[in_key] = context["steps"][parts[0]].get(parts[1])
                        elif parts[0] == "event":
                            step_input[in_key] = context["event"].get(parts[1])
                        else:
                            # Unresolvable reference: fall back to literal.
                            step_input[in_key] = in_val
                    else:
                        step_input[in_key] = in_val

                # Execute step -- dispatch on the declared kind.
                output = None
                if step.kind == "adapter":
                    if step.name not in adapters:
                        raise ValueError(f"Adapter '{step.name}' not registered.")
                    output = await adapters[step.name].execute(step_input)
                elif step.kind == "handler":
                    if step.name not in handlers:
                        raise ValueError(f"Handler '{step.name}' not registered.")
                    output = await handlers[step.name].handle(event, step_input) # Pass resolved input
                else:
                    raise ValueError(f"Unknown step kind '{step.kind}'")

                # Expose this step's output to later steps' input_from refs.
                context["steps"][step.id] = output

                # Handle Human-in-the-loop interruption
                if output and output.get("status") == "awaiting_approval":
                    latency = int((time.time() - start_time) * 1000)
                    step_result = StepResult(
                        status="awaiting_approval",
                        output=output,
                        started_at=started_at,
                        ended_at=datetime.utcnow().isoformat(),
                        latency_ms=latency
                    )
                    step_results[step.id] = step_result
                    run_status = "suspended"
                    break # Halt execution graph immediately

                latency = int((time.time() - start_time) * 1000)
                step_result = StepResult(
                    status="success",
                    output=output,
                    started_at=started_at,
                    ended_at=datetime.utcnow().isoformat(),
                    latency_ms=latency
                )

            except Exception as e:
                # Any step failure aborts the remainder of the graph.
                logger.error(f"Step {step.id} failed: {e}")
                latency = int((time.time() - start_time) * 1000)
                step_result = StepResult(
                    status="failed",
                    error={"message": str(e)},
                    started_at=started_at,
                    ended_at=datetime.utcnow().isoformat(),
                    latency_ms=latency
                )
                step_results[step.id] = step_result
                run_status = "failed"
                break

            step_results[step.id] = step_result

        run_result = WorkflowRunResult(
            run_id=run_id,
            workflow_id=workflow.workflow_id,
            status=run_status,
            step_results=step_results
        )

        # Persist State explicitly so it can be resumed later if 'suspended'
        await self.storage.save_workflow_run(run_id, run_result.model_dump())
        return run_result
@@ -0,0 +1,36 @@
1
+ [tool.poetry]
2
+ name = "agentic-notify"
3
+ version = "0.1.0"
4
+ description = "A Python library for building agentic, notification-driven workflows with pluggable adapters, policy-aware handlers, and scalable integration interfaces."
5
+ authors = ["Your Name <you@example.com>"]
6
+ readme = "README.md"
7
+ packages = [{include = "agentic_notify"}]
8
+
9
+ [tool.poetry.dependencies]
10
+ python = "^3.10"
11
+ pydantic = "^2.0.0"
12
+
13
+ [tool.poetry.group.dev.dependencies]
14
+ pytest = "^8.0.0"
15
+ pytest-asyncio = "^0.23.0"
16
+ ruff = "^0.3.0"
17
+ black = "^24.2.0"
18
+ mypy = "^1.8.0"
19
+
20
+ [tool.poetry.extras]
21
+ fastapi = ["fastapi", "uvicorn"]
22
+ redis = ["redis"]
23
+ mcp = ["mcp"] # Assuming standard mcp sdk
24
+ observability = ["opentelemetry-api", "opentelemetry-sdk"]
25
+
26
+ [build-system]
27
+ requires = ["poetry-core"]
28
+ build-backend = "poetry.core.masonry.api"
29
+
30
+ [tool.ruff]
31
+ line-length = 88
32
+ target-version = "py310"
33
+
34
+ [tool.black]
35
+ line-length = 88
36
+ target-version = ["py310"]