polos_sdk-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. polos/__init__.py +105 -0
  2. polos/agents/__init__.py +7 -0
  3. polos/agents/agent.py +746 -0
  4. polos/agents/conversation_history.py +121 -0
  5. polos/agents/stop_conditions.py +280 -0
  6. polos/agents/stream.py +635 -0
  7. polos/core/__init__.py +0 -0
  8. polos/core/context.py +143 -0
  9. polos/core/state.py +26 -0
  10. polos/core/step.py +1380 -0
  11. polos/core/workflow.py +1192 -0
  12. polos/features/__init__.py +0 -0
  13. polos/features/events.py +456 -0
  14. polos/features/schedules.py +110 -0
  15. polos/features/tracing.py +605 -0
  16. polos/features/wait.py +82 -0
  17. polos/llm/__init__.py +9 -0
  18. polos/llm/generate.py +152 -0
  19. polos/llm/providers/__init__.py +5 -0
  20. polos/llm/providers/anthropic.py +615 -0
  21. polos/llm/providers/azure.py +42 -0
  22. polos/llm/providers/base.py +196 -0
  23. polos/llm/providers/fireworks.py +41 -0
  24. polos/llm/providers/gemini.py +40 -0
  25. polos/llm/providers/groq.py +40 -0
  26. polos/llm/providers/openai.py +1021 -0
  27. polos/llm/providers/together.py +40 -0
  28. polos/llm/stream.py +183 -0
  29. polos/middleware/__init__.py +0 -0
  30. polos/middleware/guardrail.py +148 -0
  31. polos/middleware/guardrail_executor.py +253 -0
  32. polos/middleware/hook.py +164 -0
  33. polos/middleware/hook_executor.py +104 -0
  34. polos/runtime/__init__.py +0 -0
  35. polos/runtime/batch.py +87 -0
  36. polos/runtime/client.py +841 -0
  37. polos/runtime/queue.py +42 -0
  38. polos/runtime/worker.py +1365 -0
  39. polos/runtime/worker_server.py +249 -0
  40. polos/tools/__init__.py +0 -0
  41. polos/tools/tool.py +587 -0
  42. polos/types/__init__.py +23 -0
  43. polos/types/types.py +116 -0
  44. polos/utils/__init__.py +27 -0
  45. polos/utils/agent.py +27 -0
  46. polos/utils/client_context.py +41 -0
  47. polos/utils/config.py +12 -0
  48. polos/utils/output_schema.py +311 -0
  49. polos/utils/retry.py +47 -0
  50. polos/utils/serializer.py +167 -0
  51. polos/utils/tracing.py +27 -0
  52. polos/utils/worker_singleton.py +40 -0
  53. polos_sdk-0.1.0.dist-info/METADATA +650 -0
  54. polos_sdk-0.1.0.dist-info/RECORD +55 -0
  55. polos_sdk-0.1.0.dist-info/WHEEL +4 -0
polos/runtime/worker_server.py
@@ -0,0 +1,249 @@
+"""FastAPI server for push-based workers."""
+
+import asyncio
+import logging
+from collections.abc import Awaitable, Callable
+from typing import Any
+
+import uvicorn
+from fastapi import FastAPI, Request, status
+from fastapi.responses import JSONResponse
+
+logger = logging.getLogger(__name__)
+
+
+class WorkerServer:
+    """FastAPI server that receives pushed work from the orchestrator."""
+
+    def __init__(
+        self,
+        worker_id: str,
+        max_concurrent_workflows: int,
+        on_work_received: Callable[[dict[str, Any]], Awaitable[None]],
+        on_cancel_requested: Callable[[str], Awaitable[bool]] | None = None,
+        port: int = 8000,
+        local_mode: bool = False,
+    ):
+        """
+        Initialize the worker server.
+
+        Args:
+            worker_id: Identifier of this worker, checked against incoming requests
+            max_concurrent_workflows: Maximum number of concurrent workflows
+            on_work_received: Async callback function to handle received work
+            on_cancel_requested: Optional async callback function to handle cancel
+                requests (execution_id)
+            port: Port to run the server on
+            local_mode: If True, bind to 127.0.0.1 instead of 0.0.0.0
+        """
+        self.worker_id = worker_id
+        self.max_concurrent_workflows = max_concurrent_workflows
+        self.on_work_received = on_work_received
+        self.on_cancel_requested = on_cancel_requested
+        self.port = port
+        self.current_execution_count = 0
+        self.local_mode = local_mode
+        self.app: FastAPI | None = None
+        self.server: uvicorn.Server | None = None
+        self._setup_app()
+
+    def update_worker_id(self, new_worker_id: str):
+        """Update the worker_id (used when re-registering)."""
+        self.worker_id = new_worker_id
+
+    def _setup_app(self):
+        """Setup FastAPI application with endpoints."""
+        self.app = FastAPI(title="Polos Worker Server Endpoint")
+
+        @self.app.post("/execute")
+        async def execute(request: Request):
+            """Receive pushed work from orchestrator."""
+            try:
+                # Check if worker is at capacity
+                if self.current_execution_count >= self.max_concurrent_workflows:
+                    return JSONResponse(
+                        status_code=status.HTTP_429_TOO_MANY_REQUESTS,
+                        content={"error": "Worker at capacity"},
+                    )
+
+                # Parse request body
+                body = await request.json()
+                worker_id = body.get("worker_id")
+                if worker_id != self.worker_id:
+                    return JSONResponse(
+                        status_code=status.HTTP_400_BAD_REQUEST,
+                        content={"error": "Worker ID mismatch"},
+                    )
+
+                # Extract execution data
+                execution_id = body.get("execution_id")
+                workflow_id = body.get("workflow_id")
+                payload = body.get("payload", {})
+                root_execution_id = body.get("root_execution_id")
+                step_key = body.get("step_key")
+                session_id = body.get("session_id")
+                user_id = body.get("user_id")
+                retry_count = body.get("retry_count", 0)
+
+                # Log execution request with detailed context
+                logger.info(
+                    "POST /execute - execution_id=%s, worker_id=%s, workflow_id=%s, "
+                    "root_execution_id=%s, step_key=%s, session_id=%s, user_id=%s, "
+                    "retry_count=%d",
+                    execution_id,
+                    self.worker_id,
+                    workflow_id,
+                    root_execution_id,
+                    step_key,
+                    session_id,
+                    user_id,
+                    retry_count,
+                )
+
+                # Build workflow_data dict (same format as poll mode)
+                workflow_data = {
+                    "execution_id": execution_id,
+                    "workflow_id": workflow_id,
+                    "deployment_id": body.get("deployment_id"),
+                    "payload": payload,
+                    "parent_execution_id": body.get("parent_execution_id"),
+                    "root_execution_id": root_execution_id,
+                    "step_key": step_key,
+                    "retry_count": retry_count,
+                    "created_at": body.get("created_at"),
+                    "session_id": session_id,
+                    "user_id": user_id,
+                    "otel_traceparent": body.get("otel_traceparent"),
+                    "otel_span_id": body.get("otel_span_id"),
+                    "initial_state": body.get("initial_state"),
+                    "run_timeout_seconds": body.get("run_timeout_seconds"),
+                }
+
+                # Increment execution count
+                self.current_execution_count += 1
+
+                # Execute in background (don't await)
+                async def execute_with_cleanup(exec_data):
+                    try:
+                        await self.on_work_received(exec_data)
+                    except Exception:
+                        # Exceptions are already handled in on_work_received callback
+                        # This just prevents "Task exception was never retrieved" warning
+                        pass
+                    finally:
+                        # Decrement execution count when done
+                        self.current_execution_count = max(0, self.current_execution_count - 1)
+
+                asyncio.create_task(execute_with_cleanup(workflow_data))
+
+                # Return 200 OK immediately (work accepted)
+                return JSONResponse(
+                    status_code=status.HTTP_200_OK,
+                    content={"status": "accepted", "execution_id": execution_id},
+                )
+
+            except Exception as e:
+                # On error, return 503 Service Unavailable
+                return JSONResponse(
+                    status_code=status.HTTP_503_SERVICE_UNAVAILABLE, content={"error": str(e)}
+                )
+
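The orchestrator's actual push payload is defined outside this file; as a rough illustration of the contract the /execute handler expects, a request might look like the following. All IDs and values below are placeholders.

import httpx

# Illustrative push; field names mirror what the /execute handler reads above.
resp = httpx.post(
    "http://127.0.0.1:8000/execute",
    json={
        "worker_id": "worker-123",     # must match the server's worker_id
        "execution_id": "exec-001",
        "workflow_id": "my-workflow",
        "payload": {"question": "hello"},
        "retry_count": 0,
    },
)
# 200 {"status": "accepted", ...} when queued, 429 at capacity, 400 on ID mismatch.
print(resp.status_code, resp.json())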
+        @self.app.post("/cancel/{execution_id}")
+        async def cancel_execution(execution_id: str, request: Request):
+            """Handle cancellation request from orchestrator."""
+            try:
+                # Get worker_id from the X-Worker-ID request header
+                worker_id = request.headers.get("X-Worker-ID")
+                if not worker_id:
+                    raise ValueError("Missing Worker ID in the request headers")
+
+                if str(worker_id) != self.worker_id:
+                    return JSONResponse(
+                        status_code=status.HTTP_400_BAD_REQUEST,
+                        content={"error": "Worker ID mismatch"},
+                    )
+
+                # Trigger cancellation and await result to check if execution was found
+                if self.on_cancel_requested:
+                    execution_found = await self.on_cancel_requested(execution_id)
+                    if execution_found:
+                        return JSONResponse(
+                            status_code=status.HTTP_200_OK,
+                            content={
+                                "status": "cancellation_requested",
+                                "execution_id": execution_id,
+                            },
+                        )
+                    else:
+                        # Execution not found or already completed - return 404
+                        return JSONResponse(
+                            status_code=status.HTTP_404_NOT_FOUND,
+                            content={
+                                "error": "Execution not found or already completed",
+                                "execution_id": execution_id,
+                            },
+                        )
+                else:
+                    # No cancel handler - return 503
+                    return JSONResponse(
+                        status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
+                        content={"error": "Cancel handler not configured"},
+                    )
+            except Exception as e:
+                return JSONResponse(
+                    status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, content={"error": str(e)}
+                )
+
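A matching cancellation call identifies the worker via the X-Worker-ID header. A sketch with placeholder IDs:

import httpx

resp = httpx.post(
    "http://127.0.0.1:8000/cancel/exec-001",
    headers={"X-Worker-ID": "worker-123"},
)
# 200 -> cancellation requested, 404 -> execution not found or already completed,
# 503 -> no cancel handler configured on this worker.
print(resp.status_code, resp.json())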
+        @self.app.get("/health")
+        async def health_check():
+            """Health check endpoint."""
+            return {
+                "status": "healthy",
+                "mode": "push",
+                "current_executions": self.current_execution_count,
+                "max_concurrent_workflows": self.max_concurrent_workflows,
+            }
+
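The health endpoint doubles as a simple readiness and capacity probe, for example:

import httpx

health = httpx.get("http://127.0.0.1:8000/health").json()
print(health["current_executions"], "of", health["max_concurrent_workflows"], "slots in use")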
+    async def run(self):
+        """Run the FastAPI server."""
+        if not self.app:
+            raise RuntimeError("FastAPI app not initialized")
+
+        host = "127.0.0.1" if self.local_mode else "0.0.0.0"
+
+        # Get uvicorn's default logging config and ensure root logger captures application logs
+        # This allows module loggers (using __name__) to appear alongside FastAPI logs
+        import copy
+
+        from uvicorn.config import LOGGING_CONFIG
+
+        logging_config = copy.deepcopy(LOGGING_CONFIG)
+        # Configure root logger to capture all application logs
+        if "" not in logging_config["loggers"]:
+            logging_config["loggers"][""] = {}
+        logging_config["loggers"][""].update(
+            {
+                "handlers": ["default"],
+                "level": "INFO",
+                "propagate": False,
+            }
+        )
+        # Disable httpx HTTP request logs (set to WARNING to suppress INFO level logs)
+        logging_config["loggers"]["httpx"] = {
+            "handlers": ["default"],
+            "level": "WARNING",
+            "propagate": False,
+        }
+
+        config = uvicorn.Config(
+            self.app,
+            host=host,
+            port=self.port,
+            log_level="info",
+            log_config=logging_config,
+        )
+        self.server = uvicorn.Server(config)
+        await self.server.serve()
+
+    async def shutdown(self):
+        """Shutdown the server gracefully."""
+        if self.server:
+            self.server.should_exit = True
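shutdown() only flips uvicorn's should_exit flag, so a caller typically runs the server as a background task and awaits it after requesting shutdown. A sketch, assuming server is an already constructed WorkerServer and serve_for is a hypothetical helper:

import asyncio


async def serve_for(server, seconds: float) -> None:
    task = asyncio.create_task(server.run())
    try:
        await asyncio.sleep(seconds)   # stand-in for real work or a signal handler
    finally:
        await server.shutdown()        # sets self.server.should_exit = True
        await task                     # lets uvicorn finish in-flight requests and exit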