wizelit_sdk-0.1.23-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- wizelit_sdk/__init__.py +11 -0
- wizelit_sdk/agent_wrapper/__init__.py +9 -0
- wizelit_sdk/agent_wrapper/agent_wrapper.py +615 -0
- wizelit_sdk/agent_wrapper/job.py +379 -0
- wizelit_sdk/agent_wrapper/streaming.py +206 -0
- wizelit_sdk/agent_wrapper/utils.py +45 -0
- wizelit_sdk/database.py +148 -0
- wizelit_sdk/models/__init__.py +4 -0
- wizelit_sdk/models/base.py +25 -0
- wizelit_sdk/models/job.py +72 -0
- wizelit_sdk-0.1.23.dist-info/METADATA +111 -0
- wizelit_sdk-0.1.23.dist-info/RECORD +13 -0
- wizelit_sdk-0.1.23.dist-info/WHEEL +4 -0

wizelit_sdk/agent_wrapper/job.py
@@ -0,0 +1,379 @@
"""
Job class for managing execution context and logging in Wizelit Agent Wrapper.
"""
import logging
import asyncio
import uuid
import time
from datetime import datetime, UTC
from typing import List, Optional, Awaitable, Any, TYPE_CHECKING
from fastmcp import Context

if TYPE_CHECKING:
    from wizelit_sdk.database import DatabaseManager
    from wizelit_sdk.agent_wrapper.streaming import LogStreamer


class MemoryLogHandler(logging.Handler):
    """
    Custom logging handler that stores log messages in a list.
    """

    def __init__(self, logs_list: List[str]):
        super().__init__()
        self.logs_list = logs_list
        self.setFormatter(logging.Formatter('%(message)s'))

    def emit(self, record: logging.LogRecord) -> None:
        """
        Emit a log record by appending it to the logs list.
        """
        try:
            # Format timestamp
            ts = time.strftime("%H:%M:%S")

            # Format message with level and timestamp
            formatted_message = f"[{record.levelname}] [{ts}] {record.getMessage()}"

            # Append to logs list
            self.logs_list.append(formatted_message)
        except Exception:
            # Prevent exceptions in logging handler from breaking execution
            self.handleError(record)


class DatabaseLogHandler(logging.Handler):
    """
    Logging handler that persists log messages to PostgreSQL database.
    Writes asynchronously to avoid blocking the logging thread.
    """

    def __init__(self, job_id: str, db_manager: 'DatabaseManager'):
        super().__init__()
        self.job_id = job_id
        self.db_manager = db_manager
        self.setFormatter(logging.Formatter('%(message)s'))

    def emit(self, record: logging.LogRecord) -> None:
        """
        Emit a log record by writing it to the database asynchronously.
        """
        try:
            # Import here to avoid circular dependency
            from wizelit_sdk.models.job import JobLogModel
            from datetime import datetime

            async def write_log():
                try:
                    async with self.db_manager.get_session() as session:
                        log = JobLogModel(
                            job_id=self.job_id,
                            message=record.getMessage(),
                            level=record.levelname,
                            timestamp=datetime.now(UTC).replace(tzinfo=None)
                        )
                        session.add(log)
                        await session.commit()
                except Exception as e:
                    # Log to stderr but don't break execution
                    print(f"Error writing log to database: {e}", flush=True)

            # Schedule async write without awaiting
            try:
                loop = asyncio.get_running_loop()
                loop.create_task(write_log())
            except RuntimeError:
                # No event loop running - log warning
                print("Warning: No event loop running, cannot write log to database", flush=True)
        except Exception:
            # Prevent exceptions in logging handler from breaking execution
            self.handleError(record)


class StreamingLogHandler(logging.Handler):
    """
    Logging handler that publishes log messages to Redis for real-time streaming.
    Enables push-based log delivery without polling.
    """

    def __init__(self, job_id: str, log_streamer: 'LogStreamer'):
        super().__init__()
        self.job_id = job_id
        self.log_streamer = log_streamer
        self.setFormatter(logging.Formatter('%(message)s'))

    def emit(self, record: logging.LogRecord) -> None:
        """
        Emit a log record by publishing it to Redis Pub/Sub.
        """
        try:
            async def publish_log():
                try:
                    await self.log_streamer.publish_log(
                        self.job_id,
                        record.getMessage(),
                        record.levelname
                    )
                except Exception as e:
                    # Log to stderr but don't break execution
                    print(f"Error streaming log: {e}", flush=True)

            # Schedule async publish without awaiting
            try:
                loop = asyncio.get_running_loop()
                loop.create_task(publish_log())
            except RuntimeError:
                # No event loop running - log warning
                print("Warning: No event loop running, cannot stream log to Redis", flush=True)
        except Exception as e:
            # Prevent exceptions in logging handler from breaking execution
            print(f"Error in StreamingLogHandler.emit: {e}", flush=True)
            self.handleError(record)


class Job:
    """
    Job instance that provides logging capabilities and execution context.
    Each decorated function execution gets a Job instance injected.
    """

    def __init__(
        self,
        ctx: Context,
        job_id: Optional[str] = None,
        db_manager: Optional['DatabaseManager'] = None,
        log_streamer: Optional['LogStreamer'] = None
    ):
        """
        Initialize a Job instance.

        Args:
            ctx: FastMCP Context for progress reporting
            job_id: Optional job identifier (generates UUID if not provided)
            db_manager: Optional DatabaseManager for persisting logs
            log_streamer: Optional LogStreamer for real-time log streaming
        """
        self._ctx = ctx
        self._id = job_id or f"JOB-{str(uuid.uuid4())[:8]}"
        self._status = "running"
        self._logs: List[str] = []
        self._result: Optional[str | dict[str, Any]] = None
        self._error: Optional[str] = None
        self._db_manager = db_manager
        self._log_streamer = log_streamer

        # Set up logger
        self._setup_logger(ctx)

    @property
    def id(self) -> str:
        """Unique job identifier."""
        return self._id

    @property
    def logger(self) -> logging.Logger:
        """Python Logger instance configured with this job's log handlers."""
        return self._logger

    @property
    def logs(self) -> List[str]:
        """List of log messages (timestamped strings)."""
        return self._logs

    @property
    def status(self) -> str:
        """Job status: 'running', 'completed', or 'failed'."""
        return self._status

    @status.setter
    def status(self, value: str) -> None:
        """Set job status and publish status change event."""
        self._status = value
        # Publish status change to Redis if streamer is available
        if self._log_streamer:
            print(f"[DEBUG] Publishing status change for job {self._id}: {value}", flush=True)
            async def publish_status():
                try:
                    await self._log_streamer.publish_status_change(
                        self._id,
                        value,
                        result=self._result,
                        error=self._error
                    )
                    print(f"[DEBUG] Status change published successfully for job {self._id}", flush=True)
                except Exception as e:
                    print(f"Error publishing status change: {e}", flush=True)

            try:
                loop = asyncio.get_running_loop()
                loop.create_task(publish_status())
            except RuntimeError:
                print("Warning: No event loop running, cannot publish status change to Redis", flush=True)
        else:
            print(f"[DEBUG] No log_streamer available for job {self._id}, skipping Redis publish", flush=True)

    @property
    def result(self) -> Optional[str | dict[str, Any]]:
        """Job result (if completed successfully)."""
        return self._result

    @result.setter
    def result(self, value: Optional[str | dict[str, Any]]) -> None:
        """Set job result."""
        self._result = value

    @property
    def error(self) -> Optional[str]:
        """Job error message (if failed)."""
        return self._error

    @error.setter
    def error(self, value: Optional[str]) -> None:
        """Set job error message."""
        self._error = value

    async def _heartbeat(self, interval_seconds: float = 5.0) -> None:
        """
        Periodically append a heartbeat log while a job is running so the UI
        has visible progress even during long operations.
        """
        start = time.monotonic()
        while self._status == "running":
            await asyncio.sleep(interval_seconds)
            # Re-check in case status changed while sleeping
            if self._status != "running":
                break
            elapsed = int(time.monotonic() - start)
            # Use logger so logs are captured in memory and streamed if enabled
            self.logger.info(f"⏳ Still working... ({elapsed}s)")

    def run(
        self,
        coro: Awaitable[Any],
        *,
        heartbeat_interval: float = 5.0,
    ) -> "asyncio.Task[Any]":
        """
        Run a coroutine in the background, managing heartbeat, status, result, and error.

        This is intended for long-running jobs. It:
        - Marks the job as running
        - Starts a heartbeat logger
        - Awaits the provided coroutine
        - On success: stores the result (if string or dict) and marks status 'completed'
        - On failure: stores the error message and marks status 'failed'
        """
        import asyncio

        async def _runner() -> Any:
            self.status = "running"
            # Persist initial job state
            await self.persist_to_db()

            heartbeat_task = asyncio.create_task(self._heartbeat(heartbeat_interval))
            try:
                result = await coro
                # Store string or dict results for convenience
                if isinstance(result, str | dict):
                    self.result = result
                if self._status == "running":
                    self.status = "completed"
                # Persist completion state
                await self.persist_to_db()
                return result
            except Exception as e:  # noqa: BLE001 - we deliberately capture all
                self.error = str(e)
                self.status = "failed"
                # Persist failure state
                await self.persist_to_db()
                # Also log the error so it shows up in logs UI
                self.logger.error(f"❌ [System] Error: {e}")
                raise
            finally:
                # Stop heartbeat
                heartbeat_task.cancel()
                try:
                    import contextlib

                    with contextlib.suppress(asyncio.CancelledError):
                        await heartbeat_task
                except Exception:
                    # Ignore heartbeat shutdown errors
                    pass

        # Schedule the runner in the current event loop and return the Task
        return asyncio.create_task(_runner())

    def _setup_logger(self, ctx: Context) -> None:
        """
        Configure logger with custom handlers for streaming and storage.

        Args:
            ctx: FastMCP Context for progress reporting
        """
        _ = ctx  # ctx reserved for potential streaming handler setup
        # Create logger with unique name per job
        logger_name = f"wizelit.job.{self._id}"
        self._logger = logging.getLogger(logger_name)

        # Set level to INFO by default
        self._logger.setLevel(logging.INFO)

        # Remove any existing handlers to avoid duplicates
        self._logger.handlers.clear()

        # Add MemoryLogHandler for internal storage (backward compatibility)
        memory_handler = MemoryLogHandler(self._logs)
        memory_handler.setLevel(logging.INFO)
        self._logger.addHandler(memory_handler)

        # Add DatabaseLogHandler if db_manager provided
        if self._db_manager:
            db_handler = DatabaseLogHandler(self._id, self._db_manager)
            db_handler.setLevel(logging.INFO)
            self._logger.addHandler(db_handler)

        # Add StreamingLogHandler if log_streamer provided
        if self._log_streamer:
            streaming_handler = StreamingLogHandler(self._id, self._log_streamer)
            streaming_handler.setLevel(logging.INFO)
            self._logger.addHandler(streaming_handler)

        # Prevent propagation to root logger
        self._logger.propagate = False

    async def persist_to_db(self) -> None:
        """
        Persist the job state to the database.
        Creates or updates the job record.
        """
        if not self._db_manager:
            return

        try:
            from wizelit_sdk.models.job import JobModel

            async with self._db_manager.get_session() as session:
                # Check if job already exists
                existing_job = await session.get(JobModel, self._id)

                if existing_job:
                    # Update existing job
                    existing_job.status = self._status
                    existing_job.result = self._result
                    existing_job.error = self._error
                    existing_job.updated_at = datetime.now(UTC).replace(tzinfo=None)
                else:
                    # Create new job
                    job = JobModel(
                        id=self._id,
                        status=self._status,
                        result=self._result,
                        error=self._error
                    )
                    session.add(job)

                await session.commit()
        except Exception as e:
            # Log error but don't break execution
            print(f"Error persisting job to database: {e}", flush=True)
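
For orientation, here is a minimal usage sketch of the Job class added above, assuming a FastMCP Context is supplied by the surrounding server; the long_task and handle_request names are hypothetical, and db_manager/log_streamer are omitted, so logs are only kept in memory:

import asyncio
from fastmcp import Context
from wizelit_sdk.agent_wrapper.job import Job


async def long_task(job: Job) -> str:
    # Stand-in for a slow operation; real work would go here.
    job.logger.info("Starting work")
    await asyncio.sleep(12)
    job.logger.info("Work finished")
    return "done"


async def handle_request(ctx: Context) -> str:
    job = Job(ctx)  # no db_manager / log_streamer: logs stay in job.logs
    task = job.run(long_task(job), heartbeat_interval=5.0)
    result = await task        # run() manages status, result, and error
    assert job.status == "completed"
    print(job.logs)            # includes the "Still working..." heartbeat entries
    return result

Awaiting the returned task re-raises any exception from the wrapped coroutine, after the job has already been marked 'failed' and persisted.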

wizelit_sdk/agent_wrapper/streaming.py
@@ -0,0 +1,206 @@
"""
Real-time log streaming using Redis Pub/Sub.
Enables push-based log delivery from workers to hub without polling.
"""
import json
import logging
from datetime import datetime, UTC
from typing import AsyncGenerator, Optional, Dict, Any
import asyncio

try:
    import redis.asyncio as redis
except ImportError:
    redis = None

logger = logging.getLogger(__name__)


class LogStreamer:
    """
    Manages real-time log streaming via Redis Pub/Sub.

    Workers publish log events to job-specific channels.
    Hub subscribes to these channels for real-time updates.
    """

    def __init__(self, redis_url: str = "redis://localhost:6379"):
        """
        Initialize the log streamer.

        Args:
            redis_url: Redis connection URL
        """
        if redis is None:
            raise ImportError(
                "redis package is required for log streaming. "
                "Install it with: pip install redis"
            )

        self.redis_url = redis_url
        self._redis: Optional[redis.Redis] = None
        self._pubsub: Optional[redis.client.PubSub] = None

    async def _ensure_connected(self) -> redis.Redis:
        """Ensure Redis connection is established."""
        if self._redis is None:
            try:
                self._redis = redis.from_url(
                    self.redis_url,
                    decode_responses=True,
                    socket_connect_timeout=5,
                    socket_keepalive=True,
                )
                # Test connection
                await self._redis.ping()
                logger.info(f"Connected to Redis at {self.redis_url}")
            except Exception as e:
                logger.error(f"Failed to connect to Redis: {e}")
                raise
        return self._redis

    async def publish_log(
        self,
        job_id: str,
        message: str,
        level: str = "INFO",
        metadata: Optional[Dict[str, Any]] = None
    ) -> None:
        """
        Publish a log event to Redis.

        Args:
            job_id: Job identifier
            message: Log message
            level: Log level (INFO, ERROR, WARNING, DEBUG)
            metadata: Additional metadata to include
        """
        try:
            redis_client = await self._ensure_connected()

            event = {
                "job_id": job_id,
                "message": message,
                "level": level,
                "timestamp": datetime.now(UTC).isoformat(),
            }

            if metadata:
                event["metadata"] = metadata

            channel = f"job:{job_id}:logs"
            await redis_client.publish(channel, json.dumps(event))

        except Exception as e:
            # Don't let streaming errors break the main execution
            logger.error(f"Failed to publish log for job {job_id}: {e}")

    async def publish_status_change(
        self,
        job_id: str,
        status: str,
        result: Optional[Any] = None,
        error: Optional[str] = None
    ) -> None:
        """
        Publish a job status change event.

        Args:
            job_id: Job identifier
            status: New status (running, completed, failed)
            result: Job result (for completed jobs)
            error: Error message (for failed jobs)
        """
        try:
            redis_client = await self._ensure_connected()

            event = {
                "job_id": job_id,
                "status": status,
                "timestamp": datetime.now(UTC).isoformat(),
            }

            if result is not None:
                event["result"] = result
            if error is not None:
                event["error"] = error

            channel = f"job:{job_id}:status"
            await redis_client.publish(channel, json.dumps(event))
            logger.info(f"Published status change for job {job_id}: {status}")

        except Exception as e:
            logger.error(f"Failed to publish status change for job {job_id}: {e}")

    async def subscribe_logs(
        self,
        job_id: str,
        timeout: Optional[float] = None
    ) -> AsyncGenerator[Dict[str, Any], None]:
        """
        Subscribe to log stream for a specific job.

        Args:
            job_id: Job identifier
            timeout: Optional timeout in seconds

        Yields:
            Dict containing log event data
        """
        redis_client = await self._ensure_connected()
        pubsub = redis_client.pubsub()

        try:
            # Subscribe to both logs and status channels
            log_channel = f"job:{job_id}:logs"
            status_channel = f"job:{job_id}:status"
            await pubsub.subscribe(log_channel, status_channel)
            logger.info(f"Subscribed to channels for job {job_id}")

            start_time = asyncio.get_event_loop().time() if timeout else None

            async for message in pubsub.listen():
                # Check timeout
                if timeout and start_time:
                    elapsed = asyncio.get_event_loop().time() - start_time
                    if elapsed > timeout:
                        logger.info(f"Subscription timeout for job {job_id}")
                        break

                if message["type"] == "message":
                    try:
                        event = json.loads(message["data"])
                        yield event

                        # Stop listening if job is completed or failed
                        if event.get("status") in ["completed", "failed"]:
                            logger.info(f"Job {job_id} finished with status: {event.get('status')}")
                            break

                    except json.JSONDecodeError as e:
                        logger.error(f"Failed to decode message: {e}")

        except asyncio.CancelledError:
            logger.info(f"Subscription cancelled for job {job_id}")
        except Exception as e:
            logger.error(f"Error in subscription for job {job_id}: {e}")
        finally:
            await pubsub.unsubscribe()
            await pubsub.close()

    async def close(self) -> None:
        """Close Redis connection."""
        if self._redis:
            await self._redis.close()
            await self._redis.connection_pool.disconnect()
            self._redis = None
            logger.info("Redis connection closed")

    async def __aenter__(self):
        """Async context manager entry."""
        await self._ensure_connected()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Async context manager exit."""
        await self.close()
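
A hub-side consumption sketch for LogStreamer, assuming a Redis instance is reachable at the default redis://localhost:6379; the follow_job function and the example job id are hypothetical:

import asyncio
from wizelit_sdk.agent_wrapper.streaming import LogStreamer


async def follow_job(job_id: str) -> None:
    # Connects on entry and closes the connection on exit.
    async with LogStreamer("redis://localhost:6379") as streamer:
        # Yields both log and status events for the job; the generator
        # stops once a "completed" or "failed" status arrives.
        async for event in streamer.subscribe_logs(job_id, timeout=300):
            if "message" in event:
                print(f"[{event['level']}] {event['message']}")
            elif "status" in event:
                print(f"status -> {event['status']}")


# asyncio.run(follow_job("JOB-1a2b3c4d"))  # hypothetical job id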

wizelit_sdk/agent_wrapper/utils.py
@@ -0,0 +1,45 @@
"""Utility functions for Wizelit Agent Wrapper."""


def greet(name: str = "World") -> str:
    """
    Generate a greeting message.

    Args:
        name: Name to greet. Defaults to "World".

    Returns:
        A greeting string.

    Example:
        >>> greet("Alice")
        'Hello, Alice!'
        >>> greet()
        'Hello, World!'
    """
    return f"Hello, {name}!"

def greet_many(names: list[str]) -> list[str]:
    """
    Generate a greeting message for each name.

    Args:
        names: List of names to greet.

    Returns:
        A list of greeting strings.
    """
    return [f"Hello, {name}!" for name in names]


def greet_many3(names: list[str]) -> str:
    """
    Generate a greeting message for each name.

    Args:
        names: List of names to greet.

    Returns:
        A single comma-separated string of greetings.
    """
    return ', '.join([f"Hello, {name}!" for name in names])
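
For completeness, the behaviour of the two list-based helpers, with outputs implied directly by the code above:

from wizelit_sdk.agent_wrapper.utils import greet_many, greet_many3

print(greet_many(["Alice", "Bob"]))   # ['Hello, Alice!', 'Hello, Bob!']
print(greet_many3(["Alice", "Bob"]))  # Hello, Alice!, Hello, Bob!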