spatelier 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- analytics/__init__.py +1 -0
- analytics/reporter.py +497 -0
- cli/__init__.py +1 -0
- cli/app.py +147 -0
- cli/audio.py +129 -0
- cli/cli_analytics.py +320 -0
- cli/cli_utils.py +282 -0
- cli/error_handlers.py +122 -0
- cli/files.py +299 -0
- cli/update.py +325 -0
- cli/video.py +823 -0
- cli/worker.py +615 -0
- core/__init__.py +1 -0
- core/analytics_dashboard.py +368 -0
- core/base.py +303 -0
- core/base_service.py +69 -0
- core/config.py +345 -0
- core/database_service.py +116 -0
- core/decorators.py +263 -0
- core/error_handler.py +210 -0
- core/file_tracker.py +254 -0
- core/interactive_cli.py +366 -0
- core/interfaces.py +166 -0
- core/job_queue.py +437 -0
- core/logger.py +79 -0
- core/package_updater.py +469 -0
- core/progress.py +228 -0
- core/service_factory.py +295 -0
- core/streaming.py +299 -0
- core/worker.py +765 -0
- database/__init__.py +1 -0
- database/connection.py +265 -0
- database/metadata.py +516 -0
- database/models.py +288 -0
- database/repository.py +592 -0
- database/transcription_storage.py +219 -0
- modules/__init__.py +1 -0
- modules/audio/__init__.py +5 -0
- modules/audio/converter.py +197 -0
- modules/video/__init__.py +16 -0
- modules/video/converter.py +191 -0
- modules/video/fallback_extractor.py +334 -0
- modules/video/services/__init__.py +18 -0
- modules/video/services/audio_extraction_service.py +274 -0
- modules/video/services/download_service.py +852 -0
- modules/video/services/metadata_service.py +190 -0
- modules/video/services/playlist_service.py +445 -0
- modules/video/services/transcription_service.py +491 -0
- modules/video/transcription_service.py +385 -0
- modules/video/youtube_api.py +397 -0
- spatelier/__init__.py +33 -0
- spatelier-0.3.0.dist-info/METADATA +260 -0
- spatelier-0.3.0.dist-info/RECORD +59 -0
- spatelier-0.3.0.dist-info/WHEEL +5 -0
- spatelier-0.3.0.dist-info/entry_points.txt +2 -0
- spatelier-0.3.0.dist-info/licenses/LICENSE +21 -0
- spatelier-0.3.0.dist-info/top_level.txt +7 -0
- utils/__init__.py +1 -0
- utils/helpers.py +250 -0
cli/worker.py
ADDED
@@ -0,0 +1,615 @@
"""
Job worker CLI commands.

This module provides CLI commands for managing the background job worker daemon,
including starting, stopping, and monitoring the worker process.
"""

import time
from pathlib import Path
from typing import Optional

import typer
from rich.console import Console
from rich.panel import Panel
from rich.table import Table

from core.config import Config
from core.decorators import handle_errors, time_operation
from core.logger import get_logger
from core.worker import Worker, WorkerMode

# Create the worker CLI app
app = typer.Typer(
    name="worker",
    help="Background job worker daemon management",
    rich_markup_mode="rich",
)

console = Console()


@app.command()
@handle_errors(context="start worker daemon", verbose=True)
@time_operation(verbose=True)
def start(
    max_retries: int = typer.Option(
        10, "--max-retries", "-r", help="Maximum retries for failed jobs"
    ),
    verbose: bool = typer.Option(
        False, "--verbose", "-v", help="Enable verbose output"
    ),
):
    """
    Start the background job worker daemon.

    The worker will run in the background and automatically process jobs from the queue.
    Only one worker daemon can run at a time.
    """
    logger = get_logger("worker-start", verbose=verbose)
    config = Config()

    # Initialize worker in daemon mode
    worker = Worker(
        config=config, mode=WorkerMode.DAEMON, verbose=verbose, max_retries=max_retries
    )

    # Check if already running
    if worker.is_running():
        console.print(
            Panel(
                "⚠️ Worker daemon is already running\n"
                "Use 'spt worker status' to check status\n"
                "Use 'spt worker stop' to stop the current daemon",
                title="Already Running",
                border_style="yellow",
            )
        )
        return

    # Start daemon
    try:
        worker.start()
        console.print(
            Panel(
                "🚀 Worker daemon started successfully\n"
                "The worker is now running in the background\n"
                "Use 'spt worker status' to check status\n"
                "Use 'spt worker stop' to stop the daemon",
                title="Daemon Started",
                border_style="green",
            )
        )
    except Exception as e:
        logger.error(f"Failed to start worker daemon: {e}")
        console.print(
            Panel(
                f"❌ Failed to start worker daemon: {e}\n"
                "Check logs for more information",
                title="Start Failed",
                border_style="red",
            )
        )
        raise typer.Exit(1)


@app.command()
@handle_errors(context="stop worker daemon", verbose=True)
@time_operation(verbose=True)
def stop(
    verbose: bool = typer.Option(
        False, "--verbose", "-v", help="Enable verbose output"
    ),
):
    """
    Stop the background job worker daemon.

    This will gracefully stop the worker daemon if it's running.
    """
    logger = get_logger("worker-stop", verbose=verbose)
    config = Config()

    # Initialize worker in daemon mode
    worker = Worker(config=config, mode=WorkerMode.DAEMON, verbose=verbose)

    # Check if running
    if not worker.is_running():
        console.print(
            Panel(
                "ℹ️ Worker daemon is not running",
                title="Not Running",
                border_style="blue",
            )
        )
        return

    # Stop daemon
    if worker.stop_daemon():
        console.print(
            Panel(
                "🛑 Worker daemon stopped successfully",
                title="Daemon Stopped",
                border_style="yellow",
            )
        )
    else:
        console.print(
            Panel(
                "❌ Failed to stop worker daemon\n"
                "You may need to force kill the process",
                title="Stop Failed",
                border_style="red",
            )
        )
        raise typer.Exit(1)


@app.command()
@handle_errors(context="check worker status", verbose=True)
@time_operation(verbose=True)
def status(
    verbose: bool = typer.Option(
        False, "--verbose", "-v", help="Enable verbose output"
    ),
):
    """
    Check the status of the background job worker daemon.

    Shows whether the daemon is running, its PID, uptime, and resource usage.
    """
    logger = get_logger("worker-status", verbose=verbose)
    config = Config()

    # Initialize worker in daemon mode
    worker = Worker(config=config, mode=WorkerMode.DAEMON, verbose=verbose)

    # Get status
    is_running = worker.is_running()
    stats = worker.get_stats() if is_running else {}

    # Create status table
    table = Table(title="Worker Daemon Status")
    table.add_column("Metric", style="cyan")
    table.add_column("Value", style="green")

    if is_running:
        # Try to get PID and process info
        try:
            import psutil

            if worker.pid_file and worker.pid_file.exists():
                pid = int(worker.pid_file.read_text().strip())
                process = psutil.Process(pid)

                table.add_row("Status", "🟢 Running")
                table.add_row("PID", str(pid))
                uptime_hours = (time.time() - process.create_time()) / 3600
                table.add_row("Uptime", f"{uptime_hours:.1f} hours")
                memory_mb = process.memory_info().rss / 1024 / 1024
                table.add_row("Memory", f"{memory_mb:.1f} MB")
                table.add_row("CPU", f"{process.cpu_percent():.1f}%")
            else:
                table.add_row("Status", "🟢 Running")
                table.add_row("PID", "N/A")
        except Exception as e:
            logger.debug(f"Failed to get process info: {e}")
            table.add_row("Status", "🟢 Running")
            table.add_row("PID", "N/A")

        # Add worker stats
        if stats:
            worker_stats = stats.get("worker_stats", {})
            table.add_row("Jobs Processed", str(worker_stats.get("jobs_processed", 0)))
            table.add_row("Jobs Failed", str(worker_stats.get("jobs_failed", 0)))
            table.add_row(
                "Stuck Jobs Detected", str(stats.get("stuck_jobs_detected", 0))
            )
    else:
        table.add_row("Status", "🔴 Not Running")
        table.add_row("PID", "N/A")
        table.add_row("Uptime", "N/A")
        table.add_row("Memory", "N/A")
        table.add_row("CPU", "N/A")

    console.print(table)


@app.command()
@handle_errors(context="restart worker daemon", verbose=True)
@time_operation(verbose=True)
def restart(
    max_retries: int = typer.Option(
        10, "--max-retries", "-r", help="Maximum retries for failed jobs"
    ),
    verbose: bool = typer.Option(
        False, "--verbose", "-v", help="Enable verbose output"
    ),
):
    """
    Restart the background job worker daemon.

    This will stop the current daemon (if running) and start a new one.
    """
    logger = get_logger("worker-restart", verbose=verbose)
    config = Config()

    # Initialize worker in daemon mode
    worker = Worker(
        config=config, mode=WorkerMode.DAEMON, verbose=verbose, max_retries=max_retries
    )

    # Stop if running
    if worker.is_running():
        console.print("Stopping current daemon...")
        worker.stop_daemon()
        time.sleep(2)  # Give it time to stop

    # Start new daemon
    console.print("Starting new daemon...")
    try:
        worker.start()
        console.print(
            Panel(
                "🔄 Worker daemon restarted successfully",
                title="Daemon Restarted",
                border_style="green",
            )
        )
    except Exception as e:
        logger.error(f"Failed to restart worker daemon: {e}")
        console.print(
            Panel(
                f"❌ Failed to restart worker daemon: {e}",
                title="Restart Failed",
                border_style="red",
            )
        )
        raise typer.Exit(1)


@app.command()
@handle_errors(context="list jobs", verbose=True)
@time_operation(verbose=True)
def list_jobs(
    format: str = typer.Option(
        "json", "--format", "-f", help="Output format: json, table, summary"
    ),
    verbose: bool = typer.Option(
        False, "--verbose", "-v", help="Enable verbose output"
    ),
):
    """
    List all jobs in the queue.

    Shows pending, running, completed, and failed jobs.
    """
    logger = get_logger("worker-list-jobs", verbose=verbose)

    try:
        from core.config import Config
        from core.job_queue import JobQueue

        # Initialize job queue
        config = Config()
        job_queue = JobQueue(config, verbose=verbose)

        # Get all jobs
        jobs = job_queue.get_all_jobs()

        if not jobs:
            if format == "json":
                console.print('{"jobs": [], "total": 0, "summary": {}}')
            else:
                console.print(
                    Panel(
                        "📭 No jobs found in queue",
                        title="Empty Queue",
                        border_style="blue",
                    )
                )
            return

        # Count jobs by status
        status_counts = {}
        for job in jobs:
            status_value = (
                job.status.value if hasattr(job.status, "value") else str(job.status)
            )
            status_counts[status_value] = status_counts.get(status_value, 0) + 1

        # Handle JSON format
        if format == "json":
            import json

            # Prepare job data for JSON
            job_data = []
            for job in jobs:
                job_info = {
                    "id": job.id,
                    "type": job.job_type.value,
                    "status": job.status.value
                    if hasattr(job.status, "value")
                    else str(job.status),
                    "path": job.job_path,
                    "created_at": job.created_at.isoformat()
                    if job.created_at
                    else None,
                    "error_message": job.error_message,
                    "retry_count": job.retry_count,
                    "max_retries": job.max_retries,
                }
                job_data.append(job_info)

            # Create JSON response
            response = {"total": len(jobs), "summary": status_counts, "jobs": job_data}

            console.print(json.dumps(response, indent=2))
            return

        # Show summary for table/summary formats
        summary_parts = []
        for status, count in status_counts.items():
            emoji = {
                "pending": "⏳",
                "running": "🔄",
                "completed": "✅",
                "failed": "❌",
                "cancelled": "🚫",
            }.get(status.lower(), "❓")
            summary_parts.append(f"{emoji} {count} {status.title()}")

        if format == "summary":
            console.print(
                Panel(
                    f"📊 Total Jobs: {len(jobs)} | " + " | ".join(summary_parts),
                    title="Job Queue Summary",
                    border_style="green",
                )
            )
            return

        # Show summary for table format
        console.print(
            Panel(
                f"📊 Total Jobs: {len(jobs)} | " + " | ".join(summary_parts),
                title="Job Queue Summary",
                border_style="green",
            )
        )

        # Create a very simple table
        table = Table(show_header=True, header_style="bold cyan", box=None)
        table.add_column("ID", style="cyan", width=3)
        table.add_column("Status", style="bold", width=8)
        table.add_column("Type", style="green", width=12)
        table.add_column("Path", style="blue", width=20)
        table.add_column("Time", style="magenta", width=6)
        table.add_column("Error", style="red", width=15)

        for job in jobs:
            # Simple status with emoji
            status_value = (
                job.status.value if hasattr(job.status, "value") else str(job.status)
            )
            status_emoji = {
                "pending": "⏳",
                "running": "🔄",
                "completed": "✅",
                "failed": "❌",
                "cancelled": "🚫",
            }.get(status_value.lower(), "❓")

            # Simple job type
            job_type = "Video" if "video" in job.job_type.value else "Playlist"

            # Simple path - just show the last part
            path = job.job_path.split("/")[-1] if "/" in job.job_path else job.job_path
            if len(path) > 15:
                path = "..." + path[-12:]

            # Simple time
            time_str = job.created_at.strftime("%H:%M") if job.created_at else "N/A"

            # Simple error - only show if there is one
            error = (
                job.error_message[:12] + "..."
                if job.error_message and len(job.error_message) > 15
                else (job.error_message or "")
            )

            table.add_row(
                str(job.id),
                f"{status_emoji} {status_value.upper()[:4]}",
                job_type,
                path,
                time_str,
                error,
            )

        console.print(table)

    except Exception as e:
        logger.error(f"Failed to list jobs: {e}")
        console.print(
            Panel(f"❌ Failed to list jobs: {e}", title="Error", border_style="red")
        )
        raise typer.Exit(1)


@app.command()
@handle_errors(context="check stuck jobs", verbose=True)
@time_operation(verbose=True)
def check_stuck(
    timeout: int = typer.Option(
        1800,
        "--timeout",
        "-t",
        help="Timeout in seconds for stuck jobs (default: 1800 = 30 minutes)",
    ),
    reset: bool = typer.Option(
        False, "--reset", "-r", help="Reset stuck jobs to pending status"
    ),
    verbose: bool = typer.Option(
        False, "--verbose", "-v", help="Enable verbose output"
    ),
):
    """
    Check for stuck jobs in the queue.

    Jobs stuck in 'running' state for longer than the timeout are considered stuck.
    Use --reset to automatically reset stuck jobs to pending status.
    """
    logger = get_logger("worker-check-stuck", verbose=verbose)

    try:
        from datetime import datetime, timedelta

        from core.job_queue import JobQueue, JobStatus

        # Initialize job queue
        config = Config()
        job_queue = JobQueue(config, verbose=verbose)

        # Get running jobs
        running_jobs = job_queue.get_jobs_by_status(JobStatus.RUNNING, limit=50)

        if not running_jobs:
            console.print(
                Panel(
                    "✅ No running jobs found",
                    title="No Stuck Jobs",
                    border_style="green",
                )
            )
            return

        # Check for stuck jobs
        stuck_jobs = []
        cutoff_time = datetime.now() - timedelta(seconds=timeout)

        for job in running_jobs:
            # Check if job has been running too long
            if job.started_at and job.started_at < cutoff_time:
                stuck_jobs.append(job)
            # Also check jobs without started_at that are old
            elif job.created_at and job.created_at < cutoff_time:
                stuck_jobs.append(job)

        if not stuck_jobs:
            console.print(
                Panel(
                    f"✅ No stuck jobs found (timeout: {timeout}s)",
                    title="No Stuck Jobs",
                    border_style="green",
                )
            )
            return

        # Show stuck jobs
        console.print(
            Panel(
                f"⚠️ Found {len(stuck_jobs)} stuck jobs (timeout: {timeout}s)",
                title="Stuck Jobs Detected",
                border_style="yellow",
            )
        )

        # Create table for stuck jobs
        table = Table(show_header=True, header_style="bold cyan")
        table.add_column("ID", style="cyan", width=3)
        table.add_column("Type", style="green", width=12)
        table.add_column("Path", style="blue", width=20)
        table.add_column("Running Since", style="magenta", width=12)
        table.add_column("Duration", style="red", width=10)

        for job in stuck_jobs:
            # Calculate duration
            start_time = job.started_at or job.created_at
            if start_time:
                duration = datetime.now() - start_time
                duration_str = f"{duration.seconds // 60}m {duration.seconds % 60}s"
                running_since = start_time.strftime("%H:%M:%S")
            else:
                duration_str = "Unknown"
                running_since = "Unknown"

            # Truncate path
            path = job.job_path.split("/")[-1] if "/" in job.job_path else job.job_path
            if len(path) > 15:
                path = "..." + path[-12:]

            # Check if job has output files (intelligent assessment)
            has_output = False
            try:
                from pathlib import Path

                output_path = Path(job.job_path)
                if output_path.exists():
                    video_files = (
                        list(output_path.rglob("*.mp4"))
                        + list(output_path.rglob("*.mkv"))
                        + list(output_path.rglob("*.avi"))
                    )
                    has_output = len(video_files) > 0
            except:
                pass

            # Add output status to duration
            if has_output:
                duration_str += " ✅"
            else:
                duration_str += " ❌"

            table.add_row(
                str(job.id),
                job.job_type.value.replace("download_", "").title(),
                path,
                running_since,
                duration_str,
            )

        console.print(table)

        # Reset stuck jobs if requested
        if reset:
            console.print("\n🔄 Resetting stuck jobs to pending...")
            reset_count = 0
            for job in stuck_jobs:
                try:
                    job_queue.update_job_status(
                        job.id,
                        JobStatus.PENDING,
                        error_message=f"Job was stuck in running state for {timeout}s, reset to pending",
                    )
                    reset_count += 1
                except Exception as e:
                    logger.error(f"Failed to reset job {job.id}: {e}")

            console.print(
                Panel(
                    f"✅ Reset {reset_count} stuck jobs to pending status",
                    title="Jobs Reset",
                    border_style="green",
                )
            )
        else:
            console.print(
                Panel(
                    "💡 Use --reset to automatically reset stuck jobs to pending status",
                    title="Reset Available",
                    border_style="blue",
                )
            )

    except Exception as e:
        logger.error(f"Failed to check stuck jobs: {e}")
        console.print(
            Panel(
                f"❌ Failed to check stuck jobs: {e}", title="Error", border_style="red"
            )
        )
        raise typer.Exit(1)


if __name__ == "__main__":
    app()
core/__init__.py
ADDED
@@ -0,0 +1 @@
"""Core functionality and base classes."""