spatelier-0.3.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. analytics/__init__.py +1 -0
  2. analytics/reporter.py +497 -0
  3. cli/__init__.py +1 -0
  4. cli/app.py +147 -0
  5. cli/audio.py +129 -0
  6. cli/cli_analytics.py +320 -0
  7. cli/cli_utils.py +282 -0
  8. cli/error_handlers.py +122 -0
  9. cli/files.py +299 -0
  10. cli/update.py +325 -0
  11. cli/video.py +823 -0
  12. cli/worker.py +615 -0
  13. core/__init__.py +1 -0
  14. core/analytics_dashboard.py +368 -0
  15. core/base.py +303 -0
  16. core/base_service.py +69 -0
  17. core/config.py +345 -0
  18. core/database_service.py +116 -0
  19. core/decorators.py +263 -0
  20. core/error_handler.py +210 -0
  21. core/file_tracker.py +254 -0
  22. core/interactive_cli.py +366 -0
  23. core/interfaces.py +166 -0
  24. core/job_queue.py +437 -0
  25. core/logger.py +79 -0
  26. core/package_updater.py +469 -0
  27. core/progress.py +228 -0
  28. core/service_factory.py +295 -0
  29. core/streaming.py +299 -0
  30. core/worker.py +765 -0
  31. database/__init__.py +1 -0
  32. database/connection.py +265 -0
  33. database/metadata.py +516 -0
  34. database/models.py +288 -0
  35. database/repository.py +592 -0
  36. database/transcription_storage.py +219 -0
  37. modules/__init__.py +1 -0
  38. modules/audio/__init__.py +5 -0
  39. modules/audio/converter.py +197 -0
  40. modules/video/__init__.py +16 -0
  41. modules/video/converter.py +191 -0
  42. modules/video/fallback_extractor.py +334 -0
  43. modules/video/services/__init__.py +18 -0
  44. modules/video/services/audio_extraction_service.py +274 -0
  45. modules/video/services/download_service.py +852 -0
  46. modules/video/services/metadata_service.py +190 -0
  47. modules/video/services/playlist_service.py +445 -0
  48. modules/video/services/transcription_service.py +491 -0
  49. modules/video/transcription_service.py +385 -0
  50. modules/video/youtube_api.py +397 -0
  51. spatelier/__init__.py +33 -0
  52. spatelier-0.3.0.dist-info/METADATA +260 -0
  53. spatelier-0.3.0.dist-info/RECORD +59 -0
  54. spatelier-0.3.0.dist-info/WHEEL +5 -0
  55. spatelier-0.3.0.dist-info/entry_points.txt +2 -0
  56. spatelier-0.3.0.dist-info/licenses/LICENSE +21 -0
  57. spatelier-0.3.0.dist-info/top_level.txt +7 -0
  58. utils/__init__.py +1 -0
  59. utils/helpers.py +250 -0
analytics/__init__.py ADDED
@@ -0,0 +1 @@
+ """Analytics and reporting modules."""
analytics/reporter.py ADDED
@@ -0,0 +1,497 @@
+ """
+ Analytics reporter for generating insights and reports.
+
+ This module provides analytics reporting capabilities using the established data models.
+ """
+
+ import json
+ from datetime import datetime, timedelta
+ from pathlib import Path
+ from typing import Any, Dict, List, Optional, Union
+
+ from sqlalchemy import func
+
+ from core.config import Config
+ from core.database_service import DatabaseServiceFactory
+ from core.logger import get_logger
+ from database.models import (
+     AnalyticsEvent,
+     MediaFile,
+     MediaType,
+     ProcessingJob,
+     ProcessingStatus,
+ )
+
+
+ class AnalyticsReporter:
+     """
+     Analytics reporter for generating insights and reports.
+
+     Uses the established data models and repository patterns.
+     """
+
+     def __init__(self, config: Config, verbose: bool = False, db_service=None):
+         """
+         Initialize analytics reporter.
+
+         Args:
+             config: Configuration instance
+             verbose: Enable verbose logging
+             db_service: Optional database service for dependency injection
+         """
+         self.config = config
+         self.verbose = verbose
+         self.logger = get_logger("AnalyticsReporter", verbose=verbose)
+
+         # Use provided database service or create one
+         if db_service:
+             self.db_factory = db_service
+         else:
+             # Fallback for backward compatibility
+             self.db_factory = DatabaseServiceFactory(config, verbose=verbose)
+         self.repos = self.db_factory.initialize()
+         self.db_manager = self.db_factory.get_db_manager()
+         self.session = self.db_manager.get_sqlite_session()
+
+     def generate_media_report(self, days: int = 30) -> Dict[str, Any]:
+         """
+         Generate media files report.
+
+         Args:
+             days: Number of days to analyze
+
+         Returns:
+             Dictionary with media statistics
+         """
+         self.logger.info(f"Generating media report for last {days} days")
+
+         # Get media statistics
+         media_stats = self.repos.media.get_statistics()
+
+         # Get recent files
+         since = datetime.now() - timedelta(days=days)
+         recent_files = (
+             self.session.query(MediaFile)
+             .filter(MediaFile.created_at >= since)
+             .order_by(MediaFile.created_at.desc())
+             .all()
+         )
+
+         # Calculate additional metrics
+         total_size = sum(f.file_size for f in recent_files)
+         avg_file_size = total_size / len(recent_files) if recent_files else 0
+
+         report = {
+             "period_days": days,
+             "total_files": len(recent_files),
+             "total_size_bytes": total_size,
+             "total_size_mb": total_size / (1024 * 1024),
+             "avg_file_size_bytes": avg_file_size,
+             "files_by_type": media_stats.get("files_by_type", {}),
+             "size_by_type": media_stats.get("size_by_type", {}),
+             "recent_files": [
+                 {
+                     "id": f.id,
+                     "name": f.file_name,
+                     "path": f.file_path,
+                     "type": f.media_type.value,
+                     "size": f.file_size,
+                     "created_at": f.created_at.isoformat(),
+                 }
+                 for f in recent_files[:10]  # Last 10 files
+             ],
+         }
+
+         return report
+
+     def generate_processing_report(self, days: int = 30) -> Dict[str, Any]:
+         """
+         Generate processing jobs report.
+
+         Args:
+             days: Number of days to analyze
+
+         Returns:
+             Dictionary with processing statistics
+         """
+         self.logger.info(f"Generating processing report for last {days} days")
+
+         # Get job statistics
+         job_stats = self.repos.jobs.get_job_statistics()
+
+         # Get recent jobs
+         since = datetime.now() - timedelta(days=days)
+         recent_jobs = (
+             self.session.query(ProcessingJob)
+             .filter(ProcessingJob.created_at >= since)
+             .order_by(ProcessingJob.created_at.desc())
+             .all()
+         )
+
+         # Calculate success rate
+         completed_jobs = [
+             j for j in recent_jobs if j.status == ProcessingStatus.COMPLETED
+         ]
+         failed_jobs = [j for j in recent_jobs if j.status == ProcessingStatus.FAILED]
+         success_rate = len(completed_jobs) / len(recent_jobs) if recent_jobs else 0
+
+         # Calculate average processing time
+         completed_with_duration = [
+             j for j in completed_jobs if j.duration_seconds is not None
+         ]
+         avg_processing_time = (
+             sum(j.duration_seconds for j in completed_with_duration)
+             / len(completed_with_duration)
+             if completed_with_duration
+             else 0
+         )
+
+         report = {
+             "period_days": days,
+             "total_jobs": len(recent_jobs),
+             "completed_jobs": len(completed_jobs),
+             "failed_jobs": len(failed_jobs),
+             "success_rate": success_rate,
+             "avg_processing_time_seconds": avg_processing_time,
+             "jobs_by_status": job_stats.get("jobs_by_status", {}),
+             "jobs_by_type": job_stats.get("jobs_by_type", {}),
+             "recent_jobs": [
+                 {
+                     "id": j.id,
+                     "type": j.job_type,
+                     "status": j.status.value,
+                     "input_path": j.input_path,
+                     "output_path": j.output_path,
+                     "duration_seconds": j.duration_seconds,
+                     "created_at": j.created_at.isoformat(),
+                     "completed_at": (
+                         j.completed_at.isoformat() if j.completed_at else None
+                     ),
+                 }
+                 for j in recent_jobs[:10]  # Last 10 jobs
+             ],
+         }
+
+         return report
+
+     def generate_usage_report(self, days: int = 30) -> Dict[str, Any]:
+         """
+         Generate usage analytics report.
+
+         Args:
+             days: Number of days to analyze
+
+         Returns:
+             Dictionary with usage statistics
+         """
+         self.logger.info(f"Generating usage report for last {days} days")
+
+         # Get usage statistics
+         usage_stats = self.repos.analytics.get_usage_statistics(days)
+
+         # Get events by type
+         event_types = ["download", "convert", "extract", "view", "error"]
+         events_by_type = {}
+
+         for event_type in event_types:
+             events = self.repos.analytics.get_events_by_type(event_type, days)
+             events_by_type[event_type] = len(events)
+
+         report = {
+             "period_days": days,
+             "total_events": sum(events_by_type.values()),
+             "events_by_type": events_by_type,
+             "daily_activity": usage_stats.get("daily_activity", []),
+             "most_active_day": self._find_most_active_day(
+                 usage_stats.get("daily_activity", [])
+             ),
+             "trend_analysis": self._analyze_trends(
+                 usage_stats.get("daily_activity", [])
+             ),
+         }
+
+         return report
+
+     def create_visualizations(
+         self, output_dir: Union[str, Path], days: int = 30
+     ) -> List[Path]:
+         """
+         Create visualization charts and save them.
+
+         Args:
+             output_dir: Directory to save visualizations
+             days: Number of days to analyze
+
+         Returns:
+             List of created visualization files
+         """
+         output_dir = Path(output_dir)
+         output_dir.mkdir(parents=True, exist_ok=True)
+
+         self.logger.info(f"Creating visualizations in {output_dir}")
+
+         # Lazy import heavy dependencies
+         import matplotlib.pyplot as plt
+         import seaborn as sns
+
+         created_files = []
+
+         # Set up plotting style
+         plt.style.use("seaborn-v0_8")
+         sns.set_palette("husl")
+
+         # 1. Media files by type
+         media_report = self.generate_media_report(days)
+         if media_report["files_by_type"]:
+             fig, ax = plt.subplots(figsize=(10, 6))
+             types = list(media_report["files_by_type"].keys())
+             counts = list(media_report["files_by_type"].values())
+
+             ax.pie(counts, labels=types, autopct="%1.1f%%", startangle=90)
+             ax.set_title(f"Media Files by Type (Last {days} days)")
+
+             chart_path = output_dir / f"media_files_by_type_{days}d.png"
+             plt.savefig(chart_path, dpi=300, bbox_inches="tight")
+             plt.close()
+             created_files.append(chart_path)
+
+         # 2. Processing jobs over time
+         since = datetime.now() - timedelta(days=days)
+         daily_jobs = (
+             self.session.query(
+                 func.date(ProcessingJob.created_at).label("date"),
+                 func.count(ProcessingJob.id).label("count"),
+             )
+             .filter(ProcessingJob.created_at >= since)
+             .group_by(func.date(ProcessingJob.created_at))
+             .order_by("date")
+             .all()
+         )
+
+         if daily_jobs:
+             dates = [row.date for row in daily_jobs]
+             counts = [row.count for row in daily_jobs]
+
+             fig, ax = plt.subplots(figsize=(12, 6))
+             ax.plot(dates, counts, marker="o", linewidth=2, markersize=6)
+             ax.set_title(f"Processing Jobs Over Time (Last {days} days)")
+             ax.set_xlabel("Date")
+             ax.set_ylabel("Number of Jobs")
+             ax.tick_params(axis="x", rotation=45)
+
+             chart_path = output_dir / f"processing_jobs_timeline_{days}d.png"
+             plt.savefig(chart_path, dpi=300, bbox_inches="tight")
+             plt.close()
+             created_files.append(chart_path)
+
+         # 3. Interactive Plotly dashboard (included in the returned file list)
+         created_files.append(self._create_interactive_dashboard(output_dir, days))
+
+         self.logger.info(f"Created {len(created_files)} visualization files")
+         return created_files
+
+     def export_data(self, output_path: Union[str, Path], format: str = "json") -> Path:
+         """
+         Export analytics data to file.
+
+         Args:
+             output_path: Path to save exported data
+             format: Export format (json, csv, excel)
+
+         Returns:
+             Path to exported file
+         """
+         output_path = Path(output_path)
+         self.logger.info(f"Exporting analytics data to {output_path}")
+
+         # Gather all data
+         media_report = self.generate_media_report(30)
+         processing_report = self.generate_processing_report(30)
+         usage_report = self.generate_usage_report(30)
+
+         export_data = {
+             "export_timestamp": datetime.now().isoformat(),
+             "media_report": media_report,
+             "processing_report": processing_report,
+             "usage_report": usage_report,
+         }
+
+         if format.lower() == "json":
+             with open(output_path, "w") as f:
+                 json.dump(export_data, f, indent=2, default=str)
+
+         elif format.lower() == "csv":
+             # Create CSV files for each report type
+             output_dir = output_path.parent
+             base_name = output_path.stem
+
+             # Lazy import pandas
+             import pandas as pd
+
+             # Media files CSV
+             media_df = pd.DataFrame(media_report["recent_files"])
+             media_df.to_csv(output_dir / f"{base_name}_media.csv", index=False)
+
+             # Processing jobs CSV
+             jobs_df = pd.DataFrame(processing_report["recent_jobs"])
+             jobs_df.to_csv(output_dir / f"{base_name}_jobs.csv", index=False)
+
+         elif format.lower() == "excel":
+             # Lazy import pandas
+             import pandas as pd
+
+             with pd.ExcelWriter(output_path, engine="openpyxl") as writer:
+                 # Media files sheet
+                 media_df = pd.DataFrame(media_report["recent_files"])
+                 media_df.to_excel(writer, sheet_name="Media Files", index=False)
+
+                 # Processing jobs sheet
+                 jobs_df = pd.DataFrame(processing_report["recent_jobs"])
+                 jobs_df.to_excel(writer, sheet_name="Processing Jobs", index=False)
+
+                 # Summary sheet
+                 summary_data = {
+                     "Metric": [
+                         "Total Files",
+                         "Total Jobs",
+                         "Success Rate",
+                         "Avg Processing Time",
+                     ],
+                     "Value": [
+                         media_report["total_files"],
+                         processing_report["total_jobs"],
+                         f"{processing_report['success_rate']:.2%}",
+                         f"{processing_report['avg_processing_time_seconds']:.2f}s",
+                     ],
+                 }
+                 summary_df = pd.DataFrame(summary_data)
+                 summary_df.to_excel(writer, sheet_name="Summary", index=False)
+
+         self.logger.info(f"Exported data to {output_path}")
+         return output_path
+
+     def _find_most_active_day(
+         self, daily_activity: List[Dict[str, Any]]
+     ) -> Optional[str]:
+         """Find the most active day from daily activity data."""
+         if not daily_activity:
+             return None
+
+         max_activity = max(daily_activity, key=lambda x: x["count"])
+         return max_activity["date"]
+
+     def _analyze_trends(self, daily_activity: List[Dict[str, Any]]) -> Dict[str, Any]:
+         """Analyze trends in daily activity."""
+         if len(daily_activity) < 2:
+             return {"trend": "insufficient_data"}
+
+         # Simple trend analysis: compare first-half and second-half averages
+         counts = [day["count"] for day in daily_activity]
+         first_half_avg = sum(counts[: len(counts) // 2]) / (len(counts) // 2)
+         second_half_avg = sum(counts[len(counts) // 2 :]) / (
+             len(counts) - len(counts) // 2
+         )
+
+         if second_half_avg > first_half_avg * 1.1:
+             trend = "increasing"
+         elif second_half_avg < first_half_avg * 0.9:
+             trend = "decreasing"
+         else:
+             trend = "stable"
+
+         return {
+             "trend": trend,
+             "first_half_avg": first_half_avg,
+             "second_half_avg": second_half_avg,
+             "change_percent": (
+                 ((second_half_avg - first_half_avg) / first_half_avg) * 100
+                 if first_half_avg
+                 else 0.0  # Guard against division by zero when the first half has no activity
+             ),
+         }
+
+     def _create_interactive_dashboard(self, output_dir: Path, days: int):
+         """Create interactive Plotly dashboard."""
+         # Get data
+         media_report = self.generate_media_report(days)
+         processing_report = self.generate_processing_report(days)
+
+         # Lazy import plotly dependencies
+         import plotly.graph_objects as go
+         from plotly.offline import plot
+         from plotly.subplots import make_subplots
+
+         fig = make_subplots(
+             rows=2,
+             cols=2,
+             subplot_titles=(
+                 "Files by Type",
+                 "Processing Jobs Status",
+                 "File Sizes Over Time",
+                 "Job Types Distribution",
+             ),
+             specs=[
+                 [{"type": "pie"}, {"type": "pie"}],
+                 [{"type": "bar"}, {"type": "bar"}],
+             ],
+         )
+
+         # Files by type pie chart
+         if media_report["files_by_type"]:
+             fig.add_trace(
+                 go.Pie(
+                     labels=list(media_report["files_by_type"].keys()),
+                     values=list(media_report["files_by_type"].values()),
+                     name="Files by Type",
+                 ),
+                 row=1,
+                 col=1,
+             )
+
+         # Processing jobs status pie chart
+         if processing_report["jobs_by_status"]:
+             fig.add_trace(
+                 go.Pie(
+                     labels=list(processing_report["jobs_by_status"].keys()),
+                     values=list(processing_report["jobs_by_status"].values()),
+                     name="Job Status",
+                 ),
+                 row=1,
+                 col=2,
+             )
+
+         # File sizes over time (simplified)
+         if media_report["recent_files"]:
+             file_sizes = [f["size"] for f in media_report["recent_files"]]
+             file_names = [
+                 f["name"][:20] + "..." if len(f["name"]) > 20 else f["name"]
+                 for f in media_report["recent_files"]
+             ]
+
+             fig.add_trace(
+                 go.Bar(x=file_names, y=file_sizes, name="File Sizes"), row=2, col=1
+             )
+
+         # Job types distribution
+         if processing_report["jobs_by_type"]:
+             fig.add_trace(
+                 go.Bar(
+                     x=list(processing_report["jobs_by_type"].keys()),
+                     y=list(processing_report["jobs_by_type"].values()),
+                     name="Job Types",
+                 ),
+                 row=2,
+                 col=2,
+             )
+
+         fig.update_layout(
+             title_text=f"Spatelier Analytics Dashboard (Last {days} days)",
+             showlegend=True,
+             height=800,
+         )
+
+         # Save interactive HTML
+         dashboard_path = output_dir / f"analytics_dashboard_{days}d.html"
+         plot(fig, filename=str(dashboard_path), auto_open=False)
+
+         return dashboard_path
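
For orientation, here is a minimal usage sketch of the reporter above (not part of the package). It assumes Config() is constructible with defaults and that the SQLite database behind the repositories is already populated; all method and key names come from the diff itself.

# Hypothetical usage sketch; assumes a default-constructible Config and a
# populated database. Only methods shown in the diff above are used.
from core.config import Config
from analytics.reporter import AnalyticsReporter

reporter = AnalyticsReporter(Config(), verbose=True)

media = reporter.generate_media_report(days=7)
print(f"{media['total_files']} files, {media['total_size_mb']:.1f} MB in the last 7 days")

jobs = reporter.generate_processing_report(days=7)
print(f"Job success rate: {jobs['success_rate']:.2%}")

usage = reporter.generate_usage_report(days=7)
print(f"Activity trend: {usage['trend_analysis']['trend']}")

# Export everything as JSON; "csv" and "excel" are also accepted
reporter.export_data("analytics_export.json", format="json")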
cli/__init__.py ADDED
@@ -0,0 +1 @@
+ """Command-line interface modules."""
cli/app.py ADDED
@@ -0,0 +1,147 @@
+ """
+ Main CLI application entry point.
+
+ This module provides the main Typer application that orchestrates all CLI commands.
+ """
+
+ import sys
+
+ import typer
+ from rich.console import Console
+ from rich.panel import Panel
+ from rich.text import Text
+
+ from core.config import Config
+ from core.interactive_cli import run_interactive_cli
+ from core.logger import get_logger
+ from core.package_updater import PackageUpdater
+
+ from . import audio, cli_analytics, cli_utils, files, update, video, worker
+
+ # Create the main Typer app
+ app = typer.Typer(
+     name="spatelier",
+     help="Personal tool library for video and music file handling",
+     add_completion=False,
+     rich_markup_mode="rich",
+ )
+
+ # Add subcommands
+ app.add_typer(video.app, name="video", help="Video processing commands")
+ app.add_typer(audio.app, name="audio", help="Audio processing commands")
+ app.add_typer(cli_utils.app, name="utils", help="Utility commands")
+ app.add_typer(
+     cli_analytics.app, name="analytics", help="Analytics and reporting commands"
+ )
+ app.add_typer(worker.app, name="worker", help="Background job worker management")
+ app.add_typer(update.app, name="update", help="Package update management")
+ app.add_typer(files.app, name="files", help="File tracking and management")
+
+
+ # Add interactive mode command
+ @app.command()
+ def interactive(
+     verbose: bool = typer.Option(
+         False, "--verbose", "-v", help="Enable verbose logging"
+     )
+ ):
+     """
+     🎮 Launch interactive mode with guided workflows.
+
+     Interactive mode provides a user-friendly interface for common operations
+     like downloading videos, processing audio, and viewing analytics.
+     """
+     config = Config()
+     run_interactive_cli(config, verbose)
+
+
+ # Global options
+ def version_callback(value: bool):
+     """Show version information."""
+     if value:
+         version = None
+
+         # Strategy 1: Try getting version from installed package metadata (standard way)
+         try:
+             # Alias the import so a failed lookup does not leave the local
+             # `version` bound to the function and skip the fallbacks below
+             from importlib.metadata import version as pkg_version
+
+             version = pkg_version("spatelier")
+         except Exception:
+             pass
+
+         # Strategy 2: Try importing from root __init__.py (when running from source)
+         if version is None:
+             try:
+                 import importlib.util
+                 from pathlib import Path
+
+                 root_init = Path(__file__).parent.parent / "__init__.py"
+                 if root_init.exists():
+                     spec = importlib.util.spec_from_file_location(
+                         "spatelier_init", root_init
+                     )
+                     if spec and spec.loader:
+                         module = importlib.util.module_from_spec(spec)
+                         spec.loader.exec_module(module)
+                         version = module.__version__
+             except Exception:
+                 pass
+
+         # Strategy 3: Fallback to pyproject.toml version
+         if version is None:
+             try:
+                 import tomllib
+                 from pathlib import Path
+
+                 pyproject_path = Path(__file__).parent.parent / "pyproject.toml"
+                 with open(pyproject_path, "rb") as f:
+                     pyproject = tomllib.load(f)
+                 version = pyproject["project"]["version"]
+             except Exception:
+                 version = "unknown"
+
+         console = Console()
+         console.print(f"Spatelier version {version}")
+         raise typer.Exit()
+
+
+ @app.callback()
+ def main(
+     version: bool = typer.Option(
+         False,
+         "--version",
+         "-v",
+         callback=version_callback,
+         is_eager=True,
+         help="Show version information",
+     ),
+     verbose: bool = typer.Option(False, "--verbose", help="Enable verbose logging"),
+     config_file: str = typer.Option(
+         None, "--config", "-c", help="Path to configuration file"
+     ),
+ ):
+     """
+     Spatelier - Personal tool library for video and music file handling.
+
+     A modular, extensible tool library built with modern Python architecture.
+     """
+     # Initialize configuration
+     config = Config(config_file=config_file, verbose=verbose)
+
+     # Initialize logger
+     logger = get_logger(verbose=verbose)
+     logger.info("Spatelier CLI started")
+
+     # Start automatic background updates (opt-in via auto_update=True)
+     # Note: Auto-updates are disabled by default - use explicit update commands
+     # To enable: PackageUpdater(config, verbose=verbose, auto_update=True).start_background_update()
+
+
+ # Entry point function for setuptools
+ def main_entry():
+     """Entry point for setuptools console script."""
+     app()
+
+
+ if __name__ == "__main__":
+     main_entry()
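
As a quick sanity check of the CLI wiring above, a smoke test along these lines should work using Typer's bundled test runner (typer.testing.CliRunner). This sketch is not part of the package; the assertions assume the subcommand names and version output shown in the diff.

# Hypothetical smoke test for the Typer app above; not part of the package.
from typer.testing import CliRunner

from cli.app import app

runner = CliRunner()

# --version is eager, so it should short-circuit and exit cleanly
result = runner.invoke(app, ["--version"])
assert result.exit_code == 0
assert "Spatelier version" in result.output

# The registered subcommands should appear in the top-level help
result = runner.invoke(app, ["--help"])
for name in ("video", "audio", "utils", "analytics", "worker", "update", "files"):
    assert name in result.output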