mxx-tool 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,355 @@
1
+ """
2
+ Scheduler service for managing APScheduler jobs.
3
+
4
+ Provides decoupled job management with:
5
+ - Dynamic job scheduling
6
+ - Job status tracking
7
+ - Overlap detection
8
+ - Active job monitoring
9
+ """
10
+
11
+ from apscheduler.schedulers.background import BackgroundScheduler
12
+ from apscheduler.executors.pool import ThreadPoolExecutor
13
+ from mxx.runner.core.callstack import PluginCallstackMeta
14
+ from mxx.runner.core.runner import MxxRunner
15
+ from mxx.server.schedule import ScheduleConfig
16
+ from mxx.server.registry import JobRegistry
17
+ from datetime import datetime
18
+ from typing import Dict, List, Optional
19
+ import logging
20
+ import threading
21
+
22
+
23
class JobExecutionContext:
    """Mutable record tracking one job execution's lifecycle and outcome."""

    def __init__(self, job_id: str, config: dict):
        # Identity and the runner configuration this execution will use.
        self.job_id = job_id
        self.config = config
        # Lifecycle state machine: pending -> running -> completed | failed.
        self.status: str = "pending"
        # Timestamps populated by the scheduler when execution starts/ends.
        self.start_time: Optional[datetime] = None
        self.end_time: Optional[datetime] = None
        # Error message captured when status becomes "failed".
        self.error: Optional[str] = None
32
+
33
+
34
+ class SchedulerService:
35
+ """
36
+ Decoupled scheduler service for managing plugin execution jobs.
37
+
38
+ Features:
39
+ - Dynamic job scheduling via API
40
+ - Job status tracking and monitoring
41
+ - Overlap detection for scheduled jobs
42
+ - Thread-safe job management
43
+ - Persistent job registry integration
44
+ """
45
+
46
+ def __init__(self, max_workers: int = 10, registry: Optional[JobRegistry] = None):
47
+ self.scheduler = BackgroundScheduler(
48
+ executors={'default': ThreadPoolExecutor(max_workers=max_workers)},
49
+ job_defaults={
50
+ 'coalesce': True, # Combine missed runs
51
+ 'max_instances': 1 # Only one instance per job
52
+ }
53
+ )
54
+ self.job_contexts: Dict[str, JobExecutionContext] = {}
55
+ self.registry = registry or JobRegistry()
56
+ self._lock = threading.Lock()
57
+ self._started = False
58
+
59
+ def start(self):
60
+ """Start the scheduler"""
61
+ if not self._started:
62
+ self.scheduler.start()
63
+ self._started = True
64
+ logging.info("Scheduler started")
65
+
66
+ def stop(self):
67
+ """Stop the scheduler and wait for jobs to complete"""
68
+ if self._started:
69
+ self.scheduler.shutdown(wait=True)
70
+ self._started = False
71
+ logging.info("Scheduler stopped")
72
+
73
+ def schedule_job(
74
+ self,
75
+ job_id: str,
76
+ config: dict,
77
+ schedule_config: Optional[ScheduleConfig] = None,
78
+ replace_existing: bool = False
79
+ ) -> Dict[str, any]:
80
+ """
81
+ Schedule a new job for plugin execution.
82
+
83
+ Args:
84
+ job_id: Unique identifier for the job
85
+ config: Configuration dict for MxxRunner
86
+ schedule_config: Optional schedule configuration (if None, registers as on-demand)
87
+ replace_existing: Whether to replace existing job with same ID
88
+
89
+ Returns:
90
+ Dict with job info and status
91
+
92
+ Raises:
93
+ ValueError: If job_id already exists and replace_existing is False
94
+ ValueError: If schedule overlaps with existing jobs
95
+ """
96
+ with self._lock:
97
+ # Check if job already exists
98
+ if job_id in self.job_contexts and not replace_existing:
99
+ raise ValueError(f"Job '{job_id}' already exists. Use replace_existing=True to replace.")
100
+
101
+ # Check for overlaps if scheduling
102
+ if schedule_config:
103
+ overlap_info = self._check_overlaps(job_id, schedule_config)
104
+ if overlap_info:
105
+ raise ValueError(f"Schedule overlaps with existing jobs: {overlap_info}")
106
+
107
+ # Create job context
108
+ context = JobExecutionContext(job_id, config)
109
+ self.job_contexts[job_id] = context
110
+
111
+ # Register job in registry
112
+ self.registry.register(
113
+ job_id=job_id,
114
+ config=config,
115
+ schedule=schedule_config,
116
+ source="api",
117
+ replace_existing=replace_existing
118
+ )
119
+
120
+ # Schedule or just register as on-demand
121
+ if schedule_config:
122
+ # Job has schedule - schedule it with APScheduler
123
+ schedule_dict = schedule_config.to_apscheduler_config()
124
+ self.scheduler.add_job(
125
+ func=self._execute_job,
126
+ args=[job_id],
127
+ **schedule_dict,
128
+ id=job_id,
129
+ name=f"Job: {job_id}",
130
+ replace_existing=replace_existing
131
+ )
132
+ logging.info(f"Scheduled job '{job_id}' with schedule: {schedule_dict}")
133
+ return {
134
+ "job_id": job_id,
135
+ "status": "scheduled",
136
+ "schedule": schedule_dict,
137
+ "next_run": self.scheduler.get_job(job_id).next_run_time.isoformat() if self.scheduler.get_job(job_id) else None
138
+ }
139
+ else:
140
+ # No schedule - register as on-demand job
141
+ logging.info(f"Registered on-demand job '{job_id}' (trigger via API)")
142
+ return {
143
+ "job_id": job_id,
144
+ "status": "registered",
145
+ "on_demand": True,
146
+ "hint": f"Trigger execution via POST /api/scheduler/jobs/{job_id}/trigger"
147
+ }
148
+
149
    def _check_overlaps(self, new_job_id: str, schedule_config: ScheduleConfig) -> Optional[str]:
        """
        Check if the new schedule overlaps with existing jobs.

        This is a heuristic, not an exact schedule intersection: any other
        interval job counts as a potential overlap, and cron jobs are compared
        by substring-matching their hour/minute settings.

        Args:
            new_job_id: Id of the job being scheduled; excluded from the scan
                so that replacing a job does not conflict with itself.
            schedule_config: Proposed schedule for the new job.

        Returns:
            String describing overlap if found, None otherwise
        """
        # Get all scheduled jobs (excluding the new one if replacing)
        scheduled_jobs = [
            job for job in self.scheduler.get_jobs()
            if job.id != new_job_id
        ]

        if not scheduled_jobs:
            return None

        # For interval-based schedules: any other interval job is reported as a
        # potential overlap — no attempt is made to prove the windows collide.
        if schedule_config.trigger == "interval":
            for job in scheduled_jobs:
                if hasattr(job.trigger, 'interval'):
                    return f"Interval-based job '{job.id}' may overlap"

        # For cron-based schedules, check for same-time collisions.
        elif schedule_config.trigger == "cron":
            for job in scheduled_jobs:
                if hasattr(job.trigger, 'fields'):
                    # NOTE(review): APScheduler's CronTrigger exposes its
                    # settings via the `fields` list, not `hour`/`minute`
                    # attributes, so the hasattr checks below may never pass
                    # and this branch may be dead code — verify against the
                    # APScheduler version in use.
                    trigger = job.trigger
                    # Simplified check: substring match on hour and minute.
                    if (hasattr(trigger, 'hour') and trigger.hour and
                        str(schedule_config.hour) in str(trigger.hour) and
                        hasattr(trigger, 'minute') and trigger.minute and
                        str(schedule_config.minute) in str(trigger.minute)):
                        return f"Cron job '{job.id}' scheduled at same time"

        return None
188
+
189
+ def _execute_job(self, job_id: str):
190
+ """
191
+ Execute a job by running all its plugins through MxxRunner.
192
+ """
193
+ context = self.job_contexts.get(job_id)
194
+ if not context:
195
+ logging.error(f"Job context not found for job '{job_id}'")
196
+ return
197
+
198
+ context.status = "running"
199
+ context.start_time = datetime.now()
200
+ logging.info(f"Starting execution of job '{job_id}'")
201
+
202
+ try:
203
+ # Clear callstack for this job execution
204
+ PluginCallstackMeta._callstackMap.clear()
205
+
206
+ # Create runner and execute
207
+ runner = MxxRunner()
208
+ runner.run(context.config)
209
+
210
+ context.status = "completed"
211
+ logging.info(f"Job '{job_id}' completed successfully")
212
+
213
+ except Exception as e:
214
+ context.status = "failed"
215
+ context.error = str(e)
216
+ logging.error(f"Job '{job_id}' failed: {e}", exc_info=True)
217
+
218
+ finally:
219
+ context.end_time = datetime.now()
220
+
221
+ def get_job_status(self, job_id: str) -> Optional[Dict]:
222
+ """Get status information for a specific job"""
223
+ context = self.job_contexts.get(job_id)
224
+ if not context:
225
+ return None
226
+
227
+ job = self.scheduler.get_job(job_id)
228
+
229
+ result = {
230
+ "job_id": job_id,
231
+ "status": context.status,
232
+ "start_time": context.start_time.isoformat() if context.start_time else None,
233
+ "end_time": context.end_time.isoformat() if context.end_time else None,
234
+ "error": context.error
235
+ }
236
+
237
+ if job:
238
+ result["next_run_time"] = job.next_run_time.isoformat() if job.next_run_time else None
239
+ result["scheduled"] = True
240
+ else:
241
+ result["scheduled"] = False
242
+
243
+ return result
244
+
245
+ def list_jobs(self) -> List[Dict]:
246
+ """List all jobs with their status"""
247
+ jobs = []
248
+ for job_id in self.job_contexts.keys():
249
+ status = self.get_job_status(job_id)
250
+ if status:
251
+ jobs.append(status)
252
+ return jobs
253
+
254
+ def list_active_jobs(self) -> List[Dict]:
255
+ """List currently running jobs"""
256
+ return [
257
+ self.get_job_status(job_id)
258
+ for job_id, context in self.job_contexts.items()
259
+ if context.status == "running"
260
+ ]
261
+
262
+ def cancel_job(self, job_id: str) -> bool:
263
+ """
264
+ Cancel a scheduled job (cannot stop already running jobs).
265
+
266
+ Returns:
267
+ True if job was cancelled, False if not found or already running
268
+ """
269
+ context = self.job_contexts.get(job_id)
270
+ if not context:
271
+ return False
272
+
273
+ if context.status == "running":
274
+ logging.warning(f"Cannot cancel running job '{job_id}'")
275
+ return False
276
+
277
+ job = self.scheduler.get_job(job_id)
278
+ if job:
279
+ job.remove()
280
+ logging.info(f"Cancelled job '{job_id}'")
281
+
282
+ # Clean up context
283
+ with self._lock:
284
+ del self.job_contexts[job_id]
285
+
286
+ return True
287
+
288
+ def remove_job(self, job_id: str) -> bool:
289
+ """
290
+ Remove a job from tracking (for completed/failed jobs).
291
+
292
+ Returns:
293
+ True if job was removed, False if not found or still scheduled
294
+ """
295
+ context = self.job_contexts.get(job_id)
296
+ if not context:
297
+ return False
298
+
299
+ if context.status in ["pending", "running"]:
300
+ logging.warning(f"Cannot remove active job '{job_id}'")
301
+ return False
302
+
303
+ with self._lock:
304
+ del self.job_contexts[job_id]
305
+
306
+ logging.info(f"Removed job '{job_id}' from tracking")
307
+ return True
308
+
309
+ def trigger_job(self, job_id: str) -> Dict[str, any]:
310
+ """
311
+ Trigger an on-demand job to run immediately.
312
+
313
+ This creates a one-time execution of a registered job.
314
+
315
+ Args:
316
+ job_id: Job identifier from registry
317
+
318
+ Returns:
319
+ Dict with execution info
320
+
321
+ Raises:
322
+ ValueError: If job not found in registry
323
+ """
324
+ # Get job from registry
325
+ entry = self.registry.get(job_id)
326
+ if not entry:
327
+ raise ValueError(f"Job '{job_id}' not found in registry")
328
+
329
+ # Create unique execution ID
330
+ execution_id = f"{job_id}_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
331
+
332
+ with self._lock:
333
+ # Create job context
334
+ context = JobExecutionContext(execution_id, entry.config)
335
+ self.job_contexts[execution_id] = context
336
+
337
+ # Schedule immediate execution
338
+ self.scheduler.add_job(
339
+ func=self._execute_job,
340
+ args=[execution_id],
341
+ id=execution_id,
342
+ name=f"Trigger: {job_id}"
343
+ )
344
+
345
+ # Mark in registry
346
+ self.registry.mark_triggered(job_id)
347
+
348
+ logging.info(f"Triggered on-demand job '{job_id}' as execution '{execution_id}'")
349
+
350
+ return {
351
+ "job_id": job_id,
352
+ "execution_id": execution_id,
353
+ "status": "triggered",
354
+ "message": f"Job '{job_id}' triggered for immediate execution"
355
+ }
mxx/server/server.py ADDED
@@ -0,0 +1,188 @@
1
+ """
2
+ MXX Scheduler Server
3
+
4
+ Flask-based HTTP server for managing scheduled and on-demand jobs.
5
+
6
+ Usage:
7
+ mxx-server [--host HOST] [--port PORT] [--jobs-dir PATH]
8
+
9
+ Environment Variables:
10
+ MXX_JOBS_DIR: Directory containing job configurations (default: ~/.mxx/jobs)
11
+ MXX_SERVER_HOST: Host to bind to (default: 127.0.0.1)
12
+ MXX_SERVER_PORT: Port to bind to (default: 5000)
13
+ """
14
+
15
+ from flask import Flask
16
+ import logging
17
+ import os
18
+ from pathlib import Path
19
+ import argparse
20
+
21
+ from mxx.server.flask_runner import FlaskMxxRunner
22
+ from mxx.server.routes import scheduler_bp
23
+
24
+ # Configure logging
25
+ logging.basicConfig(
26
+ level=logging.INFO,
27
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
28
+ )
29
+ logger = logging.getLogger(__name__)
30
+
31
+
32
def get_jobs_directory() -> Path:
    """Resolve the jobs directory: $MXX_JOBS_DIR if set, else ~/.mxx/jobs."""
    env_dir = os.environ.get('MXX_JOBS_DIR')
    if env_dir:
        # Environment override, with ~ expansion.
        return Path(env_dir).expanduser()
    return Path.home() / '.mxx' / 'jobs'
41
+
42
+
43
def create_app(jobs_dir: Path = None) -> Flask:
    """
    Create and configure Flask application.

    Args:
        jobs_dir: Directory containing job configuration files.
                  If None, uses get_jobs_directory()

    Returns:
        Configured Flask app
    """
    import atexit  # local import: only needed when an app is created

    app = Flask(__name__)

    # Use provided directory or get default
    if jobs_dir is None:
        jobs_dir = get_jobs_directory()

    # Ensure jobs directory exists
    jobs_dir.mkdir(parents=True, exist_ok=True)
    logger.info(f"Jobs directory: {jobs_dir}")

    # Create FlaskMxxRunner instance (pass app as first argument)
    flask_runner = FlaskMxxRunner(app=app, jobs_dir=jobs_dir)

    # Store in app config so blueprints/routes can reach the services.
    app.config['SCHEDULER_SERVICE'] = flask_runner.scheduler_service
    app.config['FLASK_RUNNER'] = flask_runner
    app.config['JOBS_DIR'] = jobs_dir

    # Register blueprints
    app.register_blueprint(scheduler_bp)

    @app.route('/')
    def index():
        """Service metadata and endpoint discovery."""
        return {
            "service": "mxx-scheduler",
            "version": "1.0.0",
            "jobs_dir": str(jobs_dir),
            "endpoints": {
                "health": "/api/scheduler/health",
                "jobs": "/api/scheduler/jobs",
                "registry": "/api/scheduler/registry"
            }
        }

    # BUG FIX: the original registered this as @app.teardown_appcontext,
    # but that hook fires after EVERY request (whenever an app context is
    # popped), which would stop the scheduler right after the first request.
    # Register a process-exit hook instead; main() also stops the runner in
    # its finally block for the normal shutdown path.
    def _shutdown_scheduler():
        """Stop scheduler when the interpreter exits."""
        try:
            flask_runner.stop()
        except Exception as e:
            logger.error(f"Error stopping scheduler: {e}")

    atexit.register(_shutdown_scheduler)

    return app
100
+
101
+
102
def main():
    """
    Main entry point for mxx-server command.

    Parses CLI arguments (with MXX_SERVER_* environment-variable fallbacks),
    builds the Flask app, starts the FlaskMxxRunner, and serves until
    interrupted; the scheduler is always stopped on the way out.
    """
    parser = argparse.ArgumentParser(
        description='MXX Scheduler Server',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Environment Variables:
  MXX_JOBS_DIR      Directory containing job configurations (default: ~/.mxx/jobs)
  MXX_SERVER_HOST   Host to bind to (default: 127.0.0.1)
  MXX_SERVER_PORT   Port to bind to (default: 5000)

Examples:
  # Start server with defaults
  mxx-server

  # Start on different port
  mxx-server --port 8080

  # Use custom jobs directory
  mxx-server --jobs-dir /path/to/jobs

  # Listen on all interfaces
  mxx-server --host 0.0.0.0
"""
    )

    parser.add_argument(
        '--host',
        default=os.environ.get('MXX_SERVER_HOST', '127.0.0.1'),
        help='Host to bind to (default: 127.0.0.1)'
    )

    parser.add_argument(
        '--port',
        type=int,
        default=int(os.environ.get('MXX_SERVER_PORT', 5000)),
        help='Port to bind to (default: 5000)'
    )

    parser.add_argument(
        '--jobs-dir',
        # BUG FIX: expand ~ in CLI-supplied paths, matching how MXX_JOBS_DIR
        # is treated by get_jobs_directory(); plain `type=Path` did not.
        type=lambda p: Path(p).expanduser(),
        default=get_jobs_directory(),
        help='Directory containing job configurations (default: ~/.mxx/jobs)'
    )

    parser.add_argument(
        '--debug',
        action='store_true',
        help='Run in debug mode'
    )

    args = parser.parse_args()

    logger.info("Starting MXX Scheduler Server...")
    logger.info(f"Host: {args.host}")
    logger.info(f"Port: {args.port}")
    logger.info(f"Jobs directory: {args.jobs_dir}")

    app = create_app(jobs_dir=args.jobs_dir)

    # Start the FlaskMxxRunner to load configs and start scheduler
    flask_runner = app.config['FLASK_RUNNER']
    flask_runner.start()

    logger.info("Server ready!")

    try:
        app.run(
            host=args.host,
            port=args.port,
            debug=args.debug,
            use_reloader=False  # reloader forks and would start the scheduler twice
        )
    except KeyboardInterrupt:
        logger.info("Shutting down...")
    finally:
        flask_runner.stop()
        logger.info("Server stopped")
185
+
186
+
187
+ if __name__ == '__main__':
188
+ main()
mxx/utils/__init__.py ADDED
@@ -0,0 +1,7 @@
1
+ """
2
+ MXX Utilities package.
3
+ """
4
+
5
+ from .nested import nested_get, nested_set, nested_remove
6
+
7
+ __all__ = ["nested_get", "nested_set", "nested_remove"]
mxx/utils/nested.py ADDED
@@ -0,0 +1,148 @@
1
+ """
2
+ Nested dictionary utilities for handling x/y/z key paths.
3
+
4
+ Provides functions to get, set, and remove values from nested dictionaries
5
+ using path-like keys with '/' separators.
6
+ """
7
+
8
+ from typing import Any, Dict
9
+
10
+
11
def nested_get(data: Dict[str, Any], key: str, default: Any = None) -> Any:
    """
    Get a nested value from dictionary using path-like key.

    Args:
        data: Dictionary to search in
        key: Nested key in format "x/y/z"
        default: Default value if key not found

    Returns:
        Value if found, default otherwise

    Example:
        >>> data = {"config": {"server": {"port": 8080}}}
        >>> nested_get(data, "config/server/port")
        8080
        >>> nested_get(data, "config/missing/key", "default")
        'default'
    """
    # An empty key addresses the root object itself.
    if not key:
        return data

    node: Any = data
    for segment in key.split('/'):
        # Bail out as soon as the path leaves dict territory or a
        # segment is missing.
        if not isinstance(node, dict) or segment not in node:
            return default
        node = node[segment]
    return node
45
+
46
+
47
def nested_set(data: Dict[str, Any], key: str, value: Any) -> None:
    """
    Set a nested value in dictionary using path-like key, creating
    intermediate dictionaries as needed.

    Args:
        data: Dictionary to modify
        key: Nested key in format "x/y/z"
        value: Value to set

    Raises:
        ValueError: If key is empty

    Example:
        >>> data = {}
        >>> nested_set(data, "config/server/port", 8080)
        >>> data
        {'config': {'server': {'port': 8080}}}
    """
    if not key:
        raise ValueError("Key cannot be empty")

    *parents, leaf = key.split('/')
    node = data
    for segment in parents:
        child = node.get(segment)
        if not isinstance(child, dict):
            # Create missing levels; clobber non-dict intermediates so the
            # rest of the path can still be descended.
            child = {}
            node[segment] = child
        node = child

    node[leaf] = value
80
+
81
+
82
def nested_remove(data: Dict[str, Any], key: str) -> bool:
    """
    Remove a nested key from dictionary using path-like key.

    Args:
        data: Dictionary to modify
        key: Nested key in format "x/y/z"

    Returns:
        True if key was found and removed, False otherwise

    Example:
        >>> data = {"config": {"server": {"port": 8080, "host": "localhost"}}}
        >>> nested_remove(data, "config/server/port")
        True
        >>> data
        {'config': {'server': {'host': 'localhost'}}}
    """
    if not key:
        return False

    *parents, leaf = key.split('/')
    node = data
    # Walk down to the parent of the target key.
    for segment in parents:
        if not isinstance(node, dict) or segment not in node:
            return False
        node = node[segment]

    # Delete the leaf only if the parent is a dict that actually holds it.
    if isinstance(node, dict) and leaf in node:
        del node[leaf]
        return True
    return False
122
+
123
+
124
def nested_update(target: Dict[str, Any], source: Dict[str, Any]) -> None:
    """
    Deep-merge *source* into *target* in place.

    Unlike dict.update(), nested dictionaries present on both sides are
    merged recursively instead of being replaced wholesale.

    Args:
        target: Dictionary to update (modified in place)
        source: Dictionary with values to merge in

    Example:
        >>> target = {"config": {"server": {"port": 8080}}}
        >>> source = {"config": {"server": {"host": "localhost"}, "client": {"timeout": 30}}}
        >>> nested_update(target, source)
        >>> target
        {'config': {'server': {'port': 8080, 'host': 'localhost'}, 'client': {'timeout': 30}}}
    """
    for key, incoming in source.items():
        existing = target.get(key)
        if isinstance(existing, dict) and isinstance(incoming, dict):
            # Both sides are dicts: merge one level deeper.
            nested_update(existing, incoming)
        else:
            # New key, or at least one side is not a dict: plain assignment.
            target[key] = incoming