mxx_tool-0.1.0-py3-none-any.whl

@@ -0,0 +1,128 @@
+
+
+ from time import sleep
+ from mxx.runner.core.callstack import MxxCallstack, PluginCallstackMeta
+ from mxx.runner.core.plugin import MxxPlugin
+ from mxx.runner.core.registry import MAPPINGS
+
+
+ class MxxRunner:
+     def _exportCfgs(self, cfg : dict):
+         """
+         Export and separate plugin configs from global configs.
+
+         Args:
+             cfg: Configuration dictionary
+
+         Returns:
+             Tuple of (plugin_configs, global_configs)
+             - plugin_configs: Dict of plugin_name -> plugin_config
+             - global_configs: Dict of global configuration values
+         """
+         pcfg = {}
+         gcfg = {}
+
+         # Separate plugin-specific configs from global configs
+         for k, v in cfg.items():
+             if k in MAPPINGS:
+                 # This is a plugin configuration
+                 if isinstance(v, dict):
+                     pcfg[k] = v
+                 else:
+                     gcfg[k] = v
+             else:
+                 # Could be a global config or unknown
+                 gcfg[k] = v
+
+         return pcfg, gcfg
+
+     def run(self, cfg : dict):
+         """
+         Run the MxxRunner with given configuration.
+
+         Args:
+             cfg: Configuration dictionary where keys are plugin names
+                 and values are plugin-specific config dicts.
+                 Example: {"lifetime": {"lifetime": 3600}, "os": {"cmd": "echo"}}
+         """
+         pcfg, gcfg = self._exportCfgs(cfg)
+         self.gcfg = gcfg
+         plugins = {}
+
+         for plugin_name, plugin_cfg in pcfg.items():
+             if plugin_name not in MAPPINGS:
+                 raise Exception(f"Plugin '{plugin_name}' is not registered")
+
+             plugin_cls = MAPPINGS[plugin_name]
+             plugin_instance = plugin_cls(**plugin_cfg)
+             plugins[plugin_name] = plugin_instance
+
+         self.plugins = plugins
+
+         self.run_events(plugins)
+
+     def run_events(self, plugins : dict[str, MxxPlugin]):
+         callstack = MxxCallstack()
+         for plugin in plugins.values():
+             plugin_callstack = PluginCallstackMeta._callstackMap[plugin.__cmdname__]
+             callstack.merge(plugin_callstack)
+
+         try:
+             # Check initial conditions
+             # all_cond: all must return True (empty list = pass)
+             # any_cond: at least one must return True (empty list = pass)
+             all_cond_passed = all(self._run_action(cond) for cond in callstack.all_cond) if callstack.all_cond else True
+             any_cond_passed = any(self._run_action(cond) for cond in callstack.any_cond) if callstack.any_cond else True
+
+             if not all_cond_passed or not any_cond_passed:
+                 return
+
+             # Execute pre-actions
+             for pre_action in callstack.pre_action:
+                 self._run_action(pre_action)
+
+             # Execute main actions
+             for action in callstack.action:
+                 self._run_action(action)
+
+             # Now run on_true/on_false loop
+             # on_true: return True to continue, False to stop
+             # on_false: return True to stop, False to continue
+             # Loop continues while: all on_true return True AND all on_false return False
+             # If no on_true/on_false hooks exist, skip the loop
+             if callstack.on_true or callstack.on_false:
+                 while True:
+                     should_continue = True
+
+                     # Check on_true hooks - if any return False, stop
+                     if callstack.on_true:
+                         if not all(self._run_action(ontrue) for ontrue in callstack.on_true):
+                             should_continue = False
+
+                     # Check on_false hooks - if any return True, stop
+                     if callstack.on_false:
+                         if any(self._run_action(onfalse) for onfalse in callstack.on_false):
+                             should_continue = False
+
+                     if not should_continue:
+                         break
+
+                     sleep(0.5)
+
+             # Execute post-actions (cleanup)
+             for post_action in callstack.post_action:
+                 self._run_action(post_action)
+
+         except Exception as e:
+             for onerror in callstack.on_error:
+                 self.currentError = e
+                 self._run_action(onerror)
+
+     def _run_action(self, func):
+         # Check whether func takes an argument; if it does, pass self
+         import inspect
+         sig = inspect.signature(func)
+         if len(sig.parameters) > 0:
+             return func(self)
+         return func()
+
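For orientation, MxxRunner.run() is driven entirely by the dict it receives: keys present in MAPPINGS with dict values become plugin instances, everything else is stashed on self.gcfg, and hooks are dispatched through _run_action(), which passes the runner itself to any callable that declares a parameter. A minimal usage sketch follows; the import path is assumed (this hunk's filename is not shown in the diff) and "log_level" is purely an illustrative non-plugin key:

    # Hypothetical usage sketch; the config shape follows the run() docstring above.
    from mxx.runner import MxxRunner  # import path assumed, adjust to the real module

    runner = MxxRunner()
    runner.run({
        "lifetime": {"lifetime": 3600},  # plugin config (key registered in MAPPINGS)
        "os": {"cmd": "echo"},           # plugin config
        "log_level": "INFO",             # illustrative non-plugin key -> ends up in runner.gcfg
    })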
mxx/server/__init__.py ADDED
@@ -0,0 +1,7 @@
+ """
+ MXX Server - Flask-based scheduler service for MXX runner.
+
+ Provides HTTP API for scheduling and managing MXX plugin jobs.
+ """
+
+ __version__ = "0.1.0"
@@ -0,0 +1,114 @@
+ """
+ Flask-integrated runner - thin wrapper around SchedulerService.
+
+ This module provides Flask integration for the MXX scheduler service.
+ It auto-loads configs from ~/.mxx/jobs/ and schedules them based on their
+ schedule section.
+ """
+
+ from flask import Flask
+ from mxx.server.scheduler import SchedulerService
+ from mxx.server.registry import JobRegistry
+ from mxx.runner.core.config_loader import load_config
+ from mxx.server.schedule import extract_schedule
+ from pathlib import Path
+ import logging
+
+
+ class FlaskMxxRunner:
+     """
+     Flask-integrated runner with APScheduler support.
+
+     This wrapper:
+     - Provides Flask app integration
+     - Manages scheduler lifecycle
+     - Auto-loads configs from ~/.mxx/jobs/ on startup
+     - Schedules configs that have a schedule section
+     - Exposes scheduler service for routes (dynamic scheduling)
+     """
+
+     def __init__(self, app: Flask, max_workers: int = 10, jobs_dir: Path = None):
+         """
+         Initialize Flask runner with scheduler service.
+
+         Args:
+             app: Flask application instance
+             max_workers: Maximum number of concurrent job workers
+             jobs_dir: Directory to load job configs from (default: ~/.mxx/jobs)
+         """
+         self.app = app
+         self.jobs_dir = jobs_dir or Path.home() / ".mxx" / "jobs"
+         self.jobs_dir.mkdir(parents=True, exist_ok=True)
+
+         self.registry = JobRegistry()
+         self.scheduler_service = SchedulerService(max_workers=max_workers, registry=self.registry)
+
+         # Store reference in app context for routes
+         app.config['SCHEDULER_SERVICE'] = self.scheduler_service
+
+     def load_configs_from_directory(self):
+         """
+         Load all config files from ~/.mxx/jobs/ and register them.
+
+         This method:
+         1. Finds all config files (.toml, .yaml, .json), excluding templates
+         2. Parses each config
+         3. Checks whether the config has a 'schedule' section
+         4. If a schedule exists, schedules the job automatically
+         5. Otherwise, registers it as an on-demand job (triggered via the API)
+         """
+         # Find all config files (exclude templates)
+         config_files = []
+         for pattern in ["*.toml", "*.yaml", "*.yml", "*.json"]:
+             config_files.extend([
+                 f for f in self.jobs_dir.glob(pattern)
+                 if not f.name.endswith(".template.toml") and
+                    not f.name.endswith(".template.yaml") and
+                    not f.name.endswith(".template.json")
+             ])
+
+         logging.info(f"Loading {len(config_files)} config files from {self.jobs_dir}")
+
+         for config_file in config_files:
+             try:
+                 # Load config using config_loader
+                 config = load_config(config_file)
+
+                 # Extract schedule
+                 schedule_config = extract_schedule(config)
+
+                 # Use filename (without extension) as job_id
+                 job_id = config_file.stem
+
+                 # Register the job (with or without schedule)
+                 result = self.scheduler_service.schedule_job(
+                     job_id=job_id,
+                     config=config,
+                     schedule_config=schedule_config,
+                     replace_existing=True  # Allow server restarts to replace jobs
+                 )
+
+                 # Update registry source to indicate config file origin
+                 if self.registry.exists(job_id):
+                     entry = self.registry.get(job_id)
+                     entry.source = f"config:{config_file.name}"
+                     self.registry._save()
+
+                 if schedule_config:
+                     logging.info(f"Scheduled job '{job_id}' from {config_file.name}: {result}")
+                 else:
+                     logging.info(f"Registered on-demand job '{job_id}' from {config_file.name}: {result}")
+
+             except Exception as e:
+                 logging.error(f"Failed to load config {config_file.name}: {e}", exc_info=True)
+
+     def start(self):
+         """Start the scheduler and load configs from directory"""
+         self.load_configs_from_directory()
+         self.scheduler_service.start()
+         logging.info("Flask runner started")
+
+     def stop(self):
+         """Stop the scheduler and wait for jobs to complete"""
+         self.scheduler_service.stop()
+         logging.info("Flask runner stopped")
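Taken together, the class above is meant to be attached to an existing Flask app and started once at boot. A wiring sketch under stated assumptions (the module path is a guess, since this hunk's filename is not shown in the diff):

    # Hypothetical wiring sketch; the module name is an assumption.
    from flask import Flask
    from mxx.server.flask_runner import FlaskMxxRunner  # assumed module path

    app = Flask(__name__)
    runner = FlaskMxxRunner(app, max_workers=4)  # jobs_dir defaults to ~/.mxx/jobs
    runner.start()  # loads *.toml/*.yaml/*.yml/*.json job configs, then starts the scheduler

    # Route handlers can reach the scheduler via app.config['SCHEDULER_SERVICE'].

    try:
        app.run()
    finally:
        runner.stop()  # stops the scheduler and waits for running jobs to complete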
mxx/server/registry.py ADDED
@@ -0,0 +1,229 @@
+ """
+ Job registry for persistent storage of registered jobs.
+
+ Provides:
+ - Persistent storage of job configurations in ~/.mxx/server/registry.json
+ - Separation between scheduled jobs and on-demand jobs
+ - Job execution history tracking
+ - Registry operations (add, remove, list, trigger)
+ """
+
+ from pathlib import Path
+ from typing import Dict, List, Optional
+ from mxx.server.schedule import ScheduleConfig
+ import json
+ import logging
+ from datetime import datetime
+ import threading
+
+
+ class JobRegistryEntry:
+     """Entry in the job registry"""
+     def __init__(
+         self,
+         job_id: str,
+         config: dict,
+         schedule: Optional[ScheduleConfig] = None,
+         source: str = "api"
+     ):
+         self.job_id = job_id
+         self.config = config
+         self.schedule = schedule
+         self.source = source  # "api", "config:filename.toml", etc.
+         self.registered_at = datetime.now()
+         self.last_triggered: Optional[datetime] = None
+         self.execution_count = 0
+
+     def to_dict(self) -> dict:
+         """Convert entry to dictionary for serialization"""
+         return {
+             "job_id": self.job_id,
+             "config": self.config,
+             "schedule": {
+                 "trigger": self.schedule.trigger,
+                 "hour": self.schedule.hour,
+                 "minute": self.schedule.minute,
+                 "second": self.schedule.second,
+                 "day_of_week": self.schedule.day_of_week,
+                 "day": self.schedule.day,
+                 "interval_seconds": self.schedule.interval_seconds
+             } if self.schedule else None,
+             "source": self.source,
+             "registered_at": self.registered_at.isoformat(),
+             "last_triggered": self.last_triggered.isoformat() if self.last_triggered else None,
+             "execution_count": self.execution_count
+         }
+
+     @staticmethod
+     def from_dict(data: dict) -> 'JobRegistryEntry':
+         """Create entry from dictionary"""
+         entry = JobRegistryEntry(
+             job_id=data["job_id"],
+             config=data["config"],
+             schedule=ScheduleConfig(**data["schedule"]) if data.get("schedule") else None,
+             source=data.get("source", "api")
+         )
+         entry.registered_at = datetime.fromisoformat(data["registered_at"])
+         if data.get("last_triggered"):
+             entry.last_triggered = datetime.fromisoformat(data["last_triggered"])
+         entry.execution_count = data.get("execution_count", 0)
+         return entry
+
+
+ class JobRegistry:
+     """
+     Persistent registry for job configurations.
+
+     Jobs are stored in two categories:
+     - Scheduled: Jobs with schedule config (auto-executed by scheduler)
+     - On-demand: Jobs without schedule (executed via trigger API)
+
+     Registry is persisted to ~/.mxx/server/registry.json
+     """
+
+     def __init__(self, registry_path: Optional[Path] = None):
+         """
+         Initialize job registry.
+
+         Args:
+             registry_path: Path to registry file (default: ~/.mxx/server/registry.json)
+         """
+         if registry_path is None:
+             registry_path = Path.home() / ".mxx" / "server" / "registry.json"
+
+         self.registry_path = registry_path
+         self.registry_path.parent.mkdir(parents=True, exist_ok=True)
+
+         self._entries: Dict[str, JobRegistryEntry] = {}
+         self._lock = threading.Lock()
+
+         # Load existing registry
+         self._load()
+
+     def _load(self):
+         """Load registry from disk"""
+         if not self.registry_path.exists():
+             logging.info(f"No existing registry found at {self.registry_path}")
+             return
+
+         try:
+             with open(self.registry_path, "r") as f:
+                 data = json.load(f)
+
+             for entry_data in data.get("entries", []):
+                 entry = JobRegistryEntry.from_dict(entry_data)
+                 self._entries[entry.job_id] = entry
+
+             logging.info(f"Loaded {len(self._entries)} jobs from registry")
+         except Exception as e:
+             logging.error(f"Failed to load registry: {e}", exc_info=True)
+
+     def _save(self):
+         """Save registry to disk"""
+         try:
+             data = {
+                 "version": "1.0",
+                 "entries": [entry.to_dict() for entry in self._entries.values()]
+             }
+
+             with open(self.registry_path, "w") as f:
+                 json.dump(data, f, indent=2)
+
+             logging.debug(f"Saved registry with {len(self._entries)} entries")
+         except Exception as e:
+             logging.error(f"Failed to save registry: {e}", exc_info=True)
+
+     def register(
+         self,
+         job_id: str,
+         config: dict,
+         schedule: Optional[ScheduleConfig] = None,
+         source: str = "api",
+         replace_existing: bool = False
+     ) -> JobRegistryEntry:
+         """
+         Register a job in the registry.
+
+         Args:
+             job_id: Unique job identifier
+             config: Job configuration dict
+             schedule: Optional schedule configuration
+             source: Source of the job ("api", "config:filename.toml", etc.)
+             replace_existing: Whether to replace existing entry
+
+         Returns:
+             JobRegistryEntry object
+
+         Raises:
+             ValueError: If job_id already exists and replace_existing is False
+         """
+         with self._lock:
+             if job_id in self._entries and not replace_existing:
+                 raise ValueError(f"Job '{job_id}' already registered. Use replace_existing=True to replace.")
+
+             entry = JobRegistryEntry(
+                 job_id=job_id,
+                 config=config,
+                 schedule=schedule,
+                 source=source
+             )
+
+             self._entries[job_id] = entry
+             self._save()
+
+             logging.info(f"Registered job '{job_id}' ({source})")
+             return entry
+
+     def unregister(self, job_id: str) -> bool:
+         """
+         Remove a job from registry.
+
+         Args:
+             job_id: Job identifier
+
+         Returns:
+             True if job was removed, False if not found
+         """
+         with self._lock:
+             if job_id not in self._entries:
+                 return False
+
+             del self._entries[job_id]
+             self._save()
+
+             logging.info(f"Unregistered job '{job_id}'")
+             return True
+
+     def get(self, job_id: str) -> Optional[JobRegistryEntry]:
+         """Get a job entry by ID"""
+         return self._entries.get(job_id)
+
+     def list_all(self) -> List[JobRegistryEntry]:
+         """List all registered jobs"""
+         return list(self._entries.values())
+
+     def list_scheduled(self) -> List[JobRegistryEntry]:
+         """List jobs with schedules"""
+         return [e for e in self._entries.values() if e.schedule is not None]
+
+     def list_on_demand(self) -> List[JobRegistryEntry]:
+         """List jobs without schedules (on-demand only)"""
+         return [e for e in self._entries.values() if e.schedule is None]
+
+     def mark_triggered(self, job_id: str):
+         """
+         Mark a job as triggered (update last_triggered time and count).
+
+         Args:
+             job_id: Job identifier
+         """
+         with self._lock:
+             entry = self._entries.get(job_id)
+             if entry:
+                 entry.last_triggered = datetime.now()
+                 entry.execution_count += 1
+                 self._save()
+
+     def exists(self, job_id: str) -> bool:
+         """Check if job exists in registry"""
+         return job_id in self._entries
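For reference, the registry above is self-contained enough to exercise directly: it persists to ~/.mxx/server/registry.json as {"version": "1.0", "entries": [...]}, one to_dict() entry per job. A short usage sketch (the job id and config values are illustrative only):

    # Illustrative usage sketch of the JobRegistry API defined above.
    from mxx.server.registry import JobRegistry

    registry = JobRegistry()  # loads any existing ~/.mxx/server/registry.json

    # Register an on-demand job (no schedule); without replace_existing=True,
    # re-registering the same job_id raises ValueError.
    registry.register(
        job_id="echo-demo",
        config={"os": {"cmd": "echo"}},
        source="api",
        replace_existing=True,
    )

    registry.mark_triggered("echo-demo")  # sets last_triggered and bumps execution_count
    print([entry.job_id for entry in registry.list_on_demand()])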