kuhl-haus-mdp 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,236 @@
+ import asyncio
+ import json
+ import logging
+ from typing import Union, Optional, List
+
+ import redis.asyncio as aioredis
+ from redis.exceptions import ConnectionError
+
+ from kuhl_haus.mdp.analyzers.analyzer import Analyzer
+ from kuhl_haus.mdp.models.market_data_analyzer_result import MarketDataAnalyzerResult
+
+
+ class MarketDataScanner:
+     mdc_connected: bool
+     processed: int
+     decoding_errors: int
+     dropped: int
+     empty_results: int
+     published_results: int
+     errors: int
+     restarts: int
+
+     def __init__(self, redis_url: str, analyzer: Analyzer, subscriptions: List[str]):
+         self.redis_url = redis_url
+         self.analyzer = analyzer
+         self.logger = logging.getLogger(__name__)
+
+         # Connection objects
+         self.redis_client: Optional[aioredis.Redis] = None
+         self.pubsub_client: Optional[aioredis.client.PubSub] = None
+
+         # State
+         self.mdc_connected = False
+         self.running = False
+
+         self.subscriptions: List[str] = subscriptions
+         self._pubsub_task: Union[asyncio.Task, None] = None
+
+         # Metrics
+         self.restarts = 0
+         self.processed = 0
+         self.decoding_errors = 0
+         self.dropped = 0
+         self.empty_results = 0
+         self.published_results = 0
+         self.errors = 0
+
+     async def start(self):
+         """Initialize Redis connections, rehydrate the analyzer, subscribe, and start the pub/sub task."""
+         self.logger.info("mds.starting")
+         await self.connect()
+         self.pubsub_client = self.redis_client.pubsub()
+
+         scanner_cache = await self.get_cache(self.analyzer.cache_key)
+         self.logger.info("mds rehydrating from cache")
+         await self.analyzer.rehydrate(scanner_cache)
+         self.logger.info("mds rehydration complete")
+
+         for subscription in self.subscriptions:
+             if subscription.endswith("*"):
+                 await self.pubsub_client.psubscribe(subscription)
+             else:
+                 await self.pubsub_client.subscribe(subscription)
+         self._pubsub_task = asyncio.create_task(self._handle_pubsub())
+         self.logger.info("mds.started")
+
+     async def stop(self):
+         """Cleanup Redis connections."""
+         self.logger.info("mds.stopping")
+
+         # Guard against cancelling ourselves when stop() is reached via
+         # restart() from inside the pub/sub task.
+         if self._pubsub_task and self._pubsub_task is not asyncio.current_task():
+             self._pubsub_task.cancel()
+             try:
+                 await self._pubsub_task
+             except asyncio.CancelledError:
+                 pass
+             self._pubsub_task = None
+
+         if self.pubsub_client:
+             for subscription in self.subscriptions:
+                 if subscription.endswith("*"):
+                     await self.pubsub_client.punsubscribe(subscription)
+                 else:
+                     await self.pubsub_client.unsubscribe(subscription)
+             await self.pubsub_client.close()
+             self.pubsub_client = None
+
+         if self.redis_client:
+             await self.redis_client.close()
+             self.redis_client = None
+             self.mdc_connected = False
+
+         self.logger.info("mds.stopped")
+
+     async def connect(self, force: bool = False):
+         """Establish async connections to Redis."""
+         if not self.mdc_connected or force:
+             # Redis connection pool
+             try:
+                 self.redis_client = aioredis.from_url(
+                     self.redis_url,
+                     encoding="utf-8",
+                     decode_responses=True,
+                     max_connections=1000,
+                     socket_connect_timeout=10,
+                 )
+
+                 # Test Redis connection
+                 await self.redis_client.ping()
+                 self.mdc_connected = True
+                 self.logger.debug(f"Connected to Redis: {self.redis_url}")
+             except Exception as e:
+                 self.logger.error(f"Failed to connect to Redis: {e}")
+                 raise
+
+     async def restart(self):
+         """Restart the Market Data Scanner."""
+         try:
+             await self.stop()
+             await asyncio.sleep(1)
+             await self.start()
+             self.restarts += 1
+         except Exception as e:
+             self.logger.error(f"Error restarting Market Data Scanner: {e}")
+
+     async def _handle_pubsub(self):
+         """Background task to receive Redis pub/sub messages and dispatch them to the analyzer."""
+         try:
+             self.logger.info("mds.pubsub.starting")
+             message_count = 0
+             retry_count = 0
+             max_retry_interval = 60
+             self.running = True
+             while True:
+                 # get_message() requires active subscriptions
+                 message = await self.pubsub_client.get_message(
+                     ignore_subscribe_messages=False,
+                     timeout=1.0
+                 )
+
+                 if message is None:
+                     # Timeout reached, no message available: sleep with exponential
+                     # backoff up to a maximum of max_retry_interval seconds
+                     retry_count += 1
+                     self.logger.debug(
+                         f"mds.pubsub.message timeout reached, no message available. Retry count: {retry_count}")
+                     sleep_interval = min(2 ** retry_count, max_retry_interval)
+                     await asyncio.sleep(sleep_interval)
+                     continue
+
+                 retry_count = 0
+                 msg_type = message.get("type")
+                 channel = message.get("channel")
+                 data = message.get("data")
+                 # Log subscription lifecycle events
+                 if msg_type in ("subscribe", "psubscribe"):
+                     self.logger.info(f"mds.pubsub.subscribed channel:{channel}, num_subs:{data}")
+                 elif msg_type in ("unsubscribe", "punsubscribe"):
+                     self.logger.info(f"mds.pubsub.unsubscribed channel:{channel}, num_subs:{data}")
+                 # Process actual data messages
+                 elif msg_type in ("message", "pmessage"):
+                     message_count += 1
+                     self.logger.debug(
+                         f"mds.pubsub.message channel:{channel}, data_len:{len(data)}, msg_num:{message_count}, data:{data}")
+                     # Pass the raw payload; _process_message decodes it so JSON errors
+                     # are counted instead of killing this task
+                     await self._process_message(data=data)
+                 else:
+                     self.logger.warning(f"mds.pubsub.unknown message type: {msg_type}")
+                     self.dropped += 1
+         except ConnectionError as e:
+             self.logger.error(f"mds.pubsub.connection_error error:{repr(e)}", exc_info=True)
+             self.running = False
+             self.mdc_connected = False
+             # Safe from inside this task: stop() skips cancelling the current task
+             await self.restart()
+         except asyncio.CancelledError:
+             self.logger.info("mds.pubsub.cancelled")
+             self.running = False
+             self.mdc_connected = False
+             raise
+         except Exception as e:
+             self.logger.error(f"mds.pubsub.error error:{repr(e)}", exc_info=True)
+             self.running = False
+             self.mdc_connected = False
+             raise
+
+     async def _process_message(self, data: str):
+         """Process a single message with concurrency control."""
+         try:
+             payload = json.loads(data)
+             # Delegate to analyzer (async)
+             self.logger.debug(f"Processing message - data_len:{len(data)}")
+             analyzer_results = await self.analyzer.analyze_data(payload)
+             self.processed += 1
+             if analyzer_results:
+                 for analyzer_result in analyzer_results:
+                     # Cache in Redis
+                     self.logger.debug(f"Caching message {analyzer_result.cache_key}")
+                     await self.cache_result(analyzer_result)
+                     self.published_results += 1
+             else:
+                 # Empty result - nothing to cache
+                 self.empty_results += 1
+         except json.JSONDecodeError as e:
+             self.logger.error(f"JSON decode error: {e}")
+             self.decoding_errors += 1
+         except Exception as e:
+             self.logger.error(f"Processing error: {e}", exc_info=True)
+             self.errors += 1
+
+     async def get_cache(self, cache_key: str) -> Optional[dict]:
+         """Fetch current value from Redis cache (for snapshot requests)."""
+         value = await self.redis_client.get(cache_key)
+         if value:
+             return json.loads(value)
+         return None
+
+     async def cache_result(self, analyzer_result: MarketDataAnalyzerResult):
+         """
+         Async cache to Redis with pub/sub notification.
+
+         Args:
+             analyzer_result: MarketDataAnalyzerResult
+         """
+         result_json = json.dumps(analyzer_result.data)
+
+         # Pipeline commands are queued synchronously; only execute() is awaited
+         pipe = self.redis_client.pipeline(transaction=False)
+         if analyzer_result.cache_key:
+             if analyzer_result.cache_ttl > 0:
+                 pipe.setex(analyzer_result.cache_key, analyzer_result.cache_ttl, result_json)
+             else:
+                 pipe.set(analyzer_result.cache_key, result_json)
+         if analyzer_result.publish_key:
+             pipe.publish(analyzer_result.publish_key, result_json)
+
+         await pipe.execute()
+
+         self.logger.debug(f"Cached result for {analyzer_result.cache_key}")
+
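
For orientation, here is a minimal, hypothetical usage sketch of MarketDataScanner. The import path, the NoopAnalyzer stub, the Redis URL, and the channel name are all assumptions for illustration; the diff above does not show file names or a concrete Analyzer.

    import asyncio

    from kuhl_haus.mdp.scanners.market_data_scanner import MarketDataScanner  # assumed path


    class NoopAnalyzer:
        """Stand-in for a concrete Analyzer subclass (interface inferred from the code above)."""
        cache_key = "scanner:noop"

        async def rehydrate(self, cache):
            pass  # start() passes the cached snapshot here

        async def analyze_data(self, data):
            return None  # a real analyzer returns MarketDataAnalyzerResult objects


    async def main():
        scanner = MarketDataScanner(
            redis_url="redis://localhost:6379/0",  # illustrative URL
            analyzer=NoopAnalyzer(),
            subscriptions=["trades.*"],  # a trailing "*" selects psubscribe
        )
        await scanner.start()
        try:
            await asyncio.sleep(60)  # let the pub/sub task process messages
        finally:
            await scanner.stop()


    asyncio.run(main())
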
@@ -0,0 +1,191 @@
+ import asyncio
+ import json
+ import logging
+ import os
+ from typing import Dict, Optional, Set
+
+ import redis.asyncio as redis
+ from fastapi import WebSocket
+ from pydantic_settings import BaseSettings
+
+
+ class UnauthorizedException(Exception):
+     pass
+
+
+ class Settings(BaseSettings):
+     log_level: str = os.environ.get("LOG_LEVEL", "INFO").upper()
+
+
+ settings = Settings()
+ logging.basicConfig(
+     level=settings.log_level,
+     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+ )
+ logger = logging.getLogger(__name__)
+
+
+ class WidgetDataService:
+     """WebSocket interface for client subscriptions to Redis market data."""
+
+     def __init__(self, redis_client: redis.Redis, pubsub_client: redis.client.PubSub):
+         self.redis_client: redis.Redis = redis_client
+         self.pubsub_client: redis.client.PubSub = pubsub_client
+
+         # Track active WebSocket connections per feed
+         self.subscriptions: Dict[str, Set[WebSocket]] = {}
+         self._pubsub_task: Optional[asyncio.Task] = None
+         self._pubsub_lock = asyncio.Lock()
+
+         self.mdc_connected = False
+
+     async def start(self):
+         """Verify the Redis connection. The pub/sub task starts on first subscription."""
+         logger.info("wds.starting")
+         await self.redis_client.ping()
+         self.mdc_connected = True
+         logger.info("wds.started")
+
+     async def stop(self):
+         """Cleanup Redis connections."""
+         logger.info("wds.stopping")
+
+         if self._pubsub_task:
+             self._pubsub_task.cancel()
+             try:
+                 await self._pubsub_task
+             except asyncio.CancelledError:
+                 pass
+             self._pubsub_task = None
+
+         logger.info("wds.stopped")
+
+     async def subscribe(self, feed: str, websocket: WebSocket):
+         """Subscribe WebSocket client to a Redis feed."""
+         async with self._pubsub_lock:
+             if feed not in self.subscriptions:
+                 self.subscriptions[feed] = set()
+                 if "*" in feed:
+                     await self.pubsub_client.psubscribe(feed)
+                 else:
+                     await self.pubsub_client.subscribe(feed)
+                 logger.info(f"wds.feed.subscribed feed:{feed}, total_feeds:{len(self.subscriptions)}")
+
+             # First subscription: start pub/sub task
+             if len(self.subscriptions) == 1 and self._pubsub_task is None:
+                 self._pubsub_task = asyncio.create_task(self._handle_pubsub())
+                 logger.info("wds.pubsub.task_started")
+             self.subscriptions[feed].add(websocket)
+             logger.info(f"wds.client.subscribed feed:{feed}, clients:{len(self.subscriptions[feed])}")
+
+     async def unsubscribe(self, feed: str, websocket: WebSocket):
+         """Unsubscribe WebSocket client from a Redis feed."""
+         async with self._pubsub_lock:
+             if feed in self.subscriptions:
+                 self.subscriptions[feed].discard(websocket)
+
+                 if not self.subscriptions[feed]:
+                     if "*" in feed:
+                         await self.pubsub_client.punsubscribe(feed)
+                     else:
+                         await self.pubsub_client.unsubscribe(feed)
+                     del self.subscriptions[feed]
+                     logger.info(f"wds.feed.unsubscribed feed:{feed}, total_feeds:{len(self.subscriptions)}")
+                 else:
+                     logger.info(f"wds.client.unsubscribed feed:{feed}, clients:{len(self.subscriptions[feed])}")
+
+             # Last subscription removed: stop pub/sub task
+             if not self.subscriptions and self._pubsub_task:
+                 try:
+                     self._pubsub_task.cancel()
+                     await self._pubsub_task
+                 except asyncio.CancelledError:
+                     pass
+                 except RuntimeError:
+                     # Raised when unsubscribe() runs inside the pub/sub task itself
+                     # (a task cannot await its own completion)
+                     pass
+                 self._pubsub_task = None
+                 logger.info("wds.pubsub.task_stopped")
+
+     async def disconnect(self, websocket: WebSocket):
+         """Disconnect WebSocket client from all feeds."""
+         subs = []
+         async with self._pubsub_lock:
+             for feed, clients in self.subscriptions.items():
+                 if websocket in clients:
+                     logger.info(f"wds.client.disconnecting feed:{feed}")
+                     subs.append(feed)
+         # Unsubscribe outside the lock: unsubscribe() re-acquires it
+         for sub in subs:
+             await self.unsubscribe(sub, websocket)
+
+     async def get_cache(self, cache_key: str) -> Optional[dict]:
+         """Fetch current value from Redis cache (for snapshot requests)."""
+         logger.info(f"wds.cache.get cache_key:{cache_key}")
+         value = await self.redis_client.get(cache_key)
+         if value:
+             logger.info(f"wds.cache.hit cache_key:{cache_key}")
+             return json.loads(value)
+         logger.info(f"wds.cache.miss cache_key:{cache_key}")
+         return None
+
+     async def _handle_pubsub(self):
+         """Background task to receive Redis pub/sub messages and fan out to WebSockets."""
+         try:
+             logger.info("wds.pubsub.starting")
+             message_count = 0
+
+             while True:
+                 # get_message() requires active subscriptions
+                 message = await self.pubsub_client.get_message(
+                     ignore_subscribe_messages=False,
+                     timeout=1.0
+                 )
+
+                 if message is None:
+                     # Timeout reached, no message available
+                     await asyncio.sleep(0.01)
+                     continue
+
+                 msg_type = message.get("type")
+
+                 # Log subscription lifecycle events
+                 if msg_type in ("subscribe", "psubscribe"):
+                     logger.info(f"wds.pubsub.subscribed channel:{message['channel']}, num_subs:{message['data']}")
+
+                 elif msg_type in ("unsubscribe", "punsubscribe"):
+                     logger.info(f"wds.pubsub.unsubscribed channel:{message['channel']}, num_subs:{message['data']}")
+
+                 # Process actual data messages
+                 elif msg_type in ("message", "pmessage"):
+                     message_count += 1
+                     # Pattern messages are keyed by the subscribed pattern, not the channel
+                     feed = message.get("pattern") or message["channel"]
+                     data = message["data"]
+
+                     logger.debug(f"wds.pubsub.message feed:{feed}, data_len:{len(data)}, msg_num:{message_count}")
+
+                     if feed in self.subscriptions:
+                         # Fan out to all WebSocket clients subscribed to this feed
+                         disconnected = []
+                         sent_count = 0
+
+                         for ws in self.subscriptions[feed]:
+                             try:
+                                 await ws.send_text(data)
+                                 sent_count += 1
+                             except Exception as e:
+                                 logger.error(f"wds.send.failed feed:{feed}, error:{repr(e)}")
+                                 disconnected.append(ws)
+
+                         logger.debug(f"wds.fanout.complete feed:{feed}, sent:{sent_count}, failed:{len(disconnected)}")
+
+                         # Clean up disconnected clients
+                         for ws in disconnected:
+                             await self.unsubscribe(feed, ws)
+                     else:
+                         logger.warning(f"wds.pubsub.orphan feed:{feed}, msg:Received message for untracked feed")
+
+         except asyncio.CancelledError:
+             logger.info("wds.pubsub.cancelled")
+             raise
+
+         except Exception as e:
+             logger.error(f"wds.pubsub.error error:{repr(e)}", exc_info=True)
+             raise
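
A hedged sketch of how WidgetDataService might be wired into a FastAPI app. The module path, endpoint shape, and Redis URL are assumptions for illustration, not part of the package.

    import redis.asyncio as redis
    from fastapi import FastAPI, WebSocket, WebSocketDisconnect

    from kuhl_haus.mdp.services.widget_data_service import WidgetDataService  # assumed path

    app = FastAPI()
    redis_client = redis.from_url("redis://localhost:6379/0", decode_responses=True)  # illustrative URL
    wds = WidgetDataService(redis_client, redis_client.pubsub())


    @app.websocket("/ws/{feed}")
    async def feed_endpoint(websocket: WebSocket, feed: str):
        await websocket.accept()
        await wds.subscribe(feed, websocket)
        try:
            while True:
                await websocket.receive_text()  # keep the socket open; fan-out happens server-side
        except WebSocketDisconnect:
            await wds.disconnect(websocket)
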
File without changes
@@ -0,0 +1,228 @@
+ from multiprocessing import Process, Queue
+ from queue import Empty, Full
+ from multiprocessing.synchronize import Event as MPEvent
+ from typing import Dict, Type, Any
+ import signal
+ import asyncio
+ import logging
+
+ logger = logging.getLogger(__name__)
+
+
+ class ProcessManager:
+     """Manages worker processes for MDP components"""
+
+     def __init__(self):
+         self.processes: Dict[str, Process] = {}
+         self.shutdown_events: Dict[str, MPEvent] = {}
+         self.status_queues: Dict[str, Queue] = {}
+
+     def start_worker(self, name: str, worker_class: Type, **kwargs):
+         """Start any worker class in a separate process"""
+         import multiprocessing as mp  # keep the factory import local
+
+         shutdown_event = mp.Event()  # factory call
+         status_queue = mp.Queue(maxsize=1)
+
+         process = Process(
+             target=self._run_worker,
+             args=(worker_class, shutdown_event, status_queue),
+             kwargs=kwargs,
+             name=name,
+             daemon=False
+         )
+
+         self.processes[name] = process
+         self.shutdown_events[name] = shutdown_event
+         self.status_queues[name] = status_queue
+
+         process.start()
+         logger.info(f"Started process: {name} (PID: {process.pid})")
+
+     @staticmethod
+     def _run_worker(
+             worker_class: Type[Any],
+             shutdown_event: MPEvent,
+             status_queue: Queue,
+             **kwargs: Any
+     ) -> None:
+         """Generic worker process entry point with async event loop management"""
+
+         # Signal handlers for graceful shutdown
+         def shutdown_handler(signum, frame):
+             shutdown_event.set()
+
+         signal.signal(signal.SIGTERM, shutdown_handler)
+         signal.signal(signal.SIGINT, shutdown_handler)
+
+         # Create new event loop for this process
+         loop = asyncio.new_event_loop()
+         asyncio.set_event_loop(loop)
+
+         # Create asyncio event for coordinating shutdown
+         async_shutdown = asyncio.Event()
+
+         async def monitor_shutdown():
+             """Monitor multiprocessing shutdown_event and set async event"""
+             while not shutdown_event.is_set():
+                 await asyncio.sleep(0.1)  # Poll frequently without blocking
+             async_shutdown.set()
+             logger.info(f"Shutdown signal detected for {worker_class.__name__}")
+
+         async def status_reporter(worker_instance):
+             """Periodically report worker status to parent process"""
+             while not async_shutdown.is_set():
+                 try:
+                     status = {
+                         "processed": getattr(worker_instance, 'processed', 0),
+                         "errors": getattr(worker_instance, 'errors', getattr(worker_instance, 'error', 0)),
+                         "decoding_errors": getattr(worker_instance, 'decoding_errors', 0),
+                         "dropped": getattr(worker_instance, 'dropped', 0),
+                         "duplicated": getattr(worker_instance, 'duplicated', 0),
+                         "mdq_connected": getattr(worker_instance, 'mdq_connected', False),
+                         "mdc_connected": getattr(worker_instance, 'mdc_connected', False),
+                         "restarts": getattr(worker_instance, 'restarts', 0),
+                         "running": getattr(worker_instance, 'running', False),
+                     }
+
+                     try:
+                         status_queue.put_nowait(status)
+                     except Full:
+                         pass  # Queue full, skip this update
+                 except Exception as ex:
+                     logger.error(f"Error updating status for {worker_class.__name__}: {ex}")
+
+                 # Wait 1 second before next status update
+                 try:
+                     await asyncio.wait_for(async_shutdown.wait(), timeout=1.0)
+                 except asyncio.TimeoutError:
+                     pass  # Expected - just continue loop
+
+         async def run_worker():
+             """Main worker coroutine that manages worker lifecycle"""
+             worker = None
+
+             try:
+                 # Instantiate worker
+                 worker = worker_class(**kwargs)
+                 logger.info(f"Instantiated {worker_class.__name__}")
+
+                 # Start monitoring and status tasks
+                 monitor_task = asyncio.create_task(monitor_shutdown())
+                 status_task = asyncio.create_task(status_reporter(worker))
+
+                 # Start the worker (may block indefinitely or return immediately)
+                 logger.info(f"Starting {worker_class.__name__}")
+                 worker_task = asyncio.create_task(worker.start())
+
+                 # Wait for shutdown signal while worker and status tasks run
+                 await async_shutdown.wait()
+
+                 logger.info(f"Shutdown initiated for {worker_class.__name__}")
+
+                 # Signal worker to stop if it has a running flag
+                 if hasattr(worker, 'running'):
+                     worker.running = False
+                     logger.debug(f"Set running=False for {worker_class.__name__}")
+
+                 # Cancel worker task if it's still running
+                 if not worker_task.done():
+                     logger.info(f"Cancelling worker task for {worker_class.__name__}")
+                     worker_task.cancel()
+                     try:
+                         await asyncio.wait_for(worker_task, timeout=5.0)
+                     except asyncio.CancelledError:
+                         pass  # Normal outcome of cancellation
+                     except asyncio.TimeoutError:
+                         logger.warning(f"Worker task cancellation timed out for {worker_class.__name__}")
+                     except Exception as ex:
+                         logger.error(f"Error during worker task cancellation: {ex}")
+
+                 # Call worker's stop method for cleanup
+                 logger.info(f"Calling stop() for {worker_class.__name__}")
+                 await worker.stop()
+
+                 # Cancel monitoring tasks
+                 monitor_task.cancel()
+                 status_task.cancel()
+
+                 # Wait for cleanup with timeout
+                 try:
+                     await asyncio.wait_for(
+                         asyncio.gather(monitor_task, status_task, return_exceptions=True),
+                         timeout=2.0
+                     )
+                 except asyncio.TimeoutError:
+                     logger.warning(f"Monitoring task cleanup timeout for {worker_class.__name__}")
+
+                 logger.info(f"Worker {worker_class.__name__} stopped cleanly")
+
+             except Exception as ex:
+                 logger.error(f"Worker error in {worker_class.__name__}: {ex}", exc_info=True)
+             finally:
+                 # Ensure stop is called even if errors occurred; stop() is expected
+                 # to tolerate being called twice
+                 if worker is not None:
+                     try:
+                         await worker.stop()
+                     except Exception as ex:
+                         logger.error(f"Error during final stop() call: {ex}", exc_info=True)
+
+         # Run the worker coroutine
+         try:
+             loop.run_until_complete(run_worker())
+         except Exception as e:
+             logger.error(f"Fatal error in worker process {worker_class.__name__}: {e}", exc_info=True)
+         finally:
+             try:
+                 # Cancel any remaining tasks
+                 pending = asyncio.all_tasks(loop)
+                 for task in pending:
+                     task.cancel()
+
+                 # Give tasks a chance to clean up
+                 if pending:
+                     loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True))
+             except Exception as e:
+                 logger.error(f"Error during event loop cleanup: {e}")
+             finally:
+                 loop.close()
+
+     def stop_process(self, name: str, timeout: float = 10.0):
+         """Stop a specific worker process"""
+         if name not in self.processes:
+             return
+
+         process = self.processes[name]
+         shutdown_event = self.shutdown_events[name]
+
+         logger.info(f"Stopping process: {name}")
+         shutdown_event.set()
+
+         process.join(timeout=timeout)
+         if process.is_alive():
+             logger.warning(f"Force killing process: {name}")
+             process.kill()
+             process.join()
+
+     def stop_all(self, timeout: float = 10.0):
+         """Stop all worker processes"""
+         for name in list(self.processes.keys()):
+             self.stop_process(name, timeout)
+
+     def get_status(self, name: str) -> dict:
+         """Get status from worker process (non-blocking)"""
+         if name not in self.processes:
+             return {"alive": False}
+
+         process = self.processes[name]
+         status_queue = self.status_queues[name]
+
+         status = {"alive": process.is_alive(), "pid": process.pid}
+
+         try:
+             worker_status = status_queue.get_nowait()
+             status.update(worker_status)
+         except Empty:
+             pass
+         except Exception as e:
+             logger.error(f"Error getting status for process {name}: {e}", exc_info=True)
+
+         return status
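
To see the lifecycle end to end, here is a hypothetical driver for ProcessManager. The import paths and constructor arguments are assumptions (the diff does not show file names), and NoopAnalyzer refers to the stub from the scanner sketch above.

    import time

    from kuhl_haus.mdp.process_manager import ProcessManager  # assumed path
    from kuhl_haus.mdp.scanners.market_data_scanner import MarketDataScanner  # assumed path

    if __name__ == "__main__":  # required for multiprocessing on spawn platforms
        pm = ProcessManager()
        pm.start_worker(
            "scanner",
            MarketDataScanner,  # worker_class; remaining kwargs go to its __init__
            redis_url="redis://localhost:6379/0",  # illustrative URL
            analyzer=NoopAnalyzer(),  # must be picklable to cross the process boundary
            subscriptions=["trades.*"],
        )
        time.sleep(5)
        print(pm.get_status("scanner"))  # {"alive": ..., "pid": ..., plus worker metrics}
        pm.stop_all()
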
@@ -0,0 +1,24 @@
+ from massive.websocket.models import (
+     WebSocketMessage,
+     EquityAgg,
+     EquityQuote,
+     EquityTrade,
+     LimitUpLimitDown,
+ )
+
+ from kuhl_haus.mdp.models.massive_data_queue import MassiveDataQueue
+
+
+ class QueueNameResolver:
+     @staticmethod
+     def queue_name_for_web_socket_message(message: WebSocketMessage) -> str:
+         if isinstance(message, EquityTrade):
+             return MassiveDataQueue.TRADES.value
+         elif isinstance(message, EquityAgg):
+             return MassiveDataQueue.AGGREGATE.value
+         elif isinstance(message, EquityQuote):
+             return MassiveDataQueue.QUOTES.value
+         elif isinstance(message, LimitUpLimitDown):
+             return MassiveDataQueue.HALTS.value
+         else:
+             return MassiveDataQueue.UNKNOWN.value
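
A quick illustration of the dispatch, assuming an EquityTrade instance is available from the massive feed (construction details are omitted, since the model's fields are not shown here):

    from massive.websocket.models import EquityTrade

    from kuhl_haus.mdp.models.massive_data_queue import MassiveDataQueue

    trade: EquityTrade = ...  # obtained from the massive WebSocket feed
    queue_name = QueueNameResolver.queue_name_for_web_socket_message(trade)
    assert queue_name == MassiveDataQueue.TRADES.value
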
File without changes