physicsworks 1.0.0__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
@@ -0,0 +1,234 @@
+ """
+ Utility functions for the runner package.
+ Post-processing, file operations, and other helper functions.
+ """
+
+ import json
+ import os
+ import zipfile
+ import tarfile
+ import uuid
+ import base64
+ from typing import Dict, List, Any
+
+ import requests
+
+ from .config import RuntimeAttributes
+ from .logger import DebugLogger
+ from .server import ServerCommunicator
+
+
+ class PostProcessor:
+     """Handles post-processing operations"""
+
+     def __init__(self, config, runtime_attrs: RuntimeAttributes,
+                  server_comm: ServerCommunicator, logger: DebugLogger):
+         self.config = config
+         self.runtime_attrs = runtime_attrs
+         self.server_comm = server_comm
+         self.logger = logger
+
+     def create_visualization_archives(self):
+         """Create visualization archives from available files"""
+         try:
+             # Create a VTP archive if VTP files exist anywhere under outputs_path.
+             # Note: tar_files only archives the top level of the directory it is given.
+             vtp_files = []
+             for root, dirs, files in os.walk(self.config.outputs_path):
+                 vtp_files.extend([f for f in files if f.endswith('.vtp')])
+
+             if vtp_files:
+                 vtp_filename = f"vtp{uuid.uuid1()}.tar.gz"
+                 self.tar_files(self.config.outputs_path, vtp_filename, 'vtp')
+                 self.logger.debug(f"Created VTP archive: {vtp_filename}")
+
+             # Create a GLTF archive if GLTF files exist in the "files" subdirectory
+             files_dir = os.path.join(self.config.outputs_path, "files")
+             if os.path.exists(files_dir):
+                 gltf_files = [f for f in os.listdir(files_dir) if f.endswith('.gltf')]
+                 if gltf_files:
+                     gltf_filename = f"gltf{uuid.uuid1()}.tar.gz"
+                     self.tar_files(files_dir, gltf_filename, 'gltf')
+                     self.logger.debug(f"Created GLTF archive: {gltf_filename}")
+
+         except Exception as e:
+             self.logger.error(f"Error creating visualization archives: {e}")
+
+     def set_results(self):
+         """Parse outputs/results.csv and send the typed results to the server"""
+         try:
+             results_path = os.path.join(self.config.outputs_path, "results.csv")
+             with open(results_path, 'r') as results_file:
+                 lines = results_file.readlines()
+                 if len(lines) < 2:
+                     return
+
+                 columns = lines[0].strip().split(",")
+                 values = lines[1].strip().split(",")
+                 results = []
+
+                 for col, val in zip(columns, values):
+                     val = val.strip()
+                     try:
+                         results.append({"name": col, "type": "scalar", "value": int(val)})
+                     except ValueError:
+                         try:
+                             results.append({"name": col, "type": "scalar", "value": float(val)})
+                         except ValueError:
+                             if val.lower() in ["true", "false"]:
+                                 results.append({"name": col, "type": "boolean", "value": val.lower() == "true"})
+                             else:
+                                 results.append({"name": col, "type": "ascii", "value": val})
+
+             # Send results to the server (config['host'] is expected to include a trailing slash)
+             config = self.config.config
+             requests.put(
+                 f"{config['host']}simulation/run/patch/{self.runtime_attrs.run_id}",
+                 json={"results": results},
+                 headers={'auth-token': config['token']}
+             )
+
+             self.logger.debug(f"Set {len(results)} results")
+
+         except FileNotFoundError:
+             self.logger.debug("results.csv file not found")
+         except Exception as e:
+             self.logger.error(f"Error setting results: {e}")
+
+     def tar_files(self, compress_path: str, compress_filename: str, type_filter: str = None):
+         """Create a tar.gz archive of the files in the top level of compress_path"""
+         try:
+             full_path = os.path.join(compress_path, compress_filename)
+             with tarfile.open(full_path, 'w:gz') as tar:
+                 for filename in os.listdir(compress_path):
+                     if filename == compress_filename:
+                         continue  # Skip the archive being written so it cannot include itself
+                     if not type_filter or filename.endswith(type_filter):
+                         file_path = os.path.join(compress_path, filename)
+                         tar.add(file_path, arcname=filename)
+
+             self.runtime_attrs.output_files.append(full_path)
+
+         except Exception as e:
+             self.logger.error(f"Error creating tar file {compress_filename}: {e}")
111
+
112
+
113
+ class FileUploader:
114
+ """Handles file upload operations"""
115
+
116
+ def __init__(self, config, runtime_attrs: RuntimeAttributes,
117
+ server_comm: ServerCommunicator, logger: DebugLogger, progress_callback=None):
118
+ self.config = config
119
+ self.runtime_attrs = runtime_attrs
120
+ self.server_comm = server_comm
121
+ self.logger = logger
122
+ self.progress_callback = progress_callback
123
+
124
+ def upload_file(self, path: str, metadata: Dict[str, Any], filename: str = None):
125
+ """Upload file to server in chunks"""
126
+ try:
127
+ import requests
128
+ chunk_size = 5242880 # = 1024 * 1024 * 5 -> 5MB chunks
129
+ file_size = os.path.getsize(path)
130
+ chunks_number = int(file_size / chunk_size) + 1
131
+
132
+ if not filename:
133
+ filename = os.path.basename(path)
134
+ if path.endswith(".drc") and path in self.server_comm.geometries:
135
+ filename = self.server_comm.geometries[os.path.basename(path)]
136
+
137
+ for i in range(chunks_number):
138
+ start = i * chunk_size
139
+ end = min((i + 1) * chunk_size, file_size)
140
+
141
+ with open(path, 'rb') as file:
142
+ file.seek(start)
143
+ chunk = file.read(end - start)
144
+
145
+ chunk_bytes = end - start
146
+
147
+ payload = {
148
+ 'filename': filename,
149
+ 'fileSize': file_size,
150
+ 'chunkIndex': i,
151
+ 'chunk': base64.b64encode(chunk).decode('utf-8'),
152
+ 'metadata': metadata
153
+ }
154
+
155
+ config = self.config.config
156
+ response = requests.post(
157
+ f"{config['host']}storage/chunks",
158
+ data={'json': json.dumps(payload)},
159
+ headers={'auth-token': config['token'], 'Content-Type': 'application/x-www-form-urlencoded'}
160
+ )
161
+
162
+ # Call progress callback after successful chunk upload
163
+ if self.progress_callback:
164
+ self.progress_callback(chunk_bytes)
165
+
166
+ except Exception as e:
167
+ self.logger.error(f"Error uploading file {filename}: {e}")
+
+
+ class ResultsZipper:
+     """Handles result packaging and zipping"""
+
+     def __init__(self, config, runtime_attrs: RuntimeAttributes,
+                  server_comm: ServerCommunicator, logger: DebugLogger):
+         self.config = config
+         self.runtime_attrs = runtime_attrs
+         self.server_comm = server_comm
+         self.logger = logger
+
+     def zip_results(self):
+         """Create result zip files"""
+         try:
+             package_id = str(uuid.uuid1())
+
+             if self.server_comm.solver_config and 'downloadable' in self.server_comm.solver_config:
+                 basic_paths = self.server_comm.solver_config['downloadable'].get('basic', {})
+
+                 self.create_zip_file(
+                     f"{package_id}.zip",
+                     os.path.join(self.config.outputs_path, "result.zip"),
+                     basic_paths
+                 )
+
+         except Exception as e:
+             self.logger.error(f"Error zipping results: {e}")
+
+     def create_zip_file(self, name: str, path: str, paths_schema: Dict[str, Any]):
+         """Create a zip file from a paths schema"""
+         try:
+             paths = self.parse_paths(paths_schema)
+
+             with zipfile.ZipFile(path, 'w', zipfile.ZIP_DEFLATED) as zipf:
+                 for item in paths:
+                     item_path = os.path.join(self.config.work_dir, item.lstrip('/'))
+
+                     if os.path.isfile(item_path):
+                         arcname = os.path.relpath(item_path, self.config.work_dir)
+                         zipf.write(item_path, arcname)
+                     elif os.path.isdir(item_path):
+                         for root, dirs, files in os.walk(item_path):
+                             for file in files:
+                                 full_path = os.path.join(root, file)
+                                 arcname = os.path.relpath(full_path, self.config.work_dir)
+                                 zipf.write(full_path, arcname)
+
+             self.runtime_attrs.output_files.append(path)
+             self.runtime_attrs.filenames[path] = name
+
+         except Exception as e:
+             self.logger.error(f"Error creating zip file {name}: {e}")
+
+     def parse_paths(self, schema: Dict[str, Any], route: str = '') -> List[str]:
+         """Flatten a nested schema into a list of slash-delimited paths"""
+         paths = []
+
+         if isinstance(schema, dict) and schema:
+             for key in schema:
+                 new_route = f"{route}/{key}" if route else f"/{key}"
+                 paths.extend(self.parse_paths(schema[key], new_route))
+         else:
+             # A truthy leaf marks its route for inclusion; falsy leaves are skipped
+             if schema and route:
+                 paths.append(route)
+
+         return paths
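+
+ # Illustrative example of parse_paths (the schema keys are assumptions, not from
+ # a real solver config; zipper is an assumed ResultsZipper instance): a nested
+ # 'downloadable' schema flattens to the routes of its truthy leaves.
+ #
+ #     >>> zipper.parse_paths({"outputs": {"logs": True, "tmp": False}})
+ #     ['/outputs/logs']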
@@ -0,0 +1,357 @@
+ """
+ File watching utilities for monitoring solver execution.
+ """
+
+ import os
+ import json
+ import threading
+ import time
+ import uuid
+ from typing import List
+
+ from .server import ServerCommunicator
+ from .config import RuntimeAttributes
+ from .logger import DebugLogger
+
+
+ class FileWatcher:
+     """Monitors files and directories for changes during solver execution"""
+
+     def __init__(self, runtime_attrs: RuntimeAttributes, server_communicator: ServerCommunicator,
+                  logger: DebugLogger, poll_interval: float = 1.0, on_abort_callback=None, main_process_ref=None):
+         self.runtime_attrs = runtime_attrs
+         self.server_communicator = server_communicator
+         self.logger = logger
+         self.watching = False
+         self.watch_thread = None
+         self.file_states = {}  # Track file modification times
+         self.watch_directories = []
+         self.poll_interval = poll_interval  # Configurable polling interval in seconds
+         self.upload_threads = set()  # Track active upload threads
+         self.upload_lock = threading.Lock()  # Protects the upload_threads set
+         self.on_abort_callback = on_abort_callback  # Optional callback for custom abort handling
+         self.main_process_ref = main_process_ref  # Callable returning the main solver process
+
+     def add_watch_directory(self, directory: str):
+         """Add a directory to watch"""
+         if os.path.isdir(directory):
+             self.watch_directories.append(directory)
+
+     def start_watching(self):
+         """Start the file watching thread"""
+         self.watching = True
+         self.watch_thread = threading.Thread(target=self._watch_loop, daemon=False)
+         self.watch_thread.start()
+
+     def stop_watching(self):
+         """Stop the file watching thread and wait for pending operations"""
+         self.watching = False
+         if self.watch_thread:
+             self.logger.debug("Waiting for file watcher to complete pending operations...")
+             self.watch_thread.join(timeout=None)  # Wait indefinitely for completion
+
+         # Wait for all upload threads to complete
+         with self.upload_lock:
+             pending_uploads = list(self.upload_threads)
+
+         if pending_uploads:
+             self.logger.debug(f"Waiting for {len(pending_uploads)} upload thread(s) to complete...")
+             for thread in pending_uploads:
+                 thread.join(timeout=30)  # 30 second timeout per upload
+                 if thread.is_alive():
+                     self.logger.warning(f"Upload thread {thread.name} did not complete in time")
+
+         self.logger.debug("File watcher stopped successfully")
+
+     def _watch_loop(self):
+         """Main watching loop using polling"""
+         while self.watching:
+             try:
+                 self._check_files()
+                 time.sleep(self.poll_interval)  # Configurable poll interval
+             except Exception as e:
+                 self.logger.error(f"Error in file watcher: {e}")
+
+     def _check_files(self):
+         """Check for file changes in watched directories"""
+         for directory in self.watch_directories:
+             if not os.path.isdir(directory):
+                 continue
+
+             # Check all files recursively
+             for root, dirs, files in os.walk(directory):
+                 for file in files:
+                     filepath = os.path.join(root, file)
+                     try:
+                         stat_info = os.stat(filepath)
+                         current_mtime = stat_info.st_mtime
+
+                         # Check if the file is new or modified
+                         if filepath not in self.file_states:
+                             self.file_states[filepath] = current_mtime
+                             self._handle_file_event(filepath, 'created')
+                         elif self.file_states[filepath] != current_mtime:
+                             self.file_states[filepath] = current_mtime
+                             self._handle_file_event(filepath, 'modified')
+                     except (OSError, IOError):
+                         # File may have been deleted or is inaccessible
+                         continue
+
+     def _handle_file_event(self, filepath: str, event_type: str):
+         """Dispatch file events to the appropriate handler"""
+         filename = os.path.basename(filepath)
+         # Normalizing backslashes lets the path checks below cover Windows paths too
+         normalized_path = filepath.replace('\\', '/')
+
+         self.logger.debug(f"File {event_type}: {filepath}")
+         if filename == 'state.txt':
+             self._handle_state_txt(filepath)
+         elif filename == 'config.json':
+             self._handle_config_json(filepath)
+         elif '/outputs/logs/' in normalized_path:
+             self._handle_log_file(filepath)
+         elif '/outputs/media/' in normalized_path:
+             self._handle_media_file(filepath)
+         elif '/outputs/plots/' in normalized_path:
+             self._handle_plot_file(filepath)
+
+     def _handle_config_json(self, filepath: str):
+         """Handle config.json file changes"""
+         try:
+             with open(filepath, 'r', encoding='utf-8') as f:
+                 config = json.load(f)
+
+             if config.get('abort'):
+                 self.logger.error("Aborted by user")
+
+                 # Set abort flag
+                 self.runtime_attrs.aborted = True
+
+                 # Set status to error
+                 self.server_communicator.set_status('error', 0, 'Aborted by user')
+
+                 # Call the custom abort callback if provided (for remote job cancellation, cleanup, etc.)
+                 if self.on_abort_callback:
+                     try:
+                         self.logger.debug("Calling custom abort callback...")
+                         self.on_abort_callback(config)
+                     except Exception as e:
+                         self.logger.error(f"Error in abort callback: {e}")
+
+                 # Kill the running main process if it exists
+                 if self.main_process_ref:
+                     try:
+                         main_process = self.main_process_ref()
+                         if main_process and main_process.poll() is None:  # Process is still running
+                             self.logger.info("Terminating running solver process...")
+                             main_process.terminate()
+                             # Give it 5 seconds to terminate gracefully
+                             try:
+                                 main_process.wait(timeout=5)
+                                 self.logger.info("Solver process terminated")
+                             except Exception:
+                                 # Force kill if it doesn't terminate
+                                 self.logger.warning("Force killing solver process...")
+                                 main_process.kill()
+                                 main_process.wait()
+                     except Exception as e:
+                         self.logger.error(f"Error terminating main process: {e}")
+
+                 # Stop watching to prevent further processing
+                 self.watching = False
+
+         except (json.JSONDecodeError, IOError) as e:
+             self.logger.error(f"Error reading config.json: {e}")
+
+     def _handle_state_txt(self, filepath: str):
+         """Handle state.txt changes - parses CSV lines: date & time,progress,text,step,status"""
+         try:
+             with open(filepath, 'r', encoding='utf-8') as f:
+                 lines = f.readlines()
+
+             if not lines:
+                 return
+
+             # Use the last non-empty line as the current status
+             last_line = None
+             for line in reversed(lines):
+                 stripped_line = line.strip()
+                 if stripped_line:
+                     last_line = stripped_line
+                     break
+
+             if last_line:
+                 # Parse CSV format: date & time,progress(number),text,step,status
+                 parts = last_line.split(',')
+
+                 if len(parts) >= 3:
+                     # parts[0] is the date & time and is not used here
+                     progress_str = parts[1].strip()
+                     text = parts[2].strip()
+                     step = parts[3].strip() if len(parts) > 3 else ""
+                     status = parts[4].strip() if len(parts) > 4 else "running"
+
+                     # Parse progress as an integer clamped to 0..100,
+                     # keeping the current value if parsing fails
+                     progress = self.runtime_attrs.progress
+                     try:
+                         progress = max(0, min(100, int(progress_str)))
+                     except ValueError:
+                         pass
+
+                     # Map the status string to the values expected by runtime_attrs
+                     status_lower = status.lower()
+                     if status_lower == 'finished':
+                         mapped_status = "finished"
+                     elif status_lower == 'failed':
+                         mapped_status = "error"  # 'failed' maps to 'error' for runtime_attrs
+                     elif status_lower in ('running', 'pending'):
+                         mapped_status = status_lower
+                     else:
+                         # Default to running if the status is unclear
+                         mapped_status = "running"
+
+                     self.logger.debug(f"Status updated from state file: progress={progress}%, status={mapped_status}, text={text}, step={step}")
+
+                     # Send the status update to the server; this updates runtime_attrs if values changed
+                     self.server_communicator.set_status(mapped_status, progress, text)
+                 else:
+                     self.logger.warning(f"Invalid state.txt format: {last_line}")
+
+         except IOError as e:
+             self.logger.error(f"Error reading state.txt: {e}")
+
+     def _handle_log_file(self, filepath: str):
+         """Handle log file changes"""
+         try:
+             if filepath not in self.runtime_attrs.log_paths:
+                 self.runtime_attrs.log_paths.append(filepath)
+
+             # For log files, store metadata rather than content due to size;
+             # the actual file upload is handled by the server communicator
+             file_stat = os.stat(filepath)
+             if filepath not in self.runtime_attrs.logs:
+                 self.runtime_attrs.logs[filepath] = {
+                     'size': file_stat.st_size,
+                     'modified': file_stat.st_mtime,
+                     'path': filepath,
+                     'name': f"{uuid.uuid1()}.log",
+                     'position': 0
+                 }
+             else:
+                 # Update metadata for the existing entry
+                 self.runtime_attrs.logs[filepath]['size'] = file_stat.st_size
+                 self.runtime_attrs.logs[filepath]['modified'] = file_stat.st_mtime
+
+             # Defer the server update to a separate thread so the poll loop is not blocked
+             thread = threading.Thread(target=self._upload_wrapper, args=(self.server_communicator._update_logs_node,), daemon=False)
+             with self.upload_lock:
+                 self.upload_threads.add(thread)
+             thread.start()
+
+         except IOError as e:
+             self.logger.error(f"Error processing log file {filepath}: {e}")
+
+     def _handle_plot_file(self, filepath: str):
+         """Handle plot file changes"""
+         try:
+             if filepath not in self.runtime_attrs.plots_paths:
+                 self.runtime_attrs.plots_paths.append(filepath)
+
+             # For plot files, store metadata rather than content due to size;
+             # the actual file upload is handled by the server communicator
+             file_stat = os.stat(filepath)
+             if filepath not in self.runtime_attrs.plots:
+                 self.runtime_attrs.plots[filepath] = {
+                     'size': file_stat.st_size,
+                     'modified': file_stat.st_mtime,
+                     'path': filepath,
+                     'name': f"{uuid.uuid1()}.{filepath.split('.')[-1]}",
+                     'position': 0
+                 }
+             else:
+                 # Update metadata for the existing entry
+                 self.runtime_attrs.plots[filepath]['size'] = file_stat.st_size
+                 self.runtime_attrs.plots[filepath]['modified'] = file_stat.st_mtime
+
+             # Defer the server update to a separate thread so the poll loop is not blocked
+             thread = threading.Thread(target=self._upload_wrapper, args=(self.server_communicator._update_plots_node,), daemon=False)
+             with self.upload_lock:
+                 self.upload_threads.add(thread)
+             thread.start()
+
+         except IOError as e:
+             self.logger.error(f"Error processing plot file {filepath}: {e}")
+
+     def _upload_wrapper(self, upload_func):
+         """Wrapper to track upload thread lifecycle"""
+         try:
+             upload_func()
+         finally:
+             # Remove this thread from the tracking set when done
+             with self.upload_lock:
+                 self.upload_threads.discard(threading.current_thread())
+
+     def _handle_media_file(self, filepath: str):
+         """Handle media file changes (binary files)"""
+         try:
+             if filepath not in self.runtime_attrs.media_paths:
+                 self.runtime_attrs.media_paths.append(filepath)
+
+             # For media files, store metadata rather than content due to size;
+             # the actual file upload is handled by the server communicator
+             file_stat = os.stat(filepath)
+             if filepath not in self.runtime_attrs.media:
+                 self.runtime_attrs.media[filepath] = {
+                     'size': file_stat.st_size,
+                     'modified': file_stat.st_mtime,
+                     'path': filepath,
+                     'name': f"{uuid.uuid1()}.{filepath.split('.')[-1]}",
+                     'position': 0
+                 }
+             else:
+                 # Update metadata for the existing entry
+                 self.runtime_attrs.media[filepath]['size'] = file_stat.st_size
+                 self.runtime_attrs.media[filepath]['modified'] = file_stat.st_mtime
+
+             # Defer the server update to a separate thread so the poll loop is not blocked
+             thread = threading.Thread(target=self._upload_wrapper, args=(self.server_communicator._update_media_node,), daemon=False)
+             with self.upload_lock:
+                 self.upload_threads.add(thread)
+             thread.start()
+
+         except IOError as e:
+             self.logger.error(f"Error processing media file {filepath}: {e}")
+
+
+ class FileSystemWatcher:
+     """Legacy compatibility wrapper for FileWatcher"""
+
+     def __init__(self, runtime_attrs: RuntimeAttributes, server_communicator: ServerCommunicator,
+                  logger: DebugLogger, on_abort_callback=None, main_process_ref=None):
+         self.watcher = FileWatcher(runtime_attrs, server_communicator, logger,
+                                    on_abort_callback=on_abort_callback, main_process_ref=main_process_ref)
+
+     def start_watching(self, directories: List[str]):
+         """Start watching directories using built-in polling"""
+         for directory in directories:
+             self.watcher.add_watch_directory(directory)
+         self.watcher.start_watching()
+
+     def stop_watching(self):
+         """Stop watching directories"""
+         self.watcher.stop_watching()
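+
+ # Minimal usage sketch (illustrative; runtime_attrs, server_comm, logger and the
+ # watched directory are assumed to be set up elsewhere in the runner package):
+ #
+ #     watcher = FileSystemWatcher(runtime_attrs, server_comm, logger)
+ #     watcher.start_watching(['/work/outputs'])
+ #     ...  # run the solver
+ #     watcher.stop_watching()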
+
@@ -0,0 +1,19 @@
+ from pythreejs import ParametricGeometry, BufferGeometryLoader
+
+ # JavaScript parametric function evaluated by three.js: maps the unit square
+ # (origu, origv) onto a closed surface
+ f = '''
+ function f(origu, origv) {
+     var u = 2*Math.PI*origu
+     var v = 2*Math.PI*origv
+
+     var x = Math.sin(u)
+     var y = Math.cos(v)
+     var z = Math.cos(u+v)
+
+     return new THREE.Vector3(x,y,z)
+ }
+ '''
+ surf_g = ParametricGeometry(func=f)
+
+ loader = BufferGeometryLoader()
+
+ loader.load('pressure.json')
@@ -0,0 +1,29 @@
+ import pymongo
+
+
+ class MongoClientWrapper:
+
+     def __init__(self):
+         self.db = None
+         self._client = None
+
+     @property
+     def client(self):
+         if self._client is None:
+             raise Exception('Cannot access MongoDB client before connecting!')
+         return self._client
+
+     def connect(self, url, dbName):
+         try:
+             self._client = pymongo.MongoClient(url, serverSelectionTimeoutMS=5000)
+
+             db = self._client[dbName]
+             self.db = db
+
+             return db
+         except Exception as ex:
+             # Returns None on failure; callers must check before using the handle
+             print(ex)
+             return None
+
+
+ mongoClientWrapper = MongoClientWrapper()
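+
+ # Minimal usage sketch (illustrative; the URL and database name are assumptions):
+ #
+ #     db = mongoClientWrapper.connect('mongodb://localhost:27017', 'physicsworks')
+ #     if db is not None:
+ #         print(db.list_collection_names())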
@@ -0,0 +1,62 @@
+ import asyncio
+
+ from nats.aio.client import Client as NATS
+ from stan.aio.client import Client as STAN
+
+
+ class NatsClientWrapper:
+
+     def __init__(self):
+         self._nc = None
+         self._client = None
+
+     @property
+     def client(self):
+         if self._client is None:
+             raise Exception('Cannot access NATS client before connecting!')
+         return self._client
+
+     async def disconnected_cb(self):
+         print('Got disconnected!')
+
+     async def reconnected_cb(self):
+         print(f'Got reconnected to {self._nc.connected_url.netloc}')
+
+     # async def error_cb(self, e):
+     #     print(f'There was an error: {e}')
+
+     async def closed_cb(self):
+         print('Connection is closed')
+
+     async def connect(self, url: str, clusterId: str, clientId: str, io_loop=None):
+         # Retry until both the NATS connection and the streaming session succeed
+         while True:
+             try:
+                 nc = NATS()
+                 await nc.connect(
+                     servers=[url],
+                     io_loop=io_loop,
+                     disconnected_cb=self.disconnected_cb,
+                     reconnected_cb=self.reconnected_cb,
+                     ping_interval=60,
+                     # error_cb=self.error_cb,
+                     closed_cb=self.closed_cb)
+
+                 self._nc = nc
+
+                 # Start a session with the NATS Streaming cluster
+                 sc = STAN()
+                 await sc.connect(clusterId, clientId, nats=nc)
+
+                 self._client = sc
+
+                 break
+             except Exception as ex:
+                 # Could not connect to any server in the cluster; retry after a delay
+                 print(ex)
+                 await asyncio.sleep(5)
+
+
+ natsClientWrapper = NatsClientWrapper()
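+
+ # Minimal usage sketch (illustrative; the server URL, cluster id, client id and
+ # subject are assumptions):
+ #
+ #     async def main():
+ #         await natsClientWrapper.connect('nats://localhost:4222', 'test-cluster', 'runner-1')
+ #         await natsClientWrapper.client.publish('runs', b'hello')
+ #
+ #     asyncio.run(main())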