speedy-utils 1.0.5__py3-none-any.whl → 1.0.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3,32 +3,30 @@ import random
 from collections import defaultdict
 import time
 from tabulate import tabulate
-import logging
 import contextlib
-import aiohttp # <-- Import aiohttp
+import aiohttp  # <-- Import aiohttp
+from loguru import logger
 
 # --- Configuration ---
-LOAD_BALANCER_HOST = '0.0.0.0'
+LOAD_BALANCER_HOST = "0.0.0.0"
 LOAD_BALANCER_PORT = 8008
 
-SCAN_TARGET_HOST = 'localhost'
+SCAN_TARGET_HOST = "localhost"
 SCAN_PORT_START = 8150
-SCAN_PORT_END = 8170 # Inclusive
+SCAN_PORT_END = 8170  # Inclusive
 SCAN_INTERVAL = 30
 # Timeout applies to the HTTP health check request now
-HEALTH_CHECK_TIMEOUT = 2.0 # Increased slightly for HTTP requests
+HEALTH_CHECK_TIMEOUT = 2.0  # Increased slightly for HTTP requests
 
 STATUS_PRINT_INTERVAL = 5
 BUFFER_SIZE = 4096
 
-# Setup basic logging
-logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
-
 # --- Global Shared State ---
 available_servers = []
 connection_counts = defaultdict(int)
 state_lock = asyncio.Lock()
 
+
 # --- Helper Functions --- (relay_data and safe_close_writer remain the same)
 async def relay_data(reader, writer, direction):
     """Reads data from reader and writes to writer until EOF or error."""
@@ -36,25 +34,28 @@ async def relay_data(reader, writer, direction):
         while True:
             data = await reader.read(BUFFER_SIZE)
             if not data:
-                logging.debug(f"EOF received on {direction} stream.")
+                logger.debug(f"EOF received on {direction} stream.")
                 break
             writer.write(data)
             await writer.drain()
     except ConnectionResetError:
-        logging.warning(f"Connection reset on {direction} stream.")
+        logger.warning(f"Connection reset on {direction} stream.")
     except asyncio.CancelledError:
-        logging.debug(f"Relay task cancelled for {direction}.")
+        logger.debug(f"Relay task cancelled for {direction}.")
         raise
     except Exception as e:
-        logging.warning(f"Error during data relay ({direction}): {e}")
+        logger.warning(f"Error during data relay ({direction}): {e}")
     finally:
         if not writer.is_closing():
             try:
                 writer.close()
                 await writer.wait_closed()
-                logging.debug(f"Closed writer for {direction}")
+                logger.debug(f"Closed writer for {direction}")
             except Exception as close_err:
-                logging.debug(f"Error closing writer for {direction} (might be expected): {close_err}")
+                logger.debug(
+                    f"Error closing writer for {direction} (might be expected): {close_err}"
+                )
+
 
 @contextlib.asynccontextmanager
 async def safe_close_writer(writer):
@@ -67,10 +68,12 @@ async def safe_close_writer(writer):
                 writer.close()
                 await writer.wait_closed()
             except Exception as e:
-                logging.debug(f"Error closing writer in context manager: {e}")
+                logger.debug(f"Error closing writer in context manager: {e}")
+
 
 # --- Server Scanning and Health Check (Modified) ---
 
+
 async def check_server_health(session, host, port):
     """Performs an HTTP GET request to the /health endpoint."""
     url = f"http://{host}:{port}/health"
@@ -79,34 +82,41 @@ async def check_server_health(session, host, port):
         async with session.get(url, timeout=HEALTH_CHECK_TIMEOUT) as response:
             # Check for a successful status code (2xx range)
             if 200 <= response.status < 300:
-                logging.debug(f"Health check success for {url} (Status: {response.status})")
+                logger.debug(
+                    f"Health check success for {url} (Status: {response.status})"
+                )
                 # Ensure the connection is released back to the pool
                 await response.release()
                 return True
             else:
-                logging.debug(f"Health check failed for {url} (Status: {response.status})")
+                logger.debug(
+                    f"Health check failed for {url} (Status: {response.status})"
+                )
                 await response.release()
                 return False
     except asyncio.TimeoutError:
-        logging.debug(f"Health check HTTP request timeout for {url}")
+        logger.debug(f"Health check HTTP request timeout for {url}")
        return False
     except aiohttp.ClientConnectorError as e:
         # Handles connection refused, DNS errors etc. - server likely down
-        logging.debug(f"Health check connection error for {url}: {e}")
+        logger.debug(f"Health check connection error for {url}: {e}")
         return False
     except aiohttp.ClientError as e:
         # Catch other potential client errors (e.g., invalid URL structure, too many redirects)
-        logging.warning(f"Health check client error for {url}: {e}")
+        logger.warning(f"Health check client error for {url}: {e}")
         return False
     except Exception as e:
         # Catch any other unexpected errors during the check
-        logging.error(f"Unexpected health check error for {url}: {e}")
+        logger.error(f"Unexpected health check error for {url}: {e}")
         return False
 
+
 async def scan_and_update_servers():
     """Periodically scans ports using HTTP /health check and updates available servers."""
     global available_servers
-    logging.debug(f"Starting server scan task (HTTP GET /health on Ports {SCAN_PORT_START}-{SCAN_PORT_END} every {SCAN_INTERVAL}s)")
+    logger.debug(
+        f"Starting server scan task (HTTP GET /health on Ports {SCAN_PORT_START}-{SCAN_PORT_END} every {SCAN_INTERVAL}s)"
+    )
     while True:
         try:
             current_scan_results = []
@@ -117,23 +127,31 @@ async def scan_and_update_servers():
             async with aiohttp.ClientSession() as session:
                 # Create health check tasks for all ports, passing the shared session
                 for port in ports_to_scan:
-                    task = asyncio.create_task(check_server_health(session, SCAN_TARGET_HOST, port))
+                    task = asyncio.create_task(
+                        check_server_health(session, SCAN_TARGET_HOST, port)
+                    )
                     scan_tasks.append((task, port))
 
                 # Wait for all health checks to complete
                 # return_exceptions=True prevents gather from stopping if one check fails
-                await asyncio.gather(*(task for task, port in scan_tasks), return_exceptions=True)
+                await asyncio.gather(
+                    *(task for task, port in scan_tasks), return_exceptions=True
+                )
 
             # Collect results from completed tasks
             for task, port in scan_tasks:
                 try:
                     # Check if task finished, wasn't cancelled, and returned True
-                    if task.done() and not task.cancelled() and task.result() is True:
+                    if (
+                        task.done()
+                        and not task.cancelled()
+                        and task.result() is True
+                    ):
                         current_scan_results.append((SCAN_TARGET_HOST, port))
                 except Exception as e:
-                    # Log errors from the health check task itself if gather didn't catch them
-                    logging.error(f"Error retrieving health check result for port {port}: {e}")
-
+                    logger.error(
+                        f"Error retrieving health check result for port {port}: {e}"
+                    )
             # --- Update Shared State (Locked) ---
             async with state_lock:
                 previous_servers = set(available_servers)
@@ -143,27 +161,33 @@ async def scan_and_update_servers():
                 removed = previous_servers - current_set
 
                 if added:
-                    logging.info(f"Servers added (passed /health check): {sorted(list(added))}")
+                    logger.info(
+                        f"Servers added (passed /health check): {sorted(list(added))}"
+                    )
                 if removed:
-                    logging.info(f"Servers removed (failed /health check or stopped): {sorted(list(removed))}")
+                    logger.info(
+                        f"Servers removed (failed /health check or stopped): {sorted(list(removed))}"
+                    )
                     for server in removed:
                         if server in connection_counts:
                             del connection_counts[server]
-                            logging.debug(f"Removed connection count entry for unavailable server {server}")
+                            logger.debug(
+                                f"Removed connection count entry for unavailable server {server}"
+                            )
 
                 available_servers = sorted(list(current_set))
                 for server in available_servers:
                     if server not in connection_counts:
                         connection_counts[server] = 0
 
-            logging.debug(f"Scan complete. Active servers: {available_servers}")
+            logger.debug(f"Scan complete. Active servers: {available_servers}")
 
         except asyncio.CancelledError:
-            logging.info("Server scan task cancelled.")
-            break
+            logger.info("Server scan task cancelled.")
+            break
         except Exception as e:
-            logging.error(f"Error in scan_and_update_servers loop: {e}")
-            await asyncio.sleep(SCAN_INTERVAL / 2) # Avoid tight loop on error
+            logger.error(f"Error in scan_and_update_servers loop: {e}")
+            await asyncio.sleep(SCAN_INTERVAL / 2)  # Avoid tight loop on error
 
         await asyncio.sleep(SCAN_INTERVAL)
 
@@ -171,8 +195,8 @@ async def scan_and_update_servers():
 # --- Core Load Balancer Logic (handle_client remains the same) ---
 async def handle_client(client_reader, client_writer):
     """Handles a single client connection."""
-    client_addr = client_writer.get_extra_info('peername')
-    logging.info(f"Accepted connection from {client_addr}")
+    client_addr = client_writer.get_extra_info("peername")
+    logger.info(f"Accepted connection from {client_addr}")
 
     backend_server = None
     backend_reader = None
@@ -182,15 +206,24 @@ async def handle_client(client_reader, client_writer):
     try:
         # --- Select Backend Server (Least Connections from Available) ---
         selected_server = None
-        async with state_lock: # Lock to safely access available_servers and connection_counts
+        async with (
+            state_lock
+        ):  # Lock to safely access available_servers and connection_counts
             if not available_servers:
-                logging.warning(f"No backend servers available (failed health checks?) for client {client_addr}. Closing connection.")
-                async with safe_close_writer(client_writer): pass
+                logger.warning(
+                    f"No backend servers available (failed health checks?) for client {client_addr}. Closing connection."
+                )
+                async with safe_close_writer(client_writer):
+                    pass
                 return
 
-            min_connections = float('inf')
+            min_connections = float("inf")
             least_used_available_servers = []
-            for server in available_servers: # Iterate only over servers that passed health check
+            for (
+                server
+            ) in (
+                available_servers
+            ):  # Iterate only over servers that passed health check
                 count = connection_counts.get(server, 0)
                 if count < min_connections:
                     min_connections = count
@@ -203,75 +236,114 @@ async def handle_client(client_reader, client_writer):
                 connection_counts[selected_server] += 1
                 backend_server = selected_server
                 server_selected = True
-                logging.info(f"Routing {client_addr} to {backend_server} (Current connections: {connection_counts[backend_server]})")
+                logger.info(
+                    f"Routing {client_addr} to {backend_server} (Current connections: {connection_counts[backend_server]})"
+                )
             else:
-                logging.error(f"Logic error: No server chosen despite available servers list not being empty for {client_addr}.")
-                async with safe_close_writer(client_writer): pass
-                return
+                logger.error(
+                    f"Logic error: No server chosen despite available servers list not being empty for {client_addr}."
+                )
+                async with safe_close_writer(client_writer):
+                    pass
+                return
 
         # --- Connect to Backend Server ---
         if not backend_server:
-            logging.error(f"No backend server selected for {client_addr} before connection attempt.")
-            async with safe_close_writer(client_writer): pass
-            server_selected = False
-            return
-
+            logger.error(
+                f"No backend server selected for {client_addr} before connection attempt."
+            )
+            async with safe_close_writer(client_writer):
+                pass
+            server_selected = False
+            return
         try:
-            logging.debug(f"Attempting connection to backend {backend_server} for {client_addr}")
+            logger.debug(
+                f"Attempting connection to backend {backend_server} for {client_addr}"
+            )
             backend_reader, backend_writer = await asyncio.open_connection(
                 backend_server[0], backend_server[1]
             )
-            logging.debug(f"Successfully connected to backend {backend_server} for {client_addr}")
+            logger.debug(
+                f"Successfully connected to backend {backend_server} for {client_addr}"
+            )
 
         # Handle connection failure AFTER selection (server might go down between health check and selection)
         except ConnectionRefusedError:
-            logging.error(f"Connection refused by selected backend server {backend_server} for {client_addr}")
-            async with state_lock: # Decrement count under lock
-                if backend_server in connection_counts and connection_counts[backend_server] > 0: connection_counts[backend_server] -= 1
-            server_selected = False # Mark failure
-            async with safe_close_writer(client_writer): pass
+            logger.error(
+                f"Connection refused by selected backend server {backend_server} for {client_addr}"
+            )
+            async with state_lock:  # Decrement count under lock
+                if (
+                    backend_server in connection_counts
+                    and connection_counts[backend_server] > 0
+                ):
+                    connection_counts[backend_server] -= 1
+            server_selected = False  # Mark failure
+            async with safe_close_writer(client_writer):
+                pass
             return
         except Exception as e:
-            logging.error(f"Failed to connect to selected backend server {backend_server} for {client_addr}: {e}")
-            async with state_lock: # Decrement count under lock
-                if backend_server in connection_counts and connection_counts[backend_server] > 0: connection_counts[backend_server] -= 1
-            server_selected = False # Mark failure
-            async with safe_close_writer(client_writer): pass
+            logger.error(
+                f"Failed to connect to selected backend server {backend_server} for {client_addr}: {e}"
+            )
+            async with state_lock:  # Decrement count under lock
+                if (
+                    backend_server in connection_counts
+                    and connection_counts[backend_server] > 0
+                ):
+                    connection_counts[backend_server] -= 1
+            server_selected = False  # Mark failure
+            async with safe_close_writer(client_writer):
+                pass
             return
 
         # --- Relay Data Bidirectionally ---
-        async with safe_close_writer(backend_writer): # Ensure backend writer is closed
+        async with safe_close_writer(backend_writer):  # Ensure backend writer is closed
             client_to_backend = asyncio.create_task(
-                relay_data(client_reader, backend_writer, f"{client_addr} -> {backend_server}")
+                relay_data(
+                    client_reader, backend_writer, f"{client_addr} -> {backend_server}"
+                )
             )
             backend_to_client = asyncio.create_task(
-                relay_data(backend_reader, client_writer, f"{backend_server} -> {client_addr}")
+                relay_data(
+                    backend_reader, client_writer, f"{backend_server} -> {client_addr}"
+                )
             )
             done, pending = await asyncio.wait(
-                [client_to_backend, backend_to_client], return_when=asyncio.FIRST_COMPLETED
+                [client_to_backend, backend_to_client],
+                return_when=asyncio.FIRST_COMPLETED,
             )
-            for task in pending: task.cancel()
+            for task in pending:
+                task.cancel()
             for task in done:
-                with contextlib.suppress(asyncio.CancelledError):
-                    if task.exception(): logging.warning(f"Relay task finished with error: {task.exception()}")
+                with contextlib.suppress(asyncio.CancelledError):
+                    if task.exception():
+                        logger.warning(
+                            f"Relay task finished with error: {task.exception()}"
+                        )
 
     except asyncio.CancelledError:
-        logging.info(f"Client handler for {client_addr} cancelled.")
+        logger.info(f"Client handler for {client_addr} cancelled.")
     except Exception as e:
-        logging.error(f"Error handling client {client_addr}: {e}")
+        logger.error(f"Error handling client {client_addr}: {e}")
     finally:
-        logging.info(f"Closing connection for {client_addr}")
+        logger.info(f"Closing connection for {client_addr}")
         # Decrement connection count only if we successfully selected/incremented
         if backend_server and server_selected:
             async with state_lock:
                 if backend_server in connection_counts:
                     if connection_counts[backend_server] > 0:
                         connection_counts[backend_server] -= 1
-                        logging.info(f"Connection closed for {client_addr}. Backend {backend_server} connections: {connection_counts[backend_server]}")
+                        logger.info(
+                            f"Connection closed for {client_addr}. Backend {backend_server} connections: {connection_counts[backend_server]}"
+                        )
                     else:
-                        logging.warning(f"Attempted to decrement count below zero for {backend_server} on close")
+                        logger.warning(
+                            f"Attempted to decrement count below zero for {backend_server} on close"
+                        )
                         connection_counts[backend_server] = 0
 
+
 # --- Status Reporting Task (print_status_periodically remains the same) ---
 async def print_status_periodically():
     """Periodically prints the connection status based on available servers."""
@@ -303,13 +375,19 @@ async def print_status_periodically():
         try:
             table = tabulate(table_data, headers=headers, tablefmt="grid")
             print("\n----- Load Balancer Status -----")
-            print(f"Scanning Ports: {SCAN_PORT_START}-{SCAN_PORT_END} on {SCAN_TARGET_HOST} (using /health endpoint)")
-            print(f"Scan Interval: {SCAN_INTERVAL}s | Health Check Timeout: {HEALTH_CHECK_TIMEOUT}s")
+            print(
+                f"Scanning Ports: {SCAN_PORT_START}-{SCAN_PORT_END} on {SCAN_TARGET_HOST} (using /health endpoint)"
+            )
+            print(
+                f"Scan Interval: {SCAN_INTERVAL}s | Health Check Timeout: {HEALTH_CHECK_TIMEOUT}s"
+            )
             print(table)
-            print(f"Total Active Connections (on available servers): {total_connections}")
+            print(
+                f"Total Active Connections (on available servers): {total_connections}"
+            )
             print("------------------------------\n")
         except Exception as e:
-            logging.error(f"Error printing status table: {e}")
+            logger.error(f"Error printing status table: {e}")
 
 
 # --- Main Execution (main remains the same) ---
@@ -321,33 +399,37 @@ async def main():
         handle_client, LOAD_BALANCER_HOST, LOAD_BALANCER_PORT
     )
 
-    addrs = ', '.join(str(sock.getsockname()) for sock in server.sockets)
-    logging.info(f'Load balancer serving on {addrs}')
-    logging.info(f'Dynamically discovering servers via HTTP /health on {SCAN_TARGET_HOST}:{SCAN_PORT_START}-{SCAN_PORT_END}')
+    addrs = ", ".join(str(sock.getsockname()) for sock in server.sockets)
+    logger.info(f"Load balancer serving on {addrs}")
+    logger.info(
+        f"Dynamically discovering servers via HTTP /health on {SCAN_TARGET_HOST}:{SCAN_PORT_START}-{SCAN_PORT_END}"
+    )
 
     async with server:
         try:
             await server.serve_forever()
         except asyncio.CancelledError:
-            logging.info("Load balancer server shutting down.")
+            logger.info("Load balancer server shutting down.")
         finally:
-            logging.info("Cancelling background tasks...")
+            logger.info("Cancelling background tasks...")
             scan_task.cancel()
             status_task.cancel()
             try:
                 await asyncio.gather(scan_task, status_task, return_exceptions=True)
             except asyncio.CancelledError:
                 pass
-            logging.info("Background tasks finished.")
+            logger.info("Background tasks finished.")
+
 
 def run_load_balancer():
     # Make sure to install aiohttp: pip install aiohttp
     try:
         asyncio.run(main())
     except KeyboardInterrupt:
-        logging.info("Shutdown requested by user.")
+        logger.info("Shutdown requested by user.")
     except Exception as e:
-        logging.critical(f"Critical error in main execution: {e}")
+        logger.critical(f"Critical error in main execution: {e}")
+
 
 if __name__ == "__main__":
-    run_load_balancer()
+    run_load_balancer()
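For context on what this load balancer expects from its backends: check_server_health issues GET http://localhost:&lt;port&gt;/health for every port from 8150 to 8170 and treats any 2xx response as healthy. The sketch below is a hypothetical minimal backend (not part of speedy-utils) built on aiohttp, which the script above already imports; the port is an arbitrary pick from the scanned range.

# Hypothetical example only -- a minimal backend the scanner above would
# discover. It answers GET /health with a 2xx response on a port inside the
# scanned range (8150-8170 on localhost).
from aiohttp import web


async def health(request):
    # Any 2xx status satisfies check_server_health()
    return web.json_response({"status": "ok"})


app = web.Application()
app.router.add_get("/health", health)

if __name__ == "__main__":
    web.run_app(app, host="localhost", port=8150)  # any port in SCAN_PORT_START..SCAN_PORT_END

Once at least one such backend passes a scan, client connections made to the load balancer on port 8008 are proxied to whichever discovered backend currently has the fewest open connections.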