cafs-cache-cdn-client 1.0.8__tar.gz → 1.0.9__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (18)
  1. {cafs_cache_cdn_client-1.0.8 → cafs_cache_cdn_client-1.0.9}/PKG-INFO +5 -2
  2. {cafs_cache_cdn_client-1.0.8 → cafs_cache_cdn_client-1.0.9}/README.md +4 -1
  3. {cafs_cache_cdn_client-1.0.8 → cafs_cache_cdn_client-1.0.9}/cafs_cache_cdn_client/cafs/README.md +6 -1
  4. {cafs_cache_cdn_client-1.0.8 → cafs_cache_cdn_client-1.0.9}/cafs_cache_cdn_client/cafs/blob/package.py +18 -6
  5. {cafs_cache_cdn_client-1.0.8 → cafs_cache_cdn_client-1.0.9}/cafs_cache_cdn_client/cafs/client.py +79 -14
  6. {cafs_cache_cdn_client-1.0.8 → cafs_cache_cdn_client-1.0.9}/cafs_cache_cdn_client/client.py +69 -3
  7. {cafs_cache_cdn_client-1.0.8 → cafs_cache_cdn_client-1.0.9}/cafs_cache_cdn_client/repo/client.py +10 -1
  8. {cafs_cache_cdn_client-1.0.8 → cafs_cache_cdn_client-1.0.9}/pyproject.toml +1 -1
  9. {cafs_cache_cdn_client-1.0.8 → cafs_cache_cdn_client-1.0.9}/cafs_cache_cdn_client/__init__.py +0 -0
  10. {cafs_cache_cdn_client-1.0.8 → cafs_cache_cdn_client-1.0.9}/cafs_cache_cdn_client/cafs/__init__.py +0 -0
  11. {cafs_cache_cdn_client-1.0.8 → cafs_cache_cdn_client-1.0.9}/cafs_cache_cdn_client/cafs/blob/__init__.py +0 -0
  12. {cafs_cache_cdn_client-1.0.8 → cafs_cache_cdn_client-1.0.9}/cafs_cache_cdn_client/cafs/blob/hash_.py +0 -0
  13. {cafs_cache_cdn_client-1.0.8 → cafs_cache_cdn_client-1.0.9}/cafs_cache_cdn_client/cafs/blob/utils.py +0 -0
  14. {cafs_cache_cdn_client-1.0.8 → cafs_cache_cdn_client-1.0.9}/cafs_cache_cdn_client/cafs/exceptions.py +0 -0
  15. {cafs_cache_cdn_client-1.0.8 → cafs_cache_cdn_client-1.0.9}/cafs_cache_cdn_client/cafs/types.py +0 -0
  16. {cafs_cache_cdn_client-1.0.8 → cafs_cache_cdn_client-1.0.9}/cafs_cache_cdn_client/file_utils.py +0 -0
  17. {cafs_cache_cdn_client-1.0.8 → cafs_cache_cdn_client-1.0.9}/cafs_cache_cdn_client/repo/__init__.py +0 -0
  18. {cafs_cache_cdn_client-1.0.8 → cafs_cache_cdn_client-1.0.9}/cafs_cache_cdn_client/repo/datatypes.py +0 -0

--- cafs_cache_cdn_client-1.0.8/PKG-INFO
+++ cafs_cache_cdn_client-1.0.9/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: cafs-cache-cdn-client
-Version: 1.0.8
+Version: 1.0.9
 Summary: Async Cache CDN client implementation
 Keywords: cafs,cache
 Author: Konstantin Belov
@@ -49,7 +49,8 @@ async def main():
     # The connection_per_cafs_server parameter controls concurrency
     client = CacheCdnClient(
         'http://cache-server.example.com:8300',
-        connection_per_cafs_server=10
+        connection_per_cafs_server=10,
+        verbose_debug=True,
     )
 
     # Use as an async context manager to ensure proper resource cleanup
@@ -89,6 +90,8 @@ if __name__ == '__main__':
 - **Constructor**: `CacheCdnClient(server: str, connection_per_cafs_server: int = 1)`
   - `server`: URL of the cache server
   - `connection_per_cafs_server`: Number of concurrent connections per CAFS server
+  - `logger`: Optional logger for custom logging
+  - `verbose_debug`: Enable verbose debug logging (default: `False`)
 
 - **Methods**:
   - `push(repo: str, ref: str, directory: Path | str, ttl_hours: int = 0, comment: str | None = None, compression: CompressionT = CompressionT.NONE)` - Push a local directory to cache

--- cafs_cache_cdn_client-1.0.8/README.md
+++ cafs_cache_cdn_client-1.0.9/README.md
@@ -29,7 +29,8 @@ async def main():
     # The connection_per_cafs_server parameter controls concurrency
     client = CacheCdnClient(
         'http://cache-server.example.com:8300',
-        connection_per_cafs_server=10
+        connection_per_cafs_server=10,
+        verbose_debug=True,
     )
 
     # Use as an async context manager to ensure proper resource cleanup
@@ -69,6 +70,8 @@ if __name__ == '__main__':
 - **Constructor**: `CacheCdnClient(server: str, connection_per_cafs_server: int = 1)`
   - `server`: URL of the cache server
   - `connection_per_cafs_server`: Number of concurrent connections per CAFS server
+  - `logger`: Optional logger for custom logging
+  - `verbose_debug`: Enable verbose debug logging (default: `False`)
 
 - **Methods**:
   - `push(repo: str, ref: str, directory: Path | str, ttl_hours: int = 0, comment: str | None = None, compression: CompressionT = CompressionT.NONE)` - Push a local directory to cache
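
For context, here is a minimal sketch of how the two parameters documented above could be wired together. It is illustrative only: the server URL, repo, and ref are placeholders, the logger name is hypothetical, and the top-level import assumes the package re-exports `CacheCdnClient` as its README examples do.

```python
import asyncio
import logging

from cafs_cache_cdn_client import CacheCdnClient

logging.basicConfig(level=logging.INFO)


async def main() -> None:
    client = CacheCdnClient(
        'http://cache-server.example.com:8300',
        connection_per_cafs_server=10,
        logger=logging.getLogger('my-app.cache'),  # hypothetical logger name
        verbose_debug=False,  # keep per-chunk debug records off in production
    )
    # Use as an async context manager, as the README recommends
    async with client:
        exists = await client.check('my-repo', 'my-ref')  # placeholder repo/ref
        print('cached:', exists)


if __name__ == '__main__':
    asyncio.run(main())
```
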

--- cafs_cache_cdn_client-1.0.8/cafs_cache_cdn_client/cafs/README.md
+++ cafs_cache_cdn_client-1.0.9/cafs_cache_cdn_client/cafs/README.md
@@ -11,17 +11,22 @@ Below is a complete example demonstrating all major functionality of the CAFSCli
 
 ```python
 import asyncio
+import logging
 from pathlib import Path
 from cafs_cache_cdn_client.cafs import CAFSClient, CompressionT
 
 
+# Configure logging to see detailed operation information
+logging.basicConfig(level=logging.DEBUG)
+
 async def cafs_client_demo():
 
     client = CAFSClient(
         server_root='/data',
         servers=['localhost', 'example.com:2403'],
         connection_per_server=2,
-        connect_timeout=5.0
+        connect_timeout=5.0,
+        verbose_debug=True,  # Enable verbose debug logging
     )
 
     async with client:
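
The same pair of parameters lands on the lower-level `CAFSClient` (see the cafs/client.py hunks below). A sketch, assuming only what this diff shows about the constructor, of routing the client's output through a dedicated logger instead of configuring the root logger; server addresses and logger names are illustrative:

```python
import asyncio
import logging

from cafs_cache_cdn_client.cafs import CAFSClient

# A namespaced logger keeps library output separate from application logs
logger = logging.getLogger('my-app.cafs')
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())


async def main() -> None:
    client = CAFSClient(
        server_root='/data',
        servers=['localhost'],
        connection_per_server=2,
        logger=logger,        # parameter added in 1.0.9
        verbose_debug=True,   # emit per-chunk debug records
    )
    async with client:
        ...  # pull/stream calls as in the README example above


if __name__ == '__main__':
    asyncio.run(main())
```
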

--- cafs_cache_cdn_client-1.0.8/cafs_cache_cdn_client/cafs/blob/package.py
+++ cafs_cache_cdn_client-1.0.9/cafs_cache_cdn_client/cafs/blob/package.py
@@ -53,6 +53,7 @@ class Decompressor(Protocol):
 class Packer:
     logger: Logger | LoggerAdapter
     chunk_size: int
+    verbose_debug: bool
 
     _reader: 'AsyncReader'
     _eof_reached: bool
@@ -65,10 +66,12 @@
         compression: CompressionT = CompressionT.NONE,
         chunk_size: int = DEFAULT_CHUNK_SIZE,
         logger: Logger | LoggerAdapter | None = None,
+        verbose_debug: bool = False,
     ) -> None:
         self._reader = reader
         self._eof_reached = False
         self.chunk_size = chunk_size
+        self.verbose_debug = verbose_debug
 
         self._compressor = None
         if compression == CompressionT.ZLIB:
@@ -104,14 +107,16 @@
 
     async def _fill_buffer(self) -> None:
         chunk = await self._reader.read(self.chunk_size)
-        self.logger.debug('Filling buffer with chunk of %d bytes', len(chunk))
+        if self.verbose_debug:
+            self.logger.debug('Filling buffer with chunk of %d bytes', len(chunk))
 
         if not chunk:
             self._eof_reached = True
             self.logger.debug('EOF reached')
             if self._compressor:
                 data = self._compressor.flush()
-                self.logger.debug('Flushing compressor: %d bytes', len(data))
+                if self.verbose_debug:
+                    self.logger.debug('Flushing compressor: %d bytes', len(data))
                 self._buffer.extend(data)
             return
 
@@ -120,13 +125,15 @@
             return
 
         data = self._compressor.compress(chunk)
-        self.logger.debug('Got %d bytes from compressor', len(data))
+        if self.verbose_debug:
+            self.logger.debug('Got %d bytes from compressor', len(data))
         self._buffer.extend(data)
 
 
 class Unpacker:
     logger: Logger | LoggerAdapter
     chunk_size: int
+    verbose_debug: bool
 
     _writer: 'AsyncWriter'
     _header: bytearray
@@ -138,6 +145,7 @@
         writer: 'AsyncWriter',
         chunk_size: int = DEFAULT_CHUNK_SIZE,
         logger: Logger | LoggerAdapter | None = None,
+        verbose_debug: bool = False,
     ) -> None:
         self._writer = writer
         self._buffer = bytearray()
@@ -145,6 +153,7 @@
         self._header = bytearray()
         self.chunk_size = chunk_size
         self.logger = logger or module_logger
+        self.verbose_debug = verbose_debug
 
     async def write(self, data: bytes, /) -> None:
         if not data:
@@ -157,7 +166,8 @@
     async def flush(self) -> None:
         if self._decompressor:
             data = self._decompressor.flush()
-            self.logger.debug('Flushing decompressor: %d bytes', len(data))
+            if self.verbose_debug:
+                self.logger.debug('Flushing decompressor: %d bytes', len(data))
             self._buffer.extend(data)
         if self._buffer:
             await self._writer.write(self._buffer)
@@ -165,7 +175,8 @@
         await self._writer.flush()
 
     async def _fill_buffer(self, data: bytes) -> None:
-        self.logger.debug('Filling buffer with chunk of %d bytes', len(data))
+        if self.verbose_debug:
+            self.logger.debug('Filling buffer with chunk of %d bytes', len(data))
         if len(self._header) < FULL_HEADER_SIZE:
             header_offset = FULL_HEADER_SIZE - len(self._header)
             self._header.extend(data[:header_offset])
@@ -194,5 +205,6 @@
             return
 
         data = self._decompressor.decompress(data)
-        self.logger.debug('Got %d bytes from decompressor', len(data))
+        if self.verbose_debug:
+            self.logger.debug('Got %d bytes from decompressor', len(data))
         self._buffer.extend(data)
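
The pattern across these Packer/Unpacker hunks is consistent: per-chunk debug records, which fire once per read or write, are gated behind `verbose_debug`, while one-off records such as 'EOF reached' stay unconditional. A standalone sketch of the idea, with illustrative names not taken from the package:

```python
import logging

logger = logging.getLogger(__name__)


class ChunkProcessor:
    """Illustrative only: the verbose_debug gate used by Packer/Unpacker."""

    def __init__(self, verbose_debug: bool = False) -> None:
        self.verbose_debug = verbose_debug

    def process(self, chunk: bytes) -> None:
        # Guarding the call skips even the argument setup on hot paths;
        # the logger's own level check would reject the record much later.
        if self.verbose_debug:
            logger.debug('Processing chunk of %d bytes', len(chunk))
        # ... actual per-chunk work ...
```
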

--- cafs_cache_cdn_client-1.0.8/cafs_cache_cdn_client/cafs/client.py
+++ cafs_cache_cdn_client-1.0.9/cafs_cache_cdn_client/cafs/client.py
@@ -8,7 +8,7 @@ from collections.abc import (
 )
 from contextlib import asynccontextmanager
 from enum import Enum
-from logging import LoggerAdapter, getLogger
+from logging import Logger, LoggerAdapter, getLogger
 from pathlib import Path
 from typing import Any, Self, TypeVar
 
@@ -80,7 +80,8 @@ class CAFSConnection:
     port: int
     timeout: float
     server_root: bytes
-    module_logger: ConnectionLoggerAdapter
+    logger: ConnectionLoggerAdapter
+    verbose_debug: bool
 
     _reader: asyncio.StreamReader | None = None
     _writer: asyncio.StreamWriter | None = None
@@ -91,6 +92,8 @@
         host: str,
         port: int = CAFS_DEFAULT_PORT,
         timeout: float = DEFAULT_CONNECT_TIMEOUT,
+        logger: Logger | LoggerAdapter | None = None,
+        verbose_debug: bool = False,
     ) -> None:
         self.server_root = server_root.encode('utf-8')
         self.host = host
@@ -98,8 +101,10 @@
         self.timeout = timeout
         self.is_connected = False
         self.logger = ConnectionLoggerAdapter(
-            module_logger, {'host': host, 'port': port, 'connection_id': id(self)}
+            logger or module_logger,
+            {'host': host, 'port': port, 'connection_id': id(self)},
         )
+        self.verbose_debug = verbose_debug
 
     async def connect(self) -> None:
         if self.is_connected:
@@ -219,9 +224,10 @@
         chunk = await reader.read(STREAM_MAX_CHUNK_SIZE)
         while chunk:
             size_header = len(chunk).to_bytes(2, 'little')
-            self.logger.debug(
-                'Streaming chunk of size: %d (%s)', len(chunk), size_header.hex()
-            )
+            if self.verbose_debug:
+                self.logger.debug(
+                    'Streaming chunk of size: %d (%s)', len(chunk), size_header.hex()
+                )
             await self._send(size_header)
             await self._send(chunk)
             chunk = await reader.read(STREAM_MAX_CHUNK_SIZE)
@@ -262,10 +268,12 @@
 
         while received < blob_size:
             chunk_size = min(STREAM_MAX_CHUNK_SIZE, blob_size - received)
-            self.logger.debug('Pulling chunk of size: %d', chunk_size)
+            if self.verbose_debug:
+                self.logger.debug('Pulling chunk of size: %d', chunk_size)
             chunk = await self._receive(chunk_size)
             received += chunk_size
-            self.logger.debug('Received %d bytes', len(chunk))
+            if self.verbose_debug:
+                self.logger.debug('Received %d bytes', len(chunk))
             await writer.write(chunk)
 
         await writer.flush()
@@ -278,6 +286,8 @@ class ConnectionPool:
     server_root: str
     servers: set[tuple[str, int]]
     connection_per_server: int
+    logger: Logger | LoggerAdapter
+    verbose_debug: bool
 
     _lock: asyncio.Lock
     _connections: set[CAFSConnection]
@@ -290,6 +300,8 @@
         servers: Collection[tuple[str, int]],
         connection_per_server: int = 1,
         connect_timeout: float = DEFAULT_CONNECT_TIMEOUT,
+        logger: Logger | LoggerAdapter | None = None,
+        verbose_debug: bool = False,
     ) -> None:
         self.server_root = server_root
         self.connect_timeout = connect_timeout
@@ -300,26 +312,40 @@
         self._connection_queue = asyncio.Queue()
         self._lock = asyncio.Lock()
         self._close_event = asyncio.Event()
+        self.logger = logger or module_logger
+        self.verbose_debug = verbose_debug
 
     async def get_connection_count(self) -> int:
         async with self._lock:
             return len(self._connections)
 
     async def initialize(self) -> None:
+        self.logger.debug(
+            'Initializing connection pool with %d servers (%d connections each)',
+            len(self.servers),
+            self.connection_per_server,
+        )
         for server in self.servers:
             host, port = server
             for _ in range(self.connection_per_server):
                 conn = CAFSConnection(
-                    self.server_root, host, port, timeout=self.connect_timeout
+                    self.server_root,
+                    host,
+                    port,
+                    timeout=self.connect_timeout,
+                    logger=self.logger,
+                    verbose_debug=self.verbose_debug,
                 )
                 self._connections.add(conn)
                 await self._connection_queue.put(conn)
+        self.logger.debug('Connection pool initialized')
 
     async def _get_connection(self) -> CAFSConnection:
         if self._close_event.is_set():
             raise EmptyConnectionPoolError()
         get_task = asyncio.create_task(self._connection_queue.get())
         close_task = asyncio.create_task(self._close_event.wait())
+        self.logger.debug('Waiting for connection')
         _, pending = await asyncio.wait(
             [get_task, close_task], return_when=asyncio.FIRST_COMPLETED
         )
@@ -330,19 +356,25 @@
         if get_task in pending:
             raise EmptyConnectionPoolError()
 
-        return get_task.result()
+        conn = get_task.result()
+        self.logger.debug('Got connection %s', id(conn))
+        return conn
 
     async def _release_connection(self, conn: CAFSConnection) -> None:
+        self.logger.debug('Releasing connection %s', id(conn))
         await self._connection_queue.put(conn)
 
     async def _delete_connection(self, conn: CAFSConnection) -> None:
+        self.logger.debug('Deleting connection %s', id(conn))
         await conn.disconnect()
         async with self._lock:
             self._connections.remove(conn)
             if not self._connections:
+                self.logger.debug('Connection pool is empty, closing')
                 self._close_event.set()
 
     async def close(self) -> None:
+        self.logger.debug('Closing connection pool')
         async with self._lock:
             self._close_event.set()
             for conn in self._connections:
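
Beyond the new logging, the context lines above show that `_get_connection` races the queue get against the pool's close event, so a consumer blocked on an empty queue wakes up when the pool shuts down. A self-contained sketch of that pattern under illustrative names (the cancellation of the losing task and the error type are assumptions, as those lines are not part of this diff):

```python
import asyncio


async def get_or_abort(queue: asyncio.Queue, closed: asyncio.Event):
    """Race a queue.get() against a close event, as ConnectionPool does."""
    get_task = asyncio.create_task(queue.get())
    close_task = asyncio.create_task(closed.wait())
    _, pending = await asyncio.wait(
        [get_task, close_task], return_when=asyncio.FIRST_COMPLETED
    )
    for task in pending:
        task.cancel()  # drop whichever side lost the race
    if get_task in pending:
        raise RuntimeError('pool closed while waiting')  # stand-in error type
    return get_task.result()
```
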
@@ -387,6 +419,9 @@ async def _until_pool_empty_wrapper(
 
 
 class CAFSClient:
+    logger: Logger | LoggerAdapter
+    verbose_debug: bool
+
     _connection_pool: ConnectionPool
 
     def __init__(
@@ -395,19 +430,31 @@
         servers: Collection[str],
         connection_per_server: int = 1,
         connect_timeout: float = DEFAULT_CONNECT_TIMEOUT,
+        logger: Logger | LoggerAdapter | None = None,
+        verbose_debug: bool = False,
     ) -> None:
+        self.verbose_debug = verbose_debug
         servers_ = {self.parse_server_uri(server) for server in servers}
+        self.logger = logger or module_logger
         self._connection_pool = ConnectionPool(
-            server_root, servers_, connection_per_server, connect_timeout
+            server_root,
+            servers_,
+            connection_per_server,
+            connect_timeout,
+            logger=self.logger,
+            verbose_debug=self.verbose_debug,
         )
 
     async def pull(self, blob_hash: str, path: Path, retry: bool = True) -> None:
+        self.logger.info('Pulling %s to %s', blob_hash, path)
         await aio_os.makedirs(path.parent, exist_ok=True)
 
         async def _pull() -> None:
             async with aiofiles.open(path, 'wb') as file:
                 async with self._connection_pool.connection() as conn:
-                    unpacker = Unpacker(file, logger=conn.logger)
+                    unpacker = Unpacker(
+                        file, logger=conn.logger, verbose_debug=self.verbose_debug
+                    )
                     await conn.pull(blob_hash, unpacker)
 
         await _until_pool_empty_wrapper(_pull, retry=retry)
@@ -416,10 +463,16 @@
         self,
         blobs: list[tuple[str, Path]],
         retry: bool = True,
+        max_concurrent: int | None = None,
     ) -> None:
         if not blobs:
             return
 
+        max_concurrent = min(
+            max_concurrent or await self._connection_pool.get_connection_count(),
+            len(blobs),
+        )
+
         files_queue: asyncio.Queue[tuple[str, Path]] = asyncio.Queue()
         for blob_hash, blob_path in blobs:
             files_queue.put_nowait((blob_hash, blob_path))
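
With this change `pull_batch` no longer spawns one worker per blob: the worker count is capped by the pool's connection count (or an explicit `max_concurrent`) and by the number of blobs, and the next hunk sizes the worker loop accordingly. The sizing rule, restated as a tiny sketch with illustrative names:

```python
# Worker sizing added to pull_batch in 1.0.9: an explicit request wins,
# otherwise use the pool's connection count, and never exceed the job count.
def worker_count(requested: int | None, pool_connections: int, n_blobs: int) -> int:
    return min(requested or pool_connections, n_blobs)


assert worker_count(None, pool_connections=4, n_blobs=100) == 4
assert worker_count(8, pool_connections=4, n_blobs=3) == 3
```
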
@@ -439,7 +492,10 @@
                 files_queue.task_done()
 
         stop_event = asyncio.Event()
-        workers = [asyncio.create_task(worker(stop_event)) for _ in range(len(blobs))]
+        self.logger.debug('Initializing %d workers', max_concurrent)
+        workers = [
+            asyncio.create_task(worker(stop_event)) for _ in range(max_concurrent)
+        ]
         errors = await asyncio.gather(*workers, return_exceptions=True)
 
         for err in errors:
@@ -468,11 +524,19 @@
     ) -> str:
         blob_hash: str = await calc_hash_file(path)
         compression = choose_compression(path, preferred_compression=compression)
+        self.logger.info(
+            'Streaming %s to %s with %s compression', path, blob_hash, compression
+        )
 
         async def _stream() -> str:
             async with aiofiles.open(path, 'rb') as file:
                 async with self._connection_pool.connection() as conn:
-                    packer = Packer(file, compression=compression, logger=conn.logger)
+                    packer = Packer(
+                        file,
+                        compression=compression,
+                        logger=conn.logger,
+                        verbose_debug=self.verbose_debug,
+                    )
                     await conn.stream(blob_hash, packer)
             return blob_hash
 
@@ -516,6 +580,7 @@
                 files_queue.task_done()
 
         stop_event = asyncio.Event()
+        self.logger.debug('Initializing %d workers', max_concurrent)
         workers = [
             asyncio.create_task(worker(stop_event)) for _ in range(max_concurrent)
         ]

--- cafs_cache_cdn_client-1.0.8/cafs_cache_cdn_client/client.py
+++ cafs_cache_cdn_client-1.0.9/cafs_cache_cdn_client/client.py
@@ -1,6 +1,7 @@
 import asyncio
 import functools
 from collections.abc import Awaitable, Callable
+from logging import Logger, LoggerAdapter, getLogger
 from os.path import normpath
 from pathlib import Path
 from typing import Any, Self, TypeVar
@@ -19,6 +20,9 @@ from cafs_cache_cdn_client.repo import RepoClient
 __all__ = ('CacheCdnClient',)
 
 
+package_logger = getLogger(__name__.split('.')[0])
+
+
 CAFS_SERVER_ROOT = '/cache'
 
 
@@ -35,25 +39,44 @@ def needs_cafs_client(func: Callable[..., Awaitable[T]]) -> Callable[..., Awaita
 
 
 class CacheCdnClient:
+    verbose_debug: bool
+
     _cafs_client: CAFSClient | None = None
     _repo_client: RepoClient
 
     __connection_per_cafs_server: int
     __cafs_client_lock = asyncio.Lock()
 
-    def __init__(self, server: str, connection_per_cafs_server: int = 1) -> None:
-        self._repo_client = RepoClient(server)
+    def __init__(
+        self,
+        server: str,
+        connection_per_cafs_server: int = 1,
+        logger: Logger | LoggerAdapter | None = None,
+        verbose_debug: bool = False,
+    ) -> None:
+        self.logger = logger or package_logger
+        self._repo_client = RepoClient(server, logger=self.logger)
         self.__connection_per_cafs_server = connection_per_cafs_server
+        self.verbose_debug = verbose_debug
 
     async def _init_cafs_client(self) -> None:
+        self.logger.debug(
+            'Initializing CAFS client with %d connections per server',
+            self.__connection_per_cafs_server,
+        )
         async with self.__cafs_client_lock:
             if self._cafs_client:
+                self.logger.debug('CAFS client already initialized')
                 return
+            self.logger.debug('Fetching blob URLs from the server')
             blob_urls = await self._repo_client.get_blob_urls()
+            self.logger.debug('Blob URLs: %s', blob_urls)
             self._cafs_client = await CAFSClient(
                 CAFS_SERVER_ROOT,
                 blob_urls,
                 connection_per_server=self.__connection_per_cafs_server,
+                logger=self.logger,
+                verbose_debug=self.verbose_debug,
             ).__aenter__()
 
     @needs_cafs_client
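
Note the fallback in the hunk above: `getLogger(__name__.split('.')[0])` resolves to the package-level logger named `cafs_cache_cdn_client`, so an application that passes no `logger` can still surface these records by configuring that name. A minimal sketch:

```python
import logging

# Without passing logger=..., CacheCdnClient logs under the package name.
logging.basicConfig(level=logging.INFO)
logging.getLogger('cafs_cache_cdn_client').setLevel(logging.DEBUG)
```
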
@@ -66,15 +89,25 @@
         comment: str | None = None,
         compression: CompressionT = CompressionT.NONE,
     ) -> None:
+        self.logger.info(
+            'Pushing %s to %s/%s with ttl=%d hours, compression=%s',
+            directory,
+            repo,
+            ref,
+            ttl_hours,
+            compression,
+        )
         if isinstance(directory, str):
             directory = Path(directory)
         if not directory.is_dir():
             raise ValueError(f'{directory} is not a directory')
         files = walk(directory)
+        self.logger.debug('Uploading %d files to CAFS server', len(files))
         hashes = await self._cafs_client.stream_batch(
             [directory / file.path for file in files],
             compression=compression,
         )
+        self.logger.debug('CAFS upload complete, uploading metadata to the server')
         await self._repo_client.post_ref_info(
             repo,
             ref,
@@ -93,21 +126,34 @@
                 ],
             },
         )
+        self.logger.info('Pushed %d files to %s/%s successfully', len(files), repo, ref)
 
     async def check(self, repo: str, ref: str) -> bool:
-        return await self._repo_client.is_ref_exist(repo, ref)
+        self.logger.info('Checking %s/%s', repo, ref)
+        res = await self._repo_client.is_ref_exist(repo, ref)
+        if res:
+            self.logger.info('Ref %s/%s exists', repo, ref)
+        else:
+            self.logger.info('Ref %s/%s does not exist', repo, ref)
+        return res
 
     async def delete(self, repo: str, ref: str) -> None:
+        self.logger.info('Deleting %s/%s', repo, ref)
         await self._repo_client.delete_ref(repo, ref)
+        self.logger.info('Deleted %s/%s successfully', repo, ref)
 
     async def attach(self, repo: str, ref: str, file_path: Path) -> None:
+        self.logger.info('Attaching %s to %s/%s', file_path, repo, ref)
         await self._repo_client.attach_file(repo, ref, file_path)
+        self.logger.info('Attached %s to %s/%s successfully', file_path, repo, ref)
 
     @needs_cafs_client
     async def pull(self, repo: str, ref: str, directory: Path | str) -> None:
+        self.logger.info('Pulling %s/%s to %s', repo, ref, directory)
         if isinstance(directory, str):
             directory = Path(directory)
         await aio_os.makedirs(directory, exist_ok=True)
+        self.logger.debug('Fetching info about %s/%s from the server', repo, ref)
         ref_info = await self._repo_client.get_ref_info(repo, ref)
         remote_files = [
             LocalFile(
@@ -118,21 +164,41 @@
             )
             for file in ref_info['files']
         ]
+        self.logger.debug('%d files on the server', len(remote_files))
         local_files = walk(directory)
+        self.logger.debug('%d files locally', len(local_files))
         to_remove, to_add, to_update = await compare_file_lists(
             local_files, remote_files, directory
         )
+        self.logger.debug(
+            'Files to remove: %d, files to add: %d, files to update: %d',
+            len(to_remove),
+            len(to_add),
+            len(to_update),
+        )
         for file in to_remove:
             await aio_os.unlink(directory / file.path)
         if to_add:
+            self.logger.debug('Downloading %d files from CAFS server', len(to_add))
             await self._cafs_client.pull_batch(
                 [(file.blob, directory / file.path) for file in to_add]
            )
+            self.logger.debug('CAFS download complete')
         for file in to_add + to_update:
             set_file_stat(file, directory)
+        self.logger.info(
+            'Pulled %d files from %s/%s successfully, updated %d files, removed %d files',
+            len(to_add),
+            repo,
+            ref,
+            len(to_update),
+            len(to_remove),
+        )
 
     async def tag(self, repo: str, ref: str, tag: str) -> None:
+        self.logger.info('Tagging %s/%s to %s', repo, ref, tag)
         await self._repo_client.tag_ref(repo, ref, tag)
+        self.logger.info('Tagged %s/%s to %s successfully', repo, ref, tag)
 
     async def __aenter__(self) -> Self:
         return self

--- cafs_cache_cdn_client-1.0.8/cafs_cache_cdn_client/repo/client.py
+++ cafs_cache_cdn_client-1.0.9/cafs_cache_cdn_client/repo/client.py
@@ -1,5 +1,6 @@
 from collections.abc import Iterable
 from http import HTTPMethod
+from logging import Logger, LoggerAdapter, getLogger
 from pathlib import Path
 from typing import Any, cast
 from urllib.parse import quote, urljoin
@@ -13,11 +14,18 @@ import cafs_cache_cdn_client.repo.datatypes as dt
 __all__ = ('RepoClient',)
 
 
+module_logger = getLogger(__name__)
+
+
 class RepoClient:
     server_base_url: str
+    logger: Logger | LoggerAdapter
 
-    def __init__(self, server: str) -> None:
+    def __init__(
+        self, server: str, logger: Logger | LoggerAdapter | None = None
+    ) -> None:
         self.server_base_url = server
+        self.logger = logger or module_logger
 
     async def _request(
         self,
@@ -38,6 +46,7 @@ class RepoClient:
             data_arg = {'json': data}
         else:
             data_arg = {'data': data}
+        self.logger.debug('Requesting %s', url_)
         async with aiohttp.ClientSession(
             headers=headers, requote_redirect_url=False
         ) as session:

--- cafs_cache_cdn_client-1.0.8/pyproject.toml
+++ cafs_cache_cdn_client-1.0.9/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "cafs-cache-cdn-client"
-version = "1.0.8"
+version = "1.0.9"
 description = "Async Cache CDN client implementation"
 authors = [
     { name = "Konstantin Belov", "email" = "k.belov@gaijin.team" },