rclone-api 1.2.14__py2.py3-none-any.whl → 1.2.154__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
rclone_api/__init__.py CHANGED
@@ -1,38 +1,45 @@
- from .completed_process import CompletedProcess
- from .config import Config, Parsed, Section
- from .diff import DiffItem, DiffOption, DiffType
- from .dir import Dir
- from .dir_listing import DirListing
- from .file import File
- from .filelist import FileList
- from .process import Process
- from .rclone import Rclone, rclone_verbose
- from .remote import Remote
- from .rpath import RPath
- from .s3.types import MultiUploadResult
- from .types import ListingOption, Order, SizeResult, SizeSuffix
-
- __all__ = [
-     "Rclone",
-     "File",
-     "Config",
-     "Remote",
-     "Dir",
-     "RPath",
-     "DirListing",
-     "FileList",
-     "Process",
-     "DiffItem",
-     "DiffType",
-     "rclone_verbose",
-     "CompletedProcess",
-     "DiffOption",
-     "ListingOption",
-     "Order",
-     "ListingOption",
-     "SizeResult",
-     "Parsed",
-     "Section",
-     "MultiUploadResult",
-     "SizeSuffix",
- ]
+ # Import logging module to activate default configuration
+ import rclone_api.logging  # noqa: F401
+
+ from .completed_process import CompletedProcess
+ from .config import Config, Parsed, Section
+ from .diff import DiffItem, DiffOption, DiffType
+ from .dir import Dir
+ from .dir_listing import DirListing
+ from .file import File
+ from .filelist import FileList
+
+ # Import the configure_logging function to make it available at package level
+ from .logging import configure_logging
+ from .process import Process
+ from .rclone import Rclone, rclone_verbose
+ from .remote import Remote
+ from .rpath import RPath
+ from .s3.types import MultiUploadResult
+ from .types import ListingOption, Order, SizeResult, SizeSuffix
+
+ __all__ = [
+     "Rclone",
+     "File",
+     "Config",
+     "Remote",
+     "Dir",
+     "RPath",
+     "DirListing",
+     "FileList",
+     "Process",
+     "DiffItem",
+     "DiffType",
+     "rclone_verbose",
+     "CompletedProcess",
+     "DiffOption",
+     "ListingOption",
+     "Order",
+     "ListingOption",
+     "SizeResult",
+     "Parsed",
+     "Section",
+     "MultiUploadResult",
+     "SizeSuffix",
+     "configure_logging",
+ ]
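
As of this release, importing the package configures logging as a side effect: import rclone_api.logging runs setup_default_logging() (see the new module below), and configure_logging is re-exported at the package level. A minimal sketch of what a consumer of 1.2.154 sees; the level override at the end is illustrative:

import logging

import rclone_api  # triggers rclone_api.logging.setup_default_logging()

# With no prior logging setup, package loggers now emit INFO records to stdout.
logging.getLogger("rclone_api").info("default handler is active")

# The newly exported configure_logging can override the defaults later:
rclone_api.configure_logging(level=logging.DEBUG)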
rclone_api/logging.py ADDED
@@ -0,0 +1,39 @@
+ import logging
+ import sys
+
+
+ def setup_default_logging():
+     """Set up default logging configuration if none exists."""
+     if not logging.root.handlers:
+         logging.basicConfig(
+             level=logging.INFO,
+             format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
+             handlers=[
+                 logging.StreamHandler(sys.stdout),
+                 # Uncomment to add file logging
+                 # logging.FileHandler('rclone_api.log')
+             ],
+         )
+
+
+ def configure_logging(level=logging.INFO, log_file=None):
+     """Configure logging for the rclone_api package.
+
+     Args:
+         level: The logging level (default: logging.INFO)
+         log_file: Optional path to a log file
+     """
+     handlers = [logging.StreamHandler(sys.stdout)]
+     if log_file:
+         handlers.append(logging.FileHandler(log_file))
+
+     logging.basicConfig(
+         level=level,
+         format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
+         handlers=handlers,
+         force=True,  # Override any existing configuration
+     )
+
+
+ # Call setup_default_logging when this module is imported
+ setup_default_logging()
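
configure_logging passes force=True to logging.basicConfig, so it replaces any root handlers already installed, including the import-time defaults above; note that the force keyword requires Python 3.8+. A hedged usage sketch (the log file name is illustrative):

import logging

from rclone_api import configure_logging

# Route DEBUG-and-above records to stdout and, additionally, to a file.
# force=True discards whatever handlers an earlier basicConfig() call
# (or the import-time setup_default_logging()) attached to the root logger.
configure_logging(level=logging.DEBUG, log_file="rclone_api.log")

logging.getLogger("rclone_api").debug("visible on stdout and in the file")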
rclone_api/s3/chunk_file.py CHANGED
@@ -1,145 +1,146 @@
- import time
- import warnings
- from concurrent.futures import Future
- from dataclasses import dataclass
- from pathlib import Path
- from queue import Queue
- from threading import Event
- from typing import Any, Callable
-
- from rclone_api.mount_read_chunker import FilePart
- from rclone_api.s3.chunk_types import UploadState
- from rclone_api.types import EndOfStream
- from rclone_api.util import locked_print
-
-
- def _get_file_size(file_path: Path, timeout: int = 60) -> int:
-     sleep_time = timeout / 60 if timeout > 0 else 1
-     start = time.time()
-     while True:
-         expired = time.time() - start > timeout
-         try:
-             time.sleep(sleep_time)
-             if file_path.exists():
-                 return file_path.stat().st_size
-         except FileNotFoundError as e:
-             if expired:
-                 print(f"File not found: {file_path}, exception is {e}")
-                 raise
-         if expired:
-             raise TimeoutError(f"File {file_path} not found after {timeout} seconds")
-
-
- @dataclass
- class S3FileInfo:
-     upload_id: str
-     part_number: int
-
-
- def file_chunker(
-     upload_state: UploadState,
-     fetcher: Callable[[int, int, Any], Future[FilePart]],
-     max_chunks: int | None,
-     cancel_signal: Event,
-     queue_upload: Queue[FilePart | EndOfStream],
- ) -> None:
-     count = 0
-
-     def should_stop() -> bool:
-         nonlocal count
-
-         if max_chunks is None:
-             return False
-         if count >= max_chunks:
-             print(
-                 f"Stopping file chunker after {count} chunks because it exceeded max_chunks {max_chunks}"
-             )
-             return True
-         count += 1
-         return False
-
-     upload_info = upload_state.upload_info
-     file_path = upload_info.src_file_path
-     chunk_size = upload_info.chunk_size
-     # src = Path(file_path)
-
-     try:
-         part_number = 1
-         done_part_numbers: set[int] = {
-             p.part_number for p in upload_state.parts if not isinstance(p, EndOfStream)
-         }
-         num_parts = upload_info.total_chunks()
-
-         def next_part_number() -> int | None:
-             nonlocal part_number
-             while part_number in done_part_numbers:
-                 part_number += 1
-             if part_number > num_parts:
-                 return None
-             return part_number
-
-         if cancel_signal.is_set():
-             print(
-                 f"Cancel signal is set for file chunker while processing {file_path}, returning"
-             )
-             return
-
-         while not should_stop():
-             print("@@@@@@@@@@")
-             curr_part_number = next_part_number()
-             if curr_part_number is None:
-                 locked_print(f"File {file_path} has completed chunking all parts")
-                 break
-             assert curr_part_number is not None
-             offset = (curr_part_number - 1) * chunk_size
-             file_size = upload_info.file_size
-
-             assert offset < file_size, f"Offset {offset} is greater than file size"
-
-             # Open the file, seek, read the chunk, and close immediately.
-             # with open(file_path, "rb") as f:
-             #     f.seek(offset)
-             #     data = f.read(chunk_size)
-
-             # data = chunk_fetcher(offset, chunk_size).result()
-
-             assert curr_part_number is not None
-             cpn: int = curr_part_number
-
-             def on_complete(fut: Future[FilePart]) -> None:
-                 print("ON COMPLETE")
-                 fp: FilePart = fut.result()
-                 if fp.is_error():
-                     warnings.warn(
-                         f"Error reading file: {fp}, skipping part {part_number}"
-                     )
-                     return
-
-                 if fp.n_bytes() == 0:
-                     warnings.warn(f"Empty data for part {part_number} of {file_path}")
-                     raise ValueError(
-                         f"Empty data for part {part_number} of {file_path}"
-                     )
-
-                 if isinstance(fp.payload, Exception):
-                     warnings.warn(f"Error reading file because of error: {fp.payload}")
-                     return
-
-                 done_part_numbers.add(part_number)
-                 queue_upload.put(fp)
-
-             offset = (curr_part_number - 1) * chunk_size
-             print(f"Reading chunk {curr_part_number} of {num_parts} for {file_path}")
-             fut = fetcher(offset, file_size, S3FileInfo(upload_info.upload_id, cpn))
-             fut.add_done_callback(on_complete)
-             # wait until the queue_upload queue can accept the next chunk
-             while queue_upload.full():
-                 time.sleep(0.1)
-     except Exception as e:
-
-         warnings.warn(f"Error reading file: {e}")
-     finally:
-         print("#############################################################")
-         print(f"Finishing FILE CHUNKER for {file_path} and adding EndOfStream")
-         print("#############################################################")
-         queue_upload.put(EndOfStream())
+ import logging
+ import time
+ from concurrent.futures import Future
+ from dataclasses import dataclass
+ from pathlib import Path
+ from queue import Queue
+ from threading import Event
+ from typing import Any, Callable
+
+ from rclone_api.mount_read_chunker import FilePart
+ from rclone_api.s3.chunk_types import UploadState
+ from rclone_api.types import EndOfStream
+
+ logger = logging.getLogger(__name__)  # noqa
+
+
+ def _get_file_size(file_path: Path, timeout: int = 60) -> int:
+     sleep_time = timeout / 60 if timeout > 0 else 1
+     start = time.time()
+     while True:
+         expired = time.time() - start > timeout
+         try:
+             time.sleep(sleep_time)
+             if file_path.exists():
+                 return file_path.stat().st_size
+         except FileNotFoundError as e:
+             if expired:
+                 print(f"File not found: {file_path}, exception is {e}")
+                 raise
+         if expired:
+             raise TimeoutError(f"File {file_path} not found after {timeout} seconds")
+
+
+ @dataclass
+ class S3FileInfo:
+     upload_id: str
+     part_number: int
+
+
+ def file_chunker(
+     upload_state: UploadState,
+     fetcher: Callable[[int, int, Any], Future[FilePart]],
+     max_chunks: int | None,
+     cancel_signal: Event,
+     queue_upload: Queue[FilePart | EndOfStream],
+ ) -> None:
+     count = 0
+
+     def should_stop() -> bool:
+         nonlocal count
+
+         if max_chunks is None:
+             return False
+         if count >= max_chunks:
+             logger.info(
+                 f"Stopping file chunker after {count} chunks because it exceeded max_chunks {max_chunks}"
+             )
+             return True
+         count += 1
+         return False
+
+     upload_info = upload_state.upload_info
+     file_path = upload_info.src_file_path
+     chunk_size = upload_info.chunk_size
+     # src = Path(file_path)
+
+     try:
+         part_number = 1
+         done_part_numbers: set[int] = {
+             p.part_number for p in upload_state.parts if not isinstance(p, EndOfStream)
+         }
+         num_parts = upload_info.total_chunks()
+
+         def next_part_number() -> int | None:
+             nonlocal part_number
+             while part_number in done_part_numbers:
+                 part_number += 1
+             if part_number > num_parts:
+                 return None
+             return part_number
+
+         if cancel_signal.is_set():
+             logger.info(
+                 f"Cancel signal is set for file chunker while processing {file_path}, returning"
+             )
+             return
+
+         while not should_stop():
+             logger.debug("Processing next chunk")
+             curr_part_number = next_part_number()
+             if curr_part_number is None:
+                 logger.info(f"File {file_path} has completed chunking all parts")
+                 break
+             assert curr_part_number is not None
+             offset = (curr_part_number - 1) * chunk_size
+             file_size = upload_info.file_size
+
+             assert offset < file_size, f"Offset {offset} is greater than file size"
+
+             # Open the file, seek, read the chunk, and close immediately.
+             # with open(file_path, "rb") as f:
+             #     f.seek(offset)
+             #     data = f.read(chunk_size)
+
+             # data = chunk_fetcher(offset, chunk_size).result()
+
+             assert curr_part_number is not None
+             cpn: int = curr_part_number
+
+             def on_complete(fut: Future[FilePart]) -> None:
+                 logger.debug("Chunk read complete")
+                 fp: FilePart = fut.result()
+                 if fp.is_error():
+                     logger.warning(
+                         f"Error reading file: {fp}, skipping part {part_number}"
+                     )
+                     return
+
+                 if fp.n_bytes() == 0:
+                     logger.warning(f"Empty data for part {part_number} of {file_path}")
+                     raise ValueError(
+                         f"Empty data for part {part_number} of {file_path}"
+                     )
+
+                 if isinstance(fp.payload, Exception):
+                     logger.warning(f"Error reading file because of error: {fp.payload}")
+                     return
+
+                 done_part_numbers.add(part_number)
+                 queue_upload.put(fp)
+
+             offset = (curr_part_number - 1) * chunk_size
+             logger.info(
+                 f"Reading chunk {curr_part_number} of {num_parts} for {file_path}"
+             )
+             fut = fetcher(offset, file_size, S3FileInfo(upload_info.upload_id, cpn))
+             fut.add_done_callback(on_complete)
+             # wait until the queue_upload queue can accept the next chunk
+             while queue_upload.full():
+                 time.sleep(0.1)
+     except Exception as e:
+
+         logger.error(f"Error reading file: {e}", exc_info=True)
+     finally:
+         logger.info(f"Finishing FILE CHUNKER for {file_path} and adding EndOfStream")
+         queue_upload.put(EndOfStream())
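
The substance of this change is a migration from print and warnings.warn to module-level logging via logging.getLogger(__name__). Assuming the module path shown in RECORD (rclone_api/s3/chunk_file.py), that logger is named rclone_api.s3.chunk_file, so its verbosity can be tuned without touching the rest of the package; a small sketch:

import logging

# Keep the chunker's warnings and errors but drop its per-chunk INFO lines:
logging.getLogger("rclone_api.s3.chunk_file").setLevel(logging.WARNING)

# Or surface the new DEBUG breadcrumbs ("Processing next chunk", etc.);
# the child logger's level gates emission even when root stays at INFO:
# logging.getLogger("rclone_api.s3.chunk_file").setLevel(logging.DEBUG)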
{rclone_api-1.2.14.dist-info → rclone_api-1.2.154.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: rclone_api
- Version: 1.2.14
+ Version: 1.2.154
  Summary: rclone api in python
  Home-page: https://github.com/zackees/rclone-api
  License: BSD 3-Clause License
{rclone_api-1.2.14.dist-info → rclone_api-1.2.154.dist-info}/RECORD RENAMED
@@ -1,4 +1,4 @@
- rclone_api/__init__.py,sha256=6TVA3JUVxr76wzy29XRTX_xTdQ5JBGwuheNvQc8EgsU,863
+ rclone_api/__init__.py,sha256=dPa4vtqtaO47JZa-YUjOgVa2gCSHLdGxM-_xX_4C7Yw,1150
  rclone_api/cli.py,sha256=dibfAZIh0kXWsBbfp3onKLjyZXo54mTzDjUdzJlDlWo,231
  rclone_api/completed_process.py,sha256=_IZ8IWK7DM1_tsbDEkH6wPZ-bbcrgf7A7smls854pmg,1775
  rclone_api/config.py,sha256=f6jEAxVorGFr31oHfcsu5AJTtOJj2wR5tTSsbGGZuIw,2558
@@ -11,6 +11,7 @@ rclone_api/exec.py,sha256=Pd7pUBd8ib5MzqvMybG2DQISPRbDRu20VjVRL2mLAVY,1076
  rclone_api/file.py,sha256=EP5yT2dZ0H2p7CY5n0y5k5pHhIliV25pm8KOwBklUTk,1863
  rclone_api/filelist.py,sha256=xbiusvNgaB_b_kQOZoHMJJxn6TWGtPrWd2J042BI28o,767
  rclone_api/group_files.py,sha256=H92xPW9lQnbNw5KbtZCl00bD6iRh9yRbCuxku4j_3dg,8036
+ rclone_api/logging.py,sha256=fJ4Hr4baAEv93oOOiyzNfoQ8eD0MuErT3NHMjBC3W_w,1184
  rclone_api/mount.py,sha256=TE_VIBMW7J1UkF_6HRCt8oi_jGdMov4S51bm2OgxFAM,10045
  rclone_api/mount_read_chunker.py,sha256=IH2YcB-N22oiJLkp7KlpG3A4VZkkHOqTYvDLyor2e7Q,4505
  rclone_api/process.py,sha256=rBj_S86jC6nqCYop-jq8r9eMSteKeObxUrJMgH8LZvI,5084
@@ -29,14 +30,14 @@ rclone_api/experimental/flags_base.py,sha256=ajU_czkTcAxXYU-SlmiCfHY7aCQGHvpCLqJ
  rclone_api/profile/mount_copy_bytes.py,sha256=okzcfpmLcQvh5IUcIwZs9jLPSxFMv2igt2-kHoEmlfE,8571
  rclone_api/s3/api.py,sha256=PafsIEyWDpLWAXsZAjFm9CY14vJpsDr9lOsn0kGRLZ0,4009
  rclone_api/s3/basic_ops.py,sha256=hK3366xhVEzEcjz9Gk_8lFx6MRceAk72cax6mUrr6ko,2104
- rclone_api/s3/chunk_file.py,sha256=NqwEJp79VIiuOuCpvYq8w84zRr4_WTVqpb_5dJhyR8U,4973
+ rclone_api/s3/chunk_file.py,sha256=xtg9g4BvaFsipyfj6p5iRitR53jXjBqX0tmtO7Vf3Us,5068
  rclone_api/s3/chunk_types.py,sha256=I0YCWFgxCvmt8cp4tMabiiwiD2yKTcbA6ZL2D3xnn5w,8781
  rclone_api/s3/create.py,sha256=wgfkapv_j904CfKuWyiBIWJVxfAx_ftemFSUV14aT68,3149
  rclone_api/s3/types.py,sha256=Elmh__gvZJyJyElYwMmvYZIBIunDJiTRAbEg21GmsRU,1604
  rclone_api/s3/upload_file_multipart.py,sha256=inoMOQDZZYqTitJz3f0BBHo3F9ZYm8VhL4UTzPmcdm0,11385
- rclone_api-1.2.14.dist-info/LICENSE,sha256=b6pOoifSXiUaz_lDS84vWlG3fr4yUKwB8fzkrH9R8bQ,1064
- rclone_api-1.2.14.dist-info/METADATA,sha256=wQaHcpw0KiZ7cpJkzz3oM0FkWR3Dyv4lkqlYoXsJEdI,4537
- rclone_api-1.2.14.dist-info/WHEEL,sha256=rF4EZyR2XVS6irmOHQIJx2SUqXLZKRMUrjsg8UwN-XQ,109
- rclone_api-1.2.14.dist-info/entry_points.txt,sha256=TV8kwP3FRzYwUEr0RLC7aJh0W03SAefIJNXTJ-FdMIQ,200
- rclone_api-1.2.14.dist-info/top_level.txt,sha256=EvZ7uuruUpe9RiUyEp25d1Keq7PWYNT0O_-mr8FCG5g,11
- rclone_api-1.2.14.dist-info/RECORD,,
+ rclone_api-1.2.154.dist-info/LICENSE,sha256=b6pOoifSXiUaz_lDS84vWlG3fr4yUKwB8fzkrH9R8bQ,1064
+ rclone_api-1.2.154.dist-info/METADATA,sha256=RTR14SPQNbWbnFKlWuMGj-y3CdZ4tU3x70odXfdXJVQ,4538
+ rclone_api-1.2.154.dist-info/WHEEL,sha256=rF4EZyR2XVS6irmOHQIJx2SUqXLZKRMUrjsg8UwN-XQ,109
+ rclone_api-1.2.154.dist-info/entry_points.txt,sha256=TV8kwP3FRzYwUEr0RLC7aJh0W03SAefIJNXTJ-FdMIQ,200
+ rclone_api-1.2.154.dist-info/top_level.txt,sha256=EvZ7uuruUpe9RiUyEp25d1Keq7PWYNT0O_-mr8FCG5g,11
+ rclone_api-1.2.154.dist-info/RECORD,,
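
The sha256 fields in RECORD are urlsafe base64 digests with the trailing '=' padding stripped (per PEP 376 and the wheel spec), so the entries above can be checked against an unpacked wheel. A minimal verification sketch; the path is illustrative:

import base64
import hashlib
from pathlib import Path

def record_digest(path: Path) -> str:
    # RECORD stores urlsafe_b64encode(sha256(contents)) minus '=' padding.
    digest = hashlib.sha256(path.read_bytes()).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# For the 1.2.154 wheel this should print
# dPa4vtqtaO47JZa-YUjOgVa2gCSHLdGxM-_xX_4C7Yw for rclone_api/__init__.py.
print(record_digest(Path("rclone_api/__init__.py")))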