async-mega-py 2.0.3.dev0__tar.gz → 2.0.5.dev0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/PKG-INFO +1 -1
- {async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/pyproject.toml +1 -1
- {async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/src/mega/chunker.py +25 -2
- {async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/src/mega/client.py +6 -5
- {async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/src/mega/core.py +6 -3
- {async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/src/mega/crypto.py +2 -19
- {async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/src/mega/download.py +6 -7
- {async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/src/mega/progress.py +53 -35
- {async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/src/mega/transfer_it.py +7 -4
- {async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/src/mega/upload.py +27 -22
- {async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/src/mega/utils.py +18 -3
- {async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/LICENSE +0 -0
- {async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/README.md +0 -0
- {async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/src/mega/__init__.py +0 -0
- {async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/src/mega/__main__.py +0 -0
- {async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/src/mega/api.py +0 -0
- {async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/src/mega/auth.py +0 -0
- {async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/src/mega/cli/__init__.py +0 -0
- {async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/src/mega/cli/app.py +0 -0
- {async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/src/mega/data_structures.py +0 -0
- {async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/src/mega/env.py +0 -0
- {async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/src/mega/errors.py +0 -0
- {async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/src/mega/filesystem.py +0 -0
- {async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/src/mega/py.typed +0 -0
- {async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/src/mega/vault.py +0 -0
{async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/src/mega/chunker.py

@@ -3,7 +3,7 @@ from __future__ import annotations
 import dataclasses
 import logging
 from collections.abc import Generator
-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, NamedTuple
 
 from Crypto.Cipher import AES
 from Crypto.Util import Counter
@@ -17,9 +17,32 @@ if TYPE_CHECKING:
 logger = logging.getLogger(__name__)
 
 
+class ChunkBoundary(NamedTuple):
+    offset: int
+    size: int
+
+
+def get_chunks(size: int) -> Generator[ChunkBoundary]:
+    """
+    Yield chunk boundaries for Mega's MAC computation.
+
+    Chunk sizes grow in 128 KiB (0x20000) steps up to 1 MiB (0x100000).
+    The last chunk may be smaller.
+    """
+
+    offset = 0
+    current_size = init_size = 0x20000
+    while offset + current_size < size:
+        yield ChunkBoundary(offset, current_size)
+        offset += current_size
+        if current_size < 0x100000:
+            current_size += init_size
+    yield ChunkBoundary(offset, size - offset)
+
+
 @dataclasses.dataclass(slots=True)
 class MegaChunker:
-    """Decrypts/encrypts a flow of chunks using Mega's CBC algorithm"""
+    """Decrypts/encrypts a flow of chunks using Mega's custom CBC-MAC algorithm"""
 
     iv: tuple[int, int]
     key: tuple[int, int, int, int]
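`get_chunks` is a pure function, so the boundary schedule it produces is easy to verify outside the package. A minimal standalone sketch (a copy for illustration, not an import of async-mega-py) printing the schedule for a 1 MiB payload:

```python
from collections.abc import Generator
from typing import NamedTuple


class ChunkBoundary(NamedTuple):
    offset: int
    size: int


def get_chunks(size: int) -> Generator[ChunkBoundary, None, None]:
    # Chunk sizes grow in 128 KiB steps up to 1 MiB, then stay at 1 MiB;
    # the final chunk covers whatever remains.
    offset = 0
    current_size = init_size = 0x20000
    while offset + current_size < size:
        yield ChunkBoundary(offset, current_size)
        offset += current_size
        if current_size < 0x100000:
            current_size += init_size
    yield ChunkBoundary(offset, size - offset)


print([(hex(o), hex(s)) for o, s in get_chunks(0x100000)])
# [('0x0', '0x20000'), ('0x20000', '0x40000'), ('0x60000', '0x60000'), ('0xc0000', '0x40000')]
```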
{async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/src/mega/client.py

@@ -22,7 +22,7 @@ from mega.crypto import (
 from mega.data_structures import AccountStats, Attributes, Crypto, FileInfo, Node, NodeID, NodeType, UserResponse
 from mega.download import DownloadResults
 from mega.filesystem import FileSystem
-from mega.utils import Site, throttled_gather
+from mega.utils import Site, async_map
 
 from .errors import MegaNzError, RequestError, ValidationError
 
@@ -155,7 +155,7 @@ class MegaNzClient(MegaCore):
         return await self.get_folder_link(fs[node.id])
 
     async def get_public_filesystem(self, public_handle: NodeID, public_key: str) -> FileSystem:
-        logger.info(f"
+        logger.info(f"Fetching filesystem information for {public_handle = }...")
         folder: GetNodesResponse = await self._api.post(
             {
                 "a": "f",
@@ -165,7 +165,8 @@ class MegaNzClient(MegaCore):
             },
             {"n": public_handle},
         )
-
+        nodes = folder["f"]
+        logger.info(f"Decrypting and building filesystem for {public_handle =} ({len(nodes)} nodes)...")
         nodes = await self._vault.deserialize_nodes(folder["f"], public_key)
         return await asyncio.to_thread(FileSystem.build, nodes)
 
@@ -207,7 +208,7 @@ class MegaNzClient(MegaCore):
         base_path = Path(output_dir or ".")
         folder_url = f"{_DOMAIN}/folder/{public_handle}#{public_key}"
 
-        async def
+        async def download(file: Node) -> tuple[NodeID, Path | Exception]:
             web_url = folder_url + f"/file/{file.id}"
             output_path = base_path / fs.relative_path(file.id)
             try:
@@ -224,7 +225,7 @@ class MegaNzClient(MegaCore):
 
             return file.id, result
 
-        results = await
+        results = await async_map(download, fs.files_from(root_id))
         return DownloadResults.split(dict(results))
 
     async def upload(self, file_path: str | PathLike[str], dest_node_id: NodeID | None = None) -> Node:
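Both folder-download paths (here and the matching one in transfer_it.py) now map a per-file coroutine over `fs.files_from(root_id)` via `async_map`, which this diff shows only as renamed overloads of the former `throttled_gather`. The sketch below is one plausible implementation, assuming a semaphore-bounded `asyncio.gather`; the `limit` parameter and its default are illustrative, not taken from the package:

```python
import asyncio
from collections.abc import Awaitable, Callable, Iterable
from typing import TypeVar

_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")


async def async_map(
    coro_factory: Callable[[_T1], Awaitable[_T2]],
    values: Iterable[_T1],
    *,
    limit: int = 5,  # hypothetical knob; the real keyword arguments are elided in this diff
) -> list[_T2]:
    # Run coro_factory over every value, but never more than `limit` at once.
    semaphore = asyncio.Semaphore(limit)

    async def bounded(value: _T1) -> _T2:
        async with semaphore:
            return await coro_factory(value)

    return await asyncio.gather(*(bounded(v) for v in values))


async def main() -> None:
    async def square(n: int) -> int:
        await asyncio.sleep(0)
        return n * n

    print(await async_map(square, range(5)))  # [0, 1, 4, 9, 16]


asyncio.run(main())
```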
{async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/src/mega/core.py

@@ -22,7 +22,7 @@ from mega.crypto import (
 from mega.data_structures import Crypto, FileInfo, FileInfoSerialized, Node, NodeID
 from mega.errors import MegaNzError, RequestError, ValidationError
 from mega.filesystem import UserFileSystem
-from mega.utils import Site, random_u32int_array, transform_v1_url
+from mega.utils import Site, get_file_size, random_u32int_array, transform_v1_url
 from mega.vault import MegaVault
 
 if TYPE_CHECKING:
@@ -169,6 +169,7 @@ class MegaCore(AbstractApiClient):
         return FileInfo.parse(resp)
 
     async def _prepare_filesystem(self) -> UserFileSystem:
+        logger.info("Fetching user's filesystem information...")
         nodes_resp: GetNodesResponse = await self._api.post(
             {
                 "a": "f",
@@ -177,13 +178,15 @@ class MegaCore(AbstractApiClient):
             },
         )
 
+        nodes = nodes_resp["f"]
+        logger.info(f"Decrypting and building user's filesystem ({len(nodes)} nodes)...")
         self._vault.init_shared_keys(nodes_resp)
-        nodes = await self._vault.deserialize_nodes(
+        nodes = await self._vault.deserialize_nodes(nodes)
         return await asyncio.to_thread(UserFileSystem.build, nodes)
 
     async def _upload(self, file_path: str | PathLike[str], dest_node_id: NodeID) -> GetNodesResponse:
         file_path = Path(file_path)
-        file_size =
+        file_size = await asyncio.to_thread(get_file_size, file_path)
 
         with progress.new_task(file_path.name, file_size, "UP"):
             file_id, crypto = await upload.upload(self._api, file_path, file_size)
{async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/src/mega/crypto.py

@@ -7,14 +7,14 @@ import logging
 import math
 import struct
 import time
-from typing import TYPE_CHECKING, Any, NamedTuple
+from typing import TYPE_CHECKING, Any
 
 from Crypto.Cipher import AES
 from Crypto.Math.Numbers import Integer
 from Crypto.PublicKey import RSA
 
 if TYPE_CHECKING:
-    from collections.abc import Generator, Mapping, Sequence
+    from collections.abc import Mapping, Sequence
 
     from mega.data_structures import AttributesSerialized
 
@@ -24,11 +24,6 @@ CHUNK_BLOCK_LEN = 16  # Hexadecimal
 EMPTY_IV = b"\0" * CHUNK_BLOCK_LEN
 
 
-class ChunkBoundary(NamedTuple):
-    offset: int
-    size: int
-
-
 def pad_bytes(data: bytes | memoryview[int], length: int = CHUNK_BLOCK_LEN) -> bytes:
     if len(data) % length:
         padding = b"\0" * (length - len(data) % length)
@@ -147,18 +142,6 @@ def a32_to_base64(array: Sequence[int]) -> str:
     return b64_url_encode(a32_to_bytes(array))
 
 
-def get_chunks(size: int) -> Generator[ChunkBoundary]:
-    # generates a list of chunks (offset, chunk_size), where offset refers to the file initial position
-    offset = 0
-    current_size = init_size = 0x20000
-    while offset + current_size < size:
-        yield ChunkBoundary(offset, current_size)
-        offset += current_size
-        if current_size < 0x100000:
-            current_size += init_size
-    yield ChunkBoundary(offset, size - offset)
-
-
 def decrypt_rsa_key(private_key: bytes) -> RSA.RsaKey:
     # The private_key contains 4 MPI integers concatenated together.
     rsa_private_key = [0, 0, 0, 0]
{async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/src/mega/download.py

@@ -13,8 +13,7 @@ from types import MappingProxyType
 from typing import IO, TYPE_CHECKING, Final, Generic, Self, TypeVar
 
 from mega import progress
-from mega.chunker import MegaChunker
-from mega.crypto import get_chunks
+from mega.chunker import MegaChunker, get_chunks
 from mega.data_structures import NodeID
 
 if TYPE_CHECKING:
@@ -63,11 +62,11 @@ async def encrypted_stream(
 
     chunker = MegaChunker(iv, key, meta_mac)
     progress_hook = progress.current_hook.get()
-    async with _new_temp_download(output_path) as
+    async with _new_temp_download(output_path) as file_io:
         for _, chunk_size in get_chunks(file_size):
             encrypted_chunk = await stream.readexactly(chunk_size)
             chunk = chunker.read(encrypted_chunk)
-
+            await asyncio.to_thread(file_io.write, chunk)
             progress_hook(len(chunk))
 
     chunker.check_integrity()
@@ -81,9 +80,9 @@ async def stream(stream: aiohttp.StreamReader, output_path: Path) -> Path:
         raise FileExistsError(errno.EEXIST, output_path)
 
     progress_hook = progress.current_hook.get()
-    async with _new_temp_download(output_path) as
+    async with _new_temp_download(output_path) as file_io:
         async for chunk in stream.iter_chunked(_CHUNK_SIZE):
-
+            await asyncio.to_thread(file_io.write, chunk)
             progress_hook(len(chunk))
 
     return output_path
@@ -92,7 +91,7 @@ async def stream(stream: aiohttp.StreamReader, output_path: Path) -> Path:
 @contextlib.asynccontextmanager
 async def _new_temp_download(output_path: Path) -> AsyncGenerator[IO[bytes]]:
     # We need NamedTemporaryFile to not delete on file.close() but on context exit, which is not supported until python 3.12
-    temp_file = tempfile.NamedTemporaryFile
+    temp_file = await asyncio.to_thread(tempfile.NamedTemporaryFile, prefix="mega_py_", delete=False)
     logger.debug(f'Created temp file "{temp_file.name!s}" for "{output_path!s}"')
     try:
         yield temp_file
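The pattern repeated through these hunks is worth noting: `NamedTemporaryFile(delete=False)` keeps the temp file around after `close()` so it can later be moved onto `output_path`, and every blocking file call is pushed through `asyncio.to_thread` so the event loop stays free for other transfers. A runnable sketch of the same shape, with a fake async chunk source standing in for the aiohttp stream:

```python
import asyncio
import os
import tempfile
from collections.abc import AsyncIterator


async def fake_chunks() -> AsyncIterator[bytes]:
    for _ in range(3):
        await asyncio.sleep(0)  # stand-in for a network read
        yield b"x" * 1024


async def save(chunks: AsyncIterator[bytes]) -> str:
    # delete=False: the file must outlive close() so the caller can rename it.
    file_io = await asyncio.to_thread(tempfile.NamedTemporaryFile, prefix="mega_py_", delete=False)
    try:
        async for chunk in chunks:
            # Blocking write runs in a worker thread, off the event loop.
            await asyncio.to_thread(file_io.write, chunk)
    finally:
        await asyncio.to_thread(file_io.close)
    return file_io.name


path = asyncio.run(save(fake_chunks()))
print(os.path.getsize(path))  # 3072
os.remove(path)
```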
{async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/src/mega/progress.py

@@ -3,14 +3,16 @@ from __future__ import annotations
 import asyncio
 import contextlib
 from contextvars import ContextVar
-from typing import TYPE_CHECKING, Any, Literal, Protocol, TypeAlias
+from typing import TYPE_CHECKING, Any, Literal, Protocol, TypeAlias, TypeVar
 
 if TYPE_CHECKING:
     from collections.abc import Callable, Generator
     from types import TracebackType
 
-    from rich.progress import Progress
+    from rich.progress import Progress, Task
+    from rich.text import Text
 
+_T = TypeVar("_T")
 ProgressHook: TypeAlias = Callable[[float], None]
 
 class ProgressHookContext(Protocol):
@@ -33,12 +35,13 @@ current_hook: ContextVar[ProgressHook] = ContextVar("current_hook", default=lamb
 
 
 @contextlib.contextmanager
-def
-
+def _enter_context(context_var: ContextVar[_T], value: _T) -> Generator[None]:
+    """Context manager for context vars"""
+    token = context_var.set(value)
     try:
         yield
     finally:
-
+        context_var.reset(token)
 
 
 @contextlib.contextmanager
@@ -48,12 +51,8 @@ def new_task(description: str, total: float, kind: Literal["UP", "DOWN"]) -> Gen
         yield
         return
 
-    with factory(description, total, kind) as
-
-        try:
-            yield
-        finally:
-            current_hook.reset(token)
+    with factory(description, total, kind) as new_hook, _enter_context(current_hook, new_hook):
+        yield
 
 
 @contextlib.contextmanager
@@ -66,45 +65,64 @@ def new_progress() -> Generator[None]:
     def hook_factory(*args, **kwargs):
        return _new_rich_task(progress, *args, **kwargs)
 
-    with
+    with (
+        progress,
+        _enter_context(_PROGRESS_HOOK_FACTORY, hook_factory),
+    ):
         yield
 
 
-def _truncate_desc(desc: str, length: int = 80, placeholder: str = "...") -> str:
-    if len(desc) <= length:
-        return desc
-
-    return f"{desc[: length - len(placeholder)]}{placeholder}"
-
-
 def _new_rich_progress() -> Progress | None:
     try:
+        from rich import get_console
         from rich.progress import (
             BarColumn,
             DownloadColumn,
             Progress,
             SpinnerColumn,
+            TextColumn,
             TimeRemainingColumn,
             TransferSpeedColumn,
         )
+        from rich.table import Column
     except ImportError:
         return None
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    )
+    console = get_console()
+
+    class AutoTruncatedTextColumn(TextColumn):
+        def render(self, task: Task) -> Text:
+            text = super().render(task)
+            width = console.width
+            available_width = min((width * 60 // 100), (width - 65))
+            desc_limit = max(available_width, 8)
+            text.truncate(desc_limit, overflow="ellipsis")
+            return text
+
+    return Progress(
+        "[{task.fields[kind]}]",
+        SpinnerColumn(),
+        AutoTruncatedTextColumn("{task.description}"),
+        BarColumn(
+            bar_width=None,
+        ),
+        "[progress.percentage]{task.percentage:>6.1f}%",
+        "•",
+        DownloadColumn(
+            table_column=Column(justify="right", no_wrap=True),
+        ),
+        "•",
+        TransferSpeedColumn(table_column=Column(justify="right", no_wrap=True)),
+        "•",
+        TimeRemainingColumn(
+            compact=True,
+            elapsed_when_finished=True,
+            table_column=Column(justify="right", no_wrap=True),
+        ),
+        transient=True,
+        console=console,
+        expand=True,
+    )
 
 
 @contextlib.contextmanager
@@ -114,7 +132,7 @@ def _new_rich_task(
     total: float,
     kind: Literal["UP", "DOWN"],
 ) -> Generator[ProgressHook]:
-    task_id = progress.add_task(
+    task_id = progress.add_task(description, total=total, kind=kind)
 
     def progress_hook(advance: float) -> None:
         progress.advance(task_id, advance)
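`_enter_context` captures a small but reusable idiom: `ContextVar.set` returns a token and `reset(token)` restores whatever value was active before, which is what lets `new_task` install a hook and unwind it cleanly even when tasks nest. A self-contained demonstration (the function mirrors the diff; the string values are just for illustration):

```python
import contextlib
from collections.abc import Generator
from contextvars import ContextVar
from typing import TypeVar

_T = TypeVar("_T")


@contextlib.contextmanager
def _enter_context(context_var: ContextVar[_T], value: _T) -> Generator[None, None, None]:
    # set() hands back a Token; reset() rolls the var back to its prior value.
    token = context_var.set(value)
    try:
        yield
    finally:
        context_var.reset(token)


current_hook: ContextVar[str] = ContextVar("current_hook", default="noop")

with _enter_context(current_hook, "outer-task"):
    with _enter_context(current_hook, "inner-task"):
        print(current_hook.get())  # inner-task
    print(current_hook.get())      # outer-task
print(current_hook.get())          # noop
```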
{async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/src/mega/transfer_it.py

@@ -15,7 +15,7 @@ from mega.crypto import b64_to_a32, b64_url_decode, decrypt_attr
 from mega.data_structures import Attributes, Crypto, Node, NodeID, NodeType
 from mega.download import DownloadResults
 from mega.filesystem import FileSystem
-from mega.utils import Site, throttled_gather
+from mega.utils import Site, async_map
 
 if TYPE_CHECKING:
     from collections.abc import Iterable
@@ -47,6 +47,7 @@ class TransferItClient(AbstractApiClient):
         self._api = TransferItAPI(session)
 
     async def get_filesystem(self, transfer_id: TransferID) -> FileSystem:
+        logger.info(f"Fetching filesystem information for {transfer_id = }...")
         folder: GetNodesResponse = await self._api.post(
             {
                 "a": "f",
@@ -56,7 +57,9 @@ class TransferItClient(AbstractApiClient):
             },
             {"x": transfer_id},
         )
-
+        nodes = folder["f"]
+        logger.info(f"Decrypting and building filesystem for {transfer_id = } ({len(nodes)} nodes)...")
+        return await asyncio.to_thread(self._deserialize_nodes, nodes)
 
     @staticmethod
     def parse_url(url: str | yarl.URL) -> TransferID:
@@ -107,7 +110,7 @@ class TransferItClient(AbstractApiClient):
         base_path = Path(output_dir or ".") / f"transfer.it ({transfer_id})"
         folder_url = f"https://transfer.it/t/{transfer_id}"
 
-        async def
+        async def download(file: Node) -> tuple[NodeID, Path | Exception]:
             web_url = folder_url + f"#{file.id}"
             output_path = base_path / fs.relative_path(file.id)
             dl_link = self.create_download_url(transfer_id, file)
@@ -123,7 +126,7 @@ class TransferItClient(AbstractApiClient):
 
             return file.id, result
 
-        results = await
+        results = await async_map(download, fs.files_from(root_id))
         return DownloadResults.split(dict(results))
 
     async def _download_file(self, dl_link: str, output_path: str | PathLike[str]) -> Path:
{async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/src/mega/upload.py

@@ -1,12 +1,13 @@
 from __future__ import annotations
 
+import asyncio
 import logging
-from typing import
+from typing import TYPE_CHECKING
 
 from mega import progress
-from mega.chunker import MegaChunker
-from mega.crypto import a32_to_base64, b64_url_encode, encrypt_attr, encrypt_key
-from mega.data_structures import Crypto
+from mega.chunker import MegaChunker, get_chunks
+from mega.crypto import a32_to_base64, b64_url_encode, encrypt_attr, encrypt_key
+from mega.data_structures import ByteSize, Crypto
 from mega.utils import random_u32int_array
 
 if TYPE_CHECKING:
@@ -24,24 +25,23 @@ async def _request_upload_url(api: MegaAPI, file_size: int) -> str:
 
 
 async def upload(api: MegaAPI, file_path: Path, file_size: int) -> tuple[str, Crypto]:
-
-
-    key, iv = random_array[:4], random_array[4:]
+    random_array = random_u32int_array(6)
+    key, iv = random_array[:4], random_array[4:]
 
-
-
-
-
-
+    if file_size == 0:
+        upload_url = await _request_upload_url(api, file_size)
+        file_handle = await api.upload_chunk(upload_url, 0, b"")
+        meta_mac = 0, 0
+        return file_handle, Crypto.compose(key, iv, meta_mac)
 
-
-
+    chunker = MegaChunker(iv, key)  # pyright: ignore[reportArgumentType]
+    return await _upload_chunks(api, chunker, file_path, file_size)
 
 
 async def _upload_chunks(
     api: MegaAPI,
     chunker: MegaChunker,
-
+    file_path: Path,
     file_size: int,
 ) -> tuple[str, Crypto]:
     upload_progress = 0
@@ -49,13 +49,18 @@ async def _upload_chunks(
     upload_url = await _request_upload_url(api, file_size)
     progress_hook = progress.current_hook.get()
 
-
-
-
-
-
-
-
+    file_size = ByteSize(file_size)
+    total = file_size.human_readable()
+    with await asyncio.to_thread(file_path.open, "rb") as input_file:
+        for offset, size in get_chunks(file_size):
+            chunk = chunker.read(await asyncio.to_thread(input_file.read, size))
+            file_handle = await api.upload_chunk(upload_url, offset, chunk)
+            human_progress = ByteSize(upload_progress).human_readable()
+            ratio = (upload_progress / file_size) * 100
+            logger.debug(f'{human_progress}/{total} uploaded ({ratio:0.1f}%) for "{file_path!s}"')
+            real_size = len(chunk)
+            upload_progress += real_size
+            progress_hook(real_size)
 
     assert file_handle
     return file_handle, Crypto.compose(chunker.key, chunker.iv, chunker.compute_meta_mac())
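The rewritten `_upload_chunks` reads the file sequentially on the `get_chunks` schedule: the CBC-MAC chunker must consume bytes in order, and the upload endpoint only returns the completion handle with the final chunk, which is why `file_handle` is reassigned on every iteration and asserted afterwards. A schematic of that loop with a stubbed API call (`fake_upload_chunk` and the fixed two-entry schedule are invented for illustration; the real schedule comes from `get_chunks`):

```python
import asyncio
import io

DATA = b"\0" * 0x60000
# Boundary schedule for a 384 KiB payload, as get_chunks would produce it.
SCHEDULE = [(0x00000, 0x20000), (0x20000, 0x40000)]


async def fake_upload_chunk(offset: int, chunk: bytes) -> str:
    """Stand-in for the API call: the handle only arrives with the last chunk."""
    await asyncio.sleep(0)
    return "FILE_HANDLE" if offset + len(chunk) == len(DATA) else ""


async def upload() -> str:
    file_handle = ""
    input_file = io.BytesIO(DATA)
    for offset, size in SCHEDULE:
        # Reads stay strictly sequential so a MAC chunker would see bytes in order.
        chunk = await asyncio.to_thread(input_file.read, size)
        file_handle = await fake_upload_chunk(offset, chunk)
    assert file_handle
    return file_handle


print(asyncio.run(upload()))  # FILE_HANDLE
```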
{async_mega_py-2.0.3.dev0 → async_mega_py-2.0.5.dev0}/src/mega/utils.py

@@ -2,16 +2,19 @@ from __future__ import annotations
 
 import asyncio
 import datetime
+import errno
 import logging
 import random
 import string
 from enum import Enum
+from stat import S_ISREG
 from typing import TYPE_CHECKING, Literal, TypeVar, overload
 
 import yarl
 
 if TYPE_CHECKING:
     from collections.abc import Awaitable, Callable, Iterable, Sequence
+    from pathlib import Path
 
 _T1 = TypeVar("_T1")
 _T2 = TypeVar("_T2")
@@ -76,8 +79,20 @@ def transform_v1_url(url: yarl.URL) -> yarl.URL:
     return url
 
 
+def get_file_size(file_path: Path) -> int:
+    try:
+        stat = file_path.stat()
+    except (OSError, ValueError):
+        raise FileNotFoundError(errno.ENOENT, str(file_path)) from None
+
+    if not S_ISREG(stat.st_mode):
+        raise IsADirectoryError(errno.EISDIR, str(file_path))
+
+    return stat.st_size
+
+
 @overload
-async def throttled_gather(
+async def async_map(
     coro_factory: Callable[[_T1], Awaitable[_T2]],
     values: Iterable[_T1],
     *,
@@ -87,7 +102,7 @@ async def throttled_gather(
 
 
 @overload
-async def throttled_gather(
+async def async_map(
     coro_factory: Callable[[_T1], Awaitable[_T2]],
     values: Iterable[_T1],
     *,
@@ -96,7 +111,7 @@ async def throttled_gather(
 ) -> list[_T2]: ...
 
 
-async def throttled_gather(
+async def async_map(
     coro_factory: Callable[[_T1], Awaitable[_T2]],
     values: Iterable[_T1],
     *,
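`get_file_size` folds any stat failure into `FileNotFoundError` and rejects anything that is not a regular file before reporting a size. Since `Path.stat()` blocks, core.py calls the helper through `asyncio.to_thread`, as its hunk above shows. The sketch below restates the helper with explanatory comments (the comments are mine, not the package's) alongside the off-thread call:

```python
import asyncio
import errno
from pathlib import Path
from stat import S_ISREG


def get_file_size(file_path: Path) -> int:
    try:
        stat = file_path.stat()
    except (OSError, ValueError):
        # ValueError covers malformed paths, e.g. an embedded NUL byte.
        raise FileNotFoundError(errno.ENOENT, str(file_path)) from None

    if not S_ISREG(stat.st_mode):
        # Directories, pipes, sockets etc. cannot be uploaded.
        raise IsADirectoryError(errno.EISDIR, str(file_path))

    return stat.st_size


async def main() -> None:
    # stat() is blocking, so callers offload it, mirroring core.py's usage.
    size = await asyncio.to_thread(get_file_size, Path(__file__))
    print(size)


asyncio.run(main())
```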