mapillary-downloader 0.6.0__py3-none-any.whl → 0.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mapillary_downloader/__main__.py +18 -1
- mapillary_downloader/downloader.py +3 -8
- mapillary_downloader/ia_stats.py +242 -0
- mapillary_downloader/tar_sequences.py +30 -26
- mapillary_downloader/utils.py +61 -0
- mapillary_downloader/worker.py +20 -11
- {mapillary_downloader-0.6.0.dist-info → mapillary_downloader-0.7.0.dist-info}/METADATA +25 -11
- {mapillary_downloader-0.6.0.dist-info → mapillary_downloader-0.7.0.dist-info}/RECORD +11 -10
- {mapillary_downloader-0.6.0.dist-info → mapillary_downloader-0.7.0.dist-info}/WHEEL +0 -0
- {mapillary_downloader-0.6.0.dist-info → mapillary_downloader-0.7.0.dist-info}/entry_points.txt +0 -0
- {mapillary_downloader-0.6.0.dist-info → mapillary_downloader-0.7.0.dist-info}/licenses/LICENSE.md +0 -0
mapillary_downloader/__main__.py
CHANGED
@@ -26,7 +26,7 @@ def main():
         default=os.environ.get("MAPILLARY_TOKEN"),
         help="Mapillary API access token (or set MAPILLARY_TOKEN env var)",
     )
-    parser.add_argument("usernames", nargs="…
+    parser.add_argument("usernames", nargs="*", help="Mapillary username(s) to download")
     parser.add_argument("--output", default="./mapillary_data", help="Output directory (default: ./mapillary_data)")
     parser.add_argument(
         "--quality",
@@ -61,9 +61,21 @@ def main():
         action="store_true",
         help="Enable debug logging (EXIF data, API responses, etc.)",
     )
+    parser.add_argument(
+        "--stats",
+        action="store_true",
+        help="Show statistics of collections on archive.org and exit",
+    )

     args = parser.parse_args()

+    # Handle --stats early (before token check)
+    if args.stats:
+        from mapillary_downloader.ia_stats import show_stats
+
+        show_stats()
+        sys.exit(0)
+
     # Set debug logging level if requested
     if args.debug:
         import logging
@@ -71,6 +83,11 @@ def main():
         logging.getLogger("mapillary_downloader").setLevel(logging.DEBUG)
         logger.debug("Debug logging enabled")

+    # Check for usernames (required unless using --stats)
+    if not args.usernames:
+        logger.error("Error: At least one username is required")
+        sys.exit(1)
+
     # Check for token
     if not args.token:
         logger.error("Error: Mapillary API token required. Use --token or set MAPILLARY_TOKEN environment variable")
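The switch to `nargs="*"` is what lets `--stats` run without a username: argparse would otherwise reject an empty argument list before `main()` ever sees `args.stats`, which is also why the explicit username check appears after the `--stats` branch. A minimal sketch of the same pattern (flag handling only; the print is a stand-in for `show_stats()`):

```python
import argparse
import sys

parser = argparse.ArgumentParser()
# nargs="*" accepts zero usernames, so informational flags can run alone
parser.add_argument("usernames", nargs="*")
parser.add_argument("--stats", action="store_true")
args = parser.parse_args()

if args.stats:  # handle the early-exit flag before other validation
    print("stats...")  # placeholder for show_stats()
    sys.exit(0)

if not args.usernames:  # manual check replaces what a required positional enforced
    sys.exit("Error: At least one username is required")
```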
mapillary_downloader/downloader.py
CHANGED

@@ -7,7 +7,7 @@ import os
 import shutil
 import time
 from pathlib import Path
-from mapillary_downloader.utils import format_size, format_time
+from mapillary_downloader.utils import format_size, format_time, safe_json_save
 from mapillary_downloader.ia_meta import generate_ia_metadata
 from mapillary_downloader.ia_check import check_ia_exists
 from mapillary_downloader.worker import worker_process
@@ -143,13 +143,8 @@ class MapillaryDownloader:
         # Update this quality's progress
         progress[str(self.quality)] = list(self.downloaded)

-        # Write atomically
-
-        with open(temp_file, "w") as f:
-            json.dump(progress, f)
-            f.flush()
-            os.fsync(f.fileno())
-        temp_file.replace(self.progress_file)
+        # Write atomically using utility function
+        safe_json_save(self.progress_file, progress)

     def download_user_data(self, bbox=None, convert_webp=False):
         """Download all images for a user using streaming queue-based architecture.
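The removed block and its one-line replacement do the same work; the temp-file/fsync/rename dance now lives in `safe_json_save()`, added to `utils.py` further down this diff. A usage sketch of the refactored call, with a made-up path and progress dict:

```python
from pathlib import Path
from mapillary_downloader.utils import safe_json_save

# Hypothetical progress state keyed by quality, as in the save above
progress = {"2048": ["img_001", "img_002"]}
safe_json_save(Path("mapillary_data/.progress.json"), progress)
# On return the file on disk is either the old version or the new one,
# never a partial write: the JSON is fsync'd to a .json.tmp file first,
# then atomically renamed over the target.
```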
mapillary_downloader/ia_stats.py
ADDED

@@ -0,0 +1,242 @@
+"""Internet Archive statistics for mapillary_downloader collections."""
+
+import json
+import logging
+import re
+from mapillary_downloader.utils import safe_json_save, http_get_with_retry, format_size
+from mapillary_downloader.downloader import get_cache_dir
+
+logger = logging.getLogger("mapillary_downloader")
+
+CACHE_FILE = get_cache_dir() / ".stats.json"
+
+
+def search_ia_collections():
+    """Search IA for all mapillary_downloader collections.
+
+    Returns:
+        List of dicts with: identifier, description, item_size, uploader
+    """
+    logger.info("Searching archive.org for mapillary_downloader collections...")
+
+    url = "https://archive.org/advancedsearch.php"
+    params = {
+        "q": "mapillary_downloader:*",
+        "fl[]": ["identifier", "description", "item_size", "uploader"],
+        "rows": 10000,
+        "output": "json",
+    }
+
+    response = http_get_with_retry(url, params=params, max_retries=3)
+    data = response.json()
+
+    collections = data["response"]["docs"]
+    logger.info(f"Found {len(collections)} collections on archive.org")
+
+    return collections
+
+
+def parse_collection_info(identifier):
+    """Parse username, quality, webp from collection identifier.
+
+    Returns:
+        dict with username, quality, is_webp or None if invalid
+    """
+    match = re.match(r"mapillary-(.+)-(256|1024|2048|original)(?:-webp)?$", identifier)
+    if match:
+        return {"username": match.group(1), "quality": match.group(2), "is_webp": "-webp" in identifier}
+    return None
+
+
+def extract_image_count(description):
+    """Extract image count from IA description field.
+
+    Description format: "Contains 12,345 images in..."
+    """
+    if not description:
+        return None
+
+    match = re.search(r"Contains ([\d,]+) images", description)
+    if match:
+        return int(match.group(1).replace(",", ""))
+    return None
+
+
+def load_cache():
+    """Load cached collection data.
+
+    Returns:
+        dict of {collection_id: {size, uploader, images, quality, username}}
+    """
+    if CACHE_FILE.exists():
+        try:
+            with open(CACHE_FILE) as f:
+                return json.load(f)
+        except Exception as e:
+            logger.warning(f"Failed to load cache: {e}")
+    return {}
+
+
+def update_cache(ia_collections):
+    """Update cache with new IA search results.
+
+    Merges new collections into existing cache.
+
+    Returns:
+        Updated cache dict
+    """
+    cache = load_cache()
+
+    for item in ia_collections:
+        identifier = item.get("identifier")
+        if not identifier:
+            continue
+
+        info = parse_collection_info(identifier)
+        if not info:
+            logger.debug(f"Skipping non-mapillary collection: {identifier}")
+            continue
+
+        # Parse item data
+        size_bytes = item.get("item_size", 0)
+        if isinstance(size_bytes, str):
+            size_bytes = int(size_bytes)
+
+        image_count = extract_image_count(item.get("description"))
+
+        # Update cache entry
+        cache[identifier] = {
+            "size": size_bytes,
+            "uploader": item.get("uploader"),
+            "images": image_count,
+            "quality": info["quality"],
+            "username": info["username"],
+            "is_webp": info["is_webp"],
+        }
+
+    # Save updated cache
+    safe_json_save(CACHE_FILE, cache)
+    logger.info(f"Updated cache with {len(cache)} collections")
+
+    return cache
+
+
+def aggregate_stats(cache):
+    """Aggregate statistics from cached collection data.
+
+    Returns:
+        dict with total and per-quality stats
+    """
+    stats = {
+        "total": {"collections": 0, "total_images": 0, "unique_images": 0, "bytes": 0},
+        "by_quality": {},
+        "users": set(),
+    }
+
+    # Track images per user for deduplication
+    user_images = {}  # {username: max_images_across_qualities}
+
+    for collection_id, data in cache.items():
+        images = data.get("images") or 0
+        size = data.get("size") or 0
+        quality = data.get("quality", "unknown")
+        username = data.get("username")
+
+        # Track user coverage
+        if username:
+            stats["users"].add(username)
+            # Keep maximum image count across all qualities for this user
+            if username not in user_images or images > user_images[username]:
+                user_images[username] = images
+
+        # Total stats (collections, total images, and bytes)
+        stats["total"]["collections"] += 1
+        stats["total"]["total_images"] += images
+        stats["total"]["bytes"] += size
+
+        # Per-quality stats
+        if quality not in stats["by_quality"]:
+            stats["by_quality"][quality] = {"collections": 0, "images": 0, "bytes": 0}
+
+        stats["by_quality"][quality]["collections"] += 1
+        stats["by_quality"][quality]["images"] += images
+        stats["by_quality"][quality]["bytes"] += size
+
+    # Unique images is sum of max images per user
+    stats["total"]["unique_images"] = sum(user_images.values())
+
+    return stats
+
+
+def format_stats(stats):
+    """Format statistics as human-readable text.
+
+    Args:
+        stats: Dict from aggregate_stats()
+
+    Returns:
+        Formatted string
+    """
+    TOTAL_MAPILLARY_IMAGES = 2_000_000_000  # 2 billion
+
+    output = []
+    output.append("=" * 70)
+    output.append("Mapillary Downloader - Archive.org Statistics")
+    output.append("=" * 70)
+    output.append("")
+
+    # Total stats
+    total = stats["total"]
+    unique_pct = (total["unique_images"] / TOTAL_MAPILLARY_IMAGES * 100) if total["unique_images"] else 0
+
+    output.append(f"Total Collections: {total['collections']:,}")
+    output.append(f"Total Users: {len(stats['users']):,}")
+    output.append(f"Total Images: {total['total_images']:,}")
+    output.append(f"Unique Images: {total['unique_images']:,} ({unique_pct:.3f}% of 2B)")
+    output.append(f"Total Size: {format_size(total['bytes'])}")
+    output.append("")
+
+    # Per-quality breakdown
+    output.append("By Quality:")
+    output.append("-" * 70)
+
+    # Sort by quality (original first, then numeric)
+    qualities = sorted(stats["by_quality"].items(), key=lambda x: (x[0] != "original", x[0]))
+
+    for quality, data in qualities:
+        pct = (data["images"] / TOTAL_MAPILLARY_IMAGES * 100) if data["images"] else 0
+        output.append(
+            f" {quality:8s} {data['collections']:3d} collections "
+            f"{data['images']:12,d} images ({pct:.3f}%) "
+            f"{format_size(data['bytes']):>8s}"
+        )
+
+    output.append("")
+    output.append(f"Cache: {CACHE_FILE}")
+
+    return "\n".join(output)
+
+
+def show_stats(refresh=True):
+    """Show archive.org statistics for mapillary_downloader collections.
+
+    Args:
+        refresh: If True, fetch fresh data from IA. If False, use cache only.
+    """
+    if refresh:
+        try:
+            ia_collections = search_ia_collections()
+            cache = update_cache(ia_collections)
+        except Exception as e:
+            logger.error(f"Failed to fetch IA data: {e}")
+            logger.info("Using cached data...")
+            cache = load_cache()
+    else:
+        cache = load_cache()
+
+    if not cache:
+        logger.error("No cached data and failed to fetch from IA")
+        return
+
+    stats = aggregate_stats(cache)
+    print(format_stats(stats))
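The two regexes and the per-user max carry most of the logic in this new module. A quick check of what they produce, with expected values worked out by hand from the patterns and loop above (sample identifiers and counts are invented):

```python
from mapillary_downloader.ia_stats import parse_collection_info, extract_image_count

# Identifier pattern: mapillary-<username>-<quality>, optional -webp suffix
assert parse_collection_info("mapillary-alice-2048-webp") == {
    "username": "alice", "quality": "2048", "is_webp": True,
}
assert parse_collection_info("mapillary-bob-original") == {
    "username": "bob", "quality": "original", "is_webp": False,
}
assert parse_collection_info("some-other-item") is None

# Description pattern: "Contains N images", thousands separators stripped
assert extract_image_count("Contains 12,345 images in 67 sequences") == 12345
assert extract_image_count("") is None

# Dedup logic from aggregate_stats(): a user archived at several qualities
# counts once, at the largest image count seen for that user
user_images = {}
for images, user in [(100, "alice"), (250, "alice"), (40, "bob")]:
    if user not in user_images or images > user_images[user]:
        user_images[user] = images
assert sum(user_images.values()) == 290  # 250 for alice + 40 for bob
```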
mapillary_downloader/tar_sequences.py
CHANGED

@@ -1,6 +1,7 @@
 """Tar sequence directories for efficient Internet Archive uploads."""

 import logging
+import re
 import tarfile
 from pathlib import Path
 from mapillary_downloader.utils import format_size
@@ -9,7 +10,9 @@ logger = logging.getLogger("mapillary_downloader")


 def tar_sequence_directories(collection_dir):
-    """Tar all …
+    """Tar all date directories in a collection for faster IA uploads.
+
+    Organizes by capture date (YYYY-MM-DD) for incremental archive.org uploads.

     Args:
         collection_dir: Path to collection directory (e.g., mapillary-user-quality/)
@@ -23,41 +26,44 @@ def tar_sequence_directories(collection_dir):
         logger.error(f"Collection directory not found: {collection_dir}")
         return 0, 0

-    # Find all …
-    # …
+    # Find all date directories (skip special dirs)
+    # Date format: YYYY-MM-DD or unknown-date
     skip_dirs = {".meta", "__pycache__"}
-    …
+    date_dirs = []

     for item in collection_dir.iterdir():
         if item.is_dir() and item.name not in skip_dirs:
-            # Check if this is a …
-            if …
-            …
+            # Check if this is a date dir (YYYY-MM-DD) or unknown-date
+            if re.match(r"\d{4}-\d{2}-\d{2}$", item.name) or item.name == "unknown-date":
+                date_dirs.append(item)

-    if not …
-        logger.info("No …
+    if not date_dirs:
+        logger.info("No date directories to tar")
         return 0, 0

-    …
+    # Sort date directories chronologically (YYYY-MM-DD sorts naturally)
+    date_dirs = sorted(date_dirs, key=lambda x: x.name)
+
+    logger.info(f"Tarring {len(date_dirs)} date directories...")

     tarred_count = 0
     total_files = 0
     total_tar_bytes = 0

-    for …
-    …
-    tar_path = collection_dir / f"{…
+    for date_dir in date_dirs:
+        date_name = date_dir.name
+        tar_path = collection_dir / f"{date_name}.tar"

-        # Count files in …
-        files_to_tar = sorted([f for f in …
+        # Count files in date directory
+        files_to_tar = sorted([f for f in date_dir.rglob("*") if f.is_file()], key=lambda x: str(x))
         file_count = len(files_to_tar)

         if file_count == 0:
-            logger.warning(f"Skipping empty …
+            logger.warning(f"Skipping empty date directory: {date_name}")
             continue

         try:
-            logger.info(f"Tarring …
+            logger.info(f"Tarring date '{date_name}' ({file_count} files)...")

             # Create reproducible uncompressed tar (WebP already compressed)
             with tarfile.open(tar_path, "w") as tar:
@@ -84,36 +90,34 @@ def tar_sequence_directories(collection_dir):
             tar_size = tar_path.stat().st_size
             total_tar_bytes += tar_size

-            # Remove original …
-            for file in …
+            # Remove original date directory
+            for file in date_dir.rglob("*"):
                 if file.is_file():
                     file.unlink()

             # Remove empty subdirs and main dir
-            for subdir in list(…
+            for subdir in list(date_dir.rglob("*")):
                 if subdir.is_dir():
                     try:
                         subdir.rmdir()
                     except OSError:
                         pass  # Not empty yet
-            …
+            date_dir.rmdir()

             tarred_count += 1
             total_files += file_count

-            logger.info(f"Tarred …
+            logger.info(f"Tarred date '{date_name}': {file_count:,} files, {format_size(tar_size)}")
         else:
             logger.error(f"Tar file empty or not created: {tar_path}")
             if tar_path.exists():
                 tar_path.unlink()

     except Exception as e:
-        logger.error(f"Error tarring …
+        logger.error(f"Error tarring date {date_name}: {e}")
         if tar_path.exists():
             tar_path.unlink()

-    logger.info(
-        f"Tarred {tarred_count} sequences ({total_files:,} files, {format_size(total_tar_bytes)} total tar size)"
-    )
+    logger.info(f"Tarred {tarred_count} dates ({total_files:,} files, {format_size(total_tar_bytes)} total tar size)")
     return tarred_count, total_files
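The body of the `with tarfile.open(...)` block falls outside the hunks shown, so the exact normalization behind the "reproducible tar" comment is not visible in this diff. A sketch of one common way to get byte-identical tars from identical inputs — fixed owner/mtime plus the sorted member order the code already establishes; an assumption about the approach, not necessarily what this module does:

```python
import tarfile

def add_normalized(tar, path, arcname):
    """Add a file with fixed owner/mtime so repeated runs produce identical tars."""
    info = tar.gettarinfo(path, arcname=arcname)
    info.uid = info.gid = 0
    info.uname = info.gname = ""
    info.mtime = 0  # clamp timestamps; they otherwise change the archive bytes
    with open(path, "rb") as f:
        tar.addfile(info, f)

# files_to_tar is already sorted above, which fixes member order;
# "w" (no compression) matches the comment that WebP is already compressed
with tarfile.open("2024-01-15.tar", "w") as tar:
    for name in sorted(["b.webp", "a.webp"]):  # stand-ins for files_to_tar
        add_normalized(tar, name, arcname=name)
```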
mapillary_downloader/utils.py
CHANGED
@@ -1,5 +1,15 @@
 """Utility functions for formatting and display."""

+import json
+import logging
+import os
+import time
+from pathlib import Path
+import requests
+from requests.exceptions import RequestException
+
+logger = logging.getLogger("mapillary_downloader")
+

 def format_size(bytes_count):
     """Format bytes as human-readable size.
@@ -45,3 +55,54 @@ def format_time(seconds):
     if remaining_minutes > 0:
         return f"{hours}h {remaining_minutes}m"
     return f"{hours}h"
+
+
+def safe_json_save(file_path, data):
+    """Atomically save JSON data to file.
+
+    Writes to temp file, then atomic rename to prevent corruption.
+
+    Args:
+        file_path: Path to JSON file
+        data: Data to serialize to JSON
+    """
+    file_path = Path(file_path)
+    file_path.parent.mkdir(parents=True, exist_ok=True)
+
+    temp_file = file_path.with_suffix(".json.tmp")
+    with open(temp_file, "w") as f:
+        json.dump(data, f, indent=2)
+        f.flush()
+        os.fsync(f.fileno())
+    temp_file.replace(file_path)
+
+
+def http_get_with_retry(url, params=None, max_retries=5, base_delay=1.0, timeout=60):
+    """HTTP GET with exponential backoff retry.
+
+    Args:
+        url: URL to fetch
+        params: Optional query parameters
+        max_retries: Maximum retry attempts (default: 5)
+        base_delay: Initial delay in seconds (default: 1.0)
+        timeout: Request timeout in seconds (default: 60)
+
+    Returns:
+        requests.Response object
+
+    Raises:
+        requests.RequestException: If all retries exhausted
+    """
+    for attempt in range(max_retries):
+        try:
+            response = requests.get(url, params=params, timeout=timeout)
+            response.raise_for_status()
+            return response
+        except RequestException as e:
+            if attempt == max_retries - 1:
+                raise
+
+            delay = base_delay * (2**attempt)
+            logger.warning(f"Request failed (attempt {attempt + 1}/{max_retries}): {e}")
+            logger.info(f"Retrying in {delay:.1f} seconds...")
+            time.sleep(delay)
mapillary_downloader/worker.py
CHANGED
@@ -3,11 +3,12 @@
 import os
 import signal
 import tempfile
+from datetime import datetime
 from pathlib import Path
 import requests
-from requests.exceptions import RequestException
 from mapillary_downloader.exif_writer import write_exif_to_image
 from mapillary_downloader.webp_converter import convert_to_webp
+from mapillary_downloader.utils import http_get_with_retry


 def worker_process(work_queue, result_queue, worker_id):
@@ -69,16 +70,25 @@ def download_and_convert_image(image_data, output_dir, quality, convert_webp, se…
     if not image_url:
         return (image_id, 0, False, f"No {quality} URL")

-    # Determine final output directory - organize by …
+    # Determine final output directory - organize by capture date
     output_dir = Path(output_dir)
     sequence_id = image_data.get("sequence")
+
+    # Extract date from captured_at timestamp (milliseconds since epoch)
+    captured_at = image_data.get("captured_at")
+    if captured_at:
+        # Convert to UTC date string (YYYY-MM-DD)
+        date_str = datetime.utcfromtimestamp(captured_at / 1000).strftime("%Y-%m-%d")
+    else:
+        # Fallback for missing timestamp (should be rare per API docs)
+        date_str = "unknown-date"
+
     if sequence_id:
-        …
-        first_char = sequence_id[0]
-        img_dir = output_dir / first_char / sequence_id
+        img_dir = output_dir / date_str / sequence_id
         img_dir.mkdir(parents=True, exist_ok=True)
     else:
-        img_dir = output_dir
+        img_dir = output_dir / date_str
+        img_dir.mkdir(parents=True, exist_ok=True)

     # If converting to WebP, use /tmp for intermediate JPEG
     # Otherwise write JPEG directly to final location
@@ -90,19 +100,18 @@ def download_and_convert_image(image_data, output_dir, quality, convert_webp, se…
         jpg_path = img_dir / f"{image_id}.jpg"
         final_path = jpg_path

-    # Download image
+    # Download image with retry logic
     bytes_downloaded = 0

     try:
-        # …
-        response = …
-        response.raise_for_status()
+        # Use retry logic with 3 attempts for image downloads
+        response = http_get_with_retry(image_url, max_retries=3, base_delay=1.0, timeout=60)

         with open(jpg_path, "wb") as f:
             for chunk in response.iter_content(chunk_size=8192):
                 f.write(chunk)
                 bytes_downloaded += len(chunk)
-    except …
+    except Exception as e:
         return (image_id, 0, False, f"Download failed: {e}")

     # Write EXIF metadata
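`captured_at` arrives as milliseconds since the Unix epoch, hence the `/ 1000` before conversion. A worked example of the date bucketing (the timestamp is made up):

```python
from datetime import datetime

captured_at = 1705312800000  # ms; 2024-01-15T10:00:00Z
date_str = datetime.utcfromtimestamp(captured_at / 1000).strftime("%Y-%m-%d")
assert date_str == "2024-01-15"
# Images with no captured_at fall into the "unknown-date" bucket instead,
# matching the unknown-date directory that tar_sequences.py also accepts.
```

Note that `datetime.utcfromtimestamp` is deprecated as of Python 3.12 in favor of `datetime.fromtimestamp(..., tz=timezone.utc)`, though it still behaves as shown here.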
{mapillary_downloader-0.6.0.dist-info → mapillary_downloader-0.7.0.dist-info}/METADATA
RENAMED

@@ -1,7 +1,7 @@
 Metadata-Version: 2.4
 Name: mapillary_downloader
-Version: 0.6.0
-Summary: …
+Version: 0.7.0
+Summary: Archive user data from Mapillary
 Author-email: Gareth Davidson <gaz@bitplane.net>
 Requires-Python: >=3.10
 Description-Content-Type: text/markdown
@@ -100,21 +100,28 @@ mapillary-downloader --no-webp USERNAME

 ## Tarballs

-Images are organized by …
-… sequence to reduce directory count:
+Images are organized by capture date (YYYY-MM-DD) for incremental archiving:

 ```
 mapillary-username-quality/
-  …
+  2024-01-15/
     abc123/
       image1.webp
       image2.webp
+    bcd456/
+      image3.webp
+  2024-01-16/
+    def789/
+      image4.webp
 ```

-By default, these …
-(resulting in `…
-…
-…
+By default, these date directories are automatically tarred after download
+(resulting in `2024-01-15.tar`, `2024-01-16.tar`, etc.). This date-based
+organization enables:
+
+- **Incremental uploads** - Upload each day's tar as soon as it's ready
+- **Manageable file counts** - ~365 days/year × 10 years = 3,650 tars max
+- **Chronological organization** - Natural sorting and progress tracking

 To keep individual files instead of creating tars, use the `--no-tar` flag.
@@ -128,8 +135,15 @@ See inlay for details:

 * [📀 rip](https://bitplane.net/dev/sh/rip)

+## 📊 Stats
+
+To see overall project progress, or an estimate, use `--stats`
+
+```bash
+mapillary-downloader --stats
+```

-## Development
+## 🚧 Development

 ```bash
 make dev  # Setup dev environment
@@ -138,7 +152,7 @@ make dist  # Build the distribution
 make help  # See other make options
 ```

-## Links
+## 🔗 Links

 * [🏠 home](https://bitplane.net/dev/python/mapillary_downloader)
 * [📖 pydoc](https://bitplane.net/dev/python/mapillary_downloader/pydoc)
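Since each day's tar is written uncompressed, the archives can be inspected or unpacked with the standard library alone. A small sketch, with the file name taken from the README layout above and the member path in the comment an assumption about how arcnames are laid out:

```python
import tarfile

# List one day's images without extracting anything
with tarfile.open("2024-01-15.tar") as tar:
    for member in tar.getmembers():
        if member.isfile():
            print(member.name)  # e.g. abc123/image1.webp (path layout assumed)
```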
{mapillary_downloader-0.6.0.dist-info → mapillary_downloader-0.7.0.dist-info}/RECORD
RENAMED

@@ -1,19 +1,20 @@
 mapillary_downloader/__init__.py,sha256=KEjiBRghXDeA7E15RJeLBfQm-yNJkowZarL59QOh_1w,120
-mapillary_downloader/__main__.py,sha256=…
+mapillary_downloader/__main__.py,sha256=jK0MU9Xn9KGb_V8x5giIRuwhUjNFQ-jjlprtbeW6b94,4817
 mapillary_downloader/client.py,sha256=a5n43FLHP45EHodEjl0ieziBK-b6Ey-rZJwYB6EFhNI,4743
-mapillary_downloader/downloader.py,sha256=…
+mapillary_downloader/downloader.py,sha256=TrFy9eTcZD_wyVh7L58HuVxgCIKbAYRnmr2gAWtXOuY,19738
 mapillary_downloader/exif_writer.py,sha256=K_441EG1siWyNMmFGZSfnORUCjBThkeg4JFtbg9AOsA,5120
 mapillary_downloader/ia_check.py,sha256=L2MEbG_KmlAd5NLmo2HQkO8HWvRN0brE5wXXoyNMbq8,1100
 mapillary_downloader/ia_meta.py,sha256=78rcybHIPnQDsF02KGj6RYmDXzYzrU8sdVx4Q9Y0sfI,6266
+mapillary_downloader/ia_stats.py,sha256=TSVCoaCcGFDPTYqxikGdvMo7uWtExRniYABjQQS26fw,7302
 mapillary_downloader/logging_config.py,sha256=Z-wNq34nt7aIhJWdeKc1feTY46P9-Or7HtiX7eUFjEI,2324
 mapillary_downloader/metadata_reader.py,sha256=Re-HN0Vfc7Hs1eOut7uOoW7jWJ2PIbKoNzC7Ak3ah5o,4933
-mapillary_downloader/tar_sequences.py,sha256=…
-mapillary_downloader/utils.py,sha256=…
+mapillary_downloader/tar_sequences.py,sha256=UchKvvajBr5uaoE8xDHgyiFTkjh08EK7pPhtwkyCQXU,4416
+mapillary_downloader/utils.py,sha256=VgcwbC8yb2XlTGerTNwHBU42K2IN14VU7P-I52Vb01c,2947
 mapillary_downloader/webp_converter.py,sha256=vYLLQxDmdnqRz0nm7wXwRUd4x9mQZNah-DrncpA8sNs,1901
-mapillary_downloader/worker.py,sha256=…
+mapillary_downloader/worker.py,sha256=K2DkQgFzALKs20TsG1KibNUdFiWN_v8MtVnBX_0xVyc,5162
 mapillary_downloader/worker_pool.py,sha256=iGRq5uFwBNNVQnI4vEjbKHkbKTaEVCdmvMvXcRGuDMg,8203
-mapillary_downloader-0.6.0.dist-info/…
-mapillary_downloader-0.6.0.dist-info/…
-mapillary_downloader-0.6.0.dist-info/…
-mapillary_downloader-0.6.0.dist-info/…
-mapillary_downloader-0.6.0.dist-info/…
+mapillary_downloader-0.7.0.dist-info/entry_points.txt,sha256=PdYtxOXHMJrUhmiPO4G-F98VuhUI4MN9D_T4KPrVZ5w,75
+mapillary_downloader-0.7.0.dist-info/licenses/LICENSE.md,sha256=7_BIuQ-veOrsF-WarH8kTkm0-xrCLvJ1PFE1C4Ebs64,146
+mapillary_downloader-0.7.0.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
+mapillary_downloader-0.7.0.dist-info/METADATA,sha256=Ftc--29thU8dc-J_11_NlBUnf6SsOlvQP4r28nclsnk,5540
+mapillary_downloader-0.7.0.dist-info/RECORD,,
{mapillary_downloader-0.6.0.dist-info → mapillary_downloader-0.7.0.dist-info}/WHEEL
RENAMED

File without changes

{mapillary_downloader-0.6.0.dist-info → mapillary_downloader-0.7.0.dist-info}/entry_points.txt
RENAMED

File without changes

{mapillary_downloader-0.6.0.dist-info → mapillary_downloader-0.7.0.dist-info}/licenses/LICENSE.md
RENAMED

File without changes