mapillary-downloader 0.6.0__tar.gz → 0.7.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (20)
  1. {mapillary_downloader-0.6.0 → mapillary_downloader-0.7.0}/PKG-INFO +25 -11
  2. {mapillary_downloader-0.6.0 → mapillary_downloader-0.7.0}/README.md +23 -9
  3. {mapillary_downloader-0.6.0 → mapillary_downloader-0.7.0}/pyproject.toml +2 -2
  4. {mapillary_downloader-0.6.0 → mapillary_downloader-0.7.0}/src/mapillary_downloader/__main__.py +18 -1
  5. {mapillary_downloader-0.6.0 → mapillary_downloader-0.7.0}/src/mapillary_downloader/downloader.py +3 -8
  6. mapillary_downloader-0.7.0/src/mapillary_downloader/ia_stats.py +242 -0
  7. {mapillary_downloader-0.6.0 → mapillary_downloader-0.7.0}/src/mapillary_downloader/tar_sequences.py +30 -26
  8. mapillary_downloader-0.7.0/src/mapillary_downloader/utils.py +108 -0
  9. {mapillary_downloader-0.6.0 → mapillary_downloader-0.7.0}/src/mapillary_downloader/worker.py +20 -11
  10. mapillary_downloader-0.6.0/src/mapillary_downloader/utils.py +0 -47
  11. {mapillary_downloader-0.6.0 → mapillary_downloader-0.7.0}/LICENSE.md +0 -0
  12. {mapillary_downloader-0.6.0 → mapillary_downloader-0.7.0}/src/mapillary_downloader/__init__.py +0 -0
  13. {mapillary_downloader-0.6.0 → mapillary_downloader-0.7.0}/src/mapillary_downloader/client.py +0 -0
  14. {mapillary_downloader-0.6.0 → mapillary_downloader-0.7.0}/src/mapillary_downloader/exif_writer.py +0 -0
  15. {mapillary_downloader-0.6.0 → mapillary_downloader-0.7.0}/src/mapillary_downloader/ia_check.py +0 -0
  16. {mapillary_downloader-0.6.0 → mapillary_downloader-0.7.0}/src/mapillary_downloader/ia_meta.py +0 -0
  17. {mapillary_downloader-0.6.0 → mapillary_downloader-0.7.0}/src/mapillary_downloader/logging_config.py +0 -0
  18. {mapillary_downloader-0.6.0 → mapillary_downloader-0.7.0}/src/mapillary_downloader/metadata_reader.py +0 -0
  19. {mapillary_downloader-0.6.0 → mapillary_downloader-0.7.0}/src/mapillary_downloader/webp_converter.py +0 -0
  20. {mapillary_downloader-0.6.0 → mapillary_downloader-0.7.0}/src/mapillary_downloader/worker_pool.py +0 -0

{mapillary_downloader-0.6.0 → mapillary_downloader-0.7.0}/PKG-INFO

@@ -1,7 +1,7 @@
  Metadata-Version: 2.4
  Name: mapillary_downloader
- Version: 0.6.0
- Summary: Download your Mapillary data before it's gone
+ Version: 0.7.0
+ Summary: Archive user data from Mapillary
  Author-email: Gareth Davidson <gaz@bitplane.net>
  Requires-Python: >=3.10
  Description-Content-Type: text/markdown
@@ -100,21 +100,28 @@ mapillary-downloader --no-webp USERNAME

  ## Tarballs

- Images are organized by sequence ID, bucketed by the first character of the
- sequence to reduce directory count:
+ Images are organized by capture date (YYYY-MM-DD) for incremental archiving:

  ```
  mapillary-username-quality/
-   a/
+   2024-01-15/
      abc123/
        image1.webp
        image2.webp
+     bcd456/
+       image3.webp
+   2024-01-16/
+     def789/
+       image4.webp
  ```

- By default, these bucket directories are automatically tarred after download
- (resulting in `a.tar`, `b.tar`, etc. - about 62 tar files total). This is done
- because large collections with millions of images would otherwise create hundreds
- of thousands of tiny tars, and anger the archive gods.
+ By default, these date directories are automatically tarred after download
+ (resulting in `2024-01-15.tar`, `2024-01-16.tar`, etc.). This date-based
+ organization enables:
+
+ - **Incremental uploads** - Upload each day's tar as soon as it's ready
+ - **Manageable file counts** - ~365 days/year × 10 years = 3,650 tars max
+ - **Chronological organization** - Natural sorting and progress tracking

  To keep individual files instead of creating tars, use the `--no-tar` flag.

@@ -128,8 +135,15 @@ See inlay for details:

  * [📀 rip](https://bitplane.net/dev/sh/rip)

+ ## 📊 Stats
+
+ To see overall project progress, or an estimate, use `--stats`
+
+ ```bash
+ mapillary-downloader --stats
+ ```

- ## Development
+ ## 🚧 Development

  ```bash
  make dev # Setup dev environment
@@ -138,7 +152,7 @@ make dist # Build the distribution
  make help # See other make options
  ```

- ## Links
+ ## 🔗 Links

  * [🏠 home](https://bitplane.net/dev/python/mapillary_downloader)
  * [📖 pydoc](https://bitplane.net/dev/python/mapillary_downloader/pydoc)

{mapillary_downloader-0.6.0 → mapillary_downloader-0.7.0}/README.md

@@ -70,21 +70,28 @@ mapillary-downloader --no-webp USERNAME

  ## Tarballs

- Images are organized by sequence ID, bucketed by the first character of the
- sequence to reduce directory count:
+ Images are organized by capture date (YYYY-MM-DD) for incremental archiving:

  ```
  mapillary-username-quality/
-   a/
+   2024-01-15/
      abc123/
        image1.webp
        image2.webp
+     bcd456/
+       image3.webp
+   2024-01-16/
+     def789/
+       image4.webp
  ```

- By default, these bucket directories are automatically tarred after download
- (resulting in `a.tar`, `b.tar`, etc. - about 62 tar files total). This is done
- because large collections with millions of images would otherwise create hundreds
- of thousands of tiny tars, and anger the archive gods.
+ By default, these date directories are automatically tarred after download
+ (resulting in `2024-01-15.tar`, `2024-01-16.tar`, etc.). This date-based
+ organization enables:
+
+ - **Incremental uploads** - Upload each day's tar as soon as it's ready
+ - **Manageable file counts** - ~365 days/year × 10 years = 3,650 tars max
+ - **Chronological organization** - Natural sorting and progress tracking

  To keep individual files instead of creating tars, use the `--no-tar` flag.

@@ -98,8 +105,15 @@ See inlay for details:

  * [📀 rip](https://bitplane.net/dev/sh/rip)

+ ## 📊 Stats
+
+ To see overall project progress, or an estimate, use `--stats`
+
+ ```bash
+ mapillary-downloader --stats
+ ```

- ## Development
+ ## 🚧 Development

  ```bash
  make dev # Setup dev environment
@@ -108,7 +122,7 @@ make dist # Build the distribution
  make help # See other make options
  ```

- ## Links
+ ## 🔗 Links

  * [🏠 home](https://bitplane.net/dev/python/mapillary_downloader)
  * [📖 pydoc](https://bitplane.net/dev/python/mapillary_downloader/pydoc)
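
For illustration only (not part of the diff): the per-date layout described above can be sanity-checked before upload with the standard library. A minimal sketch, assuming a collection downloaded with default settings; the `mapillary-username-2048-webp` directory name is hypothetical.

```python
import tarfile
from pathlib import Path

# Hypothetical collection directory produced by a default run.
collection = Path("./mapillary_data/mapillary-username-2048-webp")

# Each per-date tar (2024-01-15.tar, ...) holds that day's sequence directories.
for tar_path in sorted(collection.glob("*.tar")):
    with tarfile.open(tar_path) as tar:
        members = tar.getnames()
        print(f"{tar_path.name}: {len(members)} files")
        for name in members[:3]:  # peek at the first few entries
            print("   ", name)
```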

{mapillary_downloader-0.6.0 → mapillary_downloader-0.7.0}/pyproject.toml

@@ -1,7 +1,7 @@
  [project]
  name = "mapillary_downloader"
- description = "Download your Mapillary data before it's gone"
- version = "0.6.0"
+ description = "Archive user data from Mapillary"
+ version = "0.7.0"
  authors = [
      { name = "Gareth Davidson", email = "gaz@bitplane.net" }
  ]

{mapillary_downloader-0.6.0 → mapillary_downloader-0.7.0}/src/mapillary_downloader/__main__.py

@@ -26,7 +26,7 @@ def main():
          default=os.environ.get("MAPILLARY_TOKEN"),
          help="Mapillary API access token (or set MAPILLARY_TOKEN env var)",
      )
-     parser.add_argument("usernames", nargs="+", help="Mapillary username(s) to download")
+     parser.add_argument("usernames", nargs="*", help="Mapillary username(s) to download")
      parser.add_argument("--output", default="./mapillary_data", help="Output directory (default: ./mapillary_data)")
      parser.add_argument(
          "--quality",
@@ -61,9 +61,21 @@
          action="store_true",
          help="Enable debug logging (EXIF data, API responses, etc.)",
      )
+     parser.add_argument(
+         "--stats",
+         action="store_true",
+         help="Show statistics of collections on archive.org and exit",
+     )

      args = parser.parse_args()

+     # Handle --stats early (before token check)
+     if args.stats:
+         from mapillary_downloader.ia_stats import show_stats
+
+         show_stats()
+         sys.exit(0)
+
      # Set debug logging level if requested
      if args.debug:
          import logging
@@ -71,6 +83,11 @@
          logging.getLogger("mapillary_downloader").setLevel(logging.DEBUG)
          logger.debug("Debug logging enabled")

+     # Check for usernames (required unless using --stats)
+     if not args.usernames:
+         logger.error("Error: At least one username is required")
+         sys.exit(1)
+
      # Check for token
      if not args.token:
          logger.error("Error: Mapillary API token required. Use --token or set MAPILLARY_TOKEN environment variable")

{mapillary_downloader-0.6.0 → mapillary_downloader-0.7.0}/src/mapillary_downloader/downloader.py

@@ -7,7 +7,7 @@ import os
  import shutil
  import time
  from pathlib import Path
- from mapillary_downloader.utils import format_size, format_time
+ from mapillary_downloader.utils import format_size, format_time, safe_json_save
  from mapillary_downloader.ia_meta import generate_ia_metadata
  from mapillary_downloader.ia_check import check_ia_exists
  from mapillary_downloader.worker import worker_process
@@ -143,13 +143,8 @@ class MapillaryDownloader:
          # Update this quality's progress
          progress[str(self.quality)] = list(self.downloaded)

-         # Write atomically
-         temp_file = self.progress_file.with_suffix(".json.tmp")
-         with open(temp_file, "w") as f:
-             json.dump(progress, f)
-             f.flush()
-             os.fsync(f.fileno())
-         temp_file.replace(self.progress_file)
+         # Write atomically using utility function
+         safe_json_save(self.progress_file, progress)

      def download_user_data(self, bbox=None, convert_webp=False):
          """Download all images for a user using streaming queue-based architecture.

mapillary_downloader-0.7.0/src/mapillary_downloader/ia_stats.py

@@ -0,0 +1,242 @@
+ """Internet Archive statistics for mapillary_downloader collections."""
+
+ import json
+ import logging
+ import re
+ from mapillary_downloader.utils import safe_json_save, http_get_with_retry, format_size
+ from mapillary_downloader.downloader import get_cache_dir
+
+ logger = logging.getLogger("mapillary_downloader")
+
+ CACHE_FILE = get_cache_dir() / ".stats.json"
+
+
+ def search_ia_collections():
+     """Search IA for all mapillary_downloader collections.
+
+     Returns:
+         List of dicts with: identifier, description, item_size, uploader
+     """
+     logger.info("Searching archive.org for mapillary_downloader collections...")
+
+     url = "https://archive.org/advancedsearch.php"
+     params = {
+         "q": "mapillary_downloader:*",
+         "fl[]": ["identifier", "description", "item_size", "uploader"],
+         "rows": 10000,
+         "output": "json",
+     }
+
+     response = http_get_with_retry(url, params=params, max_retries=3)
+     data = response.json()
+
+     collections = data["response"]["docs"]
+     logger.info(f"Found {len(collections)} collections on archive.org")
+
+     return collections
+
+
+ def parse_collection_info(identifier):
+     """Parse username, quality, webp from collection identifier.
+
+     Returns:
+         dict with username, quality, is_webp or None if invalid
+     """
+     match = re.match(r"mapillary-(.+)-(256|1024|2048|original)(?:-webp)?$", identifier)
+     if match:
+         return {"username": match.group(1), "quality": match.group(2), "is_webp": "-webp" in identifier}
+     return None
+
+
+ def extract_image_count(description):
+     """Extract image count from IA description field.
+
+     Description format: "Contains 12,345 images in..."
+     """
+     if not description:
+         return None
+
+     match = re.search(r"Contains ([\d,]+) images", description)
+     if match:
+         return int(match.group(1).replace(",", ""))
+     return None
+
+
+ def load_cache():
+     """Load cached collection data.
+
+     Returns:
+         dict of {collection_id: {size, uploader, images, quality, username}}
+     """
+     if CACHE_FILE.exists():
+         try:
+             with open(CACHE_FILE) as f:
+                 return json.load(f)
+         except Exception as e:
+             logger.warning(f"Failed to load cache: {e}")
+     return {}
+
+
+ def update_cache(ia_collections):
+     """Update cache with new IA search results.
+
+     Merges new collections into existing cache.
+
+     Returns:
+         Updated cache dict
+     """
+     cache = load_cache()
+
+     for item in ia_collections:
+         identifier = item.get("identifier")
+         if not identifier:
+             continue
+
+         info = parse_collection_info(identifier)
+         if not info:
+             logger.debug(f"Skipping non-mapillary collection: {identifier}")
+             continue
+
+         # Parse item data
+         size_bytes = item.get("item_size", 0)
+         if isinstance(size_bytes, str):
+             size_bytes = int(size_bytes)
+
+         image_count = extract_image_count(item.get("description"))
+
+         # Update cache entry
+         cache[identifier] = {
+             "size": size_bytes,
+             "uploader": item.get("uploader"),
+             "images": image_count,
+             "quality": info["quality"],
+             "username": info["username"],
+             "is_webp": info["is_webp"],
+         }
+
+     # Save updated cache
+     safe_json_save(CACHE_FILE, cache)
+     logger.info(f"Updated cache with {len(cache)} collections")
+
+     return cache
+
+
+ def aggregate_stats(cache):
+     """Aggregate statistics from cached collection data.
+
+     Returns:
+         dict with total and per-quality stats
+     """
+     stats = {
+         "total": {"collections": 0, "total_images": 0, "unique_images": 0, "bytes": 0},
+         "by_quality": {},
+         "users": set(),
+     }
+
+     # Track images per user for deduplication
+     user_images = {}  # {username: max_images_across_qualities}
+
+     for collection_id, data in cache.items():
+         images = data.get("images") or 0
+         size = data.get("size") or 0
+         quality = data.get("quality", "unknown")
+         username = data.get("username")
+
+         # Track user coverage
+         if username:
+             stats["users"].add(username)
+             # Keep maximum image count across all qualities for this user
+             if username not in user_images or images > user_images[username]:
+                 user_images[username] = images
+
+         # Total stats (collections, total images, and bytes)
+         stats["total"]["collections"] += 1
+         stats["total"]["total_images"] += images
+         stats["total"]["bytes"] += size
+
+         # Per-quality stats
+         if quality not in stats["by_quality"]:
+             stats["by_quality"][quality] = {"collections": 0, "images": 0, "bytes": 0}
+
+         stats["by_quality"][quality]["collections"] += 1
+         stats["by_quality"][quality]["images"] += images
+         stats["by_quality"][quality]["bytes"] += size
+
+     # Unique images is sum of max images per user
+     stats["total"]["unique_images"] = sum(user_images.values())
+
+     return stats
+
+
+ def format_stats(stats):
+     """Format statistics as human-readable text.
+
+     Args:
+         stats: Dict from aggregate_stats()
+
+     Returns:
+         Formatted string
+     """
+     TOTAL_MAPILLARY_IMAGES = 2_000_000_000  # 2 billion
+
+     output = []
+     output.append("=" * 70)
+     output.append("Mapillary Downloader - Archive.org Statistics")
+     output.append("=" * 70)
+     output.append("")
+
+     # Total stats
+     total = stats["total"]
+     unique_pct = (total["unique_images"] / TOTAL_MAPILLARY_IMAGES * 100) if total["unique_images"] else 0
+
+     output.append(f"Total Collections: {total['collections']:,}")
+     output.append(f"Total Users: {len(stats['users']):,}")
+     output.append(f"Total Images: {total['total_images']:,}")
+     output.append(f"Unique Images: {total['unique_images']:,} ({unique_pct:.3f}% of 2B)")
+     output.append(f"Total Size: {format_size(total['bytes'])}")
+     output.append("")
+
+     # Per-quality breakdown
+     output.append("By Quality:")
+     output.append("-" * 70)
+
+     # Sort by quality (original first, then numeric)
+     qualities = sorted(stats["by_quality"].items(), key=lambda x: (x[0] != "original", x[0]))
+
+     for quality, data in qualities:
+         pct = (data["images"] / TOTAL_MAPILLARY_IMAGES * 100) if data["images"] else 0
+         output.append(
+             f" {quality:8s} {data['collections']:3d} collections "
+             f"{data['images']:12,d} images ({pct:.3f}%) "
+             f"{format_size(data['bytes']):>8s}"
+         )
+
+     output.append("")
+     output.append(f"Cache: {CACHE_FILE}")
+
+     return "\n".join(output)
+
+
+ def show_stats(refresh=True):
+     """Show archive.org statistics for mapillary_downloader collections.
+
+     Args:
+         refresh: If True, fetch fresh data from IA. If False, use cache only.
+     """
+     if refresh:
+         try:
+             ia_collections = search_ia_collections()
+             cache = update_cache(ia_collections)
+         except Exception as e:
+             logger.error(f"Failed to fetch IA data: {e}")
+             logger.info("Using cached data...")
+             cache = load_cache()
+     else:
+         cache = load_cache()
+
+     if not cache:
+         logger.error("No cached data and failed to fetch from IA")
+         return
+
+     stats = aggregate_stats(cache)
+     print(format_stats(stats))
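
Illustrative note, not part of the diff: the stats module keys everything off the archive.org identifier naming scheme (`mapillary-<username>-<quality>[-webp]`) and the "Contains N images" phrase in the item description. A minimal, self-contained sketch of that parsing, reusing the same regular expressions as above on made-up search results:

```python
import re

# Same patterns as parse_collection_info / extract_image_count above.
ID_RE = re.compile(r"mapillary-(.+)-(256|1024|2048|original)(?:-webp)?$")
COUNT_RE = re.compile(r"Contains ([\d,]+) images")

# Hypothetical archive.org search results, for illustration only.
docs = [
    {"identifier": "mapillary-alice-2048-webp", "description": "Contains 12,345 images in daily tars."},
    {"identifier": "mapillary-bob-original", "description": "Contains 987 images."},
    {"identifier": "some-unrelated-item", "description": ""},
]

for doc in docs:
    m = ID_RE.match(doc["identifier"])
    if not m:
        print(doc["identifier"], "-> skipped")
        continue
    count = COUNT_RE.search(doc["description"] or "")
    print(doc["identifier"], "->", {
        "username": m.group(1),
        "quality": m.group(2),
        "is_webp": "-webp" in doc["identifier"],
        "images": int(count.group(1).replace(",", "")) if count else None,
    })
```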

{mapillary_downloader-0.6.0 → mapillary_downloader-0.7.0}/src/mapillary_downloader/tar_sequences.py

@@ -1,6 +1,7 @@
  """Tar sequence directories for efficient Internet Archive uploads."""

  import logging
+ import re
  import tarfile
  from pathlib import Path
  from mapillary_downloader.utils import format_size
@@ -9,7 +10,9 @@ logger = logging.getLogger("mapillary_downloader")


  def tar_sequence_directories(collection_dir):
-     """Tar all sequence directories in a collection for faster IA uploads.
+     """Tar all date directories in a collection for faster IA uploads.
+
+     Organizes by capture date (YYYY-MM-DD) for incremental archive.org uploads.

      Args:
          collection_dir: Path to collection directory (e.g., mapillary-user-quality/)
@@ -23,41 +26,44 @@ def tar_sequence_directories(collection_dir):
          logger.error(f"Collection directory not found: {collection_dir}")
          return 0, 0

-     # Find all bucket directories (skip special dirs)
-     # Now we tar entire bucket dirs (e.g., a/, b/, etc) to get ~62 tar files
+     # Find all date directories (skip special dirs)
+     # Date format: YYYY-MM-DD or unknown-date
      skip_dirs = {".meta", "__pycache__"}
-     bucket_dirs = []
+     date_dirs = []

      for item in collection_dir.iterdir():
          if item.is_dir() and item.name not in skip_dirs:
-             # Check if this is a bucket dir (single char)
-             if len(item.name) == 1:
-                 bucket_dirs.append(item)
+             # Check if this is a date dir (YYYY-MM-DD) or unknown-date
+             if re.match(r"\d{4}-\d{2}-\d{2}$", item.name) or item.name == "unknown-date":
+                 date_dirs.append(item)

-     if not bucket_dirs:
-         logger.info("No bucket directories to tar")
+     if not date_dirs:
+         logger.info("No date directories to tar")
          return 0, 0

-     logger.info(f"Tarring {len(bucket_dirs)} bucket directories...")
+     # Sort date directories chronologically (YYYY-MM-DD sorts naturally)
+     date_dirs = sorted(date_dirs, key=lambda x: x.name)
+
+     logger.info(f"Tarring {len(date_dirs)} date directories...")

      tarred_count = 0
      total_files = 0
      total_tar_bytes = 0

-     for bucket_dir in bucket_dirs:
-         bucket_name = bucket_dir.name
-         tar_path = collection_dir / f"{bucket_name}.tar"
+     for date_dir in date_dirs:
+         date_name = date_dir.name
+         tar_path = collection_dir / f"{date_name}.tar"

-         # Count files in bucket
-         files_to_tar = sorted([f for f in bucket_dir.rglob("*") if f.is_file()], key=lambda x: str(x))
+         # Count files in date directory
+         files_to_tar = sorted([f for f in date_dir.rglob("*") if f.is_file()], key=lambda x: str(x))
          file_count = len(files_to_tar)

          if file_count == 0:
-             logger.warning(f"Skipping empty bucket directory: {bucket_name}")
+             logger.warning(f"Skipping empty date directory: {date_name}")
              continue

          try:
-             logger.info(f"Tarring bucket '{bucket_name}' ({file_count} files)...")
+             logger.info(f"Tarring date '{date_name}' ({file_count} files)...")

              # Create reproducible uncompressed tar (WebP already compressed)
              with tarfile.open(tar_path, "w") as tar:
@@ -84,36 +90,34 @@ def tar_sequence_directories(collection_dir):
                  tar_size = tar_path.stat().st_size
                  total_tar_bytes += tar_size

-                 # Remove original bucket directory
-                 for file in bucket_dir.rglob("*"):
+                 # Remove original date directory
+                 for file in date_dir.rglob("*"):
                      if file.is_file():
                          file.unlink()

                  # Remove empty subdirs and main dir
-                 for subdir in list(bucket_dir.rglob("*")):
+                 for subdir in list(date_dir.rglob("*")):
                      if subdir.is_dir():
                          try:
                              subdir.rmdir()
                          except OSError:
                              pass  # Not empty yet

-                 bucket_dir.rmdir()
+                 date_dir.rmdir()

                  tarred_count += 1
                  total_files += file_count

-                 logger.info(f"Tarred bucket '{bucket_name}': {file_count:,} files, {format_size(tar_size)}")
+                 logger.info(f"Tarred date '{date_name}': {file_count:,} files, {format_size(tar_size)}")
              else:
                  logger.error(f"Tar file empty or not created: {tar_path}")
                  if tar_path.exists():
                      tar_path.unlink()

          except Exception as e:
-             logger.error(f"Error tarring bucket {bucket_name}: {e}")
+             logger.error(f"Error tarring date {date_name}: {e}")
              if tar_path.exists():
                  tar_path.unlink()

-     logger.info(
-         f"Tarred {tarred_count} sequences ({total_files:,} files, {format_size(total_tar_bytes)} total tar size)"
-     )
+     logger.info(f"Tarred {tarred_count} dates ({total_files:,} files, {format_size(total_tar_bytes)} total tar size)")
      return tarred_count, total_files

mapillary_downloader-0.7.0/src/mapillary_downloader/utils.py

@@ -0,0 +1,108 @@
+ """Utility functions for formatting and display."""
+
+ import json
+ import logging
+ import os
+ import time
+ from pathlib import Path
+ import requests
+ from requests.exceptions import RequestException
+
+ logger = logging.getLogger("mapillary_downloader")
+
+
+ def format_size(bytes_count):
+     """Format bytes as human-readable size.
+
+     Args:
+         bytes_count: Number of bytes
+
+     Returns:
+         Formatted string (e.g. "1.23 GB", "456.78 MB")
+     """
+     if bytes_count >= 1_000_000_000:
+         return f"{bytes_count / 1_000_000_000:.2f} GB"
+     if bytes_count >= 1_000_000:
+         return f"{bytes_count / 1_000_000:.2f} MB"
+     if bytes_count >= 1_000:
+         return f"{bytes_count / 1000:.2f} KB"
+     return f"{bytes_count} B"
+
+
+ def format_time(seconds):
+     """Format seconds as human-readable time.
+
+     Args:
+         seconds: Number of seconds
+
+     Returns:
+         Formatted string (e.g. "2h 15m", "45m 30s", "30s")
+     """
+     if seconds < 60:
+         return f"{int(seconds)}s"
+
+     minutes = int(seconds / 60)
+     remaining_seconds = int(seconds % 60)
+
+     if minutes < 60:
+         if remaining_seconds > 0:
+             return f"{minutes}m {remaining_seconds}s"
+         return f"{minutes}m"
+
+     hours = int(minutes / 60)
+     remaining_minutes = minutes % 60
+
+     if remaining_minutes > 0:
+         return f"{hours}h {remaining_minutes}m"
+     return f"{hours}h"
+
+
+ def safe_json_save(file_path, data):
+     """Atomically save JSON data to file.
+
+     Writes to temp file, then atomic rename to prevent corruption.
+
+     Args:
+         file_path: Path to JSON file
+         data: Data to serialize to JSON
+     """
+     file_path = Path(file_path)
+     file_path.parent.mkdir(parents=True, exist_ok=True)
+
+     temp_file = file_path.with_suffix(".json.tmp")
+     with open(temp_file, "w") as f:
+         json.dump(data, f, indent=2)
+         f.flush()
+         os.fsync(f.fileno())
+     temp_file.replace(file_path)
+
+
+ def http_get_with_retry(url, params=None, max_retries=5, base_delay=1.0, timeout=60):
+     """HTTP GET with exponential backoff retry.
+
+     Args:
+         url: URL to fetch
+         params: Optional query parameters
+         max_retries: Maximum retry attempts (default: 5)
+         base_delay: Initial delay in seconds (default: 1.0)
+         timeout: Request timeout in seconds (default: 60)
+
+     Returns:
+         requests.Response object
+
+     Raises:
+         requests.RequestException: If all retries exhausted
+     """
+     for attempt in range(max_retries):
+         try:
+             response = requests.get(url, params=params, timeout=timeout)
+             response.raise_for_status()
+             return response
+         except RequestException as e:
+             if attempt == max_retries - 1:
+                 raise
+
+             delay = base_delay * (2**attempt)
+             logger.warning(f"Request failed (attempt {attempt + 1}/{max_retries}): {e}")
+             logger.info(f"Retrying in {delay:.1f} seconds...")
+             time.sleep(delay)
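
Illustrative note, not part of the diff: the two new helpers are shared by the downloader, the stats module, and the worker. `http_get_with_retry` retries with exponential backoff (1 s, 2 s, 4 s, ... between attempts) and `safe_json_save` writes through a temp file plus an atomic rename. A minimal usage sketch against the archive.org search endpoint used by `ia_stats`; the output path is hypothetical.

```python
from mapillary_downloader.utils import http_get_with_retry, safe_json_save

# Up to 3 attempts, sleeping 1s then 2s between failures, 60s timeout per request.
response = http_get_with_retry(
    "https://archive.org/advancedsearch.php",
    params={"q": "mapillary_downloader:*", "output": "json", "rows": 10},
    max_retries=3,
    base_delay=1.0,
)

# Persist the result atomically: written to a .json.tmp file, fsynced, then renamed.
safe_json_save("./mapillary_data/.stats-sample.json", response.json())
```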

{mapillary_downloader-0.6.0 → mapillary_downloader-0.7.0}/src/mapillary_downloader/worker.py

@@ -3,11 +3,12 @@
  import os
  import signal
  import tempfile
+ from datetime import datetime
  from pathlib import Path
  import requests
- from requests.exceptions import RequestException
  from mapillary_downloader.exif_writer import write_exif_to_image
  from mapillary_downloader.webp_converter import convert_to_webp
+ from mapillary_downloader.utils import http_get_with_retry


  def worker_process(work_queue, result_queue, worker_id):
@@ -69,16 +70,25 @@ def download_and_convert_image(image_data, output_dir, quality, convert_webp, se
      if not image_url:
          return (image_id, 0, False, f"No {quality} URL")

-     # Determine final output directory - organize by first char of sequence ID
+     # Determine final output directory - organize by capture date
      output_dir = Path(output_dir)
      sequence_id = image_data.get("sequence")
+
+     # Extract date from captured_at timestamp (milliseconds since epoch)
+     captured_at = image_data.get("captured_at")
+     if captured_at:
+         # Convert to UTC date string (YYYY-MM-DD)
+         date_str = datetime.utcfromtimestamp(captured_at / 1000).strftime("%Y-%m-%d")
+     else:
+         # Fallback for missing timestamp (should be rare per API docs)
+         date_str = "unknown-date"
+
      if sequence_id:
-         # Use first character as bucket (gives us ~62 dirs instead of millions)
-         first_char = sequence_id[0]
-         img_dir = output_dir / first_char / sequence_id
+         img_dir = output_dir / date_str / sequence_id
          img_dir.mkdir(parents=True, exist_ok=True)
      else:
-         img_dir = output_dir
+         img_dir = output_dir / date_str
+         img_dir.mkdir(parents=True, exist_ok=True)

      # If converting to WebP, use /tmp for intermediate JPEG
      # Otherwise write JPEG directly to final location
@@ -90,19 +100,18 @@ def download_and_convert_image(image_data, output_dir, quality, convert_webp, se
          jpg_path = img_dir / f"{image_id}.jpg"
          final_path = jpg_path

-     # Download image (using session passed from worker)
+     # Download image with retry logic
      bytes_downloaded = 0

      try:
-         # 60 second timeout for entire download (connection + read)
-         response = session.get(image_url, stream=True, timeout=60)
-         response.raise_for_status()
+         # Use retry logic with 3 attempts for image downloads
+         response = http_get_with_retry(image_url, max_retries=3, base_delay=1.0, timeout=60)

          with open(jpg_path, "wb") as f:
              for chunk in response.iter_content(chunk_size=8192):
                  f.write(chunk)
                  bytes_downloaded += len(chunk)
-     except RequestException as e:
+     except Exception as e:
          return (image_id, 0, False, f"Download failed: {e}")

      # Write EXIF metadata
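
Illustrative note, not part of the diff: the new date bucketing hinges on `captured_at`, which the worker code above treats as milliseconds since the epoch. A minimal sketch of that conversion with a made-up timestamp:

```python
from datetime import datetime

captured_at = 1705312800000  # hypothetical value: milliseconds since epoch

# Same conversion as the worker: a UTC date string used as the directory bucket.
date_str = datetime.utcfromtimestamp(captured_at / 1000).strftime("%Y-%m-%d")
print(date_str)  # -> 2024-01-15

# Images without a timestamp fall back to the "unknown-date" bucket.
```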

mapillary_downloader-0.6.0/src/mapillary_downloader/utils.py

@@ -1,47 +0,0 @@
- """Utility functions for formatting and display."""
-
-
- def format_size(bytes_count):
-     """Format bytes as human-readable size.
-
-     Args:
-         bytes_count: Number of bytes
-
-     Returns:
-         Formatted string (e.g. "1.23 GB", "456.78 MB")
-     """
-     if bytes_count >= 1_000_000_000:
-         return f"{bytes_count / 1_000_000_000:.2f} GB"
-     if bytes_count >= 1_000_000:
-         return f"{bytes_count / 1_000_000:.2f} MB"
-     if bytes_count >= 1_000:
-         return f"{bytes_count / 1000:.2f} KB"
-     return f"{bytes_count} B"
-
-
- def format_time(seconds):
-     """Format seconds as human-readable time.
-
-     Args:
-         seconds: Number of seconds
-
-     Returns:
-         Formatted string (e.g. "2h 15m", "45m 30s", "30s")
-     """
-     if seconds < 60:
-         return f"{int(seconds)}s"
-
-     minutes = int(seconds / 60)
-     remaining_seconds = int(seconds % 60)
-
-     if minutes < 60:
-         if remaining_seconds > 0:
-             return f"{minutes}m {remaining_seconds}s"
-         return f"{minutes}m"
-
-     hours = int(minutes / 60)
-     remaining_minutes = minutes % 60
-
-     if remaining_minutes > 0:
-         return f"{hours}h {remaining_minutes}m"
-     return f"{hours}h"