pixeltable 0.2.19__py3-none-any.whl → 0.2.21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (88)
  1. pixeltable/__init__.py +7 -19
  2. pixeltable/__version__.py +2 -2
  3. pixeltable/catalog/__init__.py +7 -7
  4. pixeltable/catalog/globals.py +3 -0
  5. pixeltable/catalog/insertable_table.py +9 -7
  6. pixeltable/catalog/table.py +220 -143
  7. pixeltable/catalog/table_version.py +36 -18
  8. pixeltable/catalog/table_version_path.py +0 -8
  9. pixeltable/catalog/view.py +3 -3
  10. pixeltable/dataframe.py +9 -24
  11. pixeltable/env.py +107 -36
  12. pixeltable/exceptions.py +7 -4
  13. pixeltable/exec/__init__.py +1 -1
  14. pixeltable/exec/aggregation_node.py +22 -15
  15. pixeltable/exec/component_iteration_node.py +62 -41
  16. pixeltable/exec/data_row_batch.py +7 -7
  17. pixeltable/exec/exec_node.py +35 -7
  18. pixeltable/exec/expr_eval_node.py +2 -1
  19. pixeltable/exec/in_memory_data_node.py +9 -9
  20. pixeltable/exec/sql_node.py +265 -136
  21. pixeltable/exprs/__init__.py +1 -0
  22. pixeltable/exprs/data_row.py +30 -19
  23. pixeltable/exprs/expr.py +15 -14
  24. pixeltable/exprs/expr_dict.py +55 -0
  25. pixeltable/exprs/expr_set.py +21 -15
  26. pixeltable/exprs/function_call.py +21 -8
  27. pixeltable/exprs/json_path.py +3 -6
  28. pixeltable/exprs/rowid_ref.py +2 -2
  29. pixeltable/exprs/sql_element_cache.py +5 -1
  30. pixeltable/ext/functions/whisperx.py +7 -2
  31. pixeltable/func/callable_function.py +2 -2
  32. pixeltable/func/function_registry.py +6 -7
  33. pixeltable/func/query_template_function.py +11 -12
  34. pixeltable/func/signature.py +17 -15
  35. pixeltable/func/udf.py +0 -4
  36. pixeltable/functions/__init__.py +1 -1
  37. pixeltable/functions/audio.py +4 -6
  38. pixeltable/functions/globals.py +86 -42
  39. pixeltable/functions/huggingface.py +12 -14
  40. pixeltable/functions/image.py +59 -45
  41. pixeltable/functions/json.py +0 -1
  42. pixeltable/functions/mistralai.py +2 -2
  43. pixeltable/functions/openai.py +22 -25
  44. pixeltable/functions/string.py +50 -50
  45. pixeltable/functions/timestamp.py +20 -20
  46. pixeltable/functions/together.py +26 -12
  47. pixeltable/functions/video.py +11 -20
  48. pixeltable/functions/whisper.py +2 -20
  49. pixeltable/globals.py +57 -56
  50. pixeltable/index/base.py +2 -2
  51. pixeltable/index/btree.py +7 -7
  52. pixeltable/index/embedding_index.py +8 -10
  53. pixeltable/io/external_store.py +11 -5
  54. pixeltable/io/globals.py +3 -1
  55. pixeltable/io/hf_datasets.py +4 -4
  56. pixeltable/io/label_studio.py +6 -6
  57. pixeltable/io/parquet.py +14 -13
  58. pixeltable/iterators/document.py +10 -8
  59. pixeltable/iterators/video.py +10 -1
  60. pixeltable/metadata/__init__.py +3 -2
  61. pixeltable/metadata/converters/convert_14.py +4 -2
  62. pixeltable/metadata/converters/convert_15.py +1 -1
  63. pixeltable/metadata/converters/convert_19.py +1 -0
  64. pixeltable/metadata/converters/convert_20.py +1 -1
  65. pixeltable/metadata/converters/util.py +9 -8
  66. pixeltable/metadata/schema.py +32 -21
  67. pixeltable/plan.py +136 -154
  68. pixeltable/store.py +51 -36
  69. pixeltable/tool/create_test_db_dump.py +7 -7
  70. pixeltable/tool/doc_plugins/griffe.py +3 -34
  71. pixeltable/tool/mypy_plugin.py +32 -0
  72. pixeltable/type_system.py +243 -60
  73. pixeltable/utils/arrow.py +10 -9
  74. pixeltable/utils/coco.py +4 -4
  75. pixeltable/utils/documents.py +1 -1
  76. pixeltable/utils/filecache.py +131 -84
  77. pixeltable/utils/formatter.py +1 -1
  78. pixeltable/utils/http_server.py +2 -5
  79. pixeltable/utils/media_store.py +6 -6
  80. pixeltable/utils/pytorch.py +10 -11
  81. pixeltable/utils/sql.py +2 -1
  82. {pixeltable-0.2.19.dist-info → pixeltable-0.2.21.dist-info}/METADATA +16 -7
  83. pixeltable-0.2.21.dist-info/RECORD +148 -0
  84. pixeltable/utils/help.py +0 -11
  85. pixeltable-0.2.19.dist-info/RECORD +0 -147
  86. {pixeltable-0.2.19.dist-info → pixeltable-0.2.21.dist-info}/LICENSE +0 -0
  87. {pixeltable-0.2.19.dist-info → pixeltable-0.2.21.dist-info}/WHEEL +0 -0
  88. {pixeltable-0.2.19.dist-info → pixeltable-0.2.21.dist-info}/entry_points.txt +0 -0
pixeltable/utils/coco.py CHANGED
@@ -1,12 +1,12 @@
- from typing import List, Dict, Any, Set
- from pathlib import Path
  import json
+ from pathlib import Path
+ from typing import Any, Dict, List, Set

  import PIL

+ import pixeltable as pxt
  import pixeltable.exceptions as excs

-
  format_msg = """

  Required format:
@@ -48,7 +48,7 @@ def _verify_input_dict(input_dict: Dict[str, Any]) -> None:
      if not isinstance(annotation['category'], (str, int)):
          raise excs.Error(f'Value for "category" is not a str or int: {annotation}{format_msg}')

- def write_coco_dataset(df: 'pixeltable.DataFrame', dest_path: Path) -> Path:
+ def write_coco_dataset(df: pxt.DataFrame, dest_path: Path) -> Path:
      """Export a DataFrame result set as a COCO dataset in dest_path and return the path of the data.json file."""
      # TODO: validate schema
      if len(df._select_list_exprs) != 1 or not df._select_list_exprs[0].col_type.is_json_type():
pixeltable/utils/documents.py CHANGED
@@ -2,7 +2,7 @@ import dataclasses
  from typing import Optional

  import bs4
- import fitz  # (pymupdf)
+ import fitz  # type: ignore[import-untyped]
  import puremagic

  import pixeltable.type_system as ts
pixeltable/utils/filecache.py CHANGED
@@ -1,28 +1,33 @@
  from __future__ import annotations
- from typing import Optional, List, Tuple, Dict
- from collections import OrderedDict, defaultdict, namedtuple
- import os
+
  import glob
- from pathlib import Path
- from time import time
+ import hashlib
  import logging
+ import os
+ import warnings
+ from collections import OrderedDict, defaultdict, namedtuple
+ from dataclasses import dataclass
+ from datetime import datetime, timezone
+ from pathlib import Path
+ from typing import Optional
  from uuid import UUID
- import hashlib

+ import pixeltable.exceptions as excs
  from pixeltable.env import Env

-
  _logger = logging.getLogger('pixeltable')

+ @dataclass
  class CacheEntry:
-     def __init__(self, key: str, tbl_id: UUID, col_id: int, size: int, last_accessed_ts: int, ext: str):
-         self.key = key
-         self.tbl_id = tbl_id
-         self.col_id = col_id
-         self.size = size
-         self.last_accessed_ts = last_accessed_ts
-         self.ext = ext

+     key: str
+     tbl_id: UUID
+     col_id: int
+     size: int
+     last_used: datetime
+     ext: str
+
+     @property
      def path(self) -> Path:
          return Env.get().file_cache_dir / f'{self.tbl_id.hex}_{self.col_id}_{self.key}{self.ext}'

@@ -34,7 +39,11 @@ class CacheEntry:
          col_id = int(components[1])
          key = components[2]
          file_info = os.stat(str(path))
-         return cls(key, tbl_id, col_id, file_info.st_size, file_info.st_mtime, path.suffix)
+         # We use the last modified time (file_info.st_mtime) as the timestamp; `FileCache` will touch the file
+         # each time it is retrieved, so that the mtime of the file will always represent the last used time of
+         # the cache entry.
+         last_used = datetime.fromtimestamp(file_info.st_mtime, tz=timezone.utc)
+         return cls(key, tbl_id, col_id, file_info.st_size, last_used, path.suffix)


  class FileCache:
@@ -45,31 +54,60 @@ class FileCache:
      access of a cache entries is its file's mtime.

      TODO:
-     - enforce a maximum capacity with LRU eviction
      - implement MRU eviction for queries that exceed the capacity
      """
-     _instance: Optional[FileCache] = None
-     ColumnStats = namedtuple('FileCacheColumnStats', ['tbl_id', 'col_id', 'num_files', 'total_size'])
-     CacheStats = namedtuple(
-         'FileCacheStats', ['total_size', 'num_requests', 'num_hits', 'num_evictions', 'column_stats'])
+     __instance: Optional[FileCache] = None
+
+     cache: OrderedDict[str, CacheEntry]
+     total_size: int
+     capacity_bytes: int
+     num_requests: int
+     num_hits: int
+     num_evictions: int
+     keys_retrieved: set[str]  # keys retrieved (downloaded or accessed) this session
+     keys_evicted_after_retrieval: set[str]  # keys that were evicted after having been retrieved this session
+
+     # A key is added to this set when it is already present in `keys_evicted_this_session` and is downloaded again.
+     # In other words, for a key to be added to this set, the following sequence of events must occur in this order:
+     # - It is retrieved during this session (either because it was newly downloaded, or because it was in the cache
+     #   at the start of the session and was accessed at some point during the session)
+     # - It is subsequently evicted
+     # - It is subsequently retrieved a second time ("download after a previous retrieval")
+     # The contents of this set will be used to generate a more informative warning.
+     evicted_working_set_keys: set[str]
+     new_redownload_witnessed: bool  # whether a new re-download has occurred since the last time a warning was issued
+
+     FileCacheColumnStats = namedtuple('FileCacheColumnStats', ('tbl_id', 'col_id', 'num_files', 'total_size'))
+     FileCacheStats = namedtuple(
+         'FileCacheStats',
+         ('total_size', 'num_requests', 'num_hits', 'num_evictions', 'column_stats')
+     )

      @classmethod
      def get(cls) -> FileCache:
-         if cls._instance is None:
-             cls._instance = cls()
-         return cls._instance
+         if cls.__instance is None:
+             cls.init()
+         return cls.__instance
+
+     @classmethod
+     def init(cls) -> None:
+         cls.__instance = cls()

      def __init__(self):
-         self.cache: OrderedDict[str, CacheEntry] = OrderedDict()  # ordered by entry.last_accessed_ts
+         self.cache = OrderedDict()
          self.total_size = 0
-         #self.capacity = Env.get().max_filecache_size
+         self.capacity_bytes = int(Env.get()._file_cache_size_g * (1 << 30))
          self.num_requests = 0
          self.num_hits = 0
          self.num_evictions = 0
+         self.keys_retrieved = set()
+         self.keys_evicted_after_retrieval = set()
+         self.evicted_working_set_keys = set()
+         self.new_redownload_witnessed = False
          paths = glob.glob(str(Env.get().file_cache_dir / '*'))
          entries = [CacheEntry.from_file(Path(path_str)) for path_str in paths]
-         # we need to insert entries in order of last_accessed_ts
-         entries.sort(key=lambda e: e.last_accessed_ts)
+         # we need to insert entries in access order
+         entries.sort(key=lambda e: e.last_used)
          for entry in entries:
              self.cache[entry.key] = entry
              self.total_size += entry.size
@@ -82,30 +120,43 @@ class FileCache:
      def num_files(self, tbl_id: Optional[UUID] = None) -> int:
          if tbl_id is None:
              return len(self.cache)
-         entries = [e for e in self.cache.values() if e.tbl_id == tbl_id]
-         return len(entries)
+         return sum(e.tbl_id == tbl_id for e in self.cache.values())

-     def clear(self, tbl_id: Optional[UUID] = None, capacity: Optional[int] = None) -> None:
+     def clear(self, tbl_id: Optional[UUID] = None) -> None:
          """
          For testing purposes: allow resetting capacity and stats.
          """
-         self.num_requests, self.num_hits, self.num_evictions = 0, 0, 0
-         entries = list(self.cache.values())  # list(): avoid dealing with values() return type
-         if tbl_id is not None:
-             entries = [e for e in entries if e.tbl_id == tbl_id]
-             _logger.debug(f'clearing {len(entries)} entries from file cache for table {tbl_id}')
+         if tbl_id is None:
+             # We need to store the entries to remove in a list, because we can't remove items from a dict while iterating
+             entries_to_remove = list(self.cache.values())
+             _logger.debug(f'clearing {self.num_files()} entries from file cache')
+             self.num_requests, self.num_hits, self.num_evictions = 0, 0, 0
+             self.keys_retrieved.clear()
+             self.keys_evicted_after_retrieval.clear()
+             self.new_redownload_witnessed = False
          else:
-             _logger.debug(f'clearing {len(entries)} entries from file cache')
-         for entry in entries:
+             entries_to_remove = [e for e in self.cache.values() if e.tbl_id == tbl_id]
+             _logger.debug(f'clearing {self.num_files(tbl_id)} entries from file cache for table {tbl_id}')
+         for entry in entries_to_remove:
+             os.remove(entry.path)
              del self.cache[entry.key]
              self.total_size -= entry.size
-             os.remove(entry.path())
-         # if capacity is not None:
-         #     self.capacity = capacity
-         # else:
-         #     # need to reset to default
-         #     self.capacity = Env.get().max_filecache_size
-         # _logger.debug(f'setting file cache capacity to {self.capacity}')
+
+     def emit_eviction_warnings(self) -> None:
+         if self.new_redownload_witnessed:
+             # Compute the additional capacity that would be needed in order to retain all the re-downloaded files
+             extra_capacity_needed = sum(self.cache[key].size for key in self.evicted_working_set_keys)
+             suggested_cache_size = self.capacity_bytes + extra_capacity_needed + (1 << 30)
+             warnings.warn(
+                 f'{len(self.evicted_working_set_keys)} media file(s) had to be downloaded multiple times this session, '
+                 'because they were evicted\nfrom the file cache after their first access. The total size '
+                 f'of the evicted file(s) is {round(extra_capacity_needed / (1 << 30), 1)} GiB.\n'
+                 f'Consider increasing the cache size to at least {round(suggested_cache_size / (1 << 30), 1)} GiB '
+                 f'(it is currently {round(self.capacity_bytes / (1 << 30), 1)} GiB).\n'
+                 f'You can do this by setting the value of `file_cache_size_g` in: {str(Env.get()._config_file)}',
+                 excs.PixeltableWarning
+             )
+             self.new_redownload_witnessed = False

      def _url_hash(self, url: str) -> str:
          h = hashlib.sha256()
@@ -120,75 +171,71 @@ class FileCache:
              _logger.debug(f'file cache miss for {url}')
              return None
          # update mtime and cache
-         path = entry.path()
+         path = entry.path
          path.touch(exist_ok=True)
          file_info = os.stat(str(path))
-         entry.last_accessed_ts = file_info.st_mtime
+         entry.last_used = datetime.fromtimestamp(file_info.st_mtime)
          self.cache.move_to_end(key, last=True)
          self.num_hits += 1
+         self.keys_retrieved.add(key)
          _logger.debug(f'file cache hit for {url}')
          return path

-     # def can_admit(self, query_ts: int) -> bool:
-     #     if self.total_size + self.avg_file_size <= self.capacity:
-     #         return True
-     #     assert len(self.cache) > 0
-     #     # check whether we can evict the current lru entry
-     #     lru_entry = next(iter(self.cache.values()))
-     #     if lru_entry.last_accessed_ts >= query_ts:
-     #         # the current query brought this entry in: we're not going to evict it
-     #         return False
-     #     return True
-
      def add(self, tbl_id: UUID, col_id: int, url: str, path: Path) -> Path:
          """Adds url at 'path' to cache and returns its new path.
          'path' will not be accessible after this call. Retains the extension of 'path'.
          """
          file_info = os.stat(str(path))
-         _ = time()
-         #if self.total_size + file_info.st_size > self.capacity:
-         if False:
-             if len(self.cache) == 0:
-                 # nothing to evict
-                 return
-             # evict entries until we're below the limit or until we run into entries the current query brought in
-             while True:
-                 lru_entry = next(iter(self.cache.values()))
-                 if lru_entry.last_accessed_ts >= query_ts:
-                     # the current query brought this entry in: switch to MRU and ignore this put()
-                     _logger.debug('file cache switched to MRU')
-                     return
-                 self.cache.popitem(last=False)
-                 self.total_size -= lru_entry.size
-                 self.num_evictions += 1
-                 os.remove(str(lru_entry.path()))
-                 _logger.debug(f'evicted entry for cell {lru_entry.cell_id} from file cache')
-                 if self.total_size + file_info.st_size <= self.capacity:
-                     break
-
+         self.ensure_capacity(file_info.st_size)
          key = self._url_hash(url)
          assert key not in self.cache
-         entry = CacheEntry(key, tbl_id, col_id, file_info.st_size, file_info.st_mtime, path.suffix)
+         if key in self.keys_evicted_after_retrieval:
+             # This key was evicted after being retrieved earlier this session, and is now being retrieved again.
+             # Add it to `keys_multiply_downloaded` so that we may generate a warning later.
+             self.evicted_working_set_keys.add(key)
+             self.new_redownload_witnessed = True
+         self.keys_retrieved.add(key)
+         entry = CacheEntry(key, tbl_id, col_id, file_info.st_size, datetime.fromtimestamp(file_info.st_mtime), path.suffix)
          self.cache[key] = entry
          self.total_size += entry.size
-         new_path = entry.path()
+         new_path = entry.path
          os.rename(str(path), str(new_path))
+         new_path.touch(exist_ok=True)
          _logger.debug(f'added entry for cell {url} to file cache')
          return new_path

-     def stats(self) -> CacheStats:
+     def ensure_capacity(self, size: int) -> None:
+         """
+         Evict entries from the cache until there is at least 'size' bytes of free space.
+         """
+         while len(self.cache) > 0 and self.total_size + size > self.capacity_bytes:
+             _, lru_entry = self.cache.popitem(last=False)
+             self.total_size -= lru_entry.size
+             self.num_evictions += 1
+             if lru_entry.key in self.keys_retrieved:
+                 # This key was retrieved at some point earlier this session and is now being evicted.
+                 # Make a record of the eviction, so that we can generate a warning later if the key is retrieved again.
+                 self.keys_evicted_after_retrieval.add(lru_entry.key)
+             os.remove(str(lru_entry.path))
+             _logger.debug(f'evicted entry for cell {lru_entry.key} from file cache (of size {lru_entry.size // (1 << 20)} MiB)')
+
+     def set_capacity(self, capacity_bytes: int) -> None:
+         self.capacity_bytes = capacity_bytes
+         self.ensure_capacity(0)  # evict entries if necessary
+
+     def stats(self) -> FileCacheStats:
          # collect column stats
          # (tbl_id, col_id) -> (num_files, total_size)
-         d: Dict[Tuple[int, int], List[int]] = defaultdict(lambda: [0, 0])
+         d: dict[tuple[UUID, int], list[int]] = defaultdict(lambda: [0, 0])
          for entry in self.cache.values():
              t = d[(entry.tbl_id, entry.col_id)]
              t[0] += 1
              t[1] += entry.size
          col_stats = [
-             self.ColumnStats(tbl_id, col_id, num_files, size) for (tbl_id, col_id), (num_files, size) in d.items()
+             self.FileCacheColumnStats(tbl_id, col_id, num_files, size) for (tbl_id, col_id), (num_files, size) in d.items()
          ]
          col_stats.sort(key=lambda e: e[3], reverse=True)
-         return self.CacheStats(self.total_size, self.num_requests, self.num_hits, self.num_evictions, col_stats)
+         return self.FileCacheStats(self.total_size, self.num_requests, self.num_hits, self.num_evictions, col_stats)

      def debug_print(self) -> None:
          for entry in self.cache.values():
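A note on the FileCache rewrite above: the eviction logic that was previously commented out is now a working LRU policy. The `OrderedDict` holds entries in last-used order (startup sorts them by the mtime-derived `last_used`, and a hit calls `move_to_end`), `ensure_capacity` pops from the LRU end, and the `keys_retrieved` / `keys_evicted_after_retrieval` / `evicted_working_set_keys` sets drive the new re-download warning, whose suggested capacity is the current capacity plus the size of the re-downloaded files plus 1 GiB of headroom. A minimal standalone sketch of that bookkeeping (illustrative names only, not the pixeltable API):

```python
from collections import OrderedDict

class LruSketch:
    """Simplified model of FileCache's LRU eviction and re-download tracking."""

    def __init__(self, capacity: int) -> None:
        self.cache: 'OrderedDict[str, int]' = OrderedDict()  # key -> size, LRU entry first
        self.total = 0
        self.capacity = capacity
        self.retrieved: set = set()                # keys downloaded or accessed this session
        self.evicted_after_retrieval: set = set()  # retrieved, then evicted
        self.redownloaded: set = set()             # retrieved, evicted, then downloaded again

    def hit(self, key: str) -> None:
        self.cache.move_to_end(key)  # mark as most recently used
        self.retrieved.add(key)

    def add(self, key: str, size: int) -> None:
        # evict LRU entries until the new entry fits (cf. FileCache.ensure_capacity)
        while self.cache and self.total + size > self.capacity:
            old_key, old_size = self.cache.popitem(last=False)
            self.total -= old_size
            if old_key in self.retrieved:
                self.evicted_after_retrieval.add(old_key)
        if key in self.evicted_after_retrieval:
            self.redownloaded.add(key)  # this is what triggers the capacity warning
        self.retrieved.add(key)
        self.cache[key] = size
        self.total += size

c = LruSketch(capacity=2)
c.add('a', 1); c.add('b', 1)  # cache full
c.hit('a')                    # 'b' is now the LRU entry
c.add('c', 1)                 # evicts 'b', which had been retrieved
c.add('b', 1)                 # 'b' comes back: a re-download worth warning about
assert c.redownloaded == {'b'}
```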
pixeltable/utils/formatter.py CHANGED
@@ -201,7 +201,7 @@ class Formatter:
          # try generating a thumbnail for different types and use that if successful
          if file_path.lower().endswith('.pdf'):
              try:
-                 import fitz
+                 import fitz  # type: ignore[import-untyped]

                  doc = fitz.open(file_path)
                  p = doc.get_page_pixmap(0)
pixeltable/utils/http_server.py CHANGED
@@ -1,11 +1,8 @@
  import http
  import http.server
  import logging
- import urllib
- import posixpath
  import pathlib
- import os
- import string
+ import urllib

  _logger = logging.getLogger('pixeltable.http.server')

@@ -43,7 +40,7 @@ class AbsolutePathHandler(http.server.SimpleHTTPRequestHandler):
      def log_message(self, format, *args) -> None:
          """override logging to stderr in http.server.BaseHTTPRequestHandler"""
          message = format % args
-         _logger.info(message.translate(self._control_char_table))
+         _logger.info(message.translate(self._control_char_table))  # type: ignore[attr-defined]


  class LoggingHTTPServer(http.server.ThreadingHTTPServer):
pixeltable/utils/media_store.py CHANGED
@@ -3,9 +3,9 @@ import os
  import re
  import shutil
  import uuid
- from typing import Optional, List, Tuple, Dict
- from pathlib import Path
  from collections import defaultdict
+ from pathlib import Path
+ from typing import Optional
  from uuid import UUID

  from pixeltable.env import Env
@@ -46,8 +46,8 @@ class MediaStore:
          else:
              # Remove only the elements for the specified version.
              paths = glob.glob(str(Env.get().media_dir / tbl_id.hex) + f'/**/{tbl_id.hex}_*_{version}_*', recursive=True)
-             for path in paths:
-                 os.remove(path)
+             for p in paths:
+                 os.remove(p)

      @classmethod
      def count(cls, tbl_id: UUID) -> int:
@@ -58,10 +58,10 @@ class MediaStore:
          return len(paths)

      @classmethod
-     def stats(cls) -> List[Tuple[int, int, int, int]]:
+     def stats(cls) -> list[tuple[UUID, int, int, int]]:
          paths = glob.glob(str(Env.get().media_dir) + "/**", recursive=True)
          # key: (tbl_id, col_id), value: (num_files, size)
-         d: Dict[Tuple[UUID, int], List[int]] = defaultdict(lambda: [0, 0])
+         d: dict[tuple[UUID, int], list[int]] = defaultdict(lambda: [0, 0])
          for p in paths:
              if not os.path.isdir(p):
                  matched = re.match(cls.pattern, Path(p).name)
pixeltable/utils/pytorch.py CHANGED
@@ -2,13 +2,13 @@ import datetime
  import io
  import json
  from pathlib import Path
- from typing import Any, Dict, Iterator
+ from typing import Any, Iterator, Sequence

  import numpy as np
  import PIL.Image
- import pyarrow as pa
  import torch
  import torch.utils.data
+ import torchvision  # type: ignore[import-untyped]
  from pyarrow import parquet

  from pixeltable.type_system import ColumnType
@@ -41,7 +41,7 @@ class PixeltablePytorchDataset(torch.utils.data.IterableDataset):
          with column_type_path.open() as f:
              column_types = json.load(f)
          self.column_types = {k: ColumnType.from_dict(v) for k, v in column_types.items()}
-         self.part_metadata = parquet.ParquetDataset(path).files
+         self.part_metadata: list = parquet.ParquetDataset(str(path)).files

      def _unmarshall(self, k: str, v: Any) -> Any:
          if self.column_types[k].is_image_type():
@@ -54,7 +54,6 @@ class PixeltablePytorchDataset(torch.utils.data.IterableDataset):
              return arr

          assert self.image_format == "pt"
-         import torchvision

          # use arr instead of im in ToTensor() to guarantee array input
          # to torch.from_numpy is writable. Using im is a suspected cause of
@@ -77,17 +76,17 @@ class PixeltablePytorchDataset(torch.utils.data.IterableDataset):
          assert not isinstance(v, np.ndarray)  # all array outputs should be handled above
          return v

-     def __iter__(self) -> Iterator[Dict[str, Any]]:
-         import pixeltable.utils.arrow as arrow
+     def __iter__(self) -> Iterator[dict[str, Any]]:
+         from pixeltable.utils import arrow
+
          worker_info = torch.utils.data.get_worker_info()

-         if worker_info is None:
-             part_list = range(len(self.part_metadata))
-         else:
-             part_list = [ i for i in part_list if (i % worker_info.num_workers) == worker_info.id ]
+         part_list: Sequence[int] = range(len(self.part_metadata))
+         if worker_info is not None:
+             part_list = [i for i in part_list if (i % worker_info.num_workers) == worker_info.id]

          for part_no in part_list:
              pqf = parquet.ParquetFile(self.part_metadata[part_no])
              for batch in pqf.iter_batches():
                  for tup in arrow.iter_tuples(batch):
-                     yield {k: self._unmarshall(k, v) for k, v in tup.items()}
+                     yield {k: self._unmarshall(k, v) for k, v in tup.items()}
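One detail worth calling out in the `__iter__` rewrite above: the old code had a latent bug (its `else:` branch referenced `part_list` before it was ever assigned). The new version initializes `part_list` to the full range first, then filters it so that Parquet part `i` is read only by the DataLoader worker satisfying `i % num_workers == worker_id`. A hedged sketch of that sharding pattern with a toy `IterableDataset` (not the pixeltable class):

```python
import torch
import torch.utils.data


class ShardedParts(torch.utils.data.IterableDataset):
    """Toy dataset that splits a list of parts across DataLoader workers."""

    def __init__(self, parts: list) -> None:
        self.parts = parts  # each part is a list of items (stand-in for a Parquet file)

    def __iter__(self):
        worker_info = torch.utils.data.get_worker_info()
        part_ids = range(len(self.parts))
        if worker_info is not None:
            # same rule as the diff: part i belongs to worker (i % num_workers)
            part_ids = [i for i in part_ids if i % worker_info.num_workers == worker_info.id]
        for i in part_ids:
            yield from self.parts[i]


if __name__ == '__main__':
    ds = ShardedParts([[0, 1], [2, 3], [4, 5], [6, 7]])
    loader = torch.utils.data.DataLoader(ds, num_workers=2)
    # every item appears exactly once; without the modulo filter, each worker
    # would re-read all parts and every item would appear num_workers times
    print(sorted(int(x) for x in loader))  # [0, 1, 2, 3, 4, 5, 6, 7]
```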
pixeltable/utils/sql.py CHANGED
@@ -1,10 +1,11 @@
  import logging

  import sqlalchemy as sql
+ from sqlalchemy.dialects import postgresql


  def log_stmt(logger: logging.Logger, stmt) -> None:
-     logger.debug(f'executing {str(stmt.compile(dialect=sql.dialects.postgresql.dialect()))}')
+     logger.debug(f'executing {str(stmt.compile(dialect=postgresql.dialect()))}')

  def log_explain(logger: logging.Logger, stmt: sql.sql.ClauseElement, conn: sql.engine.Connection) -> None:
      try:
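The `sql.py` change swaps attribute access (`sql.dialects.postgresql`) for an explicit `from sqlalchemy.dialects import postgresql`, which satisfies type checkers and does not rely on the submodule having been imported elsewhere. For readers unfamiliar with what `log_stmt` is doing: compiling a statement against an explicit dialect renders the Postgres-flavored SQL string without needing a live connection. A small self-contained example (the table and columns are made up for illustration):

```python
import sqlalchemy as sql
from sqlalchemy.dialects import postgresql

# hypothetical table, for illustration only
media = sql.table('media', sql.column('tbl_id'), sql.column('size'))
stmt = sql.select(media.c.tbl_id).where(media.c.size > sql.bindparam('min_size'))

# renders e.g. "SELECT media.tbl_id FROM media WHERE media.size > %(min_size)s"
print(stmt.compile(dialect=postgresql.dialect()))
```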
{pixeltable-0.2.19.dist-info → pixeltable-0.2.21.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: pixeltable
- Version: 0.2.19
+ Version: 0.2.21
  Summary: Pixeltable: The Multimodal AI Data Plane
  Author: Pixeltable, Inc.
  Author-email: contact@pixeltable.com
@@ -31,6 +31,7 @@ Requires-Dist: pyyaml (>=6.0.1,<7.0.0)
  Requires-Dist: requests (>=2.31.0,<3.0.0)
  Requires-Dist: sqlalchemy (>=2.0.23,<3.0.0)
  Requires-Dist: tenacity (>=8.2,<9.0)
+ Requires-Dist: toml (>=0.10)
  Requires-Dist: tqdm (>=4.64)
  Description-Content-Type: text/markdown

@@ -38,18 +39,26 @@ Description-Content-Type: text/markdown
  <img src="https://raw.githubusercontent.com/pixeltable/pixeltable/main/docs/source/data/pixeltable-logo-large.png" alt="Pixeltable" width="50%" />
  <br></br>

- [![License](https://img.shields.io/badge/License-Apache%202.0-darkblue.svg)](https://opensource.org/licenses/Apache-2.0)
- ![PyPI - Python Version](https://img.shields.io/pypi/pyversions/pixeltable?logo=python&logoColor=white)
- ![Platform Support](https://img.shields.io/badge/platform-Linux%20%7C%20macOS%20%7C%20Windows-8A2BE2)
+ [![License](https://img.shields.io/badge/License-Apache%202.0-0530AD.svg)](https://opensource.org/licenses/Apache-2.0)
+ ![PyPI - Python Version](https://img.shields.io/pypi/pyversions/pixeltable?logo=python&logoColor=white&)
+ ![Platform Support](https://img.shields.io/badge/platform-Linux%20%7C%20macOS%20%7C%20Windows-E5DDD4)
  <br>
  [![tests status](https://github.com/pixeltable/pixeltable/actions/workflows/pytest.yml/badge.svg)](https://github.com/pixeltable/pixeltable/actions/workflows/pytest.yml)
  [![tests status](https://github.com/pixeltable/pixeltable/actions/workflows/nightly.yml/badge.svg)](https://github.com/pixeltable/pixeltable/actions/workflows/nightly.yml)
- [![PyPI Package](https://img.shields.io/pypi/v/pixeltable?color=darkorange)](https://pypi.org/project/pixeltable/)
+ [![PyPI Package](https://img.shields.io/pypi/v/pixeltable?color=4D148C)](https://pypi.org/project/pixeltable/)
+ <a target="_blank" href="https://huggingface.co/Pixeltable"> <img src="https://img.shields.io/badge/🤗-HF Space-F25022" alt="Visit our Hugging Face space"/></a>

- [Installation](https://pixeltable.github.io/pixeltable/getting-started/) | [Documentation](https://pixeltable.readme.io/) | [API Reference](https://pixeltable.github.io/pixeltable/) | [Code Samples](https://pixeltable.readme.io/recipes) | [Examples](https://github.com/pixeltable/pixeltable/tree/release/docs/release/tutorials)
+ [Installation](https://pixeltable.github.io/pixeltable/getting-started/) | [Documentation](https://pixeltable.readme.io/) | [API Reference](https://pixeltable.github.io/pixeltable/) | [Code Samples](https://github.com/pixeltable/pixeltable?tab=readme-ov-file#-code-samples) | [Computer Vision](https://docs.pixeltable.com/docs/object-detection-in-videos) | [LLM](https://docs.pixeltable.com/docs/document-indexing-and-rag)
  </div>

- Pixeltable is a Python library providing a declarative interface for multimodal data (text, images, audio, video). It features built-in versioning, lineage tracking, and incremental updates, enabling users to store, transform, index, and iterate on data for their ML workflows. Data transformations, model inference, and custom logic are embedded as computed columns.
+ Pixeltable is a Python library providing a declarative interface for multimodal data (text, images, audio, video). It features built-in versioning, lineage tracking, and incremental updates, enabling users to **store**, **transform**, **index**, and **iterate** on data for their ML workflows.
+
+ Data transformations, model inference, and custom logic are embedded as **computed columns**.
+ - **Load/Query all data types**: Interact with [video data](https://github.com/pixeltable/pixeltable?tab=readme-ov-file#import-media-data-into-pixeltable-videos-images-audio) at the [frame level](https://github.com/pixeltable/pixeltable?tab=readme-ov-file#text-and-image-similarity-search-on-video-frames-with-embedding-indexes) and documents at the [chunk level](https://github.com/pixeltable/pixeltable?tab=readme-ov-file#automate-data-operations-with-views-eg-split-documents-into-chunks)
+ - **Incremental updates for data transformation**: Maintain an [embedding index](https://docs.pixeltable.com/docs/embedding-vector-indexes) colocated with your data
+ - **Lazy evaluation and cache management**: Eliminates the need for [manual frame extraction](https://docs.pixeltable.com/docs/object-detection-in-videos)
+ - **Integrates with any Python libraries**: Use [built-in and custom functions (UDFs)](https://docs.pixeltable.com/docs/user-defined-functions-udfs) without complex pipelines
+ - **Data format agnostic and extensibility**: Access tables as Parquet files, [PyTorch datasets](https://pixeltable.github.io/pixeltable/api/data-frame/#pixeltable.DataFrame.to_pytorch_dataset), or [COCO annotations](https://pixeltable.github.io/pixeltable/api/table/#pixeltable.Table.to_coco_dataset)

  ## 💾 Installation