pixeltable 0.3.5__py3-none-any.whl → 0.3.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (94)
  1. pixeltable/__init__.py +5 -3
  2. pixeltable/__version__.py +2 -2
  3. pixeltable/catalog/__init__.py +1 -0
  4. pixeltable/catalog/catalog.py +335 -128
  5. pixeltable/catalog/column.py +21 -5
  6. pixeltable/catalog/dir.py +19 -6
  7. pixeltable/catalog/insertable_table.py +34 -37
  8. pixeltable/catalog/named_function.py +0 -4
  9. pixeltable/catalog/schema_object.py +28 -42
  10. pixeltable/catalog/table.py +195 -158
  11. pixeltable/catalog/table_version.py +187 -232
  12. pixeltable/catalog/table_version_handle.py +50 -0
  13. pixeltable/catalog/table_version_path.py +49 -33
  14. pixeltable/catalog/view.py +56 -96
  15. pixeltable/config.py +103 -0
  16. pixeltable/dataframe.py +90 -90
  17. pixeltable/env.py +98 -168
  18. pixeltable/exec/aggregation_node.py +5 -4
  19. pixeltable/exec/cache_prefetch_node.py +1 -1
  20. pixeltable/exec/component_iteration_node.py +13 -9
  21. pixeltable/exec/data_row_batch.py +3 -3
  22. pixeltable/exec/exec_context.py +0 -4
  23. pixeltable/exec/exec_node.py +3 -2
  24. pixeltable/exec/expr_eval/schedulers.py +2 -1
  25. pixeltable/exec/in_memory_data_node.py +9 -4
  26. pixeltable/exec/row_update_node.py +1 -2
  27. pixeltable/exec/sql_node.py +20 -16
  28. pixeltable/exprs/column_ref.py +9 -9
  29. pixeltable/exprs/comparison.py +1 -1
  30. pixeltable/exprs/data_row.py +4 -4
  31. pixeltable/exprs/expr.py +20 -5
  32. pixeltable/exprs/function_call.py +98 -58
  33. pixeltable/exprs/json_mapper.py +25 -8
  34. pixeltable/exprs/json_path.py +6 -5
  35. pixeltable/exprs/object_ref.py +16 -5
  36. pixeltable/exprs/row_builder.py +15 -15
  37. pixeltable/exprs/rowid_ref.py +21 -7
  38. pixeltable/func/__init__.py +1 -1
  39. pixeltable/func/function.py +38 -6
  40. pixeltable/func/query_template_function.py +3 -6
  41. pixeltable/func/tools.py +26 -26
  42. pixeltable/func/udf.py +1 -1
  43. pixeltable/functions/__init__.py +2 -0
  44. pixeltable/functions/anthropic.py +9 -3
  45. pixeltable/functions/fireworks.py +7 -4
  46. pixeltable/functions/globals.py +4 -5
  47. pixeltable/functions/huggingface.py +1 -5
  48. pixeltable/functions/image.py +17 -7
  49. pixeltable/functions/llama_cpp.py +1 -1
  50. pixeltable/functions/mistralai.py +1 -1
  51. pixeltable/functions/ollama.py +4 -4
  52. pixeltable/functions/openai.py +26 -23
  53. pixeltable/functions/string.py +23 -30
  54. pixeltable/functions/timestamp.py +11 -6
  55. pixeltable/functions/together.py +14 -12
  56. pixeltable/functions/util.py +1 -1
  57. pixeltable/functions/video.py +5 -4
  58. pixeltable/functions/vision.py +6 -9
  59. pixeltable/functions/whisper.py +3 -3
  60. pixeltable/globals.py +246 -260
  61. pixeltable/index/__init__.py +2 -0
  62. pixeltable/index/base.py +1 -1
  63. pixeltable/index/btree.py +3 -1
  64. pixeltable/index/embedding_index.py +11 -5
  65. pixeltable/io/external_store.py +11 -12
  66. pixeltable/io/label_studio.py +4 -3
  67. pixeltable/io/parquet.py +57 -56
  68. pixeltable/iterators/__init__.py +4 -2
  69. pixeltable/iterators/audio.py +11 -11
  70. pixeltable/iterators/document.py +10 -10
  71. pixeltable/iterators/string.py +1 -2
  72. pixeltable/iterators/video.py +14 -15
  73. pixeltable/metadata/__init__.py +9 -5
  74. pixeltable/metadata/converters/convert_10.py +0 -1
  75. pixeltable/metadata/converters/convert_15.py +0 -2
  76. pixeltable/metadata/converters/convert_23.py +0 -2
  77. pixeltable/metadata/converters/convert_24.py +3 -3
  78. pixeltable/metadata/converters/convert_25.py +1 -1
  79. pixeltable/metadata/converters/convert_27.py +0 -2
  80. pixeltable/metadata/converters/convert_28.py +0 -2
  81. pixeltable/metadata/converters/convert_29.py +7 -8
  82. pixeltable/metadata/converters/util.py +7 -7
  83. pixeltable/metadata/schema.py +27 -19
  84. pixeltable/plan.py +68 -40
  85. pixeltable/share/packager.py +12 -9
  86. pixeltable/store.py +37 -38
  87. pixeltable/type_system.py +41 -28
  88. pixeltable/utils/filecache.py +2 -1
  89. {pixeltable-0.3.5.dist-info → pixeltable-0.3.7.dist-info}/METADATA +1 -1
  90. pixeltable-0.3.7.dist-info/RECORD +174 -0
  91. pixeltable-0.3.5.dist-info/RECORD +0 -172
  92. {pixeltable-0.3.5.dist-info → pixeltable-0.3.7.dist-info}/LICENSE +0 -0
  93. {pixeltable-0.3.5.dist-info → pixeltable-0.3.7.dist-info}/WHEEL +0 -0
  94. {pixeltable-0.3.5.dist-info → pixeltable-0.3.7.dist-info}/entry_points.txt +0 -0
@@ -2,7 +2,6 @@ from __future__ import annotations
 
  import dataclasses
  import importlib
- import inspect
  import logging
  import time
  import uuid
@@ -11,12 +10,10 @@ from uuid import UUID
 
  import jsonschema.exceptions
  import sqlalchemy as sql
- import sqlalchemy.orm as orm
 
  import pixeltable as pxt
  import pixeltable.exceptions as excs
  import pixeltable.exprs as exprs
- import pixeltable.func as func
  import pixeltable.index as index
  import pixeltable.type_system as ts
  from pixeltable.env import Env
@@ -32,6 +29,8 @@ from .globals import _POS_COLUMN_NAME, _ROWID_COLUMN_NAME, MediaValidation, Upda
  if TYPE_CHECKING:
  from pixeltable import exec, store
 
+ from .table_version_handle import TableVersionHandle
+
  _logger = logging.getLogger('pixeltable')
 
 
@@ -48,26 +47,27 @@ class TableVersion:
  * TODO: create a separate hierarchy of objects that records the version-independent tree of tables/views, and
  have TableVersions reference those
  - mutable TableVersions record their TableVersionPath, which is needed for expr evaluation in updates
+
+ Instances of TableVersion should not be stored as member variables (ie, used across transaction boundaries).
+ Use a TableVersionHandle instead.
  """
 
  id: UUID
  name: str
+ effective_version: Optional[int]
  version: int
  comment: str
  media_validation: MediaValidation
  num_retained_versions: int
  schema_version: int
  view_md: Optional[schema.ViewMd]
- is_snapshot: bool
- include_base_columns: bool
- effective_version: Optional[int]
- path: Optional[pxt.catalog.TableVersionPath]
- base: Optional[TableVersion]
+ path: Optional[pxt.catalog.TableVersionPath] # only set for live tables; needed to resolve computed cols
+ base: Optional[TableVersionHandle] # only set for views
  next_col_id: int
  next_idx_id: int
  next_rowid: int
  predicate: Optional[exprs.Expr]
- mutable_views: list[TableVersion]
+ mutable_views: list[TableVersionHandle] # target for data operation propagation (only set for live tables)
  iterator_cls: Optional[type[ComponentIterator]]
  iterator_args: Optional[exprs.InlineDict]
  num_iterator_cols: int
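
The new docstring above says that TableVersion instances should not be held across transaction boundaries and that a TableVersionHandle should be stored instead. The sketch below shows one plausible shape for that handle, inferred purely from call sites elsewhere in this diff (TableVersionHandle(id, effective_version, tbl_version=...), handle.get(), Catalog.get().get_tbl_version()); the actual contents of the new pixeltable/catalog/table_version_handle.py may differ.

# Hypothetical sketch, not the released table_version_handle.py: names and behavior are
# inferred from how handles are constructed and dereferenced in this diff.
from __future__ import annotations

from typing import Optional
from uuid import UUID


class TableVersionHandle:
    """Indirection to a TableVersion that is safe to hold across transaction boundaries."""

    def __init__(
        self, tbl_id: UUID, effective_version: Optional[int], tbl_version: Optional['TableVersion'] = None
    ) -> None:
        self.id = tbl_id
        self.effective_version = effective_version  # None for a live table, a fixed version for a snapshot
        self._tbl_version = tbl_version  # optionally pre-resolved, as in TableVersion.create_handle()

    def get(self) -> 'TableVersion':
        # resolve through the catalog on first use; the real class may instead re-resolve
        # per transaction rather than caching the instance indefinitely
        if self._tbl_version is None:
            from .catalog import Catalog
            self._tbl_version = Catalog.get().get_tbl_version(self.id, self.effective_version)
        return self._tbl_version

This is why fields such as base, mutable_views, and col.tbl now hold handles, and why call sites later in this diff change from view.foo to view.get().foo.
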
@@ -99,37 +99,37 @@ class TableVersion:
  self,
  id: UUID,
  tbl_md: schema.TableMd,
- version: int,
+ effective_version: Optional[int],
  schema_version_md: schema.TableSchemaVersionMd,
- base: Optional[TableVersion] = None,
+ mutable_views: list[TableVersionHandle],
  base_path: Optional[pxt.catalog.TableVersionPath] = None,
- is_snapshot: Optional[bool] = None,
+ base: Optional[TableVersionHandle] = None,
+ # base_store_tbl: Optional['store.StoreBase'] = None,
  ):
- # only one of base and base_path can be non-None
- assert base is None or base_path is None
  self.id = id
  self.name = tbl_md.name
- self.version = version
+ self.effective_version = effective_version
+ self.version = tbl_md.current_version if effective_version is None else effective_version
  self.comment = schema_version_md.comment
  self.num_retained_versions = schema_version_md.num_retained_versions
  self.schema_version = schema_version_md.schema_version
  self.view_md = tbl_md.view_md # save this as-is, it's needed for _create_md()
- is_view = tbl_md.view_md is not None
- self.is_snapshot = (is_view and tbl_md.view_md.is_snapshot) or bool(is_snapshot)
- self.include_base_columns = not is_view or tbl_md.view_md.include_base_columns
  self.media_validation = MediaValidation[schema_version_md.media_validation.upper()]
- # a mutable TableVersion doesn't have a static version
- self.effective_version = self.version if self.is_snapshot else None
+ assert not (self.is_view and base is None)
+ self.base = base
 
  # mutable tables need their TableVersionPath for expr eval during updates
+ from .table_version_handle import TableVersionHandle
  from .table_version_path import TableVersionPath
 
  if self.is_snapshot:
  self.path = None
  else:
- self.path = TableVersionPath(self, base=base_path) if base_path is not None else TableVersionPath(self)
+ self_handle = TableVersionHandle(id, self.effective_version)
+ if self.is_view:
+ assert base_path is not None
+ self.path = TableVersionPath(self_handle, base=base_path)
 
- self.base = base_path.tbl_version if base_path is not None else base
  if self.is_snapshot:
  self.next_col_id = -1
  self.next_idx_id = -1 # TODO: can snapshots have separate indices?
@@ -143,17 +143,15 @@ class TableVersion:
  # view-specific initialization
  from pixeltable import exprs
 
- predicate_dict = None if not is_view or tbl_md.view_md.predicate is None else tbl_md.view_md.predicate
+ predicate_dict = None if self.view_md is None or self.view_md.predicate is None else self.view_md.predicate
  self.predicate = exprs.Expr.from_dict(predicate_dict) if predicate_dict is not None else None
- self.mutable_views = [] # targets for update propagation
- if self.base is not None and not self.base.is_snapshot and not self.is_snapshot:
- self.base.mutable_views.append(self)
+ self.mutable_views = mutable_views
 
  # component view-specific initialization
  self.iterator_cls = None
  self.iterator_args = None
  self.num_iterator_cols = 0
- if is_view and tbl_md.view_md.iterator_class_fqn is not None:
+ if self.view_md is not None and self.view_md.iterator_class_fqn is not None:
  module_name, class_name = tbl_md.view_md.iterator_class_fqn.rsplit('.', 1)
  module = importlib.import_module(module_name)
  self.iterator_cls = getattr(module, class_name)
@@ -164,7 +162,7 @@ class TableVersion:
 
  # register this table version now so that it's available when we're re-creating value exprs
  cat = pxt.catalog.Catalog.get()
- cat.tbl_versions[(self.id, self.effective_version)] = self
+ cat.add_tbl_version(self)
 
  # init schema after we determined whether we're a component view, and before we create the store table
  self.cols = []
@@ -182,39 +180,38 @@ class TableVersion:
  def __hash__(self) -> int:
  return hash(self.id)
 
- def _get_column(self, tbl_id: UUID, col_id: int) -> Column:
- if self.id == tbl_id:
- return self.cols_by_id[col_id]
- else:
- if self.base is None:
- raise excs.Error(f'Unknown table id: {tbl_id}')
- return self.base._get_column(tbl_id, col_id)
-
  def create_snapshot_copy(self) -> TableVersion:
  """Create a snapshot copy of this TableVersion"""
  assert not self.is_snapshot
+ base = self.path.base.tbl_version if self.is_view else None
  return TableVersion(
  self.id,
  self._create_tbl_md(),
  self.version,
  self._create_schema_version_md(preceding_schema_version=0), # preceding_schema_version: dummy value
- is_snapshot=True,
- base=self.base,
+ mutable_views=[],
+ base=base,
  )
 
+ def create_handle(self) -> TableVersionHandle:
+ from .table_version_handle import TableVersionHandle
+
+ return TableVersionHandle(self.id, self.effective_version, tbl_version=self)
+
  @classmethod
  def create(
  cls,
- session: orm.Session,
  dir_id: UUID,
  name: str,
  cols: list[Column],
  num_retained_versions: int,
  comment: str,
  media_validation: MediaValidation,
- base_path: Optional[pxt.catalog.TableVersionPath] = None,
+ # base_path: Optional[pxt.catalog.TableVersionPath] = None,
  view_md: Optional[schema.ViewMd] = None,
  ) -> tuple[UUID, Optional[TableVersion]]:
+ session = Env.get().session
+
  # assign ids
  cols_by_name: dict[str, Column] = {}
  for pos, col in enumerate(cols):
@@ -276,7 +273,7 @@ class TableVersion:
  tbl_id=tbl_record.id, schema_version=0, md=dataclasses.asdict(schema_version_md)
  )
 
- # if this is purely a snapshot (it doesn't require any additional storage for columns and it # doesn't have a
+ # if this is purely a snapshot (it doesn't require any additional storage for columns and it doesn't have a
  # predicate to apply at runtime), we don't create a physical table and simply use the base's table version path
  if view_md is not None and view_md.is_snapshot and view_md.predicate is None and len(cols) == 0:
  session.add(tbl_record)
@@ -284,17 +281,20 @@ class TableVersion:
  session.add(schema_version_record)
  return tbl_record.id, None
 
- assert (base_path is not None) == (view_md is not None)
- base = base_path.tbl_version if base_path is not None and view_md.is_snapshot else None
- base_path = base_path if base_path is not None and not view_md.is_snapshot else None
- tbl_version = cls(tbl_record.id, table_md, 0, schema_version_md, base=base, base_path=base_path)
+ # assert (base_path is not None) == (view_md is not None)
+ is_snapshot = view_md is not None and view_md.is_snapshot
+ effective_version = 0 if is_snapshot else None
+ base_path = pxt.catalog.TableVersionPath.from_md(view_md.base_versions) if view_md is not None else None
+ base = base_path.tbl_version if base_path is not None else None
+ tbl_version = cls(
+ tbl_record.id, table_md, effective_version, schema_version_md, [], base_path=base_path, base=base
+ )
 
- conn = session.connection()
- tbl_version.store_tbl.create(conn)
+ tbl_version.store_tbl.create()
  if view_md is None or not view_md.is_snapshot:
  # add default indices, after creating the store table
  for col in tbl_version.cols_by_name.values():
- status = tbl_version._add_default_index(col, conn=conn)
+ status = tbl_version._add_default_index(col)
  assert status is None or status.num_excs == 0
 
  # we re-create the tbl_record here, now that we have new index metadata
@@ -305,30 +305,30 @@ class TableVersion:
  return tbl_record.id, tbl_version
 
  @classmethod
- def delete_md(cls, tbl_id: UUID, conn: sql.Connection) -> None:
+ def delete_md(cls, tbl_id: UUID) -> None:
+ conn = Env.get().conn
  conn.execute(sql.delete(schema.TableSchemaVersion.__table__).where(schema.TableSchemaVersion.tbl_id == tbl_id))
  conn.execute(sql.delete(schema.TableVersion.__table__).where(schema.TableVersion.tbl_id == tbl_id))
  conn.execute(sql.delete(schema.Table.__table__).where(schema.Table.id == tbl_id))
 
  def drop(self) -> None:
- with Env.get().engine.begin() as conn:
- # delete this table and all associated data
- MediaStore.delete(self.id)
- FileCache.get().clear(tbl_id=self.id)
- self.delete_md(self.id, conn)
- self.store_tbl.drop(conn)
+ # delete this table and all associated data
+ MediaStore.delete(self.id)
+ FileCache.get().clear(tbl_id=self.id)
+ self.delete_md(self.id)
+ self.store_tbl.drop()
 
  # de-register table version from catalog
  from .catalog import Catalog
 
  cat = Catalog.get()
- del cat.tbl_versions[(self.id, self.effective_version)]
- # TODO: remove from tbl_dependents
+ cat.remove_tbl_version(self)
 
  def _init_schema(self, tbl_md: schema.TableMd, schema_version_md: schema.TableSchemaVersionMd) -> None:
  # create columns first, so the indices can reference them
  self._init_cols(tbl_md, schema_version_md)
- self._init_idxs(tbl_md)
+ if not self.is_snapshot:
+ self._init_idxs(tbl_md)
  # create the sa schema only after creating the columns and indices
  self._init_sa_schema()

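A pattern that repeats through the rest of this diff, visible first in delete_md() and drop() above: methods stop taking a sql.engine.Connection (or orm.Session) parameter, the `with Env.get().engine.begin() as conn:` blocks disappear, and the body instead reads `conn = Env.get().conn`. The implication is that the enclosing transaction is opened once by the caller (presumably the reworked Catalog) and the connection is ambient state on Env. The toy below illustrates that convention with a contextvar; Env and begin_xact here are stand-ins for illustration, not pixeltable's actual implementation.

# Self-contained toy of the ambient-connection convention assumed above; Env and
# begin_xact are placeholders, not pixeltable's real classes.
from contextlib import contextmanager
from contextvars import ContextVar
from typing import Any, Iterator

_current_conn: ContextVar[Any] = ContextVar('current_conn')


class Env:
    @classmethod
    def get(cls) -> 'Env':
        return cls()

    @property
    def conn(self) -> Any:
        return _current_conn.get()  # raises LookupError outside a transaction


@contextmanager
def begin_xact(conn: Any) -> Iterator[Any]:
    # the caller opens the transaction once and publishes its connection for
    # everything that runs inside the block
    token = _current_conn.set(conn)
    try:
        yield conn
    finally:
        _current_conn.reset(token)


def delete_md(tbl_id: str) -> None:
    conn = Env.get().conn  # 0.3.7 style: no conn parameter in the signature
    print(f'would delete metadata for {tbl_id} using {conn!r}')


with begin_xact(conn='fake-connection'):
    delete_md('tbl-123')
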
@@ -356,7 +356,7 @@ class TableVersion:
  schema_version_drop=col_md.schema_version_drop,
  value_expr_dict=col_md.value_expr,
  )
- col.tbl = self
+ col.tbl = self.create_handle()
  self.cols.append(col)
 
  # populate the lookup structures before Expr.from_dict()
@@ -373,7 +373,6 @@ class TableVersion:
  # make sure to traverse columns ordered by position = order in which cols were created;
  # this guarantees that references always point backwards
  if col_md.value_expr is not None:
- refd_cols = exprs.Expr.get_refd_columns(col_md.value_expr)
  self._record_refd_columns(col)
 
  def _init_idxs(self, tbl_md: schema.TableMd) -> None:
@@ -393,7 +392,7 @@ class TableVersion:
  # instantiate index object
  cls_name = md.class_fqn.rsplit('.', 1)[-1]
  cls = getattr(index_module, cls_name)
- idx_col = self._get_column(UUID(md.indexed_col_tbl_id), md.indexed_col_id)
+ idx_col = self.path.get_column_by_id(UUID(md.indexed_col_tbl_id), md.indexed_col_id)
  idx = cls.from_dict(idx_col, md.init_args)
 
  # fix up the sa column type of the index value and undo columns
@@ -411,19 +410,15 @@ class TableVersion:
  # need to record errors
  from pixeltable.store import StoreComponentView, StoreTable, StoreView
 
- if self.is_component_view():
+ if self.is_component_view:
  self.store_tbl = StoreComponentView(self)
- elif self.is_view():
+ elif self.is_view:
  self.store_tbl = StoreView(self)
  else:
  self.store_tbl = StoreTable(self)
 
  def _update_md(
- self,
- timestamp: float,
- conn: sql.engine.Connection,
- update_tbl_version: bool = True,
- preceding_schema_version: Optional[int] = None,
+ self, timestamp: float, update_tbl_version: bool = True, preceding_schema_version: Optional[int] = None
  ) -> None:
  """Writes table metadata to the database.
 
@@ -436,6 +431,7 @@ class TableVersion:
  """
  assert update_tbl_version or preceding_schema_version is None
 
+ conn = Env.get().conn
  conn.execute(
  sql.update(schema.Table.__table__)
  .values({schema.Table.md: dataclasses.asdict(self._create_tbl_md())})
@@ -467,13 +463,12 @@ class TableVersion:
  self.version += 1
  preceding_schema_version = self.schema_version
  self.schema_version = self.version
- with Env.get().engine.begin() as conn:
- status = self._add_index(col, idx_name, idx, conn)
- self._update_md(time.time(), conn, preceding_schema_version=preceding_schema_version)
- _logger.info(f'Added index {idx_name} on column {col.name} to table {self.name}')
- return status
+ status = self._add_index(col, idx_name, idx)
+ self._update_md(time.time(), preceding_schema_version=preceding_schema_version)
+ _logger.info(f'Added index {idx_name} on column {col.name} to table {self.name}')
+ return status
 
- def _add_default_index(self, col: Column, conn: sql.engine.Connection) -> Optional[UpdateStatus]:
+ def _add_default_index(self, col: Column) -> Optional[UpdateStatus]:
  """Add a B-tree index on this column if it has a compatible type"""
  if not col.stored:
  # if the column is intentionally not stored, we want to avoid the overhead of an index
@@ -487,12 +482,10 @@ class TableVersion:
  if col.col_type.is_bool_type():
  # B-trees on bools aren't useful
  return None
- status = self._add_index(col, idx_name=None, idx=index.BtreeIndex(col), conn=conn)
+ status = self._add_index(col, idx_name=None, idx=index.BtreeIndex(col))
  return status
 
- def _add_index(
- self, col: Column, idx_name: Optional[str], idx: index.IndexBase, conn: sql.engine.Connection
- ) -> UpdateStatus:
+ def _add_index(self, col: Column, idx_name: Optional[str], idx: index.IndexBase) -> UpdateStatus:
  assert not self.is_snapshot
  idx_id = self.next_idx_id
  self.next_idx_id += 1
@@ -513,7 +506,7 @@ class TableVersion:
  schema_version_drop=None,
  records_errors=idx.records_value_errors(),
  )
- val_col.tbl = self
+ val_col.tbl = self.create_handle()
  val_col.col_type = val_col.col_type.copy(nullable=True)
  self.next_col_id += 1
 
@@ -527,7 +520,7 @@ class TableVersion:
  schema_version_drop=None,
  records_errors=False,
  )
- undo_col.tbl = self
+ undo_col.tbl = self.create_handle()
  undo_col.col_type = undo_col.col_type.copy(nullable=True)
  self.next_col_id += 1
 
@@ -552,9 +545,9 @@ class TableVersion:
  # add the columns and update the metadata
  # TODO support on_error='abort' for indices; it's tricky because of the way metadata changes are entangled
  # with the database operations
- status = self._add_columns([val_col, undo_col], conn, print_stats=False, on_error='ignore')
+ status = self._add_columns([val_col, undo_col], print_stats=False, on_error='ignore')
  # now create the index structure
- idx.create_index(self._store_idx_name(idx_id), val_col, conn)
+ idx.create_index(self._store_idx_name(idx_id), val_col)
 
  return status
 
@@ -575,10 +568,9 @@ class TableVersion:
  del self.idxs_by_name[idx_md.name]
  del self.idx_md[idx_id]
 
- with Env.get().engine.begin() as conn:
- self._drop_columns([idx_info.val_col, idx_info.undo_col])
- self._update_md(time.time(), conn, preceding_schema_version=preceding_schema_version)
- _logger.info(f'Dropped index {idx_md.name} on table {self.name}')
+ self._drop_columns([idx_info.val_col, idx_info.undo_col])
+ self._update_md(time.time(), preceding_schema_version=preceding_schema_version)
+ _logger.info(f'Dropped index {idx_md.name} on table {self.name}')
 
  def add_columns(
  self, cols: Iterable[Column], print_stats: bool, on_error: Literal['abort', 'ignore']
@@ -589,7 +581,7 @@ class TableVersion:
  assert all(col.stored is not None for col in cols)
  assert all(col.name not in self.cols_by_name for col in cols)
  for col in cols:
- col.tbl = self
+ col.tbl = self.create_handle()
  col.id = self.next_col_id
  self.next_col_id += 1
 
@@ -597,11 +589,10 @@ class TableVersion:
  self.version += 1
  preceding_schema_version = self.schema_version
  self.schema_version = self.version
- with Env.get().engine.begin() as conn:
- status = self._add_columns(cols, conn, print_stats=print_stats, on_error=on_error)
- for col in cols:
- _ = self._add_default_index(col, conn)
- self._update_md(time.time(), conn, preceding_schema_version=preceding_schema_version)
+ status = self._add_columns(cols, print_stats=print_stats, on_error=on_error)
+ for col in cols:
+ _ = self._add_default_index(col)
+ self._update_md(time.time(), preceding_schema_version=preceding_schema_version)
  _logger.info(f'Added columns {[col.name for col in cols]} to table {self.name}, new version: {self.version}')
 
  msg = (
@@ -613,15 +604,11 @@ class TableVersion:
  return status
 
  def _add_columns(
- self,
- cols: Iterable[Column],
- conn: sql.engine.Connection,
- print_stats: bool,
- on_error: Literal['abort', 'ignore'],
+ self, cols: Iterable[Column], print_stats: bool, on_error: Literal['abort', 'ignore']
  ) -> UpdateStatus:
  """Add and populate columns within the current transaction"""
  cols = list(cols)
- row_count = self.store_tbl.count(conn=conn)
+ row_count = self.store_tbl.count()
  for col in cols:
  if not col.col_type.nullable and not col.is_computed:
  if row_count > 0:
@@ -644,7 +631,7 @@ class TableVersion:
  self._record_refd_columns(col)
 
  if col.is_stored:
- self.store_tbl.add_column(col, conn)
+ self.store_tbl.add_column(col)
 
  if not col.is_computed or not col.is_stored or row_count == 0:
  continue
@@ -656,10 +643,9 @@ class TableVersion:
  plan.ctx.num_rows = row_count
 
  try:
- plan.ctx.set_conn(conn)
  plan.open()
  try:
- num_excs = self.store_tbl.load_column(col, plan, value_expr_slot_idx, conn, on_error)
+ num_excs = self.store_tbl.load_column(col, plan, value_expr_slot_idx, on_error)
  except sql.exc.DBAPIError as exc:
  # Wrap the DBAPIError in an excs.Error to unify processing in the subsequent except block
  raise excs.Error(f'SQL error during execution of computed column `{col.name}`:\n{exc}') from exc
@@ -687,12 +673,11 @@ class TableVersion:
  num_rows=row_count,
  num_computed_values=row_count,
  num_excs=num_excs,
- cols_with_excs=[f'{col.tbl.name}.{col.name}' for col in cols_with_excs if col.name is not None],
+ cols_with_excs=[f'{col.tbl.get().name}.{col.name}' for col in cols_with_excs if col.name is not None],
  )
 
  def drop_column(self, col: Column) -> None:
  """Drop a column from the table."""
- from pixeltable.catalog import Catalog
 
  assert not self.is_snapshot
 
@@ -701,23 +686,22 @@ class TableVersion:
  preceding_schema_version = self.schema_version
  self.schema_version = self.version
 
- with Env.get().engine.begin() as conn:
- # drop this column and all dependent index columns and indices
- dropped_cols = [col]
- dropped_idx_names: list[str] = []
- for idx_info in self.idxs_by_name.values():
- if idx_info.col != col:
- continue
- dropped_cols.extend([idx_info.val_col, idx_info.undo_col])
- idx_md = self.idx_md[idx_info.id]
- idx_md.schema_version_drop = self.schema_version
- assert idx_md.name in self.idxs_by_name
- dropped_idx_names.append(idx_md.name)
- # update idxs_by_name
- for idx_name in dropped_idx_names:
- del self.idxs_by_name[idx_name]
- self._drop_columns(dropped_cols)
- self._update_md(time.time(), conn, preceding_schema_version=preceding_schema_version)
+ # drop this column and all dependent index columns and indices
+ dropped_cols = [col]
+ dropped_idx_names: list[str] = []
+ for idx_info in self.idxs_by_name.values():
+ if idx_info.col != col:
+ continue
+ dropped_cols.extend([idx_info.val_col, idx_info.undo_col])
+ idx_md = self.idx_md[idx_info.id]
+ idx_md.schema_version_drop = self.schema_version
+ assert idx_md.name in self.idxs_by_name
+ dropped_idx_names.append(idx_md.name)
+ # update idxs_by_name
+ for idx_name in dropped_idx_names:
+ del self.idxs_by_name[idx_name]
+ self._drop_columns(dropped_cols)
+ self._update_md(time.time(), preceding_schema_version=preceding_schema_version)
  _logger.info(f'Dropped column {col.name} from table {self.name}, new version: {self.version}')
 
  def _drop_columns(self, cols: Iterable[Column]) -> None:
@@ -760,8 +744,7 @@ class TableVersion:
  preceding_schema_version = self.schema_version
  self.schema_version = self.version
 
- with Env.get().engine.begin() as conn:
- self._update_md(time.time(), conn, preceding_schema_version=preceding_schema_version)
+ self._update_md(time.time(), preceding_schema_version=preceding_schema_version)
  _logger.info(f'Renamed column {old_name} to {new_name} in table {self.name}, new version: {self.version}')
 
  def set_comment(self, new_comment: Optional[str]):
@@ -781,15 +764,13 @@ class TableVersion:
  self.version += 1
  preceding_schema_version = self.schema_version
  self.schema_version = self.version
- with Env.get().engine.begin() as conn:
- self._update_md(time.time(), conn, preceding_schema_version=preceding_schema_version)
+ self._update_md(time.time(), preceding_schema_version=preceding_schema_version)
  _logger.info(f'[{self.name}] Updating table schema to version: {self.version}')
 
  def insert(
  self,
  rows: Optional[list[dict[str, Any]]],
  df: Optional[pxt.DataFrame],
- conn: Optional[sql.engine.Connection] = None,
  print_stats: bool = False,
  fail_on_exception: bool = True,
  ) -> UpdateStatus:
@@ -812,20 +793,11 @@ class TableVersion:
  self.next_rowid += 1
  yield rowid
 
- if conn is None:
- with Env.get().engine.begin() as conn:
- return self._insert(
- plan, conn, time.time(), print_stats=print_stats, rowids=rowids(), abort_on_exc=fail_on_exception
- )
- else:
- return self._insert(
- plan, conn, time.time(), print_stats=print_stats, rowids=rowids(), abort_on_exc=fail_on_exception
- )
+ return self._insert(plan, time.time(), print_stats=print_stats, rowids=rowids(), abort_on_exc=fail_on_exception)
 
  def _insert(
  self,
  exec_plan: 'exec.ExecNode',
- conn: sql.engine.Connection,
  timestamp: float,
  *,
  rowids: Optional[Iterator[int]] = None,
@@ -837,20 +809,20 @@ class TableVersion:
  self.version += 1
  result = UpdateStatus()
  num_rows, num_excs, cols_with_excs = self.store_tbl.insert_rows(
- exec_plan, conn, v_min=self.version, rowids=rowids, abort_on_exc=abort_on_exc
+ exec_plan, v_min=self.version, rowids=rowids, abort_on_exc=abort_on_exc
  )
  result.num_rows = num_rows
  result.num_excs = num_excs
  result.num_computed_values += exec_plan.ctx.num_computed_exprs * num_rows
  result.cols_with_excs = [f'{self.name}.{self.cols_by_id[cid].name}' for cid in cols_with_excs]
- self._update_md(timestamp, conn)
+ self._update_md(timestamp)
 
  # update views
  for view in self.mutable_views:
  from pixeltable.plan import Planner
 
- plan, _ = Planner.create_view_load_plan(view.path, propagates_insert=True)
- status = view._insert(plan, conn, timestamp, print_stats=print_stats)
+ plan, _ = Planner.create_view_load_plan(view.get().path, propagates_insert=True)
+ status = view.get()._insert(plan, timestamp, print_stats=print_stats)
  result.num_rows += status.num_rows
  result.num_excs += status.num_excs
  result.num_computed_values += status.num_computed_values
@@ -886,22 +858,20 @@ class TableVersion:
  if analysis_info.filter is not None:
  raise excs.Error(f'Filter {analysis_info.filter} not expressible in SQL')
 
- with Env.get().engine.begin() as conn:
- plan, updated_cols, recomputed_cols = Planner.create_update_plan(self.path, update_spec, [], where, cascade)
- from pixeltable.exprs import SqlElementCache
-
- result = self.propagate_update(
- plan,
- where.sql_expr(SqlElementCache()) if where is not None else None,
- recomputed_cols,
- base_versions=[],
- conn=conn,
- timestamp=time.time(),
- cascade=cascade,
- show_progress=True,
- )
- result.updated_cols = updated_cols
- return result
+ plan, updated_cols, recomputed_cols = Planner.create_update_plan(self.path, update_spec, [], where, cascade)
+ from pixeltable.exprs import SqlElementCache
+
+ result = self.propagate_update(
+ plan,
+ where.sql_expr(SqlElementCache()) if where is not None else None,
+ recomputed_cols,
+ base_versions=[],
+ timestamp=time.time(),
+ cascade=cascade,
+ show_progress=True,
+ )
+ result.updated_cols = updated_cols
+ return result
 
  def batch_update(
  self,
@@ -920,33 +890,24 @@ class TableVersion:
  assert len(rowids) == 0 or len(rowids) == len(batch)
  cols_with_excs: set[str] = set()
 
- with Env.get().engine.begin() as conn:
- from pixeltable.plan import Planner
+ from pixeltable.plan import Planner
 
- plan, row_update_node, delete_where_clause, updated_cols, recomputed_cols = (
- Planner.create_batch_update_plan(self.path, batch, rowids, cascade=cascade)
- )
- result = self.propagate_update(
- plan,
- delete_where_clause,
- recomputed_cols,
- base_versions=[],
- conn=conn,
- timestamp=time.time(),
- cascade=cascade,
- )
- result.updated_cols = [c.qualified_name for c in updated_cols]
-
- unmatched_rows = row_update_node.unmatched_rows()
- if len(unmatched_rows) > 0:
- if error_if_not_exists:
- raise excs.Error(f'batch_update(): {len(unmatched_rows)} row(s) not found')
- if insert_if_not_exists:
- insert_status = self.insert(
- unmatched_rows, None, conn=conn, print_stats=False, fail_on_exception=False
- )
- result += insert_status
- return result
+ plan, row_update_node, delete_where_clause, updated_cols, recomputed_cols = Planner.create_batch_update_plan(
+ self.path, batch, rowids, cascade=cascade
+ )
+ result = self.propagate_update(
+ plan, delete_where_clause, recomputed_cols, base_versions=[], timestamp=time.time(), cascade=cascade
+ )
+ result.updated_cols = [c.qualified_name for c in updated_cols]
+
+ unmatched_rows = row_update_node.unmatched_rows()
+ if len(unmatched_rows) > 0:
+ if error_if_not_exists:
+ raise excs.Error(f'batch_update(): {len(unmatched_rows)} row(s) not found')
+ if insert_if_not_exists:
+ insert_status = self.insert(unmatched_rows, None, print_stats=False, fail_on_exception=False)
+ result += insert_status
+ return result
 
  def _validate_update_spec(
  self, value_spec: dict[str, Any], allow_pk: bool, allow_exprs: bool
@@ -1000,7 +961,6 @@ class TableVersion:
  where_clause: Optional[sql.ColumnElement],
  recomputed_view_cols: list[Column],
  base_versions: list[Optional[int]],
- conn: sql.engine.Connection,
  timestamp: float,
  cascade: bool,
  show_progress: bool = True,
@@ -1010,32 +970,26 @@ class TableVersion:
  # we're creating a new version
  self.version += 1
  result.num_rows, result.num_excs, cols_with_excs = self.store_tbl.insert_rows(
- plan, conn, v_min=self.version, show_progress=show_progress
+ plan, v_min=self.version, show_progress=show_progress
  )
  result.cols_with_excs = [f'{self.name}.{self.cols_by_id[cid].name}' for cid in cols_with_excs]
  self.store_tbl.delete_rows(
- self.version, base_versions=base_versions, match_on_vmin=True, where_clause=where_clause, conn=conn
+ self.version, base_versions=base_versions, match_on_vmin=True, where_clause=where_clause
  )
- self._update_md(timestamp, conn)
+ self._update_md(timestamp)
 
  if cascade:
  base_versions = [None if plan is None else self.version] + base_versions # don't update in place
  # propagate to views
  for view in self.mutable_views:
- recomputed_cols = [col for col in recomputed_view_cols if col.tbl is view]
+ recomputed_cols = [col for col in recomputed_view_cols if col.tbl == view]
  plan = None
  if len(recomputed_cols) > 0:
  from pixeltable.plan import Planner
 
- plan = Planner.create_view_update_plan(view.path, recompute_targets=recomputed_cols)
- status = view.propagate_update(
- plan,
- None,
- recomputed_view_cols,
- base_versions=base_versions,
- conn=conn,
- timestamp=timestamp,
- cascade=True,
+ plan = Planner.create_view_update_plan(view.get().path, recompute_targets=recomputed_cols)
+ status = view.get().propagate_update(
+ plan, None, recomputed_view_cols, base_versions=base_versions, timestamp=timestamp, cascade=True
  )
  result.num_rows += status.num_rows
  result.num_excs += status.num_excs
@@ -1063,18 +1017,13 @@ class TableVersion:
  raise excs.Error(f'Filter {analysis_info.filter} not expressible in SQL')
  sql_where_clause = analysis_info.sql_where_clause
 
- with Env.get().engine.begin() as conn:
- num_rows = self.propagate_delete(sql_where_clause, base_versions=[], conn=conn, timestamp=time.time())
+ num_rows = self.propagate_delete(sql_where_clause, base_versions=[], timestamp=time.time())
 
  status = UpdateStatus(num_rows=num_rows)
  return status
 
  def propagate_delete(
- self,
- where: Optional[exprs.Expr],
- base_versions: list[Optional[int]],
- conn: sql.engine.Connection,
- timestamp: float,
+ self, where: Optional[exprs.Expr], base_versions: list[Optional[int]], timestamp: float
  ) -> int:
  """Delete rows in this table and propagate to views.
  Args:
@@ -1084,17 +1033,17 @@ class TableVersion:
  """
  sql_where_clause = where.sql_expr(exprs.SqlElementCache()) if where is not None else None
  num_rows = self.store_tbl.delete_rows(
- self.version + 1, base_versions=base_versions, match_on_vmin=False, where_clause=sql_where_clause, conn=conn
+ self.version + 1, base_versions=base_versions, match_on_vmin=False, where_clause=sql_where_clause
  )
  if num_rows > 0:
  # we're creating a new version
  self.version += 1
- self._update_md(timestamp, conn)
+ self._update_md(timestamp)
  else:
  pass
  for view in self.mutable_views:
- num_rows += view.propagate_delete(
- where=None, base_versions=[self.version] + base_versions, conn=conn, timestamp=timestamp
+ num_rows += view.get().propagate_delete(
+ where=None, base_versions=[self.version] + base_versions, timestamp=timestamp
  )
  return num_rows
 
@@ -1103,22 +1052,20 @@ class TableVersion:
  assert not self.is_snapshot
  if self.version == 0:
  raise excs.Error('Cannot revert version 0')
- with orm.Session(Env.get().engine, future=True) as session:
- self._revert(session)
- session.commit()
+ self._revert()
 
- def _delete_column(self, col: Column, conn: sql.engine.Connection) -> None:
+ def _delete_column(self, col: Column) -> None:
  """Physically remove the column from the schema and the store table"""
  if col.is_stored:
- self.store_tbl.drop_column(col, conn)
+ self.store_tbl.drop_column(col)
  self.cols.remove(col)
  if col.name is not None:
  del self.cols_by_name[col.name]
  del self.cols_by_id[col.id]
 
- def _revert(self, session: orm.Session) -> None:
+ def _revert(self) -> None:
  """Reverts this table version and propagates to views"""
- conn = session.connection()
+ conn = Env.get().conn
  # make sure we don't have a snapshot referencing this version
  # (unclear how to express this with sqlalchemy)
  query = (
@@ -1137,7 +1084,6 @@ class TableVersion:
  )
  )
 
- conn = session.connection()
  # delete newly-added data
  MediaStore.delete(self.id, version=self.version)
  conn.execute(sql.delete(self.store_tbl.sa_tbl).where(self.store_tbl.sa_tbl.c.v_min == self.version))
@@ -1158,7 +1104,7 @@ class TableVersion:
  if len(added_cols) > 0:
  next_col_id = min(col.id for col in added_cols)
  for col in added_cols:
- self._delete_column(col, conn)
+ self._delete_column(col)
  self.next_col_id = next_col_id
 
  # remove newly-added indices from the lookup structures
@@ -1181,6 +1127,7 @@ class TableVersion:
  for md in dropped_idx_md:
  md.schema_version_drop = None
 
+ session = Env.get().session
  # we need to determine the preceding schema version and reload the schema
  schema_version_md_dict = (
  session.query(schema.TableSchemaVersion.md)
@@ -1224,7 +1171,7 @@ class TableVersion:
 
  # propagate to views
  for view in self.mutable_views:
- view._revert(session)
+ view.get()._revert()
  _logger.info(f'TableVersion {self.name}: reverted to version {self.version}')
 
  def _init_external_stores(self, tbl_md: schema.TableMd) -> None:
@@ -1235,40 +1182,48 @@ class TableVersion:
  self.external_stores[store.name] = store
 
  def link_external_store(self, store: pxt.io.ExternalStore) -> None:
- with Env.get().engine.begin() as conn:
- store.link(self, conn) # May result in additional metadata changes
- self.external_stores[store.name] = store
- self._update_md(time.time(), conn, update_tbl_version=False)
+ store.link(self) # May result in additional metadata changes
+ self.external_stores[store.name] = store
+ self._update_md(time.time(), update_tbl_version=False)
 
  def unlink_external_store(self, store_name: str, delete_external_data: bool) -> None:
  assert store_name in self.external_stores
  store = self.external_stores[store_name]
- with Env.get().engine.begin() as conn:
- store.unlink(self, conn) # May result in additional metadata changes
- del self.external_stores[store_name]
- self._update_md(time.time(), conn, update_tbl_version=False)
+ store.unlink(self) # May result in additional metadata changes
+ del self.external_stores[store_name]
+ self._update_md(time.time(), update_tbl_version=False)
 
  if delete_external_data and isinstance(store, pxt.io.external_store.Project):
  store.delete()
 
+ @property
+ def is_snapshot(self) -> bool:
+ return self.effective_version is not None
+
+ @property
  def is_view(self) -> bool:
- return self.base is not None
+ return self.view_md is not None
+
+ @property
+ def include_base_columns(self) -> bool:
+ return self.view_md is not None and self.view_md.include_base_columns
 
+ @property
  def is_component_view(self) -> bool:
  return self.iterator_cls is not None
 
  def is_insertable(self) -> bool:
  """Returns True if this corresponds to an InsertableTable"""
- return not self.is_snapshot and not self.is_view()
+ return not self.is_snapshot and not self.is_view
 
  def is_iterator_column(self, col: Column) -> bool:
  """Returns True if col is produced by an iterator"""
  # the iterator columns directly follow the pos column
- return self.is_component_view() and col.id > 0 and col.id < self.num_iterator_cols + 1
+ return self.is_component_view and col.id > 0 and col.id < self.num_iterator_cols + 1
 
  def is_system_column(self, col: Column) -> bool:
  """Return True if column was created by Pixeltable"""
- if col.name == _POS_COLUMN_NAME and self.is_component_view():
+ if col.name == _POS_COLUMN_NAME and self.is_component_view:
  return True
  return False
 
@@ -1282,7 +1237,7 @@ class TableVersion:
 
  def get_required_col_names(self) -> list[str]:
  """Return the names of all columns for which values must be specified in insert()"""
- assert not self.is_view()
+ assert not self.is_view
  names = [c.name for c in self.cols_by_name.values() if not c.is_computed and not c.col_type.nullable]
  return names
 
@@ -1318,8 +1273,8 @@ class TableVersion:
 
  def num_rowid_columns(self) -> int:
  """Return the number of columns of the rowids, without accessing store_tbl"""
- if self.is_component_view():
- return 1 + self.base.num_rowid_columns()
+ if self.is_component_view:
+ return 1 + self.base.get().num_rowid_columns()
  return 1
 
  @classmethod
@@ -1393,4 +1348,4 @@ class TableVersion:
 
  id = UUID(d['id'])
  effective_version = d['effective_version']
- return catalog.Catalog.get().tbl_versions[(id, effective_version)]
+ return catalog.Catalog.get().get_tbl_version(id, effective_version)