pixeltable 0.4.18__py3-none-any.whl → 0.4.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pixeltable might be problematic.

Files changed (152)
  1. pixeltable/__init__.py +1 -1
  2. pixeltable/_version.py +1 -0
  3. pixeltable/catalog/catalog.py +119 -100
  4. pixeltable/catalog/column.py +104 -115
  5. pixeltable/catalog/globals.py +1 -2
  6. pixeltable/catalog/insertable_table.py +44 -49
  7. pixeltable/catalog/path.py +3 -4
  8. pixeltable/catalog/schema_object.py +4 -4
  9. pixeltable/catalog/table.py +118 -122
  10. pixeltable/catalog/table_metadata.py +6 -6
  11. pixeltable/catalog/table_version.py +322 -257
  12. pixeltable/catalog/table_version_handle.py +4 -4
  13. pixeltable/catalog/table_version_path.py +9 -10
  14. pixeltable/catalog/tbl_ops.py +9 -3
  15. pixeltable/catalog/view.py +34 -28
  16. pixeltable/config.py +14 -10
  17. pixeltable/dataframe.py +68 -77
  18. pixeltable/env.py +74 -64
  19. pixeltable/exec/aggregation_node.py +6 -6
  20. pixeltable/exec/cache_prefetch_node.py +10 -10
  21. pixeltable/exec/data_row_batch.py +3 -3
  22. pixeltable/exec/exec_context.py +4 -5
  23. pixeltable/exec/exec_node.py +5 -5
  24. pixeltable/exec/expr_eval/evaluators.py +6 -6
  25. pixeltable/exec/expr_eval/expr_eval_node.py +8 -7
  26. pixeltable/exec/expr_eval/globals.py +6 -6
  27. pixeltable/exec/expr_eval/row_buffer.py +1 -2
  28. pixeltable/exec/expr_eval/schedulers.py +11 -11
  29. pixeltable/exec/in_memory_data_node.py +2 -2
  30. pixeltable/exec/object_store_save_node.py +14 -17
  31. pixeltable/exec/sql_node.py +25 -25
  32. pixeltable/exprs/arithmetic_expr.py +4 -4
  33. pixeltable/exprs/array_slice.py +2 -2
  34. pixeltable/exprs/column_property_ref.py +3 -3
  35. pixeltable/exprs/column_ref.py +61 -74
  36. pixeltable/exprs/comparison.py +5 -5
  37. pixeltable/exprs/compound_predicate.py +3 -3
  38. pixeltable/exprs/data_row.py +12 -12
  39. pixeltable/exprs/expr.py +41 -31
  40. pixeltable/exprs/expr_dict.py +3 -3
  41. pixeltable/exprs/expr_set.py +3 -3
  42. pixeltable/exprs/function_call.py +14 -14
  43. pixeltable/exprs/in_predicate.py +4 -4
  44. pixeltable/exprs/inline_expr.py +8 -8
  45. pixeltable/exprs/is_null.py +1 -3
  46. pixeltable/exprs/json_mapper.py +8 -8
  47. pixeltable/exprs/json_path.py +6 -6
  48. pixeltable/exprs/literal.py +5 -5
  49. pixeltable/exprs/method_ref.py +2 -2
  50. pixeltable/exprs/object_ref.py +2 -2
  51. pixeltable/exprs/row_builder.py +14 -14
  52. pixeltable/exprs/rowid_ref.py +8 -8
  53. pixeltable/exprs/similarity_expr.py +50 -25
  54. pixeltable/exprs/sql_element_cache.py +4 -4
  55. pixeltable/exprs/string_op.py +2 -2
  56. pixeltable/exprs/type_cast.py +3 -5
  57. pixeltable/func/aggregate_function.py +8 -8
  58. pixeltable/func/callable_function.py +9 -9
  59. pixeltable/func/expr_template_function.py +3 -3
  60. pixeltable/func/function.py +15 -17
  61. pixeltable/func/function_registry.py +6 -7
  62. pixeltable/func/globals.py +2 -3
  63. pixeltable/func/mcp.py +2 -2
  64. pixeltable/func/query_template_function.py +16 -16
  65. pixeltable/func/signature.py +14 -14
  66. pixeltable/func/tools.py +11 -11
  67. pixeltable/func/udf.py +16 -18
  68. pixeltable/functions/__init__.py +1 -0
  69. pixeltable/functions/anthropic.py +7 -7
  70. pixeltable/functions/audio.py +76 -0
  71. pixeltable/functions/bedrock.py +6 -6
  72. pixeltable/functions/deepseek.py +4 -4
  73. pixeltable/functions/fireworks.py +2 -2
  74. pixeltable/functions/gemini.py +6 -6
  75. pixeltable/functions/globals.py +12 -12
  76. pixeltable/functions/groq.py +4 -4
  77. pixeltable/functions/huggingface.py +18 -20
  78. pixeltable/functions/image.py +7 -10
  79. pixeltable/functions/llama_cpp.py +7 -7
  80. pixeltable/functions/math.py +2 -3
  81. pixeltable/functions/mistralai.py +3 -3
  82. pixeltable/functions/ollama.py +9 -9
  83. pixeltable/functions/openai.py +21 -21
  84. pixeltable/functions/openrouter.py +7 -7
  85. pixeltable/functions/string.py +21 -28
  86. pixeltable/functions/timestamp.py +7 -8
  87. pixeltable/functions/together.py +4 -6
  88. pixeltable/functions/twelvelabs.py +92 -0
  89. pixeltable/functions/video.py +2 -24
  90. pixeltable/functions/vision.py +6 -6
  91. pixeltable/functions/whisper.py +7 -7
  92. pixeltable/functions/whisperx.py +16 -16
  93. pixeltable/globals.py +52 -36
  94. pixeltable/index/base.py +12 -8
  95. pixeltable/index/btree.py +19 -22
  96. pixeltable/index/embedding_index.py +30 -39
  97. pixeltable/io/datarows.py +3 -3
  98. pixeltable/io/external_store.py +13 -16
  99. pixeltable/io/fiftyone.py +5 -5
  100. pixeltable/io/globals.py +5 -5
  101. pixeltable/io/hf_datasets.py +4 -4
  102. pixeltable/io/label_studio.py +12 -12
  103. pixeltable/io/pandas.py +6 -6
  104. pixeltable/io/parquet.py +2 -2
  105. pixeltable/io/table_data_conduit.py +12 -12
  106. pixeltable/io/utils.py +2 -2
  107. pixeltable/iterators/audio.py +2 -2
  108. pixeltable/iterators/video.py +8 -13
  109. pixeltable/metadata/converters/convert_18.py +2 -2
  110. pixeltable/metadata/converters/convert_19.py +2 -2
  111. pixeltable/metadata/converters/convert_20.py +2 -2
  112. pixeltable/metadata/converters/convert_21.py +2 -2
  113. pixeltable/metadata/converters/convert_22.py +2 -2
  114. pixeltable/metadata/converters/convert_24.py +2 -2
  115. pixeltable/metadata/converters/convert_25.py +2 -2
  116. pixeltable/metadata/converters/convert_26.py +2 -2
  117. pixeltable/metadata/converters/convert_29.py +4 -4
  118. pixeltable/metadata/converters/convert_34.py +2 -2
  119. pixeltable/metadata/converters/convert_36.py +2 -2
  120. pixeltable/metadata/converters/convert_38.py +2 -2
  121. pixeltable/metadata/converters/convert_39.py +1 -2
  122. pixeltable/metadata/converters/util.py +11 -13
  123. pixeltable/metadata/schema.py +22 -21
  124. pixeltable/metadata/utils.py +2 -6
  125. pixeltable/mypy/mypy_plugin.py +5 -5
  126. pixeltable/plan.py +30 -28
  127. pixeltable/share/packager.py +7 -7
  128. pixeltable/share/publish.py +3 -3
  129. pixeltable/store.py +125 -61
  130. pixeltable/type_system.py +43 -46
  131. pixeltable/utils/__init__.py +1 -2
  132. pixeltable/utils/arrow.py +4 -4
  133. pixeltable/utils/av.py +8 -0
  134. pixeltable/utils/azure_store.py +305 -0
  135. pixeltable/utils/code.py +1 -2
  136. pixeltable/utils/dbms.py +15 -19
  137. pixeltable/utils/description_helper.py +2 -3
  138. pixeltable/utils/documents.py +5 -6
  139. pixeltable/utils/exception_handler.py +2 -2
  140. pixeltable/utils/filecache.py +5 -5
  141. pixeltable/utils/formatter.py +4 -6
  142. pixeltable/utils/gcs_store.py +9 -9
  143. pixeltable/utils/local_store.py +17 -17
  144. pixeltable/utils/object_stores.py +59 -43
  145. pixeltable/utils/s3_store.py +35 -30
  146. {pixeltable-0.4.18.dist-info → pixeltable-0.4.19.dist-info}/METADATA +1 -1
  147. pixeltable-0.4.19.dist-info/RECORD +213 -0
  148. pixeltable/__version__.py +0 -3
  149. pixeltable-0.4.18.dist-info/RECORD +0 -211
  150. {pixeltable-0.4.18.dist-info → pixeltable-0.4.19.dist-info}/WHEEL +0 -0
  151. {pixeltable-0.4.18.dist-info → pixeltable-0.4.19.dist-info}/entry_points.txt +0 -0
  152. {pixeltable-0.4.18.dist-info → pixeltable-0.4.19.dist-info}/licenses/LICENSE +0 -0
pixeltable/functions/huggingface.py

@@ -7,7 +7,7 @@ first `pip install transformers` (or in some cases, `sentence-transformers`, as
 UDFs).
 """

-from typing import Any, Callable, Literal, Optional, TypeVar
+from typing import Any, Callable, Literal, TypeVar

 import av
 import numpy as np
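
The bulk of this release is a mechanical typing migration: every `Optional[X]` annotation becomes the PEP 604 union `X | None`, and the now-unused `Optional` imports are dropped, as in the hunk above. A minimal sketch of the equivalence (runnable on Python 3.10+, which PEP 604 requires at runtime; the function names are illustrative):

from typing import Optional

def old_style(language: Optional[str] = None) -> Optional[str]:
    return language

def new_style(language: str | None = None) -> str | None:
    return language

# Both spellings construct the same union and compare equal on 3.10+.
assert (str | None) == Optional[str]

The two forms are interchangeable for type checkers; the `X | None` spelling simply drops the extra import, which is why so many `from typing import ...` lines shrink in this diff.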
@@ -351,7 +351,7 @@ def vit_for_image_classification(


 @pxt.udf
-def speech2text_for_conditional_generation(audio: pxt.Audio, *, model_id: str, language: Optional[str] = None) -> str:
+def speech2text_for_conditional_generation(audio: pxt.Audio, *, model_id: str, language: str | None = None) -> str:
     """
     Transcribes or translates speech to text using a Speech2Text model. `model_id` should be a reference to a
     pretrained [Speech2Text](https://huggingface.co/docs/transformers/en/model_doc/speech_to_text) model.
@@ -408,7 +408,7 @@ def speech2text_for_conditional_generation(audio: pxt.Audio, *, model_id: str, l
             f'Supported languages are: {list(tokenizer.lang_code_to_id.keys())}'
         )

-    forced_bos_token_id: Optional[int] = None if language is None else tokenizer.lang_code_to_id[language]
+    forced_bos_token_id: int | None = None if language is None else tokenizer.lang_code_to_id[language]

     # Get the model's sampling rate. Default to 16 kHz (the standard) if not in config
     model_sampling_rate = getattr(model.config, 'sampling_rate', 16_000)
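
For reference, a hypothetical call site for the updated UDF; the table name, column name, and checkpoint are illustrative assumptions, not part of this diff:

import pixeltable as pxt
from pixeltable.functions.huggingface import speech2text_for_conditional_generation

# Assumes an existing table with an `audio` column of type pxt.Audio.
t = pxt.get_table('demo.audio_clips')
t.select(
    speech2text_for_conditional_generation(t.audio, model_id='facebook/s2t-small-librispeech-asr')
).collect()

As the hunk above shows, an unsupported `language` code fails fast against `tokenizer.lang_code_to_id` before any audio is processed.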
@@ -460,7 +460,7 @@ def detr_to_coco(image: PIL.Image.Image, detr_info: dict[str, Any]) -> dict[str,


 @pxt.udf
-def text_generation(text: str, *, model_id: str, model_kwargs: Optional[dict[str, Any]] = None) -> str:
+def text_generation(text: str, *, model_id: str, model_kwargs: dict[str, Any] | None = None) -> str:
     """
     Generates text using a pretrained language model. `model_id` should be a reference to a pretrained
     [text generation model](https://huggingface.co/models?pipeline_tag=text-generation).
@@ -574,7 +574,7 @@ def text_classification(text: Batch[str], *, model_id: str, top_k: int = 5) -> B

 @pxt.udf(batch_size=4)
 def image_captioning(
-    image: Batch[PIL.Image.Image], *, model_id: str, model_kwargs: Optional[dict[str, Any]] = None
+    image: Batch[PIL.Image.Image], *, model_id: str, model_kwargs: dict[str, Any] | None = None
 ) -> Batch[str]:
     """
     Generates captions for images using a pretrained image captioning model. `model_id` should be a reference to a
@@ -624,7 +624,7 @@ def image_captioning(


 @pxt.udf(batch_size=8)
-def summarization(text: Batch[str], *, model_id: str, model_kwargs: Optional[dict[str, Any]] = None) -> Batch[str]:
+def summarization(text: Batch[str], *, model_id: str, model_kwargs: dict[str, Any] | None = None) -> Batch[str]:
     """
     Summarizes text using a pretrained summarization model. `model_id` should be a reference to a pretrained
     [summarization model](https://huggingface.co/models?pipeline_tag=summarization) such as BART, T5, or Pegasus.
@@ -880,7 +880,7 @@ def question_answering(context: str, question: str, *, model_id: str) -> dict[st

 @pxt.udf(batch_size=8)
 def translation(
-    text: Batch[str], *, model_id: str, src_lang: Optional[str] = None, target_lang: Optional[str] = None
+    text: Batch[str], *, model_id: str, src_lang: str | None = None, target_lang: str | None = None
 ) -> Batch[str]:
     """
     Translates text using a pretrained translation model. `model_id` should be a reference to a pretrained
@@ -954,8 +954,8 @@ def text_to_image(
     model_id: str,
     height: int = 512,
     width: int = 512,
-    seed: Optional[int] = None,
-    model_kwargs: Optional[dict[str, Any]] = None,
+    seed: int | None = None,
+    model_kwargs: dict[str, Any] | None = None,
 ) -> PIL.Image.Image:
     """
     Generates images from text prompts using a pretrained text-to-image model. `model_id` should be a reference to a
@@ -1034,9 +1034,7 @@ def text_to_image(


 @pxt.udf
-def text_to_speech(
-    text: str, *, model_id: str, speaker_id: Optional[int] = None, vocoder: Optional[str] = None
-) -> pxt.Audio:
+def text_to_speech(text: str, *, model_id: str, speaker_id: int | None = None, vocoder: str | None = None) -> pxt.Audio:
     """
     Converts text to speech using a pretrained TTS model. `model_id` should be a reference to a
     pretrained [text-to-speech model](https://huggingface.co/models?pipeline_tag=text-to-speech).
@@ -1142,8 +1140,8 @@ def image_to_image(
     prompt: str,
     *,
     model_id: str,
-    seed: Optional[int] = None,
-    model_kwargs: Optional[dict[str, Any]] = None,
+    seed: int | None = None,
+    model_kwargs: dict[str, Any] | None = None,
 ) -> PIL.Image.Image:
     """
     Transforms input images based on text prompts using a pretrained image-to-image model.
@@ -1217,8 +1215,8 @@ def automatic_speech_recognition(
     audio: pxt.Audio,
     *,
     model_id: str,
-    language: Optional[str] = None,
-    chunk_length_s: Optional[int] = None,
+    language: str | None = None,
+    chunk_length_s: int | None = None,
     return_timestamps: bool = False,
 ) -> str:
     """
@@ -1370,8 +1368,8 @@ def image_to_video(
     model_id: str,
     num_frames: int = 25,
     fps: int = 6,
-    seed: Optional[int] = None,
-    model_kwargs: Optional[dict[str, Any]] = None,
+    seed: int | None = None,
+    model_kwargs: dict[str, Any] | None = None,
 ) -> pxt.Video:
     """
     Generates videos from input images using a pretrained image-to-video model.
@@ -1487,7 +1485,7 @@ def image_to_video(


 def _lookup_model(
-    model_id: str, create: Callable[..., T], device: Optional[str] = None, pass_device_to_create: bool = False
+    model_id: str, create: Callable[..., T], device: str | None = None, pass_device_to_create: bool = False
 ) -> T:
     from torch import nn

@@ -1512,7 +1510,7 @@ def _lookup_processor(model_id: str, create: Callable[[str], T]) -> T:
     return _processor_cache[key]


-_model_cache: dict[tuple[str, Callable, Optional[str]], Any] = {}
+_model_cache: dict[tuple[str, Callable, str | None], Any] = {}
 _processor_cache: dict[tuple[str, Callable], Any] = {}

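The `_model_cache` annotation above also documents the memoization scheme: one model instance per (model_id, factory callable, device) triple. A rough sketch of that pattern, under a simplified signature (the real `_lookup_model` additionally handles `pass_device_to_create` and moving `nn.Module` instances to the device):

from typing import Any, Callable, TypeVar

T = TypeVar('T')

_model_cache: dict[tuple[str, Callable, str | None], Any] = {}

def lookup_model(model_id: str, create: Callable[..., T], device: str | None = None) -> T:
    # Construct the model once per (id, factory, device); reuse it for every subsequent row.
    key = (model_id, create, device)
    if key not in _model_cache:
        _model_cache[key] = create(model_id)
    return _model_cache[key]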
pixeltable/functions/image.py

@@ -11,7 +11,6 @@ t.select(t.img_col.convert('L')).collect()
 """

 import base64
-from typing import Optional

 import PIL.Image

@@ -156,7 +155,7 @@ def get_metadata(self: PIL.Image.Image) -> dict:

 # Image.point()
 @pxt.udf(is_method=True)
-def point(self: PIL.Image.Image, lut: list[int], mode: Optional[str] = None) -> PIL.Image.Image:
+def point(self: PIL.Image.Image, lut: list[int], mode: str | None = None) -> PIL.Image.Image:
     """
     Map image pixels through a lookup table.

@@ -241,7 +240,7 @@ def _(self: Expr) -> ts.ColumnType:


 @pxt.udf(substitute_fn=PIL.Image.Image.entropy, is_method=True)
-def entropy(self: PIL.Image.Image, mask: Optional[PIL.Image.Image] = None, extrema: Optional[list] = None) -> float:
+def entropy(self: PIL.Image.Image, mask: PIL.Image.Image | None = None, extrema: list | None = None) -> float:
     """
     Returns the entropy of the image, optionally using a mask and extrema.

@@ -306,7 +305,7 @@ def getextrema(self: PIL.Image.Image) -> tuple[int, int]:


 @pxt.udf(substitute_fn=PIL.Image.Image.getpalette, is_method=True)
-def getpalette(self: PIL.Image.Image, mode: Optional[str] = None) -> tuple[int]:
+def getpalette(self: PIL.Image.Image, mode: str | None = None) -> tuple[int]:
     """
     Return the palette of the image, optionally converting it to a different mode.

@@ -346,9 +345,7 @@ def getprojection(self: PIL.Image.Image) -> tuple[int]:


 @pxt.udf(substitute_fn=PIL.Image.Image.histogram, is_method=True)
-def histogram(
-    self: PIL.Image.Image, mask: Optional[PIL.Image.Image] = None, extrema: Optional[list] = None
-) -> list[int]:
+def histogram(self: PIL.Image.Image, mask: PIL.Image.Image | None = None, extrema: list | None = None) -> list[int]:
     """
     Return a histogram for the image.

@@ -366,9 +363,9 @@ def histogram(
 def quantize(
     self: PIL.Image.Image,
     colors: int = 256,
-    method: Optional[int] = None,
+    method: int | None = None,
     kmeans: int = 0,
-    palette: Optional[int] = None,
+    palette: int | None = None,
     dither: int = PIL.Image.Dither.FLOYDSTEINBERG,
 ) -> PIL.Image.Image:
     """
@@ -392,7 +389,7 @@ def quantize(


 @pxt.udf(substitute_fn=PIL.Image.Image.reduce, is_method=True)
-def reduce(self: PIL.Image.Image, factor: int, box: Optional[tuple[int, int, int, int]] = None) -> PIL.Image.Image:
+def reduce(self: PIL.Image.Image, factor: int, box: tuple[int, int, int, int] | None = None) -> PIL.Image.Image:
     """
     Reduce the image by the given factor.

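All of the UDFs above are registered with `is_method=True`, so they are invoked method-style on image columns, matching the module docstring's own example (`t.select(t.img_col.convert('L')).collect()`). A hedged usage sketch, assuming a table with an `img_col` image column:

import pixeltable as pxt

t = pxt.get_table('demo.images')                   # assumed existing table
t.select(t.img_col.quantize(colors=16)).collect()  # palette quantization
t.select(t.img_col.reduce(2)).collect()            # downscale by an integer factor
t.select(t.img_col.entropy()).collect()            # per-image entropy as a float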
pixeltable/functions/llama_cpp.py

@@ -6,7 +6,7 @@ supporting chat completions and embeddings with GGUF format models.
 """

 from pathlib import Path
-from typing import TYPE_CHECKING, Any, Optional
+from typing import TYPE_CHECKING, Any

 import pixeltable as pxt
 import pixeltable.exceptions as excs
@@ -21,10 +21,10 @@ if TYPE_CHECKING:
 def create_chat_completion(
     messages: list[dict],
     *,
-    model_path: Optional[str] = None,
-    repo_id: Optional[str] = None,
-    repo_filename: Optional[str] = None,
-    model_kwargs: Optional[dict[str, Any]] = None,
+    model_path: str | None = None,
+    repo_id: str | None = None,
+    repo_filename: str | None = None,
+    model_kwargs: dict[str, Any] | None = None,
 ) -> dict:
     """
     Generate a chat completion from a list of messages.
@@ -88,7 +88,7 @@ def _lookup_local_model(model_path: str, n_gpu_layers: int) -> 'llama_cpp.Llama'
     return _model_cache[key]


-def _lookup_pretrained_model(repo_id: str, filename: Optional[str], n_gpu_layers: int) -> 'llama_cpp.Llama':
+def _lookup_pretrained_model(repo_id: str, filename: str | None, n_gpu_layers: int) -> 'llama_cpp.Llama':
     import llama_cpp

     key = (repo_id, filename, n_gpu_layers)
@@ -101,7 +101,7 @@ def _lookup_pretrained_model(repo_id: str, filename: Optional[str], n_gpu_layers


 _model_cache: dict[tuple[str, str, int], 'llama_cpp.Llama'] = {}
-_IS_GPU_AVAILABLE: Optional[bool] = None
+_IS_GPU_AVAILABLE: bool | None = None


 def cleanup() -> None:
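
A hypothetical call shape for `create_chat_completion` under the new signature. Judging by the parameters, a model is addressed either by a local `model_path` or by `repo_id`/`repo_filename` (HuggingFace GGUF coordinates); the values below are placeholders, not from this diff:

import pixeltable as pxt
from pixeltable.functions.llama_cpp import create_chat_completion

t = pxt.get_table('demo.chats')  # assumed table with a `question` string column
messages = [{'role': 'user', 'content': t.question}]
t.select(
    create_chat_completion(
        messages,
        repo_id='Qwen/Qwen2-0.5B-Instruct-GGUF',  # placeholder GGUF repo
        repo_filename='*q8_0.gguf',               # placeholder filename pattern
    )
).collect()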
pixeltable/functions/math.py

@@ -12,7 +12,6 @@ t.select(t.float_col.floor()).collect()

 import builtins
 import math
-from typing import Optional

 import sqlalchemy as sql

@@ -80,7 +79,7 @@ def _(self: sql.ColumnElement) -> sql.ColumnElement:


 @pxt.udf(is_method=True)
-def round(self: float, digits: Optional[int] = None) -> float:
+def round(self: float, digits: int | None = None) -> float:
     """
     Round a number to a given precision in decimal digits.

@@ -93,7 +92,7 @@ def round(self: float, digits: Optional[int] = None) -> float:


 @round.to_sql
-def _(self: sql.ColumnElement, digits: Optional[sql.ColumnElement] = None) -> sql.ColumnElement:
+def _(self: sql.ColumnElement, digits: sql.ColumnElement | None = None) -> sql.ColumnElement:
     if digits is None:
         return sql.func.round(self)
     else:
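
The `@round.to_sql` override above is what lets the engine push rounding down into the database: with no `digits` it emits `sql.func.round(self)`, and presumably `sql.func.round(self, digits)` in the `else` branch that follows. Usage in the style of the module docstring (table and column names assumed):

import pixeltable as pxt

t = pxt.get_table('demo.measurements')     # assumed table with a `float_col` column
t.select(t.float_col.round(2)).collect()   # can be evaluated as SQL round(float_col, 2)
t.select(t.float_col.round()).collect()    # can be evaluated as SQL round(float_col)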
pixeltable/functions/mistralai.py

@@ -5,7 +5,7 @@ first `pip install mistralai` and configure your Mistral AI credentials, as desc
 the [Working with Mistral AI](https://pixeltable.readme.io/docs/working-with-mistralai) tutorial.
 """

-from typing import TYPE_CHECKING, Any, Optional
+from typing import TYPE_CHECKING, Any

 import numpy as np

@@ -32,7 +32,7 @@ def _mistralai_client() -> 'mistralai.Mistral':

 @pxt.udf(resource_pool='request-rate:mistral')
 async def chat_completions(
-    messages: list[dict[str, str]], *, model: str, model_kwargs: Optional[dict[str, Any]] = None
+    messages: list[dict[str, str]], *, model: str, model_kwargs: dict[str, Any] | None = None
 ) -> dict:
     """
     Chat Completion API.
@@ -77,7 +77,7 @@ async def chat_completions(


 @pxt.udf(resource_pool='request-rate:mistral')
-async def fim_completions(prompt: str, *, model: str, model_kwargs: Optional[dict[str, Any]] = None) -> dict:
+async def fim_completions(prompt: str, *, model: str, model_kwargs: dict[str, Any] | None = None) -> dict:
     """
     Fill-in-the-middle Completion API.

pixeltable/functions/ollama.py

@@ -5,7 +5,7 @@ Provides integration with Ollama for running large language models locally,
 including chat completions and embeddings.
 """

-from typing import TYPE_CHECKING, Optional
+from typing import TYPE_CHECKING

 import numpy as np

@@ -25,7 +25,7 @@ def _(host: str) -> 'ollama.Client':
     return ollama.Client(host=host)


-def _ollama_client() -> Optional['ollama.Client']:
+def _ollama_client() -> 'ollama.Client | None':
     try:
         return env.Env.get().get_client('ollama')
     except Exception:
@@ -40,10 +40,10 @@ def generate(
     suffix: str = '',
     system: str = '',
     template: str = '',
-    context: Optional[list[int]] = None,
+    context: list[int] | None = None,
     raw: bool = False,
-    format: Optional[str] = None,
-    options: Optional[dict] = None,
+    format: str | None = None,
+    options: dict | None = None,
 ) -> dict:
     """
     Generate a response for a given prompt with a provided model.
@@ -84,9 +84,9 @@ def chat(
     messages: list[dict],
     *,
     model: str,
-    tools: Optional[list[dict]] = None,
-    format: Optional[str] = None,
-    options: Optional[dict] = None,
+    tools: list[dict] | None = None,
+    format: str | None = None,
+    options: dict | None = None,
 ) -> dict:
     """
     Generate the next message in a chat with a provided model.
@@ -110,7 +110,7 @@ def chat(

 @pxt.udf(batch_size=16)
 def embed(
-    input: Batch[str], *, model: str, truncate: bool = True, options: Optional[dict] = None
+    input: Batch[str], *, model: str, truncate: bool = True, options: dict | None = None
 ) -> Batch[pxt.Array[(None,), pxt.Float]]:
     """
     Generate embeddings from a model.
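
A hypothetical call shape for the updated `chat` and `embed` UDFs; the model names are examples, and a running local Ollama server is assumed:

import pixeltable as pxt
from pixeltable.functions.ollama import chat, embed

t = pxt.get_table('demo.prompts')  # assumed table with a `question` string column
messages = [{'role': 'user', 'content': t.question}]
t.select(chat(messages, model='llama3.2', options={'temperature': 0.0})).collect()
t.select(embed(t.question, model='nomic-embed-text')).collect()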
pixeltable/functions/openai.py

@@ -13,7 +13,7 @@ import logging
 import math
 import pathlib
 import re
-from typing import TYPE_CHECKING, Any, Callable, Optional, Type
+from typing import TYPE_CHECKING, Any, Callable, Type

 import httpx
 import numpy as np
@@ -32,7 +32,7 @@ _logger = logging.getLogger('pixeltable')


 @env.register_client('openai')
-def _(api_key: str, base_url: Optional[str] = None, api_version: Optional[str] = None) -> 'openai.AsyncOpenAI':
+def _(api_key: str, base_url: str | None = None, api_version: str | None = None) -> 'openai.AsyncOpenAI':
     import openai

     default_query = None if api_version is None else {'api-version': api_version}
@@ -169,7 +169,7 @@ class OpenAIRateLimitsInfo(env.RateLimitsInfo):
         self.record(requests=requests_info, tokens=tokens_info)
         self.has_exc = True

-    def get_retry_delay(self, exc: Exception) -> Optional[float]:
+    def get_retry_delay(self, exc: Exception) -> float | None:
         import openai

         if not isinstance(exc, self.retryable_errors):
@@ -183,7 +183,7 @@


 @pxt.udf
-async def speech(input: str, *, model: str, voice: str, model_kwargs: Optional[dict[str, Any]] = None) -> pxt.Audio:
+async def speech(input: str, *, model: str, voice: str, model_kwargs: dict[str, Any] | None = None) -> pxt.Audio:
     """
     Generates audio from the input text.

@@ -226,7 +226,7 @@ async def speech(input: str, *, model: str, voice: str, model_kwargs: Optional[d


 @pxt.udf
-async def transcriptions(audio: pxt.Audio, *, model: str, model_kwargs: Optional[dict[str, Any]] = None) -> dict:
+async def transcriptions(audio: pxt.Audio, *, model: str, model_kwargs: dict[str, Any] | None = None) -> dict:
     """
     Transcribes audio into the input language.

@@ -265,7 +265,7 @@ async def transcriptions(audio: pxt.Audio, *, model: str, model_kwargs: Optional


 @pxt.udf
-async def translations(audio: pxt.Audio, *, model: str, model_kwargs: Optional[dict[str, Any]] = None) -> dict:
+async def translations(audio: pxt.Audio, *, model: str, model_kwargs: dict[str, Any] | None = None) -> dict:
     """
     Translates audio into English.

@@ -335,7 +335,7 @@ def _is_model_family(model: str, family: str) -> bool:


 def _chat_completions_get_request_resources(
-    messages: list, model: str, model_kwargs: Optional[dict[str, Any]]
+    messages: list, model: str, model_kwargs: dict[str, Any] | None
 ) -> dict[str, int]:
     if model_kwargs is None:
         model_kwargs = {}
@@ -362,10 +362,10 @@ async def chat_completions(
     messages: list,
     *,
     model: str,
-    model_kwargs: Optional[dict[str, Any]] = None,
-    tools: Optional[list[dict[str, Any]]] = None,
-    tool_choice: Optional[dict[str, Any]] = None,
-    _runtime_ctx: Optional[env.RuntimeCtx] = None,
+    model_kwargs: dict[str, Any] | None = None,
+    tools: list[dict[str, Any]] | None = None,
+    tool_choice: dict[str, Any] | None = None,
+    _runtime_ctx: env.RuntimeCtx | None = None,
 ) -> dict:
     """
     Creates a model response for the given chat conversation.
@@ -436,7 +436,7 @@ async def chat_completions(


 def _vision_get_request_resources(
-    prompt: str, image: PIL.Image.Image, model: str, model_kwargs: Optional[dict[str, Any]] = None
+    prompt: str, image: PIL.Image.Image, model: str, model_kwargs: dict[str, Any] | None = None
 ) -> dict[str, int]:
     if model_kwargs is None:
         model_kwargs = {}
@@ -477,8 +477,8 @@ async def vision(
     image: PIL.Image.Image,
     *,
     model: str,
-    model_kwargs: Optional[dict[str, Any]] = None,
-    _runtime_ctx: Optional[env.RuntimeCtx] = None,
+    model_kwargs: dict[str, Any] | None = None,
+    _runtime_ctx: env.RuntimeCtx | None = None,
 ) -> str:
     """
     Analyzes an image with the OpenAI vision capability. This is a convenience function that takes an image and
@@ -567,8 +567,8 @@ async def embeddings(
     input: Batch[str],
     *,
     model: str,
-    model_kwargs: Optional[dict[str, Any]] = None,
-    _runtime_ctx: Optional[env.RuntimeCtx] = None,
+    model_kwargs: dict[str, Any] | None = None,
+    _runtime_ctx: env.RuntimeCtx | None = None,
 ) -> Batch[pxt.Array[(None,), pxt.Float]]:
     """
     Creates an embedding vector representing the input text.
@@ -621,8 +621,8 @@ async def embeddings(


 @embeddings.conditional_return_type
-def _(model: str, model_kwargs: Optional[dict[str, Any]] = None) -> ts.ArrayType:
-    dimensions: Optional[int] = None
+def _(model: str, model_kwargs: dict[str, Any] | None = None) -> ts.ArrayType:
+    dimensions: int | None = None
     if model_kwargs is not None:
         dimensions = model_kwargs.get('dimensions')
     if dimensions is None:
@@ -639,7 +639,7 @@ def _(model: str, model_kwargs: Optional[dict[str, Any]] = None) -> ts.ArrayType

 @pxt.udf
 async def image_generations(
-    prompt: str, *, model: str = 'dall-e-2', model_kwargs: Optional[dict[str, Any]] = None
+    prompt: str, *, model: str = 'dall-e-2', model_kwargs: dict[str, Any] | None = None
 ) -> PIL.Image.Image:
     """
     Creates an image given a prompt.
@@ -685,7 +685,7 @@ async def image_generations(


 @image_generations.conditional_return_type
-def _(model_kwargs: Optional[dict[str, Any]] = None) -> ts.ImageType:
+def _(model_kwargs: dict[str, Any] | None = None) -> ts.ImageType:
     if model_kwargs is None or 'size' not in model_kwargs:
         # default size is 1024x1024
         return ts.ImageType(size=(1024, 1024))
@@ -761,7 +761,7 @@ def invoke_tools(tools: Tools, response: exprs.Expr) -> exprs.InlineDict:


 @pxt.udf
-def _openai_response_to_pxt_tool_calls(response: dict) -> Optional[dict]:
+def _openai_response_to_pxt_tool_calls(response: dict) -> dict | None:
     if 'tool_calls' not in response['choices'][0]['message'] or response['choices'][0]['message']['tool_calls'] is None:
         return None
     openai_tool_calls = response['choices'][0]['message']['tool_calls']
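
One detail worth noting above: the `conditional_return_type` hook for `embeddings` reads `model_kwargs['dimensions']`, so when `dimensions` is supplied the column type can carry a fixed array shape at schema-definition time rather than `(None,)`. A hedged sketch (the model and dimension values are examples):

import pixeltable as pxt
from pixeltable.functions.openai import embeddings

t = pxt.get_table('demo.docs')  # assumed table with a `text` string column
# With `dimensions` known up front, the computed column's type should resolve to a
# fixed-length float array rather than a variable-length one.
t.add_computed_column(
    text_emb=embeddings(t.text, model='text-embedding-3-small', model_kwargs={'dimensions': 256})
)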
pixeltable/functions/openrouter.py

@@ -6,7 +6,7 @@ you must first sign up at https://openrouter.ai, create an API key, and configur
 as described in the Working with OpenRouter tutorial.
 """

-from typing import TYPE_CHECKING, Any, Optional
+from typing import TYPE_CHECKING, Any

 import pixeltable as pxt
 from pixeltable.env import Env, register_client
@@ -17,7 +17,7 @@ if TYPE_CHECKING:


 @register_client('openrouter')
-def _(api_key: str, site_url: Optional[str] = None, app_name: Optional[str] = None) -> 'openai.AsyncOpenAI':
+def _(api_key: str, site_url: str | None = None, app_name: str | None = None) -> 'openai.AsyncOpenAI':
     import openai

     # Create default headers for OpenRouter
@@ -39,11 +39,11 @@ async def chat_completions(
     messages: list,
     *,
     model: str,
-    model_kwargs: Optional[dict[str, Any]] = None,
-    tools: Optional[list[dict[str, Any]]] = None,
-    tool_choice: Optional[dict[str, Any]] = None,
-    provider: Optional[dict[str, Any]] = None,
-    transforms: Optional[list[str]] = None,
+    model_kwargs: dict[str, Any] | None = None,
+    tools: list[dict[str, Any]] | None = None,
+    tool_choice: dict[str, Any] | None = None,
+    provider: dict[str, Any] | None = None,
+    transforms: list[str] | None = None,
 ) -> dict:
     """
     Chat Completion API via OpenRouter.
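
Finally, a hypothetical call shape for the OpenRouter variant, which extends the OpenAI-style signature with passthrough `provider` routing preferences and prompt `transforms`. The specific option values below are assumptions based on OpenRouter's documented options, not on anything in this diff:

import pixeltable as pxt
from pixeltable.functions.openrouter import chat_completions

t = pxt.get_table('demo.prompts')  # assumed table with a `prompt` string column
t.select(
    chat_completions(
        [{'role': 'user', 'content': t.prompt}],
        model='anthropic/claude-3.5-sonnet',  # example model slug
        provider={'order': ['Anthropic']},    # assumed provider-routing preference
        transforms=['middle-out'],            # assumed prompt transform
    )
).collect()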