pixeltable 0.4.18__py3-none-any.whl → 0.4.19__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of pixeltable might be problematic.
- pixeltable/__init__.py +1 -1
- pixeltable/_version.py +1 -0
- pixeltable/catalog/catalog.py +119 -100
- pixeltable/catalog/column.py +104 -115
- pixeltable/catalog/globals.py +1 -2
- pixeltable/catalog/insertable_table.py +44 -49
- pixeltable/catalog/path.py +3 -4
- pixeltable/catalog/schema_object.py +4 -4
- pixeltable/catalog/table.py +118 -122
- pixeltable/catalog/table_metadata.py +6 -6
- pixeltable/catalog/table_version.py +322 -257
- pixeltable/catalog/table_version_handle.py +4 -4
- pixeltable/catalog/table_version_path.py +9 -10
- pixeltable/catalog/tbl_ops.py +9 -3
- pixeltable/catalog/view.py +34 -28
- pixeltable/config.py +14 -10
- pixeltable/dataframe.py +68 -77
- pixeltable/env.py +74 -64
- pixeltable/exec/aggregation_node.py +6 -6
- pixeltable/exec/cache_prefetch_node.py +10 -10
- pixeltable/exec/data_row_batch.py +3 -3
- pixeltable/exec/exec_context.py +4 -5
- pixeltable/exec/exec_node.py +5 -5
- pixeltable/exec/expr_eval/evaluators.py +6 -6
- pixeltable/exec/expr_eval/expr_eval_node.py +8 -7
- pixeltable/exec/expr_eval/globals.py +6 -6
- pixeltable/exec/expr_eval/row_buffer.py +1 -2
- pixeltable/exec/expr_eval/schedulers.py +11 -11
- pixeltable/exec/in_memory_data_node.py +2 -2
- pixeltable/exec/object_store_save_node.py +14 -17
- pixeltable/exec/sql_node.py +25 -25
- pixeltable/exprs/arithmetic_expr.py +4 -4
- pixeltable/exprs/array_slice.py +2 -2
- pixeltable/exprs/column_property_ref.py +3 -3
- pixeltable/exprs/column_ref.py +61 -74
- pixeltable/exprs/comparison.py +5 -5
- pixeltable/exprs/compound_predicate.py +3 -3
- pixeltable/exprs/data_row.py +12 -12
- pixeltable/exprs/expr.py +41 -31
- pixeltable/exprs/expr_dict.py +3 -3
- pixeltable/exprs/expr_set.py +3 -3
- pixeltable/exprs/function_call.py +14 -14
- pixeltable/exprs/in_predicate.py +4 -4
- pixeltable/exprs/inline_expr.py +8 -8
- pixeltable/exprs/is_null.py +1 -3
- pixeltable/exprs/json_mapper.py +8 -8
- pixeltable/exprs/json_path.py +6 -6
- pixeltable/exprs/literal.py +5 -5
- pixeltable/exprs/method_ref.py +2 -2
- pixeltable/exprs/object_ref.py +2 -2
- pixeltable/exprs/row_builder.py +14 -14
- pixeltable/exprs/rowid_ref.py +8 -8
- pixeltable/exprs/similarity_expr.py +50 -25
- pixeltable/exprs/sql_element_cache.py +4 -4
- pixeltable/exprs/string_op.py +2 -2
- pixeltable/exprs/type_cast.py +3 -5
- pixeltable/func/aggregate_function.py +8 -8
- pixeltable/func/callable_function.py +9 -9
- pixeltable/func/expr_template_function.py +3 -3
- pixeltable/func/function.py +15 -17
- pixeltable/func/function_registry.py +6 -7
- pixeltable/func/globals.py +2 -3
- pixeltable/func/mcp.py +2 -2
- pixeltable/func/query_template_function.py +16 -16
- pixeltable/func/signature.py +14 -14
- pixeltable/func/tools.py +11 -11
- pixeltable/func/udf.py +16 -18
- pixeltable/functions/__init__.py +1 -0
- pixeltable/functions/anthropic.py +7 -7
- pixeltable/functions/audio.py +76 -0
- pixeltable/functions/bedrock.py +6 -6
- pixeltable/functions/deepseek.py +4 -4
- pixeltable/functions/fireworks.py +2 -2
- pixeltable/functions/gemini.py +6 -6
- pixeltable/functions/globals.py +12 -12
- pixeltable/functions/groq.py +4 -4
- pixeltable/functions/huggingface.py +18 -20
- pixeltable/functions/image.py +7 -10
- pixeltable/functions/llama_cpp.py +7 -7
- pixeltable/functions/math.py +2 -3
- pixeltable/functions/mistralai.py +3 -3
- pixeltable/functions/ollama.py +9 -9
- pixeltable/functions/openai.py +21 -21
- pixeltable/functions/openrouter.py +7 -7
- pixeltable/functions/string.py +21 -28
- pixeltable/functions/timestamp.py +7 -8
- pixeltable/functions/together.py +4 -6
- pixeltable/functions/twelvelabs.py +92 -0
- pixeltable/functions/video.py +2 -24
- pixeltable/functions/vision.py +6 -6
- pixeltable/functions/whisper.py +7 -7
- pixeltable/functions/whisperx.py +16 -16
- pixeltable/globals.py +52 -36
- pixeltable/index/base.py +12 -8
- pixeltable/index/btree.py +19 -22
- pixeltable/index/embedding_index.py +30 -39
- pixeltable/io/datarows.py +3 -3
- pixeltable/io/external_store.py +13 -16
- pixeltable/io/fiftyone.py +5 -5
- pixeltable/io/globals.py +5 -5
- pixeltable/io/hf_datasets.py +4 -4
- pixeltable/io/label_studio.py +12 -12
- pixeltable/io/pandas.py +6 -6
- pixeltable/io/parquet.py +2 -2
- pixeltable/io/table_data_conduit.py +12 -12
- pixeltable/io/utils.py +2 -2
- pixeltable/iterators/audio.py +2 -2
- pixeltable/iterators/video.py +8 -13
- pixeltable/metadata/converters/convert_18.py +2 -2
- pixeltable/metadata/converters/convert_19.py +2 -2
- pixeltable/metadata/converters/convert_20.py +2 -2
- pixeltable/metadata/converters/convert_21.py +2 -2
- pixeltable/metadata/converters/convert_22.py +2 -2
- pixeltable/metadata/converters/convert_24.py +2 -2
- pixeltable/metadata/converters/convert_25.py +2 -2
- pixeltable/metadata/converters/convert_26.py +2 -2
- pixeltable/metadata/converters/convert_29.py +4 -4
- pixeltable/metadata/converters/convert_34.py +2 -2
- pixeltable/metadata/converters/convert_36.py +2 -2
- pixeltable/metadata/converters/convert_38.py +2 -2
- pixeltable/metadata/converters/convert_39.py +1 -2
- pixeltable/metadata/converters/util.py +11 -13
- pixeltable/metadata/schema.py +22 -21
- pixeltable/metadata/utils.py +2 -6
- pixeltable/mypy/mypy_plugin.py +5 -5
- pixeltable/plan.py +30 -28
- pixeltable/share/packager.py +7 -7
- pixeltable/share/publish.py +3 -3
- pixeltable/store.py +125 -61
- pixeltable/type_system.py +43 -46
- pixeltable/utils/__init__.py +1 -2
- pixeltable/utils/arrow.py +4 -4
- pixeltable/utils/av.py +8 -0
- pixeltable/utils/azure_store.py +305 -0
- pixeltable/utils/code.py +1 -2
- pixeltable/utils/dbms.py +15 -19
- pixeltable/utils/description_helper.py +2 -3
- pixeltable/utils/documents.py +5 -6
- pixeltable/utils/exception_handler.py +2 -2
- pixeltable/utils/filecache.py +5 -5
- pixeltable/utils/formatter.py +4 -6
- pixeltable/utils/gcs_store.py +9 -9
- pixeltable/utils/local_store.py +17 -17
- pixeltable/utils/object_stores.py +59 -43
- pixeltable/utils/s3_store.py +35 -30
- {pixeltable-0.4.18.dist-info → pixeltable-0.4.19.dist-info}/METADATA +1 -1
- pixeltable-0.4.19.dist-info/RECORD +213 -0
- pixeltable/__version__.py +0 -3
- pixeltable-0.4.18.dist-info/RECORD +0 -211
- {pixeltable-0.4.18.dist-info → pixeltable-0.4.19.dist-info}/WHEEL +0 -0
- {pixeltable-0.4.18.dist-info → pixeltable-0.4.19.dist-info}/entry_points.txt +0 -0
- {pixeltable-0.4.18.dist-info → pixeltable-0.4.19.dist-info}/licenses/LICENSE +0 -0
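
Most of the function-module diffs shown below are a type-annotation migration: parameters and return types previously annotated with `typing.Optional[X]` are rewritten with the PEP 604 union syntax `X | None`, and the now-unused `Optional` imports are dropped. A minimal illustration of the pattern (the function below is a made-up example, not code from the package):

    from typing import Any, Optional

    # 0.4.18-style annotation
    def caption_old(image_path: str, *, model_kwargs: Optional[dict[str, Any]] = None) -> str:
        return ''

    # 0.4.19-style annotation: PEP 604 union, no Optional import required
    def caption_new(image_path: str, *, model_kwargs: dict[str, Any] | None = None) -> str:
        return ''

Runtime behavior is unchanged; only the spelling of the annotations differs.
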
pixeltable/functions/huggingface.py
CHANGED

@@ -7,7 +7,7 @@ first `pip install transformers` (or in some cases, `sentence-transformers`, as
 UDFs).
 """

-from typing import Any, Callable, Literal, Optional, TypeVar
+from typing import Any, Callable, Literal, TypeVar

 import av
 import numpy as np

@@ -351,7 +351,7 @@ def vit_for_image_classification(


 @pxt.udf
-def speech2text_for_conditional_generation(audio: pxt.Audio, *, model_id: str, language: Optional[str] = None) -> str:
+def speech2text_for_conditional_generation(audio: pxt.Audio, *, model_id: str, language: str | None = None) -> str:
 """
 Transcribes or translates speech to text using a Speech2Text model. `model_id` should be a reference to a
 pretrained [Speech2Text](https://huggingface.co/docs/transformers/en/model_doc/speech_to_text) model.

@@ -408,7 +408,7 @@ def speech2text_for_conditional_generation(audio: pxt.Audio, *, model_id: str, l
 f'Supported languages are: {list(tokenizer.lang_code_to_id.keys())}'
 )

-forced_bos_token_id: Optional[int] = None if language is None else tokenizer.lang_code_to_id[language]
+forced_bos_token_id: int | None = None if language is None else tokenizer.lang_code_to_id[language]

 # Get the model's sampling rate. Default to 16 kHz (the standard) if not in config
 model_sampling_rate = getattr(model.config, 'sampling_rate', 16_000)

@@ -460,7 +460,7 @@ def detr_to_coco(image: PIL.Image.Image, detr_info: dict[str, Any]) -> dict[str,


 @pxt.udf
-def text_generation(text: str, *, model_id: str, model_kwargs: Optional[dict[str, Any]] = None) -> str:
+def text_generation(text: str, *, model_id: str, model_kwargs: dict[str, Any] | None = None) -> str:
 """
 Generates text using a pretrained language model. `model_id` should be a reference to a pretrained
 [text generation model](https://huggingface.co/models?pipeline_tag=text-generation).

@@ -574,7 +574,7 @@ def text_classification(text: Batch[str], *, model_id: str, top_k: int = 5) -> B

 @pxt.udf(batch_size=4)
 def image_captioning(
-image: Batch[PIL.Image.Image], *, model_id: str, model_kwargs: Optional[dict[str, Any]] = None
+image: Batch[PIL.Image.Image], *, model_id: str, model_kwargs: dict[str, Any] | None = None
 ) -> Batch[str]:
 """
 Generates captions for images using a pretrained image captioning model. `model_id` should be a reference to a

@@ -624,7 +624,7 @@ def image_captioning(


 @pxt.udf(batch_size=8)
-def summarization(text: Batch[str], *, model_id: str, model_kwargs: Optional[dict[str, Any]] = None) -> Batch[str]:
+def summarization(text: Batch[str], *, model_id: str, model_kwargs: dict[str, Any] | None = None) -> Batch[str]:
 """
 Summarizes text using a pretrained summarization model. `model_id` should be a reference to a pretrained
 [summarization model](https://huggingface.co/models?pipeline_tag=summarization) such as BART, T5, or Pegasus.

@@ -880,7 +880,7 @@ def question_answering(context: str, question: str, *, model_id: str) -> dict[st

 @pxt.udf(batch_size=8)
 def translation(
-text: Batch[str], *, model_id: str, src_lang: Optional[str] = None, target_lang: Optional[str] = None
+text: Batch[str], *, model_id: str, src_lang: str | None = None, target_lang: str | None = None
 ) -> Batch[str]:
 """
 Translates text using a pretrained translation model. `model_id` should be a reference to a pretrained

@@ -954,8 +954,8 @@ def text_to_image(
 model_id: str,
 height: int = 512,
 width: int = 512,
-seed: Optional[int] = None,
-model_kwargs: Optional[dict[str, Any]] = None,
+seed: int | None = None,
+model_kwargs: dict[str, Any] | None = None,
 ) -> PIL.Image.Image:
 """
 Generates images from text prompts using a pretrained text-to-image model. `model_id` should be a reference to a

@@ -1034,9 +1034,7 @@ def text_to_image(


 @pxt.udf
-def text_to_speech(
-text: str, *, model_id: str, speaker_id: Optional[int] = None, vocoder: Optional[str] = None
-) -> pxt.Audio:
+def text_to_speech(text: str, *, model_id: str, speaker_id: int | None = None, vocoder: str | None = None) -> pxt.Audio:
 """
 Converts text to speech using a pretrained TTS model. `model_id` should be a reference to a
 pretrained [text-to-speech model](https://huggingface.co/models?pipeline_tag=text-to-speech).

@@ -1142,8 +1140,8 @@ def image_to_image(
 prompt: str,
 *,
 model_id: str,
-seed: Optional[int] = None,
-model_kwargs: Optional[dict[str, Any]] = None,
+seed: int | None = None,
+model_kwargs: dict[str, Any] | None = None,
 ) -> PIL.Image.Image:
 """
 Transforms input images based on text prompts using a pretrained image-to-image model.

@@ -1217,8 +1215,8 @@ def automatic_speech_recognition(
 audio: pxt.Audio,
 *,
 model_id: str,
-language: Optional[str] = None,
-chunk_length_s: Optional[int] = None,
+language: str | None = None,
+chunk_length_s: int | None = None,
 return_timestamps: bool = False,
 ) -> str:
 """

@@ -1370,8 +1368,8 @@ def image_to_video(
 model_id: str,
 num_frames: int = 25,
 fps: int = 6,
-seed: Optional[int] = None,
-model_kwargs: Optional[dict[str, Any]] = None,
+seed: int | None = None,
+model_kwargs: dict[str, Any] | None = None,
 ) -> pxt.Video:
 """
 Generates videos from input images using a pretrained image-to-video model.

@@ -1487,7 +1485,7 @@ def image_to_video(


 def _lookup_model(
-model_id: str, create: Callable[..., T], device: Optional[str] = None, pass_device_to_create: bool = False
+model_id: str, create: Callable[..., T], device: str | None = None, pass_device_to_create: bool = False
 ) -> T:
 from torch import nn


@@ -1512,7 +1510,7 @@ def _lookup_processor(model_id: str, create: Callable[[str], T]) -> T:
 return _processor_cache[key]


-_model_cache: dict[tuple[str, Callable, Optional[str]], Any] = {}
+_model_cache: dict[tuple[str, Callable, str | None], Any] = {}
 _processor_cache: dict[tuple[str, Callable], Any] = {}


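For orientation, here is a hedged sketch of how one of the re-annotated UDFs above is typically invoked; the select-style call mirrors the docstring examples elsewhere in this diff (`t.select(t.float_col.floor()).collect()`), while the table name, column name, and model id below are assumptions, not values from the package:

    import pixeltable as pxt
    from pixeltable.functions.huggingface import summarization

    # assumes an existing table `docs` with a string column `text`
    docs = pxt.get_table('docs')
    # model_kwargs is now typed `dict[str, Any] | None`; omitting it behaves the same as passing None
    docs.select(summarization(docs.text, model_id='facebook/bart-large-cnn')).collect()
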
pixeltable/functions/image.py
CHANGED

@@ -11,7 +11,6 @@ t.select(t.img_col.convert('L')).collect()
 """

 import base64
-from typing import Optional

 import PIL.Image


@@ -156,7 +155,7 @@ def get_metadata(self: PIL.Image.Image) -> dict:

 # Image.point()
 @pxt.udf(is_method=True)
-def point(self: PIL.Image.Image, lut: list[int], mode: Optional[str] = None) -> PIL.Image.Image:
+def point(self: PIL.Image.Image, lut: list[int], mode: str | None = None) -> PIL.Image.Image:
 """
 Map image pixels through a lookup table.


@@ -241,7 +240,7 @@ def _(self: Expr) -> ts.ColumnType:


 @pxt.udf(substitute_fn=PIL.Image.Image.entropy, is_method=True)
-def entropy(self: PIL.Image.Image, mask: Optional[PIL.Image.Image] = None, extrema: Optional[list] = None) -> float:
+def entropy(self: PIL.Image.Image, mask: PIL.Image.Image | None = None, extrema: list | None = None) -> float:
 """
 Returns the entropy of the image, optionally using a mask and extrema.


@@ -306,7 +305,7 @@ def getextrema(self: PIL.Image.Image) -> tuple[int, int]:


 @pxt.udf(substitute_fn=PIL.Image.Image.getpalette, is_method=True)
-def getpalette(self: PIL.Image.Image, mode: Optional[str] = None) -> tuple[int]:
+def getpalette(self: PIL.Image.Image, mode: str | None = None) -> tuple[int]:
 """
 Return the palette of the image, optionally converting it to a different mode.


@@ -346,9 +345,7 @@ def getprojection(self: PIL.Image.Image) -> tuple[int]:


 @pxt.udf(substitute_fn=PIL.Image.Image.histogram, is_method=True)
-def histogram(
-self: PIL.Image.Image, mask: Optional[PIL.Image.Image] = None, extrema: Optional[list] = None
-) -> list[int]:
+def histogram(self: PIL.Image.Image, mask: PIL.Image.Image | None = None, extrema: list | None = None) -> list[int]:
 """
 Return a histogram for the image.


@@ -366,9 +363,9 @@ def histogram(
 def quantize(
 self: PIL.Image.Image,
 colors: int = 256,
-method: Optional[int] = None,
+method: int | None = None,
 kmeans: int = 0,
-palette: Optional[int] = None,
+palette: int | None = None,
 dither: int = PIL.Image.Dither.FLOYDSTEINBERG,
 ) -> PIL.Image.Image:
 """

@@ -392,7 +389,7 @@ def quantize(


 @pxt.udf(substitute_fn=PIL.Image.Image.reduce, is_method=True)
-def reduce(self: PIL.Image.Image, factor: int, box: Optional[tuple[int, int, int, int]] = None) -> PIL.Image.Image:
+def reduce(self: PIL.Image.Image, factor: int, box: tuple[int, int, int, int] | None = None) -> PIL.Image.Image:
 """
 Reduce the image by the given factor.


pixeltable/functions/llama_cpp.py
CHANGED

@@ -6,7 +6,7 @@ supporting chat completions and embeddings with GGUF format models.
 """

 from pathlib import Path
-from typing import TYPE_CHECKING, Any, Optional
+from typing import TYPE_CHECKING, Any

 import pixeltable as pxt
 import pixeltable.exceptions as excs

@@ -21,10 +21,10 @@ if TYPE_CHECKING:
 def create_chat_completion(
 messages: list[dict],
 *,
-model_path: Optional[str] = None,
-repo_id: Optional[str] = None,
-repo_filename: Optional[str] = None,
-model_kwargs: Optional[dict[str, Any]] = None,
+model_path: str | None = None,
+repo_id: str | None = None,
+repo_filename: str | None = None,
+model_kwargs: dict[str, Any] | None = None,
 ) -> dict:
 """
 Generate a chat completion from a list of messages.

@@ -88,7 +88,7 @@ def _lookup_local_model(model_path: str, n_gpu_layers: int) -> 'llama_cpp.Llama'
 return _model_cache[key]


-def _lookup_pretrained_model(repo_id: str, filename: Optional[str], n_gpu_layers: int) -> 'llama_cpp.Llama':
+def _lookup_pretrained_model(repo_id: str, filename: str | None, n_gpu_layers: int) -> 'llama_cpp.Llama':
 import llama_cpp

 key = (repo_id, filename, n_gpu_layers)

@@ -101,7 +101,7 @@ def _lookup_pretrained_model(repo_id: str, filename: Optional[str], n_gpu_layers


 _model_cache: dict[tuple[str, str, int], 'llama_cpp.Llama'] = {}
-_IS_GPU_AVAILABLE: Optional[bool] = None
+_IS_GPU_AVAILABLE: bool | None = None


 def cleanup() -> None:
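A hedged usage sketch for the `create_chat_completion` signature above: either a local `model_path` or a Hugging Face `repo_id`/`repo_filename` pair can be supplied, all typed `str | None` in 0.4.19. The table, column, repo, and filename values below are illustrative assumptions:

    import pixeltable as pxt
    from pixeltable.functions.llama_cpp import create_chat_completion

    # assumes an existing table `chats` with a string column `prompt`
    t = pxt.get_table('chats')
    messages = [{'role': 'user', 'content': t.prompt}]
    t.add_computed_column(
        response=create_chat_completion(
            messages,
            repo_id='Qwen/Qwen2-0.5B-Instruct-GGUF',  # example GGUF repo (assumption)
            repo_filename='*q8_0.gguf',               # example filename pattern (assumption)
        )
    )
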
pixeltable/functions/math.py
CHANGED

@@ -12,7 +12,6 @@ t.select(t.float_col.floor()).collect()

 import builtins
 import math
-from typing import Optional

 import sqlalchemy as sql


@@ -80,7 +79,7 @@ def _(self: sql.ColumnElement) -> sql.ColumnElement:


 @pxt.udf(is_method=True)
-def round(self: float, digits: Optional[int] = None) -> float:
+def round(self: float, digits: int | None = None) -> float:
 """
 Round a number to a given precision in decimal digits.


@@ -93,7 +92,7 @@ def round(self: float, digits: Optional[int] = None) -> float:


 @round.to_sql
-def _(self: sql.ColumnElement, digits: Optional[sql.ColumnElement] = None) -> sql.ColumnElement:
+def _(self: sql.ColumnElement, digits: sql.ColumnElement | None = None) -> sql.ColumnElement:
 if digits is None:
 return sql.func.round(self)
 else:

pixeltable/functions/mistralai.py
CHANGED

@@ -5,7 +5,7 @@ first `pip install mistralai` and configure your Mistral AI credentials, as desc
 the [Working with Mistral AI](https://pixeltable.readme.io/docs/working-with-mistralai) tutorial.
 """

-from typing import TYPE_CHECKING, Any, Optional
+from typing import TYPE_CHECKING, Any

 import numpy as np


@@ -32,7 +32,7 @@ def _mistralai_client() -> 'mistralai.Mistral':

 @pxt.udf(resource_pool='request-rate:mistral')
 async def chat_completions(
-messages: list[dict[str, str]], *, model: str, model_kwargs: Optional[dict[str, Any]] = None
+messages: list[dict[str, str]], *, model: str, model_kwargs: dict[str, Any] | None = None
 ) -> dict:
 """
 Chat Completion API.

@@ -77,7 +77,7 @@ async def chat_completions(


 @pxt.udf(resource_pool='request-rate:mistral')
-async def fim_completions(prompt: str, *, model: str, model_kwargs: Optional[dict[str, Any]] = None) -> dict:
+async def fim_completions(prompt: str, *, model: str, model_kwargs: dict[str, Any] | None = None) -> dict:
 """
 Fill-in-the-middle Completion API.


pixeltable/functions/ollama.py
CHANGED

@@ -5,7 +5,7 @@ Provides integration with Ollama for running large language models locally,
 including chat completions and embeddings.
 """

-from typing import TYPE_CHECKING, Optional
+from typing import TYPE_CHECKING

 import numpy as np


@@ -25,7 +25,7 @@ def _(host: str) -> 'ollama.Client':
 return ollama.Client(host=host)


-def _ollama_client() -> Optional['ollama.Client']:
+def _ollama_client() -> 'ollama.Client | None':
 try:
 return env.Env.get().get_client('ollama')
 except Exception:

@@ -40,10 +40,10 @@ def generate(
 suffix: str = '',
 system: str = '',
 template: str = '',
-context: Optional[list[int]] = None,
+context: list[int] | None = None,
 raw: bool = False,
-format: Optional[str] = None,
-options: Optional[dict] = None,
+format: str | None = None,
+options: dict | None = None,
 ) -> dict:
 """
 Generate a response for a given prompt with a provided model.

@@ -84,9 +84,9 @@ def chat(
 messages: list[dict],
 *,
 model: str,
-tools: Optional[list[dict]] = None,
-format: Optional[str] = None,
-options: Optional[dict] = None,
+tools: list[dict] | None = None,
+format: str | None = None,
+options: dict | None = None,
 ) -> dict:
 """
 Generate the next message in a chat with a provided model.

@@ -110,7 +110,7 @@ def chat(

 @pxt.udf(batch_size=16)
 def embed(
-input: Batch[str], *, model: str, truncate: bool = True, options: Optional[dict] = None
+input: Batch[str], *, model: str, truncate: bool = True, options: dict | None = None
 ) -> Batch[pxt.Array[(None,), pxt.Float]]:
 """
 Generate embeddings from a model.

pixeltable/functions/openai.py
CHANGED

@@ -13,7 +13,7 @@ import logging
 import math
 import pathlib
 import re
-from typing import TYPE_CHECKING, Any, Callable, Optional, Type
+from typing import TYPE_CHECKING, Any, Callable, Type

 import httpx
 import numpy as np

@@ -32,7 +32,7 @@ _logger = logging.getLogger('pixeltable')


 @env.register_client('openai')
-def _(api_key: str, base_url: Optional[str] = None, api_version: Optional[str] = None) -> 'openai.AsyncOpenAI':
+def _(api_key: str, base_url: str | None = None, api_version: str | None = None) -> 'openai.AsyncOpenAI':
 import openai

 default_query = None if api_version is None else {'api-version': api_version}

@@ -169,7 +169,7 @@ class OpenAIRateLimitsInfo(env.RateLimitsInfo):
 self.record(requests=requests_info, tokens=tokens_info)
 self.has_exc = True

-def get_retry_delay(self, exc: Exception) -> Optional[float]:
+def get_retry_delay(self, exc: Exception) -> float | None:
 import openai

 if not isinstance(exc, self.retryable_errors):

@@ -183,7 +183,7 @@ class OpenAIRateLimitsInfo(env.RateLimitsInfo):


 @pxt.udf
-async def speech(input: str, *, model: str, voice: str, model_kwargs: Optional[dict[str, Any]] = None) -> pxt.Audio:
+async def speech(input: str, *, model: str, voice: str, model_kwargs: dict[str, Any] | None = None) -> pxt.Audio:
 """
 Generates audio from the input text.


@@ -226,7 +226,7 @@ async def speech(input: str, *, model: str, voice: str, model_kwargs: Optional[d


 @pxt.udf
-async def transcriptions(audio: pxt.Audio, *, model: str, model_kwargs: Optional[dict[str, Any]] = None) -> dict:
+async def transcriptions(audio: pxt.Audio, *, model: str, model_kwargs: dict[str, Any] | None = None) -> dict:
 """
 Transcribes audio into the input language.


@@ -265,7 +265,7 @@ async def transcriptions(audio: pxt.Audio, *, model: str, model_kwargs: Optional


 @pxt.udf
-async def translations(audio: pxt.Audio, *, model: str, model_kwargs: Optional[dict[str, Any]] = None) -> dict:
+async def translations(audio: pxt.Audio, *, model: str, model_kwargs: dict[str, Any] | None = None) -> dict:
 """
 Translates audio into English.


@@ -335,7 +335,7 @@ def _is_model_family(model: str, family: str) -> bool:


 def _chat_completions_get_request_resources(
-messages: list, model: str, model_kwargs: Optional[dict[str, Any]]
+messages: list, model: str, model_kwargs: dict[str, Any] | None
 ) -> dict[str, int]:
 if model_kwargs is None:
 model_kwargs = {}

@@ -362,10 +362,10 @@ async def chat_completions(
 messages: list,
 *,
 model: str,
-model_kwargs: Optional[dict[str, Any]] = None,
-tools: Optional[list[dict[str, Any]]] = None,
-tool_choice: Optional[dict[str, Any]] = None,
-_runtime_ctx: Optional[env.RuntimeCtx] = None,
+model_kwargs: dict[str, Any] | None = None,
+tools: list[dict[str, Any]] | None = None,
+tool_choice: dict[str, Any] | None = None,
+_runtime_ctx: env.RuntimeCtx | None = None,
 ) -> dict:
 """
 Creates a model response for the given chat conversation.

@@ -436,7 +436,7 @@ async def chat_completions(


 def _vision_get_request_resources(
-prompt: str, image: PIL.Image.Image, model: str, model_kwargs: Optional[dict[str, Any]] = None
+prompt: str, image: PIL.Image.Image, model: str, model_kwargs: dict[str, Any] | None = None
 ) -> dict[str, int]:
 if model_kwargs is None:
 model_kwargs = {}

@@ -477,8 +477,8 @@ async def vision(
 image: PIL.Image.Image,
 *,
 model: str,
-model_kwargs: Optional[dict[str, Any]] = None,
-_runtime_ctx: Optional[env.RuntimeCtx] = None,
+model_kwargs: dict[str, Any] | None = None,
+_runtime_ctx: env.RuntimeCtx | None = None,
 ) -> str:
 """
 Analyzes an image with the OpenAI vision capability. This is a convenience function that takes an image and

@@ -567,8 +567,8 @@ async def embeddings(
 input: Batch[str],
 *,
 model: str,
-model_kwargs: Optional[dict[str, Any]] = None,
-_runtime_ctx: Optional[env.RuntimeCtx] = None,
+model_kwargs: dict[str, Any] | None = None,
+_runtime_ctx: env.RuntimeCtx | None = None,
 ) -> Batch[pxt.Array[(None,), pxt.Float]]:
 """
 Creates an embedding vector representing the input text.

@@ -621,8 +621,8 @@ async def embeddings(


 @embeddings.conditional_return_type
-def _(model: str, model_kwargs: Optional[dict[str, Any]] = None) -> ts.ArrayType:
-dimensions: Optional[int] = None
+def _(model: str, model_kwargs: dict[str, Any] | None = None) -> ts.ArrayType:
+dimensions: int | None = None
 if model_kwargs is not None:
 dimensions = model_kwargs.get('dimensions')
 if dimensions is None:

@@ -639,7 +639,7 @@ def _(model: str, model_kwargs: Optional[dict[str, Any]] = None) -> ts.ArrayType

 @pxt.udf
 async def image_generations(
-prompt: str, *, model: str = 'dall-e-2', model_kwargs: Optional[dict[str, Any]] = None
+prompt: str, *, model: str = 'dall-e-2', model_kwargs: dict[str, Any] | None = None
 ) -> PIL.Image.Image:
 """
 Creates an image given a prompt.

@@ -685,7 +685,7 @@ async def image_generations(


 @image_generations.conditional_return_type
-def _(model_kwargs: Optional[dict[str, Any]] = None) -> ts.ImageType:
+def _(model_kwargs: dict[str, Any] | None = None) -> ts.ImageType:
 if model_kwargs is None or 'size' not in model_kwargs:
 # default size is 1024x1024
 return ts.ImageType(size=(1024, 1024))

@@ -761,7 +761,7 @@ def invoke_tools(tools: Tools, response: exprs.Expr) -> exprs.InlineDict:


 @pxt.udf
-def _openai_response_to_pxt_tool_calls(response: dict) -> Optional[dict]:
+def _openai_response_to_pxt_tool_calls(response: dict) -> dict | None:
 if 'tool_calls' not in response['choices'][0]['message'] or response['choices'][0]['message']['tool_calls'] is None:
 return None
 openai_tool_calls = response['choices'][0]['message']['tool_calls']

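The OpenAI UDFs keep their keyword-only optional parameters, now spelled with `| None`. A hedged usage sketch of `chat_completions`; the table and column names are assumptions, and `model_kwargs` carries any additional request options:

    import pixeltable as pxt
    from pixeltable.functions import openai

    # assumes an existing table `prompts` with a string column `question`
    t = pxt.get_table('prompts')
    messages = [{'role': 'user', 'content': t.question}]
    t.add_computed_column(
        response=openai.chat_completions(messages, model='gpt-4o-mini', model_kwargs={'temperature': 0.2})
    )
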
pixeltable/functions/openrouter.py
CHANGED

@@ -6,7 +6,7 @@ you must first sign up at https://openrouter.ai, create an API key, and configur
 as described in the Working with OpenRouter tutorial.
 """

-from typing import TYPE_CHECKING, Any, Optional
+from typing import TYPE_CHECKING, Any

 import pixeltable as pxt
 from pixeltable.env import Env, register_client

@@ -17,7 +17,7 @@ if TYPE_CHECKING:


 @register_client('openrouter')
-def _(api_key: str, site_url: Optional[str] = None, app_name: Optional[str] = None) -> 'openai.AsyncOpenAI':
+def _(api_key: str, site_url: str | None = None, app_name: str | None = None) -> 'openai.AsyncOpenAI':
 import openai

 # Create default headers for OpenRouter

@@ -39,11 +39,11 @@ async def chat_completions(
 messages: list,
 *,
 model: str,
-model_kwargs: Optional[dict[str, Any]] = None,
-tools: Optional[list[dict[str, Any]]] = None,
-tool_choice: Optional[dict[str, Any]] = None,
-provider: Optional[dict[str, Any]] = None,
-transforms: Optional[list[str]] = None,
+model_kwargs: dict[str, Any] | None = None,
+tools: list[dict[str, Any]] | None = None,
+tool_choice: dict[str, Any] | None = None,
+provider: dict[str, Any] | None = None,
+transforms: list[str] | None = None,
 ) -> dict:
 """
 Chat Completion API via OpenRouter.