pixeltable 0.2.13__py3-none-any.whl → 0.2.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (51)
  1. pixeltable/__init__.py +1 -1
  2. pixeltable/__version__.py +2 -2
  3. pixeltable/catalog/column.py +5 -0
  4. pixeltable/catalog/globals.py +8 -0
  5. pixeltable/catalog/table.py +22 -4
  6. pixeltable/catalog/table_version.py +30 -55
  7. pixeltable/catalog/view.py +1 -1
  8. pixeltable/exec/__init__.py +2 -1
  9. pixeltable/exec/row_update_node.py +61 -0
  10. pixeltable/exec/{sql_scan_node.py → sql_node.py} +120 -56
  11. pixeltable/exprs/__init__.py +1 -1
  12. pixeltable/exprs/expr.py +35 -22
  13. pixeltable/exprs/function_call.py +60 -29
  14. pixeltable/exprs/globals.py +2 -0
  15. pixeltable/exprs/inline_array.py +18 -11
  16. pixeltable/exprs/method_ref.py +63 -0
  17. pixeltable/ext/__init__.py +9 -0
  18. pixeltable/ext/functions/__init__.py +8 -0
  19. pixeltable/ext/functions/whisperx.py +45 -5
  20. pixeltable/ext/functions/yolox.py +60 -14
  21. pixeltable/func/callable_function.py +12 -4
  22. pixeltable/func/expr_template_function.py +1 -1
  23. pixeltable/func/function.py +12 -2
  24. pixeltable/func/function_registry.py +24 -9
  25. pixeltable/func/udf.py +32 -4
  26. pixeltable/functions/__init__.py +1 -1
  27. pixeltable/functions/fireworks.py +33 -0
  28. pixeltable/functions/huggingface.py +96 -6
  29. pixeltable/functions/image.py +226 -41
  30. pixeltable/functions/openai.py +214 -0
  31. pixeltable/functions/string.py +195 -218
  32. pixeltable/functions/timestamp.py +210 -0
  33. pixeltable/functions/together.py +106 -0
  34. pixeltable/functions/video.py +2 -2
  35. pixeltable/functions/whisper.py +32 -0
  36. pixeltable/io/__init__.py +1 -1
  37. pixeltable/io/globals.py +133 -1
  38. pixeltable/io/pandas.py +52 -27
  39. pixeltable/metadata/__init__.py +1 -1
  40. pixeltable/metadata/converters/convert_18.py +39 -0
  41. pixeltable/metadata/notes.py +10 -0
  42. pixeltable/plan.py +76 -1
  43. pixeltable/tool/create_test_db_dump.py +3 -4
  44. pixeltable/tool/doc_plugins/griffe.py +4 -0
  45. pixeltable/type_system.py +15 -14
  46. {pixeltable-0.2.13.dist-info → pixeltable-0.2.14.dist-info}/METADATA +1 -1
  47. {pixeltable-0.2.13.dist-info → pixeltable-0.2.14.dist-info}/RECORD +50 -45
  48. pixeltable/exprs/image_member_access.py +0 -96
  49. {pixeltable-0.2.13.dist-info → pixeltable-0.2.14.dist-info}/LICENSE +0 -0
  50. {pixeltable-0.2.13.dist-info → pixeltable-0.2.14.dist-info}/WHEEL +0 -0
  51. {pixeltable-0.2.13.dist-info → pixeltable-0.2.14.dist-info}/entry_points.txt +0 -0
pixeltable/func/udf.py CHANGED
@@ -2,7 +2,6 @@ from __future__ import annotations
 
 from typing import List, Callable, Optional, overload, Any
 
-import pixeltable as pxt
 import pixeltable.exceptions as excs
 import pixeltable.type_system as ts
 from .callable_function import CallableFunction
@@ -26,6 +25,8 @@ def udf(
     param_types: Optional[List[ts.ColumnType]] = None,
     batch_size: Optional[int] = None,
     substitute_fn: Optional[Callable] = None,
+    is_method: bool = False,
+    is_property: bool = False,
     _force_stored: bool = False
 ) -> Callable[[Callable], Function]: ...
 
@@ -56,6 +57,8 @@ def udf(*args, **kwargs):
         param_types = kwargs.pop('param_types', None)
         batch_size = kwargs.pop('batch_size', None)
         substitute_fn = kwargs.pop('substitute_fn', None)
+        is_method = kwargs.pop('is_method', None)
+        is_property = kwargs.pop('is_property', None)
         force_stored = kwargs.pop('_force_stored', False)
         if len(kwargs) > 0:
             raise excs.Error(f'Invalid @udf decorator kwargs: {", ".join(kwargs.keys())}')
@@ -64,8 +67,15 @@ def udf(*args, **kwargs):
 
         def decorator(decorated_fn: Callable):
             return make_function(
-                decorated_fn, return_type, param_types, batch_size,
-                substitute_fn=substitute_fn, force_stored=force_stored)
+                decorated_fn,
+                return_type,
+                param_types,
+                batch_size,
+                substitute_fn=substitute_fn,
+                is_method=is_method,
+                is_property=is_property,
+                force_stored=force_stored
+            )
 
         return decorator
 
@@ -76,6 +86,8 @@ def make_function(
     param_types: Optional[List[ts.ColumnType]] = None,
     batch_size: Optional[int] = None,
     substitute_fn: Optional[Callable] = None,
+    is_method: bool = False,
+    is_property: bool = False,
     function_name: Optional[str] = None,
     force_stored: bool = False
 ) -> Function:
@@ -112,6 +124,15 @@ def make_function(
     if batch_size is None and len(sig.batched_parameters) > 0:
         raise excs.Error(f'{errmsg_name}(): batched parameters in udf, but no `batch_size` given')
 
+    if is_method and is_property:
+        raise excs.Error(f'Cannot specify both `is_method` and `is_property` (in function `{function_name}`)')
+    if is_property and len(sig.parameters) != 1:
+        raise excs.Error(
+            f"`is_property=True` expects a UDF with exactly 1 parameter, but `{function_name}` has {len(sig.parameters)}"
+        )
+    if (is_method or is_property) and function_path is None:
+        raise excs.Error('Stored functions cannot be declared using `is_method` or `is_property`')
+
     if substitute_fn is None:
         py_fn = decorated_fn
     else:
@@ -120,7 +141,14 @@ def make_function(
         py_fn = substitute_fn
 
     result = CallableFunction(
-        signature=sig, py_fn=py_fn, self_path=function_path, self_name=function_name, batch_size=batch_size)
+        signature=sig,
+        py_fn=py_fn,
+        self_path=function_path,
+        self_name=function_name,
+        batch_size=batch_size,
+        is_method=is_method,
+        is_property=is_property
+    )
 
     # If this function is part of a module, register it
    if function_path is not None:
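
The new `is_method`/`is_property` flags are exercised by `pixeltable/functions/image.py` later in this diff. As a minimal sketch of the declaration pattern they enable (the function names here are illustrative, not part of the release; the `func` import mirrors the one used in `image.py`):

```python
import PIL.Image
from pixeltable import func

# A method-style UDF: callable as `tbl.img.grayscale()` on image expressions.
@func.udf(is_method=True)
def grayscale(self: PIL.Image.Image) -> PIL.Image.Image:
    return self.convert('L')

# A property-style UDF: accessed as `tbl.img.aspect_ratio`, no parentheses.
# make_function() above enforces that a property UDF takes exactly one parameter.
@func.udf(is_property=True)
def aspect_ratio(self: PIL.Image.Image) -> float:
    return self.width / self.height
```

Per the `function_path is None` check above, both flags require a module-level (non-stored) UDF.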
pixeltable/functions/__init__.py CHANGED
@@ -1,4 +1,4 @@
-from . import fireworks, huggingface, image, openai, string, together, video
+from . import fireworks, huggingface, image, openai, string, together, video, timestamp
 from .globals import *
 from pixeltable.utils.code import local_public_names
 
pixeltable/functions/fireworks.py CHANGED
@@ -1,3 +1,10 @@
+"""
+Pixeltable [UDFs](https://pixeltable.readme.io/docs/user-defined-functions-udfs)
+that wrap various endpoints from the Fireworks AI API. In order to use them, you must
+first `pip install fireworks-ai` and configure your Fireworks AI credentials, as described in
+the [Working with Fireworks](https://pixeltable.readme.io/docs/working-with-fireworks) tutorial.
+"""
+
 from typing import Optional, TYPE_CHECKING
 
 import pixeltable as pxt
@@ -29,6 +36,32 @@ def chat_completions(
     top_p: Optional[float] = None,
     temperature: Optional[float] = None,
 ) -> dict:
+    """
+    Creates a model response for the given chat conversation.
+
+    Equivalent to the Fireworks AI `chat/completions` API endpoint.
+    For additional details, see: [https://docs.fireworks.ai/api-reference/post-chatcompletions](https://docs.fireworks.ai/api-reference/post-chatcompletions)
+
+    __Requirements:__
+
+    - `pip install fireworks-ai`
+
+    Args:
+        messages: A list of messages comprising the conversation so far.
+        model: The name of the model to use.
+
+    For details on the other parameters, see: [https://docs.fireworks.ai/api-reference/post-chatcompletions](https://docs.fireworks.ai/api-reference/post-chatcompletions)
+
+    Returns:
+        A dictionary containing the response and other metadata.
+
+    Examples:
+        Add a computed column that applies the model `accounts/fireworks/models/mixtral-8x22b-instruct`
+        to an existing Pixeltable column `tbl.prompt` of the table `tbl`:
+
+        >>> messages = [{'role': 'user', 'content': tbl.prompt}]
+        ... tbl['response'] = chat_completions(messages, model='accounts/fireworks/models/mixtral-8x22b-instruct')
+    """
     kwargs = {'max_tokens': max_tokens, 'top_k': top_k, 'top_p': top_p, 'temperature': temperature}
     kwargs_not_none = {k: v for k, v in kwargs.items() if v is not None}
     return _fireworks_client().chat.completions.create(model=model, messages=messages, **kwargs_not_none).dict()
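
A hedged end-to-end sketch of how this UDF is meant to be used; the table name, column names, and the JSON-path extraction of the answer are illustrative assumptions, not part of this diff:

```python
import pixeltable as pxt
from pixeltable.functions.fireworks import chat_completions

# Hypothetical table with one prompt per row.
t = pxt.create_table('prompts', {'prompt': pxt.StringType()})

# Computed column: one chat completion per row; the per-row prompt expression
# is spliced into the messages list.
messages = [{'role': 'user', 'content': t.prompt}]
t['response'] = chat_completions(messages, model='accounts/fireworks/models/mixtral-8x22b-instruct')

# The response is stored as JSON; a path expression can pull out the generated text.
t['answer'] = t.response.choices[0].message.content
```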
pixeltable/functions/huggingface.py CHANGED
@@ -25,7 +25,7 @@ def sentence_transformer(
     sentence: Batch[str], *, model_id: str, normalize_embeddings: bool = False
 ) -> Batch[np.ndarray]:
     """
-    Runs the specified pretrained sentence-transformers model. `model_id` should be a pretrained model, as described
+    Computes sentence embeddings. `model_id` should be a pretrained Sentence Transformers model, as described
     in the [Sentence Transformers Pretrained Models](https://sbert.net/docs/sentence_transformer/pretrained_models.html)
     documentation.
 
@@ -83,8 +83,8 @@ def sentence_transformer_list(sentences: list, *, model_id: str, normalize_embed
 @pxt.udf(batch_size=32)
 def cross_encoder(sentences1: Batch[str], sentences2: Batch[str], *, model_id: str) -> Batch[float]:
     """
-    Runs the specified cross-encoder model to compute similarity scores for pairs of sentences.
-    `model_id` should be a pretrained model, as described in the
+    Performs prediction on the given sentence pair.
+    `model_id` should be a pretrained Cross-Encoder model, as described in the
     [Cross-Encoder Pretrained Models](https://www.sbert.net/docs/cross_encoder/pretrained_models.html)
     documentation.
 
@@ -130,7 +130,27 @@ def cross_encoder_list(sentence1: str, sentences2: list, *, model_id: str) -> li
 
 @pxt.udf(batch_size=32, return_type=ts.ArrayType((None,), dtype=ts.FloatType(), nullable=False))
 def clip_text(text: Batch[str], *, model_id: str) -> Batch[np.ndarray]:
-    """Runs the specified CLIP model on text."""
+    """
+    Computes a CLIP embedding for the specified text. `model_id` should be a reference to a pretrained
+    [CLIP Model](https://huggingface.co/docs/transformers/model_doc/clip).
+
+    __Requirements:__
+
+    - `pip install transformers`
+
+    Args:
+        text: The string to embed.
+        model_id: The pretrained model to use for the embedding.
+
+    Returns:
+        An array containing the output of the embedding model.
+
+    Examples:
+        Add a computed column that applies the model `openai/clip-vit-base-patch32` to an existing
+        Pixeltable column `tbl.text` of the table `tbl`:
+
+        >>> tbl['result'] = clip_text(tbl.text, model_id='openai/clip-vit-base-patch32')
+    """
     env.Env.get().require_package('transformers')
     device = resolve_torch_device('auto')
     import torch
@@ -148,7 +168,27 @@ def clip_text(text: Batch[str], *, model_id: str) -> Batch[np.ndarray]:
 
 @pxt.udf(batch_size=32, return_type=ts.ArrayType((None,), dtype=ts.FloatType(), nullable=False))
 def clip_image(image: Batch[PIL.Image.Image], *, model_id: str) -> Batch[np.ndarray]:
-    """Runs the specified CLIP model on images."""
+    """
+    Computes a CLIP embedding for the specified image. `model_id` should be a reference to a pretrained
+    [CLIP Model](https://huggingface.co/docs/transformers/model_doc/clip).
+
+    __Requirements:__
+
+    - `pip install transformers`
+
+    Args:
+        image: The image to embed.
+        model_id: The pretrained model to use for the embedding.
+
+    Returns:
+        An array containing the output of the embedding model.
+
+    Examples:
+        Add a computed column that applies the model `openai/clip-vit-base-patch32` to an existing
+        Pixeltable column `tbl.image` of the table `tbl`:
+
+        >>> tbl['result'] = clip_image(tbl.image, model_id='openai/clip-vit-base-patch32')
+    """
     env.Env.get().require_package('transformers')
     device = resolve_torch_device('auto')
     import torch
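
For orientation, a hedged sketch combining the two CLIP UDFs above; using the same `model_id` for both keeps the text and image embeddings in a shared vector space (the table and column names are illustrative):

```python
import pixeltable as pxt
from pixeltable.functions.huggingface import clip_image, clip_text

# Hypothetical table pairing images with captions.
t = pxt.create_table('captioned_images', {'image': pxt.ImageType(), 'caption': pxt.StringType()})

# Computed columns holding the CLIP embedding of each modality.
t['img_emb'] = clip_image(t.image, model_id='openai/clip-vit-base-patch32')
t['txt_emb'] = clip_text(t.caption, model_id='openai/clip-vit-base-patch32')
```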
@@ -178,7 +218,41 @@ def _(model_id: str) -> ts.ArrayType:
 
 @pxt.udf(batch_size=4)
 def detr_for_object_detection(image: Batch[PIL.Image.Image], *, model_id: str, threshold: float = 0.5) -> Batch[dict]:
-    """Runs the specified DETR model."""
+    """
+    Computes DETR object detections for the specified image. `model_id` should be a reference to a pretrained
+    [DETR Model](https://huggingface.co/docs/transformers/model_doc/detr).
+
+    __Requirements:__
+
+    - `pip install transformers`
+
+    Args:
+        image: The image in which to detect objects.
+        model_id: The pretrained model to use for object detection.
+
+    Returns:
+        A dictionary containing the output of the object detection model, in the following format:
+
+        ```python
+        {
+            'scores': [0.99, 0.999],  # list of confidence scores for each detected object
+            'labels': [25, 25],  # list of COCO class labels for each detected object
+            'label_text': ['giraffe', 'giraffe'],  # corresponding text names of class labels
+            'boxes': [[51.942, 356.174, 181.481, 413.975], [383.225, 58.66, 605.64, 361.346]]
+                # list of bounding boxes for each detected object, as [x1, y1, x2, y2]
+        }
+        ```
+
+    Examples:
+        Add a computed column that applies the model `facebook/detr-resnet-50` to an existing
+        Pixeltable column `tbl.image` of the table `tbl`:
+
+        >>> tbl['detections'] = detr_for_object_detection(
+        ...     tbl.image,
+        ...     model_id='facebook/detr-resnet-50',
+        ...     threshold=0.8
+        ... )
+    """
     env.Env.get().require_package('transformers')
     device = resolve_torch_device('auto')
     import torch
@@ -210,6 +284,22 @@ def detr_for_object_detection(image: Batch[PIL.Image.Image], *, model_id: str, t
 
 @pxt.udf
 def detr_to_coco(image: PIL.Image.Image, detr_info: dict[str, Any]) -> dict[str, Any]:
+    """
+    Converts the output of a DETR object detection model to COCO format.
+
+    Args:
+        image: The image for which detections were computed.
+        detr_info: The output of a DETR object detection model, as returned by `detr_for_object_detection`.
+
+    Returns:
+        A dictionary containing the data from `detr_info`, converted to COCO format.
+
+    Examples:
+        Add a computed column that converts the output `tbl.detections` to COCO format, where `tbl.image`
+        is the image for which detections were computed:
+
+        >>> tbl['detections_coco'] = detr_to_coco(tbl.image, tbl.detections)
+    """
     bboxes, labels = detr_info['boxes'], detr_info['labels']
     annotations = [
         {'bbox': [bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]], 'category': label}
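
The two DETR UDFs are designed to chain, as in this hedged sketch (table and column names are illustrative):

```python
import pixeltable as pxt
from pixeltable.functions.huggingface import detr_for_object_detection, detr_to_coco

# Hypothetical table of frames to run detection over.
t = pxt.create_table('frames', {'image': pxt.ImageType()})

# First computed column: raw DETR detections; second: the same data in COCO format.
t['detections'] = detr_for_object_detection(t.image, model_id='facebook/detr-resnet-50', threshold=0.8)
t['detections_coco'] = detr_to_coco(t.image, t.detections)
```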
pixeltable/functions/image.py CHANGED
@@ -11,7 +11,7 @@ t.select(t.img_col.convert('L')).collect()
 """
 
 import base64
-from typing import Optional, Tuple
+from typing import Optional
 
 import PIL.Image
 
@@ -21,9 +21,15 @@ from pixeltable.utils.code import local_public_names
 from pixeltable.exprs import Expr
 
 
-@func.udf
+@func.udf(is_method=True)
 def b64_encode(img: PIL.Image.Image, image_format: str = 'png') -> str:
-    # Encode this image as a b64-encoded png.
+    """
+    Convert image to a base64-encoded string.
+
+    Args:
+        img: image
+        image_format: image format [supported by PIL](https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html#fully-supported-formats)
+    """
     import io
 
     bytes_arr = io.BytesIO()
@@ -32,31 +38,48 @@ def b64_encode(img: PIL.Image.Image, image_format: str = 'png') -> str:
     return b64_bytes.decode('utf-8')
 
 
-@func.udf(substitute_fn=PIL.Image.alpha_composite)
+@func.udf(substitute_fn=PIL.Image.alpha_composite, is_method=True)
 def alpha_composite(im1: PIL.Image.Image, im2: PIL.Image.Image) -> PIL.Image.Image:
+    """
+    Alpha composite `im2` over `im1`.
+
+    Equivalent to [`PIL.Image.alpha_composite()`](https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.alpha_composite)
+    """
     pass
 
 
-@func.udf(substitute_fn=PIL.Image.blend)
+@func.udf(substitute_fn=PIL.Image.blend, is_method=True)
 def blend(im1: PIL.Image.Image, im2: PIL.Image.Image, alpha: float) -> PIL.Image.Image:
-    pass
+    """
+    Return a new image by interpolating between two input images, using a constant alpha.
 
+    Equivalent to [`PIL.Image.blend()`](https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.blend)
+    """
+    pass
 
-@func.udf(substitute_fn=PIL.Image.composite)
+@func.udf(substitute_fn=PIL.Image.composite, is_method=True)
 def composite(image1: PIL.Image.Image, image2: PIL.Image.Image, mask: PIL.Image.Image) -> PIL.Image.Image:
+    """
+    Return a composite image by blending two images using a mask.
+
+    Equivalent to [`PIL.Image.composite()`](https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.composite)
+    """
     pass
 
 
 # PIL.Image.Image methods
 
-
 # Image.convert()
-@func.udf
+@func.udf(is_method=True)
 def convert(self: PIL.Image.Image, mode: str) -> PIL.Image.Image:
     """
     Convert the image to a different mode.
 
-    Equivalent to [`PIL.Image.Image.convert()`](https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.convert).
+    Equivalent to
+    [`PIL.Image.Image.convert()`](https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.convert).
+
+    Args:
+        mode: The mode to convert to. See the [Pillow documentation](https://pillow.readthedocs.io/en/stable/handbook/concepts.html#concept-modes) for a list of supported modes.
     """
     return self.convert(mode)
 
@@ -69,23 +92,39 @@ def _(self: Expr, mode: str) -> ts.ColumnType:
 
 
 # Image.crop()
-@func.udf(substitute_fn=PIL.Image.Image.crop, param_types=[ts.ImageType(), ts.ArrayType((4,), dtype=ts.IntType())])
-def crop(self: PIL.Image.Image, box: Tuple[int, int, int, int]) -> PIL.Image.Image:
+@func.udf(substitute_fn=PIL.Image.Image.crop, param_types=[ts.ImageType(), ts.ArrayType((4,), dtype=ts.IntType())], is_method=True)
+def crop(self: PIL.Image.Image, box: tuple[int, int, int, int]) -> PIL.Image.Image:
+    """
+    Return a rectangular region from the image. The box is a 4-tuple defining the left, upper, right, and lower pixel
+    coordinates.
+
+    Equivalent to
+    [`PIL.Image.Image.crop()`](https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.crop)
+    """
     pass
 
 
 @crop.conditional_return_type
-def _(self: Expr, box: Tuple[int, int, int, int]) -> ts.ColumnType:
+def _(self: Expr, box: tuple[int, int, int, int]) -> ts.ColumnType:
     input_type = self.col_type
     assert isinstance(input_type, ts.ImageType)
-    if isinstance(box, list) and all(isinstance(x, int) for x in box):
+    if (isinstance(box, list) or isinstance(box, tuple)) and len(box) == 4 and all(isinstance(x, int) for x in box):
         return ts.ImageType(size=(box[2] - box[0], box[3] - box[1]), mode=input_type.mode, nullable=input_type.nullable)
     return ts.ImageType(mode=input_type.mode, nullable=input_type.nullable)  # we can't compute the size statically
 
 
 # Image.getchannel()
-@func.udf(substitute_fn=PIL.Image.Image.getchannel)
+@func.udf(substitute_fn=PIL.Image.Image.getchannel, is_method=True)
 def getchannel(self: PIL.Image.Image, channel: int) -> PIL.Image.Image:
+    """
+    Return an L-mode image containing a single channel of the original image.
+
+    Equivalent to
+    [`PIL.Image.Image.getchannel()`](https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.getchannel)
+
+    Args:
+        channel: The channel to extract. This is a 0-based index.
+    """
     pass
 
 
@@ -97,31 +136,64 @@ def _(self: Expr) -> ts.ColumnType:
 
 
 # Image.resize()
-@func.udf(param_types=[ts.ImageType(), ts.ArrayType((2,), dtype=ts.IntType())])
-def resize(self: PIL.Image.Image, size: Tuple[int, int]) -> PIL.Image.Image:
+@func.udf(param_types=[ts.ImageType(), ts.ArrayType((2,), dtype=ts.IntType())], is_method=True)
+def resize(self: PIL.Image.Image, size: tuple[int, int]) -> PIL.Image.Image:
+    """
+    Return a resized copy of the image. The size parameter is a tuple containing the width and height of the new image.
+
+    Equivalent to
+    [`PIL.Image.Image.resize()`](https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.resize)
+    """
     return self.resize(size)
 
 
 @resize.conditional_return_type
-def _(self: Expr, size: Tuple[int, int]) -> ts.ColumnType:
+def _(self: Expr, size: tuple[int, int]) -> ts.ColumnType:
     input_type = self.col_type
     assert isinstance(input_type, ts.ImageType)
     return ts.ImageType(size=size, mode=input_type.mode, nullable=input_type.nullable)
 
 
 # Image.rotate()
-@func.udf
+@func.udf(is_method=True)
 def rotate(self: PIL.Image.Image, angle: int) -> PIL.Image.Image:
+    """
+    Return a copy of the image rotated by the given angle.
+
+    Equivalent to
+    [`PIL.Image.Image.rotate()`](https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.rotate)
+
+    Args:
+        angle: The angle to rotate the image, in degrees. Positive angles are counter-clockwise.
+    """
     return self.rotate(angle)
 
 
-@func.udf(substitute_fn=PIL.Image.Image.effect_spread)
+@func.udf(substitute_fn=PIL.Image.Image.effect_spread, is_method=True)
 def effect_spread(self: PIL.Image.Image, distance: int) -> PIL.Image.Image:
+    """
+    Randomly spread pixels in an image.
+
+    Equivalent to
+    [`PIL.Image.Image.effect_spread()`](https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.effect_spread)
+
+    Args:
+        distance: The distance to spread pixels.
+    """
    pass
 
 
-@func.udf(substitute_fn=PIL.Image.Image.transpose)
+@func.udf(substitute_fn=PIL.Image.Image.transpose, is_method=True)
 def transpose(self: PIL.Image.Image, method: int) -> PIL.Image.Image:
+    """
+    Transpose the image.
+
+    Equivalent to
+    [`PIL.Image.Image.transpose()`](https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.transpose)
+
+    Args:
+        method: The transpose method. See the [Pillow documentation](https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.transpose) for a list of supported methods.
+    """
     pass
 
 
@@ -132,53 +204,128 @@ def _(self: Expr) -> ts.ColumnType:
     return self.col_type
 
 
-@func.udf(substitute_fn=PIL.Image.Image.entropy)
+@func.udf(substitute_fn=PIL.Image.Image.entropy, is_method=True)
 def entropy(self: PIL.Image.Image, mask: Optional[PIL.Image.Image] = None, extrema: Optional[list] = None) -> float:
+    """
+    Returns the entropy of the image, optionally using a mask and extrema.
+
+    Equivalent to
+    [`PIL.Image.Image.entropy()`](https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.entropy)
+
+    Args:
+        mask: An optional mask image.
+        extrema: An optional list of extrema.
+    """
     pass
 
 
-@func.udf(substitute_fn=PIL.Image.Image.getbands)
-def getbands(self: PIL.Image.Image) -> Tuple[str]:
+@func.udf(substitute_fn=PIL.Image.Image.getbands, is_method=True)
+def getbands(self: PIL.Image.Image) -> tuple[str]:
+    """
+    Return a tuple containing the names of the image bands.
+
+    Equivalent to
+    [`PIL.Image.Image.getbands()`](https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.getbands)
+    """
     pass
 
 
-@func.udf(substitute_fn=PIL.Image.Image.getbbox)
-def getbbox(self: PIL.Image.Image) -> Tuple[int, int, int, int]:
+@func.udf(substitute_fn=PIL.Image.Image.getbbox, is_method=True)
+def getbbox(self: PIL.Image.Image, *, alpha_only: bool = True) -> tuple[int, int, int, int]:
+    """
+    Return a bounding box for the non-zero regions of the image.
+
+    Equivalent to [`PIL.Image.Image.getbbox()`](https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.getbbox)
+
+    Args:
+        alpha_only: If `True`, and the image has an alpha channel, trim transparent pixels. Otherwise, trim pixels when all channels are zero.
+    """
     pass
 
 
-@func.udf(substitute_fn=PIL.Image.Image.getcolors)
-def getcolors(self: PIL.Image.Image, maxcolors: int) -> Tuple[Tuple[int, int, int], int]:
+@func.udf(substitute_fn=PIL.Image.Image.getcolors, is_method=True)
+def getcolors(self: PIL.Image.Image, maxcolors: int = 256) -> tuple[tuple[int, int, int], int]:
+    """
+    Return a list of colors used in the image, up to a maximum of `maxcolors`.
+
+    Equivalent to
+    [`PIL.Image.Image.getcolors()`](https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.getcolors)
+
+    Args:
+        maxcolors: The maximum number of colors to return.
+    """
     pass
 
 
-@func.udf(substitute_fn=PIL.Image.Image.getextrema)
-def getextrema(self: PIL.Image.Image) -> Tuple[int, int]:
+@func.udf(substitute_fn=PIL.Image.Image.getextrema, is_method=True)
+def getextrema(self: PIL.Image.Image) -> tuple[int, int]:
+    """
+    Return a 2-tuple containing the minimum and maximum pixel values of the image.
+
+    Equivalent to
+    [`PIL.Image.Image.getextrema()`](https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.getextrema)
+    """
     pass
 
 
-@func.udf(substitute_fn=PIL.Image.Image.getpalette)
-def getpalette(self: PIL.Image.Image, mode: Optional[str] = None) -> Tuple[int]:
+@func.udf(substitute_fn=PIL.Image.Image.getpalette, is_method=True)
+def getpalette(self: PIL.Image.Image, mode: Optional[str] = None) -> tuple[int]:
+    """
+    Return the palette of the image, optionally converting it to a different mode.
+
+    Equivalent to
+    [`PIL.Image.Image.getpalette()`](https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.getpalette)
+
+    Args:
+        mode: The mode to convert the palette to.
+    """
     pass
 
 
-@func.udf(param_types=[ts.ImageType(), ts.ArrayType((2,), dtype=ts.IntType())])
-def getpixel(self: PIL.Image.Image, xy: tuple[int, int]) -> Tuple[int]:
+@func.udf(param_types=[ts.ImageType(), ts.ArrayType((2,), dtype=ts.IntType())], is_method=True)
+def getpixel(self: PIL.Image.Image, xy: tuple[int, int]) -> tuple[int]:
+    """
+    Return the pixel value at the given position. The position `xy` is a tuple containing the x and y coordinates.
+
+    Equivalent to
+    [`PIL.Image.Image.getpixel()`](https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.getpixel)
+
+    Args:
+        xy: The coordinates, given as (x, y).
+    """
     # `xy` will be a list; `tuple(xy)` is necessary for pillow 9 compatibility
     return self.getpixel(tuple(xy))
 
 
-@func.udf(substitute_fn=PIL.Image.Image.getprojection)
-def getprojection(self: PIL.Image.Image) -> Tuple[int]:
+@func.udf(substitute_fn=PIL.Image.Image.getprojection, is_method=True)
+def getprojection(self: PIL.Image.Image) -> tuple[int]:
+    """
+    Return two sequences representing the horizontal and vertical projection of the image.
+
+    Equivalent to
+    [`PIL.Image.Image.getprojection()`](https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.getprojection)
+    """
     pass
 
 
-@func.udf(substitute_fn=PIL.Image.Image.histogram)
-def histogram(self: PIL.Image.Image, mask: PIL.Image.Image, extrema: Optional[list] = None) -> Tuple[int]:
+@func.udf(substitute_fn=PIL.Image.Image.histogram, is_method=True)
+def histogram(
+    self: PIL.Image.Image, mask: Optional[PIL.Image.Image] = None, extrema: Optional[list] = None
+) -> list[int]:
+    """
+    Return a histogram for the image.
+
+    Equivalent to
+    [`PIL.Image.Image.histogram()`](https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.histogram)
+
+    Args:
+        mask: An optional mask image.
+        extrema: An optional list of extrema.
+    """
     pass
 
 
-@func.udf(substitute_fn=PIL.Image.Image.quantize)
+@func.udf(substitute_fn=PIL.Image.Image.quantize, is_method=True)
 def quantize(
     self: PIL.Image.Image,
     colors: int = 256,
@@ -187,14 +334,52 @@ def quantize(
     palette: Optional[int] = None,
     dither: int = PIL.Image.Dither.FLOYDSTEINBERG,
 ) -> PIL.Image.Image:
+    """
+    Convert the image to 'P' mode with the specified number of colors.
+
+    Equivalent to
+    [`PIL.Image.Image.quantize()`](https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.quantize)
+
+    Args:
+        colors: The number of colors to quantize to.
+        method: The quantization method. See the [Pillow documentation](https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.quantize) for a list of supported methods.
+        kmeans: The number of k-means clusters to use.
+        palette: The palette to use.
+        dither: The dithering method. See the [Pillow documentation](https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.quantize) for a list of supported methods.
+    """
     pass
 
 
-@func.udf(substitute_fn=PIL.Image.Image.reduce)
-def reduce(self: PIL.Image.Image, factor: int, box: Optional[Tuple[int]] = None) -> PIL.Image.Image:
+@func.udf(substitute_fn=PIL.Image.Image.reduce, is_method=True)
+def reduce(self: PIL.Image.Image, factor: int, box: Optional[tuple[int]] = None) -> PIL.Image.Image:
+    """
+    Reduce the image by the given factor.
+
+    Equivalent to
+    [`PIL.Image.Image.reduce()`](https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.reduce)
+
+    Args:
+        factor: The reduction factor.
+        box: An optional 4-tuple of ints providing the source image region to be reduced. The values must be within (0, 0, width, height) rectangle. If omitted or None, the entire source is used.
+    """
     pass
 
 
+@func.udf(is_property=True)
+def width(self: PIL.Image.Image) -> int:
+    return self.width
+
+
+@func.udf(is_property=True)
+def height(self: PIL.Image.Image) -> int:
+    return self.height
+
+
+@func.udf(is_property=True)
+def mode(self: PIL.Image.Image) -> str:
+    return self.mode
+
+
 __all__ = local_public_names(__name__)
 
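
The net effect of the `is_method`/`is_property` flags added throughout this file is method- and property-style call syntax on image expressions. A brief sketch, assuming a table `t` with an image column `img_col` as in the module docstring above:

```python
# Method-style calls (enabled by is_method=True):
t.select(t.img_col.convert('L')).collect()
t.select(t.img_col.rotate(90)).collect()

# Property-style access (enabled by is_property=True); note: no parentheses.
t.select(t.img_col.width, t.img_col.height, t.img_col.mode).collect()
```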