pixeltable 0.2.7__py3-none-any.whl → 0.2.9__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of pixeltable might be problematic.

Files changed (76)
  1. pixeltable/__init__.py +15 -33
  2. pixeltable/__version__.py +2 -2
  3. pixeltable/catalog/catalog.py +1 -1
  4. pixeltable/catalog/column.py +28 -16
  5. pixeltable/catalog/dir.py +2 -2
  6. pixeltable/catalog/insertable_table.py +5 -55
  7. pixeltable/catalog/named_function.py +2 -2
  8. pixeltable/catalog/schema_object.py +2 -7
  9. pixeltable/catalog/table.py +298 -204
  10. pixeltable/catalog/table_version.py +104 -139
  11. pixeltable/catalog/table_version_path.py +22 -4
  12. pixeltable/catalog/view.py +20 -10
  13. pixeltable/dataframe.py +128 -25
  14. pixeltable/env.py +21 -14
  15. pixeltable/exec/exec_context.py +5 -0
  16. pixeltable/exec/exec_node.py +1 -0
  17. pixeltable/exec/in_memory_data_node.py +29 -24
  18. pixeltable/exec/sql_scan_node.py +1 -1
  19. pixeltable/exprs/column_ref.py +13 -8
  20. pixeltable/exprs/data_row.py +4 -0
  21. pixeltable/exprs/expr.py +16 -1
  22. pixeltable/exprs/function_call.py +4 -4
  23. pixeltable/exprs/row_builder.py +29 -20
  24. pixeltable/exprs/similarity_expr.py +4 -3
  25. pixeltable/ext/functions/yolox.py +2 -1
  26. pixeltable/func/__init__.py +1 -0
  27. pixeltable/func/aggregate_function.py +14 -12
  28. pixeltable/func/callable_function.py +8 -6
  29. pixeltable/func/expr_template_function.py +13 -19
  30. pixeltable/func/function.py +3 -6
  31. pixeltable/func/query_template_function.py +84 -0
  32. pixeltable/func/signature.py +68 -23
  33. pixeltable/func/udf.py +13 -10
  34. pixeltable/functions/__init__.py +6 -91
  35. pixeltable/functions/eval.py +26 -14
  36. pixeltable/functions/fireworks.py +25 -23
  37. pixeltable/functions/globals.py +62 -0
  38. pixeltable/functions/huggingface.py +20 -16
  39. pixeltable/functions/image.py +170 -1
  40. pixeltable/functions/openai.py +95 -128
  41. pixeltable/functions/string.py +10 -2
  42. pixeltable/functions/together.py +95 -84
  43. pixeltable/functions/util.py +16 -0
  44. pixeltable/functions/video.py +94 -16
  45. pixeltable/functions/whisper.py +78 -0
  46. pixeltable/globals.py +1 -1
  47. pixeltable/io/__init__.py +10 -0
  48. pixeltable/io/external_store.py +370 -0
  49. pixeltable/io/globals.py +50 -22
  50. pixeltable/{datatransfer → io}/label_studio.py +279 -166
  51. pixeltable/io/parquet.py +1 -1
  52. pixeltable/iterators/__init__.py +9 -0
  53. pixeltable/iterators/string.py +40 -0
  54. pixeltable/metadata/__init__.py +6 -8
  55. pixeltable/metadata/converters/convert_10.py +2 -4
  56. pixeltable/metadata/converters/convert_12.py +7 -2
  57. pixeltable/metadata/converters/convert_13.py +6 -8
  58. pixeltable/metadata/converters/convert_14.py +2 -4
  59. pixeltable/metadata/converters/convert_15.py +40 -25
  60. pixeltable/metadata/converters/convert_16.py +18 -0
  61. pixeltable/metadata/converters/util.py +11 -8
  62. pixeltable/metadata/schema.py +3 -6
  63. pixeltable/plan.py +8 -7
  64. pixeltable/store.py +1 -1
  65. pixeltable/tool/create_test_db_dump.py +145 -54
  66. pixeltable/tool/embed_udf.py +9 -0
  67. pixeltable/type_system.py +1 -2
  68. pixeltable/utils/code.py +34 -0
  69. {pixeltable-0.2.7.dist-info → pixeltable-0.2.9.dist-info}/METADATA +2 -2
  70. pixeltable-0.2.9.dist-info/RECORD +131 -0
  71. pixeltable/datatransfer/__init__.py +0 -1
  72. pixeltable/datatransfer/remote.py +0 -113
  73. pixeltable/functions/pil/image.py +0 -147
  74. pixeltable-0.2.7.dist-info/RECORD +0 -126
  75. {pixeltable-0.2.7.dist-info → pixeltable-0.2.9.dist-info}/LICENSE +0 -0
  76. {pixeltable-0.2.7.dist-info → pixeltable-0.2.9.dist-info}/WHEEL +0 -0
pixeltable/functions/__init__.py
@@ -1,95 +1,10 @@
- import tempfile
- from pathlib import Path
- from typing import Optional, Union
+ from . import fireworks, huggingface, image, openai, string, together, video
+ from .globals import *
+ from pixeltable.utils.code import local_public_names

- import PIL.Image
- import av
- import av.container
- import av.stream
- import numpy as np

- import pixeltable.env as env
- import pixeltable.func as func
- # import all standard function modules here so they get registered with the FunctionRegistry
- import pixeltable.functions.pil.image
- from pixeltable import exprs
- from pixeltable.type_system import IntType, ColumnType, FloatType, ImageType, VideoType
- # automatically import all submodules so that the udfs get registered
- from . import image, string, video, huggingface
+ __all__ = local_public_names(__name__, exclude=['globals']) + local_public_names(globals.__name__)

- # TODO: remove and replace calls with astype()
- def cast(expr: exprs.Expr, target_type: ColumnType) -> exprs.Expr:
-     expr.col_type = target_type
-     return expr

- @func.uda(
-     update_types=[IntType()], value_type=IntType(), allows_window=True, requires_order_by=False)
- class sum(func.Aggregator):
-     def __init__(self):
-         self.sum: Union[int, float] = 0
-     def update(self, val: Union[int, float]) -> None:
-         if val is not None:
-             self.sum += val
-     def value(self) -> Union[int, float]:
-         return self.sum
-
-
- @func.uda(
-     update_types=[IntType()], value_type=IntType(), allows_window = True, requires_order_by = False)
- class count(func.Aggregator):
-     def __init__(self):
-         self.count = 0
-     def update(self, val: int) -> None:
-         if val is not None:
-             self.count += 1
-     def value(self) -> int:
-         return self.count
-
-
- @func.uda(
-     update_types=[IntType()], value_type=FloatType(), allows_window=False, requires_order_by=False)
- class mean(func.Aggregator):
-     def __init__(self):
-         self.sum = 0
-         self.count = 0
-     def update(self, val: int) -> None:
-         if val is not None:
-             self.sum += val
-             self.count += 1
-     def value(self) -> float:
-         if self.count == 0:
-             return None
-         return self.sum / self.count
-
-
- @func.uda(
-     init_types=[IntType()], update_types=[ImageType()], value_type=VideoType(),
-     requires_order_by=True, allows_window=False)
- class make_video(func.Aggregator):
-     def __init__(self, fps: int = 25):
-         """follows https://pyav.org/docs/develop/cookbook/numpy.html#generating-video"""
-         self.container: Optional[av.container.OutputContainer] = None
-         self.stream: Optional[av.stream.Stream] = None
-         self.fps = fps
-
-     def update(self, frame: PIL.Image.Image) -> None:
-         if frame is None:
-             return
-         if self.container is None:
-             (_, output_filename) = tempfile.mkstemp(suffix='.mp4', dir=str(env.Env.get().tmp_dir))
-             self.out_file = Path(output_filename)
-             self.container = av.open(str(self.out_file), mode='w')
-             self.stream = self.container.add_stream('h264', rate=self.fps)
-             self.stream.pix_fmt = 'yuv420p'
-             self.stream.width = frame.width
-             self.stream.height = frame.height
-
-         av_frame = av.VideoFrame.from_ndarray(np.array(frame.convert('RGB')), format='rgb24')
-         for packet in self.stream.encode(av_frame):
-             self.container.mux(packet)
-
-     def value(self) -> str:
-         for packet in self.stream.encode():
-             self.container.mux(packet)
-         self.container.close()
-         return str(self.out_file)
+ def __dir__():
+     return __all__
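Nearly every rewritten module in this release ends with the same export boilerplate: __all__ = local_public_names(__name__) plus a module-level __dir__() that returns __all__. The helper lives in the new pixeltable/utils/code.py (+34 lines), which this diff doesn't expand, so the sketch below is only an illustration of what such a helper typically does (collect a module's public, locally defined names); it is not the actual implementation.

    import sys
    from types import ModuleType
    from typing import Sequence

    def local_public_names(module_name: str, exclude: Sequence[str] = ()) -> list:
        """Illustrative only: public names defined in (or submodules imported into)
        `module_name`, skipping underscore-prefixed and excluded names."""
        module = sys.modules[module_name]
        names = []
        for name, obj in vars(module).items():
            if name.startswith('_') or name in exclude:
                continue
            if isinstance(obj, ModuleType) or getattr(obj, '__module__', None) == module_name:
                names.append(name)
        return names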
pixeltable/functions/eval.py
@@ -10,6 +10,7 @@ import pixeltable.func as func

  # TODO: figure out a better submodule structure

+
  # the following function has been adapted from MMEval
  # (sources at https://github.com/open-mmlab/mmeval)
  # Copyright (c) OpenMMLab. All rights reserved.
@@ -21,11 +22,12 @@ def calculate_bboxes_area(bboxes: np.ndarray) -> np.ndarray:
      Returns:
          numpy.ndarray: The area of bboxes.
      """
-     bboxes_w = (bboxes[..., 2] - bboxes[..., 0])
-     bboxes_h = (bboxes[..., 3] - bboxes[..., 1])
+     bboxes_w = bboxes[..., 2] - bboxes[..., 0]
+     bboxes_h = bboxes[..., 3] - bboxes[..., 1]
      areas = bboxes_w * bboxes_h
      return areas

+
  # the following function has been adapted from MMEval
  # (sources at https://github.com/open-mmlab/mmeval)
  # Copyright (c) OpenMMLab. All rights reserved.
@@ -146,6 +148,7 @@ def calculate_image_tpfp(

      return tp, fp

+
  @func.udf(
      return_type=ts.JsonType(nullable=False),
      param_types=[
@@ -153,11 +156,15 @@ def calculate_image_tpfp(
          ts.JsonType(nullable=False),
          ts.JsonType(nullable=False),
          ts.JsonType(nullable=False),
-         ts.JsonType(nullable=False)
-     ])
+         ts.JsonType(nullable=False),
+     ],
+ )
  def eval_detections(
-         pred_bboxes: List[List[int]], pred_labels: List[int], pred_scores: List[float],
-         gt_bboxes: List[List[int]], gt_labels: List[int]
+     pred_bboxes: List[List[int]],
+     pred_labels: List[int],
+     pred_scores: List[float],
+     gt_bboxes: List[List[int]],
+     gt_labels: List[int],
  ) -> Dict:
      class_idxs = list(set(pred_labels + gt_labels))
      result: List[Dict] = []
@@ -170,17 +177,22 @@ def eval_detections(
          pred_filter = pred_classes_arr == class_idx
          gt_filter = gt_classes_arr == class_idx
          class_pred_scores = pred_scores_arr[pred_filter]
-         tp, fp = calculate_image_tpfp(
-             pred_bboxes_arr[pred_filter], class_pred_scores, gt_bboxes_arr[gt_filter], [0.5])
+         tp, fp = calculate_image_tpfp(pred_bboxes_arr[pred_filter], class_pred_scores, gt_bboxes_arr[gt_filter], [0.5])
          ordered_class_pred_scores = -np.sort(-class_pred_scores)
-         result.append({
-             'min_iou': 0.5, 'class': class_idx, 'tp': tp.tolist(), 'fp': fp.tolist(),
-             'scores': ordered_class_pred_scores.tolist(), 'num_gts': gt_filter.sum().item(),
-         })
+         result.append(
+             {
+                 'min_iou': 0.5,
+                 'class': class_idx,
+                 'tp': tp.tolist(),
+                 'fp': fp.tolist(),
+                 'scores': ordered_class_pred_scores.tolist(),
+                 'num_gts': gt_filter.sum().item(),
+             }
+         )
      return result

- @func.uda(
-     update_types=[ts.JsonType()], value_type=ts.JsonType(), allows_std_agg=True, allows_window=False)
+
+ @func.uda(update_types=[ts.JsonType()], value_type=ts.JsonType(), allows_std_agg=True, allows_window=False)
  class mean_ap(func.Aggregator):
      def __init__(self):
          self.class_tpfp: Dict[int, List[Dict]] = defaultdict(list)
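For context on the per-class tp/fp/scores/num_gts payload that eval_detections emits and mean_ap accumulates: average precision is conventionally computed by sorting detections by confidence, accumulating true/false positives, and integrating the precision-recall curve. The snippet below is a generic, self-contained version of that computation (standard all-point interpolation), not pixeltable's mean_ap implementation.

    import numpy as np

    def average_precision(tp: np.ndarray, fp: np.ndarray, scores: np.ndarray, num_gts: int) -> float:
        # Sort detections by descending confidence, then accumulate TP/FP counts.
        order = np.argsort(-scores)
        tp_cum = np.cumsum(tp[order])
        fp_cum = np.cumsum(fp[order])
        recall = tp_cum / max(num_gts, 1)
        precision = tp_cum / np.maximum(tp_cum + fp_cum, np.finfo(np.float64).eps)
        # Precision envelope: best precision achievable at each recall level or higher.
        precision = np.maximum.accumulate(precision[::-1])[::-1]
        # Area under the stepwise precision-recall curve.
        return float(np.sum(np.diff(np.concatenate(([0.0], recall))) * precision))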
pixeltable/functions/fireworks.py
@@ -1,39 +1,41 @@
- from typing import Optional
-
- import fireworks.client
+ from typing import Optional, TYPE_CHECKING

  import pixeltable as pxt
  from pixeltable import env
+ from pixeltable.utils.code import local_public_names
+
+ if TYPE_CHECKING:
+     import fireworks.client


  @env.register_client('fireworks')
- def _(api_key: str) -> fireworks.client.Fireworks:
+ def _(api_key: str) -> 'fireworks.client.Fireworks':
+     import fireworks.client
+
      return fireworks.client.Fireworks(api_key=api_key)


- def _fireworks_client() -> fireworks.client.Fireworks:
+ def _fireworks_client() -> 'fireworks.client.Fireworks':
      return env.Env.get().get_client('fireworks')


  @pxt.udf
  def chat_completions(
-         messages: list[dict[str, str]],
-         *,
-         model: str,
-         max_tokens: Optional[int] = None,
-         top_k: Optional[int] = None,
-         top_p: Optional[float] = None,
-         temperature: Optional[float] = None
+     messages: list[dict[str, str]],
+     *,
+     model: str,
+     max_tokens: Optional[int] = None,
+     top_k: Optional[int] = None,
+     top_p: Optional[float] = None,
+     temperature: Optional[float] = None,
  ) -> dict:
-     kwargs = {
-         'max_tokens': max_tokens,
-         'top_k': top_k,
-         'top_p': top_p,
-         'temperature': temperature
-     }
+     kwargs = {'max_tokens': max_tokens, 'top_k': top_k, 'top_p': top_p, 'temperature': temperature}
      kwargs_not_none = {k: v for k, v in kwargs.items() if v is not None}
-     return _fireworks_client().chat.completions.create(
-         model=model,
-         messages=messages,
-         **kwargs_not_none
-     ).dict()
+     return _fireworks_client().chat.completions.create(model=model, messages=messages, **kwargs_not_none).dict()
+
+
+ __all__ = local_public_names(__name__)
+
+
+ def __dir__():
+     return __all__
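The fireworks.py rewrite illustrates a pattern applied across the provider modules in 0.2.9: the third-party SDK import moves under typing.TYPE_CHECKING (annotations become string literals) and into the body of the client factory, so importing the pixeltable module no longer requires the SDK to be installed. A minimal, generic sketch of the same idea; some_optional_sdk is a made-up placeholder, and this is not pixeltable's client registry.

    from typing import TYPE_CHECKING, Optional

    if TYPE_CHECKING:
        # Seen only by type checkers; no runtime dependency.
        import some_optional_sdk

    _client: Optional['some_optional_sdk.Client'] = None

    def get_client(api_key: str) -> 'some_optional_sdk.Client':
        # Deferred import: the module itself imports fine without the SDK;
        # the dependency is only needed when a client is actually requested.
        global _client
        if _client is None:
            import some_optional_sdk
            _client = some_optional_sdk.Client(api_key=api_key)
        return _client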
pixeltable/functions/globals.py (new file)
@@ -0,0 +1,62 @@
+ from typing import Union
+
+ import pixeltable.func as func
+ import pixeltable.type_system as ts
+ from pixeltable import exprs
+ from pixeltable.utils.code import local_public_names
+
+
+ # TODO: remove and replace calls with astype()
+ def cast(expr: exprs.Expr, target_type: ts.ColumnType) -> exprs.Expr:
+     expr.col_type = target_type
+     return expr
+
+
+ @func.uda(update_types=[ts.IntType()], value_type=ts.IntType(), allows_window=True, requires_order_by=False)
+ class sum(func.Aggregator):
+     def __init__(self):
+         self.sum: Union[int, float] = 0
+
+     def update(self, val: Union[int, float]) -> None:
+         if val is not None:
+             self.sum += val
+
+     def value(self) -> Union[int, float]:
+         return self.sum
+
+
+ @func.uda(update_types=[ts.IntType()], value_type=ts.IntType(), allows_window=True, requires_order_by=False)
+ class count(func.Aggregator):
+     def __init__(self):
+         self.count = 0
+
+     def update(self, val: int) -> None:
+         if val is not None:
+             self.count += 1
+
+     def value(self) -> int:
+         return self.count
+
+
+ @func.uda(update_types=[ts.IntType()], value_type=ts.FloatType(), allows_window=False, requires_order_by=False)
+ class mean(func.Aggregator):
+     def __init__(self):
+         self.sum = 0
+         self.count = 0
+
+     def update(self, val: int) -> None:
+         if val is not None:
+             self.sum += val
+             self.count += 1
+
+     def value(self) -> float:
+         if self.count == 0:
+             return None
+         return self.sum / self.count
+
+
+ __all__ = local_public_names(__name__)
+
+
+ def __dir__():
+     return __all__
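The sum, count, and mean aggregators moved here from functions/__init__.py all follow the same func.Aggregator contract: state is set up in __init__, update() is called once per input value, and value() produces the final result. The driver below is a plain-Python illustration of that contract; it is not how pixeltable's executor actually invokes aggregators.

    from typing import Iterable, Optional, Union

    class MeanAggregator:
        # Same shape as the `mean` aggregator above.
        def __init__(self) -> None:
            self.sum: Union[int, float] = 0
            self.count = 0

        def update(self, val: Optional[Union[int, float]]) -> None:
            if val is not None:  # None values are skipped, as in the UDAs above
                self.sum += val
                self.count += 1

        def value(self) -> Optional[float]:
            return None if self.count == 0 else self.sum / self.count

    def aggregate(values: Iterable[Optional[Union[int, float]]]) -> Optional[float]:
        agg = MeanAggregator()
        for v in values:    # one update() per row
            agg.update(v)
        return agg.value()  # final result after all rows

    assert aggregate([1, 2, None, 3]) == 2.0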
pixeltable/functions/huggingface.py
@@ -7,12 +7,13 @@ import pixeltable as pxt
  import pixeltable.env as env
  import pixeltable.type_system as ts
  from pixeltable.func import Batch
- from pixeltable.functions.util import resolve_torch_device
+ from pixeltable.functions.util import resolve_torch_device, normalize_image_mode
+ from pixeltable.utils.code import local_public_names


  @pxt.udf(batch_size=32, return_type=ts.ArrayType((None,), dtype=ts.FloatType()))
  def sentence_transformer(
-         sentences: Batch[str], *, model_id: str, normalize_embeddings: bool = False
+     sentences: Batch[str], *, model_id: str, normalize_embeddings: bool = False
  ) -> Batch[np.ndarray]:
      """Runs the specified sentence transformer model."""
      env.Env.get().require_package('sentence_transformers')
@@ -28,6 +29,7 @@ def sentence_transformer(
  def _(model_id: str) -> ts.ArrayType:
      try:
          from sentence_transformers import SentenceTransformer
+
          model = _lookup_model(model_id, SentenceTransformer)
          return ts.ArrayType((model.get_sentence_embedding_dimension(),), dtype=ts.FloatType(), nullable=False)
      except ImportError:
@@ -109,6 +111,7 @@ def clip_image(image: Batch[PIL.Image.Image], *, model_id: str) -> Batch[np.ndar
  def _(model_id: str) -> ts.ArrayType:
      try:
          from transformers import CLIPModel
+
          model = _lookup_model(model_id, CLIPModel.from_pretrained)
          return ts.ArrayType((model.config.projection_dim,), dtype=ts.FloatType(), nullable=False)
      except ImportError:
@@ -124,11 +127,13 @@ def detr_for_object_detection(image: Batch[PIL.Image.Image], *, model_id: str, t
      from transformers import DetrImageProcessor, DetrForObjectDetection

      model = _lookup_model(
-         model_id, lambda x: DetrForObjectDetection.from_pretrained(x, revision='no_timm'), device=device)
+         model_id, lambda x: DetrForObjectDetection.from_pretrained(x, revision='no_timm'), device=device
+     )
      processor = _lookup_processor(model_id, lambda x: DetrImageProcessor.from_pretrained(x, revision='no_timm'))
+     normalized_images = [normalize_image_mode(img) for img in image]

      with torch.no_grad():
-         inputs = processor(images=image, return_tensors='pt')
+         inputs = processor(images=normalized_images, return_tensors='pt')
          outputs = model(**inputs.to(device))
          results = processor.post_process_object_detection(
              outputs, threshold=threshold, target_sizes=[(img.height, img.width) for img in image]
@@ -139,7 +144,7 @@ def detr_for_object_detection(image: Batch[PIL.Image.Image], *, model_id: str, t
              'scores': [score.item() for score in result['scores']],
              'labels': [label.item() for label in result['labels']],
              'label_text': [model.config.id2label[label.item()] for label in result['labels']],
-             'boxes': [box.tolist() for box in result['boxes']]
+             'boxes': [box.tolist() for box in result['boxes']],
          }
          for result in results
      ]
@@ -149,19 +154,10 @@ def detr_for_object_detection(image: Batch[PIL.Image.Image], *, model_id: str, t
  def detr_to_coco(image: PIL.Image.Image, detr_info: dict[str, Any]) -> dict[str, Any]:
      bboxes, labels = detr_info['boxes'], detr_info['labels']
      annotations = [
-         {
-             'bbox': [bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]],
-             'category': label
-         }
+         {'bbox': [bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]], 'category': label}
          for bbox, label in zip(bboxes, labels)
      ]
-     return {
-         'image': {
-             'width': image.width,
-             'height': image.height
-         },
-         'annotations': annotations
-     }
+     return {'image': {'width': image.width, 'height': image.height}, 'annotations': annotations}


  T = TypeVar('T')
@@ -169,6 +165,7 @@ T = TypeVar('T')

  def _lookup_model(model_id: str, create: Callable[[str], T], device: Optional[str] = None) -> T:
      from torch import nn
+
      key = (model_id, create, device)  # For safety, include the `create` callable in the cache key
      if key not in _model_cache:
          model = create(model_id)
@@ -189,3 +186,10 @@ def _lookup_processor(model_id: str, create: Callable[[str], T]) -> T:

  _model_cache = {}
  _processor_cache = {}
+
+
+ __all__ = local_public_names(__name__)
+
+
+ def __dir__():
+     return __all__
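detr_for_object_detection now routes each input through normalize_image_mode, added to pixeltable/functions/util.py (+16 lines), which this diff does not expand. The exact body isn't shown here, so the sketch below is only an assumption about what such a normalization helper typically does: coercing grayscale/palette/alpha images to 3-channel RGB so the Hugging Face processor receives uniform input.

    import PIL.Image

    def normalize_image_mode(img: PIL.Image.Image) -> PIL.Image.Image:
        # Hypothetical sketch: most HF vision processors expect RGB input,
        # so convert anything that isn't already 3-channel RGB.
        if img.mode != 'RGB':
            return img.convert('RGB')
        return img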
pixeltable/functions/image.py
@@ -1,16 +1,185 @@
  import base64
+ from typing import Optional, Tuple

  import PIL.Image

- from pixeltable.type_system import ImageType, StringType
  import pixeltable.func as func
+ import pixeltable.type_system as ts
+ from pixeltable.utils.code import local_public_names
+ from pixeltable.exprs import Expr


  @func.udf
  def b64_encode(img: PIL.Image.Image, image_format: str = 'png') -> str:
      # Encode this image as a b64-encoded png.
      import io
+
      bytes_arr = io.BytesIO()
      img.save(bytes_arr, format=image_format)
      b64_bytes = base64.b64encode(bytes_arr.getvalue())
      return b64_bytes.decode('utf-8')
+
+
+ @func.udf(substitute_fn=PIL.Image.alpha_composite)
+ def alpha_composite(im1: PIL.Image.Image, im2: PIL.Image.Image) -> PIL.Image.Image:
+     pass
+
+
+ @func.udf(substitute_fn=PIL.Image.blend)
+ def blend(im1: PIL.Image.Image, im2: PIL.Image.Image, alpha: float) -> PIL.Image.Image:
+     pass
+
+
+ @func.udf(substitute_fn=PIL.Image.composite)
+ def composite(image1: PIL.Image.Image, image2: PIL.Image.Image, mask: PIL.Image.Image) -> PIL.Image.Image:
+     pass
+
+
+ # PIL.Image.Image methods
+
+
+ # Image.convert()
+ @func.udf
+ def convert(self: PIL.Image.Image, mode: str) -> PIL.Image.Image:
+     return self.convert(mode)
+
+
+ @convert.conditional_return_type
+ def _(self: Expr, mode: str) -> ts.ColumnType:
+     input_type = self.col_type
+     assert isinstance(input_type, ts.ImageType)
+     return ts.ImageType(size=input_type.size, mode=mode, nullable=input_type.nullable)
+
+
+ # Image.crop()
+ @func.udf(substitute_fn=PIL.Image.Image.crop, param_types=[ts.ImageType(), ts.ArrayType((4,), dtype=ts.IntType())])
+ def crop(self: PIL.Image.Image, box: Tuple[int, int, int, int]) -> PIL.Image.Image:
+     pass
+
+
+ @crop.conditional_return_type
+ def _(self: Expr, box: Tuple[int, int, int, int]) -> ts.ColumnType:
+     input_type = self.col_type
+     assert isinstance(input_type, ts.ImageType)
+     if isinstance(box, list) and all(isinstance(x, int) for x in box):
+         return ts.ImageType(size=(box[2] - box[0], box[3] - box[1]), mode=input_type.mode, nullable=input_type.nullable)
+     return ts.ImageType(mode=input_type.mode, nullable=input_type.nullable)  # we can't compute the size statically
+
+
+ # Image.getchannel()
+ @func.udf(substitute_fn=PIL.Image.Image.getchannel)
+ def getchannel(self: PIL.Image.Image, channel: int) -> PIL.Image.Image:
+     pass
+
+
+ @getchannel.conditional_return_type
+ def _(self: Expr) -> ts.ColumnType:
+     input_type = self.col_type
+     assert isinstance(input_type, ts.ImageType)
+     return ts.ImageType(size=input_type.size, mode='L', nullable=input_type.nullable)
+
+
+ # Image.resize()
+ @func.udf(param_types=[ts.ImageType(), ts.ArrayType((2,), dtype=ts.IntType())])
+ def resize(self: PIL.Image.Image, size: Tuple[int, int]) -> PIL.Image.Image:
+     return self.resize(size)
+
+
+ @resize.conditional_return_type
+ def _(self: Expr, size: Tuple[int, int]) -> ts.ColumnType:
+     input_type = self.col_type
+     assert isinstance(input_type, ts.ImageType)
+     return ts.ImageType(size=size, mode=input_type.mode, nullable=input_type.nullable)
+
+
+ # Image.rotate()
+ @func.udf
+ def rotate(self: PIL.Image.Image, angle: int) -> PIL.Image.Image:
+     return self.rotate(angle)
+
+
+ @func.udf(substitute_fn=PIL.Image.Image.effect_spread)
+ def effect_spread(self: PIL.Image.Image, distance: int) -> PIL.Image.Image:
+     pass
+
+
+ @func.udf(substitute_fn=PIL.Image.Image.transpose)
+ def transpose(self: PIL.Image.Image, method: int) -> PIL.Image.Image:
+     pass
+
+
+ @rotate.conditional_return_type
+ @effect_spread.conditional_return_type
+ @transpose.conditional_return_type
+ def _(self: Expr) -> ts.ColumnType:
+     return self.col_type
+
+
+ @func.udf(substitute_fn=PIL.Image.Image.entropy)
+ def entropy(self: PIL.Image.Image, mask: Optional[PIL.Image.Image] = None, extrema: Optional[list] = None) -> float:
+     pass
+
+
+ @func.udf(substitute_fn=PIL.Image.Image.getbands)
+ def getbands(self: PIL.Image.Image) -> Tuple[str]:
+     pass
+
+
+ @func.udf(substitute_fn=PIL.Image.Image.getbbox)
+ def getbbox(self: PIL.Image.Image) -> Tuple[int, int, int, int]:
+     pass
+
+
+ @func.udf(substitute_fn=PIL.Image.Image.getcolors)
+ def getcolors(self: PIL.Image.Image, maxcolors: int) -> Tuple[Tuple[int, int, int], int]:
+     pass
+
+
+ @func.udf(substitute_fn=PIL.Image.Image.getextrema)
+ def getextrema(self: PIL.Image.Image) -> Tuple[int, int]:
+     pass
+
+
+ @func.udf(substitute_fn=PIL.Image.Image.getpalette)
+ def getpalette(self: PIL.Image.Image, mode: Optional[str] = None) -> Tuple[int]:
+     pass
+
+
+ @func.udf(param_types=[ts.ImageType(), ts.ArrayType((2,), dtype=ts.IntType())])
+ def getpixel(self: PIL.Image.Image, xy: tuple[int, int]) -> Tuple[int]:
+     # `xy` will be a list; `tuple(xy)` is necessary for pillow 9 compatibility
+     return self.getpixel(tuple(xy))
+
+
+ @func.udf(substitute_fn=PIL.Image.Image.getprojection)
+ def getprojection(self: PIL.Image.Image) -> Tuple[int]:
+     pass
+
+
+ @func.udf(substitute_fn=PIL.Image.Image.histogram)
+ def histogram(self: PIL.Image.Image, mask: PIL.Image.Image, extrema: Optional[list] = None) -> Tuple[int]:
+     pass
+
+
+ @func.udf(substitute_fn=PIL.Image.Image.quantize)
+ def quantize(
+     self: PIL.Image.Image,
+     colors: int = 256,
+     method: Optional[int] = None,
+     kmeans: int = 0,
+     palette: Optional[int] = None,
+     dither: int = PIL.Image.Dither.FLOYDSTEINBERG,
+ ) -> PIL.Image.Image:
+     pass
+
+
+ @func.udf(substitute_fn=PIL.Image.Image.reduce)
+ def reduce(self: PIL.Image.Image, factor: int, box: Optional[Tuple[int]] = None) -> PIL.Image.Image:
+     pass
+
+
+ __all__ = local_public_names(__name__)
+
+
+ def __dir__():
+     return __all__
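The new image UDFs pair a thin wrapper (often just substitute_fn=<PIL method>) with a conditional_return_type callback that refines the result's column type from the input expression, e.g. resize reports an ImageType with the target size and getchannel reports mode 'L'. The sketch below shows, with made-up class names, how such a decorator hook can be wired in general; it is not pixeltable's func.udf machinery.

    from typing import Any, Callable, Optional

    class SimpleUdf:
        # Hypothetical stand-in for a UDF wrapper object.
        def __init__(self, fn: Callable) -> None:
            self.fn = fn
            self._return_type_fn: Optional[Callable[..., Any]] = None

        def conditional_return_type(self, type_fn: Callable[..., Any]) -> Callable[..., Any]:
            # Usable as `@my_udf.conditional_return_type`: register a callback that
            # computes the output type from the (expression) arguments.
            self._return_type_fn = type_fn
            return type_fn

        def return_type(self, *args: Any, **kwargs: Any) -> Any:
            # Planner-side hook: consult the registered callback if present.
            if self._return_type_fn is not None:
                return self._return_type_fn(*args, **kwargs)
            return None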