google-genai 1.30.0__py3-none-any.whl → 1.31.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
google/genai/models.py CHANGED
@@ -32,6 +32,7 @@ from ._common import get_value_by_path as getv
 from ._common import set_value_by_path as setv
 from .pagers import AsyncPager, Pager
 
+
 logger = logging.getLogger('google_genai.models')
 
 
@@ -1337,6 +1338,11 @@ def _GenerateVideosConfig_to_mldev(
   if getv(from_object, ['last_frame']) is not None:
     raise ValueError('last_frame parameter is not supported in Gemini API.')
 
+  if getv(from_object, ['reference_images']) is not None:
+    raise ValueError(
+        'reference_images parameter is not supported in Gemini API.'
+    )
+
   if getv(from_object, ['compression_quality']) is not None:
     raise ValueError(
         'compression_quality parameter is not supported in Gemini API.'
@@ -3025,6 +3031,120 @@ def _RecontextImageParameters_to_vertex(
   return to_object
 
 
+def _ScribbleImage_to_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['image']) is not None:
+    setv(
+        to_object,
+        ['image'],
+        _Image_to_vertex(getv(from_object, ['image']), to_object),
+    )
+
+  return to_object
+
+
+def _SegmentImageSource_to_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['prompt']) is not None:
+    setv(
+        parent_object, ['instances[0]', 'prompt'], getv(from_object, ['prompt'])
+    )
+
+  if getv(from_object, ['image']) is not None:
+    setv(
+        parent_object,
+        ['instances[0]', 'image'],
+        _Image_to_vertex(getv(from_object, ['image']), to_object),
+    )
+
+  if getv(from_object, ['scribble_image']) is not None:
+    setv(
+        parent_object,
+        ['instances[0]', 'scribble'],
+        _ScribbleImage_to_vertex(
+            getv(from_object, ['scribble_image']), to_object
+        ),
+    )
+
+  return to_object
+
+
+def _SegmentImageConfig_to_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+
+  if getv(from_object, ['mode']) is not None:
+    setv(parent_object, ['parameters', 'mode'], getv(from_object, ['mode']))
+
+  if getv(from_object, ['max_predictions']) is not None:
+    setv(
+        parent_object,
+        ['parameters', 'maxPredictions'],
+        getv(from_object, ['max_predictions']),
+    )
+
+  if getv(from_object, ['confidence_threshold']) is not None:
+    setv(
+        parent_object,
+        ['parameters', 'confidenceThreshold'],
+        getv(from_object, ['confidence_threshold']),
+    )
+
+  if getv(from_object, ['mask_dilation']) is not None:
+    setv(
+        parent_object,
+        ['parameters', 'maskDilation'],
+        getv(from_object, ['mask_dilation']),
+    )
+
+  if getv(from_object, ['binary_color_threshold']) is not None:
+    setv(
+        parent_object,
+        ['parameters', 'binaryColorThreshold'],
+        getv(from_object, ['binary_color_threshold']),
+    )
+
+  return to_object
+
+
+def _SegmentImageParameters_to_vertex(
+    api_client: BaseApiClient,
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['model']) is not None:
+    setv(
+        to_object,
+        ['_url', 'model'],
+        t.t_model(api_client, getv(from_object, ['model'])),
+    )
+
+  if getv(from_object, ['source']) is not None:
+    setv(
+        to_object,
+        ['config'],
+        _SegmentImageSource_to_vertex(getv(from_object, ['source']), to_object),
+    )
+
+  if getv(from_object, ['config']) is not None:
+    setv(
+        to_object,
+        ['config'],
+        _SegmentImageConfig_to_vertex(getv(from_object, ['config']), to_object),
+    )
+
+  return to_object
+
+
 def _GetModelParameters_to_vertex(
     api_client: BaseApiClient,
     from_object: Union[dict[str, Any], object],
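
For orientation, the converters above flatten the segmentation inputs into a single Vertex `:predict` payload: `_SegmentImageSource_to_vertex` writes the source fields into `instances[0]` (prompt, input image, and the scribble image under a `scribble` key), while `_SegmentImageConfig_to_vertex` writes the camelCased config fields into `parameters`. A minimal sketch of the resulting request body; the values are illustrative placeholders, and the exact image sub-fields come from `_Image_to_vertex`, which is not part of this diff:

```
# Approximate shape of the request body these converters assemble.
image_json: dict = {}  # whatever _Image_to_vertex produces for the input image

request_body = {
    'instances': [{
        'prompt': 'the object to segment',  # source.prompt (prompt/semantic modes)
        'image': image_json,                # source.image
        'scribble': {'image': image_json},  # source.scribble_image (interactive mode)
    }],
    'parameters': {
        'mode': 'foreground',          # config.mode
        'maxPredictions': 1,           # config.max_predictions
        'confidenceThreshold': 0.5,    # config.confidence_threshold
        'maskDilation': 0.005,         # config.mask_dilation
        'binaryColorThreshold': 128,   # config.binary_color_threshold
    },
}
```
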
@@ -3276,6 +3396,24 @@ def _Video_to_vertex(
   return to_object
 
 
+def _VideoGenerationReferenceImage_to_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['image']) is not None:
+    setv(
+        to_object,
+        ['image'],
+        _Image_to_vertex(getv(from_object, ['image']), to_object),
+    )
+
+  if getv(from_object, ['reference_type']) is not None:
+    setv(to_object, ['referenceType'], getv(from_object, ['reference_type']))
+
+  return to_object
+
+
 def _GenerateVideosConfig_to_vertex(
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
@@ -3365,6 +3503,16 @@ def _GenerateVideosConfig_to_vertex(
         _Image_to_vertex(getv(from_object, ['last_frame']), to_object),
     )
 
+  if getv(from_object, ['reference_images']) is not None:
+    setv(
+        parent_object,
+        ['instances[0]', 'referenceImages'],
+        [
+            _VideoGenerationReferenceImage_to_vertex(item, to_object)
+            for item in getv(from_object, ['reference_images'])
+        ],
+    )
+
   if getv(from_object, ['compression_quality']) is not None:
     setv(
         parent_object,
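
Combined with `_VideoGenerationReferenceImage_to_vertex` above, this hunk wires reference images for video generation into `instances[0].referenceImages` on Vertex AI; the Gemini API path rejects them, per the mldev hunk earlier. A hypothetical usage sketch, assuming the public config field and type mirror the converter names; the model name, file path, and `reference_type` value are placeholders, not taken from the diff:

```
# Hypothetical sketch only; Vertex AI client assumed (see setup below).
from google import genai
from google.genai import types

client = genai.Client(
    vertexai=True, project='my-project', location='us-central1'  # placeholders
)

operation = client.models.generate_videos(
    model='veo-placeholder-model',  # placeholder model name
    prompt='a turntable shot of the product',
    config=types.GenerateVideosConfig(
        reference_images=[
            types.VideoGenerationReferenceImage(
                image=types.Image.from_file('reference.png'),  # placeholder path
                reference_type='asset',  # assumed value; check the SDK docs
            ),
        ],
    ),
)
```
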
@@ -4618,6 +4766,63 @@ def _RecontextImageResponse_from_vertex(
   return to_object
 
 
+def _EntityLabel_from_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['label']) is not None:
+    setv(to_object, ['label'], getv(from_object, ['label']))
+
+  if getv(from_object, ['score']) is not None:
+    setv(to_object, ['score'], getv(from_object, ['score']))
+
+  return to_object
+
+
+def _GeneratedImageMask_from_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['_self']) is not None:
+    setv(
+        to_object,
+        ['mask'],
+        _Image_from_vertex(getv(from_object, ['_self']), to_object),
+    )
+
+  if getv(from_object, ['labels']) is not None:
+    setv(
+        to_object,
+        ['labels'],
+        [
+            _EntityLabel_from_vertex(item, to_object)
+            for item in getv(from_object, ['labels'])
+        ],
+    )
+
+  return to_object
+
+
+def _SegmentImageResponse_from_vertex(
+    from_object: Union[dict[str, Any], object],
+    parent_object: Optional[dict[str, Any]] = None,
+) -> dict[str, Any]:
+  to_object: dict[str, Any] = {}
+  if getv(from_object, ['predictions']) is not None:
+    setv(
+        to_object,
+        ['generated_masks'],
+        [
+            _GeneratedImageMask_from_vertex(item, to_object)
+            for item in getv(from_object, ['predictions'])
+        ],
+    )
+
+  return to_object
+
+
 def _Endpoint_from_vertex(
     from_object: Union[dict[str, Any], object],
     parent_object: Optional[dict[str, Any]] = None,
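
On the response side, `_SegmentImageResponse_from_vertex` turns each Vertex prediction into a `generated_masks` entry: the prediction itself is wrapped as the `mask` image via the `['_self']` lookup, and any `labels` are converted through `_EntityLabel_from_vertex`. A rough sketch of the mapping, with an assumed prediction payload (the exact fields returned by the segmentation model are not part of this diff):

```
# Illustrative mapping only; 'labels' entries mirror _EntityLabel_from_vertex.
vertex_response = {
    'predictions': [
        {'labels': [{'label': 'cat', 'score': 0.97}]},  # plus the mask image fields
    ],
}

converted = _SegmentImageResponse_from_vertex(vertex_response)
# converted looks roughly like:
# {
#   'generated_masks': [{
#       'mask': ...,  # dict produced by _Image_from_vertex from the whole prediction
#       'labels': [{'label': 'cat', 'score': 0.97}],
#   }]
# }
```
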
@@ -5511,6 +5716,89 @@ class Models(_api_module.BaseModule):
     self._api_client._verify_response(return_value)
     return return_value
 
+  def segment_image(
+      self,
+      *,
+      model: str,
+      source: types.SegmentImageSourceOrDict,
+      config: Optional[types.SegmentImageConfigOrDict] = None,
+  ) -> types.SegmentImageResponse:
+    """Segments an image, creating a mask of a specified area.
+
+    Args:
+      model (str): The model to use.
+      source (SegmentImageSource): An object containing the source inputs
+        (prompt, image, scribble_image) for image segmentation. The prompt is
+        required for prompt mode and semantic mode, disallowed for other modes.
+        scribble_image is required for the interactive mode, disallowed for
+        other modes.
+      config (SegmentImageConfig): Configuration for segmentation.
+
+    Usage:
+
+    ```
+    response = client.models.segment_image(
+        model="image-segmentation-001",
+        source=types.SegmentImageSource(
+            image=types.Image.from_file(IMAGE_FILE_PATH),
+        ),
+    )
+
+    mask_image = response.generated_masks[0].mask
+    ```
+    """
+
+    parameter_model = types._SegmentImageParameters(
+        model=model,
+        source=source,
+        config=config,
+    )
+
+    request_url_dict: Optional[dict[str, str]]
+    if not self._api_client.vertexai:
+      raise ValueError('This method is only supported in the Vertex AI client.')
+    else:
+      request_dict = _SegmentImageParameters_to_vertex(
+          self._api_client, parameter_model
+      )
+      request_url_dict = request_dict.get('_url')
+      if request_url_dict:
+        path = '{model}:predict'.format_map(request_url_dict)
+      else:
+        path = '{model}:predict'
+
+    query_params = request_dict.get('_query')
+    if query_params:
+      path = f'{path}?{urlencode(query_params)}'
+    # TODO: remove the hack that pops config.
+    request_dict.pop('config', None)
+
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options
+
+    request_dict = _common.convert_to_dict(request_dict)
+    request_dict = _common.encode_unserializable_types(request_dict)
+
+    response = self._api_client.request(
+        'post', path, request_dict, http_options
+    )
+
+    response_dict = '' if not response.body else json.loads(response.body)
+
+    if self._api_client.vertexai:
+      response_dict = _SegmentImageResponse_from_vertex(response_dict)
+
+    return_value = types.SegmentImageResponse._from_response(
+        response=response_dict, kwargs=parameter_model.model_dump()
+    )
+
+    self._api_client._verify_response(return_value)
+    return return_value
+
   def get(
       self, *, model: str, config: Optional[types.GetModelConfigOrDict] = None
   ) -> types.Model:
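
Beyond the docstring above, a slightly fuller sketch of calling the new method on a Vertex AI client (the non-Vertex path raises a ValueError). The project, location, and file path are placeholders; the config fields mirror those handled by `_SegmentImageConfig_to_vertex`, and the mode value follows the async docstring below:

```
# Hedged usage sketch for Models.segment_image; Vertex AI client only.
from google import genai
from google.genai import types

client = genai.Client(
    vertexai=True, project='my-project', location='us-central1'  # placeholders
)
IMAGE_FILE_PATH = 'photo.png'  # placeholder path

response = client.models.segment_image(
    model='image-segmentation-001',
    source=types.SegmentImageSource(
        image=types.Image.from_file(IMAGE_FILE_PATH),  # as in the docstring above
    ),
    config=types.SegmentImageConfig(
        mode=types.SegmentMode.foreground,  # as shown in the async docstring below
        confidence_threshold=0.5,           # optional; maps to parameters.confidenceThreshold
    ),
)

mask = response.generated_masks[0].mask  # a types.Image
```
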
@@ -7240,6 +7528,92 @@ class AsyncModels(_api_module.BaseModule):
     self._api_client._verify_response(return_value)
     return return_value
 
+  async def segment_image(
+      self,
+      *,
+      model: str,
+      source: types.SegmentImageSourceOrDict,
+      config: Optional[types.SegmentImageConfigOrDict] = None,
+  ) -> types.SegmentImageResponse:
+    """Segments an image, creating a mask of a specified area.
+
+    Args:
+      model (str): The model to use.
+      source (SegmentImageSource): An object containing the source inputs
+        (prompt, image, scribble_image) for image segmentation. The prompt is
+        required for prompt mode and semantic mode, disallowed for other modes.
+        scribble_image is required for the interactive mode, disallowed for
+        other modes.
+      config (SegmentImageConfig): Configuration for segmentation.
+
+    Usage:
+
+    ```
+    response = client.models.segment_image(
+        model="image-segmentation-001",
+        source=types.SegmentImageSource(
+            image=types.Image.from_file(IMAGE_FILE_PATH),
+        ),
+        config=types.SegmentImageConfig(
+            mode=types.SegmentMode.foreground,
+        ),
+    )
+
+    mask_image = response.generated_masks[0].mask
+    ```
+    """
+
+    parameter_model = types._SegmentImageParameters(
+        model=model,
+        source=source,
+        config=config,
+    )
+
+    request_url_dict: Optional[dict[str, str]]
+    if not self._api_client.vertexai:
+      raise ValueError('This method is only supported in the Vertex AI client.')
+    else:
+      request_dict = _SegmentImageParameters_to_vertex(
+          self._api_client, parameter_model
+      )
+      request_url_dict = request_dict.get('_url')
+      if request_url_dict:
+        path = '{model}:predict'.format_map(request_url_dict)
+      else:
+        path = '{model}:predict'
+
+    query_params = request_dict.get('_query')
+    if query_params:
+      path = f'{path}?{urlencode(query_params)}'
+    # TODO: remove the hack that pops config.
+    request_dict.pop('config', None)
+
+    http_options: Optional[types.HttpOptions] = None
+    if (
+        parameter_model.config is not None
+        and parameter_model.config.http_options is not None
+    ):
+      http_options = parameter_model.config.http_options
+
+    request_dict = _common.convert_to_dict(request_dict)
+    request_dict = _common.encode_unserializable_types(request_dict)
+
+    response = await self._api_client.async_request(
+        'post', path, request_dict, http_options
+    )
+
+    response_dict = '' if not response.body else json.loads(response.body)
+
+    if self._api_client.vertexai:
+      response_dict = _SegmentImageResponse_from_vertex(response_dict)
+
+    return_value = types.SegmentImageResponse._from_response(
+        response=response_dict, kwargs=parameter_model.model_dump()
+    )
+
+    self._api_client._verify_response(return_value)
+    return return_value
+
   async def get(
       self, *, model: str, config: Optional[types.GetModelConfigOrDict] = None
   ) -> types.Model:
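
The async variant is identical apart from `await` and `async_request`; on a client it is reached through the `aio` surface. A minimal sketch under the same placeholder assumptions as the synchronous example above:

```
# Hedged async usage sketch for AsyncModels.segment_image; Vertex AI client only.
import asyncio

from google import genai
from google.genai import types


async def main() -> None:
  client = genai.Client(
      vertexai=True, project='my-project', location='us-central1'  # placeholders
  )
  response = await client.aio.models.segment_image(
      model='image-segmentation-001',
      source=types.SegmentImageSource(
          image=types.Image.from_file('photo.png'),  # placeholder path
      ),
  )
  mask = response.generated_masks[0].mask  # a types.Image


asyncio.run(main())
```
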
google/genai/operations.py CHANGED
@@ -26,6 +26,7 @@ from . import types
 from ._common import get_value_by_path as getv
 from ._common import set_value_by_path as setv
 
+
 logger = logging.getLogger('google_genai.operations')
 
 
google/genai/tunings.py CHANGED
@@ -28,6 +28,7 @@ from ._common import get_value_by_path as getv
 from ._common import set_value_by_path as setv
 from .pagers import AsyncPager, Pager
 
+
 logger = logging.getLogger('google_genai.tunings')
 
 