google-genai 0.0.1.tar.gz → 0.1.0.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {google_genai-0.0.1/google_genai.egg-info → google_genai-0.1.0}/PKG-INFO +52 -44
- {google_genai-0.0.1 → google_genai-0.1.0}/README.md +49 -42
- {google_genai-0.0.1 → google_genai-0.1.0}/google/genai/__init__.py +2 -0
- {google_genai-0.0.1 → google_genai-0.1.0}/google/genai/_api_client.py +1 -1
- {google_genai-0.0.1 → google_genai-0.1.0}/google/genai/_automatic_function_calling_util.py +0 -44
- {google_genai-0.0.1 → google_genai-0.1.0}/google/genai/_extra_utils.py +15 -0
- {google_genai-0.0.1 → google_genai-0.1.0}/google/genai/_transformers.py +3 -2
- {google_genai-0.0.1 → google_genai-0.1.0}/google/genai/batches.py +148 -0
- {google_genai-0.0.1 → google_genai-0.1.0}/google/genai/caches.py +10 -0
- {google_genai-0.0.1 → google_genai-0.1.0}/google/genai/chats.py +12 -2
- {google_genai-0.0.1 → google_genai-0.1.0}/google/genai/live.py +74 -42
- {google_genai-0.0.1 → google_genai-0.1.0}/google/genai/models.py +98 -11
- {google_genai-0.0.1 → google_genai-0.1.0}/google/genai/tunings.py +241 -4
- {google_genai-0.0.1 → google_genai-0.1.0}/google/genai/types.py +379 -85
- {google_genai-0.0.1 → google_genai-0.1.0/google_genai.egg-info}/PKG-INFO +52 -44
- {google_genai-0.0.1 → google_genai-0.1.0}/pyproject.toml +3 -2
- {google_genai-0.0.1 → google_genai-0.1.0}/LICENSE +0 -0
- {google_genai-0.0.1 → google_genai-0.1.0}/google/genai/_common.py +0 -0
- {google_genai-0.0.1 → google_genai-0.1.0}/google/genai/_replay_api_client.py +0 -0
- {google_genai-0.0.1 → google_genai-0.1.0}/google/genai/_test_api_client.py +0 -0
- {google_genai-0.0.1 → google_genai-0.1.0}/google/genai/client.py +0 -0
- {google_genai-0.0.1 → google_genai-0.1.0}/google/genai/errors.py +0 -0
- {google_genai-0.0.1 → google_genai-0.1.0}/google/genai/files.py +0 -0
- {google_genai-0.0.1 → google_genai-0.1.0}/google/genai/pagers.py +0 -0
- {google_genai-0.0.1 → google_genai-0.1.0}/google_genai.egg-info/SOURCES.txt +0 -0
- {google_genai-0.0.1 → google_genai-0.1.0}/google_genai.egg-info/dependency_links.txt +0 -0
- {google_genai-0.0.1 → google_genai-0.1.0}/google_genai.egg-info/requires.txt +0 -0
- {google_genai-0.0.1 → google_genai-0.1.0}/google_genai.egg-info/top_level.txt +0 -0
- {google_genai-0.0.1 → google_genai-0.1.0}/setup.cfg +0 -0
PKG-INFO

````diff
@@ -1,10 +1,10 @@
 Metadata-Version: 2.1
 Name: google-genai
-Version: 0.0.1
+Version: 0.1.0
 Summary: GenAI Python SDK
 Author-email: Google LLC <googleapis-packages@google.com>
 License: Apache-2.0
-Project-URL: Homepage, https://github.com/
+Project-URL: Homepage, https://github.com/googleapis/python-genai
 Classifier: Intended Audience :: Developers
 Classifier: License :: OSI Approved :: Apache Software License
 Classifier: Operating System :: OS Independent
@@ -14,6 +14,7 @@ Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
 Classifier: Topic :: Internet
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.9
@@ -66,7 +67,7 @@ The `client.models` modules exposes model inferencing and model getters.
 
 ``` python
 response = client.models.generate_content(
-    model='gemini-
+    model='gemini-2.0-flash-exp', contents='What is your name?'
 )
 print(response.text)
 ```
@@ -75,7 +76,7 @@ print(response.text)
 
 ``` python
 response = client.models.generate_content(
-    model='gemini-
+    model='gemini-2.0-flash-exp',
     contents='high',
     config=types.GenerateContentConfig(
         system_instruction='I say high, you say low',
@@ -92,7 +93,7 @@ dictionaries. You can get the type from `google.genai.types`.
 
 ``` python
 response = client.models.generate_content(
-    model='gemini-
+    model='gemini-2.0-flash-exp',
     contents=types.Part.from_text('Why is sky blue?'),
     config=types.GenerateContentConfig(
         temperature=0,
@@ -114,7 +115,7 @@ response
 
 ``` python
 response = client.models.generate_content(
-    model='gemini-
+    model='gemini-2.0-flash-exp',
     contents='Say something bad.',
     config=types.GenerateContentConfig(
         safety_settings= [types.SafetySetting(
@@ -143,7 +144,7 @@ def get_current_weather(location: str,) -> int:
   return 'sunny'
 
 response = client.models.generate_content(
-    model='gemini-
+    model='gemini-2.0-flash-exp',
     contents="What is the weather like in Boston?",
     config=types.GenerateContentConfig(tools=[get_current_weather],)
 )
@@ -171,7 +172,7 @@ tool = types.Tool(function_declarations=[function])
 
 
 response = client.models.generate_content(
-    model='gemini-
+    model='gemini-2.0-flash-exp',
     contents="What is the weather like in Boston?",
     config=types.GenerateContentConfig(tools=[tool],)
 )
@@ -191,7 +192,7 @@ function_response_part = types.Part.from_function_response(
 )
 
 response = client.models.generate_content(
-    model='gemini-
+    model='gemini-2.0-flash-exp',
     contents=[
         types.Part.from_text("What is the weather like in Boston?"),
         function_call_part,
@@ -221,7 +222,7 @@ class CountryInfo(BaseModel):
 
 
 response = client.models.generate_content(
-    model='gemini-
+    model='gemini-2.0-flash-exp',
     contents='Give me information of the United States.',
     config=types.GenerateContentConfig(
         response_mime_type= 'application/json',
@@ -233,7 +234,7 @@ print(response.text)
 
 ``` python
 response = client.models.generate_content(
-    model='gemini-
+    model='gemini-2.0-flash-exp',
     contents='Give me information of the United States.',
     config={
         'response_mime_type': 'application/json',
@@ -267,7 +268,7 @@ print(response.text)
 
 ``` python
 for chunk in client.models.generate_content_stream(
-    model='gemini-
+    model='gemini-2.0-flash-exp', contents='Tell me a story in 300 words.'
 ):
     print(chunk.text)
 ```
@@ -282,7 +283,7 @@ of `client.models.generate_content`
 
 ``` python
 request = await client.aio.models.generate_content(
-    model='gemini-
+    model='gemini-2.0-flash-exp', contents='Tell me a story in 300 words.'
 )
 
 print(response.text)
@@ -292,7 +293,7 @@ print(response.text)
 
 ``` python
 async for response in client.aio.models.generate_content_stream(
-    model='gemini-
+    model='gemini-2.0-flash-exp', contents='Tell me a story in 300 words.'
 ):
     print(response.text)
 ```
@@ -301,7 +302,7 @@ async for response in client.aio.models.generate_content_stream(
 
 ``` python
 response = client.models.count_tokens(
-    model='gemini-
+    model='gemini-2.0-flash-exp',
     contents='What is your name?',
 )
 print(response)
@@ -313,7 +314,7 @@ Compute tokens is not supported by Google AI.
 
 ``` python
 response = client.models.compute_tokens(
-    model='gemini-
+    model='gemini-2.0-flash-exp',
     contents='What is your name?',
 )
 print(response)
@@ -323,7 +324,7 @@ print(response)
 
 ``` python
 response = await client.aio.models.count_tokens(
-    model='gemini-
+    model='gemini-2.0-flash-exp',
     contents='What is your name?',
 )
 print(response)
@@ -360,14 +361,12 @@ Support for generate image in Google AI is behind an allowlist
 # Generate Image
 response1 = client.models.generate_image(
     model='imagen-3.0-generate-001',
-    prompt='
+    prompt='An umbrella in the foreground, and a rainy night sky in the background',
     config=types.GenerateImageConfig(
+        negative_prompt= "human",
         number_of_images= 1,
-        person_generation= "ALLOW_ADULT",
-        safety_filter_level= "BLOCK_LOW_AND_ABOVE",
         include_rai_reason= True,
-
-        aspect_ratio= "4:3"
+        output_mime_type= "image/jpeg"
     )
 )
 response1.generated_images[0].image.show()
@@ -375,9 +374,10 @@ response1.generated_images[0].image.show()
 
 #### Upscale Image
 
+Upscale image is not supported in Google AI.
+
 ``` python
-# Upscale the generated image from
-from google.genai.types import Image
+# Upscale the generated image from above
 response2 = client.models.upscale_image(
     model='imagen-3.0-generate-001',
     image=response1.generated_images[0].image,
@@ -388,21 +388,36 @@ response2.generated_images[0].image.show()
 
 #### Edit Image
 
+Edit image is not supported in Google AI.
+
 ``` python
-# Edit the generated image from
-from google.genai.types import
+# Edit the generated image from above
+from google.genai.types import RawReferenceImage, MaskReferenceImage
+raw_ref_image = RawReferenceImage(
+    reference_id=1,
+    reference_image=response1.generated_images[0].image,
+)
+
+# Model computes a mask of the background
+mask_ref_image = MaskReferenceImage(
+    reference_id=2,
+    config=types.MaskReferenceConfig(
+        mask_mode='MASK_MODE_BACKGROUND',
+        mask_dilation=0,
+    ),
+)
+
 response3 = client.models.edit_image(
-    model='
-    prompt='
-
+    model='imagen-3.0-capability-preview-0930',
+    prompt='Sunlight and clear sky',
+    reference_images=[raw_ref_image, mask_ref_image],
     config=types.EditImageConfig(
-        edit_mode=
-        mask_type="semantic",
-        segmentation_classes=[156],
+        edit_mode= 'EDIT_MODE_INPAINT_INSERTION',
         number_of_images= 1,
+        negative_prompt= 'human',
         include_rai_reason= True,
-
-        )
+        output_mime_type= 'image/jpeg',
+    ),
 )
 response3.generated_images[0].image.show()
 ```
@@ -439,7 +454,7 @@ client.files.delete(name=file3.name)
 ### Create
 
 ``` python
-if client.
+if client.vertexai:
     file_uris = [
         'gs://cloud-samples-data/generative-ai/pdf/2312.11805v3.pdf',
         'gs://cloud-samples-data/generative-ai/pdf/2403.05530.pdf'
@@ -624,7 +639,7 @@ distillation_job = client.tunings.distill(
     config=genai.types.CreateDistillationJobConfig(
         epoch_count=1,
         pipeline_root_directory=(
-            "gs://
+            "gs://my-bucket"
        ),
    ),
 )
@@ -689,14 +704,7 @@ Only supported in Vertex AI.
 # Specify model and source file only, destination and job display name will be auto-populated
 job = client.batches.create(
     model='gemini-1.5-flash-002',
-    src='bq://
-    # # optionally specify destination and display_name by yourself
-    # config = {
-    #     'dest': 'bq://vertex-sdk-dev.unified_genai_tests_batches.generate_content_responses',
-    #     'display_name': 'create_batch_job_demo'
-    # }
-)
-
+    src='bq://my-project.my-dataset.my-table',
 job
 ```
 
````
README.md

````diff
@@ -39,7 +39,7 @@ The `client.models` modules exposes model inferencing and model getters.
 
 ``` python
 response = client.models.generate_content(
-    model='gemini-
+    model='gemini-2.0-flash-exp', contents='What is your name?'
 )
 print(response.text)
 ```
@@ -48,7 +48,7 @@ print(response.text)
 
 ``` python
 response = client.models.generate_content(
-    model='gemini-
+    model='gemini-2.0-flash-exp',
     contents='high',
     config=types.GenerateContentConfig(
         system_instruction='I say high, you say low',
@@ -65,7 +65,7 @@ dictionaries. You can get the type from `google.genai.types`.
 
 ``` python
 response = client.models.generate_content(
-    model='gemini-
+    model='gemini-2.0-flash-exp',
     contents=types.Part.from_text('Why is sky blue?'),
     config=types.GenerateContentConfig(
         temperature=0,
@@ -87,7 +87,7 @@ response
 
 ``` python
 response = client.models.generate_content(
-    model='gemini-
+    model='gemini-2.0-flash-exp',
     contents='Say something bad.',
     config=types.GenerateContentConfig(
         safety_settings= [types.SafetySetting(
@@ -116,7 +116,7 @@ def get_current_weather(location: str,) -> int:
   return 'sunny'
 
 response = client.models.generate_content(
-    model='gemini-
+    model='gemini-2.0-flash-exp',
     contents="What is the weather like in Boston?",
     config=types.GenerateContentConfig(tools=[get_current_weather],)
 )
@@ -144,7 +144,7 @@ tool = types.Tool(function_declarations=[function])
 
 
 response = client.models.generate_content(
-    model='gemini-
+    model='gemini-2.0-flash-exp',
     contents="What is the weather like in Boston?",
     config=types.GenerateContentConfig(tools=[tool],)
 )
@@ -164,7 +164,7 @@ function_response_part = types.Part.from_function_response(
 )
 
 response = client.models.generate_content(
-    model='gemini-
+    model='gemini-2.0-flash-exp',
     contents=[
         types.Part.from_text("What is the weather like in Boston?"),
         function_call_part,
@@ -194,7 +194,7 @@ class CountryInfo(BaseModel):
 
 
 response = client.models.generate_content(
-    model='gemini-
+    model='gemini-2.0-flash-exp',
     contents='Give me information of the United States.',
     config=types.GenerateContentConfig(
         response_mime_type= 'application/json',
@@ -206,7 +206,7 @@ print(response.text)
 
 ``` python
 response = client.models.generate_content(
-    model='gemini-
+    model='gemini-2.0-flash-exp',
     contents='Give me information of the United States.',
     config={
         'response_mime_type': 'application/json',
@@ -240,7 +240,7 @@ print(response.text)
 
 ``` python
 for chunk in client.models.generate_content_stream(
-    model='gemini-
+    model='gemini-2.0-flash-exp', contents='Tell me a story in 300 words.'
 ):
     print(chunk.text)
 ```
@@ -255,7 +255,7 @@ of `client.models.generate_content`
 
 ``` python
 request = await client.aio.models.generate_content(
-    model='gemini-
+    model='gemini-2.0-flash-exp', contents='Tell me a story in 300 words.'
 )
 
 print(response.text)
@@ -265,7 +265,7 @@ print(response.text)
 
 ``` python
 async for response in client.aio.models.generate_content_stream(
-    model='gemini-
+    model='gemini-2.0-flash-exp', contents='Tell me a story in 300 words.'
 ):
     print(response.text)
 ```
@@ -274,7 +274,7 @@ async for response in client.aio.models.generate_content_stream(
 
 ``` python
 response = client.models.count_tokens(
-    model='gemini-
+    model='gemini-2.0-flash-exp',
     contents='What is your name?',
 )
 print(response)
@@ -286,7 +286,7 @@ Compute tokens is not supported by Google AI.
 
 ``` python
 response = client.models.compute_tokens(
-    model='gemini-
+    model='gemini-2.0-flash-exp',
     contents='What is your name?',
 )
 print(response)
@@ -296,7 +296,7 @@ print(response)
 
 ``` python
 response = await client.aio.models.count_tokens(
-    model='gemini-
+    model='gemini-2.0-flash-exp',
     contents='What is your name?',
 )
 print(response)
@@ -333,14 +333,12 @@ Support for generate image in Google AI is behind an allowlist
 # Generate Image
 response1 = client.models.generate_image(
     model='imagen-3.0-generate-001',
-    prompt='
+    prompt='An umbrella in the foreground, and a rainy night sky in the background',
     config=types.GenerateImageConfig(
+        negative_prompt= "human",
         number_of_images= 1,
-        person_generation= "ALLOW_ADULT",
-        safety_filter_level= "BLOCK_LOW_AND_ABOVE",
         include_rai_reason= True,
-
-        aspect_ratio= "4:3"
+        output_mime_type= "image/jpeg"
     )
 )
 response1.generated_images[0].image.show()
@@ -348,9 +346,10 @@ response1.generated_images[0].image.show()
 
 #### Upscale Image
 
+Upscale image is not supported in Google AI.
+
 ``` python
-# Upscale the generated image from
-from google.genai.types import Image
+# Upscale the generated image from above
 response2 = client.models.upscale_image(
     model='imagen-3.0-generate-001',
     image=response1.generated_images[0].image,
@@ -361,21 +360,36 @@ response2.generated_images[0].image.show()
 
 #### Edit Image
 
+Edit image is not supported in Google AI.
+
 ``` python
-# Edit the generated image from
-from google.genai.types import
+# Edit the generated image from above
+from google.genai.types import RawReferenceImage, MaskReferenceImage
+raw_ref_image = RawReferenceImage(
+    reference_id=1,
+    reference_image=response1.generated_images[0].image,
+)
+
+# Model computes a mask of the background
+mask_ref_image = MaskReferenceImage(
+    reference_id=2,
+    config=types.MaskReferenceConfig(
+        mask_mode='MASK_MODE_BACKGROUND',
+        mask_dilation=0,
+    ),
+)
+
 response3 = client.models.edit_image(
-    model='
-    prompt='
-
+    model='imagen-3.0-capability-preview-0930',
+    prompt='Sunlight and clear sky',
+    reference_images=[raw_ref_image, mask_ref_image],
     config=types.EditImageConfig(
-        edit_mode=
-        mask_type="semantic",
-        segmentation_classes=[156],
+        edit_mode= 'EDIT_MODE_INPAINT_INSERTION',
         number_of_images= 1,
+        negative_prompt= 'human',
         include_rai_reason= True,
-
-        )
+        output_mime_type= 'image/jpeg',
+    ),
 )
 response3.generated_images[0].image.show()
 ```
@@ -412,7 +426,7 @@ client.files.delete(name=file3.name)
 ### Create
 
 ``` python
-if client.
+if client.vertexai:
    file_uris = [
        'gs://cloud-samples-data/generative-ai/pdf/2312.11805v3.pdf',
        'gs://cloud-samples-data/generative-ai/pdf/2403.05530.pdf'
@@ -597,7 +611,7 @@ distillation_job = client.tunings.distill(
     config=genai.types.CreateDistillationJobConfig(
         epoch_count=1,
         pipeline_root_directory=(
-            "gs://
+            "gs://my-bucket"
        ),
    ),
 )
@@ -662,14 +676,7 @@ Only supported in Vertex AI.
 # Specify model and source file only, destination and job display name will be auto-populated
 job = client.batches.create(
     model='gemini-1.5-flash-002',
-    src='bq://
-    # # optionally specify destination and display_name by yourself
-    # config = {
-    #     'dest': 'bq://vertex-sdk-dev.unified_genai_tests_batches.generate_content_responses',
-    #     'display_name': 'create_batch_job_demo'
-    # }
-)
-
+    src='bq://my-project.my-dataset.my-table',
 job
 ```
 
````
google/genai/_api_client.py

````diff
@@ -51,7 +51,7 @@ class HttpOptions(TypedDict):
 def _append_library_version_headers(headers: dict[str, str]) -> None:
   """Appends the telemetry header to the headers dict."""
   # TODO: Automate revisions to the SDK library version.
-  library_label = 'google-genai-sdk/0.1.0'
+  library_label = f'google-genai-sdk/0.1.0'
   language_label = 'gl-python/' + sys.version.split()[0]
   version_header_value = f'{library_label} {language_label}'
   if (
````
google/genai/_automatic_function_calling_util.py

````diff
@@ -295,47 +295,3 @@ def _get_required_fields(schema: types.Schema) -> list[str]:
       if not field_schema.nullable and field_schema.default is None
   ]
 
-
-def function_to_declaration(
-    client, func: Callable
-) -> types.FunctionDeclaration:
-  """Converts a function to a FunctionDeclaration."""
-  parameters_properties = {}
-  for name, param in inspect.signature(func).parameters.items():
-    if param.kind in (
-        inspect.Parameter.POSITIONAL_OR_KEYWORD,
-        inspect.Parameter.KEYWORD_ONLY,
-        inspect.Parameter.POSITIONAL_ONLY,
-    ):
-      schema = _parse_schema_from_parameter(client, param, func.__name__)
-      parameters_properties[name] = schema
-  declaration = types.FunctionDeclaration(
-      name=func.__name__,
-      description=func.__doc__,
-  )
-  if parameters_properties:
-    declaration.parameters = types.Schema(
-        type='OBJECT',
-        properties=parameters_properties,
-    )
-    if client.vertexai:
-      declaration.parameters.required = _get_required_fields(
-          declaration.parameters
-      )
-  if not client.vertexai:
-    return declaration
-
-  return_annotation = inspect.signature(func).return_annotation
-  if return_annotation is inspect._empty:
-    return declaration
-
-  declaration.response = _parse_schema_from_parameter(
-      client,
-      inspect.Parameter(
-          'return_value',
-          inspect.Parameter.POSITIONAL_OR_KEYWORD,
-          annotation=return_annotation,
-      ),
-      func.__name__,
-  )
-  return declaration
````
google/genai/_extra_utils.py

````diff
@@ -293,3 +293,18 @@ def get_max_remote_calls_afc(
   ):
     return _DEFAULT_MAX_REMOTE_CALLS_AFC
   return int(config_model.automatic_function_calling.maximum_remote_calls)
+
+def should_append_afc_history(
+    config: Optional[types.GenerateContentConfigOrDict] = None,
+) -> bool:
+  config_model = (
+      types.GenerateContentConfig(**config)
+      if config and isinstance(config, dict)
+      else config
+  )
+  if (
+      not config_model
+      or not config_model.automatic_function_calling
+  ):
+    return True
+  return not config_model.automatic_function_calling.ignore_call_history
````
google/genai/_transformers.py

````diff
@@ -27,7 +27,6 @@ import PIL.Image
 
 from . import _api_client
 from . import types
-from ._automatic_function_calling_util import function_to_declaration
 
 
 def _resource_name(
@@ -307,7 +306,9 @@ def t_tool(client: _api_client.ApiClient, origin) -> types.Tool:
     return None
   if inspect.isfunction(origin):
     return types.Tool(
-        function_declarations=[
+        function_declarations=[
+            types.FunctionDeclaration.from_function(client, origin)
+        ]
     )
   else:
     return origin
````