google-genai 1.55.0__py3-none-any.whl → 1.57.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- google/genai/_api_client.py +37 -18
- google/genai/_interactions/_base_client.py +8 -2
- google/genai/_interactions/resources/interactions.py +6 -6
- google/genai/_interactions/types/__init__.py +4 -0
- google/genai/_interactions/types/audio_content.py +0 -1
- google/genai/_interactions/types/audio_content_param.py +0 -1
- google/genai/_interactions/types/code_execution_call_content.py +0 -1
- google/genai/_interactions/types/code_execution_call_content_param.py +0 -1
- google/genai/_interactions/types/code_execution_result_content.py +0 -1
- google/genai/_interactions/types/code_execution_result_content_param.py +0 -1
- google/genai/_interactions/types/content.py +63 -0
- google/genai/_interactions/types/content_delta.py +7 -23
- google/genai/_interactions/types/content_param.py +61 -0
- google/genai/_interactions/types/content_start.py +4 -44
- google/genai/_interactions/types/deep_research_agent_config.py +0 -1
- google/genai/_interactions/types/deep_research_agent_config_param.py +0 -1
- google/genai/_interactions/types/document_content.py +3 -2
- google/genai/_interactions/types/document_content_param.py +3 -2
- google/genai/_interactions/types/document_mime_type.py +23 -0
- google/genai/_interactions/types/document_mime_type_param.py +25 -0
- google/genai/_interactions/types/dynamic_agent_config.py +0 -1
- google/genai/_interactions/types/dynamic_agent_config_param.py +0 -1
- google/genai/_interactions/types/file_search_result_content.py +0 -1
- google/genai/_interactions/types/file_search_result_content_param.py +0 -1
- google/genai/_interactions/types/function_call_content.py +0 -1
- google/genai/_interactions/types/function_call_content_param.py +0 -1
- google/genai/_interactions/types/function_result_content.py +1 -2
- google/genai/_interactions/types/function_result_content_param.py +1 -2
- google/genai/_interactions/types/google_search_call_content.py +0 -1
- google/genai/_interactions/types/google_search_call_content_param.py +0 -1
- google/genai/_interactions/types/google_search_result_content.py +0 -1
- google/genai/_interactions/types/google_search_result_content_param.py +0 -1
- google/genai/_interactions/types/image_content.py +1 -2
- google/genai/_interactions/types/image_content_param.py +1 -2
- google/genai/_interactions/types/interaction.py +4 -52
- google/genai/_interactions/types/interaction_create_params.py +2 -22
- google/genai/_interactions/types/mcp_server_tool_call_content.py +0 -1
- google/genai/_interactions/types/mcp_server_tool_call_content_param.py +0 -1
- google/genai/_interactions/types/mcp_server_tool_result_content.py +1 -2
- google/genai/_interactions/types/mcp_server_tool_result_content_param.py +1 -2
- google/genai/_interactions/types/model.py +1 -0
- google/genai/_interactions/types/model_param.py +1 -0
- google/genai/_interactions/types/text_content.py +0 -1
- google/genai/_interactions/types/text_content_param.py +0 -1
- google/genai/_interactions/types/thinking_level.py +1 -1
- google/genai/_interactions/types/thought_content.py +0 -1
- google/genai/_interactions/types/thought_content_param.py +0 -1
- google/genai/_interactions/types/turn.py +3 -44
- google/genai/_interactions/types/turn_param.py +4 -40
- google/genai/_interactions/types/url_context_call_content.py +0 -1
- google/genai/_interactions/types/url_context_call_content_param.py +0 -1
- google/genai/_interactions/types/url_context_result_content.py +0 -1
- google/genai/_interactions/types/url_context_result_content_param.py +0 -1
- google/genai/_interactions/types/usage.py +1 -1
- google/genai/_interactions/types/usage_param.py +1 -1
- google/genai/_interactions/types/video_content.py +1 -2
- google/genai/_interactions/types/video_content_param.py +1 -2
- google/genai/_live_converters.py +36 -64
- google/genai/_local_tokenizer_loader.py +1 -0
- google/genai/_tokens_converters.py +14 -14
- google/genai/batches.py +27 -22
- google/genai/caches.py +42 -42
- google/genai/chats.py +0 -2
- google/genai/client.py +3 -1
- google/genai/files.py +224 -0
- google/genai/models.py +57 -72
- google/genai/tests/chats/test_get_history.py +9 -8
- google/genai/tests/chats/test_validate_response.py +1 -1
- google/genai/tests/client/test_client_requests.py +1 -135
- google/genai/tests/files/test_register.py +272 -0
- google/genai/tests/files/test_register_table.py +70 -0
- google/genai/tests/interactions/test_auth.py +479 -0
- google/genai/tests/interactions/test_integration.py +2 -0
- google/genai/tests/interactions/test_paths.py +105 -0
- google/genai/tests/live/test_live.py +2 -36
- google/genai/tests/local_tokenizer/test_local_tokenizer.py +1 -1
- google/genai/tests/models/test_function_call_streaming.py +90 -90
- google/genai/tests/models/test_generate_content.py +1 -2
- google/genai/tests/models/test_recontext_image.py +1 -1
- google/genai/tests/pytest_helper.py +17 -0
- google/genai/tunings.py +1 -27
- google/genai/types.py +603 -518
- google/genai/version.py +1 -1
- {google_genai-1.55.0.dist-info → google_genai-1.57.0.dist-info}/METADATA +224 -22
- {google_genai-1.55.0.dist-info → google_genai-1.57.0.dist-info}/RECORD +88 -80
- {google_genai-1.55.0.dist-info → google_genai-1.57.0.dist-info}/WHEEL +0 -0
- {google_genai-1.55.0.dist-info → google_genai-1.57.0.dist-info}/licenses/LICENSE +0 -0
- {google_genai-1.55.0.dist-info → google_genai-1.57.0.dist-info}/top_level.txt +0 -0
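Besides the documentation updates, the METADATA diff below bumps the package version to 1.57.0 and raises the `google-auth` floor to 2.46.0. A minimal sketch for confirming an upgraded environment satisfies the new pins, assuming nothing beyond the standard library's `importlib.metadata`:

```python
from importlib.metadata import version

# Installed distribution versions, read from package metadata.
genai_version = version("google-genai")  # expected: 1.57.0 after upgrading
auth_version = version("google-auth")    # new floor in 1.57.0: >= 2.46.0

print(f"google-genai: {genai_version}")
print(f"google-auth:  {auth_version}")

# Rough numeric check of the new constraints (fine for plain X.Y.Z versions).
assert tuple(int(p) for p in genai_version.split(".")[:3]) >= (1, 57, 0)
assert tuple(int(p) for p in auth_version.split(".")[:2]) >= (2, 46)
```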
google/genai/version.py
CHANGED

{google_genai-1.55.0.dist-info → google_genai-1.57.0.dist-info}/METADATA
CHANGED

````diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: google-genai
-Version: 1.55.0
+Version: 1.57.0
 Summary: GenAI Python SDK
 Author-email: Google LLC <googleapis-packages@google.com>
 License-Expression: Apache-2.0
````
````diff
@@ -20,7 +20,7 @@ Requires-Python: >=3.10
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: anyio<5.0.0,>=4.8.0
-Requires-Dist: google-auth[requests]<3.0.0,>=2.
+Requires-Dist: google-auth[requests]<3.0.0,>=2.46.0
 Requires-Dist: httpx<1.0.0,>=0.28.1
 Requires-Dist: pydantic<3.0.0,>=2.9.0
 Requires-Dist: requests<3.0.0,>=2.28.1
````
````diff
@@ -57,11 +57,7 @@ APIs.
 
 Generative models are often unaware of recent API and SDK updates and may suggest outdated or legacy code.
 
-We recommend using our Code Generation instructions [codegen_instructions.md](https://raw.githubusercontent.com/googleapis/python-genai/refs/heads/main/codegen_instructions.md) when generating Google Gen AI SDK code to guide your model towards using the more recent SDK features.
-
-Copy and paste the instructions from [this file](https://raw.githubusercontent.com/googleapis/python-genai/refs/heads/main/codegen_instructions.md)
-into your development environment to provide the model with the necessary
-context
+We recommend using our Code Generation instructions [`codegen_instructions.md`](https://raw.githubusercontent.com/googleapis/python-genai/refs/heads/main/codegen_instructions.md) when generating Google Gen AI SDK code to guide your model towards using the more recent SDK features. Copy and paste the instructions into your development environment to provide the model with the necessary context.
 
 ## Installation
 
````
````diff
@@ -174,7 +170,7 @@ client = genai.Client()
 ## Close a client
 
 Explicitly close the sync client to ensure that resources, such as the
-
+underlying HTTP connections, are properly cleaned up and closed.
 
 ```python
 from google.genai import Client
````
````diff
@@ -215,7 +211,7 @@ await aclient.aclose()
 ## Client context managers
 
 By using the sync client context manager, it will close the underlying
-
+sync client when exiting the with block and avoid httpx "client has been closed" error like [issues#1763](https://github.com/googleapis/python-genai/issues/1763).
 
 ```python
 from google.genai import Client
````
````diff
@@ -286,7 +282,7 @@ client = genai.Client(
 By default we use httpx for both sync and async client implementations. In order
 to have faster performance, you may install `google-genai[aiohttp]`. In Gen AI
 SDK we configure `trust_env=True` to match with the default behavior of httpx.
-Additional args of `aiohttp.ClientSession.request()` ([see _RequestOptions args](https://github.com/aio-libs/aiohttp/blob/v3.12.13/aiohttp/client.py#L170)) can be passed
+Additional args of `aiohttp.ClientSession.request()` ([see `_RequestOptions` args](https://github.com/aio-libs/aiohttp/blob/v3.12.13/aiohttp/client.py#L170)) can be passed
 through the following way:
 
 ```python
````
````diff
@@ -301,7 +297,7 @@ client=Client(..., http_options=http_options)
 
 Both httpx and aiohttp libraries use `urllib.request.getproxies` from
 environment variables. Before client initialization, you may set proxy (and
-optional SSL_CERT_FILE) by setting the environment variables:
+optional `SSL_CERT_FILE`) by setting the environment variables:
 
 ```bash
 export HTTPS_PROXY='http://username:password@proxy_uri:port'
````
````diff
@@ -324,7 +320,7 @@ client=Client(..., http_options=http_options)
 ### Custom base url
 
 In some cases you might need a custom base url (for example, API gateway proxy
-
+server) and bypass some authentication checks for project, location, or API key.
 You may pass the custom base url like this:
 
 ```python
````
````diff
@@ -383,7 +379,8 @@ for part in response.parts:
 ```
 
 #### with uploaded file (Gemini Developer API only)
-
+
+Download the file in console.
 
 ```sh
 !wget -q https://storage.googleapis.com/generativeai-downloads/data/a11.txt
````
````diff
@@ -401,11 +398,13 @@ print(response.text)
 ```
 
 #### How to structure `contents` argument for `generate_content`
+
 The SDK always converts the inputs to the `contents` argument into
 `list[types.Content]`.
 The following shows some common ways to provide your inputs.
 
 ##### Provide a `list[types.Content]`
+
 This is the canonical way to provide contents, SDK will not do any conversion.
 
 ##### Provide a `types.Content` instance
````
````diff
@@ -451,7 +450,7 @@ The SDK will assume this is a text part, and it converts this into the following
 Where a `types.UserContent` is a subclass of `types.Content`, it sets the
 `role` field to be `user`.
 
-##### Provide a list of
+##### Provide a list of strings
 
 ```python
 contents=['Why is the sky blue?', 'Why is the cloud white?']
````
````diff
@@ -520,7 +519,7 @@ contents = [
 ]
 ```
 
-The SDK converts a list of function call parts to
+The SDK converts a list of function call parts to a content with a `model` role:
 
 ```python
 [
````
````diff
@@ -712,7 +711,9 @@ response = client.models.generate_content(
 
 print(response.text)
 ```
+
 #### Disabling automatic function calling
+
 If you pass in a python function as a tool directly, and do not want
 automatic function calling, you can disable automatic function calling
 as follows:
````
````diff
@@ -944,7 +945,9 @@ including by giving examples of expected JSON output. If you do, the generated
 output might be lower in quality.
 
 #### JSON Schema support
+
 Schemas can be provided as standard JSON schema.
+
 ```python
 user_profile = {
     'properties': {
````
````diff
@@ -1071,7 +1074,7 @@ print(response.text)
 
 #### JSON Response
 
-You can also set response_mime_type to 'application/json'
+You can also set `response_mime_type` to `'application/json'`, the response will be
 identical but in quotes.
 
 ```python
````
````diff
@@ -1561,6 +1564,205 @@ response = client.models.generate_content(
 print(response.text)
 ```
 
+## Interactions (Preview)
+
+> **Warning:** The Interactions API is in **Beta**. This is a preview of an experimental feature. Features and schemas are subject to **breaking changes**.
+
+The Interactions API is a unified interface for interacting with Gemini models and agents. It simplifies state management, tool orchestration, and long-running tasks.
+
+See the [documentation site](https://ai.google.dev/gemini-api/docs/interactions) for more details.
+
+### Basic Interaction
+
+```python
+interaction = client.interactions.create(
+    model='gemini-2.5-flash',
+    input='Tell me a short joke about programming.'
+)
+print(interaction.outputs[-1].text)
+
+```
+
+### Stateful Conversation
+
+The Interactions API supports server-side state management. You can continue a conversation by referencing the `previous_interaction_id`.
+
+```python
+# 1. First turn
+interaction1 = client.interactions.create(
+    model='gemini-2.5-flash',
+    input='Hi, my name is Amir.'
+)
+print(f"Model: {interaction1.outputs[-1].text}")
+
+# 2. Second turn (passing previous_interaction_id)
+interaction2 = client.interactions.create(
+    model='gemini-2.5-flash',
+    input='What is my name?',
+    previous_interaction_id=interaction1.id
+)
+print(f"Model: {interaction2.outputs[-1].text}")
+
+```
+
+### Agents (Deep Research)
+
+You can use specialized agents like `deep-research-pro-preview-12-2025` for complex tasks.
+
+```python
+import time
+
+# 1. Start the Deep Research Agent
+initial_interaction = client.interactions.create(
+    input='Research the history of the Google TPUs with a focus on 2025 and 2026.',
+    agent='deep-research-pro-preview-12-2025',
+    background=True
+)
+print(f"Research started. Interaction ID: {initial_interaction.id}")
+
+# 2. Poll for results
+while True:
+    interaction = client.interactions.get(id=initial_interaction.id)
+    print(f"Status: {interaction.status}")
+
+    if interaction.status == "completed":
+        print("\nFinal Report:\n", interaction.outputs[-1].text)
+        break
+    elif interaction.status in ["failed", "cancelled"]:
+        print(f"Failed with status: {interaction.status}")
+        break
+
+    time.sleep(10)
+
+```
+
+### Multimodal Input
+
+You can provide multimodal data (text, images, audio, etc.) in the input list.
+
+```python
+import base64
+
+# Assuming you have an image loaded as bytes
+# base64_image = ...
+
+interaction = client.interactions.create(
+    model='gemini-2.5-flash',
+    input=[
+        {'type': 'text', 'text': 'Describe the image.'},
+        {'type': 'image', 'data': base64_image, 'mime_type': 'image/png'}
+    ]
+)
+print(interaction.outputs[-1].text)
+
+```
+
+### Function Calling
+
+You can define custom functions for the model to use. The Interactions API handles the tool selection, and you provide the execution result back to the model.
+
+```python
+# 1. Define the tool
+def get_weather(location: str):
+    """Gets the weather for a given location."""
+    return f"The weather in {location} is sunny."
+
+weather_tool = {
+    'type': 'function',
+    'name': 'get_weather',
+    'description': 'Gets the weather for a given location.',
+    'parameters': {
+        'type': 'object',
+        'properties': {
+            'location': {'type': 'string', 'description': 'The city and state, e.g. San Francisco, CA'}
+        },
+        'required': ['location']
+    }
+}
+
+# 2. Send the request with tools
+interaction = client.interactions.create(
+    model='gemini-2.5-flash',
+    input='What is the weather in Mountain View, CA?',
+    tools=[weather_tool]
+)
+
+# 3. Handle the tool call
+for output in interaction.outputs:
+    if output.type == 'function_call':
+        print(f"Tool Call: {output.name}({output.arguments})")
+
+        # Execute your actual function here
+        result = get_weather(**output.arguments)
+
+        # Send result back to the model
+        interaction = client.interactions.create(
+            model='gemini-2.5-flash',
+            previous_interaction_id=interaction.id,
+            input=[{
+                'type': 'function_result',
+                'name': output.name,
+                'call_id': output.id,
+                'result': result
+            }]
+        )
+        print(f"Response: {interaction.outputs[-1].text}")
+
+```
+
+### Built-in Tools
+You can also use Google's built-in tools, such as **Google Search** or **Code Execution**.
+
+#### Grounding with Google Search
+
+```python
+interaction = client.interactions.create(
+    model='gemini-2.5-flash',
+    input='Who won the last Super Bowl?',
+    tools=[{'type': 'google_search'}]
+)
+
+# Find the text output (not the GoogleSearchResultContent)
+text_output = next((o for o in interaction.outputs if o.type == 'text'), None)
+if text_output:
+    print(text_output.text)
+
+```
+
+#### Code Execution
+
+```python
+interaction = client.interactions.create(
+    model='gemini-2.5-flash',
+    input='Calculate the 50th Fibonacci number.',
+    tools=[{'type': 'code_execution'}]
+)
+print(interaction.outputs[-1].text)
+
+```
+
+### Multimodal Output
+
+The Interactions API can generate multimodal outputs, such as images. You must specify the `response_modalities`.
+
+```python
+import base64
+
+interaction = client.interactions.create(
+    model='gemini-3-pro-image-preview',
+    input='Generate an image of a futuristic city.',
+    response_modalities=['IMAGE']
+)
+
+for output in interaction.outputs:
+    if output.type == 'image':
+        print(f"Generated image with mime_type: {output.mime_type}")
+        # Save the image
+        with open("generated_city.png", "wb") as f:
+            f.write(base64.b64decode(output.data))
+
+```
+
 ## Tunings
 
 `client.tunings` contains tuning job APIs and supports supervised fine
````
````diff
@@ -1756,13 +1958,14 @@ job
 ```
 
 In order to create a batch job with file name. Need to upload a json file.
-For example myrequests.json
+For example `myrequests.json`:
 
-```
+```json
 {"key":"request_1", "request": {"contents": [{"parts": [{"text":
 "Explain how AI works in a few words"}]}], "generation_config": {"response_modalities": ["TEXT"]}}}
 {"key":"request_2", "request": {"contents": [{"parts": [{"text": "Explain how Crypto works in a few words"}]}]}}
 ```
+
 Then upload the file.
 
 ```python
````
````diff
@@ -1779,7 +1982,6 @@ batch_job = client.batches.create(
 )
 ```
 
-
 ```python
 # Get a job by name
 job = client.batches.get(name=job.name)
````
````diff
@@ -1850,7 +2052,7 @@ delete_job
 
 ## Error Handling
 
-To handle errors raised by the model service, the SDK provides this [APIError](https://github.com/googleapis/python-genai/blob/main/google/genai/errors.py) class.
+To handle errors raised by the model service, the SDK provides this [`APIError`](https://github.com/googleapis/python-genai/blob/main/google/genai/errors.py) class.
 
 ```python
 from google.genai import errors
````
````diff
@@ -1872,8 +2074,8 @@ properties to include in the request body. This can be used to access new or
 experimental backend features that are not yet formally supported in the SDK.
 The structure of the dictionary must match the backend API's request structure.
 
--
--
+- Vertex AI backend API docs: https://cloud.google.com/vertex-ai/docs/reference/rest
+- Gemini API backend API docs: https://ai.google.dev/api/rest
 
 ```python
 response = client.models.generate_content(
````