google-genai 1.26.0__tar.gz → 1.28.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {google_genai-1.26.0/google_genai.egg-info → google_genai-1.28.0}/PKG-INFO +61 -27
- {google_genai-1.26.0 → google_genai-1.28.0}/README.md +60 -26
- {google_genai-1.26.0 → google_genai-1.28.0}/google/genai/__init__.py +1 -0
- {google_genai-1.26.0 → google_genai-1.28.0}/google/genai/_api_client.py +143 -39
- {google_genai-1.26.0 → google_genai-1.28.0}/google/genai/_common.py +9 -6
- {google_genai-1.26.0 → google_genai-1.28.0}/google/genai/_extra_utils.py +8 -8
- {google_genai-1.26.0 → google_genai-1.28.0}/google/genai/_mcp_utils.py +4 -1
- {google_genai-1.26.0 → google_genai-1.28.0}/google/genai/_replay_api_client.py +6 -2
- {google_genai-1.26.0 → google_genai-1.28.0}/google/genai/_transformers.py +13 -12
- {google_genai-1.26.0 → google_genai-1.28.0}/google/genai/batches.py +16 -2
- {google_genai-1.26.0 → google_genai-1.28.0}/google/genai/caches.py +16 -2
- {google_genai-1.26.0 → google_genai-1.28.0}/google/genai/errors.py +1 -1
- {google_genai-1.26.0 → google_genai-1.28.0}/google/genai/files.py +11 -2
- {google_genai-1.26.0 → google_genai-1.28.0}/google/genai/live.py +2 -3
- {google_genai-1.26.0 → google_genai-1.28.0}/google/genai/models.py +229 -18
- {google_genai-1.26.0 → google_genai-1.28.0}/google/genai/pagers.py +13 -3
- {google_genai-1.26.0 → google_genai-1.28.0}/google/genai/tokens.py +3 -3
- {google_genai-1.26.0 → google_genai-1.28.0}/google/genai/tunings.py +49 -8
- {google_genai-1.26.0 → google_genai-1.28.0}/google/genai/types.py +108 -2
- {google_genai-1.26.0 → google_genai-1.28.0}/google/genai/version.py +1 -1
- {google_genai-1.26.0 → google_genai-1.28.0/google_genai.egg-info}/PKG-INFO +61 -27
- {google_genai-1.26.0 → google_genai-1.28.0}/pyproject.toml +1 -1
- {google_genai-1.26.0 → google_genai-1.28.0}/LICENSE +0 -0
- {google_genai-1.26.0 → google_genai-1.28.0}/MANIFEST.in +0 -0
- {google_genai-1.26.0 → google_genai-1.28.0}/google/genai/_adapters.py +0 -0
- {google_genai-1.26.0 → google_genai-1.28.0}/google/genai/_api_module.py +0 -0
- {google_genai-1.26.0 → google_genai-1.28.0}/google/genai/_automatic_function_calling_util.py +0 -0
- {google_genai-1.26.0 → google_genai-1.28.0}/google/genai/_base_url.py +0 -0
- {google_genai-1.26.0 → google_genai-1.28.0}/google/genai/_live_converters.py +0 -0
- {google_genai-1.26.0 → google_genai-1.28.0}/google/genai/_test_api_client.py +0 -0
- {google_genai-1.26.0 → google_genai-1.28.0}/google/genai/_tokens_converters.py +0 -0
- {google_genai-1.26.0 → google_genai-1.28.0}/google/genai/chats.py +0 -0
- {google_genai-1.26.0 → google_genai-1.28.0}/google/genai/client.py +0 -0
- {google_genai-1.26.0 → google_genai-1.28.0}/google/genai/live_music.py +0 -0
- {google_genai-1.26.0 → google_genai-1.28.0}/google/genai/operations.py +0 -0
- {google_genai-1.26.0 → google_genai-1.28.0}/google/genai/py.typed +0 -0
- {google_genai-1.26.0 → google_genai-1.28.0}/google_genai.egg-info/SOURCES.txt +0 -0
- {google_genai-1.26.0 → google_genai-1.28.0}/google_genai.egg-info/dependency_links.txt +0 -0
- {google_genai-1.26.0 → google_genai-1.28.0}/google_genai.egg-info/requires.txt +0 -0
- {google_genai-1.26.0 → google_genai-1.28.0}/google_genai.egg-info/top_level.txt +0 -0
- {google_genai-1.26.0 → google_genai-1.28.0}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: google-genai
-Version: 1.26.0
+Version: 1.28.0
 Summary: GenAI Python SDK
 Author-email: Google LLC <googleapis-packages@google.com>
 License: Apache-2.0
@@ -1404,35 +1404,21 @@ print(response.text)
 ## Tunings

 `client.tunings` contains tuning job APIs and supports supervised fine
-tuning through `tune`. See the 'Create a client' section above to initialize a
-client.
+tuning through `tune`. Only supported in Vertex AI. See the 'Create a client'
+section above to initialize a client.

 ### Tune

 - Vertex AI supports tuning from GCS source or from a Vertex Multimodal Dataset
-- Gemini Developer API supports tuning from inline examples

 ```python
 from google.genai import types

-if client.vertexai:
-    model = 'gemini-2.0-flash-001'
-    training_dataset = types.TuningDataset(
-        # or gcs_uri=my_vertex_multimodal_dataset.resource_name
-        gcs_uri='gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl',
-    )
-else:
-    model = 'models/gemini-2.0-flash-001'
-    # or gcs_uri=my_vertex_multimodal_dataset.resource_name
-    training_dataset = types.TuningDataset(
-        examples=[
-            types.TuningExample(
-                text_input=f'Input text {i}',
-                output=f'Output text {i}',
-            )
-            for i in range(5)
-        ],
-    )
+model = 'gemini-2.0-flash-001'
+training_dataset = types.TuningDataset(
+    # or gcs_uri=my_vertex_multimodal_dataset
+    gcs_uri='gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl',
+)
 ```

 ```python
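For orientation, the dataset built in the new snippet above is what gets passed to `client.tunings.tune`. A minimal sketch of that call, based on the SDK's documented tuning API; the `CreateTuningJobConfig` values and display name are illustrative rather than taken from this diff:

```python
from google import genai
from google.genai import types

# Tuning is Vertex AI only, so this assumes a Vertex client.
client = genai.Client(vertexai=True, project='my-project', location='us-central1')

training_dataset = types.TuningDataset(
    gcs_uri='gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl',
)

tuning_job = client.tunings.tune(
    base_model='gemini-2.0-flash-001',
    training_dataset=training_dataset,
    config=types.CreateTuningJobConfig(
        epoch_count=1,  # illustrative value
        tuned_model_display_name='my tuned model',  # illustrative value
    ),
)
print(tuning_job)
```

The polling loop in the next hunk then waits on this `tuning_job`.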
@@ -1458,14 +1444,15 @@ print(tuning_job)
 ```python
 import time

-running_states = set(
+completed_states = set(
     [
-        'JOB_STATE_PENDING',
-        'JOB_STATE_RUNNING',
+        'JOB_STATE_SUCCEEDED',
+        'JOB_STATE_FAILED',
+        'JOB_STATE_CANCELLED',
     ]
 )

-while tuning_job.state in running_states:
+while tuning_job.state not in completed_states:
     print(tuning_job.state)
     tuning_job = client.tunings.get(name=tuning_job.name)
     time.sleep(10)
@@ -1576,16 +1563,63 @@ initialize a client.

 ### Create

+Vertex AI:
+
 ```python
 # Specify model and source file only, destination and job display name will be auto-populated
 job = client.batches.create(
     model='gemini-2.0-flash-001',
-    src='bq://my-project.my-dataset.my-table',
+    src='bq://my-project.my-dataset.my-table', # or "gs://path/to/input/data"
 )

 job
 ```

+Gemini Developer API:
+
+```python
+# Create a batch job with inlined requests
+batch_job = client.batches.create(
+    model="gemini-2.0-flash",
+    src=[{
+        "contents": [{
+            "parts": [{
+                "text": "Hello!",
+            }],
+            "role": "user",
+        }],
+        "config:": {"response_modalities": ["text"]},
+    }],
+)
+
+job
+```
+
+In order to create a batch job with file name. Need to upload a jsonl file.
+For example myrequests.json:
+
+```
+{"key":"request_1", "request": {"contents": [{"parts": [{"text":
+"Explain how AI works in a few words"}]}], "generation_config": {"response_modalities": ["TEXT"]}}}
+{"key":"request_2", "request": {"contents": [{"parts": [{"text": "Explain how Crypto works in a few words"}]}]}}
+```
+Then upload the file.
+
+```python
+# Upload the file
+file = client.files.upload(
+    file='myrequest.json',
+    config=types.UploadFileConfig(display_name='test_json')
+)
+
+# Create a batch job with file name
+batch_job = client.batches.create(
+    model="gemini-2.0-flash",
+    src="files/file_name",
+)
+```
+
+
 ```python
 # Get a job by name
 job = client.batches.get(name=job.name)
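A follow-up sketch, not part of the diff, for waiting on the batch job created above; the terminal states mirror the `JOB_STATE_*` values used in the tuning polling loop earlier in this README, and the polling interval is arbitrary:

```python
import time

from google.genai import types

# Terminal states for a batch job; members mirror the JOB_STATE_* strings above.
completed_states = {
    types.JobState.JOB_STATE_SUCCEEDED,
    types.JobState.JOB_STATE_FAILED,
    types.JobState.JOB_STATE_CANCELLED,
}

job = client.batches.get(name=job.name)
while job.state not in completed_states:
    time.sleep(30)  # arbitrary polling interval
    job = client.batches.get(name=job.name)

print(f'Batch job finished in state: {job.state}')
```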
@@ -1370,35 +1370,21 @@ print(response.text)
 ## Tunings

 `client.tunings` contains tuning job APIs and supports supervised fine
-tuning through `tune`. See the 'Create a client' section above to initialize a
-client.
+tuning through `tune`. Only supported in Vertex AI. See the 'Create a client'
+section above to initialize a client.

 ### Tune

 - Vertex AI supports tuning from GCS source or from a Vertex Multimodal Dataset
-- Gemini Developer API supports tuning from inline examples

 ```python
 from google.genai import types

-if client.vertexai:
-    model = 'gemini-2.0-flash-001'
-    training_dataset = types.TuningDataset(
-        # or gcs_uri=my_vertex_multimodal_dataset.resource_name
-        gcs_uri='gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl',
-    )
-else:
-    model = 'models/gemini-2.0-flash-001'
-    # or gcs_uri=my_vertex_multimodal_dataset.resource_name
-    training_dataset = types.TuningDataset(
-        examples=[
-            types.TuningExample(
-                text_input=f'Input text {i}',
-                output=f'Output text {i}',
-            )
-            for i in range(5)
-        ],
-    )
+model = 'gemini-2.0-flash-001'
+training_dataset = types.TuningDataset(
+    # or gcs_uri=my_vertex_multimodal_dataset
+    gcs_uri='gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl',
+)
 ```

 ```python
@@ -1424,14 +1410,15 @@ print(tuning_job)
 ```python
 import time

-running_states = set(
+completed_states = set(
     [
-        'JOB_STATE_PENDING',
-        'JOB_STATE_RUNNING',
+        'JOB_STATE_SUCCEEDED',
+        'JOB_STATE_FAILED',
+        'JOB_STATE_CANCELLED',
     ]
 )

-while tuning_job.state in running_states:
+while tuning_job.state not in completed_states:
     print(tuning_job.state)
     tuning_job = client.tunings.get(name=tuning_job.name)
     time.sleep(10)
@@ -1542,16 +1529,63 @@ initialize a client.

 ### Create

+Vertex AI:
+
 ```python
 # Specify model and source file only, destination and job display name will be auto-populated
 job = client.batches.create(
     model='gemini-2.0-flash-001',
-    src='bq://my-project.my-dataset.my-table',
+    src='bq://my-project.my-dataset.my-table', # or "gs://path/to/input/data"
 )

 job
 ```

+Gemini Developer API:
+
+```python
+# Create a batch job with inlined requests
+batch_job = client.batches.create(
+    model="gemini-2.0-flash",
+    src=[{
+        "contents": [{
+            "parts": [{
+                "text": "Hello!",
+            }],
+            "role": "user",
+        }],
+        "config:": {"response_modalities": ["text"]},
+    }],
+)
+
+job
+```
+
+In order to create a batch job with file name. Need to upload a jsonl file.
+For example myrequests.json:
+
+```
+{"key":"request_1", "request": {"contents": [{"parts": [{"text":
+"Explain how AI works in a few words"}]}], "generation_config": {"response_modalities": ["TEXT"]}}}
+{"key":"request_2", "request": {"contents": [{"parts": [{"text": "Explain how Crypto works in a few words"}]}]}}
+```
+Then upload the file.
+
+```python
+# Upload the file
+file = client.files.upload(
+    file='myrequest.json',
+    config=types.UploadFileConfig(display_name='test_json')
+)
+
+# Create a batch job with file name
+batch_job = client.batches.create(
+    model="gemini-2.0-flash",
+    src="files/file_name",
+)
+```
+
+
 ```python
 # Get a job by name
 job = client.batches.get(name=job.name)
@@ -32,6 +32,7 @@ import logging
 import math
 import os
 import ssl
+import random
 import sys
 import threading
 import time
@@ -81,6 +82,7 @@ if TYPE_CHECKING:

 logger = logging.getLogger('google_genai._api_client')
 CHUNK_SIZE = 8 * 1024 * 1024  # 8 MB chunk size
+READ_BUFFER_SIZE = 2**20
 MAX_RETRY_COUNT = 3
 INITIAL_RETRY_DELAY = 1  # second
 DELAY_MULTIPLIER = 2
@@ -363,7 +365,7 @@ _RETRY_HTTP_STATUS_CODES = (
 )


-def _retry_args(options: Optional[HttpRetryOptions]) -> dict[str, Any]:
+def _retry_args(options: Optional[HttpRetryOptions]) -> _common.StringDict:
  """Returns the retry args for the given http retry options.

  Args:
@@ -533,17 +535,30 @@ class BaseApiClient:
              + ' precedence over the API key from the environment variables.'
          )
          self.api_key = None
-      if not self.project and not self.api_key:
+
+      # Skip fetching project from ADC if base url is provided in http options.
+      if (
+          not self.project
+          and not self.api_key
+          and not validated_http_options.base_url
+      ):
        credentials, self.project = _load_auth(project=None)
        if not self._credentials:
          self._credentials = credentials
-      if not ((self.project and self.location) or self.api_key):
+
+      has_sufficient_auth = (self.project and self.location) or self.api_key
+
+      if (not has_sufficient_auth and not validated_http_options.base_url):
+        # Skip sufficient auth check if base url is provided in http options.
        raise ValueError(
            'Project and location or API key must be set when using the Vertex '
            'AI API.'
        )
      if self.api_key or self.location == 'global':
        self._http_options.base_url = f'https://aiplatform.googleapis.com/'
+      elif validated_http_options.base_url and not has_sufficient_auth:
+        # Avoid setting default base url and api version if base_url provided.
+        self._http_options.base_url = validated_http_options.base_url
      else:
        self._http_options.base_url = (
            f'https://{self.location}-aiplatform.googleapis.com/'
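The practical effect of this hunk is that a Vertex client pointed at a custom `base_url` no longer needs a project and location resolved from ADC. A minimal sketch under that assumption; the gateway URL is a placeholder, and whatever sits at that URL is assumed to handle authentication:

```python
from google import genai
from google.genai import types

# With base_url set, the client skips the ADC project lookup and the
# project/location requirement checked above.
client = genai.Client(
    vertexai=True,
    http_options=types.HttpOptions(base_url='https://my-gateway.example.com'),
)
```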
@@ -592,7 +607,7 @@ class BaseApiClient:
  @staticmethod
  def _ensure_httpx_ssl_ctx(
      options: HttpOptions,
-  ) -> Tuple[dict[str, Any], dict[str, Any]]:
+  ) -> Tuple[_common.StringDict, _common.StringDict]:
    """Ensures the SSL context is present in the HTTPX client args.

    Creates a default SSL context if one is not provided.
@@ -626,9 +641,9 @@ class BaseApiClient:
    )

    def _maybe_set(
-        args: Optional[dict[str, Any]],
+        args: Optional[_common.StringDict],
        ctx: ssl.SSLContext,
-    ) -> dict[str, Any]:
+    ) -> _common.StringDict:
      """Sets the SSL context in the client args if not set.

      Does not override the SSL context if it is already set.
@@ -656,7 +671,7 @@ class BaseApiClient:
    )

  @staticmethod
-  def _ensure_aiohttp_ssl_ctx(options: HttpOptions) -> dict[str, Any]:
+  def _ensure_aiohttp_ssl_ctx(options: HttpOptions) -> _common.StringDict:
    """Ensures the SSL context is present in the async client args.

    Creates a default SSL context if one is not provided.
@@ -684,9 +699,9 @@ class BaseApiClient:
    )

    def _maybe_set(
-        args: Optional[dict[str, Any]],
+        args: Optional[_common.StringDict],
        ctx: ssl.SSLContext,
-    ) -> dict[str, Any]:
+    ) -> _common.StringDict:
      """Sets the SSL context in the client args if not set.

      Does not override the SSL context if it is already set.
@@ -714,7 +729,7 @@ class BaseApiClient:
    return _maybe_set(async_args, ctx)

  @staticmethod
-  def _ensure_websocket_ssl_ctx(options: HttpOptions) -> dict[str, Any]:
+  def _ensure_websocket_ssl_ctx(options: HttpOptions) -> _common.StringDict:
    """Ensures the SSL context is present in the async client args.

    Creates a default SSL context if one is not provided.
@@ -742,9 +757,9 @@ class BaseApiClient:
    )

    def _maybe_set(
-        args: Optional[dict[str, Any]],
+        args: Optional[_common.StringDict],
        ctx: ssl.SSLContext,
-    ) -> dict[str, Any]:
+    ) -> _common.StringDict:
      """Sets the SSL context in the client args if not set.

      Does not override the SSL context if it is already set.
@@ -864,7 +879,7 @@ class BaseApiClient:
        self.vertexai
        and not path.startswith('projects/')
        and not query_vertex_base_models
-        and not self.api_key
+        and (self.project or self.location)
    ):
      path = f'projects/{self.project}/locations/{self.location}/' + path

@@ -920,7 +935,8 @@ class BaseApiClient:
      stream: bool = False,
  ) -> HttpResponse:
    data: Optional[Union[str, bytes]] = None
-    if self.vertexai:
+    # If using proj/location, fetch ADC
+    if self.vertexai and (self.project or self.location):
      http_request.headers['Authorization'] = f'Bearer {self._access_token()}'
      if self._credentials and self._credentials.quota_project_id:
        http_request.headers['x-goog-user-project'] = (
@@ -963,8 +979,21 @@ class BaseApiClient:
  def _request(
      self,
      http_request: HttpRequest,
+      http_options: Optional[HttpOptionsOrDict] = None,
      stream: bool = False,
  ) -> HttpResponse:
+    if http_options:
+      parameter_model = (
+          HttpOptions(**http_options)
+          if isinstance(http_options, dict)
+          else http_options
+      )
+      # Support per request retry options.
+      if parameter_model.retry_options:
+        retry_kwargs = _retry_args(parameter_model.retry_options)
+        retry = tenacity.Retrying(**retry_kwargs)
+        return retry(self._request_once, http_request, stream)  # type: ignore[no-any-return]
+
    return self._retry(self._request_once, http_request, stream)  # type: ignore[no-any-return]

  async def _async_request_once(
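The new `http_options` parameter is the path by which per-request retry settings reach `tenacity`. A caller-side sketch, assuming the public `types.HttpRetryOptions` fields used here; the attempt and delay values are illustrative:

```python
from google import genai
from google.genai import types

client = genai.Client()

response = client.models.generate_content(
    model='gemini-2.0-flash-001',
    contents='Hello',
    config=types.GenerateContentConfig(
        http_options=types.HttpOptions(
            retry_options=types.HttpRetryOptions(
                attempts=5,  # illustrative values
                initial_delay=1.0,
                exp_base=2.0,
            ),
        ),
    ),
)
print(response.text)
```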
@@ -972,7 +1001,8 @@ class BaseApiClient:
  ) -> HttpResponse:
    data: Optional[Union[str, bytes]] = None

-    if self.vertexai:
+    # If using proj/location, fetch ADC
+    if self.vertexai and (self.project or self.location):
      http_request.headers['Authorization'] = (
          f'Bearer {await self._async_access_token()}'
      )
@@ -993,15 +1023,43 @@ class BaseApiClient:
        session = aiohttp.ClientSession(
            headers=http_request.headers,
            trust_env=True,
+            read_bufsize=READ_BUFFER_SIZE,
        )
-        response = await session.request(
-            method=http_request.method,
-            url=http_request.url,
-            headers=http_request.headers,
-            data=data,
-            timeout=aiohttp.ClientTimeout(connect=http_request.timeout),
-            **self._async_client_session_request_args,
-        )
+        try:
+          response = await session.request(
+              method=http_request.method,
+              url=http_request.url,
+              headers=http_request.headers,
+              data=data,
+              timeout=aiohttp.ClientTimeout(connect=http_request.timeout),
+              **self._async_client_session_request_args,
+          )
+        except (
+            aiohttp.ClientConnectorError,
+            aiohttp.ClientConnectorDNSError,
+            aiohttp.ClientOSError,
+            aiohttp.ServerDisconnectedError,
+        ) as e:
+          await asyncio.sleep(1 + random.randint(0, 9))
+          logger.info('Retrying due to aiohttp error: %s' % e)
+          # Retrieve the SSL context from the session.
+          self._async_client_session_request_args = (
+              self._ensure_aiohttp_ssl_ctx(self._http_options)
+          )
+          # Instantiate a new session with the updated SSL context.
+          session = aiohttp.ClientSession(
+              headers=http_request.headers,
+              trust_env=True,
+              read_bufsize=READ_BUFFER_SIZE,
+          )
+          response = await session.request(
+              method=http_request.method,
+              url=http_request.url,
+              headers=http_request.headers,
+              data=data,
+              timeout=aiohttp.ClientTimeout(connect=http_request.timeout),
+              **self._async_client_session_request_args,
+          )

        await errors.APIError.raise_for_async_response(response)
        return HttpResponse(response.headers, response, session=session)
@@ -1022,20 +1080,50 @@ class BaseApiClient:
        return HttpResponse(client_response.headers, client_response)
    else:
      if self._use_aiohttp():
-        async with aiohttp.ClientSession(
-            headers=http_request.headers,
-            trust_env=True,
-        ) as session:
-          response = await session.request(
-              method=http_request.method,
-              url=http_request.url,
+        try:
+          async with aiohttp.ClientSession(
              headers=http_request.headers,
-              data=data,
-              timeout=aiohttp.ClientTimeout(connect=http_request.timeout),
-              **self._async_client_session_request_args,
+              trust_env=True,
+              read_bufsize=READ_BUFFER_SIZE,
+          ) as session:
+            response = await session.request(
+                method=http_request.method,
+                url=http_request.url,
+                headers=http_request.headers,
+                data=data,
+                timeout=aiohttp.ClientTimeout(connect=http_request.timeout),
+                **self._async_client_session_request_args,
+            )
+            await errors.APIError.raise_for_async_response(response)
+            return HttpResponse(response.headers, [await response.text()])
+        except (
+            aiohttp.ClientConnectorError,
+            aiohttp.ClientConnectorDNSError,
+            aiohttp.ClientOSError,
+            aiohttp.ServerDisconnectedError,
+        ) as e:
+          await asyncio.sleep(1 + random.randint(0, 9))
+          logger.info('Retrying due to aiohttp error: %s' % e)
+          # Retrieve the SSL context from the session.
+          self._async_client_session_request_args = (
+              self._ensure_aiohttp_ssl_ctx(self._http_options)
          )
-          await errors.APIError.raise_for_async_response(response)
-          return HttpResponse(response.headers, [await response.text()])
+          # Instantiate a new session with the updated SSL context.
+          async with aiohttp.ClientSession(
+              headers=http_request.headers,
+              trust_env=True,
+              read_bufsize=READ_BUFFER_SIZE,
+          ) as session:
+            response = await session.request(
+                method=http_request.method,
+                url=http_request.url,
+                headers=http_request.headers,
+                data=data,
+                timeout=aiohttp.ClientTimeout(connect=http_request.timeout),
+                **self._async_client_session_request_args,
+            )
+            await errors.APIError.raise_for_async_response(response)
+            return HttpResponse(response.headers, [await response.text()])
      else:
        # aiohttp is not available. Fall back to httpx.
        client_response = await self._async_httpx_client.request(
@@ -1051,13 +1139,25 @@ class BaseApiClient:
  async def _async_request(
      self,
      http_request: HttpRequest,
+      http_options: Optional[HttpOptionsOrDict] = None,
      stream: bool = False,
  ) -> HttpResponse:
+    if http_options:
+      parameter_model = (
+          HttpOptions(**http_options)
+          if isinstance(http_options, dict)
+          else http_options
+      )
+      # Support per request retry options.
+      if parameter_model.retry_options:
+        retry_kwargs = _retry_args(parameter_model.retry_options)
+        retry = tenacity.AsyncRetrying(**retry_kwargs)
+        return await retry(self._async_request_once, http_request, stream)  # type: ignore[no-any-return]
    return await self._async_retry(  # type: ignore[no-any-return]
        self._async_request_once, http_request, stream
    )

-  def get_read_only_http_options(self) -> dict[str, Any]:
+  def get_read_only_http_options(self) -> _common.StringDict:
    if isinstance(self._http_options, BaseModel):
      copied = self._http_options.model_dump()
    else:
@@ -1074,7 +1174,7 @@ class BaseApiClient:
    http_request = self._build_request(
        http_method, path, request_dict, http_options
    )
-    response = self._request(http_request, stream=False)
+    response = self._request(http_request, http_options, stream=False)
    response_body = (
        response.response_stream[0] if response.response_stream else ''
    )
@@ -1091,7 +1191,7 @@ class BaseApiClient:
        http_method, path, request_dict, http_options
    )

-    session_response = self._request(http_request, stream=True)
+    session_response = self._request(http_request, http_options, stream=True)
    for chunk in session_response.segments():
      yield SdkHttpResponse(
          headers=session_response.headers, body=json.dumps(chunk)
@@ -1108,7 +1208,9 @@ class BaseApiClient:
        http_method, path, request_dict, http_options
    )

-    result = await self._async_request(http_request=http_request, stream=False)
+    result = await self._async_request(
+        http_request=http_request, http_options=http_options, stream=False
+    )
    response_body = result.response_stream[0] if result.response_stream else ''
    return SdkHttpResponse(headers=result.headers, body=response_body)

@@ -1340,6 +1442,7 @@ class BaseApiClient:
    async with aiohttp.ClientSession(
        headers=self._http_options.headers,
        trust_env=True,
+        read_bufsize=READ_BUFFER_SIZE,
    ) as session:
      while True:
        if isinstance(file, io.IOBase):
@@ -1523,6 +1626,7 @@ class BaseApiClient:
    async with aiohttp.ClientSession(
        headers=http_request.headers,
        trust_env=True,
+        read_bufsize=READ_BUFFER_SIZE,
    ) as session:
      response = await session.request(
          method=http_request.method,
@@ -22,15 +22,17 @@ import enum
 import functools
 import logging
 import typing
-from typing import Any, Callable, Optional, Union, get_args, get_origin
+from typing import Any, Callable, FrozenSet, Optional, Union, get_args, get_origin
 import uuid
 import warnings
-
 import pydantic
 from pydantic import alias_generators
+from typing_extensions import TypeAlias

 logger = logging.getLogger('google_genai._common')

+StringDict: TypeAlias = dict[str, Any]
+

 class ExperimentalWarning(Warning):
  """Warning for experimental features."""
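`StringDict` is simply a named alias for the `dict[str, Any]` annotation that the remaining hunks switch to. A standalone sketch of the pattern; the `merge_headers` helper is hypothetical and only illustrates how the alias reads in signatures:

```python
from typing import Any

from typing_extensions import TypeAlias

# One named alias instead of repeating dict[str, Any] in every signature.
StringDict: TypeAlias = dict[str, Any]


def merge_headers(base: StringDict, extra: StringDict) -> StringDict:
  """Returns a new dict with `extra` layered over `base`."""
  merged: StringDict = dict(base)
  merged.update(extra)
  return merged


print(merge_headers({'x-goog-api-key': 'abc'}, {'user-agent': 'demo'}))
```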
@@ -355,7 +357,6 @@ def _pretty_repr(
  return raw_repr.replace('\n', f'\n{next_indent_str}')


-
 def _format_collection(
    obj: Any,
    *,
@@ -555,7 +556,9 @@ def _normalize_key_for_matching(key_str: str) -> str:
  return key_str.replace("_", "").lower()


-def align_key_case(target_dict: dict[str, Any], update_dict: dict[str, Any]) -> dict[str, Any]:
+def align_key_case(
+    target_dict: StringDict, update_dict: StringDict
+) -> StringDict:
  """Aligns the keys of update_dict to the case of target_dict keys.

  Args:
@@ -565,7 +568,7 @@ def align_key_case(target_dict: dict[str, Any], update_dict: dict[str, Any]) ->
  Returns:
    A new dictionary with keys aligned to target_dict's key casing.
  """
-  aligned_update_dict: dict[str, Any] = {}
+  aligned_update_dict: StringDict = {}
  target_keys_map = {_normalize_key_for_matching(key): key for key in target_dict.keys()}

  for key, value in update_dict.items():
@@ -587,7 +590,7 @@ def align_key_case(target_dict: dict[str, Any], update_dict: dict[str, Any]) ->


 def recursive_dict_update(
-    target_dict: dict[str, Any], update_dict: dict[str, Any]
+    target_dict: StringDict, update_dict: StringDict
 ) -> None:
  """Recursively updates a target dictionary with values from an update dictionary.
