google-genai 1.25.0.tar.gz → 1.27.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. {google_genai-1.25.0/google_genai.egg-info → google_genai-1.27.0}/PKG-INFO +104 -36
  2. {google_genai-1.25.0 → google_genai-1.27.0}/README.md +103 -35
  3. {google_genai-1.25.0 → google_genai-1.27.0}/google/genai/_extra_utils.py +1 -1
  4. {google_genai-1.25.0 → google_genai-1.27.0}/google/genai/_live_converters.py +1624 -1922
  5. google_genai-1.27.0/google/genai/_tokens_converters.py +817 -0
  6. {google_genai-1.25.0 → google_genai-1.27.0}/google/genai/_transformers.py +21 -11
  7. google_genai-1.27.0/google/genai/batches.py +2693 -0
  8. {google_genai-1.25.0 → google_genai-1.27.0}/google/genai/caches.py +16 -67
  9. {google_genai-1.25.0 → google_genai-1.27.0}/google/genai/files.py +15 -2
  10. {google_genai-1.25.0 → google_genai-1.27.0}/google/genai/live.py +8 -10
  11. {google_genai-1.25.0 → google_genai-1.27.0}/google/genai/models.py +150 -168
  12. {google_genai-1.25.0 → google_genai-1.27.0}/google/genai/operations.py +36 -266
  13. {google_genai-1.25.0 → google_genai-1.27.0}/google/genai/pagers.py +11 -1
  14. {google_genai-1.25.0 → google_genai-1.27.0}/google/genai/tunings.py +23 -48
  15. {google_genai-1.25.0 → google_genai-1.27.0}/google/genai/types.py +158 -48
  16. {google_genai-1.25.0 → google_genai-1.27.0}/google/genai/version.py +1 -1
  17. {google_genai-1.25.0 → google_genai-1.27.0/google_genai.egg-info}/PKG-INFO +104 -36
  18. {google_genai-1.25.0 → google_genai-1.27.0}/pyproject.toml +1 -1
  19. google_genai-1.25.0/google/genai/_tokens_converters.py +0 -1680
  20. google_genai-1.25.0/google/genai/batches.py +0 -5768
  21. {google_genai-1.25.0 → google_genai-1.27.0}/LICENSE +0 -0
  22. {google_genai-1.25.0 → google_genai-1.27.0}/MANIFEST.in +0 -0
  23. {google_genai-1.25.0 → google_genai-1.27.0}/google/genai/__init__.py +0 -0
  24. {google_genai-1.25.0 → google_genai-1.27.0}/google/genai/_adapters.py +0 -0
  25. {google_genai-1.25.0 → google_genai-1.27.0}/google/genai/_api_client.py +0 -0
  26. {google_genai-1.25.0 → google_genai-1.27.0}/google/genai/_api_module.py +0 -0
  27. {google_genai-1.25.0 → google_genai-1.27.0}/google/genai/_automatic_function_calling_util.py +0 -0
  28. {google_genai-1.25.0 → google_genai-1.27.0}/google/genai/_base_url.py +0 -0
  29. {google_genai-1.25.0 → google_genai-1.27.0}/google/genai/_common.py +0 -0
  30. {google_genai-1.25.0 → google_genai-1.27.0}/google/genai/_mcp_utils.py +0 -0
  31. {google_genai-1.25.0 → google_genai-1.27.0}/google/genai/_replay_api_client.py +0 -0
  32. {google_genai-1.25.0 → google_genai-1.27.0}/google/genai/_test_api_client.py +0 -0
  33. {google_genai-1.25.0 → google_genai-1.27.0}/google/genai/chats.py +0 -0
  34. {google_genai-1.25.0 → google_genai-1.27.0}/google/genai/client.py +0 -0
  35. {google_genai-1.25.0 → google_genai-1.27.0}/google/genai/errors.py +0 -0
  36. {google_genai-1.25.0 → google_genai-1.27.0}/google/genai/live_music.py +0 -0
  37. {google_genai-1.25.0 → google_genai-1.27.0}/google/genai/py.typed +0 -0
  38. {google_genai-1.25.0 → google_genai-1.27.0}/google/genai/tokens.py +0 -0
  39. {google_genai-1.25.0 → google_genai-1.27.0}/google_genai.egg-info/SOURCES.txt +0 -0
  40. {google_genai-1.25.0 → google_genai-1.27.0}/google_genai.egg-info/dependency_links.txt +0 -0
  41. {google_genai-1.25.0 → google_genai-1.27.0}/google_genai.egg-info/requires.txt +0 -0
  42. {google_genai-1.25.0 → google_genai-1.27.0}/google_genai.egg-info/top_level.txt +0 -0
  43. {google_genai-1.25.0 → google_genai-1.27.0}/setup.cfg +0 -0
{google_genai-1.25.0/google_genai.egg-info → google_genai-1.27.0}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: google-genai
- Version: 1.25.0
+ Version: 1.27.0
  Summary: GenAI Python SDK
  Author-email: Google LLC <googleapis-packages@google.com>
  License: Apache-2.0
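The metadata change is just the version bump, mirrored in `version.py` and `pyproject.toml` in the file list above. A minimal sketch for confirming the upgrade locally, assuming `__version__` is re-exported at the package level from `google/genai/version.py`:

```python
# Sketch: verify the installed SDK version after upgrading.
# Assumption: __version__ is re-exported at package level.
from google import genai

print(genai.__version__)  # expected: 1.27.0
```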
@@ -609,16 +609,16 @@ from google.genai import types
  function = types.FunctionDeclaration(
      name='get_current_weather',
      description='Get the current weather in a given location',
-     parameters=types.Schema(
-         type='OBJECT',
-         properties={
-             'location': types.Schema(
-                 type='STRING',
-                 description='The city and state, e.g. San Francisco, CA',
-             ),
+     parameters_json_schema={
+         'type': 'object',
+         'properties': {
+             'location': {
+                 'type': 'string',
+                 'description': 'The city and state, e.g. San Francisco, CA',
+             }
          },
-         required=['location'],
-     ),
+         'required': ['location'],
+     },
  )

  tool = types.Tool(function_declarations=[function])
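To see the migrated declaration in context, here is a hedged sketch of passing the tool to a request; the `tools=[tool]` config pattern follows the SDK's function-calling examples, and the model name and prompt are illustrative:

```python
# Sketch: use the FunctionDeclaration above in a generate_content call.
from google import genai
from google.genai import types

client = genai.Client()  # assumes GOOGLE_API_KEY is set in the environment

response = client.models.generate_content(
    model='gemini-2.0-flash',
    contents='What is the weather like in Boston?',
    config=types.GenerateContentConfig(tools=[tool]),
)
# With a manually declared function the model emits a function call
# for your code to execute, rather than running anything itself.
print(response.function_calls)
```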
@@ -799,6 +799,40 @@ However you define your schema, don't duplicate it in your input prompt,
  including by giving examples of expected JSON output. If you do, the generated
  output might be lower in quality.

+ #### JSON Schema support
+ Schemas can be provided as standard JSON Schema.
+ ```python
+ user_profile = {
+     'properties': {
+         'age': {
+             'anyOf': [
+                 {'maximum': 20, 'minimum': 0, 'type': 'integer'},
+                 {'type': 'null'},
+             ],
+             'title': 'Age',
+         },
+         'username': {
+             'description': "User's unique name",
+             'title': 'Username',
+             'type': 'string',
+         },
+     },
+     'required': ['username', 'age'],
+     'title': 'User Schema',
+     'type': 'object',
+ }
+
+ response = client.models.generate_content(
+     model='gemini-2.0-flash',
+     contents='Give me information about the United States.',
+     config={
+         'response_mime_type': 'application/json',
+         'response_json_schema': user_profile,
+     },
+ )
+ print(response.parsed)
+ ```
+
  #### Pydantic Model Schema support

  Schemas can be provided as Pydantic Models.
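The hand-written dict above has the shape Pydantic v2 emits, so a hedged alternative is to derive the schema from a model. A sketch; the model class below is illustrative, and its generated title will be 'UserProfile' rather than 'User Schema':

```python
# Sketch (assumes pydantic v2): derive the JSON schema instead of
# writing the dict by hand.
from typing import Annotated, Optional
from pydantic import BaseModel, Field

class UserProfile(BaseModel):
    age: Optional[Annotated[int, Field(ge=0, le=20)]]
    username: Annotated[str, Field(description="User's unique name")]

user_profile = UserProfile.model_json_schema()
```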
@@ -1370,35 +1404,21 @@ print(response.text)
  ## Tunings

  `client.tunings` contains tuning job APIs and supports supervised fine
- tuning through `tune`. See the 'Create a client' section above to initialize a
- client.
+ tuning through `tune`. Only supported in Vertex AI. See the 'Create a client'
+ section above to initialize a client.

  ### Tune

  - Vertex AI supports tuning from GCS source or from a Vertex Multimodal Dataset
- - Gemini Developer API supports tuning from inline examples

  ```python
  from google.genai import types

- if client.vertexai:
-     model = 'gemini-2.0-flash-001'
-     training_dataset = types.TuningDataset(
-         # or gcs_uri=my_vertex_multimodal_dataset
-         gcs_uri='gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl',
-     )
- else:
-     model = 'models/gemini-2.0-flash-001'
-     # or gcs_uri=my_vertex_multimodal_dataset.resource_name
-     training_dataset = types.TuningDataset(
-         examples=[
-             types.TuningExample(
-                 text_input=f'Input text {i}',
-                 output=f'Output text {i}',
-             )
-             for i in range(5)
-         ],
-     )
+ model = 'gemini-2.0-flash-001'
+ training_dataset = types.TuningDataset(
+     # or gcs_uri=my_vertex_multimodal_dataset
+     gcs_uri='gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl',
+ )
  ```

  ```python
@@ -1424,14 +1444,15 @@ print(tuning_job)
  ```python
  import time

- running_states = set(
+ completed_states = set(
      [
-         'JOB_STATE_PENDING',
-         'JOB_STATE_RUNNING',
+         'JOB_STATE_SUCCEEDED',
+         'JOB_STATE_FAILED',
+         'JOB_STATE_CANCELLED',
      ]
  )

- while tuning_job.state in running_states:
+ while tuning_job.state not in completed_states:
      print(tuning_job.state)
      tuning_job = client.tunings.get(name=tuning_job.name)
      time.sleep(10)
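Once the loop exits, the job is in one of the three terminal states. A hedged sketch of acting on the outcome; using `tuning_job.tuned_model.model` with `generate_content` follows the SDK's tuning examples, and the prompt is illustrative:

```python
# Sketch: branch on the terminal state after polling completes.
if tuning_job.state == 'JOB_STATE_SUCCEEDED':
    response = client.models.generate_content(
        model=tuning_job.tuned_model.model,
        contents='Why is the sky blue?',
    )
    print(response.text)
else:
    print(f'Tuning ended in state {tuning_job.state}')
```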
@@ -1542,16 +1563,62 @@ initialize a client.

  ### Create

+ Vertex AI:
+
  ```python
  # Specify model and source file only, destination and job display name will be auto-populated
  job = client.batches.create(
      model='gemini-2.0-flash-001',
-     src='bq://my-project.my-dataset.my-table',
+     src='bq://my-project.my-dataset.my-table',  # or "gs://path/to/input/data"
+ )
+
+ job
+ ```
+
+ Gemini Developer API:
+
+ ```python
+ # Create a batch job with inlined requests
+ batch_job = client.batches.create(
+     model="gemini-2.0-flash",
+     src=[{
+         "contents": [{
+             "parts": [{
+                 "text": "Hello!",
+             }],
+             "role": "user",
+         }],
+         "config": {"response_modalities": ["text"]},
+     }],
  )

  job
  ```

+ To create a batch job from a file, you first need to upload a JSONL file,
+ for example myrequests.json:
+
+ ```
+ {"key":"request_1", "request": {"contents": [{"parts": [{"text": "Explain how AI works in a few words"}]}], "generation_config": {"response_modalities": ["TEXT"]}}}
+ {"key":"request_2", "request": {"contents": [{"parts": [{"text": "Explain how Crypto works in a few words"}]}]}}
+ ```
+ Then upload the file.
+
+ ```python
+ # Upload the file
+ file = client.files.upload(
+     file='myrequests.json',
+     config=types.UploadFileConfig(display_name='test_json')
+ )
+
+ # Create a batch job with the file name
+ batch_job = client.batches.create(
+     model="gemini-2.0-flash",
+     src="files/file_name",
+ )
+ ```
+
+
  ```python
  # Get a job by name
  job = client.batches.get(name=job.name)
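Batch jobs complete asynchronously, so the same terminal-state polling pattern shown for tuning jobs applies. A minimal sketch using only the `batches.get` call from the hunk above; the state names mirror the tuning section:

```python
# Sketch: poll until the batch job reaches a terminal state.
import time

completed_states = {
    'JOB_STATE_SUCCEEDED',
    'JOB_STATE_FAILED',
    'JOB_STATE_CANCELLED',
}

while batch_job.state not in completed_states:
    time.sleep(30)
    batch_job = client.batches.get(name=batch_job.name)
print(batch_job.state)
```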
{google_genai-1.25.0 → google_genai-1.27.0}/README.md

@@ -575,16 +575,16 @@ from google.genai import types
  function = types.FunctionDeclaration(
      name='get_current_weather',
      description='Get the current weather in a given location',
-     parameters=types.Schema(
-         type='OBJECT',
-         properties={
-             'location': types.Schema(
-                 type='STRING',
-                 description='The city and state, e.g. San Francisco, CA',
-             ),
+     parameters_json_schema={
+         'type': 'object',
+         'properties': {
+             'location': {
+                 'type': 'string',
+                 'description': 'The city and state, e.g. San Francisco, CA',
+             }
          },
-         required=['location'],
-     ),
+         'required': ['location'],
+     },
  )

  tool = types.Tool(function_declarations=[function])
@@ -765,6 +765,40 @@ However you define your schema, don't duplicate it in your input prompt,
  including by giving examples of expected JSON output. If you do, the generated
  output might be lower in quality.

+ #### JSON Schema support
+ Schemas can be provided as standard JSON Schema.
+ ```python
+ user_profile = {
+     'properties': {
+         'age': {
+             'anyOf': [
+                 {'maximum': 20, 'minimum': 0, 'type': 'integer'},
+                 {'type': 'null'},
+             ],
+             'title': 'Age',
+         },
+         'username': {
+             'description': "User's unique name",
+             'title': 'Username',
+             'type': 'string',
+         },
+     },
+     'required': ['username', 'age'],
+     'title': 'User Schema',
+     'type': 'object',
+ }
+
+ response = client.models.generate_content(
+     model='gemini-2.0-flash',
+     contents='Give me information about the United States.',
+     config={
+         'response_mime_type': 'application/json',
+         'response_json_schema': user_profile,
+     },
+ )
+ print(response.parsed)
+ ```
+
  #### Pydantic Model Schema support

  Schemas can be provided as Pydantic Models.
@@ -1336,35 +1370,21 @@ print(response.text)
  ## Tunings

  `client.tunings` contains tuning job APIs and supports supervised fine
- tuning through `tune`. See the 'Create a client' section above to initialize a
- client.
+ tuning through `tune`. Only supported in Vertex AI. See the 'Create a client'
+ section above to initialize a client.

  ### Tune

  - Vertex AI supports tuning from GCS source or from a Vertex Multimodal Dataset
- - Gemini Developer API supports tuning from inline examples

  ```python
  from google.genai import types

- if client.vertexai:
-     model = 'gemini-2.0-flash-001'
-     training_dataset = types.TuningDataset(
-         # or gcs_uri=my_vertex_multimodal_dataset
-         gcs_uri='gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl',
-     )
- else:
-     model = 'models/gemini-2.0-flash-001'
-     # or gcs_uri=my_vertex_multimodal_dataset.resource_name
-     training_dataset = types.TuningDataset(
-         examples=[
-             types.TuningExample(
-                 text_input=f'Input text {i}',
-                 output=f'Output text {i}',
-             )
-             for i in range(5)
-         ],
-     )
+ model = 'gemini-2.0-flash-001'
+ training_dataset = types.TuningDataset(
+     # or gcs_uri=my_vertex_multimodal_dataset
+     gcs_uri='gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl',
+ )
  ```

  ```python
@@ -1390,14 +1410,15 @@ print(tuning_job)
  ```python
  import time

- running_states = set(
+ completed_states = set(
      [
-         'JOB_STATE_PENDING',
-         'JOB_STATE_RUNNING',
+         'JOB_STATE_SUCCEEDED',
+         'JOB_STATE_FAILED',
+         'JOB_STATE_CANCELLED',
      ]
  )

- while tuning_job.state in running_states:
+ while tuning_job.state not in completed_states:
      print(tuning_job.state)
      tuning_job = client.tunings.get(name=tuning_job.name)
      time.sleep(10)
@@ -1508,16 +1529,62 @@ initialize a client.

  ### Create

+ Vertex AI:
+
  ```python
  # Specify model and source file only, destination and job display name will be auto-populated
  job = client.batches.create(
      model='gemini-2.0-flash-001',
-     src='bq://my-project.my-dataset.my-table',
+     src='bq://my-project.my-dataset.my-table',  # or "gs://path/to/input/data"
+ )
+
+ job
+ ```
+
+ Gemini Developer API:
+
+ ```python
+ # Create a batch job with inlined requests
+ batch_job = client.batches.create(
+     model="gemini-2.0-flash",
+     src=[{
+         "contents": [{
+             "parts": [{
+                 "text": "Hello!",
+             }],
+             "role": "user",
+         }],
+         "config": {"response_modalities": ["text"]},
+     }],
  )

  job
  ```

+ To create a batch job from a file, you first need to upload a JSONL file,
+ for example myrequests.json:
+
+ ```
+ {"key":"request_1", "request": {"contents": [{"parts": [{"text": "Explain how AI works in a few words"}]}], "generation_config": {"response_modalities": ["TEXT"]}}}
+ {"key":"request_2", "request": {"contents": [{"parts": [{"text": "Explain how Crypto works in a few words"}]}]}}
+ ```
+ Then upload the file.
+
+ ```python
+ # Upload the file
+ file = client.files.upload(
+     file='myrequests.json',
+     config=types.UploadFileConfig(display_name='test_json')
+ )
+
+ # Create a batch job with the file name
+ batch_job = client.batches.create(
+     model="gemini-2.0-flash",
+     src="files/file_name",
+ )
+ ```
+
+
  ```python
  # Get a job by name
  job = client.batches.get(name=job.name)
{google_genai-1.25.0 → google_genai-1.27.0}/google/genai/_extra_utils.py

@@ -114,7 +114,7 @@ def format_destination(
      unique_name = unique_name or _common.timestamped_unique_name()
      config.dest = f'{bigquery_source_uri}_dest_{unique_name}'
    else:
-     raise ValueError(f'Unsupported source: {src}')
+     raise ValueError(f'The source {src} is not supported.')
    return config
