google-genai 0.8.0__tar.gz → 1.0.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. {google_genai-0.8.0/google_genai.egg-info → google_genai-1.0.0}/PKG-INFO +87 -46
  2. {google_genai-0.8.0 → google_genai-1.0.0}/README.md +86 -45
  3. {google_genai-0.8.0 → google_genai-1.0.0}/google/genai/_api_client.py +24 -21
  4. {google_genai-0.8.0 → google_genai-1.0.0}/google/genai/_automatic_function_calling_util.py +19 -20
  5. {google_genai-0.8.0 → google_genai-1.0.0}/google/genai/_common.py +22 -0
  6. {google_genai-0.8.0 → google_genai-1.0.0}/google/genai/_extra_utils.py +12 -6
  7. {google_genai-0.8.0 → google_genai-1.0.0}/google/genai/_replay_api_client.py +2 -0
  8. {google_genai-0.8.0 → google_genai-1.0.0}/google/genai/_transformers.py +32 -14
  9. {google_genai-0.8.0 → google_genai-1.0.0}/google/genai/errors.py +4 -0
  10. {google_genai-0.8.0 → google_genai-1.0.0}/google/genai/files.py +21 -15
  11. {google_genai-0.8.0 → google_genai-1.0.0}/google/genai/live.py +5 -0
  12. {google_genai-0.8.0 → google_genai-1.0.0}/google/genai/models.py +341 -38
  13. {google_genai-0.8.0 → google_genai-1.0.0}/google/genai/tunings.py +288 -61
  14. {google_genai-0.8.0 → google_genai-1.0.0}/google/genai/types.py +86 -67
  15. {google_genai-0.8.0 → google_genai-1.0.0}/google/genai/version.py +1 -1
  16. {google_genai-0.8.0 → google_genai-1.0.0/google_genai.egg-info}/PKG-INFO +87 -46
  17. {google_genai-0.8.0 → google_genai-1.0.0}/pyproject.toml +1 -1
  18. {google_genai-0.8.0 → google_genai-1.0.0}/LICENSE +0 -0
  19. {google_genai-0.8.0 → google_genai-1.0.0}/google/genai/__init__.py +0 -0
  20. {google_genai-0.8.0 → google_genai-1.0.0}/google/genai/_api_module.py +0 -0
  21. {google_genai-0.8.0 → google_genai-1.0.0}/google/genai/_operations.py +0 -0
  22. {google_genai-0.8.0 → google_genai-1.0.0}/google/genai/_test_api_client.py +0 -0
  23. {google_genai-0.8.0 → google_genai-1.0.0}/google/genai/batches.py +0 -0
  24. {google_genai-0.8.0 → google_genai-1.0.0}/google/genai/caches.py +0 -0
  25. {google_genai-0.8.0 → google_genai-1.0.0}/google/genai/chats.py +0 -0
  26. {google_genai-0.8.0 → google_genai-1.0.0}/google/genai/client.py +0 -0
  27. {google_genai-0.8.0 → google_genai-1.0.0}/google/genai/pagers.py +0 -0
  28. {google_genai-0.8.0 → google_genai-1.0.0}/google_genai.egg-info/SOURCES.txt +0 -0
  29. {google_genai-0.8.0 → google_genai-1.0.0}/google_genai.egg-info/dependency_links.txt +0 -0
  30. {google_genai-0.8.0 → google_genai-1.0.0}/google_genai.egg-info/requires.txt +0 -0
  31. {google_genai-0.8.0 → google_genai-1.0.0}/google_genai.egg-info/top_level.txt +0 -0
  32. {google_genai-0.8.0 → google_genai-1.0.0}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.2
2
2
  Name: google-genai
3
- Version: 0.8.0
3
+ Version: 1.0.0
4
4
  Summary: GenAI Python SDK
5
5
  Author-email: Google LLC <googleapis-packages@google.com>
6
6
  License: Apache-2.0
@@ -34,7 +34,7 @@ Requires-Dist: websockets<15.0dev,>=13.0
34
34
 
35
35
  -----
36
36
 
37
- Google Gen AI Python SDK provides an interface for developers to integrate Google's generative models into their Python applications. It supports the [Gemini Developer API](https://ai.google.dev/gemini-api/docs) and [Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/overview) APIs. This is an early release. API is subject to change. Please do not use this SDK in production environments at this stage.
37
+ Google Gen AI Python SDK provides an interface for developers to integrate Google's generative models into their Python applications. It supports the [Gemini Developer API](https://ai.google.dev/gemini-api/docs) and [Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/overview) APIs.
38
38
 
39
39
  ## Installation
40
40
 
@@ -66,6 +66,23 @@ client = genai.Client(
66
66
  )
67
67
  ```
68
68
 
69
+ To set the API version use `http_options`. For example, to set the API version
70
+ to `v1` for Vertex AI:
71
+
72
+ ```python
73
+ client = genai.Client(
74
+ vertexai=True, project='your-project-id', location='us-central1',
75
+ http_options={'api_version': 'v1'}
76
+ )
77
+ ```
78
+
79
+ To set the API version to `v1alpha` for the Gemini API:
80
+
81
+ ```python
82
+ client = genai.Client(api_key='GEMINI_API_KEY',
83
+ http_options={'api_version': 'v1alpha'})
84
+ ```
85
+
69
86
  ## Types
70
87
 
71
88
  Parameter types can be specified as either dictionaries(`TypedDict`) or
@@ -144,53 +161,17 @@ response = client.models.generate_content(
144
161
  print(response.text)
145
162
  ```
146
163
 
147
- ### Thinking
148
-
149
- The Gemini 2.0 Flash Thinking model is an experimental model that could return
150
- "thoughts" as part of its response.
151
-
152
- #### Gemini Developer API
153
-
154
- Thinking config is only available in v1alpha for Gemini AI API.
155
-
156
- ```python
157
- response = client.models.generate_content(
158
- model='gemini-2.0-flash-thinking-exp',
159
- contents='What is the sum of natural numbers from 1 to 100?',
160
- config=types.GenerateContentConfig(
161
- thinking_config=types.ThinkingConfig(include_thoughts=True),
162
- http_options=types.HttpOptions(api_version='v1alpha'),
163
- )
164
- )
165
- for part in response.candidates[0].content.parts:
166
- print(part)
167
- ```
168
-
169
- #### Vertex AI API
170
-
171
- ```python
172
- response = client.models.generate_content(
173
- model='gemini-2.0-flash-thinking-exp-01-21',
174
- contents='What is the sum of natural numbers from 1 to 100?',
175
- config=types.GenerateContentConfig(
176
- thinking_config=types.ThinkingConfig(include_thoughts=True),
177
- )
178
- )
179
- for part in response.candidates[0].content.parts:
180
- print(part)
181
- ```
182
-
183
164
  ### List Base Models
184
165
 
185
166
  To retrieve tuned models, see [list tuned models](#list-tuned-models).
186
167
 
187
168
  ```python
188
- for model in client.models.list(config={'query_base':True}):
169
+ for model in client.models.list():
189
170
  print(model)
190
171
  ```
191
172
 
192
173
  ```python
193
- pager = client.models.list(config={"page_size": 10, 'query_base':True})
174
+ pager = client.models.list(config={"page_size": 10})
194
175
  print(pager.page_size)
195
176
  print(pager[0])
196
177
  pager.next_page()
@@ -200,12 +181,12 @@ print(pager[0])
200
181
  #### Async
201
182
 
202
183
  ```python
203
- async for job in await client.aio.models.list(config={'query_base':True}):
184
+ async for job in await client.aio.models.list():
204
185
  print(job)
205
186
  ```
206
187
 
207
188
  ```python
208
- async_pager = await client.aio.models.list(config={"page_size": 10, 'query_base':True})
189
+ async_pager = await client.aio.models.list(config={"page_size": 10})
209
190
  print(async_pager.page_size)
210
191
  print(async_pager[0])
211
192
  await async_pager.next_page()
@@ -338,6 +319,66 @@ response = client.models.generate_content(
338
319
  print(response.text)
339
320
  ```
340
321
 
322
+ #### Function calling with `ANY` tools config mode
323
+
324
+ If you configure function calling mode to be `ANY`, then the model will always
325
+ return function call parts. If you also pass a python function as a tool, by
326
+ default the SDK will perform automatic function calling until the remote calls exceed the
327
+ maximum remote call for automatic function calling (default to 10 times).
328
+
329
+ If you'd like to disable automatic function calling in `ANY` mode:
330
+
331
+ ```python
332
+ def get_current_weather(location: str) -> str:
333
+ """Returns the current weather.
334
+
335
+ Args:
336
+ location: The city and state, e.g. San Francisco, CA
337
+ """
338
+ return "sunny"
339
+
340
+ response = client.models.generate_content(
341
+ model="gemini-2.0-flash-exp",
342
+ contents="What is the weather like in Boston?",
343
+ config=types.GenerateContentConfig(
344
+ tools=[get_current_weather],
345
+ automatic_function_calling=types.AutomaticFunctionCallingConfig(
346
+ disable=True
347
+ ),
348
+ tool_config=types.ToolConfig(
349
+ function_calling_config=types.FunctionCallingConfig(mode='ANY')
350
+ ),
351
+ ),
352
+ )
353
+ ```
354
+
355
+ If you'd like to set `x` number of automatic function call turns, you can
356
+ configure the maximum remote calls to be `x + 1`.
357
+ Assuming you prefer `1` turn for automatic function calling.
358
+
359
+ ```python
360
+ def get_current_weather(location: str) -> str:
361
+ """Returns the current weather.
362
+
363
+ Args:
364
+ location: The city and state, e.g. San Francisco, CA
365
+ """
366
+ return "sunny"
367
+
368
+ response = client.models.generate_content(
369
+ model="gemini-2.0-flash-exp",
370
+ contents="What is the weather like in Boston?",
371
+ config=types.GenerateContentConfig(
372
+ tools=[get_current_weather],
373
+ automatic_function_calling=types.AutomaticFunctionCallingConfig(
374
+ maximum_remote_calls=2
375
+ ),
376
+ tool_config=types.ToolConfig(
377
+ function_calling_config=types.FunctionCallingConfig(mode='ANY')
378
+ ),
379
+ ),
380
+ )
381
+ ```
341
382
  ### JSON Response Schema
342
383
 
343
384
  #### Pydantic Model Schema support
@@ -864,12 +905,12 @@ print(tuned_model)
864
905
  To retrieve base models, see [list base models](#list-base-models).
865
906
 
866
907
  ```python
867
- for model in client.models.list(config={"page_size": 10}):
908
+ for model in client.models.list(config={"page_size": 10, "query_base": False}):
868
909
  print(model)
869
910
  ```
870
911
 
871
912
  ```python
872
- pager = client.models.list(config={"page_size": 10})
913
+ pager = client.models.list(config={"page_size": 10, "query_base": False})
873
914
  print(pager.page_size)
874
915
  print(pager[0])
875
916
  pager.next_page()
@@ -879,12 +920,12 @@ print(pager[0])
879
920
  #### Async
880
921
 
881
922
  ```python
882
- async for job in await client.aio.models.list(config={"page_size": 10}):
923
+ async for job in await client.aio.models.list(config={"page_size": 10, "query_base": False}):
883
924
  print(job)
884
925
  ```
885
926
 
886
927
  ```python
887
- async_pager = await client.aio.models.list(config={"page_size": 10})
928
+ async_pager = await client.aio.models.list(config={"page_size": 10, "query_base": False})
888
929
  print(async_pager.page_size)
889
930
  print(async_pager[0])
890
931
  await async_pager.next_page()
@@ -7,7 +7,7 @@
7
7
 
8
8
  -----
9
9
 
10
- Google Gen AI Python SDK provides an interface for developers to integrate Google's generative models into their Python applications. It supports the [Gemini Developer API](https://ai.google.dev/gemini-api/docs) and [Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/overview) APIs. This is an early release. API is subject to change. Please do not use this SDK in production environments at this stage.
10
+ Google Gen AI Python SDK provides an interface for developers to integrate Google's generative models into their Python applications. It supports the [Gemini Developer API](https://ai.google.dev/gemini-api/docs) and [Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/overview) APIs.
11
11
 
12
12
  ## Installation
13
13
 
@@ -39,6 +39,23 @@ client = genai.Client(
39
39
  )
40
40
  ```
41
41
 
42
+ To set the API version use `http_options`. For example, to set the API version
43
+ to `v1` for Vertex AI:
44
+
45
+ ```python
46
+ client = genai.Client(
47
+ vertexai=True, project='your-project-id', location='us-central1',
48
+ http_options={'api_version': 'v1'}
49
+ )
50
+ ```
51
+
52
+ To set the API version to `v1alpha` for the Gemini API:
53
+
54
+ ```python
55
+ client = genai.Client(api_key='GEMINI_API_KEY',
56
+ http_options={'api_version': 'v1alpha'})
57
+ ```
58
+
42
59
  ## Types
43
60
 
44
61
  Parameter types can be specified as either dictionaries(`TypedDict`) or
@@ -117,53 +134,17 @@ response = client.models.generate_content(
117
134
  print(response.text)
118
135
  ```
119
136
 
120
- ### Thinking
121
-
122
- The Gemini 2.0 Flash Thinking model is an experimental model that could return
123
- "thoughts" as part of its response.
124
-
125
- #### Gemini Developer API
126
-
127
- Thinking config is only available in v1alpha for Gemini AI API.
128
-
129
- ```python
130
- response = client.models.generate_content(
131
- model='gemini-2.0-flash-thinking-exp',
132
- contents='What is the sum of natural numbers from 1 to 100?',
133
- config=types.GenerateContentConfig(
134
- thinking_config=types.ThinkingConfig(include_thoughts=True),
135
- http_options=types.HttpOptions(api_version='v1alpha'),
136
- )
137
- )
138
- for part in response.candidates[0].content.parts:
139
- print(part)
140
- ```
141
-
142
- #### Vertex AI API
143
-
144
- ```python
145
- response = client.models.generate_content(
146
- model='gemini-2.0-flash-thinking-exp-01-21',
147
- contents='What is the sum of natural numbers from 1 to 100?',
148
- config=types.GenerateContentConfig(
149
- thinking_config=types.ThinkingConfig(include_thoughts=True),
150
- )
151
- )
152
- for part in response.candidates[0].content.parts:
153
- print(part)
154
- ```
155
-
156
137
  ### List Base Models
157
138
 
158
139
  To retrieve tuned models, see [list tuned models](#list-tuned-models).
159
140
 
160
141
  ```python
161
- for model in client.models.list(config={'query_base':True}):
142
+ for model in client.models.list():
162
143
  print(model)
163
144
  ```
164
145
 
165
146
  ```python
166
- pager = client.models.list(config={"page_size": 10, 'query_base':True})
147
+ pager = client.models.list(config={"page_size": 10})
167
148
  print(pager.page_size)
168
149
  print(pager[0])
169
150
  pager.next_page()
@@ -173,12 +154,12 @@ print(pager[0])
173
154
  #### Async
174
155
 
175
156
  ```python
176
- async for job in await client.aio.models.list(config={'query_base':True}):
157
+ async for job in await client.aio.models.list():
177
158
  print(job)
178
159
  ```
179
160
 
180
161
  ```python
181
- async_pager = await client.aio.models.list(config={"page_size": 10, 'query_base':True})
162
+ async_pager = await client.aio.models.list(config={"page_size": 10})
182
163
  print(async_pager.page_size)
183
164
  print(async_pager[0])
184
165
  await async_pager.next_page()
@@ -311,6 +292,66 @@ response = client.models.generate_content(
311
292
  print(response.text)
312
293
  ```
313
294
 
295
+ #### Function calling with `ANY` tools config mode
296
+
297
+ If you configure function calling mode to be `ANY`, then the model will always
298
+ return function call parts. If you also pass a python function as a tool, by
299
+ default the SDK will perform automatic function calling until the remote calls exceed the
300
+ maximum remote call for automatic function calling (default to 10 times).
301
+
302
+ If you'd like to disable automatic function calling in `ANY` mode:
303
+
304
+ ```python
305
+ def get_current_weather(location: str) -> str:
306
+ """Returns the current weather.
307
+
308
+ Args:
309
+ location: The city and state, e.g. San Francisco, CA
310
+ """
311
+ return "sunny"
312
+
313
+ response = client.models.generate_content(
314
+ model="gemini-2.0-flash-exp",
315
+ contents="What is the weather like in Boston?",
316
+ config=types.GenerateContentConfig(
317
+ tools=[get_current_weather],
318
+ automatic_function_calling=types.AutomaticFunctionCallingConfig(
319
+ disable=True
320
+ ),
321
+ tool_config=types.ToolConfig(
322
+ function_calling_config=types.FunctionCallingConfig(mode='ANY')
323
+ ),
324
+ ),
325
+ )
326
+ ```
327
+
328
+ If you'd like to set `x` number of automatic function call turns, you can
329
+ configure the maximum remote calls to be `x + 1`.
330
+ Assuming you prefer `1` turn for automatic function calling.
331
+
332
+ ```python
333
+ def get_current_weather(location: str) -> str:
334
+ """Returns the current weather.
335
+
336
+ Args:
337
+ location: The city and state, e.g. San Francisco, CA
338
+ """
339
+ return "sunny"
340
+
341
+ response = client.models.generate_content(
342
+ model="gemini-2.0-flash-exp",
343
+ contents="What is the weather like in Boston?",
344
+ config=types.GenerateContentConfig(
345
+ tools=[get_current_weather],
346
+ automatic_function_calling=types.AutomaticFunctionCallingConfig(
347
+ maximum_remote_calls=2
348
+ ),
349
+ tool_config=types.ToolConfig(
350
+ function_calling_config=types.FunctionCallingConfig(mode='ANY')
351
+ ),
352
+ ),
353
+ )
354
+ ```
314
355
  ### JSON Response Schema
315
356
 
316
357
  #### Pydantic Model Schema support
@@ -837,12 +878,12 @@ print(tuned_model)
837
878
  To retrieve base models, see [list base models](#list-base-models).
838
879
 
839
880
  ```python
840
- for model in client.models.list(config={"page_size": 10}):
881
+ for model in client.models.list(config={"page_size": 10, "query_base": False}):
841
882
  print(model)
842
883
  ```
843
884
 
844
885
  ```python
845
- pager = client.models.list(config={"page_size": 10})
886
+ pager = client.models.list(config={"page_size": 10, "query_base": False})
846
887
  print(pager.page_size)
847
888
  print(pager[0])
848
889
  pager.next_page()
@@ -852,12 +893,12 @@ print(pager[0])
852
893
  #### Async
853
894
 
854
895
  ```python
855
- async for job in await client.aio.models.list(config={"page_size": 10}):
896
+ async for job in await client.aio.models.list(config={"page_size": 10, "query_base": False}):
856
897
  print(job)
857
898
  ```
858
899
 
859
900
  ```python
860
- async_pager = await client.aio.models.list(config={"page_size": 10})
901
+ async_pager = await client.aio.models.list(config={"page_size": 10, "query_base": False})
861
902
  print(async_pager.page_size)
862
903
  print(async_pager[0])
863
904
  await async_pager.next_page()
@@ -99,6 +99,19 @@ class HttpRequest:
99
99
  timeout: Optional[float] = None
100
100
 
101
101
 
102
+ # TODO(b/394358912): Update this class to use a SDKResponse class that can be
103
+ # generated and used for all languages.
104
+ @dataclass
105
+ class BaseResponse:
106
+ http_headers: dict[str, str]
107
+
108
+ @property
109
+ def dict(self) -> dict[str, Any]:
110
+ if isinstance(self, dict):
111
+ return self
112
+ return {'httpHeaders': self.http_headers}
113
+
114
+
102
115
  class HttpResponse:
103
116
 
104
117
  def __init__(
@@ -434,18 +447,12 @@ class ApiClient:
434
447
  http_method, path, request_dict, http_options
435
448
  )
436
449
  response = self._request(http_request, stream=False)
437
- if http_options:
438
- if (
439
- isinstance(http_options, HttpOptions)
440
- and http_options.deprecated_response_payload is not None
441
- ):
442
- response._copy_to_dict(http_options.deprecated_response_payload)
443
- elif (
444
- isinstance(http_options, dict)
445
- and 'deprecated_response_payload' in http_options
446
- ):
447
- response._copy_to_dict(http_options['deprecated_response_payload'])
448
- return response.json
450
+ json_response = response.json
451
+ if not json_response:
452
+ base_response = BaseResponse(response.headers).dict
453
+ return base_response
454
+
455
+ return json_response
449
456
 
450
457
  def request_streamed(
451
458
  self,
@@ -459,10 +466,6 @@ class ApiClient:
459
466
  )
460
467
 
461
468
  session_response = self._request(http_request, stream=True)
462
- if http_options and 'deprecated_response_payload' in http_options:
463
- session_response._copy_to_dict(
464
- http_options['deprecated_response_payload']
465
- )
466
469
  for chunk in session_response.segments():
467
470
  yield chunk
468
471
 
@@ -478,9 +481,11 @@ class ApiClient:
478
481
  )
479
482
 
480
483
  result = await self._async_request(http_request=http_request, stream=False)
481
- if http_options and 'deprecated_response_payload' in http_options:
482
- result._copy_to_dict(http_options['deprecated_response_payload'])
483
- return result.json
484
+ json_response = result.json
485
+ if not json_response:
486
+ base_response = BaseResponse(result.headers).dict
487
+ return base_response
488
+ return json_response
484
489
 
485
490
  async def async_request_streamed(
486
491
  self,
@@ -495,8 +500,6 @@ class ApiClient:
495
500
 
496
501
  response = await self._async_request(http_request=http_request, stream=True)
497
502
 
498
- if http_options and 'deprecated_response_payload' in http_options:
499
- response._copy_to_dict(http_options['deprecated_response_payload'])
500
503
  async def async_generator():
501
504
  async for chunk in response:
502
505
  yield chunk
@@ -17,14 +17,18 @@ import inspect
17
17
  import sys
18
18
  import types as builtin_types
19
19
  import typing
20
- from typing import Any, Callable, Literal, Union, _GenericAlias, get_args, get_origin
20
+ from typing import _GenericAlias, Any, Callable, get_args, get_origin, Literal, Union
21
+
21
22
  import pydantic
23
+
24
+ from . import _extra_utils
22
25
  from . import types
23
26
 
27
+
24
28
  if sys.version_info >= (3, 10):
25
- UnionType = builtin_types.UnionType
29
+ VersionedUnionType = builtin_types.UnionType
26
30
  else:
27
- UnionType = typing._UnionGenericAlias
31
+ VersionedUnionType = typing._UnionGenericAlias
28
32
 
29
33
  _py_builtin_type_to_schema_type = {
30
34
  str: 'STRING',
@@ -45,7 +49,8 @@ def _is_builtin_primitive_or_compound(
45
49
  def _raise_for_any_of_if_mldev(schema: types.Schema):
46
50
  if schema.any_of:
47
51
  raise ValueError(
48
- 'AnyOf is not supported in function declaration schema for Google AI.'
52
+ 'AnyOf is not supported in function declaration schema for'
53
+ ' the Gemini API.'
49
54
  )
50
55
 
51
56
 
@@ -53,15 +58,7 @@ def _raise_for_default_if_mldev(schema: types.Schema):
53
58
  if schema.default is not None:
54
59
  raise ValueError(
55
60
  'Default value is not supported in function declaration schema for'
56
- ' Google AI.'
57
- )
58
-
59
-
60
- def _raise_for_nullable_if_mldev(schema: types.Schema):
61
- if schema.nullable:
62
- raise ValueError(
63
- 'Nullable is not supported in function declaration schema for'
64
- ' Google AI.'
61
+ ' the Gemini API.'
65
62
  )
66
63
 
67
64
 
@@ -69,7 +66,6 @@ def _raise_if_schema_unsupported(client, schema: types.Schema):
69
66
  if not client.vertexai:
70
67
  _raise_for_any_of_if_mldev(schema)
71
68
  _raise_for_default_if_mldev(schema)
72
- _raise_for_nullable_if_mldev(schema)
73
69
 
74
70
 
75
71
  def _is_default_value_compatible(
@@ -82,10 +78,10 @@ def _is_default_value_compatible(
82
78
  if (
83
79
  isinstance(annotation, _GenericAlias)
84
80
  or isinstance(annotation, builtin_types.GenericAlias)
85
- or isinstance(annotation, UnionType)
81
+ or isinstance(annotation, VersionedUnionType)
86
82
  ):
87
83
  origin = get_origin(annotation)
88
- if origin in (Union, UnionType):
84
+ if origin in (Union, VersionedUnionType):
89
85
  return any(
90
86
  _is_default_value_compatible(default_value, arg)
91
87
  for arg in get_args(annotation)
@@ -141,7 +137,7 @@ def _parse_schema_from_parameter(
141
137
  _raise_if_schema_unsupported(client, schema)
142
138
  return schema
143
139
  if (
144
- isinstance(param.annotation, UnionType)
140
+ isinstance(param.annotation, VersionedUnionType)
145
141
  # only parse simple UnionType, example int | str | float | bool
146
142
  # complex UnionType will be invoked in raise branch
147
143
  and all(
@@ -229,7 +225,11 @@ def _parse_schema_from_parameter(
229
225
  schema.type = 'OBJECT'
230
226
  unique_types = set()
231
227
  for arg in args:
232
- if arg.__name__ == 'NoneType': # Optional type
228
+ # The first check is for NoneType in Python 3.9, since the __name__
229
+ # attribute is not available in Python 3.9
230
+ if type(arg) is type(None) or (
231
+ hasattr(arg, '__name__') and arg.__name__ == 'NoneType'
232
+ ): # Optional type
233
233
  schema.nullable = True
234
234
  continue
235
235
  schema_in_any_of = _parse_schema_from_parameter(
@@ -272,9 +272,8 @@ def _parse_schema_from_parameter(
272
272
  return schema
273
273
  # all other generic alias will be invoked in raise branch
274
274
  if (
275
- inspect.isclass(param.annotation)
276
275
  # for user defined class, we only support pydantic model
277
- and issubclass(param.annotation, pydantic.BaseModel)
276
+ _extra_utils.is_annotation_pydantic_model(param.annotation)
278
277
  ):
279
278
  if (
280
279
  param.default is not inspect.Parameter.empty
@@ -18,6 +18,7 @@
18
18
  import base64
19
19
  import datetime
20
20
  import enum
21
+ import functools
21
22
  import typing
22
23
  from typing import Union
23
24
  import uuid
@@ -27,6 +28,7 @@ import pydantic
27
28
  from pydantic import alias_generators
28
29
 
29
30
  from . import _api_client
31
+ from . import errors
30
32
 
31
33
 
32
34
  def set_value_by_path(data, keys, value):
@@ -273,3 +275,23 @@ def encode_unserializable_types(data: dict[str, object]) -> dict[str, object]:
273
275
  else:
274
276
  processed_data[key] = value
275
277
  return processed_data
278
+
279
+
280
+ def experimental_warning(message: str):
281
+ """Experimental warning, only warns once."""
282
+ def decorator(func):
283
+ warning_done = False
284
+ @functools.wraps(func)
285
+ def wrapper(*args, **kwargs):
286
+ nonlocal warning_done
287
+ if not warning_done:
288
+ warning_done = True
289
+ warnings.warn(
290
+ message=message,
291
+ category=errors.ExperimentalWarning,
292
+ stacklevel=2,
293
+ )
294
+ return func(*args, **kwargs)
295
+ return wrapper
296
+ return decorator
297
+
@@ -108,16 +108,22 @@ def convert_number_values_for_function_call_args(
108
108
  return args
109
109
 
110
110
 
111
- def _is_annotation_pydantic_model(annotation: Any) -> bool:
112
- return inspect.isclass(annotation) and issubclass(
113
- annotation, pydantic.BaseModel
114
- )
111
+ def is_annotation_pydantic_model(annotation: Any) -> bool:
112
+ try:
113
+ return inspect.isclass(annotation) and issubclass(
114
+ annotation, pydantic.BaseModel
115
+ )
116
+ # for python 3.10 and below, inspect.isclass(annotation) has inconsistent
117
+ # results with versions above. for example, inspect.isclass(dict[str, int]) is
118
+ # True in 3.10 and below but False in 3.11 and above.
119
+ except TypeError:
120
+ return False
115
121
 
116
122
 
117
123
  def convert_if_exist_pydantic_model(
118
124
  value: Any, annotation: Any, param_name: str, func_name: str
119
125
  ) -> Any:
120
- if isinstance(value, dict) and _is_annotation_pydantic_model(annotation):
126
+ if isinstance(value, dict) and is_annotation_pydantic_model(annotation):
121
127
  try:
122
128
  return annotation(**value)
123
129
  except pydantic.ValidationError as e:
@@ -146,7 +152,7 @@ def convert_if_exist_pydantic_model(
146
152
  if (
147
153
  (get_args(arg) and get_origin(arg) is list)
148
154
  or isinstance(value, arg)
149
- or (isinstance(value, dict) and _is_annotation_pydantic_model(arg))
155
+ or (isinstance(value, dict) and is_annotation_pydantic_model(arg))
150
156
  ):
151
157
  try:
152
158
  return convert_if_exist_pydantic_model(
@@ -362,6 +362,8 @@ class ReplayApiClient(ApiClient):
362
362
  if self._should_update_replay():
363
363
  if isinstance(response_model, list):
364
364
  response_model = response_model[0]
365
+ if response_model and 'http_headers' in response_model.model_fields:
366
+ response_model.http_headers.pop('Date', None)
365
367
  interaction.response.sdk_response_segments.append(
366
368
  response_model.model_dump(exclude_none=True)
367
369
  )