mistralai 1.2.4__py3-none-any.whl → 1.2.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. mistralai/_hooks/custom_user_agent.py +7 -1
  2. mistralai/_version.py +1 -1
  3. mistralai/agents.py +17 -5
  4. mistralai/basesdk.py +25 -9
  5. mistralai/chat.py +25 -13
  6. mistralai/classifiers.py +17 -5
  7. mistralai/embeddings.py +9 -3
  8. mistralai/files.py +49 -13
  9. mistralai/fim.py +17 -5
  10. mistralai/jobs.py +41 -11
  11. mistralai/mistral_jobs.py +33 -9
  12. mistralai/models/chatcompletionrequest.py +2 -2
  13. mistralai/models/chatcompletionstreamrequest.py +2 -2
  14. mistralai/models/files_api_routes_upload_fileop.py +2 -6
  15. mistralai/models_.py +49 -13
  16. mistralai/sdkconfiguration.py +3 -3
  17. mistralai/utils/forms.py +4 -10
  18. mistralai/utils/requestbodies.py +1 -1
  19. {mistralai-1.2.4.dist-info → mistralai-1.2.6.dist-info}/METADATA +117 -86
  20. {mistralai-1.2.4.dist-info → mistralai-1.2.6.dist-info}/RECORD +41 -41
  21. {mistralai-1.2.4.dist-info → mistralai-1.2.6.dist-info}/WHEEL +1 -1
  22. mistralai_azure/_hooks/custom_user_agent.py +8 -2
  23. mistralai_azure/_version.py +1 -1
  24. mistralai_azure/basesdk.py +25 -9
  25. mistralai_azure/chat.py +25 -13
  26. mistralai_azure/models/chatcompletionrequest.py +2 -2
  27. mistralai_azure/models/chatcompletionstreamrequest.py +2 -2
  28. mistralai_azure/sdkconfiguration.py +3 -3
  29. mistralai_azure/utils/forms.py +4 -10
  30. mistralai_azure/utils/requestbodies.py +1 -1
  31. mistralai_gcp/_hooks/custom_user_agent.py +7 -1
  32. mistralai_gcp/_version.py +1 -1
  33. mistralai_gcp/basesdk.py +25 -9
  34. mistralai_gcp/chat.py +21 -9
  35. mistralai_gcp/fim.py +17 -5
  36. mistralai_gcp/models/chatcompletionrequest.py +1 -1
  37. mistralai_gcp/models/chatcompletionstreamrequest.py +1 -1
  38. mistralai_gcp/sdkconfiguration.py +3 -3
  39. mistralai_gcp/utils/forms.py +4 -10
  40. mistralai_gcp/utils/requestbodies.py +1 -1
  41. {mistralai-1.2.4.dist-info → mistralai-1.2.6.dist-info}/LICENSE +0 -0
mistralai_azure/basesdk.py CHANGED
@@ -9,7 +9,8 @@ from mistralai_azure._hooks import (
     BeforeRequestContext,
 )
 from mistralai_azure.utils import RetryConfig, SerializedRequestBody, get_body_content
-from typing import Callable, List, Optional, Tuple
+from typing import Callable, List, Mapping, Optional, Tuple
+from urllib.parse import parse_qs, urlparse


 class BaseSDK:
@@ -18,7 +19,7 @@ class BaseSDK:
     def __init__(self, sdk_config: SDKConfiguration) -> None:
         self.sdk_configuration = sdk_config

-    def get_url(self, base_url, url_variables):
+    def _get_url(self, base_url, url_variables):
         sdk_url, sdk_variables = self.sdk_configuration.get_server_details()

         if base_url is None:
@@ -29,7 +30,7 @@ class BaseSDK:

         return utils.template_url(base_url, url_variables)

-    def build_request_async(
+    def _build_request_async(
         self,
         method,
         path,
@@ -48,9 +49,10 @@ class BaseSDK:
             Callable[[], Optional[SerializedRequestBody]]
         ] = None,
         url_override: Optional[str] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
     ) -> httpx.Request:
         client = self.sdk_configuration.async_client
-        return self.build_request_with_client(
+        return self._build_request_with_client(
             client,
             method,
             path,
@@ -67,9 +69,10 @@ class BaseSDK:
             timeout_ms,
             get_serialized_body,
             url_override,
+            http_headers,
         )

-    def build_request(
+    def _build_request(
         self,
         method,
         path,
@@ -88,9 +91,10 @@ class BaseSDK:
             Callable[[], Optional[SerializedRequestBody]]
         ] = None,
         url_override: Optional[str] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
     ) -> httpx.Request:
         client = self.sdk_configuration.client
-        return self.build_request_with_client(
+        return self._build_request_with_client(
             client,
             method,
             path,
@@ -107,9 +111,10 @@ class BaseSDK:
             timeout_ms,
             get_serialized_body,
             url_override,
+            http_headers,
         )

-    def build_request_with_client(
+    def _build_request_with_client(
         self,
         client,
         method,
@@ -129,13 +134,14 @@ class BaseSDK:
             Callable[[], Optional[SerializedRequestBody]]
         ] = None,
         url_override: Optional[str] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
     ) -> httpx.Request:
         query_params = {}

         url = url_override
         if url is None:
             url = utils.generate_url(
-                self.get_url(base_url, url_variables),
+                self._get_url(base_url, url_variables),
                 path,
                 request if request_has_path_params else None,
                 _globals if request_has_path_params else None,
@@ -145,6 +151,12 @@ class BaseSDK:
                 request if request_has_query_params else None,
                 _globals if request_has_query_params else None,
             )
+        else:
+            # Pick up the query parameter from the override so they can be
+            # preserved when building the request later on (necessary as of
+            # httpx 0.28).
+            parsed_override = urlparse(str(url_override))
+            query_params = parse_qs(parsed_override.query, keep_blank_values=True)

         headers = utils.get_headers(request, _globals)
         headers["Accept"] = accept_header_value
@@ -159,7 +171,7 @@ class BaseSDK:
             headers = {**headers, **security_headers}
             query_params = {**query_params, **security_query_params}

-        serialized_request_body = SerializedRequestBody("application/octet-stream")
+        serialized_request_body = SerializedRequestBody()
         if get_serialized_body is not None:
             rb = get_serialized_body()
             if request_body_required and rb is None:
@@ -178,6 +190,10 @@ class BaseSDK:
         ):
             headers["content-type"] = serialized_request_body.media_type

+        if http_headers is not None:
+            for header, value in http_headers.items():
+                headers[header] = value
+
         timeout = timeout_ms / 1000 if timeout_ms is not None else None

         return client.build_request(
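One substantive change in basesdk.py is the new else branch: when a caller passes url_override (for example, a pre-built pagination link that already carries a query string), the SDK now re-extracts those parameters before handing the URL to httpx. Per the in-code comment this became necessary as of httpx 0.28, presumably because explicitly passed params no longer merge with a query string already present on the URL. A minimal standard-library sketch of the same extraction, independent of the SDK (the URL is made up):

from urllib.parse import parse_qs, urlparse

# Hypothetical override URL that already carries query parameters.
url_override = "https://api.example.com/v1/files?page=2&page_size=50&filter="

parsed_override = urlparse(url_override)
# keep_blank_values=True preserves parameters such as "filter=" that have an empty value.
query_params = parse_qs(parsed_override.query, keep_blank_values=True)

print(query_params)  # {'page': ['2'], 'page_size': ['50'], 'filter': ['']}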
mistralai_azure/chat.py CHANGED
@@ -5,7 +5,7 @@ from mistralai_azure import models, utils
 from mistralai_azure._hooks import HookContext
 from mistralai_azure.types import OptionalNullable, UNSET
 from mistralai_azure.utils import eventstreaming
-from typing import Any, List, Optional, Union
+from typing import Any, List, Mapping, Optional, Union


 class Chat(BaseSDK):
@@ -17,7 +17,7 @@ class Chat(BaseSDK):
         messages: Union[List[models.Messages], List[models.MessagesTypedDict]],
         model: OptionalNullable[str] = "azureai",
         temperature: OptionalNullable[float] = UNSET,
-        top_p: Optional[float] = 1,
+        top_p: Optional[float] = None,
         max_tokens: OptionalNullable[int] = UNSET,
         stream: Optional[bool] = True,
         stop: Optional[Union[models.Stop, models.StopTypedDict]] = None,
@@ -37,10 +37,11 @@ class Chat(BaseSDK):
         presence_penalty: Optional[float] = None,
         frequency_penalty: Optional[float] = None,
         n: OptionalNullable[int] = UNSET,
-        safe_prompt: Optional[bool] = False,
+        safe_prompt: Optional[bool] = None,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
     ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]:
         r"""Stream chat completion

@@ -64,6 +65,7 @@ class Chat(BaseSDK):
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
         """
         base_url = None
         url_variables = None
@@ -95,7 +97,7 @@ class Chat(BaseSDK):
             safe_prompt=safe_prompt,
         )

-        req = self.build_request(
+        req = self._build_request(
            method="POST",
            path="/chat/completions#stream",
            base_url=base_url,
@@ -106,6 +108,7 @@ class Chat(BaseSDK):
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="text/event-stream",
+            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.ChatCompletionStreamRequest
@@ -165,7 +168,7 @@ class Chat(BaseSDK):
         messages: Union[List[models.Messages], List[models.MessagesTypedDict]],
         model: OptionalNullable[str] = "azureai",
         temperature: OptionalNullable[float] = UNSET,
-        top_p: Optional[float] = 1,
+        top_p: Optional[float] = None,
         max_tokens: OptionalNullable[int] = UNSET,
         stream: Optional[bool] = True,
         stop: Optional[Union[models.Stop, models.StopTypedDict]] = None,
@@ -185,10 +188,11 @@ class Chat(BaseSDK):
         presence_penalty: Optional[float] = None,
         frequency_penalty: Optional[float] = None,
         n: OptionalNullable[int] = UNSET,
-        safe_prompt: Optional[bool] = False,
+        safe_prompt: Optional[bool] = None,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
     ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]:
         r"""Stream chat completion

@@ -212,6 +216,7 @@ class Chat(BaseSDK):
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
         """
         base_url = None
         url_variables = None
@@ -243,7 +248,7 @@ class Chat(BaseSDK):
             safe_prompt=safe_prompt,
         )

-        req = self.build_request_async(
+        req = self._build_request_async(
            method="POST",
            path="/chat/completions#stream",
            base_url=base_url,
@@ -254,6 +259,7 @@ class Chat(BaseSDK):
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="text/event-stream",
+            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.ChatCompletionStreamRequest
@@ -316,7 +322,7 @@ class Chat(BaseSDK):
         ],
         model: OptionalNullable[str] = "azureai",
         temperature: OptionalNullable[float] = UNSET,
-        top_p: Optional[float] = 1,
+        top_p: Optional[float] = None,
         max_tokens: OptionalNullable[int] = UNSET,
         stream: Optional[bool] = False,
         stop: Optional[
@@ -341,10 +347,11 @@ class Chat(BaseSDK):
         presence_penalty: Optional[float] = None,
         frequency_penalty: Optional[float] = None,
         n: OptionalNullable[int] = UNSET,
-        safe_prompt: Optional[bool] = False,
+        safe_prompt: Optional[bool] = None,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
     ) -> Optional[models.ChatCompletionResponse]:
         r"""Chat Completion

@@ -366,6 +373,7 @@ class Chat(BaseSDK):
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
         """
         base_url = None
         url_variables = None
@@ -399,7 +407,7 @@ class Chat(BaseSDK):
             safe_prompt=safe_prompt,
         )

-        req = self.build_request(
+        req = self._build_request(
            method="POST",
            path="/chat/completions",
            base_url=base_url,
@@ -410,6 +418,7 @@ class Chat(BaseSDK):
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
+            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.ChatCompletionRequest
@@ -468,7 +477,7 @@ class Chat(BaseSDK):
         ],
         model: OptionalNullable[str] = "azureai",
         temperature: OptionalNullable[float] = UNSET,
-        top_p: Optional[float] = 1,
+        top_p: Optional[float] = None,
         max_tokens: OptionalNullable[int] = UNSET,
         stream: Optional[bool] = False,
         stop: Optional[
@@ -493,10 +502,11 @@ class Chat(BaseSDK):
         presence_penalty: Optional[float] = None,
         frequency_penalty: Optional[float] = None,
         n: OptionalNullable[int] = UNSET,
-        safe_prompt: Optional[bool] = False,
+        safe_prompt: Optional[bool] = None,
         retries: OptionalNullable[utils.RetryConfig] = UNSET,
         server_url: Optional[str] = None,
         timeout_ms: Optional[int] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
     ) -> Optional[models.ChatCompletionResponse]:
         r"""Chat Completion

@@ -518,6 +528,7 @@ class Chat(BaseSDK):
         :param retries: Override the default retry configuration for this method
         :param server_url: Override the default server URL for this method
         :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        :param http_headers: Additional headers to set or replace on requests.
         """
         base_url = None
         url_variables = None
@@ -551,7 +562,7 @@ class Chat(BaseSDK):
             safe_prompt=safe_prompt,
         )

-        req = self.build_request_async(
+        req = self._build_request_async(
            method="POST",
            path="/chat/completions",
            base_url=base_url,
@@ -562,6 +573,7 @@ class Chat(BaseSDK):
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
+            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.ChatCompletionRequest
mistralai_azure/models/chatcompletionrequest.py CHANGED
@@ -107,7 +107,7 @@ class ChatCompletionRequest(BaseModel):
     temperature: OptionalNullable[float] = UNSET
     r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""

-    top_p: Optional[float] = 1
+    top_p: Optional[float] = None
     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""

     max_tokens: OptionalNullable[int] = UNSET
@@ -137,7 +137,7 @@ class ChatCompletionRequest(BaseModel):
     n: OptionalNullable[int] = UNSET
     r"""Number of completions to return for each request, input tokens are only billed once."""

-    safe_prompt: Optional[bool] = False
+    safe_prompt: Optional[bool] = None
     r"""Whether to inject a safety prompt before all conversations."""

     @model_serializer(mode="wrap")
mistralai_azure/models/chatcompletionstreamrequest.py CHANGED
@@ -102,7 +102,7 @@ class ChatCompletionStreamRequest(BaseModel):
     temperature: OptionalNullable[float] = UNSET
     r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""

-    top_p: Optional[float] = 1
+    top_p: Optional[float] = None
     r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""

     max_tokens: OptionalNullable[int] = UNSET
@@ -131,7 +131,7 @@ class ChatCompletionStreamRequest(BaseModel):
     n: OptionalNullable[int] = UNSET
     r"""Number of completions to return for each request, input tokens are only billed once."""

-    safe_prompt: Optional[bool] = False
+    safe_prompt: Optional[bool] = None
     r"""Whether to inject a safety prompt before all conversations."""

     @model_serializer(mode="wrap")
mistralai_azure/sdkconfiguration.py CHANGED
@@ -28,9 +28,9 @@ class SDKConfiguration:
     server: Optional[str] = ""
     language: str = "python"
     openapi_doc_version: str = "0.0.2"
-    sdk_version: str = "1.2.3"
-    gen_version: str = "2.470.1"
-    user_agent: str = "speakeasy-sdk/python 1.2.3 2.470.1 0.0.2 mistralai_azure"
+    sdk_version: str = "1.2.6"
+    gen_version: str = "2.486.1"
+    user_agent: str = "speakeasy-sdk/python 1.2.6 2.486.1 0.0.2 mistralai_azure"
     retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET)
     timeout_ms: Optional[int] = None

mistralai_azure/utils/forms.py CHANGED
@@ -109,13 +109,12 @@ def serialize_multipart_form(
         if not field_metadata:
             continue

-        f_name = field.alias if field.alias is not None else name
+        f_name = field.alias if field.alias else name

         if field_metadata.file:
             file_fields: Dict[str, FieldInfo] = val.__class__.model_fields

             file_name = ""
-            field_name = ""
             content = None
             content_type = None

@@ -131,20 +130,15 @@ def serialize_multipart_form(
                 elif file_field_name == "content_type":
                     content_type = getattr(val, file_field_name, None)
                 else:
-                    field_name = (
-                        file_field.alias
-                        if file_field.alias is not None
-                        else file_field_name
-                    )
                     file_name = getattr(val, file_field_name)

-            if field_name == "" or file_name == "" or content is None:
+            if file_name == "" or content is None:
                 raise ValueError("invalid multipart/form-data file")

             if content_type is not None:
-                files[field_name] = (file_name, content, content_type)
+                files[f_name] = (file_name, content, content_type)
             else:
-                files[field_name] = (file_name, content)
+                files[f_name] = (file_name, content)
         elif field_metadata.json:
             files[f_name] = (
                 None,
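The net effect in serialize_multipart_form is that an uploaded file is now keyed by the alias of the outer request field (f_name, e.g. the file field of the file-upload operation) rather than by the alias of the inner file-name field, which is what a multipart/form-data server matches on. A standalone sketch of the resulting files mapping as httpx consumes it; the field names, file name, and content type are made up for illustration:

import httpx

# Shape produced by serialize_multipart_form:
#   outer field alias -> (filename, content, content_type)
files = {
    "file": ("training_data.jsonl", b'{"text": "example"}\n', "application/jsonl"),
}
data = {"purpose": "fine-tune"}  # non-file form fields travel alongside

request = httpx.Request(
    "POST", "https://api.example.com/v1/files", files=files, data=data
)
print(request.headers["content-type"])  # multipart/form-data; boundary=...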
mistralai_azure/utils/requestbodies.py CHANGED
@@ -23,7 +23,7 @@ SERIALIZATION_METHOD_TO_CONTENT_TYPE = {

 @dataclass
 class SerializedRequestBody:
-    media_type: str
+    media_type: Optional[str] = None
     content: Optional[Any] = None
     data: Optional[Any] = None
     files: Optional[Any] = None
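Making media_type optional pairs with the basesdk.py change above: the fallback SerializedRequestBody("application/octet-stream") placeholder is gone, so a request without a body no longer implies a content type. A minimal sketch of the dataclass as it reads after this change (assuming no fields beyond those shown in the hunk):

from dataclasses import dataclass
from typing import Any, Optional

@dataclass
class SerializedRequestBody:
    media_type: Optional[str] = None
    content: Optional[Any] = None
    data: Optional[Any] = None
    files: Optional[Any] = None

empty = SerializedRequestBody()  # bodyless request: no content-type header implied
json_body = SerializedRequestBody(media_type="application/json", content=b"{}")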
mistralai_gcp/_hooks/custom_user_agent.py CHANGED
@@ -5,12 +5,18 @@ import httpx

 from .types import BeforeRequestContext, BeforeRequestHook

+PREFIX = "mistral-client-python/"

 class CustomUserAgentHook(BeforeRequestHook):
     def before_request(
         self, hook_ctx: BeforeRequestContext, request: httpx.Request
     ) -> Union[httpx.Request, Exception]:
+        current = request.headers["user-agent"]
+        if current.startswith(PREFIX):
+            return request
+
         request.headers["user-agent"] = (
-            "mistral-client-python/" + request.headers["user-agent"].split(" ")[1]
+            PREFIX + current.split(" ")[1]
         )
+
         return request
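The new guard makes the hook idempotent: once the user-agent has been rewritten to the mistral-client-python/<version> form, running the hook again returns the request untouched instead of re-splitting an already-rewritten value. A self-contained sketch of the same logic against a plain httpx request; the speakeasy-style user-agent string and URL are example values:

import httpx

PREFIX = "mistral-client-python/"

def rewrite_user_agent(request: httpx.Request) -> httpx.Request:
    current = request.headers["user-agent"]
    if current.startswith(PREFIX):
        return request  # already rewritten: nothing to do
    # "speakeasy-sdk/python 1.2.6 2.486.1 0.0.2 mistralai" -> second token is the SDK version
    request.headers["user-agent"] = PREFIX + current.split(" ")[1]
    return request

req = httpx.Request(
    "POST",
    "https://api.example.com/v1/chat/completions",
    headers={"user-agent": "speakeasy-sdk/python 1.2.6 2.486.1 0.0.2 mistralai"},
)
rewrite_user_agent(req)
rewrite_user_agent(req)  # second pass is a no-op
print(req.headers["user-agent"])  # mistral-client-python/1.2.6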
mistralai_gcp/_version.py CHANGED
@@ -3,7 +3,7 @@
 import importlib.metadata

 __title__: str = "mistralai-gcp"
-__version__: str = "1.2.3"
+__version__: str = "1.2.6"

 try:
     if __package__ is not None:
mistralai_gcp/basesdk.py CHANGED
@@ -9,7 +9,8 @@ from mistralai_gcp._hooks import (
     BeforeRequestContext,
 )
 from mistralai_gcp.utils import RetryConfig, SerializedRequestBody, get_body_content
-from typing import Callable, List, Optional, Tuple
+from typing import Callable, List, Mapping, Optional, Tuple
+from urllib.parse import parse_qs, urlparse


 class BaseSDK:
@@ -18,7 +19,7 @@ class BaseSDK:
     def __init__(self, sdk_config: SDKConfiguration) -> None:
         self.sdk_configuration = sdk_config

-    def get_url(self, base_url, url_variables):
+    def _get_url(self, base_url, url_variables):
         sdk_url, sdk_variables = self.sdk_configuration.get_server_details()

         if base_url is None:
@@ -29,7 +30,7 @@ class BaseSDK:

         return utils.template_url(base_url, url_variables)

-    def build_request_async(
+    def _build_request_async(
         self,
         method,
         path,
@@ -48,9 +49,10 @@ class BaseSDK:
             Callable[[], Optional[SerializedRequestBody]]
         ] = None,
         url_override: Optional[str] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
     ) -> httpx.Request:
         client = self.sdk_configuration.async_client
-        return self.build_request_with_client(
+        return self._build_request_with_client(
             client,
             method,
             path,
@@ -67,9 +69,10 @@ class BaseSDK:
             timeout_ms,
             get_serialized_body,
             url_override,
+            http_headers,
         )

-    def build_request(
+    def _build_request(
         self,
         method,
         path,
@@ -88,9 +91,10 @@ class BaseSDK:
             Callable[[], Optional[SerializedRequestBody]]
         ] = None,
         url_override: Optional[str] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
     ) -> httpx.Request:
         client = self.sdk_configuration.client
-        return self.build_request_with_client(
+        return self._build_request_with_client(
             client,
             method,
             path,
@@ -107,9 +111,10 @@ class BaseSDK:
             timeout_ms,
             get_serialized_body,
             url_override,
+            http_headers,
         )

-    def build_request_with_client(
+    def _build_request_with_client(
         self,
         client,
         method,
@@ -129,13 +134,14 @@ class BaseSDK:
             Callable[[], Optional[SerializedRequestBody]]
         ] = None,
         url_override: Optional[str] = None,
+        http_headers: Optional[Mapping[str, str]] = None,
     ) -> httpx.Request:
         query_params = {}

         url = url_override
         if url is None:
             url = utils.generate_url(
-                self.get_url(base_url, url_variables),
+                self._get_url(base_url, url_variables),
                 path,
                 request if request_has_path_params else None,
                 _globals if request_has_path_params else None,
@@ -145,6 +151,12 @@ class BaseSDK:
                 request if request_has_query_params else None,
                 _globals if request_has_query_params else None,
             )
+        else:
+            # Pick up the query parameter from the override so they can be
+            # preserved when building the request later on (necessary as of
+            # httpx 0.28).
+            parsed_override = urlparse(str(url_override))
+            query_params = parse_qs(parsed_override.query, keep_blank_values=True)

         headers = utils.get_headers(request, _globals)
         headers["Accept"] = accept_header_value
@@ -159,7 +171,7 @@ class BaseSDK:
             headers = {**headers, **security_headers}
             query_params = {**query_params, **security_query_params}

-        serialized_request_body = SerializedRequestBody("application/octet-stream")
+        serialized_request_body = SerializedRequestBody()
         if get_serialized_body is not None:
             rb = get_serialized_body()
             if request_body_required and rb is None:
@@ -178,6 +190,10 @@ class BaseSDK:
         ):
             headers["content-type"] = serialized_request_body.media_type

+        if http_headers is not None:
+            for header, value in http_headers.items():
+                headers[header] = value
+
         timeout = timeout_ms / 1000 if timeout_ms is not None else None

         return client.build_request(
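Note the ordering inside _build_request_with_client: the http_headers loop runs after the Accept, security, and content-type headers have been computed, so caller-supplied headers replace same-named generated ones. (The user-agent is still subject to CustomUserAgentHook, which runs later as a before-request hook on the built request.) A tiny sketch of that precedence using plain dict semantics only; header names and values are illustrative:

# Order mirrors _build_request_with_client: generated headers first,
# then security headers, then caller-supplied http_headers applied last.
headers = {"Accept": "application/json", "content-type": "application/json"}
security_headers = {"Authorization": "Bearer <api-key>"}
http_headers = {"content-type": "application/json; charset=utf-8", "x-request-id": "trace-123"}

headers = {**headers, **security_headers}
for header, value in http_headers.items():
    headers[header] = value  # a same-named header is replaced

print(headers["content-type"])  # application/json; charset=utf-8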