vellum-ai 0.8.13__py3-none-any.whl → 0.8.15__py3-none-any.whl

This diff compares publicly available package versions that were released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Files changed (75)
  1. vellum/__init__.py +18 -0
  2. vellum/client.py +103 -34
  3. vellum/core/client_wrapper.py +1 -1
  4. vellum/core/http_client.py +6 -2
  5. vellum/core/pydantic_utilities.py +42 -11
  6. vellum/core/serialization.py +18 -0
  7. vellum/resources/ad_hoc/client.py +31 -10
  8. vellum/resources/deployments/client.py +13 -4
  9. vellum/resources/document_indexes/client.py +7 -2
  10. vellum/resources/documents/client.py +8 -8
  11. vellum/resources/sandboxes/client.py +7 -2
  12. vellum/resources/test_suite_runs/client.py +7 -2
  13. vellum/resources/test_suites/client.py +27 -6
  14. vellum/types/__init__.py +18 -0
  15. vellum/types/api_node_result_data.py +6 -2
  16. vellum/types/array_variable_value.py +27 -0
  17. vellum/types/array_variable_value_item.py +9 -0
  18. vellum/types/array_vellum_value.py +31 -0
  19. vellum/types/array_vellum_value_item.py +15 -1
  20. vellum/types/array_vellum_value_item_request.py +9 -0
  21. vellum/types/array_vellum_value_request.py +31 -0
  22. vellum/types/chat_history_variable_value.py +21 -0
  23. vellum/types/chat_history_vellum_value.py +25 -0
  24. vellum/types/chat_history_vellum_value_request.py +25 -0
  25. vellum/types/code_execution_node_array_result.py +6 -0
  26. vellum/types/code_execution_node_result.py +6 -0
  27. vellum/types/code_execution_node_result_data.py +6 -0
  28. vellum/types/create_test_suite_test_case_request.py +6 -0
  29. vellum/types/execute_workflow_response.py +6 -0
  30. vellum/types/execution_array_vellum_value.py +6 -0
  31. vellum/types/external_test_case_execution.py +6 -0
  32. vellum/types/external_test_case_execution_request.py +6 -0
  33. vellum/types/fulfilled_execute_workflow_workflow_result_event.py +8 -0
  34. vellum/types/fulfilled_workflow_node_result_event.py +8 -0
  35. vellum/types/initiated_workflow_node_result_event.py +6 -0
  36. vellum/types/named_test_case_array_variable_value.py +6 -0
  37. vellum/types/named_test_case_array_variable_value_request.py +8 -0
  38. vellum/types/node_input_compiled_array_value.py +6 -0
  39. vellum/types/node_output_compiled_array_value.py +6 -0
  40. vellum/types/paginated_test_suite_run_execution_list.py +6 -0
  41. vellum/types/paginated_test_suite_test_case_list.py +6 -0
  42. vellum/types/rejected_workflow_node_result_event.py +6 -0
  43. vellum/types/replace_test_suite_test_case_request.py +6 -0
  44. vellum/types/search_results_variable_value.py +21 -0
  45. vellum/types/search_results_vellum_value.py +25 -0
  46. vellum/types/search_results_vellum_value_request.py +25 -0
  47. vellum/types/streaming_workflow_node_result_event.py +8 -0
  48. vellum/types/templating_node_array_result.py +6 -0
  49. vellum/types/templating_node_result.py +6 -0
  50. vellum/types/templating_node_result_data.py +6 -0
  51. vellum/types/terminal_node_array_result.py +6 -0
  52. vellum/types/terminal_node_result.py +6 -0
  53. vellum/types/terminal_node_result_data.py +6 -0
  54. vellum/types/test_case_array_variable_value.py +6 -0
  55. vellum/types/test_suite_run_execution.py +6 -0
  56. vellum/types/test_suite_run_execution_array_output.py +6 -0
  57. vellum/types/test_suite_run_external_exec_config.py +6 -0
  58. vellum/types/test_suite_run_external_exec_config_data.py +6 -0
  59. vellum/types/test_suite_run_external_exec_config_data_request.py +8 -0
  60. vellum/types/test_suite_run_external_exec_config_request.py +8 -0
  61. vellum/types/test_suite_run_read.py +6 -0
  62. vellum/types/test_suite_test_case.py +6 -0
  63. vellum/types/test_suite_test_case_create_bulk_operation_request.py +8 -0
  64. vellum/types/test_suite_test_case_replace_bulk_operation_request.py +8 -0
  65. vellum/types/test_suite_test_case_upsert_bulk_operation_request.py +8 -0
  66. vellum/types/upsert_test_suite_test_case_request.py +6 -0
  67. vellum/types/workflow_execution_node_result_event.py +8 -0
  68. vellum/types/workflow_execution_workflow_result_event.py +8 -0
  69. vellum/types/workflow_output_array.py +6 -0
  70. vellum/types/workflow_result_event.py +8 -0
  71. vellum/types/workflow_result_event_output_data_array.py +6 -0
  72. {vellum_ai-0.8.13.dist-info → vellum_ai-0.8.15.dist-info}/METADATA +1 -1
  73. {vellum_ai-0.8.13.dist-info → vellum_ai-0.8.15.dist-info}/RECORD +75 -66
  74. {vellum_ai-0.8.13.dist-info → vellum_ai-0.8.15.dist-info}/LICENSE +0 -0
  75. {vellum_ai-0.8.13.dist-info → vellum_ai-0.8.15.dist-info}/WHEEL +0 -0
vellum/__init__.py CHANGED
@@ -14,9 +14,12 @@ from .types import (
     ArrayChatMessageContentItem,
     ArrayChatMessageContentItemRequest,
     ArrayChatMessageContentRequest,
+    ArrayVariableValue,
     ArrayVariableValueItem,
+    ArrayVellumValue,
     ArrayVellumValueItem,
     ArrayVellumValueItemRequest,
+    ArrayVellumValueRequest,
     BasicVectorizerIntfloatMultilingualE5Large,
     BasicVectorizerIntfloatMultilingualE5LargeRequest,
     BasicVectorizerSentenceTransformersMultiQaMpnetBaseCosV1,
@@ -24,6 +27,9 @@ from .types import (
     BasicVectorizerSentenceTransformersMultiQaMpnetBaseDotV1,
     BasicVectorizerSentenceTransformersMultiQaMpnetBaseDotV1Request,
     ChatHistoryInputRequest,
+    ChatHistoryVariableValue,
+    ChatHistoryVellumValue,
+    ChatHistoryVellumValueRequest,
     ChatMessage,
     ChatMessageContent,
     ChatMessageContentRequest,
@@ -258,6 +264,9 @@ from .types import (
     SearchResultMeta,
     SearchResultMetaRequest,
     SearchResultRequest,
+    SearchResultsVariableValue,
+    SearchResultsVellumValue,
+    SearchResultsVellumValueRequest,
     SearchWeightsRequest,
     SentenceChunkerConfig,
     SentenceChunkerConfigRequest,
@@ -452,9 +461,12 @@ __all__ = [
     "ArrayChatMessageContentItem",
     "ArrayChatMessageContentItemRequest",
     "ArrayChatMessageContentRequest",
+    "ArrayVariableValue",
     "ArrayVariableValueItem",
+    "ArrayVellumValue",
     "ArrayVellumValueItem",
     "ArrayVellumValueItemRequest",
+    "ArrayVellumValueRequest",
     "AsyncVellum",
     "BadRequestError",
     "BasicVectorizerIntfloatMultilingualE5Large",
@@ -464,6 +476,9 @@ __all__ = [
     "BasicVectorizerSentenceTransformersMultiQaMpnetBaseDotV1",
     "BasicVectorizerSentenceTransformersMultiQaMpnetBaseDotV1Request",
     "ChatHistoryInputRequest",
+    "ChatHistoryVariableValue",
+    "ChatHistoryVellumValue",
+    "ChatHistoryVellumValueRequest",
     "ChatMessage",
     "ChatMessageContent",
     "ChatMessageContentRequest",
@@ -703,6 +718,9 @@ __all__ = [
     "SearchResultMeta",
     "SearchResultMetaRequest",
     "SearchResultRequest",
+    "SearchResultsVariableValue",
+    "SearchResultsVellumValue",
+    "SearchResultsVellumValueRequest",
     "SearchWeightsRequest",
     "SentenceChunkerConfig",
     "SentenceChunkerConfigRequest",
vellum/client.py CHANGED
@@ -19,6 +19,7 @@ from .types.prompt_deployment_expand_meta_request import PromptDeploymentExpandM
 from .types.raw_prompt_execution_overrides_request import RawPromptExecutionOverridesRequest
 from .core.request_options import RequestOptions
 from .types.execute_prompt_response import ExecutePromptResponse
+from .core.serialization import convert_and_respect_annotation_metadata
 from .core.pydantic_utilities import parse_obj_as
 from .errors.bad_request_error import BadRequestError
 from .errors.forbidden_error import ForbiddenError
@@ -197,13 +198,19 @@ class Vellum:
             base_url=self._client_wrapper.get_environment().predict,
             method="POST",
             json={
-                "inputs": inputs,
+                "inputs": convert_and_respect_annotation_metadata(
+                    object_=inputs, annotation=typing.Sequence[PromptDeploymentInputRequest], direction="write"
+                ),
                 "prompt_deployment_id": prompt_deployment_id,
                 "prompt_deployment_name": prompt_deployment_name,
                 "release_tag": release_tag,
                 "external_id": external_id,
-                "expand_meta": expand_meta,
-                "raw_overrides": raw_overrides,
+                "expand_meta": convert_and_respect_annotation_metadata(
+                    object_=expand_meta, annotation=PromptDeploymentExpandMetaRequest, direction="write"
+                ),
+                "raw_overrides": convert_and_respect_annotation_metadata(
+                    object_=raw_overrides, annotation=RawPromptExecutionOverridesRequest, direction="write"
+                ),
                 "expand_raw": expand_raw,
                 "metadata": metadata,
             },
@@ -366,13 +373,19 @@ class Vellum:
             base_url=self._client_wrapper.get_environment().predict,
             method="POST",
             json={
-                "inputs": inputs,
+                "inputs": convert_and_respect_annotation_metadata(
+                    object_=inputs, annotation=typing.Sequence[PromptDeploymentInputRequest], direction="write"
+                ),
                 "prompt_deployment_id": prompt_deployment_id,
                 "prompt_deployment_name": prompt_deployment_name,
                 "release_tag": release_tag,
                 "external_id": external_id,
-                "expand_meta": expand_meta,
-                "raw_overrides": raw_overrides,
+                "expand_meta": convert_and_respect_annotation_metadata(
+                    object_=expand_meta, annotation=PromptDeploymentExpandMetaRequest, direction="write"
+                ),
+                "raw_overrides": convert_and_respect_annotation_metadata(
+                    object_=raw_overrides, annotation=RawPromptExecutionOverridesRequest, direction="write"
+                ),
                 "expand_raw": expand_raw,
                 "metadata": metadata,
             },
@@ -508,8 +521,12 @@ class Vellum:
             base_url=self._client_wrapper.get_environment().predict,
             method="POST",
             json={
-                "inputs": inputs,
-                "expand_meta": expand_meta,
+                "inputs": convert_and_respect_annotation_metadata(
+                    object_=inputs, annotation=typing.Sequence[WorkflowRequestInputRequest], direction="write"
+                ),
+                "expand_meta": convert_and_respect_annotation_metadata(
+                    object_=expand_meta, annotation=WorkflowExpandMetaRequest, direction="write"
+                ),
                 "workflow_deployment_id": workflow_deployment_id,
                 "workflow_deployment_name": workflow_deployment_name,
                 "release_tag": release_tag,
@@ -649,8 +666,12 @@ class Vellum:
             base_url=self._client_wrapper.get_environment().predict,
             method="POST",
             json={
-                "inputs": inputs,
-                "expand_meta": expand_meta,
+                "inputs": convert_and_respect_annotation_metadata(
+                    object_=inputs, annotation=typing.Sequence[WorkflowRequestInputRequest], direction="write"
+                ),
+                "expand_meta": convert_and_respect_annotation_metadata(
+                    object_=expand_meta, annotation=WorkflowExpandMetaRequest, direction="write"
+                ),
                 "workflow_deployment_id": workflow_deployment_id,
                 "workflow_deployment_name": workflow_deployment_name,
                 "release_tag": release_tag,
@@ -772,8 +793,12 @@ class Vellum:
             json={
                 "deployment_id": deployment_id,
                 "deployment_name": deployment_name,
-                "requests": requests,
-                "options": options,
+                "requests": convert_and_respect_annotation_metadata(
+                    object_=requests, annotation=typing.Sequence[GenerateRequest], direction="write"
+                ),
+                "options": convert_and_respect_annotation_metadata(
+                    object_=options, annotation=GenerateOptionsRequest, direction="write"
+                ),
             },
             request_options=request_options,
             omit=OMIT,
@@ -915,8 +940,12 @@ class Vellum:
             json={
                 "deployment_id": deployment_id,
                 "deployment_name": deployment_name,
-                "requests": requests,
-                "options": options,
+                "requests": convert_and_respect_annotation_metadata(
+                    object_=requests, annotation=typing.Sequence[GenerateRequest], direction="write"
+                ),
+                "options": convert_and_respect_annotation_metadata(
+                    object_=options, annotation=GenerateOptionsRequest, direction="write"
+                ),
             },
             request_options=request_options,
             omit=OMIT,
@@ -1036,7 +1065,9 @@ class Vellum:
                 "index_id": index_id,
                 "index_name": index_name,
                 "query": query,
-                "options": options,
+                "options": convert_and_respect_annotation_metadata(
+                    object_=options, annotation=SearchRequestOptionsRequest, direction="write"
+                ),
             },
             request_options=request_options,
             omit=OMIT,
@@ -1132,7 +1163,9 @@ class Vellum:
             json={
                 "deployment_id": deployment_id,
                 "deployment_name": deployment_name,
-                "actuals": actuals,
+                "actuals": convert_and_respect_annotation_metadata(
+                    object_=actuals, annotation=typing.Sequence[SubmitCompletionActualRequest], direction="write"
+                ),
             },
             request_options=request_options,
             omit=OMIT,
@@ -1222,7 +1255,9 @@ class Vellum:
             base_url=self._client_wrapper.get_environment().predict,
             method="POST",
             json={
-                "actuals": actuals,
+                "actuals": convert_and_respect_annotation_metadata(
+                    object_=actuals, annotation=typing.Sequence[SubmitWorkflowExecutionActualRequest], direction="write"
+                ),
                 "execution_id": execution_id,
                 "external_id": external_id,
             },
@@ -1386,13 +1421,19 @@ class AsyncVellum:
             base_url=self._client_wrapper.get_environment().predict,
             method="POST",
             json={
-                "inputs": inputs,
+                "inputs": convert_and_respect_annotation_metadata(
+                    object_=inputs, annotation=typing.Sequence[PromptDeploymentInputRequest], direction="write"
+                ),
                 "prompt_deployment_id": prompt_deployment_id,
                 "prompt_deployment_name": prompt_deployment_name,
                 "release_tag": release_tag,
                 "external_id": external_id,
-                "expand_meta": expand_meta,
-                "raw_overrides": raw_overrides,
+                "expand_meta": convert_and_respect_annotation_metadata(
+                    object_=expand_meta, annotation=PromptDeploymentExpandMetaRequest, direction="write"
+                ),
+                "raw_overrides": convert_and_respect_annotation_metadata(
+                    object_=raw_overrides, annotation=RawPromptExecutionOverridesRequest, direction="write"
+                ),
                 "expand_raw": expand_raw,
                 "metadata": metadata,
             },
@@ -1563,13 +1604,19 @@ class AsyncVellum:
             base_url=self._client_wrapper.get_environment().predict,
             method="POST",
             json={
-                "inputs": inputs,
+                "inputs": convert_and_respect_annotation_metadata(
+                    object_=inputs, annotation=typing.Sequence[PromptDeploymentInputRequest], direction="write"
+                ),
                 "prompt_deployment_id": prompt_deployment_id,
                 "prompt_deployment_name": prompt_deployment_name,
                 "release_tag": release_tag,
                 "external_id": external_id,
-                "expand_meta": expand_meta,
-                "raw_overrides": raw_overrides,
+                "expand_meta": convert_and_respect_annotation_metadata(
+                    object_=expand_meta, annotation=PromptDeploymentExpandMetaRequest, direction="write"
+                ),
+                "raw_overrides": convert_and_respect_annotation_metadata(
+                    object_=raw_overrides, annotation=RawPromptExecutionOverridesRequest, direction="write"
+                ),
                 "expand_raw": expand_raw,
                 "metadata": metadata,
             },
@@ -1713,8 +1760,12 @@ class AsyncVellum:
             base_url=self._client_wrapper.get_environment().predict,
             method="POST",
             json={
-                "inputs": inputs,
-                "expand_meta": expand_meta,
+                "inputs": convert_and_respect_annotation_metadata(
+                    object_=inputs, annotation=typing.Sequence[WorkflowRequestInputRequest], direction="write"
+                ),
+                "expand_meta": convert_and_respect_annotation_metadata(
+                    object_=expand_meta, annotation=WorkflowExpandMetaRequest, direction="write"
+                ),
                 "workflow_deployment_id": workflow_deployment_id,
                 "workflow_deployment_name": workflow_deployment_name,
                 "release_tag": release_tag,
@@ -1862,8 +1913,12 @@ class AsyncVellum:
             base_url=self._client_wrapper.get_environment().predict,
             method="POST",
             json={
-                "inputs": inputs,
-                "expand_meta": expand_meta,
+                "inputs": convert_and_respect_annotation_metadata(
+                    object_=inputs, annotation=typing.Sequence[WorkflowRequestInputRequest], direction="write"
+                ),
+                "expand_meta": convert_and_respect_annotation_metadata(
+                    object_=expand_meta, annotation=WorkflowExpandMetaRequest, direction="write"
+                ),
                 "workflow_deployment_id": workflow_deployment_id,
                 "workflow_deployment_name": workflow_deployment_name,
                 "release_tag": release_tag,
@@ -1993,8 +2048,12 @@ class AsyncVellum:
             json={
                 "deployment_id": deployment_id,
                 "deployment_name": deployment_name,
-                "requests": requests,
-                "options": options,
+                "requests": convert_and_respect_annotation_metadata(
+                    object_=requests, annotation=typing.Sequence[GenerateRequest], direction="write"
+                ),
+                "options": convert_and_respect_annotation_metadata(
+                    object_=options, annotation=GenerateOptionsRequest, direction="write"
+                ),
             },
             request_options=request_options,
             omit=OMIT,
@@ -2144,8 +2203,12 @@ class AsyncVellum:
             json={
                 "deployment_id": deployment_id,
                 "deployment_name": deployment_name,
-                "requests": requests,
-                "options": options,
+                "requests": convert_and_respect_annotation_metadata(
+                    object_=requests, annotation=typing.Sequence[GenerateRequest], direction="write"
+                ),
+                "options": convert_and_respect_annotation_metadata(
+                    object_=options, annotation=GenerateOptionsRequest, direction="write"
+                ),
             },
             request_options=request_options,
             omit=OMIT,
@@ -2273,7 +2336,9 @@ class AsyncVellum:
                 "index_id": index_id,
                 "index_name": index_name,
                 "query": query,
-                "options": options,
+                "options": convert_and_respect_annotation_metadata(
+                    object_=options, annotation=SearchRequestOptionsRequest, direction="write"
+                ),
             },
             request_options=request_options,
             omit=OMIT,
@@ -2377,7 +2442,9 @@ class AsyncVellum:
             json={
                 "deployment_id": deployment_id,
                 "deployment_name": deployment_name,
-                "actuals": actuals,
+                "actuals": convert_and_respect_annotation_metadata(
+                    object_=actuals, annotation=typing.Sequence[SubmitCompletionActualRequest], direction="write"
+                ),
             },
             request_options=request_options,
             omit=OMIT,
@@ -2475,7 +2542,9 @@ class AsyncVellum:
             base_url=self._client_wrapper.get_environment().predict,
             method="POST",
             json={
-                "actuals": actuals,
+                "actuals": convert_and_respect_annotation_metadata(
+                    object_=actuals, annotation=typing.Sequence[SubmitWorkflowExecutionActualRequest], direction="write"
+                ),
                 "execution_id": execution_id,
                 "external_id": external_id,
             },
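
In each of these hunks, request values that are Pydantic models or sequences of models are now routed through convert_and_respect_annotation_metadata(..., direction="write") before being placed in the JSON body, instead of being passed to the serializer as-is. The sketch below illustrates the general idea only; it is not the SDK's implementation, and ExamplePayload and convert_for_write are hypothetical names (assumes Pydantic v2):

    import typing
    from collections.abc import Sequence

    import pydantic  # assumes Pydantic v2 for model_dump(by_alias=True)


    class ExamplePayload(pydantic.BaseModel):
        # Hypothetical request model; the field alias stands in for the
        # annotation metadata the real helper reads.
        display_name: str = pydantic.Field(alias="displayName")
        model_config = pydantic.ConfigDict(populate_by_name=True)


    def convert_for_write(object_: typing.Any) -> typing.Any:
        # Illustrative stand-in for the "write" direction: turn models (or
        # sequences of models) into wire-format dicts keyed by their aliases.
        if isinstance(object_, pydantic.BaseModel):
            return object_.model_dump(by_alias=True)
        if isinstance(object_, Sequence) and not isinstance(object_, (str, bytes)):
            return [convert_for_write(item) for item in object_]
        return object_


    json_body = {"inputs": convert_for_write([ExamplePayload(display_name="hello")])}
    print(json_body)  # {'inputs': [{'displayName': 'hello'}]}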
vellum/core/client_wrapper.py CHANGED
@@ -17,7 +17,7 @@ class BaseClientWrapper:
         headers: typing.Dict[str, str] = {
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "vellum-ai",
-            "X-Fern-SDK-Version": "0.8.13",
+            "X-Fern-SDK-Version": "0.8.15",
         }
         headers["X_API_KEY"] = self.api_key
         return headers
vellum/core/http_client.py CHANGED
@@ -224,7 +224,9 @@ class HttpClient:
            json=json_body,
            data=data_body,
            content=content,
-            files=convert_file_dict_to_httpx_tuples(remove_none_from_dict(files)) if files is not None else None,
+            files=convert_file_dict_to_httpx_tuples(remove_none_from_dict(files))
+            if (files is not None and files is not omit)
+            else None,
            timeout=timeout,
        )
 
@@ -306,7 +308,9 @@ class HttpClient:
            json=json_body,
            data=data_body,
            content=content,
-            files=convert_file_dict_to_httpx_tuples(remove_none_from_dict(files)) if files is not None else None,
+            files=convert_file_dict_to_httpx_tuples(remove_none_from_dict(files))
+            if (files is not None and files is not omit)
+            else None,
            timeout=timeout,
        ) as stream:
            yield stream
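
The files argument is now also checked against the omit sentinel, not just None, before being converted into httpx file tuples. A minimal sketch of that sentinel-guard pattern, with illustrative names rather than the SDK's exact definitions:

    import typing

    # A unique module-level object distinguishes "argument never supplied" from an explicit None.
    OMIT = typing.cast(typing.Any, object())


    def build_files_kwarg(files: typing.Any) -> typing.Optional[typing.Dict[str, bytes]]:
        # Only build the multipart payload when the caller actually supplied files.
        if files is not None and files is not OMIT:
            return dict(files)
        return None


    print(build_files_kwarg(OMIT))                   # None: parameter was omitted
    print(build_files_kwarg({"file": b"contents"}))  # {'file': b'contents'}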
vellum/core/pydantic_utilities.py CHANGED
@@ -10,6 +10,7 @@ import typing_extensions
 import pydantic
 
 from .datetime_utils import serialize_datetime
+from .serialization import convert_and_respect_annotation_metadata
 
 IS_PYDANTIC_V2 = pydantic.VERSION.startswith("2.")
 
@@ -56,11 +57,12 @@ Model = typing.TypeVar("Model", bound=pydantic.BaseModel)
 
 
 def parse_obj_as(type_: typing.Type[T], object_: typing.Any) -> T:
+    dealiased_object = convert_and_respect_annotation_metadata(object_=object_, annotation=type_, direction="read")
     if IS_PYDANTIC_V2:
         adapter = pydantic.TypeAdapter(type_)  # type: ignore # Pydantic v2
-        return adapter.validate_python(object_)
+        return adapter.validate_python(dealiased_object)
     else:
-        return pydantic.parse_obj_as(type_, object_)
+        return pydantic.parse_obj_as(type_, dealiased_object)
 
 
 def to_jsonable_with_fallback(
@@ -75,13 +77,40 @@ def to_jsonable_with_fallback(
 
 
 class UniversalBaseModel(pydantic.BaseModel):
-    class Config:
-        populate_by_name = True
-        smart_union = True
-        allow_population_by_field_name = True
-        json_encoders = {dt.datetime: serialize_datetime}
-        # Allow fields begining with `model_` to be used in the model
-        protected_namespaces = ()
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
+            # Allow fields begining with `model_` to be used in the model
+            protected_namespaces=(),
+        )  # type: ignore # Pydantic v2
+
+        @pydantic.model_serializer(mode="wrap", when_used="json")  # type: ignore # Pydantic v2
+        def serialize_model(self, handler: pydantic.SerializerFunctionWrapHandler) -> typing.Any:  # type: ignore # Pydantic v2
+            serialized = handler(self)
+            data = {k: serialize_datetime(v) if isinstance(v, dt.datetime) else v for k, v in serialized.items()}
+            return data
+
+    else:
+
+        class Config:
+            smart_union = True
+            json_encoders = {dt.datetime: serialize_datetime}
+
+    @classmethod
+    def model_construct(
+        cls: typing.Type["Model"], _fields_set: typing.Optional[typing.Set[str]] = None, **values: typing.Any
+    ) -> "Model":
+        dealiased_object = convert_and_respect_annotation_metadata(object_=values, annotation=cls, direction="read")
+        return cls.construct(_fields_set, **dealiased_object)
+
+    @classmethod
+    def construct(
+        cls: typing.Type["Model"], _fields_set: typing.Optional[typing.Set[str]] = None, **values: typing.Any
+    ) -> "Model":
+        dealiased_object = convert_and_respect_annotation_metadata(object_=values, annotation=cls, direction="read")
+        if IS_PYDANTIC_V2:
+            return super().model_construct(_fields_set, **dealiased_object)  # type: ignore # Pydantic v2
+        else:
+            return super().construct(_fields_set, **dealiased_object)
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {
@@ -117,7 +146,7 @@ class UniversalBaseModel(pydantic.BaseModel):
             "exclude_none": True,
             "exclude_unset": False,
         }
-        return deep_union_pydantic_dicts(
+        dict_dump = deep_union_pydantic_dicts(
             super().model_dump(**kwargs_with_defaults_exclude_unset),  # type: ignore # Pydantic v2
             super().model_dump(**kwargs_with_defaults_exclude_none),  # type: ignore # Pydantic v2
         )
@@ -143,7 +172,9 @@ class UniversalBaseModel(pydantic.BaseModel):
            **kwargs,
        }
 
-        return super().dict(**kwargs_with_defaults_exclude_unset_include_fields)
+        dict_dump = super().dict(**kwargs_with_defaults_exclude_unset_include_fields)
+
+        return convert_and_respect_annotation_metadata(object_=dict_dump, annotation=self.__class__, direction="write")
 
 
 def deep_union_pydantic_dicts(
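
UniversalBaseModel now branches on the installed Pydantic major version at class-definition time instead of relying on a single v1-style Config class. A minimal sketch of that pattern, assuming only the pydantic package is installed (DemoModel is an illustrative name, not the SDK's class):

    import datetime as dt
    import typing

    import pydantic

    # Same version probe the diff uses.
    IS_PYDANTIC_V2 = pydantic.VERSION.startswith("2.")


    class DemoModel(pydantic.BaseModel):
        if IS_PYDANTIC_V2:
            # Pydantic v2: configure via ConfigDict; empty protected_namespaces lets
            # fields whose names start with "model_" coexist with BaseModel internals.
            model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(protected_namespaces=())
        else:

            class Config:
                # Pydantic v1: the equivalent knobs live on an inner Config class.
                smart_union = True
                json_encoders = {dt.datetime: dt.datetime.isoformat}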
vellum/core/serialization.py CHANGED
@@ -71,6 +71,24 @@ def convert_and_respect_annotation_metadata(
     if typing_extensions.is_typeddict(clean_type) and isinstance(object_, typing.Mapping):
         return _convert_mapping(object_, clean_type, direction)
 
+    if (
+        typing_extensions.get_origin(clean_type) == typing.Dict
+        or typing_extensions.get_origin(clean_type) == dict
+        or clean_type == typing.Dict
+    ) and isinstance(object_, typing.Dict):
+        key_type = typing_extensions.get_args(clean_type)[0]
+        value_type = typing_extensions.get_args(clean_type)[1]
+
+        return {
+            key: convert_and_respect_annotation_metadata(
+                object_=value,
+                annotation=annotation,
+                inner_type=value_type,
+                direction=direction,
+            )
+            for key, value in object_.items()
+        }
+
     # If you're iterating on a string, do not bother to coerce it to a sequence.
     if not isinstance(object_, str):
         if (
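
With this branch, dictionary-typed annotations are now walked value by value, and each value is re-converted against the Dict annotation's value type. A standalone toy illustrating that recursion over a typing.Dict annotation (not the SDK's helper, which also handles aliases and other container types):

    import typing

    import typing_extensions


    def convert_dict_values(object_: typing.Any, annotation: typing.Any) -> typing.Any:
        # When the annotation is Dict[K, V] and the value is a dict, recurse into each
        # value with V as the expected type; leaves pass through unchanged in this toy.
        origin = typing_extensions.get_origin(annotation)
        args = typing_extensions.get_args(annotation)
        if origin in (dict, typing.Dict) and isinstance(object_, dict) and len(args) == 2:
            _key_type, value_type = args
            return {key: convert_dict_values(value, value_type) for key, value in object_.items()}
        return object_


    nested = {"outer": {"inner": 1}}
    print(convert_dict_values(nested, typing.Dict[str, typing.Dict[str, int]]))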
vellum/resources/ad_hoc/client.py CHANGED
@@ -9,6 +9,7 @@ from ...types.prompt_block_request import PromptBlockRequest
 from ...types.ad_hoc_expand_meta_request import AdHocExpandMetaRequest
 from ...core.request_options import RequestOptions
 from ...types.ad_hoc_execute_prompt_event import AdHocExecutePromptEvent
+from ...core.serialization import convert_and_respect_annotation_metadata
 from ...core.pydantic_utilities import parse_obj_as
 import json
 from ...errors.bad_request_error import BadRequestError
@@ -133,11 +134,21 @@ class AdHocClient:
             method="POST",
             json={
                 "ml_model": ml_model,
-                "input_values": input_values,
-                "input_variables": input_variables,
-                "parameters": parameters,
-                "blocks": blocks,
-                "expand_meta": expand_meta,
+                "input_values": convert_and_respect_annotation_metadata(
+                    object_=input_values, annotation=typing.Sequence[PromptRequestInputRequest], direction="write"
+                ),
+                "input_variables": convert_and_respect_annotation_metadata(
+                    object_=input_variables, annotation=typing.Sequence[VellumVariableRequest], direction="write"
+                ),
+                "parameters": convert_and_respect_annotation_metadata(
+                    object_=parameters, annotation=PromptParametersRequest, direction="write"
+                ),
+                "blocks": convert_and_respect_annotation_metadata(
+                    object_=blocks, annotation=typing.Sequence[PromptBlockRequest], direction="write"
+                ),
+                "expand_meta": convert_and_respect_annotation_metadata(
+                    object_=expand_meta, annotation=AdHocExpandMetaRequest, direction="write"
+                ),
             },
             request_options=request_options,
             omit=OMIT,
@@ -314,11 +325,21 @@ class AsyncAdHocClient:
             method="POST",
             json={
                 "ml_model": ml_model,
-                "input_values": input_values,
-                "input_variables": input_variables,
-                "parameters": parameters,
-                "blocks": blocks,
-                "expand_meta": expand_meta,
+                "input_values": convert_and_respect_annotation_metadata(
+                    object_=input_values, annotation=typing.Sequence[PromptRequestInputRequest], direction="write"
+                ),
+                "input_variables": convert_and_respect_annotation_metadata(
+                    object_=input_variables, annotation=typing.Sequence[VellumVariableRequest], direction="write"
+                ),
+                "parameters": convert_and_respect_annotation_metadata(
+                    object_=parameters, annotation=PromptParametersRequest, direction="write"
+                ),
+                "blocks": convert_and_respect_annotation_metadata(
+                    object_=blocks, annotation=typing.Sequence[PromptBlockRequest], direction="write"
+                ),
+                "expand_meta": convert_and_respect_annotation_metadata(
+                    object_=expand_meta, annotation=AdHocExpandMetaRequest, direction="write"
+                ),
             },
             request_options=request_options,
             omit=OMIT,
vellum/resources/deployments/client.py CHANGED
@@ -14,6 +14,7 @@ from ...types.deployment_release_tag_read import DeploymentReleaseTagRead
 from ...types.prompt_deployment_input_request import PromptDeploymentInputRequest
 from ...types.compile_prompt_deployment_expand_meta_request import CompilePromptDeploymentExpandMetaRequest
 from ...types.deployment_provider_payload_response import DeploymentProviderPayloadResponse
+from ...core.serialization import convert_and_respect_annotation_metadata
 from ...errors.bad_request_error import BadRequestError
 from ...errors.forbidden_error import ForbiddenError
 from ...errors.not_found_error import NotFoundError
@@ -334,9 +335,13 @@ class DeploymentsClient:
             json={
                 "deployment_id": deployment_id,
                 "deployment_name": deployment_name,
-                "inputs": inputs,
+                "inputs": convert_and_respect_annotation_metadata(
+                    object_=inputs, annotation=typing.Sequence[PromptDeploymentInputRequest], direction="write"
+                ),
                 "release_tag": release_tag,
-                "expand_meta": expand_meta,
+                "expand_meta": convert_and_respect_annotation_metadata(
+                    object_=expand_meta, annotation=CompilePromptDeploymentExpandMetaRequest, direction="write"
+                ),
             },
             request_options=request_options,
             omit=OMIT,
@@ -746,9 +751,13 @@ class AsyncDeploymentsClient:
             json={
                 "deployment_id": deployment_id,
                 "deployment_name": deployment_name,
-                "inputs": inputs,
+                "inputs": convert_and_respect_annotation_metadata(
+                    object_=inputs, annotation=typing.Sequence[PromptDeploymentInputRequest], direction="write"
+                ),
                 "release_tag": release_tag,
-                "expand_meta": expand_meta,
+                "expand_meta": convert_and_respect_annotation_metadata(
+                    object_=expand_meta, annotation=CompilePromptDeploymentExpandMetaRequest, direction="write"
+                ),
             },
             request_options=request_options,
             omit=OMIT,
vellum/resources/document_indexes/client.py CHANGED
@@ -12,6 +12,7 @@ from ...types.document_index_indexing_config_request import DocumentIndexIndexin
 from ...types.entity_status import EntityStatus
 from ...types.environment_enum import EnvironmentEnum
 from ...types.document_index_read import DocumentIndexRead
+from ...core.serialization import convert_and_respect_annotation_metadata
 from ...core.jsonable_encoder import jsonable_encoder
 from ...core.client_wrapper import AsyncClientWrapper
 
@@ -191,7 +192,9 @@ class DocumentIndexesClient:
                 "name": name,
                 "status": status,
                 "environment": environment,
-                "indexing_config": indexing_config,
+                "indexing_config": convert_and_respect_annotation_metadata(
+                    object_=indexing_config, annotation=DocumentIndexIndexingConfigRequest, direction="write"
+                ),
                 "copy_documents_from_index_id": copy_documents_from_index_id,
             },
             request_options=request_options,
@@ -740,7 +743,9 @@ class AsyncDocumentIndexesClient:
                 "name": name,
                 "status": status,
                 "environment": environment,
-                "indexing_config": indexing_config,
+                "indexing_config": convert_and_respect_annotation_metadata(
+                    object_=indexing_config, annotation=DocumentIndexIndexingConfigRequest, direction="write"
+                ),
                 "copy_documents_from_index_id": copy_documents_from_index_id,
             },
             request_options=request_options,
vellum/resources/documents/client.py CHANGED
@@ -261,10 +261,10 @@ class DocumentsClient:
         *,
         label: str,
         contents: core.File,
-        add_to_index_names: typing.Optional[typing.List[str]] = None,
-        external_id: typing.Optional[str] = None,
-        keywords: typing.Optional[typing.List[str]] = None,
-        metadata: typing.Optional[str] = None,
+        add_to_index_names: typing.Optional[typing.List[str]] = OMIT,
+        external_id: typing.Optional[str] = OMIT,
+        keywords: typing.Optional[typing.List[str]] = OMIT,
+        metadata: typing.Optional[str] = OMIT,
         request_options: typing.Optional[RequestOptions] = None,
     ) -> UploadDocumentResponse:
         """
@@ -653,10 +653,10 @@ class AsyncDocumentsClient:
         *,
         label: str,
         contents: core.File,
-        add_to_index_names: typing.Optional[typing.List[str]] = None,
-        external_id: typing.Optional[str] = None,
-        keywords: typing.Optional[typing.List[str]] = None,
-        metadata: typing.Optional[str] = None,
+        add_to_index_names: typing.Optional[typing.List[str]] = OMIT,
+        external_id: typing.Optional[str] = OMIT,
+        keywords: typing.Optional[typing.List[str]] = OMIT,
+        metadata: typing.Optional[str] = OMIT,
         request_options: typing.Optional[RequestOptions] = None,
     ) -> UploadDocumentResponse:
         """