llama-cloud 0.1.14__py3-none-any.whl → 0.1.16__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of llama-cloud might be problematic.

Files changed (37)
  1. llama_cloud/__init__.py +8 -28
  2. llama_cloud/resources/evals/client.py +0 -643
  3. llama_cloud/resources/llama_extract/client.py +168 -6
  4. llama_cloud/resources/parsing/client.py +0 -8
  5. llama_cloud/resources/pipelines/client.py +10 -371
  6. llama_cloud/resources/projects/client.py +72 -923
  7. llama_cloud/resources/retrievers/client.py +124 -0
  8. llama_cloud/types/__init__.py +8 -28
  9. llama_cloud/types/chunk_mode.py +4 -0
  10. llama_cloud/types/extract_config.py +0 -3
  11. llama_cloud/types/{local_eval.py → extract_job_create_batch.py} +9 -14
  12. llama_cloud/types/extract_job_create_batch_data_schema_override.py +9 -0
  13. llama_cloud/types/extract_job_create_batch_data_schema_override_zero_value.py +7 -0
  14. llama_cloud/types/extract_mode.py +9 -1
  15. llama_cloud/types/llama_parse_parameters.py +0 -1
  16. llama_cloud/types/{local_eval_results.py → paginated_extract_runs_response.py} +7 -8
  17. llama_cloud/types/prompt_conf.py +1 -0
  18. llama_cloud/types/report_block.py +1 -0
  19. llama_cloud/types/struct_mode.py +4 -0
  20. llama_cloud/types/struct_parse_conf.py +6 -0
  21. llama_cloud/types/usage.py +2 -1
  22. {llama_cloud-0.1.14.dist-info → llama_cloud-0.1.16.dist-info}/METADATA +1 -1
  23. {llama_cloud-0.1.14.dist-info → llama_cloud-0.1.16.dist-info}/RECORD +25 -35
  24. llama_cloud/types/eval_dataset.py +0 -40
  25. llama_cloud/types/eval_dataset_job_params.py +0 -39
  26. llama_cloud/types/eval_dataset_job_record.py +0 -58
  27. llama_cloud/types/eval_execution_params_override.py +0 -37
  28. llama_cloud/types/eval_metric.py +0 -17
  29. llama_cloud/types/eval_question.py +0 -38
  30. llama_cloud/types/eval_question_create.py +0 -31
  31. llama_cloud/types/eval_question_result.py +0 -52
  32. llama_cloud/types/local_eval_sets.py +0 -33
  33. llama_cloud/types/metric_result.py +0 -33
  34. llama_cloud/types/prompt_mixin_prompts.py +0 -39
  35. llama_cloud/types/prompt_spec.py +0 -36
  36. {llama_cloud-0.1.14.dist-info → llama_cloud-0.1.16.dist-info}/LICENSE +0 -0
  37. {llama_cloud-0.1.14.dist-info → llama_cloud-0.1.16.dist-info}/WHEEL +0 -0
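
Most of the diff body below is the wheel's RECORD file, where each entry has the form path,sha256=<digest>,<size in bytes> and the digest is an unpadded urlsafe-base64 SHA-256 of the file contents (the standard wheel RECORD format). As a reading aid, here is a minimal sketch of how such an entry is produced; the file path used is only an example:

    import base64
    import hashlib

    def record_entry(path: str) -> str:
        # Build a RECORD-style line: path,sha256=<urlsafe-b64 digest without padding>,<size>
        with open(path, "rb") as f:
            data = f.read()
        digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
        return f"{path},sha256={digest},{len(data)}"

    # Example (hypothetical path inside an unpacked wheel):
    # print(record_entry("llama_cloud/types/chunk_mode.py"))
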
{llama_cloud-0.1.14.dist-info → llama_cloud-0.1.16.dist-info}/RECORD
@@ -1,4 +1,4 @@
- llama_cloud/__init__.py,sha256=oto0-DnvGBhrsq8rZG6BOZfURSx45dq3NRYt-WIVMpc,22865
+ llama_cloud/__init__.py,sha256=GsERaXUabzoc0F4eXn1nzIVnb9iuBaEMCgSyfYJ2TMQ,22569
  llama_cloud/client.py,sha256=0fK6iRBCA77eSs0zFrYQj-zD0BLy6Dr2Ss0ETJ4WaOY,5555
  llama_cloud/core/__init__.py,sha256=QJS3CJ2TYP2E1Tge0CS6Z7r8LTNzJHQVX1hD3558eP0,519
  llama_cloud/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
@@ -28,7 +28,7 @@ llama_cloud/resources/embedding_model_configs/client.py,sha256=uyuDfQQXudqLEQFev
  llama_cloud/resources/embedding_model_configs/types/__init__.py,sha256=6-rcDwJhw_0shz3CjrPvlYBYXJJ1bLn-PpplhOsQ79w,1156
  llama_cloud/resources/embedding_model_configs/types/embedding_model_config_create_embedding_config.py,sha256=SQCHJk0AmBbKS5XKdcEJxhDhIMLQCmCI13IHC28v7vQ,3054
  llama_cloud/resources/evals/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
- llama_cloud/resources/evals/client.py,sha256=JyPHP9MsJ-15XHUVu-UjCcINo2IDPr2OageAqLBGlmw,27578
+ llama_cloud/resources/evals/client.py,sha256=v2AyeQV0hVgC6xoP2gJNgneJMaeXALV1hIeirYGxlPw,3242
  llama_cloud/resources/files/__init__.py,sha256=3B0SNM8EE6PddD5LpxYllci9vflEXy1xjPzhEEd-OUk,293
  llama_cloud/resources/files/client.py,sha256=7VmhrE5fbftB6p6QUQUkGM5FO48obF73keq86vGFyhE,49676
  llama_cloud/resources/files/types/__init__.py,sha256=EPYENAwkjBWv1MLf8s7R5-RO-cxZ_8NPrqfR4ZoR7jY,418
@@ -38,26 +38,26 @@ llama_cloud/resources/files/types/file_create_resource_info_value.py,sha256=R7Y-
  llama_cloud/resources/jobs/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
  llama_cloud/resources/jobs/client.py,sha256=mN9uOzys9aZkhOJkApUy0yhfNeK8X09xQxT34ZPptNY,5386
  llama_cloud/resources/llama_extract/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
- llama_cloud/resources/llama_extract/client.py,sha256=hYiITZt7CQsfPZ1A4_qq1U6ngIO5QbJTeUrrLo-KZkU,46554
+ llama_cloud/resources/llama_extract/client.py,sha256=xEezIrVQcLW7bTle3gNi2bXVDm3trjXsUJpJtChXHVo,53044
  llama_cloud/resources/organizations/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
  llama_cloud/resources/organizations/client.py,sha256=OGSVpkfY5wu8-22IFWVmtbYSDiy0-KqA3Lc1E_jNHvg,55889
  llama_cloud/resources/parsing/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
- llama_cloud/resources/parsing/client.py,sha256=7IZQc5hUiGhlGYwRdValcRdtyDLEmc8Dmr-0xdccoVg,74316
+ llama_cloud/resources/parsing/client.py,sha256=cdEEqjb5pRvb-Vq9VXjgh1107emTzYh5VP-Uu4aV3XI,74026
  llama_cloud/resources/pipelines/__init__.py,sha256=Mx7p3jDZRLMltsfywSufam_4AnHvmAfsxtMHVI72e-8,1083
- llama_cloud/resources/pipelines/client.py,sha256=-Oveo6XSfCZva-ylJp7DikV26KxkJsDr6xNFZ8FIqkQ,139274
+ llama_cloud/resources/pipelines/client.py,sha256=My_TCezdFHfzPmzSzD25DIKNO88XUrQGeFmwOQ-Z0Gk,125055
  llama_cloud/resources/pipelines/types/__init__.py,sha256=jjaMc0V3K1HZLMYZ6WT4ydMtBCVy-oF5koqTCovbDws,1202
  llama_cloud/resources/pipelines/types/pipeline_file_update_custom_metadata_value.py,sha256=trI48WLxPcAqV9207Q6-3cj1nl4EGlZpw7En56ZsPgg,217
  llama_cloud/resources/pipelines/types/pipeline_update_embedding_config.py,sha256=c8FF64fDrBMX_2RX4uY3CjbNc0Ss_AUJ4Eqs-KeV4Wc,2874
  llama_cloud/resources/pipelines/types/pipeline_update_transform_config.py,sha256=KbkyULMv-qeS3qRd31ia6pd5rOdypS0o2UL42NRcA7E,321
  llama_cloud/resources/projects/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
- llama_cloud/resources/projects/client.py,sha256=Tn-8WLGKHc3tzJikrcPOlefjASen7fTAzBDbzdvXtes,56315
+ llama_cloud/resources/projects/client.py,sha256=_9a54cNU8deQKrOpx4kj7Vgj2ByCyQQ7eEHhj-Zc1Ik,22498
  llama_cloud/resources/reports/__init__.py,sha256=cruYbQ1bIuJbRpkfaQY7ajUEslffjd7KzvzMzbtPH94,217
  llama_cloud/resources/reports/client.py,sha256=kHjtXVVc1Xi3T1GyBvSW5K4mTdr6xQwZA3vw-liRKBg,46736
  llama_cloud/resources/reports/types/__init__.py,sha256=LfwDYrI4RcQu-o42iAe7HkcwHww2YU90lOonBPTmZIk,291
  llama_cloud/resources/reports/types/update_report_plan_api_v_1_reports_report_id_plan_patch_request_action.py,sha256=Qh-MSeRvDBfNb5hoLELivv1pLtrYVf52WVoP7G8V34A,807
  llama_cloud/resources/retrievers/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
- llama_cloud/resources/retrievers/client.py,sha256=ASDdqnwXX4qj0sCAkWO7RKFnQ1oiLzBLIQ2bwqnMOKs,24905
- llama_cloud/types/__init__.py,sha256=Wu51DrEz4_aonTl2T_ftwoNyAyV_w8Jbi17wZ1rNnOQ,28238
+ llama_cloud/resources/retrievers/client.py,sha256=fmRVQjMaSaytaU1NMvE_vosyrbkdY93kGi2VKAGcb4U,30245
+ llama_cloud/types/__init__.py,sha256=AHJ1ew2Q4Y-b1dj2WHJDv9mSH7b--pfw2FrCgoIeC6I,27769
  llama_cloud/types/advanced_mode_transform_config.py,sha256=4xCXye0_cPmVS1F8aNTx81sIaEPjQH9kiCCAIoqUzlI,1502
  llama_cloud/types/advanced_mode_transform_config_chunking_config.py,sha256=wYbJnWLpeQDfhmDZz-wJfYzD1iGT5Jcxb9ga3mzUuvk,1983
  llama_cloud/types/advanced_mode_transform_config_segmentation_config.py,sha256=anNGq0F5-IlbIW3kpC8OilzLJnUq5tdIcWHnRnmlYsg,1303
@@ -79,7 +79,7 @@ llama_cloud/types/character_splitter.py,sha256=Jm6ie7c9JmMqIqLfAN-96sYvNUaIyLzCP
  llama_cloud/types/chat_app.py,sha256=fLuzYkXLq51C_Y23hoLwfmG-OiT7jlyHt2JGe6-f1IA,1795
  llama_cloud/types/chat_app_response.py,sha256=WSKr1KI9_pGTSstr3I53kZ8qb3y87Q4ulh8fR0C7sSU,1784
  llama_cloud/types/chat_data.py,sha256=ZYqVtjXF6qPGajU4IWZu3InpU54TXJwBFiqxBepylP0,1197
- llama_cloud/types/chunk_mode.py,sha256=7FIsCfJqZyek1cwRykSgRY24gA0Qo9kMGdJDFjabb9c,621
+ llama_cloud/types/chunk_mode.py,sha256=J4vqAQfQG6PWsIv1Fe_99nVsAfDbv_P81_KVsJ9AkU4,790
  llama_cloud/types/cloud_az_storage_blob_data_source.py,sha256=NT4cYsD1M868_bSJxKM9cvTMtjQtQxKloE4vRv8_lwg,1534
  llama_cloud/types/cloud_azure_ai_search_vector_store.py,sha256=9GTaft7BaKsR9RJQp5dlpbslXUlTMA1AcDdKV1ApfqI,1513
  llama_cloud/types/cloud_box_data_source.py,sha256=9bffCaKGvctSsk9OdTpzzP__O1NDpb9wdvKY2uwjpwY,1470
@@ -131,15 +131,7 @@ llama_cloud/types/embedding_model_config.py,sha256=6-o0vsAX89eHQdCAG5sI317Aivr4T
  llama_cloud/types/embedding_model_config_embedding_config.py,sha256=9rmfeiJYhBPmSJCXp-qxkOAd9WPwL5Hks7jIKd8XCPM,2901
  llama_cloud/types/embedding_model_config_update.py,sha256=BiA1KbFT-TSvy5OEyChd0dgDnQCKfBRxsDTvVKNj10Q,1175
  llama_cloud/types/embedding_model_config_update_embedding_config.py,sha256=mrXFxzb9GRaH4UUnOe_05-uYUuiTgDDCRadAMbPmGgc,2991
- llama_cloud/types/eval_dataset.py,sha256=FIP4uHqUXg0LxGPaq-LmW2aTcEdQk-i5AYLbGqsQSV0,1310
- llama_cloud/types/eval_dataset_job_params.py,sha256=vcXLJWO581uigNvGAurPDgMeEFtQURWucLF5pemdeS0,1343
- llama_cloud/types/eval_dataset_job_record.py,sha256=vBDz7xezpE8AB6Kw7sZLYxgMcv0dxUWVC01_fI2QuUU,2168
  llama_cloud/types/eval_execution_params.py,sha256=ntVaJh5SMZMPL4QLUiihVjUlg2SKbrezvbMKGlrF66Q,1369
- llama_cloud/types/eval_execution_params_override.py,sha256=ihEFbMRYmFJ5mWmFW24JjV6D0qqeDM4p829mSxMGtOQ,1195
- llama_cloud/types/eval_metric.py,sha256=vhO_teMLiyzBdzKpOBW8Bm9qCw2h6m3unp2XotB7pDQ,499
- llama_cloud/types/eval_question.py,sha256=UG042gXLw1uIW9hQOffCzIoGHVSve8Wk9ZeYGzwhHDU,1432
- llama_cloud/types/eval_question_create.py,sha256=oOwxkE5gPj8RAwgr3uuTHfTvLSXmYkkxNHqsT7oUHjI,1031
- llama_cloud/types/eval_question_result.py,sha256=Y4RFXnA4YJTlzM6_NtLOi0rt6hRZoQbToiVJqm41ArY,2168
  llama_cloud/types/extract_agent.py,sha256=T98IOueut4M52Qm7hqcUOcWFFDhZ-ye0OFdXgfFGtS4,1763
  llama_cloud/types/extract_agent_create.py,sha256=nDe2AELKdhF2VKe-IiajHavo8xatTZWbJb76D-HhJkM,1429
  llama_cloud/types/extract_agent_create_data_schema.py,sha256=zB31hJQ8hKaIsPkfTWiX5hqsPVFMyyeWEDZ_Aq237jo,305
@@ -148,12 +140,15 @@ llama_cloud/types/extract_agent_data_schema_value.py,sha256=UaDQ2KjajLDccW7F4NKd
  llama_cloud/types/extract_agent_update.py,sha256=bcXovL4OblDFQXAfhstLMfSSY2sJHQFkfVjzZ_8jO8c,1349
  llama_cloud/types/extract_agent_update_data_schema.py,sha256=argR5gPRUYWY6ADCMKRdg-8NM-rsBM91_TEn8NKqVy8,305
  llama_cloud/types/extract_agent_update_data_schema_zero_value.py,sha256=Nvd892EFhg-PzlqoFp5i2owL7hCZ2SsuL7U4Tk9NeRI,217
- llama_cloud/types/extract_config.py,sha256=s0f8Yzfuzl0P_xV91SNj0Cbp77I_FMXCxL5lEJyXR6I,1505
+ llama_cloud/types/extract_config.py,sha256=oR_6uYl8-58q6a5BsgymJuqCKPn6JoY7SAUmjT9M3es,1369
  llama_cloud/types/extract_job.py,sha256=Yx4fDdCdylAji2LPTwqflVpz1o9slpj9tTLS93-1tzU,1431
  llama_cloud/types/extract_job_create.py,sha256=UK1mBIKyflo7e6m1MxMN95pLscj67jH_yvs8EvmBXqU,1545
+ llama_cloud/types/extract_job_create_batch.py,sha256=64BAproProYtPk7vAPGvFoxvlgg7ZLb1LSg3ChIf7AM,1589
+ llama_cloud/types/extract_job_create_batch_data_schema_override.py,sha256=GykJ1BBecRtWYD3ZPi1YINqrr-me_pyr2w_4Ei4QOZQ,351
+ llama_cloud/types/extract_job_create_batch_data_schema_override_zero_value.py,sha256=7zXOgTYUwVAeyYeqWvX69m-7mhvK0V9cBRvgqVSd0X0,228
  llama_cloud/types/extract_job_create_data_schema_override.py,sha256=vuiJ2lGJjbXEnvFKzVnKyvgwhMXPg1Pb5GZne2DrB60,330
  llama_cloud/types/extract_job_create_data_schema_override_zero_value.py,sha256=HHEYxOSQXXyBYOiUQg_qwfQtXFj-OtThMwbUDBIgZU0,223
- llama_cloud/types/extract_mode.py,sha256=Xu8TvYHXYs-EcELV0hXbkcPuMyK1BLBQPKIBuHeUSnY,457
+ llama_cloud/types/extract_mode.py,sha256=mMkEugv91d-kcWLGUlr7Nm62p0eSlXeqfMAKw7u7wXI,644
  llama_cloud/types/extract_resultset.py,sha256=Alje0YQJUiA_aKi0hQs7TAnhDmZuQ_yL9b6HCNYBFQg,1627
  llama_cloud/types/extract_resultset_data.py,sha256=v9Ae4SxLsvYPE9crko4N16lBjsxuZpz1yrUOhnaM_VY,427
  llama_cloud/types/extract_resultset_data_item_value.py,sha256=JwqgDIGW0irr8QWaSTIrl24FhGxTUDOXIbxoSdIjuxs,209
@@ -194,14 +189,11 @@ llama_cloud/types/job_record_with_usage_metrics.py,sha256=iNV2do5TB_0e3PoOz_DJyA
  llama_cloud/types/llama_extract_settings.py,sha256=Yh9Ah9W0X4l-znjYm4oNIh8-LCBc99JEQmGU87bUzWs,2225
  llama_cloud/types/llama_index_core_base_llms_types_chat_message.py,sha256=NelHo-T-ebVMhRKsqE_xV8AJW4c7o6lS0uEQnPsmTwg,1365
  llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py,sha256=tTglUqrSUaVc2Wsi4uIt5MU-80_oxZzTnhf8ziilVGY,874
- llama_cloud/types/llama_parse_parameters.py,sha256=mwd4VxshMuBJxUvldKHTWWs90W27p72VB_bPItj6fgY,5284
+ llama_cloud/types/llama_parse_parameters.py,sha256=TMKaebSDi_6B4qsalE2zyYCJirj_HW_x5MhSIimGPJ8,5234
  llama_cloud/types/llama_parse_supported_file_extensions.py,sha256=B_0N3f8Aq59W9FbsH50mGBUiyWTIXQjHFl739uAyaQw,11207
  llama_cloud/types/llm.py,sha256=7iIItVPjURp4u5xxJDAFIefUdhUKwIuA245WXilJPXE,2234
  llama_cloud/types/llm_model_data.py,sha256=6rrycqGwlK3LZ2S-WtgmeomithdLhDCgwBBZQ5KLaso,1300
  llama_cloud/types/llm_parameters.py,sha256=RTKYt09lm9a1MlnBfYuTP2x_Ww4byUNNc1TqIel5O1Y,1377
- llama_cloud/types/local_eval.py,sha256=aJ8jRG0b5EL9cLjx281bzAzPw7Ar004Jfp6mBmyjuTA,1491
- llama_cloud/types/local_eval_results.py,sha256=YfK6AhfD0gr5apQBfrfzrTHDXvrk7ynAUUjNSKu9NVk,1380
- llama_cloud/types/local_eval_sets.py,sha256=XJSSriwRvkma889pPiBQrpRakKejKOX3tWPu1TGb1ug,1181
  llama_cloud/types/managed_ingestion_status.py,sha256=3KVlcurpEBOPAesBUS5pSYLoQVIyZUlr90Mmv-uALHE,1290
  llama_cloud/types/managed_ingestion_status_response.py,sha256=rdNpjNbQswF-6JG1e-EU374TP6Pjlxl0p7HJyNmuxTI,1373
  llama_cloud/types/markdown_element_node_parser.py,sha256=NUqdU8BmyfSFK2rV6hCrvP6U1iB6aqZCVsvHWJQ49xU,1964
@@ -212,7 +204,6 @@ llama_cloud/types/metadata_filter.py,sha256=dVdXY6i0aCkvJrs7ncQt4-S8jmBF9bBSp2Vu
  llama_cloud/types/metadata_filter_value.py,sha256=ij721gXNI7zbgsuDl9-AqBcXg2WDuVZhYS5F5YqekEs,188
  llama_cloud/types/metadata_filters.py,sha256=uSf6sB4oQu6WzMPNFG6Tc4euqEiYcj_X14Y5JWt9xVE,1315
  llama_cloud/types/metadata_filters_filters_item.py,sha256=e8KhD2q6Qc2_aK6r5CvyxC0oWVYO4F4vBIcB9eMEPPM,246
- llama_cloud/types/metric_result.py,sha256=gCVyu9usPip30igCLKS0oTYU6V3CvY8QIk1gwaXB7ik,1051
  llama_cloud/types/node_parser.py,sha256=rqZTQ_9GnCHOvSpXuAZoezxQCOgxHo-hmQv0s7pnEFc,1380
  llama_cloud/types/node_relationship.py,sha256=2e2PqWm0LOTiImvtsyiuaAPNIl0BItjSrQZTJv65GRA,1209
  llama_cloud/types/none_chunking_config.py,sha256=D062t314Vp-s4n9h8wNgsYfElI4PonPKmihvjEmaqdA,952
@@ -227,6 +218,7 @@ llama_cloud/types/page_screenshot_metadata.py,sha256=lobrq0AsOr8sDwMgA9ytop8lRmR
  llama_cloud/types/page_screenshot_node_with_score.py,sha256=EdqoXbmARCz1DV14E2saCPshIeII709uM4cLwxw_mkM,1232
  llama_cloud/types/page_segmentation_config.py,sha256=VH8uuxnubnJak1gSpS64OoMueHidhsDB-2eq2tVHbag,998
  llama_cloud/types/page_splitter_node_parser.py,sha256=rQgS1CDk18UKA0r9OPvjdtM570jzFArdLCTxYAtZny8,1424
+ llama_cloud/types/paginated_extract_runs_response.py,sha256=NNeVcgBm0mYTAsumwQBO_YrxvkgUqwsvZo3xs8QjVCc,1423
  llama_cloud/types/paginated_jobs_history_with_metrics.py,sha256=Bxy6N0x0FARJhgwNKKPkNpXx8YLRHvth23G14f5Fuk4,1136
  llama_cloud/types/paginated_list_cloud_documents_response.py,sha256=MsjS0SWlT0syELDck4x2sxxR3_NC1e6QTdepgVmK9aY,1341
  llama_cloud/types/paginated_list_pipeline_files_response.py,sha256=2TKR2oHSQRyLMqWz1qQBSIvz-ZJb8U_94367lwOJ2S4,1317
@@ -273,15 +265,13 @@ llama_cloud/types/progress_event.py,sha256=Bk73A8geTVaq0ze5pMnbkAmx7FSOHQIixYCpC
  llama_cloud/types/progress_event_status.py,sha256=yb4RAXwOKU6Bi7iyYy-3lwhF6_mLz0ZFyGjxIdaByoE,893
  llama_cloud/types/project.py,sha256=4NNh_ZAjEkoWl5st6b1jsPVf_SYKtUTB6rS1701G4IQ,1441
  llama_cloud/types/project_create.py,sha256=GxGmsXGJM-cHrvPFLktEkj9JtNsSdFae7-HPZFB4er0,1014
- llama_cloud/types/prompt_conf.py,sha256=B3G9kdx1Md5fsx2ix4NYz5emvKi2GisYOOp9RozCPCU,1294
- llama_cloud/types/prompt_mixin_prompts.py,sha256=_ipiIFWmWSuaJ5VFI5rXa_C7lHaIL3Yv5izh7__xTxI,1323
- llama_cloud/types/prompt_spec.py,sha256=tPJTIzN9pYmiZD-HcPHFuhh4n1ak9FI5f7xFNV31djQ,1410
+ llama_cloud/types/prompt_conf.py,sha256=4vAKt0Gce9ALRb_-FE0QbRiFM1Rc9OQAADggwBwgauE,1402
  llama_cloud/types/pydantic_program_mode.py,sha256=QfvpqR7TqyNuOxo78Sr58VOu7KDSBrHJM4XXBB0F5z0,1202
  llama_cloud/types/recurring_credit_grant.py,sha256=19qI3p5k1mQ1Qoo-gCQU02Aa42XpEsmwxPF1F88F-Yg,1517
  llama_cloud/types/related_node_info.py,sha256=frQg_RqrSBc62ooJ4QOF5QRKymHcNot5WVFAB_g1sMg,1216
  llama_cloud/types/related_node_info_node_type.py,sha256=lH95d8G-EnKCllV_igJsBfYt49y162PoNxWtrCo_Kgk,173
  llama_cloud/types/report.py,sha256=9M_WkIxi5ilmtXrLKo5XxWzJ_qV8FFf5j8bAlQRmaks,1155
- llama_cloud/types/report_block.py,sha256=h11qkKbd5fdNWILjLTiz4alQCSqITTq9DlGya8OuTVU,1260
+ llama_cloud/types/report_block.py,sha256=y5n5z0JxZNH9kzN0rTqIdZPRLA9XHdYvQlHTcPSraKk,1381
  llama_cloud/types/report_block_dependency.py,sha256=TGtLpcJG2xwTKr3GU8Err53T0BR_zNTiT-2JILvPbSg,785
  llama_cloud/types/report_create_response.py,sha256=tmnVkyAMVf0HNQy186DFVV1oZQzYGY9wxNk84cwQLKA,1020
  llama_cloud/types/report_event_item.py,sha256=_-0wgI96Ama2qKqUODTmI_fEcrnW5eAAjL1AoFEr4cQ,1451
@@ -306,8 +296,8 @@ llama_cloud/types/semantic_chunking_config.py,sha256=dFDniTVWpRc7UcmVFvljUoyL5Zt
  llama_cloud/types/sentence_chunking_config.py,sha256=NA9xidK5ICxJPkEMQZWNcsV0Hw9Co_bzRWeYe4uSh9I,1116
  llama_cloud/types/sentence_splitter.py,sha256=GbC3KE20Nd85uzO4bqJttjqJhQ_1co2gKnSQxzfOAiM,2140
  llama_cloud/types/status_enum.py,sha256=cUBIlys89E8PUzmVqqawu7qTDF0aRqBwiijOmRDPvx0,1018
- llama_cloud/types/struct_mode.py,sha256=AjYmpXTEYlMNNac6cNjEGYQBJwKJERw2ERdjGKgrX3o,845
- llama_cloud/types/struct_parse_conf.py,sha256=bD0gZzN6tR8VO9s81KPwTffLQDnLLAAcNrnknii_178,1825
+ llama_cloud/types/struct_mode.py,sha256=ROicwjXfFmgVU8_xSVxJlnFUzRNKG5VIEF1wYg9uOPU,1020
+ llama_cloud/types/struct_parse_conf.py,sha256=Od5f8azJlJTJJ6rwtZEIaEsSSYBdrNsHtLeMtdpMtxM,2101
  llama_cloud/types/supported_llm_model.py,sha256=0v-g01LyZB7TeN0zwAeSJejRoT95SVaXOJhNz7boJwM,1461
  llama_cloud/types/supported_llm_model_names.py,sha256=dEhmwGQVG-dmuGGbTWBAYadr-g5u3kiVz308CLWuSqw,2657
  llama_cloud/types/text_block.py,sha256=X154sQkSyposXuRcEWNp_tWcDQ-AI6q_-MfJUN5exP8,958
@@ -317,7 +307,7 @@ llama_cloud/types/text_node_with_score.py,sha256=k-KYWO_mgJBvO6xUfOD5W6v1Ku9E586
  llama_cloud/types/token_chunking_config.py,sha256=XNvnTsNd--YOMQ_Ad8hoqhYgQftqkBHKVn6i7nJnMqs,1067
  llama_cloud/types/token_text_splitter.py,sha256=iTT3x9yO021v757B2r-0Z-WFQiIESLqEJUCmUUwPQ_o,1899
  llama_cloud/types/transformation_category_names.py,sha256=Wb7NBB0f-tEtfEZQis-iKy71SUKmmHFcXf6XLn6g0XU,545
- llama_cloud/types/usage.py,sha256=-Yzh_NwIXlumIdOQH7YppEfABs3xHEcYrkPGk3mduLg,1450
+ llama_cloud/types/usage.py,sha256=LT4Jr4kiQiug2A_cNmGSxWV7UZmseYzxACXo7dTQIBQ,1466
  llama_cloud/types/usage_active_alerts_item.py,sha256=YZkSH_Vd3hu5f-Nv0LKKj9slVTa3GsOcbSPhttKcVqQ,964
  llama_cloud/types/usage_and_plan.py,sha256=DsQVkOkh6yiDY9FsGR34DcTocj53loO2lU55P45XnWY,1040
  llama_cloud/types/usage_metric_response.py,sha256=ukvtNZLeLacv-5F0-GQ5wTBZOPUPEjAeurgYPc4s7nA,1047
@@ -331,7 +321,7 @@ llama_cloud/types/validation_error_loc_item.py,sha256=LAtjCHIllWRBFXvAZ5QZpp7CPX
  llama_cloud/types/vertex_ai_embedding_config.py,sha256=DvQk2xMJFmo54MEXTzoM4KSADyhGm_ygmFyx6wIcQdw,1159
  llama_cloud/types/vertex_embedding_mode.py,sha256=yY23FjuWU_DkXjBb3JoKV4SCMqel2BaIMltDqGnIowU,1217
  llama_cloud/types/vertex_text_embedding.py,sha256=-C4fNCYfFl36ATdBMGFVPpiHIKxjk0KB1ERA2Ec20aU,1932
- llama_cloud-0.1.14.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
- llama_cloud-0.1.14.dist-info/METADATA,sha256=7-aIbaODwyeHrJ0WXBxE1QpyHx700jDivfPH3rWDGDk,902
- llama_cloud-0.1.14.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
- llama_cloud-0.1.14.dist-info/RECORD,,
+ llama_cloud-0.1.16.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
+ llama_cloud-0.1.16.dist-info/METADATA,sha256=nCSIO_-vJxp4O2kbNl74lwlihxhu62Bg3eI7yjC8tu4,902
+ llama_cloud-0.1.16.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+ llama_cloud-0.1.16.dist-info/RECORD,,
llama_cloud/types/eval_dataset.py
@@ -1,40 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
- import datetime as dt
- import typing
-
- from ..core.datetime_utils import serialize_datetime
-
- try:
-     import pydantic
-     if pydantic.__version__.startswith("1."):
-         raise ImportError
-     import pydantic.v1 as pydantic  # type: ignore
- except ImportError:
-     import pydantic  # type: ignore
-
-
- class EvalDataset(pydantic.BaseModel):
-     """
-     Schema for an eval dataset.
-     Includes the other DB fields like id, created_at, & updated_at.
-     """
-
-     id: str = pydantic.Field(description="Unique identifier")
-     created_at: typing.Optional[dt.datetime]
-     updated_at: typing.Optional[dt.datetime]
-     name: str = pydantic.Field(description="The name of the EvalDataset.")
-     project_id: str
-
-     def json(self, **kwargs: typing.Any) -> str:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().json(**kwargs_with_defaults)
-
-     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().dict(**kwargs_with_defaults)
-
-     class Config:
-         frozen = True
-         smart_union = True
-         json_encoders = {dt.datetime: serialize_datetime}
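
Most of the deleted modules below follow the same Fern-generated pydantic (v1-style) pattern shown above: a compatibility import shim, json()/dict() overrides that serialize by alias and drop unset fields, and a frozen Config. A minimal usage sketch, assuming llama-cloud 0.1.14 is installed (the module no longer exists in 0.1.16); the field values are made up:

    from llama_cloud.types.eval_dataset import EvalDataset

    # Construct the frozen model and serialize it; the generated json() override
    # applies by_alias=True and exclude_unset=True automatically.
    ds = EvalDataset(id="ds_123", name="my-eval-dataset", project_id="proj_456")
    print(ds.json())
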
llama_cloud/types/eval_dataset_job_params.py
@@ -1,39 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
- import datetime as dt
- import typing
-
- from ..core.datetime_utils import serialize_datetime
- from .eval_execution_params import EvalExecutionParams
-
- try:
-     import pydantic
-     if pydantic.__version__.startswith("1."):
-         raise ImportError
-     import pydantic.v1 as pydantic  # type: ignore
- except ImportError:
-     import pydantic  # type: ignore
-
-
- class EvalDatasetJobParams(pydantic.BaseModel):
-     """
-     Schema for the parameters of an eval dataset job.
-     """
-
-     eval_question_ids: typing.List[str] = pydantic.Field(
-         description="The IDs for the EvalQuestions this execution ran against."
-     )
-     eval_execution_params: EvalExecutionParams = pydantic.Field(description="The parameters for the eval execution.")
-
-     def json(self, **kwargs: typing.Any) -> str:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().json(**kwargs_with_defaults)
-
-     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().dict(**kwargs_with_defaults)
-
-     class Config:
-         frozen = True
-         smart_union = True
-         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/eval_dataset_job_record.py
@@ -1,58 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
- import datetime as dt
- import typing
-
- import typing_extensions
-
- from ..core.datetime_utils import serialize_datetime
- from .eval_dataset_job_params import EvalDatasetJobParams
- from .status_enum import StatusEnum
-
- try:
-     import pydantic
-     if pydantic.__version__.startswith("1."):
-         raise ImportError
-     import pydantic.v1 as pydantic  # type: ignore
- except ImportError:
-     import pydantic  # type: ignore
-
-
- class EvalDatasetJobRecord(pydantic.BaseModel):
-     """
-     Schema for job that evaluates an EvalDataset against a pipeline.
-     """
-
-     job_name: typing_extensions.Literal["eval_dataset_job"]
-     partitions: typing.Dict[str, str] = pydantic.Field(
-         description="The partitions for this execution. Used for determining where to save job output."
-     )
-     parameters: typing.Optional[EvalDatasetJobParams]
-     session_id: typing.Optional[str]
-     correlation_id: typing.Optional[str]
-     parent_job_execution_id: typing.Optional[str]
-     user_id: typing.Optional[str]
-     created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
-     project_id: typing.Optional[str]
-     id: typing.Optional[str] = pydantic.Field(description="Unique identifier")
-     status: StatusEnum
-     error_code: typing.Optional[str]
-     error_message: typing.Optional[str]
-     attempts: typing.Optional[int]
-     started_at: typing.Optional[dt.datetime]
-     ended_at: typing.Optional[dt.datetime]
-     updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
-     data: typing.Optional[typing.Any]
-
-     def json(self, **kwargs: typing.Any) -> str:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().json(**kwargs_with_defaults)
-
-     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().dict(**kwargs_with_defaults)
-
-     class Config:
-         frozen = True
-         smart_union = True
-         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/eval_execution_params_override.py
@@ -1,37 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
- import datetime as dt
- import typing
-
- from ..core.datetime_utils import serialize_datetime
- from .supported_llm_model_names import SupportedLlmModelNames
-
- try:
-     import pydantic
-     if pydantic.__version__.startswith("1."):
-         raise ImportError
-     import pydantic.v1 as pydantic  # type: ignore
- except ImportError:
-     import pydantic  # type: ignore
-
-
- class EvalExecutionParamsOverride(pydantic.BaseModel):
-     """
-     Schema for the params override for an eval execution.
-     """
-
-     llm_model: typing.Optional[SupportedLlmModelNames]
-     qa_prompt_tmpl: typing.Optional[str]
-
-     def json(self, **kwargs: typing.Any) -> str:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().json(**kwargs_with_defaults)
-
-     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().dict(**kwargs_with_defaults)
-
-     class Config:
-         frozen = True
-         smart_union = True
-         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/eval_metric.py
@@ -1,17 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
- import enum
- import typing
-
- T_Result = typing.TypeVar("T_Result")
-
-
- class EvalMetric(str, enum.Enum):
-     RELEVANCY = "RELEVANCY"
-     FAITHFULNESS = "FAITHFULNESS"
-
-     def visit(self, relevancy: typing.Callable[[], T_Result], faithfulness: typing.Callable[[], T_Result]) -> T_Result:
-         if self is EvalMetric.RELEVANCY:
-             return relevancy()
-         if self is EvalMetric.FAITHFULNESS:
-             return faithfulness()
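
EvalMetric is the only deleted module that is a plain enum rather than a pydantic model; its visit() helper dispatches on the enum member. A small usage sketch, again assuming llama-cloud 0.1.14 (the returned strings are illustrative only):

    from llama_cloud.types.eval_metric import EvalMetric

    metric = EvalMetric.RELEVANCY
    # visit() calls the callable matching the enum member and returns its result.
    label = metric.visit(
        relevancy=lambda: "answer relevance check",
        faithfulness=lambda: "grounding/faithfulness check",
    )
    print(label)
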
llama_cloud/types/eval_question.py
@@ -1,38 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
- import datetime as dt
- import typing
-
- from ..core.datetime_utils import serialize_datetime
-
- try:
-     import pydantic
-     if pydantic.__version__.startswith("1."):
-         raise ImportError
-     import pydantic.v1 as pydantic  # type: ignore
- except ImportError:
-     import pydantic  # type: ignore
-
-
- class EvalQuestion(pydantic.BaseModel):
-     id: str = pydantic.Field(description="Unique identifier")
-     created_at: typing.Optional[dt.datetime]
-     updated_at: typing.Optional[dt.datetime]
-     content: str = pydantic.Field(description="The content of the question.")
-     eval_dataset_id: str
-     eval_dataset_index: int = pydantic.Field(
-         description="The index at which this question is positioned relative to the other questions in the linked EvalDataset. Client is responsible for setting this correctly."
-     )
-
-     def json(self, **kwargs: typing.Any) -> str:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().json(**kwargs_with_defaults)
-
-     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().dict(**kwargs_with_defaults)
-
-     class Config:
-         frozen = True
-         smart_union = True
-         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/eval_question_create.py
@@ -1,31 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
- import datetime as dt
- import typing
-
- from ..core.datetime_utils import serialize_datetime
-
- try:
-     import pydantic
-     if pydantic.__version__.startswith("1."):
-         raise ImportError
-     import pydantic.v1 as pydantic  # type: ignore
- except ImportError:
-     import pydantic  # type: ignore
-
-
- class EvalQuestionCreate(pydantic.BaseModel):
-     content: str = pydantic.Field(description="The content of the question.")
-
-     def json(self, **kwargs: typing.Any) -> str:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().json(**kwargs_with_defaults)
-
-     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().dict(**kwargs_with_defaults)
-
-     class Config:
-         frozen = True
-         smart_union = True
-         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/eval_question_result.py
@@ -1,52 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
- import datetime as dt
- import typing
-
- from ..core.datetime_utils import serialize_datetime
- from .eval_execution_params import EvalExecutionParams
- from .metric_result import MetricResult
- from .text_node import TextNode
-
- try:
-     import pydantic
-     if pydantic.__version__.startswith("1."):
-         raise ImportError
-     import pydantic.v1 as pydantic  # type: ignore
- except ImportError:
-     import pydantic  # type: ignore
-
-
- class EvalQuestionResult(pydantic.BaseModel):
-     """
-     Schema for the result of an eval question job.
-     """
-
-     eval_question_id: str = pydantic.Field(description="The ID of the question that was executed.")
-     pipeline_id: str = pydantic.Field(description="The ID of the pipeline that the question was executed against.")
-     source_nodes: typing.List[TextNode] = pydantic.Field(
-         description="The nodes retrieved by the pipeline for the given question."
-     )
-     answer: str = pydantic.Field(description="The answer to the question.")
-     eval_metrics: typing.Dict[str, MetricResult] = pydantic.Field(description="The eval metrics for the question.")
-     eval_dataset_execution_id: str = pydantic.Field(
-         description="The ID of the EvalDatasetJobRecord that this result was generated from."
-     )
-     eval_dataset_execution_params: EvalExecutionParams = pydantic.Field(
-         description="The EvalExecutionParams that were used when this result was generated."
-     )
-     eval_finished_at: dt.datetime = pydantic.Field(description="The timestamp when the eval finished.")
-     class_name: typing.Optional[str]
-
-     def json(self, **kwargs: typing.Any) -> str:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().json(**kwargs_with_defaults)
-
-     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().dict(**kwargs_with_defaults)
-
-     class Config:
-         frozen = True
-         smart_union = True
-         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/local_eval_sets.py
@@ -1,33 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
- import datetime as dt
- import typing
-
- from ..core.datetime_utils import serialize_datetime
-
- try:
-     import pydantic
-     if pydantic.__version__.startswith("1."):
-         raise ImportError
-     import pydantic.v1 as pydantic  # type: ignore
- except ImportError:
-     import pydantic  # type: ignore
-
-
- class LocalEvalSets(pydantic.BaseModel):
-     eval_set_id: str = pydantic.Field(description="The ID of the eval set.")
-     app_name: str = pydantic.Field(description="The name of the app.")
-     upload_time: dt.datetime = pydantic.Field(description="The time of the upload.")
-
-     def json(self, **kwargs: typing.Any) -> str:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().json(**kwargs_with_defaults)
-
-     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().dict(**kwargs_with_defaults)
-
-     class Config:
-         frozen = True
-         smart_union = True
-         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/metric_result.py
@@ -1,33 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
- import datetime as dt
- import typing
-
- from ..core.datetime_utils import serialize_datetime
-
- try:
-     import pydantic
-     if pydantic.__version__.startswith("1."):
-         raise ImportError
-     import pydantic.v1 as pydantic  # type: ignore
- except ImportError:
-     import pydantic  # type: ignore
-
-
- class MetricResult(pydantic.BaseModel):
-     passing: typing.Optional[bool]
-     score: typing.Optional[float]
-     feedback: typing.Optional[str]
-
-     def json(self, **kwargs: typing.Any) -> str:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().json(**kwargs_with_defaults)
-
-     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().dict(**kwargs_with_defaults)
-
-     class Config:
-         frozen = True
-         smart_union = True
-         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/prompt_mixin_prompts.py
@@ -1,39 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
- import datetime as dt
- import typing
-
- from ..core.datetime_utils import serialize_datetime
- from .prompt_spec import PromptSpec
-
- try:
-     import pydantic
-     if pydantic.__version__.startswith("1."):
-         raise ImportError
-     import pydantic.v1 as pydantic  # type: ignore
- except ImportError:
-     import pydantic  # type: ignore
-
-
- class PromptMixinPrompts(pydantic.BaseModel):
-     """
-     Schema for the prompts derived from the PromptMixin.
-     """
-
-     project_id: str = pydantic.Field(description="The ID of the project.")
-     id: typing.Optional[str]
-     name: str = pydantic.Field(description="The name of the prompt set.")
-     prompts: typing.List[PromptSpec] = pydantic.Field(description="The prompts.")
-
-     def json(self, **kwargs: typing.Any) -> str:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().json(**kwargs_with_defaults)
-
-     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().dict(**kwargs_with_defaults)
-
-     class Config:
-         frozen = True
-         smart_union = True
-         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/prompt_spec.py
@@ -1,36 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
- import datetime as dt
- import typing
-
- from ..core.datetime_utils import serialize_datetime
- from .app_schema_chat_chat_message import AppSchemaChatChatMessage
-
- try:
-     import pydantic
-     if pydantic.__version__.startswith("1."):
-         raise ImportError
-     import pydantic.v1 as pydantic  # type: ignore
- except ImportError:
-     import pydantic  # type: ignore
-
-
- class PromptSpec(pydantic.BaseModel):
-     prompt_key: str = pydantic.Field(description="The key of the prompt in the PromptMixin.")
-     prompt_class: str = pydantic.Field(description="The class of the prompt (PromptTemplate or ChatPromptTemplate).")
-     prompt_type: str = pydantic.Field(description="The type of prompt.")
-     template: typing.Optional[str]
-     message_templates: typing.Optional[typing.List[AppSchemaChatChatMessage]]
-
-     def json(self, **kwargs: typing.Any) -> str:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().json(**kwargs_with_defaults)
-
-     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().dict(**kwargs_with_defaults)
-
-     class Config:
-         frozen = True
-         smart_union = True
-         json_encoders = {dt.datetime: serialize_datetime}