llama-cloud 0.1.30__py3-none-any.whl → 0.1.32__py3-none-any.whl

This diff shows the content changes between two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.

Files changed (39)
  1. llama_cloud/__init__.py +26 -14
  2. llama_cloud/client.py +0 -3
  3. llama_cloud/resources/__init__.py +0 -2
  4. llama_cloud/resources/beta/client.py +602 -0
  5. llama_cloud/resources/organizations/client.py +2 -2
  6. llama_cloud/resources/parsing/client.py +8 -0
  7. llama_cloud/resources/pipelines/client.py +64 -0
  8. llama_cloud/types/__init__.py +26 -12
  9. llama_cloud/types/{model_configuration.py → agent_data.py} +8 -7
  10. llama_cloud/types/agent_deployment_summary.py +1 -1
  11. llama_cloud/types/{message.py → aggregate_group.py} +8 -9
  12. llama_cloud/types/base_plan.py +3 -0
  13. llama_cloud/types/extract_mode.py +0 -4
  14. llama_cloud/types/filter_operation.py +46 -0
  15. llama_cloud/types/filter_operation_eq.py +6 -0
  16. llama_cloud/types/filter_operation_gt.py +6 -0
  17. llama_cloud/types/filter_operation_gte.py +6 -0
  18. llama_cloud/types/filter_operation_includes_item.py +6 -0
  19. llama_cloud/types/filter_operation_lt.py +6 -0
  20. llama_cloud/types/filter_operation_lte.py +6 -0
  21. llama_cloud/types/input_message.py +2 -2
  22. llama_cloud/types/legacy_parse_job_config.py +3 -0
  23. llama_cloud/types/llama_index_core_base_llms_types_chat_message.py +2 -2
  24. llama_cloud/types/llama_parse_parameters.py +1 -0
  25. llama_cloud/types/{llama_index_core_base_llms_types_message_role.py → message_role.py} +9 -9
  26. llama_cloud/types/{text_content_block.py → paginated_response_agent_data.py} +5 -5
  27. llama_cloud/types/paginated_response_aggregate_group.py +34 -0
  28. llama_cloud/types/parse_job_config.py +1 -0
  29. llama_cloud/types/playground_session.py +2 -2
  30. llama_cloud/types/role.py +0 -1
  31. llama_cloud/types/{app_schema_chat_chat_message.py → src_app_schema_chat_chat_message.py} +3 -3
  32. llama_cloud/types/user_organization_role.py +0 -1
  33. {llama_cloud-0.1.30.dist-info → llama_cloud-0.1.32.dist-info}/METADATA +1 -1
  34. {llama_cloud-0.1.30.dist-info → llama_cloud-0.1.32.dist-info}/RECORD +36 -31
  35. llama_cloud/resources/responses/__init__.py +0 -2
  36. llama_cloud/resources/responses/client.py +0 -137
  37. llama_cloud/types/app_schema_responses_message_role.py +0 -33
  38. {llama_cloud-0.1.30.dist-info → llama_cloud-0.1.32.dist-info}/LICENSE +0 -0
  39. {llama_cloud-0.1.30.dist-info → llama_cloud-0.1.32.dist-info}/WHEEL +0 -0
{llama_cloud-0.1.30.dist-info → llama_cloud-0.1.32.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: llama-cloud
-Version: 0.1.30
+Version: 0.1.32
 Summary:
 License: MIT
 Author: Logan Markewich
{llama_cloud-0.1.30.dist-info → llama_cloud-0.1.32.dist-info}/RECORD
@@ -1,5 +1,5 @@
-llama_cloud/__init__.py,sha256=J8cweD7dD1ETW9wbr9TdNztTs_GP5amLcQ64J5AfFes,25215
-llama_cloud/client.py,sha256=ylV-19129KufjzRDCoH4yARObhdUxc9vLL4kV-7fIck,6132
+llama_cloud/__init__.py,sha256=T-HghZZ4yA4QPgXeEvHQsmp5o8o1K2amrf7SftKYwE4,25511
+llama_cloud/client.py,sha256=VNO5-JE1H0zWJudlDA9GJ2N6qEKQvxN5Q5QgVNTQPSI,5893
 llama_cloud/core/__init__.py,sha256=QJS3CJ2TYP2E1Tge0CS6Z7r8LTNzJHQVX1hD3558eP0,519
 llama_cloud/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
 llama_cloud/core/client_wrapper.py,sha256=xmj0jCdQ0ySzbSqHUWOkpRRy069y74I_HuXkWltcsVM,1507
@@ -9,11 +9,11 @@ llama_cloud/core/remove_none_from_dict.py,sha256=8m91FC3YuVem0Gm9_sXhJ2tGvP33owJ
 llama_cloud/environment.py,sha256=feTjOebeFZMrBdnHat4RE5aHlpt-sJm4NhK4ntV1htI,167
 llama_cloud/errors/__init__.py,sha256=pbbVUFtB9LCocA1RMWMMF_RKjsy5YkOKX5BAuE49w6g,170
 llama_cloud/errors/unprocessable_entity_error.py,sha256=FvR7XPlV3Xx5nu8HNlmLhBRdk4so_gCHjYT5PyZe6sM,313
-llama_cloud/resources/__init__.py,sha256=n3hSlo3KQatoFhDLk7Vm_hB_5lzh70T0S2r3cSpDWec,4211
+llama_cloud/resources/__init__.py,sha256=cFMt4FZb8n6SMbRXYzYqIR-PlJbO7C-jX4iBeCym_8E,4179
 llama_cloud/resources/admin/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/admin/client.py,sha256=mzA_ezCjugKNmvWCMWEF0Z0k86ErACWov1VtPV1J2tU,3678
 llama_cloud/resources/beta/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/beta/client.py,sha256=mfqHAPWQEZwZM0LRYkia36EFdGrU2sZ_Y-MM1JU_0Yg,14966
+llama_cloud/resources/beta/client.py,sha256=uJO08z4WF3I_tVyZEu0SiwfeSx3iQaTUPZkoh6Pevs8,39144
 llama_cloud/resources/chat_apps/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/chat_apps/client.py,sha256=orSI8rpQbUwVEToolEeiEi5Qe--suXFvfu6D9JDii5I,23595
 llama_cloud/resources/data_sinks/__init__.py,sha256=ZHUjn3HbKhq_7QS1q74r2m5RGKF5lxcvF2P6pGvpcis,147
@@ -53,11 +53,11 @@ llama_cloud/resources/llama_extract/types/extract_job_create_batch_data_schema_o
 llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema.py,sha256=uMqpKJdCmUNtryS2bkQTNA1AgDlWdtsBOP31iMt3zNA,346
 llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema_zero_value.py,sha256=cUS7ez5r0Vx8T7SxwLYptZMmvpT5JoDVMyn54Q6VL-g,227
 llama_cloud/resources/organizations/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/organizations/client.py,sha256=CdrdNdB9R-bOsNqZ4Jbm1BzG1RafXMFjuCsrVYf2OrE,56567
+llama_cloud/resources/organizations/client.py,sha256=yJ2TYvr7tPRS_Zhdb_IbknKo8aIIRSWm-63d0nh535s,56597
 llama_cloud/resources/parsing/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/parsing/client.py,sha256=tUA6jXDoUbbu4qM-VvoUDU6BFSOZTUSfD6lz7wfCqnA,87707
+llama_cloud/resources/parsing/client.py,sha256=EHrQKjOl_VPPbcbaXi5TSah8HBf7ooHijhMF7IEzBMg,88117
 llama_cloud/resources/pipelines/__init__.py,sha256=zyvVEOF_krvEZkCIj_kZoMKfhDqHo_R32a1mv9CriQc,1193
-llama_cloud/resources/pipelines/client.py,sha256=BcBqzTPu1LUsdimXvuaaKjUu6w5xjbL-ZBfWsO183Vk,132360
+llama_cloud/resources/pipelines/client.py,sha256=VAqAm0oY_nXGkMPqXuzPEHS9kPtpuOE5sxfyqlzXuSI,134738
 llama_cloud/resources/pipelines/types/__init__.py,sha256=C68NQ5QzA0dFXf9oePFFGmV1vn96jcAp-QAznSgoRYQ,1375
 llama_cloud/resources/pipelines/types/pipeline_file_update_custom_metadata_value.py,sha256=trI48WLxPcAqV9207Q6-3cj1nl4EGlZpw7En56ZsPgg,217
 llama_cloud/resources/pipelines/types/pipeline_update_embedding_config.py,sha256=c8FF64fDrBMX_2RX4uY3CjbNc0Ss_AUJ4Eqs-KeV4Wc,2874
@@ -69,23 +69,21 @@ llama_cloud/resources/reports/__init__.py,sha256=cruYbQ1bIuJbRpkfaQY7ajUEslffjd7
 llama_cloud/resources/reports/client.py,sha256=kHjtXVVc1Xi3T1GyBvSW5K4mTdr6xQwZA3vw-liRKBg,46736
 llama_cloud/resources/reports/types/__init__.py,sha256=LfwDYrI4RcQu-o42iAe7HkcwHww2YU90lOonBPTmZIk,291
 llama_cloud/resources/reports/types/update_report_plan_api_v_1_reports_report_id_plan_patch_request_action.py,sha256=Qh-MSeRvDBfNb5hoLELivv1pLtrYVf52WVoP7G8V34A,807
-llama_cloud/resources/responses/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/responses/client.py,sha256=ard4U9yZcD89pJ_hyYqeRDIfQYaX2WGl36OK7re8q3U,5481
 llama_cloud/resources/retrievers/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/retrievers/client.py,sha256=z2LhmA-cZVFzr9P6loeCZYnJbvSIk0QitFeVFp-IyZk,32126
-llama_cloud/types/__init__.py,sha256=AN53ky-a7dob2L_8xPRUSyS75IGy5MZqT8-7k85ASIs,29974
+llama_cloud/types/__init__.py,sha256=ZYnUvMdFPye-wlq-XeyWUmhtVeLpi8c0UR0vSemiHP4,30490
 llama_cloud/types/advanced_mode_transform_config.py,sha256=4xCXye0_cPmVS1F8aNTx81sIaEPjQH9kiCCAIoqUzlI,1502
 llama_cloud/types/advanced_mode_transform_config_chunking_config.py,sha256=wYbJnWLpeQDfhmDZz-wJfYzD1iGT5Jcxb9ga3mzUuvk,1983
 llama_cloud/types/advanced_mode_transform_config_segmentation_config.py,sha256=anNGq0F5-IlbIW3kpC8OilzLJnUq5tdIcWHnRnmlYsg,1303
+llama_cloud/types/agent_data.py,sha256=Onaoc1QeIn3Il-8r1vgEzqvef92gHclCO7AC4kucEMI,1220
 llama_cloud/types/agent_deployment_list.py,sha256=7PWm2GHumo8CfqKU8fDRTJVDV4QQh8My1dhvBPO2zaA,1120
-llama_cloud/types/agent_deployment_summary.py,sha256=H1BlLyP5mouiDdELpPhiYMSrMnTvxsWx5OEbHj0LDp0,1603
-llama_cloud/types/app_schema_chat_chat_message.py,sha256=e81y7h9e-1cBSlGWuktkbq6G6ql_96qoTafXycZk8dw,1680
-llama_cloud/types/app_schema_responses_message_role.py,sha256=Wod45VMOCo-z6DWNqCOOJAaiFuZHjHCPtgqKPlVt2fI,972
+llama_cloud/types/agent_deployment_summary.py,sha256=YEZxnNvTGYHz3zV6eGldVKfcy5S_IM-KlcOzDUqTfiU,1605
+llama_cloud/types/aggregate_group.py,sha256=LybxFl_1snA9VgG6f7sogwO7kYAwH_I88pkYc0oMOH0,1164
 llama_cloud/types/audio_block.py,sha256=9JIGjZ8GU3C7ICv6XdNVN6_gWXyF18TJPaDuM9OUoMU,1071
 llama_cloud/types/auto_transform_config.py,sha256=HVeHZM75DMRznScqLTfrMwcZwIdyWPuaEYbPewnHqwc,1168
 llama_cloud/types/azure_open_ai_embedding.py,sha256=MeDqZoPYFN7Nv_imY9cfqDU9SPlEyAY4HcQZ4PF5X3g,2264
 llama_cloud/types/azure_open_ai_embedding_config.py,sha256=o1zZhzcGElH3SeixFErrm7P_WFHQ6LvrLem_nKJWunw,1170
-llama_cloud/types/base_plan.py,sha256=5DZi20EOciTc5okLAxQDqyGylsW-DflTy14dcvQb2fQ,1910
+llama_cloud/types/base_plan.py,sha256=kuRJi-OxFHbKAxoQWe08IG45_i8xL67WeOZFCGWkOHI,2049
 llama_cloud/types/base_plan_metronome_plan_type.py,sha256=I3g_dVoWWztbmpWpYmseDqQSbwtlLUl2vS01tfgMjEA,499
 llama_cloud/types/base_plan_name.py,sha256=keHQaw9YV9ghsWnGfnHrLtB4qNz0v4TWX4_MoO3flRM,1926
 llama_cloud/types/base_plan_plan_frequency.py,sha256=idUZlDaSdMrMZ2lQ1ytBWM4QyduIZu6Gt2eLU0LVqH4,684
@@ -159,7 +157,7 @@ llama_cloud/types/extract_job.py,sha256=Yx4fDdCdylAji2LPTwqflVpz1o9slpj9tTLS93-1
 llama_cloud/types/extract_job_create.py,sha256=yLtrh46fsK8Q2_hz8Ub3mvGriSn5BI2OjjwpWRy5YsA,1680
 llama_cloud/types/extract_job_create_data_schema_override.py,sha256=vuiJ2lGJjbXEnvFKzVnKyvgwhMXPg1Pb5GZne2DrB60,330
 llama_cloud/types/extract_job_create_data_schema_override_zero_value.py,sha256=HHEYxOSQXXyBYOiUQg_qwfQtXFj-OtThMwbUDBIgZU0,223
-llama_cloud/types/extract_mode.py,sha256=DwTMzDq3HHJop_fxQelHEE_k8UcdDz-W_v_Oj2WWXLk,931
+llama_cloud/types/extract_mode.py,sha256=S7H-XcH1wvPbOPVdwG9kVnZaH1pMY-LNzAD6TjCm0mc,785
 llama_cloud/types/extract_models.py,sha256=tx4NquIoJ4irXncqRUjnuE542nPu5jMuzy-ZaMdg3PI,1958
 llama_cloud/types/extract_resultset.py,sha256=Alje0YQJUiA_aKi0hQs7TAnhDmZuQ_yL9b6HCNYBFQg,1627
 llama_cloud/types/extract_resultset_data.py,sha256=v9Ae4SxLsvYPE9crko4N16lBjsxuZpz1yrUOhnaM_VY,427
@@ -186,6 +184,13 @@ llama_cloud/types/file_parse_public.py,sha256=sshZ0BcjHMGpuz4ylSurv0K_3ejfPrUGGy
 llama_cloud/types/file_permission_info_value.py,sha256=RyQlNbhvIKS87Ywu7XUaw5jDToZX64M9Wqzu1U_q2Us,197
 llama_cloud/types/file_resource_info_value.py,sha256=g6T6ELeLK9jgcvX6r-EuAl_4JkwnyqdS0RRoabMReSU,195
 llama_cloud/types/filter_condition.py,sha256=YEc-NaZbMha4oZVSKerZ6-gNYriNOZmTHTRMKX-9Ju0,678
+llama_cloud/types/filter_operation.py,sha256=lzyF_LQ-bT_wubU2bSbV6q2oncCE3mypz3D6qkAR86U,1663
+llama_cloud/types/filter_operation_eq.py,sha256=7UQkjycQvUFBvd1KRWfNacXAEgp2eGG6XNej0EikP1M,165
+llama_cloud/types/filter_operation_gt.py,sha256=ueeaTBhCGM0xUWLjdFei55ecbtbR3jFuiAtXrinFNDk,165
+llama_cloud/types/filter_operation_gte.py,sha256=A_8I_-EpBNqcX_KbwMdhXI0Kno3WCwZnPofSRJxECpU,166
+llama_cloud/types/filter_operation_includes_item.py,sha256=kwI0NjIZVUfaNU3BBue-AAEkPl_42_GjE_CR0OwZV5Y,175
+llama_cloud/types/filter_operation_lt.py,sha256=Njv9OnuI3tzo88EAMhsVN8BvuzR1164GQP4SggbZe1U,165
+llama_cloud/types/filter_operation_lte.py,sha256=5Evci2M4XfkkWMlY746t52OiTYiO9SaIJ72QDPu2G7U,166
 llama_cloud/types/filter_operator.py,sha256=tY_DWFVOoLrqDc-soJcSFvUL-MsltK6iLSK7IKK-TPs,2439
 llama_cloud/types/free_credits_usage.py,sha256=TPktesYpM5gVeBXPbRFun19XaPJo-dIu0Xbrg-iX8qE,1052
 llama_cloud/types/gemini_embedding.py,sha256=n9vuxFbXt_VNuaZvp7BlkFWmGMgehpJz_ICacIafdYw,1418
@@ -196,20 +201,19 @@ llama_cloud/types/hugging_face_inference_api_embedding_config.py,sha256=EFHhuPCx
 llama_cloud/types/hugging_face_inference_api_embedding_token.py,sha256=A7-_YryBcsP4G5uRyJ9acao3XwX5-YC3NRndTeDAPj4,144
 llama_cloud/types/image_block.py,sha256=Bccrsm1-B2hUzObP7Oy1H7IVnurixfTpL03i-yqfZp0,1112
 llama_cloud/types/ingestion_error_response.py,sha256=8u0cyT44dnpkNeUKemTvJMUqi_WyPcYQKP_DMTqaFPY,1259
-llama_cloud/types/input_message.py,sha256=H7XMpGjkk7f9Fgz4YuuD9OBpNDR68lnP91LxCP1R-Vw,1433
+llama_cloud/types/input_message.py,sha256=Ym6-tX6CMWKuHfxRtyM2y16kqSS3BzHged9rFRFkX0g,1346
 llama_cloud/types/job_name_mapping.py,sha256=2dQFQlVHoeSlkyEKSEJv0M3PzJf7hMvkuABj3vMY7ys,1617
 llama_cloud/types/job_names.py,sha256=WacongwoJygg_gCyYjPsOVv3cmVtRaX633JNgFxy-d8,3915
 llama_cloud/types/job_record.py,sha256=Z6sF9AruZJo-kTRgNufAWS3WK1yaEqop6kox1GpBYy4,2219
 llama_cloud/types/job_record_parameters.py,sha256=Oqxp5y0owPfjLc_NR7AYE8P3zM2PJo36N9olbyNl7AA,3425
 llama_cloud/types/job_record_with_usage_metrics.py,sha256=iNV2do5TB_0e3PoOz_DJyAaM6Cn9G8KG-dGPGgEs5SY,1198
 llama_cloud/types/l_lama_parse_transform_config.py,sha256=YQRJZvKh1Ee2FUyW_N0nqYJoW599qBgH3JCH9SH6YLo,1249
-llama_cloud/types/legacy_parse_job_config.py,sha256=5l1ZT0n2UTX5t45ePjZZ07RkQNUg5E6n0Xb1gz_CzxE,12522
+llama_cloud/types/legacy_parse_job_config.py,sha256=zZJFYnquo51NbEXhw-yhpRjIuaJNgg-T_fAI2J7-hrM,12660
 llama_cloud/types/license_info_response.py,sha256=fE9vcWO8k92SBqb_wOyBu_16C61s72utA-SifEi9iBc,1192
 llama_cloud/types/llama_extract_settings.py,sha256=Y60XxsxVHUtX-ZjC0tyNzsaDIj_ojxYC1iy2w4vti54,2532
-llama_cloud/types/llama_index_core_base_llms_types_chat_message.py,sha256=tF54vcCwjArHWozzC81bCZfI4gJBmhnx6s592VoQ5UM,1452
+llama_cloud/types/llama_index_core_base_llms_types_chat_message.py,sha256=NelHo-T-ebVMhRKsqE_xV8AJW4c7o6lS0uEQnPsmTwg,1365
 llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py,sha256=-aL8fh-w2Xf4uQs_LHzb3q6LL_onLAcVzCR5yMI4qJw,1571
-llama_cloud/types/llama_index_core_base_llms_types_message_role.py,sha256=i8G2QGRrEUmb1P9BrKW3frfTOQ9RlJvMU0FMCRNpE5c,1602
-llama_cloud/types/llama_parse_parameters.py,sha256=SiSqreBFW5hGf7gVuXdITwW1ugxv03L5VpNQqoeI6Pk,6260
+llama_cloud/types/llama_parse_parameters.py,sha256=pgWWbeaoC8p01_c0bC--ksHjJt-_A7QUhQdCW4MAVIQ,6325
 llama_cloud/types/llama_parse_parameters_priority.py,sha256=EFRudtaID_s8rLKlfW8O8O9TDbpZdniIidK-xchhfRI,830
 llama_cloud/types/llama_parse_supported_file_extensions.py,sha256=B_0N3f8Aq59W9FbsH50mGBUiyWTIXQjHFl739uAyaQw,11207
 llama_cloud/types/llm_model_data.py,sha256=6rrycqGwlK3LZ2S-WtgmeomithdLhDCgwBBZQ5KLaso,1300
@@ -217,13 +221,12 @@ llama_cloud/types/llm_parameters.py,sha256=RTKYt09lm9a1MlnBfYuTP2x_Ww4byUNNc1TqI
 llama_cloud/types/load_files_job_config.py,sha256=R5sFgFmV__0mqLUuD7dkFoBJHG2ZLw5px9zRapvYcpE,1069
 llama_cloud/types/managed_ingestion_status.py,sha256=3KVlcurpEBOPAesBUS5pSYLoQVIyZUlr90Mmv-uALHE,1290
 llama_cloud/types/managed_ingestion_status_response.py,sha256=rdNpjNbQswF-6JG1e-EU374TP6Pjlxl0p7HJyNmuxTI,1373
-llama_cloud/types/message.py,sha256=RnahpUQR7s_QOu7pOdS7GNkZe8rJaPLC-wTJbzhSQV8,1373
 llama_cloud/types/message_annotation.py,sha256=n4F9w4LxwmGvgXDk6E8YPTMu_g0yEjZhZ_eNFXdS_bc,1017
+llama_cloud/types/message_role.py,sha256=9MpXT9drR33TyT1-NiqB3uGbuxvWwtoOdSmKQE9HmJI,1359
 llama_cloud/types/metadata_filter.py,sha256=LX2fGsUb4wvF5bj9iWO6IPQGi3i0L2Lb4cE6igeeX9Y,1438
 llama_cloud/types/metadata_filter_value.py,sha256=ij721gXNI7zbgsuDl9-AqBcXg2WDuVZhYS5F5YqekEs,188
 llama_cloud/types/metadata_filters.py,sha256=uSf6sB4oQu6WzMPNFG6Tc4euqEiYcj_X14Y5JWt9xVE,1315
 llama_cloud/types/metadata_filters_filters_item.py,sha256=e8KhD2q6Qc2_aK6r5CvyxC0oWVYO4F4vBIcB9eMEPPM,246
-llama_cloud/types/model_configuration.py,sha256=JD_KSml2EB1EpwuuJUdwZeN4_aO7mTd76tQ16zK2vuU,1370
 llama_cloud/types/node_relationship.py,sha256=2e2PqWm0LOTiImvtsyiuaAPNIl0BItjSrQZTJv65GRA,1209
 llama_cloud/types/none_chunking_config.py,sha256=D062t314Vp-s4n9h8wNgsYfElI4PonPKmihvjEmaqdA,952
 llama_cloud/types/none_segmentation_config.py,sha256=j3jUA6E8uFtwDMEu4TFG3Q4ZGCGiuUfUW9AMO1NNqXU,956
@@ -242,7 +245,9 @@ llama_cloud/types/paginated_jobs_history_with_metrics.py,sha256=Bxy6N0x0FARJhgwN
 llama_cloud/types/paginated_list_cloud_documents_response.py,sha256=MsjS0SWlT0syELDck4x2sxxR3_NC1e6QTdepgVmK9aY,1341
 llama_cloud/types/paginated_list_pipeline_files_response.py,sha256=2TKR2oHSQRyLMqWz1qQBSIvz-ZJb8U_94367lwOJ2S4,1317
 llama_cloud/types/paginated_report_response.py,sha256=o79QhQi9r0HZZrhvRlA6WGjxtyPuxN0xONhwXSwxtcs,1104
-llama_cloud/types/parse_job_config.py,sha256=MuP202tVYpLxtHvobcCzMog348ACahqGdD4z1PHjd6o,6723
+llama_cloud/types/paginated_response_agent_data.py,sha256=u6Y-Cq9qjGF5tskMOQChUNqyI91Tk-uQ6vQdi69cs80,1159
+llama_cloud/types/paginated_response_aggregate_group.py,sha256=1ajZLZJLU6-GuQ_PPsEVRFZ6bm9he807F_F_DmB2HlQ,1179
+llama_cloud/types/parse_job_config.py,sha256=gLRQOaPgTfQuaNzriYtjDPucSFXt1AWyG19tGfzoy5M,6788
 llama_cloud/types/parse_job_config_priority.py,sha256=__-gVv1GzktVCYZVyl6zeDt0pAZwYl-mxM0xkIHPEro,800
 llama_cloud/types/parse_plan_level.py,sha256=GBkDS19qfHseBa17EXfuTPNT4GNv5alyPrWEvWji3GY,528
 llama_cloud/types/parser_languages.py,sha256=Ps3IlaSt6tyxEI657N3-vZL96r2puk8wsf31cWnO-SI,10840
@@ -286,7 +291,7 @@ llama_cloud/types/pipeline_status.py,sha256=aC340nhfuPSrFVZOH_DhgYHWe985J3WNHrwv
 llama_cloud/types/pipeline_transform_config.py,sha256=zMr-ePLKGjbaScxbAHaSwYBL7rrNibVlnn0cbgElDfU,824
 llama_cloud/types/pipeline_type.py,sha256=tTqrhxHP5xd7W2dQGD0e5FOv886nwJssyaVlXpWrtRo,551
 llama_cloud/types/plan_limits.py,sha256=WAbDbRl8gsQxvhmuVB0YT8mry-0uKg6c66uivyppdQU,2056
-llama_cloud/types/playground_session.py,sha256=F8u2KZL2YaOrsT-o1n4zbhyPxSsoduc3ZCzQB8AecFA,1858
+llama_cloud/types/playground_session.py,sha256=BZZk9F_FVuMPcCE5dVNACPqHKIvyWGSkbRrrQOweaaw,1868
 llama_cloud/types/pooling.py,sha256=5Fr6c8rx9SDWwWzEvD78suob2d79ktodUtLUAUHMbP8,651
 llama_cloud/types/preset_composite_retrieval_params.py,sha256=yEf1pk4Wz5J6SxgB8elklwuyVDCRSZqfWC6x3hJUS4Q,1366
 llama_cloud/types/preset_retrieval_params.py,sha256=TcyljefpspJSveMR9L5DQHlqW4jZeexBsXus_LkHkJA,2365
@@ -322,17 +327,17 @@ llama_cloud/types/retrieve_results.py,sha256=rHArmu05K3NvIQepHX5nsVOfcMsZj3MaIcP
 llama_cloud/types/retriever.py,sha256=ZItPsorL8x1XjtJT49ZodaMqU8h2GfwlB4U4cgnfZkM,1626
 llama_cloud/types/retriever_create.py,sha256=WyUR9DRzu3Q9tzKEeXCdQuzCY6WKi9ADJkZea9rqvxU,1286
 llama_cloud/types/retriever_pipeline.py,sha256=F1pZDxg8JdQXRHE6ciFezd7a-Wv5bHplPcGDED-J4b0,1330
-llama_cloud/types/role.py,sha256=SCi2TyFbc68RJuNB-OdcP8ut03Uv5zPZk84QMmf17w8,1384
+llama_cloud/types/role.py,sha256=4pbyLVNPleDd624cDcOhu9y1WvqC0J0gmNirTOW97iA,1342
 llama_cloud/types/schema_relax_mode.py,sha256=v4or6dYTvWvBBNtEd2ZSaUAb1706I0Zuh-Xztm-zx_0,635
 llama_cloud/types/semantic_chunking_config.py,sha256=dFDniTVWpRc7UcmVFvljUoyL5Ztd-l-YrHII7U-yM-k,1053
 llama_cloud/types/sentence_chunking_config.py,sha256=NA9xidK5ICxJPkEMQZWNcsV0Hw9Co_bzRWeYe4uSh9I,1116
+llama_cloud/types/src_app_schema_chat_chat_message.py,sha256=ddMQXZybeExPVFMNe8FWghyXXWktsujpZ_0Xmou3Zz8,1596
 llama_cloud/types/status_enum.py,sha256=cUBIlys89E8PUzmVqqawu7qTDF0aRqBwiijOmRDPvx0,1018
 llama_cloud/types/struct_mode.py,sha256=ROicwjXfFmgVU8_xSVxJlnFUzRNKG5VIEF1wYg9uOPU,1020
 llama_cloud/types/struct_parse_conf.py,sha256=WlL8y0IBvdzGsDtFUlEZLzoUODwmOWAJi0viS9unL18,2297
 llama_cloud/types/supported_llm_model.py,sha256=hubSopFICVNEegbJbtbpK6zRHwFPwUNtrw_NAw_3bfg,1380
 llama_cloud/types/supported_llm_model_names.py,sha256=PXL0gA1lc0GJNzZHnjOscoxHpPW787A8Adh-2egAKo8,2512
 llama_cloud/types/text_block.py,sha256=X154sQkSyposXuRcEWNp_tWcDQ-AI6q_-MfJUN5exP8,958
-llama_cloud/types/text_content_block.py,sha256=MKMBMhJS7Tr-Vmr4MhhDgH8pO6r-_g_8bjWYT8LxitA,1130
 llama_cloud/types/text_node.py,sha256=Tq3QmuKC5cIHvC9wAtvhsXl1g2sACs2yJwQ0Uko8GSU,2846
 llama_cloud/types/text_node_relationships_value.py,sha256=qmXURTk1Xg7ZDzRSSV1uDEel0AXRLohND5ioezibHY0,217
 llama_cloud/types/text_node_with_score.py,sha256=k-KYWO_mgJBvO6xUfOD5W6v1Ku9E586_HsvDoQbLfuQ,1229
@@ -345,7 +350,7 @@ llama_cloud/types/user_job_record.py,sha256=mJHdokJsemXJOwM2l7fsW3X0SlwSNcy7yHbc
 llama_cloud/types/user_organization.py,sha256=yKewpOrMcB-CbujGNTjkX6QiWYr5HVsRIFQ-WX8kp2I,1729
 llama_cloud/types/user_organization_create.py,sha256=Zj57s9xuYVnLW2p8i4j2QORL-G1y7Ab3avXE1baERQY,1189
 llama_cloud/types/user_organization_delete.py,sha256=bEfgQMdTd6oAMZXtvSm5BhZahG1wAVDBXZ8e7V9UN7w,1159
-llama_cloud/types/user_organization_role.py,sha256=vTM5pYG9NJpTQACn8vzSIt01Ul6jEHCVmyR3vV0isPg,1512
+llama_cloud/types/user_organization_role.py,sha256=Tcfu9QISF5nRpo9jvboHzX-Yfg6b676UNfdjzjUIgAs,1448
 llama_cloud/types/validation_error.py,sha256=yZDLtjUHDY5w82Ra6CW0H9sLAr18R0RY1UNgJKR72DQ,1084
 llama_cloud/types/validation_error_loc_item.py,sha256=LAtjCHIllWRBFXvAZ5QZpp7CPXjdtN9EB7HrLVo6EP0,128
 llama_cloud/types/vertex_ai_embedding_config.py,sha256=DvQk2xMJFmo54MEXTzoM4KSADyhGm_ygmFyx6wIcQdw,1159
@@ -353,7 +358,7 @@ llama_cloud/types/vertex_embedding_mode.py,sha256=yY23FjuWU_DkXjBb3JoKV4SCMqel2B
 llama_cloud/types/vertex_text_embedding.py,sha256=-C4fNCYfFl36ATdBMGFVPpiHIKxjk0KB1ERA2Ec20aU,1932
 llama_cloud/types/webhook_configuration.py,sha256=_Xm15whrWoKNBuCoO5y_NunA-ByhCAYK87LnC4W-Pzg,1350
 llama_cloud/types/webhook_configuration_webhook_events_item.py,sha256=LTfOwphnoYUQYwsHGTlCxoVU_PseIRAbmQJRBdyXnbg,1519
-llama_cloud-0.1.30.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
-llama_cloud-0.1.30.dist-info/METADATA,sha256=uHG2_pSkr7dmrXGGSTKO11eIshFPn2ke4kEW2aq0Kgc,1194
-llama_cloud-0.1.30.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
-llama_cloud-0.1.30.dist-info/RECORD,,
+llama_cloud-0.1.32.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
+llama_cloud-0.1.32.dist-info/METADATA,sha256=1nAROO_4DqpEvwvY8WwhsVU5rRTnFx9K08C3-G6b7H0,1194
+llama_cloud-0.1.32.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+llama_cloud-0.1.32.dist-info/RECORD,,
llama_cloud/resources/responses/__init__.py (removed)
@@ -1,2 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
llama_cloud/resources/responses/client.py (removed)
@@ -1,137 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-import urllib.parse
-from json.decoder import JSONDecodeError
-
-from ...core.api_error import ApiError
-from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
-from ...core.jsonable_encoder import jsonable_encoder
-from ...core.remove_none_from_dict import remove_none_from_dict
-from ...errors.unprocessable_entity_error import UnprocessableEntityError
-from ...types.http_validation_error import HttpValidationError
-from ...types.message import Message
-from ...types.model_configuration import ModelConfiguration
-
-try:
-    import pydantic
-    if pydantic.__version__.startswith("1."):
-        raise ImportError
-    import pydantic.v1 as pydantic  # type: ignore
-except ImportError:
-    import pydantic  # type: ignore
-
-# this is used as the default value for optional parameters
-OMIT = typing.cast(typing.Any, ...)
-
-
-class ResponsesClient:
-    def __init__(self, *, client_wrapper: SyncClientWrapper):
-        self._client_wrapper = client_wrapper
-
-    def generate_response(
-        self,
-        *,
-        project_id: typing.Optional[str] = None,
-        organization_id: typing.Optional[str] = None,
-        messages: typing.List[Message],
-        model_configuration: ModelConfiguration,
-    ) -> typing.Any:
-        """
-        EXPERIMENTAL - SSE endpoint for basic response generation (dummy stream).
-
-        Parameters:
-            - project_id: typing.Optional[str].
-
-            - organization_id: typing.Optional[str].
-
-            - messages: typing.List[Message]. List of messages in the conversation
-
-            - model_configuration: ModelConfiguration. Configuration for the model to use in the response
-        ---
-        from llama_cloud import ModelConfiguration, SupportedLlmModelNames
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.responses.generate_response(
-            messages=[],
-            model_configuration=ModelConfiguration(
-                model_name=SupportedLlmModelNames.GPT_4_O,
-            ),
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/responses/generate"),
-            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
-            json=jsonable_encoder({"messages": messages, "model_configuration": model_configuration}),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-
-class AsyncResponsesClient:
-    def __init__(self, *, client_wrapper: AsyncClientWrapper):
-        self._client_wrapper = client_wrapper
-
-    async def generate_response(
-        self,
-        *,
-        project_id: typing.Optional[str] = None,
-        organization_id: typing.Optional[str] = None,
-        messages: typing.List[Message],
-        model_configuration: ModelConfiguration,
-    ) -> typing.Any:
-        """
-        EXPERIMENTAL - SSE endpoint for basic response generation (dummy stream).
-
-        Parameters:
-            - project_id: typing.Optional[str].
-
-            - organization_id: typing.Optional[str].
-
-            - messages: typing.List[Message]. List of messages in the conversation
-
-            - model_configuration: ModelConfiguration. Configuration for the model to use in the response
-        ---
-        from llama_cloud import ModelConfiguration, SupportedLlmModelNames
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.responses.generate_response(
-            messages=[],
-            model_configuration=ModelConfiguration(
-                model_name=SupportedLlmModelNames.GPT_4_O,
-            ),
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/responses/generate"),
-            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
-            json=jsonable_encoder({"messages": messages, "model_configuration": model_configuration}),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
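For reference, the removed ResponsesClient documented the following call pattern in its own docstring. This is a minimal sketch as it applied to 0.1.30, where the responses resource still existed; the token value is a placeholder, and the call is gone entirely in 0.1.32.

from llama_cloud import ModelConfiguration, SupportedLlmModelNames
from llama_cloud.client import LlamaCloud

# Valid against llama-cloud 0.1.30 only; client.responses was removed in 0.1.32.
client = LlamaCloud(token="YOUR_TOKEN")
client.responses.generate_response(
    messages=[],
    model_configuration=ModelConfiguration(model_name=SupportedLlmModelNames.GPT_4_O),
)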
llama_cloud/types/app_schema_responses_message_role.py (removed)
@@ -1,33 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import enum
-import typing
-
-T_Result = typing.TypeVar("T_Result")
-
-
-class AppSchemaResponsesMessageRole(str, enum.Enum):
-    """
-    Enum representing the role of a message in a conversation.
-
-    - system: The system message that sets the context or instructions.
-    - user: The user's message in the conversation.
-    - assistant: The AI assistant's response in the conversation.
-    """
-
-    SYSTEM = "system"
-    USER = "user"
-    ASSISTANT = "assistant"
-
-    def visit(
-        self,
-        system: typing.Callable[[], T_Result],
-        user: typing.Callable[[], T_Result],
-        assistant: typing.Callable[[], T_Result],
-    ) -> T_Result:
-        if self is AppSchemaResponsesMessageRole.SYSTEM:
-            return system()
-        if self is AppSchemaResponsesMessageRole.USER:
-            return user()
-        if self is AppSchemaResponsesMessageRole.ASSISTANT:
-            return assistant()
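The removed enum's visit helper dispatched on the role value. A minimal usage sketch against 0.1.30 (the lambda bodies and the label variable are illustrative, not part of the package):

from llama_cloud.types.app_schema_responses_message_role import AppSchemaResponsesMessageRole

# Dispatch on the role with the enum's visit() helper (0.1.30 only; the type was removed in 0.1.32).
role = AppSchemaResponsesMessageRole.USER
label = role.visit(
    system=lambda: "system prompt",
    user=lambda: "user turn",
    assistant=lambda: "assistant reply",
)
assert label == "user turn"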