llama-cloud 0.1.19__py3-none-any.whl → 0.1.21__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of llama-cloud has been flagged as possibly problematic.
Files changed (38)
  1. llama_cloud/__init__.py +44 -26
  2. llama_cloud/resources/files/client.py +18 -4
  3. llama_cloud/resources/parsing/client.py +8 -0
  4. llama_cloud/resources/pipelines/client.py +25 -11
  5. llama_cloud/types/__init__.py +46 -26
  6. llama_cloud/types/{base_prompt_template.py → data_source_update_dispatcher_config.py} +9 -7
  7. llama_cloud/types/{node_parser.py → delete_params.py} +7 -9
  8. llama_cloud/types/document_ingestion_job_params.py +43 -0
  9. llama_cloud/types/extract_config.py +3 -0
  10. llama_cloud/types/job_record.py +2 -2
  11. llama_cloud/types/job_record_parameters.py +111 -0
  12. llama_cloud/types/{page_splitter_node_parser.py → l_lama_parse_transform_config.py} +5 -10
  13. llama_cloud/types/legacy_parse_job_config.py +189 -0
  14. llama_cloud/types/llama_parse_parameters.py +1 -0
  15. llama_cloud/types/load_files_job_config.py +35 -0
  16. llama_cloud/types/parse_job_config.py +134 -0
  17. llama_cloud/types/pipeline.py +4 -4
  18. llama_cloud/types/pipeline_create.py +2 -2
  19. llama_cloud/types/pipeline_file_update_dispatcher_config.py +38 -0
  20. llama_cloud/types/{configured_transformation_item.py → pipeline_file_updater_config.py} +13 -12
  21. llama_cloud/types/pipeline_managed_ingestion_job_params.py +37 -0
  22. llama_cloud/types/pipeline_metadata_config.py +36 -0
  23. llama_cloud/types/pipeline_status.py +17 -0
  24. llama_cloud/types/prompt_conf.py +1 -0
  25. llama_cloud/types/supported_llm_model.py +1 -2
  26. {llama_cloud-0.1.19.dist-info → llama_cloud-0.1.21.dist-info}/METADATA +6 -2
  27. {llama_cloud-0.1.19.dist-info → llama_cloud-0.1.21.dist-info}/RECORD +29 -29
  28. {llama_cloud-0.1.19.dist-info → llama_cloud-0.1.21.dist-info}/WHEEL +1 -1
  29. llama_cloud/types/character_splitter.py +0 -46
  30. llama_cloud/types/code_splitter.py +0 -50
  31. llama_cloud/types/configured_transformation_item_component.py +0 -22
  32. llama_cloud/types/llm.py +0 -60
  33. llama_cloud/types/markdown_element_node_parser.py +0 -51
  34. llama_cloud/types/markdown_node_parser.py +0 -52
  35. llama_cloud/types/pydantic_program_mode.py +0 -41
  36. llama_cloud/types/sentence_splitter.py +0 -50
  37. llama_cloud/types/token_text_splitter.py +0 -50
  38. {llama_cloud-0.1.19.dist-info → llama_cloud-0.1.21.dist-info}/LICENSE +0 -0
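
Each RECORD entry in the diff below is a comma-separated triple: packaged path, "sha256=" followed by the unpadded URL-safe base64 digest, and the file size in bytes. A similar changed-files listing can be reproduced locally; the following is a minimal sketch, assuming both wheels were downloaded beforehand (for example with "pip download llama-cloud==0.1.19 --no-deps -d wheels/" and the same for 0.1.21) and that the wheel file names used below match what was downloaded.

# Sketch: diff the RECORD files of two wheels to list removed, added, and changed entries.
# The wheel paths and dist-info directory names below are illustrative.
import zipfile


def read_record(wheel_path: str, dist_info: str) -> dict:
    """Map each packaged path to its 'sha256=...,size' suffix from RECORD."""
    with zipfile.ZipFile(wheel_path) as wheel:
        text = wheel.read(f"{dist_info}/RECORD").decode("utf-8")
    entries = {}
    for line in text.splitlines():
        if not line.strip():
            continue
        path, _, rest = line.partition(",")
        entries[path] = rest
    return entries


old = read_record("wheels/llama_cloud-0.1.19-py3-none-any.whl", "llama_cloud-0.1.19.dist-info")
new = read_record("wheels/llama_cloud-0.1.21-py3-none-any.whl", "llama_cloud-0.1.21.dist-info")

for path in sorted(old.keys() - new.keys()):
    print(f"- {path}")          # removed in 0.1.21
for path in sorted(new.keys() - old.keys()):
    print(f"+ {path}")          # added in 0.1.21
for path in sorted(old.keys() & new.keys()):
    if old[path] != new[path]:
        print(f"~ {path}")      # hash or size changed
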
{llama_cloud-0.1.19.dist-info → llama_cloud-0.1.21.dist-info}/RECORD
@@ -1,4 +1,4 @@
- llama_cloud/__init__.py,sha256=ek0T4C2t2jQzRaagVuKndSzFXXwuS_AY6J6pvsiay_s,23165
+ llama_cloud/__init__.py,sha256=LY7rGZhiQwpQG72OHDir1GGwPpY18UKTow2wVENeRxs,24071
  llama_cloud/client.py,sha256=L8gEXB8nVlGVgfncfdLaS1j4b-1wExV4TqElUwayvtQ,5759
  llama_cloud/core/__init__.py,sha256=QJS3CJ2TYP2E1Tge0CS6Z7r8LTNzJHQVX1hD3558eP0,519
  llama_cloud/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
@@ -32,7 +32,7 @@ llama_cloud/resources/embedding_model_configs/types/embedding_model_config_creat
  llama_cloud/resources/evals/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
  llama_cloud/resources/evals/client.py,sha256=v2AyeQV0hVgC6xoP2gJNgneJMaeXALV1hIeirYGxlPw,3242
  llama_cloud/resources/files/__init__.py,sha256=3B0SNM8EE6PddD5LpxYllci9vflEXy1xjPzhEEd-OUk,293
- llama_cloud/resources/files/client.py,sha256=7VmhrE5fbftB6p6QUQUkGM5FO48obF73keq86vGFyhE,49676
+ llama_cloud/resources/files/client.py,sha256=oPwDQAkf0zN1mxP_vT6Songp4scOq5k0jcfHo-zfCtY,50560
  llama_cloud/resources/files/types/__init__.py,sha256=EPYENAwkjBWv1MLf8s7R5-RO-cxZ_8NPrqfR4ZoR7jY,418
  llama_cloud/resources/files/types/file_create_from_url_resource_info_value.py,sha256=Wc8wFgujOO5pZvbbh2TMMzpa37GKZd14GYNJ9bdq7BE,214
  llama_cloud/resources/files/types/file_create_permission_info_value.py,sha256=KPCFuEaa8NiB85A5MfdXRAQ0poAUTl7Feg6BTfmdWas,209
@@ -53,9 +53,9 @@ llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_s
  llama_cloud/resources/organizations/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
  llama_cloud/resources/organizations/client.py,sha256=OGSVpkfY5wu8-22IFWVmtbYSDiy0-KqA3Lc1E_jNHvg,55889
  llama_cloud/resources/parsing/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
- llama_cloud/resources/parsing/client.py,sha256=M3w5xLx8SZdvfh0Op7TI0CDwdF0YNcjk6E9qEfbZTf0,76377
+ llama_cloud/resources/parsing/client.py,sha256=QoRN6Zie7jSY3qAhRa6OnCdYg4e62SkunFQ3NJWLWcs,76711
  llama_cloud/resources/pipelines/__init__.py,sha256=Mx7p3jDZRLMltsfywSufam_4AnHvmAfsxtMHVI72e-8,1083
- llama_cloud/resources/pipelines/client.py,sha256=x6MjLVwA6bQfDZmelc364tXmSoJeMUj6xPadjermpGQ,129010
+ llama_cloud/resources/pipelines/client.py,sha256=3sBLSIR5iY-rH5usZUM6dTWjbIEiL62Up15wbsWAzn4,129436
  llama_cloud/resources/pipelines/types/__init__.py,sha256=jjaMc0V3K1HZLMYZ6WT4ydMtBCVy-oF5koqTCovbDws,1202
  llama_cloud/resources/pipelines/types/pipeline_file_update_custom_metadata_value.py,sha256=trI48WLxPcAqV9207Q6-3cj1nl4EGlZpw7En56ZsPgg,217
  llama_cloud/resources/pipelines/types/pipeline_update_embedding_config.py,sha256=c8FF64fDrBMX_2RX4uY3CjbNc0Ss_AUJ4Eqs-KeV4Wc,2874
@@ -68,7 +68,7 @@ llama_cloud/resources/reports/types/__init__.py,sha256=LfwDYrI4RcQu-o42iAe7HkcwH
  llama_cloud/resources/reports/types/update_report_plan_api_v_1_reports_report_id_plan_patch_request_action.py,sha256=Qh-MSeRvDBfNb5hoLELivv1pLtrYVf52WVoP7G8V34A,807
  llama_cloud/resources/retrievers/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
  llama_cloud/resources/retrievers/client.py,sha256=T7fu41wXAYUTGh23ZWlKPM4e8zH7mg5MDa8F1GxNYwQ,31502
- llama_cloud/types/__init__.py,sha256=eWj70ScNb8UwFMtLJTGsuhD3nqPBNssVT3qh1iEyh7s,27476
+ llama_cloud/types/__init__.py,sha256=aoxpc2tZxdBkPg2NcmgJYUAuDkE98loZibDrxWSaeLg,28456
  llama_cloud/types/advanced_mode_transform_config.py,sha256=4xCXye0_cPmVS1F8aNTx81sIaEPjQH9kiCCAIoqUzlI,1502
  llama_cloud/types/advanced_mode_transform_config_chunking_config.py,sha256=wYbJnWLpeQDfhmDZz-wJfYzD1iGT5Jcxb9ga3mzUuvk,1983
  llama_cloud/types/advanced_mode_transform_config_segmentation_config.py,sha256=anNGq0F5-IlbIW3kpC8OilzLJnUq5tdIcWHnRnmlYsg,1303
@@ -81,7 +81,6 @@ llama_cloud/types/base_plan.py,sha256=5DZi20EOciTc5okLAxQDqyGylsW-DflTy14dcvQb2f
  llama_cloud/types/base_plan_metronome_plan_type.py,sha256=I3g_dVoWWztbmpWpYmseDqQSbwtlLUl2vS01tfgMjEA,499
  llama_cloud/types/base_plan_name.py,sha256=keHQaw9YV9ghsWnGfnHrLtB4qNz0v4TWX4_MoO3flRM,1926
  llama_cloud/types/base_plan_plan_frequency.py,sha256=idUZlDaSdMrMZ2lQ1ytBWM4QyduIZu6Gt2eLU0LVqH4,684
- llama_cloud/types/base_prompt_template.py,sha256=Cw3887tnytHZ5bJBSlniyU9k5ASidv9VYR86--IbNqo,1248
  llama_cloud/types/batch.py,sha256=C8320qAjzQGYHiAvUOUzYsT9Ba7OYiHfA9T9_H8_wCY,2235
  llama_cloud/types/batch_item.py,sha256=ea0efWurrduelCg3wG4bhQOLiWTH1NJfd7So3j_HEbg,1574
  llama_cloud/types/batch_paginated_list.py,sha256=p25r9oyidy-Cd2D8xt_KLiTn7eMFvAVnzmvXfvKsOsw,1262
@@ -91,7 +90,6 @@ llama_cloud/types/bedrock_embedding_config.py,sha256=32dMhoA2cLx1jeogDnCl9WPVb83
  llama_cloud/types/billing_period.py,sha256=_BvznHPiB101hKeFmP0ZIRkBnGboxNvNgJD0BhegvN4,1002
  llama_cloud/types/box_auth_mechanism.py,sha256=EwEdpWYytw_dRtSElfSMPhh5dxalYH8mGW3UAUpkUfY,502
  llama_cloud/types/character_chunking_config.py,sha256=2ooAnrlVVbKj4nDi_lR66x5E6nWOmj5YDWhSMQD0ubc,1035
- llama_cloud/types/character_splitter.py,sha256=Jm6ie7c9JmMqIqLfAN-96sYvNUaIyLzCPBjNUx29VUw,1896
  llama_cloud/types/chat_app.py,sha256=fLuzYkXLq51C_Y23hoLwfmG-OiT7jlyHt2JGe6-f1IA,1795
  llama_cloud/types/chat_app_response.py,sha256=WSKr1KI9_pGTSstr3I53kZ8qb3y87Q4ulh8fR0C7sSU,1784
  llama_cloud/types/chat_data.py,sha256=ZYqVtjXF6qPGajU4IWZu3InpU54TXJwBFiqxBepylP0,1197
@@ -114,7 +112,6 @@ llama_cloud/types/cloud_qdrant_vector_store.py,sha256=F-gjNArzwLWmqgPcC-ZxRqSrhT
  llama_cloud/types/cloud_s_3_data_source.py,sha256=LG19EMOfIfm14XLbMaUC25BKzdL5u_Mb5GwgF7cB9Kw,1376
  llama_cloud/types/cloud_sharepoint_data_source.py,sha256=iJtlgb4hsj8CP2IJ7TxdK1GOb3MdyKr7_jsOlY3kFiE,1609
  llama_cloud/types/cloud_slack_data_source.py,sha256=tlsNj-hDj1gWmM0Q2A1BeyolfaPg_wfvSlJGTETknAo,1374
- llama_cloud/types/code_splitter.py,sha256=8MJScSxk9LzByufokcWG3AHAnOjUt13VlV2w0SCXTLc,1987
  llama_cloud/types/cohere_embedding.py,sha256=wkv_fVCA1WEroGawzPFExwmiJ75gPfzeeemty7NBlsM,1579
  llama_cloud/types/cohere_embedding_config.py,sha256=c0Kj1wuSsBX9TQ2AondKv5ZtX5PmkivsHj6P0M7tVB4,1142
  llama_cloud/types/composite_retrieval_mode.py,sha256=PtN0vQ90xyAJL4vyGRG4lMNOpnJ__2L1xiwosI9yfms,548
@@ -125,8 +122,6 @@ llama_cloud/types/configurable_data_sink_names.py,sha256=0Yk9i8hcNXKCcSKpa5KwsCw
  llama_cloud/types/configurable_data_source_names.py,sha256=mNW71sSgcVhU3kePAOUgRxeqK1Vo7F_J1xIzmYKPRq0,1971
  llama_cloud/types/configurable_transformation_definition.py,sha256=LDOhI5IDxlLDWM_p_xwCFM7qq1y-aGA8UxN7dnplDlU,1886
  llama_cloud/types/configurable_transformation_names.py,sha256=N_YhY8IuQxsqBteCibaQwEaY0zd6Ncb6jW69d9mjrdU,1898
- llama_cloud/types/configured_transformation_item.py,sha256=9caK5ZOKgGCZc6ynJJIWwpxpScKHOHkZwHFlsBy-Fog,1826
- llama_cloud/types/configured_transformation_item_component.py,sha256=VEwtkbnImKGtzaSaIb9q46xu7ZPZliqK7oMh_-ftiq8,712
  llama_cloud/types/credit_type.py,sha256=nwSRKDWgHk_msdWitctqtyeZwj5EFd6VLto6NF2yCd4,971
  llama_cloud/types/data_sink.py,sha256=PeexYHHoD8WkVp9WsFtfC-AIWszcgeJUprG1bwC8WsQ,1498
  llama_cloud/types/data_sink_component.py,sha256=uvuxLY3MPDpv_bkT0y-tHSZVPRSHCkDBDHVff-036Dg,749
@@ -140,6 +135,9 @@ llama_cloud/types/data_source_create_component.py,sha256=-P4FGv9Xg951n-77_bb-2_C
  llama_cloud/types/data_source_create_custom_metadata_value.py,sha256=ejSsQNbszYQaUWFh9r9kQpHf88qbhuRv1SI9J_MOSC0,215
  llama_cloud/types/data_source_custom_metadata_value.py,sha256=pTZn5yjZYmuOhsLABFJOKZblZUkRqo1CqLAuP5tKji4,209
  llama_cloud/types/data_source_definition.py,sha256=HlSlTxzYcQJOSo_2OSroAE8vAr-otDvTNBSEkA54vL8,1575
+ llama_cloud/types/data_source_update_dispatcher_config.py,sha256=Sh6HhXfEV2Z6PYhkYQucs2MxyKVpL3UPV-I4cbf--bA,1242
+ llama_cloud/types/delete_params.py,sha256=1snPrd3WO9C1bKf0WdMslE2HQMF0yYLI3U7N53cmurM,1285
+ llama_cloud/types/document_ingestion_job_params.py,sha256=33xTAl-K-m1j_Ufkj7w2GaYg9EUH5Hwsjn869X-fWMk,1524
  llama_cloud/types/edit_suggestion.py,sha256=uzXSZYJiU3FaNN-TvEd3EXdaXvjQIe7Mf4kntKkyB2I,1202
  llama_cloud/types/edit_suggestion_blocks_item.py,sha256=ojTk4lh0IHmrWP5wLPTIlsc2jAUDoHvdjJ5sm2uMut0,236
  llama_cloud/types/element_segmentation_config.py,sha256=QOBk8YFrgK0I2m3caqV5bpYaGXbk0fMSjZ4hUPZXZDI,959
@@ -150,7 +148,7 @@ llama_cloud/types/embedding_model_config_update_embedding_config.py,sha256=mrXFx
  llama_cloud/types/eval_execution_params.py,sha256=ntVaJh5SMZMPL4QLUiihVjUlg2SKbrezvbMKGlrF66Q,1369
  llama_cloud/types/extract_agent.py,sha256=T98IOueut4M52Qm7hqcUOcWFFDhZ-ye0OFdXgfFGtS4,1763
  llama_cloud/types/extract_agent_data_schema_value.py,sha256=UaDQ2KjajLDccW7F4NKdfpefeTJrr1hl0c95WRETYkM,201
- llama_cloud/types/extract_config.py,sha256=9UH8cNBvKBQX9YqVAGwG0a7B73Y4Cwrmvil5Ex-L_I0,1603
+ llama_cloud/types/extract_config.py,sha256=YZgNJpH1n6N-Lx9cIeuijT9xk1F6SeKjn4526wAtljc,1745
  llama_cloud/types/extract_job.py,sha256=Yx4fDdCdylAji2LPTwqflVpz1o9slpj9tTLS93-1tzU,1431
  llama_cloud/types/extract_job_create.py,sha256=UK1mBIKyflo7e6m1MxMN95pLscj67jH_yvs8EvmBXqU,1545
  llama_cloud/types/extract_job_create_data_schema_override.py,sha256=vuiJ2lGJjbXEnvFKzVnKyvgwhMXPg1Pb5GZne2DrB60,330
@@ -191,27 +189,27 @@ llama_cloud/types/ingestion_error_response.py,sha256=8u0cyT44dnpkNeUKemTvJMUqi_W
  llama_cloud/types/input_message.py,sha256=Ym6-tX6CMWKuHfxRtyM2y16kqSS3BzHged9rFRFkX0g,1346
  llama_cloud/types/job_name_mapping.py,sha256=2dQFQlVHoeSlkyEKSEJv0M3PzJf7hMvkuABj3vMY7ys,1617
  llama_cloud/types/job_names.py,sha256=WacongwoJygg_gCyYjPsOVv3cmVtRaX633JNgFxy-d8,3915
- llama_cloud/types/job_record.py,sha256=r2WzLQXSOFogNMN2rl10rAlYI9OTCmVn06QaZXxa0rQ,2058
+ llama_cloud/types/job_record.py,sha256=7hdDPZU11EG8g6_9iq6vy-zqLEryeC7i8fZ-CkUB_xQ,2084
+ llama_cloud/types/job_record_parameters.py,sha256=Oqxp5y0owPfjLc_NR7AYE8P3zM2PJo36N9olbyNl7AA,3425
  llama_cloud/types/job_record_with_usage_metrics.py,sha256=iNV2do5TB_0e3PoOz_DJyAaM6Cn9G8KG-dGPGgEs5SY,1198
+ llama_cloud/types/l_lama_parse_transform_config.py,sha256=YQRJZvKh1Ee2FUyW_N0nqYJoW599qBgH3JCH9SH6YLo,1249
+ llama_cloud/types/legacy_parse_job_config.py,sha256=kVBdiSLraI9rKQOPf0Ci9RtbNLkco0byBJC42uE_PCI,11698
  llama_cloud/types/llama_extract_settings.py,sha256=IQFxtKa4GtHKc9w-fLwsH0LSKDWzR9_vZ_cTFJ9cGBI,2288
  llama_cloud/types/llama_index_core_base_llms_types_chat_message.py,sha256=NelHo-T-ebVMhRKsqE_xV8AJW4c7o6lS0uEQnPsmTwg,1365
  llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py,sha256=JTU5EDoZB_1vcUixiWDCEbCj3-09GhYC3RDDSc0aqBU,1216
- llama_cloud/types/llama_parse_parameters.py,sha256=zgA133oDF0QYS2E4r8CtvKPi0nOTq0K5jBRaZmXJGT4,5718
+ llama_cloud/types/llama_parse_parameters.py,sha256=DNhVZm3YQ_3xZiz7WUrwH7E6jqW2fZ7YGFsdfsYalUk,5773
  llama_cloud/types/llama_parse_supported_file_extensions.py,sha256=B_0N3f8Aq59W9FbsH50mGBUiyWTIXQjHFl739uAyaQw,11207
- llama_cloud/types/llm.py,sha256=7iIItVPjURp4u5xxJDAFIefUdhUKwIuA245WXilJPXE,2234
  llama_cloud/types/llm_model_data.py,sha256=6rrycqGwlK3LZ2S-WtgmeomithdLhDCgwBBZQ5KLaso,1300
  llama_cloud/types/llm_parameters.py,sha256=RTKYt09lm9a1MlnBfYuTP2x_Ww4byUNNc1TqIel5O1Y,1377
+ llama_cloud/types/load_files_job_config.py,sha256=R5sFgFmV__0mqLUuD7dkFoBJHG2ZLw5px9zRapvYcpE,1069
  llama_cloud/types/managed_ingestion_status.py,sha256=3KVlcurpEBOPAesBUS5pSYLoQVIyZUlr90Mmv-uALHE,1290
  llama_cloud/types/managed_ingestion_status_response.py,sha256=rdNpjNbQswF-6JG1e-EU374TP6Pjlxl0p7HJyNmuxTI,1373
- llama_cloud/types/markdown_element_node_parser.py,sha256=NUqdU8BmyfSFK2rV6hCrvP6U1iB6aqZCVsvHWJQ49xU,1964
- llama_cloud/types/markdown_node_parser.py,sha256=GchDnlADMRiYREFOO6o_3LoiCXwUrrhms2CQkbP8sMo,1924
  llama_cloud/types/message_annotation.py,sha256=n4F9w4LxwmGvgXDk6E8YPTMu_g0yEjZhZ_eNFXdS_bc,1017
  llama_cloud/types/message_role.py,sha256=9MpXT9drR33TyT1-NiqB3uGbuxvWwtoOdSmKQE9HmJI,1359
  llama_cloud/types/metadata_filter.py,sha256=dVdXY6i0aCkvJrs7ncQt4-S8jmBF9bBSp2VuWrmAVfI,1440
  llama_cloud/types/metadata_filter_value.py,sha256=ij721gXNI7zbgsuDl9-AqBcXg2WDuVZhYS5F5YqekEs,188
  llama_cloud/types/metadata_filters.py,sha256=uSf6sB4oQu6WzMPNFG6Tc4euqEiYcj_X14Y5JWt9xVE,1315
  llama_cloud/types/metadata_filters_filters_item.py,sha256=e8KhD2q6Qc2_aK6r5CvyxC0oWVYO4F4vBIcB9eMEPPM,246
- llama_cloud/types/node_parser.py,sha256=rqZTQ_9GnCHOvSpXuAZoezxQCOgxHo-hmQv0s7pnEFc,1380
  llama_cloud/types/node_relationship.py,sha256=2e2PqWm0LOTiImvtsyiuaAPNIl0BItjSrQZTJv65GRA,1209
  llama_cloud/types/none_chunking_config.py,sha256=D062t314Vp-s4n9h8wNgsYfElI4PonPKmihvjEmaqdA,952
  llama_cloud/types/none_segmentation_config.py,sha256=j3jUA6E8uFtwDMEu4TFG3Q4ZGCGiuUfUW9AMO1NNqXU,956
@@ -224,12 +222,12 @@ llama_cloud/types/page_figure_metadata.py,sha256=iIg6_f2SwJg6UcQo9X4MoSm_ygxnIBm
  llama_cloud/types/page_screenshot_metadata.py,sha256=lobrq0AsOr8sDwMgA9ytop8lRmRFvJW2oiql3yLvbjM,1328
  llama_cloud/types/page_screenshot_node_with_score.py,sha256=EdqoXbmARCz1DV14E2saCPshIeII709uM4cLwxw_mkM,1232
  llama_cloud/types/page_segmentation_config.py,sha256=VH8uuxnubnJak1gSpS64OoMueHidhsDB-2eq2tVHbag,998
- llama_cloud/types/page_splitter_node_parser.py,sha256=rQgS1CDk18UKA0r9OPvjdtM570jzFArdLCTxYAtZny8,1424
  llama_cloud/types/paginated_extract_runs_response.py,sha256=NNeVcgBm0mYTAsumwQBO_YrxvkgUqwsvZo3xs8QjVCc,1423
  llama_cloud/types/paginated_jobs_history_with_metrics.py,sha256=Bxy6N0x0FARJhgwNKKPkNpXx8YLRHvth23G14f5Fuk4,1136
  llama_cloud/types/paginated_list_cloud_documents_response.py,sha256=MsjS0SWlT0syELDck4x2sxxR3_NC1e6QTdepgVmK9aY,1341
  llama_cloud/types/paginated_list_pipeline_files_response.py,sha256=2TKR2oHSQRyLMqWz1qQBSIvz-ZJb8U_94367lwOJ2S4,1317
  llama_cloud/types/paginated_report_response.py,sha256=o79QhQi9r0HZZrhvRlA6WGjxtyPuxN0xONhwXSwxtcs,1104
+ llama_cloud/types/parse_job_config.py,sha256=KLBhRRGziH4eU2sZgab24c8-L9b8M9on1Dg0nVnObGc,6254
  llama_cloud/types/parse_plan_level.py,sha256=GBkDS19qfHseBa17EXfuTPNT4GNv5alyPrWEvWji3GY,528
  llama_cloud/types/parser_languages.py,sha256=Ps3IlaSt6tyxEI657N3-vZL96r2puk8wsf31cWnO-SI,10840
  llama_cloud/types/parsing_history_item.py,sha256=_MVzf43t84PbmjOzsMLZ_NBoyiisigLWz-fr0ZxU63g,1183
@@ -244,9 +242,9 @@ llama_cloud/types/permission.py,sha256=LjhZdo0oLvk7ZVIF1d6Qja--AKH5Ri0naUhuJvZS6
  llama_cloud/types/pg_vector_distance_method.py,sha256=U81o0ARjPR-HuFcVspHiJUrjIDJo3jLhB46vkITDu7M,1203
  llama_cloud/types/pg_vector_hnsw_settings.py,sha256=-RE59xUgHwNEyAwRYmOQ8SHeAqkSYBfCAROw7QomxUU,1758
  llama_cloud/types/pg_vector_vector_type.py,sha256=VwOohN566zw42UMlnuKTJopYJypsSnzWjCFmKRoU-bo,952
- llama_cloud/types/pipeline.py,sha256=eVNfQjfQTArB3prPeDkfDK6PtfhhBxW7-_VhH9MzlsE,2789
+ llama_cloud/types/pipeline.py,sha256=4m1NIqTtG2DItvW69SWW3NjZPBL848VEW69Qbt2B7uo,2728
  llama_cloud/types/pipeline_configuration_hashes.py,sha256=7_MbOcPWV6iyMflJeXoo9vLzD04E5WM7YxYp4ls0jQs,1169
- llama_cloud/types/pipeline_create.py,sha256=pS1Lc5Ihh2OXMgRmaO_597a_6ddJEJL-m57efyRsgzw,2687
+ llama_cloud/types/pipeline_create.py,sha256=kF9lOu4Kgwgg26Kj3VsAeHoi59jga6ka4oYkIzVy25M,2645
  llama_cloud/types/pipeline_create_embedding_config.py,sha256=PQqmVBFUyZXYKKBmVQF2zPsGp1L6rje6g3RtXEcdfc8,2811
  llama_cloud/types/pipeline_create_transform_config.py,sha256=HP6tzLsw_pomK1Ye2PYCS_XDZK_TMgg22mz17_zYKFg,303
  llama_cloud/types/pipeline_data_source.py,sha256=g8coq6ohp09TtqzvB3_A8Nzery3J5knIfxGWzUtozmg,2381
@@ -264,6 +262,11 @@ llama_cloud/types/pipeline_file_custom_metadata_value.py,sha256=ClFphYDNlHxeyLF5
  llama_cloud/types/pipeline_file_permission_info_value.py,sha256=a9yfg5n9po0-4ljGx8DtJoeLBwWFpaEk9ZQUN195BXg,211
  llama_cloud/types/pipeline_file_resource_info_value.py,sha256=s3uFGQNwlUEr-X4TJZkW_kMBvX3h1sXRJoYlJRvHSDc,209
  llama_cloud/types/pipeline_file_status.py,sha256=7AJOlwqZVcsk6aPF6Q-x7UzjdzdBj4FeXAZ4m35Bb5M,1003
+ llama_cloud/types/pipeline_file_update_dispatcher_config.py,sha256=PiJ1brbKGyq07GmD2VouFfm_Y3KShiyhBXJkwFJsKXw,1222
+ llama_cloud/types/pipeline_file_updater_config.py,sha256=KMHBYpH3fYDQaDVvxVgckosiWz0Dl3v5dC53Cgnmtb8,1761
+ llama_cloud/types/pipeline_managed_ingestion_job_params.py,sha256=ahliOe6YnLI-upIq1v5HZd9p8xH6pPdkh2M_n_zM9TA,1180
+ llama_cloud/types/pipeline_metadata_config.py,sha256=yMnPu6FnhagjuJ_rQ756WbIvVG5dzyXT1fmCYUAmCS0,1291
+ llama_cloud/types/pipeline_status.py,sha256=aC340nhfuPSrFVZOH_DhgYHWe985J3WNHrwvUtjXTRA,481
  llama_cloud/types/pipeline_transform_config.py,sha256=zMr-ePLKGjbaScxbAHaSwYBL7rrNibVlnn0cbgElDfU,824
  llama_cloud/types/pipeline_type.py,sha256=tTqrhxHP5xd7W2dQGD0e5FOv886nwJssyaVlXpWrtRo,551
  llama_cloud/types/plan_limits.py,sha256=WAbDbRl8gsQxvhmuVB0YT8mry-0uKg6c66uivyppdQU,2056
@@ -276,8 +279,7 @@ llama_cloud/types/progress_event.py,sha256=Bk73A8geTVaq0ze5pMnbkAmx7FSOHQIixYCpC
  llama_cloud/types/progress_event_status.py,sha256=yb4RAXwOKU6Bi7iyYy-3lwhF6_mLz0ZFyGjxIdaByoE,893
  llama_cloud/types/project.py,sha256=4NNh_ZAjEkoWl5st6b1jsPVf_SYKtUTB6rS1701G4IQ,1441
  llama_cloud/types/project_create.py,sha256=GxGmsXGJM-cHrvPFLktEkj9JtNsSdFae7-HPZFB4er0,1014
- llama_cloud/types/prompt_conf.py,sha256=6vhUFOBL5MUUJ_ucyvFfmyNCaiPOWepviEawChu0enI,1550
- llama_cloud/types/pydantic_program_mode.py,sha256=QfvpqR7TqyNuOxo78Sr58VOu7KDSBrHJM4XXBB0F5z0,1202
+ llama_cloud/types/prompt_conf.py,sha256=hh8I3jxk3K6e5QZoBCLqszohMYtk73PERYoL36lLmTk,1660
  llama_cloud/types/re_rank_config.py,sha256=mxRWwrC5BLg3DP1yEyRwW2lIpv5BuXZfTy8f4RbcOp0,1262
  llama_cloud/types/re_ranker_type.py,sha256=qYItMEHrf80ePBp7gNGBSL67mkTIsqco92WJaJiYweo,1123
  llama_cloud/types/recurring_credit_grant.py,sha256=19qI3p5k1mQ1Qoo-gCQU02Aa42XpEsmwxPF1F88F-Yg,1517
@@ -307,18 +309,16 @@ llama_cloud/types/role.py,sha256=SCi2TyFbc68RJuNB-OdcP8ut03Uv5zPZk84QMmf17w8,138
  llama_cloud/types/schema_relax_mode.py,sha256=v4or6dYTvWvBBNtEd2ZSaUAb1706I0Zuh-Xztm-zx_0,635
  llama_cloud/types/semantic_chunking_config.py,sha256=dFDniTVWpRc7UcmVFvljUoyL5Ztd-l-YrHII7U-yM-k,1053
  llama_cloud/types/sentence_chunking_config.py,sha256=NA9xidK5ICxJPkEMQZWNcsV0Hw9Co_bzRWeYe4uSh9I,1116
- llama_cloud/types/sentence_splitter.py,sha256=GbC3KE20Nd85uzO4bqJttjqJhQ_1co2gKnSQxzfOAiM,2140
  llama_cloud/types/status_enum.py,sha256=cUBIlys89E8PUzmVqqawu7qTDF0aRqBwiijOmRDPvx0,1018
  llama_cloud/types/struct_mode.py,sha256=ROicwjXfFmgVU8_xSVxJlnFUzRNKG5VIEF1wYg9uOPU,1020
  llama_cloud/types/struct_parse_conf.py,sha256=kKmxsfllbXlRVVDmJtL3Uto9B340row00mYXCzF5tX4,2245
- llama_cloud/types/supported_llm_model.py,sha256=0v-g01LyZB7TeN0zwAeSJejRoT95SVaXOJhNz7boJwM,1461
+ llama_cloud/types/supported_llm_model.py,sha256=hubSopFICVNEegbJbtbpK6zRHwFPwUNtrw_NAw_3bfg,1380
  llama_cloud/types/supported_llm_model_names.py,sha256=xZhgu4NcxnA61vmQsxDFgPSRjWtczcXOoCKrtwOBWqc,2161
  llama_cloud/types/text_block.py,sha256=X154sQkSyposXuRcEWNp_tWcDQ-AI6q_-MfJUN5exP8,958
  llama_cloud/types/text_node.py,sha256=Tq3QmuKC5cIHvC9wAtvhsXl1g2sACs2yJwQ0Uko8GSU,2846
  llama_cloud/types/text_node_relationships_value.py,sha256=qmXURTk1Xg7ZDzRSSV1uDEel0AXRLohND5ioezibHY0,217
  llama_cloud/types/text_node_with_score.py,sha256=k-KYWO_mgJBvO6xUfOD5W6v1Ku9E586_HsvDoQbLfuQ,1229
  llama_cloud/types/token_chunking_config.py,sha256=XNvnTsNd--YOMQ_Ad8hoqhYgQftqkBHKVn6i7nJnMqs,1067
- llama_cloud/types/token_text_splitter.py,sha256=0o3dml94ub5KLy3E5MjxfK4IwVAn0-VTE4zVWG1fUZE,2048
  llama_cloud/types/transformation_category_names.py,sha256=Wb7NBB0f-tEtfEZQis-iKy71SUKmmHFcXf6XLn6g0XU,545
  llama_cloud/types/usage_and_plan.py,sha256=bclc7TE7CTBu7RLiTHG426dziyj--I8m5NVu86I2AV4,1065
  llama_cloud/types/usage_metric_response.py,sha256=ukvtNZLeLacv-5F0-GQ5wTBZOPUPEjAeurgYPc4s7nA,1047
@@ -334,7 +334,7 @@ llama_cloud/types/validation_error_loc_item.py,sha256=LAtjCHIllWRBFXvAZ5QZpp7CPX
  llama_cloud/types/vertex_ai_embedding_config.py,sha256=DvQk2xMJFmo54MEXTzoM4KSADyhGm_ygmFyx6wIcQdw,1159
  llama_cloud/types/vertex_embedding_mode.py,sha256=yY23FjuWU_DkXjBb3JoKV4SCMqel2BaIMltDqGnIowU,1217
  llama_cloud/types/vertex_text_embedding.py,sha256=-C4fNCYfFl36ATdBMGFVPpiHIKxjk0KB1ERA2Ec20aU,1932
- llama_cloud-0.1.19.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
- llama_cloud-0.1.19.dist-info/METADATA,sha256=1VcJf7tzF1bifeNhTbBaRxN5STLYpLydLOJJJ7Os6ck,902
- llama_cloud-0.1.19.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
- llama_cloud-0.1.19.dist-info/RECORD,,
+ llama_cloud-0.1.21.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
+ llama_cloud-0.1.21.dist-info/METADATA,sha256=AyJOHUBeiTz4oFSdEaOFUWUPp_bqoiVsX-B3erArGTc,1194
+ llama_cloud-0.1.21.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
+ llama_cloud-0.1.21.dist-info/RECORD,,
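
To check any of the digests above against files on disk: RECORD hashes are SHA-256 digests encoded as URL-safe base64 with the trailing padding stripped. A small sketch, assuming an installed copy of the package at an illustrative site-packages path:

# Sketch: recompute a RECORD-style digest for one installed file and compare it
# to the value listed above. Paths are illustrative; adjust to your environment.
import base64
import hashlib
from pathlib import Path


def record_digest(path: Path) -> str:
    digest = hashlib.sha256(path.read_bytes()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")


site_packages = Path("venv/lib/python3.11/site-packages")  # illustrative
print(record_digest(site_packages / "llama_cloud" / "client.py"))
# Per the RECORD above, client.py is unchanged between 0.1.19 and 0.1.21, so this
# should print: sha256=L8gEXB8nVlGVgfncfdLaS1j4b-1wExV4TqElUwayvtQ
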
{llama_cloud-0.1.19.dist-info → llama_cloud-0.1.21.dist-info}/WHEEL
@@ -1,4 +1,4 @@
  Wheel-Version: 1.0
- Generator: poetry-core 1.9.1
+ Generator: poetry-core 2.0.1
  Root-Is-Purelib: true
  Tag: py3-none-any
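
The only WHEEL change is the build-backend bump from poetry-core 1.9.1 to 2.0.1; Wheel-Version, Root-Is-Purelib, and the py3-none-any tag are unchanged, so install behavior should not differ. To confirm which generator built the wheel you actually have installed, the dist-info WHEEL file can be read with the standard library (a small sketch):

# Sketch: read the WHEEL file from the installed distribution's dist-info to see
# which build backend produced it. Uses only the standard library.
from importlib.metadata import distribution

dist = distribution("llama-cloud")
wheel_metadata = dist.read_text("WHEEL") or ""
for line in wheel_metadata.splitlines():
    if line.startswith(("Wheel-Version:", "Generator:", "Tag:")):
        print(line)
# For an installed 0.1.21 this should report "Generator: poetry-core 2.0.1".
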
llama_cloud/types/character_splitter.py DELETED
@@ -1,46 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
- import datetime as dt
- import typing
-
- from ..core.datetime_utils import serialize_datetime
-
- try:
-     import pydantic
-     if pydantic.__version__.startswith("1."):
-         raise ImportError
-     import pydantic.v1 as pydantic # type: ignore
- except ImportError:
-     import pydantic # type: ignore
-
-
- class CharacterSplitter(pydantic.BaseModel):
-     """
-     A splitter that splits text into characters.
-     """
-
-     include_metadata: typing.Optional[bool] = pydantic.Field(
-         description="Whether or not to consider metadata when splitting."
-     )
-     include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
-     callback_manager: typing.Optional[typing.Any]
-     id_func: typing.Optional[str]
-     chunk_size: typing.Optional[int] = pydantic.Field(description="The token chunk size for each chunk.")
-     chunk_overlap: typing.Optional[int] = pydantic.Field(description="The token overlap of each chunk when splitting.")
-     separator: typing.Optional[str] = pydantic.Field(description="Default separator for splitting into words")
-     paragraph_separator: typing.Optional[str] = pydantic.Field(description="Separator between paragraphs.")
-     secondary_chunking_regex: typing.Optional[str]
-     class_name: typing.Optional[str]
-
-     def json(self, **kwargs: typing.Any) -> str:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().json(**kwargs_with_defaults)
-
-     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().dict(**kwargs_with_defaults)
-
-     class Config:
-         frozen = True
-         smart_union = True
-         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/code_splitter.py DELETED
@@ -1,50 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
- import datetime as dt
- import typing
-
- from ..core.datetime_utils import serialize_datetime
-
- try:
-     import pydantic
-     if pydantic.__version__.startswith("1."):
-         raise ImportError
-     import pydantic.v1 as pydantic # type: ignore
- except ImportError:
-     import pydantic # type: ignore
-
-
- class CodeSplitter(pydantic.BaseModel):
-     """
-     Split code using a AST parser.
-
-     Thank you to Kevin Lu / SweepAI for suggesting this elegant code splitting solution.
-     https://docs.sweep.dev/blogs/chunking-2m-files
-     """
-
-     include_metadata: typing.Optional[bool] = pydantic.Field(
-         description="Whether or not to consider metadata when splitting."
-     )
-     include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
-     callback_manager: typing.Optional[typing.Any]
-     id_func: typing.Optional[str]
-     language: str = pydantic.Field(description="The programming language of the code being split.")
-     chunk_lines: typing.Optional[int] = pydantic.Field(description="The number of lines to include in each chunk.")
-     chunk_lines_overlap: typing.Optional[int] = pydantic.Field(
-         description="How many lines of code each chunk overlaps with."
-     )
-     max_chars: typing.Optional[int] = pydantic.Field(description="Maximum number of characters per chunk.")
-     class_name: typing.Optional[str]
-
-     def json(self, **kwargs: typing.Any) -> str:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().json(**kwargs_with_defaults)
-
-     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().dict(**kwargs_with_defaults)
-
-     class Config:
-         frozen = True
-         smart_union = True
-         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/configured_transformation_item_component.py DELETED
@@ -1,22 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
- import typing
-
- from .character_splitter import CharacterSplitter
- from .code_splitter import CodeSplitter
- from .markdown_element_node_parser import MarkdownElementNodeParser
- from .markdown_node_parser import MarkdownNodeParser
- from .page_splitter_node_parser import PageSplitterNodeParser
- from .sentence_splitter import SentenceSplitter
- from .token_text_splitter import TokenTextSplitter
-
- ConfiguredTransformationItemComponent = typing.Union[
-     typing.Dict[str, typing.Any],
-     CharacterSplitter,
-     PageSplitterNodeParser,
-     CodeSplitter,
-     SentenceSplitter,
-     TokenTextSplitter,
-     MarkdownNodeParser,
-     MarkdownElementNodeParser,
- ]
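
With this union gone, every splitter and node-parser model it referenced is removed from the package, so imports of those names will fail after upgrading past 0.1.19. The sketch below is one defensive pattern, assuming these models were re-exported from llama_cloud.types in 0.1.19 as the generated types/__init__.py normally does; pinning llama-cloud==0.1.19 is the simpler alternative.

# Sketch: guard an import that disappears between llama-cloud 0.1.19 and 0.1.21
# so the failure is explicit and explained rather than a bare ImportError later.
# Assumes SentenceSplitter was importable from llama_cloud.types on 0.1.19.
import typing

try:
    from llama_cloud.types import SentenceSplitter  # removed by 0.1.21
except ImportError:  # running against 0.1.21 or newer
    SentenceSplitter = None  # type: ignore[assignment, misc]


def build_splitter(chunk_size: int = 1024, chunk_overlap: int = 200) -> typing.Any:
    if SentenceSplitter is None:
        raise RuntimeError(
            "This code relies on llama_cloud.types.SentenceSplitter, which was removed "
            "between llama-cloud 0.1.19 and 0.1.21; pin llama-cloud==0.1.19 or migrate "
            "off the removed splitter models."
        )
    return SentenceSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
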
llama_cloud/types/llm.py DELETED
@@ -1,60 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
- import datetime as dt
- import typing
-
- from ..core.datetime_utils import serialize_datetime
- from .base_prompt_template import BasePromptTemplate
- from .pydantic_program_mode import PydanticProgramMode
-
- try:
-     import pydantic
-     if pydantic.__version__.startswith("1."):
-         raise ImportError
-     import pydantic.v1 as pydantic # type: ignore
- except ImportError:
-     import pydantic # type: ignore
-
-
- class Llm(pydantic.BaseModel):
-     """
-     The LLM class is the main class for interacting with language models.
-
-     Attributes:
-         system_prompt (Optional[str]):
-             System prompt for LLM calls.
-         messages_to_prompt (Callable):
-             Function to convert a list of messages to an LLM prompt.
-         completion_to_prompt (Callable):
-             Function to convert a completion to an LLM prompt.
-         output_parser (Optional[BaseOutputParser]):
-             Output parser to parse, validate, and correct errors programmatically.
-         pydantic_program_mode (PydanticProgramMode):
-             Pydantic program mode to use for structured prediction.
-     """
-
-     callback_manager: typing.Optional[typing.Any]
-     system_prompt: typing.Optional[str]
-     messages_to_prompt: typing.Optional[str] = pydantic.Field(
-         description="Function to convert a list of messages to an LLM prompt."
-     )
-     completion_to_prompt: typing.Optional[str] = pydantic.Field(
-         description="Function to convert a completion to an LLM prompt."
-     )
-     output_parser: typing.Optional[typing.Any]
-     pydantic_program_mode: typing.Optional[PydanticProgramMode]
-     query_wrapper_prompt: typing.Optional[BasePromptTemplate]
-     class_name: typing.Optional[str]
-
-     def json(self, **kwargs: typing.Any) -> str:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().json(**kwargs_with_defaults)
-
-     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().dict(**kwargs_with_defaults)
-
-     class Config:
-         frozen = True
-         smart_union = True
-         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/markdown_element_node_parser.py DELETED
@@ -1,51 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
- import datetime as dt
- import typing
-
- from ..core.datetime_utils import serialize_datetime
- from .llm import Llm
- from .node_parser import NodeParser
-
- try:
-     import pydantic
-     if pydantic.__version__.startswith("1."):
-         raise ImportError
-     import pydantic.v1 as pydantic # type: ignore
- except ImportError:
-     import pydantic # type: ignore
-
-
- class MarkdownElementNodeParser(pydantic.BaseModel):
-     """
-     Markdown element node parser.
-
-     Splits a markdown document into Text Nodes and Index Nodes corresponding to embedded objects
-     (e.g. tables).
-     """
-
-     include_metadata: typing.Optional[bool] = pydantic.Field(
-         description="Whether or not to consider metadata when splitting."
-     )
-     include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
-     callback_manager: typing.Optional[typing.Any]
-     id_func: typing.Optional[str]
-     llm: typing.Optional[Llm]
-     summary_query_str: typing.Optional[str] = pydantic.Field(description="Query string to use for summarization.")
-     num_workers: typing.Optional[int] = pydantic.Field(description="Num of workers for async jobs.")
-     show_progress: typing.Optional[bool] = pydantic.Field(description="Whether to show progress.")
-     nested_node_parser: typing.Optional[NodeParser]
-     class_name: typing.Optional[str]
-
-     def json(self, **kwargs: typing.Any) -> str:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().json(**kwargs_with_defaults)
-
-     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().dict(**kwargs_with_defaults)
-
-     class Config:
-         frozen = True
-         smart_union = True
-         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/markdown_node_parser.py DELETED
@@ -1,52 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
- import datetime as dt
- import typing
-
- from ..core.datetime_utils import serialize_datetime
-
- try:
-     import pydantic
-     if pydantic.__version__.startswith("1."):
-         raise ImportError
-     import pydantic.v1 as pydantic # type: ignore
- except ImportError:
-     import pydantic # type: ignore
-
-
- class MarkdownNodeParser(pydantic.BaseModel):
-     """
-     Markdown node parser.
-
-     Splits a document into Nodes using Markdown header-based splitting logic.
-     Each node contains its text content and the path of headers leading to it.
-
-     Args:
-         include_metadata (bool): whether to include metadata in nodes
-         include_prev_next_rel (bool): whether to include prev/next relationships
-         header_path_separator (str): separator char used for section header path metadata
-     """
-
-     include_metadata: typing.Optional[bool] = pydantic.Field(
-         description="Whether or not to consider metadata when splitting."
-     )
-     include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
-     callback_manager: typing.Optional[typing.Any]
-     id_func: typing.Optional[str]
-     header_path_separator: typing.Optional[str] = pydantic.Field(
-         description="Separator char used for section header path metadata."
-     )
-     class_name: typing.Optional[str]
-
-     def json(self, **kwargs: typing.Any) -> str:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().json(**kwargs_with_defaults)
-
-     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().dict(**kwargs_with_defaults)
-
-     class Config:
-         frozen = True
-         smart_union = True
-         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/pydantic_program_mode.py DELETED
@@ -1,41 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
- import enum
- import typing
-
- T_Result = typing.TypeVar("T_Result")
-
-
- class PydanticProgramMode(str, enum.Enum):
-     """
-     Pydantic program mode.
-     """
-
-     DEFAULT = "default"
-     OPENAI = "openai"
-     LLM = "llm"
-     FUNCTION = "function"
-     GUIDANCE = "guidance"
-     LM_FORMAT_ENFORCER = "lm-format-enforcer"
-
-     def visit(
-         self,
-         default: typing.Callable[[], T_Result],
-         openai: typing.Callable[[], T_Result],
-         llm: typing.Callable[[], T_Result],
-         function: typing.Callable[[], T_Result],
-         guidance: typing.Callable[[], T_Result],
-         lm_format_enforcer: typing.Callable[[], T_Result],
-     ) -> T_Result:
-         if self is PydanticProgramMode.DEFAULT:
-             return default()
-         if self is PydanticProgramMode.OPENAI:
-             return openai()
-         if self is PydanticProgramMode.LLM:
-             return llm()
-         if self is PydanticProgramMode.FUNCTION:
-             return function()
-         if self is PydanticProgramMode.GUIDANCE:
-             return guidance()
-         if self is PydanticProgramMode.LM_FORMAT_ENFORCER:
-             return lm_format_enforcer()
llama_cloud/types/sentence_splitter.py DELETED
@@ -1,50 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
- import datetime as dt
- import typing
-
- from ..core.datetime_utils import serialize_datetime
-
- try:
-     import pydantic
-     if pydantic.__version__.startswith("1."):
-         raise ImportError
-     import pydantic.v1 as pydantic # type: ignore
- except ImportError:
-     import pydantic # type: ignore
-
-
- class SentenceSplitter(pydantic.BaseModel):
-     """
-     Parse text with a preference for complete sentences.
-
-     In general, this class tries to keep sentences and paragraphs together. Therefore
-     compared to the original TokenTextSplitter, there are less likely to be
-     hanging sentences or parts of sentences at the end of the node chunk.
-     """
-
-     include_metadata: typing.Optional[bool] = pydantic.Field(
-         description="Whether or not to consider metadata when splitting."
-     )
-     include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
-     callback_manager: typing.Optional[typing.Any]
-     id_func: typing.Optional[str]
-     chunk_size: typing.Optional[int] = pydantic.Field(description="The token chunk size for each chunk.")
-     chunk_overlap: typing.Optional[int] = pydantic.Field(description="The token overlap of each chunk when splitting.")
-     separator: typing.Optional[str] = pydantic.Field(description="Default separator for splitting into words")
-     paragraph_separator: typing.Optional[str] = pydantic.Field(description="Separator between paragraphs.")
-     secondary_chunking_regex: typing.Optional[str]
-     class_name: typing.Optional[str]
-
-     def json(self, **kwargs: typing.Any) -> str:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().json(**kwargs_with_defaults)
-
-     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().dict(**kwargs_with_defaults)
-
-     class Config:
-         frozen = True
-         smart_union = True
-         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/token_text_splitter.py DELETED
@@ -1,50 +0,0 @@
- # This file was auto-generated by Fern from our API Definition.
-
- import datetime as dt
- import typing
-
- from ..core.datetime_utils import serialize_datetime
-
- try:
-     import pydantic
-     if pydantic.__version__.startswith("1."):
-         raise ImportError
-     import pydantic.v1 as pydantic # type: ignore
- except ImportError:
-     import pydantic # type: ignore
-
-
- class TokenTextSplitter(pydantic.BaseModel):
-     """
-     Implementation of splitting text that looks at word tokens.
-     """
-
-     include_metadata: typing.Optional[bool] = pydantic.Field(
-         description="Whether or not to consider metadata when splitting."
-     )
-     include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
-     callback_manager: typing.Optional[typing.Any]
-     id_func: typing.Optional[str]
-     chunk_size: typing.Optional[int] = pydantic.Field(description="The token chunk size for each chunk.")
-     chunk_overlap: typing.Optional[int] = pydantic.Field(description="The token overlap of each chunk when splitting.")
-     separator: typing.Optional[str] = pydantic.Field(description="Default separator for splitting into words")
-     backup_separators: typing.Optional[typing.List[typing.Any]] = pydantic.Field(
-         description="Additional separators for splitting."
-     )
-     keep_whitespaces: typing.Optional[bool] = pydantic.Field(
-         description="Whether to keep leading/trailing whitespaces in the chunk."
-     )
-     class_name: typing.Optional[str]
-
-     def json(self, **kwargs: typing.Any) -> str:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().json(**kwargs_with_defaults)
-
-     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
-         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-         return super().dict(**kwargs_with_defaults)
-
-     class Config:
-         frozen = True
-         smart_union = True
-         json_encoders = {dt.datetime: serialize_datetime}