llama-cloud 0.0.8__py3-none-any.whl → 0.0.9__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.

Potentially problematic release: this version of llama-cloud might be problematic; see the registry page for details.

Files changed (32)
  1. llama_cloud/__init__.py +16 -0
  2. llama_cloud/client.py +3 -0
  3. llama_cloud/resources/__init__.py +13 -1
  4. llama_cloud/resources/data_sinks/client.py +40 -8
  5. llama_cloud/resources/data_sources/client.py +48 -12
  6. llama_cloud/resources/data_sources/types/data_source_update_component_one.py +2 -0
  7. llama_cloud/resources/extraction/client.py +4 -20
  8. llama_cloud/resources/organizations/__init__.py +2 -0
  9. llama_cloud/resources/organizations/client.py +786 -0
  10. llama_cloud/resources/pipelines/client.py +166 -10
  11. llama_cloud/resources/projects/client.py +28 -8
  12. llama_cloud/types/__init__.py +14 -0
  13. llama_cloud/types/chat_params.py +38 -0
  14. llama_cloud/types/cloud_jira_data_source.py +43 -0
  15. llama_cloud/types/cloud_sharepoint_data_source.py +1 -0
  16. llama_cloud/types/configurable_data_source_names.py +4 -0
  17. llama_cloud/types/data_source_component_one.py +2 -0
  18. llama_cloud/types/data_source_create_component_one.py +2 -0
  19. llama_cloud/types/eval_dataset_job_record.py +1 -0
  20. llama_cloud/types/extraction_schema.py +0 -1
  21. llama_cloud/types/organization.py +38 -0
  22. llama_cloud/types/organization_create.py +35 -0
  23. llama_cloud/types/pipeline_data_source_component_one.py +2 -0
  24. llama_cloud/types/preset_retrieval_params.py +5 -0
  25. llama_cloud/types/project.py +1 -1
  26. llama_cloud/types/retrieval_mode.py +29 -0
  27. llama_cloud/types/user_organization.py +40 -0
  28. llama_cloud/types/user_organization_create.py +36 -0
  29. {llama_cloud-0.0.8.dist-info → llama_cloud-0.0.9.dist-info}/METADATA +2 -1
  30. {llama_cloud-0.0.8.dist-info → llama_cloud-0.0.9.dist-info}/RECORD +32 -23
  31. {llama_cloud-0.0.8.dist-info → llama_cloud-0.0.9.dist-info}/WHEEL +1 -1
  32. {llama_cloud-0.0.8.dist-info → llama_cloud-0.0.9.dist-info}/LICENSE +0 -0
llama_cloud/resources/pipelines/client.py

@@ -9,6 +9,7 @@ from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
 from ...core.jsonable_encoder import jsonable_encoder
 from ...core.remove_none_from_dict import remove_none_from_dict
 from ...errors.unprocessable_entity_error import UnprocessableEntityError
+from ...types.chat_params import ChatParams
 from ...types.cloud_document import CloudDocument
 from ...types.cloud_document_create import CloudDocumentCreate
 from ...types.configured_transformation_item import ConfiguredTransformationItem
@@ -30,6 +31,7 @@ from ...types.pipeline_file import PipelineFile
 from ...types.pipeline_file_create import PipelineFileCreate
 from ...types.pipeline_type import PipelineType
 from ...types.preset_retrieval_params import PresetRetrievalParams
+from ...types.retrieval_mode import RetrievalMode
 from ...types.retrieve_results import RetrieveResults
 from .types.pipeline_file_update_custom_metadata_value import PipelineFileUpdateCustomMetadataValue
 
@@ -52,19 +54,22 @@ class PipelinesClient:
     def search_pipelines(
         self,
         *,
-        project_name: str,
+        project_name: typing.Optional[str] = None,
         pipeline_name: typing.Optional[str] = None,
         pipeline_type: typing.Optional[PipelineType] = None,
+        project_id: typing.Optional[str] = None,
     ) -> typing.List[Pipeline]:
         """
         Search for pipelines by various parameters.
 
         Parameters:
-            - project_name: str.
+            - project_name: typing.Optional[str].
 
             - pipeline_name: typing.Optional[str].
 
             - pipeline_type: typing.Optional[PipelineType].
+
+            - project_id: typing.Optional[str].
         ---
         from llama_cloud import PipelineType
         from llama_cloud.client import LlamaCloud
@@ -73,7 +78,6 @@ class PipelinesClient:
             token="YOUR_TOKEN",
         )
         client.pipelines.search_pipelines(
-            project_name="string",
             pipeline_type=PipelineType.PLAYGROUND,
         )
         """
@@ -81,7 +85,12 @@ class PipelinesClient:
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/pipelines"),
             params=remove_none_from_dict(
-                {"project_name": project_name, "pipeline_name": pipeline_name, "pipeline_type": pipeline_type}
+                {
+                    "project_name": project_name,
+                    "pipeline_name": pipeline_name,
+                    "pipeline_type": pipeline_type,
+                    "project_id": project_id,
+                }
             ),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
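In 0.0.9, search_pipelines no longer requires project_name and gains an optional project_id filter. A minimal usage sketch against the new signature (the IDs and names below are placeholders, not values from this diff):

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# All filters are now optional; mix and match as needed.
pipelines = client.pipelines.search_pipelines(
    project_id="proj_123",        # placeholder: filter by project ID instead of name
    pipeline_name="my-pipeline",  # placeholder
)

Because the query dict is passed through remove_none_from_dict, omitted parameters simply drop out of the request.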
@@ -115,6 +124,7 @@ class PipelinesClient:
     PipelineCreate,
     PipelineType,
     PresetRetrievalParams,
+    RetrievalMode,
     SupportedEvalLlmModelNames,
 )
 from llama_cloud.client import LlamaCloud
@@ -133,6 +143,7 @@ class PipelinesClient:
             filters=[],
             condition=FilterCondition.AND,
         ),
+        retrieval_mode=RetrievalMode.CHUNKS,
     ),
     eval_parameters=EvalExecutionParams(
         llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
@@ -181,6 +192,7 @@ class PipelinesClient:
     PipelineCreate,
     PipelineType,
     PresetRetrievalParams,
+    RetrievalMode,
     SupportedEvalLlmModelNames,
 )
 from llama_cloud.client import LlamaCloud
@@ -199,6 +211,7 @@ class PipelinesClient:
             filters=[],
             condition=FilterCondition.AND,
         ),
+        retrieval_mode=RetrievalMode.CHUNKS,
     ),
     eval_parameters=EvalExecutionParams(
         llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
@@ -305,6 +318,7 @@ class PipelinesClient:
     LlamaParseParameters,
     MetadataFilters,
     PresetRetrievalParams,
+    RetrievalMode,
     SupportedEvalLlmModelNames,
 )
 from llama_cloud.client import LlamaCloud
@@ -323,6 +337,7 @@ class PipelinesClient:
             filters=[],
             condition=FilterCondition.AND,
         ),
+        retrieval_mode=RetrievalMode.CHUNKS,
     ),
     eval_parameters=EvalExecutionParams(
         llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
@@ -1069,6 +1084,8 @@ class PipelinesClient:
         rerank_top_n: typing.Optional[int] = OMIT,
         alpha: typing.Optional[float] = OMIT,
         search_filters: typing.Optional[MetadataFilters] = OMIT,
+        files_top_k: typing.Optional[int] = OMIT,
+        retrieval_mode: typing.Optional[RetrievalMode] = OMIT,
         query: str,
     ) -> RetrieveResults:
         """
@@ -1089,9 +1106,13 @@ class PipelinesClient:
         - search_filters: typing.Optional[MetadataFilters]. Search filters for retrieval.
 
+        - files_top_k: typing.Optional[int]. Number of files to retrieve (only for retrieval mode files_via_metadata and files_via_content).
+
+        - retrieval_mode: typing.Optional[RetrievalMode]. The retrieval mode for the query.
+
         - query: str. The query to retrieve against.
         ---
-        from llama_cloud import FilterCondition, MetadataFilters
+        from llama_cloud import FilterCondition, MetadataFilters, RetrievalMode
         from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
@@ -1103,6 +1124,7 @@ class PipelinesClient:
             filters=[],
             condition=FilterCondition.AND,
         ),
+        retrieval_mode=RetrievalMode.CHUNKS,
         query="string",
     )
     """
@@ -1119,6 +1141,10 @@ class PipelinesClient:
         _request["alpha"] = alpha
         if search_filters is not OMIT:
             _request["search_filters"] = search_filters
+        if files_top_k is not OMIT:
+            _request["files_top_k"] = files_top_k
+        if retrieval_mode is not OMIT:
+            _request["retrieval_mode"] = retrieval_mode
         _response = self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/retrieve"),
@@ -1205,6 +1231,58 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    def chat(self, pipeline_id: str, *, retrieval_parameters: PresetRetrievalParams, data: ChatParams) -> typing.Any:
+        """
+        Parameters:
+            - pipeline_id: str.
+
+            - retrieval_parameters: PresetRetrievalParams.
+
+            - data: ChatParams.
+        ---
+        from llama_cloud import (
+            ChatParams,
+            FilterCondition,
+            MetadataFilters,
+            PresetRetrievalParams,
+            RetrievalMode,
+        )
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.pipelines.chat(
+            pipeline_id="string",
+            retrieval_parameters=PresetRetrievalParams(
+                search_filters=MetadataFilters(
+                    filters=[],
+                    condition=FilterCondition.AND,
+                ),
+                retrieval_mode=RetrievalMode.CHUNKS,
+            ),
+            data=ChatParams(
+                messages=[],
+            ),
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/chat"),
+            json=jsonable_encoder({"retrieval_parameters": retrieval_parameters, "data": data}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def list_pipeline_documents(
         self, pipeline_id: str, *, skip: typing.Optional[int] = None, limit: typing.Optional[int] = None
     ) -> typing.List[CloudDocument]:
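The new chat endpoint wraps a retrieval configuration and a message history into one POST to api/v1/pipelines/{pipeline_id}/chat. A sketch with a non-empty history; ChatMessage's role/content fields follow the llama-index convention and are an assumption here, as is constructing PresetRetrievalParams without search_filters:

from llama_cloud import ChatMessage, ChatParams, PresetRetrievalParams, RetrievalMode
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

response = client.pipelines.chat(
    pipeline_id="string",
    retrieval_parameters=PresetRetrievalParams(retrieval_mode=RetrievalMode.CHUNKS),
    data=ChatParams(
        messages=[ChatMessage(role="user", content="What do the indexed documents say?")],  # fields assumed
    ),
)

The return type is typing.Any, so the response shape is whatever the server sends back.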
@@ -1446,19 +1524,22 @@ class AsyncPipelinesClient:
     async def search_pipelines(
         self,
         *,
-        project_name: str,
+        project_name: typing.Optional[str] = None,
         pipeline_name: typing.Optional[str] = None,
         pipeline_type: typing.Optional[PipelineType] = None,
+        project_id: typing.Optional[str] = None,
     ) -> typing.List[Pipeline]:
         """
         Search for pipelines by various parameters.
 
         Parameters:
-            - project_name: str.
+            - project_name: typing.Optional[str].
 
             - pipeline_name: typing.Optional[str].
 
             - pipeline_type: typing.Optional[PipelineType].
+
+            - project_id: typing.Optional[str].
         ---
         from llama_cloud import PipelineType
         from llama_cloud.client import AsyncLlamaCloud
@@ -1467,7 +1548,6 @@ class AsyncPipelinesClient:
             token="YOUR_TOKEN",
         )
         await client.pipelines.search_pipelines(
-            project_name="string",
             pipeline_type=PipelineType.PLAYGROUND,
         )
         """
@@ -1475,7 +1555,12 @@ class AsyncPipelinesClient:
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/pipelines"),
             params=remove_none_from_dict(
-                {"project_name": project_name, "pipeline_name": pipeline_name, "pipeline_type": pipeline_type}
+                {
+                    "project_name": project_name,
+                    "pipeline_name": pipeline_name,
+                    "pipeline_type": pipeline_type,
+                    "project_id": project_id,
+                }
             ),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -1509,6 +1594,7 @@ class AsyncPipelinesClient:
     PipelineCreate,
     PipelineType,
     PresetRetrievalParams,
+    RetrievalMode,
     SupportedEvalLlmModelNames,
 )
 from llama_cloud.client import AsyncLlamaCloud
@@ -1527,6 +1613,7 @@ class AsyncPipelinesClient:
             filters=[],
             condition=FilterCondition.AND,
         ),
+        retrieval_mode=RetrievalMode.CHUNKS,
     ),
     eval_parameters=EvalExecutionParams(
         llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
@@ -1575,6 +1662,7 @@ class AsyncPipelinesClient:
     PipelineCreate,
     PipelineType,
     PresetRetrievalParams,
+    RetrievalMode,
     SupportedEvalLlmModelNames,
 )
 from llama_cloud.client import AsyncLlamaCloud
@@ -1593,6 +1681,7 @@ class AsyncPipelinesClient:
             filters=[],
             condition=FilterCondition.AND,
         ),
+        retrieval_mode=RetrievalMode.CHUNKS,
     ),
     eval_parameters=EvalExecutionParams(
         llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
@@ -1699,6 +1788,7 @@ class AsyncPipelinesClient:
     LlamaParseParameters,
     MetadataFilters,
     PresetRetrievalParams,
+    RetrievalMode,
     SupportedEvalLlmModelNames,
 )
 from llama_cloud.client import AsyncLlamaCloud
@@ -1717,6 +1807,7 @@ class AsyncPipelinesClient:
             filters=[],
             condition=FilterCondition.AND,
         ),
+        retrieval_mode=RetrievalMode.CHUNKS,
     ),
     eval_parameters=EvalExecutionParams(
         llm_model=SupportedEvalLlmModelNames.GPT_3_5_TURBO,
@@ -2465,6 +2556,8 @@ class AsyncPipelinesClient:
         rerank_top_n: typing.Optional[int] = OMIT,
         alpha: typing.Optional[float] = OMIT,
         search_filters: typing.Optional[MetadataFilters] = OMIT,
+        files_top_k: typing.Optional[int] = OMIT,
+        retrieval_mode: typing.Optional[RetrievalMode] = OMIT,
         query: str,
     ) -> RetrieveResults:
         """
@@ -2485,9 +2578,13 @@ class AsyncPipelinesClient:
         - search_filters: typing.Optional[MetadataFilters]. Search filters for retrieval.
 
+        - files_top_k: typing.Optional[int]. Number of files to retrieve (only for retrieval mode files_via_metadata and files_via_content).
+
+        - retrieval_mode: typing.Optional[RetrievalMode]. The retrieval mode for the query.
+
         - query: str. The query to retrieve against.
         ---
-        from llama_cloud import FilterCondition, MetadataFilters
+        from llama_cloud import FilterCondition, MetadataFilters, RetrievalMode
         from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
@@ -2499,6 +2596,7 @@ class AsyncPipelinesClient:
             filters=[],
             condition=FilterCondition.AND,
         ),
+        retrieval_mode=RetrievalMode.CHUNKS,
         query="string",
     )
     """
@@ -2515,6 +2613,10 @@ class AsyncPipelinesClient:
         _request["alpha"] = alpha
         if search_filters is not OMIT:
             _request["search_filters"] = search_filters
+        if files_top_k is not OMIT:
+            _request["files_top_k"] = files_top_k
+        if retrieval_mode is not OMIT:
+            _request["retrieval_mode"] = retrieval_mode
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/retrieve"),
@@ -2601,6 +2703,60 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    async def chat(
+        self, pipeline_id: str, *, retrieval_parameters: PresetRetrievalParams, data: ChatParams
+    ) -> typing.Any:
+        """
+        Parameters:
+            - pipeline_id: str.
+
+            - retrieval_parameters: PresetRetrievalParams.
+
+            - data: ChatParams.
+        ---
+        from llama_cloud import (
+            ChatParams,
+            FilterCondition,
+            MetadataFilters,
+            PresetRetrievalParams,
+            RetrievalMode,
+        )
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.pipelines.chat(
+            pipeline_id="string",
+            retrieval_parameters=PresetRetrievalParams(
+                search_filters=MetadataFilters(
+                    filters=[],
+                    condition=FilterCondition.AND,
+                ),
+                retrieval_mode=RetrievalMode.CHUNKS,
+            ),
+            data=ChatParams(
+                messages=[],
+            ),
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/chat"),
+            json=jsonable_encoder({"retrieval_parameters": retrieval_parameters, "data": data}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def list_pipeline_documents(
         self, pipeline_id: str, *, skip: typing.Optional[int] = None, limit: typing.Optional[int] = None
     ) -> typing.List[CloudDocument]:
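The async client mirrors the sync surface method-for-method. A sketch of the same retrieval call from asyncio (run_search is again an assumed method name, not shown in these hunks):

import asyncio

from llama_cloud import RetrievalMode
from llama_cloud.client import AsyncLlamaCloud

async def main() -> None:
    client = AsyncLlamaCloud(token="YOUR_TOKEN")
    results = await client.pipelines.run_search(  # assumed method name
        pipeline_id="string",
        query="string",
        retrieval_mode=RetrievalMode.CHUNKS,
    )
    print(results)

asyncio.run(main())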
llama_cloud/resources/projects/client.py

@@ -34,11 +34,15 @@ class ProjectsClient:
     def __init__(self, *, client_wrapper: SyncClientWrapper):
         self._client_wrapper = client_wrapper
 
-    def list_projects(self, *, project_name: typing.Optional[str] = None) -> typing.List[Project]:
+    def list_projects(
+        self, *, organization_id: typing.Optional[str] = None, project_name: typing.Optional[str] = None
+    ) -> typing.List[Project]:
         """
         List projects or get one by name
 
         Parameters:
+            - organization_id: typing.Optional[str].
+
             - project_name: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud
@@ -51,7 +55,7 @@ class ProjectsClient:
         _response = self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/projects"),
-            params=remove_none_from_dict({"project_name": project_name}),
+            params=remove_none_from_dict({"organization_id": organization_id, "project_name": project_name}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -65,11 +69,13 @@ class ProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def create_project(self, *, request: ProjectCreate) -> Project:
+    def create_project(self, *, organization_id: typing.Optional[str] = None, request: ProjectCreate) -> Project:
         """
         Create a new project.
 
         Parameters:
+            - organization_id: typing.Optional[str].
+
             - request: ProjectCreate.
         ---
         from llama_cloud import ProjectCreate
@@ -87,6 +93,7 @@ class ProjectsClient:
         _response = self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/projects"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -101,12 +108,14 @@ class ProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def upsert_project(self, *, request: ProjectCreate) -> Project:
+    def upsert_project(self, *, organization_id: typing.Optional[str] = None, request: ProjectCreate) -> Project:
         """
         Upsert a project.
         Updates if a project with the same name already exists. Otherwise, creates a new project.
 
         Parameters:
+            - organization_id: typing.Optional[str].
+
             - request: ProjectCreate.
         ---
         from llama_cloud import ProjectCreate
@@ -124,6 +133,7 @@ class ProjectsClient:
         _response = self._client_wrapper.httpx_client.request(
             "PUT",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/projects"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
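list_projects, create_project, and upsert_project all gain an optional organization_id query parameter, lining up with the new organizations resource added in this release. A sketch of scoping project calls to one organization (the ID is a placeholder, and ProjectCreate taking a name field is an assumption based on the upsert-by-name semantics):

from llama_cloud import ProjectCreate
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# Only projects under this organization are returned.
projects = client.projects.list_projects(organization_id="org_123")  # placeholder ID

# Upsert within the same organization: updates an existing project with
# this name, otherwise creates it.
project = client.projects.upsert_project(
    organization_id="org_123",
    request=ProjectCreate(name="docs-search"),  # name field assumed
)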
@@ -620,11 +630,15 @@ class AsyncProjectsClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
         self._client_wrapper = client_wrapper
 
-    async def list_projects(self, *, project_name: typing.Optional[str] = None) -> typing.List[Project]:
+    async def list_projects(
+        self, *, organization_id: typing.Optional[str] = None, project_name: typing.Optional[str] = None
+    ) -> typing.List[Project]:
         """
         List projects or get one by name
 
         Parameters:
+            - organization_id: typing.Optional[str].
+
             - project_name: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud
@@ -637,7 +651,7 @@ class AsyncProjectsClient:
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/projects"),
-            params=remove_none_from_dict({"project_name": project_name}),
+            params=remove_none_from_dict({"organization_id": organization_id, "project_name": project_name}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -651,11 +665,13 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def create_project(self, *, request: ProjectCreate) -> Project:
+    async def create_project(self, *, organization_id: typing.Optional[str] = None, request: ProjectCreate) -> Project:
         """
         Create a new project.
 
         Parameters:
+            - organization_id: typing.Optional[str].
+
             - request: ProjectCreate.
         ---
         from llama_cloud import ProjectCreate
@@ -673,6 +689,7 @@ class AsyncProjectsClient:
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/projects"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -687,12 +704,14 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def upsert_project(self, *, request: ProjectCreate) -> Project:
+    async def upsert_project(self, *, organization_id: typing.Optional[str] = None, request: ProjectCreate) -> Project:
         """
         Upsert a project.
         Updates if a project with the same name already exists. Otherwise, creates a new project.
 
         Parameters:
+            - organization_id: typing.Optional[str].
+
             - request: ProjectCreate.
         ---
         from llama_cloud import ProjectCreate
@@ -710,6 +729,7 @@ class AsyncProjectsClient:
         _response = await self._client_wrapper.httpx_client.request(
             "PUT",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/projects"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
            json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
llama_cloud/types/__init__.py

@@ -5,11 +5,13 @@ from .base import Base
 from .base_prompt_template import BasePromptTemplate
 from .bedrock_embedding import BedrockEmbedding
 from .chat_message import ChatMessage
+from .chat_params import ChatParams
 from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
 from .cloud_azure_ai_search_vector_store import CloudAzureAiSearchVectorStore
 from .cloud_chroma_vector_store import CloudChromaVectorStore
 from .cloud_document import CloudDocument
 from .cloud_document_create import CloudDocumentCreate
+from .cloud_jira_data_source import CloudJiraDataSource
 from .cloud_notion_page_data_source import CloudNotionPageDataSource
 from .cloud_one_drive_data_source import CloudOneDriveDataSource
 from .cloud_pinecone_vector_store import CloudPineconeVectorStore
@@ -86,6 +88,8 @@ from .metric_result import MetricResult
 from .node_parser import NodeParser
 from .object_type import ObjectType
 from .open_ai_embedding import OpenAiEmbedding
+from .organization import Organization
+from .organization_create import OrganizationCreate
 from .parser_languages import ParserLanguages
 from .parsing_history_item import ParsingHistoryItem
 from .parsing_job import ParsingJob
@@ -116,6 +120,7 @@ from .prompt_mixin_prompts import PromptMixinPrompts
 from .prompt_spec import PromptSpec
 from .pydantic_program_mode import PydanticProgramMode
 from .related_node_info import RelatedNodeInfo
+from .retrieval_mode import RetrievalMode
 from .retrieve_results import RetrieveResults
 from .sentence_splitter import SentenceSplitter
 from .simple_file_node_parser import SimpleFileNodeParser
@@ -127,6 +132,8 @@ from .text_node_relationships_value import TextNodeRelationshipsValue
 from .text_node_with_score import TextNodeWithScore
 from .token_text_splitter import TokenTextSplitter
 from .transformation_category_names import TransformationCategoryNames
+from .user_organization import UserOrganization
+from .user_organization_create import UserOrganizationCreate
 from .validation_error import ValidationError
 from .validation_error_loc_item import ValidationErrorLocItem
 
@@ -136,11 +143,13 @@ __all__ = [
     "BasePromptTemplate",
     "BedrockEmbedding",
     "ChatMessage",
+    "ChatParams",
     "CloudAzStorageBlobDataSource",
     "CloudAzureAiSearchVectorStore",
     "CloudChromaVectorStore",
     "CloudDocument",
     "CloudDocumentCreate",
+    "CloudJiraDataSource",
     "CloudNotionPageDataSource",
     "CloudOneDriveDataSource",
     "CloudPineconeVectorStore",
@@ -217,6 +226,8 @@ __all__ = [
     "NodeParser",
     "ObjectType",
     "OpenAiEmbedding",
+    "Organization",
+    "OrganizationCreate",
     "ParserLanguages",
     "ParsingHistoryItem",
     "ParsingJob",
@@ -247,6 +258,7 @@ __all__ = [
     "PromptSpec",
     "PydanticProgramMode",
     "RelatedNodeInfo",
+    "RetrievalMode",
     "RetrieveResults",
     "SentenceSplitter",
     "SimpleFileNodeParser",
@@ -258,6 +270,8 @@ __all__ = [
     "TextNodeWithScore",
     "TokenTextSplitter",
     "TransformationCategoryNames",
+    "UserOrganization",
+    "UserOrganizationCreate",
     "ValidationError",
     "ValidationErrorLocItem",
 ]
llama_cloud/types/chat_params.py (new file)

@@ -0,0 +1,38 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .chat_message import ChatMessage
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ChatParams(pydantic.BaseModel):
+    """
+    Base schema model for BaseComponent classes used in the platform.
+    Comes with special serialization logic for types used commonly in platform codebase.
+    """
+
+    messages: typing.List[ChatMessage]
+    class_name: typing.Optional[str]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
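ChatParams is a frozen pydantic model, so build it once with the full message list rather than mutating it afterwards. A small construction sketch (ChatMessage's role/content fields are assumed, as above):

from llama_cloud import ChatMessage, ChatParams

params = ChatParams(
    messages=[ChatMessage(role="user", content="hello")],  # fields assumed
)

# The overridden json()/dict() helpers apply by_alias/exclude_unset, so unset
# optional fields such as class_name are omitted from the serialized payload.
print(params.json())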