llama-cloud 0.1.4__py3-none-any.whl → 0.1.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of llama-cloud might be problematic.

Files changed (117)
  1. llama_cloud/__init__.py +76 -10
  2. llama_cloud/client.py +3 -0
  3. llama_cloud/environment.py +1 -1
  4. llama_cloud/resources/__init__.py +23 -1
  5. llama_cloud/resources/data_sinks/client.py +26 -20
  6. llama_cloud/resources/data_sources/client.py +16 -16
  7. llama_cloud/resources/embedding_model_configs/__init__.py +23 -0
  8. llama_cloud/resources/embedding_model_configs/client.py +416 -0
  9. llama_cloud/resources/embedding_model_configs/types/__init__.py +23 -0
  10. llama_cloud/resources/embedding_model_configs/types/embedding_model_config_create_embedding_config.py +89 -0
  11. llama_cloud/resources/evals/client.py +36 -26
  12. llama_cloud/resources/extraction/client.py +32 -32
  13. llama_cloud/resources/files/__init__.py +2 -2
  14. llama_cloud/resources/files/client.py +310 -54
  15. llama_cloud/resources/files/types/__init__.py +3 -1
  16. llama_cloud/resources/files/types/file_create_from_url_resource_info_value.py +7 -0
  17. llama_cloud/resources/files/types/file_create_permission_info_value.py +7 -0
  18. llama_cloud/resources/organizations/client.py +125 -56
  19. llama_cloud/resources/parsing/client.py +652 -264
  20. llama_cloud/resources/pipelines/client.py +617 -310
  21. llama_cloud/resources/projects/client.py +341 -136
  22. llama_cloud/types/__init__.py +58 -10
  23. llama_cloud/types/azure_open_ai_embedding.py +12 -6
  24. llama_cloud/types/base_prompt_template.py +6 -2
  25. llama_cloud/types/bedrock_embedding.py +12 -6
  26. llama_cloud/types/character_splitter.py +4 -2
  27. llama_cloud/types/chat_message.py +1 -1
  28. llama_cloud/types/cloud_az_storage_blob_data_source.py +16 -7
  29. llama_cloud/types/cloud_box_data_source.py +13 -6
  30. llama_cloud/types/cloud_confluence_data_source.py +7 -6
  31. llama_cloud/types/cloud_document.py +3 -1
  32. llama_cloud/types/cloud_document_create.py +3 -1
  33. llama_cloud/types/cloud_google_drive_data_source.py +1 -0
  34. llama_cloud/types/cloud_jira_data_source.py +7 -4
  35. llama_cloud/types/cloud_notion_page_data_source.py +3 -2
  36. llama_cloud/types/cloud_one_drive_data_source.py +6 -2
  37. llama_cloud/types/cloud_postgres_vector_store.py +1 -1
  38. llama_cloud/types/cloud_s_3_data_source.py +9 -4
  39. llama_cloud/types/cloud_sharepoint_data_source.py +9 -5
  40. llama_cloud/types/cloud_slack_data_source.py +7 -6
  41. llama_cloud/types/code_splitter.py +1 -1
  42. llama_cloud/types/cohere_embedding.py +7 -3
  43. llama_cloud/types/data_sink.py +4 -4
  44. llama_cloud/types/data_sink_create.py +1 -1
  45. llama_cloud/types/data_source.py +7 -5
  46. llama_cloud/types/data_source_create.py +4 -2
  47. llama_cloud/types/embedding_model_config.py +43 -0
  48. llama_cloud/types/embedding_model_config_embedding_config.py +89 -0
  49. llama_cloud/types/embedding_model_config_update.py +35 -0
  50. llama_cloud/types/embedding_model_config_update_embedding_config.py +89 -0
  51. llama_cloud/types/eval_dataset.py +2 -2
  52. llama_cloud/types/eval_dataset_job_record.py +13 -7
  53. llama_cloud/types/eval_execution_params_override.py +6 -2
  54. llama_cloud/types/eval_question.py +2 -2
  55. llama_cloud/types/extraction_result.py +2 -2
  56. llama_cloud/types/extraction_schema.py +5 -3
  57. llama_cloud/types/file.py +15 -7
  58. llama_cloud/types/file_permission_info_value.py +5 -0
  59. llama_cloud/types/filter_operator.py +2 -2
  60. llama_cloud/types/gemini_embedding.py +10 -6
  61. llama_cloud/types/hugging_face_inference_api_embedding.py +27 -11
  62. llama_cloud/types/input_message.py +3 -1
  63. llama_cloud/types/interval_usage_and_plan.py +36 -0
  64. llama_cloud/types/job_name_mapping.py +4 -0
  65. llama_cloud/types/llama_parse_parameters.py +21 -0
  66. llama_cloud/types/llm.py +4 -2
  67. llama_cloud/types/llm_parameters.py +5 -2
  68. llama_cloud/types/local_eval.py +10 -8
  69. llama_cloud/types/local_eval_results.py +1 -1
  70. llama_cloud/types/managed_ingestion_status_response.py +5 -3
  71. llama_cloud/types/markdown_element_node_parser.py +5 -3
  72. llama_cloud/types/markdown_node_parser.py +3 -2
  73. llama_cloud/types/metadata_filter.py +2 -2
  74. llama_cloud/types/metric_result.py +3 -3
  75. llama_cloud/types/node_parser.py +1 -1
  76. llama_cloud/types/open_ai_embedding.py +12 -6
  77. llama_cloud/types/organization.py +2 -2
  78. llama_cloud/types/page_splitter_node_parser.py +2 -2
  79. llama_cloud/types/paginated_list_pipeline_files_response.py +35 -0
  80. llama_cloud/types/parsing_job_structured_result.py +32 -0
  81. llama_cloud/types/permission.py +3 -3
  82. llama_cloud/types/pipeline.py +17 -6
  83. llama_cloud/types/pipeline_configuration_hashes.py +3 -3
  84. llama_cloud/types/pipeline_create.py +15 -4
  85. llama_cloud/types/pipeline_data_source.py +13 -7
  86. llama_cloud/types/pipeline_data_source_create.py +3 -1
  87. llama_cloud/types/pipeline_deployment.py +4 -4
  88. llama_cloud/types/pipeline_file.py +25 -10
  89. llama_cloud/types/pipeline_file_create.py +3 -1
  90. llama_cloud/types/pipeline_file_permission_info_value.py +7 -0
  91. llama_cloud/types/plan.py +40 -0
  92. llama_cloud/types/playground_session.py +2 -2
  93. llama_cloud/types/preset_retrieval_params.py +14 -7
  94. llama_cloud/types/presigned_url.py +3 -1
  95. llama_cloud/types/project.py +2 -2
  96. llama_cloud/types/prompt_mixin_prompts.py +1 -1
  97. llama_cloud/types/prompt_spec.py +4 -2
  98. llama_cloud/types/role.py +3 -3
  99. llama_cloud/types/sentence_splitter.py +4 -2
  100. llama_cloud/types/text_node.py +3 -3
  101. llama_cloud/types/{hugging_face_inference_api_embedding_token.py → token.py} +1 -1
  102. llama_cloud/types/token_text_splitter.py +1 -1
  103. llama_cloud/types/usage.py +41 -0
  104. llama_cloud/types/user_organization.py +9 -5
  105. llama_cloud/types/user_organization_create.py +4 -4
  106. llama_cloud/types/user_organization_delete.py +2 -2
  107. llama_cloud/types/user_organization_role.py +2 -2
  108. llama_cloud/types/value.py +5 -0
  109. llama_cloud/types/vertex_text_embedding.py +9 -5
  110. {llama_cloud-0.1.4.dist-info → llama_cloud-0.1.6.dist-info}/METADATA +1 -1
  111. {llama_cloud-0.1.4.dist-info → llama_cloud-0.1.6.dist-info}/RECORD +113 -99
  112. llama_cloud/types/data_sink_component.py +0 -20
  113. llama_cloud/types/data_source_component.py +0 -28
  114. llama_cloud/types/metadata_filter_value.py +0 -5
  115. llama_cloud/types/pipeline_data_source_component.py +0 -28
  116. {llama_cloud-0.1.4.dist-info → llama_cloud-0.1.6.dist-info}/LICENSE +0 -0
  117. {llama_cloud-0.1.4.dist-info → llama_cloud-0.1.6.dist-info}/WHEEL +0 -0
llama_cloud/resources/projects/client.py
@@ -11,6 +11,7 @@ from ...core.remove_none_from_dict import remove_none_from_dict
 from ...errors.unprocessable_entity_error import UnprocessableEntityError
 from ...types.eval_dataset import EvalDataset
 from ...types.http_validation_error import HttpValidationError
+from ...types.interval_usage_and_plan import IntervalUsageAndPlan
 from ...types.local_eval import LocalEval
 from ...types.local_eval_results import LocalEvalResults
 from ...types.local_eval_sets import LocalEvalSets
@@ -86,7 +87,7 @@ class ProjectsClient:
         )
         client.projects.create_project(
             request=ProjectCreate(
-                name="string",
+                name="name",
             ),
         )
         """
@@ -126,7 +127,7 @@ class ProjectsClient:
         )
         client.projects.upsert_project(
             request=ProjectCreate(
-                name="string",
+                name="name",
             ),
         )
         """
@@ -148,25 +149,26 @@
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def get_project(self, project_id: str) -> Project:
+    def get_project(self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None) -> Project:
         """
         Get a project by ID.
 
         Parameters:
-        - project_id: str.
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.projects.get_project(
-            project_id="string",
-        )
+        client.projects.get_project()
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -180,12 +182,16 @@
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def update_existing_project(self, project_id: str, *, name: str) -> Project:
+    def update_existing_project(
+        self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None, name: str
+    ) -> Project:
         """
         Update an existing project.
 
         Parameters:
-        - project_id: str.
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
 
         - name: str.
         ---
@@ -195,13 +201,13 @@
             token="YOUR_TOKEN",
         )
         client.projects.update_existing_project(
-            project_id="string",
-            name="string",
+            name="name",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
             "PUT",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             json=jsonable_encoder({"name": name}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -216,25 +222,26 @@
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def delete_project(self, project_id: str) -> None:
+    def delete_project(self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None) -> None:
         """
         Delete a project by ID.
 
         Parameters:
-        - project_id: str.
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.projects.delete_project(
-            project_id="string",
-        )
+        client.projects.delete_project()
         """
         _response = self._client_wrapper.httpx_client.request(
             "DELETE",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -248,27 +255,65 @@
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def list_datasets_for_project(self, project_id: str) -> typing.List[EvalDataset]:
+    def get_project_usage(
+        self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None
+    ) -> IntervalUsageAndPlan:
         """
-        List eval datasets for a project.
+        Get usage for a project
 
         Parameters:
-        - project_id: str.
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.projects.list_datasets_for_project(
-            project_id="string",
+        client.projects.get_project_usage()
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/usage"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(IntervalUsageAndPlan, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def list_datasets_for_project(
+        self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None
+    ) -> typing.List[EvalDataset]:
+        """
+        List eval datasets for a project.
+
+        Parameters:
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
         )
+        client.projects.list_datasets_for_project()
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/eval/dataset"
             ),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
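The hunk above is the headline addition in this range: a new get_project_usage method that issues GET api/v1/projects/{project_id}/usage and parses the response into the new IntervalUsageAndPlan type. A minimal sketch of calling it against 0.1.6, assuming hypothetical placeholder IDs; the fields of the returned model live in the new interval_usage_and_plan.py, usage.py, and plan.py and are not shown in this diff:

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    # "proj_123" and "org_456" are hypothetical placeholder IDs.
    usage_and_plan = client.projects.get_project_usage(
        "proj_123",                 # project_id, now typing.Optional[str]
        organization_id="org_456",  # new keyword-only query parameter
    )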
@@ -282,12 +327,16 @@
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def create_eval_dataset_for_project(self, project_id: str, *, name: str) -> EvalDataset:
+    def create_eval_dataset_for_project(
+        self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None, name: str
+    ) -> EvalDataset:
         """
         Create a new eval dataset for a project.
 
         Parameters:
-        - project_id: str.
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
 
         - name: str. The name of the EvalDataset.
         ---
@@ -297,8 +346,7 @@
             token="YOUR_TOKEN",
         )
         client.projects.create_eval_dataset_for_project(
-            project_id="string",
-            name="string",
+            name="name",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -306,6 +354,7 @@
             urllib.parse.urljoin(
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/eval/dataset"
             ),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             json=jsonable_encoder({"name": name}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -321,27 +370,34 @@
             raise ApiError(status_code=_response.status_code, body=_response_json)
 
     def create_local_eval_set_for_project(
-        self, project_id: str, *, app_name: str, results: typing.Dict[str, typing.List[LocalEval]]
+        self,
+        project_id: typing.Optional[str],
+        *,
+        organization_id: typing.Optional[str] = None,
+        app_name: str,
+        results: typing.Dict[str, typing.List[LocalEval]],
     ) -> typing.List[LocalEvalResults]:
         """
         Create a new local eval set.
 
         Parameters:
-        - project_id: str.
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
 
         - app_name: str. The name of the app.
 
         - results: typing.Dict[str, typing.List[LocalEval]]. The eval results.
         ---
+        from llama_cloud import LocalEval
         from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
        client.projects.create_local_eval_set_for_project(
-            project_id="string",
-            app_name="string",
-            results={"string": []},
+            app_name="app_name",
+            results={"key": [LocalEval()]},
        )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -349,6 +405,7 @@
             urllib.parse.urljoin(
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/localevalset"
             ),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             json=jsonable_encoder({"app_name": app_name, "results": results}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -363,25 +420,28 @@
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def list_local_evals_for_project(self, project_id: str) -> typing.List[LocalEvalResults]:
+    def list_local_evals_for_project(
+        self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None
+    ) -> typing.List[LocalEvalResults]:
         """
         List local eval results for a project.
 
         Parameters:
-        - project_id: str.
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.projects.list_local_evals_for_project(
-            project_id="string",
-        )
+        client.projects.list_local_evals_for_project()
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/localeval"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -395,27 +455,30 @@
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def list_local_eval_sets_for_project(self, project_id: str) -> typing.List[LocalEvalSets]:
+    def list_local_eval_sets_for_project(
+        self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None
+    ) -> typing.List[LocalEvalSets]:
         """
         List local eval sets for a project.
 
         Parameters:
-        - project_id: str.
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.projects.list_local_eval_sets_for_project(
-            project_id="string",
-        )
+        client.projects.list_local_eval_sets_for_project()
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/localevalsets"
             ),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -429,14 +492,18 @@
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def delete_local_eval_set(self, project_id: str, local_eval_set_id: str) -> typing.Any:
+    def delete_local_eval_set(
+        self, project_id: typing.Optional[str], local_eval_set_id: str, *, organization_id: typing.Optional[str] = None
+    ) -> typing.Any:
         """
         Delete a local eval set.
 
         Parameters:
-        - project_id: str.
+        - project_id: typing.Optional[str].
 
         - local_eval_set_id: str.
+
+        - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud
 
@@ -444,8 +511,7 @@
             token="YOUR_TOKEN",
         )
         client.projects.delete_local_eval_set(
-            project_id="string",
-            local_eval_set_id="string",
+            local_eval_set_id="local_eval_set_id",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -454,6 +520,7 @@
             urllib.parse.urljoin(
                 f"{self._client_wrapper.get_base_url()}/",
                 f"api/v1/projects/{project_id}/localevalset/{local_eval_set_id}",
             ),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -467,25 +534,28 @@
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def list_promptmixin_prompts(self, project_id: str) -> typing.List[PromptMixinPrompts]:
+    def list_promptmixin_prompts(
+        self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None
+    ) -> typing.List[PromptMixinPrompts]:
         """
         List PromptMixin prompt sets for a project.
 
         Parameters:
-        - project_id: str.
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.projects.list_promptmixin_prompts(
-            project_id="string",
-        )
+        client.projects.list_promptmixin_prompts()
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/prompts"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -499,33 +569,47 @@
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def create_prompt_mixin_prompts(self, project_id: str, *, request: PromptMixinPrompts) -> PromptMixinPrompts:
+    def create_prompt_mixin_prompts(
+        self,
+        project_id: typing.Optional[str],
+        *,
+        organization_id: typing.Optional[str] = None,
+        request: PromptMixinPrompts,
+    ) -> PromptMixinPrompts:
         """
         Create a new PromptMixin prompt set.
 
         Parameters:
-        - project_id: str.
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
 
         - request: PromptMixinPrompts.
         ---
-        from llama_cloud import PromptMixinPrompts
+        from llama_cloud import PromptMixinPrompts, PromptSpec
         from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
         client.projects.create_prompt_mixin_prompts(
-            project_id="string",
             request=PromptMixinPrompts(
-                project_id="string",
-                name="string",
-                prompts=[],
+                project_id="project_id",
+                name="name",
+                prompts=[
+                    PromptSpec(
+                        prompt_key="prompt_key",
+                        prompt_class="prompt_class",
+                        prompt_type="prompt_type",
+                    )
+                ],
             ),
         )
         """
         _response = self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/prompts"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -541,31 +625,43 @@
             raise ApiError(status_code=_response.status_code, body=_response_json)
 
     def update_promptmixin_prompts(
-        self, project_id: str, prompt_set_id: str, *, request: PromptMixinPrompts
+        self,
+        project_id: typing.Optional[str],
+        prompt_set_id: str,
+        *,
+        organization_id: typing.Optional[str] = None,
+        request: PromptMixinPrompts,
     ) -> PromptMixinPrompts:
         """
         Update a PromptMixin prompt set.
 
         Parameters:
-        - project_id: str.
+        - project_id: typing.Optional[str].
 
         - prompt_set_id: str.
 
+        - organization_id: typing.Optional[str].
+
         - request: PromptMixinPrompts.
         ---
-        from llama_cloud import PromptMixinPrompts
+        from llama_cloud import PromptMixinPrompts, PromptSpec
         from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
         client.projects.update_promptmixin_prompts(
-            project_id="string",
-            prompt_set_id="string",
+            prompt_set_id="prompt_set_id",
             request=PromptMixinPrompts(
-                project_id="string",
-                name="string",
-                prompts=[],
+                project_id="project_id",
+                name="name",
+                prompts=[
+                    PromptSpec(
+                        prompt_key="prompt_key",
+                        prompt_class="prompt_class",
+                        prompt_type="prompt_type",
+                    )
+                ],
             ),
         )
         """
@@ -574,6 +670,7 @@
             urllib.parse.urljoin(
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/prompts/{prompt_set_id}"
             ),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -588,14 +685,18 @@
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def delete_prompt_mixin_prompts(self, project_id: str, prompt_set_id: str) -> typing.Any:
+    def delete_prompt_mixin_prompts(
+        self, project_id: typing.Optional[str], prompt_set_id: str, *, organization_id: typing.Optional[str] = None
+    ) -> typing.Any:
         """
         Delete a PromptMixin prompt set.
 
         Parameters:
-        - project_id: str.
+        - project_id: typing.Optional[str].
 
         - prompt_set_id: str.
+
+        - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud
 
@@ -603,8 +704,7 @@
             token="YOUR_TOKEN",
         )
         client.projects.delete_prompt_mixin_prompts(
-            project_id="string",
-            prompt_set_id="string",
+            prompt_set_id="prompt_set_id",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -612,6 +712,7 @@
             urllib.parse.urljoin(
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/prompts/{prompt_set_id}"
             ),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -682,7 +783,7 @@ class AsyncProjectsClient:
         )
         await client.projects.create_project(
             request=ProjectCreate(
-                name="string",
+                name="name",
             ),
         )
         """
@@ -722,7 +823,7 @@ class AsyncProjectsClient:
         )
         await client.projects.upsert_project(
             request=ProjectCreate(
-                name="string",
+                name="name",
             ),
         )
         """
@@ -744,25 +845,28 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def get_project(self, project_id: str) -> Project:
+    async def get_project(
+        self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None
+    ) -> Project:
         """
         Get a project by ID.
 
         Parameters:
-        - project_id: str.
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.projects.get_project(
-            project_id="string",
-        )
+        await client.projects.get_project()
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -776,12 +880,16 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def update_existing_project(self, project_id: str, *, name: str) -> Project:
+    async def update_existing_project(
+        self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None, name: str
+    ) -> Project:
         """
         Update an existing project.
 
         Parameters:
-        - project_id: str.
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
 
         - name: str.
         ---
@@ -791,13 +899,13 @@ class AsyncProjectsClient:
             token="YOUR_TOKEN",
         )
         await client.projects.update_existing_project(
-            project_id="string",
-            name="string",
+            name="name",
        )
         """
         _response = await self._client_wrapper.httpx_client.request(
             "PUT",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             json=jsonable_encoder({"name": name}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -812,25 +920,28 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def delete_project(self, project_id: str) -> None:
+    async def delete_project(
+        self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None
+    ) -> None:
         """
         Delete a project by ID.
 
         Parameters:
-        - project_id: str.
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.projects.delete_project(
-            project_id="string",
-        )
+        await client.projects.delete_project()
         """
         _response = await self._client_wrapper.httpx_client.request(
             "DELETE",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -844,27 +955,65 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def list_datasets_for_project(self, project_id: str) -> typing.List[EvalDataset]:
+    async def get_project_usage(
+        self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None
+    ) -> IntervalUsageAndPlan:
         """
-        List eval datasets for a project.
+        Get usage for a project
 
         Parameters:
-        - project_id: str.
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.projects.list_datasets_for_project(
-            project_id="string",
+        await client.projects.get_project_usage()
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/usage"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(IntervalUsageAndPlan, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def list_datasets_for_project(
+        self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None
+    ) -> typing.List[EvalDataset]:
+        """
+        List eval datasets for a project.
+
+        Parameters:
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
         )
+        await client.projects.list_datasets_for_project()
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/eval/dataset"
             ),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -878,12 +1027,16 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def create_eval_dataset_for_project(self, project_id: str, *, name: str) -> EvalDataset:
+    async def create_eval_dataset_for_project(
+        self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None, name: str
+    ) -> EvalDataset:
         """
         Create a new eval dataset for a project.
 
         Parameters:
-        - project_id: str.
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
 
         - name: str. The name of the EvalDataset.
         ---
@@ -893,8 +1046,7 @@ class AsyncProjectsClient:
             token="YOUR_TOKEN",
         )
         await client.projects.create_eval_dataset_for_project(
-            project_id="string",
-            name="string",
+            name="name",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -902,6 +1054,7 @@ class AsyncProjectsClient:
             urllib.parse.urljoin(
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/eval/dataset"
             ),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             json=jsonable_encoder({"name": name}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -917,27 +1070,34 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response_json)
 
     async def create_local_eval_set_for_project(
-        self, project_id: str, *, app_name: str, results: typing.Dict[str, typing.List[LocalEval]]
+        self,
+        project_id: typing.Optional[str],
+        *,
+        organization_id: typing.Optional[str] = None,
+        app_name: str,
+        results: typing.Dict[str, typing.List[LocalEval]],
     ) -> typing.List[LocalEvalResults]:
         """
         Create a new local eval set.
 
         Parameters:
-        - project_id: str.
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
 
         - app_name: str. The name of the app.
 
         - results: typing.Dict[str, typing.List[LocalEval]]. The eval results.
         ---
+        from llama_cloud import LocalEval
         from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
         await client.projects.create_local_eval_set_for_project(
-            project_id="string",
-            app_name="string",
-            results={"string": []},
+            app_name="app_name",
+            results={"key": [LocalEval()]},
        )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -945,6 +1105,7 @@ class AsyncProjectsClient:
             urllib.parse.urljoin(
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/localevalset"
             ),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             json=jsonable_encoder({"app_name": app_name, "results": results}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -959,25 +1120,28 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def list_local_evals_for_project(self, project_id: str) -> typing.List[LocalEvalResults]:
+    async def list_local_evals_for_project(
+        self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None
+    ) -> typing.List[LocalEvalResults]:
         """
         List local eval results for a project.
 
         Parameters:
-        - project_id: str.
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.projects.list_local_evals_for_project(
-            project_id="string",
-        )
+        await client.projects.list_local_evals_for_project()
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/localeval"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -991,27 +1155,30 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def list_local_eval_sets_for_project(self, project_id: str) -> typing.List[LocalEvalSets]:
+    async def list_local_eval_sets_for_project(
+        self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None
+    ) -> typing.List[LocalEvalSets]:
         """
         List local eval sets for a project.
 
         Parameters:
-        - project_id: str.
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.projects.list_local_eval_sets_for_project(
-            project_id="string",
-        )
+        await client.projects.list_local_eval_sets_for_project()
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/localevalsets"
             ),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -1025,14 +1192,18 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def delete_local_eval_set(self, project_id: str, local_eval_set_id: str) -> typing.Any:
+    async def delete_local_eval_set(
+        self, project_id: typing.Optional[str], local_eval_set_id: str, *, organization_id: typing.Optional[str] = None
+    ) -> typing.Any:
         """
         Delete a local eval set.
 
         Parameters:
-        - project_id: str.
+        - project_id: typing.Optional[str].
 
         - local_eval_set_id: str.
+
+        - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
@@ -1040,8 +1211,7 @@ class AsyncProjectsClient:
             token="YOUR_TOKEN",
         )
         await client.projects.delete_local_eval_set(
-            project_id="string",
-            local_eval_set_id="string",
+            local_eval_set_id="local_eval_set_id",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -1050,6 +1220,7 @@ class AsyncProjectsClient:
             urllib.parse.urljoin(
                 f"{self._client_wrapper.get_base_url()}/",
                 f"api/v1/projects/{project_id}/localevalset/{local_eval_set_id}",
             ),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -1063,25 +1234,28 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def list_promptmixin_prompts(self, project_id: str) -> typing.List[PromptMixinPrompts]:
+    async def list_promptmixin_prompts(
+        self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None
+    ) -> typing.List[PromptMixinPrompts]:
         """
         List PromptMixin prompt sets for a project.
 
         Parameters:
-        - project_id: str.
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.projects.list_promptmixin_prompts(
-            project_id="string",
-        )
+        await client.projects.list_promptmixin_prompts()
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/prompts"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -1095,33 +1269,47 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def create_prompt_mixin_prompts(self, project_id: str, *, request: PromptMixinPrompts) -> PromptMixinPrompts:
+    async def create_prompt_mixin_prompts(
+        self,
+        project_id: typing.Optional[str],
+        *,
+        organization_id: typing.Optional[str] = None,
+        request: PromptMixinPrompts,
+    ) -> PromptMixinPrompts:
         """
         Create a new PromptMixin prompt set.
 
         Parameters:
-        - project_id: str.
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
 
         - request: PromptMixinPrompts.
         ---
-        from llama_cloud import PromptMixinPrompts
+        from llama_cloud import PromptMixinPrompts, PromptSpec
         from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
         await client.projects.create_prompt_mixin_prompts(
-            project_id="string",
             request=PromptMixinPrompts(
-                project_id="string",
-                name="string",
-                prompts=[],
+                project_id="project_id",
+                name="name",
+                prompts=[
+                    PromptSpec(
+                        prompt_key="prompt_key",
+                        prompt_class="prompt_class",
+                        prompt_type="prompt_type",
+                    )
+                ],
             ),
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/prompts"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -1137,31 +1325,43 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response_json)
 
     async def update_promptmixin_prompts(
-        self, project_id: str, prompt_set_id: str, *, request: PromptMixinPrompts
+        self,
+        project_id: typing.Optional[str],
+        prompt_set_id: str,
+        *,
+        organization_id: typing.Optional[str] = None,
+        request: PromptMixinPrompts,
     ) -> PromptMixinPrompts:
         """
         Update a PromptMixin prompt set.
 
         Parameters:
-        - project_id: str.
+        - project_id: typing.Optional[str].
 
         - prompt_set_id: str.
 
+        - organization_id: typing.Optional[str].
+
         - request: PromptMixinPrompts.
         ---
-        from llama_cloud import PromptMixinPrompts
+        from llama_cloud import PromptMixinPrompts, PromptSpec
         from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
         await client.projects.update_promptmixin_prompts(
-            project_id="string",
-            prompt_set_id="string",
+            prompt_set_id="prompt_set_id",
             request=PromptMixinPrompts(
-                project_id="string",
-                name="string",
-                prompts=[],
+                project_id="project_id",
+                name="name",
+                prompts=[
+                    PromptSpec(
+                        prompt_key="prompt_key",
+                        prompt_class="prompt_class",
+                        prompt_type="prompt_type",
+                    )
+                ],
             ),
         )
         """
@@ -1170,6 +1370,7 @@ class AsyncProjectsClient:
             urllib.parse.urljoin(
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/prompts/{prompt_set_id}"
             ),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -1184,14 +1385,18 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def delete_prompt_mixin_prompts(self, project_id: str, prompt_set_id: str) -> typing.Any:
+    async def delete_prompt_mixin_prompts(
+        self, project_id: typing.Optional[str], prompt_set_id: str, *, organization_id: typing.Optional[str] = None
+    ) -> typing.Any:
         """
         Delete a PromptMixin prompt set.
 
         Parameters:
-        - project_id: str.
+        - project_id: typing.Optional[str].
 
         - prompt_set_id: str.
+
+        - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
@@ -1199,8 +1404,7 @@ class AsyncProjectsClient:
             token="YOUR_TOKEN",
        )
         await client.projects.delete_prompt_mixin_prompts(
-            project_id="string",
-            prompt_set_id="string",
+            prompt_set_id="prompt_set_id",
        )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -1208,6 +1412,7 @@ class AsyncProjectsClient:
             urllib.parse.urljoin(
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/prompts/{prompt_set_id}"
             ),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
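Taken together, the hunks above apply the same two changes to every project-scoped method on both ProjectsClient and AsyncProjectsClient: project_id loosens from str to typing.Optional[str], and a keyword-only organization_id parameter is threaded into the query string via remove_none_from_dict, so leaving it as None sends no organization_id parameter at all and the 0.1.4 call shape keeps working. A minimal async sketch under the same assumptions as above (placeholder token and IDs):

    import asyncio

    from llama_cloud.client import AsyncLlamaCloud


    async def main() -> None:
        client = AsyncLlamaCloud(token="YOUR_TOKEN")
        # Passing organization_id scopes the lookup to one organization;
        # omitting it preserves the 0.1.4 behavior. "proj_123" and
        # "org_456" are hypothetical placeholder IDs.
        project = await client.projects.get_project("proj_123", organization_id="org_456")
        print(project.name)


    asyncio.run(main())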