llama-cloud 0.1.5__py3-none-any.whl → 0.1.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of llama-cloud might be problematic.

Files changed (105)
  1. llama_cloud/__init__.py +12 -10
  2. llama_cloud/environment.py +1 -1
  3. llama_cloud/resources/__init__.py +2 -1
  4. llama_cloud/resources/data_sinks/client.py +14 -14
  5. llama_cloud/resources/data_sources/client.py +16 -16
  6. llama_cloud/resources/embedding_model_configs/client.py +80 -24
  7. llama_cloud/resources/evals/client.py +36 -26
  8. llama_cloud/resources/extraction/client.py +32 -32
  9. llama_cloud/resources/files/__init__.py +2 -2
  10. llama_cloud/resources/files/client.py +53 -28
  11. llama_cloud/resources/files/types/__init__.py +2 -1
  12. llama_cloud/resources/files/types/file_create_permission_info_value.py +7 -0
  13. llama_cloud/resources/organizations/client.py +60 -56
  14. llama_cloud/resources/parsing/client.py +555 -324
  15. llama_cloud/resources/pipelines/client.py +446 -302
  16. llama_cloud/resources/projects/client.py +270 -136
  17. llama_cloud/types/__init__.py +10 -10
  18. llama_cloud/types/azure_open_ai_embedding.py +12 -6
  19. llama_cloud/types/base_prompt_template.py +6 -2
  20. llama_cloud/types/bedrock_embedding.py +12 -6
  21. llama_cloud/types/character_splitter.py +4 -2
  22. llama_cloud/types/chat_message.py +1 -1
  23. llama_cloud/types/cloud_az_storage_blob_data_source.py +16 -7
  24. llama_cloud/types/cloud_box_data_source.py +13 -6
  25. llama_cloud/types/cloud_confluence_data_source.py +7 -6
  26. llama_cloud/types/cloud_document.py +3 -1
  27. llama_cloud/types/cloud_document_create.py +3 -1
  28. llama_cloud/types/cloud_google_drive_data_source.py +1 -0
  29. llama_cloud/types/cloud_jira_data_source.py +7 -4
  30. llama_cloud/types/cloud_notion_page_data_source.py +3 -2
  31. llama_cloud/types/cloud_one_drive_data_source.py +6 -3
  32. llama_cloud/types/cloud_s_3_data_source.py +9 -4
  33. llama_cloud/types/cloud_sharepoint_data_source.py +9 -6
  34. llama_cloud/types/cloud_slack_data_source.py +7 -6
  35. llama_cloud/types/code_splitter.py +1 -1
  36. llama_cloud/types/cohere_embedding.py +7 -3
  37. llama_cloud/types/data_sink.py +4 -4
  38. llama_cloud/types/data_sink_create.py +1 -1
  39. llama_cloud/types/data_source.py +7 -5
  40. llama_cloud/types/data_source_create.py +4 -2
  41. llama_cloud/types/embedding_model_config.py +2 -2
  42. llama_cloud/types/embedding_model_config_update.py +4 -2
  43. llama_cloud/types/eval_dataset.py +2 -2
  44. llama_cloud/types/eval_dataset_job_record.py +13 -7
  45. llama_cloud/types/eval_execution_params_override.py +6 -2
  46. llama_cloud/types/eval_question.py +2 -2
  47. llama_cloud/types/extraction_result.py +2 -2
  48. llama_cloud/types/extraction_schema.py +5 -3
  49. llama_cloud/types/file.py +15 -7
  50. llama_cloud/types/file_permission_info_value.py +5 -0
  51. llama_cloud/types/filter_operator.py +2 -2
  52. llama_cloud/types/gemini_embedding.py +10 -6
  53. llama_cloud/types/hugging_face_inference_api_embedding.py +27 -11
  54. llama_cloud/types/input_message.py +3 -1
  55. llama_cloud/types/job_name_mapping.py +4 -0
  56. llama_cloud/types/llama_parse_parameters.py +11 -0
  57. llama_cloud/types/llm.py +4 -2
  58. llama_cloud/types/llm_parameters.py +5 -2
  59. llama_cloud/types/local_eval.py +10 -8
  60. llama_cloud/types/local_eval_results.py +1 -1
  61. llama_cloud/types/managed_ingestion_status_response.py +5 -3
  62. llama_cloud/types/markdown_element_node_parser.py +5 -3
  63. llama_cloud/types/markdown_node_parser.py +1 -1
  64. llama_cloud/types/metadata_filter.py +2 -2
  65. llama_cloud/types/metric_result.py +3 -3
  66. llama_cloud/types/node_parser.py +1 -1
  67. llama_cloud/types/open_ai_embedding.py +12 -6
  68. llama_cloud/types/organization.py +2 -2
  69. llama_cloud/types/page_splitter_node_parser.py +2 -2
  70. llama_cloud/types/parsing_job_structured_result.py +32 -0
  71. llama_cloud/types/permission.py +3 -3
  72. llama_cloud/types/pipeline.py +17 -7
  73. llama_cloud/types/pipeline_configuration_hashes.py +3 -3
  74. llama_cloud/types/pipeline_create.py +15 -5
  75. llama_cloud/types/pipeline_data_source.py +13 -7
  76. llama_cloud/types/pipeline_data_source_create.py +3 -1
  77. llama_cloud/types/pipeline_deployment.py +4 -4
  78. llama_cloud/types/pipeline_file.py +25 -11
  79. llama_cloud/types/pipeline_file_create.py +3 -1
  80. llama_cloud/types/pipeline_file_permission_info_value.py +7 -0
  81. llama_cloud/types/playground_session.py +2 -2
  82. llama_cloud/types/preset_retrieval_params.py +14 -7
  83. llama_cloud/types/presigned_url.py +3 -1
  84. llama_cloud/types/project.py +2 -2
  85. llama_cloud/types/prompt_mixin_prompts.py +1 -1
  86. llama_cloud/types/prompt_spec.py +4 -2
  87. llama_cloud/types/role.py +3 -3
  88. llama_cloud/types/sentence_splitter.py +4 -2
  89. llama_cloud/types/text_node.py +3 -3
  90. llama_cloud/types/{hugging_face_inference_api_embedding_token.py → token.py} +1 -1
  91. llama_cloud/types/token_text_splitter.py +1 -1
  92. llama_cloud/types/user_organization.py +9 -5
  93. llama_cloud/types/user_organization_create.py +4 -4
  94. llama_cloud/types/user_organization_delete.py +2 -2
  95. llama_cloud/types/user_organization_role.py +2 -2
  96. llama_cloud/types/value.py +5 -0
  97. llama_cloud/types/vertex_text_embedding.py +9 -5
  98. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.6.dist-info}/METADATA +2 -1
  99. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.6.dist-info}/RECORD +101 -100
  100. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.6.dist-info}/WHEEL +1 -1
  101. llama_cloud/types/data_sink_component.py +0 -20
  102. llama_cloud/types/data_source_component.py +0 -28
  103. llama_cloud/types/metadata_filter_value.py +0 -5
  104. llama_cloud/types/pipeline_data_source_component.py +0 -28
  105. {llama_cloud-0.1.5.dist-info → llama_cloud-0.1.6.dist-info}/LICENSE +0 -0
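The largest single-file changes are the resource clients, and the diff below excerpts llama_cloud/resources/projects/client.py. Nearly all of its churn is one pattern applied to every method: project_id becomes typing.Optional[str], a keyword-only organization_id argument is added, and the organization ID is forwarded as a query parameter. A minimal sketch of the resulting call shape, assuming the generated 0.1.6 signatures shown in the diff ("proj_123" and "org_456" are hypothetical placeholder IDs, not real ones):

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# project_id is now typing.Optional[str]; organization_id is a new
# keyword-only argument sent as a query parameter.
project = client.projects.get_project(
    "proj_123",  # hypothetical project ID
    organization_id="org_456",  # hypothetical organization ID
)

# Omitting organization_id (None by default) drops it from the query
# string, so 0.1.5-style calls should keep working unchanged.
same_project = client.projects.get_project("proj_123")
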
--- a/llama_cloud/resources/projects/client.py
+++ b/llama_cloud/resources/projects/client.py
@@ -87,7 +87,7 @@ class ProjectsClient:
         )
         client.projects.create_project(
             request=ProjectCreate(
-                name="string",
+                name="name",
             ),
         )
         """
@@ -127,7 +127,7 @@ class ProjectsClient:
         )
         client.projects.upsert_project(
             request=ProjectCreate(
-                name="string",
+                name="name",
             ),
         )
         """
@@ -149,25 +149,26 @@ class ProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def get_project(self, project_id: str) -> Project:
+    def get_project(self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None) -> Project:
         """
         Get a project by ID.
 
         Parameters:
-            - project_id: str.
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.projects.get_project(
-            project_id="string",
-        )
+        client.projects.get_project()
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -181,12 +182,16 @@ class ProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def update_existing_project(self, project_id: str, *, name: str) -> Project:
+    def update_existing_project(
+        self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None, name: str
+    ) -> Project:
         """
         Update an existing project.
 
         Parameters:
-            - project_id: str.
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
 
             - name: str.
         ---
@@ -196,13 +201,13 @@ class ProjectsClient:
             token="YOUR_TOKEN",
         )
         client.projects.update_existing_project(
-            project_id="string",
-            name="string",
+            name="name",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
             "PUT",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             json=jsonable_encoder({"name": name}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -217,25 +222,26 @@ class ProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def delete_project(self, project_id: str) -> None:
+    def delete_project(self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None) -> None:
         """
         Delete a project by ID.
 
         Parameters:
-            - project_id: str.
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.projects.delete_project(
-            project_id="string",
-        )
+        client.projects.delete_project()
         """
         _response = self._client_wrapper.httpx_client.request(
             "DELETE",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -284,27 +290,30 @@ class ProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def list_datasets_for_project(self, project_id: str) -> typing.List[EvalDataset]:
+    def list_datasets_for_project(
+        self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None
+    ) -> typing.List[EvalDataset]:
         """
         List eval datasets for a project.
 
         Parameters:
-            - project_id: str.
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.projects.list_datasets_for_project(
-            project_id="string",
-        )
+        client.projects.list_datasets_for_project()
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/eval/dataset"
             ),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -318,12 +327,16 @@ class ProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def create_eval_dataset_for_project(self, project_id: str, *, name: str) -> EvalDataset:
+    def create_eval_dataset_for_project(
+        self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None, name: str
+    ) -> EvalDataset:
         """
         Create a new eval dataset for a project.
 
         Parameters:
-            - project_id: str.
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
 
             - name: str. The name of the EvalDataset.
         ---
@@ -333,8 +346,7 @@ class ProjectsClient:
             token="YOUR_TOKEN",
         )
         client.projects.create_eval_dataset_for_project(
-            project_id="string",
-            name="string",
+            name="name",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -342,6 +354,7 @@ class ProjectsClient:
             urllib.parse.urljoin(
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/eval/dataset"
             ),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             json=jsonable_encoder({"name": name}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -357,27 +370,34 @@ class ProjectsClient:
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
     def create_local_eval_set_for_project(
-        self, project_id: str, *, app_name: str, results: typing.Dict[str, typing.List[LocalEval]]
+        self,
+        project_id: typing.Optional[str],
+        *,
+        organization_id: typing.Optional[str] = None,
+        app_name: str,
+        results: typing.Dict[str, typing.List[LocalEval]],
     ) -> typing.List[LocalEvalResults]:
         """
         Create a new local eval set.
 
         Parameters:
-            - project_id: str.
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
 
             - app_name: str. The name of the app.
 
             - results: typing.Dict[str, typing.List[LocalEval]]. The eval results.
         ---
+        from llama_cloud import LocalEval
         from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
         client.projects.create_local_eval_set_for_project(
-            project_id="string",
-            app_name="string",
-            results={"string": []},
+            app_name="app_name",
+            results={"key": [LocalEval()]},
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -385,6 +405,7 @@ class ProjectsClient:
             urllib.parse.urljoin(
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/localevalset"
             ),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             json=jsonable_encoder({"app_name": app_name, "results": results}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -399,25 +420,28 @@ class ProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def list_local_evals_for_project(self, project_id: str) -> typing.List[LocalEvalResults]:
+    def list_local_evals_for_project(
+        self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None
+    ) -> typing.List[LocalEvalResults]:
         """
         List local eval results for a project.
 
         Parameters:
-            - project_id: str.
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.projects.list_local_evals_for_project(
-            project_id="string",
-        )
+        client.projects.list_local_evals_for_project()
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/localeval"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -431,27 +455,30 @@ class ProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def list_local_eval_sets_for_project(self, project_id: str) -> typing.List[LocalEvalSets]:
+    def list_local_eval_sets_for_project(
+        self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None
+    ) -> typing.List[LocalEvalSets]:
         """
         List local eval sets for a project.
 
         Parameters:
-            - project_id: str.
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.projects.list_local_eval_sets_for_project(
-            project_id="string",
-        )
+        client.projects.list_local_eval_sets_for_project()
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/localevalsets"
             ),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -465,14 +492,18 @@ class ProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def delete_local_eval_set(self, project_id: str, local_eval_set_id: str) -> typing.Any:
+    def delete_local_eval_set(
+        self, project_id: typing.Optional[str], local_eval_set_id: str, *, organization_id: typing.Optional[str] = None
+    ) -> typing.Any:
         """
         Delete a local eval set.
 
         Parameters:
-            - project_id: str.
+            - project_id: typing.Optional[str].
 
             - local_eval_set_id: str.
+
+            - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud
 
@@ -480,8 +511,7 @@ class ProjectsClient:
             token="YOUR_TOKEN",
         )
         client.projects.delete_local_eval_set(
-            project_id="string",
-            local_eval_set_id="string",
+            local_eval_set_id="local_eval_set_id",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -490,6 +520,7 @@ class ProjectsClient:
                 f"{self._client_wrapper.get_base_url()}/",
                 f"api/v1/projects/{project_id}/localevalset/{local_eval_set_id}",
             ),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -503,25 +534,28 @@ class ProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def list_promptmixin_prompts(self, project_id: str) -> typing.List[PromptMixinPrompts]:
+    def list_promptmixin_prompts(
+        self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None
+    ) -> typing.List[PromptMixinPrompts]:
         """
         List PromptMixin prompt sets for a project.
 
         Parameters:
-            - project_id: str.
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.projects.list_promptmixin_prompts(
-            project_id="string",
-        )
+        client.projects.list_promptmixin_prompts()
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/prompts"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -535,33 +569,47 @@ class ProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def create_prompt_mixin_prompts(self, project_id: str, *, request: PromptMixinPrompts) -> PromptMixinPrompts:
+    def create_prompt_mixin_prompts(
+        self,
+        project_id: typing.Optional[str],
+        *,
+        organization_id: typing.Optional[str] = None,
+        request: PromptMixinPrompts,
+    ) -> PromptMixinPrompts:
         """
         Create a new PromptMixin prompt set.
 
         Parameters:
-            - project_id: str.
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
 
             - request: PromptMixinPrompts.
         ---
-        from llama_cloud import PromptMixinPrompts
+        from llama_cloud import PromptMixinPrompts, PromptSpec
         from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
         client.projects.create_prompt_mixin_prompts(
-            project_id="string",
             request=PromptMixinPrompts(
-                project_id="string",
-                name="string",
-                prompts=[],
+                project_id="project_id",
+                name="name",
+                prompts=[
+                    PromptSpec(
+                        prompt_key="prompt_key",
+                        prompt_class="prompt_class",
+                        prompt_type="prompt_type",
+                    )
+                ],
             ),
         )
         """
         _response = self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/prompts"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -577,31 +625,43 @@ class ProjectsClient:
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
     def update_promptmixin_prompts(
-        self, project_id: str, prompt_set_id: str, *, request: PromptMixinPrompts
+        self,
+        project_id: typing.Optional[str],
+        prompt_set_id: str,
+        *,
+        organization_id: typing.Optional[str] = None,
+        request: PromptMixinPrompts,
     ) -> PromptMixinPrompts:
         """
         Update a PromptMixin prompt set.
 
         Parameters:
-            - project_id: str.
+            - project_id: typing.Optional[str].
 
             - prompt_set_id: str.
 
+            - organization_id: typing.Optional[str].
+
             - request: PromptMixinPrompts.
         ---
-        from llama_cloud import PromptMixinPrompts
+        from llama_cloud import PromptMixinPrompts, PromptSpec
        from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
         client.projects.update_promptmixin_prompts(
-            project_id="string",
-            prompt_set_id="string",
+            prompt_set_id="prompt_set_id",
             request=PromptMixinPrompts(
-                project_id="string",
-                name="string",
-                prompts=[],
+                project_id="project_id",
+                name="name",
+                prompts=[
+                    PromptSpec(
+                        prompt_key="prompt_key",
+                        prompt_class="prompt_class",
+                        prompt_type="prompt_type",
+                    )
+                ],
             ),
         )
         """
@@ -610,6 +670,7 @@ class ProjectsClient:
             urllib.parse.urljoin(
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/prompts/{prompt_set_id}"
             ),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -624,14 +685,18 @@ class ProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def delete_prompt_mixin_prompts(self, project_id: str, prompt_set_id: str) -> typing.Any:
+    def delete_prompt_mixin_prompts(
+        self, project_id: typing.Optional[str], prompt_set_id: str, *, organization_id: typing.Optional[str] = None
+    ) -> typing.Any:
         """
         Delete a PromptMixin prompt set.
 
         Parameters:
-            - project_id: str.
+            - project_id: typing.Optional[str].
 
             - prompt_set_id: str.
+
+            - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import LlamaCloud
 
@@ -639,8 +704,7 @@ class ProjectsClient:
             token="YOUR_TOKEN",
         )
         client.projects.delete_prompt_mixin_prompts(
-            project_id="string",
-            prompt_set_id="string",
+            prompt_set_id="prompt_set_id",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -648,6 +712,7 @@ class ProjectsClient:
             urllib.parse.urljoin(
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/prompts/{prompt_set_id}"
             ),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -718,7 +783,7 @@ class AsyncProjectsClient:
         )
         await client.projects.create_project(
             request=ProjectCreate(
-                name="string",
+                name="name",
             ),
         )
         """
@@ -758,7 +823,7 @@ class AsyncProjectsClient:
         )
         await client.projects.upsert_project(
             request=ProjectCreate(
-                name="string",
+                name="name",
             ),
         )
         """
@@ -780,25 +845,28 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def get_project(self, project_id: str) -> Project:
+    async def get_project(
+        self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None
+    ) -> Project:
         """
         Get a project by ID.
 
         Parameters:
-            - project_id: str.
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.projects.get_project(
-            project_id="string",
-        )
+        await client.projects.get_project()
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -812,12 +880,16 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def update_existing_project(self, project_id: str, *, name: str) -> Project:
+    async def update_existing_project(
+        self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None, name: str
+    ) -> Project:
         """
         Update an existing project.
 
         Parameters:
-            - project_id: str.
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
 
             - name: str.
         ---
@@ -827,13 +899,13 @@ class AsyncProjectsClient:
             token="YOUR_TOKEN",
         )
         await client.projects.update_existing_project(
-            project_id="string",
-            name="string",
+            name="name",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
             "PUT",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             json=jsonable_encoder({"name": name}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -848,25 +920,28 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def delete_project(self, project_id: str) -> None:
+    async def delete_project(
+        self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None
+    ) -> None:
         """
         Delete a project by ID.
 
         Parameters:
-            - project_id: str.
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.projects.delete_project(
-            project_id="string",
-        )
+        await client.projects.delete_project()
         """
         _response = await self._client_wrapper.httpx_client.request(
             "DELETE",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -915,27 +990,30 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def list_datasets_for_project(self, project_id: str) -> typing.List[EvalDataset]:
+    async def list_datasets_for_project(
+        self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None
+    ) -> typing.List[EvalDataset]:
         """
         List eval datasets for a project.
 
         Parameters:
-            - project_id: str.
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.projects.list_datasets_for_project(
-            project_id="string",
-        )
+        await client.projects.list_datasets_for_project()
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/eval/dataset"
             ),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -949,12 +1027,16 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def create_eval_dataset_for_project(self, project_id: str, *, name: str) -> EvalDataset:
+    async def create_eval_dataset_for_project(
+        self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None, name: str
+    ) -> EvalDataset:
         """
         Create a new eval dataset for a project.
 
         Parameters:
-            - project_id: str.
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
 
             - name: str. The name of the EvalDataset.
         ---
@@ -964,8 +1046,7 @@ class AsyncProjectsClient:
             token="YOUR_TOKEN",
         )
         await client.projects.create_eval_dataset_for_project(
-            project_id="string",
-            name="string",
+            name="name",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -973,6 +1054,7 @@ class AsyncProjectsClient:
             urllib.parse.urljoin(
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/eval/dataset"
             ),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             json=jsonable_encoder({"name": name}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -988,27 +1070,34 @@ class AsyncProjectsClient:
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
     async def create_local_eval_set_for_project(
-        self, project_id: str, *, app_name: str, results: typing.Dict[str, typing.List[LocalEval]]
+        self,
+        project_id: typing.Optional[str],
+        *,
+        organization_id: typing.Optional[str] = None,
+        app_name: str,
+        results: typing.Dict[str, typing.List[LocalEval]],
     ) -> typing.List[LocalEvalResults]:
         """
         Create a new local eval set.
 
         Parameters:
-            - project_id: str.
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
 
             - app_name: str. The name of the app.
 
             - results: typing.Dict[str, typing.List[LocalEval]]. The eval results.
         ---
+        from llama_cloud import LocalEval
         from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
         await client.projects.create_local_eval_set_for_project(
-            project_id="string",
-            app_name="string",
-            results={"string": []},
+            app_name="app_name",
+            results={"key": [LocalEval()]},
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -1016,6 +1105,7 @@ class AsyncProjectsClient:
             urllib.parse.urljoin(
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/localevalset"
             ),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             json=jsonable_encoder({"app_name": app_name, "results": results}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -1030,25 +1120,28 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def list_local_evals_for_project(self, project_id: str) -> typing.List[LocalEvalResults]:
+    async def list_local_evals_for_project(
+        self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None
+    ) -> typing.List[LocalEvalResults]:
         """
         List local eval results for a project.
 
         Parameters:
-            - project_id: str.
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.projects.list_local_evals_for_project(
-            project_id="string",
-        )
+        await client.projects.list_local_evals_for_project()
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/localeval"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -1062,27 +1155,30 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def list_local_eval_sets_for_project(self, project_id: str) -> typing.List[LocalEvalSets]:
+    async def list_local_eval_sets_for_project(
+        self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None
+    ) -> typing.List[LocalEvalSets]:
         """
         List local eval sets for a project.
 
         Parameters:
-            - project_id: str.
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.projects.list_local_eval_sets_for_project(
-            project_id="string",
-        )
+        await client.projects.list_local_eval_sets_for_project()
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/localevalsets"
             ),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -1096,14 +1192,18 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def delete_local_eval_set(self, project_id: str, local_eval_set_id: str) -> typing.Any:
+    async def delete_local_eval_set(
+        self, project_id: typing.Optional[str], local_eval_set_id: str, *, organization_id: typing.Optional[str] = None
+    ) -> typing.Any:
         """
         Delete a local eval set.
 
         Parameters:
-            - project_id: str.
+            - project_id: typing.Optional[str].
 
             - local_eval_set_id: str.
+
+            - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
@@ -1111,8 +1211,7 @@ class AsyncProjectsClient:
             token="YOUR_TOKEN",
         )
         await client.projects.delete_local_eval_set(
-            project_id="string",
-            local_eval_set_id="string",
+            local_eval_set_id="local_eval_set_id",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -1121,6 +1220,7 @@ class AsyncProjectsClient:
                 f"{self._client_wrapper.get_base_url()}/",
                 f"api/v1/projects/{project_id}/localevalset/{local_eval_set_id}",
             ),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -1134,25 +1234,28 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def list_promptmixin_prompts(self, project_id: str) -> typing.List[PromptMixinPrompts]:
+    async def list_promptmixin_prompts(
+        self, project_id: typing.Optional[str], *, organization_id: typing.Optional[str] = None
+    ) -> typing.List[PromptMixinPrompts]:
         """
         List PromptMixin prompt sets for a project.
 
         Parameters:
-            - project_id: str.
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.projects.list_promptmixin_prompts(
-            project_id="string",
-        )
+        await client.projects.list_promptmixin_prompts()
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/prompts"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -1166,33 +1269,47 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def create_prompt_mixin_prompts(self, project_id: str, *, request: PromptMixinPrompts) -> PromptMixinPrompts:
+    async def create_prompt_mixin_prompts(
+        self,
+        project_id: typing.Optional[str],
+        *,
+        organization_id: typing.Optional[str] = None,
+        request: PromptMixinPrompts,
+    ) -> PromptMixinPrompts:
         """
         Create a new PromptMixin prompt set.
 
         Parameters:
-            - project_id: str.
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
 
             - request: PromptMixinPrompts.
         ---
-        from llama_cloud import PromptMixinPrompts
+        from llama_cloud import PromptMixinPrompts, PromptSpec
         from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
         await client.projects.create_prompt_mixin_prompts(
-            project_id="string",
             request=PromptMixinPrompts(
-                project_id="string",
-                name="string",
-                prompts=[],
+                project_id="project_id",
+                name="name",
+                prompts=[
+                    PromptSpec(
+                        prompt_key="prompt_key",
+                        prompt_class="prompt_class",
+                        prompt_type="prompt_type",
+                    )
+                ],
             ),
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/prompts"),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -1208,31 +1325,43 @@ class AsyncProjectsClient:
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
     async def update_promptmixin_prompts(
-        self, project_id: str, prompt_set_id: str, *, request: PromptMixinPrompts
+        self,
+        project_id: typing.Optional[str],
+        prompt_set_id: str,
+        *,
+        organization_id: typing.Optional[str] = None,
+        request: PromptMixinPrompts,
    ) -> PromptMixinPrompts:
         """
         Update a PromptMixin prompt set.
 
         Parameters:
-            - project_id: str.
+            - project_id: typing.Optional[str].
 
             - prompt_set_id: str.
 
+            - organization_id: typing.Optional[str].
+
             - request: PromptMixinPrompts.
         ---
-        from llama_cloud import PromptMixinPrompts
+        from llama_cloud import PromptMixinPrompts, PromptSpec
         from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
         await client.projects.update_promptmixin_prompts(
-            project_id="string",
-            prompt_set_id="string",
+            prompt_set_id="prompt_set_id",
             request=PromptMixinPrompts(
-                project_id="string",
-                name="string",
-                prompts=[],
+                project_id="project_id",
+                name="name",
+                prompts=[
+                    PromptSpec(
+                        prompt_key="prompt_key",
+                        prompt_class="prompt_class",
+                        prompt_type="prompt_type",
+                    )
+                ],
             ),
         )
         """
@@ -1241,6 +1370,7 @@ class AsyncProjectsClient:
             urllib.parse.urljoin(
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/prompts/{prompt_set_id}"
             ),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -1255,14 +1385,18 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def delete_prompt_mixin_prompts(self, project_id: str, prompt_set_id: str) -> typing.Any:
+    async def delete_prompt_mixin_prompts(
+        self, project_id: typing.Optional[str], prompt_set_id: str, *, organization_id: typing.Optional[str] = None
+    ) -> typing.Any:
         """
         Delete a PromptMixin prompt set.
 
         Parameters:
-            - project_id: str.
+            - project_id: typing.Optional[str].
 
             - prompt_set_id: str.
+
+            - organization_id: typing.Optional[str].
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
@@ -1270,8 +1404,7 @@ class AsyncProjectsClient:
             token="YOUR_TOKEN",
        )
         await client.projects.delete_prompt_mixin_prompts(
-            project_id="string",
-            prompt_set_id="string",
+            prompt_set_id="prompt_set_id",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -1279,6 +1412,7 @@ class AsyncProjectsClient:
             urllib.parse.urljoin(
                 f"{self._client_wrapper.get_base_url()}/", f"api/v1/projects/{project_id}/prompts/{prompt_set_id}"
            ),
+            params=remove_none_from_dict({"organization_id": organization_id}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
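Every touched request also gains params=remove_none_from_dict({"organization_id": organization_id}). That helper lives in the SDK's core utilities; its use here implies a None-stripping filter over the query-parameter dict. A sketch of the implied behavior, not the package's actual implementation:

import typing

def remove_none_from_dict(
    original: typing.Dict[str, typing.Optional[typing.Any]]
) -> typing.Dict[str, typing.Any]:
    # Drop None-valued entries so an unset organization_id never
    # reaches the query string.
    return {key: value for key, value in original.items() if value is not None}

assert remove_none_from_dict({"organization_id": None}) == {}
assert remove_none_from_dict({"organization_id": "org_456"}) == {
    "organization_id": "org_456"
}

This is why callers that never pass organization_id send exactly the same requests as they did under 0.1.5.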