llama-cloud 0.0.8__py3-none-any.whl → 0.0.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of llama-cloud might be problematic.

Files changed (40)
  1. llama_cloud/__init__.py +22 -0
  2. llama_cloud/client.py +3 -0
  3. llama_cloud/resources/__init__.py +13 -1
  4. llama_cloud/resources/data_sinks/client.py +40 -8
  5. llama_cloud/resources/data_sources/client.py +48 -12
  6. llama_cloud/resources/data_sources/types/data_source_update_component_one.py +4 -0
  7. llama_cloud/resources/extraction/client.py +55 -38
  8. llama_cloud/resources/organizations/__init__.py +2 -0
  9. llama_cloud/resources/organizations/client.py +867 -0
  10. llama_cloud/resources/parsing/client.py +104 -0
  11. llama_cloud/resources/pipelines/client.py +358 -24
  12. llama_cloud/resources/projects/client.py +28 -8
  13. llama_cloud/types/__init__.py +20 -0
  14. llama_cloud/types/chat_data.py +38 -0
  15. llama_cloud/types/cloud_azure_ai_search_vector_store.py +1 -1
  16. llama_cloud/types/cloud_confluence_data_source.py +45 -0
  17. llama_cloud/types/cloud_jira_data_source.py +43 -0
  18. llama_cloud/types/cloud_sharepoint_data_source.py +1 -0
  19. llama_cloud/types/configurable_data_source_names.py +8 -0
  20. llama_cloud/types/data_source_component_one.py +4 -0
  21. llama_cloud/types/data_source_create_component_one.py +4 -0
  22. llama_cloud/types/eval_dataset_job_record.py +1 -0
  23. llama_cloud/types/extraction_job.py +35 -0
  24. llama_cloud/types/extraction_schema.py +1 -2
  25. llama_cloud/types/llama_parse_parameters.py +5 -0
  26. llama_cloud/types/organization.py +38 -0
  27. llama_cloud/types/organization_create.py +35 -0
  28. llama_cloud/types/pipeline.py +0 -3
  29. llama_cloud/types/pipeline_create.py +0 -3
  30. llama_cloud/types/pipeline_data_source_component_one.py +4 -0
  31. llama_cloud/types/preset_retrieval_params.py +5 -0
  32. llama_cloud/types/project.py +1 -1
  33. llama_cloud/types/retrieval_mode.py +29 -0
  34. llama_cloud/types/user_organization.py +49 -0
  35. llama_cloud/types/user_organization_create.py +36 -0
  36. llama_cloud/types/user_organization_delete.py +36 -0
  37. {llama_cloud-0.0.8.dist-info → llama_cloud-0.0.10.dist-info}/METADATA +2 -1
  38. {llama_cloud-0.0.8.dist-info → llama_cloud-0.0.10.dist-info}/RECORD +40 -28
  39. {llama_cloud-0.0.8.dist-info → llama_cloud-0.0.10.dist-info}/WHEEL +1 -1
  40. {llama_cloud-0.0.8.dist-info → llama_cloud-0.0.10.dist-info}/LICENSE +0 -0
llama_cloud/resources/extraction/client.py

@@ -9,6 +9,7 @@ from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
 from ...core.jsonable_encoder import jsonable_encoder
 from ...core.remove_none_from_dict import remove_none_from_dict
 from ...errors.unprocessable_entity_error import UnprocessableEntityError
+from ...types.extraction_job import ExtractionJob
 from ...types.extraction_result import ExtractionResult
 from ...types.extraction_schema import ExtractionSchema
 from ...types.http_validation_error import HttpValidationError
@@ -31,17 +32,25 @@ class ExtractionClient:
         self._client_wrapper = client_wrapper

     def infer_schema(
-        self, *, name: str, project_id: typing.Optional[str] = OMIT, file_ids: typing.List[str], openai_api_key: str
+        self,
+        *,
+        schema_id: typing.Optional[str] = OMIT,
+        name: str,
+        project_id: typing.Optional[str] = OMIT,
+        file_ids: typing.List[str],
+        stream: typing.Optional[bool] = OMIT,
     ) -> ExtractionSchema:
         """
         Parameters:
+            - schema_id: typing.Optional[str]. The ID of a schema to update with the new schema
+
             - name: str. The name of the extraction schema

             - project_id: typing.Optional[str]. The ID of the project that the extraction schema belongs to

             - file_ids: typing.List[str]. The IDs of the files that the extraction schema contains

-            - openai_api_key: str. The API key for the OpenAI API
+            - stream: typing.Optional[bool]. Whether to stream the results of the extraction schema
         ---
         from llama_cloud.client import LlamaCloud

@@ -51,12 +60,15 @@ class ExtractionClient:
         client.extraction.infer_schema(
             name="string",
             file_ids=[],
-            openai_api_key="string",
         )
         """
-        _request: typing.Dict[str, typing.Any] = {"name": name, "file_ids": file_ids, "openai_api_key": openai_api_key}
+        _request: typing.Dict[str, typing.Any] = {"name": name, "file_ids": file_ids}
+        if schema_id is not OMIT:
+            _request["schema_id"] = schema_id
         if project_id is not OMIT:
             _request["project_id"] = project_id
+        if stream is not OMIT:
+            _request["stream"] = stream
         _response = self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/schemas/infer"),
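Net effect: `infer_schema` no longer takes `openai_api_key`, and gains optional `schema_id` and `stream` parameters. A minimal sketch of the 0.0.10 call (the name and IDs below are placeholders):

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    # 0.0.8 required openai_api_key="..."; 0.0.10 no longer accepts it.
    schema = client.extraction.infer_schema(
        name="invoices",                     # hypothetical schema name
        file_ids=["file-abc", "file-def"],   # hypothetical file IDs
        schema_id="schema-123",              # optional: update this schema in place
        stream=False,                        # optional: stream inference results
    )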
@@ -138,15 +150,12 @@ class ExtractionClient:
         schema_id: str,
         *,
         data_schema: typing.Optional[typing.Dict[str, ExtractionSchemaUpdateDataSchemaValue]] = OMIT,
-        openai_api_key: typing.Optional[str] = OMIT,
     ) -> ExtractionSchema:
         """
         Parameters:
             - schema_id: str.

             - data_schema: typing.Optional[typing.Dict[str, ExtractionSchemaUpdateDataSchemaValue]]. The schema of the data
-
-            - openai_api_key: typing.Optional[str]. The API key for the OpenAI API
         ---
         from llama_cloud.client import LlamaCloud

@@ -160,8 +169,6 @@ class ExtractionClient:
         _request: typing.Dict[str, typing.Any] = {}
         if data_schema is not OMIT:
             _request["data_schema"] = data_schema
-        if openai_api_key is not OMIT:
-            _request["openai_api_key"] = openai_api_key
         _response = self._client_wrapper.httpx_client.request(
             "PUT",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/schemas/{schema_id}"),
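The schema-update endpoint loses the key as well. A sketch, assuming the method wrapping this PUT is named `update_schema` (the method name sits outside this hunk, so it is a guess); only the request-body change is confirmed by the diff:

    # `update_schema` is an assumed method name; the confirmed change is that
    # openai_api_key is no longer sent in the PUT body.
    updated = client.extraction.update_schema(
        "schema-123",                               # hypothetical schema ID
        data_schema={"total": {"type": "number"}},  # hypothetical field definition
    )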
@@ -179,17 +186,19 @@ class ExtractionClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def list_jobs(self, *, schema_id: typing.Optional[str] = None) -> typing.List[ExtractionResult]:
+    def list_jobs(self, *, schema_id: str) -> typing.List[ExtractionJob]:
         """
         Parameters:
-            - schema_id: typing.Optional[str].
+            - schema_id: str.
         ---
         from llama_cloud.client import LlamaCloud

         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.extraction.list_jobs()
+        client.extraction.list_jobs(
+            schema_id="string",
+        )
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
@@ -199,7 +208,7 @@ class ExtractionClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.List[ExtractionResult], _response.json())  # type: ignore
+            return pydantic.parse_obj_as(typing.List[ExtractionJob], _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
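`list_jobs` changes twice over: `schema_id` goes from optional to required, and the elements become `ExtractionJob` rather than `ExtractionResult`. A sketch of the migrated call, reusing the client from the sketch above:

    # 0.0.8: client.extraction.list_jobs()   # schema_id was optional
    # 0.0.10: schema_id is required.
    jobs = client.extraction.list_jobs(schema_id="schema-123")  # hypothetical ID
    for job in jobs:
        print(job)  # items are ExtractionJob models; see the new types/extraction_job.py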
@@ -208,7 +217,7 @@ class ExtractionClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def run_job(self, *, schema_id: str, file_id: str) -> ExtractionResult:
+    def run_job(self, *, schema_id: str, file_id: str) -> ExtractionJob:
         """
         Parameters:
             - schema_id: str. The id of the schema
@@ -233,7 +242,7 @@ class ExtractionClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(ExtractionResult, _response.json())  # type: ignore
+            return pydantic.parse_obj_as(ExtractionJob, _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
@@ -242,7 +251,7 @@ class ExtractionClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def get_job(self, job_id: str) -> ExtractionResult:
+    def get_job(self, job_id: str) -> ExtractionJob:
         """
         Parameters:
             - job_id: str.
@@ -263,7 +272,7 @@ class ExtractionClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(ExtractionResult, _response.json())  # type: ignore
+            return pydantic.parse_obj_as(ExtractionJob, _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
@@ -272,7 +281,7 @@ class ExtractionClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def run_jobs_in_batch(self, *, schema_id: str, file_ids: typing.List[str]) -> typing.List[ExtractionResult]:
+    def run_jobs_in_batch(self, *, schema_id: str, file_ids: typing.List[str]) -> typing.List[ExtractionJob]:
         """
         Parameters:
             - schema_id: str. The id of the schema
@@ -297,7 +306,7 @@ class ExtractionClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.List[ExtractionResult], _response.json())  # type: ignore
+            return pydantic.parse_obj_as(typing.List[ExtractionJob], _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
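With `run_job`, `get_job`, and `run_jobs_in_batch` all returning `ExtractionJob` handles instead of `ExtractionResult`s, the flow becomes submit-then-poll. A sketch, assuming `ExtractionJob` exposes `id` and `status` fields (its definition lives in the new `types/extraction_job.py`, which is not shown in this diff):

    import time

    job = client.extraction.run_job(schema_id="schema-123", file_id="file-abc")

    # Poll until the job settles; `id`, `status`, and the status values are
    # assumed names -- check types/extraction_job.py for the real fields.
    while job.status in ("PENDING", "RUNNING"):
        time.sleep(2)
        job = client.extraction.get_job(job.id)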
@@ -342,17 +351,25 @@ class AsyncExtractionClient:
         self._client_wrapper = client_wrapper

     async def infer_schema(
-        self, *, name: str, project_id: typing.Optional[str] = OMIT, file_ids: typing.List[str], openai_api_key: str
+        self,
+        *,
+        schema_id: typing.Optional[str] = OMIT,
+        name: str,
+        project_id: typing.Optional[str] = OMIT,
+        file_ids: typing.List[str],
+        stream: typing.Optional[bool] = OMIT,
     ) -> ExtractionSchema:
         """
         Parameters:
+            - schema_id: typing.Optional[str]. The ID of a schema to update with the new schema
+
             - name: str. The name of the extraction schema

             - project_id: typing.Optional[str]. The ID of the project that the extraction schema belongs to

             - file_ids: typing.List[str]. The IDs of the files that the extraction schema contains

-            - openai_api_key: str. The API key for the OpenAI API
+            - stream: typing.Optional[bool]. Whether to stream the results of the extraction schema
         ---
         from llama_cloud.client import AsyncLlamaCloud

@@ -362,12 +379,15 @@ class AsyncExtractionClient:
         await client.extraction.infer_schema(
             name="string",
             file_ids=[],
-            openai_api_key="string",
         )
         """
-        _request: typing.Dict[str, typing.Any] = {"name": name, "file_ids": file_ids, "openai_api_key": openai_api_key}
+        _request: typing.Dict[str, typing.Any] = {"name": name, "file_ids": file_ids}
+        if schema_id is not OMIT:
+            _request["schema_id"] = schema_id
         if project_id is not OMIT:
             _request["project_id"] = project_id
+        if stream is not OMIT:
+            _request["stream"] = stream
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/schemas/infer"),
@@ -449,15 +469,12 @@ class AsyncExtractionClient:
         schema_id: str,
         *,
         data_schema: typing.Optional[typing.Dict[str, ExtractionSchemaUpdateDataSchemaValue]] = OMIT,
-        openai_api_key: typing.Optional[str] = OMIT,
     ) -> ExtractionSchema:
         """
         Parameters:
             - schema_id: str.

             - data_schema: typing.Optional[typing.Dict[str, ExtractionSchemaUpdateDataSchemaValue]]. The schema of the data
-
-            - openai_api_key: typing.Optional[str]. The API key for the OpenAI API
         ---
         from llama_cloud.client import AsyncLlamaCloud

@@ -471,8 +488,6 @@ class AsyncExtractionClient:
         _request: typing.Dict[str, typing.Any] = {}
         if data_schema is not OMIT:
             _request["data_schema"] = data_schema
-        if openai_api_key is not OMIT:
-            _request["openai_api_key"] = openai_api_key
         _response = await self._client_wrapper.httpx_client.request(
             "PUT",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/schemas/{schema_id}"),
@@ -490,17 +505,19 @@ class AsyncExtractionClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def list_jobs(self, *, schema_id: typing.Optional[str] = None) -> typing.List[ExtractionResult]:
+    async def list_jobs(self, *, schema_id: str) -> typing.List[ExtractionJob]:
         """
         Parameters:
-            - schema_id: typing.Optional[str].
+            - schema_id: str.
         ---
         from llama_cloud.client import AsyncLlamaCloud

         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.extraction.list_jobs()
+        await client.extraction.list_jobs(
+            schema_id="string",
+        )
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
@@ -510,7 +527,7 @@ class AsyncExtractionClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.List[ExtractionResult], _response.json())  # type: ignore
+            return pydantic.parse_obj_as(typing.List[ExtractionJob], _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
@@ -519,7 +536,7 @@ class AsyncExtractionClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def run_job(self, *, schema_id: str, file_id: str) -> ExtractionResult:
+    async def run_job(self, *, schema_id: str, file_id: str) -> ExtractionJob:
         """
         Parameters:
             - schema_id: str. The id of the schema
@@ -544,7 +561,7 @@ class AsyncExtractionClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(ExtractionResult, _response.json())  # type: ignore
+            return pydantic.parse_obj_as(ExtractionJob, _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
@@ -553,7 +570,7 @@ class AsyncExtractionClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def get_job(self, job_id: str) -> ExtractionResult:
+    async def get_job(self, job_id: str) -> ExtractionJob:
         """
         Parameters:
             - job_id: str.
@@ -574,7 +591,7 @@ class AsyncExtractionClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(ExtractionResult, _response.json())  # type: ignore
+            return pydantic.parse_obj_as(ExtractionJob, _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
@@ -583,7 +600,7 @@ class AsyncExtractionClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def run_jobs_in_batch(self, *, schema_id: str, file_ids: typing.List[str]) -> typing.List[ExtractionResult]:
+    async def run_jobs_in_batch(self, *, schema_id: str, file_ids: typing.List[str]) -> typing.List[ExtractionJob]:
         """
         Parameters:
             - schema_id: str. The id of the schema
@@ -608,7 +625,7 @@ class AsyncExtractionClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.List[ExtractionResult], _response.json())  # type: ignore
+            return pydantic.parse_obj_as(typing.List[ExtractionJob], _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
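The async client mirrors each sync change one for one. A sketch of the same migration with `AsyncLlamaCloud` (IDs are placeholders; `schema.id` assumes `ExtractionSchema` carries an `id` field):

    import asyncio
    from llama_cloud.client import AsyncLlamaCloud

    async def main() -> None:
        client = AsyncLlamaCloud(token="YOUR_TOKEN")
        schema = await client.extraction.infer_schema(
            name="invoices",        # hypothetical name
            file_ids=["file-abc"],  # hypothetical file ID
        )
        jobs = await client.extraction.list_jobs(schema_id=schema.id)  # `id` is an assumed field

    asyncio.run(main())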
llama_cloud/resources/organizations/__init__.py (new file, per the +2 -0 entry above)

@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+