llama-cloud 0.1.17__py3-none-any.whl → 0.1.18__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Potentially problematic release.
This version of llama-cloud might be problematic.
- llama_cloud/__init__.py +0 -2
- llama_cloud/resources/chat_apps/client.py +4 -4
- llama_cloud/resources/llama_extract/client.py +0 -4
- llama_cloud/resources/parsing/client.py +8 -65
- llama_cloud/resources/pipelines/client.py +2 -2
- llama_cloud/resources/projects/client.py +70 -0
- llama_cloud/types/__init__.py +0 -2
- llama_cloud/types/extract_job_create.py +2 -1
- llama_cloud/types/llama_extract_settings.py +2 -2
- llama_cloud/types/llama_parse_parameters.py +3 -2
- llama_cloud/types/supported_llm_model_names.py +0 -12
- {llama_cloud-0.1.17.dist-info → llama_cloud-0.1.18.dist-info}/METADATA +1 -1
- {llama_cloud-0.1.17.dist-info → llama_cloud-0.1.18.dist-info}/RECORD +15 -16
- llama_cloud/types/parsing_usage.py +0 -32
- {llama_cloud-0.1.17.dist-info → llama_cloud-0.1.18.dist-info}/LICENSE +0 -0
- {llama_cloud-0.1.17.dist-info → llama_cloud-0.1.18.dist-info}/WHEEL +0 -0
llama_cloud/__init__.py
CHANGED
@@ -199,7 +199,6 @@ from .types import (
     ParsingJobStructuredResult,
     ParsingJobTextResult,
     ParsingMode,
-    ParsingUsage,
     PartitionNames,
     Permission,
     Pipeline,
@@ -570,7 +569,6 @@ __all__ = [
     "ParsingJobStructuredResult",
     "ParsingJobTextResult",
     "ParsingMode",
-    "ParsingUsage",
     "PartitionNames",
     "Permission",
     "Pipeline",
llama_cloud/resources/chat_apps/client.py
CHANGED
@@ -108,7 +108,7 @@ class ChatAppsClient:
         name="string",
         retriever_id="string",
         llm_config=LlmParameters(
-            model_name=SupportedLlmModelNames.
+            model_name=SupportedLlmModelNames.GPT_4_O,
         ),
         retrieval_config=PresetCompositeRetrievalParams(
             mode=CompositeRetrievalMode.ROUTING,
@@ -217,7 +217,7 @@ class ChatAppsClient:
     client.chat_apps.update_chat_app(
         id="string",
         llm_config=LlmParameters(
-            model_name=SupportedLlmModelNames.
+            model_name=SupportedLlmModelNames.GPT_4_O,
         ),
         retrieval_config=PresetCompositeRetrievalParams(
             mode=CompositeRetrievalMode.ROUTING,
@@ -401,7 +401,7 @@ class AsyncChatAppsClient:
         name="string",
         retriever_id="string",
         llm_config=LlmParameters(
-            model_name=SupportedLlmModelNames.
+            model_name=SupportedLlmModelNames.GPT_4_O,
         ),
         retrieval_config=PresetCompositeRetrievalParams(
             mode=CompositeRetrievalMode.ROUTING,
@@ -510,7 +510,7 @@ class AsyncChatAppsClient:
     await client.chat_apps.update_chat_app(
         id="string",
         llm_config=LlmParameters(
-            model_name=SupportedLlmModelNames.
+            model_name=SupportedLlmModelNames.GPT_4_O,
         ),
         retrieval_config=PresetCompositeRetrievalParams(
             mode=CompositeRetrievalMode.ROUTING,
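
Every docstring example in this file now pins model_name to an explicit surviving enum member; the old value was truncated by extraction here, but the enum cleanup further down removes GPT_3_5_TURBO, GPT_4 and GPT_4_TURBO. A minimal sketch of the updated call shape, assuming these classes are re-exported from the package root the way other types in this diff are:

from llama_cloud import (
    CompositeRetrievalMode,
    LlmParameters,
    PresetCompositeRetrievalParams,
    SupportedLlmModelNames,
)
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")
client.chat_apps.update_chat_app(
    id="string",  # Fern placeholder, not a real chat app id
    llm_config=LlmParameters(
        # pin a member that still exists in 0.1.18
        model_name=SupportedLlmModelNames.GPT_4_O,
    ),
    retrieval_config=PresetCompositeRetrievalParams(
        mode=CompositeRetrievalMode.ROUTING,
    ),
)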
llama_cloud/resources/llama_extract/client.py
CHANGED
@@ -357,7 +357,6 @@ class LlamaExtractClient:
     client.llama_extract.run_job(
         request=ExtractJobCreate(
             extraction_agent_id="string",
-            file_id="string",
             config_override=ExtractConfig(
                 extraction_target=ExtractTarget.PER_DOC,
                 extraction_mode=ExtractMode.FAST,
@@ -446,7 +445,6 @@ class LlamaExtractClient:
     client.llama_extract.run_job_test_user(
         job_create=ExtractJobCreate(
             extraction_agent_id="string",
-            file_id="string",
             config_override=ExtractConfig(
                 extraction_target=ExtractTarget.PER_DOC,
                 extraction_mode=ExtractMode.FAST,
@@ -1043,7 +1041,6 @@ class AsyncLlamaExtractClient:
     await client.llama_extract.run_job(
         request=ExtractJobCreate(
             extraction_agent_id="string",
-            file_id="string",
             config_override=ExtractConfig(
                 extraction_target=ExtractTarget.PER_DOC,
                 extraction_mode=ExtractMode.FAST,
@@ -1132,7 +1129,6 @@ class AsyncLlamaExtractClient:
     await client.llama_extract.run_job_test_user(
         job_create=ExtractJobCreate(
             extraction_agent_id="string",
-            file_id="string",
             config_override=ExtractConfig(
                 extraction_target=ExtractTarget.PER_DOC,
                 extraction_mode=ExtractMode.FAST,
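
All four run_job examples drop the file_id line because ExtractJobCreate no longer requires it (see the types/extract_job_create.py hunk below). A hedged sketch of the updated synchronous call, keeping Fern's "string" placeholders and assuming these types are importable from the package root:

from llama_cloud import ExtractConfig, ExtractJobCreate, ExtractMode, ExtractTarget
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")
client.llama_extract.run_job(
    request=ExtractJobCreate(
        extraction_agent_id="string",
        # file_id is optional as of 0.1.18; pass it only when referencing an uploaded file
        config_override=ExtractConfig(
            extraction_target=ExtractTarget.PER_DOC,
            extraction_mode=ExtractMode.FAST,
        ),
    ),
)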
llama_cloud/resources/parsing/client.py
CHANGED
@@ -19,7 +19,6 @@ from ...types.parsing_job_markdown_result import ParsingJobMarkdownResult
 from ...types.parsing_job_structured_result import ParsingJobStructuredResult
 from ...types.parsing_job_text_result import ParsingJobTextResult
 from ...types.parsing_mode import ParsingMode
-from ...types.parsing_usage import ParsingUsage
 from ...types.presigned_url import PresignedUrl
 
 try:
@@ -212,6 +211,7 @@ class ParsingClient:
         bbox_left: float,
         bbox_right: float,
         bbox_top: float,
+        compact_markdown_table: bool,
         disable_ocr: bool,
         disable_reconstruction: bool,
         disable_image_extraction: bool,
@@ -314,6 +314,8 @@ class ParsingClient:
 
         - bbox_top: float.
 
+        - compact_markdown_table: bool.
+
         - disable_ocr: bool.
 
         - disable_reconstruction: bool.
@@ -452,6 +454,7 @@ class ParsingClient:
             "bbox_left": bbox_left,
             "bbox_right": bbox_right,
             "bbox_top": bbox_top,
+            "compact_markdown_table": compact_markdown_table,
             "disable_ocr": disable_ocr,
             "disable_reconstruction": disable_reconstruction,
             "disable_image_extraction": disable_image_extraction,
@@ -536,38 +539,6 @@ class ParsingClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def usage(self, *, organization_id: typing.Optional[str] = None) -> ParsingUsage:
-        """
-        DEPRECATED: use either /organizations/{organization_id}/usage or /projects/{project_id}/usage instead
-        Get parsing usage for user
-
-        Parameters:
-            - organization_id: typing.Optional[str].
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.parsing.usage()
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/parsing/usage"),
-            params=remove_none_from_dict({"organization_id": organization_id}),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(ParsingUsage, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
     def get_job(self, job_id: str) -> ParsingJob:
         """
         Get a job by id
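
The deprecated usage() method and its /api/v1/parsing/usage endpoint are removed outright; its old docstring pointed callers at the organization- and project-scoped usage routes instead. A hedged migration sketch using get_project_usage, whose signature partially appears in the projects hunk below; this assumes project_id is the only argument needed and does not show the return type, neither of which this diff confirms:

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# 0.1.17: usage = client.parsing.usage()
# 0.1.18: scope the same question to a project
usage = client.projects.get_project_usage(project_id="YOUR_PROJECT_ID")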
@@ -1192,6 +1163,7 @@ class AsyncParsingClient:
         bbox_left: float,
         bbox_right: float,
         bbox_top: float,
+        compact_markdown_table: bool,
         disable_ocr: bool,
         disable_reconstruction: bool,
         disable_image_extraction: bool,
@@ -1294,6 +1266,8 @@ class AsyncParsingClient:
 
         - bbox_top: float.
 
+        - compact_markdown_table: bool.
+
         - disable_ocr: bool.
 
         - disable_reconstruction: bool.
@@ -1432,6 +1406,7 @@ class AsyncParsingClient:
             "bbox_left": bbox_left,
             "bbox_right": bbox_right,
             "bbox_top": bbox_top,
+            "compact_markdown_table": compact_markdown_table,
             "disable_ocr": disable_ocr,
             "disable_reconstruction": disable_reconstruction,
             "disable_image_extraction": disable_image_extraction,
@@ -1516,38 +1491,6 @@ class AsyncParsingClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def usage(self, *, organization_id: typing.Optional[str] = None) -> ParsingUsage:
-        """
-        DEPRECATED: use either /organizations/{organization_id}/usage or /projects/{project_id}/usage instead
-        Get parsing usage for user
-
-        Parameters:
-            - organization_id: typing.Optional[str].
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.parsing.usage()
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/parsing/usage"),
-            params=remove_none_from_dict({"organization_id": organization_id}),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(ParsingUsage, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
     async def get_job(self, job_id: str) -> ParsingJob:
         """
         Get a job by id
llama_cloud/resources/pipelines/client.py
CHANGED
@@ -1218,7 +1218,7 @@ class PipelinesClient:
             retrieval_mode=RetrievalMode.CHUNKS,
         ),
         llm_parameters=LlmParameters(
-            model_name=SupportedLlmModelNames.
+            model_name=SupportedLlmModelNames.GPT_4_O,
         ),
     ),
 )
@@ -2775,7 +2775,7 @@ class AsyncPipelinesClient:
             retrieval_mode=RetrievalMode.CHUNKS,
         ),
         llm_parameters=LlmParameters(
-            model_name=SupportedLlmModelNames.
+            model_name=SupportedLlmModelNames.GPT_4_O,
         ),
     ),
 )
llama_cloud/resources/projects/client.py
CHANGED
@@ -250,6 +250,41 @@ class ProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    def get_current_project(
+        self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+    ) -> Project:
+        """
+        Get the current project.
+
+        Parameters:
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.projects.get_current_project()
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/projects/current"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(Project, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def get_project_usage(
         self,
         project_id: typing.Optional[str],
@@ -522,6 +557,41 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    async def get_current_project(
+        self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+    ) -> Project:
+        """
+        Get the current project.
+
+        Parameters:
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.projects.get_current_project()
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/projects/current"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(Project, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def get_project_usage(
         self,
         project_id: typing.Optional[str],
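
get_current_project is the one genuinely new endpoint in this release: a GET against api/v1/projects/current with both scoping parameters optional. A short usage sketch grounded in the generated docstring above; the attributes available on the returned Project model are not shown in this diff, so only the call itself is confirmed:

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# With no arguments, the server decides which project is "current" for the token
project = client.projects.get_current_project()

# Optionally disambiguate by organization and/or project id
project = client.projects.get_current_project(organization_id="YOUR_ORG_ID")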
llama_cloud/types/__init__.py
CHANGED
@@ -208,7 +208,6 @@ from .parsing_job_markdown_result import ParsingJobMarkdownResult
 from .parsing_job_structured_result import ParsingJobStructuredResult
 from .parsing_job_text_result import ParsingJobTextResult
 from .parsing_mode import ParsingMode
-from .parsing_usage import ParsingUsage
 from .partition_names import PartitionNames
 from .permission import Permission
 from .pipeline import Pipeline
@@ -527,7 +526,6 @@ __all__ = [
     "ParsingJobStructuredResult",
     "ParsingJobTextResult",
     "ParsingMode",
-    "ParsingUsage",
     "PartitionNames",
     "Permission",
     "Pipeline",
llama_cloud/types/extract_job_create.py
CHANGED
@@ -22,7 +22,8 @@ class ExtractJobCreate(pydantic.BaseModel):
     """
 
     extraction_agent_id: str = pydantic.Field(description="The id of the extraction agent")
-    file_id: str
+    file_id: typing.Optional[str]
+    file: typing.Optional[str]
     data_schema_override: typing.Optional[ExtractJobCreateDataSchemaOverride] = pydantic.Field(
         description="The data schema to override the extraction agent's data schema with"
    )
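
This is the model change behind the example updates above: file_id loosens to Optional[str] and a sibling file field (also Optional[str]) appears. A hedged construction sketch; whether the API requires exactly one of the two is not stated in this diff, so treat the either/or framing as an assumption:

from llama_cloud import ExtractJobCreate

# Reference a previously uploaded file, as in 0.1.17
job_by_id = ExtractJobCreate(extraction_agent_id="string", file_id="existing-file-id")

# Or use the new `file` field (typed as str here) and omit file_id entirely
job_by_file = ExtractJobCreate(extraction_agent_id="string", file="...")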
llama_cloud/types/llama_extract_settings.py
CHANGED
@@ -26,8 +26,8 @@ class LlamaExtractSettings(pydantic.BaseModel):
     max_file_size: typing.Optional[int] = pydantic.Field(
         description="The maximum file size (in bytes) allowed for the document."
     )
-
-        description="The maximum
+    max_file_size_ui: typing.Optional[int] = pydantic.Field(
+        description="The maximum file size (in bytes) allowed for the document."
     )
     max_pages: typing.Optional[int] = pydantic.Field(
         description="The maximum number of pages allowed for the document."
llama_cloud/types/llama_parse_parameters.py
CHANGED
@@ -26,6 +26,7 @@ class LlamaParseParameters(pydantic.BaseModel):
     disable_ocr: typing.Optional[bool]
     annotate_links: typing.Optional[bool]
     adaptive_long_table: typing.Optional[bool]
+    compact_markdown_table: typing.Optional[bool]
     disable_reconstruction: typing.Optional[bool]
     disable_image_extraction: typing.Optional[bool]
     invalidate_cache: typing.Optional[bool]
@@ -61,9 +62,9 @@ class LlamaParseParameters(pydantic.BaseModel):
     is_formatting_instruction: typing.Optional[bool]
     premium_mode: typing.Optional[bool]
     continuous_mode: typing.Optional[bool]
-
+    input_s_3_path: typing.Optional[str] = pydantic.Field(alias="input_s3_path")
     input_s_3_region: typing.Optional[str] = pydantic.Field(alias="input_s3_region")
-
+    output_s_3_path_prefix: typing.Optional[str] = pydantic.Field(alias="output_s3_path_prefix")
     output_s_3_region: typing.Optional[str] = pydantic.Field(alias="output_s3_region")
     project_id: typing.Optional[str]
     azure_openai_deployment_name: typing.Optional[str]
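
Two things change here: compact_markdown_table joins the optional parse flags, and the input/output S3 path fields gain explicit pydantic aliases so the oddly split field names (input_s_3_path) serialize as the wire names the API expects (input_s3_path). A hedged sketch; it assumes LlamaParseParameters is importable from the package root and relies on pydantic v1 populating fields by alias, which is its default:

from llama_cloud import LlamaParseParameters

params = LlamaParseParameters(
    compact_markdown_table=True,  # new in 0.1.18
    input_s3_path="s3://bucket/in/doc.pdf",    # populates field input_s_3_path via its alias
    output_s3_path_prefix="s3://bucket/out/",  # populates field output_s_3_path_prefix
)
# The generated models default json()/dict() to by_alias=True, so wire names round-trip
print(params.json())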
llama_cloud/types/supported_llm_model_names.py
CHANGED
@@ -7,9 +7,6 @@ T_Result = typing.TypeVar("T_Result")
 
 
 class SupportedLlmModelNames(str, enum.Enum):
-    GPT_3_5_TURBO = "GPT_3_5_TURBO"
-    GPT_4 = "GPT_4"
-    GPT_4_TURBO = "GPT_4_TURBO"
     GPT_4_O = "GPT_4O"
     GPT_4_O_MINI = "GPT_4O_MINI"
     AZURE_OPENAI_GPT_3_5_TURBO = "AZURE_OPENAI_GPT_3_5_TURBO"
@@ -22,9 +19,6 @@ class SupportedLlmModelNames(str, enum.Enum):
 
     def visit(
         self,
-        gpt_3_5_turbo: typing.Callable[[], T_Result],
-        gpt_4: typing.Callable[[], T_Result],
-        gpt_4_turbo: typing.Callable[[], T_Result],
         gpt_4_o: typing.Callable[[], T_Result],
         gpt_4_o_mini: typing.Callable[[], T_Result],
         azure_openai_gpt_3_5_turbo: typing.Callable[[], T_Result],
@@ -35,12 +29,6 @@ class SupportedLlmModelNames(str, enum.Enum):
         bedrock_claude_3_5_sonnet: typing.Callable[[], T_Result],
         vertex_ai_claude_3_5_sonnet: typing.Callable[[], T_Result],
     ) -> T_Result:
-        if self is SupportedLlmModelNames.GPT_3_5_TURBO:
-            return gpt_3_5_turbo()
-        if self is SupportedLlmModelNames.GPT_4:
-            return gpt_4()
-        if self is SupportedLlmModelNames.GPT_4_TURBO:
-            return gpt_4_turbo()
         if self is SupportedLlmModelNames.GPT_4_O:
             return gpt_4_o()
         if self is SupportedLlmModelNames.GPT_4_O_MINI:
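
GPT_3_5_TURBO, GPT_4 and GPT_4_TURBO vanish from the enum, from visit()'s parameter list, and from its dispatch table, so 0.1.17 code that referenced those members now fails loudly. A small defensive sketch for code that persists model names as strings; the GPT_4_O fallback is illustrative only, not a library recommendation, and the import from the package root is an assumption:

from llama_cloud import SupportedLlmModelNames

stored = "GPT_4_TURBO"  # e.g. a value saved under 0.1.17
try:
    model = SupportedLlmModelNames(stored)
except ValueError:
    # the member (and its "GPT_4_TURBO" value) no longer exists in 0.1.18
    model = SupportedLlmModelNames.GPT_4_O
print(model.value)  # "GPT_4O"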
{llama_cloud-0.1.17.dist-info → llama_cloud-0.1.18.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-llama_cloud/__init__.py,sha256=
+llama_cloud/__init__.py,sha256=ginbvrHI_2KVE9mklzHcw4ftPluYxfrNwovh9cYXmLU,22689
 llama_cloud/client.py,sha256=0fK6iRBCA77eSs0zFrYQj-zD0BLy6Dr2Ss0ETJ4WaOY,5555
 llama_cloud/core/__init__.py,sha256=QJS3CJ2TYP2E1Tge0CS6Z7r8LTNzJHQVX1hD3558eP0,519
 llama_cloud/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
@@ -11,7 +11,7 @@ llama_cloud/errors/__init__.py,sha256=pbbVUFtB9LCocA1RMWMMF_RKjsy5YkOKX5BAuE49w6
 llama_cloud/errors/unprocessable_entity_error.py,sha256=FvR7XPlV3Xx5nu8HNlmLhBRdk4so_gCHjYT5PyZe6sM,313
 llama_cloud/resources/__init__.py,sha256=h2kWef5KlC8qpr-1MJyIoFVCsNBidRUUUWztnsr9AHs,3298
 llama_cloud/resources/chat_apps/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/chat_apps/client.py,sha256=
+llama_cloud/resources/chat_apps/client.py,sha256=orSI8rpQbUwVEToolEeiEi5Qe--suXFvfu6D9JDii5I,23595
 llama_cloud/resources/component_definitions/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/component_definitions/client.py,sha256=YYfoXNa1qim2OdD5y4N5mvoBZKtrCuXS560mtqH_-1c,7569
 llama_cloud/resources/data_sinks/__init__.py,sha256=ZHUjn3HbKhq_7QS1q74r2m5RGKF5lxcvF2P6pGvpcis,147
@@ -38,26 +38,26 @@ llama_cloud/resources/files/types/file_create_resource_info_value.py,sha256=R7Y-
 llama_cloud/resources/jobs/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/jobs/client.py,sha256=mN9uOzys9aZkhOJkApUy0yhfNeK8X09xQxT34ZPptNY,5386
 llama_cloud/resources/llama_extract/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/llama_extract/client.py,sha256=
+llama_cloud/resources/llama_extract/client.py,sha256=Vj1I6g6BoIQt3SF1ageePvRTMaqekVkNs8RbitM_oi4,56662
 llama_cloud/resources/organizations/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/organizations/client.py,sha256=OGSVpkfY5wu8-22IFWVmtbYSDiy0-KqA3Lc1E_jNHvg,55889
 llama_cloud/resources/parsing/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/parsing/client.py,sha256=
+llama_cloud/resources/parsing/client.py,sha256=X1ewrFnirLnHPIQx963kO8yG26ukTy1ki9UX27zrKZE,71600
 llama_cloud/resources/pipelines/__init__.py,sha256=Mx7p3jDZRLMltsfywSufam_4AnHvmAfsxtMHVI72e-8,1083
-llama_cloud/resources/pipelines/client.py,sha256=
+llama_cloud/resources/pipelines/client.py,sha256=Zn_I4-6OJCVnmX5E7xqbdWrY89MwIdzraUfJFjmSiWk,125059
 llama_cloud/resources/pipelines/types/__init__.py,sha256=jjaMc0V3K1HZLMYZ6WT4ydMtBCVy-oF5koqTCovbDws,1202
 llama_cloud/resources/pipelines/types/pipeline_file_update_custom_metadata_value.py,sha256=trI48WLxPcAqV9207Q6-3cj1nl4EGlZpw7En56ZsPgg,217
 llama_cloud/resources/pipelines/types/pipeline_update_embedding_config.py,sha256=c8FF64fDrBMX_2RX4uY3CjbNc0Ss_AUJ4Eqs-KeV4Wc,2874
 llama_cloud/resources/pipelines/types/pipeline_update_transform_config.py,sha256=KbkyULMv-qeS3qRd31ia6pd5rOdypS0o2UL42NRcA7E,321
 llama_cloud/resources/projects/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/projects/client.py,sha256=
+llama_cloud/resources/projects/client.py,sha256=PF36iWtSa5amUt3q56YwLypOZjclIXSubCRv9NttpLs,25404
 llama_cloud/resources/reports/__init__.py,sha256=cruYbQ1bIuJbRpkfaQY7ajUEslffjd7KzvzMzbtPH94,217
 llama_cloud/resources/reports/client.py,sha256=kHjtXVVc1Xi3T1GyBvSW5K4mTdr6xQwZA3vw-liRKBg,46736
 llama_cloud/resources/reports/types/__init__.py,sha256=LfwDYrI4RcQu-o42iAe7HkcwHww2YU90lOonBPTmZIk,291
 llama_cloud/resources/reports/types/update_report_plan_api_v_1_reports_report_id_plan_patch_request_action.py,sha256=Qh-MSeRvDBfNb5hoLELivv1pLtrYVf52WVoP7G8V34A,807
 llama_cloud/resources/retrievers/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/retrievers/client.py,sha256=T7fu41wXAYUTGh23ZWlKPM4e8zH7mg5MDa8F1GxNYwQ,31502
-llama_cloud/types/__init__.py,sha256=
+llama_cloud/types/__init__.py,sha256=aJzAfMdbYY2sgKi3FgiqH6JZho1d885w9GMxNt7uKqg,27960
 llama_cloud/types/advanced_mode_transform_config.py,sha256=4xCXye0_cPmVS1F8aNTx81sIaEPjQH9kiCCAIoqUzlI,1502
 llama_cloud/types/advanced_mode_transform_config_chunking_config.py,sha256=wYbJnWLpeQDfhmDZz-wJfYzD1iGT5Jcxb9ga3mzUuvk,1983
 llama_cloud/types/advanced_mode_transform_config_segmentation_config.py,sha256=anNGq0F5-IlbIW3kpC8OilzLJnUq5tdIcWHnRnmlYsg,1303
@@ -142,7 +142,7 @@ llama_cloud/types/extract_agent_update_data_schema.py,sha256=argR5gPRUYWY6ADCMKR
 llama_cloud/types/extract_agent_update_data_schema_zero_value.py,sha256=Nvd892EFhg-PzlqoFp5i2owL7hCZ2SsuL7U4Tk9NeRI,217
 llama_cloud/types/extract_config.py,sha256=oR_6uYl8-58q6a5BsgymJuqCKPn6JoY7SAUmjT9M3es,1369
 llama_cloud/types/extract_job.py,sha256=Yx4fDdCdylAji2LPTwqflVpz1o9slpj9tTLS93-1tzU,1431
-llama_cloud/types/extract_job_create.py,sha256=
+llama_cloud/types/extract_job_create.py,sha256=xgLzFifEx5vxPSoqCh1arNZlxXWNNzvPeMCXS00GnZo,1542
 llama_cloud/types/extract_job_create_batch.py,sha256=64BAproProYtPk7vAPGvFoxvlgg7ZLb1LSg3ChIf7AM,1589
 llama_cloud/types/extract_job_create_batch_data_schema_override.py,sha256=GykJ1BBecRtWYD3ZPi1YINqrr-me_pyr2w_4Ei4QOZQ,351
 llama_cloud/types/extract_job_create_batch_data_schema_override_zero_value.py,sha256=7zXOgTYUwVAeyYeqWvX69m-7mhvK0V9cBRvgqVSd0X0,228
@@ -186,10 +186,10 @@ llama_cloud/types/job_name_mapping.py,sha256=2dQFQlVHoeSlkyEKSEJv0M3PzJf7hMvkuAB
 llama_cloud/types/job_names.py,sha256=ZapQT__pLI14SagjGi8AsEwWY949hBoplQemMgb_Aoc,4098
 llama_cloud/types/job_record.py,sha256=r2WzLQXSOFogNMN2rl10rAlYI9OTCmVn06QaZXxa0rQ,2058
 llama_cloud/types/job_record_with_usage_metrics.py,sha256=iNV2do5TB_0e3PoOz_DJyAaM6Cn9G8KG-dGPGgEs5SY,1198
-llama_cloud/types/llama_extract_settings.py,sha256=
+llama_cloud/types/llama_extract_settings.py,sha256=IQFxtKa4GtHKc9w-fLwsH0LSKDWzR9_vZ_cTFJ9cGBI,2288
 llama_cloud/types/llama_index_core_base_llms_types_chat_message.py,sha256=NelHo-T-ebVMhRKsqE_xV8AJW4c7o6lS0uEQnPsmTwg,1365
 llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py,sha256=tTglUqrSUaVc2Wsi4uIt5MU-80_oxZzTnhf8ziilVGY,874
-llama_cloud/types/llama_parse_parameters.py,sha256=
+llama_cloud/types/llama_parse_parameters.py,sha256=WS4nVsYQIKJlikdpD7jcZP0d_Qs12xZkGTIJImpKltU,5317
 llama_cloud/types/llama_parse_supported_file_extensions.py,sha256=B_0N3f8Aq59W9FbsH50mGBUiyWTIXQjHFl739uAyaQw,11207
 llama_cloud/types/llm.py,sha256=7iIItVPjURp4u5xxJDAFIefUdhUKwIuA245WXilJPXE,2234
 llama_cloud/types/llm_model_data.py,sha256=6rrycqGwlK3LZ2S-WtgmeomithdLhDCgwBBZQ5KLaso,1300
@@ -232,7 +232,6 @@ llama_cloud/types/parsing_job_markdown_result.py,sha256=gPIUO0JwtKwvSHcRYEr995DN
 llama_cloud/types/parsing_job_structured_result.py,sha256=w_Z4DOHjwUPmffjc4qJiGYbniWTpkjpVcD4irL1dDj0,1017
 llama_cloud/types/parsing_job_text_result.py,sha256=TP-7IRTWZLAZz7NYLkzi4PsGnaRJuPTt40p56Mk6Rhw,1065
 llama_cloud/types/parsing_mode.py,sha256=s89EhQB3N9yH9a5EtuB8tDcrHLe2KJTM6e0Do-iU7FE,2038
-llama_cloud/types/parsing_usage.py,sha256=JLlozu-vIkcRKqWaOVJ9Z2TrY7peJRTzOpYjOThGKGQ,1012
 llama_cloud/types/partition_names.py,sha256=zZZn-sn59gwch2fa7fGMwFWUEuu5Dfen3ZqKtcPnBEM,1877
 llama_cloud/types/permission.py,sha256=LjhZdo0oLvk7ZVIF1d6Qja--AKH5Ri0naUhuJvZS6Ng,1345
 llama_cloud/types/pipeline.py,sha256=eVNfQjfQTArB3prPeDkfDK6PtfhhBxW7-_VhH9MzlsE,2789
@@ -302,7 +301,7 @@ llama_cloud/types/status_enum.py,sha256=cUBIlys89E8PUzmVqqawu7qTDF0aRqBwiijOmRDP
 llama_cloud/types/struct_mode.py,sha256=ROicwjXfFmgVU8_xSVxJlnFUzRNKG5VIEF1wYg9uOPU,1020
 llama_cloud/types/struct_parse_conf.py,sha256=Od5f8azJlJTJJ6rwtZEIaEsSSYBdrNsHtLeMtdpMtxM,2101
 llama_cloud/types/supported_llm_model.py,sha256=0v-g01LyZB7TeN0zwAeSJejRoT95SVaXOJhNz7boJwM,1461
-llama_cloud/types/supported_llm_model_names.py,sha256=
+llama_cloud/types/supported_llm_model_names.py,sha256=xZhgu4NcxnA61vmQsxDFgPSRjWtczcXOoCKrtwOBWqc,2161
 llama_cloud/types/text_block.py,sha256=X154sQkSyposXuRcEWNp_tWcDQ-AI6q_-MfJUN5exP8,958
 llama_cloud/types/text_node.py,sha256=Tq3QmuKC5cIHvC9wAtvhsXl1g2sACs2yJwQ0Uko8GSU,2846
 llama_cloud/types/text_node_relationships_value.py,sha256=qmXURTk1Xg7ZDzRSSV1uDEel0AXRLohND5ioezibHY0,217
@@ -324,7 +323,7 @@ llama_cloud/types/validation_error_loc_item.py,sha256=LAtjCHIllWRBFXvAZ5QZpp7CPX
 llama_cloud/types/vertex_ai_embedding_config.py,sha256=DvQk2xMJFmo54MEXTzoM4KSADyhGm_ygmFyx6wIcQdw,1159
 llama_cloud/types/vertex_embedding_mode.py,sha256=yY23FjuWU_DkXjBb3JoKV4SCMqel2BaIMltDqGnIowU,1217
 llama_cloud/types/vertex_text_embedding.py,sha256=-C4fNCYfFl36ATdBMGFVPpiHIKxjk0KB1ERA2Ec20aU,1932
-llama_cloud-0.1.
-llama_cloud-0.1.
-llama_cloud-0.1.
-llama_cloud-0.1.
+llama_cloud-0.1.18.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
+llama_cloud-0.1.18.dist-info/METADATA,sha256=X5HryqbkGcNToG0AitXrvbgzLP4b-Xe0atTODJRjZck,902
+llama_cloud-0.1.18.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+llama_cloud-0.1.18.dist-info/RECORD,,
llama_cloud/types/parsing_usage.py
DELETED
@@ -1,32 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import datetime as dt
-import typing
-
-from ..core.datetime_utils import serialize_datetime
-
-try:
-    import pydantic
-    if pydantic.__version__.startswith("1."):
-        raise ImportError
-    import pydantic.v1 as pydantic  # type: ignore
-except ImportError:
-    import pydantic  # type: ignore
-
-
-class ParsingUsage(pydantic.BaseModel):
-    usage_pdf_pages: int
-    max_pdf_pages: typing.Optional[int]
-
-    def json(self, **kwargs: typing.Any) -> str:
-        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-        return super().json(**kwargs_with_defaults)
-
-    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
-        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-        return super().dict(**kwargs_with_defaults)
-
-    class Config:
-        frozen = True
-        smart_union = True
-        json_encoders = {dt.datetime: serialize_datetime}
{llama_cloud-0.1.17.dist-info → llama_cloud-0.1.18.dist-info}/LICENSE
File without changes
{llama_cloud-0.1.17.dist-info → llama_cloud-0.1.18.dist-info}/WHEEL
File without changes