llama-cloud 0.1.1__py3-none-any.whl → 0.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this version of llama-cloud has been flagged as potentially problematic.

llama_cloud/__init__.py CHANGED
@@ -131,6 +131,7 @@ from .types import (
     ParsingJobTextResult,
     ParsingUsage,
     PartitionNames,
+    Permission,
     Pipeline,
     PipelineConfigurationHashes,
     PipelineCreate,
@@ -178,6 +179,7 @@ from .types import (
     RelatedNodeInfo,
     RetrievalMode,
     RetrieveResults,
+    Role,
     SemanticChunkingConfig,
     SentenceChunkingConfig,
     SentenceSplitter,
@@ -193,6 +195,7 @@ from .types import (
     UserOrganization,
     UserOrganizationCreate,
     UserOrganizationDelete,
+    UserOrganizationRole,
     ValidationError,
     ValidationErrorLocItem,
     VertexAiEmbeddingConfig,
@@ -368,6 +371,7 @@ __all__ = [
     "ParsingJobTextResult",
     "ParsingUsage",
     "PartitionNames",
+    "Permission",
     "Pipeline",
     "PipelineConfigurationHashes",
     "PipelineCreate",
@@ -425,6 +429,7 @@ __all__ = [
     "RelatedNodeInfo",
     "RetrievalMode",
     "RetrieveResults",
+    "Role",
     "SemanticChunkingConfig",
     "SentenceChunkingConfig",
     "SentenceSplitter",
@@ -441,6 +446,7 @@ __all__ = [
     "UserOrganization",
     "UserOrganizationCreate",
     "UserOrganizationDelete",
+    "UserOrganizationRole",
     "ValidationError",
     "ValidationErrorLocItem",
     "VertexAiEmbeddingConfig",
llama_cloud/resources/organizations/client.py CHANGED
@@ -7,13 +7,17 @@ from json.decoder import JSONDecodeError
 from ...core.api_error import ApiError
 from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
 from ...core.jsonable_encoder import jsonable_encoder
+from ...core.remove_none_from_dict import remove_none_from_dict
 from ...errors.unprocessable_entity_error import UnprocessableEntityError
 from ...types.http_validation_error import HttpValidationError
 from ...types.organization import Organization
 from ...types.organization_create import OrganizationCreate
+from ...types.project import Project
+from ...types.role import Role
 from ...types.user_organization import UserOrganization
 from ...types.user_organization_create import UserOrganizationCreate
 from ...types.user_organization_delete import UserOrganizationDelete
+from ...types.user_organization_role import UserOrganizationRole
 
 try:
     import pydantic
@@ -446,6 +450,247 @@ class OrganizationsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    def list_roles(self, organization_id: str) -> typing.List[Role]:
+        """
+        List all roles in an organization.
+
+        Parameters:
+            - organization_id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.organizations.list_roles(
+            organization_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/organizations/{organization_id}/roles"
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.List[Role], _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def get_user_role(self, organization_id: str) -> UserOrganizationRole:
+        """
+        Get the role of a user in an organization.
+
+        Parameters:
+            - organization_id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.organizations.get_user_role(
+            organization_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/organizations/{organization_id}/users/roles"
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(UserOrganizationRole, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def assign_role_to_user_in_organization(
+        self, organization_id: str, *, user_id: str, user_organization_role_create_organization_id: str, role_id: str
+    ) -> UserOrganizationRole:
+        """
+        Assign a role to a user in an organization.
+
+        Parameters:
+            - organization_id: str.
+
+            - user_id: str. The user's ID.
+
+            - user_organization_role_create_organization_id: str. The organization's ID.
+
+            - role_id: str. The role's ID.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.organizations.assign_role_to_user_in_organization(
+            organization_id="string",
+            user_id="string",
+            user_organization_role_create_organization_id="string",
+            role_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "PUT",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/organizations/{organization_id}/users/roles"
+            ),
+            json=jsonable_encoder(
+                {
+                    "user_id": user_id,
+                    "organization_id": user_organization_role_create_organization_id,
+                    "role_id": role_id,
+                }
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(UserOrganizationRole, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def list_projects_by_user(self, organization_id: str, user_id: str) -> typing.List[Project]:
+        """
+        List all projects for a user in an organization.
+
+        Parameters:
+            - organization_id: str.
+
+            - user_id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.organizations.list_projects_by_user(
+            organization_id="string",
+            user_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/",
+                f"api/v1/organizations/{organization_id}/users/{user_id}/projects",
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.List[Project], _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def add_user_to_project(self, organization_id: str, user_id: str, *, project_id: str) -> typing.Any:
+        """
+        Add a user to a project.
+
+        Parameters:
+            - organization_id: str.
+
+            - user_id: str.
+
+            - project_id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.organizations.add_user_to_project(
+            organization_id="string",
+            user_id="string",
+            project_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "PUT",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/",
+                f"api/v1/organizations/{organization_id}/users/{user_id}/projects",
+            ),
+            params=remove_none_from_dict({"project_id": project_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def remove_user_from_project(self, organization_id: str, user_id: str, project_id: str) -> typing.Any:
+        """
+        Remove a user from a project.
+
+        Parameters:
+            - organization_id: str.
+
+            - user_id: str.
+
+            - project_id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.organizations.remove_user_from_project(
+            organization_id="string",
+            user_id="string",
+            project_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "DELETE",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/",
+                f"api/v1/organizations/{organization_id}/users/{user_id}/projects/{project_id}",
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
 
 class AsyncOrganizationsClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -865,3 +1110,244 @@ class AsyncOrganizationsClient:
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def list_roles(self, organization_id: str) -> typing.List[Role]:
+        """
+        List all roles in an organization.
+
+        Parameters:
+            - organization_id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.organizations.list_roles(
+            organization_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/organizations/{organization_id}/roles"
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.List[Role], _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def get_user_role(self, organization_id: str) -> UserOrganizationRole:
+        """
+        Get the role of a user in an organization.
+
+        Parameters:
+            - organization_id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.organizations.get_user_role(
+            organization_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/organizations/{organization_id}/users/roles"
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(UserOrganizationRole, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def assign_role_to_user_in_organization(
+        self, organization_id: str, *, user_id: str, user_organization_role_create_organization_id: str, role_id: str
+    ) -> UserOrganizationRole:
+        """
+        Assign a role to a user in an organization.
+
+        Parameters:
+            - organization_id: str.
+
+            - user_id: str. The user's ID.
+
+            - user_organization_role_create_organization_id: str. The organization's ID.
+
+            - role_id: str. The role's ID.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.organizations.assign_role_to_user_in_organization(
+            organization_id="string",
+            user_id="string",
+            user_organization_role_create_organization_id="string",
+            role_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "PUT",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/organizations/{organization_id}/users/roles"
+            ),
+            json=jsonable_encoder(
+                {
+                    "user_id": user_id,
+                    "organization_id": user_organization_role_create_organization_id,
+                    "role_id": role_id,
+                }
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(UserOrganizationRole, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def list_projects_by_user(self, organization_id: str, user_id: str) -> typing.List[Project]:
+        """
+        List all projects for a user in an organization.
+
+        Parameters:
+            - organization_id: str.
+
+            - user_id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.organizations.list_projects_by_user(
+            organization_id="string",
+            user_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/",
+                f"api/v1/organizations/{organization_id}/users/{user_id}/projects",
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.List[Project], _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def add_user_to_project(self, organization_id: str, user_id: str, *, project_id: str) -> typing.Any:
+        """
+        Add a user to a project.
+
+        Parameters:
+            - organization_id: str.
+
+            - user_id: str.
+
+            - project_id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.organizations.add_user_to_project(
+            organization_id="string",
+            user_id="string",
+            project_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "PUT",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/",
+                f"api/v1/organizations/{organization_id}/users/{user_id}/projects",
+            ),
+            params=remove_none_from_dict({"project_id": project_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def remove_user_from_project(self, organization_id: str, user_id: str, project_id: str) -> typing.Any:
+        """
+        Remove a user from a project.
+
+        Parameters:
+            - organization_id: str.
+
+            - user_id: str.
+
+            - project_id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.organizations.remove_user_from_project(
+            organization_id="string",
+            user_id="string",
+            project_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "DELETE",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/",
+                f"api/v1/organizations/{organization_id}/users/{user_id}/projects/{project_id}",
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
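
Taken together, the new organization endpoints cover role listing, role assignment, and project membership. A short usage sketch based on the signatures and docstrings above; the token and IDs are placeholders, not values from this diff:

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    # List the roles defined for an organization (returns a list of Role).
    roles = client.organizations.list_roles(organization_id="org_123")

    # Assign one of those roles to a user (returns a UserOrganizationRole).
    client.organizations.assign_role_to_user_in_organization(
        organization_id="org_123",
        user_id="user_456",
        user_organization_role_create_organization_id="org_123",
        role_id=roles[0].id,
    )

    # Grant the same user access to a project, then list their projects.
    client.organizations.add_user_to_project(
        organization_id="org_123", user_id="user_456", project_id="proj_789"
    )
    projects = client.organizations.list_projects_by_user(
        organization_id="org_123", user_id="user_456"
    )
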
llama_cloud/resources/parsing/client.py CHANGED
@@ -109,6 +109,7 @@ class ParsingClient:
         gpt_4_o_mode: bool,
         fast_mode: bool,
         premium_mode: bool,
+        continuous_mode: bool,
         gpt_4_o_api_key: str,
         do_not_unroll_columns: bool,
         page_separator: str,
@@ -121,7 +122,9 @@ class ParsingClient:
         page_suffix: str,
         webhook_url: str,
         take_screenshot: bool,
+        is_formatting_instruction: bool,
         disable_ocr: bool,
+        annotate_links: bool,
         disable_reconstruction: bool,
         input_s_3_path: str,
         output_s_3_path_prefix: str,
@@ -151,6 +154,8 @@ class ParsingClient:
 
             - premium_mode: bool.
 
+            - continuous_mode: bool.
+
             - gpt_4_o_api_key: str.
 
             - do_not_unroll_columns: bool.
@@ -175,8 +180,12 @@ class ParsingClient:
 
             - take_screenshot: bool.
 
+            - is_formatting_instruction: bool.
+
             - disable_ocr: bool.
 
+            - annotate_links: bool.
+
             - disable_reconstruction: bool.
 
             - input_s_3_path: str.
@@ -202,6 +211,7 @@ class ParsingClient:
                 "gpt4o_mode": gpt_4_o_mode,
                 "fast_mode": fast_mode,
                 "premium_mode": premium_mode,
+                "continuous_mode": continuous_mode,
                 "gpt4o_api_key": gpt_4_o_api_key,
                 "do_not_unroll_columns": do_not_unroll_columns,
                 "page_separator": page_separator,
@@ -214,7 +224,9 @@ class ParsingClient:
                 "page_suffix": page_suffix,
                 "webhook_url": webhook_url,
                 "take_screenshot": take_screenshot,
+                "is_formatting_instruction": is_formatting_instruction,
                 "disable_ocr": disable_ocr,
+                "annotate_links": annotate_links,
                 "disable_reconstruction": disable_reconstruction,
                 "input_s3_path": input_s_3_path,
                 "output_s3_path_prefix": output_s_3_path_prefix,
@@ -678,6 +690,7 @@ class AsyncParsingClient:
         gpt_4_o_mode: bool,
         fast_mode: bool,
         premium_mode: bool,
+        continuous_mode: bool,
         gpt_4_o_api_key: str,
         do_not_unroll_columns: bool,
         page_separator: str,
@@ -690,7 +703,9 @@ class AsyncParsingClient:
         page_suffix: str,
         webhook_url: str,
         take_screenshot: bool,
+        is_formatting_instruction: bool,
         disable_ocr: bool,
+        annotate_links: bool,
         disable_reconstruction: bool,
         input_s_3_path: str,
         output_s_3_path_prefix: str,
@@ -720,6 +735,8 @@ class AsyncParsingClient:
 
             - premium_mode: bool.
 
+            - continuous_mode: bool.
+
             - gpt_4_o_api_key: str.
 
            - do_not_unroll_columns: bool.
@@ -744,8 +761,12 @@ class AsyncParsingClient:
 
             - take_screenshot: bool.
 
+            - is_formatting_instruction: bool.
+
             - disable_ocr: bool.
 
+            - annotate_links: bool.
+
             - disable_reconstruction: bool.
 
             - input_s_3_path: str.
@@ -771,6 +792,7 @@ class AsyncParsingClient:
                 "gpt4o_mode": gpt_4_o_mode,
                 "fast_mode": fast_mode,
                 "premium_mode": premium_mode,
+                "continuous_mode": continuous_mode,
                 "gpt4o_api_key": gpt_4_o_api_key,
                 "do_not_unroll_columns": do_not_unroll_columns,
                 "page_separator": page_separator,
@@ -783,7 +805,9 @@ class AsyncParsingClient:
                 "page_suffix": page_suffix,
                 "webhook_url": webhook_url,
                 "take_screenshot": take_screenshot,
+                "is_formatting_instruction": is_formatting_instruction,
                 "disable_ocr": disable_ocr,
+                "annotate_links": annotate_links,
                 "disable_reconstruction": disable_reconstruction,
                 "input_s3_path": input_s_3_path,
                 "output_s3_path_prefix": output_s_3_path_prefix,
llama_cloud/types/__init__.py CHANGED
@@ -134,6 +134,7 @@ from .parsing_job_markdown_result import ParsingJobMarkdownResult
 from .parsing_job_text_result import ParsingJobTextResult
 from .parsing_usage import ParsingUsage
 from .partition_names import PartitionNames
+from .permission import Permission
 from .pipeline import Pipeline
 from .pipeline_configuration_hashes import PipelineConfigurationHashes
 from .pipeline_create import PipelineCreate
@@ -187,6 +188,7 @@ from .pydantic_program_mode import PydanticProgramMode
 from .related_node_info import RelatedNodeInfo
 from .retrieval_mode import RetrievalMode
 from .retrieve_results import RetrieveResults
+from .role import Role
 from .semantic_chunking_config import SemanticChunkingConfig
 from .sentence_chunking_config import SentenceChunkingConfig
 from .sentence_splitter import SentenceSplitter
@@ -202,6 +204,7 @@ from .transformation_category_names import TransformationCategoryNames
 from .user_organization import UserOrganization
 from .user_organization_create import UserOrganizationCreate
 from .user_organization_delete import UserOrganizationDelete
+from .user_organization_role import UserOrganizationRole
 from .validation_error import ValidationError
 from .validation_error_loc_item import ValidationErrorLocItem
 from .vertex_ai_embedding_config import VertexAiEmbeddingConfig
@@ -339,6 +342,7 @@ __all__ = [
     "ParsingJobTextResult",
     "ParsingUsage",
     "PartitionNames",
+    "Permission",
     "Pipeline",
     "PipelineConfigurationHashes",
     "PipelineCreate",
@@ -386,6 +390,7 @@ __all__ = [
     "RelatedNodeInfo",
     "RetrievalMode",
     "RetrieveResults",
+    "Role",
     "SemanticChunkingConfig",
     "SentenceChunkingConfig",
     "SentenceSplitter",
@@ -401,6 +406,7 @@ __all__ = [
     "UserOrganization",
     "UserOrganizationCreate",
     "UserOrganizationDelete",
+    "UserOrganizationRole",
     "ValidationError",
     "ValidationErrorLocItem",
     "VertexAiEmbeddingConfig",
llama_cloud/types/base.py CHANGED
@@ -1,29 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import datetime as dt
-import typing
-
-from ..core.datetime_utils import serialize_datetime
-
-try:
-    import pydantic
-    if pydantic.__version__.startswith("1."):
-        raise ImportError
-    import pydantic.v1 as pydantic  # type: ignore
-except ImportError:
-    import pydantic  # type: ignore
-
-
-class Base(pydantic.BaseModel):
-    def json(self, **kwargs: typing.Any) -> str:
-        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-        return super().json(**kwargs_with_defaults)
-
-    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
-        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
-        return super().dict(**kwargs_with_defaults)
-
-    class Config:
-        frozen = True
-        smart_union = True
-        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/llama_parse_parameters.py CHANGED
@@ -23,6 +23,7 @@ class LlamaParseParameters(pydantic.BaseModel):
     languages: typing.Optional[typing.List[ParserLanguages]]
     parsing_instruction: typing.Optional[str]
     disable_ocr: typing.Optional[bool]
+    annotate_links: typing.Optional[bool]
     disable_reconstruction: typing.Optional[bool]
     invalidate_cache: typing.Optional[bool]
     do_not_cache: typing.Optional[bool]
@@ -41,7 +42,9 @@ class LlamaParseParameters(pydantic.BaseModel):
     page_suffix: typing.Optional[str]
     webhook_url: typing.Optional[str]
     take_screenshot: typing.Optional[bool]
+    is_formatting_instruction: typing.Optional[bool]
     premium_mode: typing.Optional[bool]
+    continuous_mode: typing.Optional[bool]
     s_3_input_path: typing.Optional[str] = pydantic.Field(alias="s3_input_path")
     s_3_output_path_prefix: typing.Optional[str] = pydantic.Field(alias="s3_output_path_prefix")
     azure_openai_deployment_name: typing.Optional[str]
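
The new LlamaParse toggles also surface on the `LlamaParseParameters` model. A minimal construction sketch, assuming the fields not shown in this hunk are likewise optional and default to None (as the Optional annotations above suggest under pydantic v1 semantics):

    from llama_cloud.types.llama_parse_parameters import LlamaParseParameters

    params = LlamaParseParameters(
        annotate_links=True,              # new in 0.1.3
        is_formatting_instruction=False,  # new in 0.1.3
        continuous_mode=False,            # new in 0.1.3
    )
    print(params.json())
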
llama_cloud/types/permission.py ADDED
@@ -0,0 +1,40 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class Permission(pydantic.BaseModel):
+    """
+    Schema for a permission.
+    """
+
+    id: str = pydantic.Field(description="Unique identifier")
+    created_at: typing.Optional[dt.datetime]
+    updated_at: typing.Optional[dt.datetime]
+    name: str = pydantic.Field(description="A name for the permission.")
+    description: typing.Optional[str]
+    access: bool = pydantic.Field(description="Whether the permission is granted or not.")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/retrieval_mode.py CHANGED
@@ -10,12 +10,14 @@ class RetrievalMode(str, enum.Enum):
     CHUNKS = "chunks"
     FILES_VIA_METADATA = "files_via_metadata"
     FILES_VIA_CONTENT = "files_via_content"
+    AUTO_ROUTED = "auto_routed"
 
     def visit(
         self,
         chunks: typing.Callable[[], T_Result],
         files_via_metadata: typing.Callable[[], T_Result],
         files_via_content: typing.Callable[[], T_Result],
+        auto_routed: typing.Callable[[], T_Result],
     ) -> T_Result:
         if self is RetrievalMode.CHUNKS:
             return chunks()
@@ -23,3 +25,5 @@ class RetrievalMode(str, enum.Enum):
             return files_via_metadata()
         if self is RetrievalMode.FILES_VIA_CONTENT:
             return files_via_content()
+        if self is RetrievalMode.AUTO_ROUTED:
+            return auto_routed()
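
Because `visit()` gained a required `auto_routed` callback, exhaustive call sites now need one more argument. A small sketch using only the members and signature shown above:

    from llama_cloud.types.retrieval_mode import RetrievalMode

    label = RetrievalMode.AUTO_ROUTED.visit(
        chunks=lambda: "chunk retrieval",
        files_via_metadata=lambda: "file retrieval via metadata",
        files_via_content=lambda: "file retrieval via content",
        auto_routed=lambda: "automatically routed retrieval",
    )
    print(label)  # "automatically routed retrieval"
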
llama_cloud/types/retrieve_results.py CHANGED
@@ -31,6 +31,9 @@ class RetrieveResults(pydantic.BaseModel):
     retrieval_latency: typing.Dict[str, float] = pydantic.Field(
         description="The end-to-end latency for retrieval and reranking."
     )
+    metadata: typing.Optional[typing.Dict[str, str]] = pydantic.Field(
+        description="Metadata associated with the retrieval execution"
+    )
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/role.py ADDED
@@ -0,0 +1,41 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .permission import Permission
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class Role(pydantic.BaseModel):
+    """
+    Schema for a role.
+    """
+
+    id: str = pydantic.Field(description="Unique identifier")
+    created_at: typing.Optional[dt.datetime]
+    updated_at: typing.Optional[dt.datetime]
+    name: str = pydantic.Field(description="A name for the role.")
+    organization_id: typing.Optional[str]
+    permissions: typing.List[Permission] = pydantic.Field(description="The actual permissions of the role.")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
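
A construction sketch for the two new models, using only the fields defined above. The permission names and IDs are made-up placeholders, and the optional timestamp/description fields are left at their None defaults:

    from llama_cloud.types.permission import Permission
    from llama_cloud.types.role import Role

    role = Role(
        id="role_abc",
        name="viewer",
        permissions=[
            Permission(id="perm_1", name="read_pipelines", access=True),
            Permission(id="perm_2", name="delete_pipelines", access=False),
        ],
    )
    print([p.name for p in role.permissions if p.access])
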
llama_cloud/types/user_organization.py CHANGED
@@ -4,6 +4,7 @@ import datetime as dt
 import typing
 
 from ..core.datetime_utils import serialize_datetime
+from .user_organization_role import UserOrganizationRole
 
 try:
     import pydantic
@@ -30,6 +31,7 @@ class UserOrganization(pydantic.BaseModel):
     )
     invited_by_user_id: typing.Optional[str]
     invited_by_user_email: typing.Optional[str]
+    roles: typing.List[UserOrganizationRole] = pydantic.Field(description="The roles of the user in the organization.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/user_organization_create.py CHANGED
@@ -21,6 +21,8 @@ class UserOrganizationCreate(pydantic.BaseModel):
 
     user_id: typing.Optional[str]
     email: typing.Optional[str]
+    project_ids: typing.Optional[typing.List[str]]
+    role_id: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/user_organization_role.py ADDED
@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .role import Role
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class UserOrganizationRole(pydantic.BaseModel):
+    """
+    Schema for a user's role in an organization.
+    """
+
+    id: str = pydantic.Field(description="Unique identifier")
+    created_at: typing.Optional[dt.datetime]
+    updated_at: typing.Optional[dt.datetime]
+    user_id: str = pydantic.Field(description="The user's ID.")
+    organization_id: str = pydantic.Field(description="The organization's ID.")
+    role_id: str = pydantic.Field(description="The role's ID.")
+    role: Role = pydantic.Field(description="The role.")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
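
This is the model returned by the new `get_user_role` endpoint shown earlier; its nested `role` carries the per-permission flags. A short sketch (the token and organization ID are placeholders):

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    my_role = client.organizations.get_user_role(organization_id="org_123")
    granted = [p.name for p in my_role.role.permissions if p.access]
    print(my_role.role.name, granted)
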
llama_cloud/types/vertex_embedding_mode.py CHANGED
@@ -8,15 +8,8 @@ T_Result = typing.TypeVar("T_Result")
 
 class VertexEmbeddingMode(str, enum.Enum):
     """
-    VertexAI embedding mode.
-
-    Attributes:
-        DEFAULT_MODE (str): The default embedding mode, for older models before August 2023,
-            that does not support task_type
-        CLASSIFICATION_MODE (str): Optimizes embeddings for classification tasks.
-        CLUSTERING_MODE (str): Optimizes embeddings for clustering tasks.
-        SEMANTIC_SIMILARITY_MODE (str): Optimizes embeddings for tasks that require assessments of semantic similarity.
-        RETRIEVAL_MODE (str): Optimizes embeddings for retrieval tasks, including search and document retrieval.
+    Copied from llama_index.embeddings.vertex.base.VertexEmbeddingMode
+    since importing llama_index.embeddings.vertex.base incurs a lot of memory usage.
     """
 
     DEFAULT = "default"
@@ -1,10 +1,12 @@
 Metadata-Version: 2.1
 Name: llama-cloud
-Version: 0.1.1
+Version: 0.1.3
 Summary:
+License: MIT
 Author: Logan Markewich
 Author-email: logan@runllama.ai
 Requires-Python: >=3.8,<4
+Classifier: License :: OSI Approved :: MIT License
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.8
 Classifier: Programming Language :: Python :: 3.9
@@ -1,4 +1,4 @@
-llama_cloud/__init__.py,sha256=KlJJ33jmA3Tq3FxZu1a04nSK_NeydK9qsu21-YYXL-c,13781
+llama_cloud/__init__.py,sha256=IOE7g-z73GqQ85hcyHFrQp5pDtED4CWF96G34iQ6blw,13891
 llama_cloud/client.py,sha256=bhZPiYd1TQSn3PRgHZ66MgMnBneG4Skc9g6UsT0wQnE,4299
 llama_cloud/core/__init__.py,sha256=QJS3CJ2TYP2E1Tge0CS6Z7r8LTNzJHQVX1hD3558eP0,519
 llama_cloud/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
@@ -33,9 +33,9 @@ llama_cloud/resources/files/client.py,sha256=Tmi3E1lnAYT2BwYeCH9Ch7W8xqqqPdHMtwi
 llama_cloud/resources/files/types/__init__.py,sha256=ZWnnYWuDYZSfUJc7Jv3HyovzijdB--DTK4YB-uPcDsA,181
 llama_cloud/resources/files/types/file_create_resource_info_value.py,sha256=R7Y-CJf7fnbvIqE3xOI5XOrmPwLbVJLC7zpxMu8Zopk,201
 llama_cloud/resources/organizations/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/organizations/client.py,sha256=P2u7AhSDXSpHOHe-tnyWqJAcKLLfLNxcc4-mfUqr7zs,34109
+llama_cloud/resources/organizations/client.py,sha256=mFDfu2rruaAUpmKS3ZttLjDg_GNvFJOaUc4Ny1wjVxU,52840
 llama_cloud/resources/parsing/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/parsing/client.py,sha256=W5lpAvLdtJNgCyTu9jZD_nxbDE9dSLuid9BoA3GxYkY,43128
+llama_cloud/resources/parsing/client.py,sha256=8QvAQYlYdUtUkSbUmMafUfzcYGUarM4b8VlxnlfoIbo,43902
 llama_cloud/resources/pipelines/__init__.py,sha256=Mx7p3jDZRLMltsfywSufam_4AnHvmAfsxtMHVI72e-8,1083
 llama_cloud/resources/pipelines/client.py,sha256=wGmcHdHXojUtZURaXLKeJEB0v4J6ImKbygvMUp8W-ic,125954
 llama_cloud/resources/pipelines/types/__init__.py,sha256=jjaMc0V3K1HZLMYZ6WT4ydMtBCVy-oF5koqTCovbDws,1202
@@ -44,14 +44,14 @@ llama_cloud/resources/pipelines/types/pipeline_update_embedding_config.py,sha256
 llama_cloud/resources/pipelines/types/pipeline_update_transform_config.py,sha256=KbkyULMv-qeS3qRd31ia6pd5rOdypS0o2UL42NRcA7E,321
 llama_cloud/resources/projects/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/projects/client.py,sha256=nK81HdhGbWY1rh8rSEsKzRuvyvCQ-IkhLHIPDqEqVFU,47754
-llama_cloud/types/__init__.py,sha256=_BhNDO3MaagDL3Zr0zlJySmXYWbBmtBbTP7l_C3ElIo,17204
+llama_cloud/types/__init__.py,sha256=AYJYaJX_YDGFEWk2uSRMUIVq5mjgi_n36HMb3M-20W0,17377
 llama_cloud/types/advanced_mode_transform_config.py,sha256=4xCXye0_cPmVS1F8aNTx81sIaEPjQH9kiCCAIoqUzlI,1502
 llama_cloud/types/advanced_mode_transform_config_chunking_config.py,sha256=wYbJnWLpeQDfhmDZz-wJfYzD1iGT5Jcxb9ga3mzUuvk,1983
 llama_cloud/types/advanced_mode_transform_config_segmentation_config.py,sha256=anNGq0F5-IlbIW3kpC8OilzLJnUq5tdIcWHnRnmlYsg,1303
 llama_cloud/types/auto_transform_config.py,sha256=HVeHZM75DMRznScqLTfrMwcZwIdyWPuaEYbPewnHqwc,1168
 llama_cloud/types/azure_open_ai_embedding.py,sha256=MeDqZoPYFN7Nv_imY9cfqDU9SPlEyAY4HcQZ4PF5X3g,2264
 llama_cloud/types/azure_open_ai_embedding_config.py,sha256=o1zZhzcGElH3SeixFErrm7P_WFHQ6LvrLem_nKJWunw,1170
-llama_cloud/types/base.py,sha256=cn_Zn61yLMDCMm1iZTPvKILSRlqRzOqZtSYyYBY5dIE,938
+llama_cloud/types/base.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 llama_cloud/types/base_prompt_template.py,sha256=Cw3887tnytHZ5bJBSlniyU9k5ASidv9VYR86--IbNqo,1248
 llama_cloud/types/bedrock_embedding.py,sha256=qrUoVW9Q2DLg-3nBRfGsZqUWGszfzc6ZHR8LJiXTZk4,1908
 llama_cloud/types/bedrock_embedding_config.py,sha256=32dMhoA2cLx1jeogDnCl9WPVb83Hn99nAALnt5BM208,1147
@@ -127,7 +127,7 @@ llama_cloud/types/hugging_face_inference_api_embedding_token.py,sha256=A7-_YryBc
 llama_cloud/types/ingestion_error_response.py,sha256=8u0cyT44dnpkNeUKemTvJMUqi_WyPcYQKP_DMTqaFPY,1259
 llama_cloud/types/input_message.py,sha256=H0vsGsIo_J71d3NnHGzs7WytjEhNPzhOEDY9e_9Y_w0,1329
 llama_cloud/types/job_name_mapping.py,sha256=scAbHrxvowCE3jHRZyYr2bBE5wvMMdBw7zpQ-lp5dY0,1433
-llama_cloud/types/llama_parse_parameters.py,sha256=DoG4u2ZOemavuCMNWxwryGDFfAk-6z-y0Afr_0B19Yo,2595
+llama_cloud/types/llama_parse_parameters.py,sha256=KQPMFeruoCeGPIOTmJKucvuaRRk9nRnLpL2ylHPssSk,2733
 llama_cloud/types/llama_parse_supported_file_extensions.py,sha256=B_0N3f8Aq59W9FbsH50mGBUiyWTIXQjHFl739uAyaQw,11207
 llama_cloud/types/llm.py,sha256=v5a7Cq4oSUh3LYYIpECOpV0llDVP7XbFvdlxsIXnmhI,2197
 llama_cloud/types/llm_model_data.py,sha256=QgyFe03psw5Aon3w1LC6ovCa1o9MVNcaGcmpapw-4D0,1263
@@ -167,6 +167,7 @@ llama_cloud/types/parsing_job_markdown_result.py,sha256=gPIUO0JwtKwvSHcRYEr995DN
 llama_cloud/types/parsing_job_text_result.py,sha256=TP-7IRTWZLAZz7NYLkzi4PsGnaRJuPTt40p56Mk6Rhw,1065
 llama_cloud/types/parsing_usage.py,sha256=JLlozu-vIkcRKqWaOVJ9Z2TrY7peJRTzOpYjOThGKGQ,1012
 llama_cloud/types/partition_names.py,sha256=zZZn-sn59gwch2fa7fGMwFWUEuu5Dfen3ZqKtcPnBEM,1877
+llama_cloud/types/permission.py,sha256=LjhZdo0oLvk7ZVIF1d6Qja--AKH5Ri0naUhuJvZS6Ng,1345
 llama_cloud/types/pipeline.py,sha256=7yyjgxY3echp-8jEIKiUOUsGbR1UFeqJF67SM9OBkUc,2737
 llama_cloud/types/pipeline_configuration_hashes.py,sha256=7_MbOcPWV6iyMflJeXoo9vLzD04E5WM7YxYp4ls0jQs,1169
 llama_cloud/types/pipeline_create.py,sha256=EY9Esd5QHjQeyBBzGAQvfO6HORWlfYSpH7aTvjOBkD0,2453
@@ -196,8 +197,9 @@ llama_cloud/types/prompt_mixin_prompts.py,sha256=_ipiIFWmWSuaJ5VFI5rXa_C7lHaIL3Y
 llama_cloud/types/prompt_spec.py,sha256=lpq9m4lK47FUfbFEzUDbLoZtSGX4EDthJ4u5Xd3SWKQ,1368
 llama_cloud/types/pydantic_program_mode.py,sha256=QfvpqR7TqyNuOxo78Sr58VOu7KDSBrHJM4XXBB0F5z0,1202
 llama_cloud/types/related_node_info.py,sha256=Ikm76x9hXs20uV_n_2MIG6bWh_ugPoehjyA-ZrR6J6o,1174
-llama_cloud/types/retrieval_mode.py,sha256=LTKT4y7zEGanZJ_mtlGSRSyosnGWPG90T3_lkA9l4Zc,747
-llama_cloud/types/retrieve_results.py,sha256=ZHBoNOO3Ta2vg8579XM0oEI18BUr656hk17F0_e9gbE,1780
+llama_cloud/types/retrieval_mode.py,sha256=wV9q3OdHTuyDWbJCGdxq9Hw6U95WFlJcaMq6KWSTzyw,910
+llama_cloud/types/retrieve_results.py,sha256=rbvPk-WylPzUFLgKUGXtmqrf7DL_hHd37UFznvlZ814,1928
+llama_cloud/types/role.py,sha256=SCi2TyFbc68RJuNB-OdcP8ut03Uv5zPZk84QMmf17w8,1384
 llama_cloud/types/semantic_chunking_config.py,sha256=dFDniTVWpRc7UcmVFvljUoyL5Ztd-l-YrHII7U-yM-k,1053
 llama_cloud/types/sentence_chunking_config.py,sha256=NA9xidK5ICxJPkEMQZWNcsV0Hw9Co_bzRWeYe4uSh9I,1116
 llama_cloud/types/sentence_splitter.py,sha256=GbC3KE20Nd85uzO4bqJttjqJhQ_1co2gKnSQxzfOAiM,2140
@@ -210,15 +212,16 @@ llama_cloud/types/text_node_with_score.py,sha256=k-KYWO_mgJBvO6xUfOD5W6v1Ku9E586
 llama_cloud/types/token_chunking_config.py,sha256=XNvnTsNd--YOMQ_Ad8hoqhYgQftqkBHKVn6i7nJnMqs,1067
 llama_cloud/types/token_text_splitter.py,sha256=iTT3x9yO021v757B2r-0Z-WFQiIESLqEJUCmUUwPQ_o,1899
 llama_cloud/types/transformation_category_names.py,sha256=Wb7NBB0f-tEtfEZQis-iKy71SUKmmHFcXf6XLn6g0XU,545
-llama_cloud/types/user_organization.py,sha256=wLL-bV2g17VVpusFAFXeVBYbUQ2oodHUiDaDz8z7jpM,1593
-llama_cloud/types/user_organization_create.py,sha256=bxm6mLTu-gOgiw4L_o-fwZYz27qVVU5fjhZudK-_q3I,1104
+llama_cloud/types/user_organization.py,sha256=Ydel7grMnKiPMWJmSWhCFCm3v_n286Gk36ANtDLNLd4,1770
+llama_cloud/types/user_organization_create.py,sha256=Zj57s9xuYVnLW2p8i4j2QORL-G1y7Ab3avXE1baERQY,1189
 llama_cloud/types/user_organization_delete.py,sha256=IDYLKfFAXfcJfkEpA0ARbaA0JDcEBe7fTLv833DZXHs,1104
+llama_cloud/types/user_organization_role.py,sha256=AHxDylWTkIrYUs0P-FDzTU8AmAsr0m-ErBuOA9RvIXs,1461
 llama_cloud/types/validation_error.py,sha256=yZDLtjUHDY5w82Ra6CW0H9sLAr18R0RY1UNgJKR72DQ,1084
 llama_cloud/types/validation_error_loc_item.py,sha256=LAtjCHIllWRBFXvAZ5QZpp7CPXjdtN9EB7HrLVo6EP0,128
 llama_cloud/types/vertex_ai_embedding_config.py,sha256=DvQk2xMJFmo54MEXTzoM4KSADyhGm_ygmFyx6wIcQdw,1159
-llama_cloud/types/vertex_embedding_mode.py,sha256=AkoY7nzOF5MHb4bCnEy-FJol7WxFNBLcQ8PHHtBWH_o,1605
+llama_cloud/types/vertex_embedding_mode.py,sha256=yY23FjuWU_DkXjBb3JoKV4SCMqel2BaIMltDqGnIowU,1217
 llama_cloud/types/vertex_text_embedding.py,sha256=-C4fNCYfFl36ATdBMGFVPpiHIKxjk0KB1ERA2Ec20aU,1932
-llama_cloud-0.1.1.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
-llama_cloud-0.1.1.dist-info/METADATA,sha256=YB7klRrfxoZYnluP2wVXhhMIt28M0jcP4mkMpB9QaHo,750
-llama_cloud-0.1.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-llama_cloud-0.1.1.dist-info/RECORD,,
+llama_cloud-0.1.3.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
+llama_cloud-0.1.3.dist-info/METADATA,sha256=RhdR5OAXdKp4EW2GMzFaEa7eJgdh2RBLWo9beus5aNs,814
+llama_cloud-0.1.3.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+llama_cloud-0.1.3.dist-info/RECORD,,