llama-cloud 0.0.12__py3-none-any.whl → 0.0.13__py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Potentially problematic release: this version of llama-cloud might be problematic.
- llama_cloud/types/parsing_usage.py +1 -1
- llama_cloud/types/supported_eval_llm_model_names.py +8 -0
- {llama_cloud-0.0.12.dist-info → llama_cloud-0.0.13.dist-info}/METADATA +1 -1
- {llama_cloud-0.0.12.dist-info → llama_cloud-0.0.13.dist-info}/RECORD +6 -6
- {llama_cloud-0.0.12.dist-info → llama_cloud-0.0.13.dist-info}/LICENSE +0 -0
- {llama_cloud-0.0.12.dist-info → llama_cloud-0.0.13.dist-info}/WHEEL +0 -0
llama_cloud/types/parsing_usage.py
@@ -16,7 +16,7 @@ except ImportError:
 
 class ParsingUsage(pydantic.BaseModel):
     usage_pdf_pages: int
-    max_pdf_pages: int
+    max_pdf_pages: typing.Optional[int]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
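The only code change in this file is that max_pdf_pages is now typed as typing.Optional[int], so it may be None when the API omits the value. A minimal sketch of how a caller might guard against that; the stand-in model mirrors the class above, and the payload values and messages are illustrative assumptions, not part of llama-cloud:

import typing

import pydantic


# Trimmed stand-in mirroring the ParsingUsage model shown in the diff above.
class ParsingUsage(pydantic.BaseModel):
    usage_pdf_pages: int
    max_pdf_pages: typing.Optional[int]


# Illustrative payload; a real instance would come from the llama-cloud client.
usage = ParsingUsage(usage_pdf_pages=120, max_pdf_pages=None)

if usage.max_pdf_pages is None:
    print("no page limit reported")
else:
    print(f"{usage.max_pdf_pages - usage.usage_pdf_pages} PDF pages remaining")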
llama_cloud/types/supported_eval_llm_model_names.py
@@ -14,12 +14,16 @@ class SupportedEvalLlmModelNames(str, enum.Enum):
     GPT_3_5_TURBO = "GPT_3_5_TURBO"
     GPT_4 = "GPT_4"
     GPT_4_TURBO = "GPT_4_TURBO"
+    GPT_4_O = "GPT_4O"
+    GPT_4_O_MINI = "GPT_4O_MINI"
 
     def visit(
         self,
         gpt_3_5_turbo: typing.Callable[[], T_Result],
         gpt_4: typing.Callable[[], T_Result],
         gpt_4_turbo: typing.Callable[[], T_Result],
+        gpt_4_o: typing.Callable[[], T_Result],
+        gpt_4_o_mini: typing.Callable[[], T_Result],
     ) -> T_Result:
         if self is SupportedEvalLlmModelNames.GPT_3_5_TURBO:
             return gpt_3_5_turbo()
@@ -27,3 +31,7 @@ class SupportedEvalLlmModelNames(str, enum.Enum):
             return gpt_4()
         if self is SupportedEvalLlmModelNames.GPT_4_TURBO:
             return gpt_4_turbo()
+        if self is SupportedEvalLlmModelNames.GPT_4_O:
+            return gpt_4_o()
+        if self is SupportedEvalLlmModelNames.GPT_4_O_MINI:
+            return gpt_4_o_mini()
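Because visit() takes one callback per enum member, any existing exhaustive caller must now also pass handlers for GPT_4_O and GPT_4_O_MINI, or the call fails with a missing-argument error. A minimal sketch of calling the extended visitor; the returned labels are made-up values for illustration:

from llama_cloud.types.supported_eval_llm_model_names import SupportedEvalLlmModelNames

model = SupportedEvalLlmModelNames.GPT_4_O_MINI

# Each callback returns a display label for its member; the labels are illustrative.
label = model.visit(
    gpt_3_5_turbo=lambda: "GPT-3.5 Turbo",
    gpt_4=lambda: "GPT-4",
    gpt_4_turbo=lambda: "GPT-4 Turbo",
    gpt_4_o=lambda: "GPT-4o",
    gpt_4_o_mini=lambda: "GPT-4o mini",
)
print(label)  # -> "GPT-4o mini"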
{llama_cloud-0.0.12.dist-info → llama_cloud-0.0.13.dist-info}/RECORD
@@ -165,7 +165,7 @@ llama_cloud/types/parsing_job.py,sha256=9hoKN4h-t0fka4-fX-79VbvcK2EEZRk2bDDZvCja
 llama_cloud/types/parsing_job_json_result.py,sha256=vC0FNMklitCgcB0esthMfv_RbbyFOzvwzvQsh58Im8o,1040
 llama_cloud/types/parsing_job_markdown_result.py,sha256=E3-CVNFH1IMyuGs_xzYfYdNgq9AdnDshA_CxOTXz_dQ,1094
 llama_cloud/types/parsing_job_text_result.py,sha256=1QZielAWXuzPFOgr_DWshXPjmbExAAgAHKAEYVQVtJ8,1082
-llama_cloud/types/parsing_usage.py,sha256=
+llama_cloud/types/parsing_usage.py,sha256=JLlozu-vIkcRKqWaOVJ9Z2TrY7peJRTzOpYjOThGKGQ,1012
 llama_cloud/types/pipeline.py,sha256=h-Xo7HirFCvgiu7NaqSrUTM2wJKd9WXzcqnZ_j_kRkU,2661
 llama_cloud/types/pipeline_create.py,sha256=usFxjKpz4PpzcAs66iqNDMxMPZRyT6Ezyyr4aRhJnE4,3102
 llama_cloud/types/pipeline_create_transform_config.py,sha256=CiMil0NrwvxR34CAzrSWw9Uo0117tz409sptH1k_r48,854
@@ -197,7 +197,7 @@ llama_cloud/types/sentence_chunking_config.py,sha256=NA9xidK5ICxJPkEMQZWNcsV0Hw9
 llama_cloud/types/sentence_splitter.py,sha256=mkP5vQsXnLhn6iZZN4MrAfVoFdBYhZTIHoA5AewXwZY,2213
 llama_cloud/types/status_enum.py,sha256=2kQLDa8PdvK45yJDSV2i53rBA3wCR1PJj-IdK0Dcr2E,868
 llama_cloud/types/supported_eval_llm_model.py,sha256=CKWBCKPNa_NjjlmenTDLbc9tt113qzwjq2Xi3WJ6wq8,1364
-llama_cloud/types/supported_eval_llm_model_names.py,sha256=
+llama_cloud/types/supported_eval_llm_model_names.py,sha256=W6fIA0JC4e4hbuvZ_EFbDdRp0viZf--_e0v_b8MIPT0,1111
 llama_cloud/types/text_node.py,sha256=ANT9oPqBs9IJFPhtq-6PC4l44FA3ZYjz_9nOE8h0RAM,2940
 llama_cloud/types/text_node_relationships_value.py,sha256=qmXURTk1Xg7ZDzRSSV1uDEel0AXRLohND5ioezibHY0,217
 llama_cloud/types/text_node_with_score.py,sha256=k-KYWO_mgJBvO6xUfOD5W6v1Ku9E586_HsvDoQbLfuQ,1229
@@ -210,7 +210,7 @@ llama_cloud/types/user_organization_create.py,sha256=YESlfcI64710OFdQzgGD4a7aItg
 llama_cloud/types/user_organization_delete.py,sha256=Z8RSRXc0AGAuGxv6eQPC2S1XIdRfNCXBggfEefgPseM,1209
 llama_cloud/types/validation_error.py,sha256=yZDLtjUHDY5w82Ra6CW0H9sLAr18R0RY1UNgJKR72DQ,1084
 llama_cloud/types/validation_error_loc_item.py,sha256=LAtjCHIllWRBFXvAZ5QZpp7CPXjdtN9EB7HrLVo6EP0,128
-llama_cloud-0.0.
-llama_cloud-0.0.
-llama_cloud-0.0.
-llama_cloud-0.0.
+llama_cloud-0.0.13.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
+llama_cloud-0.0.13.dist-info/METADATA,sha256=pgfKBQAqdeqwvud1PkR3fr_36qkR3ZJz772qCHX6bnY,751
+llama_cloud-0.0.13.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+llama_cloud-0.0.13.dist-info/RECORD,,
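For reference, each RECORD entry has the form path,sha256=<digest>,<size>, where the digest is the urlsafe base64 encoding of the file's SHA-256 hash with the trailing "=" padding stripped (per the wheel spec). A small sketch of recomputing an entry from a locally unpacked wheel; the local path is an assumption:

import base64
import hashlib


def record_entry(path: str) -> str:
    # RECORD stores the urlsafe-base64 SHA-256 digest (no '=' padding) plus the size in bytes.
    with open(path, "rb") as f:
        data = f.read()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode("ascii")
    return f"{path},sha256={digest},{len(data)}"


# Assumes the wheel has been unpacked into the current directory.
print(record_entry("llama_cloud/types/parsing_usage.py"))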
File without changes
|
|
File without changes
|