llama-cloud 0.1.18__py3-none-any.whl → 0.1.20__py3-none-any.whl
This diff compares the contents of package versions that have been publicly released to one of the supported registries. It is provided for informational purposes only and reflects the changes between versions as they appear in those registries.
Potentially problematic release: this version of llama-cloud has been flagged as possibly problematic.
- llama_cloud/__init__.py +202 -42
- llama_cloud/client.py +3 -0
- llama_cloud/resources/__init__.py +61 -2
- llama_cloud/resources/beta/__init__.py +2 -0
- llama_cloud/resources/beta/client.py +371 -0
- llama_cloud/resources/data_sinks/__init__.py +18 -2
- llama_cloud/resources/data_sinks/client.py +2 -94
- llama_cloud/resources/data_sinks/types/__init__.py +18 -2
- llama_cloud/resources/data_sinks/types/data_sink_update_component.py +65 -7
- llama_cloud/resources/data_sources/__init__.py +30 -2
- llama_cloud/resources/data_sources/types/__init__.py +28 -1
- llama_cloud/resources/data_sources/types/data_source_update_component.py +2 -23
- llama_cloud/resources/data_sources/types/data_source_update_component_one.py +122 -0
- llama_cloud/resources/embedding_model_configs/client.py +82 -22
- llama_cloud/resources/files/client.py +18 -4
- llama_cloud/resources/llama_extract/__init__.py +21 -0
- llama_cloud/resources/llama_extract/client.py +227 -114
- llama_cloud/resources/llama_extract/types/__init__.py +21 -0
- llama_cloud/resources/parsing/client.py +123 -4
- llama_cloud/resources/pipelines/client.py +116 -11
- llama_cloud/types/__init__.py +172 -52
- llama_cloud/types/{extract_schema_validate_request.py → audio_block.py} +5 -3
- llama_cloud/types/batch.py +47 -0
- llama_cloud/types/batch_item.py +40 -0
- llama_cloud/types/batch_paginated_list.py +35 -0
- llama_cloud/types/{base_prompt_template.py → batch_public_output.py} +7 -7
- llama_cloud/types/cloud_confluence_data_source.py +1 -0
- llama_cloud/types/cloud_jira_data_source.py +0 -4
- llama_cloud/types/cloud_postgres_vector_store.py +2 -0
- llama_cloud/types/cloud_sharepoint_data_source.py +1 -0
- llama_cloud/types/data_sink_component.py +65 -7
- llama_cloud/types/data_sink_create_component.py +65 -7
- llama_cloud/types/data_source_component.py +2 -23
- llama_cloud/types/data_source_component_one.py +122 -0
- llama_cloud/types/data_source_create_component.py +2 -23
- llama_cloud/types/data_source_create_component_one.py +122 -0
- llama_cloud/types/{extract_agent_update.py → data_source_update_dispatcher_config.py} +6 -6
- llama_cloud/types/{node_parser.py → delete_params.py} +7 -9
- llama_cloud/types/{extract_agent_create.py → document_ingestion_job_params.py} +11 -7
- llama_cloud/types/extract_config.py +2 -0
- llama_cloud/types/extract_job_create.py +1 -2
- llama_cloud/types/fail_page_mode.py +29 -0
- llama_cloud/types/file_count_by_status_response.py +37 -0
- llama_cloud/types/file_parse_public.py +36 -0
- llama_cloud/types/job_names.py +8 -12
- llama_cloud/types/job_record.py +2 -2
- llama_cloud/types/job_record_parameters.py +111 -0
- llama_cloud/types/l_lama_parse_transform_config.py +37 -0
- llama_cloud/types/legacy_parse_job_config.py +189 -0
- llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py +13 -1
- llama_cloud/types/llama_parse_parameters.py +8 -0
- llama_cloud/types/load_files_job_config.py +35 -0
- llama_cloud/types/message_role.py +4 -0
- llama_cloud/types/parse_job_config.py +134 -0
- llama_cloud/types/pg_vector_distance_method.py +43 -0
- llama_cloud/types/{extract_job_create_batch.py → pg_vector_hnsw_settings.py} +12 -9
- llama_cloud/types/pg_vector_vector_type.py +35 -0
- llama_cloud/types/pipeline.py +2 -4
- llama_cloud/types/pipeline_create.py +3 -2
- llama_cloud/types/pipeline_data_source.py +3 -0
- llama_cloud/types/pipeline_data_source_component.py +2 -23
- llama_cloud/types/pipeline_data_source_component_one.py +122 -0
- llama_cloud/types/pipeline_data_source_status.py +33 -0
- llama_cloud/types/pipeline_file.py +1 -0
- llama_cloud/types/pipeline_file_update_dispatcher_config.py +38 -0
- llama_cloud/types/{markdown_node_parser.py → pipeline_file_updater_config.py} +14 -15
- llama_cloud/types/pipeline_managed_ingestion_job_params.py +37 -0
- llama_cloud/types/pipeline_metadata_config.py +36 -0
- llama_cloud/types/prompt_conf.py +3 -0
- llama_cloud/types/struct_parse_conf.py +4 -1
- {llama_cloud-0.1.18.dist-info → llama_cloud-0.1.20.dist-info}/METADATA +4 -2
- {llama_cloud-0.1.18.dist-info → llama_cloud-0.1.20.dist-info}/RECORD +82 -68
- {llama_cloud-0.1.18.dist-info → llama_cloud-0.1.20.dist-info}/WHEEL +1 -1
- llama_cloud/types/character_splitter.py +0 -46
- llama_cloud/types/code_splitter.py +0 -50
- llama_cloud/types/configured_transformation_item.py +0 -46
- llama_cloud/types/configured_transformation_item_component.py +0 -22
- llama_cloud/types/llm.py +0 -60
- llama_cloud/types/markdown_element_node_parser.py +0 -51
- llama_cloud/types/page_splitter_node_parser.py +0 -42
- llama_cloud/types/pydantic_program_mode.py +0 -41
- llama_cloud/types/sentence_splitter.py +0 -50
- llama_cloud/types/token_text_splitter.py +0 -47
- /llama_cloud/{types → resources/llama_extract/types}/extract_agent_create_data_schema.py +0 -0
- /llama_cloud/{types → resources/llama_extract/types}/extract_agent_create_data_schema_zero_value.py +0 -0
- /llama_cloud/{types → resources/llama_extract/types}/extract_agent_update_data_schema.py +0 -0
- /llama_cloud/{types → resources/llama_extract/types}/extract_agent_update_data_schema_zero_value.py +0 -0
- /llama_cloud/{types → resources/llama_extract/types}/extract_job_create_batch_data_schema_override.py +0 -0
- /llama_cloud/{types → resources/llama_extract/types}/extract_job_create_batch_data_schema_override_zero_value.py +0 -0
- /llama_cloud/{types → resources/llama_extract/types}/extract_schema_validate_request_data_schema.py +0 -0
- /llama_cloud/{types → resources/llama_extract/types}/extract_schema_validate_request_data_schema_zero_value.py +0 -0
- {llama_cloud-0.1.18.dist-info → llama_cloud-0.1.20.dist-info}/LICENSE +0 -0
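Two structural themes stand out in the list: the legacy node-parser and LLM configuration models (character_splitter, sentence_splitter, llm, and friends) are deleted outright, and the LlamaExtract schema helper modules move from llama_cloud/types into llama_cloud/resources/llama_extract/types. Below is a defensive import sketch for code that referenced the moved modules directly; the module paths come straight from the rename entries above, but whether 0.1.20 keeps re-exports at the old path is not visible in this diff, so the try/except fallback is an assumption, not SDK guidance.

```python
# Hedging sketch for the module move shown in the rename entries above.
try:
    # 0.1.20 layout
    from llama_cloud.resources.llama_extract.types import (  # type: ignore
        extract_agent_create_data_schema,
    )
except ImportError:
    # 0.1.18 layout
    from llama_cloud.types import extract_agent_create_data_schema  # type: ignore
```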
llama_cloud/types/llm.py
DELETED
@@ -1,60 +0,0 @@
```python
# This file was auto-generated by Fern from our API Definition.

import datetime as dt
import typing

from ..core.datetime_utils import serialize_datetime
from .base_prompt_template import BasePromptTemplate
from .pydantic_program_mode import PydanticProgramMode

try:
    import pydantic
    if pydantic.__version__.startswith("1."):
        raise ImportError
    import pydantic.v1 as pydantic  # type: ignore
except ImportError:
    import pydantic  # type: ignore


class Llm(pydantic.BaseModel):
    """
    The LLM class is the main class for interacting with language models.

    Attributes:
        system_prompt (Optional[str]):
            System prompt for LLM calls.
        messages_to_prompt (Callable):
            Function to convert a list of messages to an LLM prompt.
        completion_to_prompt (Callable):
            Function to convert a completion to an LLM prompt.
        output_parser (Optional[BaseOutputParser]):
            Output parser to parse, validate, and correct errors programmatically.
        pydantic_program_mode (PydanticProgramMode):
            Pydantic program mode to use for structured prediction.
    """

    callback_manager: typing.Optional[typing.Any]
    system_prompt: typing.Optional[str]
    messages_to_prompt: typing.Optional[str] = pydantic.Field(
        description="Function to convert a list of messages to an LLM prompt."
    )
    completion_to_prompt: typing.Optional[str] = pydantic.Field(
        description="Function to convert a completion to an LLM prompt."
    )
    output_parser: typing.Optional[typing.Any]
    pydantic_program_mode: typing.Optional[PydanticProgramMode]
    query_wrapper_prompt: typing.Optional[BasePromptTemplate]
    class_name: typing.Optional[str]

    def json(self, **kwargs: typing.Any) -> str:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**kwargs_with_defaults)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().dict(**kwargs_with_defaults)

    class Config:
        frozen = True
        smart_union = True
        json_encoders = {dt.datetime: serialize_datetime}
```
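Every Fern-generated model in this package opens with the same version shim: it rebinds the name pydantic to the v1-style API whether pydantic 1.x or 2.x is installed (on 2.x it swaps in the bundled pydantic.v1 compatibility module). A standalone sketch of the pattern; the Example model is illustrative, not part of the SDK.

```python
import typing

try:
    import pydantic
    if pydantic.__version__.startswith("1."):
        raise ImportError  # already v1: fall through to the plain import below
    import pydantic.v1 as pydantic  # type: ignore  # v2 installed: use the bundled v1 API
except ImportError:
    import pydantic  # type: ignore


class Example(pydantic.BaseModel):  # illustrative model, not an SDK class
    name: typing.Optional[str]


# v1-style methods work under either installed major version
print(Example(name="demo").dict())  # {'name': 'demo'}
```

The rebinding keeps the rest of each generated file identical across pydantic majors, at the cost of pinning the models to the deprecated v1 surface.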
llama_cloud/types/markdown_element_node_parser.py
DELETED
@@ -1,51 +0,0 @@
```python
# This file was auto-generated by Fern from our API Definition.

import datetime as dt
import typing

from ..core.datetime_utils import serialize_datetime
from .llm import Llm
from .node_parser import NodeParser

try:
    import pydantic
    if pydantic.__version__.startswith("1."):
        raise ImportError
    import pydantic.v1 as pydantic  # type: ignore
except ImportError:
    import pydantic  # type: ignore


class MarkdownElementNodeParser(pydantic.BaseModel):
    """
    Markdown element node parser.

    Splits a markdown document into Text Nodes and Index Nodes corresponding to embedded objects
    (e.g. tables).
    """

    include_metadata: typing.Optional[bool] = pydantic.Field(
        description="Whether or not to consider metadata when splitting."
    )
    include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
    callback_manager: typing.Optional[typing.Any]
    id_func: typing.Optional[str]
    llm: typing.Optional[Llm]
    summary_query_str: typing.Optional[str] = pydantic.Field(description="Query string to use for summarization.")
    num_workers: typing.Optional[int] = pydantic.Field(description="Num of workers for async jobs.")
    show_progress: typing.Optional[bool] = pydantic.Field(description="Whether to show progress.")
    nested_node_parser: typing.Optional[NodeParser]
    class_name: typing.Optional[str]

    def json(self, **kwargs: typing.Any) -> str:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**kwargs_with_defaults)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().dict(**kwargs_with_defaults)

    class Config:
        frozen = True
        smart_union = True
        json_encoders = {dt.datetime: serialize_datetime}
```
llama_cloud/types/page_splitter_node_parser.py
DELETED
@@ -1,42 +0,0 @@
```python
# This file was auto-generated by Fern from our API Definition.

import datetime as dt
import typing

from ..core.datetime_utils import serialize_datetime

try:
    import pydantic
    if pydantic.__version__.startswith("1."):
        raise ImportError
    import pydantic.v1 as pydantic  # type: ignore
except ImportError:
    import pydantic  # type: ignore


class PageSplitterNodeParser(pydantic.BaseModel):
    """
    Split text into pages.
    """

    include_metadata: typing.Optional[bool] = pydantic.Field(
        description="Whether or not to consider metadata when splitting."
    )
    include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
    callback_manager: typing.Optional[typing.Any]
    id_func: typing.Optional[str]
    page_separator: typing.Optional[str]
    class_name: typing.Optional[str]

    def json(self, **kwargs: typing.Any) -> str:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**kwargs_with_defaults)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().dict(**kwargs_with_defaults)

    class Config:
        frozen = True
        smart_union = True
        json_encoders = {dt.datetime: serialize_datetime}
```
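All of these deleted models share Config.frozen = True and smart_union = True. Under the pydantic v1 API, frozen makes instances immutable (attribute assignment raises TypeError) and hashable. A minimal sketch, assuming a v1-style pydantic as arranged by the shim shown earlier; the class is illustrative, not from the SDK.

```python
import typing

import pydantic  # assumes a v1-style API (pydantic 1.x, or pydantic.v1 via the shim)


class FrozenExample(pydantic.BaseModel):  # illustrative, not an SDK class
    page_separator: typing.Optional[str]

    class Config:
        frozen = True


obj = FrozenExample(page_separator="\f")
try:
    obj.page_separator = "\n\n"
except TypeError as exc:
    # v1 frozen models reject attribute assignment with a TypeError
    print(f"immutable: {exc}")
```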
llama_cloud/types/pydantic_program_mode.py
DELETED
@@ -1,41 +0,0 @@
```python
# This file was auto-generated by Fern from our API Definition.

import enum
import typing

T_Result = typing.TypeVar("T_Result")


class PydanticProgramMode(str, enum.Enum):
    """
    Pydantic program mode.
    """

    DEFAULT = "default"
    OPENAI = "openai"
    LLM = "llm"
    FUNCTION = "function"
    GUIDANCE = "guidance"
    LM_FORMAT_ENFORCER = "lm-format-enforcer"

    def visit(
        self,
        default: typing.Callable[[], T_Result],
        openai: typing.Callable[[], T_Result],
        llm: typing.Callable[[], T_Result],
        function: typing.Callable[[], T_Result],
        guidance: typing.Callable[[], T_Result],
        lm_format_enforcer: typing.Callable[[], T_Result],
    ) -> T_Result:
        if self is PydanticProgramMode.DEFAULT:
            return default()
        if self is PydanticProgramMode.OPENAI:
            return openai()
        if self is PydanticProgramMode.LLM:
            return llm()
        if self is PydanticProgramMode.FUNCTION:
            return function()
        if self is PydanticProgramMode.GUIDANCE:
            return guidance()
        if self is PydanticProgramMode.LM_FORMAT_ENFORCER:
            return lm_format_enforcer()
```
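The deleted enum ships the usual Fern visit helper, which requires the caller to supply a handler for every variant and so turns missing-case bugs into signature errors. A short usage sketch against the class exactly as defined above; the label strings are illustrative glosses, not SDK documentation.

```python
# Runs against llama-cloud 0.1.18 (the module is deleted in 0.1.20).
from llama_cloud.types.pydantic_program_mode import PydanticProgramMode

mode = PydanticProgramMode.GUIDANCE

# visit() takes one zero-argument handler per variant and dispatches on self
label = mode.visit(
    default=lambda: "default structured prediction",
    openai=lambda: "OpenAI function calling",
    llm=lambda: "plain LLM completion",
    function=lambda: "generic function calling",
    guidance=lambda: "guidance-constrained decoding",
    lm_format_enforcer=lambda: "lm-format-enforcer grammar",
)
print(label)  # guidance-constrained decoding
```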
llama_cloud/types/sentence_splitter.py
DELETED
@@ -1,50 +0,0 @@
```python
# This file was auto-generated by Fern from our API Definition.

import datetime as dt
import typing

from ..core.datetime_utils import serialize_datetime

try:
    import pydantic
    if pydantic.__version__.startswith("1."):
        raise ImportError
    import pydantic.v1 as pydantic  # type: ignore
except ImportError:
    import pydantic  # type: ignore


class SentenceSplitter(pydantic.BaseModel):
    """
    Parse text with a preference for complete sentences.

    In general, this class tries to keep sentences and paragraphs together. Therefore
    compared to the original TokenTextSplitter, there are less likely to be
    hanging sentences or parts of sentences at the end of the node chunk.
    """

    include_metadata: typing.Optional[bool] = pydantic.Field(
        description="Whether or not to consider metadata when splitting."
    )
    include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
    callback_manager: typing.Optional[typing.Any]
    id_func: typing.Optional[str]
    chunk_size: typing.Optional[int] = pydantic.Field(description="The token chunk size for each chunk.")
    chunk_overlap: typing.Optional[int] = pydantic.Field(description="The token overlap of each chunk when splitting.")
    separator: typing.Optional[str] = pydantic.Field(description="Default separator for splitting into words")
    paragraph_separator: typing.Optional[str] = pydantic.Field(description="Separator between paragraphs.")
    secondary_chunking_regex: typing.Optional[str]
    class_name: typing.Optional[str]

    def json(self, **kwargs: typing.Any) -> str:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**kwargs_with_defaults)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().dict(**kwargs_with_defaults)

    class Config:
        frozen = True
        smart_union = True
        json_encoders = {dt.datetime: serialize_datetime}
```
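SentenceSplitter and the TokenTextSplitter that follows share the chunk_size / chunk_overlap pair: each successive chunk advances by chunk_size - chunk_overlap tokens, so overlap shrinks the effective stride. A back-of-envelope count (pure arithmetic, not an SDK call):

```python
import math


def approx_chunk_count(total_tokens: int, chunk_size: int, chunk_overlap: int) -> int:
    """Rough chunk count: after the first chunk, each one advances by (size - overlap) tokens."""
    if total_tokens <= chunk_size:
        return 1
    stride = chunk_size - chunk_overlap
    return 1 + math.ceil((total_tokens - chunk_size) / stride)


print(approx_chunk_count(total_tokens=1000, chunk_size=200, chunk_overlap=50))  # 7
```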
llama_cloud/types/token_text_splitter.py
DELETED
@@ -1,47 +0,0 @@
```python
# This file was auto-generated by Fern from our API Definition.

import datetime as dt
import typing

from ..core.datetime_utils import serialize_datetime

try:
    import pydantic
    if pydantic.__version__.startswith("1."):
        raise ImportError
    import pydantic.v1 as pydantic  # type: ignore
except ImportError:
    import pydantic  # type: ignore


class TokenTextSplitter(pydantic.BaseModel):
    """
    Implementation of splitting text that looks at word tokens.
    """

    include_metadata: typing.Optional[bool] = pydantic.Field(
        description="Whether or not to consider metadata when splitting."
    )
    include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
    callback_manager: typing.Optional[typing.Any]
    id_func: typing.Optional[str]
    chunk_size: typing.Optional[int] = pydantic.Field(description="The token chunk size for each chunk.")
    chunk_overlap: typing.Optional[int] = pydantic.Field(description="The token overlap of each chunk when splitting.")
    separator: typing.Optional[str] = pydantic.Field(description="Default separator for splitting into words")
    backup_separators: typing.Optional[typing.List[typing.Any]] = pydantic.Field(
        description="Additional separators for splitting."
    )
    class_name: typing.Optional[str]

    def json(self, **kwargs: typing.Any) -> str:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**kwargs_with_defaults)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().dict(**kwargs_with_defaults)

    class Config:
        frozen = True
        smart_union = True
        json_encoders = {dt.datetime: serialize_datetime}
```
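Finally, note the json() and dict() overrides repeated across every deleted model: they default serialization to by_alias=True and exclude_unset=True, so optional fields the caller never set simply vanish from the payload instead of appearing as nulls. A toy reproduction of the trick, assuming a v1-style pydantic; the Chunker class is illustrative, not from the SDK.

```python
import typing

import pydantic  # assumes a v1-style API (pydantic 1.x, or pydantic.v1 via the shim)


class Chunker(pydantic.BaseModel):  # illustrative stand-in for the splitter models
    chunk_size: typing.Optional[int]
    chunk_overlap: typing.Optional[int]

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        # explicit caller kwargs win, because later keys overwrite the defaults
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().dict(**kwargs_with_defaults)


print(Chunker(chunk_size=512).dict())                     # {'chunk_size': 512}
print(Chunker(chunk_size=512).dict(exclude_unset=False))  # {'chunk_size': 512, 'chunk_overlap': None}
```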
The remaining entries are pure renames with no content changes:

- /llama_cloud/{types → resources/llama_extract/types}/extract_agent_create_data_schema.py RENAMED (file without changes)
- /llama_cloud/{types → resources/llama_extract/types}/extract_agent_create_data_schema_zero_value.py RENAMED (file without changes)
- /llama_cloud/{types → resources/llama_extract/types}/extract_agent_update_data_schema.py RENAMED (file without changes)
- /llama_cloud/{types → resources/llama_extract/types}/extract_agent_update_data_schema_zero_value.py RENAMED (file without changes)
- /llama_cloud/{types → resources/llama_extract/types}/extract_job_create_batch_data_schema_override.py RENAMED (file without changes)
- /llama_cloud/{types → resources/llama_extract/types}/extract_job_create_batch_data_schema_override_zero_value.py RENAMED (file without changes)
- /llama_cloud/{types → resources/llama_extract/types}/extract_schema_validate_request_data_schema.py RENAMED (file without changes)
- /llama_cloud/{types → resources/llama_extract/types}/extract_schema_validate_request_data_schema_zero_value.py RENAMED (file without changes)
- {llama_cloud-0.1.18.dist-info → llama_cloud-0.1.20.dist-info}/LICENSE RENAMED (file without changes)