llama-cloud 0.1.6__py3-none-any.whl → 0.1.7a1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of llama-cloud might be problematic.
- llama_cloud/__init__.py +140 -6
- llama_cloud/client.py +15 -0
- llama_cloud/environment.py +1 -1
- llama_cloud/resources/__init__.py +15 -0
- llama_cloud/{types/token.py → resources/chat_apps/__init__.py} +0 -3
- llama_cloud/resources/chat_apps/client.py +620 -0
- llama_cloud/resources/data_sinks/client.py +12 -12
- llama_cloud/resources/data_sources/client.py +14 -14
- llama_cloud/resources/embedding_model_configs/client.py +20 -76
- llama_cloud/resources/evals/client.py +26 -36
- llama_cloud/resources/extraction/client.py +32 -32
- llama_cloud/resources/files/client.py +40 -44
- llama_cloud/resources/jobs/__init__.py +2 -0
- llama_cloud/resources/jobs/client.py +148 -0
- llama_cloud/resources/llama_extract/__init__.py +5 -0
- llama_cloud/resources/llama_extract/client.py +1038 -0
- llama_cloud/resources/llama_extract/types/__init__.py +6 -0
- llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema_value.py +7 -0
- llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema_value.py +7 -0
- llama_cloud/resources/organizations/client.py +66 -70
- llama_cloud/resources/parsing/client.py +448 -428
- llama_cloud/resources/pipelines/client.py +256 -344
- llama_cloud/resources/projects/client.py +34 -60
- llama_cloud/resources/reports/__init__.py +5 -0
- llama_cloud/resources/reports/client.py +1198 -0
- llama_cloud/resources/reports/types/__init__.py +7 -0
- llama_cloud/resources/reports/types/update_report_plan_api_v_1_reports_report_id_plan_patch_request_action.py +25 -0
- llama_cloud/resources/retrievers/__init__.py +2 -0
- llama_cloud/resources/retrievers/client.py +654 -0
- llama_cloud/types/__init__.py +128 -6
- llama_cloud/types/{chat_message.py → app_schema_chat_chat_message.py} +3 -3
- llama_cloud/types/azure_open_ai_embedding.py +6 -12
- llama_cloud/types/base_prompt_template.py +2 -6
- llama_cloud/types/bedrock_embedding.py +6 -12
- llama_cloud/types/character_splitter.py +2 -4
- llama_cloud/types/chat_app.py +44 -0
- llama_cloud/types/chat_app_response.py +41 -0
- llama_cloud/types/cloud_az_storage_blob_data_source.py +7 -15
- llama_cloud/types/cloud_box_data_source.py +6 -12
- llama_cloud/types/cloud_confluence_data_source.py +6 -6
- llama_cloud/types/cloud_document.py +1 -3
- llama_cloud/types/cloud_document_create.py +1 -3
- llama_cloud/types/cloud_jira_data_source.py +4 -6
- llama_cloud/types/cloud_notion_page_data_source.py +2 -2
- llama_cloud/types/cloud_one_drive_data_source.py +3 -5
- llama_cloud/types/cloud_postgres_vector_store.py +1 -0
- llama_cloud/types/cloud_s_3_data_source.py +4 -8
- llama_cloud/types/cloud_sharepoint_data_source.py +6 -8
- llama_cloud/types/cloud_slack_data_source.py +6 -6
- llama_cloud/types/code_splitter.py +1 -1
- llama_cloud/types/cohere_embedding.py +3 -7
- llama_cloud/types/composite_retrieval_mode.py +21 -0
- llama_cloud/types/composite_retrieval_result.py +38 -0
- llama_cloud/types/composite_retrieved_text_node.py +42 -0
- llama_cloud/types/data_sink.py +4 -4
- llama_cloud/types/data_sink_component.py +20 -0
- llama_cloud/types/data_source.py +5 -7
- llama_cloud/types/data_source_component.py +28 -0
- llama_cloud/types/data_source_create.py +1 -3
- llama_cloud/types/edit_suggestion.py +39 -0
- llama_cloud/types/embedding_model_config.py +2 -2
- llama_cloud/types/embedding_model_config_update.py +2 -4
- llama_cloud/types/eval_dataset.py +2 -2
- llama_cloud/types/eval_dataset_job_record.py +8 -13
- llama_cloud/types/eval_execution_params_override.py +2 -6
- llama_cloud/types/eval_question.py +2 -2
- llama_cloud/types/extract_agent.py +45 -0
- llama_cloud/types/extract_agent_data_schema_value.py +5 -0
- llama_cloud/types/extract_config.py +40 -0
- llama_cloud/types/extract_job.py +35 -0
- llama_cloud/types/extract_job_create.py +40 -0
- llama_cloud/types/extract_job_create_data_schema_override_value.py +7 -0
- llama_cloud/types/extract_mode.py +17 -0
- llama_cloud/types/extract_resultset.py +46 -0
- llama_cloud/types/extract_resultset_data.py +11 -0
- llama_cloud/types/extract_resultset_data_item_value.py +7 -0
- llama_cloud/types/extract_resultset_data_zero_value.py +7 -0
- llama_cloud/types/extract_resultset_extraction_metadata_value.py +7 -0
- llama_cloud/types/extraction_result.py +2 -2
- llama_cloud/types/extraction_schema.py +3 -5
- llama_cloud/types/file.py +9 -14
- llama_cloud/types/filter_condition.py +9 -1
- llama_cloud/types/filter_operator.py +6 -2
- llama_cloud/types/gemini_embedding.py +6 -10
- llama_cloud/types/hugging_face_inference_api_embedding.py +11 -27
- llama_cloud/types/hugging_face_inference_api_embedding_token.py +5 -0
- llama_cloud/types/image_block.py +35 -0
- llama_cloud/types/input_message.py +2 -4
- llama_cloud/types/job_names.py +89 -0
- llama_cloud/types/job_record.py +57 -0
- llama_cloud/types/job_record_with_usage_metrics.py +36 -0
- llama_cloud/types/llama_index_core_base_llms_types_chat_message.py +39 -0
- llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py +33 -0
- llama_cloud/types/llama_parse_parameters.py +4 -0
- llama_cloud/types/llm.py +3 -4
- llama_cloud/types/llm_model_data.py +1 -0
- llama_cloud/types/llm_parameters.py +3 -5
- llama_cloud/types/local_eval.py +8 -10
- llama_cloud/types/local_eval_results.py +1 -1
- llama_cloud/types/managed_ingestion_status.py +4 -0
- llama_cloud/types/managed_ingestion_status_response.py +4 -5
- llama_cloud/types/markdown_element_node_parser.py +3 -5
- llama_cloud/types/markdown_node_parser.py +1 -1
- llama_cloud/types/metadata_filter.py +2 -2
- llama_cloud/types/metadata_filter_value.py +5 -0
- llama_cloud/types/metric_result.py +3 -3
- llama_cloud/types/node_parser.py +1 -1
- llama_cloud/types/object_type.py +4 -0
- llama_cloud/types/open_ai_embedding.py +6 -12
- llama_cloud/types/organization.py +7 -2
- llama_cloud/types/page_splitter_node_parser.py +2 -2
- llama_cloud/types/paginated_jobs_history_with_metrics.py +35 -0
- llama_cloud/types/paginated_report_response.py +35 -0
- llama_cloud/types/parse_plan_level.py +21 -0
- llama_cloud/types/permission.py +3 -3
- llama_cloud/types/pipeline.py +7 -17
- llama_cloud/types/pipeline_configuration_hashes.py +3 -3
- llama_cloud/types/pipeline_create.py +8 -16
- llama_cloud/types/pipeline_data_source.py +7 -13
- llama_cloud/types/pipeline_data_source_component.py +28 -0
- llama_cloud/types/pipeline_data_source_create.py +1 -3
- llama_cloud/types/pipeline_deployment.py +4 -4
- llama_cloud/types/pipeline_file.py +13 -24
- llama_cloud/types/pipeline_file_create.py +1 -3
- llama_cloud/types/playground_session.py +4 -4
- llama_cloud/types/preset_retrieval_params.py +8 -14
- llama_cloud/types/presigned_url.py +1 -3
- llama_cloud/types/progress_event.py +44 -0
- llama_cloud/types/progress_event_status.py +33 -0
- llama_cloud/types/project.py +2 -2
- llama_cloud/types/prompt_mixin_prompts.py +1 -1
- llama_cloud/types/prompt_spec.py +3 -5
- llama_cloud/types/related_node_info.py +2 -2
- llama_cloud/types/related_node_info_node_type.py +7 -0
- llama_cloud/types/report.py +33 -0
- llama_cloud/types/report_block.py +34 -0
- llama_cloud/types/report_block_dependency.py +29 -0
- llama_cloud/types/report_create_response.py +31 -0
- llama_cloud/types/report_event_item.py +40 -0
- llama_cloud/types/report_event_item_event_data.py +45 -0
- llama_cloud/types/report_event_type.py +37 -0
- llama_cloud/types/report_metadata.py +39 -0
- llama_cloud/types/report_plan.py +36 -0
- llama_cloud/types/report_plan_block.py +36 -0
- llama_cloud/types/report_query.py +33 -0
- llama_cloud/types/report_response.py +41 -0
- llama_cloud/types/report_state.py +37 -0
- llama_cloud/types/report_state_event.py +38 -0
- llama_cloud/types/report_update_event.py +38 -0
- llama_cloud/types/retrieve_results.py +1 -1
- llama_cloud/types/retriever.py +45 -0
- llama_cloud/types/retriever_create.py +37 -0
- llama_cloud/types/retriever_pipeline.py +37 -0
- llama_cloud/types/role.py +3 -3
- llama_cloud/types/sentence_splitter.py +2 -4
- llama_cloud/types/status_enum.py +4 -0
- llama_cloud/types/supported_llm_model_names.py +4 -0
- llama_cloud/types/text_block.py +31 -0
- llama_cloud/types/text_node.py +15 -8
- llama_cloud/types/token_text_splitter.py +1 -1
- llama_cloud/types/usage_metric_response.py +34 -0
- llama_cloud/types/user_job_record.py +32 -0
- llama_cloud/types/user_organization.py +5 -9
- llama_cloud/types/user_organization_create.py +4 -4
- llama_cloud/types/user_organization_delete.py +2 -2
- llama_cloud/types/user_organization_role.py +2 -2
- llama_cloud/types/vertex_text_embedding.py +5 -9
- {llama_cloud-0.1.6.dist-info → llama_cloud-0.1.7a1.dist-info}/METADATA +2 -1
- llama_cloud-0.1.7a1.dist-info/RECORD +310 -0
- llama_cloud/types/value.py +0 -5
- llama_cloud-0.1.6.dist-info/RECORD +0 -241
- {llama_cloud-0.1.6.dist-info → llama_cloud-0.1.7a1.dist-info}/LICENSE +0 -0
- {llama_cloud-0.1.6.dist-info → llama_cloud-0.1.7a1.dist-info}/WHEEL +0 -0
llama_cloud/types/report_plan_block.py
ADDED
@@ -0,0 +1,36 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .report_block import ReportBlock
+from .report_block_dependency import ReportBlockDependency
+from .report_query import ReportQuery
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ReportPlanBlock(pydantic.BaseModel):
+    block: ReportBlock
+    queries: typing.Optional[typing.List[ReportQuery]] = pydantic.Field(description="The queries for the block")
+    dependency: ReportBlockDependency = pydantic.Field(description="The dependency for the block")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
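Every generated module opens with the same import shim shown above: on a pydantic v2 install it rebinds the local name pydantic to the bundled pydantic.v1 compatibility layer, so the models always run against the v1 API (class-based Config, smart_union, json_encoders). A minimal sketch of which branch the shim takes on a given install (the print strings are illustrative, not from the package):

import pydantic

if pydantic.__version__.startswith("1."):
    print("pydantic v1 install: generated models use pydantic directly")
else:
    # Same rebinding the shim performs: v2 ships the legacy API as pydantic.v1.
    import pydantic.v1 as pydantic  # type: ignore
    print("pydantic v2 install: generated models use the v1 compatibility layer")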
llama_cloud/types/report_query.py
ADDED
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ReportQuery(pydantic.BaseModel):
+    field: str = pydantic.Field(description="The field in the template that needs to be filled in")
+    prompt: str = pydantic.Field(description="The prompt for filling in the field")
+    context: str = pydantic.Field(description="Any additional context for the query")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/report_response.py
ADDED
@@ -0,0 +1,41 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .report import Report
+from .report_plan import ReportPlan
+from .report_state import ReportState
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ReportResponse(pydantic.BaseModel):
+    name: str
+    report_id: str
+    report: typing.Optional[Report]
+    plan: typing.Optional[ReportPlan]
+    version: int
+    last_updated: dt.datetime
+    status: ReportState
+    total_versions: int
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/report_state.py
ADDED
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class ReportState(str, enum.Enum):
+    PENDING = "pending"
+    PLANNING = "planning"
+    WAITING_APPROVAL = "waiting_approval"
+    GENERATING = "generating"
+    COMPLETED = "completed"
+    ERROR = "error"
+
+    def visit(
+        self,
+        pending: typing.Callable[[], T_Result],
+        planning: typing.Callable[[], T_Result],
+        waiting_approval: typing.Callable[[], T_Result],
+        generating: typing.Callable[[], T_Result],
+        completed: typing.Callable[[], T_Result],
+        error: typing.Callable[[], T_Result],
+    ) -> T_Result:
+        if self is ReportState.PENDING:
+            return pending()
+        if self is ReportState.PLANNING:
+            return planning()
+        if self is ReportState.WAITING_APPROVAL:
+            return waiting_approval()
+        if self is ReportState.GENERATING:
+            return generating()
+        if self is ReportState.COMPLETED:
+            return completed()
+        if self is ReportState.ERROR:
+            return error()
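The generated enums pair each member with a visit() method that takes one callback per member, an exhaustive-match helper. A usage sketch, assuming ReportState is re-exported from the package root (the +140-line llama_cloud/__init__.py change suggests the new types are):

from llama_cloud import ReportState

# One callback per member; if a member is added later, visit() gains a
# required keyword argument and stale callers fail loudly with TypeError.
label = ReportState.GENERATING.visit(
    pending=lambda: "queued",
    planning=lambda: "drafting a plan",
    waiting_approval=lambda: "plan awaiting review",
    generating=lambda: "writing the report",
    completed=lambda: "done",
    error=lambda: "failed",
)
assert label == "writing the report"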
llama_cloud/types/report_state_event.py
ADDED
@@ -0,0 +1,38 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .report_state import ReportState
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ReportStateEvent(pydantic.BaseModel):
+    """
+    Event for notifying when an report's state changes.
+    """
+
+    timestamp: typing.Optional[dt.datetime]
+    msg: str = pydantic.Field(description="The message to display to the user")
+    status: ReportState = pydantic.Field(description="The new state of the report")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
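Every generated model overrides json() and dict() to inject by_alias=True and exclude_unset=True, so unset optional fields are dropped from output rather than serialized as null; explicit keyword arguments still win because **kwargs is merged last. A sketch with ReportStateEvent (top-level import path assumed, as above):

from llama_cloud import ReportState, ReportStateEvent

event = ReportStateEvent(msg="Plan approved", status=ReportState.GENERATING)

# timestamp was never set, so exclude_unset=True omits it entirely:
print(event.json())  # roughly: {"msg": "Plan approved", "status": "generating"}

# Explicit kwargs override the injected defaults; timestamp now appears as None:
print(event.dict(exclude_unset=False))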
llama_cloud/types/report_update_event.py
ADDED
@@ -0,0 +1,38 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .report_block import ReportBlock
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ReportUpdateEvent(pydantic.BaseModel):
+    """
+    Event for updating the state of an report.
+    """
+
+    timestamp: typing.Optional[dt.datetime]
+    msg: typing.Optional[str] = pydantic.Field(description="The message to display to the user")
+    block: ReportBlock = pydantic.Field(description="The block to update")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/retrieve_results.py
CHANGED
@@ -28,7 +28,7 @@ class RetrieveResults(pydantic.BaseModel):
     image_nodes: typing.Optional[typing.List[PageScreenshotNodeWithScore]] = pydantic.Field(
         description="The image nodes retrieved by the pipeline for the given query."
     )
-    retrieval_latency: typing.Dict[str, float] = pydantic.Field(
+    retrieval_latency: typing.Optional[typing.Dict[str, float]] = pydantic.Field(
        description="The end-to-end latency for retrieval and reranking."
     )
     metadata: typing.Optional[typing.Dict[str, str]] = pydantic.Field(
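This is the only change to RetrieveResults: retrieval_latency is loosened from a required dict to Optional, so 0.1.6-era callers that summed or indexed it unconditionally should now guard against None. A hypothetical caller-side guard (function and variable names are illustrative):

def total_latency_seconds(results) -> float:
    latency = results.retrieval_latency  # Optional[Dict[str, float]] as of 0.1.7a1
    if latency is None:
        return 0.0
    return sum(latency.values())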
llama_cloud/types/retriever.py
ADDED
@@ -0,0 +1,45 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .retriever_pipeline import RetrieverPipeline
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class Retriever(pydantic.BaseModel):
+    """
+    An entity that retrieves context nodes from several sub RetrieverTools.
+    """
+
+    name: str = pydantic.Field(
+        description="A name for the retriever tool. Will default to the pipeline name if not provided."
+    )
+    pipelines: typing.Optional[typing.List[RetrieverPipeline]] = pydantic.Field(
+        description="The pipelines this retriever uses."
+    )
+    id: str = pydantic.Field(description="Unique identifier")
+    created_at: typing.Optional[dt.datetime]
+    updated_at: typing.Optional[dt.datetime]
+    project_id: str = pydantic.Field(description="The ID of the project this retriever resides in.")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/retriever_create.py
ADDED
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .retriever_pipeline import RetrieverPipeline
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class RetrieverCreate(pydantic.BaseModel):
+    name: str = pydantic.Field(
+        description="A name for the retriever tool. Will default to the pipeline name if not provided."
+    )
+    pipelines: typing.Optional[typing.List[RetrieverPipeline]] = pydantic.Field(
+        description="The pipelines this retriever uses."
+    )
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/retriever_pipeline.py
ADDED
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .preset_retrieval_params import PresetRetrievalParams
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class RetrieverPipeline(pydantic.BaseModel):
+    name: typing.Optional[str]
+    description: typing.Optional[str]
+    pipeline_id: str = pydantic.Field(description="The ID of the pipeline this tool uses.")
+    preset_retrieval_parameters: typing.Optional[PresetRetrievalParams] = pydantic.Field(
+        description="Parameters for retrieval configuration."
+    )
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/role.py
CHANGED
@@ -21,10 +21,10 @@ class Role(pydantic.BaseModel):
     """

     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime]
-    updated_at: typing.Optional[dt.datetime]
+    created_at: typing.Optional[dt.datetime]
+    updated_at: typing.Optional[dt.datetime]
     name: str = pydantic.Field(description="A name for the role.")
-    organization_id: typing.Optional[str]
+    organization_id: typing.Optional[str]
     permissions: typing.List[Permission] = pydantic.Field(description="The actual permissions of the role.")

     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/sentence_splitter.py
CHANGED
@@ -28,14 +28,12 @@ class SentenceSplitter(pydantic.BaseModel):
     )
     include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
     callback_manager: typing.Optional[typing.Any]
-    id_func: typing.Optional[str]
+    id_func: typing.Optional[str]
     chunk_size: typing.Optional[int] = pydantic.Field(description="The token chunk size for each chunk.")
     chunk_overlap: typing.Optional[int] = pydantic.Field(description="The token overlap of each chunk when splitting.")
     separator: typing.Optional[str] = pydantic.Field(description="Default separator for splitting into words")
     paragraph_separator: typing.Optional[str] = pydantic.Field(description="Separator between paragraphs.")
-    secondary_chunking_regex: typing.Optional[str] = pydantic.Field(
-        description="Backup regex for splitting into sentences."
-    )
+    secondary_chunking_regex: typing.Optional[str]
     class_name: typing.Optional[str]

     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/status_enum.py
CHANGED
@@ -15,6 +15,7 @@ class StatusEnum(str, enum.Enum):
     SUCCESS = "SUCCESS"
     ERROR = "ERROR"
     PARTIAL_SUCCESS = "PARTIAL_SUCCESS"
+    CANCELLED = "CANCELLED"

     def visit(
         self,
@@ -22,6 +23,7 @@ class StatusEnum(str, enum.Enum):
         success: typing.Callable[[], T_Result],
         error: typing.Callable[[], T_Result],
         partial_success: typing.Callable[[], T_Result],
+        cancelled: typing.Callable[[], T_Result],
     ) -> T_Result:
         if self is StatusEnum.PENDING:
             return pending()
@@ -31,3 +33,5 @@ class StatusEnum(str, enum.Enum):
             return error()
         if self is StatusEnum.PARTIAL_SUCCESS:
             return partial_success()
+        if self is StatusEnum.CANCELLED:
+            return cancelled()
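Because visit() gains a required cancelled keyword, this is a breaking change for any code that pattern-matches a StatusEnum through visit(): 0.1.6-era call sites raise TypeError after upgrading until they handle the new member. A sketch, assuming StatusEnum is importable from the package root:

from llama_cloud import StatusEnum

is_terminal = StatusEnum.CANCELLED.visit(
    pending=lambda: False,
    success=lambda: True,
    error=lambda: True,
    partial_success=lambda: True,
    cancelled=lambda: True,  # new required callback in 0.1.7a1
)
assert is_terminal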
llama_cloud/types/supported_llm_model_names.py
CHANGED
@@ -13,6 +13,7 @@ class SupportedLlmModelNames(str, enum.Enum):
     GPT_4_O = "GPT_4O"
     GPT_4_O_MINI = "GPT_4O_MINI"
     AZURE_OPENAI = "AZURE_OPENAI"
+    CLAUDE_3_5_SONNET = "CLAUDE_3_5_SONNET"

     def visit(
         self,
@@ -22,6 +23,7 @@ class SupportedLlmModelNames(str, enum.Enum):
         gpt_4_o: typing.Callable[[], T_Result],
         gpt_4_o_mini: typing.Callable[[], T_Result],
         azure_openai: typing.Callable[[], T_Result],
+        claude_3_5_sonnet: typing.Callable[[], T_Result],
     ) -> T_Result:
         if self is SupportedLlmModelNames.GPT_3_5_TURBO:
             return gpt_3_5_turbo()
@@ -35,3 +37,5 @@ class SupportedLlmModelNames(str, enum.Enum):
             return gpt_4_o_mini()
         if self is SupportedLlmModelNames.AZURE_OPENAI:
             return azure_openai()
+        if self is SupportedLlmModelNames.CLAUDE_3_5_SONNET:
+            return claude_3_5_sonnet()
llama_cloud/types/text_block.py
ADDED
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class TextBlock(pydantic.BaseModel):
+    text: str
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
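All of the generated models share the same pydantic v1 Config: frozen = True makes instances immutable and hashable, smart_union avoids lossy coercion across Union members, and json_encoders handles datetimes. A small sketch of the frozen behavior using TextBlock (top-level import path assumed):

from llama_cloud import TextBlock

block = TextBlock(text="hello")

try:
    block.text = "world"  # pydantic v1 rejects mutation of frozen models
except TypeError:
    print("immutable")

unique_blocks = {block}  # frozen models are hashable, so sets and dict keys work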
llama_cloud/types/text_node.py
CHANGED
@@ -16,8 +16,15 @@ except ImportError:


 class TextNode(pydantic.BaseModel):
+    """
+    Provided for backward compatibility.
+
+    Note: we keep the field with the typo "seperator" to maintain backward compatibility for
+    serialized objects.
+    """
+
     id: typing.Optional[str] = pydantic.Field(alias="id_", description="Unique ID of the node.")
-    embedding: typing.Optional[typing.List[float]]
+    embedding: typing.Optional[typing.List[float]]
     extra_info: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(
         description="A flat dictionary of metadata fields"
     )
@@ -30,19 +37,19 @@ class TextNode(pydantic.BaseModel):
     relationships: typing.Optional[typing.Dict[str, TextNodeRelationshipsValue]] = pydantic.Field(
         description="A mapping of relationships to other node information."
     )
-    text: typing.Optional[str] = pydantic.Field(description="Text content of the node.")
-    mimetype: typing.Optional[str] = pydantic.Field(description="MIME type of the node content.")
-    start_char_idx: typing.Optional[int] = pydantic.Field(description="Start char index of the node.")
-    end_char_idx: typing.Optional[int] = pydantic.Field(description="End char index of the node.")
-    text_template: typing.Optional[str] = pydantic.Field(
-        description="Template for how text is formatted, with {content} and {metadata_str} placeholders."
-    )
     metadata_template: typing.Optional[str] = pydantic.Field(
         description="Template for how metadata is formatted, with {key} and {value} placeholders."
     )
     metadata_seperator: typing.Optional[str] = pydantic.Field(
         description="Separator between metadata fields when converting to string."
     )
+    text: typing.Optional[str] = pydantic.Field(description="Text content of the node.")
+    mimetype: typing.Optional[str] = pydantic.Field(description="MIME type of the node content.")
+    start_char_idx: typing.Optional[int]
+    end_char_idx: typing.Optional[int]
+    text_template: typing.Optional[str] = pydantic.Field(
+        description="Template for how text is formatted, with {content} and {metadata_str} placeholders."
+    )
     class_name: typing.Optional[str]

     def json(self, **kwargs: typing.Any) -> str:
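Beyond the new docstring pinning the historical "seperator" misspelling, note that id carries alias="id_": with the Config shown (no allow_population_by_field_name), pydantic v1 expects the alias at construction time, and the overridden dict() emits it back out. A sketch, assuming TextNode is importable from the package root:

from llama_cloud import TextNode

node = TextNode(id_="node-1", text="some text")  # construct via the alias

payload = node.dict()  # by_alias=True is injected, so the wire name round-trips
assert payload["id_"] == "node-1"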
llama_cloud/types/token_text_splitter.py
CHANGED
@@ -24,7 +24,7 @@ class TokenTextSplitter(pydantic.BaseModel):
     )
     include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
     callback_manager: typing.Optional[typing.Any]
-    id_func: typing.Optional[str]
+    id_func: typing.Optional[str]
     chunk_size: typing.Optional[int] = pydantic.Field(description="The token chunk size for each chunk.")
     chunk_overlap: typing.Optional[int] = pydantic.Field(description="The token overlap of each chunk when splitting.")
     separator: typing.Optional[str] = pydantic.Field(description="Default separator for splitting into words")
llama_cloud/types/usage_metric_response.py
ADDED
@@ -0,0 +1,34 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class UsageMetricResponse(pydantic.BaseModel):
+    feature_usage: typing.Dict[str, typing.Any]
+    day: str
+    source: str
+    job_id: str
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/user_job_record.py
ADDED
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class UserJobRecord(pydantic.BaseModel):
+    id: str = pydantic.Field(description="The user id from who triggered the job")
+    name: str = pydantic.Field(description="The name of the user")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/user_organization.py
CHANGED
@@ -21,20 +21,16 @@ class UserOrganization(pydantic.BaseModel):
     """

     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime]
-    updated_at: typing.Optional[dt.datetime]
+    created_at: typing.Optional[dt.datetime]
+    updated_at: typing.Optional[dt.datetime]
     email: str = pydantic.Field(description="The user's email address.")
-    user_id: typing.Optional[str]
+    user_id: typing.Optional[str]
     organization_id: str = pydantic.Field(description="The organization's ID.")
     pending: typing.Optional[bool] = pydantic.Field(
         description="Whether the user's membership is pending account signup."
     )
-    invited_by_user_id: typing.Optional[str] = pydantic.Field(
-
-    )
-    invited_by_user_email: typing.Optional[str] = pydantic.Field(
-        description="The email address of the user who added the user to the organization."
-    )
+    invited_by_user_id: typing.Optional[str]
+    invited_by_user_email: typing.Optional[str]
     roles: typing.List[UserOrganizationRole] = pydantic.Field(description="The roles of the user in the organization.")

     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/user_organization_create.py
CHANGED
@@ -19,10 +19,10 @@ class UserOrganizationCreate(pydantic.BaseModel):
     Schema for creating a user's membership to an organization.
     """

-    user_id: typing.Optional[str]
-    email: typing.Optional[str]
-    project_ids: typing.Optional[typing.List[str]]
-    role_id: typing.Optional[str]
+    user_id: typing.Optional[str]
+    email: typing.Optional[str]
+    project_ids: typing.Optional[typing.List[str]]
+    role_id: typing.Optional[str]

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/user_organization_delete.py
CHANGED
@@ -19,8 +19,8 @@ class UserOrganizationDelete(pydantic.BaseModel):
     Schema for deleting a user's membership to an organization.
     """

-    user_id: typing.Optional[str]
-    email: typing.Optional[str]
+    user_id: typing.Optional[str]
+    email: typing.Optional[str]

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
|
|
|
21
21
|
"""
|
|
22
22
|
|
|
23
23
|
id: str = pydantic.Field(description="Unique identifier")
|
|
24
|
-
created_at: typing.Optional[dt.datetime]
|
|
25
|
-
updated_at: typing.Optional[dt.datetime]
|
|
24
|
+
created_at: typing.Optional[dt.datetime]
|
|
25
|
+
updated_at: typing.Optional[dt.datetime]
|
|
26
26
|
user_id: str = pydantic.Field(description="The user's ID.")
|
|
27
27
|
organization_id: str = pydantic.Field(description="The organization's ID.")
|
|
28
28
|
role_id: str = pydantic.Field(description="The role's ID.")
|