lightningrod_ai-0.1.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lightningrod/__init__.py +66 -0
- lightningrod/_display.py +204 -0
- lightningrod/_errors.py +67 -0
- lightningrod/_generated/__init__.py +8 -0
- lightningrod/_generated/api/__init__.py +1 -0
- lightningrod/_generated/api/datasets/__init__.py +1 -0
- lightningrod/_generated/api/datasets/create_dataset_datasets_post.py +133 -0
- lightningrod/_generated/api/datasets/get_dataset_datasets_dataset_id_get.py +168 -0
- lightningrod/_generated/api/datasets/get_dataset_samples_datasets_dataset_id_samples_get.py +209 -0
- lightningrod/_generated/api/datasets/upload_samples_datasets_dataset_id_samples_post.py +190 -0
- lightningrod/_generated/api/file_sets/__init__.py +1 -0
- lightningrod/_generated/api/file_sets/add_file_to_set_filesets_file_set_id_files_post.py +190 -0
- lightningrod/_generated/api/file_sets/create_file_set_filesets_post.py +174 -0
- lightningrod/_generated/api/file_sets/get_file_set_filesets_file_set_id_get.py +168 -0
- lightningrod/_generated/api/file_sets/list_file_sets_filesets_get.py +173 -0
- lightningrod/_generated/api/file_sets/list_files_in_set_filesets_file_set_id_files_get.py +209 -0
- lightningrod/_generated/api/files/__init__.py +1 -0
- lightningrod/_generated/api/files/create_file_upload_files_post.py +174 -0
- lightningrod/_generated/api/open_ai_compatible/__init__.py +1 -0
- lightningrod/_generated/api/open_ai_compatible/chat_completions_openai_chat_completions_post.py +174 -0
- lightningrod/_generated/api/organizations/__init__.py +1 -0
- lightningrod/_generated/api/organizations/get_balance_organizations_balance_get.py +131 -0
- lightningrod/_generated/api/samples/__init__.py +1 -0
- lightningrod/_generated/api/samples/validate_sample_samples_validate_post.py +174 -0
- lightningrod/_generated/api/transform_jobs/__init__.py +1 -0
- lightningrod/_generated/api/transform_jobs/cost_estimation_transform_jobs_cost_estimation_post.py +174 -0
- lightningrod/_generated/api/transform_jobs/create_transform_job_transform_jobs_post.py +174 -0
- lightningrod/_generated/api/transform_jobs/get_transform_job_metrics_transform_jobs_job_id_metrics_get.py +172 -0
- lightningrod/_generated/api/transform_jobs/get_transform_job_transform_jobs_job_id_get.py +168 -0
- lightningrod/_generated/client.py +268 -0
- lightningrod/_generated/errors.py +16 -0
- lightningrod/_generated/models/__init__.py +147 -0
- lightningrod/_generated/models/answer_type.py +129 -0
- lightningrod/_generated/models/answer_type_enum.py +11 -0
- lightningrod/_generated/models/balance_response.py +61 -0
- lightningrod/_generated/models/chat_completion_request.py +216 -0
- lightningrod/_generated/models/chat_completion_response.py +146 -0
- lightningrod/_generated/models/chat_message.py +69 -0
- lightningrod/_generated/models/choice.py +97 -0
- lightningrod/_generated/models/create_dataset_response.py +61 -0
- lightningrod/_generated/models/create_file_set_file_request.py +101 -0
- lightningrod/_generated/models/create_file_set_file_request_metadata_type_0.py +46 -0
- lightningrod/_generated/models/create_file_set_request.py +83 -0
- lightningrod/_generated/models/create_file_upload_request.py +91 -0
- lightningrod/_generated/models/create_file_upload_response.py +165 -0
- lightningrod/_generated/models/create_file_upload_response_metadata_type_0.py +46 -0
- lightningrod/_generated/models/create_transform_job_request.py +312 -0
- lightningrod/_generated/models/dataset_metadata.py +69 -0
- lightningrod/_generated/models/estimate_cost_request.py +243 -0
- lightningrod/_generated/models/estimate_cost_response.py +117 -0
- lightningrod/_generated/models/event_usage_summary.py +80 -0
- lightningrod/_generated/models/file_set.py +128 -0
- lightningrod/_generated/models/file_set_file.py +203 -0
- lightningrod/_generated/models/file_set_file_metadata_type_0.py +57 -0
- lightningrod/_generated/models/file_set_query_seed_generator.py +136 -0
- lightningrod/_generated/models/file_set_seed_generator.py +126 -0
- lightningrod/_generated/models/filter_criteria.py +83 -0
- lightningrod/_generated/models/forward_looking_question.py +130 -0
- lightningrod/_generated/models/forward_looking_question_generator.py +217 -0
- lightningrod/_generated/models/gdelt_seed_generator.py +103 -0
- lightningrod/_generated/models/http_validation_error.py +79 -0
- lightningrod/_generated/models/job_usage.py +185 -0
- lightningrod/_generated/models/job_usage_by_step_type_0.py +59 -0
- lightningrod/_generated/models/label.py +143 -0
- lightningrod/_generated/models/list_file_set_files_response.py +113 -0
- lightningrod/_generated/models/list_file_sets_response.py +75 -0
- lightningrod/_generated/models/llm_model_usage_summary.py +98 -0
- lightningrod/_generated/models/mock_transform_config.py +243 -0
- lightningrod/_generated/models/mock_transform_config_metadata_additions.py +46 -0
- lightningrod/_generated/models/model_config.py +316 -0
- lightningrod/_generated/models/model_source_type.py +16 -0
- lightningrod/_generated/models/news_context.py +82 -0
- lightningrod/_generated/models/news_context_generator.py +127 -0
- lightningrod/_generated/models/news_seed_generator.py +220 -0
- lightningrod/_generated/models/paginated_samples_response.py +113 -0
- lightningrod/_generated/models/pipeline_metrics_response.py +99 -0
- lightningrod/_generated/models/question.py +74 -0
- lightningrod/_generated/models/question_and_label_generator.py +217 -0
- lightningrod/_generated/models/question_generator.py +217 -0
- lightningrod/_generated/models/question_pipeline.py +417 -0
- lightningrod/_generated/models/question_renderer.py +123 -0
- lightningrod/_generated/models/rag_context.py +82 -0
- lightningrod/_generated/models/response_message.py +69 -0
- lightningrod/_generated/models/rollout.py +130 -0
- lightningrod/_generated/models/rollout_generator.py +139 -0
- lightningrod/_generated/models/rollout_parsed_output_type_0.py +46 -0
- lightningrod/_generated/models/sample.py +323 -0
- lightningrod/_generated/models/sample_meta.py +46 -0
- lightningrod/_generated/models/seed.py +135 -0
- lightningrod/_generated/models/step_cost_breakdown.py +109 -0
- lightningrod/_generated/models/transform_job.py +268 -0
- lightningrod/_generated/models/transform_job_status.py +11 -0
- lightningrod/_generated/models/transform_step_metrics_response.py +131 -0
- lightningrod/_generated/models/transform_type.py +25 -0
- lightningrod/_generated/models/upload_samples_request.py +75 -0
- lightningrod/_generated/models/upload_samples_response.py +69 -0
- lightningrod/_generated/models/usage.py +77 -0
- lightningrod/_generated/models/usage_summary.py +102 -0
- lightningrod/_generated/models/usage_summary_events.py +59 -0
- lightningrod/_generated/models/usage_summary_llm_by_model.py +59 -0
- lightningrod/_generated/models/validate_sample_response.py +69 -0
- lightningrod/_generated/models/validation_error.py +90 -0
- lightningrod/_generated/models/web_search_labeler.py +120 -0
- lightningrod/_generated/py.typed +1 -0
- lightningrod/_generated/types.py +54 -0
- lightningrod/client.py +48 -0
- lightningrod/datasets/__init__.py +4 -0
- lightningrod/datasets/client.py +174 -0
- lightningrod/datasets/dataset.py +255 -0
- lightningrod/files/__init__.py +0 -0
- lightningrod/files/client.py +58 -0
- lightningrod/filesets/__init__.py +0 -0
- lightningrod/filesets/client.py +106 -0
- lightningrod/organization/__init__.py +0 -0
- lightningrod/organization/client.py +17 -0
- lightningrod/py.typed +0 -0
- lightningrod/transforms/__init__.py +0 -0
- lightningrod/transforms/client.py +154 -0
- lightningrod_ai-0.1.6.dist-info/METADATA +122 -0
- lightningrod_ai-0.1.6.dist-info/RECORD +123 -0
- lightningrod_ai-0.1.6.dist-info/WHEEL +5 -0
- lightningrod_ai-0.1.6.dist-info/licenses/LICENSE +23 -0
- lightningrod_ai-0.1.6.dist-info/top_level.txt +1 -0
lightningrod/_generated/models/file_set_query_seed_generator.py
@@ -0,0 +1,136 @@
+from __future__ import annotations
+
+from collections.abc import Mapping
+from typing import Any, Literal, TypeVar, cast
+
+from attrs import define as _attrs_define
+from attrs import field as _attrs_field
+
+from ..types import UNSET, Unset
+
+T = TypeVar("T", bound="FileSetQuerySeedGenerator")
+
+
+@_attrs_define
+class FileSetQuerySeedGenerator:
+    """Configuration for FileSet Query Seed Generator transform.
+
+    Attributes:
+        file_set_id (str): FileSet ID to query
+        prompts (list[str]): List of queries to run against the fileset
+        config_type (Literal['FILESET_QUERY_SEED_GENERATOR'] | Unset): Type of transform configuration Default:
+            'FILESET_QUERY_SEED_GENERATOR'.
+        metadata_filters (list[str] | None | Unset): Optional list of AIP-160 metadata filters to select which documents
+            to process. Documents matching ANY filter will be included. (e.g., ["ticker='AAL'", "ticker='MSFT'"])
+        system_instruction (None | str | Unset): Optional system instruction for the Gemini model
+    """
+
+    file_set_id: str
+    prompts: list[str]
+    config_type: Literal["FILESET_QUERY_SEED_GENERATOR"] | Unset = "FILESET_QUERY_SEED_GENERATOR"
+    metadata_filters: list[str] | None | Unset = UNSET
+    system_instruction: None | str | Unset = UNSET
+    additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict)
+
+    def to_dict(self) -> dict[str, Any]:
+        file_set_id = self.file_set_id
+
+        prompts = self.prompts
+
+        config_type = self.config_type
+
+        metadata_filters: list[str] | None | Unset
+        if isinstance(self.metadata_filters, Unset):
+            metadata_filters = UNSET
+        elif isinstance(self.metadata_filters, list):
+            metadata_filters = self.metadata_filters
+
+        else:
+            metadata_filters = self.metadata_filters
+
+        system_instruction: None | str | Unset
+        if isinstance(self.system_instruction, Unset):
+            system_instruction = UNSET
+        else:
+            system_instruction = self.system_instruction
+
+        field_dict: dict[str, Any] = {}
+        field_dict.update(self.additional_properties)
+        field_dict.update(
+            {
+                "file_set_id": file_set_id,
+                "prompts": prompts,
+            }
+        )
+        if config_type is not UNSET:
+            field_dict["config_type"] = config_type
+        if metadata_filters is not UNSET:
+            field_dict["metadata_filters"] = metadata_filters
+        if system_instruction is not UNSET:
+            field_dict["system_instruction"] = system_instruction
+
+        return field_dict
+
+    @classmethod
+    def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T:
+        d = dict(src_dict)
+        file_set_id = d.pop("file_set_id")
+
+        prompts = cast(list[str], d.pop("prompts"))
+
+        config_type = cast(Literal["FILESET_QUERY_SEED_GENERATOR"] | Unset, d.pop("config_type", UNSET))
+        if config_type != "FILESET_QUERY_SEED_GENERATOR" and not isinstance(config_type, Unset):
+            raise ValueError(f"config_type must match const 'FILESET_QUERY_SEED_GENERATOR', got '{config_type}'")
+
+        def _parse_metadata_filters(data: object) -> list[str] | None | Unset:
+            if data is None:
+                return data
+            if isinstance(data, Unset):
+                return data
+            try:
+                if not isinstance(data, list):
+                    raise TypeError()
+                metadata_filters_type_0 = cast(list[str], data)
+
+                return metadata_filters_type_0
+            except (TypeError, ValueError, AttributeError, KeyError):
+                pass
+            return cast(list[str] | None | Unset, data)
+
+        metadata_filters = _parse_metadata_filters(d.pop("metadata_filters", UNSET))
+
+        def _parse_system_instruction(data: object) -> None | str | Unset:
+            if data is None:
+                return data
+            if isinstance(data, Unset):
+                return data
+            return cast(None | str | Unset, data)
+
+        system_instruction = _parse_system_instruction(d.pop("system_instruction", UNSET))
+
+        file_set_query_seed_generator = cls(
+            file_set_id=file_set_id,
+            prompts=prompts,
+            config_type=config_type,
+            metadata_filters=metadata_filters,
+            system_instruction=system_instruction,
+        )
+
+        file_set_query_seed_generator.additional_properties = d
+        return file_set_query_seed_generator
+
+    @property
+    def additional_keys(self) -> list[str]:
+        return list(self.additional_properties.keys())
+
+    def __getitem__(self, key: str) -> Any:
+        return self.additional_properties[key]
+
+    def __setitem__(self, key: str, value: Any) -> None:
+        self.additional_properties[key] = value
+
+    def __delitem__(self, key: str) -> None:
+        del self.additional_properties[key]
+
+    def __contains__(self, key: str) -> bool:
+        return key in self.additional_properties
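Note: the generated models above follow a common attrs pattern — optional fields default to the UNSET sentinel and are dropped from to_dict() output, while unknown JSON keys survive a from_dict() round trip via additional_properties. A minimal sketch of that round trip (import paths follow the package layout listed above; the ID and prompt are hypothetical):

from lightningrod._generated.models.file_set_query_seed_generator import FileSetQuerySeedGenerator

# Hypothetical fileset ID and prompt, for illustration only.
gen = FileSetQuerySeedGenerator(
    file_set_id="fs_123",
    prompts=["What guidance did management give for next quarter?"],
)

payload = gen.to_dict()
# metadata_filters and system_instruction were left UNSET, so they are omitted.
assert "metadata_filters" not in payload and "system_instruction" not in payload

# from_dict() restores the object; keys the model does not declare are kept
# in additional_properties instead of being discarded.
restored = FileSetQuerySeedGenerator.from_dict({**payload, "extra_key": "kept"})
assert restored["extra_key"] == "kept"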
lightningrod/_generated/models/file_set_seed_generator.py
@@ -0,0 +1,126 @@
+from __future__ import annotations
+
+from collections.abc import Mapping
+from typing import Any, Literal, TypeVar, cast
+
+from attrs import define as _attrs_define
+from attrs import field as _attrs_field
+
+from ..types import UNSET, Unset
+
+T = TypeVar("T", bound="FileSetSeedGenerator")
+
+
+@_attrs_define
+class FileSetSeedGenerator:
+    """Configuration for FileSet Seed Generator transform.
+
+    Attributes:
+        file_set_id (str): FileSet ID to read files from
+        config_type (Literal['FILESET_SEED_GENERATOR'] | Unset): Type of transform configuration Default:
+            'FILESET_SEED_GENERATOR'.
+        chunk_size (int | Unset): Number of characters per chunk Default: 4000.
+        chunk_overlap (int | Unset): Number of overlapping characters between consecutive chunks Default: 200.
+        metadata_filters (list[str] | None | Unset): Optional list of metadata filters to select which files to process.
+            Files matching ANY filter will be included. (e.g., ["ticker='AAL'", "ticker='MSFT'"])
+    """
+
+    file_set_id: str
+    config_type: Literal["FILESET_SEED_GENERATOR"] | Unset = "FILESET_SEED_GENERATOR"
+    chunk_size: int | Unset = 4000
+    chunk_overlap: int | Unset = 200
+    metadata_filters: list[str] | None | Unset = UNSET
+    additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict)
+
+    def to_dict(self) -> dict[str, Any]:
+        file_set_id = self.file_set_id
+
+        config_type = self.config_type
+
+        chunk_size = self.chunk_size
+
+        chunk_overlap = self.chunk_overlap
+
+        metadata_filters: list[str] | None | Unset
+        if isinstance(self.metadata_filters, Unset):
+            metadata_filters = UNSET
+        elif isinstance(self.metadata_filters, list):
+            metadata_filters = self.metadata_filters
+
+        else:
+            metadata_filters = self.metadata_filters
+
+        field_dict: dict[str, Any] = {}
+        field_dict.update(self.additional_properties)
+        field_dict.update(
+            {
+                "file_set_id": file_set_id,
+            }
+        )
+        if config_type is not UNSET:
+            field_dict["config_type"] = config_type
+        if chunk_size is not UNSET:
+            field_dict["chunk_size"] = chunk_size
+        if chunk_overlap is not UNSET:
+            field_dict["chunk_overlap"] = chunk_overlap
+        if metadata_filters is not UNSET:
+            field_dict["metadata_filters"] = metadata_filters
+
+        return field_dict
+
+    @classmethod
+    def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T:
+        d = dict(src_dict)
+        file_set_id = d.pop("file_set_id")
+
+        config_type = cast(Literal["FILESET_SEED_GENERATOR"] | Unset, d.pop("config_type", UNSET))
+        if config_type != "FILESET_SEED_GENERATOR" and not isinstance(config_type, Unset):
+            raise ValueError(f"config_type must match const 'FILESET_SEED_GENERATOR', got '{config_type}'")
+
+        chunk_size = d.pop("chunk_size", UNSET)
+
+        chunk_overlap = d.pop("chunk_overlap", UNSET)
+
+        def _parse_metadata_filters(data: object) -> list[str] | None | Unset:
+            if data is None:
+                return data
+            if isinstance(data, Unset):
+                return data
+            try:
+                if not isinstance(data, list):
+                    raise TypeError()
+                metadata_filters_type_0 = cast(list[str], data)
+
+                return metadata_filters_type_0
+            except (TypeError, ValueError, AttributeError, KeyError):
+                pass
+            return cast(list[str] | None | Unset, data)
+
+        metadata_filters = _parse_metadata_filters(d.pop("metadata_filters", UNSET))
+
+        file_set_seed_generator = cls(
+            file_set_id=file_set_id,
+            config_type=config_type,
+            chunk_size=chunk_size,
+            chunk_overlap=chunk_overlap,
+            metadata_filters=metadata_filters,
+        )
+
+        file_set_seed_generator.additional_properties = d
+        return file_set_seed_generator
+
+    @property
+    def additional_keys(self) -> list[str]:
+        return list(self.additional_properties.keys())
+
+    def __getitem__(self, key: str) -> Any:
+        return self.additional_properties[key]
+
+    def __setitem__(self, key: str, value: Any) -> None:
+        self.additional_properties[key] = value
+
+    def __delitem__(self, key: str) -> None:
+        del self.additional_properties[key]
+
+    def __contains__(self, key: str) -> bool:
+        return key in self.additional_properties
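Note: FileSetSeedGenerator differs slightly from the query variant — chunk_size and chunk_overlap default to concrete values (4000 and 200) rather than UNSET, so they are always present in the serialized dict even when the caller never sets them. A small sketch (the fileset ID is hypothetical):

from lightningrod._generated.models.file_set_seed_generator import FileSetSeedGenerator

gen = FileSetSeedGenerator(file_set_id="fs_123")  # hypothetical ID
payload = gen.to_dict()

# The chunking defaults are real values, not UNSET, so they are always emitted...
assert payload["chunk_size"] == 4000 and payload["chunk_overlap"] == 200
# ...while metadata_filters stays UNSET and is omitted entirely.
assert "metadata_filters" not in payload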
lightningrod/_generated/models/filter_criteria.py
@@ -0,0 +1,83 @@
+from __future__ import annotations
+
+from collections.abc import Mapping
+from typing import Any, TypeVar
+
+from attrs import define as _attrs_define
+from attrs import field as _attrs_field
+
+from ..types import UNSET, Unset
+
+T = TypeVar("T", bound="FilterCriteria")
+
+
+@_attrs_define
+class FilterCriteria:
+    """Reusable filter criteria for LLM-based content scoring and filtering.
+
+    Attributes:
+        rubric (str): Scoring rubric/prompt for evaluating content
+        min_score (float | Unset): Minimum score threshold Default: 0.5.
+        model_name (str | Unset): Name of the model (in openrouter) to use for scoring Default: 'google/gemini-3-flash-
+            preview'.
+    """
+
+    rubric: str
+    min_score: float | Unset = 0.5
+    model_name: str | Unset = "google/gemini-3-flash-preview"
+    additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict)
+
+    def to_dict(self) -> dict[str, Any]:
+        rubric = self.rubric
+
+        min_score = self.min_score
+
+        model_name = self.model_name
+
+        field_dict: dict[str, Any] = {}
+        field_dict.update(self.additional_properties)
+        field_dict.update(
+            {
+                "rubric": rubric,
+            }
+        )
+        if min_score is not UNSET:
+            field_dict["min_score"] = min_score
+        if model_name is not UNSET:
+            field_dict["model_name"] = model_name
+
+        return field_dict
+
+    @classmethod
+    def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T:
+        d = dict(src_dict)
+        rubric = d.pop("rubric")
+
+        min_score = d.pop("min_score", UNSET)
+
+        model_name = d.pop("model_name", UNSET)
+
+        filter_criteria = cls(
+            rubric=rubric,
+            min_score=min_score,
+            model_name=model_name,
+        )
+
+        filter_criteria.additional_properties = d
+        return filter_criteria
+
+    @property
+    def additional_keys(self) -> list[str]:
+        return list(self.additional_properties.keys())
+
+    def __getitem__(self, key: str) -> Any:
+        return self.additional_properties[key]
+
+    def __setitem__(self, key: str, value: Any) -> None:
+        self.additional_properties[key] = value
+
+    def __delitem__(self, key: str) -> None:
+        del self.additional_properties[key]
+
+    def __contains__(self, key: str) -> bool:
+        return key in self.additional_properties
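Note: FilterCriteria is a small reusable sub-config (a rubric plus a score threshold and a scoring model) that other transform configs embed, such as the question generator further down. A quick sketch of overriding the threshold while keeping the default model (the rubric text is illustrative, not part of the package):

from lightningrod._generated.models.filter_criteria import FilterCriteria

criteria = FilterCriteria(
    rubric="Score 1.0 if the question has a single verifiable answer, else 0.0.",
    min_score=0.8,  # override the 0.5 default
)
assert criteria.to_dict() == {
    "rubric": criteria.rubric,
    "min_score": 0.8,
    "model_name": "google/gemini-3-flash-preview",  # default taken from the model above
}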
lightningrod/_generated/models/forward_looking_question.py
@@ -0,0 +1,130 @@
+from __future__ import annotations
+
+import datetime
+from collections.abc import Mapping
+from typing import Any, Literal, TypeVar, cast
+
+from attrs import define as _attrs_define
+from attrs import field as _attrs_field
+from dateutil.parser import isoparse
+
+from ..types import UNSET, Unset
+
+T = TypeVar("T", bound="ForwardLookingQuestion")
+
+
+@_attrs_define
+class ForwardLookingQuestion:
+    """
+    Attributes:
+        question_text (str):
+        date_close (datetime.datetime):
+        event_date (datetime.datetime):
+        resolution_criteria (str):
+        question_type (Literal['FORWARD_LOOKING_QUESTION'] | Unset): Default: 'FORWARD_LOOKING_QUESTION'.
+        prediction_date (datetime.datetime | None | Unset):
+    """
+
+    question_text: str
+    date_close: datetime.datetime
+    event_date: datetime.datetime
+    resolution_criteria: str
+    question_type: Literal["FORWARD_LOOKING_QUESTION"] | Unset = "FORWARD_LOOKING_QUESTION"
+    prediction_date: datetime.datetime | None | Unset = UNSET
+    additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict)
+
+    def to_dict(self) -> dict[str, Any]:
+        question_text = self.question_text
+
+        date_close = self.date_close.isoformat()
+
+        event_date = self.event_date.isoformat()
+
+        resolution_criteria = self.resolution_criteria
+
+        question_type = self.question_type
+
+        prediction_date: None | str | Unset
+        if isinstance(self.prediction_date, Unset):
+            prediction_date = UNSET
+        elif isinstance(self.prediction_date, datetime.datetime):
+            prediction_date = self.prediction_date.isoformat()
+        else:
+            prediction_date = self.prediction_date
+
+        field_dict: dict[str, Any] = {}
+        field_dict.update(self.additional_properties)
+        field_dict.update(
+            {
+                "question_text": question_text,
+                "date_close": date_close,
+                "event_date": event_date,
+                "resolution_criteria": resolution_criteria,
+            }
+        )
+        if question_type is not UNSET:
+            field_dict["question_type"] = question_type
+        if prediction_date is not UNSET:
+            field_dict["prediction_date"] = prediction_date
+
+        return field_dict
+
+    @classmethod
+    def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T:
+        d = dict(src_dict)
+        question_text = d.pop("question_text")
+
+        date_close = isoparse(d.pop("date_close"))
+
+        event_date = isoparse(d.pop("event_date"))
+
+        resolution_criteria = d.pop("resolution_criteria")
+
+        question_type = cast(Literal["FORWARD_LOOKING_QUESTION"] | Unset, d.pop("question_type", UNSET))
+        if question_type != "FORWARD_LOOKING_QUESTION" and not isinstance(question_type, Unset):
+            raise ValueError(f"question_type must match const 'FORWARD_LOOKING_QUESTION', got '{question_type}'")
+
+        def _parse_prediction_date(data: object) -> datetime.datetime | None | Unset:
+            if data is None:
+                return data
+            if isinstance(data, Unset):
+                return data
+            try:
+                if not isinstance(data, str):
+                    raise TypeError()
+                prediction_date_type_0 = isoparse(data)
+
+                return prediction_date_type_0
+            except (TypeError, ValueError, AttributeError, KeyError):
+                pass
+            return cast(datetime.datetime | None | Unset, data)
+
+        prediction_date = _parse_prediction_date(d.pop("prediction_date", UNSET))
+
+        forward_looking_question = cls(
+            question_text=question_text,
+            date_close=date_close,
+            event_date=event_date,
+            resolution_criteria=resolution_criteria,
+            question_type=question_type,
+            prediction_date=prediction_date,
+        )
+
+        forward_looking_question.additional_properties = d
+        return forward_looking_question
+
+    @property
+    def additional_keys(self) -> list[str]:
+        return list(self.additional_properties.keys())
+
+    def __getitem__(self, key: str) -> Any:
+        return self.additional_properties[key]
+
+    def __setitem__(self, key: str, value: Any) -> None:
+        self.additional_properties[key] = value
+
+    def __delitem__(self, key: str) -> None:
+        del self.additional_properties[key]
+
+    def __contains__(self, key: str) -> bool:
+        return key in self.additional_properties
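Note: ForwardLookingQuestion carries real datetime fields — to_dict() serializes them with isoformat() and from_dict() parses them back with dateutil's isoparse, so a round trip preserves the values. A minimal sketch with made-up dates and question text:

import datetime

from lightningrod._generated.models.forward_looking_question import ForwardLookingQuestion

q = ForwardLookingQuestion(
    question_text="Will the merger close before year end?",  # illustrative
    date_close=datetime.datetime(2025, 12, 31, tzinfo=datetime.timezone.utc),
    event_date=datetime.datetime(2026, 1, 15, tzinfo=datetime.timezone.utc),
    resolution_criteria="Resolves YES if the deal closes by the close date.",
)

payload = q.to_dict()
assert payload["date_close"] == "2025-12-31T00:00:00+00:00"  # isoformat() string

restored = ForwardLookingQuestion.from_dict(payload)
assert restored.date_close == q.date_close  # isoparse() restores the datetime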
lightningrod/_generated/models/forward_looking_question_generator.py
@@ -0,0 +1,217 @@
+from __future__ import annotations
+
+from collections.abc import Mapping
+from typing import TYPE_CHECKING, Any, Literal, TypeVar, cast
+
+from attrs import define as _attrs_define
+from attrs import field as _attrs_field
+
+from ..types import UNSET, Unset
+
+if TYPE_CHECKING:
+    from ..models.answer_type import AnswerType
+    from ..models.filter_criteria import FilterCriteria
+
+
+T = TypeVar("T", bound="ForwardLookingQuestionGenerator")
+
+
+@_attrs_define
+class ForwardLookingQuestionGenerator:
+    """
+    Attributes:
+        config_type (Literal['FORWARD_LOOKING_QUESTION_GENERATOR'] | Unset): Type of transform configuration Default:
+            'FORWARD_LOOKING_QUESTION_GENERATOR'.
+        instructions (None | str | Unset): Instructions for question generation. If not provided, uses sensible
+            defaults.
+        examples (list[str] | Unset): Example questions to guide generation
+        bad_examples (list[str] | Unset): Examples of questions to avoid
+        filter_ (FilterCriteria | list[FilterCriteria] | None | Unset): Optional filter criteria to apply after question
+            generation
+        questions_per_seed (int | Unset): Number of questions to generate per seed Default: 1.
+        include_default_filter (bool | Unset): Whether to include the default filter for generated questions Default:
+            False.
+        answer_type (AnswerType | None | Unset): The type of answer expected for generated questions
+    """
+
+    config_type: Literal["FORWARD_LOOKING_QUESTION_GENERATOR"] | Unset = "FORWARD_LOOKING_QUESTION_GENERATOR"
+    instructions: None | str | Unset = UNSET
+    examples: list[str] | Unset = UNSET
+    bad_examples: list[str] | Unset = UNSET
+    filter_: FilterCriteria | list[FilterCriteria] | None | Unset = UNSET
+    questions_per_seed: int | Unset = 1
+    include_default_filter: bool | Unset = False
+    answer_type: AnswerType | None | Unset = UNSET
+    additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict)
+
+    def to_dict(self) -> dict[str, Any]:
+        from ..models.answer_type import AnswerType
+        from ..models.filter_criteria import FilterCriteria
+
+        config_type = self.config_type
+
+        instructions: None | str | Unset
+        if isinstance(self.instructions, Unset):
+            instructions = UNSET
+        else:
+            instructions = self.instructions
+
+        examples: list[str] | Unset = UNSET
+        if not isinstance(self.examples, Unset):
+            examples = self.examples
+
+        bad_examples: list[str] | Unset = UNSET
+        if not isinstance(self.bad_examples, Unset):
+            bad_examples = self.bad_examples
+
+        filter_: dict[str, Any] | list[dict[str, Any]] | None | Unset
+        if isinstance(self.filter_, Unset):
+            filter_ = UNSET
+        elif isinstance(self.filter_, FilterCriteria):
+            filter_ = self.filter_.to_dict()
+        elif isinstance(self.filter_, list):
+            filter_ = []
+            for filter_type_1_item_data in self.filter_:
+                filter_type_1_item = filter_type_1_item_data.to_dict()
+                filter_.append(filter_type_1_item)
+
+        else:
+            filter_ = self.filter_
+
+        questions_per_seed = self.questions_per_seed
+
+        include_default_filter = self.include_default_filter
+
+        answer_type: dict[str, Any] | None | Unset
+        if isinstance(self.answer_type, Unset):
+            answer_type = UNSET
+        elif isinstance(self.answer_type, AnswerType):
+            answer_type = self.answer_type.to_dict()
+        else:
+            answer_type = self.answer_type
+
+        field_dict: dict[str, Any] = {}
+        field_dict.update(self.additional_properties)
+        field_dict.update({})
+        if config_type is not UNSET:
+            field_dict["config_type"] = config_type
+        if instructions is not UNSET:
+            field_dict["instructions"] = instructions
+        if examples is not UNSET:
+            field_dict["examples"] = examples
+        if bad_examples is not UNSET:
+            field_dict["bad_examples"] = bad_examples
+        if filter_ is not UNSET:
+            field_dict["filter"] = filter_
+        if questions_per_seed is not UNSET:
+            field_dict["questions_per_seed"] = questions_per_seed
+        if include_default_filter is not UNSET:
+            field_dict["include_default_filter"] = include_default_filter
+        if answer_type is not UNSET:
+            field_dict["answer_type"] = answer_type
+
+        return field_dict
+
+    @classmethod
+    def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T:
+        from ..models.answer_type import AnswerType
+        from ..models.filter_criteria import FilterCriteria
+
+        d = dict(src_dict)
+        config_type = cast(Literal["FORWARD_LOOKING_QUESTION_GENERATOR"] | Unset, d.pop("config_type", UNSET))
+        if config_type != "FORWARD_LOOKING_QUESTION_GENERATOR" and not isinstance(config_type, Unset):
+            raise ValueError(f"config_type must match const 'FORWARD_LOOKING_QUESTION_GENERATOR', got '{config_type}'")
+
+        def _parse_instructions(data: object) -> None | str | Unset:
+            if data is None:
+                return data
+            if isinstance(data, Unset):
+                return data
+            return cast(None | str | Unset, data)
+
+        instructions = _parse_instructions(d.pop("instructions", UNSET))
+
+        examples = cast(list[str], d.pop("examples", UNSET))
+
+        bad_examples = cast(list[str], d.pop("bad_examples", UNSET))
+
+        def _parse_filter_(data: object) -> FilterCriteria | list[FilterCriteria] | None | Unset:
+            if data is None:
+                return data
+            if isinstance(data, Unset):
+                return data
+            try:
+                if not isinstance(data, dict):
+                    raise TypeError()
+                filter_type_0 = FilterCriteria.from_dict(data)
+
+                return filter_type_0
+            except (TypeError, ValueError, AttributeError, KeyError):
+                pass
+            try:
+                if not isinstance(data, list):
+                    raise TypeError()
+                filter_type_1 = []
+                _filter_type_1 = data
+                for filter_type_1_item_data in _filter_type_1:
+                    filter_type_1_item = FilterCriteria.from_dict(filter_type_1_item_data)
+
+                    filter_type_1.append(filter_type_1_item)
+
+                return filter_type_1
+            except (TypeError, ValueError, AttributeError, KeyError):
+                pass
+            return cast(FilterCriteria | list[FilterCriteria] | None | Unset, data)
+
+        filter_ = _parse_filter_(d.pop("filter", UNSET))
+
+        questions_per_seed = d.pop("questions_per_seed", UNSET)
+
+        include_default_filter = d.pop("include_default_filter", UNSET)
+
+        def _parse_answer_type(data: object) -> AnswerType | None | Unset:
+            if data is None:
+                return data
+            if isinstance(data, Unset):
+                return data
+            try:
+                if not isinstance(data, dict):
+                    raise TypeError()
+                answer_type_type_0 = AnswerType.from_dict(data)
+
+                return answer_type_type_0
+            except (TypeError, ValueError, AttributeError, KeyError):
+                pass
+            return cast(AnswerType | None | Unset, data)
+
+        answer_type = _parse_answer_type(d.pop("answer_type", UNSET))
+
+        forward_looking_question_generator = cls(
+            config_type=config_type,
+            instructions=instructions,
+            examples=examples,
+            bad_examples=bad_examples,
+            filter_=filter_,
+            questions_per_seed=questions_per_seed,
+            include_default_filter=include_default_filter,
+            answer_type=answer_type,
+        )
+
+        forward_looking_question_generator.additional_properties = d
+        return forward_looking_question_generator
+
+    @property
+    def additional_keys(self) -> list[str]:
+        return list(self.additional_properties.keys())
+
+    def __getitem__(self, key: str) -> Any:
+        return self.additional_properties[key]
+
+    def __setitem__(self, key: str, value: Any) -> None:
+        self.additional_properties[key] = value
+
+    def __delitem__(self, key: str) -> None:
+        del self.additional_properties[key]
+
+    def __contains__(self, key: str) -> bool:
+        return key in self.additional_properties
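Note: ForwardLookingQuestionGenerator ties the pieces together — its filter_ attribute (serialized under the JSON key "filter") accepts either a single FilterCriteria or a list of them, and nested objects are expanded through their own to_dict()/from_dict(). A sketch of composing the two models; the rubric text and count are illustrative, not package defaults:

from lightningrod._generated.models.filter_criteria import FilterCriteria
from lightningrod._generated.models.forward_looking_question_generator import (
    ForwardLookingQuestionGenerator,
)

generator = ForwardLookingQuestionGenerator(
    questions_per_seed=3,
    filter_=[FilterCriteria(rubric="Reject questions that are already resolved.")],
)

payload = generator.to_dict()
# The attribute is named filter_ in Python; the serialized key is plain "filter".
assert "filter" in payload and "filter_" not in payload
assert payload["filter"][0]["rubric"].startswith("Reject")

# Parsing accepts either a single dict or a list of dicts for "filter".
restored = ForwardLookingQuestionGenerator.from_dict(payload)
assert isinstance(restored.filter_, list) and isinstance(restored.filter_[0], FilterCriteria)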