lightningrod-ai 0.1.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lightningrod/__init__.py +66 -0
- lightningrod/_display.py +204 -0
- lightningrod/_errors.py +67 -0
- lightningrod/_generated/__init__.py +8 -0
- lightningrod/_generated/api/__init__.py +1 -0
- lightningrod/_generated/api/datasets/__init__.py +1 -0
- lightningrod/_generated/api/datasets/create_dataset_datasets_post.py +133 -0
- lightningrod/_generated/api/datasets/get_dataset_datasets_dataset_id_get.py +168 -0
- lightningrod/_generated/api/datasets/get_dataset_samples_datasets_dataset_id_samples_get.py +209 -0
- lightningrod/_generated/api/datasets/upload_samples_datasets_dataset_id_samples_post.py +190 -0
- lightningrod/_generated/api/file_sets/__init__.py +1 -0
- lightningrod/_generated/api/file_sets/add_file_to_set_filesets_file_set_id_files_post.py +190 -0
- lightningrod/_generated/api/file_sets/create_file_set_filesets_post.py +174 -0
- lightningrod/_generated/api/file_sets/get_file_set_filesets_file_set_id_get.py +168 -0
- lightningrod/_generated/api/file_sets/list_file_sets_filesets_get.py +173 -0
- lightningrod/_generated/api/file_sets/list_files_in_set_filesets_file_set_id_files_get.py +209 -0
- lightningrod/_generated/api/files/__init__.py +1 -0
- lightningrod/_generated/api/files/create_file_upload_files_post.py +174 -0
- lightningrod/_generated/api/open_ai_compatible/__init__.py +1 -0
- lightningrod/_generated/api/open_ai_compatible/chat_completions_openai_chat_completions_post.py +174 -0
- lightningrod/_generated/api/organizations/__init__.py +1 -0
- lightningrod/_generated/api/organizations/get_balance_organizations_balance_get.py +131 -0
- lightningrod/_generated/api/samples/__init__.py +1 -0
- lightningrod/_generated/api/samples/validate_sample_samples_validate_post.py +174 -0
- lightningrod/_generated/api/transform_jobs/__init__.py +1 -0
- lightningrod/_generated/api/transform_jobs/cost_estimation_transform_jobs_cost_estimation_post.py +174 -0
- lightningrod/_generated/api/transform_jobs/create_transform_job_transform_jobs_post.py +174 -0
- lightningrod/_generated/api/transform_jobs/get_transform_job_metrics_transform_jobs_job_id_metrics_get.py +172 -0
- lightningrod/_generated/api/transform_jobs/get_transform_job_transform_jobs_job_id_get.py +168 -0
- lightningrod/_generated/client.py +268 -0
- lightningrod/_generated/errors.py +16 -0
- lightningrod/_generated/models/__init__.py +147 -0
- lightningrod/_generated/models/answer_type.py +129 -0
- lightningrod/_generated/models/answer_type_enum.py +11 -0
- lightningrod/_generated/models/balance_response.py +61 -0
- lightningrod/_generated/models/chat_completion_request.py +216 -0
- lightningrod/_generated/models/chat_completion_response.py +146 -0
- lightningrod/_generated/models/chat_message.py +69 -0
- lightningrod/_generated/models/choice.py +97 -0
- lightningrod/_generated/models/create_dataset_response.py +61 -0
- lightningrod/_generated/models/create_file_set_file_request.py +101 -0
- lightningrod/_generated/models/create_file_set_file_request_metadata_type_0.py +46 -0
- lightningrod/_generated/models/create_file_set_request.py +83 -0
- lightningrod/_generated/models/create_file_upload_request.py +91 -0
- lightningrod/_generated/models/create_file_upload_response.py +165 -0
- lightningrod/_generated/models/create_file_upload_response_metadata_type_0.py +46 -0
- lightningrod/_generated/models/create_transform_job_request.py +312 -0
- lightningrod/_generated/models/dataset_metadata.py +69 -0
- lightningrod/_generated/models/estimate_cost_request.py +243 -0
- lightningrod/_generated/models/estimate_cost_response.py +117 -0
- lightningrod/_generated/models/event_usage_summary.py +80 -0
- lightningrod/_generated/models/file_set.py +128 -0
- lightningrod/_generated/models/file_set_file.py +203 -0
- lightningrod/_generated/models/file_set_file_metadata_type_0.py +57 -0
- lightningrod/_generated/models/file_set_query_seed_generator.py +136 -0
- lightningrod/_generated/models/file_set_seed_generator.py +126 -0
- lightningrod/_generated/models/filter_criteria.py +83 -0
- lightningrod/_generated/models/forward_looking_question.py +130 -0
- lightningrod/_generated/models/forward_looking_question_generator.py +217 -0
- lightningrod/_generated/models/gdelt_seed_generator.py +103 -0
- lightningrod/_generated/models/http_validation_error.py +79 -0
- lightningrod/_generated/models/job_usage.py +185 -0
- lightningrod/_generated/models/job_usage_by_step_type_0.py +59 -0
- lightningrod/_generated/models/label.py +143 -0
- lightningrod/_generated/models/list_file_set_files_response.py +113 -0
- lightningrod/_generated/models/list_file_sets_response.py +75 -0
- lightningrod/_generated/models/llm_model_usage_summary.py +98 -0
- lightningrod/_generated/models/mock_transform_config.py +243 -0
- lightningrod/_generated/models/mock_transform_config_metadata_additions.py +46 -0
- lightningrod/_generated/models/model_config.py +316 -0
- lightningrod/_generated/models/model_source_type.py +16 -0
- lightningrod/_generated/models/news_context.py +82 -0
- lightningrod/_generated/models/news_context_generator.py +127 -0
- lightningrod/_generated/models/news_seed_generator.py +220 -0
- lightningrod/_generated/models/paginated_samples_response.py +113 -0
- lightningrod/_generated/models/pipeline_metrics_response.py +99 -0
- lightningrod/_generated/models/question.py +74 -0
- lightningrod/_generated/models/question_and_label_generator.py +217 -0
- lightningrod/_generated/models/question_generator.py +217 -0
- lightningrod/_generated/models/question_pipeline.py +417 -0
- lightningrod/_generated/models/question_renderer.py +123 -0
- lightningrod/_generated/models/rag_context.py +82 -0
- lightningrod/_generated/models/response_message.py +69 -0
- lightningrod/_generated/models/rollout.py +130 -0
- lightningrod/_generated/models/rollout_generator.py +139 -0
- lightningrod/_generated/models/rollout_parsed_output_type_0.py +46 -0
- lightningrod/_generated/models/sample.py +323 -0
- lightningrod/_generated/models/sample_meta.py +46 -0
- lightningrod/_generated/models/seed.py +135 -0
- lightningrod/_generated/models/step_cost_breakdown.py +109 -0
- lightningrod/_generated/models/transform_job.py +268 -0
- lightningrod/_generated/models/transform_job_status.py +11 -0
- lightningrod/_generated/models/transform_step_metrics_response.py +131 -0
- lightningrod/_generated/models/transform_type.py +25 -0
- lightningrod/_generated/models/upload_samples_request.py +75 -0
- lightningrod/_generated/models/upload_samples_response.py +69 -0
- lightningrod/_generated/models/usage.py +77 -0
- lightningrod/_generated/models/usage_summary.py +102 -0
- lightningrod/_generated/models/usage_summary_events.py +59 -0
- lightningrod/_generated/models/usage_summary_llm_by_model.py +59 -0
- lightningrod/_generated/models/validate_sample_response.py +69 -0
- lightningrod/_generated/models/validation_error.py +90 -0
- lightningrod/_generated/models/web_search_labeler.py +120 -0
- lightningrod/_generated/py.typed +1 -0
- lightningrod/_generated/types.py +54 -0
- lightningrod/client.py +48 -0
- lightningrod/datasets/__init__.py +4 -0
- lightningrod/datasets/client.py +174 -0
- lightningrod/datasets/dataset.py +255 -0
- lightningrod/files/__init__.py +0 -0
- lightningrod/files/client.py +58 -0
- lightningrod/filesets/__init__.py +0 -0
- lightningrod/filesets/client.py +106 -0
- lightningrod/organization/__init__.py +0 -0
- lightningrod/organization/client.py +17 -0
- lightningrod/py.typed +0 -0
- lightningrod/transforms/__init__.py +0 -0
- lightningrod/transforms/client.py +154 -0
- lightningrod_ai-0.1.6.dist-info/METADATA +122 -0
- lightningrod_ai-0.1.6.dist-info/RECORD +123 -0
- lightningrod_ai-0.1.6.dist-info/WHEEL +5 -0
- lightningrod_ai-0.1.6.dist-info/licenses/LICENSE +23 -0
- lightningrod_ai-0.1.6.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,316 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from collections.abc import Mapping
|
|
4
|
+
from typing import Any, TypeVar, cast
|
|
5
|
+
|
|
6
|
+
from attrs import define as _attrs_define
|
|
7
|
+
from attrs import field as _attrs_field
|
|
8
|
+
|
|
9
|
+
from ..models.model_source_type import ModelSourceType
|
|
10
|
+
from ..types import UNSET, Unset
|
|
11
|
+
|
|
12
|
+
T = TypeVar("T", bound="ModelConfig")
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
@_attrs_define
class ModelConfig:
    """Configuration for the LLM backing a request or pipeline step.

    Unset fields are distinct from ``None``: ``UNSET`` means "not provided,
    omit from serialization", while ``None`` is an explicit null sent to the
    API.

    Attributes:
        model_name (str | Unset): Default: 'meta-llama/llama-3.3-70b-instruct'.
        model_source (ModelSourceType | Unset):
        temperature (float | Unset): Default: 1.0.
        max_tokens (int | None | Unset):
        ip_address (None | str | Unset):
        lora_base_model_name (None | str | Unset):
        lora_repo_path (None | str | Unset):
        lora_checkpoint_path (None | str | Unset):
        runpod_endpoint_id (None | str | Unset):
        is_lightningrod_model (bool | None | Unset):
        openrouter_provider (list[str] | None | Unset):
        reasoning_effort (None | str | Unset):
        is_reasoning_model (bool | None | Unset):
        disable_reasoning (bool | Unset): Default: False.
        use_pipeline_key (bool | Unset): Default: False.
    """

    model_name: str | Unset = "meta-llama/llama-3.3-70b-instruct"
    model_source: ModelSourceType | Unset = UNSET
    temperature: float | Unset = 1.0
    max_tokens: int | None | Unset = UNSET
    ip_address: None | str | Unset = UNSET
    lora_base_model_name: None | str | Unset = UNSET
    lora_repo_path: None | str | Unset = UNSET
    lora_checkpoint_path: None | str | Unset = UNSET
    runpod_endpoint_id: None | str | Unset = UNSET
    is_lightningrod_model: bool | None | Unset = UNSET
    openrouter_provider: list[str] | None | Unset = UNSET
    reasoning_effort: None | str | Unset = UNSET
    is_reasoning_model: bool | None | Unset = UNSET
    disable_reasoning: bool | Unset = False
    use_pipeline_key: bool | Unset = False
    # Keys found in the payload that are not declared fields; round-tripped
    # verbatim by to_dict()/from_dict().
    additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict)

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a JSON-ready dict, omitting fields left UNSET.

        ``additional_properties`` are written first so declared fields take
        precedence on key collisions (matching the generated-client
        convention of ``update(additional_properties)`` before the fields).

        Returns:
            dict[str, Any]: plain-data representation of this config.
        """
        # Enum values serialize as their raw string.
        model_source: str | Unset = UNSET
        if not isinstance(self.model_source, Unset):
            model_source = self.model_source.value

        field_dict: dict[str, Any] = {}
        field_dict.update(self.additional_properties)
        # Nullable fields pass through unchanged (None stays None); only
        # UNSET values are dropped from the output.
        for key, value in (
            ("model_name", self.model_name),
            ("model_source", model_source),
            ("temperature", self.temperature),
            ("max_tokens", self.max_tokens),
            ("ip_address", self.ip_address),
            ("lora_base_model_name", self.lora_base_model_name),
            ("lora_repo_path", self.lora_repo_path),
            ("lora_checkpoint_path", self.lora_checkpoint_path),
            ("runpod_endpoint_id", self.runpod_endpoint_id),
            ("is_lightningrod_model", self.is_lightningrod_model),
            ("openrouter_provider", self.openrouter_provider),
            ("reasoning_effort", self.reasoning_effort),
            ("is_reasoning_model", self.is_reasoning_model),
            ("disable_reasoning", self.disable_reasoning),
            ("use_pipeline_key", self.use_pipeline_key),
        ):
            if value is not UNSET:
                field_dict[key] = value

        return field_dict

    @classmethod
    def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T:
        """Deserialize a ModelConfig from a mapping.

        Known keys are popped into constructor arguments; whatever remains
        in the mapping is preserved in ``additional_properties``.

        Note: the previous generated per-field ``_parse_*`` helpers returned
        their input unchanged in every branch (the list check for
        ``openrouter_provider`` raised TypeError only to catch it and return
        the same value), so nullable fields are taken as-is here. Only
        ``model_source`` requires real conversion, into its enum type.

        Args:
            src_dict: payload mapping, e.g. a decoded JSON object.

        Returns:
            A new instance of this class.
        """
        d = dict(src_dict)

        _model_source = d.pop("model_source", UNSET)
        model_source: ModelSourceType | Unset
        if isinstance(_model_source, Unset):
            model_source = UNSET
        else:
            model_source = ModelSourceType(_model_source)

        model_config = cls(
            model_name=d.pop("model_name", UNSET),
            model_source=model_source,
            temperature=d.pop("temperature", UNSET),
            max_tokens=cast(int | None | Unset, d.pop("max_tokens", UNSET)),
            ip_address=cast(None | str | Unset, d.pop("ip_address", UNSET)),
            lora_base_model_name=cast(None | str | Unset, d.pop("lora_base_model_name", UNSET)),
            lora_repo_path=cast(None | str | Unset, d.pop("lora_repo_path", UNSET)),
            lora_checkpoint_path=cast(None | str | Unset, d.pop("lora_checkpoint_path", UNSET)),
            runpod_endpoint_id=cast(None | str | Unset, d.pop("runpod_endpoint_id", UNSET)),
            is_lightningrod_model=cast(bool | None | Unset, d.pop("is_lightningrod_model", UNSET)),
            openrouter_provider=cast(list[str] | None | Unset, d.pop("openrouter_provider", UNSET)),
            reasoning_effort=cast(None | str | Unset, d.pop("reasoning_effort", UNSET)),
            is_reasoning_model=cast(bool | None | Unset, d.pop("is_reasoning_model", UNSET)),
            disable_reasoning=d.pop("disable_reasoning", UNSET),
            use_pipeline_key=d.pop("use_pipeline_key", UNSET),
        )

        # Everything not popped above is an undeclared extra property.
        model_config.additional_properties = d
        return model_config

    @property
    def additional_keys(self) -> list[str]:
        """Names of the extra (undeclared) keys captured at deserialization."""
        return list(self.additional_properties.keys())

    def __getitem__(self, key: str) -> Any:
        return self.additional_properties[key]

    def __setitem__(self, key: str, value: Any) -> None:
        self.additional_properties[key] = value

    def __delitem__(self, key: str) -> None:
        del self.additional_properties[key]

    def __contains__(self, key: str) -> bool:
        return key in self.additional_properties
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
from enum import Enum
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class ModelSourceType(str, Enum):
    """Closed set of backends/providers a model can be served from.

    The member values are the exact wire strings the API exchanges; the
    ``str`` mixin lets instances compare equal to those raw strings.
    """

    DEEPINFRA = "DEEPINFRA"
    DEEPSEEK = "DEEPSEEK"
    HUGGINGFACE_ENDPOINTS = "HUGGINGFACE_ENDPOINTS"
    OPENAI = "OPENAI"
    OPEN_ROUTER = "OPEN_ROUTER"
    RUNPOD = "RUNPOD"
    SGLANG = "SGLANG"
    TINKER = "TINKER"
    VLLM = "VLLM"

    def __str__(self) -> str:
        # Render as the plain wire value (e.g. "VLLM"), not the default
        # "ModelSourceType.VLLM" enum repr-style string.
        return str(self.value)
|
|
@@ -0,0 +1,82 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from collections.abc import Mapping
|
|
4
|
+
from typing import Any, Literal, TypeVar, cast
|
|
5
|
+
|
|
6
|
+
from attrs import define as _attrs_define
|
|
7
|
+
from attrs import field as _attrs_field
|
|
8
|
+
|
|
9
|
+
from ..types import UNSET, Unset
|
|
10
|
+
|
|
11
|
+
T = TypeVar("T", bound="NewsContext")
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
@_attrs_define
class NewsContext:
    """A rendered block of news context tied to the search query that produced it.

    Attributes:
        rendered_context (str):
        search_query (str):
        context_type (Literal['NEWS_CONTEXT'] | Unset): Default: 'NEWS_CONTEXT'.
    """

    rendered_context: str
    search_query: str
    context_type: Literal["NEWS_CONTEXT"] | Unset = "NEWS_CONTEXT"
    # Undeclared payload keys, preserved across a to_dict/from_dict round trip.
    additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict)

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a plain dict; ``context_type`` is omitted when UNSET."""
        serialized: dict[str, Any] = dict(self.additional_properties)
        serialized["rendered_context"] = self.rendered_context
        serialized["search_query"] = self.search_query
        if self.context_type is not UNSET:
            serialized["context_type"] = self.context_type
        return serialized

    @classmethod
    def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T:
        """Build an instance from a mapping; leftover keys become extras.

        Raises:
            ValueError: if ``context_type`` is present but not the const
                'NEWS_CONTEXT'.
            KeyError: if a required key is missing.
        """
        data = dict(src_dict)
        rendered = data.pop("rendered_context")
        query = data.pop("search_query")

        const_type = cast(Literal["NEWS_CONTEXT"] | Unset, data.pop("context_type", UNSET))
        if not isinstance(const_type, Unset) and const_type != "NEWS_CONTEXT":
            raise ValueError(f"context_type must match const 'NEWS_CONTEXT', got '{const_type}'")

        instance = cls(
            rendered_context=rendered,
            search_query=query,
            context_type=const_type,
        )
        instance.additional_properties = data
        return instance

    @property
    def additional_keys(self) -> list[str]:
        """Names of the extra (undeclared) keys captured at deserialization."""
        return list(self.additional_properties.keys())

    def __getitem__(self, key: str) -> Any:
        return self.additional_properties[key]

    def __setitem__(self, key: str, value: Any) -> None:
        self.additional_properties[key] = value

    def __delitem__(self, key: str) -> None:
        del self.additional_properties[key]

    def __contains__(self, key: str) -> bool:
        return key in self.additional_properties
|
|
@@ -0,0 +1,127 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from collections.abc import Mapping
|
|
4
|
+
from typing import Any, Literal, TypeVar, cast
|
|
5
|
+
|
|
6
|
+
from attrs import define as _attrs_define
|
|
7
|
+
from attrs import field as _attrs_field
|
|
8
|
+
|
|
9
|
+
from ..types import UNSET, Unset
|
|
10
|
+
|
|
11
|
+
T = TypeVar("T", bound="NewsContextGenerator")
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
@_attrs_define
class NewsContextGenerator:
    """Configuration for the news-context generation transform step.

    Attributes:
        config_type (Literal['NEWS_CONTEXT_GENERATOR'] | Unset): Type of transform configuration Default:
            'NEWS_CONTEXT_GENERATOR'.
        num_search_queries (int | Unset): Number of search queries to generate per question Default: 5.
        articles_per_query (int | Unset): Number of news articles to return per search query Default: 5.
        num_articles (int | Unset): Maximum number of news articles to include in final output Default: 10.
        relevance_threshold (int | Unset): Minimum relevance rating (1-6 scale) to include article Default: 2.
        min_articles (int | Unset): Minimum number of articles to ensure Default: 6.
        time_delta_days (int | Unset): Number of days to look back for news articles Default: 30.
        enable_relevance_ranking (bool | Unset): Whether to perform LLM-based relevance ranking Default: True.
    """

    config_type: Literal["NEWS_CONTEXT_GENERATOR"] | Unset = "NEWS_CONTEXT_GENERATOR"
    num_search_queries: int | Unset = 5
    articles_per_query: int | Unset = 5
    num_articles: int | Unset = 10
    relevance_threshold: int | Unset = 2
    min_articles: int | Unset = 6
    time_delta_days: int | Unset = 30
    enable_relevance_ranking: bool | Unset = True
    # Undeclared payload keys, preserved across a to_dict/from_dict round trip.
    additional_properties: dict[str, Any] = _attrs_field(init=False, factory=dict)

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a plain dict, skipping any field left UNSET.

        Extras are merged first so declared fields win on key collisions.
        """
        declared: dict[str, Any] = {
            "config_type": self.config_type,
            "num_search_queries": self.num_search_queries,
            "articles_per_query": self.articles_per_query,
            "num_articles": self.num_articles,
            "relevance_threshold": self.relevance_threshold,
            "min_articles": self.min_articles,
            "time_delta_days": self.time_delta_days,
            "enable_relevance_ranking": self.enable_relevance_ranking,
        }
        serialized: dict[str, Any] = dict(self.additional_properties)
        for name, value in declared.items():
            if value is not UNSET:
                serialized[name] = value
        return serialized

    @classmethod
    def from_dict(cls: type[T], src_dict: Mapping[str, Any]) -> T:
        """Build an instance from a mapping; leftover keys become extras.

        Raises:
            ValueError: if ``config_type`` is present but not the const
                'NEWS_CONTEXT_GENERATOR'.
        """
        data = dict(src_dict)

        const_type = cast(Literal["NEWS_CONTEXT_GENERATOR"] | Unset, data.pop("config_type", UNSET))
        if not isinstance(const_type, Unset) and const_type != "NEWS_CONTEXT_GENERATOR":
            raise ValueError(f"config_type must match const 'NEWS_CONTEXT_GENERATOR', got '{const_type}'")

        generator = cls(
            config_type=const_type,
            num_search_queries=data.pop("num_search_queries", UNSET),
            articles_per_query=data.pop("articles_per_query", UNSET),
            num_articles=data.pop("num_articles", UNSET),
            relevance_threshold=data.pop("relevance_threshold", UNSET),
            min_articles=data.pop("min_articles", UNSET),
            time_delta_days=data.pop("time_delta_days", UNSET),
            enable_relevance_ranking=data.pop("enable_relevance_ranking", UNSET),
        )
        generator.additional_properties = data
        return generator

    @property
    def additional_keys(self) -> list[str]:
        """Names of the extra (undeclared) keys captured at deserialization."""
        return list(self.additional_properties.keys())

    def __getitem__(self, key: str) -> Any:
        return self.additional_properties[key]

    def __setitem__(self, key: str, value: Any) -> None:
        self.additional_properties[key] = value

    def __delitem__(self, key: str) -> None:
        del self.additional_properties[key]

    def __contains__(self, key: str) -> bool:
        return key in self.additional_properties
|