llama-cloud 0.1.14__py3-none-any.whl → 0.1.16__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of llama-cloud might be problematic.
- llama_cloud/__init__.py +8 -28
- llama_cloud/resources/evals/client.py +0 -643
- llama_cloud/resources/llama_extract/client.py +168 -6
- llama_cloud/resources/parsing/client.py +0 -8
- llama_cloud/resources/pipelines/client.py +10 -371
- llama_cloud/resources/projects/client.py +72 -923
- llama_cloud/resources/retrievers/client.py +124 -0
- llama_cloud/types/__init__.py +8 -28
- llama_cloud/types/chunk_mode.py +4 -0
- llama_cloud/types/extract_config.py +0 -3
- llama_cloud/types/{local_eval.py → extract_job_create_batch.py} +9 -14
- llama_cloud/types/extract_job_create_batch_data_schema_override.py +9 -0
- llama_cloud/types/extract_job_create_batch_data_schema_override_zero_value.py +7 -0
- llama_cloud/types/extract_mode.py +9 -1
- llama_cloud/types/llama_parse_parameters.py +0 -1
- llama_cloud/types/{local_eval_results.py → paginated_extract_runs_response.py} +7 -8
- llama_cloud/types/prompt_conf.py +1 -0
- llama_cloud/types/report_block.py +1 -0
- llama_cloud/types/struct_mode.py +4 -0
- llama_cloud/types/struct_parse_conf.py +6 -0
- llama_cloud/types/usage.py +2 -1
- {llama_cloud-0.1.14.dist-info → llama_cloud-0.1.16.dist-info}/METADATA +1 -1
- {llama_cloud-0.1.14.dist-info → llama_cloud-0.1.16.dist-info}/RECORD +25 -35
- llama_cloud/types/eval_dataset.py +0 -40
- llama_cloud/types/eval_dataset_job_params.py +0 -39
- llama_cloud/types/eval_dataset_job_record.py +0 -58
- llama_cloud/types/eval_execution_params_override.py +0 -37
- llama_cloud/types/eval_metric.py +0 -17
- llama_cloud/types/eval_question.py +0 -38
- llama_cloud/types/eval_question_create.py +0 -31
- llama_cloud/types/eval_question_result.py +0 -52
- llama_cloud/types/local_eval_sets.py +0 -33
- llama_cloud/types/metric_result.py +0 -33
- llama_cloud/types/prompt_mixin_prompts.py +0 -39
- llama_cloud/types/prompt_spec.py +0 -36
- {llama_cloud-0.1.14.dist-info → llama_cloud-0.1.16.dist-info}/LICENSE +0 -0
- {llama_cloud-0.1.14.dist-info → llama_cloud-0.1.16.dist-info}/WHEEL +0 -0
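The largest single removal above is `llama_cloud/resources/evals/client.py` (+0 -643): the evals dataset and question management methods are deleted, leaving only `list_supported_models` on the sync and async clients. For orientation, here is a minimal sketch of the calls that disappear, reconstructed from the docstring examples in the removed code shown in the diff below; none of these exist in 0.1.16.

```python
# Sketch of the 0.1.14 evals surface removed in 0.1.16,
# pieced together from the deleted docstring examples below.
from llama_cloud import EvalQuestionCreate
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

dataset = client.evals.get_dataset(dataset_id="string")          # removed
client.evals.update_dataset(dataset_id="string", name="string")  # removed
questions = client.evals.list_questions(dataset_id="string")     # removed
client.evals.create_question(
    dataset_id="string",
    request=EvalQuestionCreate(content="string"),
)                                                                # removed
client.evals.delete_question(question_id="string")               # removed
client.evals.delete_dataset(dataset_id="string")                 # removed
```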
```diff
--- a/llama_cloud/resources/evals/client.py
+++ b/llama_cloud/resources/evals/client.py
@@ -6,11 +6,7 @@ from json.decoder import JSONDecodeError
 
 from ...core.api_error import ApiError
 from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
-from ...core.jsonable_encoder import jsonable_encoder
 from ...errors.unprocessable_entity_error import UnprocessableEntityError
-from ...types.eval_dataset import EvalDataset
-from ...types.eval_question import EvalQuestion
-from ...types.eval_question_create import EvalQuestionCreate
 from ...types.http_validation_error import HttpValidationError
 from ...types.supported_llm_model import SupportedLlmModel
 
```
```diff
@@ -22,332 +18,11 @@ try:
 except ImportError:
     import pydantic  # type: ignore
 
-# this is used as the default value for optional parameters
-OMIT = typing.cast(typing.Any, ...)
-
 
 class EvalsClient:
     def __init__(self, *, client_wrapper: SyncClientWrapper):
         self._client_wrapper = client_wrapper
 
-    def get_dataset(self, dataset_id: str) -> EvalDataset:
-        """
-        Get a dataset by ID.
-
-        Parameters:
-            - dataset_id: str.
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.evals.get_dataset(
-            dataset_id="string",
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/evals/datasets/{dataset_id}"),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(EvalDataset, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    def update_dataset(self, dataset_id: str, *, name: str) -> EvalDataset:
-        """
-        Update a dataset.
-
-        Parameters:
-            - dataset_id: str.
-
-            - name: str. The name of the EvalDataset.
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.evals.update_dataset(
-            dataset_id="string",
-            name="string",
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "PUT",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/evals/datasets/{dataset_id}"),
-            json=jsonable_encoder({"name": name}),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(EvalDataset, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    def delete_dataset(self, dataset_id: str) -> None:
-        """
-        Delete a dataset.
-
-        Parameters:
-            - dataset_id: str.
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.evals.delete_dataset(
-            dataset_id="string",
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "DELETE",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/evals/datasets/{dataset_id}"),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    def list_questions(self, dataset_id: str) -> typing.List[EvalQuestion]:
-        """
-        List questions for a dataset.
-
-        Parameters:
-            - dataset_id: str.
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.evals.list_questions(
-            dataset_id="string",
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/evals/datasets/{dataset_id}/question"
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.List[EvalQuestion], _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    def create_question(self, dataset_id: str, *, request: EvalQuestionCreate) -> EvalQuestion:
-        """
-        Create a new question.
-
-        Parameters:
-            - dataset_id: str.
-
-            - request: EvalQuestionCreate.
-        ---
-        from llama_cloud import EvalQuestionCreate
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.evals.create_question(
-            dataset_id="string",
-            request=EvalQuestionCreate(
-                content="string",
-            ),
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/evals/datasets/{dataset_id}/question"
-            ),
-            json=jsonable_encoder(request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(EvalQuestion, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    def create_questions(
-        self, dataset_id: str, *, request: typing.List[EvalQuestionCreate]
-    ) -> typing.List[EvalQuestion]:
-        """
-        Create a new question.
-
-        Parameters:
-            - dataset_id: str.
-
-            - request: typing.List[EvalQuestionCreate].
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.evals.create_questions(
-            dataset_id="string",
-            request=[],
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/evals/datasets/{dataset_id}/questions"
-            ),
-            json=jsonable_encoder(request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.List[EvalQuestion], _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    def get_question(self, question_id: str) -> EvalQuestion:
-        """
-        Get a question by ID.
-
-        Parameters:
-            - question_id: str.
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.evals.get_question(
-            question_id="string",
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/evals/questions/{question_id}"),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(EvalQuestion, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    def replace_question(self, question_id: str, *, request: EvalQuestionCreate) -> EvalQuestion:
-        """
-        Replace a question.
-
-        Parameters:
-            - question_id: str.
-
-            - request: EvalQuestionCreate.
-        ---
-        from llama_cloud import EvalQuestionCreate
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.evals.replace_question(
-            question_id="string",
-            request=EvalQuestionCreate(
-                content="string",
-            ),
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "PUT",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/evals/questions/{question_id}"),
-            json=jsonable_encoder(request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(EvalQuestion, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    def delete_question(self, question_id: str) -> None:
-        """
-        Delete a question.
-
-        Parameters:
-            - question_id: str.
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.evals.delete_question(
-            question_id="string",
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "DELETE",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/evals/questions/{question_id}"),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
     def list_supported_models(self) -> typing.List[SupportedLlmModel]:
         """
         List supported models.
```
```diff
@@ -381,324 +56,6 @@ class AsyncEvalsClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
         self._client_wrapper = client_wrapper
 
-    async def get_dataset(self, dataset_id: str) -> EvalDataset:
-        """
-        Get a dataset by ID.
-
-        Parameters:
-            - dataset_id: str.
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.evals.get_dataset(
-            dataset_id="string",
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/evals/datasets/{dataset_id}"),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(EvalDataset, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def update_dataset(self, dataset_id: str, *, name: str) -> EvalDataset:
-        """
-        Update a dataset.
-
-        Parameters:
-            - dataset_id: str.
-
-            - name: str. The name of the EvalDataset.
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.evals.update_dataset(
-            dataset_id="string",
-            name="string",
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "PUT",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/evals/datasets/{dataset_id}"),
-            json=jsonable_encoder({"name": name}),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(EvalDataset, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def delete_dataset(self, dataset_id: str) -> None:
-        """
-        Delete a dataset.
-
-        Parameters:
-            - dataset_id: str.
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.evals.delete_dataset(
-            dataset_id="string",
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "DELETE",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/evals/datasets/{dataset_id}"),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def list_questions(self, dataset_id: str) -> typing.List[EvalQuestion]:
-        """
-        List questions for a dataset.
-
-        Parameters:
-            - dataset_id: str.
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.evals.list_questions(
-            dataset_id="string",
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/evals/datasets/{dataset_id}/question"
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.List[EvalQuestion], _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def create_question(self, dataset_id: str, *, request: EvalQuestionCreate) -> EvalQuestion:
-        """
-        Create a new question.
-
-        Parameters:
-            - dataset_id: str.
-
-            - request: EvalQuestionCreate.
-        ---
-        from llama_cloud import EvalQuestionCreate
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.evals.create_question(
-            dataset_id="string",
-            request=EvalQuestionCreate(
-                content="string",
-            ),
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/evals/datasets/{dataset_id}/question"
-            ),
-            json=jsonable_encoder(request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(EvalQuestion, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def create_questions(
-        self, dataset_id: str, *, request: typing.List[EvalQuestionCreate]
-    ) -> typing.List[EvalQuestion]:
-        """
-        Create a new question.
-
-        Parameters:
-            - dataset_id: str.
-
-            - request: typing.List[EvalQuestionCreate].
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.evals.create_questions(
-            dataset_id="string",
-            request=[],
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/evals/datasets/{dataset_id}/questions"
-            ),
-            json=jsonable_encoder(request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.List[EvalQuestion], _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def get_question(self, question_id: str) -> EvalQuestion:
-        """
-        Get a question by ID.
-
-        Parameters:
-            - question_id: str.
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.evals.get_question(
-            question_id="string",
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/evals/questions/{question_id}"),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(EvalQuestion, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def replace_question(self, question_id: str, *, request: EvalQuestionCreate) -> EvalQuestion:
-        """
-        Replace a question.
-
-        Parameters:
-            - question_id: str.
-
-            - request: EvalQuestionCreate.
-        ---
-        from llama_cloud import EvalQuestionCreate
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.evals.replace_question(
-            question_id="string",
-            request=EvalQuestionCreate(
-                content="string",
-            ),
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "PUT",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/evals/questions/{question_id}"),
-            json=jsonable_encoder(request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(EvalQuestion, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def delete_question(self, question_id: str) -> None:
-        """
-        Delete a question.
-
-        Parameters:
-            - question_id: str.
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.evals.delete_question(
-            question_id="string",
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "DELETE",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/evals/questions/{question_id}"),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
     async def list_supported_models(self) -> typing.List[SupportedLlmModel]:
         """
         List supported models.
```