deepeval-3.6.6-py3-none-any.whl → deepeval-3.6.8-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deepeval/_version.py +1 -1
- deepeval/benchmarks/equity_med_qa/equity_med_qa.py +1 -0
- deepeval/cli/main.py +42 -0
- deepeval/confident/api.py +1 -0
- deepeval/config/settings.py +22 -4
- deepeval/constants.py +8 -1
- deepeval/dataset/dataset.py +2 -11
- deepeval/dataset/utils.py +1 -1
- deepeval/errors.py +20 -2
- deepeval/evaluate/evaluate.py +5 -1
- deepeval/evaluate/execute.py +811 -248
- deepeval/evaluate/types.py +1 -0
- deepeval/evaluate/utils.py +33 -119
- deepeval/integrations/crewai/__init__.py +7 -1
- deepeval/integrations/crewai/handler.py +1 -1
- deepeval/integrations/crewai/subs.py +51 -0
- deepeval/integrations/crewai/tool.py +71 -0
- deepeval/integrations/crewai/wrapper.py +45 -5
- deepeval/integrations/llama_index/__init__.py +0 -4
- deepeval/integrations/llama_index/handler.py +20 -21
- deepeval/integrations/pydantic_ai/instrumentator.py +125 -76
- deepeval/metrics/__init__.py +13 -0
- deepeval/metrics/answer_relevancy/answer_relevancy.py +12 -3
- deepeval/metrics/api.py +281 -0
- deepeval/metrics/argument_correctness/argument_correctness.py +12 -2
- deepeval/metrics/base_metric.py +1 -0
- deepeval/metrics/bias/bias.py +12 -3
- deepeval/metrics/contextual_precision/contextual_precision.py +39 -24
- deepeval/metrics/contextual_recall/contextual_recall.py +12 -3
- deepeval/metrics/contextual_relevancy/contextual_relevancy.py +12 -1
- deepeval/metrics/conversation_completeness/conversation_completeness.py +12 -0
- deepeval/metrics/conversational_dag/conversational_dag.py +12 -0
- deepeval/metrics/conversational_dag/nodes.py +12 -4
- deepeval/metrics/conversational_g_eval/__init__.py +3 -0
- deepeval/metrics/conversational_g_eval/conversational_g_eval.py +84 -66
- deepeval/metrics/dag/dag.py +12 -0
- deepeval/metrics/dag/nodes.py +12 -4
- deepeval/metrics/dag/schema.py +1 -1
- deepeval/metrics/dag/templates.py +2 -2
- deepeval/metrics/faithfulness/faithfulness.py +12 -1
- deepeval/metrics/g_eval/g_eval.py +11 -0
- deepeval/metrics/goal_accuracy/__init__.py +1 -0
- deepeval/metrics/goal_accuracy/goal_accuracy.py +349 -0
- deepeval/metrics/goal_accuracy/schema.py +17 -0
- deepeval/metrics/goal_accuracy/template.py +235 -0
- deepeval/metrics/hallucination/hallucination.py +20 -9
- deepeval/metrics/indicator.py +8 -2
- deepeval/metrics/json_correctness/json_correctness.py +12 -1
- deepeval/metrics/knowledge_retention/knowledge_retention.py +12 -0
- deepeval/metrics/mcp/mcp_task_completion.py +20 -2
- deepeval/metrics/mcp/multi_turn_mcp_use_metric.py +29 -6
- deepeval/metrics/mcp_use_metric/mcp_use_metric.py +14 -2
- deepeval/metrics/misuse/misuse.py +12 -1
- deepeval/metrics/multimodal_metrics/image_coherence/image_coherence.py +3 -0
- deepeval/metrics/multimodal_metrics/image_editing/image_editing.py +3 -0
- deepeval/metrics/multimodal_metrics/image_helpfulness/image_helpfulness.py +3 -0
- deepeval/metrics/multimodal_metrics/image_reference/image_reference.py +3 -0
- deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/multimodal_answer_relevancy.py +6 -1
- deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/multimodal_contextual_precision.py +38 -25
- deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/multimodal_contextual_recall.py +3 -0
- deepeval/metrics/multimodal_metrics/multimodal_contextual_relevancy/multimodal_contextual_relevancy.py +3 -0
- deepeval/metrics/multimodal_metrics/multimodal_faithfulness/multimodal_faithfulness.py +3 -0
- deepeval/metrics/multimodal_metrics/multimodal_g_eval/multimodal_g_eval.py +3 -0
- deepeval/metrics/multimodal_metrics/multimodal_tool_correctness/multimodal_tool_correctness.py +10 -5
- deepeval/metrics/non_advice/non_advice.py +12 -0
- deepeval/metrics/pii_leakage/pii_leakage.py +12 -1
- deepeval/metrics/plan_adherence/__init__.py +1 -0
- deepeval/metrics/plan_adherence/plan_adherence.py +292 -0
- deepeval/metrics/plan_adherence/schema.py +11 -0
- deepeval/metrics/plan_adherence/template.py +170 -0
- deepeval/metrics/plan_quality/__init__.py +1 -0
- deepeval/metrics/plan_quality/plan_quality.py +292 -0
- deepeval/metrics/plan_quality/schema.py +11 -0
- deepeval/metrics/plan_quality/template.py +101 -0
- deepeval/metrics/prompt_alignment/prompt_alignment.py +12 -1
- deepeval/metrics/role_adherence/role_adherence.py +12 -0
- deepeval/metrics/role_violation/role_violation.py +12 -0
- deepeval/metrics/step_efficiency/__init__.py +1 -0
- deepeval/metrics/step_efficiency/schema.py +11 -0
- deepeval/metrics/step_efficiency/step_efficiency.py +234 -0
- deepeval/metrics/step_efficiency/template.py +256 -0
- deepeval/metrics/summarization/summarization.py +12 -1
- deepeval/metrics/task_completion/task_completion.py +4 -0
- deepeval/metrics/tool_correctness/schema.py +6 -0
- deepeval/metrics/tool_correctness/template.py +88 -0
- deepeval/metrics/tool_correctness/tool_correctness.py +233 -21
- deepeval/metrics/tool_use/__init__.py +1 -0
- deepeval/metrics/tool_use/schema.py +19 -0
- deepeval/metrics/tool_use/template.py +220 -0
- deepeval/metrics/tool_use/tool_use.py +458 -0
- deepeval/metrics/topic_adherence/__init__.py +1 -0
- deepeval/metrics/topic_adherence/schema.py +16 -0
- deepeval/metrics/topic_adherence/template.py +162 -0
- deepeval/metrics/topic_adherence/topic_adherence.py +355 -0
- deepeval/metrics/toxicity/toxicity.py +12 -0
- deepeval/metrics/turn_relevancy/turn_relevancy.py +12 -0
- deepeval/models/embedding_models/azure_embedding_model.py +37 -36
- deepeval/models/embedding_models/local_embedding_model.py +30 -32
- deepeval/models/embedding_models/ollama_embedding_model.py +18 -20
- deepeval/models/embedding_models/openai_embedding_model.py +22 -31
- deepeval/models/llms/grok_model.py +1 -1
- deepeval/models/llms/openai_model.py +2 -0
- deepeval/openai/__init__.py +14 -32
- deepeval/openai/extractors.py +85 -50
- deepeval/openai/patch.py +258 -167
- deepeval/openai/types.py +20 -0
- deepeval/openai/utils.py +205 -56
- deepeval/prompt/__init__.py +19 -1
- deepeval/prompt/api.py +160 -0
- deepeval/prompt/prompt.py +245 -62
- deepeval/prompt/utils.py +186 -15
- deepeval/synthesizer/chunking/context_generator.py +209 -152
- deepeval/synthesizer/chunking/doc_chunker.py +46 -12
- deepeval/synthesizer/synthesizer.py +19 -15
- deepeval/test_case/api.py +131 -0
- deepeval/test_case/llm_test_case.py +6 -2
- deepeval/test_run/__init__.py +1 -0
- deepeval/test_run/hyperparameters.py +47 -8
- deepeval/test_run/test_run.py +292 -206
- deepeval/tracing/__init__.py +2 -1
- deepeval/tracing/api.py +3 -1
- deepeval/tracing/otel/exporter.py +3 -4
- deepeval/tracing/otel/utils.py +24 -5
- deepeval/tracing/trace_context.py +89 -5
- deepeval/tracing/tracing.py +74 -3
- deepeval/tracing/types.py +20 -2
- deepeval/tracing/utils.py +8 -0
- deepeval/utils.py +21 -0
- {deepeval-3.6.6.dist-info → deepeval-3.6.8.dist-info}/METADATA +1 -1
- {deepeval-3.6.6.dist-info → deepeval-3.6.8.dist-info}/RECORD +133 -103
- deepeval/integrations/llama_index/agent/patched.py +0 -68
- {deepeval-3.6.6.dist-info → deepeval-3.6.8.dist-info}/LICENSE.md +0 -0
- {deepeval-3.6.6.dist-info → deepeval-3.6.8.dist-info}/WHEEL +0 -0
- {deepeval-3.6.6.dist-info → deepeval-3.6.8.dist-info}/entry_points.txt +0 -0
deepeval/prompt/prompt.py
CHANGED
@@ -1,11 +1,11 @@
 from enum import Enum
-from typing import Optional, List, Dict
+from typing import Optional, List, Dict, Type, Literal
 from rich.progress import Progress, SpinnerColumn, TextColumn, BarColumn
 from rich.console import Console
 import time
 import json
 import os
-from pydantic import BaseModel
+from pydantic import BaseModel, ValidationError
 import asyncio
 import portalocker
 import threading
@@ -17,8 +17,20 @@ from deepeval.prompt.api import (
     PromptInterpolationType,
     PromptPushRequest,
     PromptVersionsHttpResponse,
+    PromptMessageList,
+    PromptUpdateRequest,
+    ModelSettings,
+    OutputSchema,
+    OutputType,
+    ReasoningEffort,
+    Verbosity,
+    ModelProvider,
+)
+from deepeval.prompt.utils import (
+    interpolate_text,
+    construct_base_model,
+    construct_output_schema,
 )
-from deepeval.prompt.utils import interpolate_text
 from deepeval.confident.api import Api, Endpoints, HttpMethods
 from deepeval.constants import HIDDEN_DIR

@@ -73,45 +85,51 @@ class CachedPrompt(BaseModel):
     prompt_version_id: str
     type: PromptType
     interpolation_type: PromptInterpolationType
+    model_settings: Optional[ModelSettings]
+    output_type: Optional[OutputType]
+    output_schema: Optional[OutputSchema]

     class Config:
         use_enum_values = True


 class Prompt:
-    label: Optional[str] = None
-    _prompt_version_id: Optional[str] = None
-    _type: Optional[PromptType] = None
-    _interpolation_type: Optional[PromptInterpolationType] = None

     def __init__(
         self,
         alias: Optional[str] = None,
-        template: Optional[str] = None,
+        text_template: Optional[str] = None,
         messages_template: Optional[List[PromptMessage]] = None,
+        model_settings: Optional[ModelSettings] = None,
+        output_type: Optional[OutputType] = None,
+        output_schema: Optional[Type[BaseModel]] = None,
     ):
-        if alias is None and template is None:
-            raise TypeError(
-                "Unable to create Prompt where 'alias' and 'template' are both None. Please provide at least one to continue."
-            )
-        if template and messages_template:
+        if text_template and messages_template:
             raise TypeError(
-                "Unable to create Prompt where 'template' and 'messages_template' are both provided. Please provide only one to continue."
+                "Unable to create Prompt where 'text_template' and 'messages_template' are both provided. Please provide only one to continue."
             )
-
         self.alias = alias
-        self._text_template = template
-        self._messages_template = messages_template
+        self.text_template = text_template
+        self.messages_template = messages_template
+        self.model_settings: Optional[ModelSettings] = model_settings
+        self.output_type: Optional[OutputType] = output_type
+        self.output_schema: Optional[Type[BaseModel]] = output_schema
+        self.label: Optional[str] = None
+        self.interpolation_type: Optional[PromptInterpolationType] = None
+
         self._version = None
+        self._prompt_version_id: Optional[str] = None
         self._polling_tasks: Dict[str, Dict[str, asyncio.Task]] = {}
         self._refresh_map: Dict[str, Dict[str, int]] = {}
         self._lock = (
             threading.Lock()
         )  # Protect instance attributes from race conditions
-        if template:
-            self._type = PromptType.TEXT
+
+        self.type: Optional[PromptType] = None
+        if text_template:
+            self.type = PromptType.TEXT
         elif messages_template:
-            self._type = PromptType.LIST
+            self.type = PromptType.LIST

     def __del__(self):
         """Cleanup polling tasks when instance is destroyed"""
@@ -135,12 +153,48 @@ class Prompt:
     def version(self, value):
         self._version = value

+    def load(self, file_path: str, messages_key: Optional[str] = None):
+        _, ext = os.path.splitext(file_path)
+        if ext != ".json" and ext != ".txt":
+            raise ValueError("Only .json and .txt files are supported")
+
+        file_name = os.path.basename(file_path).split(".")[0]
+        self.alias = file_name
+        with open(file_path, "r") as f:
+            content = f.read()
+        try:
+            data = json.loads(content)
+        except:
+            self.text_template = content
+            return content
+
+        text_template = None
+        messages_template = None
+        try:
+            if isinstance(data, list):
+                messages_template = PromptMessageList.validate_python(data)
+            elif isinstance(data, dict):
+                if messages_key is None:
+                    raise ValueError(
+                        "messages `key` must be provided if file is a dictionary"
+                    )
+                messages = data[messages_key]
+                messages_template = PromptMessageList.validate_python(messages)
+            else:
+                text_template = content
+        except ValidationError:
+            text_template = content
+
+        self.text_template = text_template
+        self.messages_template = messages_template
+        return text_template or messages_template
+
     def interpolate(self, **kwargs):
         with self._lock:
-            prompt_type = self._type
-            text_template = self._text_template
-            messages_template = self._messages_template
-            interpolation_type = self._interpolation_type
+            prompt_type = self.type
+            text_template = self.text_template
+            messages_template = self.messages_template
+            interpolation_type = self.interpolation_type

             if prompt_type == PromptType.TEXT:
                 if text_template is None:
@@ -148,6 +202,7 @@ class Prompt:
                         "Unable to interpolate empty prompt template. Please pull a prompt from Confident AI or set template manually to continue."
                     )

+            print("@@@@@")
             return interpolate_text(interpolation_type, text_template, **kwargs)

         elif prompt_type == PromptType.LIST:
@@ -166,7 +221,11 @@ class Prompt:
                 )
                 return interpolated_messages
         else:
-            raise ValueError(f"Unsupported prompt type: {self._type}")
+            raise ValueError(f"Unsupported prompt type: {self.type}")
+
+    ############################################
+    ### Utils
+    ############################################

     def _get_versions(self) -> List:
         if self.alias is None:
@@ -232,6 +291,9 @@ class Prompt:
         prompt_version_id: Optional[str] = None,
         type: Optional[PromptType] = None,
         interpolation_type: Optional[PromptInterpolationType] = None,
+        model_settings: Optional[ModelSettings] = None,
+        output_type: Optional[OutputType] = None,
+        output_schema: Optional[OutputSchema] = None,
     ):
         if not self.alias:
             return
@@ -276,6 +338,9 @@ class Prompt:
             "prompt_version_id": prompt_version_id,
             "type": type,
             "interpolation_type": interpolation_type,
+            "model_settings": model_settings,
+            "output_type": output_type,
+            "output_schema": output_schema,
         }

         if cache_key == VERSION_CACHE_KEY:
@@ -313,14 +378,27 @@ class Prompt:
             raise ValueError("Unable to fetch prompt and load from cache")

         with self._lock:
-            self.version = cached_prompt.version
+            self._version = cached_prompt.version
             self.label = cached_prompt.label
-            self._text_template = cached_prompt.template
-            self._messages_template = cached_prompt.messages_template
+            self.text_template = cached_prompt.template
+            self.messages_template = cached_prompt.messages_template
             self._prompt_version_id = cached_prompt.prompt_version_id
-            self._type = PromptType(cached_prompt.type)
-            self._interpolation_type = PromptInterpolationType(
-                cached_prompt.interpolation_type
+            self.type = (
+                PromptType(cached_prompt.type) if cached_prompt.type else None
+            )
+            self.interpolation_type = (
+                PromptInterpolationType(cached_prompt.interpolation_type)
+                if cached_prompt.interpolation_type
+                else None
+            )
+            self.model_settings = cached_prompt.model_settings
+            self.output_type = (
+                OutputType(cached_prompt.output_type)
+                if cached_prompt.output_type
+                else None
+            )
+            self.output_schema = construct_base_model(
+                cached_prompt.output_schema
             )

         end_time = time.perf_counter()
@@ -330,6 +408,10 @@ class Prompt:
                 description=f"{progress.tasks[task_id].description}[rgb(25,227,160)]Loaded from cache! ({time_taken}s)",
             )

+    ############################################
+    ### Pull, Push, Update
+    ############################################
+
     def pull(
         self,
         version: Optional[str] = None,
@@ -369,18 +451,33 @@ class Prompt:
             )
             if cached_prompt:
                 with self._lock:
-                    self.version = cached_prompt.version
+                    self._version = cached_prompt.version
                     self.label = cached_prompt.label
-                    self._text_template = cached_prompt.template
-                    self._messages_template = (
-                        cached_prompt.messages_template
-                    )
+                    self.text_template = cached_prompt.template
+                    self.messages_template = cached_prompt.messages_template
                     self._prompt_version_id = (
                         cached_prompt.prompt_version_id
                     )
-                    self._type = PromptType(cached_prompt.type)
-                    self._interpolation_type = PromptInterpolationType(
-                        cached_prompt.interpolation_type
+                    self.type = (
+                        PromptType(cached_prompt.type)
+                        if cached_prompt.type
+                        else None
+                    )
+                    self.interpolation_type = (
+                        PromptInterpolationType(
+                            cached_prompt.interpolation_type
+                        )
+                        if cached_prompt.interpolation_type
+                        else None
+                    )
+                    self.model_settings = cached_prompt.model_settings
+                    self.output_type = (
+                        OutputType(cached_prompt.output_type)
+                        if cached_prompt.output_type
+                        else None
+                    )
+                    self.output_schema = construct_base_model(
+                        cached_prompt.output_schema
                     )
                 return
             except:
@@ -432,6 +529,9 @@ class Prompt:
                 messages=data.get("messages", None),
                 type=data["type"],
                 interpolation_type=data["interpolationType"],
+                model_settings=data.get("modelSettings", None),
+                output_type=data.get("outputType", None),
+                output_schema=data.get("outputSchema", None),
             )
         except Exception:
             if fallback_to_cache:
@@ -446,13 +546,18 @@ class Prompt:
             raise

         with self._lock:
-            self.version = response.version
+            self._version = response.version
             self.label = response.label
-            self._text_template = response.text
-            self._messages_template = response.messages
+            self.text_template = response.text
+            self.messages_template = response.messages
             self._prompt_version_id = response.id
-            self._type = response.type
-            self._interpolation_type = response.interpolation_type
+            self.type = response.type
+            self.interpolation_type = response.interpolation_type
+            self.model_settings = response.model_settings
+            self.output_type = response.output_type
+            self.output_schema = construct_base_model(
+                response.output_schema
+            )

         end_time = time.perf_counter()
         time_taken = format(end_time - start_time, ".2f")
@@ -471,6 +576,9 @@ class Prompt:
             prompt_version_id=response.id,
             type=response.type,
             interpolation_type=response.interpolation_type,
+            model_settings=response.model_settings,
+            output_type=response.output_type,
+            output_schema=response.output_schema,
         )

     def push(
@@ -480,26 +588,36 @@ class Prompt:
         interpolation_type: Optional[
             PromptInterpolationType
         ] = PromptInterpolationType.FSTRING,
+        model_settings: Optional[ModelSettings] = None,
+        output_type: Optional[OutputType] = None,
+        output_schema: Optional[Type[BaseModel]] = None,
+        _verbose: Optional[bool] = True,
     ):
         if self.alias is None:
             raise ValueError(
                 "Prompt alias is not set. Please set an alias to continue."
             )
-
-        if text is None and messages is None:
+        text_template = text or self.text_template
+        messages_template = messages or self.messages_template
+        if text_template is None and messages_template is None:
             raise ValueError("Either text or messages must be provided")
-
-        if text is not None and messages is not None:
+        if text_template is not None and messages_template is not None:
             raise ValueError("Only one of text or messages can be provided")

         body = PromptPushRequest(
             alias=self.alias,
-            text=text,
-            messages=messages,
-            interpolation_type=interpolation_type,
+            text=text_template,
+            messages=messages_template,
+            interpolation_type=interpolation_type or self.interpolation_type,
+            model_settings=model_settings or self.model_settings,
+            output_type=output_type or self.output_type,
+            output_schema=construct_output_schema(output_schema)
+            or construct_output_schema(self.output_schema),
         )
         try:
-            body = body.model_dump(by_alias=True, exclude_none=True)
+            body = body.model_dump(
+                by_alias=True, exclude_none=True, mode="json"
+            )
         except AttributeError:
             # Pydantic version below 2.0
             body = body.dict(by_alias=True, exclude_none=True)
@@ -510,13 +628,78 @@ class Prompt:
             endpoint=Endpoints.PROMPTS_ENDPOINT,
             body=body,
         )
+        versions = self._get_versions()

-        if link:
-            console = Console()
-            console.print(
-                "✅ Prompt successfully pushed to Confident AI! View at "
-                f"[link={link}]{link}[/link]"
+        if link and versions:
+            self._prompt_version_id = versions[-1].id
+            self.text_template = text_template
+            self.messages_template = messages_template
+            self.interpolation_type = (
+                interpolation_type or self.interpolation_type
+            )
+            self.model_settings = model_settings or self.model_settings
+            self.output_type = output_type or self.output_type
+            self.output_schema = output_schema or self.output_schema
+            self.type = PromptType.TEXT if text_template else PromptType.LIST
+            if _verbose:
+                console = Console()
+                console.print(
+                    "✅ Prompt successfully pushed to Confident AI! View at "
+                    f"[link={link}]{link}[/link]"
+                )
+
+    def update(
+        self,
+        version: str,
+        text: Optional[str] = None,
+        messages: Optional[List[PromptMessage]] = None,
+        interpolation_type: Optional[
+            PromptInterpolationType
+        ] = PromptInterpolationType.FSTRING,
+        model_settings: Optional[ModelSettings] = None,
+        output_type: Optional[OutputType] = None,
+        output_schema: Optional[Type[BaseModel]] = None,
+    ):
+        if self.alias is None:
+            raise ValueError(
+                "Prompt alias is not set. Please set an alias to continue."
+            )
+
+        body = PromptUpdateRequest(
+            text=text,
+            messages=messages,
+            interpolation_type=interpolation_type,
+            model_settings=model_settings,
+            output_type=output_type,
+            output_schema=construct_output_schema(output_schema),
+        )
+        try:
+            body = body.model_dump(
+                by_alias=True, exclude_none=True, mode="json"
             )
+        except AttributeError:
+            body = body.dict(by_alias=True, exclude_none=True)
+        api = Api()
+        data, _ = api.send_request(
+            method=HttpMethods.PUT,
+            endpoint=Endpoints.PROMPTS_VERSION_ID_ENDPOINT,
+            url_params={
+                "alias": self.alias,
+                "versionId": version,
+            },
+            body=body,
+        )
+        if data:
+            self._version = version
+            self.text_template = text
+            self.messages_template = messages
+            self.interpolation_type = interpolation_type
+            self.model_settings = model_settings
+            self.output_type = output_type
+            self.output_schema = output_schema
+            self.type = PromptType.TEXT if text else PromptType.LIST
+            console = Console()
+            console.print("✅ Prompt successfully updated on Confident AI!")

     ############################################
     ### Polling
@@ -614,13 +797,13 @@ class Prompt:

             # Update in-memory properties with fresh data (thread-safe)
             with self._lock:
-                self.version = response.version
+                self._version = response.version
                 self.label = response.label
-                self._text_template = response.text
-                self._messages_template = response.messages
+                self.text_template = response.text
+                self.messages_template = response.messages
                 self._prompt_version_id = response.id
-                self._type = response.type
-                self._interpolation_type = response.interpolation_type
+                self.type = response.type
+                self.interpolation_type = response.interpolation_type

         except Exception:
             pass