deepeval 3.6.6__py3-none-any.whl → 3.6.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (134)
  1. deepeval/_version.py +1 -1
  2. deepeval/benchmarks/equity_med_qa/equity_med_qa.py +1 -0
  3. deepeval/cli/main.py +42 -0
  4. deepeval/confident/api.py +1 -0
  5. deepeval/config/settings.py +22 -4
  6. deepeval/constants.py +8 -1
  7. deepeval/dataset/dataset.py +2 -11
  8. deepeval/dataset/utils.py +1 -1
  9. deepeval/errors.py +20 -2
  10. deepeval/evaluate/evaluate.py +5 -1
  11. deepeval/evaluate/execute.py +811 -248
  12. deepeval/evaluate/types.py +1 -0
  13. deepeval/evaluate/utils.py +33 -119
  14. deepeval/integrations/crewai/__init__.py +7 -1
  15. deepeval/integrations/crewai/handler.py +1 -1
  16. deepeval/integrations/crewai/subs.py +51 -0
  17. deepeval/integrations/crewai/tool.py +71 -0
  18. deepeval/integrations/crewai/wrapper.py +45 -5
  19. deepeval/integrations/llama_index/__init__.py +0 -4
  20. deepeval/integrations/llama_index/handler.py +20 -21
  21. deepeval/integrations/pydantic_ai/instrumentator.py +125 -76
  22. deepeval/metrics/__init__.py +13 -0
  23. deepeval/metrics/answer_relevancy/answer_relevancy.py +12 -3
  24. deepeval/metrics/api.py +281 -0
  25. deepeval/metrics/argument_correctness/argument_correctness.py +12 -2
  26. deepeval/metrics/base_metric.py +1 -0
  27. deepeval/metrics/bias/bias.py +12 -3
  28. deepeval/metrics/contextual_precision/contextual_precision.py +39 -24
  29. deepeval/metrics/contextual_recall/contextual_recall.py +12 -3
  30. deepeval/metrics/contextual_relevancy/contextual_relevancy.py +12 -1
  31. deepeval/metrics/conversation_completeness/conversation_completeness.py +12 -0
  32. deepeval/metrics/conversational_dag/conversational_dag.py +12 -0
  33. deepeval/metrics/conversational_dag/nodes.py +12 -4
  34. deepeval/metrics/conversational_g_eval/__init__.py +3 -0
  35. deepeval/metrics/conversational_g_eval/conversational_g_eval.py +84 -66
  36. deepeval/metrics/dag/dag.py +12 -0
  37. deepeval/metrics/dag/nodes.py +12 -4
  38. deepeval/metrics/dag/schema.py +1 -1
  39. deepeval/metrics/dag/templates.py +2 -2
  40. deepeval/metrics/faithfulness/faithfulness.py +12 -1
  41. deepeval/metrics/g_eval/g_eval.py +11 -0
  42. deepeval/metrics/goal_accuracy/__init__.py +1 -0
  43. deepeval/metrics/goal_accuracy/goal_accuracy.py +349 -0
  44. deepeval/metrics/goal_accuracy/schema.py +17 -0
  45. deepeval/metrics/goal_accuracy/template.py +235 -0
  46. deepeval/metrics/hallucination/hallucination.py +20 -9
  47. deepeval/metrics/indicator.py +8 -2
  48. deepeval/metrics/json_correctness/json_correctness.py +12 -1
  49. deepeval/metrics/knowledge_retention/knowledge_retention.py +12 -0
  50. deepeval/metrics/mcp/mcp_task_completion.py +20 -2
  51. deepeval/metrics/mcp/multi_turn_mcp_use_metric.py +29 -6
  52. deepeval/metrics/mcp_use_metric/mcp_use_metric.py +14 -2
  53. deepeval/metrics/misuse/misuse.py +12 -1
  54. deepeval/metrics/multimodal_metrics/image_coherence/image_coherence.py +3 -0
  55. deepeval/metrics/multimodal_metrics/image_editing/image_editing.py +3 -0
  56. deepeval/metrics/multimodal_metrics/image_helpfulness/image_helpfulness.py +3 -0
  57. deepeval/metrics/multimodal_metrics/image_reference/image_reference.py +3 -0
  58. deepeval/metrics/multimodal_metrics/multimodal_answer_relevancy/multimodal_answer_relevancy.py +6 -1
  59. deepeval/metrics/multimodal_metrics/multimodal_contextual_precision/multimodal_contextual_precision.py +38 -25
  60. deepeval/metrics/multimodal_metrics/multimodal_contextual_recall/multimodal_contextual_recall.py +3 -0
  61. deepeval/metrics/multimodal_metrics/multimodal_contextual_relevancy/multimodal_contextual_relevancy.py +3 -0
  62. deepeval/metrics/multimodal_metrics/multimodal_faithfulness/multimodal_faithfulness.py +3 -0
  63. deepeval/metrics/multimodal_metrics/multimodal_g_eval/multimodal_g_eval.py +3 -0
  64. deepeval/metrics/multimodal_metrics/multimodal_tool_correctness/multimodal_tool_correctness.py +10 -5
  65. deepeval/metrics/non_advice/non_advice.py +12 -0
  66. deepeval/metrics/pii_leakage/pii_leakage.py +12 -1
  67. deepeval/metrics/plan_adherence/__init__.py +1 -0
  68. deepeval/metrics/plan_adherence/plan_adherence.py +292 -0
  69. deepeval/metrics/plan_adherence/schema.py +11 -0
  70. deepeval/metrics/plan_adherence/template.py +170 -0
  71. deepeval/metrics/plan_quality/__init__.py +1 -0
  72. deepeval/metrics/plan_quality/plan_quality.py +292 -0
  73. deepeval/metrics/plan_quality/schema.py +11 -0
  74. deepeval/metrics/plan_quality/template.py +101 -0
  75. deepeval/metrics/prompt_alignment/prompt_alignment.py +12 -1
  76. deepeval/metrics/role_adherence/role_adherence.py +12 -0
  77. deepeval/metrics/role_violation/role_violation.py +12 -0
  78. deepeval/metrics/step_efficiency/__init__.py +1 -0
  79. deepeval/metrics/step_efficiency/schema.py +11 -0
  80. deepeval/metrics/step_efficiency/step_efficiency.py +234 -0
  81. deepeval/metrics/step_efficiency/template.py +256 -0
  82. deepeval/metrics/summarization/summarization.py +12 -1
  83. deepeval/metrics/task_completion/task_completion.py +4 -0
  84. deepeval/metrics/tool_correctness/schema.py +6 -0
  85. deepeval/metrics/tool_correctness/template.py +88 -0
  86. deepeval/metrics/tool_correctness/tool_correctness.py +233 -21
  87. deepeval/metrics/tool_use/__init__.py +1 -0
  88. deepeval/metrics/tool_use/schema.py +19 -0
  89. deepeval/metrics/tool_use/template.py +220 -0
  90. deepeval/metrics/tool_use/tool_use.py +458 -0
  91. deepeval/metrics/topic_adherence/__init__.py +1 -0
  92. deepeval/metrics/topic_adherence/schema.py +16 -0
  93. deepeval/metrics/topic_adherence/template.py +162 -0
  94. deepeval/metrics/topic_adherence/topic_adherence.py +355 -0
  95. deepeval/metrics/toxicity/toxicity.py +12 -0
  96. deepeval/metrics/turn_relevancy/turn_relevancy.py +12 -0
  97. deepeval/models/embedding_models/azure_embedding_model.py +37 -36
  98. deepeval/models/embedding_models/local_embedding_model.py +30 -32
  99. deepeval/models/embedding_models/ollama_embedding_model.py +18 -20
  100. deepeval/models/embedding_models/openai_embedding_model.py +22 -31
  101. deepeval/models/llms/grok_model.py +1 -1
  102. deepeval/models/llms/openai_model.py +2 -0
  103. deepeval/openai/__init__.py +14 -32
  104. deepeval/openai/extractors.py +85 -50
  105. deepeval/openai/patch.py +258 -167
  106. deepeval/openai/types.py +20 -0
  107. deepeval/openai/utils.py +205 -56
  108. deepeval/prompt/__init__.py +19 -1
  109. deepeval/prompt/api.py +160 -0
  110. deepeval/prompt/prompt.py +245 -62
  111. deepeval/prompt/utils.py +186 -15
  112. deepeval/synthesizer/chunking/context_generator.py +209 -152
  113. deepeval/synthesizer/chunking/doc_chunker.py +46 -12
  114. deepeval/synthesizer/synthesizer.py +19 -15
  115. deepeval/test_case/api.py +131 -0
  116. deepeval/test_case/llm_test_case.py +6 -2
  117. deepeval/test_run/__init__.py +1 -0
  118. deepeval/test_run/hyperparameters.py +47 -8
  119. deepeval/test_run/test_run.py +292 -206
  120. deepeval/tracing/__init__.py +2 -1
  121. deepeval/tracing/api.py +3 -1
  122. deepeval/tracing/otel/exporter.py +3 -4
  123. deepeval/tracing/otel/utils.py +24 -5
  124. deepeval/tracing/trace_context.py +89 -5
  125. deepeval/tracing/tracing.py +74 -3
  126. deepeval/tracing/types.py +20 -2
  127. deepeval/tracing/utils.py +8 -0
  128. deepeval/utils.py +21 -0
  129. {deepeval-3.6.6.dist-info → deepeval-3.6.8.dist-info}/METADATA +1 -1
  130. {deepeval-3.6.6.dist-info → deepeval-3.6.8.dist-info}/RECORD +133 -103
  131. deepeval/integrations/llama_index/agent/patched.py +0 -68
  132. {deepeval-3.6.6.dist-info → deepeval-3.6.8.dist-info}/LICENSE.md +0 -0
  133. {deepeval-3.6.6.dist-info → deepeval-3.6.8.dist-info}/WHEEL +0 -0
  134. {deepeval-3.6.6.dist-info → deepeval-3.6.8.dist-info}/entry_points.txt +0 -0
deepeval/openai/utils.py CHANGED
@@ -1,67 +1,43 @@
-from typing import List
+import json
 import uuid
+from typing import Any, Dict, List, Optional, Iterable
+
+from openai.types.chat.chat_completion_message_param import (
+    ChatCompletionMessageParam,
+)
 
 from deepeval.tracing.types import ToolSpan, TraceSpanStatus
-from deepeval.openai.extractors import InputParameters, OutputParameters
 from deepeval.tracing.context import current_span_context
-from deepeval.test_case import LLMTestCase
-from deepeval.metrics import BaseMetric
-from deepeval.tracing.types import TestCaseMetricPair
-
-openai_test_case_pairs: List[TestCaseMetricPair] = []
-
-
-def set_attr_path(obj, attr_path: str, value):
-    *pre_path, final_attr = attr_path.split(".")
-    for attr in pre_path:
-        obj = getattr(obj, attr, None)
-        if obj is None:
-            return
-    setattr(obj, final_attr, value)
-
-
-def get_attr_path(obj, attr_path: str):
-    for attr in attr_path.split("."):
-        obj = getattr(obj, attr, None)
-        if obj is None:
-            return None
-    return obj
-
-
-def add_test_case(
-    test_case: LLMTestCase,
-    metrics: List[BaseMetric],
-    input_parameters: InputParameters,
-):
-    openai_test_case_pairs.append(
-        TestCaseMetricPair(
-            test_case=test_case,
-            metrics=metrics,
-            hyperparameters=create_hyperparameters_map(input_parameters),
+from deepeval.utils import shorten, len_long
+from deepeval.openai.types import OutputParameters
+
+
+_URL_MAX = 200
+_JSON_MAX = max(
+    len_long(), 400
+)  # <- make this bigger by increasing DEEPEVAL_MAXLEN_LONG above 400
+
+
+def _compact_dump(value: Any) -> str:
+    try:
+        dumped = json.dumps(
+            value, ensure_ascii=False, default=str, separators=(",", ":")
         )
-    )
-
-
-def create_hyperparameters_map(input_parameters: InputParameters):
-    hyperparameters = {"model": input_parameters.model}
-    if input_parameters.instructions:
-        hyperparameters["system_prompt"] = input_parameters.instructions
-    elif input_parameters.messages:
-        system_messages = [
-            m["content"]
-            for m in input_parameters.messages
-            if m["role"] == "system"
-        ]
-        if system_messages:
-            hyperparameters["system_prompt"] = (
-                system_messages[0]
-                if len(system_messages) == 1
-                else str(system_messages)
-            )
-    return hyperparameters
+    except Exception:
+        dumped = repr(value)
+    return shorten(dumped, max_len=_JSON_MAX)
+
+
+def _fmt_url(url: Optional[str]) -> str:
+    if not url:
+        return ""
+    if url.startswith("data:"):
+        return "[data-uri]"
+    return shorten(url, max_len=_URL_MAX)
 
 
 def create_child_tool_spans(output_parameters: OutputParameters):
+
     if output_parameters.tools_called is None:
         return
 
@@ -84,3 +60,176 @@ def create_child_tool_spans(output_parameters: OutputParameters):
             }
         )
         current_span.children.append(tool_span)
+
+
+def stringify_multimodal_content(content: Any) -> str:
+    """
+    Return a short, human-readable summary string for an OpenAI-style multimodal `content` value.
+
+    This is used to populate span summaries, such as `InputParameters.input`. It never raises and
+    never returns huge blobs.
+
+    Notes:
+    - Data URIs are redacted to "[data-uri]".
+    - Output is capped via `deepeval.utils.shorten` (configurable through settings).
+    - Fields that are not explicitly handled are returned as size-capped JSON dumps
+    - This string is for display/summary only, not intended to be parsable.
+
+    Args:
+        content: The value of an OpenAI message `content`, may be a str or list of typed parts,
+            or any nested structure.
+
+    Returns:
+        A short, readable `str` summary.
+    """
+    if content is None:
+        return ""
+    if isinstance(content, str):
+        return content
+    if isinstance(content, (bytes, bytearray)):
+        return f"[bytes:{len(content)}]"
+
+    # list of parts for Chat & Responses
+    if isinstance(content, list):
+        parts: List[str] = []
+        for part in content:
+            s = stringify_multimodal_content(part)
+            if s:
+                parts.append(s)
+        return "\n".join(parts)
+
+    # documented dict shapes (Chat & Responses)
+    if isinstance(content, dict):
+        t = content.get("type")
+
+        # Chat Completions
+        if t == "text":
+            return str(content.get("text", ""))
+        if t == "image_url":
+            image_url = content.get("image_url")
+            if isinstance(image_url, str):
+                url = image_url
+            else:
+                url = (image_url or {}).get("url") or content.get("url")
+            return f"[image:{_fmt_url(url)}]"
+
+        # Responses API variants
+        if t == "input_text":
+            return str(content.get("text", ""))
+        if t == "input_image":
+            image_url = content.get("image_url")
+            if isinstance(image_url, str):
+                url = image_url
+            else:
+                url = (image_url or {}).get("url") or content.get("url")
+            return f"[image:{_fmt_url(url)}]"
+
+        # readability for other input_* types we don't currently handle
+        if t and t.startswith("input_"):
+            return f"[{t}]"
+
+    # unknown dicts and types returned as shortened JSON
+    return _compact_dump(content)
+
+
+def render_messages(
+    messages: Iterable[ChatCompletionMessageParam],
+) -> List[Dict[str, Any]]:
+
+    messages_list = []
+
+    for message in messages:
+        role = message.get("role")
+        content = message.get("content")
+        if role == "assistant" and message.get("tool_calls"):
+            tool_calls = message.get("tool_calls")
+            if isinstance(tool_calls, list):
+                for tool_call in tool_calls:
+                    # Extract type - either "function" or "custom"
+                    tool_type = tool_call.get("type", "function")
+
+                    # Extract name and arguments based on type
+                    if tool_type == "function":
+                        function_data = tool_call.get("function", {})
+                        name = function_data.get("name", "")
+                        arguments = function_data.get("arguments", "")
+                    elif tool_type == "custom":
+                        custom_data = tool_call.get("custom", {})
+                        name = custom_data.get("name", "")
+                        arguments = custom_data.get("input", "")
+                    else:
+                        name = ""
+                        arguments = ""
+
+                    messages_list.append(
+                        {
+                            "id": tool_call.get("id", ""),
+                            "call_id": tool_call.get(
+                                "id", ""
+                            ),  # OpenAI uses 'id', not 'call_id'
+                            "name": name,
+                            "type": tool_type,
+                            "arguments": json.loads(arguments),
+                        }
+                    )
+
+        elif role == "tool":
+            messages_list.append(
+                {
+                    "call_id": message.get("tool_call_id", ""),
+                    "type": role,  # "tool"
+                    "output": message.get("content", {}),
+                }
+            )
+        else:
+            messages_list.append(
+                {
+                    "role": role,
+                    "content": content,
+                }
+            )
+
+    return messages_list
+
+
+def render_response_input(input: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+
+    messages_list = []
+
+    for item in input:
+        type = item.get("type")
+        role = item.get("role")
+
+        if type == "message":
+            messages_list.append(
+                {
+                    "role": role,
+                    "content": item.get("content"),
+                }
+            )
+        else:
+            messages_list.append(item)
+
+    return messages_list
+
+
+def _render_content(content: Dict[str, Any], indent: int = 0) -> str:
+    """
+    Renders a dictionary as a formatted string with indentation for nested structures.
+    """
+    if not content:
+        return ""
+
+    lines = []
+    prefix = "  " * indent
+
+    for key, value in content.items():
+        if isinstance(value, dict):
+            lines.append(f"{prefix}{key}:")
+            lines.append(_render_content(value, indent + 1))
+        elif isinstance(value, list):
+            lines.append(f"{prefix}{key}: {_compact_dump(value)}")
+        else:
+            lines.append(f"{prefix}{key}: {value}")
+
+    return "\n".join(lines)
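For orientation, a minimal usage sketch of the new summary helper above (not part of the diff; assumes deepeval 3.6.8 is installed, and the content values are illustrative):

    from deepeval.openai.utils import stringify_multimodal_content

    # Illustrative Chat Completions-style multimodal content.
    content = [
        {"type": "text", "text": "Describe this image."},
        {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
        {"type": "input_image", "image_url": "data:image/png;base64,iVBORw0..."},
    ]

    # Text parts pass through, plain URLs are shortened (max 200 chars),
    # and data URIs are redacted, so the summary stays small:
    print(stringify_multimodal_content(content))
    # Describe this image.
    # [image:https://example.com/cat.png]
    # [image:[data-uri]]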
deepeval/prompt/__init__.py CHANGED
@@ -1,3 +1,21 @@
 from .prompt import Prompt
+from .api import (
+    PromptMessage,
+    ModelSettings,
+    ModelProvider,
+    Verbosity,
+    ReasoningEffort,
+    OutputType,
+    PromptInterpolationType,
+)
 
-__all__ = ["Prompt"]
+__all__ = [
+    "Prompt",
+    "PromptMessage",
+    "ModelSettings",
+    "ModelProvider",
+    "Verbosity",
+    "ReasoningEffort",
+    "OutputType",
+    "PromptInterpolationType",
+]
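The practical effect of this change is that the prompt settings types can now be imported from deepeval.prompt directly rather than through deepeval.prompt.api. A quick sketch (assumes deepeval>=3.6.8; the field values are illustrative):

    from deepeval.prompt import (
        Prompt,
        ModelSettings,
        ModelProvider,
        ReasoningEffort,
    )

    # Only the fields defined in deepeval/prompt/api.py below are used here.
    settings = ModelSettings(
        provider=ModelProvider.OPEN_AI,
        reasoning_effort=ReasoningEffort.LOW,
    )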
deepeval/prompt/api.py CHANGED
@@ -1,6 +1,119 @@
 from pydantic import BaseModel, Field, AliasChoices
 from enum import Enum
 from typing import List, Optional
+from pydantic import TypeAdapter
+
+###################################
+# Model Settings
+###################################
+
+
+class ReasoningEffort(Enum):
+    MINIMAL = "MINIMAL"
+    LOW = "LOW"
+    MEDIUM = "MEDIUM"
+    HIGH = "HIGH"
+
+
+class Verbosity(Enum):
+    LOW = "LOW"
+    MEDIUM = "MEDIUM"
+    HIGH = "HIGH"
+
+
+class ModelProvider(Enum):
+    OPEN_AI = "OPEN_AI"
+    ANTHROPIC = "ANTHROPIC"
+    GEMINI = "GEMINI"
+    X_AI = "X_AI"
+    DEEPSEEK = "DEEPSEEK"
+    BEDROCK = "BEDROCK"
+
+
+class ModelSettings(BaseModel):
+    provider: Optional[ModelProvider] = None
+    name: Optional[str] = None
+    temperature: Optional[float] = None
+    max_tokens: Optional[int] = Field(
+        default=None,
+        serialization_alias="maxTokens",
+        validation_alias=AliasChoices("max_tokens", "maxTokens"),
+    )
+    top_p: Optional[float] = Field(
+        default=None,
+        serialization_alias="topP",
+        validation_alias=AliasChoices("top_p", "topP"),
+    )
+    frequency_penalty: Optional[float] = Field(
+        default=None,
+        serialization_alias="frequencyPenalty",
+        validation_alias=AliasChoices("frequency_penalty", "frequencyPenalty"),
+    )
+    presence_penalty: Optional[float] = Field(
+        default=None,
+        serialization_alias="presencePenalty",
+        validation_alias=AliasChoices("presence_penalty", "presencePenalty"),
+    )
+    stop_sequence: Optional[List[str]] = Field(
+        default=None,
+        serialization_alias="stopSequence",
+        validation_alias=AliasChoices("stop_sequence", "stopSequence"),
+    )
+    reasoning_effort: Optional[ReasoningEffort] = Field(
+        default=None,
+        serialization_alias="reasoningEffort",
+        validation_alias=AliasChoices("reasoning_effort", "reasoningEffort"),
+    )
+    verbosity: Optional[Verbosity] = Field(
+        default=None,
+        serialization_alias="verbosity",
+        validation_alias=AliasChoices("verbosity", "verbosity"),
+    )
+
+
+###################################
+# Output Settings
+###################################
+
+
+class OutputType(Enum):
+    TEXT = "TEXT"
+    JSON = "JSON"
+    SCHEMA = "SCHEMA"
+
+
+class SchemaDataType(Enum):
+    OBJECT = "OBJECT"
+    STRING = "STRING"
+    FLOAT = "FLOAT"
+    INTEGER = "INTEGER"
+    BOOLEAN = "BOOLEAN"
+    NULL = "NULL"
+
+
+class OutputSchemaField(BaseModel):
+    id: str
+    type: SchemaDataType
+    name: str
+    required: Optional[bool] = False
+    parent_id: Optional[str] = Field(
+        default=None,
+        serialization_alias="parentId",
+        validation_alias=AliasChoices("parent_id", "parentId"),
+    )
+
+    class Config:
+        use_enum_values = True
+
+
+class OutputSchema(BaseModel):
+    fields: Optional[List[OutputSchemaField]] = None
+    name: str
+
+
+###################################
+# Prompt
+###################################
 
 
 class PromptInterpolationType(Enum):
@@ -16,6 +129,9 @@ class PromptMessage(BaseModel):
     content: str
 
 
+PromptMessageList = TypeAdapter(List[PromptMessage])
+
+
 class PromptType(Enum):
     TEXT = "TEXT"
     LIST = "LIST"
@@ -53,6 +169,21 @@ class PromptHttpResponse(BaseModel):
         serialization_alias="interpolationType"
     )
     type: PromptType
+    model_settings: Optional[ModelSettings] = Field(
+        default=None,
+        serialization_alias="modelSettings",
+        validation_alias=AliasChoices("model_settings", "modelSettings"),
+    )
+    output_type: Optional[OutputType] = Field(
+        default=None,
+        serialization_alias="outputType",
+        validation_alias=AliasChoices("output_type", "outputType"),
+    )
+    output_schema: Optional[OutputSchema] = Field(
+        default=None,
+        serialization_alias="outputSchema",
+        validation_alias=AliasChoices("output_schema", "outputSchema"),
+    )
 
 
 class PromptPushRequest(BaseModel):
@@ -62,6 +193,35 @@ class PromptPushRequest(BaseModel):
     interpolation_type: PromptInterpolationType = Field(
         serialization_alias="interpolationType"
     )
+    model_settings: Optional[ModelSettings] = Field(
+        default=None, serialization_alias="modelSettings"
+    )
+    output_schema: Optional[OutputSchema] = Field(
+        default=None, serialization_alias="outputSchema"
+    )
+    output_type: Optional[OutputType] = Field(
+        default=None, serialization_alias="outputType"
+    )
+
+    class Config:
+        use_enum_values = True
+
+
+class PromptUpdateRequest(BaseModel):
+    text: Optional[str] = None
+    messages: Optional[List[PromptMessage]] = None
+    interpolation_type: PromptInterpolationType = Field(
+        serialization_alias="interpolationType"
+    )
+    model_settings: Optional[ModelSettings] = Field(
+        default=None, serialization_alias="modelSettings"
+    )
+    output_schema: Optional[OutputSchema] = Field(
+        default=None, serialization_alias="outputSchema"
+    )
+    output_type: Optional[OutputType] = Field(
+        default=None, serialization_alias="outputType"
+    )
 
     class Config:
         use_enum_values = True
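A short sketch of the alias behavior these models define (not part of the diff; the "role" key on PromptMessage is assumed from the class's full definition, which this hunk only partially shows):

    from deepeval.prompt.api import ModelSettings, PromptMessageList

    # validation_alias=AliasChoices(...) accepts snake_case or camelCase on input:
    s1 = ModelSettings(max_tokens=1024, top_p=0.9)
    s2 = ModelSettings.model_validate({"maxTokens": 1024, "topP": 0.9})
    assert s1 == s2

    # serialization_alias emits camelCase keys on output:
    print(s1.model_dump(by_alias=True, exclude_none=True))
    # {'maxTokens': 1024, 'topP': 0.9}

    # The new TypeAdapter validates a raw list of message dicts in one call:
    messages = PromptMessageList.validate_python(
        [{"role": "user", "content": "Hello!"}]  # "role" assumed, see note above
    )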