arize-phoenix 7.12.3__py3-none-any.whl → 8.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of arize-phoenix might be problematic. Click here for more details.
- {arize_phoenix-7.12.3.dist-info → arize_phoenix-8.0.1.dist-info}/METADATA +31 -28
- {arize_phoenix-7.12.3.dist-info → arize_phoenix-8.0.1.dist-info}/RECORD +70 -47
- phoenix/db/migrations/versions/bc8fea3c2bc8_add_prompt_tables.py +197 -0
- phoenix/db/models.py +307 -0
- phoenix/db/types/__init__.py +0 -0
- phoenix/db/types/identifier.py +7 -0
- phoenix/db/types/model_provider.py +8 -0
- phoenix/server/api/context.py +2 -0
- phoenix/server/api/dataloaders/__init__.py +2 -0
- phoenix/server/api/dataloaders/prompt_version_sequence_number.py +35 -0
- phoenix/server/api/helpers/jsonschema.py +135 -0
- phoenix/server/api/helpers/playground_clients.py +15 -15
- phoenix/server/api/helpers/playground_spans.py +9 -0
- phoenix/server/api/helpers/prompts/__init__.py +0 -0
- phoenix/server/api/helpers/prompts/conversions/__init__.py +0 -0
- phoenix/server/api/helpers/prompts/conversions/anthropic.py +87 -0
- phoenix/server/api/helpers/prompts/conversions/openai.py +78 -0
- phoenix/server/api/helpers/prompts/models.py +575 -0
- phoenix/server/api/input_types/ChatCompletionInput.py +9 -4
- phoenix/server/api/input_types/PromptTemplateOptions.py +10 -0
- phoenix/server/api/input_types/PromptVersionInput.py +133 -0
- phoenix/server/api/mutations/__init__.py +6 -0
- phoenix/server/api/mutations/chat_mutations.py +18 -16
- phoenix/server/api/mutations/prompt_label_mutations.py +191 -0
- phoenix/server/api/mutations/prompt_mutations.py +312 -0
- phoenix/server/api/mutations/prompt_version_tag_mutations.py +148 -0
- phoenix/server/api/mutations/user_mutations.py +7 -6
- phoenix/server/api/openapi/schema.py +1 -0
- phoenix/server/api/queries.py +84 -31
- phoenix/server/api/routers/oauth2.py +3 -2
- phoenix/server/api/routers/v1/__init__.py +2 -0
- phoenix/server/api/routers/v1/datasets.py +1 -1
- phoenix/server/api/routers/v1/experiment_evaluations.py +1 -1
- phoenix/server/api/routers/v1/experiment_runs.py +1 -1
- phoenix/server/api/routers/v1/experiments.py +1 -1
- phoenix/server/api/routers/v1/models.py +45 -0
- phoenix/server/api/routers/v1/prompts.py +415 -0
- phoenix/server/api/routers/v1/spans.py +1 -1
- phoenix/server/api/routers/v1/traces.py +1 -1
- phoenix/server/api/routers/v1/utils.py +1 -1
- phoenix/server/api/subscriptions.py +21 -24
- phoenix/server/api/types/GenerativeProvider.py +4 -4
- phoenix/server/api/types/Identifier.py +15 -0
- phoenix/server/api/types/Project.py +5 -7
- phoenix/server/api/types/Prompt.py +134 -0
- phoenix/server/api/types/PromptLabel.py +41 -0
- phoenix/server/api/types/PromptVersion.py +148 -0
- phoenix/server/api/types/PromptVersionTag.py +27 -0
- phoenix/server/api/types/PromptVersionTemplate.py +148 -0
- phoenix/server/api/types/ResponseFormat.py +9 -0
- phoenix/server/api/types/ToolDefinition.py +9 -0
- phoenix/server/app.py +3 -0
- phoenix/server/static/.vite/manifest.json +45 -45
- phoenix/server/static/assets/components-B-qgPyHv.js +2699 -0
- phoenix/server/static/assets/index-D4KO1IcF.js +1125 -0
- phoenix/server/static/assets/pages-DdcuL3Rh.js +5634 -0
- phoenix/server/static/assets/vendor-DQp7CrDA.js +894 -0
- phoenix/server/static/assets/vendor-arizeai-C1nEIEQq.js +657 -0
- phoenix/server/static/assets/vendor-codemirror-BZXYUIkP.js +24 -0
- phoenix/server/static/assets/vendor-recharts-BUFpwCVD.js +59 -0
- phoenix/server/static/assets/{vendor-shiki-Cl9QBraO.js → vendor-shiki-C8L-c9jT.js} +2 -2
- phoenix/server/static/assets/{vendor-three-DwGkEfCM.js → vendor-three-C-AGeJYv.js} +1 -1
- phoenix/session/client.py +25 -21
- phoenix/utilities/client.py +6 -0
- phoenix/version.py +1 -1
- phoenix/server/api/input_types/TemplateOptions.py +0 -10
- phoenix/server/api/routers/v1/pydantic_compat.py +0 -78
- phoenix/server/api/types/TemplateLanguage.py +0 -10
- phoenix/server/static/assets/components-DckIzNmE.js +0 -2125
- phoenix/server/static/assets/index-Bf25Ogon.js +0 -113
- phoenix/server/static/assets/pages-DL7J9q9w.js +0 -4463
- phoenix/server/static/assets/vendor-DvC8cT4X.js +0 -894
- phoenix/server/static/assets/vendor-arizeai-Do1793cv.js +0 -662
- phoenix/server/static/assets/vendor-codemirror-BzwZPyJM.js +0 -24
- phoenix/server/static/assets/vendor-recharts-_Jb7JjhG.js +0 -59
- {arize_phoenix-7.12.3.dist-info → arize_phoenix-8.0.1.dist-info}/WHEEL +0 -0
- {arize_phoenix-7.12.3.dist-info → arize_phoenix-8.0.1.dist-info}/entry_points.txt +0 -0
- {arize_phoenix-7.12.3.dist-info → arize_phoenix-8.0.1.dist-info}/licenses/IP_NOTICE +0 -0
- {arize_phoenix-7.12.3.dist-info → arize_phoenix-8.0.1.dist-info}/licenses/LICENSE +0 -0
- /phoenix/server/static/assets/{vendor-DxkFTwjz.css → vendor-Cg6lcjUC.css} +0 -0
|
@@ -0,0 +1,575 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from enum import Enum
|
|
4
|
+
from typing import Any, Literal, Mapping, Optional, Union
|
|
5
|
+
|
|
6
|
+
from pydantic import BaseModel, ConfigDict, Field, RootModel
|
|
7
|
+
from typing_extensions import Annotated, TypeAlias, TypeGuard, assert_never
|
|
8
|
+
|
|
9
|
+
from phoenix.db.types.model_provider import ModelProvider
|
|
10
|
+
from phoenix.server.api.helpers.prompts.conversions.anthropic import AnthropicToolChoiceConversion
|
|
11
|
+
from phoenix.server.api.helpers.prompts.conversions.openai import OpenAIToolChoiceConversion
|
|
12
|
+
|
|
13
|
+
JSONSerializable = Union[None, bool, int, float, str, dict[str, Any], list[Any]]


class Undefined:
    """
    Singleton sentinel representing "no value provided".

    Pydantic cannot natively distinguish a field that was never set from one
    explicitly set to None, so this falsy marker object stands in for "unset".
    """

    def __new__(cls) -> Any:
        # Lazily create and cache the single shared instance on the class.
        instance = getattr(cls, "_instance", None)
        if instance is None:
            instance = super().__new__(cls)
            cls._instance = instance
        return instance

    def __bool__(self) -> bool:
        # Falsy so `if value:` treats an unset value like other empty values.
        return False


UNDEFINED: Any = Undefined()
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
class PromptTemplateType(str, Enum):
    """How a prompt template is represented: a single string or a chat transcript."""

    STRING = "STR"  # NOTE: the stored value is the abbreviation "STR", not "STRING"
    CHAT = "CHAT"


class PromptMessageRole(str, Enum):
    """Normalized message roles shared across model providers."""

    USER = "USER"
    SYSTEM = "SYSTEM"  # e.g. the OpenAI developer role or an Anthropic system instruction
    AI = "AI"  # E.g. the assistant. Normalize to AI for consistency.
    TOOL = "TOOL"


class PromptTemplateFormat(str, Enum):
    """Placeholder syntax used when rendering a template."""

    MUSTACHE = "MUSTACHE"
    F_STRING = "F_STRING"
    NONE = "NONE"  # presumably no variable substitution at all — TODO confirm at render site
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
class PromptModel(BaseModel):
    """
    Shared base for all prompt-related models.

    - Forbids unknown fields and validates on assignment.
    - Drops UNDEFINED sentinel kwargs at construction so pydantic records those
      fields as *unset* rather than storing the sentinel.
    - Dumps with `exclude_unset=True` and `by_alias=True` by default so unset
      optional fields and pydantic-reserved names round-trip cleanly.
    """

    model_config = ConfigDict(
        extra="forbid",  # disallow extra attributes
        use_enum_values=True,
        validate_assignment=True,
    )

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Filter out sentinel values so the corresponding fields stay "unset".
        kwargs = {k: v for k, v in kwargs.items() if v is not UNDEFINED}
        super().__init__(*args, **kwargs)

    def model_dump(self, *args: Any, **kwargs: Any) -> dict[str, Any]:
        # Use setdefault instead of passing the keywords unconditionally: the
        # original raised TypeError ("got multiple values for keyword argument")
        # whenever a caller supplied exclude_unset or by_alias explicitly.
        kwargs.setdefault("exclude_unset", True)
        kwargs.setdefault("by_alias", True)
        return super().model_dump(*args, **kwargs)
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
class TextContentPart(PromptModel):
    # A plain-text chunk of a message.
    type: Literal["text"]
    text: str


class ToolCallFunction(PromptModel):
    # A function invocation requested by the model; arguments is a serialized string.
    type: Literal["function"]
    name: str
    arguments: str


class ToolCallContentPart(PromptModel):
    # A tool call embedded in a message; tool_call_id links it to its result part.
    type: Literal["tool_call"]
    tool_call_id: str
    tool_call: Annotated[
        ToolCallFunction,
        # NOTE(review): a discriminator on a single non-union type looks like a no-op — confirm
        Field(..., discriminator="type"),
    ]


class ToolResultContentPart(PromptModel):
    # The result produced for an earlier tool call carrying the same tool_call_id.
    type: Literal["tool_result"]
    tool_call_id: str
    tool_result: JSONSerializable


# Discriminated union of message content parts, tagged by `type`.
ContentPart: TypeAlias = Annotated[
    Union[TextContentPart, ToolCallContentPart, ToolResultContentPart],
    Field(..., discriminator="type"),
]

# Accepted role spellings; synonyms are collapsed by RoleConversion.to_gql.
Role: TypeAlias = Literal["user", "assistant", "model", "ai", "tool", "system", "developer"]
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
class RoleConversion:
    """Translate between GraphQL role enum members and normalized role strings."""

    @staticmethod
    def from_gql(role: PromptMessageRole) -> Role:
        """Return the wire-format string for a GraphQL role member."""
        if role is PromptMessageRole.USER:
            return "user"
        if role is PromptMessageRole.SYSTEM:
            return "system"
        if role is PromptMessageRole.AI:
            return "ai"
        if role is PromptMessageRole.TOOL:
            return "tool"
        assert_never(role)

    @staticmethod
    def to_gql(role: Role) -> PromptMessageRole:
        """Return the GraphQL role member for a role string, collapsing synonyms."""
        # "assistant", "model", and "ai" all normalize to AI; "developer" maps to SYSTEM.
        if role in ("assistant", "model", "ai"):
            return PromptMessageRole.AI
        if role in ("system", "developer"):
            return PromptMessageRole.SYSTEM
        if role == "user":
            return PromptMessageRole.USER
        if role == "tool":
            return PromptMessageRole.TOOL
        assert_never(role)
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
class PromptMessage(PromptModel):
    # A single chat message: a role plus either a plain string or structured parts.
    role: Role
    content: Union[str, Annotated[list[ContentPart], Field(..., min_length=1)]]


class PromptChatTemplate(PromptModel):
    # Chat-style template: an ordered list of messages.
    type: Literal["chat"]
    messages: list[PromptMessage]


class PromptStringTemplate(PromptModel):
    # String-style template: one template string.
    type: Literal["string"]
    template: str


# Discriminated union over the two template shapes, tagged by `type`.
PromptTemplate: TypeAlias = Annotated[
    Union[PromptChatTemplate, PromptStringTemplate], Field(..., discriminator="type")
]


def is_prompt_template(value: Any) -> TypeGuard[PromptTemplate]:
    """Narrow `value` to PromptTemplate when it is one of the concrete template models."""
    return isinstance(value, (PromptChatTemplate, PromptStringTemplate))


class PromptTemplateRootModel(RootModel[PromptTemplate]):
    # Root model so the discriminated union can be validated/serialized directly.
    root: PromptTemplate
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
class PromptToolFunction(PromptModel):
    # A tool exposed to the model in the function-calling shape.
    type: Literal["function"]
    function: PromptToolFunctionDefinition  # forward ref; resolved lazily via `from __future__ import annotations`


class PromptToolFunctionDefinition(PromptModel):
    # The callable function itself: name plus optional schema and metadata.
    name: str
    description: str = UNDEFINED
    parameters: dict[str, Any] = UNDEFINED  # JSON-schema dict for the function arguments
    strict: bool = UNDEFINED


# Single-member discriminated union tagged by `type`.
PromptTool: TypeAlias = Annotated[Union[PromptToolFunction], Field(..., discriminator="type")]


class PromptTools(PromptModel):
    # Full tool configuration: at least one tool, with an optional choice policy.
    type: Literal["tools"]
    tools: Annotated[list[PromptTool], Field(..., min_length=1)]
    tool_choice: PromptToolChoice = UNDEFINED
    disable_parallel_tool_calls: bool = UNDEFINED


class PromptToolChoiceNone(PromptModel):
    # The model must not call any tool.
    type: Literal["none"]


class PromptToolChoiceZeroOrMore(PromptModel):
    # The model may call zero or more tools — presumably the provider's "auto"; see the conversion modules.
    type: Literal["zero_or_more"]


class PromptToolChoiceOneOrMore(PromptModel):
    # The model must call at least one tool — presumably the provider's "required"; see the conversion modules.
    type: Literal["one_or_more"]


class PromptToolChoiceSpecificFunctionTool(PromptModel):
    # The model must call the named function.
    type: Literal["specific_function"]
    function_name: str


# Discriminated union over the tool-choice variants, tagged by `type`.
PromptToolChoice: TypeAlias = Annotated[
    Union[
        PromptToolChoiceNone,
        PromptToolChoiceZeroOrMore,
        PromptToolChoiceOneOrMore,
        PromptToolChoiceSpecificFunctionTool,
    ],
    Field(..., discriminator="type"),
]
|
|
210
|
+
|
|
211
|
+
|
|
212
|
+
class PromptOpenAIJSONSchema(PromptModel):
    """
    OpenAI wire-format JSON schema definition.

    Based on https://github.com/openai/openai-python/blob/d16e6edde5a155626910b5758a0b939bfedb9ced/src/openai/types/shared/response_format_json_schema.py#L13
    """

    name: str
    description: str = UNDEFINED
    schema_: dict[str, Any] = Field(
        ...,
        alias="schema",  # an alias is used to avoid conflict with the pydantic schema class method
    )
    strict: Optional[bool] = UNDEFINED


class PromptOpenAIResponseFormatJSONSchema(PromptModel):
    """
    OpenAI wire-format response format wrapper.

    Based on https://github.com/openai/openai-python/blob/d16e6edde5a155626910b5758a0b939bfedb9ced/src/openai/types/shared/response_format_json_schema.py#L40
    """

    json_schema: PromptOpenAIJSONSchema
    type: Literal["json_schema"]


class PromptResponseFormatJSONSchema(PromptModel):
    # Normalized (provider-agnostic) JSON-schema response format.
    type: Literal["json_schema"]
    json_schema: PromptResponseFormatJSONSchemaDefinition  # forward ref, resolved lazily


class PromptResponseFormatJSONSchemaDefinition(PromptModel):
    # Normalized counterpart of PromptOpenAIJSONSchema; here `schema` is optional.
    name: str
    description: str = UNDEFINED
    schema_: dict[str, Any] = Field(UNDEFINED, alias="schema")  # alias avoids clash with pydantic's schema method
    strict: bool = UNDEFINED


# Discriminated union of normalized response formats (currently only JSON schema).
PromptResponseFormat: TypeAlias = Annotated[
    Union[PromptResponseFormatJSONSchema], Field(..., discriminator="type")
]


class PromptResponseFormatRootModel(RootModel[PromptResponseFormat]):
    # Root model so the union can be validated/serialized directly.
    root: PromptResponseFormat
|
|
254
|
+
|
|
255
|
+
|
|
256
|
+
def _openai_to_prompt_response_format(
    schema: PromptOpenAIResponseFormatJSONSchema,
) -> PromptResponseFormat:
    """Convert an OpenAI response-format schema into the normalized prompt form."""
    src = schema.json_schema
    # Only a real bool is carried over; anything else stays unset.
    strict = src.strict if isinstance(src.strict, bool) else UNDEFINED
    definition = PromptResponseFormatJSONSchemaDefinition(
        name=src.name,
        description=src.description,
        schema=src.schema_,
        strict=strict,
    )
    return PromptResponseFormatJSONSchema(type="json_schema", json_schema=definition)


def _prompt_to_openai_response_format(
    response_format: PromptResponseFormat,
) -> PromptOpenAIResponseFormatJSONSchema:
    """Convert a normalized response format back into the OpenAI wire shape."""
    assert isinstance(response_format, PromptResponseFormatJSONSchema)
    src = response_format.json_schema
    strict = src.strict if isinstance(src.strict, bool) else UNDEFINED
    definition = PromptOpenAIJSONSchema(
        name=src.name,
        description=src.description,
        schema=src.schema_,
        strict=strict,
    )
    return PromptOpenAIResponseFormatJSONSchema(type="json_schema", json_schema=definition)
|
|
285
|
+
|
|
286
|
+
|
|
287
|
+
def normalize_response_format(
    response_format: dict[str, Any], model_provider: ModelProvider
) -> PromptResponseFormat:
    """Parse a raw provider response-format dict into the normalized model.

    Raises ValueError for providers without response-format support here.
    """
    if model_provider in (ModelProvider.OPENAI, ModelProvider.AZURE_OPENAI):
        parsed = PromptOpenAIResponseFormatJSONSchema.model_validate(response_format)
        return _openai_to_prompt_response_format(parsed)
    raise ValueError(f"Unsupported model provider: {model_provider}")


def denormalize_response_format(
    response_format: PromptResponseFormat, model_provider: ModelProvider
) -> dict[str, Any]:
    """Convert a normalized response format into the provider's wire dict.

    Raises ValueError for providers without response-format support here.
    """
    if model_provider in (ModelProvider.OPENAI, ModelProvider.AZURE_OPENAI):
        return _prompt_to_openai_response_format(response_format).model_dump()
    raise ValueError(f"Unsupported model provider: {model_provider}")
|
|
305
|
+
|
|
306
|
+
|
|
307
|
+
# OpenAI tool definitions
class OpenAIFunctionDefinition(PromptModel):
    """
    OpenAI wire-format function definition.

    Based on https://github.com/openai/openai-python/blob/1e07c9d839e7e96f02d0a4b745f379a43086334c/src/openai/types/shared_params/function_definition.py#L13
    """

    name: str
    description: str = UNDEFINED
    parameters: dict[str, Any] = UNDEFINED  # JSON-schema dict describing the arguments
    strict: Optional[bool] = UNDEFINED


class OpenAIToolDefinition(PromptModel):
    """
    OpenAI wire-format tool wrapper.

    Based on https://github.com/openai/openai-python/blob/1e07c9d839e7e96f02d0a4b745f379a43086334c/src/openai/types/chat/chat_completion_tool_param.py#L12
    """

    function: OpenAIFunctionDefinition
    type: Literal["function"]


# Anthropic tool definitions
class AnthropicCacheControlParam(PromptModel):
    """
    Anthropic cache-control marker.

    Based on https://github.com/anthropics/anthropic-sdk-python/blob/93cbbbde964e244f02bf1bd2b579c5fabce4e267/src/anthropic/types/cache_control_ephemeral_param.py#L10
    """

    type: Literal["ephemeral"]


class AnthropicToolDefinition(PromptModel):
    """
    Anthropic wire-format tool definition; input_schema is required.

    Based on https://github.com/anthropics/anthropic-sdk-python/blob/93cbbbde964e244f02bf1bd2b579c5fabce4e267/src/anthropic/types/tool_param.py#L22
    """

    input_schema: dict[str, Any]  # JSON schema for the tool input
    name: str
    cache_control: Optional[AnthropicCacheControlParam] = UNDEFINED
    description: str = UNDEFINED
|
|
346
|
+
|
|
347
|
+
|
|
348
|
+
class PromptOpenAIInvocationParametersContent(PromptModel):
    # Sampling/limit knobs for OpenAI chat completions; every field is optional.
    temperature: float = UNDEFINED
    max_tokens: int = UNDEFINED
    frequency_penalty: float = UNDEFINED
    presence_penalty: float = UNDEFINED
    top_p: float = UNDEFINED
    seed: int = UNDEFINED
    reasoning_effort: Literal["low", "medium", "high"] = UNDEFINED


class PromptOpenAIInvocationParameters(PromptModel):
    # Tagged wrapper so the per-provider payloads form a discriminated union.
    type: Literal["openai"]
    openai: PromptOpenAIInvocationParametersContent


class PromptAzureOpenAIInvocationParametersContent(PromptOpenAIInvocationParametersContent):
    # Azure OpenAI accepts the same parameter set as OpenAI.
    pass


class PromptAzureOpenAIInvocationParameters(PromptModel):
    type: Literal["azure_openai"]
    azure_openai: PromptAzureOpenAIInvocationParametersContent


class PromptAnthropicInvocationParametersContent(PromptModel):
    # max_tokens has no UNDEFINED default, so it is required here, unlike other providers.
    max_tokens: int
    temperature: float = UNDEFINED
    top_p: float = UNDEFINED
    stop_sequences: list[str] = UNDEFINED


class PromptAnthropicInvocationParameters(PromptModel):
    type: Literal["anthropic"]
    anthropic: PromptAnthropicInvocationParametersContent


class PromptGoogleInvocationParametersContent(PromptModel):
    temperature: float = UNDEFINED
    max_output_tokens: int = UNDEFINED
    stop_sequences: list[str] = UNDEFINED
    presence_penalty: float = UNDEFINED
    frequency_penalty: float = UNDEFINED
    top_p: float = UNDEFINED
    top_k: int = UNDEFINED


class PromptGoogleInvocationParameters(PromptModel):
    type: Literal["google"]
    google: PromptGoogleInvocationParametersContent


# Discriminated union of per-provider invocation parameters, tagged by `type`.
PromptInvocationParameters: TypeAlias = Annotated[
    Union[
        PromptOpenAIInvocationParameters,
        PromptAzureOpenAIInvocationParameters,
        PromptAnthropicInvocationParameters,
        PromptGoogleInvocationParameters,
    ],
    Field(..., discriminator="type"),
]
|
|
408
|
+
|
|
409
|
+
|
|
410
|
+
def get_raw_invocation_parameters(
    invocation_parameters: PromptInvocationParameters,
) -> dict[str, Any]:
    """Return the provider-specific parameter payload as a plain dict."""
    # Map each wrapper type to the attribute holding its payload model.
    payload_attr_by_type = (
        (PromptOpenAIInvocationParameters, "openai"),
        (PromptAzureOpenAIInvocationParameters, "azure_openai"),
        (PromptAnthropicInvocationParameters, "anthropic"),
        (PromptGoogleInvocationParameters, "google"),
    )
    for params_cls, attr in payload_attr_by_type:
        if isinstance(invocation_parameters, params_cls):
            return getattr(invocation_parameters, attr).model_dump()
    assert_never(invocation_parameters)


def is_prompt_invocation_parameters(
    invocation_parameters: Any,
) -> TypeGuard[PromptInvocationParameters]:
    """Narrow to PromptInvocationParameters when the value is any provider wrapper."""
    supported_types = (
        PromptOpenAIInvocationParameters,
        PromptAzureOpenAIInvocationParameters,
        PromptAnthropicInvocationParameters,
        PromptGoogleInvocationParameters,
    )
    return isinstance(invocation_parameters, supported_types)


class PromptInvocationParametersRootModel(RootModel[PromptInvocationParameters]):
    # Root model so the discriminated union can be validated directly.
    root: PromptInvocationParameters
|
|
440
|
+
|
|
441
|
+
|
|
442
|
+
def validate_invocation_parameters(
    invocation_parameters: dict[str, Any],
    model_provider: ModelProvider,
) -> PromptInvocationParameters:
    """Parse a raw parameter dict into the tagged wrapper model for the given provider."""
    if model_provider is ModelProvider.OPENAI:
        openai_content = PromptOpenAIInvocationParametersContent.model_validate(
            invocation_parameters
        )
        return PromptOpenAIInvocationParameters(type="openai", openai=openai_content)
    if model_provider is ModelProvider.AZURE_OPENAI:
        azure_content = PromptAzureOpenAIInvocationParametersContent.model_validate(
            invocation_parameters
        )
        return PromptAzureOpenAIInvocationParameters(
            type="azure_openai", azure_openai=azure_content
        )
    if model_provider is ModelProvider.ANTHROPIC:
        anthropic_content = PromptAnthropicInvocationParametersContent.model_validate(
            invocation_parameters
        )
        return PromptAnthropicInvocationParameters(type="anthropic", anthropic=anthropic_content)
    if model_provider is ModelProvider.GOOGLE:
        google_content = PromptGoogleInvocationParametersContent.model_validate(
            invocation_parameters
        )
        return PromptGoogleInvocationParameters(type="google", google=google_content)
    assert_never(model_provider)
|
|
471
|
+
|
|
472
|
+
|
|
473
|
+
def normalize_tools(
    schemas: list[dict[str, Any]],
    model_provider: ModelProvider,
    tool_choice: Optional[Union[str, Mapping[str, Any]]] = None,
) -> PromptTools:
    """Validate provider tool schemas and convert them (plus the optional
    tool choice) into the normalized PromptTools model.

    Raises ValueError for providers without tool support here.
    """
    tools: list[PromptToolFunction]
    if model_provider in (ModelProvider.OPENAI, ModelProvider.AZURE_OPENAI):
        tools = [
            _openai_to_prompt_tool(OpenAIToolDefinition.model_validate(schema))
            for schema in schemas
        ]
    elif model_provider is ModelProvider.ANTHROPIC:
        tools = [
            _anthropic_to_prompt_tool(AnthropicToolDefinition.model_validate(schema))
            for schema in schemas
        ]
    else:
        raise ValueError(f"Unsupported model provider: {model_provider}")
    result = PromptTools(type="tools", tools=tools)
    if tool_choice is None:
        return result
    if model_provider in (ModelProvider.OPENAI, ModelProvider.AZURE_OPENAI):
        result.tool_choice = OpenAIToolChoiceConversion.from_openai(tool_choice)  # type: ignore[arg-type]
    elif model_provider is ModelProvider.ANTHROPIC:
        # Anthropic folds parallel-call control into the choice; unpack both.
        choice, disable_parallel = AnthropicToolChoiceConversion.from_anthropic(
            tool_choice  # type: ignore[arg-type]
        )
        result.tool_choice = choice
        if disable_parallel is not None:
            result.disable_parallel_tool_calls = disable_parallel
    return result
|
|
499
|
+
|
|
500
|
+
|
|
501
|
+
def denormalize_tools(
    tools: PromptTools, model_provider: ModelProvider
) -> tuple[list[dict[str, Any]], Optional[Any]]:
    """Convert normalized tools back to provider wire format.

    Returns the list of tool dicts and the provider tool-choice value
    (None when no choice is set, or when it is not forwarded).
    Raises ValueError for providers without tool support here.
    """
    assert tools.type == "tools"
    tool_choice: Optional[Any] = None
    provider_tools: list[PromptModel]
    if model_provider in (ModelProvider.OPENAI, ModelProvider.AZURE_OPENAI):
        provider_tools = [_prompt_to_openai_tool(tool) for tool in tools.tools]
        if tools.tool_choice:
            tool_choice = OpenAIToolChoiceConversion.to_openai(tools.tool_choice)
    elif model_provider is ModelProvider.ANTHROPIC:
        provider_tools = [_prompt_to_anthropic_tool(tool) for tool in tools.tools]
        # A "none" choice is deliberately not forwarded to Anthropic.
        if tools.tool_choice and tools.tool_choice.type != "none":
            tool_choice = AnthropicToolChoiceConversion.to_anthropic(tools.tool_choice)
    else:
        raise ValueError(f"Unsupported model provider: {model_provider}")
    return [tool.model_dump() for tool in provider_tools], tool_choice
|
|
518
|
+
|
|
519
|
+
|
|
520
|
+
def _openai_to_prompt_tool(
    tool: OpenAIToolDefinition,
) -> PromptToolFunction:
    """Convert an OpenAI tool definition into the normalized form."""
    fn = tool.function
    # Only a real bool is carried over; anything else stays unset.
    strict = fn.strict if isinstance(fn.strict, bool) else UNDEFINED
    return PromptToolFunction(
        type="function",
        function=PromptToolFunctionDefinition(
            name=fn.name,
            description=fn.description,
            parameters=fn.parameters,
            strict=strict,
        ),
    )


def _prompt_to_openai_tool(
    tool: PromptToolFunction,
) -> OpenAIToolDefinition:
    """Convert a normalized tool back into the OpenAI wire shape."""
    fn = tool.function
    strict = fn.strict if isinstance(fn.strict, bool) else UNDEFINED
    definition = OpenAIFunctionDefinition(
        name=fn.name,
        description=fn.description,
        parameters=fn.parameters,
        strict=strict,
    )
    return OpenAIToolDefinition(type="function", function=definition)


def _anthropic_to_prompt_tool(
    tool: AnthropicToolDefinition,
) -> PromptToolFunction:
    """Convert an Anthropic tool definition into the normalized form."""
    definition = PromptToolFunctionDefinition(
        name=tool.name,
        description=tool.description,
        parameters=tool.input_schema,
    )
    return PromptToolFunction(type="function", function=definition)


def _prompt_to_anthropic_tool(
    tool: PromptToolFunction,
) -> AnthropicToolDefinition:
    """Convert a normalized tool back into the Anthropic wire shape."""
    fn = tool.function
    # Anthropic requires input_schema, so unset parameters become an empty schema.
    input_schema = fn.parameters if fn.parameters is not UNDEFINED else {}
    return AnthropicToolDefinition(
        input_schema=input_schema,
        name=fn.name,
        description=fn.description,
    )
|
|
@@ -5,12 +5,15 @@ from strawberry import UNSET
|
|
|
5
5
|
from strawberry.relay.types import GlobalID
|
|
6
6
|
from strawberry.scalars import JSON
|
|
7
7
|
|
|
8
|
-
from phoenix.server.api.
|
|
8
|
+
from phoenix.server.api.helpers.prompts.models import (
|
|
9
|
+
PromptTemplateFormat,
|
|
10
|
+
)
|
|
11
|
+
from phoenix.server.api.types.Identifier import Identifier
|
|
9
12
|
|
|
10
13
|
from .ChatCompletionMessageInput import ChatCompletionMessageInput
|
|
11
14
|
from .GenerativeModelInput import GenerativeModelInput
|
|
12
15
|
from .InvocationParameters import InvocationParameterInput
|
|
13
|
-
from .
|
|
16
|
+
from .PromptTemplateOptions import PromptTemplateOptions
|
|
14
17
|
|
|
15
18
|
|
|
16
19
|
@strawberry.input
|
|
@@ -20,7 +23,8 @@ class ChatCompletionInput:
|
|
|
20
23
|
invocation_parameters: list[InvocationParameterInput] = strawberry.field(default_factory=list)
|
|
21
24
|
tools: Optional[list[JSON]] = UNSET
|
|
22
25
|
api_key: Optional[str] = strawberry.field(default=None)
|
|
23
|
-
template: Optional[
|
|
26
|
+
template: Optional[PromptTemplateOptions] = UNSET
|
|
27
|
+
prompt_name: Optional[Identifier] = None
|
|
24
28
|
|
|
25
29
|
|
|
26
30
|
@strawberry.input
|
|
@@ -30,9 +34,10 @@ class ChatCompletionOverDatasetInput:
|
|
|
30
34
|
invocation_parameters: list[InvocationParameterInput] = strawberry.field(default_factory=list)
|
|
31
35
|
tools: Optional[list[JSON]] = UNSET
|
|
32
36
|
api_key: Optional[str] = strawberry.field(default=None)
|
|
33
|
-
|
|
37
|
+
template_format: PromptTemplateFormat = PromptTemplateFormat.MUSTACHE
|
|
34
38
|
dataset_id: GlobalID
|
|
35
39
|
dataset_version_id: Optional[GlobalID] = None
|
|
36
40
|
experiment_name: Optional[str] = None
|
|
37
41
|
experiment_description: Optional[str] = None
|
|
38
42
|
experiment_metadata: Optional[JSON] = strawberry.field(default_factory=dict)
|
|
43
|
+
prompt_name: Optional[Identifier] = None
|