arize-phoenix 4.5.0__py3-none-any.whl → 4.6.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of arize-phoenix might be problematic. See the release advisory for more details.

Files changed (123)
  1. {arize_phoenix-4.5.0.dist-info → arize_phoenix-4.6.2.dist-info}/METADATA +16 -8
  2. {arize_phoenix-4.5.0.dist-info → arize_phoenix-4.6.2.dist-info}/RECORD +122 -58
  3. {arize_phoenix-4.5.0.dist-info → arize_phoenix-4.6.2.dist-info}/WHEEL +1 -1
  4. phoenix/__init__.py +0 -27
  5. phoenix/config.py +42 -7
  6. phoenix/core/model.py +25 -25
  7. phoenix/core/model_schema.py +64 -62
  8. phoenix/core/model_schema_adapter.py +27 -25
  9. phoenix/datetime_utils.py +4 -0
  10. phoenix/db/bulk_inserter.py +54 -14
  11. phoenix/db/insertion/dataset.py +237 -0
  12. phoenix/db/insertion/evaluation.py +10 -10
  13. phoenix/db/insertion/helpers.py +17 -14
  14. phoenix/db/insertion/span.py +3 -3
  15. phoenix/db/migrations/types.py +29 -0
  16. phoenix/db/migrations/versions/10460e46d750_datasets.py +291 -0
  17. phoenix/db/migrations/versions/cf03bd6bae1d_init.py +2 -28
  18. phoenix/db/models.py +236 -4
  19. phoenix/experiments/__init__.py +6 -0
  20. phoenix/experiments/evaluators/__init__.py +29 -0
  21. phoenix/experiments/evaluators/base.py +153 -0
  22. phoenix/experiments/evaluators/code_evaluators.py +99 -0
  23. phoenix/experiments/evaluators/llm_evaluators.py +244 -0
  24. phoenix/experiments/evaluators/utils.py +186 -0
  25. phoenix/experiments/functions.py +757 -0
  26. phoenix/experiments/tracing.py +85 -0
  27. phoenix/experiments/types.py +753 -0
  28. phoenix/experiments/utils.py +24 -0
  29. phoenix/inferences/fixtures.py +23 -23
  30. phoenix/inferences/inferences.py +7 -7
  31. phoenix/inferences/validation.py +1 -1
  32. phoenix/server/api/context.py +20 -0
  33. phoenix/server/api/dataloaders/__init__.py +20 -0
  34. phoenix/server/api/dataloaders/average_experiment_run_latency.py +54 -0
  35. phoenix/server/api/dataloaders/dataset_example_revisions.py +100 -0
  36. phoenix/server/api/dataloaders/dataset_example_spans.py +43 -0
  37. phoenix/server/api/dataloaders/experiment_annotation_summaries.py +85 -0
  38. phoenix/server/api/dataloaders/experiment_error_rates.py +43 -0
  39. phoenix/server/api/dataloaders/experiment_run_counts.py +42 -0
  40. phoenix/server/api/dataloaders/experiment_sequence_number.py +49 -0
  41. phoenix/server/api/dataloaders/project_by_name.py +31 -0
  42. phoenix/server/api/dataloaders/span_descendants.py +2 -3
  43. phoenix/server/api/dataloaders/span_projects.py +33 -0
  44. phoenix/server/api/dataloaders/trace_row_ids.py +39 -0
  45. phoenix/server/api/helpers/dataset_helpers.py +179 -0
  46. phoenix/server/api/input_types/AddExamplesToDatasetInput.py +16 -0
  47. phoenix/server/api/input_types/AddSpansToDatasetInput.py +14 -0
  48. phoenix/server/api/input_types/ClearProjectInput.py +15 -0
  49. phoenix/server/api/input_types/CreateDatasetInput.py +12 -0
  50. phoenix/server/api/input_types/DatasetExampleInput.py +14 -0
  51. phoenix/server/api/input_types/DatasetSort.py +17 -0
  52. phoenix/server/api/input_types/DatasetVersionSort.py +16 -0
  53. phoenix/server/api/input_types/DeleteDatasetExamplesInput.py +13 -0
  54. phoenix/server/api/input_types/DeleteDatasetInput.py +7 -0
  55. phoenix/server/api/input_types/DeleteExperimentsInput.py +9 -0
  56. phoenix/server/api/input_types/PatchDatasetExamplesInput.py +35 -0
  57. phoenix/server/api/input_types/PatchDatasetInput.py +14 -0
  58. phoenix/server/api/mutations/__init__.py +13 -0
  59. phoenix/server/api/mutations/auth.py +11 -0
  60. phoenix/server/api/mutations/dataset_mutations.py +520 -0
  61. phoenix/server/api/mutations/experiment_mutations.py +65 -0
  62. phoenix/server/api/{types/ExportEventsMutation.py → mutations/export_events_mutations.py} +17 -14
  63. phoenix/server/api/mutations/project_mutations.py +47 -0
  64. phoenix/server/api/openapi/__init__.py +0 -0
  65. phoenix/server/api/openapi/main.py +6 -0
  66. phoenix/server/api/openapi/schema.py +16 -0
  67. phoenix/server/api/queries.py +503 -0
  68. phoenix/server/api/routers/v1/__init__.py +77 -2
  69. phoenix/server/api/routers/v1/dataset_examples.py +178 -0
  70. phoenix/server/api/routers/v1/datasets.py +965 -0
  71. phoenix/server/api/routers/v1/evaluations.py +8 -13
  72. phoenix/server/api/routers/v1/experiment_evaluations.py +143 -0
  73. phoenix/server/api/routers/v1/experiment_runs.py +220 -0
  74. phoenix/server/api/routers/v1/experiments.py +302 -0
  75. phoenix/server/api/routers/v1/spans.py +9 -5
  76. phoenix/server/api/routers/v1/traces.py +1 -4
  77. phoenix/server/api/schema.py +2 -303
  78. phoenix/server/api/types/AnnotatorKind.py +10 -0
  79. phoenix/server/api/types/Cluster.py +19 -19
  80. phoenix/server/api/types/CreateDatasetPayload.py +8 -0
  81. phoenix/server/api/types/Dataset.py +282 -63
  82. phoenix/server/api/types/DatasetExample.py +85 -0
  83. phoenix/server/api/types/DatasetExampleRevision.py +34 -0
  84. phoenix/server/api/types/DatasetVersion.py +14 -0
  85. phoenix/server/api/types/Dimension.py +30 -29
  86. phoenix/server/api/types/EmbeddingDimension.py +40 -34
  87. phoenix/server/api/types/Event.py +16 -16
  88. phoenix/server/api/types/ExampleRevisionInterface.py +14 -0
  89. phoenix/server/api/types/Experiment.py +147 -0
  90. phoenix/server/api/types/ExperimentAnnotationSummary.py +13 -0
  91. phoenix/server/api/types/ExperimentComparison.py +19 -0
  92. phoenix/server/api/types/ExperimentRun.py +91 -0
  93. phoenix/server/api/types/ExperimentRunAnnotation.py +57 -0
  94. phoenix/server/api/types/Inferences.py +80 -0
  95. phoenix/server/api/types/InferencesRole.py +23 -0
  96. phoenix/server/api/types/Model.py +43 -42
  97. phoenix/server/api/types/Project.py +26 -12
  98. phoenix/server/api/types/Span.py +79 -2
  99. phoenix/server/api/types/TimeSeries.py +6 -6
  100. phoenix/server/api/types/Trace.py +15 -4
  101. phoenix/server/api/types/UMAPPoints.py +1 -1
  102. phoenix/server/api/types/node.py +5 -111
  103. phoenix/server/api/types/pagination.py +10 -52
  104. phoenix/server/app.py +103 -49
  105. phoenix/server/main.py +49 -27
  106. phoenix/server/openapi/docs.py +3 -0
  107. phoenix/server/static/index.js +2300 -1294
  108. phoenix/server/templates/index.html +1 -0
  109. phoenix/services.py +15 -15
  110. phoenix/session/client.py +581 -22
  111. phoenix/session/session.py +47 -37
  112. phoenix/trace/exporter.py +14 -9
  113. phoenix/trace/fixtures.py +133 -7
  114. phoenix/trace/schemas.py +1 -2
  115. phoenix/trace/span_evaluations.py +3 -3
  116. phoenix/trace/trace_dataset.py +6 -6
  117. phoenix/utilities/json.py +61 -0
  118. phoenix/utilities/re.py +50 -0
  119. phoenix/version.py +1 -1
  120. phoenix/server/api/types/DatasetRole.py +0 -23
  121. {arize_phoenix-4.5.0.dist-info → arize_phoenix-4.6.2.dist-info}/licenses/IP_NOTICE +0 -0
  122. {arize_phoenix-4.5.0.dist-info → arize_phoenix-4.6.2.dist-info}/licenses/LICENSE +0 -0
  123. /phoenix/server/api/{helpers.py → helpers/__init__.py} +0 -0
@@ -0,0 +1,42 @@
1
from typing import (
    AsyncContextManager,
    Callable,
    List,
)

from sqlalchemy import func, select
from sqlalchemy.ext.asyncio import AsyncSession
from strawberry.dataloader import DataLoader
from typing_extensions import TypeAlias

from phoenix.db import models

ExperimentID: TypeAlias = int
RunCount: TypeAlias = int
Key: TypeAlias = ExperimentID
Result: TypeAlias = RunCount


class ExperimentRunCountsDataLoader(DataLoader[Key, Result]):
    """
    Batch-loads the number of runs recorded for each experiment rowid.
    """

    def __init__(
        self,
        db: Callable[[], AsyncContextManager[AsyncSession]],
    ) -> None:
        super().__init__(load_fn=self._load_fn)
        self._db = db

    async def _load_fn(self, keys: List[Key]) -> List[Result]:
        experiment_ids = keys
        async with self._db() as session:
            # COUNT(*) per experiment, restricted to the requested ids.
            run_counts = {
                experiment_id: run_count
                async for experiment_id, run_count in await session.stream(
                    select(models.ExperimentRun.experiment_id, func.count())
                    .where(models.ExperimentRun.experiment_id.in_(set(experiment_ids)))
                    .group_by(models.ExperimentRun.experiment_id)
                )
            }
        # An experiment with no runs produces no GROUP BY row. Report it as
        # zero rather than as an error: the previous behavior substituted
        # ValueError("Unknown experiment ...") for any missing key, which made
        # the DataLoader raise for experiments that simply had no runs yet.
        return [run_counts.get(experiment_id, 0) for experiment_id in experiment_ids]
@@ -0,0 +1,49 @@
1
from typing import (
    AsyncContextManager,
    Callable,
    List,
    Optional,
)

from sqlalchemy import distinct, func, select
from sqlalchemy.ext.asyncio import AsyncSession
from strawberry.dataloader import DataLoader
from typing_extensions import TypeAlias

from phoenix.db import models

ExperimentId: TypeAlias = int
Key: TypeAlias = ExperimentId
Result: TypeAlias = Optional[int]


class ExperimentSequenceNumberDataLoader(DataLoader[Key, Result]):
    """
    Batch-loads each experiment's 1-based position among the experiments of
    its own dataset, numbered in ascending experiment-id order. Unknown
    experiment ids yield None.
    """

    def __init__(self, db: Callable[[], AsyncContextManager[AsyncSession]]) -> None:
        super().__init__(load_fn=self._load_fn)
        self._db = db

    async def _load_fn(self, keys: List[Key]) -> List[Result]:
        experiment_ids = keys
        # Only datasets containing at least one requested experiment need to
        # be ranked.
        relevant_dataset_ids = (
            select(distinct(models.Experiment.dataset_id))
            .where(models.Experiment.id.in_(experiment_ids))
            .scalar_subquery()
        )
        # Window function: number experiments within each dataset by id.
        seq_num = (
            func.row_number().over(
                partition_by=models.Experiment.dataset_id,
                order_by=models.Experiment.id,
            )
        ).label("row_number")
        ranked = (
            select(models.Experiment.id, seq_num)
            .where(models.Experiment.dataset_id.in_(relevant_dataset_ids))
            .subquery()
        )
        query = select(ranked).where(ranked.c.id.in_(experiment_ids))
        async with self._db() as session:
            sequence_numbers = {
                experiment_id: sequence_number
                async for experiment_id, sequence_number in await session.stream(query)
            }
        return [sequence_numbers.get(experiment_id) for experiment_id in experiment_ids]
@@ -0,0 +1,31 @@
1
from typing import AsyncContextManager, Callable, Dict, List, Optional

from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from strawberry.dataloader import DataLoader
from typing_extensions import TypeAlias

from phoenix.db import models

ProjectName: TypeAlias = str
Key: TypeAlias = ProjectName
Result: TypeAlias = Optional[models.Project]


class ProjectByNameDataLoader(DataLoader[Key, Result]):
    """
    Batch-loads projects by name, yielding None for names that have no
    matching project.
    """

    def __init__(self, db: Callable[[], AsyncContextManager[AsyncSession]]) -> None:
        super().__init__(load_fn=self._load_fn)
        self._db = db

    async def _load_fn(self, keys: List[Key]) -> List[Result]:
        # De-duplicate names for the query only; the returned list must still
        # be positionally aligned with `keys`.
        project_names = list(set(keys))
        projects_by_name: Dict[Key, Result] = {}
        async with self._db() as session:
            data = await session.stream_scalars(
                select(models.Project).where(models.Project.name.in_(project_names))
            )
            async for project in data:
                projects_by_name[project.name] = project

        # Fixes two defects in the original:
        # 1. `defaultdict(None)` supplies no default factory (it behaves like
        #    a plain dict), so a name with no project raised KeyError instead
        #    of returning None; `.get()` yields None as Result's type intends.
        # 2. Iterating `project_names` (an arbitrarily ordered, de-duplicated
        #    set) could return results misaligned with the incoming keys —
        #    DataLoader pairs results to keys by position.
        return [projects_by_name.get(project_name) for project_name in keys]
@@ -9,7 +9,7 @@ from typing import (
9
9
  from aioitertools.itertools import groupby
10
10
  from sqlalchemy import select
11
11
  from sqlalchemy.ext.asyncio import AsyncSession
12
- from sqlalchemy.orm import contains_eager
12
+ from sqlalchemy.orm import joinedload
13
13
  from strawberry.dataloader import DataLoader
14
14
  from typing_extensions import TypeAlias
15
15
 
@@ -52,8 +52,7 @@ class SpanDescendantsDataLoader(DataLoader[Key, Result]):
52
52
  stmt = (
53
53
  select(descendant_ids.c[root_id_label], models.Span)
54
54
  .join(descendant_ids, models.Span.id == descendant_ids.c.id)
55
- .join(models.Trace)
56
- .options(contains_eager(models.Span.trace))
55
+ .options(joinedload(models.Span.trace, innerjoin=True).load_only(models.Trace.trace_id))
57
56
  .order_by(descendant_ids.c[root_id_label])
58
57
  )
59
58
  results: Dict[SpanId, Result] = {key: [] for key in keys}
@@ -0,0 +1,33 @@
1
from typing import AsyncContextManager, Callable, List, Union

from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from strawberry.dataloader import DataLoader
from typing_extensions import TypeAlias

from phoenix.db import models

SpanID: TypeAlias = int
Key: TypeAlias = SpanID
Result: TypeAlias = models.Project


class SpanProjectsDataLoader(DataLoader[Key, Result]):
    """
    Batch-loads the project each span (by rowid) belongs to, following the
    span -> trace -> project foreign keys.
    """

    def __init__(self, db: Callable[[], AsyncContextManager[AsyncSession]]) -> None:
        super().__init__(load_fn=self._load_fn)
        self._db = db

    async def _load_fn(self, keys: List[Key]) -> List[Union[Result, ValueError]]:
        # De-duplicate span ids for the query only.
        span_ids = list(set(keys))
        async with self._db() as session:
            projects = {
                span_id: project
                async for span_id, project in await session.stream(
                    select(models.Span.id, models.Project)
                    .select_from(models.Span)
                    .join(models.Trace, models.Span.trace_rowid == models.Trace.id)
                    .join(models.Project, models.Trace.project_rowid == models.Project.id)
                    .where(models.Span.id.in_(span_ids))
                )
            }
        # DataLoader pairs results to keys by position, so iterate `keys`,
        # not the arbitrarily ordered `list(set(keys))` the original used —
        # that could hand back projects misaligned with their spans. A
        # ValueError entry makes the DataLoader raise for that key.
        return [projects.get(span_id) or ValueError("Invalid span ID") for span_id in keys]
@@ -0,0 +1,39 @@
1
from typing import (
    AsyncContextManager,
    Callable,
    List,
    Optional,
    Tuple,
)

from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from strawberry.dataloader import DataLoader
from typing_extensions import TypeAlias

from phoenix.db import models

TraceId: TypeAlias = str
Key: TypeAlias = TraceId
TraceRowId: TypeAlias = int
ProjectRowId: TypeAlias = int
Result: TypeAlias = Optional[Tuple[TraceRowId, ProjectRowId]]


class TraceRowIdsDataLoader(DataLoader[Key, Result]):
    """
    Batch-loads (trace rowid, project rowid) pairs for OTLP trace ids,
    yielding None for trace ids not found in the database.
    """

    def __init__(self, db: Callable[[], AsyncContextManager[AsyncSession]]) -> None:
        super().__init__(load_fn=self._load_fn)
        self._db = db

    async def _load_fn(self, keys: List[Key]) -> List[Result]:
        query = select(
            models.Trace.trace_id,
            models.Trace.id,
            models.Trace.project_rowid,
        ).where(models.Trace.trace_id.in_(keys))
        async with self._db() as session:
            rowids = {
                trace_id: (trace_rowid, project_rowid)
                async for trace_id, trace_rowid, project_rowid in await session.stream(query)
            }
        # Positionally aligned with keys; missing ids map to None.
        return [rowids.get(trace_id) for trace_id in keys]
@@ -0,0 +1,179 @@
1
+ import json
2
+ from typing import Any, Dict, Literal, Mapping, Optional, Protocol
3
+
4
+ from openinference.semconv.trace import (
5
+ MessageAttributes,
6
+ OpenInferenceMimeTypeValues,
7
+ OpenInferenceSpanKindValues,
8
+ ToolCallAttributes,
9
+ )
10
+
11
+ from phoenix.trace.attributes import get_attribute_value
12
+
13
+
14
class HasSpanIO(Protocol):
    """
    Structural interface for objects that carry the span attributes needed to
    derive dataset-example input and output values.
    """

    span_kind: Optional[str]  # OpenInference span kind (e.g. LLM, RETRIEVER)
    input_value: Any
    input_mime_type: Optional[str]
    output_value: Any
    output_mime_type: Optional[str]
    llm_prompt_template_variables: Any
    llm_input_messages: Any
    llm_output_messages: Any
    retrieval_documents: Any
29
+
30
+
31
def get_dataset_example_input(span: HasSpanIO) -> Dict[str, Any]:
    """
    Derive a dataset example's input dictionary from a span.

    LLM spans are mined for their input messages and prompt-template
    variables; every other span kind falls back to the generic
    input-value / mime-type extraction.
    """
    value, mime_type = span.input_value, span.input_mime_type
    if span.span_kind != OpenInferenceSpanKindValues.LLM.value:
        return _get_generic_io_value(io_value=value, mime_type=mime_type, kind="input")
    return _get_llm_span_input(
        input_messages=span.llm_input_messages,
        input_value=value,
        input_mime_type=mime_type,
        prompt_template_variables=span.llm_prompt_template_variables,
    )
48
+
49
+
50
def get_dataset_example_output(span: HasSpanIO) -> Dict[str, Any]:
    """
    Derive a dataset example's output dictionary from a span.

    LLM spans are mined for their output messages; retriever spans for their
    retrieval documents; every other span kind falls back to the generic
    output-value / mime-type extraction.
    """
    value, mime_type = span.output_value, span.output_mime_type
    kind = span.span_kind
    if kind == OpenInferenceSpanKindValues.LLM.value:
        return _get_llm_span_output(
            output_messages=span.llm_output_messages,
            output_value=value,
            output_mime_type=mime_type,
        )
    if kind == OpenInferenceSpanKindValues.RETRIEVER.value:
        return _get_retriever_span_output(
            retrieval_documents=span.retrieval_documents,
            output_value=value,
            output_mime_type=mime_type,
        )
    return _get_generic_io_value(io_value=value, mime_type=mime_type, kind="output")
74
+
75
+
76
def _get_llm_span_input(
    input_messages: Any,
    input_value: Any,
    input_mime_type: Optional[str],
    prompt_template_variables: Any,
) -> Dict[str, Any]:
    """
    Build the input dict for an LLM span from its input messages, falling
    back to the generic value extraction when no messages are present, and
    merging in prompt-template variables when available.
    """
    messages = [_get_message(message) for message in (input_messages or ())]
    if messages:
        result: Dict[str, Any] = {"messages": messages}
    else:
        result = _get_generic_io_value(
            io_value=input_value, mime_type=input_mime_type, kind="input"
        )
    if prompt_template_variables:
        # Merge rather than mutate so the generic result is left untouched.
        result = {**result, "prompt_template_variables": prompt_template_variables}
    return result
95
+
96
+
97
def _get_llm_span_output(
    output_messages: Any,
    output_value: Any,
    output_mime_type: Optional[str],
) -> Dict[str, Any]:
    """
    Build the output dict for an LLM span from its output messages, falling
    back to the generic value extraction when no messages are present.
    """
    messages = [_get_message(message) for message in (output_messages or ())]
    if messages:
        return {"messages": messages}
    return _get_generic_io_value(io_value=output_value, mime_type=output_mime_type, kind="output")
109
+
110
+
111
def _get_retriever_span_output(
    retrieval_documents: Any,
    output_value: Any,
    output_mime_type: Optional[str],
) -> Dict[str, Any]:
    """
    Build the output dict for a retriever span from its retrieval documents,
    falling back to the generic value extraction when none were recorded.
    """
    if retrieval_documents is None:
        return _get_generic_io_value(
            io_value=output_value, mime_type=output_mime_type, kind="output"
        )
    return {"documents": retrieval_documents}
123
+
124
+
125
def _get_generic_io_value(
    io_value: Any, mime_type: Optional[str], kind: Literal["input", "output"]
) -> Dict[str, Any]:
    """
    Best-effort extraction of an input or output value into a dictionary.

    JSON-typed values are parsed: a parsed dict is returned as-is; any other
    parsed value is wrapped under the `kind` key. Plain strings are wrapped
    under the `kind` key; all other values yield an empty dict.
    """
    if mime_type == OpenInferenceMimeTypeValues.JSON.value:
        try:
            parsed_value = json.loads(io_value)
        except (json.JSONDecodeError, TypeError):
            # Honor the "best effort" contract: the original raised on a
            # malformed or non-string payload mislabeled as JSON; fall
            # through to the string handling below instead.
            pass
        else:
            if isinstance(parsed_value, dict):
                return parsed_value
            return {kind: parsed_value}
    if isinstance(io_value, str):
        return {kind: io_value}
    return {}
141
+
142
+
143
def _get_message(message: Mapping[str, Any]) -> Dict[str, Any]:
    """
    Convert an OpenInference message mapping into a plain dict, including
    only the optional keys that are actually present on the message.
    """
    result: Dict[str, Any] = {"role": get_attribute_value(message, MESSAGE_ROLE)}
    if (content := get_attribute_value(message, MESSAGE_CONTENT)) is not None:
        result["content"] = content
    if (name := get_attribute_value(message, MESSAGE_NAME)) is not None:
        result["name"] = name
    fn_name = get_attribute_value(message, MESSAGE_FUNCTION_CALL_NAME)
    fn_args = get_attribute_value(message, MESSAGE_FUNCTION_CALL_ARGUMENTS_JSON)
    # A function call is recorded when either component is present.
    if fn_name is not None or fn_args is not None:
        result["function_call"] = {"name": fn_name, "arguments": fn_args}
    tool_calls = [
        {
            "function": {
                "name": get_attribute_value(tool_call, TOOL_CALL_FUNCTION_NAME),
                "arguments": get_attribute_value(tool_call, TOOL_CALL_FUNCTION_ARGUMENTS_JSON),
            }
        }
        for tool_call in (get_attribute_value(message, MESSAGE_TOOL_CALLS) or ())
    ]
    if tool_calls:
        result["tool_calls"] = tool_calls
    return result
169
+
170
+
171
# Flattened OpenInference attribute keys read by _get_message when
# extracting message and tool-call payloads.
MESSAGE_CONTENT = MessageAttributes.MESSAGE_CONTENT
MESSAGE_FUNCTION_CALL_ARGUMENTS_JSON = MessageAttributes.MESSAGE_FUNCTION_CALL_ARGUMENTS_JSON
MESSAGE_FUNCTION_CALL_NAME = MessageAttributes.MESSAGE_FUNCTION_CALL_NAME
MESSAGE_NAME = MessageAttributes.MESSAGE_NAME
MESSAGE_ROLE = MessageAttributes.MESSAGE_ROLE
MESSAGE_TOOL_CALLS = MessageAttributes.MESSAGE_TOOL_CALLS

TOOL_CALL_FUNCTION_NAME = ToolCallAttributes.TOOL_CALL_FUNCTION_NAME
TOOL_CALL_FUNCTION_ARGUMENTS_JSON = ToolCallAttributes.TOOL_CALL_FUNCTION_ARGUMENTS_JSON
@@ -0,0 +1,16 @@
1
from typing import List, Optional

import strawberry
from strawberry import UNSET
from strawberry.relay import GlobalID
from strawberry.scalars import JSON

from .DatasetExampleInput import DatasetExampleInput


@strawberry.input
class AddExamplesToDatasetInput:
    """
    Input for adding examples to an existing dataset.
    """

    # Relay GlobalID of the target dataset.
    dataset_id: GlobalID
    # Examples to append to the dataset.
    examples: List[DatasetExampleInput]
    # Optional description and metadata for the resulting dataset version.
    dataset_version_description: Optional[str] = UNSET
    dataset_version_metadata: Optional[JSON] = UNSET
@@ -0,0 +1,14 @@
1
from typing import List, Optional

import strawberry
from strawberry import UNSET
from strawberry.relay import GlobalID
from strawberry.scalars import JSON


@strawberry.input
class AddSpansToDatasetInput:
    """
    Input for creating dataset examples from existing spans.
    """

    # Relay GlobalID of the target dataset.
    dataset_id: GlobalID
    # Relay GlobalIDs of the spans to add.
    span_ids: List[GlobalID]
    # Optional description and metadata for the resulting dataset version.
    dataset_version_description: Optional[str] = UNSET
    dataset_version_metadata: Optional[JSON] = UNSET
@@ -0,0 +1,15 @@
1
from datetime import datetime
from typing import Optional

import strawberry
from strawberry import UNSET
from strawberry.relay import GlobalID


@strawberry.input
class ClearProjectInput:
    """
    Input for purging data from a project, optionally only up to a cutoff time.
    """

    # Relay GlobalID of the project to clear.
    id: GlobalID
    end_time: Optional[datetime] = strawberry.field(
        default=UNSET,
        description="The time up to which to purge data. Time is right-open /non-inclusive.",
    )
@@ -0,0 +1,12 @@
1
from typing import Optional

import strawberry
from strawberry import UNSET
from strawberry.scalars import JSON


@strawberry.input
class CreateDatasetInput:
    """
    Input for creating a new dataset.
    """

    # Dataset name (required); description and metadata are optional.
    name: str
    description: Optional[str] = UNSET
    metadata: Optional[JSON] = UNSET
@@ -0,0 +1,14 @@
1
from typing import Optional

import strawberry
from strawberry import UNSET
from strawberry.relay import GlobalID
from strawberry.scalars import JSON


@strawberry.input
class DatasetExampleInput:
    """
    A single dataset example: input/output payloads plus metadata, optionally
    linked to the span it was derived from.
    """

    input: JSON
    output: JSON
    metadata: JSON
    # Relay GlobalID of the originating span, when the example came from one.
    span_id: Optional[GlobalID] = UNSET
@@ -0,0 +1,17 @@
1
from enum import Enum

import strawberry

from phoenix.server.api.types.SortDir import SortDir


@strawberry.enum
class DatasetColumn(Enum):
    """Sortable columns for dataset connections."""

    createdAt = "created_at"
    name = "name"


@strawberry.input(description="The sort key and direction for dataset connections")
class DatasetSort:
    col: DatasetColumn
    dir: SortDir
@@ -0,0 +1,16 @@
1
from enum import Enum

import strawberry

from phoenix.server.api.types.SortDir import SortDir


@strawberry.enum
class DatasetVersionColumn(Enum):
    """Sortable columns for dataset version connections."""

    createdAt = "created_at"


@strawberry.input(description="The sort key and direction for dataset version connections")
class DatasetVersionSort:
    col: DatasetVersionColumn
    dir: SortDir
@@ -0,0 +1,13 @@
1
from typing import List, Optional

import strawberry
from strawberry import UNSET
from strawberry.relay import GlobalID
from strawberry.scalars import JSON


@strawberry.input
class DeleteDatasetExamplesInput:
    """
    Input for deleting examples from a dataset.
    """

    # Relay GlobalIDs of the examples to delete.
    example_ids: List[GlobalID]
    # Optional description and metadata for the resulting dataset version.
    dataset_version_description: Optional[str] = UNSET
    dataset_version_metadata: Optional[JSON] = UNSET
@@ -0,0 +1,7 @@
1
import strawberry
from strawberry.relay import GlobalID


@strawberry.input
class DeleteDatasetInput:
    """
    Input for deleting a dataset.
    """

    # Relay GlobalID of the dataset to delete.
    dataset_id: GlobalID
@@ -0,0 +1,9 @@
1
from typing import List

import strawberry
from strawberry.relay import GlobalID


@strawberry.input
class DeleteExperimentsInput:
    """
    Input for deleting experiments.
    """

    # Relay GlobalIDs of the experiments to delete.
    experiment_ids: List[GlobalID]
@@ -0,0 +1,35 @@
1
from typing import List, Optional

import strawberry
from strawberry import UNSET
from strawberry.relay import GlobalID
from strawberry.scalars import JSON


@strawberry.input
class DatasetExamplePatch:
    """
    Contains the information needed to apply a patch revision to a dataset example.
    """

    # Relay GlobalID of the example to patch.
    example_id: GlobalID
    # Each UNSET field is left unchanged by the patch.
    input: Optional[JSON] = UNSET
    output: Optional[JSON] = UNSET
    metadata: Optional[JSON] = UNSET

    def is_empty(self) -> bool:
        """
        Returns True when no patchable field (input, output, metadata) has
        been set, i.e. applying the patch would change nothing.
        """
        return all(field is UNSET for field in (self.input, self.output, self.metadata))


@strawberry.input
class PatchDatasetExamplesInput:
    """
    Input type to the patchDatasetExamples mutation.
    """

    patches: List[DatasetExamplePatch]
    # Optional description and metadata for the resulting dataset version.
    version_description: Optional[str] = UNSET
    version_metadata: Optional[JSON] = UNSET
@@ -0,0 +1,14 @@
1
from typing import Optional

import strawberry
from strawberry import UNSET
from strawberry.relay import GlobalID
from strawberry.scalars import JSON


@strawberry.input
class PatchDatasetInput:
    """
    Input for updating a dataset's name, description, or metadata.
    """

    # Relay GlobalID of the dataset to patch; UNSET fields are left unchanged.
    dataset_id: GlobalID
    name: Optional[str] = UNSET
    description: Optional[str] = UNSET
    metadata: Optional[JSON] = UNSET
@@ -0,0 +1,13 @@
1
import strawberry

from phoenix.server.api.mutations.dataset_mutations import DatasetMutationMixin
from phoenix.server.api.mutations.experiment_mutations import ExperimentMutationMixin
from phoenix.server.api.mutations.export_events_mutations import ExportEventsMutationMixin
from phoenix.server.api.mutations.project_mutations import ProjectMutationMixin


@strawberry.type
class Mutation(
    ProjectMutationMixin, DatasetMutationMixin, ExperimentMutationMixin, ExportEventsMutationMixin
):
    """
    Root GraphQL Mutation type, composed from the per-domain mutation mixins.
    """

    pass
@@ -0,0 +1,11 @@
1
from typing import Any

from strawberry import Info
from strawberry.permission import BasePermission


class IsAuthenticated(BasePermission):
    """
    Strawberry permission gating mutations behind the server's mode.

    NOTE(review): despite the name, this only checks the context's
    `read_only` flag — no user identity is examined here. Confirm whether
    real authentication is intended to land later.
    """

    message = "User is not authenticated"

    async def has_permission(self, source: Any, info: Info, **kwargs: Any) -> bool:
        # Permit the operation only when the server is not read-only.
        return not info.context.read_only