arize-phoenix 5.5.2__py3-none-any.whl → 5.7.0__py3-none-any.whl

This diff compares the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.

Potentially problematic release.

Files changed (186)
  1. {arize_phoenix-5.5.2.dist-info → arize_phoenix-5.7.0.dist-info}/METADATA +4 -7
  2. arize_phoenix-5.7.0.dist-info/RECORD +330 -0
  3. phoenix/config.py +50 -8
  4. phoenix/core/model.py +3 -3
  5. phoenix/core/model_schema.py +41 -50
  6. phoenix/core/model_schema_adapter.py +17 -16
  7. phoenix/datetime_utils.py +2 -2
  8. phoenix/db/bulk_inserter.py +10 -20
  9. phoenix/db/engines.py +2 -1
  10. phoenix/db/enums.py +2 -2
  11. phoenix/db/helpers.py +8 -7
  12. phoenix/db/insertion/dataset.py +9 -19
  13. phoenix/db/insertion/document_annotation.py +14 -13
  14. phoenix/db/insertion/helpers.py +6 -16
  15. phoenix/db/insertion/span_annotation.py +14 -13
  16. phoenix/db/insertion/trace_annotation.py +14 -13
  17. phoenix/db/insertion/types.py +19 -30
  18. phoenix/db/migrations/versions/3be8647b87d8_add_token_columns_to_spans_table.py +8 -8
  19. phoenix/db/models.py +28 -28
  20. phoenix/experiments/evaluators/base.py +2 -1
  21. phoenix/experiments/evaluators/code_evaluators.py +4 -5
  22. phoenix/experiments/evaluators/llm_evaluators.py +157 -4
  23. phoenix/experiments/evaluators/utils.py +3 -2
  24. phoenix/experiments/functions.py +10 -21
  25. phoenix/experiments/tracing.py +2 -1
  26. phoenix/experiments/types.py +20 -29
  27. phoenix/experiments/utils.py +2 -1
  28. phoenix/inferences/errors.py +6 -5
  29. phoenix/inferences/fixtures.py +6 -5
  30. phoenix/inferences/inferences.py +37 -37
  31. phoenix/inferences/schema.py +11 -10
  32. phoenix/inferences/validation.py +13 -14
  33. phoenix/logging/_formatter.py +3 -3
  34. phoenix/metrics/__init__.py +5 -4
  35. phoenix/metrics/binning.py +2 -1
  36. phoenix/metrics/metrics.py +2 -1
  37. phoenix/metrics/mixins.py +7 -6
  38. phoenix/metrics/retrieval_metrics.py +2 -1
  39. phoenix/metrics/timeseries.py +5 -4
  40. phoenix/metrics/wrappers.py +2 -2
  41. phoenix/pointcloud/clustering.py +3 -4
  42. phoenix/pointcloud/pointcloud.py +7 -5
  43. phoenix/pointcloud/umap_parameters.py +2 -1
  44. phoenix/server/api/dataloaders/annotation_summaries.py +12 -19
  45. phoenix/server/api/dataloaders/average_experiment_run_latency.py +2 -2
  46. phoenix/server/api/dataloaders/cache/two_tier_cache.py +3 -2
  47. phoenix/server/api/dataloaders/dataset_example_revisions.py +3 -8
  48. phoenix/server/api/dataloaders/dataset_example_spans.py +2 -5
  49. phoenix/server/api/dataloaders/document_evaluation_summaries.py +12 -18
  50. phoenix/server/api/dataloaders/document_evaluations.py +3 -7
  51. phoenix/server/api/dataloaders/document_retrieval_metrics.py +6 -13
  52. phoenix/server/api/dataloaders/experiment_annotation_summaries.py +4 -8
  53. phoenix/server/api/dataloaders/experiment_error_rates.py +2 -5
  54. phoenix/server/api/dataloaders/experiment_run_annotations.py +3 -7
  55. phoenix/server/api/dataloaders/experiment_run_counts.py +1 -5
  56. phoenix/server/api/dataloaders/experiment_sequence_number.py +2 -5
  57. phoenix/server/api/dataloaders/latency_ms_quantile.py +21 -30
  58. phoenix/server/api/dataloaders/min_start_or_max_end_times.py +7 -13
  59. phoenix/server/api/dataloaders/project_by_name.py +3 -3
  60. phoenix/server/api/dataloaders/record_counts.py +11 -18
  61. phoenix/server/api/dataloaders/span_annotations.py +3 -7
  62. phoenix/server/api/dataloaders/span_dataset_examples.py +3 -8
  63. phoenix/server/api/dataloaders/span_descendants.py +3 -7
  64. phoenix/server/api/dataloaders/span_projects.py +2 -2
  65. phoenix/server/api/dataloaders/token_counts.py +12 -19
  66. phoenix/server/api/dataloaders/trace_row_ids.py +3 -7
  67. phoenix/server/api/dataloaders/user_roles.py +3 -3
  68. phoenix/server/api/dataloaders/users.py +3 -3
  69. phoenix/server/api/helpers/__init__.py +4 -3
  70. phoenix/server/api/helpers/dataset_helpers.py +10 -9
  71. phoenix/server/api/helpers/playground_clients.py +671 -0
  72. phoenix/server/api/helpers/playground_registry.py +70 -0
  73. phoenix/server/api/helpers/playground_spans.py +325 -0
  74. phoenix/server/api/input_types/AddExamplesToDatasetInput.py +2 -2
  75. phoenix/server/api/input_types/AddSpansToDatasetInput.py +2 -2
  76. phoenix/server/api/input_types/ChatCompletionInput.py +38 -0
  77. phoenix/server/api/input_types/ChatCompletionMessageInput.py +13 -1
  78. phoenix/server/api/input_types/ClusterInput.py +2 -2
  79. phoenix/server/api/input_types/DeleteAnnotationsInput.py +1 -3
  80. phoenix/server/api/input_types/DeleteDatasetExamplesInput.py +2 -2
  81. phoenix/server/api/input_types/DeleteExperimentsInput.py +1 -3
  82. phoenix/server/api/input_types/DimensionFilter.py +4 -4
  83. phoenix/server/api/input_types/GenerativeModelInput.py +17 -0
  84. phoenix/server/api/input_types/Granularity.py +1 -1
  85. phoenix/server/api/input_types/InvocationParameters.py +156 -13
  86. phoenix/server/api/input_types/PatchDatasetExamplesInput.py +2 -2
  87. phoenix/server/api/input_types/TemplateOptions.py +10 -0
  88. phoenix/server/api/mutations/__init__.py +4 -0
  89. phoenix/server/api/mutations/chat_mutations.py +374 -0
  90. phoenix/server/api/mutations/dataset_mutations.py +4 -4
  91. phoenix/server/api/mutations/experiment_mutations.py +1 -2
  92. phoenix/server/api/mutations/export_events_mutations.py +7 -7
  93. phoenix/server/api/mutations/span_annotations_mutations.py +4 -4
  94. phoenix/server/api/mutations/trace_annotations_mutations.py +4 -4
  95. phoenix/server/api/mutations/user_mutations.py +4 -4
  96. phoenix/server/api/openapi/schema.py +2 -2
  97. phoenix/server/api/queries.py +61 -72
  98. phoenix/server/api/routers/oauth2.py +4 -4
  99. phoenix/server/api/routers/v1/datasets.py +22 -36
  100. phoenix/server/api/routers/v1/evaluations.py +6 -5
  101. phoenix/server/api/routers/v1/experiment_evaluations.py +2 -2
  102. phoenix/server/api/routers/v1/experiment_runs.py +2 -2
  103. phoenix/server/api/routers/v1/experiments.py +4 -4
  104. phoenix/server/api/routers/v1/spans.py +13 -12
  105. phoenix/server/api/routers/v1/traces.py +5 -5
  106. phoenix/server/api/routers/v1/utils.py +5 -5
  107. phoenix/server/api/schema.py +42 -10
  108. phoenix/server/api/subscriptions.py +347 -494
  109. phoenix/server/api/types/AnnotationSummary.py +3 -3
  110. phoenix/server/api/types/ChatCompletionSubscriptionPayload.py +44 -0
  111. phoenix/server/api/types/Cluster.py +8 -7
  112. phoenix/server/api/types/Dataset.py +5 -4
  113. phoenix/server/api/types/Dimension.py +3 -3
  114. phoenix/server/api/types/DocumentEvaluationSummary.py +8 -7
  115. phoenix/server/api/types/EmbeddingDimension.py +6 -5
  116. phoenix/server/api/types/EvaluationSummary.py +3 -3
  117. phoenix/server/api/types/Event.py +7 -7
  118. phoenix/server/api/types/Experiment.py +3 -3
  119. phoenix/server/api/types/ExperimentComparison.py +2 -4
  120. phoenix/server/api/types/GenerativeProvider.py +27 -3
  121. phoenix/server/api/types/Inferences.py +9 -8
  122. phoenix/server/api/types/InferencesRole.py +2 -2
  123. phoenix/server/api/types/Model.py +2 -2
  124. phoenix/server/api/types/Project.py +11 -18
  125. phoenix/server/api/types/Segments.py +3 -3
  126. phoenix/server/api/types/Span.py +45 -7
  127. phoenix/server/api/types/TemplateLanguage.py +9 -0
  128. phoenix/server/api/types/TimeSeries.py +8 -7
  129. phoenix/server/api/types/Trace.py +2 -2
  130. phoenix/server/api/types/UMAPPoints.py +6 -6
  131. phoenix/server/api/types/User.py +3 -3
  132. phoenix/server/api/types/node.py +1 -3
  133. phoenix/server/api/types/pagination.py +4 -4
  134. phoenix/server/api/utils.py +2 -4
  135. phoenix/server/app.py +76 -37
  136. phoenix/server/bearer_auth.py +4 -10
  137. phoenix/server/dml_event.py +3 -3
  138. phoenix/server/dml_event_handler.py +10 -24
  139. phoenix/server/grpc_server.py +3 -2
  140. phoenix/server/jwt_store.py +22 -21
  141. phoenix/server/main.py +17 -4
  142. phoenix/server/oauth2.py +3 -2
  143. phoenix/server/rate_limiters.py +5 -8
  144. phoenix/server/static/.vite/manifest.json +31 -31
  145. phoenix/server/static/assets/components-Csu8UKOs.js +1612 -0
  146. phoenix/server/static/assets/{index-DCzakdJq.js → index-Bk5C9EA7.js} +2 -2
  147. phoenix/server/static/assets/{pages-CAL1FDMt.js → pages-UeWaKXNs.js} +337 -442
  148. phoenix/server/static/assets/{vendor-6IcPAw_j.js → vendor-CtqfhlbC.js} +6 -6
  149. phoenix/server/static/assets/{vendor-arizeai-DRZuoyuF.js → vendor-arizeai-C_3SBz56.js} +2 -2
  150. phoenix/server/static/assets/{vendor-codemirror-DVE2_WBr.js → vendor-codemirror-wfdk9cjp.js} +1 -1
  151. phoenix/server/static/assets/{vendor-recharts-DwrexFA4.js → vendor-recharts-BiVnSv90.js} +1 -1
  152. phoenix/server/templates/index.html +1 -0
  153. phoenix/server/thread_server.py +1 -1
  154. phoenix/server/types.py +17 -29
  155. phoenix/services.py +8 -3
  156. phoenix/session/client.py +12 -24
  157. phoenix/session/data_extractor.py +3 -3
  158. phoenix/session/evaluation.py +1 -2
  159. phoenix/session/session.py +26 -21
  160. phoenix/trace/attributes.py +16 -28
  161. phoenix/trace/dsl/filter.py +17 -21
  162. phoenix/trace/dsl/helpers.py +3 -3
  163. phoenix/trace/dsl/query.py +13 -22
  164. phoenix/trace/fixtures.py +11 -17
  165. phoenix/trace/otel.py +5 -15
  166. phoenix/trace/projects.py +3 -2
  167. phoenix/trace/schemas.py +2 -2
  168. phoenix/trace/span_evaluations.py +9 -8
  169. phoenix/trace/span_json_decoder.py +3 -3
  170. phoenix/trace/span_json_encoder.py +2 -2
  171. phoenix/trace/trace_dataset.py +6 -5
  172. phoenix/trace/utils.py +6 -6
  173. phoenix/utilities/deprecation.py +3 -2
  174. phoenix/utilities/error_handling.py +3 -2
  175. phoenix/utilities/json.py +2 -1
  176. phoenix/utilities/logging.py +2 -2
  177. phoenix/utilities/project.py +1 -1
  178. phoenix/utilities/re.py +3 -4
  179. phoenix/utilities/template_formatters.py +16 -5
  180. phoenix/version.py +1 -1
  181. arize_phoenix-5.5.2.dist-info/RECORD +0 -321
  182. phoenix/server/static/assets/components-hX0LgYz3.js +0 -1428
  183. {arize_phoenix-5.5.2.dist-info → arize_phoenix-5.7.0.dist-info}/WHEEL +0 -0
  184. {arize_phoenix-5.5.2.dist-info → arize_phoenix-5.7.0.dist-info}/entry_points.txt +0 -0
  185. {arize_phoenix-5.5.2.dist-info → arize_phoenix-5.7.0.dist-info}/licenses/IP_NOTICE +0 -0
  186. {arize_phoenix-5.5.2.dist-info → arize_phoenix-5.7.0.dist-info}/licenses/LICENSE +0 -0
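
Almost all of the typing churn in the hunks below is one mechanical refactor: the deprecated `typing` aliases (`List`, `Tuple`, `Dict`, `Set`, `DefaultDict`) give way to the PEP 585 builtin generics, and ABCs such as `Callable`, `Mapping`, and `AsyncIterator` are imported from `collections.abc` instead of `typing`. A minimal before/after sketch of the pattern (the functions here are hypothetical, not code from the package):

```python
# Hypothetical before/after illustrating the refactor pattern seen below;
# these functions are not taken from arize-phoenix.
from collections import defaultdict
from collections.abc import Mapping  # was: from typing import Mapping

# Before (Python 3.8 era):
#     from typing import DefaultDict, List, Tuple
#     def group(keys: List[Tuple[int, str]]) -> DefaultDict[str, List[int]]: ...

# After (Python >= 3.9): builtin and stdlib containers are subscripted directly.
def group(keys: list[tuple[int, str]]) -> defaultdict[str, list[int]]:
    """Collect the positions at which each name appears in the key list."""
    positions: defaultdict[str, list[int]] = defaultdict(list)
    for position, (_, name) in enumerate(keys):
        positions[name].append(position)
    return positions

def first_positions(index: Mapping[str, list[int]]) -> dict[str, int]:
    """Reduce each entry to its first recorded position."""
    return {name: found[0] for name, found in index.items() if found}
```

Subscripting builtins and `defaultdict` at runtime requires Python 3.9 or later, so the refactor suggests, though this diff alone does not confirm, that the package now assumes at least Python 3.9.
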
phoenix/server/api/dataloaders/annotation_summaries.py
@@ -1,13 +1,6 @@
 from collections import defaultdict
 from datetime import datetime
-from typing import (
-    Any,
-    DefaultDict,
-    List,
-    Literal,
-    Optional,
-    Tuple,
-)
+from typing import Any, Literal, Optional
 
 import pandas as pd
 from aioitertools.itertools import groupby
@@ -25,20 +18,20 @@ from phoenix.trace.dsl import SpanFilter
 
 Kind: TypeAlias = Literal["span", "trace"]
 ProjectRowId: TypeAlias = int
-TimeInterval: TypeAlias = Tuple[Optional[datetime], Optional[datetime]]
+TimeInterval: TypeAlias = tuple[Optional[datetime], Optional[datetime]]
 FilterCondition: TypeAlias = Optional[str]
 AnnotationName: TypeAlias = str
 
-Segment: TypeAlias = Tuple[Kind, ProjectRowId, TimeInterval, FilterCondition]
+Segment: TypeAlias = tuple[Kind, ProjectRowId, TimeInterval, FilterCondition]
 Param: TypeAlias = AnnotationName
 
-Key: TypeAlias = Tuple[Kind, ProjectRowId, Optional[TimeRange], FilterCondition, AnnotationName]
+Key: TypeAlias = tuple[Kind, ProjectRowId, Optional[TimeRange], FilterCondition, AnnotationName]
 Result: TypeAlias = Optional[AnnotationSummary]
 ResultPosition: TypeAlias = int
 DEFAULT_VALUE: Result = None
 
 
-def _cache_key_fn(key: Key) -> Tuple[Segment, Param]:
+def _cache_key_fn(key: Key) -> tuple[Segment, Param]:
     kind, project_rowid, time_range, filter_condition, eval_name = key
     interval = (
         (time_range.start, time_range.end) if isinstance(time_range, TimeRange) else (None, None)
@@ -46,8 +39,8 @@ def _cache_key_fn(key: Key) -> Tuple[Segment, Param]:
     return (kind, project_rowid, interval, filter_condition), eval_name
 
 
-_Section: TypeAlias = Tuple[ProjectRowId, AnnotationName, Kind]
-_SubKey: TypeAlias = Tuple[TimeInterval, FilterCondition]
+_Section: TypeAlias = tuple[ProjectRowId, AnnotationName, Kind]
+_SubKey: TypeAlias = tuple[TimeInterval, FilterCondition]
 
 
 class AnnotationSummaryCache(
@@ -67,7 +60,7 @@ class AnnotationSummaryCache(
         if section[0] == project_rowid:
             del self._cache[section]
 
-    def _cache_key(self, key: Key) -> Tuple[_Section, _SubKey]:
+    def _cache_key(self, key: Key) -> tuple[_Section, _SubKey]:
         (kind, project_rowid, interval, filter_condition), annotation_name = _cache_key_fn(key)
         return (project_rowid, annotation_name, kind), (interval, filter_condition)
 
@@ -85,11 +78,11 @@ class AnnotationSummaryDataLoader(DataLoader[Key, Result]):
         )
         self._db = db
 
-    async def _load_fn(self, keys: List[Key]) -> List[Result]:
-        results: List[Result] = [DEFAULT_VALUE] * len(keys)
-        arguments: DefaultDict[
+    async def _load_fn(self, keys: list[Key]) -> list[Result]:
+        results: list[Result] = [DEFAULT_VALUE] * len(keys)
+        arguments: defaultdict[
             Segment,
-            DefaultDict[Param, List[ResultPosition]],
+            defaultdict[Param, list[ResultPosition]],
         ] = defaultdict(lambda: defaultdict(list))
         for position, key in enumerate(keys):
            segment, param = _cache_key_fn(key)

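Beyond the type-alias swap, the hunk above shows the batching shape these dataloaders share: each key splits into a `Segment` (one batched query) and a `Param`, and the loader records which positions of the input each pair must answer. A stripped-down sketch of that bookkeeping, with a hypothetical `fetch_summaries` stub standing in for the real SQL:

```python
# Sketch of the key-bucketing pattern used by these dataloaders; fetch_summaries
# is a hypothetical stand-in for the one batched SQL query issued per segment.
from collections import defaultdict
from typing import Optional

Segment = tuple[int, str]   # e.g. (project_rowid, filter_condition)
Param = str                 # e.g. annotation name
Key = tuple[int, str, str]  # (project_rowid, filter_condition, name)
Result = Optional[float]

def split_key(key: Key) -> tuple[Segment, Param]:
    project_rowid, filter_condition, name = key
    return (project_rowid, filter_condition), name

async def fetch_summaries(
    segment: Segment, params: defaultdict[Param, list[int]]
) -> dict[Param, float]:
    raise NotImplementedError  # stand-in for the batched query per segment

async def load(keys: list[Key]) -> list[Result]:
    results: list[Result] = [None] * len(keys)
    # Bucket keys: one segment -> one query; each param remembers the input
    # positions its answer must fill.
    arguments: defaultdict[Segment, defaultdict[Param, list[int]]] = defaultdict(
        lambda: defaultdict(list)
    )
    for position, key in enumerate(keys):
        segment, param = split_key(key)
        arguments[segment][param].append(position)
    for segment, params in arguments.items():
        for param, value in (await fetch_summaries(segment, params)).items():
            for position in params[param]:
                results[position] = value  # fan the answer back out in key order
    return results
```
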
phoenix/server/api/dataloaders/average_experiment_run_latency.py
@@ -1,4 +1,4 @@
-from typing import List, Optional
+from typing import Optional
 
 from sqlalchemy import func, select
 from strawberry.dataloader import DataLoader
@@ -21,7 +21,7 @@ class AverageExperimentRunLatencyDataLoader(DataLoader[Key, Result]):
         super().__init__(load_fn=self._load_fn)
         self._db = db
 
-    async def _load_fn(self, keys: List[Key]) -> List[Result]:
+    async def _load_fn(self, keys: list[Key]) -> list[Result]:
         experiment_ids = keys
         resolved_experiment_ids = (
             select(models.Experiment.id)

phoenix/server/api/dataloaders/cache/two_tier_cache.py
@@ -9,7 +9,8 @@ subset that we want to invalidate.
 
 from abc import ABC, abstractmethod
 from asyncio import Future
-from typing import Any, Callable, Generic, Optional, Tuple, TypeVar
+from collections.abc import Callable
+from typing import Any, Generic, Optional, TypeVar
 
 from cachetools import Cache
 from strawberry.dataloader import AbstractCache
@@ -38,7 +39,7 @@ class TwoTierCache(
         self._sub_cache_factory = sub_cache_factory
 
     @abstractmethod
-    def _cache_key(self, key: _Key) -> Tuple[_Section, _SubKey]: ...
+    def _cache_key(self, key: _Key) -> tuple[_Section, _SubKey]: ...
 
     def invalidate(self, section: _Section) -> None:
         if sub_cache := self._cache.get(section):

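The `_Section`/`_SubKey` split that `_cache_key` returns here (and in the summary caches above) is what makes targeted invalidation cheap: entries are stored per section, so evicting one project's data leaves the rest of the cache intact. A rough sketch of the idea under those assumptions, not the package's implementation:

```python
# Rough sketch of a two-tier cache keyed by (section, sub_key); invalidation
# drops one section at a time. Not the arize-phoenix implementation.
from typing import Generic, Optional, TypeVar

_Key = TypeVar("_Key")
_Section = TypeVar("_Section")
_SubKey = TypeVar("_SubKey")
_Value = TypeVar("_Value")

class TwoTierCacheSketch(Generic[_Key, _Section, _SubKey, _Value]):
    def __init__(self) -> None:
        # main cache: section -> sub-cache of sub_key -> value
        self._cache: dict[_Section, dict[_SubKey, _Value]] = {}

    def _cache_key(self, key: _Key) -> tuple[_Section, _SubKey]:
        raise NotImplementedError  # subclasses decide how to split the key

    def get(self, key: _Key) -> Optional[_Value]:
        section, sub_key = self._cache_key(key)
        if sub_cache := self._cache.get(section):
            return sub_cache.get(sub_key)
        return None

    def set(self, key: _Key, value: _Value) -> None:
        section, sub_key = self._cache_key(key)
        self._cache.setdefault(section, {})[sub_key] = value

    def invalidate(self, section: _Section) -> None:
        # evict every entry for one section without touching the others
        self._cache.pop(section, None)
```
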
phoenix/server/api/dataloaders/dataset_example_revisions.py
@@ -1,9 +1,4 @@
-from typing import (
-    List,
-    Optional,
-    Tuple,
-    Union,
-)
+from typing import Optional, Union
 
 from sqlalchemy import and_, case, func, null, or_, select
 from sqlalchemy.sql.expression import literal
@@ -17,7 +12,7 @@ from phoenix.server.types import DbSessionFactory
 
 ExampleID: TypeAlias = int
 VersionID: TypeAlias = Optional[int]
-Key: TypeAlias = Tuple[ExampleID, Optional[VersionID]]
+Key: TypeAlias = tuple[ExampleID, Optional[VersionID]]
 Result: TypeAlias = DatasetExampleRevision
 
 
@@ -29,7 +24,7 @@ class DatasetExampleRevisionsDataLoader(DataLoader[Key, Result]):
         )
         self._db = db
 
-    async def _load_fn(self, keys: List[Key]) -> List[Union[Result, NotFound]]:
+    async def _load_fn(self, keys: list[Key]) -> list[Union[Result, NotFound]]:
         example_and_version_ids = tuple(
             set(
                 (example_id, version_id)

phoenix/server/api/dataloaders/dataset_example_spans.py
@@ -1,7 +1,4 @@
-from typing import (
-    List,
-    Optional,
-)
+from typing import Optional
 
 from sqlalchemy import select
 from sqlalchemy.orm import joinedload
@@ -21,7 +18,7 @@ class DatasetExampleSpansDataLoader(DataLoader[Key, Result]):
         super().__init__(load_fn=self._load_fn)
         self._db = db
 
-    async def _load_fn(self, keys: List[Key]) -> List[Result]:
+    async def _load_fn(self, keys: list[Key]) -> list[Result]:
         example_ids = keys
         async with self._db() as session:
             spans = {

phoenix/server/api/dataloaders/document_evaluation_summaries.py
@@ -1,12 +1,6 @@
 from collections import defaultdict
 from datetime import datetime
-from typing import (
-    Any,
-    DefaultDict,
-    List,
-    Optional,
-    Tuple,
-)
+from typing import Any, Optional
 
 import numpy as np
 from aioitertools.itertools import groupby
@@ -25,20 +19,20 @@ from phoenix.server.types import DbSessionFactory
 from phoenix.trace.dsl import SpanFilter
 
 ProjectRowId: TypeAlias = int
-TimeInterval: TypeAlias = Tuple[Optional[datetime], Optional[datetime]]
+TimeInterval: TypeAlias = tuple[Optional[datetime], Optional[datetime]]
 FilterCondition: TypeAlias = Optional[str]
 EvalName: TypeAlias = str
 
-Segment: TypeAlias = Tuple[ProjectRowId, TimeInterval, FilterCondition]
+Segment: TypeAlias = tuple[ProjectRowId, TimeInterval, FilterCondition]
 Param: TypeAlias = EvalName
 
-Key: TypeAlias = Tuple[ProjectRowId, Optional[TimeRange], FilterCondition, EvalName]
+Key: TypeAlias = tuple[ProjectRowId, Optional[TimeRange], FilterCondition, EvalName]
 Result: TypeAlias = Optional[DocumentEvaluationSummary]
 ResultPosition: TypeAlias = int
 DEFAULT_VALUE: Result = None
 
 
-def _cache_key_fn(key: Key) -> Tuple[Segment, Param]:
+def _cache_key_fn(key: Key) -> tuple[Segment, Param]:
     project_rowid, time_range, filter_condition, eval_name = key
     interval = (
         (time_range.start, time_range.end) if isinstance(time_range, TimeRange) else (None, None)
@@ -46,8 +40,8 @@ def _cache_key_fn(key: Key) -> Tuple[Segment, Param]:
     return (project_rowid, interval, filter_condition), eval_name
 
 
-_Section: TypeAlias = Tuple[ProjectRowId, EvalName]
-_SubKey: TypeAlias = Tuple[TimeInterval, FilterCondition]
+_Section: TypeAlias = tuple[ProjectRowId, EvalName]
+_SubKey: TypeAlias = tuple[TimeInterval, FilterCondition]
 
 
 class DocumentEvaluationSummaryCache(
@@ -67,7 +61,7 @@ class DocumentEvaluationSummaryCache(
         if section[0] == project_rowid:
             del self._cache[section]
 
-    def _cache_key(self, key: Key) -> Tuple[_Section, _SubKey]:
+    def _cache_key(self, key: Key) -> tuple[_Section, _SubKey]:
         (project_rowid, interval, filter_condition), eval_name = _cache_key_fn(key)
         return (project_rowid, eval_name), (interval, filter_condition)
 
@@ -85,11 +79,11 @@ class DocumentEvaluationSummaryDataLoader(DataLoader[Key, Result]):
         )
         self._db = db
 
-    async def _load_fn(self, keys: List[Key]) -> List[Result]:
-        results: List[Result] = [DEFAULT_VALUE] * len(keys)
-        arguments: DefaultDict[
+    async def _load_fn(self, keys: list[Key]) -> list[Result]:
+        results: list[Result] = [DEFAULT_VALUE] * len(keys)
+        arguments: defaultdict[
             Segment,
-            DefaultDict[Param, List[ResultPosition]],
+            defaultdict[Param, list[ResultPosition]],
         ] = defaultdict(lambda: defaultdict(list))
         for position, key in enumerate(keys):
             segment, param = _cache_key_fn(key)

phoenix/server/api/dataloaders/document_evaluations.py
@@ -1,8 +1,4 @@
 from collections import defaultdict
-from typing import (
-    DefaultDict,
-    List,
-)
 
 from sqlalchemy import select
 from strawberry.dataloader import DataLoader
@@ -13,7 +9,7 @@ from phoenix.server.api.types.Evaluation import DocumentEvaluation
 from phoenix.server.types import DbSessionFactory
 
 Key: TypeAlias = int
-Result: TypeAlias = List[DocumentEvaluation]
+Result: TypeAlias = list[DocumentEvaluation]
 
 
 class DocumentEvaluationsDataLoader(DataLoader[Key, Result]):
@@ -21,8 +17,8 @@ class DocumentEvaluationsDataLoader(DataLoader[Key, Result]):
         super().__init__(load_fn=self._load_fn)
         self._db = db
 
-    async def _load_fn(self, keys: List[Key]) -> List[Result]:
-        document_evaluations_by_id: DefaultDict[Key, Result] = defaultdict(list)
+    async def _load_fn(self, keys: list[Key]) -> list[Result]:
+        document_evaluations_by_id: defaultdict[Key, Result] = defaultdict(list)
         mda = models.DocumentAnnotation
         async with self._db() as session:
             data = await session.stream_scalars(

phoenix/server/api/dataloaders/document_retrieval_metrics.py
@@ -1,12 +1,5 @@
 from collections import defaultdict
-from typing import (
-    DefaultDict,
-    Dict,
-    List,
-    Optional,
-    Set,
-    Tuple,
-)
+from typing import Optional
 
 import numpy as np
 from aioitertools.itertools import groupby
@@ -23,8 +16,8 @@ RowId: TypeAlias = int
 NumDocs: TypeAlias = int
 EvalName: TypeAlias = Optional[str]
 
-Key: TypeAlias = Tuple[RowId, EvalName, NumDocs]
-Result: TypeAlias = List[DocumentRetrievalMetrics]
+Key: TypeAlias = tuple[RowId, EvalName, NumDocs]
+Result: TypeAlias = list[DocumentRetrievalMetrics]
 
 
 class DocumentRetrievalMetricsDataLoader(DataLoader[Key, Result]):
@@ -32,7 +25,7 @@ class DocumentRetrievalMetricsDataLoader(DataLoader[Key, Result]):
         super().__init__(load_fn=self._load_fn)
         self._db = db
 
-    async def _load_fn(self, keys: List[Key]) -> List[Result]:
+    async def _load_fn(self, keys: list[Key]) -> list[Result]:
         mda = models.DocumentAnnotation
         stmt = (
             select(
@@ -57,8 +50,8 @@ class DocumentRetrievalMetricsDataLoader(DataLoader[Key, Result]):
         stmt = stmt.where(mda.name.in_(all_eval_names))
         max_position = max(num_docs for _, _, num_docs in keys)
         stmt = stmt.where(mda.document_position < max_position)
-        results: Dict[Key, Result] = {key: [] for key in keys}
-        requested_num_docs: DefaultDict[Tuple[RowId, EvalName], Set[NumDocs]] = defaultdict(set)
+        results: dict[Key, Result] = {key: [] for key in keys}
+        requested_num_docs: defaultdict[tuple[RowId, EvalName], set[NumDocs]] = defaultdict(set)
         for row_id, eval_name, num_docs in results.keys():
             requested_num_docs[(row_id, eval_name)].add(num_docs)
         async with self._db() as session:

phoenix/server/api/dataloaders/experiment_annotation_summaries.py
@@ -1,10 +1,6 @@
 from collections import defaultdict
 from dataclasses import dataclass
-from typing import (
-    DefaultDict,
-    List,
-    Optional,
-)
+from typing import Optional
 
 from sqlalchemy import func, select
 from strawberry.dataloader import AbstractCache, DataLoader
@@ -26,7 +22,7 @@ class ExperimentAnnotationSummary:
 
 ExperimentID: TypeAlias = int
 Key: TypeAlias = ExperimentID
-Result: TypeAlias = List[ExperimentAnnotationSummary]
+Result: TypeAlias = list[ExperimentAnnotationSummary]
 
 
 class ExperimentAnnotationSummaryDataLoader(DataLoader[Key, Result]):
@@ -38,9 +34,9 @@ class ExperimentAnnotationSummaryDataLoader(DataLoader[Key, Result]):
         super().__init__(load_fn=self._load_fn)
         self._db = db
 
-    async def _load_fn(self, keys: List[Key]) -> List[Result]:
+    async def _load_fn(self, keys: list[Key]) -> list[Result]:
         experiment_ids = keys
-        summaries: DefaultDict[ExperimentID, Result] = defaultdict(list)
+        summaries: defaultdict[ExperimentID, Result] = defaultdict(list)
         async with self._db() as session:
             async for (
                 experiment_id,

phoenix/server/api/dataloaders/experiment_error_rates.py
@@ -1,7 +1,4 @@
-from typing import (
-    List,
-    Optional,
-)
+from typing import Optional
 
 from sqlalchemy import case, func, select
 from strawberry.dataloader import DataLoader
@@ -24,7 +21,7 @@ class ExperimentErrorRatesDataLoader(DataLoader[Key, Result]):
         super().__init__(load_fn=self._load_fn)
         self._db = db
 
-    async def _load_fn(self, keys: List[Key]) -> List[Result]:
+    async def _load_fn(self, keys: list[Key]) -> list[Result]:
         experiment_ids = keys
         resolved_experiment_ids = (
             select(models.Experiment.id)

phoenix/server/api/dataloaders/experiment_run_annotations.py
@@ -1,8 +1,4 @@
 from collections import defaultdict
-from typing import (
-    DefaultDict,
-    List,
-)
 
 from sqlalchemy import select
 from strawberry.dataloader import DataLoader
@@ -13,7 +9,7 @@ from phoenix.server.types import DbSessionFactory
 
 ExperimentRunID: TypeAlias = int
 Key: TypeAlias = ExperimentRunID
-Result: TypeAlias = List[OrmExperimentRunAnnotation]
+Result: TypeAlias = list[OrmExperimentRunAnnotation]
 
 
 class ExperimentRunAnnotations(DataLoader[Key, Result]):
@@ -24,9 +20,9 @@ class ExperimentRunAnnotations(DataLoader[Key, Result]):
         super().__init__(load_fn=self._load_fn)
         self._db = db
 
-    async def _load_fn(self, keys: List[Key]) -> List[Result]:
+    async def _load_fn(self, keys: list[Key]) -> list[Result]:
         run_ids = keys
-        annotations: DefaultDict[Key, Result] = defaultdict(list)
+        annotations: defaultdict[Key, Result] = defaultdict(list)
         async with self._db() as session:
             async for run_id, annotation in await session.stream(
                 select(

phoenix/server/api/dataloaders/experiment_run_counts.py
@@ -1,7 +1,3 @@
-from typing import (
-    List,
-)
-
 from sqlalchemy import func, select
 from strawberry.dataloader import DataLoader
 from typing_extensions import TypeAlias
@@ -23,7 +19,7 @@ class ExperimentRunCountsDataLoader(DataLoader[Key, Result]):
         super().__init__(load_fn=self._load_fn)
         self._db = db
 
-    async def _load_fn(self, keys: List[Key]) -> List[Result]:
+    async def _load_fn(self, keys: list[Key]) -> list[Result]:
         experiment_ids = keys
         resolved_experiment_ids = (
             select(models.Experiment.id)

phoenix/server/api/dataloaders/experiment_sequence_number.py
@@ -1,7 +1,4 @@
-from typing import (
-    List,
-    Optional,
-)
+from typing import Optional
 
 from sqlalchemy import distinct, func, select
 from strawberry.dataloader import DataLoader
@@ -20,7 +17,7 @@ class ExperimentSequenceNumberDataLoader(DataLoader[Key, Result]):
         super().__init__(load_fn=self._load_fn)
         self._db = db
 
-    async def _load_fn(self, keys: List[Key]) -> List[Result]:
+    async def _load_fn(self, keys: list[Key]) -> list[Result]:
         experiment_ids = keys
         dataset_ids = (
             select(distinct(models.Experiment.dataset_id))

phoenix/server/api/dataloaders/latency_ms_quantile.py
@@ -1,16 +1,7 @@
 from collections import defaultdict
+from collections.abc import AsyncIterator, Mapping
 from datetime import datetime
-from typing import (
-    Any,
-    AsyncIterator,
-    DefaultDict,
-    List,
-    Literal,
-    Mapping,
-    Optional,
-    Tuple,
-    cast,
-)
+from typing import Any, Literal, Optional, cast
 
 from cachetools import LFUCache, TTLCache
 from sqlalchemy import (
@@ -39,15 +30,15 @@ from phoenix.trace.dsl import SpanFilter
 
 Kind: TypeAlias = Literal["span", "trace"]
 ProjectRowId: TypeAlias = int
-TimeInterval: TypeAlias = Tuple[Optional[datetime], Optional[datetime]]
+TimeInterval: TypeAlias = tuple[Optional[datetime], Optional[datetime]]
 FilterCondition: TypeAlias = Optional[str]
 Probability: TypeAlias = float
 QuantileValue: TypeAlias = float
 
-Segment: TypeAlias = Tuple[Kind, TimeInterval, FilterCondition]
-Param: TypeAlias = Tuple[ProjectRowId, Probability]
+Segment: TypeAlias = tuple[Kind, TimeInterval, FilterCondition]
+Param: TypeAlias = tuple[ProjectRowId, Probability]
 
-Key: TypeAlias = Tuple[Kind, ProjectRowId, Optional[TimeRange], FilterCondition, Probability]
+Key: TypeAlias = tuple[Kind, ProjectRowId, Optional[TimeRange], FilterCondition, Probability]
 Result: TypeAlias = Optional[QuantileValue]
 ResultPosition: TypeAlias = int
 DEFAULT_VALUE: Result = None
@@ -55,7 +46,7 @@ DEFAULT_VALUE: Result = None
 FloatCol: TypeAlias = SQLColumnExpression[Float[float]]
 
 
-def _cache_key_fn(key: Key) -> Tuple[Segment, Param]:
+def _cache_key_fn(key: Key) -> tuple[Segment, Param]:
     kind, project_rowid, time_range, filter_condition, probability = key
     interval = (
         (time_range.start, time_range.end) if isinstance(time_range, TimeRange) else (None, None)
@@ -64,7 +55,7 @@ def _cache_key_fn(key: Key) -> Tuple[Segment, Param]:
 
 
 _Section: TypeAlias = ProjectRowId
-_SubKey: TypeAlias = Tuple[TimeInterval, FilterCondition, Kind, Probability]
+_SubKey: TypeAlias = tuple[TimeInterval, FilterCondition, Kind, Probability]
 
 
 class LatencyMsQuantileCache(
@@ -79,7 +70,7 @@ class LatencyMsQuantileCache(
             sub_cache_factory=lambda: LFUCache(maxsize=2 * 2 * 2 * 16),
         )
 
-    def _cache_key(self, key: Key) -> Tuple[_Section, _SubKey]:
+    def _cache_key(self, key: Key) -> tuple[_Section, _SubKey]:
         (kind, interval, filter_condition), (project_rowid, probability) = _cache_key_fn(key)
         return project_rowid, (interval, filter_condition, kind, probability)
 
@@ -97,11 +88,11 @@ class LatencyMsQuantileDataLoader(DataLoader[Key, Result]):
         )
         self._db = db
 
-    async def _load_fn(self, keys: List[Key]) -> List[Result]:
-        results: List[Result] = [DEFAULT_VALUE] * len(keys)
-        arguments: DefaultDict[
+    async def _load_fn(self, keys: list[Key]) -> list[Result]:
+        results: list[Result] = [DEFAULT_VALUE] * len(keys)
+        arguments: defaultdict[
             Segment,
-            DefaultDict[Param, List[ResultPosition]],
+            defaultdict[Param, list[ResultPosition]],
         ] = defaultdict(lambda: defaultdict(list))
         for position, key in enumerate(keys):
             segment, param = _cache_key_fn(key)
@@ -120,8 +111,8 @@ async def _get_results(
     dialect: SupportedSQLDialect,
     session: AsyncSession,
    segment: Segment,
-    params: Mapping[Param, List[ResultPosition]],
-) -> AsyncIterator[Tuple[ResultPosition, QuantileValue]]:
+    params: Mapping[Param, list[ResultPosition]],
+) -> AsyncIterator[tuple[ResultPosition, QuantileValue]]:
     kind, (start_time, end_time), filter_condition = segment
     stmt = select(models.Trace.project_rowid)
     if kind == "trace":
@@ -154,9 +145,9 @@ async def _get_results_sqlite(
     session: AsyncSession,
     base_stmt: Select[Any],
     latency_column: FloatCol,
-    params: Mapping[Param, List[ResultPosition]],
-) -> AsyncIterator[Tuple[ResultPosition, QuantileValue]]:
-    projects_per_prob: DefaultDict[Probability, List[ProjectRowId]] = defaultdict(list)
+    params: Mapping[Param, list[ResultPosition]],
+) -> AsyncIterator[tuple[ResultPosition, QuantileValue]]:
+    projects_per_prob: defaultdict[Probability, list[ProjectRowId]] = defaultdict(list)
     for project_rowid, probability in params.keys():
         projects_per_prob[probability].append(project_rowid)
     pid = models.Trace.project_rowid
@@ -175,9 +166,9 @@ async def _get_results_postgresql(
     session: AsyncSession,
     base_stmt: Select[Any],
     latency_column: FloatCol,
-    params: Mapping[Param, List[ResultPosition]],
-) -> AsyncIterator[Tuple[ResultPosition, QuantileValue]]:
-    probs_per_project: DefaultDict[ProjectRowId, List[Probability]] = defaultdict(list)
+    params: Mapping[Param, list[ResultPosition]],
+) -> AsyncIterator[tuple[ResultPosition, QuantileValue]]:
+    probs_per_project: defaultdict[ProjectRowId, list[Probability]] = defaultdict(list)
     for project_rowid, probability in params.keys():
         probs_per_project[project_rowid].append(probability)
     pp: Values = values(

phoenix/server/api/dataloaders/min_start_or_max_end_times.py
@@ -1,12 +1,6 @@
 from collections import defaultdict
 from datetime import datetime
-from typing import (
-    DefaultDict,
-    List,
-    Literal,
-    Optional,
-    Tuple,
-)
+from typing import Literal, Optional
 
 from cachetools import LFUCache
 from sqlalchemy import func, select
@@ -23,7 +17,7 @@ ProjectRowId: TypeAlias = int
 Segment: TypeAlias = ProjectRowId
 Param: TypeAlias = Kind
 
-Key: TypeAlias = Tuple[ProjectRowId, Kind]
+Key: TypeAlias = tuple[ProjectRowId, Kind]
 Result: TypeAlias = Optional[datetime]
 ResultPosition: TypeAlias = int
 DEFAULT_VALUE: Result = None
@@ -41,7 +35,7 @@ class MinStartOrMaxEndTimeCache(
             sub_cache_factory=lambda: LFUCache(maxsize=2),
         )
 
-    def _cache_key(self, key: Key) -> Tuple[_Section, _SubKey]:
+    def _cache_key(self, key: Key) -> tuple[_Section, _SubKey]:
         return key
 
 
@@ -57,11 +51,11 @@ class MinStartOrMaxEndTimeDataLoader(DataLoader[Key, Result]):
         )
         self._db = db
 
-    async def _load_fn(self, keys: List[Key]) -> List[Result]:
-        results: List[Result] = [DEFAULT_VALUE] * len(keys)
-        arguments: DefaultDict[
+    async def _load_fn(self, keys: list[Key]) -> list[Result]:
+        results: list[Result] = [DEFAULT_VALUE] * len(keys)
+        arguments: defaultdict[
             Segment,
-            DefaultDict[Param, List[ResultPosition]],
+            defaultdict[Param, list[ResultPosition]],
         ] = defaultdict(lambda: defaultdict(list))
         for position, key in enumerate(keys):
             segment, param = key

phoenix/server/api/dataloaders/project_by_name.py
@@ -1,5 +1,5 @@
 from collections import defaultdict
-from typing import DefaultDict, List, Optional
+from typing import Optional
 
 from sqlalchemy import select
 from strawberry.dataloader import DataLoader
@@ -18,9 +18,9 @@ class ProjectByNameDataLoader(DataLoader[Key, Result]):
         super().__init__(load_fn=self._load_fn)
         self._db = db
 
-    async def _load_fn(self, keys: List[Key]) -> List[Result]:
+    async def _load_fn(self, keys: list[Key]) -> list[Result]:
         project_names = list(set(keys))
-        projects_by_name: DefaultDict[Key, Result] = defaultdict(None)
+        projects_by_name: defaultdict[Key, Result] = defaultdict(None)
         async with self._db() as session:
             data = await session.stream_scalars(
                 select(models.Project).where(models.Project.name.in_(project_names))