arize-phoenix 11.24.0__py3-none-any.whl → 11.25.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of arize-phoenix might be problematic.

Files changed (34)
  1. {arize_phoenix-11.24.0.dist-info → arize_phoenix-11.25.0.dist-info}/METADATA +1 -1
  2. {arize_phoenix-11.24.0.dist-info → arize_phoenix-11.25.0.dist-info}/RECORD +34 -33
  3. phoenix/__init__.py +2 -1
  4. phoenix/db/engines.py +12 -18
  5. phoenix/db/insertion/document_annotation.py +0 -1
  6. phoenix/db/insertion/span_annotation.py +0 -1
  7. phoenix/db/insertion/trace_annotation.py +0 -1
  8. phoenix/db/insertion/types.py +0 -15
  9. phoenix/db/models.py +2 -2
  10. phoenix/server/api/dataloaders/annotation_summaries.py +60 -8
  11. phoenix/server/api/dataloaders/latency_ms_quantile.py +40 -8
  12. phoenix/server/api/dataloaders/record_counts.py +37 -10
  13. phoenix/server/api/dataloaders/span_cost_summary_by_project.py +28 -14
  14. phoenix/server/api/helpers/playground_clients.py +10 -4
  15. phoenix/server/api/routers/v1/experiment_evaluations.py +14 -3
  16. phoenix/server/api/routers/v1/spans.py +17 -17
  17. phoenix/server/api/types/Project.py +106 -44
  18. phoenix/server/email/sender.py +5 -2
  19. phoenix/server/session_filters.py +49 -0
  20. phoenix/server/static/.vite/manifest.json +43 -43
  21. phoenix/server/static/assets/{components-CBjkyAZ_.js → components-Dpf_EwE7.js} +334 -332
  22. phoenix/server/static/assets/{index-kl9TdGAc.js → index-gkM1FDH4.js} +5 -2
  23. phoenix/server/static/assets/{pages-B8T9ap-6.js → pages-Dz3WExiG.js} +703 -462
  24. phoenix/server/static/assets/{vendor-CU36oj8y.js → vendor-RdRDaQiR.js} +1 -1
  25. phoenix/server/static/assets/{vendor-arizeai-Ctgw0e1G.js → vendor-arizeai-DsYDNOqt.js} +2 -2
  26. phoenix/server/static/assets/{vendor-codemirror-Cojjzqb9.js → vendor-codemirror-BzJDUbEx.js} +3 -3
  27. phoenix/server/static/assets/{vendor-recharts-Bw30oz1A.js → vendor-recharts-BTHn5Y2R.js} +1 -1
  28. phoenix/server/static/assets/{vendor-shiki-DZajAPeq.js → vendor-shiki-BAcocHFl.js} +1 -1
  29. phoenix/trace/dsl/query.py +2 -0
  30. phoenix/version.py +1 -1
  31. {arize_phoenix-11.24.0.dist-info → arize_phoenix-11.25.0.dist-info}/WHEEL +0 -0
  32. {arize_phoenix-11.24.0.dist-info → arize_phoenix-11.25.0.dist-info}/entry_points.txt +0 -0
  33. {arize_phoenix-11.24.0.dist-info → arize_phoenix-11.25.0.dist-info}/licenses/IP_NOTICE +0 -0
  34. {arize_phoenix-11.24.0.dist-info → arize_phoenix-11.25.0.dist-info}/licenses/LICENSE +0 -0

phoenix/server/api/dataloaders/record_counts.py:

@@ -3,13 +3,14 @@ from datetime import datetime
 from typing import Any, Literal, Optional
 
 from cachetools import LFUCache, TTLCache
-from sqlalchemy import Select, func, select
+from sqlalchemy import Select, distinct, func, select
 from strawberry.dataloader import AbstractCache, DataLoader
 from typing_extensions import TypeAlias, assert_never
 
 from phoenix.db import models
 from phoenix.server.api.dataloaders.cache import TwoTierCache
 from phoenix.server.api.input_types.TimeRange import TimeRange
+from phoenix.server.session_filters import get_filtered_session_rowids_subquery
 from phoenix.server.types import DbSessionFactory
 from phoenix.trace.dsl import SpanFilter
 
@@ -17,27 +18,35 @@ Kind: TypeAlias = Literal["span", "trace"]
 ProjectRowId: TypeAlias = int
 TimeInterval: TypeAlias = tuple[Optional[datetime], Optional[datetime]]
 FilterCondition: TypeAlias = Optional[str]
+SessionFilterCondition: TypeAlias = Optional[str]
 SpanCount: TypeAlias = int
 
-Segment: TypeAlias = tuple[Kind, TimeInterval, FilterCondition]
+Segment: TypeAlias = tuple[Kind, TimeInterval, FilterCondition, SessionFilterCondition]
 Param: TypeAlias = ProjectRowId
 
-Key: TypeAlias = tuple[Kind, ProjectRowId, Optional[TimeRange], FilterCondition]
+Key: TypeAlias = tuple[
+    Kind, ProjectRowId, Optional[TimeRange], FilterCondition, SessionFilterCondition
+]
 Result: TypeAlias = SpanCount
 ResultPosition: TypeAlias = int
 DEFAULT_VALUE: Result = 0
 
 
 def _cache_key_fn(key: Key) -> tuple[Segment, Param]:
-    kind, project_rowid, time_range, filter_condition = key
+    kind, project_rowid, time_range, filter_condition, session_filter_condition = key
     interval = (
         (time_range.start, time_range.end) if isinstance(time_range, TimeRange) else (None, None)
     )
-    return (kind, interval, filter_condition), project_rowid
+    return (
+        kind,
+        interval,
+        filter_condition,
+        session_filter_condition,
+    ), project_rowid
 
 
 _Section: TypeAlias = ProjectRowId
-_SubKey: TypeAlias = tuple[TimeInterval, FilterCondition, Kind]
+_SubKey: TypeAlias = tuple[TimeInterval, FilterCondition, SessionFilterCondition, Kind]
 
 
 class RecordCountCache(
@@ -53,8 +62,10 @@ class RecordCountCache(
     )
 
     def _cache_key(self, key: Key) -> tuple[_Section, _SubKey]:
-        (kind, interval, filter_condition), project_rowid = _cache_key_fn(key)
-        return project_rowid, (interval, filter_condition, kind)
+        (kind, interval, filter_condition, session_filter_condition), project_rowid = _cache_key_fn(
+            key
+        )
+        return project_rowid, (interval, filter_condition, session_filter_condition, kind)
 
 
 class RecordCountDataLoader(DataLoader[Key, Result]):
@@ -93,7 +104,7 @@ def _get_stmt(
     segment: Segment,
     *project_rowids: Param,
 ) -> Select[Any]:
-    kind, (start_time, end_time), filter_condition = segment
+    kind, (start_time, end_time), filter_condition, session_filter_condition = segment
     pid = models.Trace.project_rowid
     stmt = select(pid)
     if kind == "span":
@@ -102,12 +113,28 @@ def _get_stmt(
         if filter_condition:
             sf = SpanFilter(filter_condition)
             stmt = sf(stmt)
+        stmt = stmt.add_columns(func.count().label("count"))
     elif kind == "trace":
         time_column = models.Trace.start_time
+        if filter_condition:
+            stmt = stmt.join(models.Span, models.Trace.id == models.Span.trace_rowid)
+            stmt = stmt.add_columns(func.count(distinct(models.Trace.id)).label("count"))
+            sf = SpanFilter(filter_condition)
+            stmt = sf(stmt)
+        else:
+            stmt = stmt.add_columns(func.count().label("count"))
     else:
         assert_never(kind)
-    stmt = stmt.add_columns(func.count().label("count"))
     stmt = stmt.where(pid.in_(project_rowids))
+
+    if session_filter_condition:
+        filtered_session_rowids = get_filtered_session_rowids_subquery(
+            session_filter_condition=session_filter_condition,
+            project_rowids=project_rowids,
+            start_time=start_time,
+            end_time=end_time,
+        )
+        stmt = stmt.where(models.Trace.project_session_rowid.in_(filtered_session_rowids))
    stmt = stmt.group_by(pid)
    if start_time:
        stmt = stmt.where(start_time <= time_column)
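
Several of these hunks lean on the brand-new phoenix/server/session_filters.py (file 19 above, +49 lines), which this diff lists but does not expand. Its keyword-only signature is visible at every call site, and the Project.py hunk at @@ -397,31 +448,13 further down shows the inline query it replaces, so a plausible sketch of the helper looks like the following. This is an inference, not the actual module; in particular the attribute-key constants are assumptions.

```python
# Sketch of phoenix/server/session_filters.py inferred from its call sites in
# this diff and from the inline query it replaces in Project.py. Not the
# actual module.
from datetime import datetime
from typing import Any, Optional, Sequence

from sqlalchemy import Select, distinct, or_, select

from phoenix.db import models

INPUT_VALUE = "input.value"    # assumed OpenInference attribute keys; the
OUTPUT_VALUE = "output.value"  # real module likely imports these constants


def get_filtered_session_rowids_subquery(
    *,
    session_filter_condition: str,
    project_rowids: Sequence[int],
    start_time: Optional[datetime],
    end_time: Optional[datetime],
) -> Select[Any]:
    # Sessions whose root span's input or output contains the substring,
    # case-insensitively: the logic formerly inlined in Project.py.
    stmt = (
        select(distinct(models.Trace.project_session_rowid))
        .where(models.Trace.project_rowid.in_(project_rowids))
        .join_from(models.Trace, models.Span)
        .where(models.Span.parent_id.is_(None))
        .where(
            or_(
                models.CaseInsensitiveContains(
                    models.Span.attributes[INPUT_VALUE].as_string(),
                    session_filter_condition,
                ),
                models.CaseInsensitiveContains(
                    models.Span.attributes[OUTPUT_VALUE].as_string(),
                    session_filter_condition,
                ),
            )
        )
    )
    # The same optional time-range bounds the old inline query applied.
    if start_time:
        stmt = stmt.where(start_time <= models.Trace.start_time)
    if end_time:
        stmt = stmt.where(models.Trace.start_time < end_time)
    return stmt
```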

phoenix/server/api/dataloaders/span_cost_summary_by_project.py:

@@ -12,32 +12,38 @@ from phoenix.db import models
 from phoenix.server.api.dataloaders.cache import TwoTierCache
 from phoenix.server.api.dataloaders.types import CostBreakdown, SpanCostSummary
 from phoenix.server.api.input_types.TimeRange import TimeRange
+from phoenix.server.session_filters import get_filtered_session_rowids_subquery
 from phoenix.server.types import DbSessionFactory
 from phoenix.trace.dsl import SpanFilter
 
 ProjectRowId: TypeAlias = int
 TimeInterval: TypeAlias = tuple[Optional[datetime], Optional[datetime]]
 FilterCondition: TypeAlias = Optional[str]
+SessionFilterCondition: TypeAlias = Optional[str]
 
-Segment: TypeAlias = tuple[TimeInterval, FilterCondition]
+Segment: TypeAlias = tuple[
+    TimeInterval,
+    FilterCondition,
+    SessionFilterCondition,
+]
 Param: TypeAlias = ProjectRowId
 
-Key: TypeAlias = tuple[ProjectRowId, Optional[TimeRange], FilterCondition]
+Key: TypeAlias = tuple[ProjectRowId, Optional[TimeRange], FilterCondition, SessionFilterCondition]
 Result: TypeAlias = SpanCostSummary
 ResultPosition: TypeAlias = int
 DEFAULT_VALUE: Result = SpanCostSummary()
 
 
 def _cache_key_fn(key: Key) -> tuple[Segment, Param]:
-    project_rowid, time_range, filter_condition = key
+    project_rowid, time_range, filter_condition, session_filter_condition = key
     interval = (
         (time_range.start, time_range.end) if isinstance(time_range, TimeRange) else (None, None)
     )
-    return (interval, filter_condition), project_rowid
+    return (interval, filter_condition, session_filter_condition), project_rowid
 
 
 _Section: TypeAlias = ProjectRowId
-_SubKey: TypeAlias = tuple[TimeInterval, FilterCondition]
+_SubKey: TypeAlias = tuple[TimeInterval, FilterCondition, SessionFilterCondition]
 
 
 class SpanCostSummaryCache(
@@ -53,8 +59,8 @@ class SpanCostSummaryCache(
     )
 
     def _cache_key(self, key: Key) -> tuple[_Section, _SubKey]:
-        (interval, filter_condition), project_rowid = _cache_key_fn(key)
-        return project_rowid, (interval, filter_condition)
+        (interval, filter_condition, session_filter_condition), project_rowid = _cache_key_fn(key)
+        return project_rowid, (interval, filter_condition, session_filter_condition)
 
 
 class SpanCostSummaryByProjectDataLoader(DataLoader[Key, Result]):
@@ -106,12 +112,12 @@ def _get_stmt(
     segment: Segment,
     *params: Param,
 ) -> Select[Any]:
-    (start_time, end_time), filter_condition = segment
-    pid = models.Trace.project_rowid
+    project_rowids = params
+    (start_time, end_time), filter_condition, session_filter_condition = segment
 
     stmt: Select[Any] = (
         select(
-            pid,
+            models.Trace.project_rowid,
             coalesce(func.sum(models.SpanCost.prompt_cost), 0).label("prompt_cost"),
             coalesce(func.sum(models.SpanCost.completion_cost), 0).label("completion_cost"),
             coalesce(func.sum(models.SpanCost.total_cost), 0).label("total_cost"),
@@ -119,8 +125,10 @@ def _get_stmt(
             coalesce(func.sum(models.SpanCost.completion_tokens), 0).label("completion_tokens"),
             coalesce(func.sum(models.SpanCost.total_tokens), 0).label("total_tokens"),
         )
-        .join_from(models.SpanCost, models.Trace)
-        .group_by(pid)
+        .select_from(models.Trace)
+        .join(models.SpanCost, models.Trace.id == models.SpanCost.trace_rowid)
+        .where(models.Trace.project_rowid.in_(project_rowids))
+        .group_by(models.Trace.project_rowid)
     )
 
     if start_time:
@@ -132,7 +140,13 @@ def _get_stmt(
         sf = SpanFilter(filter_condition)
         stmt = sf(stmt.join_from(models.SpanCost, models.Span))
 
-    project_ids = [rowid for rowid in params]
-    stmt = stmt.where(pid.in_(project_ids))
+    if session_filter_condition:
+        filtered_session_rowids = get_filtered_session_rowids_subquery(
+            session_filter_condition=session_filter_condition,
+            project_rowids=project_rowids,
+            start_time=start_time,
+            end_time=end_time,
+        )
+        stmt = stmt.where(models.Trace.project_session_rowid.in_(filtered_session_rowids))
 
    return stmt
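
As in record_counts.py, the session filter becomes part of the Segment rather than the per-project Param, so keys that differ only by project still batch into one grouped query. A standalone illustration of the key-splitting logic defined above, with hypothetical values:

```python
# Two hypothetical keys: same time range and session filter, different projects.
key_a = (1, None, None, "checkout flow")  # (project_rowid, time_range, filter, session_filter)
key_b = (2, None, None, "checkout flow")

def split(key):
    # Mirrors _cache_key_fn above, simplified for plain tuples.
    project_rowid, time_range, filter_condition, session_filter_condition = key
    interval = (time_range.start, time_range.end) if time_range else (None, None)
    return (interval, filter_condition, session_filter_condition), project_rowid

# Same segment, different params: the loader answers both keys with a single
# grouped-by-project SQL statement.
assert split(key_a)[0] == split(key_b)[0]
assert split(key_a)[1] != split(key_b)[1]
```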

phoenix/server/api/helpers/playground_clients.py:

@@ -543,7 +543,10 @@ class DeepSeekStreamingClient(OpenAIBaseStreamingClient):
             raise BadRequest("An API key is required for DeepSeek models")
         api_key = "sk-fake-api-key"
 
-        client = AsyncOpenAI(api_key=api_key, base_url=base_url or "https://api.deepseek.com")
+        client = AsyncOpenAI(
+            api_key=api_key,
+            base_url=base_url or "https://api.deepseek.com",
+        )
         super().__init__(client=client, model=model, credentials=credentials)
         # DeepSeek uses OpenAI-compatible API but we'll track it as a separate provider
         # Adding a custom "deepseek" provider value to make it distinguishable in traces
@@ -581,7 +584,10 @@ class XAIStreamingClient(OpenAIBaseStreamingClient):
             raise BadRequest("An API key is required for xAI models")
         api_key = "sk-fake-api-key"
 
-        client = AsyncOpenAI(api_key=api_key, base_url=base_url or "https://api.x.ai/v1")
+        client = AsyncOpenAI(
+            api_key=api_key,
+            base_url=base_url or "https://api.x.ai/v1",
+        )
         super().__init__(client=client, model=model, credentials=credentials)
         # xAI uses OpenAI-compatible API but we'll track it as a separate provider
         # Adding a custom "xai" provider value to make it distinguishable in traces
@@ -1134,7 +1140,7 @@ class OpenAIStreamingClient(OpenAIBaseStreamingClient):
             raise BadRequest("An API key is required for OpenAI models")
         api_key = "sk-fake-api-key"
 
-        client = AsyncOpenAI(api_key=api_key, base_url=base_url)
+        client = AsyncOpenAI(api_key=api_key, base_url=base_url, timeout=30)
         super().__init__(client=client, model=model, credentials=credentials)
         self._attributes[LLM_PROVIDER] = OpenInferenceLLMProviderValues.OPENAI.value
         self._attributes[LLM_SYSTEM] = OpenInferenceLLMSystemValues.OPENAI.value
@@ -1489,7 +1495,7 @@ class AnthropicStreamingClient(PlaygroundStreamingClient):
             invocation_name="top_p",
             canonical_name=CanonicalParameterName.TOP_P,
             label="Top P",
-            default_value=1.0,
+            default_value=None,
             min_value=0.0,
             max_value=1.0,
        ),
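
The top_p change is the behavioral one here: Anthropic's documentation recommends adjusting either temperature or top_p but not both, and with default_value=None the playground presumably omits the parameter from the request unless a user sets it explicitly. A minimal sketch of that omit-when-None pattern; this is assumed behavior, not the client's actual serialization code:

```python
# Assumed behavior: invocation parameters left at their None default are
# dropped from the request payload, so Anthropic no longer receives
# top_p=1.0 alongside temperature on every playground run.
defaults = {"temperature": 0.7, "top_p": None, "max_tokens": 1024}
payload = {name: value for name, value in defaults.items() if value is not None}
assert "top_p" not in payload
assert payload == {"temperature": 0.7, "max_tokens": 1024}
```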

phoenix/server/api/routers/v1/experiment_evaluations.py:

@@ -3,10 +3,11 @@ from typing import Any, Literal, Optional
 
 from dateutil.parser import isoparse
 from fastapi import APIRouter, HTTPException
-from pydantic import Field
+from pydantic import Field, model_validator
 from starlette.requests import Request
 from starlette.status import HTTP_404_NOT_FOUND
 from strawberry.relay import GlobalID
+from typing_extensions import Self
 
 from phoenix.db import models
 from phoenix.db.helpers import SupportedSQLDialect
@@ -36,15 +37,25 @@ class UpsertExperimentEvaluationRequestBody(V1RoutesBaseModel):
     )
     start_time: datetime = Field(description="The start time of the evaluation in ISO format")
     end_time: datetime = Field(description="The end time of the evaluation in ISO format")
-    result: ExperimentEvaluationResult = Field(description="The result of the evaluation")
+    result: Optional[ExperimentEvaluationResult] = Field(
+        None, description="The result of the evaluation. Either result or error must be provided."
+    )
     error: Optional[str] = Field(
-        None, description="Optional error message if the evaluation encountered an error"
+        None,
+        description="Error message if the evaluation encountered an error. "
+        "Either result or error must be provided.",
     )
     metadata: Optional[dict[str, Any]] = Field(
         default=None, description="Metadata for the evaluation"
     )
     trace_id: Optional[str] = Field(default=None, description="Optional trace ID for tracking")
 
+    @model_validator(mode="after")
+    def validate_result_or_error(self) -> Self:
+        if self.result is None and self.error is None:
+            raise ValueError("Either 'result' or 'error' must be provided")
+        return self
+
 
 class UpsertExperimentEvaluationResponseBodyData(V1RoutesBaseModel):
    id: str = Field(description="The ID of the upserted experiment evaluation")
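
A self-contained sketch of the result-or-error constraint introduced above; field types are simplified, and the real request body has additional required fields:

```python
from typing import Any, Optional

from pydantic import BaseModel, Field, model_validator
from typing_extensions import Self


class EvaluationResultOrError(BaseModel):
    # Simplified stand-in for UpsertExperimentEvaluationRequestBody.
    result: Optional[dict[str, Any]] = Field(None)
    error: Optional[str] = Field(None)

    @model_validator(mode="after")
    def validate_result_or_error(self) -> Self:
        # Runs after field parsing; rejects bodies that supply neither field.
        if self.result is None and self.error is None:
            raise ValueError("Either 'result' or 'error' must be provided")
        return self


EvaluationResultOrError(result={"score": 0.9})        # ok
EvaluationResultOrError(error="evaluator timed out")  # ok
# EvaluationResultOrError()  # raises pydantic.ValidationError
```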

phoenix/server/api/routers/v1/spans.py:

@@ -469,26 +469,26 @@ async def query_spans_handler(
             detail=f"Invalid query: {e}",
             status_code=HTTP_422_UNPROCESSABLE_ENTITY,
         )
+
     async with request.app.state.db() as session:
-        results = []
+        results: list[pd.DataFrame] = []
         for query in span_queries:
-            results.append(
-                await session.run_sync(
-                    query,
-                    project_name=project_name,
-                    start_time=normalize_datetime(
-                        request_body.start_time,
-                        timezone.utc,
-                    ),
-                    end_time=normalize_datetime(
-                        end_time,
-                        timezone.utc,
-                    ),
-                    limit=request_body.limit,
-                    root_spans_only=request_body.root_spans_only,
-                    orphan_span_as_root_span=request_body.orphan_span_as_root_span,
-                )
+            df = await session.run_sync(
+                query,
+                project_name=project_name,
+                start_time=normalize_datetime(
+                    request_body.start_time,
+                    timezone.utc,
+                ),
+                end_time=normalize_datetime(
+                    end_time,
+                    timezone.utc,
+                ),
+                limit=request_body.limit,
+                root_spans_only=request_body.root_spans_only,
+                orphan_span_as_root_span=request_body.orphan_span_as_root_span,
             )
+            results.append(df)
     if not results:
         raise HTTPException(status_code=HTTP_404_NOT_FOUND)
 

phoenix/server/api/types/Project.py:

@@ -1,5 +1,3 @@
-from __future__ import annotations
-
 import operator
 from datetime import datetime, timezone
 from typing import TYPE_CHECKING, Annotated, Any, ClassVar, Literal, Optional, cast
@@ -50,6 +48,7 @@ from phoenix.server.api.types.SpanCostSummary import SpanCostSummary
 from phoenix.server.api.types.TimeSeries import TimeSeries, TimeSeriesDataPoint
 from phoenix.server.api.types.Trace import Trace
 from phoenix.server.api.types.ValidationResult import ValidationResult
+from phoenix.server.session_filters import get_filtered_session_rowids_subquery
 from phoenix.server.types import DbSessionFactory
 from phoenix.trace.dsl import SpanFilter
 
@@ -135,9 +134,21 @@ class Project(Node):
         info: Info[Context, None],
         time_range: Optional[TimeRange] = UNSET,
         filter_condition: Optional[str] = UNSET,
+        session_filter_condition: Optional[str] = UNSET,
     ) -> int:
+        if filter_condition and session_filter_condition:
+            raise BadRequest(
+                "Both a filter condition and session filter condition "
+                "cannot be applied at the same time"
+            )
         return await info.context.data_loaders.record_counts.load(
-            ("span", self.project_rowid, time_range, filter_condition),
+            (
+                "span",
+                self.project_rowid,
+                time_range or None,
+                filter_condition or None,
+                session_filter_condition or None,
+            ),
         )
 
     @strawberry.field
@@ -145,9 +156,22 @@ class Project(Node):
         self,
         info: Info[Context, None],
         time_range: Optional[TimeRange] = UNSET,
+        filter_condition: Optional[str] = UNSET,
+        session_filter_condition: Optional[str] = UNSET,
     ) -> int:
+        if filter_condition and session_filter_condition:
+            raise BadRequest(
+                "Both a filter condition and session filter condition "
+                "cannot be applied at the same time"
+            )
         return await info.context.data_loaders.record_counts.load(
-            ("trace", self.project_rowid, time_range, None),
+            (
+                "trace",
+                self.project_rowid,
+                time_range or None,
+                filter_condition or None,
+                session_filter_condition or None,
+            ),
         )
 
     @strawberry.field
@@ -189,9 +213,21 @@ class Project(Node):
         info: Info[Context, None],
         time_range: Optional[TimeRange] = UNSET,
         filter_condition: Optional[str] = UNSET,
+        session_filter_condition: Optional[str] = UNSET,
     ) -> SpanCostSummary:
-        loader = info.context.data_loaders.span_cost_summary_by_project
-        summary = await loader.load((self.project_rowid, time_range, filter_condition))
+        if filter_condition and session_filter_condition:
+            raise BadRequest(
+                "Both a filter condition and session filter condition "
+                "cannot be applied at the same time"
+            )
+        summary = await info.context.data_loaders.span_cost_summary_by_project.load(
+            (
+                self.project_rowid,
+                time_range or None,
+                filter_condition or None,
+                session_filter_condition or None,
+            )
+        )
         return SpanCostSummary(
             prompt=CostBreakdown(
                 tokens=summary.prompt.tokens,
@@ -213,13 +249,21 @@ class Project(Node):
         info: Info[Context, None],
         probability: float,
         time_range: Optional[TimeRange] = UNSET,
+        filter_condition: Optional[str] = UNSET,
+        session_filter_condition: Optional[str] = UNSET,
     ) -> Optional[float]:
+        if filter_condition and session_filter_condition:
+            raise BadRequest(
+                "Both a filter condition and session filter condition "
+                "cannot be applied at the same time"
+            )
         return await info.context.data_loaders.latency_ms_quantile.load(
             (
                 "trace",
                 self.project_rowid,
-                time_range,
-                None,
+                time_range or None,
+                filter_condition or None,
+                session_filter_condition or None,
                 probability,
             ),
         )
@@ -231,13 +275,20 @@ class Project(Node):
         probability: float,
         time_range: Optional[TimeRange] = UNSET,
         filter_condition: Optional[str] = UNSET,
+        session_filter_condition: Optional[str] = UNSET,
     ) -> Optional[float]:
+        if filter_condition and session_filter_condition:
+            raise BadRequest(
+                "Both a filter condition and session filter condition "
+                "cannot be applied at the same time"
+            )
         return await info.context.data_loaders.latency_ms_quantile.load(
             (
                 "span",
                 self.project_rowid,
-                time_range,
-                filter_condition,
+                time_range or None,
+                filter_condition or None,
+                session_filter_condition or None,
                 probability,
             ),
         )
@@ -397,31 +448,13 @@ class Project(Node):
             if time_range.end:
                 stmt = stmt.where(table.start_time < time_range.end)
         if filter_io_substring:
-            filter_stmt = (
-                select(distinct(models.Trace.project_session_rowid).label("id"))
-                .filter_by(project_rowid=self.project_rowid)
-                .join_from(models.Trace, models.Span)
-                .where(models.Span.parent_id.is_(None))
-                .where(
-                    or_(
-                        models.CaseInsensitiveContains(
-                            models.Span.attributes[INPUT_VALUE].as_string(),
-                            filter_io_substring,
-                        ),
-                        models.CaseInsensitiveContains(
-                            models.Span.attributes[OUTPUT_VALUE].as_string(),
-                            filter_io_substring,
-                        ),
-                    )
-                )
+            filtered_session_rowids = get_filtered_session_rowids_subquery(
+                session_filter_condition=filter_io_substring,
+                project_rowids=[self.project_rowid],
+                start_time=time_range.start if time_range else None,
+                end_time=time_range.end if time_range else None,
             )
-            if time_range:
-                if time_range.start:
-                    filter_stmt = filter_stmt.where(time_range.start <= models.Trace.start_time)
-                if time_range.end:
-                    filter_stmt = filter_stmt.where(models.Trace.start_time < time_range.end)
-            filter_subq = filter_stmt.subquery()
-            stmt = stmt.join(filter_subq, table.id == filter_subq.c.id)
+            stmt = stmt.where(table.id.in_(filtered_session_rowids))
         if sort:
             key: ColumnElement[Any]
             if sort.col is ProjectSessionColumn.startTime:
@@ -576,10 +609,24 @@ class Project(Node):
         self,
         info: Info[Context, None],
         annotation_name: str,
+        filter_condition: Optional[str] = UNSET,
+        session_filter_condition: Optional[str] = UNSET,
         time_range: Optional[TimeRange] = UNSET,
     ) -> Optional[AnnotationSummary]:
+        if filter_condition and session_filter_condition:
+            raise BadRequest(
+                "Both a filter condition and session filter condition "
+                "cannot be applied at the same time"
+            )
         return await info.context.data_loaders.annotation_summaries.load(
-            ("trace", self.project_rowid, time_range, None, annotation_name),
+            (
+                "trace",
+                self.project_rowid,
+                time_range or None,
+                filter_condition or None,
+                session_filter_condition or None,
+                annotation_name,
+            ),
         )
 
     @strawberry.field
@@ -589,9 +636,22 @@ class Project(Node):
         annotation_name: str,
         time_range: Optional[TimeRange] = UNSET,
         filter_condition: Optional[str] = UNSET,
+        session_filter_condition: Optional[str] = UNSET,
     ) -> Optional[AnnotationSummary]:
+        if filter_condition and session_filter_condition:
+            raise BadRequest(
+                "Both a filter condition and session filter condition "
+                "cannot be applied at the same time"
+            )
         return await info.context.data_loaders.annotation_summaries.load(
-            ("span", self.project_rowid, time_range, filter_condition, annotation_name),
+            (
+                "span",
+                self.project_rowid,
+                time_range or None,
+                filter_condition or None,
+                session_filter_condition or None,
+                annotation_name,
+            ),
         )
 
     @strawberry.field
@@ -680,7 +740,7 @@ class Project(Node):
     async def trace_retention_policy(
         self,
         info: Info[Context, None],
-    ) -> Annotated[ProjectTraceRetentionPolicy, lazy(".ProjectTraceRetentionPolicy")]:
+    ) -> Annotated["ProjectTraceRetentionPolicy", lazy(".ProjectTraceRetentionPolicy")]:
         from .ProjectTraceRetentionPolicy import ProjectTraceRetentionPolicy
 
         id_ = await info.context.data_loaders.trace_retention_policy_id_by_project_id.load(
@@ -721,7 +781,7 @@ class Project(Node):
         time_range: TimeRange,
         time_bin_config: Optional[TimeBinConfig] = UNSET,
         filter_condition: Optional[str] = UNSET,
-    ) -> SpanCountTimeSeries:
+    ) -> "SpanCountTimeSeries":
         if time_range.start is None:
             raise BadRequest("Start time is required")
 
@@ -806,7 +866,7 @@ class Project(Node):
         info: Info[Context, None],
         time_range: TimeRange,
         time_bin_config: Optional[TimeBinConfig] = UNSET,
-    ) -> TraceCountTimeSeries:
+    ) -> "TraceCountTimeSeries":
         if time_range.start is None:
             raise BadRequest("Start time is required")
 
@@ -869,7 +929,7 @@ class Project(Node):
         info: Info[Context, None],
         time_range: TimeRange,
         time_bin_config: Optional[TimeBinConfig] = UNSET,
-    ) -> TraceCountByStatusTimeSeries:
+    ) -> "TraceCountByStatusTimeSeries":
         if time_range.start is None:
             raise BadRequest("Start time is required")
 
@@ -962,7 +1022,7 @@ class Project(Node):
         info: Info[Context, None],
         time_range: TimeRange,
         time_bin_config: Optional[TimeBinConfig] = UNSET,
-    ) -> TraceLatencyPercentileTimeSeries:
+    ) -> "TraceLatencyPercentileTimeSeries":
         if time_range.start is None:
             raise BadRequest("Start time is required")
 
@@ -1066,7 +1126,7 @@ class Project(Node):
         info: Info[Context, None],
         time_range: TimeRange,
         time_bin_config: Optional[TimeBinConfig] = UNSET,
-    ) -> TraceTokenCountTimeSeries:
+    ) -> "TraceTokenCountTimeSeries":
         if time_range.start is None:
             raise BadRequest("Start time is required")
 
@@ -1149,7 +1209,7 @@ class Project(Node):
         info: Info[Context, None],
         time_range: TimeRange,
         time_bin_config: Optional[TimeBinConfig] = UNSET,
-    ) -> TraceTokenCostTimeSeries:
+    ) -> "TraceTokenCostTimeSeries":
         if time_range.start is None:
             raise BadRequest("Start time is required")
 
@@ -1232,7 +1292,7 @@ class Project(Node):
         info: Info[Context, None],
         time_range: TimeRange,
         time_bin_config: Optional[TimeBinConfig] = UNSET,
-    ) -> SpanAnnotationScoreTimeSeries:
+    ) -> "SpanAnnotationScoreTimeSeries":
         if time_range.start is None:
             raise BadRequest("Start time is required")
 
@@ -1287,6 +1347,8 @@ class Project(Node):
             name,
             average_score,
         ) in await session.stream(stmt):
+            if average_score is None:
+                continue
             timestamp = _as_datetime(t)
             if timestamp not in scores:
                scores[timestamp] = {}
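
Two recurring details in these Project.py hunks are worth spelling out. The quoted return types ("SpanCountTimeSeries" and friends) are forced by the removal of `from __future__ import annotations` in the first hunk, since forward references must again be written as string literals. And every resolver argument is normalized with `arg or None` before entering a dataloader key: Strawberry's UNSET sentinel is falsy, so omitted arguments, empty strings, and None all collapse to the same cache key. A standalone illustration with a stand-in sentinel:

```python
# Stand-in for strawberry.UNSET, which is likewise falsy.
class _UnsetType:
    def __bool__(self) -> bool:
        return False

UNSET = _UnsetType()

def normalize(arg):
    # Mirrors the `arg or None` pattern in the resolvers above.
    return arg or None

assert normalize(UNSET) is None  # argument omitted from the GraphQL query
assert normalize("") is None     # empty filter string
assert normalize("span.name == 'x'") == "span.name == 'x'"
```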

phoenix/server/email/sender.py:

@@ -88,21 +88,24 @@ class SimpleEmailSender:
         allocated_storage_gibibytes: float,
         notification_threshold_percentage: float,
     ) -> None:
-        subject = "[Phoenix] Database Usage Threshold Exceeded"
+        subject = "[Phoenix] Database Disk Space Usage Threshold Exceeded"
         template_name = "db_disk_usage_notification.html"
 
+        support_email = get_env_support_email()
         template = self.env.get_template(template_name)
         html_content = template.render(
             current_usage_gibibytes=current_usage_gibibytes,
             allocated_storage_gibibytes=allocated_storage_gibibytes,
             notification_threshold_percentage=notification_threshold_percentage,
-            support_email=get_env_support_email(),
+            support_email=support_email,
         )
 
         msg = EmailMessage()
         msg["Subject"] = subject
         msg["From"] = self.sender_email
         msg["To"] = email
+        if support_email:
+            msg["Cc"] = support_email
         msg.set_content(html_content, subtype="html")
 
        await to_thread.run_sync(self._send_email, msg)
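
For reference, a minimal stdlib sketch of the new Cc behavior; all addresses are placeholders:

```python
from email.message import EmailMessage

support_email = "support@example.com"  # stand-in for get_env_support_email()

msg = EmailMessage()
msg["Subject"] = "[Phoenix] Database Disk Space Usage Threshold Exceeded"
msg["From"] = "phoenix@example.com"
msg["To"] = "admin@example.com"
if support_email:  # support is only Cc'd when an address is configured
    msg["Cc"] = support_email
msg.set_content("<p>Disk usage exceeded the configured threshold.</p>", subtype="html")
```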