arize-phoenix 11.8.0__py3-none-any.whl → 11.9.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of arize-phoenix might be problematic; see the registry's release details for more information.

Files changed (27):
  1. {arize_phoenix-11.8.0.dist-info → arize_phoenix-11.9.0.dist-info}/METADATA +1 -1
  2. {arize_phoenix-11.8.0.dist-info → arize_phoenix-11.9.0.dist-info}/RECORD +24 -23
  3. phoenix/db/insertion/span.py +12 -10
  4. phoenix/db/insertion/types.py +9 -2
  5. phoenix/server/api/input_types/CreateProjectInput.py +27 -0
  6. phoenix/server/api/mutations/project_mutations.py +37 -1
  7. phoenix/server/api/mutations/trace_mutations.py +45 -1
  8. phoenix/server/api/types/Project.py +554 -0
  9. phoenix/server/cost_tracking/model_cost_manifest.json +85 -0
  10. phoenix/server/dml_event.py +4 -0
  11. phoenix/server/static/.vite/manifest.json +36 -36
  12. phoenix/server/static/assets/{components-5M9nebi4.js → components-IBd-PDxA.js} +368 -290
  13. phoenix/server/static/assets/{index-OU2WTnGN.js → index-B8EBC_Z5.js} +8 -2
  14. phoenix/server/static/assets/{pages-DF8rqxJ4.js → pages-6D1duYIe.js} +558 -435
  15. phoenix/server/static/assets/vendor-BzZ0oklU.js +939 -0
  16. phoenix/server/static/assets/vendor-arizeai-CvjUqTrl.js +168 -0
  17. phoenix/server/static/assets/{vendor-codemirror-vlcH1_iR.js → vendor-codemirror-CKK25Gd7.js} +1 -1
  18. phoenix/server/static/assets/vendor-recharts-CWtaRhQC.js +37 -0
  19. phoenix/server/static/assets/{vendor-shiki-BsknB7bv.js → vendor-shiki-D30GF-p9.js} +1 -1
  20. phoenix/version.py +1 -1
  21. phoenix/server/static/assets/vendor-Bl7CyFDw.js +0 -911
  22. phoenix/server/static/assets/vendor-arizeai-B_viEUUA.js +0 -180
  23. phoenix/server/static/assets/vendor-recharts-C9cQu72o.js +0 -59
  24. {arize_phoenix-11.8.0.dist-info → arize_phoenix-11.9.0.dist-info}/WHEEL +0 -0
  25. {arize_phoenix-11.8.0.dist-info → arize_phoenix-11.9.0.dist-info}/entry_points.txt +0 -0
  26. {arize_phoenix-11.8.0.dist-info → arize_phoenix-11.9.0.dist-info}/licenses/IP_NOTICE +0 -0
  27. {arize_phoenix-11.8.0.dist-info → arize_phoenix-11.9.0.dist-info}/licenses/LICENSE +0 -0
@@ -11,6 +11,7 @@ from sqlalchemy import and_, desc, distinct, exists, func, or_, select
11
11
  from sqlalchemy.dialects import postgresql, sqlite
12
12
  from sqlalchemy.sql.elements import ColumnElement
13
13
  from sqlalchemy.sql.expression import tuple_
14
+ from sqlalchemy.sql.functions import percentile_cont
14
15
  from strawberry import ID, UNSET, Private, lazy
15
16
  from strawberry.relay import Connection, Edge, Node, NodeID, PageInfo
16
17
  from strawberry.types import Info
@@ -841,6 +842,475 @@ class Project(Node):
841
842
  data[timestamp] = TimeSeriesDataPoint(timestamp=timestamp)
842
843
  return TraceCountTimeSeries(data=sorted(data.values(), key=lambda x: x.timestamp))
843
844
 
845
+ @strawberry.field
846
+ async def trace_count_by_status_time_series(
847
+ self,
848
+ info: Info[Context, None],
849
+ time_range: TimeRange,
850
+ time_bin_config: Optional[TimeBinConfig] = UNSET,
851
+ ) -> TraceCountByStatusTimeSeries:
852
+ if time_range.start is None:
853
+ raise BadRequest("Start time is required")
854
+
855
+ dialect = info.context.db.dialect
856
+ utc_offset_minutes = 0
857
+ field: Literal["minute", "hour", "day", "week", "month", "year"] = "hour"
858
+ if time_bin_config:
859
+ utc_offset_minutes = time_bin_config.utc_offset_minutes
860
+ if time_bin_config.scale is TimeBinScale.MINUTE:
861
+ field = "minute"
862
+ elif time_bin_config.scale is TimeBinScale.HOUR:
863
+ field = "hour"
864
+ elif time_bin_config.scale is TimeBinScale.DAY:
865
+ field = "day"
866
+ elif time_bin_config.scale is TimeBinScale.WEEK:
867
+ field = "week"
868
+ elif time_bin_config.scale is TimeBinScale.MONTH:
869
+ field = "month"
870
+ elif time_bin_config.scale is TimeBinScale.YEAR:
871
+ field = "year"
872
+ bucket = date_trunc(dialect, field, models.Trace.start_time, utc_offset_minutes)
873
+ trace_error_status_counts = (
874
+ select(
875
+ models.Span.trace_rowid,
876
+ )
877
+ .where(models.Span.parent_id.is_(None))
878
+ .group_by(models.Span.trace_rowid)
879
+ .having(func.max(models.Span.cumulative_error_count) > 0)
880
+ ).subquery()
881
+ stmt = (
882
+ select(
883
+ bucket,
884
+ func.count(models.Trace.id).label("total_count"),
885
+ func.coalesce(func.count(trace_error_status_counts.c.trace_rowid), 0).label(
886
+ "error_count"
887
+ ),
888
+ )
889
+ .join_from(
890
+ models.Trace,
891
+ trace_error_status_counts,
892
+ onclause=trace_error_status_counts.c.trace_rowid == models.Trace.id,
893
+ isouter=True,
894
+ )
895
+ .where(models.Trace.project_rowid == self.project_rowid)
896
+ .group_by(bucket)
897
+ .order_by(bucket)
898
+ )
899
+ if time_range:
900
+ if time_range.start:
901
+ stmt = stmt.where(time_range.start <= models.Trace.start_time)
902
+ if time_range.end:
903
+ stmt = stmt.where(models.Trace.start_time < time_range.end)
904
+ data: dict[datetime, TraceCountByStatusTimeSeriesDataPoint] = {}
905
+ async with info.context.db() as session:
906
+ async for t, total_count, error_count in await session.stream(stmt):
907
+ timestamp = _as_datetime(t)
908
+ data[timestamp] = TraceCountByStatusTimeSeriesDataPoint(
909
+ timestamp=timestamp,
910
+ ok_count=total_count - error_count,
911
+ error_count=error_count,
912
+ total_count=total_count,
913
+ )
914
+
915
+ data_timestamps: list[datetime] = [data_point.timestamp for data_point in data.values()]
916
+ min_time = min([*data_timestamps, time_range.start])
917
+ max_time = max(
918
+ [
919
+ *data_timestamps,
920
+ *([time_range.end] if time_range.end else []),
921
+ ],
922
+ default=datetime.now(timezone.utc),
923
+ )
924
+ for timestamp in get_timestamp_range(
925
+ start_time=min_time,
926
+ end_time=max_time,
927
+ stride=field,
928
+ utc_offset_minutes=utc_offset_minutes,
929
+ ):
930
+ if timestamp not in data:
931
+ data[timestamp] = TraceCountByStatusTimeSeriesDataPoint(
932
+ timestamp=timestamp,
933
+ ok_count=0,
934
+ error_count=0,
935
+ total_count=0,
936
+ )
937
+ return TraceCountByStatusTimeSeries(data=sorted(data.values(), key=lambda x: x.timestamp))
938
+
939
+ @strawberry.field
940
+ async def trace_latency_ms_percentile_time_series(
941
+ self,
942
+ info: Info[Context, None],
943
+ time_range: TimeRange,
944
+ time_bin_config: Optional[TimeBinConfig] = UNSET,
945
+ ) -> TraceLatencyPercentileTimeSeries:
946
+ if time_range.start is None:
947
+ raise BadRequest("Start time is required")
948
+
949
+ dialect = info.context.db.dialect
950
+ utc_offset_minutes = 0
951
+ field: Literal["minute", "hour", "day", "week", "month", "year"] = "hour"
952
+ if time_bin_config:
953
+ utc_offset_minutes = time_bin_config.utc_offset_minutes
954
+ if time_bin_config.scale is TimeBinScale.MINUTE:
955
+ field = "minute"
956
+ elif time_bin_config.scale is TimeBinScale.HOUR:
957
+ field = "hour"
958
+ elif time_bin_config.scale is TimeBinScale.DAY:
959
+ field = "day"
960
+ elif time_bin_config.scale is TimeBinScale.WEEK:
961
+ field = "week"
962
+ elif time_bin_config.scale is TimeBinScale.MONTH:
963
+ field = "month"
964
+ elif time_bin_config.scale is TimeBinScale.YEAR:
965
+ field = "year"
966
+ bucket = date_trunc(dialect, field, models.Trace.start_time, utc_offset_minutes)
967
+
968
+ stmt = select(bucket).where(models.Trace.project_rowid == self.project_rowid)
969
+ if time_range.start:
970
+ stmt = stmt.where(time_range.start <= models.Trace.start_time)
971
+ if time_range.end:
972
+ stmt = stmt.where(models.Trace.start_time < time_range.end)
973
+
974
+ if dialect is SupportedSQLDialect.POSTGRESQL:
975
+ stmt = stmt.add_columns(
976
+ percentile_cont(0.50).within_group(models.Trace.latency_ms.asc()).label("p50"),
977
+ percentile_cont(0.75).within_group(models.Trace.latency_ms.asc()).label("p75"),
978
+ percentile_cont(0.90).within_group(models.Trace.latency_ms.asc()).label("p90"),
979
+ percentile_cont(0.95).within_group(models.Trace.latency_ms.asc()).label("p95"),
980
+ percentile_cont(0.99).within_group(models.Trace.latency_ms.asc()).label("p99"),
981
+ percentile_cont(0.999).within_group(models.Trace.latency_ms.asc()).label("p999"),
982
+ func.max(models.Trace.latency_ms).label("max"),
983
+ )
984
+ elif dialect is SupportedSQLDialect.SQLITE:
985
+ stmt = stmt.add_columns(
986
+ func.percentile(models.Trace.latency_ms, 50).label("p50"),
987
+ func.percentile(models.Trace.latency_ms, 75).label("p75"),
988
+ func.percentile(models.Trace.latency_ms, 90).label("p90"),
989
+ func.percentile(models.Trace.latency_ms, 95).label("p95"),
990
+ func.percentile(models.Trace.latency_ms, 99).label("p99"),
991
+ func.percentile(models.Trace.latency_ms, 99.9).label("p999"),
992
+ func.max(models.Trace.latency_ms).label("max"),
993
+ )
994
+ else:
995
+ assert_never(dialect)
996
+
997
+ stmt = stmt.group_by(bucket).order_by(bucket)
998
+
999
+ data: dict[datetime, TraceLatencyMsPercentileTimeSeriesDataPoint] = {}
1000
+ async with info.context.db() as session:
1001
+ async for (
1002
+ bucket_time,
1003
+ p50,
1004
+ p75,
1005
+ p90,
1006
+ p95,
1007
+ p99,
1008
+ p999,
1009
+ max_latency,
1010
+ ) in await session.stream(stmt):
1011
+ timestamp = _as_datetime(bucket_time)
1012
+ data[timestamp] = TraceLatencyMsPercentileTimeSeriesDataPoint(
1013
+ timestamp=timestamp,
1014
+ p50=p50,
1015
+ p75=p75,
1016
+ p90=p90,
1017
+ p95=p95,
1018
+ p99=p99,
1019
+ p999=p999,
1020
+ max=max_latency,
1021
+ )
1022
+
1023
+ data_timestamps: list[datetime] = [data_point.timestamp for data_point in data.values()]
1024
+ min_time = min([*data_timestamps, time_range.start])
1025
+ max_time = max(
1026
+ [
1027
+ *data_timestamps,
1028
+ *([time_range.end] if time_range.end else []),
1029
+ ],
1030
+ default=datetime.now(timezone.utc),
1031
+ )
1032
+ for timestamp in get_timestamp_range(
1033
+ start_time=min_time,
1034
+ end_time=max_time,
1035
+ stride=field,
1036
+ utc_offset_minutes=utc_offset_minutes,
1037
+ ):
1038
+ if timestamp not in data:
1039
+ data[timestamp] = TraceLatencyMsPercentileTimeSeriesDataPoint(timestamp=timestamp)
1040
+ return TraceLatencyPercentileTimeSeries(
1041
+ data=sorted(data.values(), key=lambda x: x.timestamp)
1042
+ )
1043
+
1044
+ @strawberry.field
1045
+ async def trace_token_count_time_series(
1046
+ self,
1047
+ info: Info[Context, None],
1048
+ time_range: TimeRange,
1049
+ time_bin_config: Optional[TimeBinConfig] = UNSET,
1050
+ ) -> TraceTokenCountTimeSeries:
1051
+ if time_range.start is None:
1052
+ raise BadRequest("Start time is required")
1053
+
1054
+ dialect = info.context.db.dialect
1055
+ utc_offset_minutes = 0
1056
+ field: Literal["minute", "hour", "day", "week", "month", "year"] = "hour"
1057
+ if time_bin_config:
1058
+ utc_offset_minutes = time_bin_config.utc_offset_minutes
1059
+ if time_bin_config.scale is TimeBinScale.MINUTE:
1060
+ field = "minute"
1061
+ elif time_bin_config.scale is TimeBinScale.HOUR:
1062
+ field = "hour"
1063
+ elif time_bin_config.scale is TimeBinScale.DAY:
1064
+ field = "day"
1065
+ elif time_bin_config.scale is TimeBinScale.WEEK:
1066
+ field = "week"
1067
+ elif time_bin_config.scale is TimeBinScale.MONTH:
1068
+ field = "month"
1069
+ elif time_bin_config.scale is TimeBinScale.YEAR:
1070
+ field = "year"
1071
+ bucket = date_trunc(dialect, field, models.Trace.start_time, utc_offset_minutes)
1072
+ stmt = (
1073
+ select(
1074
+ bucket,
1075
+ func.sum(models.SpanCost.total_tokens),
1076
+ func.sum(models.SpanCost.prompt_tokens),
1077
+ func.sum(models.SpanCost.completion_tokens),
1078
+ )
1079
+ .join_from(
1080
+ models.Trace,
1081
+ models.SpanCost,
1082
+ onclause=models.SpanCost.trace_rowid == models.Trace.id,
1083
+ )
1084
+ .where(models.Trace.project_rowid == self.project_rowid)
1085
+ .group_by(bucket)
1086
+ .order_by(bucket)
1087
+ )
1088
+ if time_range:
1089
+ if time_range.start:
1090
+ stmt = stmt.where(time_range.start <= models.Trace.start_time)
1091
+ if time_range.end:
1092
+ stmt = stmt.where(models.Trace.start_time < time_range.end)
1093
+ data: dict[datetime, TraceTokenCountTimeSeriesDataPoint] = {}
1094
+ async with info.context.db() as session:
1095
+ async for (
1096
+ t,
1097
+ total_tokens,
1098
+ prompt_tokens,
1099
+ completion_tokens,
1100
+ ) in await session.stream(stmt):
1101
+ timestamp = _as_datetime(t)
1102
+ data[timestamp] = TraceTokenCountTimeSeriesDataPoint(
1103
+ timestamp=timestamp,
1104
+ prompt_token_count=prompt_tokens,
1105
+ completion_token_count=completion_tokens,
1106
+ total_token_count=total_tokens,
1107
+ )
1108
+
1109
+ data_timestamps: list[datetime] = [data_point.timestamp for data_point in data.values()]
1110
+ min_time = min([*data_timestamps, time_range.start])
1111
+ max_time = max(
1112
+ [
1113
+ *data_timestamps,
1114
+ *([time_range.end] if time_range.end else []),
1115
+ ],
1116
+ default=datetime.now(timezone.utc),
1117
+ )
1118
+ for timestamp in get_timestamp_range(
1119
+ start_time=min_time,
1120
+ end_time=max_time,
1121
+ stride=field,
1122
+ utc_offset_minutes=utc_offset_minutes,
1123
+ ):
1124
+ if timestamp not in data:
1125
+ data[timestamp] = TraceTokenCountTimeSeriesDataPoint(timestamp=timestamp)
1126
+ return TraceTokenCountTimeSeries(data=sorted(data.values(), key=lambda x: x.timestamp))
1127
+
1128
+ @strawberry.field
1129
+ async def trace_token_cost_time_series(
1130
+ self,
1131
+ info: Info[Context, None],
1132
+ time_range: TimeRange,
1133
+ time_bin_config: Optional[TimeBinConfig] = UNSET,
1134
+ ) -> TraceTokenCostTimeSeries:
1135
+ if time_range.start is None:
1136
+ raise BadRequest("Start time is required")
1137
+
1138
+ dialect = info.context.db.dialect
1139
+ utc_offset_minutes = 0
1140
+ field: Literal["minute", "hour", "day", "week", "month", "year"] = "hour"
1141
+ if time_bin_config:
1142
+ utc_offset_minutes = time_bin_config.utc_offset_minutes
1143
+ if time_bin_config.scale is TimeBinScale.MINUTE:
1144
+ field = "minute"
1145
+ elif time_bin_config.scale is TimeBinScale.HOUR:
1146
+ field = "hour"
1147
+ elif time_bin_config.scale is TimeBinScale.DAY:
1148
+ field = "day"
1149
+ elif time_bin_config.scale is TimeBinScale.WEEK:
1150
+ field = "week"
1151
+ elif time_bin_config.scale is TimeBinScale.MONTH:
1152
+ field = "month"
1153
+ elif time_bin_config.scale is TimeBinScale.YEAR:
1154
+ field = "year"
1155
+ bucket = date_trunc(dialect, field, models.Trace.start_time, utc_offset_minutes)
1156
+ stmt = (
1157
+ select(
1158
+ bucket,
1159
+ func.sum(models.SpanCost.total_cost),
1160
+ func.sum(models.SpanCost.prompt_cost),
1161
+ func.sum(models.SpanCost.completion_cost),
1162
+ )
1163
+ .join_from(
1164
+ models.Trace,
1165
+ models.SpanCost,
1166
+ onclause=models.SpanCost.trace_rowid == models.Trace.id,
1167
+ )
1168
+ .where(models.Trace.project_rowid == self.project_rowid)
1169
+ .group_by(bucket)
1170
+ .order_by(bucket)
1171
+ )
1172
+ if time_range:
1173
+ if time_range.start:
1174
+ stmt = stmt.where(time_range.start <= models.Trace.start_time)
1175
+ if time_range.end:
1176
+ stmt = stmt.where(models.Trace.start_time < time_range.end)
1177
+ data: dict[datetime, TraceTokenCostTimeSeriesDataPoint] = {}
1178
+ async with info.context.db() as session:
1179
+ async for (
1180
+ t,
1181
+ total_cost,
1182
+ prompt_cost,
1183
+ completion_cost,
1184
+ ) in await session.stream(stmt):
1185
+ timestamp = _as_datetime(t)
1186
+ data[timestamp] = TraceTokenCostTimeSeriesDataPoint(
1187
+ timestamp=timestamp,
1188
+ prompt_cost=prompt_cost,
1189
+ completion_cost=completion_cost,
1190
+ total_cost=total_cost,
1191
+ )
1192
+
1193
+ data_timestamps: list[datetime] = [data_point.timestamp for data_point in data.values()]
1194
+ min_time = min([*data_timestamps, time_range.start])
1195
+ max_time = max(
1196
+ [
1197
+ *data_timestamps,
1198
+ *([time_range.end] if time_range.end else []),
1199
+ ],
1200
+ default=datetime.now(timezone.utc),
1201
+ )
1202
+ for timestamp in get_timestamp_range(
1203
+ start_time=min_time,
1204
+ end_time=max_time,
1205
+ stride=field,
1206
+ utc_offset_minutes=utc_offset_minutes,
1207
+ ):
1208
+ if timestamp not in data:
1209
+ data[timestamp] = TraceTokenCostTimeSeriesDataPoint(timestamp=timestamp)
1210
+ return TraceTokenCostTimeSeries(data=sorted(data.values(), key=lambda x: x.timestamp))
1211
+
1212
+ @strawberry.field
1213
+ async def span_annotation_score_time_series(
1214
+ self,
1215
+ info: Info[Context, None],
1216
+ time_range: TimeRange,
1217
+ time_bin_config: Optional[TimeBinConfig] = UNSET,
1218
+ ) -> SpanAnnotationScoreTimeSeries:
1219
+ if time_range.start is None:
1220
+ raise BadRequest("Start time is required")
1221
+
1222
+ dialect = info.context.db.dialect
1223
+ utc_offset_minutes = 0
1224
+ field: Literal["minute", "hour", "day", "week", "month", "year"] = "hour"
1225
+ if time_bin_config:
1226
+ utc_offset_minutes = time_bin_config.utc_offset_minutes
1227
+ if time_bin_config.scale is TimeBinScale.MINUTE:
1228
+ field = "minute"
1229
+ elif time_bin_config.scale is TimeBinScale.HOUR:
1230
+ field = "hour"
1231
+ elif time_bin_config.scale is TimeBinScale.DAY:
1232
+ field = "day"
1233
+ elif time_bin_config.scale is TimeBinScale.WEEK:
1234
+ field = "week"
1235
+ elif time_bin_config.scale is TimeBinScale.MONTH:
1236
+ field = "month"
1237
+ elif time_bin_config.scale is TimeBinScale.YEAR:
1238
+ field = "year"
1239
+ bucket = date_trunc(dialect, field, models.Trace.start_time, utc_offset_minutes)
1240
+ stmt = (
1241
+ select(
1242
+ bucket,
1243
+ models.SpanAnnotation.name,
1244
+ func.avg(models.SpanAnnotation.score).label("average_score"),
1245
+ )
1246
+ .join_from(
1247
+ models.SpanAnnotation,
1248
+ models.Span,
1249
+ onclause=models.SpanAnnotation.span_rowid == models.Span.id,
1250
+ )
1251
+ .join_from(
1252
+ models.Span,
1253
+ models.Trace,
1254
+ onclause=models.Span.trace_rowid == models.Trace.id,
1255
+ )
1256
+ .where(models.Trace.project_rowid == self.project_rowid)
1257
+ .group_by(bucket, models.SpanAnnotation.name)
1258
+ .order_by(bucket)
1259
+ )
1260
+ if time_range:
1261
+ if time_range.start:
1262
+ stmt = stmt.where(time_range.start <= models.Trace.start_time)
1263
+ if time_range.end:
1264
+ stmt = stmt.where(models.Trace.start_time < time_range.end)
1265
+ scores: dict[datetime, dict[str, float]] = {}
1266
+ unique_names: set[str] = set()
1267
+ async with info.context.db() as session:
1268
+ async for (
1269
+ t,
1270
+ name,
1271
+ average_score,
1272
+ ) in await session.stream(stmt):
1273
+ timestamp = _as_datetime(t)
1274
+ if timestamp not in scores:
1275
+ scores[timestamp] = {}
1276
+ scores[timestamp][name] = average_score
1277
+ unique_names.add(name)
1278
+
1279
+ score_timestamps: list[datetime] = [timestamp for timestamp in scores]
1280
+ min_time = min([*score_timestamps, time_range.start])
1281
+ max_time = max(
1282
+ [
1283
+ *score_timestamps,
1284
+ *([time_range.end] if time_range.end else []),
1285
+ ],
1286
+ default=datetime.now(timezone.utc),
1287
+ )
1288
+ data: dict[datetime, SpanAnnotationScoreTimeSeriesDataPoint] = {
1289
+ timestamp: SpanAnnotationScoreTimeSeriesDataPoint(
1290
+ timestamp=timestamp,
1291
+ scores_with_labels=[
1292
+ SpanAnnotationScoreWithLabel(label=label, score=scores[timestamp][label])
1293
+ for label in scores[timestamp]
1294
+ ],
1295
+ )
1296
+ for timestamp in score_timestamps
1297
+ }
1298
+ for timestamp in get_timestamp_range(
1299
+ start_time=min_time,
1300
+ end_time=max_time,
1301
+ stride=field,
1302
+ utc_offset_minutes=utc_offset_minutes,
1303
+ ):
1304
+ if timestamp not in data:
1305
+ data[timestamp] = SpanAnnotationScoreTimeSeriesDataPoint(
1306
+ timestamp=timestamp,
1307
+ scores_with_labels=[],
1308
+ )
1309
+ return SpanAnnotationScoreTimeSeries(
1310
+ data=sorted(data.values(), key=lambda x: x.timestamp),
1311
+ names=sorted(list(unique_names)),
1312
+ )
1313
+
844
1314
 
845
1315
  @strawberry.type
846
1316
  class SpanCountTimeSeries(TimeSeries):
@@ -852,6 +1322,80 @@ class TraceCountTimeSeries(TimeSeries):
852
1322
  """A time series of trace count"""
853
1323
 
854
1324
 
1325
@strawberry.type
class TraceCountByStatusTimeSeriesDataPoint:
    """One time bucket of trace counts, split by status."""

    timestamp: datetime
    ok_count: int  # traces without an errored root span in this bucket
    error_count: int  # traces whose root span reported errors
    total_count: int  # ok_count + error_count


@strawberry.type
class TraceCountByStatusTimeSeries:
    """A time series of trace counts split into OK and error traces."""

    data: list[TraceCountByStatusTimeSeriesDataPoint]


@strawberry.type
class TraceLatencyMsPercentileTimeSeriesDataPoint:
    """Latency percentiles (milliseconds) for one time bucket.

    All fields default to None so empty buckets can be represented.
    """

    timestamp: datetime
    p50: Optional[float] = None
    p75: Optional[float] = None
    p90: Optional[float] = None
    p95: Optional[float] = None
    p99: Optional[float] = None
    p999: Optional[float] = None
    max: Optional[float] = None


@strawberry.type
class TraceLatencyPercentileTimeSeries:
    """A time series of trace latency percentiles."""

    data: list[TraceLatencyMsPercentileTimeSeriesDataPoint]


@strawberry.type
class TraceTokenCountTimeSeriesDataPoint:
    """Token counts summed over one time bucket; None when no data."""

    timestamp: datetime
    prompt_token_count: Optional[float] = None
    completion_token_count: Optional[float] = None
    total_token_count: Optional[float] = None


@strawberry.type
class TraceTokenCountTimeSeries:
    """A time series of trace token counts."""

    data: list[TraceTokenCountTimeSeriesDataPoint]


@strawberry.type
class TraceTokenCostTimeSeriesDataPoint:
    """Token costs summed over one time bucket; None when no data."""

    timestamp: datetime
    prompt_cost: Optional[float] = None
    completion_cost: Optional[float] = None
    total_cost: Optional[float] = None


@strawberry.type
class TraceTokenCostTimeSeries:
    """A time series of trace token costs."""

    data: list[TraceTokenCostTimeSeriesDataPoint]


@strawberry.type
class SpanAnnotationScoreWithLabel:
    """An average annotation score keyed by its annotation name."""

    label: str
    score: float


@strawberry.type
class SpanAnnotationScoreTimeSeriesDataPoint:
    """Average annotation scores, per annotation name, for one time bucket."""

    timestamp: datetime
    scores_with_labels: list[SpanAnnotationScoreWithLabel]


@strawberry.type
class SpanAnnotationScoreTimeSeries:
    """A time series of average span-annotation scores."""

    data: list[SpanAnnotationScoreTimeSeriesDataPoint]
    names: list[str]  # sorted unique annotation names seen in the range
1397
+
1398
+
855
1399
  INPUT_VALUE = SpanAttributes.INPUT_VALUE.split(".")
856
1400
  OUTPUT_VALUE = SpanAttributes.OUTPUT_VALUE.split(".")
857
1401
 
@@ -1060,3 +1604,13 @@ async def _paginate_span_by_trace_start_time(
1060
1604
  has_next_page=has_next_page,
1061
1605
  ),
1062
1606
  )
1607
+
1608
+
1609
def to_gql_project(project: models.Project) -> Project:
    """Convert an ORM ``Project`` row into its GraphQL counterpart."""
    return Project(project_rowid=project.id, db_project=project)
+ )