arize-phoenix 11.7.0__py3-none-any.whl → 11.9.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

This release of arize-phoenix has been flagged as potentially problematic.
Files changed (42):
  1. {arize_phoenix-11.7.0.dist-info → arize_phoenix-11.9.0.dist-info}/METADATA +14 -2
  2. {arize_phoenix-11.7.0.dist-info → arize_phoenix-11.9.0.dist-info}/RECORD +39 -37
  3. phoenix/config.py +33 -0
  4. phoenix/datetime_utils.py +112 -1
  5. phoenix/db/helpers.py +156 -1
  6. phoenix/db/insertion/span.py +12 -10
  7. phoenix/db/insertion/types.py +9 -2
  8. phoenix/server/api/auth.py +28 -6
  9. phoenix/server/api/dataloaders/span_cost_summary_by_experiment.py +6 -7
  10. phoenix/server/api/exceptions.py +6 -0
  11. phoenix/server/api/input_types/CreateProjectInput.py +27 -0
  12. phoenix/server/api/input_types/TimeBinConfig.py +23 -0
  13. phoenix/server/api/mutations/project_mutations.py +37 -1
  14. phoenix/server/api/mutations/trace_mutations.py +45 -1
  15. phoenix/server/api/routers/oauth2.py +19 -2
  16. phoenix/server/api/types/CostBreakdown.py +4 -7
  17. phoenix/server/api/types/Project.py +891 -69
  18. phoenix/server/app.py +7 -3
  19. phoenix/server/authorization.py +27 -2
  20. phoenix/server/cost_tracking/cost_details_calculator.py +22 -16
  21. phoenix/server/cost_tracking/model_cost_manifest.json +85 -0
  22. phoenix/server/daemons/span_cost_calculator.py +2 -8
  23. phoenix/server/dml_event.py +4 -0
  24. phoenix/server/email/sender.py +2 -1
  25. phoenix/server/email/templates/db_disk_usage_notification.html +3 -0
  26. phoenix/server/static/.vite/manifest.json +36 -36
  27. phoenix/server/static/assets/{components-J3qjrjBf.js → components-IBd-PDxA.js} +452 -293
  28. phoenix/server/static/assets/{index-CEObsQf_.js → index-B8EBC_Z5.js} +17 -11
  29. phoenix/server/static/assets/{pages-CW1UdBht.js → pages-6D1duYIe.js} +569 -439
  30. phoenix/server/static/assets/vendor-BzZ0oklU.js +939 -0
  31. phoenix/server/static/assets/vendor-arizeai-CvjUqTrl.js +168 -0
  32. phoenix/server/static/assets/{vendor-codemirror-k3zCIjlN.js → vendor-codemirror-CKK25Gd7.js} +1 -1
  33. phoenix/server/static/assets/vendor-recharts-CWtaRhQC.js +37 -0
  34. phoenix/server/static/assets/{vendor-shiki-DPtuv2M4.js → vendor-shiki-D30GF-p9.js} +1 -1
  35. phoenix/version.py +1 -1
  36. phoenix/server/static/assets/vendor-BnPh9i9e.js +0 -911
  37. phoenix/server/static/assets/vendor-arizeai-Cr9o_Iu_.js +0 -642
  38. phoenix/server/static/assets/vendor-recharts-BdblEuGB.js +0 -59
  39. {arize_phoenix-11.7.0.dist-info → arize_phoenix-11.9.0.dist-info}/WHEEL +0 -0
  40. {arize_phoenix-11.7.0.dist-info → arize_phoenix-11.9.0.dist-info}/entry_points.txt +0 -0
  41. {arize_phoenix-11.7.0.dist-info → arize_phoenix-11.9.0.dist-info}/licenses/IP_NOTICE +0 -0
  42. {arize_phoenix-11.7.0.dist-info → arize_phoenix-11.9.0.dist-info}/licenses/LICENSE +0 -0
@@ -1,30 +1,33 @@
 from __future__ import annotations
 
 import operator
-from datetime import datetime, timedelta
-from typing import TYPE_CHECKING, Annotated, Any, ClassVar, Optional
+from datetime import datetime, timezone
+from typing import TYPE_CHECKING, Annotated, Any, ClassVar, Literal, Optional, cast
 
 import strawberry
-from aioitertools.itertools import islice
+from aioitertools.itertools import groupby, islice
 from openinference.semconv.trace import SpanAttributes
-from sqlalchemy import desc, distinct, func, or_, select
+from sqlalchemy import and_, desc, distinct, exists, func, or_, select
 from sqlalchemy.dialects import postgresql, sqlite
 from sqlalchemy.sql.elements import ColumnElement
 from sqlalchemy.sql.expression import tuple_
+from sqlalchemy.sql.functions import percentile_cont
 from strawberry import ID, UNSET, Private, lazy
-from strawberry.relay import Connection, Node, NodeID
+from strawberry.relay import Connection, Edge, Node, NodeID, PageInfo
 from strawberry.types import Info
 from typing_extensions import assert_never
 
-from phoenix.datetime_utils import right_open_time_range
+from phoenix.datetime_utils import get_timestamp_range, normalize_datetime, right_open_time_range
 from phoenix.db import models
-from phoenix.db.helpers import SupportedSQLDialect
+from phoenix.db.helpers import SupportedSQLDialect, date_trunc
 from phoenix.server.api.context import Context
+from phoenix.server.api.exceptions import BadRequest
 from phoenix.server.api.input_types.ProjectSessionSort import (
     ProjectSessionColumn,
     ProjectSessionSort,
 )
-from phoenix.server.api.input_types.SpanSort import SpanSort, SpanSortConfig
+from phoenix.server.api.input_types.SpanSort import SpanColumn, SpanSort, SpanSortConfig
+from phoenix.server.api.input_types.TimeBinConfig import TimeBinConfig, TimeBinScale
 from phoenix.server.api.input_types.TimeRange import TimeRange
 from phoenix.server.api.types.AnnotationConfig import AnnotationConfig, to_gql_annotation_config
 from phoenix.server.api.types.AnnotationSummary import AnnotationSummary
@@ -34,6 +37,7 @@ from phoenix.server.api.types.pagination import (
     ConnectionArgs,
     Cursor,
     CursorSortColumn,
+    CursorSortColumnDataType,
     CursorString,
     connection_from_cursors_and_nodes,
     connection_from_list,
@@ -45,6 +49,7 @@ from phoenix.server.api.types.SpanCostSummary import SpanCostSummary
 from phoenix.server.api.types.TimeSeries import TimeSeries, TimeSeriesDataPoint
 from phoenix.server.api.types.Trace import Trace
 from phoenix.server.api.types.ValidationResult import ValidationResult
+from phoenix.server.types import DbSessionFactory
 from phoenix.trace.dsl import SpanFilter
 
 DEFAULT_PAGE_SIZE = 30
@@ -262,6 +267,16 @@ class Project(Node):
         filter_condition: Optional[str] = UNSET,
         orphan_span_as_root_span: Optional[bool] = True,
     ) -> Connection[Span]:
+        if root_spans_only and not filter_condition and sort and sort.col is SpanColumn.startTime:
+            return await _paginate_span_by_trace_start_time(
+                db=info.context.db,
+                project_rowid=self.project_rowid,
+                time_range=time_range,
+                first=first,
+                after=after,
+                sort=sort,
+                orphan_span_as_root_span=orphan_span_as_root_span,
+            )
         stmt = (
             select(models.Span.id)
             .select_from(models.Span)
@@ -698,83 +713,603 @@ class Project(Node):
         )
         return updated_at
 
-    @strawberry.field(
-        description="Hourly span count for the project.",
-    )  # type: ignore
+    @strawberry.field
     async def span_count_time_series(
         self,
         info: Info[Context, None],
-        time_range: Optional[TimeRange] = UNSET,
+        time_range: TimeRange,
+        time_bin_config: Optional[TimeBinConfig] = UNSET,
     ) -> SpanCountTimeSeries:
-        """Returns a time series of span counts grouped by hour for the project.
+        if time_range.start is None:
+            raise BadRequest("Start time is required")
 
-        This field provides hourly aggregated span counts, which can be useful for
-        visualizing span activity over time. The data points represent the number
-        of spans that started in each hour.
+        dialect = info.context.db.dialect
+        utc_offset_minutes = 0
+        field: Literal["minute", "hour", "day", "week", "month", "year"] = "hour"
+        if time_bin_config:
+            utc_offset_minutes = time_bin_config.utc_offset_minutes
+            if time_bin_config.scale is TimeBinScale.MINUTE:
+                field = "minute"
+            elif time_bin_config.scale is TimeBinScale.HOUR:
+                field = "hour"
+            elif time_bin_config.scale is TimeBinScale.DAY:
+                field = "day"
+            elif time_bin_config.scale is TimeBinScale.WEEK:
+                field = "week"
+            elif time_bin_config.scale is TimeBinScale.MONTH:
+                field = "month"
+            elif time_bin_config.scale is TimeBinScale.YEAR:
+                field = "year"
+        bucket = date_trunc(dialect, field, models.Span.start_time, utc_offset_minutes)
+        stmt = (
+            select(bucket, func.count(models.Span.id))
+            .join_from(models.Span, models.Trace)
+            .where(models.Trace.project_rowid == self.project_rowid)
+            .group_by(bucket)
+            .order_by(bucket)
+        )
+        if time_range.start:
+            stmt = stmt.where(time_range.start <= models.Span.start_time)
+        if time_range.end:
+            stmt = stmt.where(models.Span.start_time < time_range.end)
 
-        Args:
-            info: The GraphQL info object containing context information.
-            time_range: Optional time range to filter the spans. If provided, only
-                spans that started within this range will be counted.
+        data = {}
+        async with info.context.db() as session:
+            async for t, v in await session.stream(stmt):
+                timestamp = _as_datetime(t)
+                data[timestamp] = TimeSeriesDataPoint(timestamp=timestamp, value=v)
 
-        Returns:
-            A SpanCountTimeSeries object containing data points with timestamps
-            (rounded to the nearest hour) and corresponding span counts.
-
-        Notes:
-            - The timestamps are rounded down to the nearest hour.
-            - If a time range is provided, the start time is rounded down to the
-              nearest hour, and the end time is rounded up to the nearest hour.
-            - The SQL query is optimized for both PostgreSQL and SQLite databases.
-        """
-        # Determine the appropriate SQL function to truncate timestamps to hours
-        # based on the database dialect
-        if info.context.db.dialect is SupportedSQLDialect.POSTGRESQL:
-            # PostgreSQL uses date_trunc for timestamp truncation
-            hour = func.date_trunc("hour", models.Span.start_time)
-        elif info.context.db.dialect is SupportedSQLDialect.SQLITE:
-            # SQLite uses strftime for timestamp formatting
-            hour = func.strftime("%Y-%m-%dT%H:00:00.000+00:00", models.Span.start_time)
+        data_timestamps: list[datetime] = [data_point.timestamp for data_point in data.values()]
+        min_time = min([*data_timestamps, time_range.start])
+        max_time = max(
+            [
+                *data_timestamps,
+                *([time_range.end] if time_range.end else []),
+            ],
+            default=datetime.now(timezone.utc),
+        )
+        for timestamp in get_timestamp_range(
+            start_time=min_time,
+            end_time=max_time,
+            stride=field,
+            utc_offset_minutes=utc_offset_minutes,
+        ):
+            if timestamp not in data:
+                data[timestamp] = TimeSeriesDataPoint(timestamp=timestamp)
+        return SpanCountTimeSeries(data=sorted(data.values(), key=lambda x: x.timestamp))
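The inline, hour-only truncation above is replaced by a dialect-aware `date_trunc` helper in `phoenix/db/helpers.py` (+156 lines in this release), parameterized by bucket size and a UTC offset. The helper's implementation is not part of this file's diff; a minimal sketch of the idea, assuming it shifts into the requested offset, truncates, and shifts back (the name `date_trunc_sketch` and the minute/hour/day-only table are illustrative, not Phoenix's actual code):

from datetime import timedelta

from sqlalchemy import func
from sqlalchemy.sql.elements import ColumnElement

from phoenix.db.helpers import SupportedSQLDialect


def date_trunc_sketch(
    dialect: SupportedSQLDialect,
    field: str,  # "minute" | "hour" | "day"; week/month/year omitted for brevity
    timestamp_column: ColumnElement,
    utc_offset_minutes: int = 0,
) -> ColumnElement:
    offset = timedelta(minutes=utc_offset_minutes)
    if dialect is SupportedSQLDialect.POSTGRESQL:
        # Shift into the viewer's fixed offset, truncate, then shift back to UTC.
        return func.date_trunc(field, timestamp_column + offset) - offset
    if dialect is SupportedSQLDialect.SQLITE:
        # SQLite has no date_trunc; emulate it with datetime() modifiers plus
        # strftime, much like the strftime call this diff removes.
        fmt = {
            "minute": "%Y-%m-%dT%H:%M:00.000",
            "hour": "%Y-%m-%dT%H:00:00.000",
            "day": "%Y-%m-%dT00:00:00.000",
        }[field]
        shifted = func.datetime(timestamp_column, f"{utc_offset_minutes} minutes")
        return func.datetime(func.strftime(fmt, shifted), f"{-utc_offset_minutes} minutes")
    raise ValueError(f"unsupported dialect: {dialect}")

Truncating in a fixed-offset "local" clock and converting back keeps bucket boundaries aligned with the viewer's timezone while the stored timestamps stay in UTC.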
+
+    @strawberry.field
+    async def trace_count_time_series(
+        self,
+        info: Info[Context, None],
+        time_range: TimeRange,
+        time_bin_config: Optional[TimeBinConfig] = UNSET,
+    ) -> TraceCountTimeSeries:
+        if time_range.start is None:
+            raise BadRequest("Start time is required")
+
+        dialect = info.context.db.dialect
+        utc_offset_minutes = 0
+        field: Literal["minute", "hour", "day", "week", "month", "year"] = "hour"
+        if time_bin_config:
+            utc_offset_minutes = time_bin_config.utc_offset_minutes
+            if time_bin_config.scale is TimeBinScale.MINUTE:
+                field = "minute"
+            elif time_bin_config.scale is TimeBinScale.HOUR:
+                field = "hour"
+            elif time_bin_config.scale is TimeBinScale.DAY:
+                field = "day"
+            elif time_bin_config.scale is TimeBinScale.WEEK:
+                field = "week"
+            elif time_bin_config.scale is TimeBinScale.MONTH:
+                field = "month"
+            elif time_bin_config.scale is TimeBinScale.YEAR:
+                field = "year"
+        bucket = date_trunc(dialect, field, models.Trace.start_time, utc_offset_minutes)
+        stmt = (
+            select(bucket, func.count(models.Trace.id))
+            .where(models.Trace.project_rowid == self.project_rowid)
+            .group_by(bucket)
+            .order_by(bucket)
+        )
+        if time_range:
+            if time_range.start:
+                stmt = stmt.where(time_range.start <= models.Trace.start_time)
+            if time_range.end:
+                stmt = stmt.where(models.Trace.start_time < time_range.end)
+        data = {}
+        async with info.context.db() as session:
+            async for t, v in await session.stream(stmt):
+                timestamp = _as_datetime(t)
+                data[timestamp] = TimeSeriesDataPoint(timestamp=timestamp, value=v)
+
+        data_timestamps: list[datetime] = [data_point.timestamp for data_point in data.values()]
+        min_time = min([*data_timestamps, time_range.start])
+        max_time = max(
+            [
+                *data_timestamps,
+                *([time_range.end] if time_range.end else []),
+            ],
+            default=datetime.now(timezone.utc),
+        )
+        for timestamp in get_timestamp_range(
+            start_time=min_time,
+            end_time=max_time,
+            stride=field,
+            utc_offset_minutes=utc_offset_minutes,
+        ):
+            if timestamp not in data:
+                data[timestamp] = TimeSeriesDataPoint(timestamp=timestamp)
+        return TraceCountTimeSeries(data=sorted(data.values(), key=lambda x: x.timestamp))
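Each of these resolvers back-fills missing buckets with `get_timestamp_range` from `phoenix/datetime_utils.py` (+112 lines in this release), so the series contains one point per bucket even when a bucket had no rows. That helper is also outside this diff; a rough sketch of the expected behavior, assuming it yields UTC bucket boundaries aligned to the offset-local clock (all names and the month/year rollover are illustrative):

from datetime import datetime, timedelta, timezone
from typing import Iterator, Literal


def get_timestamp_range_sketch(
    start_time: datetime,  # must be timezone-aware
    end_time: datetime,  # must be timezone-aware
    stride: Literal["minute", "hour", "day", "week", "month", "year"] = "hour",
    utc_offset_minutes: int = 0,
) -> Iterator[datetime]:
    # Buckets are aligned to the caller's fixed-offset local clock, but
    # yielded in UTC so they match the timestamps coming back from SQL.
    local = timezone(timedelta(minutes=utc_offset_minutes))
    t = start_time.astimezone(local)
    # Truncate the first timestamp down to its bucket boundary.
    if stride == "minute":
        t = t.replace(second=0, microsecond=0)
    elif stride == "hour":
        t = t.replace(minute=0, second=0, microsecond=0)
    elif stride in ("day", "week"):
        t = t.replace(hour=0, minute=0, second=0, microsecond=0)
        if stride == "week":
            t -= timedelta(days=t.weekday())  # back to Monday
    elif stride == "month":
        t = t.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
    elif stride == "year":
        t = t.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
    while t < end_time:
        yield t.astimezone(timezone.utc)
        if stride == "minute":
            t += timedelta(minutes=1)
        elif stride == "hour":
            t += timedelta(hours=1)
        elif stride == "day":
            t += timedelta(days=1)
        elif stride == "week":
            t += timedelta(weeks=1)
        elif stride == "month":
            # Calendar months vary in length, so roll the fields over manually.
            t = t.replace(year=t.year + (t.month == 12), month=t.month % 12 + 1)
        elif stride == "year":
            t = t.replace(year=t.year + 1)

For example, list(get_timestamp_range_sketch(datetime(2025, 1, 1, tzinfo=timezone.utc), datetime(2025, 1, 1, 3, tzinfo=timezone.utc))) yields the 00:00, 01:00, and 02:00 hourly boundaries.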
+
+    @strawberry.field
+    async def trace_count_by_status_time_series(
+        self,
+        info: Info[Context, None],
+        time_range: TimeRange,
+        time_bin_config: Optional[TimeBinConfig] = UNSET,
+    ) -> TraceCountByStatusTimeSeries:
+        if time_range.start is None:
+            raise BadRequest("Start time is required")
+
+        dialect = info.context.db.dialect
+        utc_offset_minutes = 0
+        field: Literal["minute", "hour", "day", "week", "month", "year"] = "hour"
+        if time_bin_config:
+            utc_offset_minutes = time_bin_config.utc_offset_minutes
+            if time_bin_config.scale is TimeBinScale.MINUTE:
+                field = "minute"
+            elif time_bin_config.scale is TimeBinScale.HOUR:
+                field = "hour"
+            elif time_bin_config.scale is TimeBinScale.DAY:
+                field = "day"
+            elif time_bin_config.scale is TimeBinScale.WEEK:
+                field = "week"
+            elif time_bin_config.scale is TimeBinScale.MONTH:
+                field = "month"
+            elif time_bin_config.scale is TimeBinScale.YEAR:
+                field = "year"
+        bucket = date_trunc(dialect, field, models.Trace.start_time, utc_offset_minutes)
+        trace_error_status_counts = (
+            select(
+                models.Span.trace_rowid,
+            )
+            .where(models.Span.parent_id.is_(None))
+            .group_by(models.Span.trace_rowid)
+            .having(func.max(models.Span.cumulative_error_count) > 0)
+        ).subquery()
+        stmt = (
+            select(
+                bucket,
+                func.count(models.Trace.id).label("total_count"),
+                func.coalesce(func.count(trace_error_status_counts.c.trace_rowid), 0).label(
+                    "error_count"
+                ),
+            )
+            .join_from(
+                models.Trace,
+                trace_error_status_counts,
+                onclause=trace_error_status_counts.c.trace_rowid == models.Trace.id,
+                isouter=True,
+            )
+            .where(models.Trace.project_rowid == self.project_rowid)
+            .group_by(bucket)
+            .order_by(bucket)
+        )
+        if time_range:
+            if time_range.start:
+                stmt = stmt.where(time_range.start <= models.Trace.start_time)
+            if time_range.end:
+                stmt = stmt.where(models.Trace.start_time < time_range.end)
+        data: dict[datetime, TraceCountByStatusTimeSeriesDataPoint] = {}
+        async with info.context.db() as session:
+            async for t, total_count, error_count in await session.stream(stmt):
+                timestamp = _as_datetime(t)
+                data[timestamp] = TraceCountByStatusTimeSeriesDataPoint(
+                    timestamp=timestamp,
+                    ok_count=total_count - error_count,
+                    error_count=error_count,
+                    total_count=total_count,
+                )
+
+        data_timestamps: list[datetime] = [data_point.timestamp for data_point in data.values()]
+        min_time = min([*data_timestamps, time_range.start])
+        max_time = max(
+            [
+                *data_timestamps,
+                *([time_range.end] if time_range.end else []),
+            ],
+            default=datetime.now(timezone.utc),
+        )
+        for timestamp in get_timestamp_range(
+            start_time=min_time,
+            end_time=max_time,
+            stride=field,
+            utc_offset_minutes=utc_offset_minutes,
+        ):
+            if timestamp not in data:
+                data[timestamp] = TraceCountByStatusTimeSeriesDataPoint(
+                    timestamp=timestamp,
+                    ok_count=0,
+                    error_count=0,
+                    total_count=0,
+                )
+        return TraceCountByStatusTimeSeries(data=sorted(data.values(), key=lambda x: x.timestamp))
+
+    @strawberry.field
+    async def trace_latency_ms_percentile_time_series(
+        self,
+        info: Info[Context, None],
+        time_range: TimeRange,
+        time_bin_config: Optional[TimeBinConfig] = UNSET,
+    ) -> TraceLatencyPercentileTimeSeries:
+        if time_range.start is None:
+            raise BadRequest("Start time is required")
+
+        dialect = info.context.db.dialect
+        utc_offset_minutes = 0
+        field: Literal["minute", "hour", "day", "week", "month", "year"] = "hour"
+        if time_bin_config:
+            utc_offset_minutes = time_bin_config.utc_offset_minutes
+            if time_bin_config.scale is TimeBinScale.MINUTE:
+                field = "minute"
+            elif time_bin_config.scale is TimeBinScale.HOUR:
+                field = "hour"
+            elif time_bin_config.scale is TimeBinScale.DAY:
+                field = "day"
+            elif time_bin_config.scale is TimeBinScale.WEEK:
+                field = "week"
+            elif time_bin_config.scale is TimeBinScale.MONTH:
+                field = "month"
+            elif time_bin_config.scale is TimeBinScale.YEAR:
+                field = "year"
+        bucket = date_trunc(dialect, field, models.Trace.start_time, utc_offset_minutes)
+
+        stmt = select(bucket).where(models.Trace.project_rowid == self.project_rowid)
+        if time_range.start:
+            stmt = stmt.where(time_range.start <= models.Trace.start_time)
+        if time_range.end:
+            stmt = stmt.where(models.Trace.start_time < time_range.end)
+
+        if dialect is SupportedSQLDialect.POSTGRESQL:
+            stmt = stmt.add_columns(
+                percentile_cont(0.50).within_group(models.Trace.latency_ms.asc()).label("p50"),
+                percentile_cont(0.75).within_group(models.Trace.latency_ms.asc()).label("p75"),
+                percentile_cont(0.90).within_group(models.Trace.latency_ms.asc()).label("p90"),
+                percentile_cont(0.95).within_group(models.Trace.latency_ms.asc()).label("p95"),
+                percentile_cont(0.99).within_group(models.Trace.latency_ms.asc()).label("p99"),
+                percentile_cont(0.999).within_group(models.Trace.latency_ms.asc()).label("p999"),
+                func.max(models.Trace.latency_ms).label("max"),
+            )
+        elif dialect is SupportedSQLDialect.SQLITE:
+            stmt = stmt.add_columns(
+                func.percentile(models.Trace.latency_ms, 50).label("p50"),
+                func.percentile(models.Trace.latency_ms, 75).label("p75"),
+                func.percentile(models.Trace.latency_ms, 90).label("p90"),
+                func.percentile(models.Trace.latency_ms, 95).label("p95"),
+                func.percentile(models.Trace.latency_ms, 99).label("p99"),
+                func.percentile(models.Trace.latency_ms, 99.9).label("p999"),
+                func.max(models.Trace.latency_ms).label("max"),
+            )
         else:
-            assert_never(info.context.db.dialect)
+            assert_never(dialect)
 
-        # Build the base query to count spans grouped by hour
+        stmt = stmt.group_by(bucket).order_by(bucket)
+
+        data: dict[datetime, TraceLatencyMsPercentileTimeSeriesDataPoint] = {}
+        async with info.context.db() as session:
+            async for (
+                bucket_time,
+                p50,
+                p75,
+                p90,
+                p95,
+                p99,
+                p999,
+                max_latency,
+            ) in await session.stream(stmt):
+                timestamp = _as_datetime(bucket_time)
+                data[timestamp] = TraceLatencyMsPercentileTimeSeriesDataPoint(
+                    timestamp=timestamp,
+                    p50=p50,
+                    p75=p75,
+                    p90=p90,
+                    p95=p95,
+                    p99=p99,
+                    p999=p999,
+                    max=max_latency,
+                )
+
+        data_timestamps: list[datetime] = [data_point.timestamp for data_point in data.values()]
+        min_time = min([*data_timestamps, time_range.start])
+        max_time = max(
+            [
+                *data_timestamps,
+                *([time_range.end] if time_range.end else []),
+            ],
+            default=datetime.now(timezone.utc),
+        )
+        for timestamp in get_timestamp_range(
+            start_time=min_time,
+            end_time=max_time,
+            stride=field,
+            utc_offset_minutes=utc_offset_minutes,
+        ):
+            if timestamp not in data:
+                data[timestamp] = TraceLatencyMsPercentileTimeSeriesDataPoint(timestamp=timestamp)
+        return TraceLatencyPercentileTimeSeries(
+            data=sorted(data.values(), key=lambda x: x.timestamp)
+        )
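The PostgreSQL branch uses the standard `percentile_cont` ordered-set aggregate. Stock SQLite has no percentile function, so the `func.percentile(...)` calls imply one made available on the connection (e.g. a registered custom aggregate or a loadable extension; the registration is not part of this diff). A standalone illustration of supplying such an aggregate with the stdlib `sqlite3` module, using linear interpolation in the spirit of `percentile_cont`:

import sqlite3
from typing import Optional


class Percentile:
    """Linear-interpolation percentile over a (value, percent) aggregate."""

    def __init__(self) -> None:
        self.values: list[float] = []
        self.fraction = 0.0

    def step(self, value: Optional[float], percent: float) -> None:
        self.fraction = percent / 100.0
        if value is not None:
            self.values.append(value)

    def finalize(self) -> Optional[float]:
        if not self.values:
            return None
        xs = sorted(self.values)
        k = (len(xs) - 1) * self.fraction
        lo = int(k)
        hi = min(lo + 1, len(xs) - 1)
        # Interpolate between the two nearest ranks.
        return xs[lo] + (xs[hi] - xs[lo]) * (k - lo)


conn = sqlite3.connect(":memory:")
conn.create_aggregate("percentile", 2, Percentile)
row = conn.execute(
    "SELECT percentile(column1, 50) FROM (VALUES (1.0), (2.0), (3.0), (4.0))"
).fetchone()
print(row)  # (2.5,)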
+
+    @strawberry.field
+    async def trace_token_count_time_series(
+        self,
+        info: Info[Context, None],
+        time_range: TimeRange,
+        time_bin_config: Optional[TimeBinConfig] = UNSET,
+    ) -> TraceTokenCountTimeSeries:
+        if time_range.start is None:
+            raise BadRequest("Start time is required")
+
+        dialect = info.context.db.dialect
+        utc_offset_minutes = 0
+        field: Literal["minute", "hour", "day", "week", "month", "year"] = "hour"
+        if time_bin_config:
+            utc_offset_minutes = time_bin_config.utc_offset_minutes
+            if time_bin_config.scale is TimeBinScale.MINUTE:
+                field = "minute"
+            elif time_bin_config.scale is TimeBinScale.HOUR:
+                field = "hour"
+            elif time_bin_config.scale is TimeBinScale.DAY:
+                field = "day"
+            elif time_bin_config.scale is TimeBinScale.WEEK:
+                field = "week"
+            elif time_bin_config.scale is TimeBinScale.MONTH:
+                field = "month"
+            elif time_bin_config.scale is TimeBinScale.YEAR:
+                field = "year"
+        bucket = date_trunc(dialect, field, models.Trace.start_time, utc_offset_minutes)
         stmt = (
-            select(hour, func.count())
-            .join(models.Trace)
+            select(
+                bucket,
+                func.sum(models.SpanCost.total_tokens),
+                func.sum(models.SpanCost.prompt_tokens),
+                func.sum(models.SpanCost.completion_tokens),
+            )
+            .join_from(
+                models.Trace,
+                models.SpanCost,
+                onclause=models.SpanCost.trace_rowid == models.Trace.id,
+            )
             .where(models.Trace.project_rowid == self.project_rowid)
-            .group_by(hour)
-            .order_by(hour)
+            .group_by(bucket)
+            .order_by(bucket)
         )
+        if time_range:
+            if time_range.start:
+                stmt = stmt.where(time_range.start <= models.Trace.start_time)
+            if time_range.end:
+                stmt = stmt.where(models.Trace.start_time < time_range.end)
+        data: dict[datetime, TraceTokenCountTimeSeriesDataPoint] = {}
+        async with info.context.db() as session:
+            async for (
+                t,
+                total_tokens,
+                prompt_tokens,
+                completion_tokens,
+            ) in await session.stream(stmt):
+                timestamp = _as_datetime(t)
+                data[timestamp] = TraceTokenCountTimeSeriesDataPoint(
+                    timestamp=timestamp,
+                    prompt_token_count=prompt_tokens,
+                    completion_token_count=completion_tokens,
+                    total_token_count=total_tokens,
+                )
+
+        data_timestamps: list[datetime] = [data_point.timestamp for data_point in data.values()]
+        min_time = min([*data_timestamps, time_range.start])
+        max_time = max(
+            [
+                *data_timestamps,
+                *([time_range.end] if time_range.end else []),
+            ],
+            default=datetime.now(timezone.utc),
+        )
+        for timestamp in get_timestamp_range(
+            start_time=min_time,
+            end_time=max_time,
+            stride=field,
+            utc_offset_minutes=utc_offset_minutes,
+        ):
+            if timestamp not in data:
+                data[timestamp] = TraceTokenCountTimeSeriesDataPoint(timestamp=timestamp)
+        return TraceTokenCountTimeSeries(data=sorted(data.values(), key=lambda x: x.timestamp))
+
+    @strawberry.field
+    async def trace_token_cost_time_series(
+        self,
+        info: Info[Context, None],
+        time_range: TimeRange,
+        time_bin_config: Optional[TimeBinConfig] = UNSET,
+    ) -> TraceTokenCostTimeSeries:
+        if time_range.start is None:
+            raise BadRequest("Start time is required")
 
-        # Apply time range filtering if provided
+        dialect = info.context.db.dialect
+        utc_offset_minutes = 0
+        field: Literal["minute", "hour", "day", "week", "month", "year"] = "hour"
+        if time_bin_config:
+            utc_offset_minutes = time_bin_config.utc_offset_minutes
+            if time_bin_config.scale is TimeBinScale.MINUTE:
+                field = "minute"
+            elif time_bin_config.scale is TimeBinScale.HOUR:
+                field = "hour"
+            elif time_bin_config.scale is TimeBinScale.DAY:
+                field = "day"
+            elif time_bin_config.scale is TimeBinScale.WEEK:
+                field = "week"
+            elif time_bin_config.scale is TimeBinScale.MONTH:
+                field = "month"
+            elif time_bin_config.scale is TimeBinScale.YEAR:
+                field = "year"
+        bucket = date_trunc(dialect, field, models.Trace.start_time, utc_offset_minutes)
+        stmt = (
+            select(
+                bucket,
+                func.sum(models.SpanCost.total_cost),
+                func.sum(models.SpanCost.prompt_cost),
+                func.sum(models.SpanCost.completion_cost),
+            )
+            .join_from(
+                models.Trace,
+                models.SpanCost,
+                onclause=models.SpanCost.trace_rowid == models.Trace.id,
+            )
+            .where(models.Trace.project_rowid == self.project_rowid)
+            .group_by(bucket)
+            .order_by(bucket)
+        )
         if time_range:
-            if t := time_range.start:
-                # Round down to nearest hour for the start time
-                start = t.replace(minute=0, second=0, microsecond=0)
-                stmt = stmt.where(start <= models.Span.start_time)
-            if t := time_range.end:
-                # Round up to nearest hour for the end time
-                # If the time is already at the start of an hour, use it as is
-                if t.minute == 0 and t.second == 0 and t.microsecond == 0:
-                    end = t
-                else:
-                    # Otherwise, round up to the next hour
-                    end = t.replace(minute=0, second=0, microsecond=0) + timedelta(hours=1)
-                stmt = stmt.where(models.Span.start_time < end)
+            if time_range.start:
+                stmt = stmt.where(time_range.start <= models.Trace.start_time)
+            if time_range.end:
+                stmt = stmt.where(models.Trace.start_time < time_range.end)
+        data: dict[datetime, TraceTokenCostTimeSeriesDataPoint] = {}
+        async with info.context.db() as session:
+            async for (
+                t,
+                total_cost,
+                prompt_cost,
+                completion_cost,
+            ) in await session.stream(stmt):
+                timestamp = _as_datetime(t)
+                data[timestamp] = TraceTokenCostTimeSeriesDataPoint(
+                    timestamp=timestamp,
+                    prompt_cost=prompt_cost,
+                    completion_cost=completion_cost,
+                    total_cost=total_cost,
+                )
 
-        # Execute the query and convert the results to a time series
+        data_timestamps: list[datetime] = [data_point.timestamp for data_point in data.values()]
+        min_time = min([*data_timestamps, time_range.start])
+        max_time = max(
+            [
+                *data_timestamps,
+                *([time_range.end] if time_range.end else []),
+            ],
+            default=datetime.now(timezone.utc),
+        )
+        for timestamp in get_timestamp_range(
+            start_time=min_time,
+            end_time=max_time,
+            stride=field,
+            utc_offset_minutes=utc_offset_minutes,
+        ):
+            if timestamp not in data:
+                data[timestamp] = TraceTokenCostTimeSeriesDataPoint(timestamp=timestamp)
+        return TraceTokenCostTimeSeries(data=sorted(data.values(), key=lambda x: x.timestamp))
+
+    @strawberry.field
+    async def span_annotation_score_time_series(
+        self,
+        info: Info[Context, None],
+        time_range: TimeRange,
+        time_bin_config: Optional[TimeBinConfig] = UNSET,
+    ) -> SpanAnnotationScoreTimeSeries:
+        if time_range.start is None:
+            raise BadRequest("Start time is required")
+
+        dialect = info.context.db.dialect
+        utc_offset_minutes = 0
+        field: Literal["minute", "hour", "day", "week", "month", "year"] = "hour"
+        if time_bin_config:
+            utc_offset_minutes = time_bin_config.utc_offset_minutes
+            if time_bin_config.scale is TimeBinScale.MINUTE:
+                field = "minute"
+            elif time_bin_config.scale is TimeBinScale.HOUR:
+                field = "hour"
+            elif time_bin_config.scale is TimeBinScale.DAY:
+                field = "day"
+            elif time_bin_config.scale is TimeBinScale.WEEK:
+                field = "week"
+            elif time_bin_config.scale is TimeBinScale.MONTH:
+                field = "month"
+            elif time_bin_config.scale is TimeBinScale.YEAR:
+                field = "year"
+        bucket = date_trunc(dialect, field, models.Trace.start_time, utc_offset_minutes)
+        stmt = (
+            select(
+                bucket,
+                models.SpanAnnotation.name,
+                func.avg(models.SpanAnnotation.score).label("average_score"),
+            )
+            .join_from(
+                models.SpanAnnotation,
+                models.Span,
+                onclause=models.SpanAnnotation.span_rowid == models.Span.id,
+            )
+            .join_from(
+                models.Span,
+                models.Trace,
+                onclause=models.Span.trace_rowid == models.Trace.id,
+            )
+            .where(models.Trace.project_rowid == self.project_rowid)
+            .group_by(bucket, models.SpanAnnotation.name)
+            .order_by(bucket)
+        )
+        if time_range:
+            if time_range.start:
+                stmt = stmt.where(time_range.start <= models.Trace.start_time)
+            if time_range.end:
+                stmt = stmt.where(models.Trace.start_time < time_range.end)
+        scores: dict[datetime, dict[str, float]] = {}
+        unique_names: set[str] = set()
         async with info.context.db() as session:
-            data = await session.stream(stmt)
-        return SpanCountTimeSeries(
-            data=[
-                TimeSeriesDataPoint(
-                    timestamp=_as_datetime(t),
-                    value=v,
-                )
-                async for t, v in data
-            ]
+            async for (
+                t,
+                name,
+                average_score,
+            ) in await session.stream(stmt):
+                timestamp = _as_datetime(t)
+                if timestamp not in scores:
+                    scores[timestamp] = {}
+                scores[timestamp][name] = average_score
+                unique_names.add(name)
+
+        score_timestamps: list[datetime] = [timestamp for timestamp in scores]
+        min_time = min([*score_timestamps, time_range.start])
+        max_time = max(
+            [
+                *score_timestamps,
+                *([time_range.end] if time_range.end else []),
+            ],
+            default=datetime.now(timezone.utc),
+        )
+        data: dict[datetime, SpanAnnotationScoreTimeSeriesDataPoint] = {
+            timestamp: SpanAnnotationScoreTimeSeriesDataPoint(
+                timestamp=timestamp,
+                scores_with_labels=[
+                    SpanAnnotationScoreWithLabel(label=label, score=scores[timestamp][label])
+                    for label in scores[timestamp]
+                ],
             )
+            for timestamp in score_timestamps
+        }
+        for timestamp in get_timestamp_range(
+            start_time=min_time,
+            end_time=max_time,
+            stride=field,
+            utc_offset_minutes=utc_offset_minutes,
+        ):
+            if timestamp not in data:
+                data[timestamp] = SpanAnnotationScoreTimeSeriesDataPoint(
+                    timestamp=timestamp,
+                    scores_with_labels=[],
+                )
+        return SpanAnnotationScoreTimeSeries(
+            data=sorted(data.values(), key=lambda x: x.timestamp),
+            names=sorted(list(unique_names)),
+        )
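Unlike the other series, each annotation-score point carries a list of (label, average) pairs, because several annotation names can land in the same bucket; the `names` field lists every name seen in the range. Using the types added further down in this diff, one bucket with two annotation names would look like this (values are illustrative):

from datetime import datetime, timezone

bucket_start = datetime(2025, 1, 1, 12, tzinfo=timezone.utc)
point = SpanAnnotationScoreTimeSeriesDataPoint(
    timestamp=bucket_start,
    scores_with_labels=[
        SpanAnnotationScoreWithLabel(label="correctness", score=0.75),
        SpanAnnotationScoreWithLabel(label="toxicity", score=0.10),
    ],
)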
 
 
 @strawberry.type
@@ -782,6 +1317,85 @@ class SpanCountTimeSeries(TimeSeries):
     """A time series of span count"""
 
 
+@strawberry.type
+class TraceCountTimeSeries(TimeSeries):
+    """A time series of trace count"""
+
+
+@strawberry.type
+class TraceCountByStatusTimeSeriesDataPoint:
+    timestamp: datetime
+    ok_count: int
+    error_count: int
+    total_count: int
+
+
+@strawberry.type
+class TraceCountByStatusTimeSeries:
+    data: list[TraceCountByStatusTimeSeriesDataPoint]
+
+
+@strawberry.type
+class TraceLatencyMsPercentileTimeSeriesDataPoint:
+    timestamp: datetime
+    p50: Optional[float] = None
+    p75: Optional[float] = None
+    p90: Optional[float] = None
+    p95: Optional[float] = None
+    p99: Optional[float] = None
+    p999: Optional[float] = None
+    max: Optional[float] = None
+
+
+@strawberry.type
+class TraceLatencyPercentileTimeSeries:
+    data: list[TraceLatencyMsPercentileTimeSeriesDataPoint]
+
+
+@strawberry.type
+class TraceTokenCountTimeSeriesDataPoint:
+    timestamp: datetime
+    prompt_token_count: Optional[float] = None
+    completion_token_count: Optional[float] = None
+    total_token_count: Optional[float] = None
+
+
+@strawberry.type
+class TraceTokenCountTimeSeries:
+    data: list[TraceTokenCountTimeSeriesDataPoint]
+
+
+@strawberry.type
+class TraceTokenCostTimeSeriesDataPoint:
+    timestamp: datetime
+    prompt_cost: Optional[float] = None
+    completion_cost: Optional[float] = None
+    total_cost: Optional[float] = None
+
+
+@strawberry.type
+class TraceTokenCostTimeSeries:
+    data: list[TraceTokenCostTimeSeriesDataPoint]
+
+
+@strawberry.type
+class SpanAnnotationScoreWithLabel:
+    label: str
+    score: float
+
+
+@strawberry.type
+class SpanAnnotationScoreTimeSeriesDataPoint:
+    timestamp: datetime
+    scores_with_labels: list[SpanAnnotationScoreWithLabel]
+
+
+@strawberry.type
+class SpanAnnotationScoreTimeSeries:
+    data: list[SpanAnnotationScoreTimeSeriesDataPoint]
+    names: list[str]
+
+
 INPUT_VALUE = SpanAttributes.INPUT_VALUE.split(".")
 OUTPUT_VALUE = SpanAttributes.OUTPUT_VALUE.split(".")
 
@@ -790,5 +1404,213 @@ def _as_datetime(value: Any) -> datetime:
     if isinstance(value, datetime):
         return value
     if isinstance(value, str):
-        return datetime.fromisoformat(value)
+        return cast(datetime, normalize_datetime(datetime.fromisoformat(value), timezone.utc))
     raise ValueError(f"Cannot convert {value} to datetime")
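This change matters because SQLite returns the truncated buckets as ISO-8601 strings; routing them through `normalize_datetime` makes every bucket timestamp timezone-aware UTC, and the `cast` reflects that the helper apparently returns an Optional. A plausible sketch of that helper's behavior (an assumption; the real implementation lives in `phoenix/datetime_utils.py`):

from datetime import datetime, timezone, tzinfo
from typing import Optional


def normalize_datetime_sketch(dt: Optional[datetime], tz: Optional[tzinfo] = None) -> Optional[datetime]:
    # Assumed behavior: naive datetimes are tagged with the target zone,
    # aware ones are converted into it.
    if dt is None:
        return None
    target = tz or timezone.utc
    if dt.tzinfo is None:
        return dt.replace(tzinfo=target)
    return dt.astimezone(target)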
+
+
+async def _paginate_span_by_trace_start_time(
+    db: DbSessionFactory,
+    project_rowid: int,
+    time_range: Optional[TimeRange] = None,
+    first: Optional[int] = DEFAULT_PAGE_SIZE,
+    after: Optional[CursorString] = None,
+    sort: SpanSort = SpanSort(col=SpanColumn.startTime, dir=SortDir.desc),
+    orphan_span_as_root_span: Optional[bool] = True,
+    retries: int = 3,
+) -> Connection[Span]:
+    """Return one representative root span per trace, ordered by trace start time.
+
+    **Note**: Despite the function name, cursors are based on trace rowids, not span rowids.
+    This is because we paginate by traces (one span per trace), not individual spans.
+
+    **Important**: The edges list can be empty while has_next_page=True. This happens
+    when traces exist but have no matching root spans. Pagination continues because there
+    may be more traces ahead with spans.
+
+    Args:
+        db: Database session factory.
+        project_rowid: Project ID to query spans from.
+        time_range: Optional time range filter on trace start times.
+        first: Maximum number of edges to return (default: DEFAULT_PAGE_SIZE).
+        after: Cursor for pagination (points to trace position, not span).
+        sort: Sort by trace start time (asc/desc only).
+        orphan_span_as_root_span: Whether to include orphan spans as root spans.
+            True: spans with parent_id=NULL OR pointing to non-existent spans.
+            False: only spans with parent_id=NULL.
+        retries: Maximum number of retry attempts when insufficient edges are found.
+            When traces exist but lack root spans, the function retries pagination
+            to find traces with spans. Set to 0 to disable retries.
+
+    Returns:
+        Connection[Span] with:
+            - edges: At most one Edge per trace (may be empty list).
+            - page_info: Pagination info based on trace positions.
+
+    Key Points:
+        - Traces without root spans produce NO edges
+        - Spans ordered by trace start time, not span start time
+        - Cursors track trace positions for efficient large-scale pagination
+    """
+    # Build base trace query ordered by start time
+    traces = select(
+        models.Trace.id,
+        models.Trace.start_time,
+    ).where(models.Trace.project_rowid == project_rowid)
+    if sort.dir is SortDir.desc:
+        traces = traces.order_by(
+            models.Trace.start_time.desc(),
+            models.Trace.id.desc(),
+        )
+    else:
+        traces = traces.order_by(
+            models.Trace.start_time.asc(),
+            models.Trace.id.asc(),
+        )
+
+    # Apply time range filters
+    if time_range:
+        if time_range.start:
+            traces = traces.where(time_range.start <= models.Trace.start_time)
+        if time_range.end:
+            traces = traces.where(models.Trace.start_time < time_range.end)
+
+    # Apply cursor pagination
+    if after:
+        cursor = Cursor.from_string(after)
+        assert cursor.sort_column
+        compare = operator.lt if sort.dir is SortDir.desc else operator.gt
+        traces = traces.where(
+            compare(
+                tuple_(models.Trace.start_time, models.Trace.id),
+                (cursor.sort_column.value, cursor.rowid),
+            )
+        )
+
+    # Limit for pagination
+    if first:
+        traces = traces.limit(
+            first + 1  # over-fetch by one to determine whether there's a next page
+        )
+    traces_cte = traces.cte()
+
+    # Define join condition for root spans
+    if orphan_span_as_root_span:
+        # Include both NULL parent_id and orphaned spans
+        parent_spans = select(models.Span.span_id).alias("parent_spans")
+        onclause = and_(
+            models.Span.trace_rowid == traces_cte.c.id,
+            or_(
+                models.Span.parent_id.is_(None),
+                ~exists().where(models.Span.parent_id == parent_spans.c.span_id),
+            ),
+        )
+    else:
+        # Only spans with no parent (parent_id is NULL, excludes orphaned spans)
+        onclause = and_(
+            models.Span.trace_rowid == traces_cte.c.id,
+            models.Span.parent_id.is_(None),
+        )
+
+    # Join traces with root spans (left join allows traces without spans)
+    stmt = select(
+        traces_cte.c.id,
+        traces_cte.c.start_time,
+        models.Span.id,
+    ).join_from(
+        traces_cte,
+        models.Span,
+        onclause=onclause,
+        isouter=True,
+    )
+
+    # Order by trace time, then pick earliest span per trace
+    if sort.dir is SortDir.desc:
+        stmt = stmt.order_by(
+            traces_cte.c.start_time.desc(),
+            traces_cte.c.id.desc(),
+            models.Span.start_time.asc(),  # earliest span
+            models.Span.id.desc(),
+        )
+    else:
+        stmt = stmt.order_by(
+            traces_cte.c.start_time.asc(),
+            traces_cte.c.id.asc(),
+            models.Span.start_time.asc(),  # earliest span
+            models.Span.id.desc(),
+        )
+
+    # Use DISTINCT for PostgreSQL, manual grouping for SQLite
+    if db.dialect is SupportedSQLDialect.POSTGRESQL:
+        stmt = stmt.distinct(traces_cte.c.start_time, traces_cte.c.id)
+    elif db.dialect is SupportedSQLDialect.SQLITE:
+        # too complicated for SQLite, so we rely on groupby() below
+        pass
+    else:
+        assert_never(db.dialect)
+
+    # Process results and build edges
+    edges: list[Edge[Span]] = []
+    start_cursor: Optional[str] = None
+    end_cursor: Optional[str] = None
+    async with db() as session:
+        records = groupby(await session.stream(stmt), key=lambda record: record[:2])
+        async for (trace_rowid, trace_start_time), group in islice(records, first):
+            cursor = Cursor(
+                rowid=trace_rowid,
+                sort_column=CursorSortColumn(
+                    type=CursorSortColumnDataType.DATETIME,
+                    value=trace_start_time,
+                ),
+            )
+            if start_cursor is None:
+                start_cursor = str(cursor)
+            end_cursor = str(cursor)
+            first_record = group[0]
+            # Only create edge if trace has a root span
+            if (span_rowid := first_record[2]) is not None:
+                edges.append(Edge(node=Span(span_rowid=span_rowid), cursor=str(cursor)))
+        has_next_page = True
+        try:
+            await records.__anext__()
+        except StopAsyncIteration:
+            has_next_page = False
+
+    # Retry if we need more edges and more traces exist
+    if first and len(edges) < first and has_next_page:
+        while retries and (num_needed := first - len(edges)) and has_next_page:
+            retries -= 1
+            batch_size = max(first, 1000)
+            more = await _paginate_span_by_trace_start_time(
+                db=db,
+                project_rowid=project_rowid,
+                time_range=time_range,
+                first=batch_size,
+                after=end_cursor,
+                sort=sort,
+                orphan_span_as_root_span=orphan_span_as_root_span,
+                retries=0,
+            )
+            edges.extend(more.edges[:num_needed])
+            start_cursor = start_cursor or more.page_info.start_cursor
+            end_cursor = more.page_info.end_cursor if len(edges) < first else edges[-1].cursor
+            has_next_page = len(more.edges) > num_needed or more.page_info.has_next_page
+
+    return Connection(
+        edges=edges,
+        page_info=PageInfo(
+            start_cursor=start_cursor,
+            end_cursor=end_cursor,
+            has_previous_page=False,
+            has_next_page=has_next_page,
+        ),
+    )
+
+
+def to_gql_project(project: models.Project) -> Project:
+    """
+    Converts an ORM project to a GraphQL project.
+    """
+    return Project(
+        project_rowid=project.id,
+        db_project=project,
+    )