arize-phoenix 11.38.0__py3-none-any.whl → 12.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of arize-phoenix might be problematic.

Files changed (72)
  1. {arize_phoenix-11.38.0.dist-info → arize_phoenix-12.0.0.dist-info}/METADATA +3 -3
  2. {arize_phoenix-11.38.0.dist-info → arize_phoenix-12.0.0.dist-info}/RECORD +71 -50
  3. phoenix/config.py +1 -11
  4. phoenix/db/bulk_inserter.py +8 -0
  5. phoenix/db/facilitator.py +1 -1
  6. phoenix/db/helpers.py +202 -33
  7. phoenix/db/insertion/dataset.py +7 -0
  8. phoenix/db/insertion/helpers.py +2 -2
  9. phoenix/db/insertion/session_annotation.py +176 -0
  10. phoenix/db/insertion/types.py +30 -0
  11. phoenix/db/migrations/versions/01a8342c9cdf_add_user_id_on_datasets.py +40 -0
  12. phoenix/db/migrations/versions/0df286449799_add_session_annotations_table.py +105 -0
  13. phoenix/db/migrations/versions/272b66ff50f8_drop_single_indices.py +119 -0
  14. phoenix/db/migrations/versions/58228d933c91_dataset_labels.py +67 -0
  15. phoenix/db/migrations/versions/699f655af132_experiment_tags.py +57 -0
  16. phoenix/db/migrations/versions/735d3d93c33e_add_composite_indices.py +41 -0
  17. phoenix/db/migrations/versions/ab513d89518b_add_user_id_on_dataset_versions.py +40 -0
  18. phoenix/db/migrations/versions/d0690a79ea51_users_on_experiments.py +40 -0
  19. phoenix/db/migrations/versions/deb2c81c0bb2_dataset_splits.py +139 -0
  20. phoenix/db/migrations/versions/e76cbd66ffc3_add_experiments_dataset_examples.py +87 -0
  21. phoenix/db/models.py +285 -46
  22. phoenix/server/api/context.py +13 -2
  23. phoenix/server/api/dataloaders/__init__.py +6 -2
  24. phoenix/server/api/dataloaders/dataset_example_splits.py +40 -0
  25. phoenix/server/api/dataloaders/session_annotations_by_session.py +29 -0
  26. phoenix/server/api/dataloaders/table_fields.py +2 -2
  27. phoenix/server/api/dataloaders/trace_annotations_by_trace.py +27 -0
  28. phoenix/server/api/helpers/playground_clients.py +65 -35
  29. phoenix/server/api/helpers/playground_users.py +26 -0
  30. phoenix/server/api/input_types/{SpanAnnotationFilter.py → AnnotationFilter.py} +22 -14
  31. phoenix/server/api/input_types/CreateProjectSessionAnnotationInput.py +37 -0
  32. phoenix/server/api/input_types/UpdateAnnotationInput.py +34 -0
  33. phoenix/server/api/mutations/__init__.py +6 -0
  34. phoenix/server/api/mutations/chat_mutations.py +8 -3
  35. phoenix/server/api/mutations/dataset_mutations.py +5 -0
  36. phoenix/server/api/mutations/dataset_split_mutations.py +387 -0
  37. phoenix/server/api/mutations/project_session_annotations_mutations.py +161 -0
  38. phoenix/server/api/queries.py +32 -0
  39. phoenix/server/api/routers/v1/__init__.py +2 -0
  40. phoenix/server/api/routers/v1/annotations.py +320 -0
  41. phoenix/server/api/routers/v1/datasets.py +5 -0
  42. phoenix/server/api/routers/v1/experiments.py +10 -3
  43. phoenix/server/api/routers/v1/sessions.py +111 -0
  44. phoenix/server/api/routers/v1/traces.py +1 -2
  45. phoenix/server/api/routers/v1/users.py +7 -0
  46. phoenix/server/api/subscriptions.py +5 -2
  47. phoenix/server/api/types/DatasetExample.py +11 -0
  48. phoenix/server/api/types/DatasetSplit.py +32 -0
  49. phoenix/server/api/types/Experiment.py +0 -4
  50. phoenix/server/api/types/Project.py +16 -0
  51. phoenix/server/api/types/ProjectSession.py +88 -3
  52. phoenix/server/api/types/ProjectSessionAnnotation.py +68 -0
  53. phoenix/server/api/types/Span.py +5 -5
  54. phoenix/server/api/types/Trace.py +61 -0
  55. phoenix/server/app.py +6 -2
  56. phoenix/server/cost_tracking/model_cost_manifest.json +132 -2
  57. phoenix/server/dml_event.py +13 -0
  58. phoenix/server/static/.vite/manifest.json +39 -39
  59. phoenix/server/static/assets/{components-BQPHTBfv.js → components-Dl9SUw1U.js} +371 -327
  60. phoenix/server/static/assets/{index-BL5BMgJU.js → index-CqQS0dTo.js} +2 -2
  61. phoenix/server/static/assets/{pages-C0Y17J0T.js → pages-DKSjVA_E.js} +762 -514
  62. phoenix/server/static/assets/{vendor-BdjZxMii.js → vendor-CtbHQYl8.js} +1 -1
  63. phoenix/server/static/assets/{vendor-arizeai-CHYlS8jV.js → vendor-arizeai-D-lWOwIS.js} +1 -1
  64. phoenix/server/static/assets/{vendor-codemirror-Di6t4HnH.js → vendor-codemirror-BRBpy3_z.js} +3 -3
  65. phoenix/server/static/assets/{vendor-recharts-C9wCDYj3.js → vendor-recharts--KdSwB3m.js} +1 -1
  66. phoenix/server/static/assets/{vendor-shiki-MNnmOotP.js → vendor-shiki-CvRzZnIo.js} +1 -1
  67. phoenix/version.py +1 -1
  68. phoenix/server/api/dataloaders/experiment_repetition_counts.py +0 -39
  69. {arize_phoenix-11.38.0.dist-info → arize_phoenix-12.0.0.dist-info}/WHEEL +0 -0
  70. {arize_phoenix-11.38.0.dist-info → arize_phoenix-12.0.0.dist-info}/entry_points.txt +0 -0
  71. {arize_phoenix-11.38.0.dist-info → arize_phoenix-12.0.0.dist-info}/licenses/IP_NOTICE +0 -0
  72. {arize_phoenix-11.38.0.dist-info → arize_phoenix-12.0.0.dist-info}/licenses/LICENSE +0 -0
phoenix/server/api/types/ProjectSession.py CHANGED
@@ -1,14 +1,19 @@
+ from collections import defaultdict
+ from dataclasses import asdict, dataclass
  from datetime import datetime
  from typing import TYPE_CHECKING, Annotated, ClassVar, Optional, Type

+ import pandas as pd
  import strawberry
  from openinference.semconv.trace import SpanAttributes
  from sqlalchemy import select
  from strawberry import UNSET, Info, Private, lazy
- from strawberry.relay import Connection, GlobalID, Node, NodeID
+ from strawberry.relay import Connection, Node, NodeID

  from phoenix.db import models
  from phoenix.server.api.context import Context
+ from phoenix.server.api.input_types.AnnotationFilter import AnnotationFilter, satisfies_filter
+ from phoenix.server.api.types.AnnotationSummary import AnnotationSummary
  from phoenix.server.api.types.CostBreakdown import CostBreakdown
  from phoenix.server.api.types.MimeType import MimeType
  from phoenix.server.api.types.pagination import ConnectionArgs, CursorString, connection_from_list
@@ -18,6 +23,8 @@ from phoenix.server.api.types.SpanIOValue import SpanIOValue
  from phoenix.server.api.types.TokenUsage import TokenUsage

  if TYPE_CHECKING:
+     from phoenix.server.api.types.Project import Project
+     from phoenix.server.api.types.ProjectSessionAnnotation import ProjectSessionAnnotation
      from phoenix.server.api.types.Trace import Trace


@@ -31,10 +38,13 @@ class ProjectSession(Node):
      end_time: datetime

      @strawberry.field
-     async def project_id(self) -> GlobalID:
+     async def project(
+         self,
+         info: Info[Context, None],
+     ) -> Annotated["Project", lazy(".Project")]:
          from phoenix.server.api.types.Project import Project

-         return GlobalID(type_name=Project.__name__, node_id=str(self.project_rowid))
+         return Project(project_rowid=self.project_rowid)

      @strawberry.field
      async def num_traces(
@@ -165,6 +175,81 @@ class ProjectSession(Node):
              for entry in summary
          ]

+     @strawberry.field
+     async def session_annotations(
+         self,
+         info: Info[Context, None],
+     ) -> list[Annotated["ProjectSessionAnnotation", lazy(".ProjectSessionAnnotation")]]:
+         """Get all annotations for this session."""
+         from phoenix.server.api.types.ProjectSessionAnnotation import (
+             to_gql_project_session_annotation,
+         )
+
+         stmt = select(models.ProjectSessionAnnotation).filter_by(project_session_id=self.id_attr)
+         async with info.context.db() as session:
+             annotations = await session.stream_scalars(stmt)
+             return [
+                 to_gql_project_session_annotation(annotation) async for annotation in annotations
+             ]
+
+     @strawberry.field(
+         description="Summarizes each annotation (by name) associated with the session"
+     )  # type: ignore
+     async def session_annotation_summaries(
+         self,
+         info: Info[Context, None],
+         filter: Optional[AnnotationFilter] = None,
+     ) -> list[AnnotationSummary]:
+         """
+         Retrieves and summarizes annotations associated with this span.
+
+         This method aggregates annotation data by name and label, calculating metrics
+         such as count of occurrences and sum of scores. The results are organized
+         into a structured format that can be easily converted to a DataFrame.
+
+         Args:
+             info: GraphQL context information
+             filter: Optional filter to apply to annotations before processing
+
+         Returns:
+             A list of AnnotationSummary objects, each containing:
+             - name: The name of the annotation
+             - data: A list of dictionaries with label statistics
+         """
+         # Load all annotations for this span from the data loader
+         annotations = await info.context.data_loaders.session_annotations_by_session.load(
+             self.id_attr
+         )
+
+         # Apply filter if provided to narrow down the annotations
+         if filter:
+             annotations = [
+                 annotation for annotation in annotations if satisfies_filter(annotation, filter)
+             ]
+
+         @dataclass
+         class Metrics:
+             record_count: int = 0
+             label_count: int = 0
+             score_sum: float = 0
+             score_count: int = 0
+
+         summaries: defaultdict[str, defaultdict[Optional[str], Metrics]] = defaultdict(
+             lambda: defaultdict(Metrics)
+         )
+         for annotation in annotations:
+             metrics = summaries[annotation.name][annotation.label]
+             metrics.record_count += 1
+             metrics.label_count += int(annotation.label is not None)
+             metrics.score_sum += annotation.score or 0
+             metrics.score_count += int(annotation.score is not None)
+
+         result: list[AnnotationSummary] = []
+         for name, label_metrics in summaries.items():
+             rows = [{"label": label, **asdict(metrics)} for label, metrics in label_metrics.items()]
+             result.append(AnnotationSummary(name=name, df=pd.DataFrame(rows), simple_avg=True))
+         return result
+

  def to_gql_project_session(project_session: models.ProjectSession) -> ProjectSession:
      return ProjectSession(
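The new session_annotation_summaries resolver above groups annotations by name and label before building one DataFrame per annotation name. Below is a minimal standalone sketch of that aggregation, using plain dataclass stand-ins instead of Phoenix's ORM models; the Annotation class and sample values are illustrative only, not part of the package.

from collections import defaultdict
from dataclasses import asdict, dataclass
from typing import Optional

import pandas as pd


@dataclass
class Annotation:  # illustrative stand-in for models.ProjectSessionAnnotation
    name: str
    label: Optional[str]
    score: Optional[float]


@dataclass
class Metrics:
    record_count: int = 0
    label_count: int = 0
    score_sum: float = 0
    score_count: int = 0


def summarize(annotations: list[Annotation]) -> dict[str, pd.DataFrame]:
    # Same grouping as the resolver: one Metrics bucket per (name, label) pair.
    summaries: defaultdict[str, defaultdict[Optional[str], Metrics]] = defaultdict(
        lambda: defaultdict(Metrics)
    )
    for a in annotations:
        m = summaries[a.name][a.label]
        m.record_count += 1
        m.label_count += int(a.label is not None)
        m.score_sum += a.score or 0
        m.score_count += int(a.score is not None)
    # One DataFrame per annotation name, one row per label.
    return {
        name: pd.DataFrame([{"label": label, **asdict(m)} for label, m in by_label.items()])
        for name, by_label in summaries.items()
    }


print(summarize([
    Annotation("quality", "good", 1.0),
    Annotation("quality", "bad", 0.0),
    Annotation("quality", "good", 0.8),
]))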
phoenix/server/api/types/ProjectSessionAnnotation.py ADDED
@@ -0,0 +1,68 @@
+ from typing import Optional
+
+ import strawberry
+ from strawberry import Private
+ from strawberry.relay import GlobalID, Node, NodeID
+ from strawberry.scalars import JSON
+ from strawberry.types import Info
+
+ from phoenix.db import models
+ from phoenix.server.api.context import Context
+ from phoenix.server.api.types.AnnotatorKind import AnnotatorKind
+
+ from .AnnotationSource import AnnotationSource
+ from .User import User, to_gql_user
+
+
+ @strawberry.type
+ class ProjectSessionAnnotation(Node):
+     id_attr: NodeID[int]
+     user_id: Private[Optional[int]]
+     name: str
+     annotator_kind: AnnotatorKind
+     label: Optional[str]
+     score: Optional[float]
+     explanation: Optional[str]
+     metadata: JSON
+     _project_session_id: Private[Optional[int]]
+     identifier: str
+     source: AnnotationSource
+
+     @strawberry.field
+     async def project_session_id(self) -> GlobalID:
+         from phoenix.server.api.types.ProjectSession import ProjectSession
+
+         return GlobalID(type_name=ProjectSession.__name__, node_id=str(self._project_session_id))
+
+     @strawberry.field
+     async def user(
+         self,
+         info: Info[Context, None],
+     ) -> Optional[User]:
+         if self.user_id is None:
+             return None
+         user = await info.context.data_loaders.users.load(self.user_id)
+         if user is None:
+             return None
+         return to_gql_user(user)
+
+
+ def to_gql_project_session_annotation(
+     annotation: models.ProjectSessionAnnotation,
+ ) -> ProjectSessionAnnotation:
+     """
+     Converts an ORM projectSession annotation to a GraphQL ProjectSessionAnnotation.
+     """
+     return ProjectSessionAnnotation(
+         id_attr=annotation.id,
+         user_id=annotation.user_id,
+         _project_session_id=annotation.project_session_id,
+         name=annotation.name,
+         annotator_kind=AnnotatorKind(annotation.annotator_kind),
+         label=annotation.label,
+         score=annotation.score,
+         explanation=annotation.explanation,
+         metadata=JSON(annotation.metadata_),
+         identifier=annotation.identifier,
+         source=AnnotationSource(annotation.source),
+     )
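The new type above is exposed through the ProjectSession fields added earlier. Below is a rough client-side sketch of querying it over HTTP; it assumes a local Phoenix server on its default port, a relay-style node query in the schema, and strawberry's default camelCase field names (sessionAnnotations, annotatorKind). The node ID in the variables is a placeholder, not a real value.

import httpx

query = """
query SessionAnnotations($id: ID!) {
  node(id: $id) {
    ... on ProjectSession {
      sessionAnnotations {
        name
        label
        score
        annotatorKind
      }
    }
  }
}
"""

# Placeholder relay node ID for a ProjectSession; substitute a real one.
variables = {"id": "UHJvamVjdFNlc3Npb246MQ=="}

resp = httpx.post(
    "http://localhost:6006/graphql",
    json={"query": query, "variables": variables},
)
print(resp.json())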
phoenix/server/api/types/Span.py CHANGED
@@ -23,11 +23,11 @@ from phoenix.server.api.helpers.dataset_helpers import (
      get_dataset_example_input,
      get_dataset_example_output,
  )
- from phoenix.server.api.input_types.InvocationParameters import InvocationParameter
- from phoenix.server.api.input_types.SpanAnnotationFilter import (
-     SpanAnnotationFilter,
+ from phoenix.server.api.input_types.AnnotationFilter import (
+     AnnotationFilter,
      satisfies_filter,
  )
+ from phoenix.server.api.input_types.InvocationParameters import InvocationParameter
  from phoenix.server.api.input_types.SpanAnnotationSort import (
      SpanAnnotationColumn,
      SpanAnnotationSort,
@@ -547,7 +547,7 @@ class Span(Node):
          self,
          info: Info[Context, None],
          sort: Optional[SpanAnnotationSort] = UNSET,
-         filter: Optional[SpanAnnotationFilter] = None,
+         filter: Optional[AnnotationFilter] = None,
      ) -> list[SpanAnnotation]:
          span_id = self.span_rowid
          annotations = await info.context.data_loaders.span_annotations.load(span_id)
@@ -580,7 +580,7 @@
      async def span_annotation_summaries(
          self,
          info: Info[Context, None],
-         filter: Optional[SpanAnnotationFilter] = None,
+         filter: Optional[AnnotationFilter] = None,
      ) -> list[AnnotationSummary]:
          """
          Retrieves and summarizes annotations associated with this span.
phoenix/server/api/types/Trace.py CHANGED
@@ -1,8 +1,11 @@
  from __future__ import annotations

+ from collections import defaultdict
+ from dataclasses import asdict, dataclass
  from datetime import datetime
  from typing import TYPE_CHECKING, Annotated, Optional, Union

+ import pandas as pd
  import strawberry
  from openinference.semconv.trace import SpanAttributes
  from sqlalchemy import desc, select
@@ -13,7 +16,9 @@ from typing_extensions import TypeAlias

  from phoenix.db import models
  from phoenix.server.api.context import Context
+ from phoenix.server.api.input_types.AnnotationFilter import AnnotationFilter, satisfies_filter
  from phoenix.server.api.input_types.TraceAnnotationSort import TraceAnnotationSort
+ from phoenix.server.api.types.AnnotationSummary import AnnotationSummary
  from phoenix.server.api.types.CostBreakdown import CostBreakdown
  from phoenix.server.api.types.pagination import (
      ConnectionArgs,
@@ -229,6 +234,62 @@ class Trace(Node):
              annotations = await session.scalars(stmt)
          return [to_gql_trace_annotation(annotation) for annotation in annotations]

+     @strawberry.field(description="Summarizes each annotation (by name) associated with the trace")  # type: ignore
+     async def trace_annotation_summaries(
+         self,
+         info: Info[Context, None],
+         filter: Optional[AnnotationFilter] = None,
+     ) -> list[AnnotationSummary]:
+         """
+         Retrieves and summarizes annotations associated with this span.
+
+         This method aggregates annotation data by name and label, calculating metrics
+         such as count of occurrences and sum of scores. The results are organized
+         into a structured format that can be easily converted to a DataFrame.
+
+         Args:
+             info: GraphQL context information
+             filter: Optional filter to apply to annotations before processing
+
+         Returns:
+             A list of AnnotationSummary objects, each containing:
+             - name: The name of the annotation
+             - data: A list of dictionaries with label statistics
+         """
+         # Load all annotations for this span from the data loader
+         annotations = await info.context.data_loaders.trace_annotations_by_trace.load(
+             self.trace_rowid
+         )
+
+         # Apply filter if provided to narrow down the annotations
+         if filter:
+             annotations = [
+                 annotation for annotation in annotations if satisfies_filter(annotation, filter)
+             ]
+
+         @dataclass
+         class Metrics:
+             record_count: int = 0
+             label_count: int = 0
+             score_sum: float = 0
+             score_count: int = 0
+
+         summaries: defaultdict[str, defaultdict[Optional[str], Metrics]] = defaultdict(
+             lambda: defaultdict(Metrics)
+         )
+         for annotation in annotations:
+             metrics = summaries[annotation.name][annotation.label]
+             metrics.record_count += 1
+             metrics.label_count += int(annotation.label is not None)
+             metrics.score_sum += annotation.score or 0
+             metrics.score_count += int(annotation.score is not None)
+
+         result: list[AnnotationSummary] = []
+         for name, label_metrics in summaries.items():
+             rows = [{"label": label, **asdict(metrics)} for label, metrics in label_metrics.items()]
+             result.append(AnnotationSummary(name=name, df=pd.DataFrame(rows), simple_avg=True))
+         return result
+

      @strawberry.field
      async def cost_summary(
phoenix/server/app.py CHANGED
@@ -92,6 +92,7 @@ from phoenix.server.api.dataloaders import (
      DatasetExampleRevisionsDataLoader,
      DatasetExamplesAndVersionsByExperimentRunDataLoader,
      DatasetExampleSpansDataLoader,
+     DatasetExampleSplitsDataLoader,
      DocumentEvaluationsDataLoader,
      DocumentEvaluationSummaryDataLoader,
      DocumentRetrievalMetricsDataLoader,
@@ -99,7 +100,6 @@ from phoenix.server.api.dataloaders import (
      ExperimentErrorRatesDataLoader,
      ExperimentRepeatedRunGroupAnnotationSummariesDataLoader,
      ExperimentRepeatedRunGroupsDataLoader,
-     ExperimentRepetitionCountsDataLoader,
      ExperimentRunAnnotations,
      ExperimentRunCountsDataLoader,
      ExperimentSequenceNumberDataLoader,
@@ -112,6 +112,7 @@ from phoenix.server.api.dataloaders import (
      ProjectIdsByTraceRetentionPolicyIdDataLoader,
      PromptVersionSequenceNumberDataLoader,
      RecordCountDataLoader,
+     SessionAnnotationsBySessionDataLoader,
      SessionIODataLoader,
      SessionNumTracesDataLoader,
      SessionNumTracesWithErrorDataLoader,
@@ -137,6 +138,7 @@ from phoenix.server.api.dataloaders import (
      SpanProjectsDataLoader,
      TableFieldsDataLoader,
      TokenCountDataLoader,
+     TraceAnnotationsByTraceDataLoader,
      TraceByTraceIdsDataLoader,
      TraceRetentionPolicyIdByProjectIdDataLoader,
      TraceRootSpansDataLoader,
@@ -715,6 +717,7 @@ def create_graphql_router(
              dataset_examples_and_versions_by_experiment_run=DatasetExamplesAndVersionsByExperimentRunDataLoader(
                  db
              ),
+             dataset_example_splits=DatasetExampleSplitsDataLoader(db),
              document_evaluation_summaries=DocumentEvaluationSummaryDataLoader(
                  db,
                  cache_map=(
@@ -737,7 +740,6 @@
                  db
              ),
              experiment_repeated_run_groups=ExperimentRepeatedRunGroupsDataLoader(db),
-             experiment_repetition_counts=ExperimentRepetitionCountsDataLoader(db),
              experiment_run_annotations=ExperimentRunAnnotations(db),
              experiment_run_counts=ExperimentRunCountsDataLoader(db),
              experiment_sequence_number=ExperimentSequenceNumberDataLoader(db),
@@ -769,6 +771,7 @@
                  db,
                  cache_map=cache_for_dataloaders.record_count if cache_for_dataloaders else None,
              ),
+             session_annotations_by_session=SessionAnnotationsBySessionDataLoader(db),
              session_first_inputs=SessionIODataLoader(db, "first_input"),
              session_last_outputs=SessionIODataLoader(db, "last_output"),
              session_num_traces=SessionNumTracesDataLoader(db),
@@ -815,6 +818,7 @@
                  db,
                  cache_map=cache_for_dataloaders.token_count if cache_for_dataloaders else None,
              ),
+             trace_annotations_by_trace=TraceAnnotationsByTraceDataLoader(db),
              trace_by_trace_ids=TraceByTraceIdsDataLoader(db),
              trace_fields=TableFieldsDataLoader(db, models.Trace),
              trace_retention_policy_id_by_project_id=TraceRetentionPolicyIdByProjectIdDataLoader(
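The two annotation loaders registered above (SessionAnnotationsBySessionDataLoader, TraceAnnotationsByTraceDataLoader) plug into strawberry's DataLoader pattern: each resolver calls load(key), and keys requested in the same batch are resolved by a single load function. Below is a generic, self-contained sketch of that pattern using an in-memory list in place of Phoenix's SQLAlchemy queries; the rows and grouping are illustrative, not the package's actual loader code.

import asyncio
from collections import defaultdict

from strawberry.dataloader import DataLoader

# Illustrative rows: (session_rowid, annotation_name)
ROWS = [(1, "quality"), (1, "toxicity"), (2, "quality")]


async def load_annotations_by_session(keys: list[int]) -> list[list[str]]:
    # A real loader would issue one SQL query filtered on all keys at once;
    # here we just group the in-memory rows by session id.
    grouped: defaultdict[int, list[str]] = defaultdict(list)
    for session_id, name in ROWS:
        grouped[session_id].append(name)
    # DataLoader contract: return one result per key, in the same order.
    return [grouped[key] for key in keys]


async def main() -> None:
    loader = DataLoader(load_fn=load_annotations_by_session)
    # Both load() calls below are batched into one call to the load function.
    print(await asyncio.gather(loader.load(1), loader.load(2)))


asyncio.run(main())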
phoenix/server/cost_tracking/model_cost_manifest.json CHANGED
@@ -768,6 +768,33 @@
          }
        ]
      },
+     {
+       "name": "gemini-2.5-flash-lite-preview-09-2025",
+       "name_pattern": "gemini-2\\.5-flash-lite-preview-09-2025",
+       "source": "litellm",
+       "token_prices": [
+         {
+           "base_rate": 1e-7,
+           "is_prompt": true,
+           "token_type": "input"
+         },
+         {
+           "base_rate": 4e-7,
+           "is_prompt": false,
+           "token_type": "output"
+         },
+         {
+           "base_rate": 2.5e-8,
+           "is_prompt": true,
+           "token_type": "cache_read"
+         },
+         {
+           "base_rate": 3e-7,
+           "is_prompt": true,
+           "token_type": "audio"
+         }
+       ]
+     },
      {
        "name": "gemini-2.5-flash-preview-04-17",
        "name_pattern": "gemini-2\\.5-flash-preview-04-17",
@@ -822,6 +849,33 @@
          }
        ]
      },
+     {
+       "name": "gemini-2.5-flash-preview-09-2025",
+       "name_pattern": "gemini-2\\.5-flash-preview-09-2025",
+       "source": "litellm",
+       "token_prices": [
+         {
+           "base_rate": 3e-7,
+           "is_prompt": true,
+           "token_type": "input"
+         },
+         {
+           "base_rate": 2.5e-6,
+           "is_prompt": false,
+           "token_type": "output"
+         },
+         {
+           "base_rate": 7.5e-8,
+           "is_prompt": true,
+           "token_type": "cache_read"
+         },
+         {
+           "base_rate": 1e-6,
+           "is_prompt": true,
+           "token_type": "audio"
+         }
+       ]
+     },
      {
        "name": "gemini-2.5-pro",
        "name_pattern": "gemini-2.5-pro(@[a-zA-Z0-9]+)?",
@@ -974,18 +1028,72 @@
          }
        ]
      },
+     {
+       "name": "gemini-flash-latest",
+       "name_pattern": "gemini-flash-latest",
+       "source": "litellm",
+       "token_prices": [
+         {
+           "base_rate": 3e-7,
+           "is_prompt": true,
+           "token_type": "input"
+         },
+         {
+           "base_rate": 2.5e-6,
+           "is_prompt": false,
+           "token_type": "output"
+         },
+         {
+           "base_rate": 7.5e-8,
+           "is_prompt": true,
+           "token_type": "cache_read"
+         },
+         {
+           "base_rate": 1e-6,
+           "is_prompt": true,
+           "token_type": "audio"
+         }
+       ]
+     },
+     {
+       "name": "gemini-flash-lite-latest",
+       "name_pattern": "gemini-flash-lite-latest",
+       "source": "litellm",
+       "token_prices": [
+         {
+           "base_rate": 1e-7,
+           "is_prompt": true,
+           "token_type": "input"
+         },
+         {
+           "base_rate": 4e-7,
+           "is_prompt": false,
+           "token_type": "output"
+         },
+         {
+           "base_rate": 2.5e-8,
+           "is_prompt": true,
+           "token_type": "cache_read"
+         },
+         {
+           "base_rate": 3e-7,
+           "is_prompt": true,
+           "token_type": "audio"
+         }
+       ]
+     },
      {
        "name": "gpt-3.5-turbo",
        "name_pattern": "gpt-(35|3.5)-turbo",
        "source": "litellm",
        "token_prices": [
          {
-           "base_rate": 1.5e-6,
+           "base_rate": 5e-7,
            "is_prompt": true,
            "token_type": "input"
          },
          {
-           "base_rate": 2e-6,
+           "base_rate": 1.5e-6,
            "is_prompt": false,
            "token_type": "output"
          }
@@ -2247,6 +2355,28 @@
          }
        ]
      },
+     {
+       "name": "gpt-5-codex",
+       "name_pattern": "gpt-5-codex",
+       "source": "litellm",
+       "token_prices": [
+         {
+           "base_rate": 1.25e-6,
+           "is_prompt": true,
+           "token_type": "input"
+         },
+         {
+           "base_rate": 0.00001,
+           "is_prompt": false,
+           "token_type": "output"
+         },
+         {
+           "base_rate": 1.25e-7,
+           "is_prompt": true,
+           "token_type": "cache_read"
+         }
+       ]
+     },
      {
        "name": "gpt-5-mini",
        "name_pattern": "gpt-5-mini",
phoenix/server/dml_event.py CHANGED
@@ -127,6 +127,19 @@ class TraceAnnotationInsertEvent(TraceAnnotationDmlEvent): ...
  class TraceAnnotationDeleteEvent(TraceAnnotationDmlEvent): ...


+ @dataclass(frozen=True)
+ class ProjectSessionAnnotationDmlEvent(DmlEvent):
+     table = models.ProjectSessionAnnotation
+
+
+ @dataclass(frozen=True)
+ class ProjectSessionAnnotationInsertEvent(ProjectSessionAnnotationDmlEvent): ...
+
+
+ @dataclass(frozen=True)
+ class ProjectSessionAnnotationDeleteEvent(ProjectSessionAnnotationDmlEvent): ...
+
+
  @dataclass(frozen=True)
  class DocumentAnnotationDmlEvent(DmlEvent):
      table = models.DocumentAnnotation