arize-phoenix 12.6.1__py3-none-any.whl → 12.7.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of arize-phoenix has been flagged as potentially problematic in its registry.

Files changed (32)
  1. {arize_phoenix-12.6.1.dist-info → arize_phoenix-12.7.1.dist-info}/METADATA +17 -11
  2. {arize_phoenix-12.6.1.dist-info → arize_phoenix-12.7.1.dist-info}/RECORD +30 -29
  3. phoenix/server/api/context.py +2 -0
  4. phoenix/server/api/dataloaders/__init__.py +2 -0
  5. phoenix/server/api/dataloaders/experiment_dataset_splits.py +43 -0
  6. phoenix/server/api/input_types/ChatCompletionInput.py +1 -0
  7. phoenix/server/api/input_types/DatasetFilter.py +5 -2
  8. phoenix/server/api/mutations/chat_mutations.py +17 -3
  9. phoenix/server/api/queries.py +53 -28
  10. phoenix/server/api/routers/v1/datasets.py +150 -1
  11. phoenix/server/api/routers/v1/experiments.py +19 -0
  12. phoenix/server/api/subscriptions.py +22 -19
  13. phoenix/server/api/types/Dataset.py +1 -1
  14. phoenix/server/api/types/Experiment.py +14 -0
  15. phoenix/server/app.py +2 -0
  16. phoenix/server/cost_tracking/model_cost_manifest.json +142 -0
  17. phoenix/server/static/.vite/manifest.json +43 -43
  18. phoenix/server/static/assets/{components-XKc983B9.js → components-BLK5vehh.js} +777 -696
  19. phoenix/server/static/assets/{index-DG8e74sg.js → index-BP0Shd90.js} +2 -2
  20. phoenix/server/static/assets/{pages-CSZW-lt0.js → pages-DIVgyYyy.js} +595 -604
  21. phoenix/server/static/assets/vendor-3BvTzoBp.js +920 -0
  22. phoenix/server/static/assets/{vendor-arizeai-Cb1ncvYH.js → vendor-arizeai-C6_oC0y8.js} +1 -1
  23. phoenix/server/static/assets/vendor-codemirror-DPnZGAZA.js +25 -0
  24. phoenix/server/static/assets/{vendor-recharts-BC1ysIKu.js → vendor-recharts-CjgSbsB0.js} +7 -7
  25. phoenix/server/static/assets/{vendor-shiki-B45T-YxN.js → vendor-shiki-CJyhDG0E.js} +1 -1
  26. phoenix/version.py +1 -1
  27. phoenix/server/static/assets/vendor-CQ4tN9P7.js +0 -918
  28. phoenix/server/static/assets/vendor-codemirror-CckmKopH.js +0 -25
  29. {arize_phoenix-12.6.1.dist-info → arize_phoenix-12.7.1.dist-info}/WHEEL +0 -0
  30. {arize_phoenix-12.6.1.dist-info → arize_phoenix-12.7.1.dist-info}/entry_points.txt +0 -0
  31. {arize_phoenix-12.6.1.dist-info → arize_phoenix-12.7.1.dist-info}/licenses/IP_NOTICE +0 -0
  32. {arize_phoenix-12.6.1.dist-info → arize_phoenix-12.7.1.dist-info}/licenses/LICENSE +0 -0
phoenix/server/api/routers/v1/datasets.py CHANGED
@@ -23,6 +23,9 @@ from starlette.concurrency import run_in_threadpool
  from starlette.datastructures import FormData, UploadFile
  from starlette.requests import Request
  from starlette.responses import Response
+ from starlette.status import (
+     HTTP_404_NOT_FOUND,
+ )
  from strawberry.relay import GlobalID
  from typing_extensions import TypeAlias, assert_never

@@ -34,8 +37,10 @@ from phoenix.db.insertion.dataset import (
      ExampleContent,
      add_dataset_examples,
  )
+ from phoenix.db.types.db_models import UNDEFINED
  from phoenix.server.api.types.Dataset import Dataset as DatasetNodeType
  from phoenix.server.api.types.DatasetExample import DatasetExample as DatasetExampleNodeType
+ from phoenix.server.api.types.DatasetSplit import DatasetSplit as DatasetSplitNodeType
  from phoenix.server.api.types.DatasetVersion import DatasetVersion as DatasetVersionNodeType
  from phoenix.server.api.types.node import from_global_id_with_expected_type
  from phoenix.server.api.utils import delete_projects, delete_traces
@@ -697,6 +702,7 @@ class DatasetExample(V1RoutesBaseModel):
  class ListDatasetExamplesData(V1RoutesBaseModel):
      dataset_id: str
      version_id: str
+     filtered_splits: list[str] = UNDEFINED
      examples: list[DatasetExample]


@@ -719,6 +725,10 @@ async def get_dataset_examples(
              "The ID of the dataset version (if omitted, returns data from the latest version)"
          ),
      ),
+     split: Optional[list[str]] = Query(
+         default=None,
+         description="List of dataset split identifiers (GlobalIDs or names) to filter by",
+     ),
  ) -> ListDatasetExamplesResponseBody:
      try:
          dataset_gid = GlobalID.from_id(id)
@@ -795,9 +805,13 @@ async def get_dataset_examples(
          )

          subquery = partial_subquery.subquery()
+
          # Query for the most recent example revisions that are not deleted
          query = (
-             select(models.DatasetExample, models.DatasetExampleRevision)
+             select(
+                 models.DatasetExample,
+                 models.DatasetExampleRevision,
+             )
              .join(
                  models.DatasetExampleRevision,
                  models.DatasetExample.id == models.DatasetExampleRevision.dataset_example_id,
@@ -810,6 +824,28 @@ async def get_dataset_examples(
              .filter(models.DatasetExampleRevision.revision_kind != "DELETE")
              .order_by(models.DatasetExample.id.asc())
          )
+
+         # If splits are provided, filter by dataset splits
+         resolved_split_names: list[str] = []
+         if split:
+             # Resolve split identifiers (IDs or names) to IDs and names
+             resolved_split_ids, resolved_split_names = await _resolve_split_identifiers(
+                 session, split
+             )
+
+             # Add filter for splits (join with the association table)
+             # Use distinct() to prevent duplicates when an example belongs to
+             # multiple splits
+             query = (
+                 query.join(
+                     models.DatasetSplitDatasetExample,
+                     models.DatasetExample.id
+                     == models.DatasetSplitDatasetExample.dataset_example_id,
+                 )
+                 .filter(models.DatasetSplitDatasetExample.dataset_split_id.in_(resolved_split_ids))
+                 .distinct()
+             )
+
          examples = [
              DatasetExample(
                  id=str(GlobalID("DatasetExample", str(example.id))),
@@ -824,6 +860,7 @@ async def get_dataset_examples(
          data=ListDatasetExamplesData(
              dataset_id=str(GlobalID("Dataset", str(resolved_dataset_id))),
              version_id=str(GlobalID("DatasetVersion", str(resolved_version_id))),
+             filtered_splits=resolved_split_names,
              examples=examples,
          )
      )
@@ -1080,3 +1117,115 @@ async def _get_db_examples(

  def _is_all_dict(seq: Sequence[Any]) -> bool:
      return all(map(lambda obj: isinstance(obj, dict), seq))
+
+
+ # Split identifier helper types and functions
+ class _SplitId(int): ...
+
+
+ _SplitIdentifier: TypeAlias = Union[_SplitId, str]
+
+
+ def _parse_split_identifier(split_identifier: str) -> _SplitIdentifier:
+     """
+     Parse a split identifier as either a GlobalID or a name.
+
+     Args:
+         split_identifier: The identifier string (GlobalID or name)
+
+     Returns:
+         Either a _SplitId or an Identifier
+
+     Raises:
+         HTTPException: If the identifier format is invalid
+     """
+     if not split_identifier:
+         raise HTTPException(422, "Invalid split identifier")
+     try:
+         split_id = from_global_id_with_expected_type(
+             GlobalID.from_id(split_identifier),
+             DatasetSplitNodeType.__name__,
+         )
+     except ValueError:
+         return split_identifier
+     return _SplitId(split_id)
+
+
+ async def _resolve_split_identifiers(
+     session: AsyncSession,
+     split_identifiers: list[str],
+ ) -> tuple[list[int], list[str]]:
+     """
+     Resolve a list of split identifiers (IDs or names) to split IDs and names.
+
+     Args:
+         session: The database session
+         split_identifiers: List of split identifiers (GlobalIDs or names)
+
+     Returns:
+         Tuple of (list of split IDs, list of split names)
+
+     Raises:
+         HTTPException: If any split identifier is invalid or not found
+     """
+     split_ids: list[int] = []
+     split_names: list[str] = []
+
+     # Parse all identifiers first
+     parsed_identifiers: list[_SplitIdentifier] = []
+     for identifier_str in split_identifiers:
+         parsed_identifiers.append(_parse_split_identifier(identifier_str.strip()))
+
+     # Separate IDs and names
+     requested_ids: list[int] = []
+     requested_names: list[str] = []
+     for identifier in parsed_identifiers:
+         if isinstance(identifier, _SplitId):
+             requested_ids.append(int(identifier))
+         elif isinstance(identifier, str):
+             requested_names.append(identifier)
+         else:
+             assert_never(identifier)
+
+     # Query for splits by ID
+     if requested_ids:
+         id_results = await session.stream(
+             select(models.DatasetSplit.id, models.DatasetSplit.name).where(
+                 models.DatasetSplit.id.in_(requested_ids)
+             )
+         )
+         async for split_id, split_name in id_results:
+             split_ids.append(split_id)
+             split_names.append(split_name)
+
+     # Check if all requested IDs were found
+     found_ids = set(split_ids[-len(requested_ids) :] if requested_ids else [])
+     missing_ids = [sid for sid in requested_ids if sid not in found_ids]
+     if missing_ids:
+         raise HTTPException(
+             status_code=HTTP_404_NOT_FOUND,
+             detail=f"Dataset splits not found for IDs: {', '.join(map(str, missing_ids))}",
+         )
+
+     # Query for splits by name
+     name_to_id: dict[str, int] = {}
+     if requested_names:
+         name_results = await session.stream(
+             select(models.DatasetSplit.id, models.DatasetSplit.name).where(
+                 models.DatasetSplit.name.in_(requested_names)
+             )
+         )
+         async for split_id, split_name in name_results:
+             split_ids.append(split_id)
+             split_names.append(split_name)
+             name_to_id[split_name] = split_id
+
+     # Check if all requested names were found
+     missing_names = [name for name in requested_names if name not in name_to_id]
+     if missing_names:
+         raise HTTPException(
+             status_code=HTTP_404_NOT_FOUND,
+             detail=f"Dataset splits not found: {', '.join(missing_names)}",
+         )
+
+     return split_ids, split_names
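
Note: the new `split` query parameter on GET /v1/datasets/{id}/examples accepts split GlobalIDs or names and returns HTTP 404 when any identifier cannot be resolved. A minimal client sketch; the base URL, dataset GlobalID, and the "train" split name are illustrative assumptions, not part of this release:

    import httpx

    dataset_id = "RGF0YXNldDox"  # hypothetical Dataset GlobalID
    resp = httpx.get(
        f"http://localhost:6006/v1/datasets/{dataset_id}/examples",
        params={"split": ["train"]},  # repeatable; GlobalIDs or names
    )
    resp.raise_for_status()  # 404 if a split identifier is unknown
    data = resp.json()["data"]
    print(data["filtered_splits"])  # resolved split names echoed back
    print(len(data["examples"]))    # deduplicated across overlapping splits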
phoenix/server/api/routers/v1/experiments.py CHANGED
@@ -25,6 +25,7 @@ from phoenix.server.bearer_auth import PhoenixUser
  from phoenix.server.dml_event import ExperimentInsertEvent
  from phoenix.server.experiments.utils import generate_experiment_project_name

+ from .datasets import _resolve_split_identifiers
  from .models import V1RoutesBaseModel
  from .utils import ResponseBody, add_errors_to_responses, add_text_csv_content_to_responses

@@ -80,6 +81,10 @@ class CreateExperimentRequestBody(V1RoutesBaseModel):
              "(if omitted, the latest version will be used)"
          ),
      )
+     splits: Optional[list[str]] = Field(
+         default=None,
+         description="List of dataset split identifiers (GlobalIDs or names) to filter by",
+     )
      repetitions: int = Field(
          default=1, description="Number of times the experiment should be repeated for each example"
      )
@@ -192,6 +197,20 @@ async def create_experiment(
          project_name=project_name,
          user_id=user_id,
      )
+
+     if request_body.splits is not None:
+         # Resolve split identifiers (IDs or names) to IDs and names
+         resolved_split_ids, _ = await _resolve_split_identifiers(session, request_body.splits)
+
+         # Generate experiment dataset splits relation
+         # prior to the crosswalk table insert
+         # in insert_experiment_with_examples_snapshot
+         experiment.experiment_dataset_splits = [
+             models.ExperimentDatasetSplit(dataset_split_id=split_id)
+             for split_id in resolved_split_ids
+         ]
+
+     # crosswalk table assumes the relation is already present
      await insert_experiment_with_examples_snapshot(session, experiment)

      dialect = SupportedSQLDialect(session.bind.dialect.name)
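
Note: CreateExperimentRequestBody now accepts an optional `splits` list, resolved with the same helper as the datasets route, so an experiment can be scoped to a subset of examples at creation time. A hedged request sketch; the URL, dataset GlobalID, and payload values are assumptions for illustration:

    import httpx

    dataset_id = "RGF0YXNldDox"  # hypothetical Dataset GlobalID
    resp = httpx.post(
        f"http://localhost:6006/v1/datasets/{dataset_id}/experiments",
        json={
            "name": "split-scoped-experiment",
            "splits": ["train"],  # GlobalIDs or names; 404 if any is unknown
            "repetitions": 1,
        },
    )
    resp.raise_for_status()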
phoenix/server/api/subscriptions.py CHANGED
@@ -17,7 +17,7 @@ from typing import (
  import strawberry
  from openinference.instrumentation import safe_json_dumps
  from openinference.semconv.trace import SpanAttributes
- from sqlalchemy import and_, func, insert, select
+ from sqlalchemy import and_, insert, select
  from sqlalchemy.orm import load_only
  from strawberry.relay.types import GlobalID
  from strawberry.types import Info
@@ -26,7 +26,10 @@ from typing_extensions import TypeAlias, assert_never
  from phoenix.config import PLAYGROUND_PROJECT_NAME
  from phoenix.datetime_utils import local_now, normalize_datetime
  from phoenix.db import models
- from phoenix.db.helpers import insert_experiment_with_examples_snapshot
+ from phoenix.db.helpers import (
+     get_dataset_example_revisions,
+     insert_experiment_with_examples_snapshot,
+ )
  from phoenix.server.api.auth import IsLocked, IsNotReadOnly, IsNotViewer
  from phoenix.server.api.context import Context
  from phoenix.server.api.exceptions import BadRequest, CustomGraphQLError, NotFound
@@ -257,27 +260,22 @@ class Subscription:
              )
          ) is None:
              raise NotFound(f"Could not find dataset version with ID {version_id}")
-         revision_ids = (
-             select(func.max(models.DatasetExampleRevision.id))
-             .join(models.DatasetExample)
-             .where(
-                 and_(
-                     models.DatasetExample.dataset_id == dataset_id,
-                     models.DatasetExampleRevision.dataset_version_id <= resolved_version_id,
-                 )
-             )
-             .group_by(models.DatasetExampleRevision.dataset_example_id)
-         )
+
+         # Parse split IDs if provided
+         resolved_split_ids: Optional[list[int]] = None
+         if input.split_ids is not None and len(input.split_ids) > 0:
+             resolved_split_ids = [
+                 from_global_id_with_expected_type(split_id, models.DatasetSplit.__name__)
+                 for split_id in input.split_ids
+             ]
+
          if not (
              revisions := [
                  rev
                  async for rev in await session.stream_scalars(
-                     select(models.DatasetExampleRevision)
-                     .where(
-                         and_(
-                             models.DatasetExampleRevision.id.in_(revision_ids),
-                             models.DatasetExampleRevision.revision_kind != "DELETE",
-                         )
+                     get_dataset_example_revisions(
+                         resolved_version_id,
+                         split_ids=resolved_split_ids,
                      )
                      .order_by(models.DatasetExampleRevision.dataset_example_id.asc())
                      .options(
@@ -316,6 +314,11 @@ class Subscription:
                  project_name=project_name,
                  user_id=user_id,
              )
+             if resolved_split_ids:
+                 experiment.experiment_dataset_splits = [
+                     models.ExperimentDatasetSplit(dataset_split_id=split_id)
+                     for split_id in resolved_split_ids
+                 ]
              await insert_experiment_with_examples_snapshot(session, experiment)
              yield ChatCompletionSubscriptionExperiment(
                  experiment=to_gql_experiment(experiment)
phoenix/server/api/types/Dataset.py CHANGED
@@ -231,7 +231,7 @@ class Dataset(Node):
                  models.DatasetExampleRevision.revision_kind != "DELETE",
              )
          )
-         .order_by(models.DatasetExampleRevision.dataset_example_id.desc())
+         .order_by(models.DatasetExample.id.desc())
      )

      # Filter by split IDs if provided
phoenix/server/api/types/Experiment.py CHANGED
@@ -18,13 +18,16 @@ from phoenix.server.api.input_types.ExperimentRunSort import (
      get_experiment_run_cursor,
  )
  from phoenix.server.api.types.CostBreakdown import CostBreakdown
+ from phoenix.server.api.types.DatasetSplit import DatasetSplit, to_gql_dataset_split
  from phoenix.server.api.types.DatasetVersion import DatasetVersion
  from phoenix.server.api.types.ExperimentAnnotationSummary import ExperimentAnnotationSummary
  from phoenix.server.api.types.ExperimentRun import ExperimentRun, to_gql_experiment_run
  from phoenix.server.api.types.pagination import (
+     ConnectionArgs,
      Cursor,
      CursorString,
      connection_from_cursors_and_nodes,
+     connection_from_list,
  )
  from phoenix.server.api.types.Project import Project
  from phoenix.server.api.types.SpanCostDetailSummaryEntry import SpanCostDetailSummaryEntry
@@ -228,6 +231,17 @@ class Experiment(Node):
              async for token_type, is_prompt, cost, tokens in data
          ]

+     @strawberry.field
+     async def dataset_splits(
+         self,
+         info: Info[Context, None],
+     ) -> Connection[DatasetSplit]:
+         """Returns the dataset splits associated with this experiment."""
+         splits = await info.context.data_loaders.experiment_dataset_splits.load(self.id_attr)
+         return connection_from_list(
+             [to_gql_dataset_split(split) for split in splits], ConnectionArgs()
+         )
+

  def to_gql_experiment(
      experiment: models.Experiment,
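
Note: the new `dataset_splits` resolver should be reachable over GraphQL; assuming Strawberry's default snake_case-to-camelCase conversion and that DatasetSplit exposes `id` and `name`, a query might look roughly like the sketch below (the experiment GlobalID is a placeholder):

    import httpx

    query = """
    query ($id: ID!) {
      node(id: $id) {
        ... on Experiment {
          datasetSplits {
            edges { node { id name } }
          }
        }
      }
    }
    """
    resp = httpx.post(
        "http://localhost:6006/graphql",
        json={"query": query, "variables": {"id": "RXhwZXJpbWVudDox"}},
    )
    print(resp.json())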
phoenix/server/app.py CHANGED
@@ -97,6 +97,7 @@ from phoenix.server.api.dataloaders import (
      DocumentEvaluationSummaryDataLoader,
      DocumentRetrievalMetricsDataLoader,
      ExperimentAnnotationSummaryDataLoader,
+     ExperimentDatasetSplitsDataLoader,
      ExperimentErrorRatesDataLoader,
      ExperimentRepeatedRunGroupAnnotationSummariesDataLoader,
      ExperimentRepeatedRunGroupsDataLoader,
@@ -735,6 +736,7 @@ def create_graphql_router(
              ),
          ),
          experiment_annotation_summaries=ExperimentAnnotationSummaryDataLoader(db),
+         experiment_dataset_splits=ExperimentDatasetSplitsDataLoader(db),
          experiment_error_rates=ExperimentErrorRatesDataLoader(db),
          experiment_repeated_run_group_annotation_summaries=ExperimentRepeatedRunGroupAnnotationSummariesDataLoader(
              db
phoenix/server/cost_tracking/model_cost_manifest.json CHANGED
@@ -341,6 +341,60 @@
        }
      ]
    },
+   {
+     "name": "claude-haiku-4-5",
+     "name_pattern": "claude-haiku-4-5",
+     "source": "litellm",
+     "token_prices": [
+       {
+         "base_rate": 1e-6,
+         "is_prompt": true,
+         "token_type": "input"
+       },
+       {
+         "base_rate": 5e-6,
+         "is_prompt": false,
+         "token_type": "output"
+       },
+       {
+         "base_rate": 1e-7,
+         "is_prompt": true,
+         "token_type": "cache_read"
+       },
+       {
+         "base_rate": 1.25e-6,
+         "is_prompt": true,
+         "token_type": "cache_write"
+       }
+     ]
+   },
+   {
+     "name": "claude-haiku-4-5-20251001",
+     "name_pattern": "claude-haiku-4-5-20251001",
+     "source": "litellm",
+     "token_prices": [
+       {
+         "base_rate": 1e-6,
+         "is_prompt": true,
+         "token_type": "input"
+       },
+       {
+         "base_rate": 5e-6,
+         "is_prompt": false,
+         "token_type": "output"
+       },
+       {
+         "base_rate": 1e-7,
+         "is_prompt": true,
+         "token_type": "cache_read"
+       },
+       {
+         "base_rate": 1.25e-6,
+         "is_prompt": true,
+         "token_type": "cache_write"
+       }
+     ]
+   },
    {
      "name": "claude-opus-4-1",
      "name_pattern": "claude-opus-4-1",
@@ -741,6 +795,33 @@
        }
      ]
    },
+   {
+     "name": "gemini-2.5-flash-image",
+     "name_pattern": "gemini-2\\.5-flash-image",
+     "source": "litellm",
+     "token_prices": [
+       {
+         "base_rate": 3e-7,
+         "is_prompt": true,
+         "token_type": "input"
+       },
+       {
+         "base_rate": 2.5e-6,
+         "is_prompt": false,
+         "token_type": "output"
+       },
+       {
+         "base_rate": 3e-8,
+         "is_prompt": true,
+         "token_type": "cache_read"
+       },
+       {
+         "base_rate": 1e-6,
+         "is_prompt": true,
+         "token_type": "audio"
+       }
+     ]
+   },
    {
      "name": "gemini-2.5-flash-image-preview",
      "name_pattern": "gemini-2\\.5-flash-image-preview",
@@ -2465,6 +2546,40 @@
        }
      ]
    },
+   {
+     "name": "gpt-5-pro",
+     "name_pattern": "gpt-5-pro",
+     "source": "litellm",
+     "token_prices": [
+       {
+         "base_rate": 0.000015,
+         "is_prompt": true,
+         "token_type": "input"
+       },
+       {
+         "base_rate": 0.00012,
+         "is_prompt": false,
+         "token_type": "output"
+       }
+     ]
+   },
+   {
+     "name": "gpt-5-pro-2025-10-06",
+     "name_pattern": "gpt-5-pro-2025-10-06",
+     "source": "litellm",
+     "token_prices": [
+       {
+         "base_rate": 0.000015,
+         "is_prompt": true,
+         "token_type": "input"
+       },
+       {
+         "base_rate": 0.00012,
+         "is_prompt": false,
+         "token_type": "output"
+       }
+     ]
+   },
    {
      "name": "gpt-realtime",
      "name_pattern": "gpt-realtime",
@@ -2529,6 +2644,33 @@
        }
      ]
    },
+   {
+     "name": "gpt-realtime-mini",
+     "name_pattern": "gpt-realtime-mini",
+     "source": "litellm",
+     "token_prices": [
+       {
+         "base_rate": 6e-7,
+         "is_prompt": true,
+         "token_type": "input"
+       },
+       {
+         "base_rate": 2.4e-6,
+         "is_prompt": false,
+         "token_type": "output"
+       },
+       {
+         "base_rate": 0.00001,
+         "is_prompt": true,
+         "token_type": "audio"
+       },
+       {
+         "base_rate": 0.00002,
+         "is_prompt": false,
+         "token_type": "audio"
+       }
+     ]
+   },
    {
      "name": "o1",
      "name_pattern": "o1",
phoenix/server/static/.vite/manifest.json CHANGED
@@ -1,32 +1,28 @@
  {
-   "_components-XKc983B9.js": {
-     "file": "assets/components-XKc983B9.js",
+   "_components-BLK5vehh.js": {
+     "file": "assets/components-BLK5vehh.js",
      "name": "components",
      "imports": [
-       "_vendor-CQ4tN9P7.js",
-       "_pages-CSZW-lt0.js",
-       "_vendor-arizeai-Cb1ncvYH.js",
-       "_vendor-codemirror-CckmKopH.js",
+       "_vendor-3BvTzoBp.js",
+       "_pages-DIVgyYyy.js",
+       "_vendor-arizeai-C6_oC0y8.js",
+       "_vendor-codemirror-DPnZGAZA.js",
        "_vendor-three-BtCyLs1w.js"
      ]
    },
-   "_pages-CSZW-lt0.js": {
-     "file": "assets/pages-CSZW-lt0.js",
+   "_pages-DIVgyYyy.js": {
+     "file": "assets/pages-DIVgyYyy.js",
      "name": "pages",
      "imports": [
-       "_vendor-CQ4tN9P7.js",
-       "_components-XKc983B9.js",
-       "_vendor-arizeai-Cb1ncvYH.js",
-       "_vendor-codemirror-CckmKopH.js",
-       "_vendor-recharts-BC1ysIKu.js"
+       "_vendor-3BvTzoBp.js",
+       "_components-BLK5vehh.js",
+       "_vendor-arizeai-C6_oC0y8.js",
+       "_vendor-codemirror-DPnZGAZA.js",
+       "_vendor-recharts-CjgSbsB0.js"
      ]
    },
-   "_vendor-BGzfc4EU.css": {
-     "file": "assets/vendor-BGzfc4EU.css",
-     "src": "_vendor-BGzfc4EU.css"
-   },
-   "_vendor-CQ4tN9P7.js": {
-     "file": "assets/vendor-CQ4tN9P7.js",
+   "_vendor-3BvTzoBp.js": {
+     "file": "assets/vendor-3BvTzoBp.js",
      "name": "vendor",
      "imports": [
        "_vendor-three-BtCyLs1w.js"
@@ -35,39 +31,43 @@
        "assets/vendor-BGzfc4EU.css"
      ]
    },
-   "_vendor-arizeai-Cb1ncvYH.js": {
-     "file": "assets/vendor-arizeai-Cb1ncvYH.js",
+   "_vendor-BGzfc4EU.css": {
+     "file": "assets/vendor-BGzfc4EU.css",
+     "src": "_vendor-BGzfc4EU.css"
+   },
+   "_vendor-arizeai-C6_oC0y8.js": {
+     "file": "assets/vendor-arizeai-C6_oC0y8.js",
      "name": "vendor-arizeai",
      "imports": [
-       "_vendor-CQ4tN9P7.js"
+       "_vendor-3BvTzoBp.js"
      ]
    },
-   "_vendor-codemirror-CckmKopH.js": {
-     "file": "assets/vendor-codemirror-CckmKopH.js",
+   "_vendor-codemirror-DPnZGAZA.js": {
+     "file": "assets/vendor-codemirror-DPnZGAZA.js",
      "name": "vendor-codemirror",
      "imports": [
-       "_vendor-CQ4tN9P7.js",
-       "_vendor-shiki-B45T-YxN.js"
+       "_vendor-3BvTzoBp.js",
+       "_vendor-shiki-CJyhDG0E.js"
      ],
      "dynamicImports": [
-       "_vendor-shiki-B45T-YxN.js",
-       "_vendor-shiki-B45T-YxN.js",
-       "_vendor-shiki-B45T-YxN.js"
+       "_vendor-shiki-CJyhDG0E.js",
+       "_vendor-shiki-CJyhDG0E.js",
+       "_vendor-shiki-CJyhDG0E.js"
      ]
    },
-   "_vendor-recharts-BC1ysIKu.js": {
-     "file": "assets/vendor-recharts-BC1ysIKu.js",
+   "_vendor-recharts-CjgSbsB0.js": {
+     "file": "assets/vendor-recharts-CjgSbsB0.js",
      "name": "vendor-recharts",
      "imports": [
-       "_vendor-CQ4tN9P7.js"
+       "_vendor-3BvTzoBp.js"
      ]
    },
-   "_vendor-shiki-B45T-YxN.js": {
-     "file": "assets/vendor-shiki-B45T-YxN.js",
+   "_vendor-shiki-CJyhDG0E.js": {
+     "file": "assets/vendor-shiki-CJyhDG0E.js",
      "name": "vendor-shiki",
      "isDynamicEntry": true,
      "imports": [
-       "_vendor-CQ4tN9P7.js"
+       "_vendor-3BvTzoBp.js"
      ]
    },
    "_vendor-three-BtCyLs1w.js": {
@@ -75,19 +75,19 @@
      "name": "vendor-three"
    },
    "index.tsx": {
-     "file": "assets/index-DG8e74sg.js",
+     "file": "assets/index-BP0Shd90.js",
      "name": "index",
      "src": "index.tsx",
      "isEntry": true,
      "imports": [
-       "_vendor-CQ4tN9P7.js",
-       "_vendor-arizeai-Cb1ncvYH.js",
-       "_pages-CSZW-lt0.js",
-       "_components-XKc983B9.js",
+       "_vendor-3BvTzoBp.js",
+       "_vendor-arizeai-C6_oC0y8.js",
+       "_pages-DIVgyYyy.js",
+       "_components-BLK5vehh.js",
        "_vendor-three-BtCyLs1w.js",
-       "_vendor-codemirror-CckmKopH.js",
-       "_vendor-shiki-B45T-YxN.js",
-       "_vendor-recharts-BC1ysIKu.js"
+       "_vendor-codemirror-DPnZGAZA.js",
+       "_vendor-shiki-CJyhDG0E.js",
+       "_vendor-recharts-CjgSbsB0.js"
      ]
    }
  }