arize-phoenix 11.32.1__py3-none-any.whl → 11.34.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: the registry has flagged this release of arize-phoenix as potentially problematic; consult the registry's advisory page for this version for details.

Files changed (63):
  1. {arize_phoenix-11.32.1.dist-info → arize_phoenix-11.34.0.dist-info}/METADATA +1 -1
  2. {arize_phoenix-11.32.1.dist-info → arize_phoenix-11.34.0.dist-info}/RECORD +57 -50
  3. phoenix/config.py +44 -0
  4. phoenix/db/bulk_inserter.py +111 -116
  5. phoenix/inferences/inferences.py +1 -2
  6. phoenix/server/api/context.py +20 -0
  7. phoenix/server/api/dataloaders/__init__.py +20 -0
  8. phoenix/server/api/dataloaders/average_experiment_repeated_run_group_latency.py +50 -0
  9. phoenix/server/api/dataloaders/dataset_example_revisions.py +0 -1
  10. phoenix/server/api/dataloaders/dataset_examples_and_versions_by_experiment_run.py +47 -0
  11. phoenix/server/api/dataloaders/experiment_repeated_run_group_annotation_summaries.py +77 -0
  12. phoenix/server/api/dataloaders/experiment_repeated_run_groups.py +59 -0
  13. phoenix/server/api/dataloaders/experiment_repetition_counts.py +39 -0
  14. phoenix/server/api/dataloaders/span_cost_summary_by_experiment_repeated_run_group.py +64 -0
  15. phoenix/server/api/helpers/playground_clients.py +4 -0
  16. phoenix/server/api/mutations/prompt_label_mutations.py +67 -58
  17. phoenix/server/api/queries.py +52 -37
  18. phoenix/server/api/routers/v1/documents.py +1 -1
  19. phoenix/server/api/routers/v1/evaluations.py +4 -4
  20. phoenix/server/api/routers/v1/experiment_runs.py +1 -1
  21. phoenix/server/api/routers/v1/experiments.py +1 -1
  22. phoenix/server/api/routers/v1/spans.py +2 -2
  23. phoenix/server/api/routers/v1/traces.py +18 -3
  24. phoenix/server/api/types/DatasetExample.py +49 -1
  25. phoenix/server/api/types/Experiment.py +12 -2
  26. phoenix/server/api/types/ExperimentComparison.py +3 -9
  27. phoenix/server/api/types/ExperimentRepeatedRunGroup.py +146 -0
  28. phoenix/server/api/types/ExperimentRepeatedRunGroupAnnotationSummary.py +9 -0
  29. phoenix/server/api/types/ExperimentRun.py +12 -19
  30. phoenix/server/api/types/Prompt.py +11 -0
  31. phoenix/server/api/types/PromptLabel.py +2 -19
  32. phoenix/server/api/types/node.py +10 -0
  33. phoenix/server/app.py +78 -20
  34. phoenix/server/cost_tracking/model_cost_manifest.json +1 -1
  35. phoenix/server/daemons/span_cost_calculator.py +10 -8
  36. phoenix/server/grpc_server.py +9 -9
  37. phoenix/server/prometheus.py +30 -6
  38. phoenix/server/static/.vite/manifest.json +43 -43
  39. phoenix/server/static/assets/components-CdQiQTvs.js +5778 -0
  40. phoenix/server/static/assets/{index-D1FDMBMV.js → index-B1VuXYRI.js} +12 -21
  41. phoenix/server/static/assets/pages-CnfZ3RhB.js +9163 -0
  42. phoenix/server/static/assets/vendor-BGzfc4EU.css +1 -0
  43. phoenix/server/static/assets/vendor-Cfrr9FCF.js +903 -0
  44. phoenix/server/static/assets/{vendor-arizeai-DsYDNOqt.js → vendor-arizeai-Dz0kN-lQ.js} +4 -4
  45. phoenix/server/static/assets/vendor-codemirror-ClqtONZQ.js +25 -0
  46. phoenix/server/static/assets/{vendor-recharts-BTHn5Y2R.js → vendor-recharts-D6kvOpmb.js} +2 -2
  47. phoenix/server/static/assets/{vendor-shiki-BAcocHFl.js → vendor-shiki-xSOiKxt0.js} +1 -1
  48. phoenix/session/client.py +55 -1
  49. phoenix/session/data_extractor.py +5 -0
  50. phoenix/session/evaluation.py +8 -4
  51. phoenix/session/session.py +13 -0
  52. phoenix/trace/projects.py +1 -2
  53. phoenix/version.py +1 -1
  54. phoenix/server/static/assets/components-Cs9c4Nxp.js +0 -5698
  55. phoenix/server/static/assets/pages-Cbj9SjBx.js +0 -8928
  56. phoenix/server/static/assets/vendor-CqDb5u4o.css +0 -1
  57. phoenix/server/static/assets/vendor-RdRDaQiR.js +0 -905
  58. phoenix/server/static/assets/vendor-codemirror-BzJDUbEx.js +0 -25
  59. phoenix/utilities/deprecation.py +0 -31
  60. {arize_phoenix-11.32.1.dist-info → arize_phoenix-11.34.0.dist-info}/WHEEL +0 -0
  61. {arize_phoenix-11.32.1.dist-info → arize_phoenix-11.34.0.dist-info}/entry_points.txt +0 -0
  62. {arize_phoenix-11.32.1.dist-info → arize_phoenix-11.34.0.dist-info}/licenses/IP_NOTICE +0 -0
  63. {arize_phoenix-11.32.1.dist-info → arize_phoenix-11.34.0.dist-info}/licenses/LICENSE +0 -0
phoenix/server/app.py CHANGED
@@ -4,7 +4,6 @@ import importlib
4
4
  import json
5
5
  import logging
6
6
  import os
7
- from collections.abc import AsyncIterator, Awaitable, Callable, Iterable, Sequence
8
7
  from contextlib import AbstractAsyncContextManager, AsyncExitStack
9
8
  from dataclasses import dataclass, field
10
9
  from datetime import datetime, timedelta, timezone
@@ -14,9 +13,14 @@ from types import MethodType
14
13
  from typing import (
15
14
  TYPE_CHECKING,
16
15
  Any,
16
+ AsyncIterator,
17
+ Awaitable,
18
+ Callable,
19
+ Iterable,
17
20
  NamedTuple,
18
21
  Optional,
19
22
  Protocol,
23
+ Sequence,
20
24
  TypedDict,
21
25
  Union,
22
26
  cast,
@@ -64,6 +68,7 @@ from phoenix.config import (
64
68
  get_env_grpc_interceptor_paths,
65
69
  get_env_host,
66
70
  get_env_host_root_path,
71
+ get_env_max_spans_queue_size,
67
72
  get_env_port,
68
73
  get_env_support_email,
69
74
  server_instrumentation_is_enabled,
@@ -81,15 +86,20 @@ from phoenix.server.api.context import Context, DataLoaders
81
86
  from phoenix.server.api.dataloaders import (
82
87
  AnnotationConfigsByProjectDataLoader,
83
88
  AnnotationSummaryDataLoader,
89
+ AverageExperimentRepeatedRunGroupLatencyDataLoader,
84
90
  AverageExperimentRunLatencyDataLoader,
85
91
  CacheForDataLoaders,
86
92
  DatasetExampleRevisionsDataLoader,
93
+ DatasetExamplesAndVersionsByExperimentRunDataLoader,
87
94
  DatasetExampleSpansDataLoader,
88
95
  DocumentEvaluationsDataLoader,
89
96
  DocumentEvaluationSummaryDataLoader,
90
97
  DocumentRetrievalMetricsDataLoader,
91
98
  ExperimentAnnotationSummaryDataLoader,
92
99
  ExperimentErrorRatesDataLoader,
100
+ ExperimentRepeatedRunGroupAnnotationSummariesDataLoader,
101
+ ExperimentRepeatedRunGroupsDataLoader,
102
+ ExperimentRepetitionCountsDataLoader,
93
103
  ExperimentRunAnnotations,
94
104
  ExperimentRunCountsDataLoader,
95
105
  ExperimentSequenceNumberDataLoader,
@@ -116,6 +126,7 @@ from phoenix.server.api.dataloaders import (
116
126
  SpanCostDetailSummaryEntriesBySpanDataLoader,
117
127
  SpanCostDetailSummaryEntriesByTraceDataLoader,
118
128
  SpanCostSummaryByExperimentDataLoader,
129
+ SpanCostSummaryByExperimentRepeatedRunGroupDataLoader,
119
130
  SpanCostSummaryByExperimentRunDataLoader,
120
131
  SpanCostSummaryByGenerativeModelDataLoader,
121
132
  SpanCostSummaryByProjectDataLoader,
@@ -151,6 +162,7 @@ from phoenix.server.grpc_server import GrpcServer
151
162
  from phoenix.server.jwt_store import JwtStore
152
163
  from phoenix.server.middleware.gzip import GZipMiddleware
153
164
  from phoenix.server.oauth2 import OAuth2Clients
165
+ from phoenix.server.prometheus import SPAN_QUEUE_REJECTIONS
154
166
  from phoenix.server.retention import TraceDataSweeper
155
167
  from phoenix.server.telemetry import initialize_opentelemetry_tracer_provider
156
168
  from phoenix.server.types import (
@@ -427,13 +439,13 @@ class Scaffolder(DaemonTask):
427
439
  def __init__(
428
440
  self,
429
441
  config: ScaffolderConfig,
430
- queue_span: Callable[[Span, ProjectName], Awaitable[None]],
431
- queue_evaluation: Callable[[pb.Evaluation], Awaitable[None]],
442
+ enqueue_span: Callable[[Span, ProjectName], Awaitable[None]],
443
+ enqueue_evaluation: Callable[[pb.Evaluation], Awaitable[None]],
432
444
  ) -> None:
433
445
  super().__init__()
434
446
  self._db = config.db
435
- self._queue_span = queue_span
436
- self._queue_evaluation = queue_evaluation
447
+ self._enqueue_span = enqueue_span
448
+ self._enqueue_evaluation = enqueue_evaluation
437
449
  self._tracing_fixtures = [
438
450
  get_trace_fixture_by_name(name) for name in set(config.tracing_fixture_names)
439
451
  ]
@@ -504,9 +516,9 @@ class Scaffolder(DaemonTask):
504
516
  project_name = fixture.project_name or fixture.name
505
517
  logger.info(f"Loading '{project_name}' fixtures...")
506
518
  for span in fixture_spans:
507
- await self._queue_span(span, project_name)
519
+ await self._enqueue_span(span, project_name)
508
520
  for evaluation in fixture_evals:
509
- await self._queue_evaluation(evaluation)
521
+ await self._enqueue_evaluation(evaluation)
510
522
 
511
523
  except FileNotFoundError:
512
524
  logger.warning(f"Fixture file not found for '{fixture.name}'")
@@ -529,6 +541,32 @@ class Scaffolder(DaemonTask):
529
541
  logger.error(f"Error processing dataset fixture: {e}")
530
542
 
531
543
 
544
+ class _CapacityIndicator(Protocol):
545
+ @property
546
+ def is_full(self) -> bool: ...
547
+
548
+
549
+ class CapacityInterceptor(AsyncServerInterceptor):
550
+ def __init__(self, indicator: _CapacityIndicator):
551
+ self._indicator = indicator
552
+
553
+ @override
554
+ async def intercept(
555
+ self,
556
+ method: Callable[[Any, grpc.aio.ServicerContext], Awaitable[Any]],
557
+ request_or_iterator: Any,
558
+ context: grpc.aio.ServicerContext,
559
+ method_name: str,
560
+ ) -> Any:
561
+ if self._indicator.is_full:
562
+ SPAN_QUEUE_REJECTIONS.inc()
563
+ context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED)
564
+ context.set_details("Server is at capacity and cannot process more requests")
565
+ return
566
+
567
+ return await method(request_or_iterator, context)
568
+
569
+
532
570
  def _lifespan(
533
571
  *,
534
572
  db: DbSessionFactory,
@@ -555,18 +593,23 @@ def _lifespan(
555
593
  db.lock = asyncio.Lock() if db.dialect is SupportedSQLDialect.SQLITE else None
556
594
  async with AsyncExitStack() as stack:
557
595
  (
558
- enqueue,
559
- queue_span,
560
- queue_evaluation,
596
+ enqueue_annotations,
597
+ enqueue_span,
598
+ enqueue_evaluation,
561
599
  enqueue_operation,
562
600
  ) = await stack.enter_async_context(bulk_inserter)
601
+ interceptors = [
602
+ CapacityInterceptor(bulk_inserter),
603
+ *user_grpc_interceptors(),
604
+ *grpc_interceptors,
605
+ ]
563
606
  grpc_server = GrpcServer(
564
- queue_span,
607
+ enqueue_span,
565
608
  disabled=read_only,
566
609
  tracer_provider=tracer_provider,
567
610
  enable_prometheus=enable_prometheus,
568
611
  token_store=token_store,
569
- interceptors=user_grpc_interceptors() + list(grpc_interceptors),
612
+ interceptors=interceptors,
570
613
  )
571
614
  await stack.enter_async_context(grpc_server)
572
615
  await stack.enter_async_context(dml_event_handler)
@@ -578,17 +621,17 @@ def _lifespan(
578
621
  if scaffolder_config:
579
622
  scaffolder = Scaffolder(
580
623
  config=scaffolder_config,
581
- queue_span=queue_span,
582
- queue_evaluation=queue_evaluation,
624
+ enqueue_span=enqueue_span,
625
+ enqueue_evaluation=enqueue_evaluation,
583
626
  )
584
627
  await stack.enter_async_context(scaffolder)
585
628
  if isinstance(token_store, AbstractAsyncContextManager):
586
629
  await stack.enter_async_context(token_store)
587
630
  yield {
588
631
  "event_queue": dml_event_handler,
589
- "enqueue": enqueue,
590
- "queue_span_for_bulk_insert": queue_span,
591
- "queue_evaluation_for_bulk_insert": queue_evaluation,
632
+ "enqueue_annotations": enqueue_annotations,
633
+ "enqueue_span": enqueue_span,
634
+ "enqueue_evaluation": enqueue_evaluation,
592
635
  "enqueue_operation": enqueue_operation,
593
636
  }
594
637
  for callback in shutdown_callbacks:
@@ -663,9 +706,15 @@ def create_graphql_router(
663
706
  event_queue=event_queue,
664
707
  data_loaders=DataLoaders(
665
708
  annotation_configs_by_project=AnnotationConfigsByProjectDataLoader(db),
709
+ average_experiment_repeated_run_group_latency=AverageExperimentRepeatedRunGroupLatencyDataLoader(
710
+ db
711
+ ),
666
712
  average_experiment_run_latency=AverageExperimentRunLatencyDataLoader(db),
667
713
  dataset_example_revisions=DatasetExampleRevisionsDataLoader(db),
668
714
  dataset_example_spans=DatasetExampleSpansDataLoader(db),
715
+ dataset_examples_and_versions_by_experiment_run=DatasetExamplesAndVersionsByExperimentRunDataLoader(
716
+ db
717
+ ),
669
718
  document_evaluation_summaries=DocumentEvaluationSummaryDataLoader(
670
719
  db,
671
720
  cache_map=(
@@ -684,6 +733,11 @@ def create_graphql_router(
684
733
  ),
685
734
  experiment_annotation_summaries=ExperimentAnnotationSummaryDataLoader(db),
686
735
  experiment_error_rates=ExperimentErrorRatesDataLoader(db),
736
+ experiment_repeated_run_group_annotation_summaries=ExperimentRepeatedRunGroupAnnotationSummariesDataLoader(
737
+ db
738
+ ),
739
+ experiment_repeated_run_groups=ExperimentRepeatedRunGroupsDataLoader(db),
740
+ experiment_repetition_counts=ExperimentRepetitionCountsDataLoader(db),
687
741
  experiment_run_annotations=ExperimentRunAnnotations(db),
688
742
  experiment_run_counts=ExperimentRunCountsDataLoader(db),
689
743
  experiment_sequence_number=ExperimentSequenceNumberDataLoader(db),
@@ -740,6 +794,11 @@ def create_graphql_router(
740
794
  span_cost_details_by_span_cost=SpanCostDetailsBySpanCostDataLoader(db),
741
795
  span_cost_detail_fields=TableFieldsDataLoader(db, models.SpanCostDetail),
742
796
  span_cost_fields=TableFieldsDataLoader(db, models.SpanCost),
797
+ span_cost_summary_by_experiment=SpanCostSummaryByExperimentDataLoader(db),
798
+ span_cost_summary_by_experiment_repeated_run_group=SpanCostSummaryByExperimentRepeatedRunGroupDataLoader(
799
+ db
800
+ ),
801
+ span_cost_summary_by_experiment_run=SpanCostSummaryByExperimentRunDataLoader(db),
743
802
  span_cost_summary_by_generative_model=SpanCostSummaryByGenerativeModelDataLoader(
744
803
  db
745
804
  ),
@@ -768,8 +827,6 @@ def create_graphql_router(
768
827
  project_by_name=ProjectByNameDataLoader(db),
769
828
  users=UsersDataLoader(db),
770
829
  user_roles=UserRolesDataLoader(db),
771
- span_cost_summary_by_experiment=SpanCostSummaryByExperimentDataLoader(db),
772
- span_cost_summary_by_experiment_run=SpanCostSummaryByExperimentRunDataLoader(db),
773
830
  ),
774
831
  cache_for_dataloaders=cache_for_dataloaders,
775
832
  read_only=read_only,
@@ -969,11 +1026,11 @@ def create_app(
969
1026
  span_cost_calculator = SpanCostCalculator(db, generative_model_store)
970
1027
  bulk_inserter = bulk_inserter_factory(
971
1028
  db,
972
- enable_prometheus=enable_prometheus,
973
1029
  span_cost_calculator=span_cost_calculator,
974
1030
  event_queue=dml_event_handler,
975
1031
  initial_batch_of_spans=initial_batch_of_spans,
976
1032
  initial_batch_of_evaluations=initial_batch_of_evaluations,
1033
+ max_spans_queue_size=get_env_max_spans_queue_size(),
977
1034
  )
978
1035
  tracer_provider = None
979
1036
  graphql_schema_extensions: list[Union[type[SchemaExtension], SchemaExtension]] = []
@@ -1104,6 +1161,7 @@ def create_app(
1104
1161
  app.state.db = db
1105
1162
  app.state.email_sender = email_sender
1106
1163
  app.state.span_cost_calculator = span_cost_calculator
1164
+ app.state.span_queue_is_full = lambda: bulk_inserter.is_full
1107
1165
  app = _add_get_secret_method(app=app, secret=secret)
1108
1166
  app = _add_get_token_store_method(app=app, token_store=token_store)
1109
1167
  if tracer_provider:
@@ -698,7 +698,7 @@
698
698
  "token_type": "input"
699
699
  },
700
700
  {
701
- "base_rate": 2.5e-6,
701
+ "base_rate": 0.00003,
702
702
  "is_prompt": false,
703
703
  "token_type": "output"
704
704
  },
@@ -2,6 +2,7 @@ from __future__ import annotations
2
2
 
3
3
  import logging
4
4
  from asyncio import sleep
5
+ from collections import deque
5
6
  from datetime import datetime
6
7
  from typing import Any, Mapping, NamedTuple, Optional
7
8
 
@@ -35,21 +36,25 @@ class SpanCostCalculator(DaemonTask):
35
36
  super().__init__()
36
37
  self._db = db
37
38
  self._model_store = model_store
38
- self._queue: list[SpanCostCalculatorQueueItem] = []
39
+ self._queue: deque[SpanCostCalculatorQueueItem] = deque()
40
+ self._max_items_per_transaction = 1000
39
41
 
40
42
  async def _run(self) -> None:
41
43
  while self._running:
44
+ num_items_to_insert = min(self._max_items_per_transaction, len(self._queue))
42
45
  try:
43
- await self._insert_costs()
46
+ await self._insert_costs(num_items_to_insert)
44
47
  except Exception as e:
45
48
  logger.exception(f"Failed to insert costs: {e}")
46
49
  await sleep(self._SLEEP_INTERVAL)
47
50
 
48
- async def _insert_costs(self) -> None:
49
- if not self._queue:
51
+ async def _insert_costs(self, num_items_to_insert: int) -> None:
52
+ if not num_items_to_insert or not self._queue:
50
53
  return
51
54
  costs: list[models.SpanCost] = []
52
- for item in self._queue:
55
+ while num_items_to_insert > 0:
56
+ num_items_to_insert -= 1
57
+ item = self._queue.popleft()
53
58
  try:
54
59
  cost = self.calculate_cost(item.span_start_time, item.attributes)
55
60
  except Exception as e:
@@ -65,9 +70,6 @@ class SpanCostCalculator(DaemonTask):
65
70
  session.add_all(costs)
66
71
  except Exception as e:
67
72
  logger.exception(f"Failed to insert costs: {e}")
68
- finally:
69
- # Clear the queue after processing
70
- self._queue.clear()
71
73
 
72
74
  def put_nowait(self, item: SpanCostCalculatorQueueItem) -> None:
73
75
  self._queue.append(item)
@@ -1,5 +1,4 @@
1
- from collections.abc import Awaitable, Callable
2
- from typing import TYPE_CHECKING, Any, Iterable, Optional
1
+ from typing import TYPE_CHECKING, Any, Awaitable, Callable, Iterable, Optional
3
2
 
4
3
  import grpc
5
4
  from grpc.aio import RpcContext, Server, ServerInterceptor
@@ -11,6 +10,7 @@ from opentelemetry.proto.collector.trace.v1.trace_service_pb2_grpc import (
11
10
  TraceServiceServicer,
12
11
  add_TraceServiceServicer_to_server,
13
12
  )
13
+ from starlette.concurrency import run_in_threadpool
14
14
  from typing_extensions import TypeAlias
15
15
 
16
16
  from phoenix.auth import CanReadToken
@@ -34,10 +34,10 @@ ProjectName: TypeAlias = str
34
34
  class Servicer(TraceServiceServicer): # type: ignore[misc,unused-ignore]
35
35
  def __init__(
36
36
  self,
37
- callback: Callable[[Span, ProjectName], Awaitable[None]],
37
+ enqueue_span: Callable[[Span, ProjectName], Awaitable[None]],
38
38
  ) -> None:
39
39
  super().__init__()
40
- self._callback = callback
40
+ self._enqueue_span = enqueue_span
41
41
 
42
42
  async def Export(
43
43
  self,
@@ -48,22 +48,22 @@ class Servicer(TraceServiceServicer): # type: ignore[misc,unused-ignore]
48
48
  project_name = get_project_name(resource_spans.resource.attributes)
49
49
  for scope_span in resource_spans.scope_spans:
50
50
  for otlp_span in scope_span.spans:
51
- span = decode_otlp_span(otlp_span)
52
- await self._callback(span, project_name)
51
+ span = await run_in_threadpool(decode_otlp_span, otlp_span)
52
+ await self._enqueue_span(span, project_name)
53
53
  return ExportTraceServiceResponse()
54
54
 
55
55
 
56
56
  class GrpcServer:
57
57
  def __init__(
58
58
  self,
59
- callback: Callable[[Span, ProjectName], Awaitable[None]],
59
+ enqueue_span: Callable[[Span, ProjectName], Awaitable[None]],
60
60
  tracer_provider: Optional["TracerProvider"] = None,
61
61
  enable_prometheus: bool = False,
62
62
  disabled: bool = False,
63
63
  token_store: Optional[CanReadToken] = None,
64
64
  interceptors: Iterable[ServerInterceptor] = (),
65
65
  ) -> None:
66
- self._callback = callback
66
+ self._enqueue_span = enqueue_span
67
67
  self._server: Optional[Server] = None
68
68
  self._tracer_provider = tracer_provider
69
69
  self._enable_prometheus = enable_prometheus
@@ -106,7 +106,7 @@ class GrpcServer:
106
106
  server.add_secure_port(f"[::]:{get_env_grpc_port()}", server_credentials)
107
107
  else:
108
108
  server.add_insecure_port(f"[::]:{get_env_grpc_port()}")
109
- add_TraceServiceServicer_to_server(Servicer(self._callback), server) # type: ignore[no-untyped-call,unused-ignore]
109
+ add_TraceServiceServicer_to_server(Servicer(self._enqueue_span), server) # type: ignore[no-untyped-call,unused-ignore]
110
110
  await server.start()
111
111
  self._server = server
112
112
 
@@ -9,6 +9,7 @@ import psutil
9
9
  from prometheus_client import (
10
10
  Counter,
11
11
  Gauge,
12
+ Histogram,
12
13
  Summary,
13
14
  start_http_server,
14
15
  )
@@ -36,14 +37,19 @@ CPU_METRIC = Gauge(
36
37
  name="cpu_usage_percent",
37
38
  documentation="CPU usage percent",
38
39
  )
39
- BULK_LOADER_INSERTION_TIME = Summary(
40
- name="bulk_loader_insertion_time_seconds_summary",
41
- documentation="Summary of database insertion time (seconds)",
40
+ BULK_LOADER_SPAN_INSERTION_TIME = Histogram(
41
+ namespace="phoenix",
42
+ name="bulk_loader_span_insertion_time_seconds",
43
+ documentation="Histogram of span database insertion time (seconds)",
44
+ buckets=[0.5, 1.0, 2.0, 5.0, 10.0, 30.0, 60.0, 180.0], # 500ms to 3min
42
45
  )
43
- BULK_LOADER_SPAN_INSERTIONS = Counter(
44
- name="bulk_loader_span_insertions_total",
45
- documentation="Total count of bulk loader span insertions",
46
+
47
+ BULK_LOADER_SPAN_EXCEPTIONS = Counter(
48
+ namespace="phoenix",
49
+ name="bulk_loader_span_exceptions_total",
50
+ documentation="Total count of span insertion exceptions",
46
51
  )
52
+
47
53
  BULK_LOADER_EVALUATION_INSERTIONS = Counter(
48
54
  name="bulk_loader_evaluation_insertions_total",
49
55
  documentation="Total count of bulk loader evaluation insertions",
@@ -95,6 +101,24 @@ DB_DISK_USAGE_WARNING_EMAIL_ERRORS = Counter(
95
101
  documentation="Total count of database disk usage warning email send errors",
96
102
  )
97
103
 
104
+ SPAN_QUEUE_REJECTIONS = Counter(
105
+ namespace="phoenix",
106
+ name="span_queue_rejections_total",
107
+ documentation="Total count of requests rejected due to span queue being full",
108
+ )
109
+
110
+ SPAN_QUEUE_SIZE = Gauge(
111
+ namespace="phoenix",
112
+ name="span_queue_size",
113
+ documentation="Current number of spans in the processing queue",
114
+ )
115
+
116
+ BULK_LOADER_LAST_ACTIVITY = Gauge(
117
+ namespace="phoenix",
118
+ name="bulk_loader_last_activity_timestamp_seconds",
119
+ documentation="Unix timestamp when bulk loader last processed items",
120
+ )
121
+
98
122
 
99
123
  class PrometheusMiddleware(BaseHTTPMiddleware):
100
124
  async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) -> Response:
@@ -1,73 +1,73 @@
1
1
  {
2
- "_components-Cs9c4Nxp.js": {
3
- "file": "assets/components-Cs9c4Nxp.js",
2
+ "_components-CdQiQTvs.js": {
3
+ "file": "assets/components-CdQiQTvs.js",
4
4
  "name": "components",
5
5
  "imports": [
6
- "_vendor-RdRDaQiR.js",
7
- "_pages-Cbj9SjBx.js",
8
- "_vendor-arizeai-DsYDNOqt.js",
9
- "_vendor-codemirror-BzJDUbEx.js",
6
+ "_vendor-Cfrr9FCF.js",
7
+ "_pages-CnfZ3RhB.js",
8
+ "_vendor-arizeai-Dz0kN-lQ.js",
9
+ "_vendor-codemirror-ClqtONZQ.js",
10
10
  "_vendor-three-BLWp5bic.js"
11
11
  ]
12
12
  },
13
- "_pages-Cbj9SjBx.js": {
14
- "file": "assets/pages-Cbj9SjBx.js",
13
+ "_pages-CnfZ3RhB.js": {
14
+ "file": "assets/pages-CnfZ3RhB.js",
15
15
  "name": "pages",
16
16
  "imports": [
17
- "_vendor-RdRDaQiR.js",
18
- "_vendor-arizeai-DsYDNOqt.js",
19
- "_components-Cs9c4Nxp.js",
20
- "_vendor-codemirror-BzJDUbEx.js",
21
- "_vendor-recharts-BTHn5Y2R.js"
17
+ "_vendor-Cfrr9FCF.js",
18
+ "_vendor-arizeai-Dz0kN-lQ.js",
19
+ "_components-CdQiQTvs.js",
20
+ "_vendor-codemirror-ClqtONZQ.js",
21
+ "_vendor-recharts-D6kvOpmb.js"
22
22
  ]
23
23
  },
24
- "_vendor-CqDb5u4o.css": {
25
- "file": "assets/vendor-CqDb5u4o.css",
26
- "src": "_vendor-CqDb5u4o.css"
24
+ "_vendor-BGzfc4EU.css": {
25
+ "file": "assets/vendor-BGzfc4EU.css",
26
+ "src": "_vendor-BGzfc4EU.css"
27
27
  },
28
- "_vendor-RdRDaQiR.js": {
29
- "file": "assets/vendor-RdRDaQiR.js",
28
+ "_vendor-Cfrr9FCF.js": {
29
+ "file": "assets/vendor-Cfrr9FCF.js",
30
30
  "name": "vendor",
31
31
  "imports": [
32
32
  "_vendor-three-BLWp5bic.js"
33
33
  ],
34
34
  "css": [
35
- "assets/vendor-CqDb5u4o.css"
35
+ "assets/vendor-BGzfc4EU.css"
36
36
  ]
37
37
  },
38
- "_vendor-arizeai-DsYDNOqt.js": {
39
- "file": "assets/vendor-arizeai-DsYDNOqt.js",
38
+ "_vendor-arizeai-Dz0kN-lQ.js": {
39
+ "file": "assets/vendor-arizeai-Dz0kN-lQ.js",
40
40
  "name": "vendor-arizeai",
41
41
  "imports": [
42
- "_vendor-RdRDaQiR.js"
42
+ "_vendor-Cfrr9FCF.js"
43
43
  ]
44
44
  },
45
- "_vendor-codemirror-BzJDUbEx.js": {
46
- "file": "assets/vendor-codemirror-BzJDUbEx.js",
45
+ "_vendor-codemirror-ClqtONZQ.js": {
46
+ "file": "assets/vendor-codemirror-ClqtONZQ.js",
47
47
  "name": "vendor-codemirror",
48
48
  "imports": [
49
- "_vendor-RdRDaQiR.js",
50
- "_vendor-shiki-BAcocHFl.js"
49
+ "_vendor-Cfrr9FCF.js",
50
+ "_vendor-shiki-xSOiKxt0.js"
51
51
  ],
52
52
  "dynamicImports": [
53
- "_vendor-shiki-BAcocHFl.js",
54
- "_vendor-shiki-BAcocHFl.js",
55
- "_vendor-shiki-BAcocHFl.js"
53
+ "_vendor-shiki-xSOiKxt0.js",
54
+ "_vendor-shiki-xSOiKxt0.js",
55
+ "_vendor-shiki-xSOiKxt0.js"
56
56
  ]
57
57
  },
58
- "_vendor-recharts-BTHn5Y2R.js": {
59
- "file": "assets/vendor-recharts-BTHn5Y2R.js",
58
+ "_vendor-recharts-D6kvOpmb.js": {
59
+ "file": "assets/vendor-recharts-D6kvOpmb.js",
60
60
  "name": "vendor-recharts",
61
61
  "imports": [
62
- "_vendor-RdRDaQiR.js"
62
+ "_vendor-Cfrr9FCF.js"
63
63
  ]
64
64
  },
65
- "_vendor-shiki-BAcocHFl.js": {
66
- "file": "assets/vendor-shiki-BAcocHFl.js",
65
+ "_vendor-shiki-xSOiKxt0.js": {
66
+ "file": "assets/vendor-shiki-xSOiKxt0.js",
67
67
  "name": "vendor-shiki",
68
68
  "isDynamicEntry": true,
69
69
  "imports": [
70
- "_vendor-RdRDaQiR.js"
70
+ "_vendor-Cfrr9FCF.js"
71
71
  ]
72
72
  },
73
73
  "_vendor-three-BLWp5bic.js": {
@@ -75,19 +75,19 @@
75
75
  "name": "vendor-three"
76
76
  },
77
77
  "index.tsx": {
78
- "file": "assets/index-D1FDMBMV.js",
78
+ "file": "assets/index-B1VuXYRI.js",
79
79
  "name": "index",
80
80
  "src": "index.tsx",
81
81
  "isEntry": true,
82
82
  "imports": [
83
- "_vendor-RdRDaQiR.js",
84
- "_vendor-arizeai-DsYDNOqt.js",
85
- "_pages-Cbj9SjBx.js",
86
- "_components-Cs9c4Nxp.js",
83
+ "_vendor-Cfrr9FCF.js",
84
+ "_vendor-arizeai-Dz0kN-lQ.js",
85
+ "_pages-CnfZ3RhB.js",
86
+ "_components-CdQiQTvs.js",
87
87
  "_vendor-three-BLWp5bic.js",
88
- "_vendor-codemirror-BzJDUbEx.js",
89
- "_vendor-shiki-BAcocHFl.js",
90
- "_vendor-recharts-BTHn5Y2R.js"
88
+ "_vendor-codemirror-ClqtONZQ.js",
89
+ "_vendor-shiki-xSOiKxt0.js",
90
+ "_vendor-recharts-D6kvOpmb.js"
91
91
  ]
92
92
  }
93
93
  }