arize-phoenix 3.16.0__py3-none-any.whl → 7.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of arize-phoenix has been flagged as potentially problematic; consult the package registry's advisory page for details.

Files changed (338):
  1. arize_phoenix-7.7.0.dist-info/METADATA +261 -0
  2. arize_phoenix-7.7.0.dist-info/RECORD +345 -0
  3. {arize_phoenix-3.16.0.dist-info → arize_phoenix-7.7.0.dist-info}/WHEEL +1 -1
  4. arize_phoenix-7.7.0.dist-info/entry_points.txt +3 -0
  5. phoenix/__init__.py +86 -14
  6. phoenix/auth.py +309 -0
  7. phoenix/config.py +675 -45
  8. phoenix/core/model.py +32 -30
  9. phoenix/core/model_schema.py +102 -109
  10. phoenix/core/model_schema_adapter.py +48 -45
  11. phoenix/datetime_utils.py +24 -3
  12. phoenix/db/README.md +54 -0
  13. phoenix/db/__init__.py +4 -0
  14. phoenix/db/alembic.ini +85 -0
  15. phoenix/db/bulk_inserter.py +294 -0
  16. phoenix/db/engines.py +208 -0
  17. phoenix/db/enums.py +20 -0
  18. phoenix/db/facilitator.py +113 -0
  19. phoenix/db/helpers.py +159 -0
  20. phoenix/db/insertion/constants.py +2 -0
  21. phoenix/db/insertion/dataset.py +227 -0
  22. phoenix/db/insertion/document_annotation.py +171 -0
  23. phoenix/db/insertion/evaluation.py +191 -0
  24. phoenix/db/insertion/helpers.py +98 -0
  25. phoenix/db/insertion/span.py +193 -0
  26. phoenix/db/insertion/span_annotation.py +158 -0
  27. phoenix/db/insertion/trace_annotation.py +158 -0
  28. phoenix/db/insertion/types.py +256 -0
  29. phoenix/db/migrate.py +86 -0
  30. phoenix/db/migrations/data_migration_scripts/populate_project_sessions.py +199 -0
  31. phoenix/db/migrations/env.py +114 -0
  32. phoenix/db/migrations/script.py.mako +26 -0
  33. phoenix/db/migrations/versions/10460e46d750_datasets.py +317 -0
  34. phoenix/db/migrations/versions/3be8647b87d8_add_token_columns_to_spans_table.py +126 -0
  35. phoenix/db/migrations/versions/4ded9e43755f_create_project_sessions_table.py +66 -0
  36. phoenix/db/migrations/versions/cd164e83824f_users_and_tokens.py +157 -0
  37. phoenix/db/migrations/versions/cf03bd6bae1d_init.py +280 -0
  38. phoenix/db/models.py +807 -0
  39. phoenix/exceptions.py +5 -1
  40. phoenix/experiments/__init__.py +6 -0
  41. phoenix/experiments/evaluators/__init__.py +29 -0
  42. phoenix/experiments/evaluators/base.py +158 -0
  43. phoenix/experiments/evaluators/code_evaluators.py +184 -0
  44. phoenix/experiments/evaluators/llm_evaluators.py +473 -0
  45. phoenix/experiments/evaluators/utils.py +236 -0
  46. phoenix/experiments/functions.py +772 -0
  47. phoenix/experiments/tracing.py +86 -0
  48. phoenix/experiments/types.py +726 -0
  49. phoenix/experiments/utils.py +25 -0
  50. phoenix/inferences/__init__.py +0 -0
  51. phoenix/{datasets → inferences}/errors.py +6 -5
  52. phoenix/{datasets → inferences}/fixtures.py +49 -42
  53. phoenix/{datasets/dataset.py → inferences/inferences.py} +121 -105
  54. phoenix/{datasets → inferences}/schema.py +11 -11
  55. phoenix/{datasets → inferences}/validation.py +13 -14
  56. phoenix/logging/__init__.py +3 -0
  57. phoenix/logging/_config.py +90 -0
  58. phoenix/logging/_filter.py +6 -0
  59. phoenix/logging/_formatter.py +69 -0
  60. phoenix/metrics/__init__.py +5 -4
  61. phoenix/metrics/binning.py +4 -3
  62. phoenix/metrics/metrics.py +2 -1
  63. phoenix/metrics/mixins.py +7 -6
  64. phoenix/metrics/retrieval_metrics.py +2 -1
  65. phoenix/metrics/timeseries.py +5 -4
  66. phoenix/metrics/wrappers.py +9 -3
  67. phoenix/pointcloud/clustering.py +5 -5
  68. phoenix/pointcloud/pointcloud.py +7 -5
  69. phoenix/pointcloud/projectors.py +5 -6
  70. phoenix/pointcloud/umap_parameters.py +53 -52
  71. phoenix/server/api/README.md +28 -0
  72. phoenix/server/api/auth.py +44 -0
  73. phoenix/server/api/context.py +152 -9
  74. phoenix/server/api/dataloaders/__init__.py +91 -0
  75. phoenix/server/api/dataloaders/annotation_summaries.py +139 -0
  76. phoenix/server/api/dataloaders/average_experiment_run_latency.py +54 -0
  77. phoenix/server/api/dataloaders/cache/__init__.py +3 -0
  78. phoenix/server/api/dataloaders/cache/two_tier_cache.py +68 -0
  79. phoenix/server/api/dataloaders/dataset_example_revisions.py +131 -0
  80. phoenix/server/api/dataloaders/dataset_example_spans.py +38 -0
  81. phoenix/server/api/dataloaders/document_evaluation_summaries.py +144 -0
  82. phoenix/server/api/dataloaders/document_evaluations.py +31 -0
  83. phoenix/server/api/dataloaders/document_retrieval_metrics.py +89 -0
  84. phoenix/server/api/dataloaders/experiment_annotation_summaries.py +79 -0
  85. phoenix/server/api/dataloaders/experiment_error_rates.py +58 -0
  86. phoenix/server/api/dataloaders/experiment_run_annotations.py +36 -0
  87. phoenix/server/api/dataloaders/experiment_run_counts.py +49 -0
  88. phoenix/server/api/dataloaders/experiment_sequence_number.py +44 -0
  89. phoenix/server/api/dataloaders/latency_ms_quantile.py +188 -0
  90. phoenix/server/api/dataloaders/min_start_or_max_end_times.py +85 -0
  91. phoenix/server/api/dataloaders/project_by_name.py +31 -0
  92. phoenix/server/api/dataloaders/record_counts.py +116 -0
  93. phoenix/server/api/dataloaders/session_io.py +79 -0
  94. phoenix/server/api/dataloaders/session_num_traces.py +30 -0
  95. phoenix/server/api/dataloaders/session_num_traces_with_error.py +32 -0
  96. phoenix/server/api/dataloaders/session_token_usages.py +41 -0
  97. phoenix/server/api/dataloaders/session_trace_latency_ms_quantile.py +55 -0
  98. phoenix/server/api/dataloaders/span_annotations.py +26 -0
  99. phoenix/server/api/dataloaders/span_dataset_examples.py +31 -0
  100. phoenix/server/api/dataloaders/span_descendants.py +57 -0
  101. phoenix/server/api/dataloaders/span_projects.py +33 -0
  102. phoenix/server/api/dataloaders/token_counts.py +124 -0
  103. phoenix/server/api/dataloaders/trace_by_trace_ids.py +25 -0
  104. phoenix/server/api/dataloaders/trace_root_spans.py +32 -0
  105. phoenix/server/api/dataloaders/user_roles.py +30 -0
  106. phoenix/server/api/dataloaders/users.py +33 -0
  107. phoenix/server/api/exceptions.py +48 -0
  108. phoenix/server/api/helpers/__init__.py +12 -0
  109. phoenix/server/api/helpers/dataset_helpers.py +217 -0
  110. phoenix/server/api/helpers/experiment_run_filters.py +763 -0
  111. phoenix/server/api/helpers/playground_clients.py +948 -0
  112. phoenix/server/api/helpers/playground_registry.py +70 -0
  113. phoenix/server/api/helpers/playground_spans.py +455 -0
  114. phoenix/server/api/input_types/AddExamplesToDatasetInput.py +16 -0
  115. phoenix/server/api/input_types/AddSpansToDatasetInput.py +14 -0
  116. phoenix/server/api/input_types/ChatCompletionInput.py +38 -0
  117. phoenix/server/api/input_types/ChatCompletionMessageInput.py +24 -0
  118. phoenix/server/api/input_types/ClearProjectInput.py +15 -0
  119. phoenix/server/api/input_types/ClusterInput.py +2 -2
  120. phoenix/server/api/input_types/CreateDatasetInput.py +12 -0
  121. phoenix/server/api/input_types/CreateSpanAnnotationInput.py +18 -0
  122. phoenix/server/api/input_types/CreateTraceAnnotationInput.py +18 -0
  123. phoenix/server/api/input_types/DataQualityMetricInput.py +5 -2
  124. phoenix/server/api/input_types/DatasetExampleInput.py +14 -0
  125. phoenix/server/api/input_types/DatasetSort.py +17 -0
  126. phoenix/server/api/input_types/DatasetVersionSort.py +16 -0
  127. phoenix/server/api/input_types/DeleteAnnotationsInput.py +7 -0
  128. phoenix/server/api/input_types/DeleteDatasetExamplesInput.py +13 -0
  129. phoenix/server/api/input_types/DeleteDatasetInput.py +7 -0
  130. phoenix/server/api/input_types/DeleteExperimentsInput.py +7 -0
  131. phoenix/server/api/input_types/DimensionFilter.py +4 -4
  132. phoenix/server/api/input_types/GenerativeModelInput.py +17 -0
  133. phoenix/server/api/input_types/Granularity.py +1 -1
  134. phoenix/server/api/input_types/InvocationParameters.py +162 -0
  135. phoenix/server/api/input_types/PatchAnnotationInput.py +19 -0
  136. phoenix/server/api/input_types/PatchDatasetExamplesInput.py +35 -0
  137. phoenix/server/api/input_types/PatchDatasetInput.py +14 -0
  138. phoenix/server/api/input_types/PerformanceMetricInput.py +5 -2
  139. phoenix/server/api/input_types/ProjectSessionSort.py +29 -0
  140. phoenix/server/api/input_types/SpanAnnotationSort.py +17 -0
  141. phoenix/server/api/input_types/SpanSort.py +134 -69
  142. phoenix/server/api/input_types/TemplateOptions.py +10 -0
  143. phoenix/server/api/input_types/TraceAnnotationSort.py +17 -0
  144. phoenix/server/api/input_types/UserRoleInput.py +9 -0
  145. phoenix/server/api/mutations/__init__.py +28 -0
  146. phoenix/server/api/mutations/api_key_mutations.py +167 -0
  147. phoenix/server/api/mutations/chat_mutations.py +593 -0
  148. phoenix/server/api/mutations/dataset_mutations.py +591 -0
  149. phoenix/server/api/mutations/experiment_mutations.py +75 -0
  150. phoenix/server/api/{types/ExportEventsMutation.py → mutations/export_events_mutations.py} +21 -18
  151. phoenix/server/api/mutations/project_mutations.py +57 -0
  152. phoenix/server/api/mutations/span_annotations_mutations.py +128 -0
  153. phoenix/server/api/mutations/trace_annotations_mutations.py +127 -0
  154. phoenix/server/api/mutations/user_mutations.py +329 -0
  155. phoenix/server/api/openapi/__init__.py +0 -0
  156. phoenix/server/api/openapi/main.py +17 -0
  157. phoenix/server/api/openapi/schema.py +16 -0
  158. phoenix/server/api/queries.py +738 -0
  159. phoenix/server/api/routers/__init__.py +11 -0
  160. phoenix/server/api/routers/auth.py +284 -0
  161. phoenix/server/api/routers/embeddings.py +26 -0
  162. phoenix/server/api/routers/oauth2.py +488 -0
  163. phoenix/server/api/routers/v1/__init__.py +64 -0
  164. phoenix/server/api/routers/v1/datasets.py +1017 -0
  165. phoenix/server/api/routers/v1/evaluations.py +362 -0
  166. phoenix/server/api/routers/v1/experiment_evaluations.py +115 -0
  167. phoenix/server/api/routers/v1/experiment_runs.py +167 -0
  168. phoenix/server/api/routers/v1/experiments.py +308 -0
  169. phoenix/server/api/routers/v1/pydantic_compat.py +78 -0
  170. phoenix/server/api/routers/v1/spans.py +267 -0
  171. phoenix/server/api/routers/v1/traces.py +208 -0
  172. phoenix/server/api/routers/v1/utils.py +95 -0
  173. phoenix/server/api/schema.py +44 -247
  174. phoenix/server/api/subscriptions.py +597 -0
  175. phoenix/server/api/types/Annotation.py +21 -0
  176. phoenix/server/api/types/AnnotationSummary.py +55 -0
  177. phoenix/server/api/types/AnnotatorKind.py +16 -0
  178. phoenix/server/api/types/ApiKey.py +27 -0
  179. phoenix/server/api/types/AuthMethod.py +9 -0
  180. phoenix/server/api/types/ChatCompletionMessageRole.py +11 -0
  181. phoenix/server/api/types/ChatCompletionSubscriptionPayload.py +46 -0
  182. phoenix/server/api/types/Cluster.py +25 -24
  183. phoenix/server/api/types/CreateDatasetPayload.py +8 -0
  184. phoenix/server/api/types/DataQualityMetric.py +31 -13
  185. phoenix/server/api/types/Dataset.py +288 -63
  186. phoenix/server/api/types/DatasetExample.py +85 -0
  187. phoenix/server/api/types/DatasetExampleRevision.py +34 -0
  188. phoenix/server/api/types/DatasetVersion.py +14 -0
  189. phoenix/server/api/types/Dimension.py +32 -31
  190. phoenix/server/api/types/DocumentEvaluationSummary.py +9 -8
  191. phoenix/server/api/types/EmbeddingDimension.py +56 -49
  192. phoenix/server/api/types/Evaluation.py +25 -31
  193. phoenix/server/api/types/EvaluationSummary.py +30 -50
  194. phoenix/server/api/types/Event.py +20 -20
  195. phoenix/server/api/types/ExampleRevisionInterface.py +14 -0
  196. phoenix/server/api/types/Experiment.py +152 -0
  197. phoenix/server/api/types/ExperimentAnnotationSummary.py +13 -0
  198. phoenix/server/api/types/ExperimentComparison.py +17 -0
  199. phoenix/server/api/types/ExperimentRun.py +119 -0
  200. phoenix/server/api/types/ExperimentRunAnnotation.py +56 -0
  201. phoenix/server/api/types/GenerativeModel.py +9 -0
  202. phoenix/server/api/types/GenerativeProvider.py +85 -0
  203. phoenix/server/api/types/Inferences.py +80 -0
  204. phoenix/server/api/types/InferencesRole.py +23 -0
  205. phoenix/server/api/types/LabelFraction.py +7 -0
  206. phoenix/server/api/types/MimeType.py +2 -2
  207. phoenix/server/api/types/Model.py +54 -54
  208. phoenix/server/api/types/PerformanceMetric.py +8 -5
  209. phoenix/server/api/types/Project.py +407 -142
  210. phoenix/server/api/types/ProjectSession.py +139 -0
  211. phoenix/server/api/types/Segments.py +4 -4
  212. phoenix/server/api/types/Span.py +221 -176
  213. phoenix/server/api/types/SpanAnnotation.py +43 -0
  214. phoenix/server/api/types/SpanIOValue.py +15 -0
  215. phoenix/server/api/types/SystemApiKey.py +9 -0
  216. phoenix/server/api/types/TemplateLanguage.py +10 -0
  217. phoenix/server/api/types/TimeSeries.py +19 -15
  218. phoenix/server/api/types/TokenUsage.py +11 -0
  219. phoenix/server/api/types/Trace.py +154 -0
  220. phoenix/server/api/types/TraceAnnotation.py +45 -0
  221. phoenix/server/api/types/UMAPPoints.py +7 -7
  222. phoenix/server/api/types/User.py +60 -0
  223. phoenix/server/api/types/UserApiKey.py +45 -0
  224. phoenix/server/api/types/UserRole.py +15 -0
  225. phoenix/server/api/types/node.py +13 -107
  226. phoenix/server/api/types/pagination.py +156 -57
  227. phoenix/server/api/utils.py +34 -0
  228. phoenix/server/app.py +864 -115
  229. phoenix/server/bearer_auth.py +163 -0
  230. phoenix/server/dml_event.py +136 -0
  231. phoenix/server/dml_event_handler.py +256 -0
  232. phoenix/server/email/__init__.py +0 -0
  233. phoenix/server/email/sender.py +97 -0
  234. phoenix/server/email/templates/__init__.py +0 -0
  235. phoenix/server/email/templates/password_reset.html +19 -0
  236. phoenix/server/email/types.py +11 -0
  237. phoenix/server/grpc_server.py +102 -0
  238. phoenix/server/jwt_store.py +505 -0
  239. phoenix/server/main.py +305 -116
  240. phoenix/server/oauth2.py +52 -0
  241. phoenix/server/openapi/__init__.py +0 -0
  242. phoenix/server/prometheus.py +111 -0
  243. phoenix/server/rate_limiters.py +188 -0
  244. phoenix/server/static/.vite/manifest.json +87 -0
  245. phoenix/server/static/assets/components-Cy9nwIvF.js +2125 -0
  246. phoenix/server/static/assets/index-BKvHIxkk.js +113 -0
  247. phoenix/server/static/assets/pages-CUi2xCVQ.js +4449 -0
  248. phoenix/server/static/assets/vendor-DvC8cT4X.js +894 -0
  249. phoenix/server/static/assets/vendor-DxkFTwjz.css +1 -0
  250. phoenix/server/static/assets/vendor-arizeai-Do1793cv.js +662 -0
  251. phoenix/server/static/assets/vendor-codemirror-BzwZPyJM.js +24 -0
  252. phoenix/server/static/assets/vendor-recharts-_Jb7JjhG.js +59 -0
  253. phoenix/server/static/assets/vendor-shiki-Cl9QBraO.js +5 -0
  254. phoenix/server/static/assets/vendor-three-DwGkEfCM.js +2998 -0
  255. phoenix/server/telemetry.py +68 -0
  256. phoenix/server/templates/index.html +82 -23
  257. phoenix/server/thread_server.py +3 -3
  258. phoenix/server/types.py +275 -0
  259. phoenix/services.py +27 -18
  260. phoenix/session/client.py +743 -68
  261. phoenix/session/data_extractor.py +31 -7
  262. phoenix/session/evaluation.py +3 -9
  263. phoenix/session/session.py +263 -219
  264. phoenix/settings.py +22 -0
  265. phoenix/trace/__init__.py +2 -22
  266. phoenix/trace/attributes.py +338 -0
  267. phoenix/trace/dsl/README.md +116 -0
  268. phoenix/trace/dsl/filter.py +663 -213
  269. phoenix/trace/dsl/helpers.py +73 -21
  270. phoenix/trace/dsl/query.py +574 -201
  271. phoenix/trace/exporter.py +24 -19
  272. phoenix/trace/fixtures.py +368 -32
  273. phoenix/trace/otel.py +71 -219
  274. phoenix/trace/projects.py +3 -2
  275. phoenix/trace/schemas.py +33 -11
  276. phoenix/trace/span_evaluations.py +21 -16
  277. phoenix/trace/span_json_decoder.py +6 -4
  278. phoenix/trace/span_json_encoder.py +2 -2
  279. phoenix/trace/trace_dataset.py +47 -32
  280. phoenix/trace/utils.py +21 -4
  281. phoenix/utilities/__init__.py +0 -26
  282. phoenix/utilities/client.py +132 -0
  283. phoenix/utilities/deprecation.py +31 -0
  284. phoenix/utilities/error_handling.py +3 -2
  285. phoenix/utilities/json.py +109 -0
  286. phoenix/utilities/logging.py +8 -0
  287. phoenix/utilities/project.py +2 -2
  288. phoenix/utilities/re.py +49 -0
  289. phoenix/utilities/span_store.py +0 -23
  290. phoenix/utilities/template_formatters.py +99 -0
  291. phoenix/version.py +1 -1
  292. arize_phoenix-3.16.0.dist-info/METADATA +0 -495
  293. arize_phoenix-3.16.0.dist-info/RECORD +0 -178
  294. phoenix/core/project.py +0 -617
  295. phoenix/core/traces.py +0 -100
  296. phoenix/experimental/evals/__init__.py +0 -73
  297. phoenix/experimental/evals/evaluators.py +0 -413
  298. phoenix/experimental/evals/functions/__init__.py +0 -4
  299. phoenix/experimental/evals/functions/classify.py +0 -453
  300. phoenix/experimental/evals/functions/executor.py +0 -353
  301. phoenix/experimental/evals/functions/generate.py +0 -138
  302. phoenix/experimental/evals/functions/processing.py +0 -76
  303. phoenix/experimental/evals/models/__init__.py +0 -14
  304. phoenix/experimental/evals/models/anthropic.py +0 -175
  305. phoenix/experimental/evals/models/base.py +0 -170
  306. phoenix/experimental/evals/models/bedrock.py +0 -221
  307. phoenix/experimental/evals/models/litellm.py +0 -134
  308. phoenix/experimental/evals/models/openai.py +0 -448
  309. phoenix/experimental/evals/models/rate_limiters.py +0 -246
  310. phoenix/experimental/evals/models/vertex.py +0 -173
  311. phoenix/experimental/evals/models/vertexai.py +0 -186
  312. phoenix/experimental/evals/retrievals.py +0 -96
  313. phoenix/experimental/evals/templates/__init__.py +0 -50
  314. phoenix/experimental/evals/templates/default_templates.py +0 -472
  315. phoenix/experimental/evals/templates/template.py +0 -195
  316. phoenix/experimental/evals/utils/__init__.py +0 -172
  317. phoenix/experimental/evals/utils/threads.py +0 -27
  318. phoenix/server/api/helpers.py +0 -11
  319. phoenix/server/api/routers/evaluation_handler.py +0 -109
  320. phoenix/server/api/routers/span_handler.py +0 -70
  321. phoenix/server/api/routers/trace_handler.py +0 -60
  322. phoenix/server/api/types/DatasetRole.py +0 -23
  323. phoenix/server/static/index.css +0 -6
  324. phoenix/server/static/index.js +0 -7447
  325. phoenix/storage/span_store/__init__.py +0 -23
  326. phoenix/storage/span_store/text_file.py +0 -85
  327. phoenix/trace/dsl/missing.py +0 -60
  328. phoenix/trace/langchain/__init__.py +0 -3
  329. phoenix/trace/langchain/instrumentor.py +0 -35
  330. phoenix/trace/llama_index/__init__.py +0 -3
  331. phoenix/trace/llama_index/callback.py +0 -102
  332. phoenix/trace/openai/__init__.py +0 -3
  333. phoenix/trace/openai/instrumentor.py +0 -30
  334. {arize_phoenix-3.16.0.dist-info → arize_phoenix-7.7.0.dist-info}/licenses/IP_NOTICE +0 -0
  335. {arize_phoenix-3.16.0.dist-info → arize_phoenix-7.7.0.dist-info}/licenses/LICENSE +0 -0
  336. /phoenix/{datasets → db/insertion}/__init__.py +0 -0
  337. /phoenix/{experimental → db/migrations}/__init__.py +0 -0
  338. /phoenix/{storage → db/migrations/data_migration_scripts}/__init__.py +0 -0
"""GraphQL interface for API keys and a converter from the ORM row type."""

from datetime import datetime
from typing import Optional

import strawberry

from phoenix.db.models import ApiKey as ORMApiKey


@strawberry.interface
class ApiKey:
    """Common metadata shared by all API key GraphQL types."""

    # Human-readable label chosen when the key was created.
    name: str = strawberry.field(description="Name of the API key.")
    # Optional free-form note; None when the creator supplied none.
    description: Optional[str] = strawberry.field(description="Description of the API key.")
    created_at: datetime = strawberry.field(
        description="The date and time the API key was created."
    )
    # None means the key never expires.
    expires_at: Optional[datetime] = strawberry.field(
        description="The date and time the API key will expire."
    )


def to_gql_api_key(api_key: ORMApiKey) -> ApiKey:
    """Convert an ORM ``ApiKey`` row into its GraphQL representation.

    Copies the four interface fields verbatim; no validation is performed.
    """
    return ApiKey(
        name=api_key.name,
        description=api_key.description,
        created_at=api_key.created_at,
        expires_at=api_key.expires_at,
    )
"""GraphQL enum describing how a user authenticates."""

from enum import Enum

import strawberry


@strawberry.enum
class AuthMethod(Enum):
    """Authentication mechanism associated with a user account."""

    # Username/password credentials stored locally.
    LOCAL = "LOCAL"
    # Delegated sign-in via an external OAuth2 identity provider.
    OAUTH2 = "OAUTH2"
"""GraphQL enum of chat message roles used in chat-completion inputs."""

from enum import Enum

import strawberry


@strawberry.enum
class ChatCompletionMessageRole(Enum):
    """Role of a single message in a chat-completion conversation."""

    USER = "USER"
    SYSTEM = "SYSTEM"
    TOOL = "TOOL"
    AI = "AI"  # E.g. the assistant. Normalize to AI for consistency.
"""Payload types streamed over the chat-completion GraphQL subscription.

Each concrete type below implements the ``ChatCompletionSubscriptionPayload``
interface so a single subscription can yield heterogeneous events (text
chunks, tool calls, final results, errors).
"""

from typing import Optional

import strawberry
from strawberry.relay import GlobalID

from .Experiment import Experiment
from .ExperimentRun import ExperimentRun
from .Span import Span


@strawberry.interface
class ChatCompletionSubscriptionPayload:
    """Base interface for every event emitted by the subscription."""

    # Set when the completion was generated for a dataset example;
    # None otherwise.
    dataset_example_id: Optional[GlobalID] = None


@strawberry.type
class TextChunk(ChatCompletionSubscriptionPayload):
    """An incremental piece of generated text."""

    content: str


@strawberry.type
class FunctionCallChunk(ChatCompletionSubscriptionPayload):
    """An incremental piece of a function call (name plus raw arguments)."""

    name: str
    arguments: str


@strawberry.type
class ToolCallChunk(ChatCompletionSubscriptionPayload):
    """An incremental piece of a tool invocation."""

    id: str
    function: FunctionCallChunk


@strawberry.type
class ChatCompletionSubscriptionResult(ChatCompletionSubscriptionPayload):
    """Terminal event carrying the recorded span and/or experiment run."""

    span: Optional[Span] = None
    experiment_run: Optional[ExperimentRun] = None


@strawberry.type
class ChatCompletionSubscriptionError(ChatCompletionSubscriptionPayload):
    """Terminal event reporting a failure message."""

    message: str


@strawberry.type
class ChatCompletionSubscriptionExperiment(ChatCompletionSubscriptionPayload):
    """Event announcing the experiment the streamed runs belong to."""

    experiment: Experiment
@@ -1,5 +1,6 @@
1
1
  from collections import Counter, defaultdict
2
- from typing import Dict, List, Mapping, Optional, Set
2
+ from collections.abc import Mapping
3
+ from typing import Optional
3
4
 
4
5
  import strawberry
5
6
  from strawberry import ID
@@ -9,9 +10,9 @@ from phoenix.core.model_schema import PRIMARY, REFERENCE
9
10
  from phoenix.server.api.context import Context
10
11
  from phoenix.server.api.input_types.DataQualityMetricInput import DataQualityMetricInput
11
12
  from phoenix.server.api.input_types.PerformanceMetricInput import PerformanceMetricInput
12
- from phoenix.server.api.types.DatasetRole import AncillaryDatasetRole, DatasetRole
13
13
  from phoenix.server.api.types.DatasetValues import DatasetValues
14
14
  from phoenix.server.api.types.Event import unpack_event_id
15
+ from phoenix.server.api.types.InferencesRole import AncillaryInferencesRole, InferencesRole
15
16
 
16
17
 
17
18
  @strawberry.type
@@ -22,7 +23,7 @@ class Cluster:
22
23
  description="The ID of the cluster",
23
24
  )
24
25
 
25
- event_ids: List[ID] = strawberry.field(
26
+ event_ids: list[ID] = strawberry.field(
26
27
  description="The event IDs of the points in the cluster",
27
28
  )
28
29
 
@@ -36,8 +37,8 @@ class Cluster:
36
37
  """
37
38
  Calculates the drift score of the cluster. The score will be a value
38
39
  representing the balance of points between the primary and the reference
39
- datasets, and will be on a scale between 1 (all primary) and -1 (all
40
- reference), with 0 being an even balance between the two datasets.
40
+ inferences, and will be on a scale between 1 (all primary) and -1 (all
41
+ reference), with 0 being an even balance between the two inference sets.
41
42
 
42
43
  Returns
43
44
  -------
@@ -47,8 +48,8 @@ class Cluster:
47
48
  if model[REFERENCE].empty:
48
49
  return None
49
50
  count_by_role = Counter(unpack_event_id(event_id)[1] for event_id in self.event_ids)
50
- primary_count = count_by_role[DatasetRole.primary]
51
- reference_count = count_by_role[DatasetRole.reference]
51
+ primary_count = count_by_role[InferencesRole.primary]
52
+ reference_count = count_by_role[InferencesRole.reference]
52
53
  return (
53
54
  None
54
55
  if not (denominator := (primary_count + reference_count))
@@ -76,8 +77,8 @@ class Cluster:
76
77
  if corpus is None or corpus[PRIMARY].empty:
77
78
  return None
78
79
  count_by_role = Counter(unpack_event_id(event_id)[1] for event_id in self.event_ids)
79
- primary_count = count_by_role[DatasetRole.primary]
80
- corpus_count = count_by_role[AncillaryDatasetRole.corpus]
80
+ primary_count = count_by_role[InferencesRole.primary]
81
+ corpus_count = count_by_role[AncillaryInferencesRole.corpus]
81
82
  return (
82
83
  None
83
84
  if not (denominator := (primary_count + corpus_count))
@@ -94,19 +95,19 @@ class Cluster:
94
95
  metric: DataQualityMetricInput,
95
96
  ) -> DatasetValues:
96
97
  model = info.context.model
97
- row_ids: Dict[DatasetRole, List[int]] = defaultdict(list)
98
- for row_id, dataset_role in map(unpack_event_id, self.event_ids):
99
- if not isinstance(dataset_role, DatasetRole):
98
+ row_ids: dict[InferencesRole, list[int]] = defaultdict(list)
99
+ for row_id, inferences_role in map(unpack_event_id, self.event_ids):
100
+ if not isinstance(inferences_role, InferencesRole):
100
101
  continue
101
- row_ids[dataset_role].append(row_id)
102
+ row_ids[inferences_role].append(row_id)
102
103
  return DatasetValues(
103
104
  primary_value=metric.metric_instance(
104
105
  model[PRIMARY],
105
- subset_rows=row_ids[DatasetRole.primary],
106
+ subset_rows=row_ids[InferencesRole.primary],
106
107
  ),
107
108
  reference_value=metric.metric_instance(
108
109
  model[REFERENCE],
109
- subset_rows=row_ids[DatasetRole.reference],
110
+ subset_rows=row_ids[InferencesRole.reference],
110
111
  ),
111
112
  )
112
113
 
@@ -120,34 +121,34 @@ class Cluster:
120
121
  metric: PerformanceMetricInput,
121
122
  ) -> DatasetValues:
122
123
  model = info.context.model
123
- row_ids: Dict[DatasetRole, List[int]] = defaultdict(list)
124
- for row_id, dataset_role in map(unpack_event_id, self.event_ids):
125
- if not isinstance(dataset_role, DatasetRole):
124
+ row_ids: dict[InferencesRole, list[int]] = defaultdict(list)
125
+ for row_id, inferences_role in map(unpack_event_id, self.event_ids):
126
+ if not isinstance(inferences_role, InferencesRole):
126
127
  continue
127
- row_ids[dataset_role].append(row_id)
128
+ row_ids[inferences_role].append(row_id)
128
129
  metric_instance = metric.metric_instance(model)
129
130
  return DatasetValues(
130
131
  primary_value=metric_instance(
131
132
  model[PRIMARY],
132
- subset_rows=row_ids[DatasetRole.primary],
133
+ subset_rows=row_ids[InferencesRole.primary],
133
134
  ),
134
135
  reference_value=metric_instance(
135
136
  model[REFERENCE],
136
- subset_rows=row_ids[DatasetRole.reference],
137
+ subset_rows=row_ids[InferencesRole.reference],
137
138
  ),
138
139
  )
139
140
 
140
141
 
141
142
  def to_gql_clusters(
142
- clustered_events: Mapping[str, Set[ID]],
143
- ) -> List[Cluster]:
143
+ clustered_events: Mapping[str, set[ID]],
144
+ ) -> list[Cluster]:
144
145
  """
145
146
  Converts a dictionary of event IDs to cluster IDs to a list of clusters
146
147
  for the graphQL response
147
148
 
148
149
  Parameters
149
150
  ----------
150
- clustered_events: Mapping[str, Set[ID]]
151
+ clustered_events: Mapping[str, set[ID]]
151
152
  A mapping of cluster ID to its set of event IDs
152
153
  """
153
154
 
"""GraphQL payload returned by the create-dataset mutation."""

import strawberry

from phoenix.server.api.types.Dataset import Dataset


@strawberry.type
class CreateDatasetPayload:
    """Wraps the newly created dataset for the mutation response."""

    # The dataset that was just created.
    dataset: Dataset
"""GraphQL enum of data-quality metrics plus a factory table.

The enum members are plain ``auto()`` values so the GraphQL schema stays
decoupled from the metric implementations; ``DATA_QUALITY_METRIC_FACTORIES``
maps each member to a zero-argument callable producing the corresponding
``Metric`` instance.
"""

from enum import Enum, auto
from functools import partial
from typing import Callable, Mapping, cast

import strawberry

from phoenix.metrics import Metric
from phoenix.metrics.metrics import Cardinality, Count, Max, Mean, Min, PercentEmpty, Quantile, Sum


@strawberry.enum
class DataQualityMetric(Enum):
    """Supported data-quality metrics selectable from the GraphQL API."""

    cardinality = auto()
    percentEmpty = auto()
    mean = auto()
    sum = auto()
    min = auto()
    max = auto()
    count = auto()
    # pNN members are quantiles at the indicated probability.
    p01 = auto()
    p25 = auto()
    p50 = auto()
    p75 = auto()
    p99 = auto()


# Maps each enum member to a factory for the concrete metric. The casts
# satisfy the type checker: the metric classes (and the Quantile partials)
# are all zero-argument callables returning a Metric.
DATA_QUALITY_METRIC_FACTORIES: Mapping[DataQualityMetric, Callable[[], Metric]] = {
    DataQualityMetric.cardinality: cast(Callable[[], Metric], Cardinality),
    DataQualityMetric.percentEmpty: cast(Callable[[], Metric], PercentEmpty),
    DataQualityMetric.mean: cast(Callable[[], Metric], Mean),
    DataQualityMetric.sum: cast(Callable[[], Metric], Sum),
    DataQualityMetric.min: cast(Callable[[], Metric], Min),
    DataQualityMetric.max: cast(Callable[[], Metric], Max),
    DataQualityMetric.count: cast(Callable[[], Metric], Count),
    DataQualityMetric.p01: cast(Callable[[], Metric], partial(Quantile, probability=0.01)),
    DataQualityMetric.p25: cast(Callable[[], Metric], partial(Quantile, probability=0.25)),
    DataQualityMetric.p50: cast(Callable[[], Metric], partial(Quantile, probability=0.50)),
    DataQualityMetric.p75: cast(Callable[[], Metric], partial(Quantile, probability=0.75)),
    DataQualityMetric.p99: cast(Callable[[], Metric], partial(Quantile, probability=0.99)),
}
@@ -1,80 +1,305 @@
1
+ from collections.abc import AsyncIterable
1
2
  from datetime import datetime
2
- from typing import Iterable, List, Optional, Set, Union
3
+ from typing import ClassVar, Optional, cast
3
4
 
4
5
  import strawberry
5
- from strawberry.scalars import ID
6
- from strawberry.unset import UNSET
6
+ from sqlalchemy import and_, func, select
7
+ from sqlalchemy.sql.functions import count
8
+ from strawberry import UNSET
9
+ from strawberry.relay import Connection, GlobalID, Node, NodeID
10
+ from strawberry.scalars import JSON
11
+ from strawberry.types import Info
7
12
 
8
- import phoenix.core.model_schema as ms
9
- from phoenix.core.model_schema import FEATURE, TAG, ScalarDimension
10
-
11
- from ..input_types.DimensionInput import DimensionInput
12
- from .DatasetRole import AncillaryDatasetRole, DatasetRole
13
- from .Dimension import Dimension, to_gql_dimension
14
- from .Event import Event, create_event, create_event_id, parse_event_ids_by_dataset_role
13
+ from phoenix.db import models
14
+ from phoenix.server.api.context import Context
15
+ from phoenix.server.api.input_types.DatasetVersionSort import DatasetVersionSort
16
+ from phoenix.server.api.types.DatasetExample import DatasetExample
17
+ from phoenix.server.api.types.DatasetVersion import DatasetVersion
18
+ from phoenix.server.api.types.Experiment import Experiment, to_gql_experiment
19
+ from phoenix.server.api.types.ExperimentAnnotationSummary import ExperimentAnnotationSummary
20
+ from phoenix.server.api.types.node import from_global_id_with_expected_type
21
+ from phoenix.server.api.types.pagination import (
22
+ ConnectionArgs,
23
+ CursorString,
24
+ connection_from_list,
25
+ )
26
+ from phoenix.server.api.types.SortDir import SortDir
15
27
 
16
28
 
17
29
@strawberry.type
class Dataset(Node):
    """Relay node for a dataset: a named, versioned collection of examples
    against which experiments are run.

    Examples, versions, and experiments are all fetched lazily from the
    database via the request context's session factory.
    """

    # ORM table consulted by `last_updated_at`. NOTE(review): this points at
    # models.Experiment rather than models.Dataset — presumably so the field
    # reflects experiment activity on this dataset; confirm this is intentional.
    _table: ClassVar[type[models.Base]] = models.Experiment
    id_attr: NodeID[int]
    name: str
    description: Optional[str]
    metadata: JSON
    created_at: datetime
    updated_at: datetime

    @strawberry.field
    async def versions(
        self,
        info: Info[Context, None],
        first: Optional[int] = 50,
        last: Optional[int] = UNSET,
        after: Optional[CursorString] = UNSET,
        before: Optional[CursorString] = UNSET,
        sort: Optional[DatasetVersionSort] = UNSET,
    ) -> Connection[DatasetVersion]:
        """Paginate this dataset's versions.

        When ``sort`` is omitted, versions are returned newest-first
        (``created_at`` descending). Ties on the sort column are broken by
        the version's primary key in the same direction.
        """
        args = ConnectionArgs(
            first=first,
            after=after if isinstance(after, CursorString) else None,
            last=last,
            before=before if isinstance(before, CursorString) else None,
        )
        async with info.context.db() as session:
            stmt = select(models.DatasetVersion).filter_by(dataset_id=self.id_attr)
            if sort:
                # For now assume the column names match 1:1 with the enum values
                sort_col = getattr(models.DatasetVersion, sort.col.value)
                if sort.dir is SortDir.desc:
                    stmt = stmt.order_by(sort_col.desc(), models.DatasetVersion.id.desc())
                else:
                    stmt = stmt.order_by(sort_col.asc(), models.DatasetVersion.id.asc())
            else:
                stmt = stmt.order_by(models.DatasetVersion.created_at.desc())
            versions = await session.scalars(stmt)
            data = [
                DatasetVersion(
                    id_attr=version.id,
                    description=version.description,
                    metadata=version.metadata_,
                    created_at=version.created_at,
                )
                for version in versions
            ]
        return connection_from_list(data=data, args=args)

    @strawberry.field(
        description="Number of examples in a specific version if version is specified, or in the "
        "latest version if version is not specified."
    )  # type: ignore
    async def example_count(
        self,
        info: Info[Context, None],
        dataset_version_id: Optional[GlobalID] = UNSET,
    ) -> int:
        """Count non-deleted examples, as of the given version if provided.

        Raises (via `from_global_id_with_expected_type`) if the global ID is
        not a DatasetVersion ID.
        """
        dataset_id = self.id_attr
        version_id = (
            from_global_id_with_expected_type(
                global_id=dataset_version_id,
                expected_type_name=DatasetVersion.__name__,
            )
            if dataset_version_id
            else None
        )
        # Latest revision id per example of this dataset; each example's
        # current state is its max-id revision.
        revision_ids = (
            select(func.max(models.DatasetExampleRevision.id))
            .join(models.DatasetExample)
            .where(models.DatasetExample.dataset_id == dataset_id)
            .group_by(models.DatasetExampleRevision.dataset_example_id)
        )
        if version_id:
            # Restrict to revisions created at or before the requested
            # version; the subquery also verifies the version belongs to
            # this dataset (empty result otherwise).
            version_id_subquery = (
                select(models.DatasetVersion.id)
                .where(models.DatasetVersion.dataset_id == dataset_id)
                .where(models.DatasetVersion.id == version_id)
                .scalar_subquery()
            )
            revision_ids = revision_ids.where(
                models.DatasetExampleRevision.dataset_version_id <= version_id_subquery
            )
        # Exclude examples whose latest (as-of) revision is a deletion.
        stmt = (
            select(count(models.DatasetExampleRevision.id))
            .where(models.DatasetExampleRevision.id.in_(revision_ids))
            .where(models.DatasetExampleRevision.revision_kind != "DELETE")
        )
        async with info.context.db() as session:
            return (await session.scalar(stmt)) or 0

    @strawberry.field
    async def examples(
        self,
        info: Info[Context, None],
        dataset_version_id: Optional[GlobalID] = UNSET,
        first: Optional[int] = 50,
        last: Optional[int] = UNSET,
        after: Optional[CursorString] = UNSET,
        before: Optional[CursorString] = UNSET,
    ) -> Connection[DatasetExample]:
        """Paginate this dataset's non-deleted examples, newest-first.

        If ``dataset_version_id`` is given, examples are resolved as of that
        version; otherwise the latest revisions are used. The pagination
        window is applied by ``connection_from_list`` over the fetched rows
        (the query itself has no LIMIT).
        """
        args = ConnectionArgs(
            first=first,
            after=after if isinstance(after, CursorString) else None,
            last=last,
            before=before if isinstance(before, CursorString) else None,
        )
        dataset_id = self.id_attr
        version_id = (
            from_global_id_with_expected_type(
                global_id=dataset_version_id, expected_type_name=DatasetVersion.__name__
            )
            if dataset_version_id
            else None
        )
        # Same latest-revision-per-example resolution as `example_count`.
        revision_ids = (
            select(func.max(models.DatasetExampleRevision.id))
            .join(models.DatasetExample)
            .where(models.DatasetExample.dataset_id == dataset_id)
            .group_by(models.DatasetExampleRevision.dataset_example_id)
        )
        if version_id:
            version_id_subquery = (
                select(models.DatasetVersion.id)
                .where(models.DatasetVersion.dataset_id == dataset_id)
                .where(models.DatasetVersion.id == version_id)
                .scalar_subquery()
            )
            revision_ids = revision_ids.where(
                models.DatasetExampleRevision.dataset_version_id <= version_id_subquery
            )
        query = (
            select(models.DatasetExample)
            .join(
                models.DatasetExampleRevision,
                onclause=models.DatasetExample.id
                == models.DatasetExampleRevision.dataset_example_id,
            )
            .where(
                and_(
                    models.DatasetExampleRevision.id.in_(revision_ids),
                    models.DatasetExampleRevision.revision_kind != "DELETE",
                )
            )
            .order_by(models.DatasetExampleRevision.dataset_example_id.desc())
        )
        async with info.context.db() as session:
            dataset_examples = [
                DatasetExample(
                    id_attr=example.id,
                    version_id=version_id,
                    created_at=example.created_at,
                )
                async for example in await session.stream_scalars(query)
            ]
        return connection_from_list(data=dataset_examples, args=args)

    @strawberry.field(
        description="Number of experiments for a specific version if version is specified, "
        "or for all versions if version is not specified."
    )  # type: ignore
    async def experiment_count(
        self,
        info: Info[Context, None],
        dataset_version_id: Optional[GlobalID] = UNSET,
    ) -> int:
        """Count this dataset's experiments, optionally for one version only."""
        stmt = select(count(models.Experiment.id)).where(
            models.Experiment.dataset_id == self.id_attr
        )
        version_id = (
            from_global_id_with_expected_type(
                global_id=dataset_version_id,
                expected_type_name=DatasetVersion.__name__,
            )
            if dataset_version_id
            else None
        )
        if version_id is not None:
            stmt = stmt.where(models.Experiment.dataset_version_id == version_id)
        async with info.context.db() as session:
            return (await session.scalar(stmt)) or 0

    @strawberry.field
    async def experiments(
        self,
        info: Info[Context, None],
        first: Optional[int] = 50,
        last: Optional[int] = UNSET,
        after: Optional[CursorString] = UNSET,
        before: Optional[CursorString] = UNSET,
    ) -> Connection[Experiment]:
        """Paginate this dataset's experiments, newest-first.

        Each experiment is paired with a sequence number: its 1-based
        position among the dataset's experiments in ascending id order
        (i.e., creation order), computed via a window function.
        """
        args = ConnectionArgs(
            first=first,
            after=after if isinstance(after, CursorString) else None,
            last=last,
            before=before if isinstance(before, CursorString) else None,
        )
        dataset_id = self.id_attr
        row_number = func.row_number().over(order_by=models.Experiment.id).label("row_number")
        query = (
            select(models.Experiment, row_number)
            .where(models.Experiment.dataset_id == dataset_id)
            .order_by(models.Experiment.id.desc())
        )
        async with info.context.db() as session:
            experiments = [
                to_gql_experiment(experiment, sequence_number)
                # cast: typing-only — the stream yields (Experiment, row_number) rows
                async for experiment, sequence_number in cast(
                    AsyncIterable[tuple[models.Experiment, int]],
                    await session.stream(query),
                )
            ]
        return connection_from_list(data=experiments, args=args)

    @strawberry.field
    async def experiment_annotation_summaries(
        self, info: Info[Context, None]
    ) -> list[ExperimentAnnotationSummary]:
        """Aggregate run-annotation scores across all of this dataset's
        experiments, grouped (and ordered) by annotation name.

        ``func.count()`` counts all rows in the group, while
        ``func.count(...error)`` counts only rows where the error column is
        non-NULL, per standard SQL COUNT semantics.
        """
        dataset_id = self.id_attr
        query = (
            select(
                models.ExperimentRunAnnotation.name,
                func.min(models.ExperimentRunAnnotation.score),
                func.max(models.ExperimentRunAnnotation.score),
                func.avg(models.ExperimentRunAnnotation.score),
                func.count(),
                func.count(models.ExperimentRunAnnotation.error),
            )
            .join(
                models.ExperimentRun,
                models.ExperimentRunAnnotation.experiment_run_id == models.ExperimentRun.id,
            )
            .join(
                models.Experiment,
                models.ExperimentRun.experiment_id == models.Experiment.id,
            )
            .where(models.Experiment.dataset_id == dataset_id)
            .group_by(models.ExperimentRunAnnotation.name)
            .order_by(models.ExperimentRunAnnotation.name)
        )
        async with info.context.db() as session:
            return [
                ExperimentAnnotationSummary(
                    annotation_name=annotation_name,
                    min_score=min_score,
                    max_score=max_score,
                    mean_score=mean_score,
                    count=count_,
                    error_count=error_count,
                )
                async for (
                    annotation_name,
                    min_score,
                    max_score,
                    mean_score,
                    count_,
                    error_count,
                ) in await session.stream(query)
            ]

    @strawberry.field
    def last_updated_at(self, info: Info[Context, None]) -> Optional[datetime]:
        """Last-update timestamp tracked in-memory for this node's `_table`
        row (presumably a dict-like cache keyed by (table, id) — see the
        `_table` note above)."""
        return info.context.last_updated_at.get(self._table, self.id_attr)
62
292
 
63
293
 
64
def to_gql_dataset(dataset: models.Dataset) -> Dataset:
    """
    Converts an ORM dataset to a GraphQL dataset.
    """
    # Note: the ORM column is `metadata_` (trailing underscore) because
    # `metadata` is reserved by SQLAlchemy's declarative base.
    fields = {
        "id_attr": dataset.id,
        "name": dataset.name,
        "description": dataset.description,
        "metadata": dataset.metadata_,
        "created_at": dataset.created_at,
        "updated_at": dataset.updated_at,
    }
    return Dataset(**fields)