arize-phoenix 4.5.0__py3-none-any.whl → 4.6.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of arize-phoenix might be problematic.

Files changed (123)
  1. {arize_phoenix-4.5.0.dist-info → arize_phoenix-4.6.2.dist-info}/METADATA +16 -8
  2. {arize_phoenix-4.5.0.dist-info → arize_phoenix-4.6.2.dist-info}/RECORD +122 -58
  3. {arize_phoenix-4.5.0.dist-info → arize_phoenix-4.6.2.dist-info}/WHEEL +1 -1
  4. phoenix/__init__.py +0 -27
  5. phoenix/config.py +42 -7
  6. phoenix/core/model.py +25 -25
  7. phoenix/core/model_schema.py +64 -62
  8. phoenix/core/model_schema_adapter.py +27 -25
  9. phoenix/datetime_utils.py +4 -0
  10. phoenix/db/bulk_inserter.py +54 -14
  11. phoenix/db/insertion/dataset.py +237 -0
  12. phoenix/db/insertion/evaluation.py +10 -10
  13. phoenix/db/insertion/helpers.py +17 -14
  14. phoenix/db/insertion/span.py +3 -3
  15. phoenix/db/migrations/types.py +29 -0
  16. phoenix/db/migrations/versions/10460e46d750_datasets.py +291 -0
  17. phoenix/db/migrations/versions/cf03bd6bae1d_init.py +2 -28
  18. phoenix/db/models.py +236 -4
  19. phoenix/experiments/__init__.py +6 -0
  20. phoenix/experiments/evaluators/__init__.py +29 -0
  21. phoenix/experiments/evaluators/base.py +153 -0
  22. phoenix/experiments/evaluators/code_evaluators.py +99 -0
  23. phoenix/experiments/evaluators/llm_evaluators.py +244 -0
  24. phoenix/experiments/evaluators/utils.py +186 -0
  25. phoenix/experiments/functions.py +757 -0
  26. phoenix/experiments/tracing.py +85 -0
  27. phoenix/experiments/types.py +753 -0
  28. phoenix/experiments/utils.py +24 -0
  29. phoenix/inferences/fixtures.py +23 -23
  30. phoenix/inferences/inferences.py +7 -7
  31. phoenix/inferences/validation.py +1 -1
  32. phoenix/server/api/context.py +20 -0
  33. phoenix/server/api/dataloaders/__init__.py +20 -0
  34. phoenix/server/api/dataloaders/average_experiment_run_latency.py +54 -0
  35. phoenix/server/api/dataloaders/dataset_example_revisions.py +100 -0
  36. phoenix/server/api/dataloaders/dataset_example_spans.py +43 -0
  37. phoenix/server/api/dataloaders/experiment_annotation_summaries.py +85 -0
  38. phoenix/server/api/dataloaders/experiment_error_rates.py +43 -0
  39. phoenix/server/api/dataloaders/experiment_run_counts.py +42 -0
  40. phoenix/server/api/dataloaders/experiment_sequence_number.py +49 -0
  41. phoenix/server/api/dataloaders/project_by_name.py +31 -0
  42. phoenix/server/api/dataloaders/span_descendants.py +2 -3
  43. phoenix/server/api/dataloaders/span_projects.py +33 -0
  44. phoenix/server/api/dataloaders/trace_row_ids.py +39 -0
  45. phoenix/server/api/helpers/dataset_helpers.py +179 -0
  46. phoenix/server/api/input_types/AddExamplesToDatasetInput.py +16 -0
  47. phoenix/server/api/input_types/AddSpansToDatasetInput.py +14 -0
  48. phoenix/server/api/input_types/ClearProjectInput.py +15 -0
  49. phoenix/server/api/input_types/CreateDatasetInput.py +12 -0
  50. phoenix/server/api/input_types/DatasetExampleInput.py +14 -0
  51. phoenix/server/api/input_types/DatasetSort.py +17 -0
  52. phoenix/server/api/input_types/DatasetVersionSort.py +16 -0
  53. phoenix/server/api/input_types/DeleteDatasetExamplesInput.py +13 -0
  54. phoenix/server/api/input_types/DeleteDatasetInput.py +7 -0
  55. phoenix/server/api/input_types/DeleteExperimentsInput.py +9 -0
  56. phoenix/server/api/input_types/PatchDatasetExamplesInput.py +35 -0
  57. phoenix/server/api/input_types/PatchDatasetInput.py +14 -0
  58. phoenix/server/api/mutations/__init__.py +13 -0
  59. phoenix/server/api/mutations/auth.py +11 -0
  60. phoenix/server/api/mutations/dataset_mutations.py +520 -0
  61. phoenix/server/api/mutations/experiment_mutations.py +65 -0
  62. phoenix/server/api/{types/ExportEventsMutation.py → mutations/export_events_mutations.py} +17 -14
  63. phoenix/server/api/mutations/project_mutations.py +47 -0
  64. phoenix/server/api/openapi/__init__.py +0 -0
  65. phoenix/server/api/openapi/main.py +6 -0
  66. phoenix/server/api/openapi/schema.py +16 -0
  67. phoenix/server/api/queries.py +503 -0
  68. phoenix/server/api/routers/v1/__init__.py +77 -2
  69. phoenix/server/api/routers/v1/dataset_examples.py +178 -0
  70. phoenix/server/api/routers/v1/datasets.py +965 -0
  71. phoenix/server/api/routers/v1/evaluations.py +8 -13
  72. phoenix/server/api/routers/v1/experiment_evaluations.py +143 -0
  73. phoenix/server/api/routers/v1/experiment_runs.py +220 -0
  74. phoenix/server/api/routers/v1/experiments.py +302 -0
  75. phoenix/server/api/routers/v1/spans.py +9 -5
  76. phoenix/server/api/routers/v1/traces.py +1 -4
  77. phoenix/server/api/schema.py +2 -303
  78. phoenix/server/api/types/AnnotatorKind.py +10 -0
  79. phoenix/server/api/types/Cluster.py +19 -19
  80. phoenix/server/api/types/CreateDatasetPayload.py +8 -0
  81. phoenix/server/api/types/Dataset.py +282 -63
  82. phoenix/server/api/types/DatasetExample.py +85 -0
  83. phoenix/server/api/types/DatasetExampleRevision.py +34 -0
  84. phoenix/server/api/types/DatasetVersion.py +14 -0
  85. phoenix/server/api/types/Dimension.py +30 -29
  86. phoenix/server/api/types/EmbeddingDimension.py +40 -34
  87. phoenix/server/api/types/Event.py +16 -16
  88. phoenix/server/api/types/ExampleRevisionInterface.py +14 -0
  89. phoenix/server/api/types/Experiment.py +147 -0
  90. phoenix/server/api/types/ExperimentAnnotationSummary.py +13 -0
  91. phoenix/server/api/types/ExperimentComparison.py +19 -0
  92. phoenix/server/api/types/ExperimentRun.py +91 -0
  93. phoenix/server/api/types/ExperimentRunAnnotation.py +57 -0
  94. phoenix/server/api/types/Inferences.py +80 -0
  95. phoenix/server/api/types/InferencesRole.py +23 -0
  96. phoenix/server/api/types/Model.py +43 -42
  97. phoenix/server/api/types/Project.py +26 -12
  98. phoenix/server/api/types/Span.py +79 -2
  99. phoenix/server/api/types/TimeSeries.py +6 -6
  100. phoenix/server/api/types/Trace.py +15 -4
  101. phoenix/server/api/types/UMAPPoints.py +1 -1
  102. phoenix/server/api/types/node.py +5 -111
  103. phoenix/server/api/types/pagination.py +10 -52
  104. phoenix/server/app.py +103 -49
  105. phoenix/server/main.py +49 -27
  106. phoenix/server/openapi/docs.py +3 -0
  107. phoenix/server/static/index.js +2300 -1294
  108. phoenix/server/templates/index.html +1 -0
  109. phoenix/services.py +15 -15
  110. phoenix/session/client.py +581 -22
  111. phoenix/session/session.py +47 -37
  112. phoenix/trace/exporter.py +14 -9
  113. phoenix/trace/fixtures.py +133 -7
  114. phoenix/trace/schemas.py +1 -2
  115. phoenix/trace/span_evaluations.py +3 -3
  116. phoenix/trace/trace_dataset.py +6 -6
  117. phoenix/utilities/json.py +61 -0
  118. phoenix/utilities/re.py +50 -0
  119. phoenix/version.py +1 -1
  120. phoenix/server/api/types/DatasetRole.py +0 -23
  121. {arize_phoenix-4.5.0.dist-info → arize_phoenix-4.6.2.dist-info}/licenses/IP_NOTICE +0 -0
  122. {arize_phoenix-4.5.0.dist-info → arize_phoenix-4.6.2.dist-info}/licenses/LICENSE +0 -0
  123. /phoenix/server/api/{helpers.py → helpers/__init__.py} +0 -0
phoenix/session/client.py CHANGED
@@ -1,27 +1,49 @@
+ import csv
  import gzip
  import logging
+ import re
  import weakref
+ from collections import Counter
  from datetime import datetime
  from io import BytesIO
- from typing import Any, List, Optional, Union, cast
- from urllib.parse import urljoin
+ from pathlib import Path
+ from typing import (
+     Any,
+     BinaryIO,
+     Dict,
+     Iterable,
+     List,
+     Literal,
+     Mapping,
+     Optional,
+     Sequence,
+     Tuple,
+     Union,
+     cast,
+ )
+ from urllib.parse import quote, urljoin

+ import httpx
  import pandas as pd
  import pyarrow as pa
+ from httpx import HTTPStatusError, Response
  from opentelemetry.proto.collector.trace.v1.trace_service_pb2 import ExportTraceServiceRequest
  from opentelemetry.proto.common.v1.common_pb2 import AnyValue, KeyValue
  from opentelemetry.proto.resource.v1.resource_pb2 import Resource
  from opentelemetry.proto.trace.v1.trace_pb2 import ResourceSpans, ScopeSpans
- from pyarrow import ArrowInvalid
- from requests import Session
+ from pyarrow import ArrowInvalid, Table
+ from typing_extensions import TypeAlias, assert_never

  from phoenix.config import (
+     get_env_client_headers,
      get_env_collector_endpoint,
      get_env_host,
      get_env_port,
      get_env_project_name,
  )
  from phoenix.datetime_utils import normalize_datetime
+ from phoenix.db.insertion.dataset import DatasetKeys
+ from phoenix.experiments.types import Dataset, Example
  from phoenix.session.data_extractor import DEFAULT_SPAN_LIMIT, TraceDataExtractor
  from phoenix.trace import Evaluations, TraceDataset
  from phoenix.trace.dsl import SpanQuery
@@ -29,6 +51,8 @@ from phoenix.trace.otel import encode_span_to_otlp

  logger = logging.getLogger(__name__)

+ DatasetAction: TypeAlias = Literal["create", "append"]
+

  class Client(TraceDataExtractor):
      def __init__(
@@ -36,14 +60,20 @@ class Client(TraceDataExtractor):
          *,
          endpoint: Optional[str] = None,
          warn_if_server_not_running: bool = True,
+         headers: Optional[Mapping[str, str]] = None,
          **kwargs: Any, # for backward-compatibility
      ):
          """
          Client for connecting to a Phoenix server.

          Args:
-             endpoint (str, optional): Phoenix server endpoint, e.g. http://localhost:6006. If not
-                 provided, the endpoint will be inferred from the environment variables.
+             endpoint (str, optional): Phoenix server endpoint, e.g.
+                 http://localhost:6006. If not provided, the endpoint will be
+                 inferred from the environment variables.
+
+             headers (Mapping[str, str], optional): Headers to include in each
+                 network request. If not provided, the headers will be inferred from
+                 the environment variables (if present).
          """
          if kwargs.pop("use_active_session_if_available", None) is not None:
              print(
@@ -52,17 +82,34 @@
              )
          if kwargs:
              raise TypeError(f"Unexpected keyword arguments: {', '.join(kwargs)}")
+         headers = headers or get_env_client_headers()
          host = get_env_host()
          if host == "0.0.0.0":
              host = "127.0.0.1"
          base_url = endpoint or get_env_collector_endpoint() or f"http://{host}:{get_env_port()}"
          self._base_url = base_url if base_url.endswith("/") else base_url + "/"
-
-         self._session = Session()
-         weakref.finalize(self, self._session.close)
+         self._client = httpx.Client(headers=headers)
+         weakref.finalize(self, self._client.close)
          if warn_if_server_not_running:
              self._warn_if_phoenix_is_not_running()

+     @property
+     def web_url(self) -> str:
+         """
+         Return the web URL of the Phoenix UI. This is different from the base
+         URL in the cases where there is a proxy like colab
+
+
+         Returns:
+             str: A fully qualified URL to the Phoenix UI.
+         """
+         # Avoid circular import
+         from phoenix.session.session import active_session
+
+         if session := active_session():
+             return session.url
+         return self._base_url
+
      def query_spans(
          self,
          *queries: SpanQuery,
@@ -98,9 +145,12 @@ class Client(TraceDataExtractor):
                  "stop_time is deprecated. Use end_time instead.",
              )
          end_time = end_time or stop_time
-         response = self._session.post(
+         response = self._client.post(
              url=urljoin(self._base_url, "v1/spans"),
-             params={"project-name": project_name},
+             params={
+                 "project_name": project_name,
+                 "project-name": project_name, # for backward-compatibility
+             },
              json={
                  "queries": [q.to_dict() for q in queries],
                  "start_time": _to_iso_format(normalize_datetime(start_time)),
@@ -145,9 +195,12 @@
                  empty list if no evaluations are found.
          """
          project_name = project_name or get_env_project_name()
-         response = self._session.get(
-             urljoin(self._base_url, "v1/evaluations"),
-             params={"project-name": project_name},
+         response = self._client.get(
+             url=urljoin(self._base_url, "v1/evaluations"),
+             params={
+                 "project_name": project_name,
+                 "project-name": project_name, # for backward-compatibility
+             },
          )
          if response.status_code == 404:
              logger.info("No evaluations found.")
@@ -167,7 +220,7 @@

      def _warn_if_phoenix_is_not_running(self) -> None:
          try:
-             self._session.get(urljoin(self._base_url, "arize_phoenix_version")).raise_for_status()
+             self._client.get(urljoin(self._base_url, "arize_phoenix_version")).raise_for_status()
          except Exception:
              logger.warning(
                  f"Arize Phoenix is not running on {self._base_url}. Launch Phoenix "
@@ -197,9 +250,9 @@
              headers = {"content-type": "application/x-pandas-arrow"}
              with pa.ipc.new_stream(sink, table.schema) as writer:
                  writer.write_table(table)
-             self._session.post(
-                 urljoin(self._base_url, "v1/evaluations"),
-                 data=cast(bytes, sink.getvalue().to_pybytes()),
+             self._client.post(
+                 url=urljoin(self._base_url, "v1/evaluations"),
+                 content=cast(bytes, sink.getvalue().to_pybytes()),
                  headers=headers,
              ).raise_for_status()

@@ -239,16 +292,522 @@
          ]
          for otlp_span in otlp_spans:
              serialized = otlp_span.SerializeToString()
-             data = gzip.compress(serialized)
-             self._session.post(
-                 urljoin(self._base_url, "v1/traces"),
-                 data=data,
+             content = gzip.compress(serialized)
+             self._client.post(
+                 url=urljoin(self._base_url, "v1/traces"),
+                 content=content,
                  headers={
                      "content-type": "application/x-protobuf",
                      "content-encoding": "gzip",
                  },
              ).raise_for_status()

+     def _get_dataset_id_by_name(self, name: str) -> str:
+         """
+         Gets a dataset by name.
+
+         Args:
+             name (str): The name of the dataset.
+             version_id (Optional[str]): The version ID of the dataset. Default None.
+
+         Returns:
+             Dataset: The dataset object.
+         """
+         response = self._client.get(
+             urljoin(self._base_url, "/v1/datasets"),
+             params={"name": name},
+         )
+         response.raise_for_status()
+         if not (records := response.json()["data"]):
+             raise ValueError(f"Failed to query dataset by name: {name}")
+         if len(records) > 1 or not records[0]:
+             raise ValueError(f"Failed to find a single dataset with the given name: {name}")
+         dataset = records[0]
+         return str(dataset["id"])
+
+     def get_dataset(
+         self,
+         *,
+         id: Optional[str] = None,
+         name: Optional[str] = None,
+         version_id: Optional[str] = None,
+     ) -> Dataset:
+         """
+         Gets the dataset for a specific version, or gets the latest version of
+         the dataset if no version is specified.
+
+         Args:
+
+             id (Optional[str]): An ID for the dataset.
+
+             name (Optional[str]): the name for the dataset. If provided, the ID
+                 is ignored and the dataset is retrieved by name.
+
+             version_id (Optional[str]): An ID for the version of the dataset, or
+                 None.
+
+         Returns:
+             A dataset object.
+         """
+         if name:
+             id = self._get_dataset_id_by_name(name)
+
+         if not id:
+             raise ValueError("Dataset id or name must be provided.")
+
+         response = self._client.get(
+             urljoin(self._base_url, f"/v1/datasets/{quote(id)}/examples"),
+             params={"version_id": version_id} if version_id else None,
+         )
+         response.raise_for_status()
+         data = response.json()["data"]
+         examples = {
+             example["id"]: Example(
+                 id=example["id"],
+                 input=example["input"],
+                 output=example["output"],
+                 metadata=example["metadata"],
+                 updated_at=datetime.fromisoformat(example["updated_at"]),
+             )
+             for example in data["examples"]
+         }
+         resolved_dataset_id = data["dataset_id"]
+         resolved_version_id = data["version_id"]
+         return Dataset(
+             id=resolved_dataset_id,
+             version_id=resolved_version_id,
+             examples=examples,
+         )
+
+     def get_dataset_versions(
+         self,
+         dataset_id: str,
+         /,
+         *,
+         limit: Optional[int] = 100,
+     ) -> pd.DataFrame:
+         """
+         Get dataset versions as pandas DataFrame.
+
+         Args:
+             dataset_id (str): dataset ID
+             limit (Optional[int]): maximum number of versions to return,
+                 starting from the most recent version
+
+         Returns:
+             pandas DataFrame
+         """
+         url = urljoin(self._base_url, f"v1/datasets/{dataset_id}/versions")
+         response = httpx.get(url=url, params={"limit": limit})
+         response.raise_for_status()
+         if not (records := response.json()["data"]):
+             return pd.DataFrame()
+         df = pd.DataFrame.from_records(records, index="version_id")
+         df["created_at"] = pd.to_datetime(df.created_at)
+         return df
+
+     def upload_dataset(
+         self,
+         *,
+         dataset_name: str,
+         dataframe: Optional[pd.DataFrame] = None,
+         csv_file_path: Optional[Union[str, Path]] = None,
+         input_keys: Iterable[str] = (),
+         output_keys: Iterable[str] = (),
+         metadata_keys: Iterable[str] = (),
+         inputs: Iterable[Mapping[str, Any]] = (),
+         outputs: Iterable[Mapping[str, Any]] = (),
+         metadata: Iterable[Mapping[str, Any]] = (),
+         dataset_description: Optional[str] = None,
+     ) -> Dataset:
+         """
+         Upload examples as dataset to the Phoenix server. If `dataframe` or
+         `csv_file_path` are provided, must also provide `input_keys` (and
+         optionally with `output_keys` or `metadata_keys` or both), which is a
+         list of strings denoting the column names in the dataframe or the csv
+         file. On the other hand, a sequence of dictionaries can also be provided
+         via `inputs` (and optionally with `outputs` or `metadat` or both), each
+         item of which represents a separate example in the dataset.
+
+         Args:
+             dataset_name: (str): Name of the dataset.
+             dataframe (pd.DataFrame): pandas DataFrame.
+             csv_file_path (str | Path): Location of a CSV text file
+             input_keys (Iterable[str]): List of column names used as input keys.
+                 input_keys, output_keys, metadata_keys must be disjoint, and must
+                 exist in CSV column headers.
+             output_keys (Iterable[str]): List of column names used as output keys.
+                 input_keys, output_keys, metadata_keys must be disjoint, and must
+                 exist in CSV column headers.
+             metadata_keys (Iterable[str]): List of column names used as metadata keys.
+                 input_keys, output_keys, metadata_keys must be disjoint, and must
+                 exist in CSV column headers.
+             inputs (Iterable[Mapping[str, Any]]): List of dictionaries object each
+                 corresponding to an example in the dataset.
+             outputs (Iterable[Mapping[str, Any]]): List of dictionaries object each
+                 corresponding to an example in the dataset.
+             metadata (Iterable[Mapping[str, Any]]): List of dictionaries object each
+                 corresponding to an example in the dataset.
+             dataset_description: (Optional[str]): Description of the dataset.
+
+         Returns:
+             A Dataset object with the uploaded examples.
+         """
+         if dataframe is not None or csv_file_path is not None:
+             if dataframe is not None and csv_file_path is not None:
+                 raise ValueError(
+                     "Please provide either `dataframe` or `csv_file_path`, but not both"
+                 )
+             if list(inputs) or list(outputs) or list(metadata):
+                 option = "dataframe" if dataframe is not None else "csv_file_path"
+                 raise ValueError(
+                     f"Please provide only either `{option}` or list of dictionaries "
+                     f"via `inputs` (with `outputs` and `metadata`) but not both."
+                 )
+             table = dataframe if dataframe is not None else csv_file_path
+             assert table is not None # for type-checker
+             return self._upload_tabular_dataset(
+                 table,
+                 dataset_name=dataset_name,
+                 input_keys=input_keys,
+                 output_keys=output_keys,
+                 metadata_keys=metadata_keys,
+                 dataset_description=dataset_description,
+             )
+         return self._upload_json_dataset(
+             dataset_name=dataset_name,
+             inputs=inputs,
+             outputs=outputs,
+             metadata=metadata,
+             dataset_description=dataset_description,
+         )
+
+     def append_to_dataset(
+         self,
+         *,
+         dataset_name: str,
+         dataframe: Optional[pd.DataFrame] = None,
+         csv_file_path: Optional[Union[str, Path]] = None,
+         input_keys: Iterable[str] = (),
+         output_keys: Iterable[str] = (),
+         metadata_keys: Iterable[str] = (),
+         inputs: Iterable[Mapping[str, Any]] = (),
+         outputs: Iterable[Mapping[str, Any]] = (),
+         metadata: Iterable[Mapping[str, Any]] = (),
+         dataset_description: Optional[str] = None,
+     ) -> Dataset:
+         """
+         Append examples to dataset on the Phoenix server. If `dataframe` or
+         `csv_file_path` are provided, must also provide `input_keys` (and
+         optionally with `output_keys` or `metadata_keys` or both), which is a
+         list of strings denoting the column names in the dataframe or the csv
+         file. On the other hand, a sequence of dictionaries can also be provided
+         via `inputs` (and optionally with `outputs` or `metadat` or both), each
+         item of which represents a separate example in the dataset.
+
+         Args:
+             dataset_name: (str): Name of the dataset.
+             dataframe (pd.DataFrame): pandas DataFrame.
+             csv_file_path (str | Path): Location of a CSV text file
+             input_keys (Iterable[str]): List of column names used as input keys.
+                 input_keys, output_keys, metadata_keys must be disjoint, and must
+                 exist in CSV column headers.
+             output_keys (Iterable[str]): List of column names used as output keys.
+                 input_keys, output_keys, metadata_keys must be disjoint, and must
+                 exist in CSV column headers.
+             metadata_keys (Iterable[str]): List of column names used as metadata keys.
+                 input_keys, output_keys, metadata_keys must be disjoint, and must
+                 exist in CSV column headers.
+             inputs (Iterable[Mapping[str, Any]]): List of dictionaries object each
+                 corresponding to an example in the dataset.
+             outputs (Iterable[Mapping[str, Any]]): List of dictionaries object each
+                 corresponding to an example in the dataset.
+             metadata (Iterable[Mapping[str, Any]]): List of dictionaries object each
+                 corresponding to an example in the dataset.
+             dataset_description: (Optional[str]): Description of the dataset.
+
+         Returns:
+             A Dataset object with its examples.
+         """
+         if dataframe is not None or csv_file_path is not None:
+             if dataframe is not None and csv_file_path is not None:
+                 raise ValueError(
+                     "Please provide either `dataframe` or `csv_file_path`, but not both"
+                 )
+             if list(inputs) or list(outputs) or list(metadata):
+                 option = "dataframe" if dataframe is not None else "csv_file_path"
+                 raise ValueError(
+                     f"Please provide only either `{option}` or list of dictionaries "
+                     f"via `inputs` (with `outputs` and `metadata`) but not both."
+                 )
+             table = dataframe if dataframe is not None else csv_file_path
+             assert table is not None # for type-checker
+             return self._upload_tabular_dataset(
+                 table,
+                 dataset_name=dataset_name,
+                 input_keys=input_keys,
+                 output_keys=output_keys,
+                 metadata_keys=metadata_keys,
+                 dataset_description=dataset_description,
+                 action="append",
+             )
+         return self._upload_json_dataset(
+             dataset_name=dataset_name,
+             inputs=inputs,
+             outputs=outputs,
+             metadata=metadata,
+             dataset_description=dataset_description,
+             action="append",
+         )
+
+     def _upload_tabular_dataset(
+         self,
+         table: Union[str, Path, pd.DataFrame],
+         /,
+         *,
+         dataset_name: str,
+         input_keys: Iterable[str],
+         output_keys: Iterable[str] = (),
+         metadata_keys: Iterable[str] = (),
+         dataset_description: Optional[str] = None,
+         action: DatasetAction = "create",
+     ) -> Dataset:
+         """
+         Upload examples as dataset to the Phoenix server.
+
+         Args:
+             table (str | Path | pd.DataFrame): Location of a CSV text file, or
+                 pandas DataFrame.
+             dataset_name: (str): Name of the dataset. Required if action=append.
+             input_keys (Iterable[str]): List of column names used as input keys.
+                 input_keys, output_keys, metadata_keys must be disjoint, and must
+                 exist in CSV column headers.
+             output_keys (Iterable[str]): List of column names used as output keys.
+                 input_keys, output_keys, metadata_keys must be disjoint, and must
+                 exist in CSV column headers.
+             metadata_keys (Iterable[str]): List of column names used as metadata keys.
+                 input_keys, output_keys, metadata_keys must be disjoint, and must
+                 exist in CSV column headers.
+             dataset_description: (Optional[str]): Description of the dataset.
+             action: (Literal["create", "append"]): Create new dataset or append to an
+                 existing one. If action="append" and dataset does not exist, it'll
+                 be created.
+
+         Returns:
+             A Dataset object with the uploaded examples.
+         """
+         if action not in ("create", "append"):
+             raise ValueError(f"Invalid action: {action}")
+         if not dataset_name:
+             raise ValueError("Dataset name must not be blank")
+         input_keys, output_keys, metadata_keys = (
+             (keys,) if isinstance(keys, str) else (keys or ())
+             for keys in (input_keys, output_keys, metadata_keys)
+         )
+         if not any(map(bool, (input_keys, output_keys, metadata_keys))):
+             input_keys, output_keys, metadata_keys = _infer_keys(table)
+         keys = DatasetKeys(
+             frozenset(input_keys),
+             frozenset(output_keys),
+             frozenset(metadata_keys),
+         )
+         if isinstance(table, pd.DataFrame):
+             file = _prepare_pyarrow(table, keys)
+         elif isinstance(table, (str, Path)):
+             file = _prepare_csv(Path(table), keys)
+         else:
+             assert_never(table)
+         print("📤 Uploading dataset...")
+         response = self._client.post(
+             url=urljoin(self._base_url, "v1/datasets/upload"),
+             files={"file": file},
+             data={
+                 "action": action,
+                 "name": dataset_name,
+                 "description": dataset_description,
+                 "input_keys[]": sorted(keys.input),
+                 "output_keys[]": sorted(keys.output),
+                 "metadata_keys[]": sorted(keys.metadata),
+             },
+             params={"sync": True},
+         )
+         return self._process_dataset_upload_response(response)
+
+     def _upload_json_dataset(
+         self,
+         *,
+         dataset_name: str,
+         inputs: Iterable[Mapping[str, Any]],
+         outputs: Iterable[Mapping[str, Any]] = (),
+         metadata: Iterable[Mapping[str, Any]] = (),
+         dataset_description: Optional[str] = None,
+         action: DatasetAction = "create",
+     ) -> Dataset:
+         """
+         Upload examples as dataset to the Phoenix server.
+
+         Args:
+             dataset_name: (str): Name of the dataset
+             inputs (Iterable[Mapping[str, Any]]): List of dictionaries object each
+                 corresponding to an example in the dataset.
+             outputs (Iterable[Mapping[str, Any]]): List of dictionaries object each
+                 corresponding to an example in the dataset.
+             metadata (Iterable[Mapping[str, Any]]): List of dictionaries object each
+                 corresponding to an example in the dataset.
+             dataset_description: (Optional[str]): Description of the dataset.
+             action: (Literal["create", "append"]): Create new dataset or append to an
+                 existing one. If action="append" and dataset does not exist, it'll
+                 be created.
+
+         Returns:
+             A Dataset object with the uploaded examples.
+         """
+         # convert to list to avoid issues with pandas Series
+         inputs, outputs, metadata = list(inputs), list(outputs), list(metadata)
+         if not inputs or not _is_all_dict(inputs):
+             raise ValueError(
+                 "`inputs` should be a non-empty sequence containing only dictionary objects"
+             )
+         for name, seq in {"outputs": outputs, "metadata": metadata}.items():
+             if seq and not (len(seq) == len(inputs) and _is_all_dict(seq)):
+                 raise ValueError(
+                     f"`{name}` should be a sequence of the same length as `inputs` "
+                     "containing only dictionary objects"
+                 )
+         print("📤 Uploading dataset...")
+         response = self._client.post(
+             url=urljoin(self._base_url, "v1/datasets/upload"),
+             headers={"Content-Encoding": "gzip"},
+             json={
+                 "action": action,
+                 "name": dataset_name,
+                 "description": dataset_description,
+                 "inputs": inputs,
+                 "outputs": outputs,
+                 "metadata": metadata,
+             },
+             params={"sync": True},
+         )
+         return self._process_dataset_upload_response(response)
+
+     def _process_dataset_upload_response(self, response: Response) -> Dataset:
+         try:
+             response.raise_for_status()
+         except HTTPStatusError as e:
+             if msg := response.text:
+                 raise DatasetUploadError(msg) from e
+             raise
+         data = response.json()["data"]
+         dataset_id = data["dataset_id"]
+         response = self._client.get(
+             url=urljoin(self._base_url, f"v1/datasets/{dataset_id}/examples")
+         )
+         response.raise_for_status()
+         data = response.json()["data"]
+         version_id = data["version_id"]
+         examples = data["examples"]
+         print(f"💾 Examples uploaded: {self.web_url}datasets/{dataset_id}/examples")
+         print(f"🗄️ Dataset version ID: {version_id}")
+
+         return Dataset(
+             id=dataset_id,
+             version_id=version_id,
+             examples={
+                 example["id"]: Example(
+                     id=example["id"],
+                     input=example["input"],
+                     output=example["output"],
+                     metadata=example["metadata"],
+                     updated_at=datetime.fromisoformat(example["updated_at"]),
+                 )
+                 for example in examples
+             },
+         )
+
+
+ FileName: TypeAlias = str
+ FilePointer: TypeAlias = BinaryIO
+ FileType: TypeAlias = str
+ FileHeaders: TypeAlias = Dict[str, str]
+
+
+ def _get_csv_column_headers(path: Path) -> Tuple[str, ...]:
+     path = path.resolve()
+     if not path.is_file():
+         raise FileNotFoundError(f"File does not exist: {path}")
+     with open(path, "r") as f:
+         rows = csv.reader(f)
+         try:
+             column_headers = tuple(next(rows))
+             _ = next(rows)
+         except StopIteration:
+             raise ValueError("csv file has no data")
+     return column_headers
+
+
+ def _prepare_csv(
+     path: Path,
+     keys: DatasetKeys,
+ ) -> Tuple[FileName, FilePointer, FileType, FileHeaders]:
+     column_headers = _get_csv_column_headers(path)
+     (header, freq), *_ = Counter(column_headers).most_common(1)
+     if freq > 1:
+         raise ValueError(f"Duplicated column header in CSV file: {header}")
+     keys.check_differences(frozenset(column_headers))
+     file = BytesIO()
+     with open(path, "rb") as f:
+         file.write(gzip.compress(f.read()))
+     return path.name, file, "text/csv", {"Content-Encoding": "gzip"}
+
+
+ def _prepare_pyarrow(
+     df: pd.DataFrame,
+     keys: DatasetKeys,
+ ) -> Tuple[FileName, FilePointer, FileType, FileHeaders]:
+     if df.empty:
+         raise ValueError("dataframe has no data")
+     (header, freq), *_ = Counter(df.columns).most_common(1)
+     if freq > 1:
+         raise ValueError(f"Duplicated column header in file: {header}")
+     keys.check_differences(frozenset(df.columns))
+     table = Table.from_pandas(df.loc[:, list(keys)])
+     sink = pa.BufferOutputStream()
+     options = pa.ipc.IpcWriteOptions(compression="lz4")
+     with pa.ipc.new_stream(sink, table.schema, options=options) as writer:
+         writer.write_table(table)
+     file = BytesIO(sink.getvalue().to_pybytes())
+     return "pandas", file, "application/x-pandas-pyarrow", {}
+
+
+ _response_header = re.compile(r"(?i)(response|answer|output)s*$")
+
+
+ def _infer_keys(
+     table: Union[str, Path, pd.DataFrame],
+ ) -> Tuple[Tuple[str, ...], Tuple[str, ...], Tuple[str, ...]]:
+     column_headers = (
+         tuple(table.columns)
+         if isinstance(table, pd.DataFrame)
+         else _get_csv_column_headers(Path(table))
+     )
+     for i, header in enumerate(column_headers):
+         if _response_header.search(header):
+             break
+     else:
+         i = len(column_headers)
+     return (
+         column_headers[:i],
+         column_headers[i : i + 1],
+         column_headers[i + 1 :],
+     )
+

  def _to_iso_format(value: Optional[datetime]) -> Optional[str]:
      return value.isoformat() if value else None
+
+
+ def _is_all_dict(seq: Sequence[Any]) -> bool:
+     return all(map(lambda obj: isinstance(obj, dict), seq))
+
+
+ class DatasetUploadError(Exception): ...
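
For orientation, here is a minimal usage sketch of the dataset client API added in this release, based only on the method signatures shown in the diff above. The dataset name, column names, example rows, and authorization header are hypothetical, and a Phoenix server must be reachable at the endpoint.

    import pandas as pd
    from phoenix.session.client import Client

    # Headers are now forwarded on every request via the underlying httpx.Client;
    # if omitted, they are read from the environment (get_env_client_headers).
    client = Client(
        endpoint="http://localhost:6006",
        headers={"authorization": "Bearer <token>"},  # hypothetical auth header
    )

    # Tabular path: column names are split into input/output/metadata keys.
    df = pd.DataFrame(
        {
            "question": ["What is Phoenix?"],
            "answer": ["An open-source observability library."],
        }
    )
    dataset = client.upload_dataset(
        dataset_name="faq-examples",  # hypothetical dataset name
        dataframe=df,
        input_keys=("question",),
        output_keys=("answer",),
        dataset_description="toy example",
    )

    # JSON path: parallel sequences of dicts, appended to the same dataset.
    client.append_to_dataset(
        dataset_name="faq-examples",
        inputs=[{"question": "What is a span?"}],
        outputs=[{"answer": "A single unit of work in a trace."}],
    )

    # Retrieval: latest version by name (or pin one via version_id), plus
    # the version history as a pandas DataFrame.
    latest = client.get_dataset(name="faq-examples")
    versions = client.get_dataset_versions(latest.id, limit=10)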