arize-phoenix 4.4.4rc5__py3-none-any.whl → 4.4.4rc6__py3-none-any.whl

This diff compares publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between these versions as they appear in their public registries.

This version of arize-phoenix has been flagged as a potentially problematic release.

Files changed (42)
  1. {arize_phoenix-4.4.4rc5.dist-info → arize_phoenix-4.4.4rc6.dist-info}/METADATA +11 -5
  2. {arize_phoenix-4.4.4rc5.dist-info → arize_phoenix-4.4.4rc6.dist-info}/RECORD +39 -36
  3. phoenix/config.py +21 -0
  4. phoenix/datetime_utils.py +4 -0
  5. phoenix/db/insertion/evaluation.py +4 -4
  6. phoenix/db/insertion/helpers.py +4 -12
  7. phoenix/db/insertion/span.py +3 -3
  8. phoenix/db/models.py +1 -1
  9. phoenix/experiments/__init__.py +6 -0
  10. phoenix/experiments/evaluators/__init__.py +29 -0
  11. phoenix/experiments/evaluators/base.py +153 -0
  12. phoenix/{datasets → experiments}/evaluators/code_evaluators.py +7 -7
  13. phoenix/{datasets → experiments}/evaluators/llm_evaluators.py +9 -9
  14. phoenix/{datasets → experiments}/evaluators/utils.py +38 -141
  15. phoenix/{datasets/experiments.py → experiments/functions.py} +248 -182
  16. phoenix/experiments/types.py +722 -0
  17. phoenix/experiments/utils.py +9 -0
  18. phoenix/server/api/context.py +2 -0
  19. phoenix/server/api/dataloaders/__init__.py +2 -0
  20. phoenix/server/api/dataloaders/average_experiment_run_latency.py +54 -0
  21. phoenix/server/api/routers/v1/__init__.py +1 -1
  22. phoenix/server/api/routers/v1/dataset_examples.py +10 -10
  23. phoenix/server/api/routers/v1/datasets.py +6 -6
  24. phoenix/server/api/routers/v1/evaluations.py +4 -11
  25. phoenix/server/api/routers/v1/experiment_evaluations.py +22 -23
  26. phoenix/server/api/routers/v1/experiment_runs.py +4 -16
  27. phoenix/server/api/routers/v1/experiments.py +5 -5
  28. phoenix/server/api/routers/v1/spans.py +6 -4
  29. phoenix/server/api/types/Experiment.py +7 -0
  30. phoenix/server/app.py +2 -0
  31. phoenix/server/static/index.js +648 -570
  32. phoenix/session/client.py +256 -85
  33. phoenix/trace/fixtures.py +6 -6
  34. phoenix/utilities/json.py +8 -8
  35. phoenix/version.py +1 -1
  36. phoenix/datasets/__init__.py +0 -0
  37. phoenix/datasets/evaluators/__init__.py +0 -18
  38. phoenix/datasets/types.py +0 -178
  39. {arize_phoenix-4.4.4rc5.dist-info → arize_phoenix-4.4.4rc6.dist-info}/WHEEL +0 -0
  40. {arize_phoenix-4.4.4rc5.dist-info → arize_phoenix-4.4.4rc6.dist-info}/licenses/IP_NOTICE +0 -0
  41. {arize_phoenix-4.4.4rc5.dist-info → arize_phoenix-4.4.4rc6.dist-info}/licenses/LICENSE +0 -0
  42. /phoenix/{datasets → experiments}/tracing.py +0 -0
phoenix/session/client.py CHANGED
@@ -1,6 +1,7 @@
  import csv
  import gzip
  import logging
+ import re
  import weakref
  from collections import Counter
  from datetime import datetime
@@ -25,7 +26,7 @@ from urllib.parse import quote, urljoin
  import httpx
  import pandas as pd
  import pyarrow as pa
- from httpx import HTTPStatusError
+ from httpx import HTTPStatusError, Response
  from opentelemetry.proto.collector.trace.v1.trace_service_pb2 import ExportTraceServiceRequest
  from opentelemetry.proto.common.v1.common_pb2 import AnyValue, KeyValue
  from opentelemetry.proto.resource.v1.resource_pb2 import Resource
@@ -40,9 +41,9 @@ from phoenix.config import (
  get_env_port,
  get_env_project_name,
  )
- from phoenix.datasets.types import Dataset, Example
  from phoenix.datetime_utils import normalize_datetime
  from phoenix.db.insertion.dataset import DatasetKeys
+ from phoenix.experiments.types import Dataset, Example
  from phoenix.session.data_extractor import DEFAULT_SPAN_LIMIT, TraceDataExtractor
  from phoenix.trace import Evaluations, TraceDataset
  from phoenix.trace.dsl import SpanQuery
@@ -146,7 +147,10 @@ class Client(TraceDataExtractor):
  end_time = end_time or stop_time
  response = self._client.post(
  url=urljoin(self._base_url, "v1/spans"),
- params={"project-name": project_name},
+ params={
+ "project_name": project_name,
+ "project-name": project_name, # for backward-compatibility
+ },
  json={
  "queries": [q.to_dict() for q in queries],
  "start_time": _to_iso_format(normalize_datetime(start_time)),
@@ -193,7 +197,10 @@ class Client(TraceDataExtractor):
  project_name = project_name or get_env_project_name()
  response = self._client.get(
  url=urljoin(self._base_url, "v1/evaluations"),
- params={"project-name": project_name},
+ params={
+ "project_name": project_name,
+ "project-name": project_name, # for backward-compatibility
+ },
  )
  if response.status_code == 404:
  logger.info("No evaluations found.")
@@ -350,12 +357,12 @@ class Client(TraceDataExtractor):

  response = self._client.get(
  urljoin(self._base_url, f"/v1/datasets/{quote(id)}/examples"),
- params={"version-id": version_id} if version_id else None,
+ params={"version_id": version_id} if version_id else None,
  )
  response.raise_for_status()
  data = response.json()["data"]
- examples = [
- Example(
+ examples = {
+ example["id"]: Example(
  id=example["id"],
  input=example["input"],
  output=example["output"],
@@ -363,7 +370,7 @@ class Client(TraceDataExtractor):
  updated_at=datetime.fromisoformat(example["updated_at"]),
  )
  for example in data["examples"]
- ]
+ }
  resolved_dataset_id = data["dataset_id"]
  resolved_version_id = data["version_id"]
  return Dataset(
@@ -420,7 +427,7 @@ class Client(TraceDataExtractor):
  url = f"v1/datasets/{dataset_id}/csv"
  response = httpx.get(
  url=urljoin(self._base_url, url),
- params={"version": dataset_version_id} if dataset_version_id else {},
+ params={"version_id": dataset_version_id} if dataset_version_id else {},
  )
  response.raise_for_status()
  return pd.read_csv(
@@ -428,20 +435,42 @@ class Client(TraceDataExtractor):
  index_col="example_id",
  )

- def create_examples(
+ def upload_dataset(
  self,
  *,
  dataset_name: str,
- inputs: Iterable[Mapping[str, Any]],
+ dataframe: Optional[pd.DataFrame] = None,
+ csv_file_path: Optional[Union[str, Path]] = None,
+ input_keys: Iterable[str] = (),
+ output_keys: Iterable[str] = (),
+ metadata_keys: Iterable[str] = (),
+ inputs: Iterable[Mapping[str, Any]] = (),
  outputs: Iterable[Mapping[str, Any]] = (),
  metadata: Iterable[Mapping[str, Any]] = (),
  dataset_description: Optional[str] = None,
  ) -> Dataset:
  """
- Upload examples as dataset to the Phoenix server.
+ Upload examples as dataset to the Phoenix server. If `dataframe` or
+ `csv_file_path` are provided, must also provide `input_keys` (and
+ optionally with `output_keys` or `metadata_keys` or both), which is a
+ list of strings denoting the column names in the dataframe or the csv
+ file. On the other hand, a sequence of dictionaries can also be provided
+ via `inputs` (and optionally with `outputs` or `metadata` or both), each
+ item of which represents a separate example in the dataset.

  Args:
- dataset_name: (str): Name of the dataset
+ dataset_name: (str): Name of the dataset.
+ dataframe (pd.DataFrame): pandas DataFrame.
+ csv_file_path (str | Path): Location of a CSV text file
+ input_keys (Iterable[str]): List of column names used as input keys.
+ input_keys, output_keys, metadata_keys must be disjoint, and must
+ exist in CSV column headers.
+ output_keys (Iterable[str]): List of column names used as output keys.
+ input_keys, output_keys, metadata_keys must be disjoint, and must
+ exist in CSV column headers.
+ metadata_keys (Iterable[str]): List of column names used as metadata keys.
+ input_keys, output_keys, metadata_keys must be disjoint, and must
+ exist in CSV column headers.
  inputs (Iterable[Mapping[str, Any]]): List of dictionaries object each
  corresponding to an example in the dataset.
  outputs (Iterable[Mapping[str, Any]]): List of dictionaries object each
@@ -453,77 +482,124 @@ class Client(TraceDataExtractor):
  Returns:
  A Dataset object with the uploaded examples.
  """
- # convert to list to avoid issues with pandas Series
- inputs, outputs, metadata = list(inputs), list(outputs), list(metadata)
- if not inputs or not _is_all_dict(inputs):
- raise ValueError(
- "`inputs` should be a non-empty sequence containing only dictionary objects"
- )
- for name, seq in {"outputs": outputs, "metadata": metadata}.items():
- if seq and not (len(seq) == len(inputs) and _is_all_dict(seq)):
+ if dataframe is not None or csv_file_path is not None:
+ if dataframe is not None and csv_file_path is not None:
  raise ValueError(
- f"`{name}` should be a sequence of the same length as `inputs` "
- "containing only dictionary objects"
+ "Please provide either `dataframe` or `csv_file_path`, but not both"
  )
- action: DatasetAction = "create"
- print("📤 Uploading dataset...")
- response = self._client.post(
- url=urljoin(self._base_url, "v1/datasets/upload"),
- headers={"Content-Encoding": "gzip"},
- json={
- "action": action,
- "name": dataset_name,
- "description": dataset_description,
- "inputs": inputs,
- "outputs": outputs,
- "metadata": metadata,
- },
- params={"sync": True},
- )
- try:
- response.raise_for_status()
- except HTTPStatusError as e:
- if msg := response.text:
- raise DatasetUploadError(msg) from e
- raise
- data = response.json()["data"]
- dataset_id = data["dataset_id"]
- response = self._client.get(
- url=urljoin(self._base_url, f"v1/datasets/{dataset_id}/examples")
+ if list(inputs) or list(outputs) or list(metadata):
+ option = "dataframe" if dataframe is not None else "csv_file_path"
+ raise ValueError(
+ f"Please provide only either `{option}` or list of dictionaries "
+ f"via `inputs` (with `outputs` and `metadata`) but not both."
+ )
+ table = dataframe if dataframe is not None else csv_file_path
+ assert table is not None # for type-checker
+ return self._upload_tabular_dataset(
+ table,
+ dataset_name=dataset_name,
+ input_keys=input_keys,
+ output_keys=output_keys,
+ metadata_keys=metadata_keys,
+ dataset_description=dataset_description,
+ )
+ return self._upload_json_dataset(
+ dataset_name=dataset_name,
+ inputs=inputs,
+ outputs=outputs,
+ metadata=metadata,
+ dataset_description=dataset_description,
  )
- response.raise_for_status()
- data = response.json()["data"]
- version_id = data["version_id"]
- examples = data["examples"]
- print(f"💾 Examples uploaded: {self.web_url}datasets/{dataset_id}/examples")
- print(f"🗄️ Dataset version ID: {version_id}")

- return Dataset(
- id=dataset_id,
- version_id=version_id,
- examples=[
- Example(
- id=example["id"],
- input=example["input"],
- output=example["output"],
- metadata=example["metadata"],
- updated_at=datetime.fromisoformat(example["updated_at"]),
+ def append_to_dataset(
+ self,
+ *,
+ dataset_name: str,
+ dataframe: Optional[pd.DataFrame] = None,
+ csv_file_path: Optional[Union[str, Path]] = None,
+ input_keys: Iterable[str] = (),
+ output_keys: Iterable[str] = (),
+ metadata_keys: Iterable[str] = (),
+ inputs: Iterable[Mapping[str, Any]] = (),
+ outputs: Iterable[Mapping[str, Any]] = (),
+ metadata: Iterable[Mapping[str, Any]] = (),
+ dataset_description: Optional[str] = None,
+ ) -> Dataset:
+ """
+ Append examples to dataset on the Phoenix server. If `dataframe` or
+ `csv_file_path` are provided, must also provide `input_keys` (and
+ optionally with `output_keys` or `metadata_keys` or both), which is a
+ list of strings denoting the column names in the dataframe or the csv
+ file. On the other hand, a sequence of dictionaries can also be provided
+ via `inputs` (and optionally with `outputs` or `metadata` or both), each
+ item of which represents a separate example in the dataset.
+
+ Args:
+ dataset_name: (str): Name of the dataset.
+ dataframe (pd.DataFrame): pandas DataFrame.
+ csv_file_path (str | Path): Location of a CSV text file
+ input_keys (Iterable[str]): List of column names used as input keys.
+ input_keys, output_keys, metadata_keys must be disjoint, and must
+ exist in CSV column headers.
+ output_keys (Iterable[str]): List of column names used as output keys.
+ input_keys, output_keys, metadata_keys must be disjoint, and must
+ exist in CSV column headers.
+ metadata_keys (Iterable[str]): List of column names used as metadata keys.
+ input_keys, output_keys, metadata_keys must be disjoint, and must
+ exist in CSV column headers.
+ inputs (Iterable[Mapping[str, Any]]): List of dictionaries object each
+ corresponding to an example in the dataset.
+ outputs (Iterable[Mapping[str, Any]]): List of dictionaries object each
+ corresponding to an example in the dataset.
+ metadata (Iterable[Mapping[str, Any]]): List of dictionaries object each
+ corresponding to an example in the dataset.
+ dataset_description: (Optional[str]): Description of the dataset.
+
+ Returns:
+ A Dataset object with its examples.
+ """
+ if dataframe is not None or csv_file_path is not None:
+ if dataframe is not None and csv_file_path is not None:
+ raise ValueError(
+ "Please provide either `dataframe` or `csv_file_path`, but not both"
  )
- for example in examples
- ],
+ if list(inputs) or list(outputs) or list(metadata):
+ option = "dataframe" if dataframe is not None else "csv_file_path"
+ raise ValueError(
+ f"Please provide only either `{option}` or list of dictionaries "
+ f"via `inputs` (with `outputs` and `metadata`) but not both."
+ )
+ table = dataframe if dataframe is not None else csv_file_path
+ assert table is not None # for type-checker
+ return self._upload_tabular_dataset(
+ table,
+ dataset_name=dataset_name,
+ input_keys=input_keys,
+ output_keys=output_keys,
+ metadata_keys=metadata_keys,
+ dataset_description=dataset_description,
+ action="append",
+ )
+ return self._upload_json_dataset(
+ dataset_name=dataset_name,
+ inputs=inputs,
+ outputs=outputs,
+ metadata=metadata,
+ dataset_description=dataset_description,
+ action="append",
  )

- def upload_dataset(
+ def _upload_tabular_dataset(
  self,
  table: Union[str, Path, pd.DataFrame],
  /,
  *,
- name: str,
+ dataset_name: str,
  input_keys: Iterable[str],
  output_keys: Iterable[str] = (),
  metadata_keys: Iterable[str] = (),
- description: Optional[str] = None,
- action: Literal["create", "append"] = "create",
+ dataset_description: Optional[str] = None,
+ action: DatasetAction = "create",
  ) -> Dataset:
  """
  Upload examples as dataset to the Phoenix server.
@@ -531,7 +607,7 @@ class Client(TraceDataExtractor):
  Args:
  table (str | Path | pd.DataFrame): Location of a CSV text file, or
  pandas DataFrame.
- name: (str): Name of the dataset. Required if action=append.
+ dataset_name: (str): Name of the dataset. Required if action=append.
  input_keys (Iterable[str]): List of column names used as input keys.
  input_keys, output_keys, metadata_keys must be disjoint, and must
  exist in CSV column headers.
@@ -541,17 +617,24 @@ class Client(TraceDataExtractor):
  metadata_keys (Iterable[str]): List of column names used as metadata keys.
  input_keys, output_keys, metadata_keys must be disjoint, and must
  exist in CSV column headers.
- description: (Optional[str]): Description of the dataset.
- action: (Literal["create", "append"): Create new dataset or append to an
- existing dataset. If action=append, dataset name is required.
+ dataset_description: (Optional[str]): Description of the dataset.
+ action: (Literal["create", "append"]): Create new dataset or append to an
+ existing one. If action="append" and dataset does not exist, it'll
+ be created.

  Returns:
  A Dataset object with the uploaded examples.
  """
  if action not in ("create", "append"):
  raise ValueError(f"Invalid action: {action}")
- if not name:
+ if not dataset_name:
  raise ValueError("Dataset name must not be blank")
+ input_keys, output_keys, metadata_keys = (
+ (keys,) if isinstance(keys, str) else (keys or ())
+ for keys in (input_keys, output_keys, metadata_keys)
+ )
+ if not any(map(bool, (input_keys, output_keys, metadata_keys))):
+ input_keys, output_keys, metadata_keys = _infer_keys(table)
  keys = DatasetKeys(
  frozenset(input_keys),
  frozenset(output_keys),
@@ -569,14 +652,74 @@ class Client(TraceDataExtractor):
  files={"file": file},
  data={
  "action": action,
- "name": name,
- "description": description,
+ "name": dataset_name,
+ "description": dataset_description,
  "input_keys[]": sorted(keys.input),
  "output_keys[]": sorted(keys.output),
  "metadata_keys[]": sorted(keys.metadata),
  },
  params={"sync": True},
  )
+ return self._process_dataset_upload_response(response)
+
+ def _upload_json_dataset(
+ self,
+ *,
+ dataset_name: str,
+ inputs: Iterable[Mapping[str, Any]],
+ outputs: Iterable[Mapping[str, Any]] = (),
+ metadata: Iterable[Mapping[str, Any]] = (),
+ dataset_description: Optional[str] = None,
+ action: DatasetAction = "create",
+ ) -> Dataset:
+ """
+ Upload examples as dataset to the Phoenix server.
+
+ Args:
+ dataset_name: (str): Name of the dataset
+ inputs (Iterable[Mapping[str, Any]]): List of dictionaries object each
+ corresponding to an example in the dataset.
+ outputs (Iterable[Mapping[str, Any]]): List of dictionaries object each
+ corresponding to an example in the dataset.
+ metadata (Iterable[Mapping[str, Any]]): List of dictionaries object each
+ corresponding to an example in the dataset.
+ dataset_description: (Optional[str]): Description of the dataset.
+ action: (Literal["create", "append"]): Create new dataset or append to an
+ existing one. If action="append" and dataset does not exist, it'll
+ be created.
+
+ Returns:
+ A Dataset object with the uploaded examples.
+ """
+ # convert to list to avoid issues with pandas Series
+ inputs, outputs, metadata = list(inputs), list(outputs), list(metadata)
+ if not inputs or not _is_all_dict(inputs):
+ raise ValueError(
+ "`inputs` should be a non-empty sequence containing only dictionary objects"
+ )
+ for name, seq in {"outputs": outputs, "metadata": metadata}.items():
+ if seq and not (len(seq) == len(inputs) and _is_all_dict(seq)):
+ raise ValueError(
+ f"`{name}` should be a sequence of the same length as `inputs` "
+ "containing only dictionary objects"
+ )
+ print("📤 Uploading dataset...")
+ response = self._client.post(
+ url=urljoin(self._base_url, "v1/datasets/upload"),
+ headers={"Content-Encoding": "gzip"},
+ json={
+ "action": action,
+ "name": dataset_name,
+ "description": dataset_description,
+ "inputs": inputs,
+ "outputs": outputs,
+ "metadata": metadata,
+ },
+ params={"sync": True},
+ )
+ return self._process_dataset_upload_response(response)
+
+ def _process_dataset_upload_response(self, response: Response) -> Dataset:
  try:
  response.raise_for_status()
  except HTTPStatusError as e:
@@ -598,8 +741,8 @@ class Client(TraceDataExtractor):
  return Dataset(
  id=dataset_id,
  version_id=version_id,
- examples=[
- Example(
+ examples={
+ example["id"]: Example(
  id=example["id"],
  input=example["input"],
  output=example["output"],
@@ -607,7 +750,7 @@ class Client(TraceDataExtractor):
  updated_at=datetime.fromisoformat(example["updated_at"]),
  )
  for example in examples
- ],
+ },
  )

@@ -617,20 +760,25 @@ FileType: TypeAlias = str
  FileHeaders: TypeAlias = Dict[str, str]


- def _prepare_csv(
- path: Path,
- keys: DatasetKeys,
- ) -> Tuple[FileName, FilePointer, FileType, FileHeaders]:
+ def _get_csv_column_headers(path: Path) -> Tuple[str, ...]:
  path = path.resolve()
  if not path.is_file():
  raise FileNotFoundError(f"File does not exist: {path}")
  with open(path, "r") as f:
  rows = csv.reader(f)
  try:
- column_headers = next(rows)
+ column_headers = tuple(next(rows))
  _ = next(rows)
  except StopIteration:
  raise ValueError("csv file has no data")
+ return column_headers
+
+
+ def _prepare_csv(
+ path: Path,
+ keys: DatasetKeys,
+ ) -> Tuple[FileName, FilePointer, FileType, FileHeaders]:
+ column_headers = _get_csv_column_headers(path)
  (header, freq), *_ = Counter(column_headers).most_common(1)
  if freq > 1:
  raise ValueError(f"Duplicated column header in CSV file: {header}")
@@ -660,6 +808,29 @@ def _prepare_pyarrow(
  return "pandas", file, "application/x-pandas-pyarrow", {}


+ _response_header = re.compile(r"(?i)(response|answer)s*$")
+
+
+ def _infer_keys(
+ table: Union[str, Path, pd.DataFrame],
+ ) -> Tuple[Tuple[str, ...], Tuple[str, ...], Tuple[str, ...]]:
+ column_headers = (
+ tuple(table.columns)
+ if isinstance(table, pd.DataFrame)
+ else _get_csv_column_headers(Path(table))
+ )
+ for i, header in enumerate(column_headers):
+ if _response_header.search(header):
+ break
+ else:
+ i = len(column_headers)
+ return (
+ column_headers[:i],
+ column_headers[i : i + 1],
+ column_headers[i + 1 :],
+ )
+
+
  def _to_iso_format(value: Optional[datetime]) -> Optional[str]:
  return value.isoformat() if value else None

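The net effect of the client.py changes above is that `create_examples` and the old positional `upload_dataset` are folded into keyword-only `upload_dataset` and `append_to_dataset` methods that dispatch to the private `_upload_tabular_dataset` or `_upload_json_dataset` helpers. A rough usage sketch based on those signatures (the dataset name and column names below are made up for illustration, and a running Phoenix server reachable by `phoenix.Client` is assumed):

    import pandas as pd
    import phoenix as px

    client = px.Client()

    # Tabular upload: columns are assigned to input/output/metadata roles by name.
    df = pd.DataFrame({"question": ["What is 2 + 2?"], "answer": ["4"]})
    dataset = client.upload_dataset(
        dataset_name="arithmetic-demo",  # hypothetical dataset name
        dataframe=df,
        input_keys=("question",),
        output_keys=("answer",),
    )

    # JSON-style upload: one dictionary per example instead of a table.
    client.append_to_dataset(
        dataset_name="arithmetic-demo",
        inputs=[{"question": "What is 3 + 3?"}],
        outputs=[{"answer": "6"}],
    )

If no keys are supplied for a tabular upload, the new `_infer_keys` helper applies a heuristic: the first column whose header ends in "response" or "answer" (case-insensitive, optionally pluralized) becomes the output column, every column before it is treated as input, and every column after it as metadata.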
phoenix/trace/fixtures.py CHANGED
@@ -244,12 +244,12 @@ def send_dataset_fixtures(
  try:
  if i % 2:
  client.upload_dataset(
- fixture.dataframe,
- name=fixture.name,
+ dataset_name=fixture.name,
+ dataframe=fixture.dataframe,
  input_keys=fixture.input_keys,
  output_keys=fixture.output_keys,
  metadata_keys=fixture.metadata_keys,
- description=fixture.description,
+ dataset_description=fixture.description,
  )
  else:
  with NamedTemporaryFile() as tf:
@@ -257,12 +257,12 @@ def send_dataset_fixtures(
  shutil.copyfileobj(fixture.csv, f)
  f.flush()
  client.upload_dataset(
- tf.name,
- name=fixture.name,
+ dataset_name=fixture.name,
+ csv_file_path=tf.name,
  input_keys=fixture.input_keys,
  output_keys=fixture.output_keys,
  metadata_keys=fixture.metadata_keys,
- description=fixture.description,
+ dataset_description=fixture.description,
  )
  except HTTPStatusError as e:
  print(e.response.content.decode())
phoenix/utilities/json.py CHANGED
@@ -2,7 +2,7 @@ import dataclasses
  import datetime
  from enum import Enum
  from pathlib import Path
- from typing import Any, Mapping, Sequence, SupportsFloat, Union, get_args, get_origin
+ from typing import Any, Mapping, Sequence, Union, get_args, get_origin

  import numpy as np

@@ -15,10 +15,10 @@ def jsonify(obj: Any) -> Any:
  return jsonify(obj.value)
  if isinstance(obj, (str, int, float, bool)) or obj is None:
  return obj
- if isinstance(obj, np.ndarray):
+ if isinstance(obj, (list, set, frozenset, Sequence)):
  return [jsonify(v) for v in obj]
- if isinstance(obj, SupportsFloat):
- return float(obj)
+ if isinstance(obj, (dict, Mapping)):
+ return {jsonify(k): jsonify(v) for k, v in obj.items()}
  if dataclasses.is_dataclass(obj):
  return {
  k: jsonify(v)
@@ -29,10 +29,6 @@ def jsonify(obj: Any) -> Any:
  and type(None) in get_args(field)
  )
  }
- if isinstance(obj, (Sequence, set, frozenset)):
- return [jsonify(v) for v in obj]
- if isinstance(obj, Mapping):
- return {jsonify(k): jsonify(v) for k, v in obj.items()}
  if isinstance(obj, (datetime.date, datetime.datetime, datetime.time)):
  return obj.isoformat()
  if isinstance(obj, datetime.timedelta):
@@ -41,6 +37,10 @@ def jsonify(obj: Any) -> Any:
  return str(obj)
  if isinstance(obj, BaseException):
  return str(obj)
+ if isinstance(obj, np.ndarray):
+ return [jsonify(v) for v in obj]
+ if hasattr(obj, "__float__"):
+ return float(obj)
  if hasattr(obj, "model_dump") and callable(obj.model_dump):
  # pydantic v2
  try:
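In the revised `jsonify` above, the order of the isinstance checks is the dispatch logic: whichever branch matches first wins, so containers are now unpacked before the dataclass branch, and the `SupportsFloat` protocol check is replaced by a plain `hasattr(obj, "__float__")` fallback near the end, with numpy arrays handled just before it. A minimal standalone sketch of that kind of ordering (illustrative only, not the library code):

    from collections.abc import Mapping, Sequence

    def to_jsonable(obj):
        # Plain JSON scalars pass through unchanged.
        if isinstance(obj, (str, int, float, bool)) or obj is None:
            return obj
        # Containers are handled before any numeric fallback.
        if isinstance(obj, Mapping):
            return {to_jsonable(k): to_jsonable(v) for k, v in obj.items()}
        if isinstance(obj, (Sequence, set, frozenset)):
            return [to_jsonable(v) for v in obj]
        # Float-like objects (e.g. numpy scalars, Decimal) only reach this late branch.
        if hasattr(obj, "__float__"):
            return float(obj)
        return str(obj)  # last-resort fallback

    print(to_jsonable({"scores": (1, 2.5), "tags": ["a", "b"]}))
    # {'scores': [1, 2.5], 'tags': ['a', 'b']}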
phoenix/version.py CHANGED
@@ -1 +1 @@
- __version__ = "4.4.4rc5"
+ __version__ = "4.4.4rc6"
phoenix/datasets/evaluators/__init__.py DELETED
@@ -1,18 +0,0 @@
- from phoenix.datasets.evaluators.code_evaluators import ContainsKeyword, JSONParsable
- from phoenix.datasets.evaluators.llm_evaluators import (
- CoherenceEvaluator,
- ConcisenessEvaluator,
- HelpfulnessEvaluator,
- LLMCriteriaEvaluator,
- RelevanceEvaluator,
- )
-
- __all__ = [
- "ContainsKeyword",
- "JSONParsable",
- "CoherenceEvaluator",
- "ConcisenessEvaluator",
- "LLMCriteriaEvaluator",
- "HelpfulnessEvaluator",
- "RelevanceEvaluator",
- ]