arize 8.0.0b0__py3-none-any.whl → 8.0.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -46,7 +46,6 @@ __all__ = [
  "ExperimentsList200Response",
  "ExperimentsRunsList200Response",
  "PaginationMetadata",
- "PrimitiveValue",
  "Problem",
  "Project",
  "ProjectsCreateRequest",
@@ -86,7 +85,6 @@ from arize._generated.api_client.models.experiments_create_request import Experi
  from arize._generated.api_client.models.experiments_list200_response import ExperimentsList200Response as ExperimentsList200Response
  from arize._generated.api_client.models.experiments_runs_list200_response import ExperimentsRunsList200Response as ExperimentsRunsList200Response
  from arize._generated.api_client.models.pagination_metadata import PaginationMetadata as PaginationMetadata
- from arize._generated.api_client.models.primitive_value import PrimitiveValue as PrimitiveValue
  from arize._generated.api_client.models.problem import Problem as Problem
  from arize._generated.api_client.models.project import Project as Project
  from arize._generated.api_client.models.projects_create_request import ProjectsCreateRequest as ProjectsCreateRequest
@@ -29,7 +29,6 @@ from arize._generated.api_client.models.experiments_create_request import Experi
  from arize._generated.api_client.models.experiments_list200_response import ExperimentsList200Response
  from arize._generated.api_client.models.experiments_runs_list200_response import ExperimentsRunsList200Response
  from arize._generated.api_client.models.pagination_metadata import PaginationMetadata
- from arize._generated.api_client.models.primitive_value import PrimitiveValue
  from arize._generated.api_client.models.problem import Problem
  from arize._generated.api_client.models.project import Project
  from arize._generated.api_client.models.projects_create_request import ProjectsCreateRequest
@@ -19,7 +19,6 @@ import json

  from pydantic import BaseModel, ConfigDict, Field, StrictStr
  from typing import Any, ClassVar, Dict, List
- from arize._generated.api_client.models.primitive_value import PrimitiveValue
  from typing import Optional, Set
  from typing_extensions import Self

@@ -29,7 +28,7 @@ class DatasetsCreateRequest(BaseModel):
  """ # noqa: E501
  name: StrictStr = Field(description="Name of the new dataset")
  space_id: StrictStr = Field(description="ID of the space the dataset will belong to")
- examples: List[Dict[str, PrimitiveValue]] = Field(description="Array of examples for the new dataset")
+ examples: List[Dict[str, Any]] = Field(description="Array of examples for the new dataset")
  __properties: ClassVar[List[str]] = ["name", "space_id", "examples"]

  model_config = ConfigDict(
@@ -71,13 +70,6 @@ class DatasetsCreateRequest(BaseModel):
  exclude=excluded_fields,
  exclude_none=True,
  )
- # override the default output from pydantic by calling `to_dict()` of each item in examples (list)
- _items = []
- if self.examples:
- for _item_examples in self.examples:
- if _item_examples:
- _items.append(_item_examples.to_dict())
- _dict['examples'] = _items
  return _dict

  @classmethod
@@ -97,7 +89,7 @@ class DatasetsCreateRequest(BaseModel):
  _obj = cls.model_validate({
  "name": obj.get("name"),
  "space_id": obj.get("space_id"),
- "examples": [Dict[str, PrimitiveValue].from_dict(_item) for _item in obj["examples"]] if obj.get("examples") is not None else None
+ "examples": obj.get("examples")
  })
  return _obj
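With the PrimitiveValue wrapper gone, dataset example values are typed as plain `Any`, so callers pass ordinary JSON-serializable dicts straight through `to_dict()` and `from_dict()`. A minimal sketch of the new shape, using the generated model from the hunks above; the dataset name, space ID, and example fields are illustrative values, not part of the SDK:

```python
from arize._generated.api_client.models.datasets_create_request import (
    DatasetsCreateRequest,
)

# Values are plain JSON primitives; no PrimitiveValue wrapping is needed in 8.0.0b1.
request = DatasetsCreateRequest(
    name="qa-golden-set",        # illustrative
    space_id="your-space-id",    # illustrative
    examples=[
        {"question": "What is Arize?", "expected": "An AI engineering platform", "score": 1},
    ],
)

payload = request.to_dict()                      # examples now pass through unchanged
restored = DatasetsCreateRequest.from_dict(payload)
assert restored.examples == request.examples
```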
103
95
 
@@ -19,7 +19,6 @@ import json
19
19
 
20
20
  from pydantic import BaseModel, ConfigDict, Field
21
21
  from typing import Any, ClassVar, Dict, List
22
- from arize._generated.api_client.models.primitive_value import PrimitiveValue
23
22
  from typing import Optional, Set
24
23
  from typing_extensions import Self
25
24
 
@@ -27,7 +26,7 @@ class DatasetsExamplesInsertRequest(BaseModel):
27
26
  """
28
27
  DatasetsExamplesInsertRequest
29
28
  """ # noqa: E501
30
- examples: List[Dict[str, PrimitiveValue]] = Field(description="Array of examples to append to the dataset version")
29
+ examples: List[Dict[str, Any]] = Field(description="Array of examples to append to the dataset version")
31
30
  __properties: ClassVar[List[str]] = ["examples"]
32
31
 
33
32
  model_config = ConfigDict(
@@ -69,13 +68,6 @@ class DatasetsExamplesInsertRequest(BaseModel):
69
68
  exclude=excluded_fields,
70
69
  exclude_none=True,
71
70
  )
72
- # override the default output from pydantic by calling `to_dict()` of each item in examples (list)
73
- _items = []
74
- if self.examples:
75
- for _item_examples in self.examples:
76
- if _item_examples:
77
- _items.append(_item_examples.to_dict())
78
- _dict['examples'] = _items
79
71
  return _dict
80
72
 
81
73
  @classmethod
@@ -93,7 +85,7 @@ class DatasetsExamplesInsertRequest(BaseModel):
93
85
  raise ValueError("Error due to additional fields (not defined in DatasetsExamplesInsertRequest) in the input: " + _key)
94
86
 
95
87
  _obj = cls.model_validate({
96
- "examples": [Dict[str, PrimitiveValue].from_dict(_item) for _item in obj["examples"]] if obj.get("examples") is not None else None
88
+ "examples": obj.get("examples")
97
89
  })
98
90
  return _obj
99
91
 
@@ -38,9 +38,7 @@ class TestDatasetsCreateRequest(unittest.TestCase):
38
38
  name = '',
39
39
  space_id = '',
40
40
  examples = [
41
- {
42
- 'key' : null
43
- }
41
+ { }
44
42
  ]
45
43
  )
46
44
  else:
@@ -48,9 +46,7 @@ class TestDatasetsCreateRequest(unittest.TestCase):
48
46
  name = '',
49
47
  space_id = '',
50
48
  examples = [
51
- {
52
- 'key' : null
53
- }
49
+ { }
54
50
  ],
55
51
  )
56
52
  """
@@ -36,17 +36,13 @@ class TestDatasetsExamplesInsertRequest(unittest.TestCase):
36
36
  if include_optional:
37
37
  return DatasetsExamplesInsertRequest(
38
38
  examples = [
39
- {
40
- 'key' : null
41
- }
39
+ { }
42
40
  ]
43
41
  )
44
42
  else:
45
43
  return DatasetsExamplesInsertRequest(
46
44
  examples = [
47
- {
48
- 'key' : null
49
- }
45
+ { }
50
46
  ],
51
47
  )
52
48
  """
@@ -36,9 +36,7 @@ class TestDatasetsExamplesList200Response(unittest.TestCase):
36
36
  if include_optional:
37
37
  return DatasetsExamplesList200Response(
38
38
  examples = [
39
- {
40
- 'key' : null
41
- }
39
+ { }
42
40
  ],
43
41
  pagination = arize._generated.api_client.models.pagination_metadata.PaginationMetadata(
44
42
  next_cursor = '',
@@ -47,9 +45,7 @@ class TestDatasetsExamplesList200Response(unittest.TestCase):
47
45
  else:
48
46
  return DatasetsExamplesList200Response(
49
47
  examples = [
50
- {
51
- 'key' : null
52
- }
48
+ { }
53
49
  ],
54
50
  pagination = arize._generated.api_client.models.pagination_metadata.PaginationMetadata(
55
51
  next_cursor = '',
@@ -36,18 +36,14 @@ class TestDatasetsExamplesUpdateRequest(unittest.TestCase):
36
36
  if include_optional:
37
37
  return DatasetsExamplesUpdateRequest(
38
38
  examples = [
39
- {
40
- 'key' : null
41
- }
39
+ { }
42
40
  ],
43
41
  new_version = ''
44
42
  )
45
43
  else:
46
44
  return DatasetsExamplesUpdateRequest(
47
45
  examples = [
48
- {
49
- 'key' : null
50
- }
46
+ { }
51
47
  ],
52
48
  )
53
49
  """
@@ -38,9 +38,7 @@ class TestExperimentsCreateRequest(unittest.TestCase):
38
38
  name = '',
39
39
  dataset_id = '',
40
40
  experiment_runs = [
41
- {
42
- 'key' : null
43
- }
41
+ { }
44
42
  ]
45
43
  )
46
44
  else:
@@ -48,9 +46,7 @@ class TestExperimentsCreateRequest(unittest.TestCase):
48
46
  name = '',
49
47
  dataset_id = '',
50
48
  experiment_runs = [
51
- {
52
- 'key' : null
53
- }
49
+ { }
54
50
  ],
55
51
  )
56
52
  """
@@ -36,9 +36,7 @@ class TestExperimentsRunsList200Response(unittest.TestCase):
36
36
  if include_optional:
37
37
  return ExperimentsRunsList200Response(
38
38
  experiment_runs = [
39
- {
40
- 'key' : null
41
- }
39
+ { }
42
40
  ],
43
41
  pagination = arize._generated.api_client.models.pagination_metadata.PaginationMetadata(
44
42
  next_cursor = '',
@@ -47,9 +45,7 @@ class TestExperimentsRunsList200Response(unittest.TestCase):
47
45
  else:
48
46
  return ExperimentsRunsList200Response(
49
47
  experiment_runs = [
50
- {
51
- 'key' : null
52
- }
48
+ { }
53
49
  ],
54
50
  pagination = arize._generated.api_client.models.pagination_metadata.PaginationMetadata(
55
51
  next_cursor = '',
@@ -109,7 +109,6 @@ Class | Method | HTTP request | Description
109
109
  - [ExperimentsList200Response](arize/_generated/api_client/docs/ExperimentsList200Response.md)
110
110
  - [ExperimentsRunsList200Response](arize/_generated/api_client/docs/ExperimentsRunsList200Response.md)
111
111
  - [PaginationMetadata](arize/_generated/api_client/docs/PaginationMetadata.md)
112
- - [PrimitiveValue](arize/_generated/api_client/docs/PrimitiveValue.md)
113
112
  - [Problem](arize/_generated/api_client/docs/Problem.md)
114
113
  - [Project](arize/_generated/api_client/docs/Project.md)
115
114
  - [ProjectsCreateRequest](arize/_generated/api_client/docs/ProjectsCreateRequest.md)
arize/client.py CHANGED
@@ -20,6 +20,8 @@ if TYPE_CHECKING:

  logger = logging.getLogger(__name__)

+ # TODO(Kiko): InvalidAdditionalHeadersError is unused. Have we handled extra headers?
+
  # TODO(Kiko): Clean commented lines over the SDK
  # TODO(Kiko): Implement https://github.com/Arize-ai/arize/pull/59917

@@ -105,44 +107,16 @@ class ArizeClient(LazySubclientsMixin):
  "SpansClient",
  ),
  }
- _EXTRAS: ClassVar[dict[str, tuple[str | None, tuple[str, ...]]]] = {
- # Gate only the generated-backed ones
- "datasets": (
- "datasets-experiments",
- (
- "pydantic",
- "openinference.semconv",
- ),
- ),
- "experiments": (
- "datasets-experiments",
- (
- "pydantic",
- "wrapt",
- # "numpy",
- # "openinference.semconv",
- # "opentelemetry.sdk",
- # "opentelemetry.exporter.otlp.proto.grpc.trace_exporter",
- ),
- ),
- "spans": (
- "spans",
- (
- "google.protobuf",
- "numpy",
- "openinference.semconv",
- "opentelemetry",
- "pandas",
- "pyarrow",
- "requests",
- "tqdm",
- ),
- ),
- # Imports are gated in each method of the models client
- # This is to allow for very lean package install if people only
- # want to stream ML records
- "ml": (None, ()),
- }
+ # DISABLED: Optional dependency gating system
+ # This dict would map subclients to their optional dependencies and extra names.
+ # When enabled, it prevents loading subclients if dependencies aren't installed,
+ # showing: "Install via: pip install arize[extra-name]"
+ #
+ # To re-enable, populate with entries like:
+ # "subclient_name": ("extra-name", ("package1", "package2", "package3")),
+ # "another_subclient": (None, ()), # No requirements
+ #
+ _EXTRAS: ClassVar[dict[str, tuple[str | None, tuple[str, ...]]]] = {}

  def __init__(
  self,
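For reference, a re-enabled gating entry would follow the tuple shape described in the comment above: subclient name mapped to the pip extra to suggest and the module names that must be importable. A sketch under those assumptions; the class name and package lists below are illustrative, not the SDK's actual requirements:

```python
from __future__ import annotations

from typing import ClassVar


class GatedClientSketch:
    """Illustration only: how the disabled _EXTRAS table could be re-populated."""

    _EXTRAS: ClassVar[dict[str, tuple[str | None, tuple[str, ...]]]] = {
        # subclient -> (extra name for "pip install arize[<extra>]", required imports)
        "spans": ("spans", ("pandas", "pyarrow", "opentelemetry")),
        "ml": (None, ()),  # nothing gated at import time
    }
```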
@@ -4,10 +4,10 @@ from arize.experiments.evaluators.types import (
  EvaluationResult,
  EvaluationResultFieldNames,
  )
- from arize.experiments.types import ExperimentTaskResultFieldNames
+ from arize.experiments.types import ExperimentTaskFieldNames

  __all__ = [
  "EvaluationResult",
  "EvaluationResultFieldNames",
- "ExperimentTaskResultFieldNames",
+ "ExperimentTaskFieldNames",
  ]
@@ -43,7 +43,7 @@ if TYPE_CHECKING:
  from arize.experiments.evaluators.types import EvaluationResultFieldNames
  from arize.experiments.types import (
  ExperimentTask,
- ExperimentTaskResultFieldNames,
+ ExperimentTaskFieldNames,
  )

  logger = logging.getLogger(__name__)
@@ -114,7 +114,7 @@ class ExperimentsClient:
  name: str,
  dataset_id: str,
  experiment_runs: list[dict[str, object]] | pd.DataFrame,
- task_fields: ExperimentTaskResultFieldNames,
+ task_fields: ExperimentTaskFieldNames,
  evaluator_columns: dict[str, EvaluationResultFieldNames] | None = None,
  force_http: bool = False,
  ) -> models.Experiment:
@@ -175,7 +175,6 @@ class ExperimentsClient:
  from arize._generated import api_client as gen

  data = experiment_df.to_dict(orient="records")
-
  body = gen.ExperimentsCreateRequest(
  name=name,
  dataset_id=dataset_id,
@@ -56,7 +56,7 @@ from arize.experiments.types import (
  ExperimentEvaluationRun,
  ExperimentRun,
  ExperimentTask,
- ExperimentTaskResultFieldNames,
+ ExperimentTaskFieldNames,
  _TaskSummary,
  )

@@ -768,7 +768,7 @@ def get_result_attr(r: object, attr: str, default: object = None) -> object:

  def transform_to_experiment_format(
  experiment_runs: list[dict[str, object]] | pd.DataFrame,
- task_fields: ExperimentTaskResultFieldNames,
+ task_fields: ExperimentTaskFieldNames,
  evaluator_fields: dict[str, EvaluationResultFieldNames] | None = None,
  ) -> pd.DataFrame:
  """Transform a DataFrame to match the format returned by run_experiment().
@@ -788,7 +788,7 @@ def transform_to_experiment_format(
  else pd.DataFrame(experiment_runs)
  )
  # Validate required columns
- required_cols = {task_fields.example_id, task_fields.result}
+ required_cols = {task_fields.example_id, task_fields.output}
  missing_cols = required_cols - set(data.columns)
  if missing_cols:
  raise ValueError(f"Missing required columns: {missing_cols}")
@@ -799,11 +799,11 @@ def transform_to_experiment_format(
  out_df["example_id"] = data[task_fields.example_id]
  if task_fields.example_id != "example_id":
  out_df.drop(task_fields.example_id, axis=1, inplace=True)
- out_df["result"] = data[task_fields.result].apply(
+ out_df["output"] = data[task_fields.output].apply(
  lambda x: json.dumps(x) if isinstance(x, dict) else x
  )
- if task_fields.result != "result":
- out_df.drop(task_fields.result, axis=1, inplace=True)
+ if task_fields.output != "output":
+ out_df.drop(task_fields.output, axis=1, inplace=True)

  # Process evaluator results
  if evaluator_fields:
@@ -397,17 +397,17 @@ def _top_string(s: pd.Series, length: int = 100) -> str | None:


  @dataclass
- class ExperimentTaskResultFieldNames:
+ class ExperimentTaskFieldNames:
  """Column names for mapping experiment task results in a DataFrame.

  Args:
  example_id: Name of column containing example IDs.
  The ID values must match the id of the dataset rows.
- result: Name of column containing task results
+ output: Name of column containing task results
  """

  example_id: str
- result: str
+ output: str


  TaskOutput = JSONSerializable
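The rename from `ExperimentTaskResultFieldNames` to `ExperimentTaskFieldNames`, and of its `result` field to `output`, ripples through the experiments client and `transform_to_experiment_format`. A sketch of the mapping after the rename; the DataFrame column names and values are made up for illustration, and the call that would consume `task_fields` is only indicated in a comment:

```python
import pandas as pd

from arize.experiments.types import ExperimentTaskFieldNames  # renamed in 8.0.0b1

# Map your own column names onto the fields the SDK validates against.
task_fields = ExperimentTaskFieldNames(
    example_id="dataset_row_id",   # must match the dataset example IDs
    output="model_answer",         # was `result` in 8.0.0b0
)

experiment_runs = pd.DataFrame(
    {
        "dataset_row_id": ["ex-1", "ex-2"],
        "model_answer": ["Paris", {"answer": "Berlin"}],  # dicts get JSON-encoded downstream
    }
)

# transform_to_experiment_format(experiment_runs, task_fields) now checks for the
# mapped columns and renames them to `example_id` / `output` (see the hunks above).
```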
arize/spans/client.py CHANGED
@@ -92,7 +92,7 @@ class SpansClient:
  before sending to Arize.

  Returns:
- `Response` object
+ Response object from the HTTP request.

  """
  from arize.spans.columns import (
@@ -1011,7 +1011,7 @@ class SpansClient:
  WHERE clauses and similarity search for semantic retrieval.

  Returns:
- pd.DataFrame: DataFrame containing the requested span data with columns
+ DataFrame containing the requested span data with columns
  for span metadata, attributes, events, and any custom fields.
  """
  with ArizeFlightClient(
arize/version.py CHANGED
@@ -1,3 +1,3 @@
  """Version information for the Arize SDK."""

- __version__ = "8.0.0b0"
+ __version__ = "8.0.0b1"
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: arize
- Version: 8.0.0b0
+ Version: 8.0.0b1
  Summary: A helper library to interact with Arize AI APIs
  Project-URL: Homepage, https://arize.com
  Project-URL: Documentation, https://docs.arize.com/arize
@@ -26,47 +26,36 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
  Classifier: Topic :: System :: Logging
  Classifier: Topic :: System :: Monitoring
  Requires-Python: >=3.10
- Requires-Dist: lazy-imports
  Requires-Dist: numpy>=2.0.0
- Provides-Extra: auto-embeddings
- Requires-Dist: datasets!=2.14.*,<3,>=2.8; extra == 'auto-embeddings'
- Requires-Dist: pandas<3,>=1.0.0; extra == 'auto-embeddings'
- Requires-Dist: pillow<11,>=8.4.0; extra == 'auto-embeddings'
- Requires-Dist: tokenizers<1,>=0.13; extra == 'auto-embeddings'
- Requires-Dist: torch<3,>=1.13; extra == 'auto-embeddings'
- Requires-Dist: transformers<5,>=4.25; extra == 'auto-embeddings'
- Provides-Extra: datasets-experiments
- Requires-Dist: numpy>=2.0.0; extra == 'datasets-experiments'
- Requires-Dist: openinference-semantic-conventions<1,>=0.1.21; extra == 'datasets-experiments'
- Requires-Dist: opentelemetry-api>=1.38.0; extra == 'datasets-experiments'
- Requires-Dist: opentelemetry-exporter-otlp-proto-common>=1.38.0; extra == 'datasets-experiments'
- Requires-Dist: opentelemetry-exporter-otlp-proto-grpc>=1.38.0; extra == 'datasets-experiments'
- Requires-Dist: opentelemetry-proto>=1.38.0; extra == 'datasets-experiments'
- Requires-Dist: opentelemetry-sdk>=1.38.0; extra == 'datasets-experiments'
- Requires-Dist: pydantic; extra == 'datasets-experiments'
- Requires-Dist: wrapt<2.0.0,>=1.0.0; extra == 'datasets-experiments'
+ Requires-Dist: openinference-semantic-conventions<1,>=0.1.25
+ Requires-Dist: opentelemetry-exporter-otlp-proto-common>=1.38.0
+ Requires-Dist: opentelemetry-exporter-otlp-proto-grpc>=1.38.0
+ Requires-Dist: opentelemetry-sdk>=1.38.0
+ Requires-Dist: opentelemetry-semantic-conventions<1,>=0.43b0
+ Requires-Dist: pandas<3,>=2.0.0
+ Requires-Dist: protobuf<6,>=4.21.0
+ Requires-Dist: pyarrow>=0.15.0
+ Requires-Dist: pydantic<3,>=2
+ Requires-Dist: python-dateutil<3,>=2.8.2
+ Requires-Dist: requests-futures<2,>=1.0.0
+ Requires-Dist: requests<3,>=2.0.0
+ Requires-Dist: tqdm<5,>4
+ Requires-Dist: typing-extensions<5,>=4.7.1
+ Requires-Dist: urllib3<3,>=2.1.0
+ Requires-Dist: wrapt<2.0.0,>=1.0.0
  Provides-Extra: dev
  Requires-Dist: pytest==8.4.2; extra == 'dev'
  Requires-Dist: ruff==0.13.2; extra == 'dev'
- Provides-Extra: mimic-explainer
- Requires-Dist: interpret-community[mimic]<1,>=0.22.0; extra == 'mimic-explainer'
- Provides-Extra: ml-batch
- Requires-Dist: pandas<3,>=1.0.0; extra == 'ml-batch'
- Requires-Dist: protobuf<6,>=4.21.0; extra == 'ml-batch'
- Requires-Dist: pyarrow>=0.15.0; extra == 'ml-batch'
- Requires-Dist: requests<3,>=2.0.0; extra == 'ml-batch'
- Requires-Dist: tqdm; extra == 'ml-batch'
- Provides-Extra: ml-stream
- Requires-Dist: protobuf<6,>=4.21.0; extra == 'ml-stream'
- Requires-Dist: requests-futures<2,>=1.0.0; extra == 'ml-stream'
- Provides-Extra: spans
- Requires-Dist: openinference-semantic-conventions<1,>=0.1.21; extra == 'spans'
- Requires-Dist: opentelemetry-semantic-conventions<1,>=0.43b0; extra == 'spans'
- Requires-Dist: pandas<3,>=1.0.0; extra == 'spans'
- Requires-Dist: protobuf<6,>=4.21.0; extra == 'spans'
- Requires-Dist: pyarrow>=0.15.0; extra == 'spans'
- Requires-Dist: requests<3,>=2.0.0; extra == 'spans'
- Requires-Dist: tqdm; extra == 'spans'
+ Provides-Extra: embeddings
+ Requires-Dist: datasets!=2.14.*,<3,>=2.8; extra == 'embeddings'
+ Requires-Dist: pillow<11,>=8.4.0; extra == 'embeddings'
+ Requires-Dist: tokenizers<1,>=0.13; extra == 'embeddings'
+ Requires-Dist: torch<3,>=1.13; extra == 'embeddings'
+ Requires-Dist: transformers<5,>=4.25; extra == 'embeddings'
+ Provides-Extra: mimic
+ Requires-Dist: interpret-community[mimic]<1,>=0.22.0; extra == 'mimic'
+ Provides-Extra: otel
+ Requires-Dist: arize-otel<1,>=0.11.0; extra == 'otel'
  Description-Content-Type: text/markdown

  <p align="center">
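The dependency consolidation above moves the former `datasets-experiments`, `ml-batch`, `ml-stream`, and `spans` extras into the core requirements, leaving only `dev`, `embeddings`, `mimic`, and `otel` as optional extras. One way to confirm what an installed build declares, a small sketch using only the standard library and assuming arize is already installed:

```python
from importlib.metadata import requires

# Print the wheel's declared dependencies; unconditional entries are the new core
# requirements, and anything guarded by `extra == ...` belongs to an optional extra.
for requirement in requires("arize") or []:
    print(requirement)
```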
@@ -130,16 +119,18 @@ Description-Content-Type: text/markdown

  A helper package to interact with Arize AI APIs.

- Arize is an AI engineering platform. It helps engineers develop, evaluate, and observe AI applications and agents.
+ Arize is an AI engineering platform. It helps engineers develop, evaluate, and observe AI applications and agents.
+
+ Arize has both Enterprise and OSS products to support this goal:

- Arize has both Enterprise and OSS products to support this goal:
  - [Arize AX](https://arize.com/) — an enterprise AI engineering platform from development to production, with an embedded AI Copilot
  - [Phoenix](https://github.com/Arize-ai/phoenix) — a lightweight, open-source project for tracing, prompt engineering, and evaluation
  - [OpenInference](https://github.com/Arize-ai/openinference) — an open-source instrumentation package to trace LLM applications across models and frameworks

- We log over 1 trillion inferences and spans, 10 million evaluation runs, and 2 million OSS downloads every month.
+ We log over 1 trillion inferences and spans, 10 million evaluation runs, and 2 million OSS downloads every month.

  # Key Features
+
  - [**_Tracing_**](https://docs.arize.com/arize/observe/tracing) - Trace your LLM application's runtime using OpenTelemetry-based instrumentation.
  - [**_Evaluation_**](https://docs.arize.com/arize/evaluate/online-evals) - Leverage LLMs to benchmark your application's performance using response and retrieval evals.
  - [**_Datasets_**](https://docs.arize.com/arize/develop/datasets) - Create versioned datasets of examples for experimentation, evaluation, and fine-tuning.
@@ -149,19 +140,21 @@ We log over 1 trillion inferences and spans, 10 million evaluation runs, and 2 m

  # Installation

- Install Arize (version 8 is currently under alpha release) via `pip` or `conda`:
+ Install Arize (version 8 is currently under beta release) via `pip`:

  ```bash
- pip install arize==8.0.0ax
+ pip install --pre arize
  ```
- where `x` denotes the specific alpha release. Install the `arize-otel` package for auto-instrumentation of your LLM library:
+
+ where `--pre` denotes the installation of pre-release versions. Install the
+ `arize-otel` package for auto-instrumentation of your LLM library:

  ```bash
  pip install arize-otel
  ```

  # Usage
-
+
  ## Instrumentation

  See [arize-otel in PyPI](https://pypi.org/project/arize-otel/):
@@ -183,9 +176,8 @@ OpenAIInstrumentor().instrument(tracer_provider=tracer_provider)

  ## Operations on Spans

- Use `arize.spans` to interact with spans: log spans into Arize, update the span's evaluations, annotations and metadata in bulk.
-
- > **WARNING**: This is currently under an alpha release. Install with `pip install arize==8.0.0ax` where the `x` denotes the specific alpha version. Check the [pre-releases](https://pypi.org/project/arize/#history) page in PyPI.
+ Use `arize.spans` to interact with spans: log spans into Arize, update the span's
+ evaluations, annotations and metadata in bulk.

  ### Logging spans

@@ -259,9 +251,8 @@ df = client.spans.export_to_df(

  ## Operations on ML Models

- Use `arize.models` to interact with ML models: log ML data (traininv, validation, production) into Arize, either streaming or in batches.
-
- > **WARNING**: This is currently under an alpha release. Install with `pip install arize==8.0.0ax` where the `x` denotes the specific alpha version. Check the [pre-releases](https://pypi.org/project/arize/#history) page in PyPI.
+ Use `arize.ml` to interact with ML models: log ML data (training, validation, production)
+ into Arize, either streaming or in batches.

  ### Stream log ML Data for a Classification use-case

@@ -276,7 +267,7 @@ MODEL_NAME = "<your-model-name>"
  features=...
  embedding_features=...

- response = client.models.log_stream(
+ response = client.ml.log_stream(
  space_id=SPACE_ID,
  model_name=MODEL_NAME,
  model_type=ModelTypes.SCORE_CATEGORICAL,
@@ -328,7 +319,7 @@ schema = Schema(
  )

  # Logging Production DataFrame
- response = client.models.log_batch(
+ response = client.ml.log_batch(
  space_id=SPACE_ID,
  model_name=MODEL_NAME,
  model_type=ModelTypes.OBJECT_DETECTION,
@@ -356,7 +347,7 @@ SPACE_ID = "<your-space-id>"
  MODEL_NAME = "<your-model-name>"
  MODEL_VERSION = "1.0"

- df = client.models.export_to_df(
+ df = client.ml.export_to_df(
  space_id=SPACE_ID,
  model_name=MODEL_NAME,
  environment=Environments.TRAINING,
@@ -414,7 +405,7 @@ The response is an object of type `DatasetsList200Response`, and you can access

  ```python
  # Get the list of datasets from the response
- dataset_list = resp.datasets
+ dataset_list = resp.datasets
  # Get the response as a dictionary
  resp_dict = resp.to_dict()
  # Get the response in JSON format
@@ -530,7 +521,7 @@ The response is an object of type `ExperimentsList200Response`, and you can acce

  ```python
  # Get the list of datasets from the response
- experiment_list = resp.experiments
+ experiment_list = resp.experiments
  # Get the response as a dictionary
  resp_dict = resp.to_dict()
  # Get the response in JSON format
@@ -554,7 +545,7 @@ experiment, experiment_df = client.run_experiment(
  concurrency=..., # The number of concurrent tasks to run. Defaults to 3.
  set_global_tracer_provider=..., # If True, sets the global tracer provider for the experiment. Defaults to False
  exit_on_error=..., # If True, the experiment will stop running on first occurrence of an error. Defaults to False
- )
+ )
  ```

  The `Experiment` object also counts with convenience method similar to `List***` objects:
@@ -649,7 +640,7 @@ resp_df = resp.to_df()

  ### In Code

- You can use `configure_logging` to set up the logging behavior of the Arize package to your needs.
+ You can use `configure_logging` to set up the logging behavior of the Arize package to your needs.

  ```python
  from arize.logging import configure_logging
@@ -657,7 +648,7 @@ from arize.logging import configure_logging
  configure_logging(
  level=..., # Defaults to logging.INFO
  structured=..., # if True, emit JSON logs. Defaults to False
- )
+ )
  ```

  ### Via Environment Variables
@@ -668,11 +659,11 @@ Configure the same options as the section above, via:
  import os

  # Whether or not you want to disable logging altogether
- os.environ["ARIZE_LOG_ENABLE"] = "true"
+ os.environ["ARIZE_LOG_ENABLE"] = "true"
  # Set up the logging level
- os.environ["ARIZE_LOG_LEVEL"] = "debug"
+ os.environ["ARIZE_LOG_LEVEL"] = "debug"
  # Whether or not you want structured JSON logs
- os.environ["ARIZE_LOG_STRUCTURED"] = "false"
+ os.environ["ARIZE_LOG_STRUCTURED"] = "false"
  ```

  The default behavior of Arize's logs is: enabled, `INFO` level, and not structured.
@@ -1,11 +1,11 @@
  arize/__init__.py,sha256=C2NOiNcFL9TMu5NTkJwF88kL6PKAHHjBMG8HTAIQ5TU,3551
  arize/_lazy.py,sha256=aLdgSGxdfdV9QiLllxlLlkN5k44pFf_7jdKRV9Z9sgo,2864
- arize/client.py,sha256=RwnDTmxHzPfpzonYD8u_d4V-RFyehpbUADZV8Xpki0c,13660
+ arize/client.py,sha256=XGOaze-rfvDscL9vpWohen9vUbTNwLsx27pF77p9zxA,13150
  arize/config.py,sha256=80iQgiaAhSqgE1Q-nKlivjQ2z3F7PifiPAsZlhQjozo,16471
  arize/logging.py,sha256=CZkgdpDKXoNNJ9fQMNwbCQEe-ipUg7fQf_CE1hRN1Qc,8574
  arize/pre_releases.py,sha256=QWZb5Kko-fHDKDKNk8HUwehMQuLrZPYnyD2c6_mnEqI,1216
  arize/regions.py,sha256=oEKJdZbW5r8X3fL3PaDVMdk-f2MoZ8OBvUvYZLMGBpw,1039
- arize/version.py,sha256=OIc4vIJor74VQDoVG5zuAmxWI0h4EEU19vIPa3y2Tjc,70
+ arize/version.py,sha256=wAs5eLjhoEbU63izq_pHXyel5O0ilqn4QY6kj6xnago,70
  arize/_exporter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  arize/_exporter/client.py,sha256=DRxdREi4k0RiRI-kPvYCwX2zo6DGkdt1XMezF4ZhR08,16425
  arize/_exporter/validation.py,sha256=9gX4oHrZcChMfkwoBpe3Qp4aK-txy_zZ4azrKf-bEQw,1022
@@ -15,8 +15,8 @@ arize/_flight/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  arize/_flight/client.py,sha256=HpUu22Q2r-jnh_j0RaQtJLZnaXKGBNNjZlZ0dvX98xs,20391
  arize/_flight/types.py,sha256=GB_4dQu2ElIrcDGAcqhG7oI4g-b0ZdSlbrQkf0TFzVE,194
  arize/_generated/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- arize/_generated/api_client_README.md,sha256=SfBbE1r1thnfwk3AVr6CZUP6kJ7HNy28gx1V7DjQhp8,7451
- arize/_generated/api_client/__init__.py,sha256=I_KGjOG-2Z5dokY6Ln9E5fFHUIe53WUvCaUvg4DtQss,4672
+ arize/_generated/api_client_README.md,sha256=9UxtVJdMFIdV-_9NqUwg2ox3Many_5LIyXz7ZqYOdU0,7379
+ arize/_generated/api_client/__init__.py,sha256=QjXl6DlavvyG5VRCT_-U2_eS_JeFIUVoMIWr19vroK8,4554
  arize/_generated/api_client/api_client.py,sha256=Cr9leKLpAJYQmL_U1hAPsMNFdaSO6Ti1X52jvGp2WsQ,27899
  arize/_generated/api_client/api_response.py,sha256=eMxw1mpmJcoGZ3gs9z6jM4oYoZ10Gjk333s9sKxGv7s,652
  arize/_generated/api_client/configuration.py,sha256=EJ6DkTKzHWqjN0BwMEHQ1D9pEDdClOsM72EcffquZ9I,19406
@@ -26,13 +26,13 @@ arize/_generated/api_client/api/__init__.py,sha256=ycVTd-FBlPyFX82wbCQiuCTbj78pZ
  arize/_generated/api_client/api/datasets_api.py,sha256=pwS_NU7puzAdoqp49D24XjgcReptPue6K2ZI_C8DwYY,102867
  arize/_generated/api_client/api/experiments_api.py,sha256=xMMH-QA80mmXXPbGKKPIONMnxNowSGlf8wVdLKqzPss,65311
  arize/_generated/api_client/api/projects_api.py,sha256=SRMimrrXNZLlKD9zbTFs-iy2SeOHiVDdlMB0Hbp0ApE,48820
- arize/_generated/api_client/models/__init__.py,sha256=qajnlyF3Q2lCuIIeCqldVFcdgogFPd2iLr5YMYv8Alw,2261
+ arize/_generated/api_client/models/__init__.py,sha256=_kbyaJU4uC0Gdnc87FsdBDfb-QcH8LKID7YNsk11JSk,2183
  arize/_generated/api_client/models/dataset.py,sha256=eStUwLDPU7j3kMTdSO7RqMCScsGU9rOZtU41xnSXUUs,4332
  arize/_generated/api_client/models/dataset_example.py,sha256=avmtSuitMruY7Yphw5gDpPbfkx_GKDhePBSoG6S_sc0,3825
  arize/_generated/api_client/models/dataset_example_update.py,sha256=DMV1_IkN74tQJrnWFPsJ-7Yj9d-tBaF2rVZZlktprpo,3290
  arize/_generated/api_client/models/dataset_version.py,sha256=OlkhTlcQuNvxJdNxxU5CpHZ9a2OCro5FRPAUMCYLBdE,3709
- arize/_generated/api_client/models/datasets_create_request.py,sha256=av3O6kxoeLL1JACsDL_SrHnNKTNVMHlUyrcBhrsiOac,3673
- arize/_generated/api_client/models/datasets_examples_insert_request.py,sha256=u_9QqAgcEUVUyZoZJCWnd9Wuz00-vyRrDKL-82hXO4U,3456
+ arize/_generated/api_client/models/datasets_create_request.py,sha256=jUARKZVlr8hrkuf2Y59ct466f2PdHtAD2O02d5lqE_w,3149
+ arize/_generated/api_client/models/datasets_examples_insert_request.py,sha256=MVuOJ85lDpzVZssJ76d-cgK3lQkphTMwFAumwHTG6BQ,2932
  arize/_generated/api_client/models/datasets_examples_list200_response.py,sha256=fIY3yg5cbIcRBdawxBRnWqDt5dxOLN1Pxju_g6BDU30,3869
  arize/_generated/api_client/models/datasets_examples_update_request.py,sha256=awa70zUxyMPLiEddQmcigVCR4tPhqTjVL6ClV-CeqK4,3797
  arize/_generated/api_client/models/datasets_list200_response.py,sha256=KsDfQU44q7gg7TwR5MDmJfkp69-SNTo_c87d9uz_8fk,3777
@@ -43,7 +43,6 @@ arize/_generated/api_client/models/experiments_create_request.py,sha256=L0DllTYt
  arize/_generated/api_client/models/experiments_list200_response.py,sha256=AKot7N4isrGFHNr6-mXsSr8ikeo5veneFFsuGYRKSRc,3843
  arize/_generated/api_client/models/experiments_runs_list200_response.py,sha256=vkaYsUKdAnChSBKCEzxc1DrE5vs_0tG1Ya-yXhjuPBo,3979
  arize/_generated/api_client/models/pagination_metadata.py,sha256=2NgzlV140yhFqS8IURPfMrVtW2p-0Xr3DaA9GxFfInw,3262
- arize/_generated/api_client/models/primitive_value.py,sha256=iDHyARo1ILnJymsS9EaVFE5V37X0BvUB6uTX_aVhKbg,6400
  arize/_generated/api_client/models/problem.py,sha256=mbAAHZAC8mKYC0r902SGE4QcLCUMfSzFJOLvjyjO8Pw,3584
  arize/_generated/api_client/models/project.py,sha256=w4OT222655N-dd8IHauiDobgUyMnol4xzZ80Rniv3ts,3636
  arize/_generated/api_client/models/projects_create_request.py,sha256=hnnerivgBpJ4BNcdE1hfZ8EoIawtmxsx1iv8NVoHkw4,3024
@@ -54,20 +53,19 @@ arize/_generated/api_client/test/test_dataset_example.py,sha256=musVzDrorF1yf5oz
  arize/_generated/api_client/test/test_dataset_example_update.py,sha256=Z59KydV8_su5QlPdFx7Vs4WAOh-NEYvNVMsIe_5bz0k,1521
  arize/_generated/api_client/test/test_dataset_version.py,sha256=rPczEI--Htxd5tRYzCgNMYEisu7ub87EOxRE-ZIEbp0,1996
  arize/_generated/api_client/test/test_datasets_api.py,sha256=2WYqfqygypzUEB61iMzqWZRAZIyYhrZQ3LaNZDAOIhc,1707
- arize/_generated/api_client/test/test_datasets_create_request.py,sha256=wjDWgl-NUlaQZTM0eDpVN15fvJ0tH9c3RegBwOKAYCk,1873
- arize/_generated/api_client/test/test_datasets_examples_insert_request.py,sha256=N3mAB0BjNAXGdEyN6l3BmRiUeXG4WUoRxgjtvjmWX7w,1854
- arize/_generated/api_client/test/test_datasets_examples_list200_response.py,sha256=gfm4cmvhpOr0bXLgoRN1G2I9wc155F4IPDjvVySLWcw,2244
- arize/_generated/api_client/test/test_datasets_examples_update_request.py,sha256=a1m-IaI0RGUd3dd6ktQG0nADEORX_r0KLbY-9IqbwBw,1888
+ arize/_generated/api_client/test/test_datasets_create_request.py,sha256=GrWJg_lguEbsJWCUw5QJJjSK-fpSBtUsVFJpntw_t20,1751
+ arize/_generated/api_client/test/test_datasets_examples_insert_request.py,sha256=1vegfmE4E8kGoELac7jML3iQ1Vi2IlkUXgAKJOab-sw,1732
+ arize/_generated/api_client/test/test_datasets_examples_list200_response.py,sha256=C1nYlj8ggOHmczjyPDfBRU4dTvd-f7_xwbe_0fPuR58,2122
+ arize/_generated/api_client/test/test_datasets_examples_update_request.py,sha256=_BZqsFRz2lXIkgwOpWfllnZWdrhqDozYiADANQlPVKA,1766
  arize/_generated/api_client/test/test_datasets_list200_response.py,sha256=jRWD_HBWE3doypQuaexSp39F_fFdp-aJVXsosxM36M8,3905
  arize/_generated/api_client/test/test_experiment.py,sha256=sOwd8Rep47jAf7vhq1hm3rK0P7I0VpGS9cZJLOb1RSE,2080
  arize/_generated/api_client/test/test_experiment_run.py,sha256=FPmaQpEPqs6_jR-o7S-Ejaj2OKeJjO7pLOr-r51fetk,1560
  arize/_generated/api_client/test/test_experiment_run_create.py,sha256=oZorTWEiI6fTmSFw35vZe-1Npw92Ne01nTIbRmfPnAc,1583
  arize/_generated/api_client/test/test_experiments_api.py,sha256=Ez6hw0XNW9y2IDLR6_7YlgjbTXgfZr9JxBoaVxEU8N8,1414
- arize/_generated/api_client/test/test_experiments_create_request.py,sha256=7bNXzkDE3kL5VZCi1ogey9pWmWWqGi_rDG60_93bQoA,1927
+ arize/_generated/api_client/test/test_experiments_create_request.py,sha256=-TFrUHrVIR6ewNtMMaVDKQA5XdC3bThr5Z4_WX4aovs,1805
  arize/_generated/api_client/test/test_experiments_list200_response.py,sha256=NlLuUOjLRXlUqFsgqSI04B2wCl_4OEZUMtGJrVGUwWA,3085
- arize/_generated/api_client/test/test_experiments_runs_list200_response.py,sha256=BtOi_RMEYJciMCazYfmCkR3BbXi8VDdYm1qs_8razkE,2246
+ arize/_generated/api_client/test/test_experiments_runs_list200_response.py,sha256=M6BVVxY6wI_57yR9tkS3qZEgYEWlaPar10-rP6_zU7Q,2124
  arize/_generated/api_client/test/test_pagination_metadata.py,sha256=6I0fz9C-PeS0xGlqfnpCb95jieoCp6wzC_77pFefiI4,1546
- arize/_generated/api_client/test/test_primitive_value.py,sha256=e3Hj8RATS82D1QpAtxysqRrgccFaYXHJHsX0hkLNC90,1399
  arize/_generated/api_client/test/test_problem.py,sha256=cP3YV_YYLjkZuh7RCKUoqJy1FuxukuRj1swYKzFmz9g,1514
  arize/_generated/api_client/test/test_project.py,sha256=89LB5u8gUqrNBGWt9UfC47A9pBFpLZlJ1DqwjUGKsfU,1693
  arize/_generated/api_client/test/test_projects_api.py,sha256=oFZh9KZlPpc-Jaf5IQ1NhLOHJf0sJBHAJIrvPACYmXI,1207
@@ -106,11 +104,11 @@ arize/exceptions/parameters.py,sha256=xbF38TNZI14PruYMor6DYVMB7uVhyusJfRPUn_YPHz
  arize/exceptions/spaces.py,sha256=RZdXCnxkc-t5_jY2je7gVUDxaXhBQvKpXcUGXptO8mA,631
  arize/exceptions/types.py,sha256=ks-4B2YpiVxHVopWUh4Gv_iM2h-LhdJ_zZDXtN3Ctc0,9468
  arize/exceptions/values.py,sha256=rzT8sZbY2m2Vxc5vt5V-ZjsQyiGbxM0pxPi-bp9d-pU,27920
- arize/experiments/__init__.py,sha256=8ESMbJbtGc3VGDo4efQxxCeXmuZlT-0tPv-EBS48Xpo,358
- arize/experiments/client.py,sha256=ScUgT44GkNJ6SdwctHeLPNefev3hLCjW55PVkVf_-xI,26233
- arize/experiments/functions.py,sha256=b51Dv1qk4oiDiJv6VZgpBhfdKlo4ff8FLTtH5ruEPOA,34675
+ arize/experiments/__init__.py,sha256=zQAEQggDaJqZaubqjCJfmqbhemMeY_WakWI4ul-wkk8,346
+ arize/experiments/client.py,sha256=wRIVy2VtQ0LsvTRtfBz78cNnUkP1quzMCReUg2L1rHU,26220
+ arize/experiments/functions.py,sha256=4rQEB7eB3fvmdjgbCPRRltWlGwXMj1qLSrnCWedP2qY,34663
  arize/experiments/tracing.py,sha256=P3Q2DTUGNsRhvxRNAx_NXY30Id8y7cw9MV9piduSbB0,10207
- arize/experiments/types.py,sha256=qwv-LV9DqfdlKwXc4SdL9p2QsI8pHoEy2_HCldzsdZs,14093
+ arize/experiments/types.py,sha256=N9FuJx4atV3VxpdVXch6gTI3ZH6guVrJQzfbeJnDxwQ,14087
  arize/experiments/evaluators/__init__.py,sha256=FUTfWRir8jaY8PC3pRHUy5m4RqrKRBPD1EJ4GYkIMLY,76
  arize/experiments/evaluators/base.py,sha256=m9Gk2efWyssSYnZIdSJouB6CNx1KkqHjHUrRWft1L54,10981
  arize/experiments/evaluators/exceptions.py,sha256=iqg5-OcGfRWY354OCcdmVC39cb3jEb-RO1LnVn7BD7M,376
@@ -133,7 +131,7 @@ arize/ml/surrogate_explainer/mimic.py,sha256=Zx5idIHUyln0AxsU4K0NJoQ1HFOtKk0GoA6
  arize/projects/__init__.py,sha256=hx29_muM9ZJxH5j518QJ88-_VVRAFymF5WLRvQVpDH8,64
  arize/projects/client.py,sha256=hjyqg8yfINEIyorPIEwndurnaIfQimjxHQG6AQ_lt-U,4341
  arize/spans/__init__.py,sha256=rXRx4DshOoP9WmbVRUyYdM_QHtUmUTbfO2pZPkztKL0,57
- arize/spans/client.py,sha256=45wkvUmdxtYQKNp75RQYSkuM_Hwx_6Uf-V8osF0h8S8,48785
+ arize/spans/client.py,sha256=N3qa6jAd6JXMZr5pb8XbbVl58-6BgrSRmGSFGn3RdKk,48792
  arize/spans/columns.py,sha256=Sap8UrOeeFebxsI1LbelustuirAPmKrtjW8z3vHJVFk,13378
  arize/spans/conversion.py,sha256=9GrNd9puZtJJZma2C6UMmNEcTfkb4maG1BxkRiE7zZU,4937
  arize/spans/validation/__init__.py,sha256=QSb0EFPBCI2riIrgtSaFXPDGDPQ_aYK4N1Dej499cu8,55
@@ -168,8 +166,8 @@ arize/utils/size.py,sha256=qIjUYYg-NttqK25ObX17_32MzkAepLJDDv8N7YxJwAo,805
  arize/utils/types.py,sha256=BxAQwrqRMeA5BaDZcmEczcKkd-T33mi25FL9kBroNXA,2892
  arize/utils/online_tasks/__init__.py,sha256=T7778_bgutP0U7h8e9NRPYfwBQSmHOi859M1K2fjpjw,213
  arize/utils/online_tasks/dataframe_preprocessor.py,sha256=-VcQSetJjZJamWyZryvkA1LhPOT7BdqDGOL4hP-wtik,9399
- arize-8.0.0b0.dist-info/METADATA,sha256=wRxcpzxQ06IxGjITW8n-kRBjEr_DlocmQXRAIW74oA0,29211
- arize-8.0.0b0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
- arize-8.0.0b0.dist-info/licenses/LICENSE,sha256=Yseh419WQGiW16p8pS0MwNJyrAIrXSeW59aQXbijY2o,9723
- arize-8.0.0b0.dist-info/licenses/NOTICE,sha256=V9Mdpy_w2tgo5GxgjpsZFTu2WcltLQIh6wBVFwBnJuY,550
- arize-8.0.0b0.dist-info/RECORD,,
+ arize-8.0.0b1.dist-info/METADATA,sha256=rkXsmAOix3klcDApv4J3sJbXyEO0cF8zsTWXzE_o_N8,27749
+ arize-8.0.0b1.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ arize-8.0.0b1.dist-info/licenses/LICENSE,sha256=Yseh419WQGiW16p8pS0MwNJyrAIrXSeW59aQXbijY2o,9723
+ arize-8.0.0b1.dist-info/licenses/NOTICE,sha256=V9Mdpy_w2tgo5GxgjpsZFTu2WcltLQIh6wBVFwBnJuY,550
+ arize-8.0.0b1.dist-info/RECORD,,
@@ -1,172 +0,0 @@
- # coding: utf-8
-
- """
- Arize REST API
-
- API specification for the backend data server. The API is hosted globally at https://api.arize.com/v2 or in your own environment.
-
- The version of the OpenAPI document: 2.0.0
- Generated by OpenAPI Generator (https://openapi-generator.tech)
-
- Do not edit the class manually.
- """ # noqa: E501
-
-
- from __future__ import annotations
- from inspect import getfullargspec
- import json
- import pprint
- import re # noqa: F401
- from pydantic import BaseModel, ConfigDict, Field, StrictBool, StrictFloat, StrictInt, StrictStr, ValidationError, field_validator
- from typing import Optional, Union
- from typing import Union, Any, List, Set, TYPE_CHECKING, Optional, Dict
- from typing_extensions import Literal, Self
- from pydantic import Field
-
- PRIMITIVEVALUE_ANY_OF_SCHEMAS = ["bool", "float", "int", "str"]
-
- class PrimitiveValue(BaseModel):
- """
- A JSON primitive value (string, number, integer, or boolean)
- """
-
- # data type: int
- anyof_schema_1_validator: Optional[StrictInt] = None
- # data type: float
- anyof_schema_2_validator: Optional[Union[StrictFloat, StrictInt]] = None
- # data type: str
- anyof_schema_3_validator: Optional[StrictStr] = None
- # data type: bool
- anyof_schema_4_validator: Optional[StrictBool] = None
- if TYPE_CHECKING:
- actual_instance: Optional[Union[bool, float, int, str]] = None
- else:
- actual_instance: Any = None
- any_of_schemas: Set[str] = { "bool", "float", "int", "str" }
-
- model_config = {
- "validate_assignment": True,
- "protected_namespaces": (),
- }
-
- def __init__(self, *args, **kwargs) -> None:
- if args:
- if len(args) > 1:
- raise ValueError("If a position argument is used, only 1 is allowed to set `actual_instance`")
- if kwargs:
- raise ValueError("If a position argument is used, keyword arguments cannot be used.")
- super().__init__(actual_instance=args[0])
- else:
- super().__init__(**kwargs)
-
- @field_validator('actual_instance')
- def actual_instance_must_validate_anyof(cls, v):
- instance = PrimitiveValue.model_construct()
- error_messages = []
- # validate data type: int
- try:
- instance.anyof_schema_1_validator = v
- return v
- except (ValidationError, ValueError) as e:
- error_messages.append(str(e))
- # validate data type: float
- try:
- instance.anyof_schema_2_validator = v
- return v
- except (ValidationError, ValueError) as e:
- error_messages.append(str(e))
- # validate data type: str
- try:
- instance.anyof_schema_3_validator = v
- return v
- except (ValidationError, ValueError) as e:
- error_messages.append(str(e))
- # validate data type: bool
- try:
- instance.anyof_schema_4_validator = v
- return v
- except (ValidationError, ValueError) as e:
- error_messages.append(str(e))
- if error_messages:
- # no match
- raise ValueError("No match found when setting the actual_instance in PrimitiveValue with anyOf schemas: bool, float, int, str. Details: " + ", ".join(error_messages))
- else:
- return v
-
- @classmethod
- def from_dict(cls, obj: Dict[str, Any]) -> Self:
- return cls.from_json(json.dumps(obj))
-
- @classmethod
- def from_json(cls, json_str: str) -> Self:
- """Returns the object represented by the json string"""
- instance = cls.model_construct()
- error_messages = []
- # deserialize data into int
- try:
- # validation
- instance.anyof_schema_1_validator = json.loads(json_str)
- # assign value to actual_instance
- instance.actual_instance = instance.anyof_schema_1_validator
- return instance
- except (ValidationError, ValueError) as e:
- error_messages.append(str(e))
- # deserialize data into float
- try:
- # validation
- instance.anyof_schema_2_validator = json.loads(json_str)
- # assign value to actual_instance
- instance.actual_instance = instance.anyof_schema_2_validator
- return instance
- except (ValidationError, ValueError) as e:
- error_messages.append(str(e))
- # deserialize data into str
- try:
- # validation
- instance.anyof_schema_3_validator = json.loads(json_str)
- # assign value to actual_instance
- instance.actual_instance = instance.anyof_schema_3_validator
- return instance
- except (ValidationError, ValueError) as e:
- error_messages.append(str(e))
- # deserialize data into bool
- try:
- # validation
- instance.anyof_schema_4_validator = json.loads(json_str)
- # assign value to actual_instance
- instance.actual_instance = instance.anyof_schema_4_validator
- return instance
- except (ValidationError, ValueError) as e:
- error_messages.append(str(e))
-
- if error_messages:
- # no match
- raise ValueError("No match found when deserializing the JSON string into PrimitiveValue with anyOf schemas: bool, float, int, str. Details: " + ", ".join(error_messages))
- else:
- return instance
-
- def to_json(self) -> str:
- """Returns the JSON representation of the actual instance"""
- if self.actual_instance is None:
- return "null"
-
- if hasattr(self.actual_instance, "to_json") and callable(self.actual_instance.to_json):
- return self.actual_instance.to_json()
- else:
- return json.dumps(self.actual_instance)
-
- def to_dict(self) -> Optional[Union[Dict[str, Any], bool, float, int, str]]:
- """Returns the dict representation of the actual instance"""
- if self.actual_instance is None:
- return None
-
- if hasattr(self.actual_instance, "to_dict") and callable(self.actual_instance.to_dict):
- return self.actual_instance.to_dict()
- else:
- return self.actual_instance
-
- def to_str(self) -> str:
- """Returns the string representation of the actual instance"""
- return pprint.pformat(self.model_dump())
-
-
@@ -1,50 +0,0 @@
- # coding: utf-8
-
- """
- Arize REST API
-
- API specification for the backend data server. The API is hosted globally at https://api.arize.com/v2 or in your own environment.
-
- The version of the OpenAPI document: 2.0.0
- Generated by OpenAPI Generator (https://openapi-generator.tech)
-
- Do not edit the class manually.
- """ # noqa: E501
-
-
- import unittest
-
- from arize._generated.api_client.models.primitive_value import PrimitiveValue
-
- class TestPrimitiveValue(unittest.TestCase):
- """PrimitiveValue unit test stubs"""
-
- def setUp(self):
- pass
-
- def tearDown(self):
- pass
-
- def make_instance(self, include_optional) -> PrimitiveValue:
- """Test PrimitiveValue
- include_optional is a boolean, when False only required
- params are included, when True both required and
- optional params are included """
- # uncomment below to create an instance of `PrimitiveValue`
- """
- model = PrimitiveValue()
- if include_optional:
- return PrimitiveValue(
- )
- else:
- return PrimitiveValue(
- )
- """
-
- def testPrimitiveValue(self):
- """Test PrimitiveValue"""
- # inst_req_only = self.make_instance(include_optional=False)
- # inst_req_and_optional = self.make_instance(include_optional=True)
-
- if __name__ == '__main__':
- unittest.main()