dyff-schema 0.37.3__tar.gz → 0.38.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of dyff-schema might be problematic.
Files changed (68)
  1. {dyff_schema-0.37.3/dyff_schema.egg-info → dyff_schema-0.38.0}/PKG-INFO +1 -1
  2. dyff_schema-0.38.0/dyff/schema/_version.py +2 -0
  3. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/v0/r1/platform.py +44 -13
  4. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/v0/r1/requests.py +12 -42
  5. {dyff_schema-0.37.3 → dyff_schema-0.38.0/dyff_schema.egg-info}/PKG-INFO +1 -1
  6. dyff_schema-0.37.3/dyff/schema/_version.py +0 -2
  7. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/.gitignore +0 -0
  8. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/.gitlab-ci.yml +0 -0
  9. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/.idea/dyff-schema.iml +0 -0
  10. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/.licenserc.yaml +0 -0
  11. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/.pre-commit-config.yaml +0 -0
  12. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/.prettierignore +0 -0
  13. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/.secrets.baseline +0 -0
  14. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/CODE_OF_CONDUCT.md +0 -0
  15. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/LICENSE +0 -0
  16. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/NOTICE +0 -0
  17. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/README.md +0 -0
  18. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/__init__.py +0 -0
  19. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/adapters.py +0 -0
  20. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/annotations.py +0 -0
  21. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/base.py +0 -0
  22. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/commands.py +0 -0
  23. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/copydoc.py +0 -0
  24. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/dataset/__init__.py +0 -0
  25. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/dataset/arrow.py +0 -0
  26. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/dataset/binary.py +0 -0
  27. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/dataset/classification.py +0 -0
  28. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/dataset/embedding.py +0 -0
  29. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/dataset/text.py +0 -0
  30. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/dataset/vision.py +0 -0
  31. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/errors.py +0 -0
  32. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/ids.py +0 -0
  33. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/io/__init__.py +0 -0
  34. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/io/vllm.py +0 -0
  35. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/platform.py +0 -0
  36. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/py.typed +0 -0
  37. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/quantity.py +0 -0
  38. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/requests.py +0 -0
  39. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/responses.py +0 -0
  40. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/test.py +0 -0
  41. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/v0/__init__.py +0 -0
  42. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/v0/r1/__init__.py +0 -0
  43. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/v0/r1/adapters.py +0 -0
  44. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/v0/r1/base.py +0 -0
  45. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/v0/r1/commands.py +0 -0
  46. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/v0/r1/dataset/__init__.py +0 -0
  47. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/v0/r1/dataset/arrow.py +0 -0
  48. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/v0/r1/dataset/binary.py +0 -0
  49. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/v0/r1/dataset/classification.py +0 -0
  50. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/v0/r1/dataset/embedding.py +0 -0
  51. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/v0/r1/dataset/text.py +0 -0
  52. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/v0/r1/dataset/vision.py +0 -0
  53. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/v0/r1/io/__init__.py +0 -0
  54. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/v0/r1/io/vllm.py +0 -0
  55. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/v0/r1/oci.py +0 -0
  56. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/v0/r1/responses.py +0 -0
  57. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/v0/r1/test.py +0 -0
  58. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/v0/r1/version.py +0 -0
  59. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/version.py +0 -0
  60. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff_schema.egg-info/SOURCES.txt +0 -0
  61. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff_schema.egg-info/dependency_links.txt +0 -0
  62. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff_schema.egg-info/requires.txt +0 -0
  63. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff_schema.egg-info/top_level.txt +0 -0
  64. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/makefile +0 -0
  65. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/pyproject.toml +0 -0
  66. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/setup.cfg +0 -0
  67. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/tests/test_adapters.py +0 -0
  68. {dyff_schema-0.37.3 → dyff_schema-0.38.0}/tests/test_import.py +0 -0

{dyff_schema-0.37.3/dyff_schema.egg-info → dyff_schema-0.38.0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dyff-schema
-Version: 0.37.3
+Version: 0.38.0
 Summary: Data models for the Dyff AI auditing platform.
 Author-email: Digital Safety Research Institute <contact@dsri.org>
 License: Apache-2.0

dyff_schema-0.38.0/dyff/schema/_version.py

@@ -0,0 +1,2 @@
+__version__ = version = "0.38.0"
+__version_tuple__ = version_tuple = (0, 38, 0)
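
The generated module exposes the version as both a string and a tuple. A minimal sketch of reading it, assuming the file is importable as dyff.schema._version once the package is installed:

# Sketch only: check the generated version metadata added in 0.38.0.
from dyff.schema._version import __version__, __version_tuple__

assert __version__ == "0.38.0"
assert __version_tuple__ == (0, 38, 0)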

{dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/v0/r1/platform.py

@@ -28,7 +28,6 @@ from datetime import datetime, timedelta, timezone
 from enum import Enum
 from pathlib import Path
 from typing import (
-    TYPE_CHECKING,
     Any,
     Literal,
     NamedTuple,
@@ -51,12 +50,6 @@ from .base import DyffSchemaBaseModel
 from .dataset import arrow, make_item_type, make_response_type
 from .version import SCHEMA_VERSION, SchemaVersion

-if TYPE_CHECKING:
-    from .requests import (
-        AnalysisCreateRequest,
-        EvaluationCreateRequest,
-    )
-
 SYSTEM_ATTRIBUTES = frozenset(["creationTime", "status", "reason"])

@@ -1712,6 +1705,10 @@ class EvaluationClientConfiguration(DyffSchemaBaseModel):
     )


+class EvaluationInferenceSessionRequest(InferenceSessionBase):
+    inferenceService: str = pydantic.Field(description="InferenceService ID")
+
+
 class EvaluationBase(DyffSchemaBaseModel):
     dataset: str = pydantic.Field(description="The Dataset to evaluate on.")

@@ -1731,13 +1728,41 @@ class EvaluationBase(DyffSchemaBaseModel):
     )


+class EvaluationRequestBase(EvaluationBase):
+    """A description of how to run an InferenceService on a Dataset to obtain a set of
+    evaluation results."""
+
+    kind: Literal["EvaluationRequest"] = "EvaluationRequest"
+
+    inferenceSession: Optional[EvaluationInferenceSessionRequest] = pydantic.Field(
+        default=None,
+        description="Specification of the InferenceSession that will perform inference for the evaluation.",
+    )
+
+    inferenceSessionReference: Optional[str] = pydantic.Field(
+        default=None,
+        description="The ID of a running inference session that will be used"
+        " for the evaluation, instead of starting a new one.",
+    )
+
+    @pydantic.model_validator(mode="after")
+    def check_session_exactly_one(self):
+        session = self.inferenceSession is not None
+        session_ref = self.inferenceSessionReference is not None
+        if not (session ^ session_ref):
+            raise ValueError(
+                "must specify exactly one of {inferenceSession, inferenceSessionReference}"
+            )
+        return self
+
+
 class Evaluation(DyffEntity, EvaluationBase):
     """A description of how to run an InferenceService on a Dataset to obtain a set of
     evaluation results."""

     kind: Literal["Evaluation"] = Entities.Evaluation.value

-    inferenceSession: InferenceSessionSpec = pydantic.Field(
+    inferenceSession: InferenceSessionSpec = pydantic.Field(  # type: ignore[assignment]
         description="Specification of the InferenceSession that will perform"
         " inference for the evaluation.",
     )
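
The constraint added above is enforced with a pydantic v2 model_validator(mode="after"). A self-contained sketch of the same exactly-one-of pattern, using hypothetical stand-in names rather than the dyff-schema classes:

# Illustrative sketch only: reproduces the exactly-one-of check from
# EvaluationRequestBase with stand-in field names.
from typing import Optional

import pydantic


class SessionChoice(pydantic.BaseModel):
    session: Optional[str] = None            # stands in for inferenceSession
    session_reference: Optional[str] = None  # stands in for inferenceSessionReference

    @pydantic.model_validator(mode="after")
    def check_exactly_one(self):
        has_session = self.session is not None
        has_reference = self.session_reference is not None
        if not (has_session ^ has_reference):  # XOR: exactly one must be set
            raise ValueError("must specify exactly one of {session, session_reference}")
        return self


SessionChoice(session="new-session-spec")       # valid: start a new session
SessionChoice(session_reference="running-id")   # valid: reuse a running session
# SessionChoice() and SessionChoice(session=..., session_reference=...)
# both raise pydantic.ValidationError.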
@@ -2229,6 +2254,12 @@ class AnalysisBase(DyffSchemaBaseModel):
     )


+class AnalysisRequestBase(AnalysisBase):
+    kind: Literal["AnalysisRequest"] = "AnalysisRequest"
+
+    method: EntityIDField = pydantic.Field(description="Method ID")
+
+
 class AnalysisData(DyffSchemaBaseModel):
     """Arbitrary additional data for the Analysis, specified as a key-value pair where
     the value is the data encoded in base64."""
@@ -2735,8 +2766,8 @@ class ChallengeSubmission(DyffEntity):
 # Pipelines

 PipelineNodeRequest: TypeAlias = Union[
-    "AnalysisCreateRequest",
-    "EvaluationCreateRequest",
+    AnalysisRequestBase,
+    EvaluationRequestBase,
 ]

@@ -3085,8 +3116,6 @@ _ENTITY_CLASS = {
     Entities.Method: Method,
     Entities.Model: Model,
    Entities.Module: Module,
-    Entities.Pipeline: Pipeline,
-    Entities.PipelineRun: PipelineRun,
     Entities.Report: Report,
     Entities.SafetyCase: SafetyCase,
     Entities.Team: Team,
@@ -3218,6 +3247,7 @@ __all__ = [
     "AnalysisData",
     "AnalysisInput",
     "AnalysisOutputQueryFields",
+    "AnalysisRequestBase",
     "AnalysisScope",
     "Annotation",
     "APIFunctions",
@@ -3269,6 +3299,7 @@ __all__ = [
     "Evaluation",
     "EvaluationBase",
     "EvaluationClientConfiguration",
+    "EvaluationRequestBase",
     "ExtractorStep",
     "Family",
     "FamilyBase",
@@ -3341,7 +3372,7 @@ __all__ = [
     "Pipeline",
     "PipelineBase",
     "PipelineNode",
-    "PipelineNodeRequest",
+    "PipelineParameter",
     "PipelineRun",
     "PipelineRunBase",
     "QueryableDyffEntity",

{dyff_schema-0.37.3 → dyff_schema-0.38.0}/dyff/schema/v0/r1/requests.py

@@ -19,12 +19,13 @@ from datetime import datetime
 from typing import Any, Literal, Optional, Union

 import pydantic
+from typing_extensions import TypeAlias

 from ... import upcast
-from . import commands, oci
-from .base import DyffBaseModel, JsonMergePatchSemantics
+from . import commands
+from .base import DyffBaseModel, DyffSchemaBaseModel, JsonMergePatchSemantics
 from .platform import (
-    AnalysisBase,
+    AnalysisRequestBase,
     AnalysisScope,
     ChallengeContent,
     ChallengeTaskBase,
@@ -32,9 +33,9 @@ from .platform import (
     DatasetBase,
     DataView,
     DocumentationBase,
-    EntityIDField,
     Evaluation,
-    EvaluationBase,
+    EvaluationInferenceSessionRequest,
+    EvaluationRequestBase,
     FamilyBase,
     FamilyMemberBase,
     InferenceServiceBase,
@@ -43,6 +44,7 @@ from .platform import (
     ModelSpec,
     ModuleBase,
     PipelineBase,
+    PipelineRunBase,
     ReportBase,
     TagNameType,
     TeamBase,
@@ -112,12 +114,10 @@ class DyffEntityCreateRequest(DyffRequestBase):
     account: str = pydantic.Field(description="Account that owns the entity")


-class AnalysisCreateRequest(DyffEntityCreateRequest, AnalysisBase):
+class AnalysisCreateRequest(DyffEntityCreateRequest, AnalysisRequestBase):
     """An Analysis transforms Datasets, Evaluations, and Measurements into new
     Measurements or SafetyCases."""

-    method: EntityIDField = pydantic.Field(description="Method ID")
-
     @pydantic.field_validator("scope", check_fields=False)
     def _validate_scope(cls, scope: AnalysisScope) -> AnalysisScope:
         # TODO: This has to be a validator function because we can't apply the
@@ -214,40 +214,15 @@ class InferenceSessionTokenCreateRequest(DyffRequestBase):
     )


-class EvaluationInferenceSessionRequest(InferenceSessionBase):
-    inferenceService: str = pydantic.Field(description="InferenceService ID")
-
-
-class EvaluationCreateRequest(DyffEntityCreateRequest, EvaluationBase):
+class EvaluationCreateRequest(DyffEntityCreateRequest, EvaluationRequestBase):
     """A description of how to run an InferenceService on a Dataset to obtain a set of
     evaluation results."""

-    inferenceSession: Optional[EvaluationInferenceSessionRequest] = pydantic.Field(
-        default=None,
-        description="Specification of the InferenceSession that will perform inference for the evaluation.",
-    )
-
-    inferenceSessionReference: Optional[str] = pydantic.Field(
-        default=None,
-        description="The ID of a running inference session that will be used"
-        " for the evaluation, instead of starting a new one.",
-    )
-
-    @pydantic.model_validator(mode="after")
-    def check_session_exactly_one(self):
-        session = self.inferenceSession is not None
-        session_ref = self.inferenceSessionReference is not None
-        if not (session ^ session_ref):
-            raise ValueError(
-                "must specify exactly one of {inferenceSession, inferenceSessionReference}"
-            )
-        return self
-
     @staticmethod
     def repeat_of(evaluation: Evaluation) -> EvaluationCreateRequest:
         """Return a request that will run an existing Evaluation again with the same
         configuration."""
-        base = upcast(EvaluationBase, evaluation)
+        base = upcast(EvaluationRequestBase, evaluation)
         if evaluation.inferenceSessionReference:
             return EvaluationCreateRequest(
                 account=evaluation.account,
@@ -287,13 +262,8 @@ class PipelineCreateRequest(DyffEntityCreateRequest, PipelineBase):
     pass


-class PipelineRunRequest(DyffRequestBase):
-    """A request to run a pipeline."""
-
-    arguments: dict[str, pydantic.JsonValue] = pydantic.Field(
-        default_factory=dict,
-        description="Arguments to pass to the pipeline run.",
-    )
+class PipelineRunRequest(DyffEntityCreateRequest, PipelineRunBase):
+    pass


 class ReportCreateRequest(DyffEntityCreateRequest, ReportBase):
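
Taken together, the requests.py changes move request fields and validation onto the new *RequestBase classes in platform.py, so each *CreateRequest is now just a request base combined with DyffEntityCreateRequest. A standalone sketch of that multiple-inheritance pattern in pydantic, using hypothetical stand-in classes rather than the package's own:

# Illustrative sketch of the consolidation pattern: the shared request base
# carries the request fields, the create request only layers on ownership.
import pydantic


class EvaluationRequestBaseSketch(pydantic.BaseModel):
    dataset: str  # stands in for the shared request fields and validators


class DyffEntityCreateRequestSketch(pydantic.BaseModel):
    account: str  # "Account that owns the entity"


class EvaluationCreateRequestSketch(
    DyffEntityCreateRequestSketch, EvaluationRequestBaseSketch
):
    """Inherits both the shared request schema and the ownership field."""


request = EvaluationCreateRequestSketch(account="acct-123", dataset="dataset-abc")
print(request.model_dump())  # both fields are present on the combined model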

{dyff_schema-0.37.3 → dyff_schema-0.38.0/dyff_schema.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dyff-schema
-Version: 0.37.3
+Version: 0.38.0
 Summary: Data models for the Dyff AI auditing platform.
 Author-email: Digital Safety Research Institute <contact@dsri.org>
 License: Apache-2.0

dyff_schema-0.37.3/dyff/schema/_version.py

@@ -1,2 +0,0 @@
-__version__ = version = "0.37.3"
-__version_tuple__ = version_tuple = (0, 37, 3)