dyff-schema 0.29.0__py3-none-any.whl → 0.30.1__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in the public registry. It is provided for informational purposes only.


dyff/schema/__init__.py CHANGED
@@ -55,8 +55,8 @@ def upcast(
     if not isinstance(obj, dict):
         # Preserve the unset status
         obj = obj.dict(exclude_unset=True)
-    fields = {k: v for k, v in obj.items() if k in t.__fields__}
-    return t.parse_obj(fields)
+    fields = {k: v for k, v in obj.items() if k in t.model_fields}
+    return t.model_validate(fields)
 
 
 __all__ = [
@@ -0,0 +1,2 @@
+__version__ = version = "0.30.1"
+__version_tuple__ = version_tuple = (0, 30, 1)
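
Note on the upcast() change above: `t.__fields__` and `t.parse_obj()` are the Pydantic v1 spellings; 0.30.1 switches to the v2 equivalents `t.model_fields` and `t.model_validate()`. A minimal sketch of the same idiom with stand-in models (names here are illustrative, not from the package):

    import pydantic

    class Base(pydantic.BaseModel):
        name: str

    class Derived(Base):
        extra: int = 0

    # Keep only the fields declared on the target model, then validate --
    # the same pattern the new upcast() body uses.
    obj = Derived(name="x", extra=1)
    data = obj.model_dump(exclude_unset=True)
    fields = {k: v for k, v in data.items() if k in Base.model_fields}
    base = Base.model_validate(fields)  # Base(name='x')
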
dyff/schema/copydoc.py CHANGED
@@ -24,7 +24,7 @@ def copydoc(fromfunc):
     else:
         sourcedoc = fromfunc.__doc__
 
-    if func.__doc__ == None:
+    if func.__doc__ is None:
         func.__doc__ = sourcedoc
     else:
         func.__doc__ = f"{sourcedoc}\n\n{func.__doc__}"
@@ -789,7 +789,7 @@ def known_adapters() -> dict[str, Type[Adapter]]:
 
 def create_adapter(adapter_spec: SchemaAdapter | dict) -> Adapter:
     if isinstance(adapter_spec, SchemaAdapter):
-        adapter_spec = adapter_spec.dict()
+        adapter_spec = adapter_spec.model_dump()
     kind = adapter_spec["kind"]
     if (adapter_t := known_adapters().get(kind)) is not None:
         adapter_config = adapter_spec.get("configuration")
@@ -1024,7 +1024,7 @@ def _test():
             SchemaAdapter(
                 kind="ExplodeCollections",
                 configuration={"collections": ["text"]},
-            ).dict()
+            ).model_dump()
         ]
     )
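
The two hunks above (from a file whose header is not shown in this diff) make the same v1-to-v2 rename: `SchemaAdapter.dict()` becomes `SchemaAdapter.model_dump()`. A hedged sketch of the call pattern, with assumed import paths:

    # Import paths are assumptions; adjust to wherever SchemaAdapter and
    # create_adapter are exposed in the installed package.
    from dyff.schema.adapters import SchemaAdapter, create_adapter

    spec = SchemaAdapter(
        kind="ExplodeCollections",
        configuration={"collections": ["text"]},
    )
    # In 0.30.1 create_adapter() serializes the spec with .model_dump()
    # (Pydantic v2) rather than the v1 .dict() used in 0.29.0.
    adapter = create_adapter(spec)
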
dyff/schema/v0/r1/base.py CHANGED
@@ -3,14 +3,13 @@
 
 from __future__ import annotations
 
-import json
 from datetime import datetime, timezone
-from typing import Any, Generic, Literal, NamedTuple, Optional, Type, TypeVar
+from typing import Any, Mapping, NamedTuple, Optional, Type, TypeVar
 
 import pydantic
-
-# ----------------------------------------------------------------------------
-# Fixed-width numeric type value bounds
+from pydantic import ConfigDict, Field, GetCoreSchemaHandler, GetJsonSchemaHandler
+from pydantic_core import CoreSchema, core_schema
+from typing_extensions import Annotated
 
 
 class _dtype(NamedTuple):
@@ -109,174 +108,52 @@ _NumT = TypeVar("_NumT")
 _ConstrainedNumT = TypeVar("_ConstrainedNumT")
 
 
-class FixedWidthNumberMeta(
-    Generic[_NumT, _ConstrainedNumT], pydantic.types.ConstrainedNumberMeta
-):
-    dtype: str
-    minval: _NumT
-    maxval: _NumT
-
-    def __new__(cls, name: str, bases: Any, dct: dict[str, Any]) -> _ConstrainedNumT:  # type: ignore
-        ge = dct.get("ge")
-        gt = dct.get("gt")
-        le = dct.get("le")
-        lt = dct.get("lt")
-        # For integers, we could technically have e.g., ``lt = maxval + 1``,
-        # but then the bound is not representable in the same type, so we don't
-        # allow it
-        if ge is not None and ge < cls.minval:
-            raise ValueError(f"ge must be >= minval")
-        if gt is not None and gt < cls.minval:
-            raise ValueError(f"gt must be >= minval")
-        if le is not None and le > cls.maxval:
-            raise ValueError(f"le must be <= maxval")
-        if lt is not None and lt > cls.maxval:
-            raise ValueError(f"lt must be <= maxval")
-        # Note that the ConstrainedNumberMeta superclass checks that only one
-        # each of ge/gt and le/lt is defined
-        if ge is None and gt is None:
-            ge = cls.minval  # default
-        if le is None and lt is None:
-            le = cls.maxval  # default
-        # pydantic convention seems to be not to add None properties here
-        if ge is not None:
-            dct["ge"] = ge
-        if gt is not None:
-            dct["gt"] = gt
-        if le is not None:
-            dct["le"] = le
-        if lt is not None:
-            dct["lt"] = lt
-        return super().__new__(cls, name, bases, dct)  # type: ignore
-
-
 class DType:
     """Base class for pydantic custom types that have an Arrow .dtype."""
 
     @classmethod
-    def __modify_schema__(
-        cls,
-        field_schema: dict[str, Any],
-    ) -> None:
-        dtype = type(cls).dtype  # type: ignore
+    # TODO[pydantic]: We couldn't refactor `__modify_schema__`, please create the `__get_pydantic_json_schema__` manually.
+    # Check https://docs.pydantic.dev/latest/migration/#defining-custom-types for more information.
+    @classmethod
+    def __get_pydantic_json_schema__(
+        cls, _core_schema: CoreSchema, handler: GetJsonSchemaHandler
+    ):
+        """Custom JSON schema generation for DType."""
+        json_schema = handler(_core_schema)
+        dtype = getattr(cls, "dtype", None)
         if dtype is None:
             raise ValueError("subclasses must set cls.dtype")
-        super().__modify_schema__(field_schema)  # type: ignore
-        field_schema.update({"dyff.io/dtype": dtype})
-
-
-# DType must come first
-class FixedWidthInt(DType, pydantic.ConstrainedInt):
-    pass
-
-
-# DType must come first
-class FixedWidthFloat(DType, pydantic.ConstrainedFloat):
-    pass
-
-
-class Float32Meta(FixedWidthNumberMeta[float, pydantic.ConstrainedFloat]):
-    dtype: str = DTYPE.float32
-    minval: float = -float32_max()
-    maxval: float = float32_max()
-
-
-class Float64Meta(FixedWidthNumberMeta[float, pydantic.ConstrainedFloat]):
-    dtype: str = DTYPE.float64
-    minval: float = -float64_max()
-    maxval: float = float64_max()
-
-
-class Float32(FixedWidthFloat, metaclass=Float32Meta):
-    """A 32-bit float ("single precision")"""
-
-
-class Float64(FixedWidthFloat, metaclass=Float64Meta):
-    """A 64-bit float ("double precision")"""
-
-
-class Int8Meta(FixedWidthNumberMeta[int, pydantic.ConstrainedInt]):
-    dtype: str = DTYPE.int8
-    minval: int = int8_min()
-    maxval: int = int8_max()
-
-
-class Int16Meta(FixedWidthNumberMeta[int, pydantic.ConstrainedInt]):
-    dtype: str = DTYPE.int16
-    minval: int = int16_min()
-    maxval: int = int16_max()
-
-
-class Int32Meta(FixedWidthNumberMeta[int, pydantic.ConstrainedInt]):
-    dtype: str = DTYPE.int32
-    minval: int = int32_min()
-    maxval: int = int32_max()
-
-
-class Int64Meta(FixedWidthNumberMeta[int, pydantic.ConstrainedInt]):
-    dtype: str = DTYPE.int64
-    minval: int = int64_min()
-    maxval: int = int64_max()
-
-
-class UInt8Meta(FixedWidthNumberMeta[int, pydantic.ConstrainedInt]):
-    dtype: str = DTYPE.uint8
-    minval: int = uint8_min()
-    maxval: int = uint8_max()
-
-
-class UInt16Meta(FixedWidthNumberMeta[int, pydantic.ConstrainedInt]):
-    dtype: str = DTYPE.uint16
-    minval: int = uint16_min()
-    maxval: int = uint16_max()
-
-
-class UInt32Meta(FixedWidthNumberMeta[int, pydantic.ConstrainedInt]):
-    dtype: str = DTYPE.uint32
-    minval: int = uint32_min()
-    maxval: int = uint32_max()
-
-
-class UInt64Meta(FixedWidthNumberMeta[int, pydantic.ConstrainedInt]):
-    dtype: str = DTYPE.uint64
-    minval: int = uint64_min()
-    maxval: int = uint64_max()
-
-
-class Int8(FixedWidthInt, metaclass=Int8Meta):
-    """An 8-bit integer."""
-
-
-class Int16(FixedWidthInt, metaclass=Int16Meta):
-    """A 16-bit integer."""
-
-
-class Int32(FixedWidthInt, metaclass=Int32Meta):
-    """A 32-bit integer."""
-
-
-class Int64(FixedWidthInt, metaclass=Int64Meta):
-    """A 64-bit integer."""
-
-
-class UInt8(FixedWidthInt, metaclass=UInt8Meta):
-    """An 8-bit unsigned integer."""
-
-
-class UInt16(FixedWidthInt, metaclass=UInt16Meta):
-    """A 16-bit unsigned integer."""
-
-
-class UInt32(FixedWidthInt, metaclass=UInt32Meta):
-    """A 32-bit unsigned integer."""
-
-
-class UInt64(FixedWidthInt, metaclass=UInt64Meta):
-    """A 64-bit unsigned integer."""
-
+        json_schema["dyff.io/dtype"] = dtype
+        return json_schema
+
+
+def constrained_type(
+    _name: str, _dtype: str, base_type: type, **field_constraints: Any
+) -> Type:
+    float_annotated_type = Annotated[
+        base_type,  # type: ignore [valid-type]
+        Field(**field_constraints, json_schema_extra={"dyff.io/dtype": _dtype}),
+    ]
+
+    def __get_pydantic_core_schema__(
+        cls, source_type: Any, handler: GetCoreSchemaHandler
+    ) -> CoreSchema:
+        if issubclass(base_type, float):
+            schema: CoreSchema = core_schema.float_schema(**field_constraints)
+        elif issubclass(base_type, int):
+            schema = core_schema.int_schema(**field_constraints)
+        else:
+            raise TypeError(f"Unsupported base_type: {base_type}")
+        return core_schema.no_info_after_validator_function(cls.val_type, schema)
 
-# ----------------------------------------------------------------------------
-# Type annotation constructors
+    namespace = {
+        "val_type": float_annotated_type,
+        "name": _name,
+        "__get_pydantic_core_schema__": classmethod(__get_pydantic_core_schema__),
+        "dtype": _dtype,
+        "description": f"A {base_type.__name__} with constraints {field_constraints}",
+    }
+    return type(_name, (base_type,), namespace)
 
 
 def float32(
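
The removed metaclass-based fixed-width types are replaced by the constrained_type() factory above, which builds on Annotated plus Field(json_schema_extra=...). A minimal standalone sketch of that underlying pattern (the names below are illustrative, not part of the package):

    from typing import Annotated

    import pydantic
    from pydantic import Field

    # An Annotated alias carrying both numeric constraints and the extra
    # "dyff.io/dtype" key that ends up in the generated JSON schema.
    Float32Like = Annotated[
        float,
        Field(ge=0.0, json_schema_extra={"dyff.io/dtype": "float32"}),
    ]

    class Example(pydantic.BaseModel):
        x: Float32Like

    props = Example.model_json_schema()["properties"]["x"]
    # props includes "dyff.io/dtype": "float32" alongside "minimum": 0.0
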
@@ -310,7 +187,7 @@ def float32(
         multiple_of=multiple_of,
         allow_inf_nan=allow_inf_nan,
     )
-    return type("Float32Value", (Float32,), namespace)
+    return constrained_type("Float32Value", DTYPE.float32, float, **namespace)
 
 
 def float64(
@@ -344,7 +221,7 @@ def float64(
         multiple_of=multiple_of,
         allow_inf_nan=allow_inf_nan,
     )
-    return type("Float64Value", (Float64,), namespace)
+    return constrained_type("Float64Value", DTYPE.float64, float, **namespace)
 
 
 def int8(
@@ -369,7 +246,7 @@ def int8(
         x: int8(lt=42) = pydantic.Field(description="some field")
     """
     namespace = dict(strict=strict, gt=gt, ge=ge, lt=lt, le=le, multiple_of=multiple_of)
-    return type("Int8Value", (Int8,), namespace)
+    return constrained_type("Int8Value", DTYPE.int8, int, **namespace)
 
 
 def int16(
@@ -394,7 +271,7 @@ def int16(
         x: int16(lt=42) = pydantic.Field(description="some field")
     """
     namespace = dict(strict=strict, gt=gt, ge=ge, lt=lt, le=le, multiple_of=multiple_of)
-    return type("Int16Value", (Int16,), namespace)
+    return constrained_type("Int16Value", DTYPE.int16, int, **namespace)
 
 
 def int32(
@@ -419,7 +296,7 @@ def int32(
         x: int32(lt=42) = pydantic.Field(description="some field")
     """
     namespace = dict(strict=strict, gt=gt, ge=ge, lt=lt, le=le, multiple_of=multiple_of)
-    return type("Int32Value", (Int32,), namespace)
+    return constrained_type("Int32Value", DTYPE.int32, int, **namespace)
 
 
 def int64(
@@ -444,7 +321,7 @@ def int64(
         x: int64(lt=42) = pydantic.Field(description="some field")
     """
     namespace = dict(strict=strict, gt=gt, ge=ge, lt=lt, le=le, multiple_of=multiple_of)
-    return type("Int64Value", (Int64,), namespace)
+    return constrained_type("Int64Value", DTYPE.int64, int, **namespace)
 
 
 def uint8(
@@ -469,7 +346,7 @@ def uint8(
         x: uint8(lt=42) = pydantic.Field(description="some field")
     """
     namespace = dict(strict=strict, gt=gt, ge=ge, lt=lt, le=le, multiple_of=multiple_of)
-    return type("UInt8Value", (UInt8,), namespace)
+    return constrained_type("UInt8Value", DTYPE.uint8, int, **namespace)
 
 
 def uint16(
@@ -494,7 +371,7 @@ def uint16(
         x: uint16(lt=42) = pydantic.Field(description="some field")
     """
     namespace = dict(strict=strict, gt=gt, ge=ge, lt=lt, le=le, multiple_of=multiple_of)
-    return type("UInt16Value", (UInt16,), namespace)
+    return constrained_type("UInt16Value", DTYPE.uint16, int, **namespace)
 
 
 def uint32(
@@ -519,7 +396,7 @@ def uint32(
        x: uint32(lt=42) = pydantic.Field(description="some field")
     """
     namespace = dict(strict=strict, gt=gt, ge=ge, lt=lt, le=le, multiple_of=multiple_of)
-    return type("UInt32Value", (UInt32,), namespace)
+    return constrained_type("UInt32Value", DTYPE.uint32, int, **namespace)
 
 
 def uint64(
@@ -544,40 +421,21 @@ def uint64(
        x: uint64(lt=42) = pydantic.Field(description="some field")
     """
     namespace = dict(strict=strict, gt=gt, ge=ge, lt=lt, le=le, multiple_of=multiple_of)
-    return type("UInt64Value", (UInt64,), namespace)
+    return constrained_type("UInt64Value", DTYPE.uint64, int, **namespace)
 
 
 _ListElementT = TypeVar("_ListElementT")
 
 
 def list_(
-    item_type: Type[_ListElementT], *, list_size: Optional[int] = None
-) -> Type[list]:
+    item_type: type[_ListElementT], *, list_size: Optional[int] = None
+) -> type[list]:
     if list_size is None:
-        return pydantic.conlist(item_type)
+        return Annotated[list[item_type], Field()]  # type: ignore [return-value, valid-type]
     else:
         if list_size <= 0:
             raise ValueError(f"list_size {list_size} must be > 0")
-        return pydantic.conlist(item_type, min_items=list_size, max_items=list_size)
-
-
-class Null:
-    """Use this type in a Union to make Pydantic generate a JSON Schema that accepts
-    'null' for the field value."""
-
-    @classmethod
-    def __get_validators__(cls):  # -> Generator[Callable, None, None]:
-        yield cls.validate
-
-    @classmethod
-    def validate(cls, value: Any, field: pydantic.fields.ModelField) -> None:
-        if value is not None:
-            raise ValueError()
-        return None
-
-    @classmethod
-    def __modify_schema__(cls, field_schema: dict[str, Any]) -> None:
-        field_schema["type"] = "null"
+        return Annotated[list[item_type], Field(min_length=list_size, max_length=list_size)]  # type: ignore [return-value, valid-type]
 
 
 # mypy gets confused because 'dict' is the name of a method in DyffBaseModel
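
The numeric factory functions keep their public signatures; only their return values change from the removed Float32/Int8/... subclasses to constrained_type() results. Usage stays as in the docstrings above; a short sketch (the import path is taken from this file and may differ from the package's public re-exports):

    import pydantic
    from dyff.schema.v0.r1.base import float32, int8  # assumed import path

    class Record(pydantic.BaseModel):
        # Call the factory in the annotation position, exactly as the
        # docstrings above show.
        x: int8(lt=42) = pydantic.Field(description="some field")
        y: float32(ge=0.0) = pydantic.Field(description="a non-negative score")
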
@@ -593,8 +451,7 @@ class DyffBaseModel(pydantic.BaseModel):
     Python reserved words like 'bytes' as field names.
     """
 
-    class Config:
-        extra = pydantic.Extra.forbid
+    model_config = ConfigDict(extra="forbid")
 
     # TODO: (DYFF-223) I think that exclude_unset=True should be the default
     # for all schema objects, but I'm unsure of the consequences of making
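
`class Config: extra = pydantic.Extra.forbid` is the Pydantic v1 configuration style; `model_config = ConfigDict(extra="forbid")` is the v2 equivalent with the same behavior, unknown fields are rejected. A tiny illustration on a stand-in model:

    import pydantic
    from pydantic import ConfigDict

    class Strict(pydantic.BaseModel):
        model_config = ConfigDict(extra="forbid")
        x: int

    Strict(x=1)          # ok
    # Strict(x=1, y=2)   # would raise pydantic.ValidationError (extra inputs are not permitted)
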
@@ -602,31 +459,28 @@ class DyffBaseModel(pydantic.BaseModel):
     def dict(
         self, *, by_alias: bool = True, exclude_none: bool = True, **kwargs
     ) -> _ModelAsDict:
-        return super().dict(by_alias=by_alias, exclude_none=exclude_none, **kwargs)
+        return self.model_dump(by_alias=by_alias, exclude_none=exclude_none, **kwargs)
 
     def json(
         self, *, by_alias: bool = True, exclude_none: bool = True, **kwargs
     ) -> str:
-        return super().json(by_alias=by_alias, exclude_none=exclude_none, **kwargs)
+        return self.model_dump_json(
+            by_alias=by_alias, exclude_none=exclude_none, **kwargs
+        )
 
-    def model_dump(
-        self,
-        *,
-        mode: Literal["python", "json"] = "python",
-        **kwargs,
+    def model_dump(  # type: ignore [override]
+        self, *, by_alias: bool = True, exclude_none: bool = True, **kwargs
     ) -> _ModelAsDict:
-        """Encode the object as a dict containing only JSON datatypes.
-
-        .. deprecated:: 0.8.0
+        return super().model_dump(
+            by_alias=by_alias, exclude_none=exclude_none, **kwargs
+        )
 
-        FIXME: This emulates a Pydantic 2 feature, but the mode="json"
-        option can only be implemented in an inefficient way. Remove when
-        we convert to Pydantic 2. See: DYFF-223
-        """
-        if mode == "python":
-            return self.dict(**kwargs)
-        else:
-            return json.loads(self.json(**kwargs))
+    def model_dump_json(  # type: ignore [override]
+        self, *, by_alias: bool = True, exclude_none: bool = True, **kwargs
+    ) -> str:
+        return super().model_dump_json(
+            by_alias=by_alias, exclude_none=exclude_none, **kwargs
+        )
 
 
 # Note: I *really* wanted to require datetimes to have timezones, like in
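
The overrides above keep the 0.29.0 serialization defaults (by_alias=True, exclude_none=True) while routing dict()/json() through the Pydantic v2 model_dump()/model_dump_json() machinery. A minimal sketch of the same override pattern on a stand-in model:

    from typing import Any, Optional

    import pydantic

    class AliasedModel(pydantic.BaseModel):
        bytes_: int = pydantic.Field(alias="bytes")
        note: Optional[str] = None

        # Flip the defaults: emit aliases and drop None-valued fields unless
        # the caller opts out.
        def model_dump(  # type: ignore [override]
            self, *, by_alias: bool = True, exclude_none: bool = True, **kwargs: Any
        ) -> dict:
            return super().model_dump(
                by_alias=by_alias, exclude_none=exclude_none, **kwargs
            )

    AliasedModel(bytes=10).model_dump()  # {'bytes': 10}
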
@@ -645,16 +499,18 @@ class DyffSchemaBaseModel(DyffBaseModel):
     datetimes are well-ordered.
     """
 
-    @pydantic.root_validator
+    @pydantic.model_validator(mode="after")
     def _ensure_datetime_timezone_utc(cls, values):
-        update = {}
-        for k, v in values.items():
-            if isinstance(v, datetime):
-                if v.tzinfo is None:
-                    update[k] = v.replace(tzinfo=timezone.utc)
-                elif v.tzinfo != timezone.utc:
-                    update[k] = v.astimezone(timezone.utc)
-        values.update(update)
+        for field_name, field_value in values.__dict__.items():
+            if isinstance(field_value, datetime):
+                if field_value.tzinfo is None:
+                    # Set UTC timezone for naive datetime
+                    setattr(
+                        values, field_name, field_value.replace(tzinfo=timezone.utc)
+                    )
+                elif field_value.tzinfo != timezone.utc:
+                    # Convert to UTC timezone
+                    setattr(values, field_name, field_value.astimezone(timezone.utc))
         return values
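
The root_validator becomes an "after" model validator: it receives the constructed instance and rewrites any datetime field so that naive values are tagged as UTC and aware values are converted to UTC. A small sketch of the same pattern on a stand-in model:

    from datetime import datetime, timezone

    import pydantic

    class Stamped(pydantic.BaseModel):
        created: datetime

        @pydantic.model_validator(mode="after")
        def _ensure_utc(self):
            # Normalize every datetime field to UTC after validation.
            for name, value in self.__dict__.items():
                if isinstance(value, datetime):
                    if value.tzinfo is None:
                        setattr(self, name, value.replace(tzinfo=timezone.utc))
                    elif value.tzinfo != timezone.utc:
                        setattr(self, name, value.astimezone(timezone.utc))
            return self

    Stamped(created=datetime(2024, 1, 1, 12, 0)).created.tzinfo  # timezone.utc
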
@@ -666,18 +522,93 @@ class JsonMergePatchSemantics(DyffSchemaBaseModel):
     means "leave this field unchanged".
     """
 
-    def dict(
-        self, *, by_alias: bool = True, exclude_unset=True, exclude_none=False, **kwargs
+    @classmethod
+    def __pydantic_init_subclass__(cls, **kwargs):
+        """This method runs automatically when ANY class inherits from
+        JsonMergePatchSemantics."""
+        super().__pydantic_init_subclass__(**kwargs)
+
+        current_config = getattr(cls, "model_config", {})
+        if hasattr(current_config, "copy"):
+            current_config = current_config.copy()
+        else:
+            current_config = dict(current_config) if current_config else {}
+
+        existing_json_schema_extra = current_config.get("json_schema_extra", None)
+
+        def remove_defaults_from_schema(schema: dict, model_type: type) -> None:
+            if existing_json_schema_extra:
+                if callable(existing_json_schema_extra):
+                    existing_json_schema_extra(schema, model_type)
+                elif isinstance(existing_json_schema_extra, dict):
+                    schema.update(existing_json_schema_extra)
+
+            properties = schema.get("properties", {})
+            for field_name, field_schema in properties.items():
+                if isinstance(field_schema, dict) and "default" in field_schema:
+                    field_schema.pop("default")
+
+        current_config["json_schema_extra"] = remove_defaults_from_schema
+        cls.model_config = current_config
+
+    def dict(  # type: ignore [override]
+        self,
+        *,
+        by_alias: bool = True,
+        exclude_unset=True,
+        exclude_none=False,
+        **kwargs: Mapping[str, Any],
     ) -> _ModelAsDict:
-        return super().dict(
-            by_alias=by_alias, exclude_unset=True, exclude_none=False, **kwargs
+        return self.model_dump(
+            by_alias=by_alias,
+            exclude_unset=exclude_unset,
+            exclude_none=exclude_none,
+            **kwargs,
         )
 
-    def json(
-        self, *, by_alias: bool = True, exclude_unset=True, exclude_none=False, **kwargs
+    def json(  # type: ignore [override]
+        self,
+        *,
+        by_alias: bool = True,
+        exclude_unset: bool = True,
+        exclude_none: bool = False,
+        **kwargs: Mapping[str, Any],
+    ) -> str:
+        return self.model_dump_json(
+            by_alias=by_alias,
+            exclude_unset=exclude_unset,
+            exclude_none=exclude_none,
+            **kwargs,
+        )
+
+    def model_dump(  # type: ignore [override]
+        self,
+        *,
+        by_alias: bool = True,
+        exclude_unset: bool = True,
+        exclude_none: bool = False,
+        **kwargs: Mapping[str, Any],
+    ) -> _ModelAsDict:
+        return super().model_dump(
+            by_alias=by_alias,
+            exclude_unset=exclude_unset,
+            exclude_none=exclude_none,
+            **kwargs,
+        )
+
+    def model_dump_json(  # type: ignore [override]
+        self,
+        *,
+        by_alias: bool = True,
+        exclude_unset: bool = True,
+        exclude_none: bool = False,
+        **kwargs: Mapping[str, Any],
     ) -> str:
-        return super().json(
-            by_alias=by_alias, exclude_unset=True, exclude_none=False, **kwargs
+        return super().model_dump_json(
+            by_alias=by_alias,
+            exclude_unset=exclude_unset,
+            exclude_none=exclude_none,
+            **kwargs,
         )
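
The serialization overrides keep JSON Merge Patch semantics under Pydantic v2: only fields the caller explicitly set are emitted (exclude_unset=True), while explicit None values are kept (exclude_none=False) so they can mean "clear this field". A brief sketch of the effect on a stand-in patch model:

    from typing import Optional

    import pydantic

    class Patch(pydantic.BaseModel):
        name: Optional[str] = None
        description: Optional[str] = None

        def model_dump(self, **kwargs):  # type: ignore [override]
            kwargs.setdefault("exclude_unset", True)
            kwargs.setdefault("exclude_none", False)
            return super().model_dump(**kwargs)

    Patch(description=None).model_dump()
    # {'description': None} -- 'name' was never set, so it is omitted;
    # the explicit None survives and can signal deletion in a merge patch.
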
@@ -686,20 +617,7 @@ __all__ = [
     "DType",
     "DyffBaseModel",
     "DyffSchemaBaseModel",
-    "FixedWidthFloat",
-    "FixedWidthInt",
-    "Float32",
-    "Float64",
-    "Int8",
-    "Int16",
-    "Int32",
-    "Int64",
     "JsonMergePatchSemantics",
-    "Null",
-    "UInt8",
-    "UInt16",
-    "UInt32",
-    "UInt64",
     "float32",
     "float32_max",
     "float64",