omlish 0.0.0.dev81__py3-none-any.whl → 0.0.0.dev82__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. omlish/__about__.py +2 -2
  2. omlish/dataclasses/impl/__init__.py +8 -0
  3. omlish/dataclasses/impl/params.py +3 -0
  4. omlish/dataclasses/impl/slots.py +61 -7
  5. omlish/formats/json/__init__.py +8 -1
  6. omlish/formats/json/backends/__init__.py +7 -0
  7. omlish/formats/json/backends/base.py +38 -0
  8. omlish/formats/json/backends/default.py +10 -0
  9. omlish/formats/json/backends/jiter.py +25 -0
  10. omlish/formats/json/backends/orjson.py +46 -2
  11. omlish/formats/json/backends/std.py +39 -0
  12. omlish/formats/json/backends/ujson.py +49 -0
  13. omlish/formats/json/cli.py +36 -6
  14. omlish/formats/json/consts.py +22 -0
  15. omlish/formats/json/encoding.py +17 -0
  16. omlish/formats/json/json.py +9 -39
  17. omlish/formats/json/render.py +49 -28
  18. omlish/formats/json/stream/__init__.py +0 -0
  19. omlish/formats/json/stream/build.py +113 -0
  20. omlish/formats/json/{stream.py → stream/lex.py} +68 -172
  21. omlish/formats/json/stream/parse.py +244 -0
  22. omlish/formats/json/stream/render.py +119 -0
  23. omlish/genmachine.py +14 -2
  24. omlish/marshal/base.py +2 -0
  25. omlish/marshal/newtypes.py +24 -0
  26. omlish/marshal/standard.py +4 -0
  27. omlish/reflect/__init__.py +1 -0
  28. omlish/reflect/types.py +6 -1
  29. {omlish-0.0.0.dev81.dist-info → omlish-0.0.0.dev82.dist-info}/METADATA +1 -1
  30. {omlish-0.0.0.dev81.dist-info → omlish-0.0.0.dev82.dist-info}/RECORD +34 -24
  31. {omlish-0.0.0.dev81.dist-info → omlish-0.0.0.dev82.dist-info}/LICENSE +0 -0
  32. {omlish-0.0.0.dev81.dist-info → omlish-0.0.0.dev82.dist-info}/WHEEL +0 -0
  33. {omlish-0.0.0.dev81.dist-info → omlish-0.0.0.dev82.dist-info}/entry_points.txt +0 -0
  34. {omlish-0.0.0.dev81.dist-info → omlish-0.0.0.dev82.dist-info}/top_level.txt +0 -0
omlish/__about__.py CHANGED
@@ -1,5 +1,5 @@
1
- __version__ = '0.0.0.dev81'
2
- __revision__ = 'a6b71055e7603077b28ba1999d389905e5663aac'
1
+ __version__ = '0.0.0.dev82'
2
+ __revision__ = '025f44593a2c0c59d06340e1d7dbcd15b1ab7684'
3
3
 
4
4
 
5
5
  #
@@ -22,4 +22,12 @@ TODO:
22
22
  - enums
23
23
  - nodal
24
24
  - embedding? forward kwargs in general? or only for replace?
25
+
26
+ TODO refs:
27
+ - batch up exec calls
28
+ - https://github.com/python/cpython/commit/8945b7ff55b87d11c747af2dad0e3e4d631e62d6
29
+ - add doc parameter to dataclasses.field
30
+ - https://github.com/python/cpython/commit/9c7657f09914254724683d91177aed7947637be5
31
+ - add decorator argument to make_dataclass
32
+ - https://github.com/python/cpython/commit/3e3a4d231518f91ff2f3c5a085b3849e32f1d548
25
33
  """
@@ -12,6 +12,9 @@ class Field_:
12
12
  metadata: Metadata | None = None
13
13
  kw_only: bool | MISSING = MISSING
14
14
 
15
+ if sys.version_info >= (3, 13):
16
+ doc: str | None = None
17
+
15
18
  _field_type: Any = None
16
19
 
17
20
 
@@ -1,5 +1,7 @@
1
1
  import dataclasses as dc
2
+ import inspect
2
3
  import itertools
4
+ import types
3
5
 
4
6
 
5
7
  MISSING = dc.MISSING
@@ -25,7 +27,7 @@ def _get_slots(cls):
25
27
  slots = []
26
28
  if getattr(cls, '__weakrefoffset__', -1) != 0:
27
29
  slots.append('__weakref__')
28
- if getattr(cls, '__dictrefoffset__', -1) != 0:
30
+ if getattr(cls, '__dictoffset__', -1) != 0:
29
31
  slots.append('__dict__')
30
32
  yield from slots
31
33
  case str(slot):
@@ -37,44 +39,96 @@ def _get_slots(cls):
37
39
  raise TypeError(f"Slots of '{cls.__name__}' cannot be determined")
38
40
 
39
41
 
42
+ def _update_func_cell_for__class__(f, oldcls, newcls):
43
+ # Returns True if we update a cell, else False.
44
+ if f is None:
45
+ # f will be None in the case of a property where not all of fget, fset, and fdel are used. Nothing to do in
46
+ # that case.
47
+ return False
48
+ try:
49
+ idx = f.__code__.co_freevars.index('__class__')
50
+ except ValueError:
51
+ # This function doesn't reference __class__, so nothing to do.
52
+ return False
53
+ # Fix the cell to point to the new class, if it's already pointing at the old class. I'm not convinced that the "is
54
+ # oldcls" test is needed, but other than performance can't hurt.
55
+ closure = f.__closure__[idx]
56
+ if closure.cell_contents is oldcls:
57
+ closure.cell_contents = newcls
58
+ return True
59
+ return False
60
+
61
+
40
62
  def add_slots(
41
63
  cls: type,
42
64
  is_frozen: bool,
43
65
  weakref_slot: bool,
44
66
  ) -> type:
67
+ # Need to create a new class, since we can't set __slots__ after a class has been created, and the @dataclass
68
+ # decorator is called after the class is created.
69
+
70
+ # Make sure __slots__ isn't already set.
45
71
  if '__slots__' in cls.__dict__:
46
72
  raise TypeError(f'{cls.__name__} already specifies __slots__')
47
73
 
74
+ # Create a new dict for our new class.
48
75
  cls_dict = dict(cls.__dict__)
49
76
  field_names = tuple(f.name for f in dc.fields(cls)) # noqa
50
77
 
78
+ # Make sure slots don't overlap with those in base classes.
51
79
  inherited_slots = set(itertools.chain.from_iterable(map(_get_slots, cls.__mro__[1:-1])))
52
80
 
81
+ # The slots for our class. Remove slots from our base classes. Add '__weakref__' if weakref_slot was given, unless
82
+ # it is already present.
53
83
  cls_dict['__slots__'] = tuple(
54
84
  itertools.filterfalse(
55
85
  inherited_slots.__contains__,
56
86
  itertools.chain(
57
87
  field_names,
88
+ # gh-93521: '__weakref__' also needs to be filtered out if already present in inherited_slots
58
89
  ('__weakref__',) if weakref_slot else (),
59
90
  ),
60
91
  ),
61
92
  )
62
93
 
63
94
  for field_name in field_names:
95
+ # Remove our attributes, if present. They'll still be available in _MARKER.
64
96
  cls_dict.pop(field_name, None)
65
97
 
98
+ # Remove __dict__ itself.
66
99
  cls_dict.pop('__dict__', None)
100
+
101
+ # Clear existing `__weakref__` descriptor, it belongs to a previous type:
67
102
  cls_dict.pop('__weakref__', None)
68
103
 
69
104
  qualname = getattr(cls, '__qualname__', None)
70
- cls = type(cls)(cls.__name__, cls.__bases__, cls_dict)
105
+ newcls = type(cls)(cls.__name__, cls.__bases__, cls_dict)
71
106
  if qualname is not None:
72
- cls.__qualname__ = qualname
107
+ newcls.__qualname__ = qualname
73
108
 
74
109
  if is_frozen:
110
+ # Need this for pickling frozen classes with slots.
75
111
  if '__getstate__' not in cls_dict:
76
- cls.__getstate__ = _dataclass_getstate # type: ignore
112
+ newcls.__getstate__ = _dataclass_getstate # type: ignore
77
113
  if '__setstate__' not in cls_dict:
78
- cls.__setstate__ = _dataclass_setstate # type: ignore
79
-
80
- return cls
114
+ newcls.__setstate__ = _dataclass_setstate # type: ignore
115
+
116
+ # Fix up any closures which reference __class__. This is used to fix zero argument super so that it points to the
117
+ # correct class (the newly created one, which we're returning) and not the original class. We can break out of this
118
+ # loop as soon as we make an update, since all closures for a class will share a given cell.
119
+ for member in newcls.__dict__.values():
120
+ # If this is a wrapped function, unwrap it.
121
+ member = inspect.unwrap(member)
122
+ if isinstance(member, types.FunctionType):
123
+ if _update_func_cell_for__class__(member, cls, newcls):
124
+ break
125
+
126
+ elif isinstance(member, property):
127
+ if (
128
+ _update_func_cell_for__class__(member.fget, cls, newcls) or
129
+ _update_func_cell_for__class__(member.fset, cls, newcls) or
130
+ _update_func_cell_for__class__(member.fdel, cls, newcls)
131
+ ):
132
+ break
133
+
134
+ return newcls
@@ -1,9 +1,16 @@
1
- from .json import ( # noqa
1
+ from .consts import ( # noqa
2
2
  COMPACT_KWARGS,
3
3
  COMPACT_SEPARATORS,
4
4
  PRETTY_INDENT,
5
5
  PRETTY_KWARGS,
6
+ )
7
+
8
+ from .encoding import ( # noqa
9
+ decodes,
6
10
  detect_encoding,
11
+ )
12
+
13
+ from .json import ( # noqa
7
14
  dump,
8
15
  dump_compact,
9
16
  dump_pretty,
@@ -0,0 +1,7 @@
1
+ from .base import ( # noqa
2
+ Backend,
3
+ )
4
+
5
+ from .default import ( # noqa
6
+ DEFAULT_BACKED,
7
+ )
@@ -0,0 +1,38 @@
1
+ import abc
2
+ import typing as ta
3
+
4
+ from .... import lang
5
+
6
+
7
+ class Backend(lang.Abstract):
8
+ @abc.abstractmethod
9
+ def dump(self, obj: ta.Any, fp: ta.Any, **kwargs: ta.Any) -> None:
10
+ raise NotImplementedError
11
+
12
+ @abc.abstractmethod
13
+ def dumps(self, obj: ta.Any, **kwargs: ta.Any) -> str:
14
+ raise NotImplementedError
15
+
16
+ @abc.abstractmethod
17
+ def load(self, fp: ta.Any, **kwargs: ta.Any) -> ta.Any:
18
+ raise NotImplementedError
19
+
20
+ @abc.abstractmethod
21
+ def loads(self, s: str | bytes | bytearray, **kwargs: ta.Any) -> ta.Any:
22
+ raise NotImplementedError
23
+
24
+ @abc.abstractmethod
25
+ def dump_pretty(self, obj: ta.Any, fp: ta.Any, **kwargs: ta.Any) -> None:
26
+ raise NotImplementedError
27
+
28
+ @abc.abstractmethod
29
+ def dumps_pretty(self, obj: ta.Any, **kwargs: ta.Any) -> str:
30
+ raise NotImplementedError
31
+
32
+ @abc.abstractmethod
33
+ def dump_compact(self, obj: ta.Any, fp: ta.Any, **kwargs: ta.Any) -> None:
34
+ raise NotImplementedError
35
+
36
+ @abc.abstractmethod
37
+ def dumps_compact(self, obj: ta.Any, **kwargs: ta.Any) -> str:
38
+ raise NotImplementedError
@@ -0,0 +1,10 @@
1
+ from .base import Backend
2
+ from .std import STD_BACKEND
3
+ from .ujson import UJSON_BACKEND
4
+
5
+
6
+ DEFAULT_BACKED: Backend
7
+ if UJSON_BACKEND is not None:
8
+ DEFAULT_BACKED = UJSON_BACKEND
9
+ else:
10
+ DEFAULT_BACKED = STD_BACKEND
@@ -0,0 +1,25 @@
1
+ """
2
+ from_json(
3
+ json_data: bytes,
4
+ /,
5
+ *,
6
+ allow_inf_nan: bool = True,
7
+ cache_mode: Literal[True, False, "all", "keys", "none"] = "all",
8
+ partial_mode: Literal[True, False, "off", "on", "trailing-strings"] = False,
9
+ catch_duplicate_keys: bool = False,
10
+ lossless_floats: bool = False,
11
+ ) -> Any:
12
+ json_data: The JSON data to parse
13
+ allow_inf_nan: Whether to allow infinity (`Infinity` an `-Infinity`) and `NaN` values to float fields.
14
+ Defaults to True.
15
+ cache_mode: cache Python strings to improve performance at the cost of some memory usage
16
+ - True / 'all' - cache all strings
17
+ - 'keys' - cache only object keys
18
+ - False / 'none' - cache nothing
19
+ partial_mode: How to handle incomplete strings:
20
+ - False / 'off' - raise an exception if the input is incomplete
21
+ - True / 'on' - allow incomplete JSON but discard the last string if it is incomplete
22
+ - 'trailing-strings' - allow incomplete JSON, and include the last incomplete string in the output
23
+ catch_duplicate_keys: if True, raise an exception if objects contain the same key multiple times
24
+ lossless_floats: if True, preserve full detail on floats using `LosslessFloat`
25
+ """
@@ -1,11 +1,16 @@
1
1
  """
2
- def loads(obj: str | bytes | bytearray | memoryview) -> ta.Any | oj.JSONDEcodeError
3
- def dumps(obj: ta.Any, **DumpOpts) -> bytes
2
+ loads(obj: str | bytes | bytearray | memoryview) -> ta.Any | oj.JSONDEcodeError
3
+ dumps(
4
+ obj: ta.Any,
5
+ default: ta.Callable[[ta.Any], Ata.ny] | None = ...,
6
+ option: int | None = ...,
7
+ ) -> bytes
4
8
  """
5
9
  import dataclasses as dc
6
10
  import typing as ta
7
11
 
8
12
  from .... import lang
13
+ from .base import Backend
9
14
 
10
15
 
11
16
  if ta.TYPE_CHECKING:
@@ -14,6 +19,9 @@ else:
14
19
  oj = lang.proxy_import('orjson')
15
20
 
16
21
 
22
+ ##
23
+
24
+
17
25
  @dc.dataclass(frozen=True, kw_only=True)
18
26
  class Options:
19
27
  append_newline: bool = False # append \n to the output
@@ -70,3 +78,39 @@ class Options:
70
78
  class DumpOpts:
71
79
  default: ta.Callable[[ta.Any], ta.Any] | None = None
72
80
  option: Options = Options()
81
+
82
+
83
+ ##
84
+
85
+
86
+ class OrjsonBackend(Backend):
87
+ def dump(self, obj: ta.Any, fp: ta.Any, **kwargs: ta.Any) -> None:
88
+ fp.write(self.dumps(obj, **kwargs))
89
+
90
+ def dumps(self, obj: ta.Any, **kwargs: ta.Any) -> str:
91
+ return oj.dumps(obj, **kwargs).decode('utf-8')
92
+
93
+ def load(self, fp: ta.Any, **kwargs: ta.Any) -> ta.Any:
94
+ return oj.loads(fp.read(), **kwargs)
95
+
96
+ def loads(self, s: str | bytes | bytearray, **kwargs: ta.Any) -> ta.Any:
97
+ return oj.loads(s, **kwargs)
98
+
99
+ def dump_pretty(self, obj: ta.Any, fp: ta.Any, **kwargs: ta.Any) -> None:
100
+ fp.write(self.dumps_pretty(obj, **kwargs))
101
+
102
+ def dumps_pretty(self, obj: ta.Any, **kwargs: ta.Any) -> str:
103
+ return self.dumps(obj, option=kwargs.pop('option', 0) | oj.OPT_INDENT_2, **kwargs)
104
+
105
+ def dump_compact(self, obj: ta.Any, fp: ta.Any, **kwargs: ta.Any) -> None:
106
+ return self.dump(obj, fp, **kwargs)
107
+
108
+ def dumps_compact(self, obj: ta.Any, **kwargs: ta.Any) -> str:
109
+ return self.dumps(obj, **kwargs)
110
+
111
+
112
+ ORJSON_BACKEND: OrjsonBackend | None
113
+ if lang.can_import('orjson'):
114
+ ORJSON_BACKEND = OrjsonBackend()
115
+ else:
116
+ ORJSON_BACKEND = None
@@ -8,6 +8,13 @@ import dataclasses as dc
8
8
  import json
9
9
  import typing as ta
10
10
 
11
+ from ..consts import COMPACT_KWARGS
12
+ from ..consts import PRETTY_KWARGS
13
+ from .base import Backend
14
+
15
+
16
+ ##
17
+
11
18
 
12
19
  @dc.dataclass(frozen=True, kw_only=True)
13
20
  class DumpOpts:
@@ -35,3 +42,35 @@ class LoadOpts:
35
42
 
36
43
  # called with the result of any object literal decoded with an ordered list of pairs, by default dict # noqa
37
44
  object_pairs_hook: ta.Callable[[list[tuple[str, ta.Any]]], ta.Any] | None = None
45
+
46
+
47
+ ##
48
+
49
+
50
+ class StdBackend(Backend):
51
+ def dump(self, obj: ta.Any, fp: ta.Any, **kwargs: ta.Any) -> None:
52
+ json.dump(obj, fp, **kwargs)
53
+
54
+ def dumps(self, obj: ta.Any, **kwargs: ta.Any) -> str:
55
+ return json.dumps(obj, **kwargs)
56
+
57
+ def load(self, fp: ta.Any, **kwargs: ta.Any) -> ta.Any:
58
+ return json.load(fp, **kwargs)
59
+
60
+ def loads(self, s: str | bytes | bytearray, **kwargs: ta.Any) -> ta.Any:
61
+ return json.loads(s, **kwargs)
62
+
63
+ def dump_pretty(self, obj: ta.Any, fp: ta.Any, **kwargs: ta.Any) -> None:
64
+ json.dump(obj, fp, **PRETTY_KWARGS, **kwargs)
65
+
66
+ def dumps_pretty(self, obj: ta.Any, **kwargs: ta.Any) -> str:
67
+ return json.dumps(obj, **PRETTY_KWARGS, **kwargs)
68
+
69
+ def dump_compact(self, obj: ta.Any, fp: ta.Any, **kwargs: ta.Any) -> None:
70
+ json.dump(obj, fp, **COMPACT_KWARGS, **kwargs)
71
+
72
+ def dumps_compact(self, obj: ta.Any, **kwargs: ta.Any) -> str:
73
+ return json.dumps(obj, **COMPACT_KWARGS, **kwargs)
74
+
75
+
76
+ STD_BACKEND = StdBackend()
@@ -7,6 +7,19 @@ dumps(obj: ta.Any, **DumpOpts) -> None
7
7
  import dataclasses as dc
8
8
  import typing as ta
9
9
 
10
+ from .... import lang
11
+ from ..consts import PRETTY_INDENT
12
+ from .base import Backend
13
+
14
+
15
+ if ta.TYPE_CHECKING:
16
+ import ujson as uj
17
+ else:
18
+ uj = lang.proxy_import('ujson')
19
+
20
+
21
+ ##
22
+
10
23
 
11
24
  @dc.dataclass(frozen=True, kw_only=True)
12
25
  class DumpOpts:
@@ -23,3 +36,39 @@ class DumpOpts:
23
36
  reject_bytes: bool = True
24
37
  default: ta.Callable[[ta.Any], ta.Any] | None = None # should return a serializable version of obj or raise TypeError # noqa
25
38
  separators: tuple[str, str] | None = None
39
+
40
+
41
+ ##
42
+
43
+
44
+ class UjsonBackend(Backend):
45
+ def dump(self, obj: ta.Any, fp: ta.Any, **kwargs: ta.Any) -> None:
46
+ uj.dump(obj, fp, **kwargs)
47
+
48
+ def dumps(self, obj: ta.Any, **kwargs: ta.Any) -> str:
49
+ return uj.dumps(obj, **kwargs)
50
+
51
+ def load(self, fp: ta.Any, **kwargs: ta.Any) -> ta.Any:
52
+ return uj.load(fp, **kwargs)
53
+
54
+ def loads(self, s: str | bytes | bytearray, **kwargs: ta.Any) -> ta.Any:
55
+ return uj.loads(s, **kwargs)
56
+
57
+ def dump_pretty(self, obj: ta.Any, fp: ta.Any, **kwargs: ta.Any) -> None:
58
+ uj.dump(obj, fp, indent=PRETTY_INDENT, **kwargs)
59
+
60
+ def dumps_pretty(self, obj: ta.Any, **kwargs: ta.Any) -> str:
61
+ return uj.dumps(obj, indent=PRETTY_INDENT, **kwargs)
62
+
63
+ def dump_compact(self, obj: ta.Any, fp: ta.Any, **kwargs: ta.Any) -> None:
64
+ uj.dump(obj, fp, **kwargs)
65
+
66
+ def dumps_compact(self, obj: ta.Any, **kwargs: ta.Any) -> str:
67
+ return uj.dumps(obj, **kwargs)
68
+
69
+
70
+ UJSON_BACKEND: UjsonBackend | None
71
+ if lang.can_import('ujson'):
72
+ UJSON_BACKEND = UjsonBackend()
73
+ else:
74
+ UJSON_BACKEND = None
@@ -1,3 +1,8 @@
1
+ """
2
+ TODO:
3
+ - xml - [{"att", {"el", {"cdata", ...
4
+ - csv - dict if headers, array if not
5
+ """
1
6
  import argparse
2
7
  import codecs
3
8
  import contextlib
@@ -14,8 +19,10 @@ from ... import check
14
19
  from ... import lang
15
20
  from ... import term
16
21
  from .render import JsonRenderer
17
- from .stream import JsonStreamLexer
18
- from .stream import JsonStreamValueBuilder
22
+ from .stream.build import JsonObjectBuilder
23
+ from .stream.lex import JsonStreamLexer
24
+ from .stream.parse import JsonStreamParser
25
+ from .stream.render import StreamJsonRenderer
19
26
 
20
27
 
21
28
  if ta.TYPE_CHECKING:
@@ -75,7 +82,8 @@ def _main() -> None:
75
82
  parser.add_argument('file', nargs='?')
76
83
 
77
84
  parser.add_argument('--stream', action='store_true')
78
- parser.add_argument('--stream-buffer-size', type=int, default=0x1000)
85
+ parser.add_argument('--stream-build', action='store_true')
86
+ parser.add_argument('--stream-buffer-size', type=int, default=0x4000)
79
87
 
80
88
  parser.add_argument('-f', '--format')
81
89
 
@@ -179,7 +187,21 @@ def _main() -> None:
179
187
 
180
188
  with contextlib.ExitStack() as es2:
181
189
  lex = es2.enter_context(JsonStreamLexer())
182
- vb = es2.enter_context(JsonStreamValueBuilder())
190
+ parse = es2.enter_context(JsonStreamParser())
191
+
192
+ if args.stream_build:
193
+ build = es2.enter_context(JsonObjectBuilder())
194
+ renderer = None
195
+
196
+ else:
197
+ renderer = StreamJsonRenderer(
198
+ out,
199
+ StreamJsonRenderer.Options(
200
+ **kw,
201
+ style=term_color if args.color else None,
202
+ ),
203
+ )
204
+ build = None
183
205
 
184
206
  while True:
185
207
  buf = os.read(fd, args.stream_buffer_size)
@@ -188,8 +210,14 @@ def _main() -> None:
188
210
  n = 0
189
211
  for c in s:
190
212
  for t in lex(c):
191
- for v in vb(t):
192
- print(render_one(v), file=out)
213
+ for e in parse(t):
214
+ if renderer is not None:
215
+ renderer.render((e,))
216
+
217
+ if build is not None:
218
+ for v in build(e):
219
+ print(render_one(v), file=out)
220
+
193
221
  n += 1
194
222
 
195
223
  if n:
@@ -198,6 +226,8 @@ def _main() -> None:
198
226
  if not buf:
199
227
  break
200
228
 
229
+ out.write('\n')
230
+
201
231
  else:
202
232
  with io.TextIOWrapper(in_file) as tw:
203
233
  v = fmt.load(tw)
@@ -0,0 +1,22 @@
1
+ import typing as ta
2
+
3
+
4
+ ##
5
+
6
+
7
+ PRETTY_INDENT = 2
8
+
9
+ PRETTY_KWARGS: ta.Mapping[str, ta.Any] = dict(
10
+ indent=PRETTY_INDENT,
11
+ )
12
+
13
+
14
+ ##
15
+
16
+
17
+ COMPACT_SEPARATORS = (',', ':')
18
+
19
+ COMPACT_KWARGS: ta.Mapping[str, ta.Any] = dict(
20
+ indent=None,
21
+ separators=COMPACT_SEPARATORS,
22
+ )
@@ -0,0 +1,17 @@
1
+ import json
2
+
3
+
4
+ detect_encoding = json.detect_encoding
5
+
6
+
7
+ def decodes(s: str | bytes | bytearray) -> str:
8
+ if isinstance(s, str):
9
+ if s.startswith('\ufeff'):
10
+ raise json.JSONDecodeError('Unexpected UTF-8 BOM (decode using utf-8-sig)', s, 0)
11
+ return s
12
+
13
+ elif isinstance(s, (bytes, bytearray)):
14
+ return s.decode(detect_encoding(s), 'surrogatepass')
15
+
16
+ else:
17
+ raise TypeError(f'the JSON object must be str, bytes or bytearray, not {s.__class__.__name__}')
@@ -1,47 +1,17 @@
1
- """
2
- TODO:
3
- - backend abstr
4
- - streaming
5
- """
6
- import functools
7
- import json as _json
8
- import typing as ta
1
+ from .backends import DEFAULT_BACKED
9
2
 
10
3
 
11
4
  ##
12
5
 
13
6
 
14
- dump = _json.dump
15
- dumps = _json.dumps
7
+ dump = DEFAULT_BACKED.dump
8
+ dumps = DEFAULT_BACKED.dumps
16
9
 
17
- detect_encoding = _json.detect_encoding
10
+ load = DEFAULT_BACKED.load
11
+ loads = DEFAULT_BACKED.loads
18
12
 
19
- load = _json.load
20
- loads = _json.loads
13
+ dump_pretty = DEFAULT_BACKED.dump_pretty
14
+ dumps_pretty = DEFAULT_BACKED.dumps_pretty
21
15
 
22
-
23
- ##
24
-
25
-
26
- PRETTY_INDENT = 2
27
-
28
- PRETTY_KWARGS: ta.Mapping[str, ta.Any] = dict(
29
- indent=PRETTY_INDENT,
30
- )
31
-
32
- dump_pretty: ta.Callable[..., bytes] = functools.partial(dump, **PRETTY_KWARGS) # type: ignore
33
- dumps_pretty: ta.Callable[..., str] = functools.partial(dumps, **PRETTY_KWARGS)
34
-
35
-
36
- ##
37
-
38
-
39
- COMPACT_SEPARATORS = (',', ':')
40
-
41
- COMPACT_KWARGS: ta.Mapping[str, ta.Any] = dict(
42
- indent=None,
43
- separators=COMPACT_SEPARATORS,
44
- )
45
-
46
- dump_compact: ta.Callable[..., bytes] = functools.partial(dump, **COMPACT_KWARGS) # type: ignore
47
- dumps_compact: ta.Callable[..., str] = functools.partial(dumps, **COMPACT_KWARGS)
16
+ dump_compact = DEFAULT_BACKED.dump_compact
17
+ dumps_compact = DEFAULT_BACKED.dumps_compact