crosshair-tool 0.0.99__cp312-cp312-macosx_10_13_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- _crosshair_tracers.cpython-312-darwin.so +0 -0
- crosshair/__init__.py +42 -0
- crosshair/__main__.py +8 -0
- crosshair/_mark_stacks.h +790 -0
- crosshair/_preliminaries_test.py +18 -0
- crosshair/_tracers.h +94 -0
- crosshair/_tracers_pycompat.h +522 -0
- crosshair/_tracers_test.py +138 -0
- crosshair/abcstring.py +245 -0
- crosshair/auditwall.py +190 -0
- crosshair/auditwall_test.py +77 -0
- crosshair/codeconfig.py +113 -0
- crosshair/codeconfig_test.py +117 -0
- crosshair/condition_parser.py +1237 -0
- crosshair/condition_parser_test.py +497 -0
- crosshair/conftest.py +30 -0
- crosshair/copyext.py +155 -0
- crosshair/copyext_test.py +84 -0
- crosshair/core.py +1763 -0
- crosshair/core_and_libs.py +149 -0
- crosshair/core_regestered_types_test.py +82 -0
- crosshair/core_test.py +1316 -0
- crosshair/diff_behavior.py +314 -0
- crosshair/diff_behavior_test.py +261 -0
- crosshair/dynamic_typing.py +346 -0
- crosshair/dynamic_typing_test.py +210 -0
- crosshair/enforce.py +282 -0
- crosshair/enforce_test.py +182 -0
- crosshair/examples/PEP316/__init__.py +1 -0
- crosshair/examples/PEP316/bugs_detected/__init__.py +0 -0
- crosshair/examples/PEP316/bugs_detected/getattr_magic.py +16 -0
- crosshair/examples/PEP316/bugs_detected/hash_consistent_with_equals.py +31 -0
- crosshair/examples/PEP316/bugs_detected/shopping_cart.py +24 -0
- crosshair/examples/PEP316/bugs_detected/showcase.py +39 -0
- crosshair/examples/PEP316/correct_code/__init__.py +0 -0
- crosshair/examples/PEP316/correct_code/arith.py +60 -0
- crosshair/examples/PEP316/correct_code/chess.py +77 -0
- crosshair/examples/PEP316/correct_code/nesting_inference.py +17 -0
- crosshair/examples/PEP316/correct_code/numpy_examples.py +132 -0
- crosshair/examples/PEP316/correct_code/rolling_average.py +35 -0
- crosshair/examples/PEP316/correct_code/showcase.py +104 -0
- crosshair/examples/__init__.py +0 -0
- crosshair/examples/check_examples_test.py +146 -0
- crosshair/examples/deal/__init__.py +1 -0
- crosshair/examples/icontract/__init__.py +1 -0
- crosshair/examples/icontract/bugs_detected/__init__.py +0 -0
- crosshair/examples/icontract/bugs_detected/showcase.py +41 -0
- crosshair/examples/icontract/bugs_detected/wrong_sign.py +8 -0
- crosshair/examples/icontract/correct_code/__init__.py +0 -0
- crosshair/examples/icontract/correct_code/arith.py +51 -0
- crosshair/examples/icontract/correct_code/showcase.py +94 -0
- crosshair/fnutil.py +391 -0
- crosshair/fnutil_test.py +75 -0
- crosshair/fuzz_core_test.py +516 -0
- crosshair/libimpl/__init__.py +0 -0
- crosshair/libimpl/arraylib.py +161 -0
- crosshair/libimpl/binascii_ch_test.py +30 -0
- crosshair/libimpl/binascii_test.py +67 -0
- crosshair/libimpl/binasciilib.py +150 -0
- crosshair/libimpl/bisectlib_test.py +23 -0
- crosshair/libimpl/builtinslib.py +5228 -0
- crosshair/libimpl/builtinslib_ch_test.py +1191 -0
- crosshair/libimpl/builtinslib_test.py +3735 -0
- crosshair/libimpl/codecslib.py +86 -0
- crosshair/libimpl/codecslib_test.py +86 -0
- crosshair/libimpl/collectionslib.py +264 -0
- crosshair/libimpl/collectionslib_ch_test.py +252 -0
- crosshair/libimpl/collectionslib_test.py +332 -0
- crosshair/libimpl/copylib.py +23 -0
- crosshair/libimpl/copylib_test.py +18 -0
- crosshair/libimpl/datetimelib.py +2559 -0
- crosshair/libimpl/datetimelib_ch_test.py +354 -0
- crosshair/libimpl/datetimelib_test.py +112 -0
- crosshair/libimpl/decimallib.py +5257 -0
- crosshair/libimpl/decimallib_ch_test.py +78 -0
- crosshair/libimpl/decimallib_test.py +76 -0
- crosshair/libimpl/encodings/__init__.py +23 -0
- crosshair/libimpl/encodings/_encutil.py +187 -0
- crosshair/libimpl/encodings/ascii.py +44 -0
- crosshair/libimpl/encodings/latin_1.py +40 -0
- crosshair/libimpl/encodings/utf_8.py +93 -0
- crosshair/libimpl/encodings_ch_test.py +83 -0
- crosshair/libimpl/fractionlib.py +16 -0
- crosshair/libimpl/fractionlib_test.py +80 -0
- crosshair/libimpl/functoolslib.py +34 -0
- crosshair/libimpl/functoolslib_test.py +56 -0
- crosshair/libimpl/hashliblib.py +30 -0
- crosshair/libimpl/hashliblib_test.py +18 -0
- crosshair/libimpl/heapqlib.py +47 -0
- crosshair/libimpl/heapqlib_test.py +21 -0
- crosshair/libimpl/importliblib.py +18 -0
- crosshair/libimpl/importliblib_test.py +38 -0
- crosshair/libimpl/iolib.py +216 -0
- crosshair/libimpl/iolib_ch_test.py +128 -0
- crosshair/libimpl/iolib_test.py +19 -0
- crosshair/libimpl/ipaddresslib.py +8 -0
- crosshair/libimpl/itertoolslib.py +44 -0
- crosshair/libimpl/itertoolslib_test.py +44 -0
- crosshair/libimpl/jsonlib.py +984 -0
- crosshair/libimpl/jsonlib_ch_test.py +42 -0
- crosshair/libimpl/jsonlib_test.py +51 -0
- crosshair/libimpl/mathlib.py +179 -0
- crosshair/libimpl/mathlib_ch_test.py +44 -0
- crosshair/libimpl/mathlib_test.py +67 -0
- crosshair/libimpl/oslib.py +7 -0
- crosshair/libimpl/pathliblib_test.py +10 -0
- crosshair/libimpl/randomlib.py +178 -0
- crosshair/libimpl/randomlib_test.py +120 -0
- crosshair/libimpl/relib.py +846 -0
- crosshair/libimpl/relib_ch_test.py +169 -0
- crosshair/libimpl/relib_test.py +493 -0
- crosshair/libimpl/timelib.py +72 -0
- crosshair/libimpl/timelib_test.py +82 -0
- crosshair/libimpl/typeslib.py +15 -0
- crosshair/libimpl/typeslib_test.py +36 -0
- crosshair/libimpl/unicodedatalib.py +75 -0
- crosshair/libimpl/unicodedatalib_test.py +42 -0
- crosshair/libimpl/urlliblib.py +23 -0
- crosshair/libimpl/urlliblib_test.py +19 -0
- crosshair/libimpl/weakreflib.py +13 -0
- crosshair/libimpl/weakreflib_test.py +69 -0
- crosshair/libimpl/zliblib.py +15 -0
- crosshair/libimpl/zliblib_test.py +13 -0
- crosshair/lsp_server.py +261 -0
- crosshair/lsp_server_test.py +30 -0
- crosshair/main.py +973 -0
- crosshair/main_test.py +543 -0
- crosshair/objectproxy.py +376 -0
- crosshair/objectproxy_test.py +41 -0
- crosshair/opcode_intercept.py +601 -0
- crosshair/opcode_intercept_test.py +304 -0
- crosshair/options.py +218 -0
- crosshair/options_test.py +10 -0
- crosshair/patch_equivalence_test.py +75 -0
- crosshair/path_cover.py +209 -0
- crosshair/path_cover_test.py +138 -0
- crosshair/path_search.py +161 -0
- crosshair/path_search_test.py +52 -0
- crosshair/pathing_oracle.py +271 -0
- crosshair/pathing_oracle_test.py +21 -0
- crosshair/pure_importer.py +27 -0
- crosshair/pure_importer_test.py +16 -0
- crosshair/py.typed +0 -0
- crosshair/register_contract.py +273 -0
- crosshair/register_contract_test.py +190 -0
- crosshair/simplestructs.py +1165 -0
- crosshair/simplestructs_test.py +283 -0
- crosshair/smtlib.py +24 -0
- crosshair/smtlib_test.py +14 -0
- crosshair/statespace.py +1199 -0
- crosshair/statespace_test.py +108 -0
- crosshair/stubs_parser.py +352 -0
- crosshair/stubs_parser_test.py +43 -0
- crosshair/test_util.py +329 -0
- crosshair/test_util_test.py +26 -0
- crosshair/tools/__init__.py +0 -0
- crosshair/tools/check_help_in_doc.py +264 -0
- crosshair/tools/check_init_and_setup_coincide.py +119 -0
- crosshair/tools/generate_demo_table.py +127 -0
- crosshair/tracers.py +544 -0
- crosshair/tracers_test.py +154 -0
- crosshair/type_repo.py +151 -0
- crosshair/unicode_categories.py +589 -0
- crosshair/unicode_categories_test.py +27 -0
- crosshair/util.py +741 -0
- crosshair/util_test.py +173 -0
- crosshair/watcher.py +307 -0
- crosshair/watcher_test.py +107 -0
- crosshair/z3util.py +76 -0
- crosshair/z3util_test.py +11 -0
- crosshair_tool-0.0.99.dist-info/METADATA +144 -0
- crosshair_tool-0.0.99.dist-info/RECORD +176 -0
- crosshair_tool-0.0.99.dist-info/WHEEL +6 -0
- crosshair_tool-0.0.99.dist-info/entry_points.txt +3 -0
- crosshair_tool-0.0.99.dist-info/licenses/LICENSE +93 -0
- crosshair_tool-0.0.99.dist-info/top_level.txt +2 -0
|
@@ -0,0 +1,984 @@
|
|
|
1
|
+
#
|
|
2
|
+
# This file includes a modified version of CPython 3.10.11's pure python json
|
|
3
|
+
# implementation.
|
|
4
|
+
#
|
|
5
|
+
# The shared source code is licensed under the PSF license and is
|
|
6
|
+
# copyright © 2001-2022 Python Software Foundation; All Rights Reserved
|
|
7
|
+
#
|
|
8
|
+
# See the "LICENSE" file for complete license details on CrossHair.
|
|
9
|
+
#
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
import codecs
|
|
13
|
+
import json
|
|
14
|
+
import re
|
|
15
|
+
|
|
16
|
+
from crosshair import register_patch
|
|
17
|
+
|
|
18
|
+
#
# Lifted from json/encoder.py:
#

"""Implementation of JSONEncoder
"""
# Slots for CPython's C accelerators; deliberately None in this vendored
# copy so the pure Python implementations below are always used.
c_encode_basestring_ascii = None
c_encode_basestring = None
c_make_encoder = None

# Characters that must be escaped inside a JSON string literal.
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
# Characters that must be escaped when emitting ASCII-only output.
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
# Detects non-ASCII bytes in a bytes object.
HAS_UTF8 = re.compile(b"[\x80-\xff]")
# Short two-character escapes for the characters JSON names explicitly.
ESCAPE_DCT = {
    "\\": "\\\\",
    '"': '\\"',
    "\b": "\\b",
    "\f": "\\f",
    "\n": "\\n",
    "\r": "\\r",
    "\t": "\\t",
}
# Every remaining C0 control character falls back to a \uXXXX escape.
for i in range(0x20):
    ESCAPE_DCT.setdefault(chr(i), f"\\u{i:04x}")

INFINITY = float("inf")
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
def py_encode_basestring(s):
    """Return a JSON representation of a Python string"""
    escaped = ESCAPE.sub(lambda match: ESCAPE_DCT[match.group(0)], s)
    return f'"{escaped}"'
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
# Prefer the C implementation when available; c_encode_basestring is always
# None in this vendored copy, so the pure Python version is selected.
encode_basestring = c_encode_basestring or py_encode_basestring
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
def py_encode_basestring_ascii(s):
    """Return an ASCII-only JSON representation of a Python string"""

    def escape_match(match):
        c = match.group(0)
        if c in ESCAPE_DCT:
            return ESCAPE_DCT[c]
        n = ord(c)
        if n < 0x10000:
            return f"\\u{n:04x}"
        # Non-BMP character: emit a UTF-16 surrogate pair.
        n -= 0x10000
        high = 0xD800 | ((n >> 10) & 0x3FF)
        low = 0xDC00 | (n & 0x3FF)
        return f"\\u{high:04x}\\u{low:04x}"

    return f'"{ESCAPE_ASCII.sub(escape_match, s)}"'
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
# Prefer the C implementation when available; c_encode_basestring_ascii is
# always None in this vendored copy, so the pure Python version is selected.
encode_basestring_ascii = c_encode_basestring_ascii or py_encode_basestring_ascii
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
class JSONEncoder(object):
    """JSON encoder for Python data structures.

    Vendored from CPython's json/encoder.py (see the file header) so that
    CrossHair can trace through a pure Python implementation.
    """

    # Class-level defaults; __init__ may shadow them with instance attributes.
    item_separator = ", "
    key_separator = ": "

    def __init__(
        self,
        *,
        skipkeys=False,
        ensure_ascii=True,
        check_circular=True,
        allow_nan=True,
        sort_keys=False,
        indent=None,
        separators=None,
        default=None,
    ):
        """Store the encoding options on the instance.

        ``separators`` (an ``(item_separator, key_separator)`` pair)
        overrides the class-level defaults; otherwise a non-None ``indent``
        switches the item separator to "," (no space before the newline).
        A non-None ``default`` callable shadows the ``default()`` method.
        """
        self.skipkeys = skipkeys
        self.ensure_ascii = ensure_ascii
        self.check_circular = check_circular
        self.allow_nan = allow_nan
        self.sort_keys = sort_keys
        self.indent = indent
        if separators is not None:
            self.item_separator, self.key_separator = separators
        elif indent is not None:
            self.item_separator = ","
        if default is not None:
            # Instance attribute shadows the default() method below.
            self.default = default

    def default(self, o):
        """Fallback for unserializable objects; always raises TypeError
        unless overridden (or replaced via the ``default`` argument)."""
        raise TypeError(
            f"Object of type {o.__class__.__name__} " f"is not JSON serializable"
        )

    def encode(self, o):
        """Return a JSON string representation of ``o``."""
        # This is for extremely simple cases and benchmarks.
        if isinstance(o, str):
            if self.ensure_ascii:
                return encode_basestring_ascii(o)
            else:
                return encode_basestring(o)
        # This doesn't pass the iterator directly to ''.join() because the
        # exceptions aren't as detailed. The list call should be roughly
        # equivalent to the PySequence_Fast that ''.join() would do.
        chunks = self.iterencode(o, _one_shot=True)
        if not isinstance(chunks, (list, tuple)):
            chunks = list(chunks)
        return "".join(chunks)

    def iterencode(self, o, _one_shot=False):
        """Yield the JSON representation of ``o`` as a sequence of string
        chunks (e.g. for streaming to a file)."""
        if self.check_circular:
            markers = {}
        else:
            # No circular-reference tracking.
            markers = None
        if self.ensure_ascii:
            _encoder = encode_basestring_ascii
        else:
            _encoder = encode_basestring

        def floatstr(
            o,
            allow_nan=self.allow_nan,
            _repr=float.__repr__,
            _inf=INFINITY,
            _neginf=-INFINITY,
        ):
            # Check for specials. Note that this type of test is processor
            # and/or platform-specific, so do tests which don't depend on the
            # internals.

            if o != o:
                text = "NaN"
            elif o == _inf:
                text = "Infinity"
            elif o == _neginf:
                text = "-Infinity"
            else:
                return _repr(o)

            if not allow_nan:
                raise ValueError(
                    "Out of range float values are not JSON compliant: " + repr(o)
                )

            return text

        # c_make_encoder is always None in this vendored copy, so the pure
        # Python _make_iterencode path is always taken.
        if _one_shot and c_make_encoder is not None and self.indent is None:
            _iterencode = c_make_encoder(
                markers,
                self.default,
                _encoder,
                self.indent,
                self.key_separator,
                self.item_separator,
                self.sort_keys,
                self.skipkeys,
                self.allow_nan,
            )
        else:
            _iterencode = _make_iterencode(
                markers,
                self.default,
                _encoder,
                self.indent,
                floatstr,
                self.key_separator,
                self.item_separator,
                self.sort_keys,
                self.skipkeys,
                _one_shot,
            )
        return _iterencode(o, 0)
|
|
197
|
+
|
|
198
|
+
|
|
199
|
+
def _make_iterencode(
    markers,
    _default,
    _encoder,
    _indent,
    _floatstr,
    _key_separator,
    _item_separator,
    _sort_keys,
    _skipkeys,
    _one_shot,
    ## HACK: hand-optimized bytecode; turn globals into locals
    ValueError=ValueError,
    dict=dict,
    float=float,
    id=id,
    int=int,
    isinstance=isinstance,
    list=list,
    str=str,
    tuple=tuple,
    _one_shot=_one_shot,
    _intstr=int.__repr__,
):
    """Build the recursive chunk generator used by JSONEncoder.iterencode.

    Returns ``_iterencode(o, indent_level)``, a generator yielding string
    chunks. ``markers`` is a dict used for circular-reference detection
    (or None to disable it); the remaining parameters carry the encoder's
    configuration. The trailing keyword defaults bind builtins as locals
    for speed (see HACK comment above).
    """

    # A numeric indent is expanded to that many spaces.
    if _indent is not None and not isinstance(_indent, str):
        _indent = " " * _indent

    def _iterencode_list(lst, _current_indent_level):
        # Yield the chunks for a list/tuple value.
        if not lst:
            yield "[]"
            return
        if markers is not None:
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst
        buf = "["
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = "\n" + _indent * _current_indent_level
            separator = _item_separator + newline_indent
            buf += newline_indent
        else:
            newline_indent = None
            separator = _item_separator
        first = True
        for value in lst:
            if first:
                first = False
            else:
                # After the first item, each chunk is prefixed by the separator.
                buf = separator
            if isinstance(value, str):
                yield buf + _encoder(value)
            elif value is None:
                yield buf + "null"
            elif value is True:
                yield buf + "true"
            elif value is False:
                yield buf + "false"
            elif isinstance(value, int):
                # Subclasses of int/float may override __repr__, but we still
                # want to encode them as integers/floats in JSON. One example
                # within the standard library is IntEnum.
                yield buf + _intstr(value)
            elif isinstance(value, float):
                # see comment above for int
                yield buf + _floatstr(value)
            else:
                yield buf
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                yield from chunks
        if newline_indent is not None:
            _current_indent_level -= 1
            yield "\n" + _indent * _current_indent_level
        yield "]"
        if markers is not None:
            del markers[markerid]

    def _iterencode_dict(dct, _current_indent_level):
        # Yield the chunks for a dict value.
        if not dct:
            yield "{}"
            return
        if markers is not None:
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct
        yield "{"
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = "\n" + _indent * _current_indent_level
            item_separator = _item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            item_separator = _item_separator
        first = True
        if _sort_keys:
            items = sorted(dct.items())
        else:
            items = dct.items()
        for key, value in items:
            if isinstance(key, str):
                pass
            # JavaScript is weakly typed for these, so it makes sense to
            # also allow them. Many encoders seem to do something like this.
            elif isinstance(key, float):
                # see comment for int/float in _make_iterencode
                key = _floatstr(key)
            elif key is True:
                key = "true"
            elif key is False:
                key = "false"
            elif key is None:
                key = "null"
            elif isinstance(key, int):
                # see comment for int/float in _make_iterencode
                key = _intstr(key)
            elif _skipkeys:
                continue
            else:
                raise TypeError(
                    f"keys must be str, int, float, bool or None, "
                    f"not {key.__class__.__name__}"
                )
            if first:
                first = False
            else:
                yield item_separator
            yield _encoder(key)
            yield _key_separator
            if isinstance(value, str):
                yield _encoder(value)
            elif value is None:
                yield "null"
            elif value is True:
                yield "true"
            elif value is False:
                yield "false"
            elif isinstance(value, int):
                # see comment for int/float in _make_iterencode
                yield _intstr(value)
            elif isinstance(value, float):
                # see comment for int/float in _make_iterencode
                yield _floatstr(value)
            else:
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                yield from chunks
        if newline_indent is not None:
            _current_indent_level -= 1
            yield "\n" + _indent * _current_indent_level
        yield "}"
        if markers is not None:
            del markers[markerid]

    def _iterencode(o, _current_indent_level):
        # Top-level dispatch over all supported value types; unknown objects
        # are converted via _default and re-encoded.
        if isinstance(o, str):
            yield _encoder(o)
        elif o is None:
            yield "null"
        elif o is True:
            yield "true"
        elif o is False:
            yield "false"
        elif isinstance(o, int):
            # see comment for int/float in _make_iterencode
            yield _intstr(o)
        elif isinstance(o, float):
            # see comment for int/float in _make_iterencode
            yield _floatstr(o)
        elif isinstance(o, (list, tuple)):
            yield from _iterencode_list(o, _current_indent_level)
        elif isinstance(o, dict):
            yield from _iterencode_dict(o, _current_indent_level)
        else:
            if markers is not None:
                markerid = id(o)
                if markerid in markers:
                    raise ValueError("Circular reference detected")
                markers[markerid] = o
            o = _default(o)
            yield from _iterencode(o, _current_indent_level)
            if markers is not None:
                del markers[markerid]

    return _iterencode
|
|
395
|
+
|
|
396
|
+
|
|
397
|
+
#
# Lifted from json/scanner.py:
#

"""JSON token scanner
"""
# Slot for the C-accelerated scanner; deliberately None in this copy.
c_make_scanner = None

# CrossHair-specific tweak: the pure Python scanner's NUMBER_RE would
# otherwise accept unicode digits while the C-based scanner does not, so
# re.ASCII is added here to make both behave identically.
NUMBER_RE = re.compile(
    r"(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?",
    re.VERBOSE | re.MULTILINE | re.DOTALL | re.ASCII,
)
|
|
412
|
+
|
|
413
|
+
|
|
414
|
+
def py_make_scanner(context):
    """Build a ``scan_once(string, idx)`` closure for a JSONDecoder.

    ``context`` supplies the decoder's parse hooks and options as
    attributes. The returned callable parses one JSON value starting at
    ``idx`` and returns a ``(value, end_index)`` pair; it raises
    ``StopIteration(idx)`` when no value starts there (callers translate
    that into "Expecting value" errors).
    """
    # Bind everything to locals up front for fast access in the hot loop.
    parse_object = context.parse_object
    parse_array = context.parse_array
    parse_string = context.parse_string
    match_number = NUMBER_RE.match
    strict = context.strict
    parse_float = context.parse_float
    parse_int = context.parse_int
    parse_constant = context.parse_constant
    object_hook = context.object_hook
    object_pairs_hook = context.object_pairs_hook
    memo = context.memo

    def _scan_once(string, idx):
        try:
            nextchar = string[idx]
        except IndexError:
            # Signal "no value here" via StopIteration carrying the index.
            raise StopIteration(idx) from None

        if nextchar == '"':
            return parse_string(string, idx + 1, strict)
        elif nextchar == "{":
            return parse_object(
                (string, idx + 1),
                strict,
                _scan_once,
                object_hook,
                object_pairs_hook,
                memo,
            )
        elif nextchar == "[":
            return parse_array((string, idx + 1), _scan_once)
        elif nextchar == "n" and string[idx : idx + 4] == "null":
            return None, idx + 4
        elif nextchar == "t" and string[idx : idx + 4] == "true":
            return True, idx + 4
        elif nextchar == "f" and string[idx : idx + 5] == "false":
            return False, idx + 5

        m = match_number(string, idx)
        if m is not None:
            integer, frac, exp = m.groups()
            # A fractional part or exponent makes the token a float.
            if frac or exp:
                res = parse_float(integer + (frac or "") + (exp or ""))
            else:
                res = parse_int(integer)
            return res, m.end()
        elif nextchar == "N" and string[idx : idx + 3] == "NaN":
            return parse_constant("NaN"), idx + 3
        elif nextchar == "I" and string[idx : idx + 8] == "Infinity":
            return parse_constant("Infinity"), idx + 8
        elif nextchar == "-" and string[idx : idx + 9] == "-Infinity":
            return parse_constant("-Infinity"), idx + 9
        else:
            raise StopIteration(idx)

    def scan_once(string, idx):
        try:
            return _scan_once(string, idx)
        finally:
            # The key-interning memo only lives for one top-level scan.
            memo.clear()

    return scan_once
|
|
477
|
+
|
|
478
|
+
|
|
479
|
+
# Prefer the C scanner when available; c_make_scanner is always None in this
# vendored copy, so the pure Python py_make_scanner is selected.
make_scanner = c_make_scanner or py_make_scanner
|
|
480
|
+
|
|
481
|
+
#
|
|
482
|
+
# Lifted from json/decoder.py:
|
|
483
|
+
#
|
|
484
|
+
|
|
485
|
+
"""Implementation of JSONDecoder
|
|
486
|
+
"""
|
|
487
|
+
c_scanstring = None
|
|
488
|
+
|
|
489
|
+
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
|
|
490
|
+
|
|
491
|
+
NaN = float("nan")
|
|
492
|
+
PosInf = float("inf")
|
|
493
|
+
NegInf = float("-inf")
|
|
494
|
+
|
|
495
|
+
|
|
496
|
+
from json import JSONDecodeError
|
|
497
|
+
|
|
498
|
+
_CONSTANTS = {
|
|
499
|
+
"-Infinity": NegInf,
|
|
500
|
+
"Infinity": PosInf,
|
|
501
|
+
"NaN": NaN,
|
|
502
|
+
}
|
|
503
|
+
|
|
504
|
+
|
|
505
|
+
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
|
|
506
|
+
BACKSLASH = {
|
|
507
|
+
'"': '"',
|
|
508
|
+
"\\": "\\",
|
|
509
|
+
"/": "/",
|
|
510
|
+
"b": "\b",
|
|
511
|
+
"f": "\f",
|
|
512
|
+
"n": "\n",
|
|
513
|
+
"r": "\r",
|
|
514
|
+
"t": "\t",
|
|
515
|
+
}
|
|
516
|
+
|
|
517
|
+
|
|
518
|
+
def _decode_uXXXX(s, pos):
|
|
519
|
+
esc = s[pos + 1 : pos + 5]
|
|
520
|
+
if len(esc) == 4 and esc[1] not in "xX":
|
|
521
|
+
try:
|
|
522
|
+
return int(esc, 16)
|
|
523
|
+
except ValueError:
|
|
524
|
+
pass
|
|
525
|
+
msg = "Invalid \\uXXXX escape"
|
|
526
|
+
raise JSONDecodeError(msg, s, pos)
|
|
527
|
+
|
|
528
|
+
|
|
529
|
+
def py_scanstring(s, end, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match):
    """Scan a JSON string literal whose opening quote is at ``end - 1``.

    Returns ``(decoded_string, index_after_closing_quote)``. When
    ``strict`` is false, literal control characters are allowed inside the
    string. Raises JSONDecodeError on unterminated strings or bad escapes.
    """
    chunks = []
    _append = chunks.append
    begin = end - 1
    while 1:
        chunk = _m(s, end)
        if chunk is None:
            raise JSONDecodeError("Unterminated string starting at", s, begin)
        end = chunk.end()
        content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
        if content:
            _append(content)
        # Terminator is the end of string, a literal control character,
        # or a backslash denoting that an escape sequence follows
        if terminator == '"':
            break
        elif terminator != "\\":
            if strict:
                # msg = "Invalid control character %r at" % (terminator,)
                msg = "Invalid control character {0!r} at".format(terminator)
                raise JSONDecodeError(msg, s, end)
            else:
                # Non-strict mode: keep the raw control character.
                _append(terminator)
                continue
        try:
            esc = s[end]
        except IndexError:
            raise JSONDecodeError("Unterminated string starting at", s, begin) from None
        # If not a unicode escape sequence, must be in the lookup table
        if esc != "u":
            try:
                char = _b[esc]
            except KeyError:
                msg = "Invalid \\escape: {0!r}".format(esc)
                raise JSONDecodeError(msg, s, end)
            end += 1
        else:
            uni = _decode_uXXXX(s, end)
            end += 5
            # A high surrogate followed by \uDC00-\uDFFF combines into one
            # non-BMP code point; otherwise the lone surrogate is kept as-is.
            if 0xD800 <= uni <= 0xDBFF and s[end : end + 2] == "\\u":
                uni2 = _decode_uXXXX(s, end + 1)
                if 0xDC00 <= uni2 <= 0xDFFF:
                    uni = 0x10000 + (((uni - 0xD800) << 10) | (uni2 - 0xDC00))
                    end += 6
            char = chr(uni)
        _append(char)
    return "".join(chunks), end
|
|
577
|
+
|
|
578
|
+
|
|
579
|
+
# Use speedup if available (c_scanstring is always None in this vendored
# copy, so the pure Python py_scanstring is selected).
scanstring = c_scanstring or py_scanstring

# JSON insignificant whitespace, both as a regex (for skipping runs) and as
# a membership string (for cheap single-character tests).
WHITESPACE = re.compile(r"[ \t\n\r]*", FLAGS)
WHITESPACE_STR = " \t\n\r"
|
|
584
|
+
|
|
585
|
+
|
|
586
|
+
def JSONObject(
    s_and_end,
    strict,
    scan_once,
    object_hook,
    object_pairs_hook,
    memo=None,
    _w=WHITESPACE.match,
    _ws=WHITESPACE_STR,
):
    """Parse a JSON object whose opening "{" has already been consumed.

    ``s_and_end`` is ``(source_string, index_after_brace)``. Returns
    ``(object, end_index)``; the object is a dict unless object_hook /
    object_pairs_hook transform it. ``memo`` interns repeated key strings.
    """
    s, end = s_and_end
    pairs = []
    pairs_append = pairs.append
    # Backwards compatibility
    if memo is None:
        memo = {}
    memo_get = memo.setdefault
    # Use a slice to prevent IndexError from being raised, the following
    # check will raise a more specific ValueError if the string is empty
    nextchar = s[end : end + 1]
    # Normally we expect nextchar == '"'
    if nextchar != '"':
        if nextchar in _ws:
            end = _w(s, end).end()
            nextchar = s[end : end + 1]
        # Trivial empty object
        if nextchar == "}":
            if object_pairs_hook is not None:
                result = object_pairs_hook(pairs)
                return result, end + 1
            pairs = {}
            if object_hook is not None:
                pairs = object_hook(pairs)
            return pairs, end + 1
        elif nextchar != '"':
            raise JSONDecodeError(
                "Expecting property name enclosed in double quotes", s, end
            )
    end += 1
    while True:
        key, end = scanstring(s, end, strict)
        # Intern repeated keys so equal key strings share one object.
        key = memo_get(key, key)
        # To skip some function call overhead we optimize the fast paths where
        # the JSON key separator is ": " or just ":".
        if s[end : end + 1] != ":":
            end = _w(s, end).end()
            if s[end : end + 1] != ":":
                raise JSONDecodeError("Expecting ':' delimiter", s, end)
        end += 1

        # Fast path: skip zero or one whitespace characters without a regex.
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass

        try:
            value, end = scan_once(s, end)
        except StopIteration as err:
            raise JSONDecodeError("Expecting value", s, err.value) from None
        pairs_append((key, value))
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end = _w(s, end + 1).end()
                nextchar = s[end]
        except IndexError:
            nextchar = ""
        end += 1

        if nextchar == "}":
            break
        elif nextchar != ",":
            raise JSONDecodeError("Expecting ',' delimiter", s, end - 1)
        end = _w(s, end).end()
        nextchar = s[end : end + 1]
        end += 1
        if nextchar != '"':
            raise JSONDecodeError(
                "Expecting property name enclosed in double quotes", s, end - 1
            )
    if object_pairs_hook is not None:
        result = object_pairs_hook(pairs)
        return result, end
    pairs = dict(pairs)
    if object_hook is not None:
        pairs = object_hook(pairs)
    return pairs, end
|
|
676
|
+
|
|
677
|
+
|
|
678
|
+
def JSONArray(s_and_end, scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON array whose opening "[" has already been consumed.

    ``s_and_end`` is ``(source_string, index_after_bracket)``; returns
    ``(list_of_values, end_index)``.
    """
    s, end = s_and_end
    items = []
    ch = s[end : end + 1]
    # Skip whitespace before the first element (or the closing bracket).
    if ch in _ws:
        end = _w(s, end + 1).end()
        ch = s[end : end + 1]
    if ch == "]":
        # Trivial empty array.
        return items, end + 1
    while True:
        try:
            value, end = scan_once(s, end)
        except StopIteration as err:
            raise JSONDecodeError("Expecting value", s, err.value) from None
        items.append(value)
        ch = s[end : end + 1]
        if ch in _ws:
            end = _w(s, end + 1).end()
            ch = s[end : end + 1]
        end += 1
        if ch == "]":
            break
        if ch != ",":
            raise JSONDecodeError("Expecting ',' delimiter", s, end - 1)
        # Fast path: skip zero or one whitespace characters without a regex.
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass

    return items, end
|
|
713
|
+
|
|
714
|
+
|
|
715
|
+
class JSONDecoder(object):
    """Pure-Python counterpart of ``json.JSONDecoder``.

    Same constructor options and ``decode``/``raw_decode`` contract as the
    stdlib class; used here so decoding runs through patchable Python code.
    """

    def __init__(
        self,
        *,
        object_hook=None,
        parse_float=None,
        parse_int=None,
        parse_constant=None,
        strict=True,
        object_pairs_hook=None,
    ):
        self.object_hook = object_hook
        # Falsy hooks fall back to the builtin converters, as in the stdlib.
        self.parse_float = parse_float or float
        self.parse_int = parse_int or int
        self.parse_constant = parse_constant or _CONSTANTS.__getitem__
        self.strict = strict
        self.object_pairs_hook = object_pairs_hook
        self.parse_object = JSONObject
        self.parse_array = JSONArray
        self.parse_string = scanstring
        self.memo = {}
        # Built last, after all parse_* attributes are assigned (matches the
        # stdlib ordering; the scanner is constructed from this instance).
        self.scan_once = make_scanner(self)

    def decode(self, s, _w=WHITESPACE.match):
        """Deserialize ``s`` (a ``str`` JSON document) to a Python object.

        Raises ``JSONDecodeError`` if anything but whitespace trails the
        document.
        """
        obj, end = self.raw_decode(s, idx=_w(s, 0).end())
        end = _w(s, end).end()
        if end != len(s):
            raise JSONDecodeError("Extra data", s, end)
        return obj

    def raw_decode(self, s, idx=0):
        """Decode one JSON value from ``s`` beginning at index ``idx``.

        Returns ``(obj, end)`` where ``end`` indexes just past the value;
        trailing text is left for the caller to inspect.
        """
        try:
            obj, end = self.scan_once(s, idx)
        except StopIteration as err:
            # The scanner reports "no value" with the failing index in err.value.
            raise JSONDecodeError("Expecting value", s, err.value) from None
        return obj, end
|
|
751
|
+
|
|
752
|
+
|
|
753
|
+
#
# Lifted from json/__init__.py
#

# Module-level encoder reused by `dump`/`dumps` whenever every option matches
# these stdlib defaults (see the fast-path checks in those functions).
_default_encoder = JSONEncoder(
    skipkeys=False,
    ensure_ascii=True,
    check_circular=True,
    allow_nan=True,
    indent=None,
    separators=None,
    default=None,
)
|
|
766
|
+
|
|
767
|
+
|
|
768
|
+
def dump(
    obj,
    fp,
    *,
    skipkeys=False,
    ensure_ascii=True,
    check_circular=True,
    allow_nan=True,
    cls=None,
    indent=None,
    separators=None,
    default=None,
    sort_keys=False,
    **kw,
):
    """Serialize ``obj`` as JSON to the file-like object ``fp``.

    Drop-in counterpart of ``json.dump`` built on this module's
    ``JSONEncoder``.
    """
    # When every option is at its default, reuse the shared module-level
    # encoder rather than constructing a fresh one per call.
    all_defaults = (
        cls is None
        and indent is None
        and separators is None
        and default is None
        and not skipkeys
        and not sort_keys
        and not kw
        and ensure_ascii
        and check_circular
        and allow_nan
    )
    if all_defaults:
        chunks = _default_encoder.iterencode(obj)
    else:
        encoder_cls = JSONEncoder if cls is None else cls
        chunks = encoder_cls(
            skipkeys=skipkeys,
            ensure_ascii=ensure_ascii,
            check_circular=check_circular,
            allow_nan=allow_nan,
            indent=indent,
            separators=separators,
            default=default,
            sort_keys=sort_keys,
            **kw,
        ).iterencode(obj)
    # Written chunk-by-chunk (writelines would be faster in some Pythons,
    # at a debuggability cost).
    for chunk in chunks:
        fp.write(chunk)
|
|
815
|
+
|
|
816
|
+
|
|
817
|
+
def dumps(
    obj,
    *,
    skipkeys=False,
    ensure_ascii=True,
    check_circular=True,
    allow_nan=True,
    cls=None,
    indent=None,
    separators=None,
    default=None,
    sort_keys=False,
    **kw,
):
    """Serialize ``obj`` to a JSON ``str`` (drop-in for ``json.dumps``)."""
    # Fast path: with every option at its default, the shared encoder suffices.
    if (
        cls is None
        and indent is None
        and separators is None
        and default is None
        and not skipkeys
        and not sort_keys
        and not kw
        and ensure_ascii
        and check_circular
        and allow_nan
    ):
        return _default_encoder.encode(obj)
    encoder_cls = JSONEncoder if cls is None else cls
    return encoder_cls(
        skipkeys=skipkeys,
        ensure_ascii=ensure_ascii,
        check_circular=check_circular,
        allow_nan=allow_nan,
        indent=indent,
        separators=separators,
        default=default,
        sort_keys=sort_keys,
        **kw,
    ).encode(obj)
|
|
858
|
+
|
|
859
|
+
|
|
860
|
+
# Shared hook-free decoder, reused by the `loads` fast path below.
_default_decoder = JSONDecoder(object_hook=None, object_pairs_hook=None)
|
|
861
|
+
|
|
862
|
+
|
|
863
|
+
def detect_encoding(b):
    """Guess the JSON-spec encoding of the raw bytes ``b``.

    Mirrors ``json.detect_encoding``: explicit BOMs win; otherwise the
    positions of NUL bytes in the first two/four bytes decide, per
    RFC 4627 section 3.  Defaults to ``"utf-8"``.
    """
    # BOM checks: UTF-32 must come before UTF-16, because the UTF-32-LE BOM
    # begins with the UTF-16-LE BOM.
    if b.startswith((codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE)):
        return "utf-32"
    if b.startswith((codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE)):
        return "utf-16"
    if b.startswith(codecs.BOM_UTF8):
        return "utf-8-sig"

    size = len(b)
    if size >= 4:
        if not b[0]:
            # 00 00 -- -- : utf-32-be;  00 XX -- -- : utf-16-be
            return "utf-16-be" if b[1] else "utf-32-be"
        if not b[1]:
            # XX 00 00 00 : utf-32-le;  XX 00 00 XX / XX 00 XX -- : utf-16-le
            return "utf-16-le" if b[2] or b[3] else "utf-32-le"
    elif size == 2:
        if not b[0]:
            # 00 XX : utf-16-be
            return "utf-16-be"
        if not b[1]:
            # XX 00 : utf-16-le
            return "utf-16-le"
    # No BOM, no telltale NULs: the default.
    return "utf-8"
|
|
891
|
+
|
|
892
|
+
|
|
893
|
+
def load(
    fp,
    *,
    cls=None,
    object_hook=None,
    parse_float=None,
    parse_int=None,
    parse_constant=None,
    object_pairs_hook=None,
    **kw,
):
    """Deserialize the JSON document in the file-like object ``fp``.

    Drop-in counterpart of ``json.load``: reads the whole stream and
    delegates to :func:`loads` with every option forwarded unchanged.
    """
    document = fp.read()
    # Options are forwarded individually (not via a merged dict) so that a
    # duplicate keyword in **kw still raises TypeError, as in the stdlib.
    return loads(
        document,
        cls=cls,
        object_hook=object_hook,
        parse_float=parse_float,
        parse_int=parse_int,
        parse_constant=parse_constant,
        object_pairs_hook=object_pairs_hook,
        **kw,
    )
|
|
914
|
+
|
|
915
|
+
|
|
916
|
+
def loads(
    s,
    *,
    cls=None,
    object_hook=None,
    parse_float=None,
    parse_int=None,
    parse_constant=None,
    object_pairs_hook=None,
    **kw,
):
    """Deserialize ``s`` (a ``str``, ``bytes`` or ``bytearray`` containing a
    JSON document) to a Python object.

    Drop-in counterpart of ``json.loads`` built on this module's
    ``JSONDecoder``.
    """
    # Normalize the input to str, rejecting anything else up front.
    if isinstance(s, str):
        if s.startswith("\ufeff"):
            raise JSONDecodeError("Unexpected UTF-8 BOM (decode using utf-8-sig)", s, 0)
    elif isinstance(s, (bytes, bytearray)):
        s = s.decode(detect_encoding(s), "surrogatepass")
    else:
        raise TypeError(
            f"the JSON object must be str, bytes or bytearray, "
            f"not {s.__class__.__name__}"
        )

    # Hook table in the same order the stdlib applies the options.
    hooks = (
        ("object_hook", object_hook),
        ("object_pairs_hook", object_pairs_hook),
        ("parse_float", parse_float),
        ("parse_int", parse_int),
        ("parse_constant", parse_constant),
    )
    # Fast path: no customization at all -> reuse the shared decoder.
    if cls is None and not kw and all(value is None for _, value in hooks):
        return _default_decoder.decode(s)
    decoder_cls = JSONDecoder if cls is None else cls
    for name, value in hooks:
        if value is not None:
            kw[name] = value
    return decoder_cls(**kw).decode(s)
|
|
961
|
+
|
|
962
|
+
|
|
963
|
+
#
|
|
964
|
+
# CrossHair-specific code
|
|
965
|
+
#
|
|
966
|
+
|
|
967
|
+
|
|
968
|
+
def make_registrations() -> None:
    """Register this module's replacements for the stdlib ``json`` API."""
    # Official json API surface, patched one-for-one.
    replacements = (
        (json.dump, dump),
        (json.dumps, dumps),
        (json.load, load),
        (json.loads, loads),
        (json.JSONEncoder.default, JSONEncoder.default),
        (json.JSONEncoder.encode, JSONEncoder.encode),
        (json.JSONEncoder.iterencode, JSONEncoder.iterencode),
        (json.JSONDecoder.decode, JSONDecoder.decode),
        (json.JSONDecoder.raw_decode, JSONDecoder.raw_decode),
    )
    for original, replacement in replacements:
        register_patch(original, replacement)

    # Not in the official API, but used by some packages like hypothesis-jsonschema:
    register_patch(json.encoder._make_iterencode, _make_iterencode)  # type: ignore
    register_patch(json.encoder.encode_basestring_ascii, encode_basestring_ascii)  # type: ignore

    # TODO: Not in official API, but perhaps add some of these?:
    # decoder, detect_encoding, encoder, scanner, make_scanner
|