sauce 1.0.2 → 2.0.0
- data/.document +5 -0
- data/.gitignore +30 -0
- data/Gemfile +16 -0
- data/README.markdown +39 -145
- data/Rakefile +46 -20
- data/bin/sauce +72 -61
- data/gemfiles/rails2.gemfile +10 -0
- data/gemfiles/rails2.gemfile.lock +77 -0
- data/gemfiles/rails3.gemfile +9 -0
- data/gemfiles/rails3.gemfile.lock +137 -0
- data/lib/generators/sauce/install/install_generator.rb +1 -2
- data/lib/sauce.rb +0 -22
- data/lib/sauce/capybara.rb +70 -32
- data/lib/sauce/capybara/cucumber.rb +121 -0
- data/lib/sauce/config.rb +57 -13
- data/lib/sauce/connect.rb +22 -11
- data/lib/sauce/integrations.rb +27 -69
- data/lib/sauce/jasmine.rb +35 -0
- data/lib/sauce/jasmine/rake.rb +47 -0
- data/lib/sauce/jasmine/runner.rb +4 -0
- data/lib/sauce/job.rb +10 -6
- data/lib/sauce/raketasks.rb +0 -21
- data/lib/sauce/selenium.rb +9 -18
- data/lib/sauce/utilities.rb +0 -17
- data/sauce.gemspec +8 -60
- data/spec/integration/connect_integration_spec.rb +84 -0
- data/spec/sauce/capybara/cucumber_spec.rb +156 -0
- data/spec/sauce/capybara/spec_helper.rb +42 -0
- data/spec/sauce/capybara_spec.rb +121 -0
- data/spec/sauce/config_spec.rb +239 -0
- data/spec/sauce/jasmine_spec.rb +49 -0
- data/spec/sauce/selenium_spec.rb +57 -0
- data/spec/spec_helper.rb +4 -0
- data/support/Sauce-Connect.jar +0 -0
- data/test/test_integrations.rb +202 -0
- data/test/test_testcase.rb +13 -0
- metadata +170 -171
- data/examples/helper.rb +0 -16
- data/examples/other_spec.rb +0 -7
- data/examples/saucelabs_spec.rb +0 -12
- data/examples/test_saucelabs.rb +0 -13
- data/examples/test_saucelabs2.rb +0 -9
- data/support/sauce_connect +0 -938
- data/support/selenium-server.jar +0 -0
- data/support/simplejson/LICENSE.txt +0 -19
- data/support/simplejson/__init__.py +0 -437
- data/support/simplejson/decoder.py +0 -421
- data/support/simplejson/encoder.py +0 -501
- data/support/simplejson/ordered_dict.py +0 -119
- data/support/simplejson/scanner.py +0 -77
- data/support/simplejson/tool.py +0 -39
- data/test/test_config.rb +0 -112
- data/test/test_connect.rb +0 -45
- data/test/test_job.rb +0 -13
- data/test/test_selenium.rb +0 -50
- data/test/test_selenium2.rb +0 -9
data/support/simplejson/decoder.py
@@ -1,421 +0,0 @@
Entire file deleted (421 lines): the vendored simplejson JSONDecoder implementation — module docstring and imports, the optional _speedups C scanstring import, the float constants helper, JSONDecodeError plus the linecol/errmsg helpers, py_scanstring, the JSONObject and JSONArray parsers, and the JSONDecoder class with decode() and raw_decode().
data/support/simplejson/encoder.py
@@ -1,501 +0,0 @@
Entire file deleted (501 lines): the vendored simplejson JSONEncoder implementation — escape tables, encode_basestring and py_encode_basestring_ascii (with the optional _speedups C encoder), the JSONEncoder class with encode() and iterencode(), JSONEncoderForHTML, and the _make_iterencode fallback used when the C encoder is unavailable.