lxml 5.3.2 cp310-cp310-win32.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (175)
  1. lxml/ElementInclude.py +244 -0
  2. lxml/__init__.py +22 -0
  3. lxml/_elementpath.cp310-win32.pyd +0 -0
  4. lxml/_elementpath.py +341 -0
  5. lxml/apihelpers.pxi +1793 -0
  6. lxml/builder.cp310-win32.pyd +0 -0
  7. lxml/builder.py +232 -0
  8. lxml/classlookup.pxi +580 -0
  9. lxml/cleanup.pxi +215 -0
  10. lxml/cssselect.py +101 -0
  11. lxml/debug.pxi +90 -0
  12. lxml/docloader.pxi +178 -0
  13. lxml/doctestcompare.py +488 -0
  14. lxml/dtd.pxi +479 -0
  15. lxml/etree.cp310-win32.pyd +0 -0
  16. lxml/etree.h +248 -0
  17. lxml/etree.pyx +3732 -0
  18. lxml/etree_api.h +195 -0
  19. lxml/extensions.pxi +833 -0
  20. lxml/html/ElementSoup.py +10 -0
  21. lxml/html/__init__.py +1923 -0
  22. lxml/html/_diffcommand.py +86 -0
  23. lxml/html/_html5builder.py +100 -0
  24. lxml/html/_setmixin.py +56 -0
  25. lxml/html/builder.py +133 -0
  26. lxml/html/clean.py +21 -0
  27. lxml/html/defs.py +135 -0
  28. lxml/html/diff.cp310-win32.pyd +0 -0
  29. lxml/html/diff.py +878 -0
  30. lxml/html/formfill.py +299 -0
  31. lxml/html/html5parser.py +260 -0
  32. lxml/html/soupparser.py +314 -0
  33. lxml/html/usedoctest.py +13 -0
  34. lxml/includes/__init__.pxd +0 -0
  35. lxml/includes/__init__.py +0 -0
  36. lxml/includes/c14n.pxd +25 -0
  37. lxml/includes/config.pxd +3 -0
  38. lxml/includes/dtdvalid.pxd +18 -0
  39. lxml/includes/etree_defs.h +379 -0
  40. lxml/includes/etreepublic.pxd +237 -0
  41. lxml/includes/extlibs/__init__.py +0 -0
  42. lxml/includes/extlibs/zconf.h +543 -0
  43. lxml/includes/extlibs/zlib.h +1938 -0
  44. lxml/includes/htmlparser.pxd +56 -0
  45. lxml/includes/libexslt/__init__.py +0 -0
  46. lxml/includes/libexslt/exslt.h +108 -0
  47. lxml/includes/libexslt/exsltconfig.h +70 -0
  48. lxml/includes/libexslt/exsltexports.h +63 -0
  49. lxml/includes/libexslt/libexslt.h +29 -0
  50. lxml/includes/libxml/HTMLparser.h +320 -0
  51. lxml/includes/libxml/HTMLtree.h +147 -0
  52. lxml/includes/libxml/SAX.h +204 -0
  53. lxml/includes/libxml/SAX2.h +173 -0
  54. lxml/includes/libxml/__init__.py +0 -0
  55. lxml/includes/libxml/c14n.h +128 -0
  56. lxml/includes/libxml/catalog.h +182 -0
  57. lxml/includes/libxml/chvalid.h +230 -0
  58. lxml/includes/libxml/debugXML.h +217 -0
  59. lxml/includes/libxml/dict.h +81 -0
  60. lxml/includes/libxml/encoding.h +233 -0
  61. lxml/includes/libxml/entities.h +151 -0
  62. lxml/includes/libxml/globals.h +529 -0
  63. lxml/includes/libxml/hash.h +236 -0
  64. lxml/includes/libxml/list.h +137 -0
  65. lxml/includes/libxml/nanoftp.h +186 -0
  66. lxml/includes/libxml/nanohttp.h +81 -0
  67. lxml/includes/libxml/parser.h +1265 -0
  68. lxml/includes/libxml/parserInternals.h +662 -0
  69. lxml/includes/libxml/pattern.h +100 -0
  70. lxml/includes/libxml/relaxng.h +218 -0
  71. lxml/includes/libxml/schemasInternals.h +958 -0
  72. lxml/includes/libxml/schematron.h +142 -0
  73. lxml/includes/libxml/threads.h +94 -0
  74. lxml/includes/libxml/tree.h +1314 -0
  75. lxml/includes/libxml/uri.h +94 -0
  76. lxml/includes/libxml/valid.h +448 -0
  77. lxml/includes/libxml/xinclude.h +129 -0
  78. lxml/includes/libxml/xlink.h +189 -0
  79. lxml/includes/libxml/xmlIO.h +369 -0
  80. lxml/includes/libxml/xmlautomata.h +146 -0
  81. lxml/includes/libxml/xmlerror.h +919 -0
  82. lxml/includes/libxml/xmlexports.h +50 -0
  83. lxml/includes/libxml/xmlmemory.h +228 -0
  84. lxml/includes/libxml/xmlmodule.h +57 -0
  85. lxml/includes/libxml/xmlreader.h +428 -0
  86. lxml/includes/libxml/xmlregexp.h +222 -0
  87. lxml/includes/libxml/xmlsave.h +88 -0
  88. lxml/includes/libxml/xmlschemas.h +246 -0
  89. lxml/includes/libxml/xmlschemastypes.h +152 -0
  90. lxml/includes/libxml/xmlstring.h +140 -0
  91. lxml/includes/libxml/xmlunicode.h +202 -0
  92. lxml/includes/libxml/xmlversion.h +526 -0
  93. lxml/includes/libxml/xmlwriter.h +488 -0
  94. lxml/includes/libxml/xpath.h +575 -0
  95. lxml/includes/libxml/xpathInternals.h +632 -0
  96. lxml/includes/libxml/xpointer.h +137 -0
  97. lxml/includes/libxslt/__init__.py +0 -0
  98. lxml/includes/libxslt/attributes.h +39 -0
  99. lxml/includes/libxslt/documents.h +93 -0
  100. lxml/includes/libxslt/extensions.h +262 -0
  101. lxml/includes/libxslt/extra.h +72 -0
  102. lxml/includes/libxslt/functions.h +78 -0
  103. lxml/includes/libxslt/imports.h +75 -0
  104. lxml/includes/libxslt/keys.h +53 -0
  105. lxml/includes/libxslt/libxslt.h +36 -0
  106. lxml/includes/libxslt/namespaces.h +68 -0
  107. lxml/includes/libxslt/numbersInternals.h +73 -0
  108. lxml/includes/libxslt/preproc.h +43 -0
  109. lxml/includes/libxslt/security.h +104 -0
  110. lxml/includes/libxslt/templates.h +77 -0
  111. lxml/includes/libxslt/transform.h +207 -0
  112. lxml/includes/libxslt/trio.h +216 -0
  113. lxml/includes/libxslt/triodef.h +220 -0
  114. lxml/includes/libxslt/variables.h +118 -0
  115. lxml/includes/libxslt/win32config.h +51 -0
  116. lxml/includes/libxslt/xslt.h +110 -0
  117. lxml/includes/libxslt/xsltInternals.h +1992 -0
  118. lxml/includes/libxslt/xsltconfig.h +179 -0
  119. lxml/includes/libxslt/xsltexports.h +64 -0
  120. lxml/includes/libxslt/xsltlocale.h +44 -0
  121. lxml/includes/libxslt/xsltutils.h +343 -0
  122. lxml/includes/lxml-version.h +3 -0
  123. lxml/includes/relaxng.pxd +64 -0
  124. lxml/includes/schematron.pxd +34 -0
  125. lxml/includes/tree.pxd +494 -0
  126. lxml/includes/uri.pxd +5 -0
  127. lxml/includes/xinclude.pxd +22 -0
  128. lxml/includes/xmlerror.pxd +852 -0
  129. lxml/includes/xmlparser.pxd +265 -0
  130. lxml/includes/xmlschema.pxd +35 -0
  131. lxml/includes/xpath.pxd +136 -0
  132. lxml/includes/xslt.pxd +190 -0
  133. lxml/isoschematron/__init__.py +348 -0
  134. lxml/isoschematron/resources/rng/iso-schematron.rng +709 -0
  135. lxml/isoschematron/resources/xsl/RNG2Schtrn.xsl +75 -0
  136. lxml/isoschematron/resources/xsl/XSD2Schtrn.xsl +77 -0
  137. lxml/isoschematron/resources/xsl/iso-schematron-xslt1/iso_abstract_expand.xsl +313 -0
  138. lxml/isoschematron/resources/xsl/iso-schematron-xslt1/iso_dsdl_include.xsl +1160 -0
  139. lxml/isoschematron/resources/xsl/iso-schematron-xslt1/iso_schematron_message.xsl +55 -0
  140. lxml/isoschematron/resources/xsl/iso-schematron-xslt1/iso_schematron_skeleton_for_xslt1.xsl +1796 -0
  141. lxml/isoschematron/resources/xsl/iso-schematron-xslt1/iso_svrl_for_xslt1.xsl +588 -0
  142. lxml/isoschematron/resources/xsl/iso-schematron-xslt1/readme.txt +84 -0
  143. lxml/iterparse.pxi +438 -0
  144. lxml/lxml.etree.h +248 -0
  145. lxml/lxml.etree_api.h +195 -0
  146. lxml/nsclasses.pxi +281 -0
  147. lxml/objectify.cp310-win32.pyd +0 -0
  148. lxml/objectify.pyx +2145 -0
  149. lxml/objectpath.pxi +332 -0
  150. lxml/parser.pxi +2000 -0
  151. lxml/parsertarget.pxi +180 -0
  152. lxml/proxy.pxi +619 -0
  153. lxml/public-api.pxi +178 -0
  154. lxml/pyclasslookup.py +3 -0
  155. lxml/readonlytree.pxi +565 -0
  156. lxml/relaxng.pxi +165 -0
  157. lxml/sax.cp310-win32.pyd +0 -0
  158. lxml/sax.py +275 -0
  159. lxml/saxparser.pxi +875 -0
  160. lxml/schematron.pxi +168 -0
  161. lxml/serializer.pxi +1781 -0
  162. lxml/usedoctest.py +13 -0
  163. lxml/xinclude.pxi +67 -0
  164. lxml/xmlerror.pxi +1654 -0
  165. lxml/xmlid.pxi +179 -0
  166. lxml/xmlschema.pxi +215 -0
  167. lxml/xpath.pxi +487 -0
  168. lxml/xslt.pxi +950 -0
  169. lxml/xsltext.pxi +242 -0
  170. lxml-5.3.2.dist-info/METADATA +100 -0
  171. lxml-5.3.2.dist-info/RECORD +175 -0
  172. lxml-5.3.2.dist-info/WHEEL +5 -0
  173. lxml-5.3.2.dist-info/licenses/LICENSE.txt +29 -0
  174. lxml-5.3.2.dist-info/licenses/LICENSES.txt +29 -0
  175. lxml-5.3.2.dist-info/top_level.txt +1 -0
lxml/html/diff.py ADDED
@@ -0,0 +1,878 @@
+ # cython: language_level=3
+
+
+ import difflib
+ from lxml import etree
+ from lxml.html import fragment_fromstring
+ import re
+
+ __all__ = ['html_annotate', 'htmldiff']
+
+ try:
+     from html import escape as html_escape
+ except ImportError:
+     from cgi import escape as html_escape
+ try:
+     _unicode = unicode
+ except NameError:
+     # Python 3
+     _unicode = str
+ try:
+     basestring
+ except NameError:
+     # Python 3
+     basestring = str
+
+ ############################################################
+ ## Annotation
+ ############################################################
+
+ def default_markup(text, version):
+     return '<span title="%s">%s</span>' % (
+         html_escape(_unicode(version), 1), text)
+
+ def html_annotate(doclist, markup=default_markup):
+     """
+     doclist should be ordered from oldest to newest, like::
+
+         >>> version1 = 'Hello World'
+         >>> version2 = 'Goodbye World'
+         >>> print(html_annotate([(version1, 'version 1'),
+         ...                      (version2, 'version 2')]))
+         <span title="version 2">Goodbye</span> <span title="version 1">World</span>
+
+     The documents must be *fragments* (str/UTF8 or unicode), not
+     complete documents
+
+     The markup argument is a function to markup the spans of words.
+     This function is called like markup('Hello', 'version 2'), and
+     returns HTML.  The first argument is text and never includes any
+     markup.  The default uses a span with a title:
+
+         >>> print(default_markup('Some Text', 'by Joe'))
+         <span title="by Joe">Some Text</span>
+     """
+     # The basic strategy we have is to split the documents up into
+     # logical tokens (which are words with attached markup).  We then
+     # do diffs of each of the versions to track when a token first
+     # appeared in the document; the annotation attached to the token
+     # is the version where it first appeared.
+     tokenlist = [tokenize_annotated(doc, version)
+                  for doc, version in doclist]
+     cur_tokens = tokenlist[0]
+     for tokens in tokenlist[1:]:
+         html_annotate_merge_annotations(cur_tokens, tokens)
+         cur_tokens = tokens
+
+     # After we've tracked all the tokens, we can combine spans of text
+     # that are adjacent and have the same annotation
+     cur_tokens = compress_tokens(cur_tokens)
+     # And finally add markup
+     result = markup_serialize_tokens(cur_tokens, markup)
+     return ''.join(result).strip()
+
+ def tokenize_annotated(doc, annotation):
+     """Tokenize a document and add an annotation attribute to each token
+     """
+     tokens = tokenize(doc, include_hrefs=False)
+     for tok in tokens:
+         tok.annotation = annotation
+     return tokens
+
+ def html_annotate_merge_annotations(tokens_old, tokens_new):
+     """Merge the annotations from tokens_old into tokens_new, when the
+     tokens in the new document already existed in the old document.
+     """
+     s = InsensitiveSequenceMatcher(a=tokens_old, b=tokens_new)
+     commands = s.get_opcodes()
+
+     for command, i1, i2, j1, j2 in commands:
+         if command == 'equal':
+             eq_old = tokens_old[i1:i2]
+             eq_new = tokens_new[j1:j2]
+             copy_annotations(eq_old, eq_new)
+
+ def copy_annotations(src, dest):
+     """
+     Copy annotations from the tokens listed in src to the tokens in dest
+     """
+     assert len(src) == len(dest)
+     for src_tok, dest_tok in zip(src, dest):
+         dest_tok.annotation = src_tok.annotation
+
+ def compress_tokens(tokens):
+     """
+     Combine adjacent tokens when there is no HTML between the tokens,
+     and they share an annotation
+     """
+     result = [tokens[0]]
+     for tok in tokens[1:]:
+         if (not result[-1].post_tags and
+                 not tok.pre_tags and
+                 result[-1].annotation == tok.annotation):
+             compress_merge_back(result, tok)
+         else:
+             result.append(tok)
+     return result
+
+ def compress_merge_back(tokens, tok):
+     """ Merge tok into the last element of tokens (modifying the list of
+     tokens in-place).  """
+     last = tokens[-1]
+     if type(last) is not token or type(tok) is not token:
+         tokens.append(tok)
+     else:
+         text = _unicode(last)
+         if last.trailing_whitespace:
+             text += last.trailing_whitespace
+         text += tok
+         merged = token(text,
+                        pre_tags=last.pre_tags,
+                        post_tags=tok.post_tags,
+                        trailing_whitespace=tok.trailing_whitespace)
+         merged.annotation = last.annotation
+         tokens[-1] = merged
+
+ def markup_serialize_tokens(tokens, markup_func):
+     """
+     Serialize the list of tokens into a list of text chunks, calling
+     markup_func around text to add annotations.
+     """
+     for token in tokens:
+         yield from token.pre_tags
+         html = token.html()
+         html = markup_func(html, token.annotation)
+         if token.trailing_whitespace:
+             html += token.trailing_whitespace
+         yield html
+         yield from token.post_tags
+
+
+ ############################################################
+ ## HTML Diffs
+ ############################################################
+
+ def htmldiff(old_html, new_html):
+     ## FIXME: this should take parsed documents too, and use their body
+     ## or other content.
+     """ Do a diff of the old and new document.  The documents are HTML
+     *fragments* (str/UTF8 or unicode), they are not complete documents
+     (i.e., no <html> tag).
+
+     Returns HTML with <ins> and <del> tags added around the
+     appropriate text.
+
+     Markup is generally ignored, with the markup from new_html
+     preserved, and possibly some markup from old_html (though it is
+     considered acceptable to lose some of the old markup).  Only the
+     words in the HTML are diffed.  The exception is <img> tags, which
+     are treated like words, and the href attribute of <a> tags, which
+     are noted inside the tag itself when there are changes.
+     """
+     old_html_tokens = tokenize(old_html)
+     new_html_tokens = tokenize(new_html)
+     result = htmldiff_tokens(old_html_tokens, new_html_tokens)
+     result = ''.join(result).strip()
+     return fixup_ins_del_tags(result)
+
+ def htmldiff_tokens(html1_tokens, html2_tokens):
+     """ Does a diff on the tokens themselves, returning a list of text
+     chunks (not tokens).
+     """
+     # There are several passes as we do the differences.  The tokens
+     # isolate the portion of the content we care to diff; difflib does
+     # all the actual hard work at that point.
+     #
+     # Then we must create a valid document from pieces of both the old
+     # document and the new document.  We generally prefer to take
+     # markup from the new document, and only do a best effort attempt
+     # to keep markup from the old document; anything that we can't
+     # resolve we throw away.  Also we try to put the deletes as close
+     # to the location where we think they would have been -- because
+     # we are only keeping the markup from the new document, it can be
+     # fuzzy where in the new document the old text would have gone.
+     # Again we just do a best effort attempt.
+     s = InsensitiveSequenceMatcher(a=html1_tokens, b=html2_tokens)
+     commands = s.get_opcodes()
+     result = []
+     for command, i1, i2, j1, j2 in commands:
+         if command == 'equal':
+             result.extend(expand_tokens(html2_tokens[j1:j2], equal=True))
+             continue
+         if command == 'insert' or command == 'replace':
+             ins_tokens = expand_tokens(html2_tokens[j1:j2])
+             merge_insert(ins_tokens, result)
+         if command == 'delete' or command == 'replace':
+             del_tokens = expand_tokens(html1_tokens[i1:i2])
+             merge_delete(del_tokens, result)
+     # If deletes were inserted directly as <del> then we'd have an
+     # invalid document at this point.  Instead we put in special
+     # markers, and when the complete diffed document has been created
+     # we try to move the deletes around and resolve any problems.
+     result = cleanup_delete(result)
+
+     return result
+
+ def expand_tokens(tokens, equal=False):
+     """Given a list of tokens, return a generator of the chunks of
+     text for the data in the tokens.
+     """
+     for token in tokens:
+         yield from token.pre_tags
+         if not equal or not token.hide_when_equal:
+             if token.trailing_whitespace:
+                 yield token.html() + token.trailing_whitespace
+             else:
+                 yield token.html()
+         yield from token.post_tags
+
+ def merge_insert(ins_chunks, doc):
+     """ doc is the already-handled document (as a list of text chunks);
+     here we add <ins>ins_chunks</ins> to the end of that.  """
+     # Though we don't throw away unbalanced_start or unbalanced_end
+     # (we assume there is accompanying markup later or earlier in the
+     # document), we only put <ins> around the balanced portion.
+     unbalanced_start, balanced, unbalanced_end = split_unbalanced(ins_chunks)
+     doc.extend(unbalanced_start)
+     if doc and not doc[-1].endswith(' '):
+         # Fix up the case where the word before the insert didn't end with
+         # a space
+         doc[-1] += ' '
+     doc.append('<ins>')
+     if balanced and balanced[-1].endswith(' '):
+         # We move space outside of </ins>
+         balanced[-1] = balanced[-1][:-1]
+     doc.extend(balanced)
+     doc.append('</ins> ')
+     doc.extend(unbalanced_end)
+
+ # These are sentinels to represent the start and end of a <del>
+ # segment, until we do the cleanup phase to turn them into proper
+ # markup:
+ class DEL_START:
+     pass
+ class DEL_END:
+     pass
+
+ class NoDeletes(Exception):
+     """ Raised when the document no longer contains any pending deletes
+     (DEL_START/DEL_END) """
+
+ def merge_delete(del_chunks, doc):
+     """ Adds the text chunks in del_chunks to the document doc (another
+     list of text chunks) with marker to show it is a delete.
+     cleanup_delete later resolves these markers into <del> tags."""
+     doc.append(DEL_START)
+     doc.extend(del_chunks)
+     doc.append(DEL_END)
+
+ def cleanup_delete(chunks):
+     """ Cleans up any DEL_START/DEL_END markers in the document, replacing
+     them with <del></del>.  To do this while keeping the document
+     valid, it may need to drop some tags (either start or end tags).
+
+     It may also move the del into adjacent tags to try to move it to a
+     similar location where it was originally located (e.g., moving a
+     delete into preceding <div> tag, if the del looks like (DEL_START,
+     'Text</div>', DEL_END)"""
+     while 1:
+         # Find a pending DEL_START/DEL_END, splitting the document
+         # into stuff-preceding-DEL_START, stuff-inside, and
+         # stuff-following-DEL_END
+         try:
+             pre_delete, delete, post_delete = split_delete(chunks)
+         except NoDeletes:
+             # Nothing found, we've cleaned up the entire doc
+             break
+         # The stuff-inside-DEL_START/END may not be well balanced
+         # markup.  First we figure out what unbalanced portions there are:
+         unbalanced_start, balanced, unbalanced_end = split_unbalanced(delete)
+         # Then we move the span forward and/or backward based on these
+         # unbalanced portions:
+         locate_unbalanced_start(unbalanced_start, pre_delete, post_delete)
+         locate_unbalanced_end(unbalanced_end, pre_delete, post_delete)
+         doc = pre_delete
+         if doc and not doc[-1].endswith(' '):
+             # Fix up case where the word before us didn't have a trailing space
+             doc[-1] += ' '
+         doc.append('<del>')
+         if balanced and balanced[-1].endswith(' '):
+             # We move space outside of </del>
+             balanced[-1] = balanced[-1][:-1]
+         doc.extend(balanced)
+         doc.append('</del> ')
+         doc.extend(post_delete)
+         chunks = doc
+     return chunks
+
+ def split_unbalanced(chunks):
+     """Return (unbalanced_start, balanced, unbalanced_end), where each is
+     a list of text and tag chunks.
+
+     unbalanced_start is a list of all the tags that are opened, but
+     not closed in this span.  Similarly, unbalanced_end is a list of
+     tags that are closed but were not opened.  Extracting these might
+     mean some reordering of the chunks."""
+     start = []
+     end = []
+     tag_stack = []
+     balanced = []
+     for chunk in chunks:
+         if not chunk.startswith('<'):
+             balanced.append(chunk)
+             continue
+         endtag = chunk[1] == '/'
+         name = chunk.split()[0].strip('<>/')
+         if name in empty_tags:
+             balanced.append(chunk)
+             continue
+         if endtag:
+             if tag_stack and tag_stack[-1][0] == name:
+                 balanced.append(chunk)
+                 name, pos, tag = tag_stack.pop()
+                 balanced[pos] = tag
+             elif tag_stack:
+                 start.extend([tag for name, pos, tag in tag_stack])
+                 tag_stack = []
+                 end.append(chunk)
+             else:
+                 end.append(chunk)
+         else:
+             tag_stack.append((name, len(balanced), chunk))
+             balanced.append(None)
+     start.extend(
+         [chunk for name, pos, chunk in tag_stack])
+     balanced = [chunk for chunk in balanced if chunk is not None]
+     return start, balanced, end
+
+ def split_delete(chunks):
+     """ Returns (stuff_before_DEL_START, stuff_inside_DEL_START_END,
+     stuff_after_DEL_END).  Returns the first case found (there may be
+     more DEL_STARTs in stuff_after_DEL_END).  Raises NoDeletes if
+     there's no DEL_START found. """
+     try:
+         pos = chunks.index(DEL_START)
+     except ValueError:
+         raise NoDeletes
+     pos2 = chunks.index(DEL_END)
+     return chunks[:pos], chunks[pos+1:pos2], chunks[pos2+1:]
+
+ def locate_unbalanced_start(unbalanced_start, pre_delete, post_delete):
+     """ pre_delete and post_delete implicitly point to a place in the
+     document (where the two were split).  This moves that point (by
+     popping items from one and pushing them onto the other).  It moves
+     the point to try to find a place where unbalanced_start applies.
+
+     As an example::
+
+         >>> unbalanced_start = ['<div>']
+         >>> doc = ['<p>', 'Text', '</p>', '<div>', 'More Text', '</div>']
+         >>> pre, post = doc[:3], doc[3:]
+         >>> pre, post
+         (['<p>', 'Text', '</p>'], ['<div>', 'More Text', '</div>'])
+         >>> locate_unbalanced_start(unbalanced_start, pre, post)
+         >>> pre, post
+         (['<p>', 'Text', '</p>', '<div>'], ['More Text', '</div>'])
+
+     As you can see, we moved the point so that the dangling <div> that
+     we found will be effectively replaced by the div in the original
+     document.  If this doesn't work out, we just throw away
+     unbalanced_start without doing anything.
+     """
+     while 1:
+         if not unbalanced_start:
+             # We have totally succeeded in finding the position
+             break
+         finding = unbalanced_start[0]
+         finding_name = finding.split()[0].strip('<>')
+         if not post_delete:
+             break
+         next = post_delete[0]
+         if next is DEL_START or not next.startswith('<'):
+             # Reached a word, we can't move the delete text forward
+             break
+         if next[1] == '/':
+             # Reached a closing tag, can we go further?  Maybe not...
+             break
+         name = next.split()[0].strip('<>')
+         if name == 'ins':
+             # Can't move into an insert
+             break
+         assert name != 'del', (
+             "Unexpected delete tag: %r" % next)
+         if name == finding_name:
+             unbalanced_start.pop(0)
+             pre_delete.append(post_delete.pop(0))
+         else:
+             # Found a tag that doesn't match
+             break
+
+ def locate_unbalanced_end(unbalanced_end, pre_delete, post_delete):
+     """ like locate_unbalanced_start, except handling end tags and
+     possibly moving the point earlier in the document. """
+     while 1:
+         if not unbalanced_end:
+             # Success
+             break
+         finding = unbalanced_end[-1]
+         finding_name = finding.split()[0].strip('<>/')
+         if not pre_delete:
+             break
+         next = pre_delete[-1]
+         if next is DEL_END or not next.startswith('</'):
+             # A word or a start tag
+             break
+         name = next.split()[0].strip('<>/')
+         if name == 'ins' or name == 'del':
+             # Can't move into an insert or delete
+             break
+         if name == finding_name:
+             unbalanced_end.pop()
+             post_delete.insert(0, pre_delete.pop())
+         else:
+             # Found a tag that doesn't match
+             break
+
+ class token(_unicode):
+     """ Represents a diffable token, generally a word that is displayed to
+     the user.  Opening tags are attached to this token when they are
+     adjacent (pre_tags) and closing tags that follow the word
+     (post_tags).  Some exceptions occur when there are empty tags
+     adjacent to a word, so there may be close tags in pre_tags, or
+     open tags in post_tags.
+
+     We also keep track of whether the word was originally followed by
+     whitespace, even though we do not want to treat the word as
+     equivalent to a similar word that does not have a trailing
+     space."""
+
+     # When this is true, the token will be eliminated from the
+     # displayed diff if no change has occurred:
+     hide_when_equal = False
+
+     def __new__(cls, text, pre_tags=None, post_tags=None, trailing_whitespace=""):
+         obj = _unicode.__new__(cls, text)
+
+         if pre_tags is not None:
+             obj.pre_tags = pre_tags
+         else:
+             obj.pre_tags = []
+
+         if post_tags is not None:
+             obj.post_tags = post_tags
+         else:
+             obj.post_tags = []
+
+         obj.trailing_whitespace = trailing_whitespace
+
+         return obj
+
+     def __repr__(self):
+         return 'token(%s, %r, %r, %r)' % (_unicode.__repr__(self), self.pre_tags,
+                                           self.post_tags, self.trailing_whitespace)
+
+     def html(self):
+         return _unicode(self)
+
+ class tag_token(token):
+
+     """ Represents a token that is actually a tag.  Currently this is just
+     the <img> tag, which takes up visible space just like a word but
+     is only represented in a document by a tag. """
+
+     def __new__(cls, tag, data, html_repr, pre_tags=None,
+                 post_tags=None, trailing_whitespace=""):
+         obj = token.__new__(cls, "%s: %s" % (tag, data),
+                             pre_tags=pre_tags,
+                             post_tags=post_tags,
+                             trailing_whitespace=trailing_whitespace)
+         obj.tag = tag
+         obj.data = data
+         obj.html_repr = html_repr
+         return obj
+
+     def __repr__(self):
+         return 'tag_token(%s, %s, html_repr=%s, post_tags=%r, pre_tags=%r, trailing_whitespace=%r)' % (
+             self.tag,
+             self.data,
+             self.html_repr,
+             self.pre_tags,
+             self.post_tags,
+             self.trailing_whitespace)
+     def html(self):
+         return self.html_repr
+
+ class href_token(token):
+
+     """ Represents the href in an anchor tag.  Unlike other words, we only
+     show the href when it changes. """
+
+     hide_when_equal = True
+
+     def html(self):
+         return ' Link: %s' % self
+
+ def tokenize(html, include_hrefs=True):
+     """
+     Parse the given HTML and returns token objects (words with attached tags).
+
+     This parses only the content of a page; anything in the head is
+     ignored, and the <head> and <body> elements are themselves
+     optional.  The content is then parsed by lxml, which ensures the
+     validity of the resulting parsed document (though lxml may make
+     incorrect guesses when the markup is particularly bad).
+
+     <ins> and <del> tags are also eliminated from the document, as
+     that gets confusing.
+
+     If include_hrefs is true, then the href attribute of <a> tags is
+     included as a special kind of diffable token."""
+     if etree.iselement(html):
+         body_el = html
+     else:
+         body_el = parse_html(html, cleanup=True)
+     # Then we split the document into text chunks for each tag, word, and end tag:
+     chunks = flatten_el(body_el, skip_tag=True, include_hrefs=include_hrefs)
+     # Finally re-joining them into token objects:
+     return fixup_chunks(chunks)
+
+ def parse_html(html, cleanup=True):
+     """
+     Parses an HTML fragment, returning an lxml element.  Note that the HTML will be
+     wrapped in a <div> tag that was not in the original document.
+
+     If cleanup is true, make sure there's no <head> or <body>, and get
+     rid of any <ins> and <del> tags.
+     """
+     if cleanup:
+         # This removes any extra markup or structure like <head>:
+         html = cleanup_html(html)
+     return fragment_fromstring(html, create_parent=True)
+
+ _body_re = re.compile(r'<body.*?>', re.I|re.S)
+ _end_body_re = re.compile(r'</body.*?>', re.I|re.S)
+ _ins_del_re = re.compile(r'</?(ins|del).*?>', re.I|re.S)
+
+ def cleanup_html(html):
+     """ This 'cleans' the HTML, meaning that any page structure is removed
+     (only the contents of <body> are used, if there is any <body>).
+     Also <ins> and <del> tags are removed. """
+     match = _body_re.search(html)
+     if match:
+         html = html[match.end():]
+     match = _end_body_re.search(html)
+     if match:
+         html = html[:match.start()]
+     html = _ins_del_re.sub('', html)
+     return html
+
+
+ end_whitespace_re = re.compile(r'[ \t\n\r]$')
+
+ def split_trailing_whitespace(word):
+     """
+     This function takes a word, such as 'test\n\n' and returns ('test','\n\n')
+     """
+     stripped_length = len(word.rstrip())
+     return word[0:stripped_length], word[stripped_length:]
+
+
+ def fixup_chunks(chunks):
+     """
+     This function takes a list of chunks and produces a list of tokens.
+     """
+     tag_accum = []
+     cur_word = None
+     result = []
+     for chunk in chunks:
+         if isinstance(chunk, tuple):
+             if chunk[0] == 'img':
+                 src = chunk[1]
+                 tag, trailing_whitespace = split_trailing_whitespace(chunk[2])
+                 cur_word = tag_token('img', src, html_repr=tag,
+                                      pre_tags=tag_accum,
+                                      trailing_whitespace=trailing_whitespace)
+                 tag_accum = []
+                 result.append(cur_word)
+
+             elif chunk[0] == 'href':
+                 href = chunk[1]
+                 cur_word = href_token(href, pre_tags=tag_accum, trailing_whitespace=" ")
+                 tag_accum = []
+                 result.append(cur_word)
+             continue
+
+         if is_word(chunk):
+             chunk, trailing_whitespace = split_trailing_whitespace(chunk)
+             cur_word = token(chunk, pre_tags=tag_accum, trailing_whitespace=trailing_whitespace)
+             tag_accum = []
+             result.append(cur_word)
+
+         elif is_start_tag(chunk):
+             tag_accum.append(chunk)
+
+         elif is_end_tag(chunk):
+             if tag_accum:
+                 tag_accum.append(chunk)
+             else:
+                 assert cur_word, (
+                     "Weird state, cur_word=%r, result=%r, chunks=%r of %r"
+                     % (cur_word, result, chunk, chunks))
+                 cur_word.post_tags.append(chunk)
+         else:
+             assert False
+
+     if not result:
+         return [token('', pre_tags=tag_accum)]
+     else:
+         result[-1].post_tags.extend(tag_accum)
+
+     return result
+
+
+ # All the tags in HTML that don't require end tags:
+ empty_tags = (
+     'param', 'img', 'area', 'br', 'basefont', 'input',
+     'base', 'meta', 'link', 'col')
+
+ block_level_tags = (
+     'address',
+     'blockquote',
+     'center',
+     'dir',
+     'div',
+     'dl',
+     'fieldset',
+     'form',
+     'h1',
+     'h2',
+     'h3',
+     'h4',
+     'h5',
+     'h6',
+     'hr',
+     'isindex',
+     'menu',
+     'noframes',
+     'noscript',
+     'ol',
+     'p',
+     'pre',
+     'table',
+     'ul',
+     )
+
+ block_level_container_tags = (
+     'dd',
+     'dt',
+     'frameset',
+     'li',
+     'tbody',
+     'td',
+     'tfoot',
+     'th',
+     'thead',
+     'tr',
+     )
+
+
+ def flatten_el(el, include_hrefs, skip_tag=False):
+     """ Takes an lxml element el, and generates all the text chunks for
+     that tag.  Each start tag is a chunk, each word is a chunk, and each
+     end tag is a chunk.
+
+     If skip_tag is true, then the outermost container tag is
+     not returned (just its contents)."""
+     if not skip_tag:
+         if el.tag == 'img':
+             yield ('img', el.get('src'), start_tag(el))
+         else:
+             yield start_tag(el)
+     if el.tag in empty_tags and not el.text and not len(el) and not el.tail:
+         return
+     start_words = split_words(el.text)
+     for word in start_words:
+         yield html_escape(word)
+     for child in el:
+         yield from flatten_el(child, include_hrefs=include_hrefs)
+     if el.tag == 'a' and el.get('href') and include_hrefs:
+         yield ('href', el.get('href'))
+     if not skip_tag:
+         yield end_tag(el)
+         end_words = split_words(el.tail)
+         for word in end_words:
+             yield html_escape(word)
+
+ split_words_re = re.compile(r'\S+(?:\s+|$)', re.U)
+
+ def split_words(text):
+     """ Splits some text into words.  Includes trailing whitespace
+     on each word when appropriate. """
+     if not text or not text.strip():
+         return []
+
+     words = split_words_re.findall(text)
+     return words
+
+ start_whitespace_re = re.compile(r'^[ \t\n\r]')
+
+ def start_tag(el):
+     """
+     The text representation of the start tag for a tag.
+     """
+     return '<%s%s>' % (
+         el.tag, ''.join([' %s="%s"' % (name, html_escape(value, True))
+                          for name, value in el.attrib.items()]))
+
+ def end_tag(el):
+     """ The text representation of an end tag for a tag.  Includes
+     trailing whitespace when appropriate. """
+     if el.tail and start_whitespace_re.search(el.tail):
+         extra = ' '
+     else:
+         extra = ''
+     return '</%s>%s' % (el.tag, extra)
+
+ def is_word(tok):
+     return not tok.startswith('<')
+
+ def is_end_tag(tok):
+     return tok.startswith('</')
+
+ def is_start_tag(tok):
+     return tok.startswith('<') and not tok.startswith('</')
+
+ def fixup_ins_del_tags(html):
+     """ Given an html string, move any <ins> or <del> tags inside of any
+     block-level elements, e.g. transform <ins><p>word</p></ins> to
+     <p><ins>word</ins></p> """
+     doc = parse_html(html, cleanup=False)
+     _fixup_ins_del_tags(doc)
+     html = serialize_html_fragment(doc, skip_outer=True)
+     return html
+
+ def serialize_html_fragment(el, skip_outer=False):
+     """ Serialize a single lxml element as HTML.  The serialized form
+     includes the element's tail.
+
+     If skip_outer is true, then don't serialize the outermost tag
+     """
+     assert not isinstance(el, basestring), (
+         "You should pass in an element, not a string like %r" % el)
+     html = etree.tostring(el, method="html", encoding=_unicode)
+     if skip_outer:
+         # Get rid of the extra starting tag:
+         html = html[html.find('>')+1:]
+         # Get rid of the extra end tag:
+         html = html[:html.rfind('<')]
+         return html.strip()
+     else:
+         return html
+
+ def _fixup_ins_del_tags(doc):
+     """fixup_ins_del_tags that works on an lxml document in-place
+     """
+     for tag in ['ins', 'del']:
+         for el in doc.xpath('descendant-or-self::%s' % tag):
+             if not _contains_block_level_tag(el):
+                 continue
+             _move_el_inside_block(el, tag=tag)
+             el.drop_tag()
+             #_merge_element_contents(el)
+
+ def _contains_block_level_tag(el):
+     """True if the element contains any block-level elements, like <p>, <td>, etc.
+     """
+     if el.tag in block_level_tags or el.tag in block_level_container_tags:
+         return True
+     for child in el:
+         if _contains_block_level_tag(child):
+             return True
+     return False
+
+ def _move_el_inside_block(el, tag):
+     """ helper for _fixup_ins_del_tags; actually takes the <ins> etc tags
+     and moves them inside any block-level tags. """
+     for child in el:
+         if _contains_block_level_tag(child):
+             break
+     else:
+         # No block-level tags in any child
+         children_tag = etree.Element(tag)
+         children_tag.text = el.text
+         el.text = None
+         children_tag.extend(list(el))
+         el[:] = [children_tag]
+         return
+     for child in list(el):
+         if _contains_block_level_tag(child):
+             _move_el_inside_block(child, tag)
+             if child.tail:
+                 tail_tag = etree.Element(tag)
+                 tail_tag.text = child.tail
+                 child.tail = None
+                 el.insert(el.index(child)+1, tail_tag)
+         else:
+             child_tag = etree.Element(tag)
+             el.replace(child, child_tag)
+             child_tag.append(child)
+     if el.text:
+         text_tag = etree.Element(tag)
+         text_tag.text = el.text
+         el.text = None
+         el.insert(0, text_tag)
+
+ def _merge_element_contents(el):
+     """
+     Removes an element, but merges its contents into its place, e.g.,
+     given <p>Hi <i>there!</i></p>, if you remove the <i> element you get
+     <p>Hi there!</p>
+     """
+     parent = el.getparent()
+     text = el.text or ''
+     if el.tail:
+         if not len(el):
+             text += el.tail
+         else:
+             if el[-1].tail:
+                 el[-1].tail += el.tail
+             else:
+                 el[-1].tail = el.tail
+     index = parent.index(el)
+     if text:
+         if index == 0:
+             previous = None
+         else:
+             previous = parent[index-1]
+         if previous is None:
+             if parent.text:
+                 parent.text += text
+             else:
+                 parent.text = text
+         else:
+             if previous.tail:
+                 previous.tail += text
+             else:
+                 previous.tail = text
+     parent[index:index+1] = el.getchildren()
+
+ class InsensitiveSequenceMatcher(difflib.SequenceMatcher):
+     """
+     Acts like SequenceMatcher, but tries not to find very small equal
+     blocks amidst large spans of changes
+     """
+
+     threshold = 2
+
+     def get_matching_blocks(self):
+         size = min(len(self.a), len(self.b))
+         threshold = min(self.threshold, size / 4)
+         actual = difflib.SequenceMatcher.get_matching_blocks(self)
+         return [item for item in actual
+                 if item[2] > threshold
+                 or not item[2]]
+
+ if __name__ == '__main__':
+     from lxml.html import _diffcommand
+     _diffcommand.main()
+
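For orientation, here is a minimal usage sketch of the module's two public entry points, html_annotate and htmldiff, as described in the docstrings above. The input fragments and the expected output shown in the comments are illustrative assumptions, not taken from the package.

    # Minimal sketch: diff and annotate two small HTML fragments.
    from lxml.html.diff import htmldiff, html_annotate

    old = '<p>Hello <b>World</b></p>'
    new = '<p>Goodbye <b>World</b></p>'

    # htmldiff wraps changed words in <ins>/<del>, keeping the markup of the
    # new fragment; output will look roughly like:
    #   <p><ins>Goodbye</ins> <del>Hello</del> <b>World</b></p>
    print(htmldiff(old, new))

    # html_annotate takes (fragment, version) pairs ordered oldest to newest
    # and labels each word with the version in which it first appeared.
    print(html_annotate([(old, 'v1'), (new, 'v2')]))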