From 535d047aef8edcf2aed517cd883f4ba95b298bd8 Mon Sep 17 00:00:00 2001
From: Leonard Richardson
Date: Mon, 11 Oct 2021 02:13:56 -0400
Subject: Moved the test classes to tests/__init__.py.

---
 bs4/tests/__init__.py             | 1118 +++++++++++++++++++++++++++++++++++++
 bs4/tests/test_formatter.py       |    2 +-
 bs4/tests/test_html5lib.py        |    2 +-
 bs4/tests/test_htmlparser.py      |    2 +-
 bs4/tests/test_lxml.py            |    3 +-
 bs4/tests/test_navigablestring.py |    3 +-
 bs4/tests/test_soup.py            |    2 +-
 bs4/tests/test_tree.py            |    2 +-
 bs4/tests/testing.py              | 1118 -------------------------------------
 9 files changed, 1126 insertions(+), 1126 deletions(-)
 create mode 100644 bs4/tests/__init__.py
 delete mode 100644 bs4/tests/testing.py

diff --git a/bs4/tests/__init__.py b/bs4/tests/__init__.py
new file mode 100644
index 0000000..97914c0
--- /dev/null
+++ b/bs4/tests/__init__.py
@@ -0,0 +1,1118 @@
+# encoding: utf-8
+"""Helper classes for tests."""
+
+# Use of this source code is governed by the MIT license.
+__license__ = "MIT"
+
+import pickle
+import copy
+import functools
+import warnings
+import pytest
+from bs4 import BeautifulSoup
+from bs4.element import (
+    CharsetMetaAttributeValue,
+    Comment,
+    ContentMetaAttributeValue,
+    Doctype,
+    PYTHON_SPECIFIC_ENCODINGS,
+    SoupStrainer,
+    Script,
+    Stylesheet,
+    Tag
+)
+
+from bs4.builder import HTMLParserTreeBuilder
+default_builder = HTMLParserTreeBuilder
+
+BAD_DOCUMENT = """A bare string
+<!DOCTYPE xsl:stylesheet SYSTEM "htmlent.dtd">
+<!DOCTYPE xsl:stylesheet PUBLIC "htmlent.dtd">
+<div><![CDATA[A CDATA section where it doesn't belong]]></div>
+<div><svg><![CDATA[HTML5 does allow CDATA sections in SVG]]></svg></div>
+<div>A <meta> tag</div>
+<div>A <br> tag that supposedly has contents.</br></div>
+<div>AT&amp;T</div>
+<div><textarea>Within a textarea, markup like <b> tags and <&<&amp; should be treated as literal</textarea></div>
+<div><script>if (i < 2) { alert("<b>Markup within script tags should be treated as literal.</b>"); }</script></div>
+<div>This numeric entity is missing the final semicolon: <x t="pi&#241ata"></div>
+<div><a href="http://example.com/</a> that attribute value never got closed</div>
+<div><a href="foo</a>, </a><a href="bar">that attribute value was closed by the subsequent tag</a></div>
+<! This document contains <!an incomplete declaration <div>(do you see it?)</div>
+<div>This document ends with <!an incomplete declaration
+<div><a style={height:21px;}>That attribute value was bogus</a></div>
+<! DOCTYPE>The doctype is invalid because it contains extra whitespace
+<div><table><td nowrap>That boolean attribute had no value</td></table></div>
+<div>Here's a nonexistent entity: &#foo; (do you see it?)</div>
+<div>This document ends before the entity finishes: &gt
+<div><p>Paragraphs shouldn't contain block display elements, but this one does: <pre>you see?</pre></p>
+<b b="20" a="1" b="10" a="2" a="3" a="4">Multiple values for the same attribute.</b>
+<div><table><tr><td>Here's a table</td></tr></table></div>
+<div>This tag contains nothing but whitespace: <b>    </b></div>
+<div><blockquote><p><b>This p tag is cut off by</blockquote></p>the end of the blockquote tag</div>
+<div><table id="t1"><tr><td>Here's a nested table:<table id="t2"><tr><td>foo</td></tr></table></td></div>
+<div><table><td>This table contains bare markup</table></div>
+<div>This document contains a <!DOCTYPE surprise>surprise doctype</div>
+<div><sacré>Tag name contains Unicode characters</sacré></div>
+ + +""" + + +class SoupTest(object): + + @property + def default_builder(self): + return default_builder + + def soup(self, markup, **kwargs): + """Build a Beautiful Soup object from markup.""" + builder = kwargs.pop('builder', self.default_builder) + return BeautifulSoup(markup, builder=builder, **kwargs) + + def document_for(self, markup, **kwargs): + """Turn an HTML fragment into a document. + + The details depend on the builder. + """ + return self.default_builder(**kwargs).test_fragment_to_document(markup) + + def assert_soup(self, to_parse, compare_parsed_to=None): + """Parse some markup using Beautiful Soup and verify that + the output markup is as expected. + """ + builder = self.default_builder + obj = BeautifulSoup(to_parse, builder=builder) + if compare_parsed_to is None: + compare_parsed_to = to_parse + + # Verify that the documents come out the same. + assert obj.decode() == self.document_for(compare_parsed_to) + + # Also run some checks on the BeautifulSoup object itself: + + # Verify that every tag that was opened was eventually closed. + + # There are no tags in the open tag counter. + assert all(v==0 for v in list(obj.open_tag_counter.values())) + + # The only tag in the tag stack is the one for the root + # document. + assert [obj.ROOT_TAG_NAME] == [x.name for x in obj.tagStack] + + assertSoupEquals = assert_soup + + def assertConnectedness(self, element): + """Ensure that next_element and previous_element are properly + set for all descendants of the given element. + """ + earlier = None + for e in element.descendants: + if earlier: + assert e == earlier.next_element + assert earlier == e.previous_element + earlier = e + + def linkage_validator(self, el, _recursive_call=False): + """Ensure proper linkage throughout the document.""" + descendant = None + # Document element should have no previous element or previous sibling. + # It also shouldn't have a next sibling. 
+ if el.parent is None: + assert el.previous_element is None,\ + "Bad previous_element\nNODE: {}\nPREV: {}\nEXPECTED: {}".format( + el, el.previous_element, None + ) + assert el.previous_sibling is None,\ + "Bad previous_sibling\nNODE: {}\nPREV: {}\nEXPECTED: {}".format( + el, el.previous_sibling, None + ) + assert el.next_sibling is None,\ + "Bad next_sibling\nNODE: {}\nNEXT: {}\nEXPECTED: {}".format( + el, el.next_sibling, None + ) + + idx = 0 + child = None + last_child = None + last_idx = len(el.contents) - 1 + for child in el.contents: + descendant = None + + # Parent should link next element to their first child + # That child should have no previous sibling + if idx == 0: + if el.parent is not None: + assert el.next_element is child,\ + "Bad next_element\nNODE: {}\nNEXT: {}\nEXPECTED: {}".format( + el, el.next_element, child + ) + assert child.previous_element is el,\ + "Bad previous_element\nNODE: {}\nPREV: {}\nEXPECTED: {}".format( + child, child.previous_element, el + ) + assert child.previous_sibling is None,\ + "Bad previous_sibling\nNODE: {}\nPREV {}\nEXPECTED: {}".format( + child, child.previous_sibling, None + ) + + # If not the first child, previous index should link as sibling to this index + # Previous element should match the last index or the last bubbled up descendant + else: + assert child.previous_sibling is el.contents[idx - 1],\ + "Bad previous_sibling\nNODE: {}\nPREV {}\nEXPECTED {}".format( + child, child.previous_sibling, el.contents[idx - 1] + ) + assert el.contents[idx - 1].next_sibling is child,\ + "Bad next_sibling\nNODE: {}\nNEXT {}\nEXPECTED {}".format( + el.contents[idx - 1], el.contents[idx - 1].next_sibling, child + ) + + if last_child is not None: + assert child.previous_element is last_child,\ + "Bad previous_element\nNODE: {}\nPREV {}\nEXPECTED {}\nCONTENTS {}".format( + child, child.previous_element, last_child, child.parent.contents + ) + assert last_child.next_element is child,\ + "Bad next_element\nNODE: {}\nNEXT {}\nEXPECTED {}".format( + last_child, last_child.next_element, child + ) + + if isinstance(child, Tag) and child.contents: + descendant = self.linkage_validator(child, True) + # A bubbled up descendant should have no next siblings + assert descendant.next_sibling is None,\ + "Bad next_sibling\nNODE: {}\nNEXT {}\nEXPECTED {}".format( + descendant, descendant.next_sibling, None + ) + + # Mark last child as either the bubbled up descendant or the current child + if descendant is not None: + last_child = descendant + else: + last_child = child + + # If last child, there are non next siblings + if idx == last_idx: + assert child.next_sibling is None,\ + "Bad next_sibling\nNODE: {}\nNEXT {}\nEXPECTED {}".format( + child, child.next_sibling, None + ) + idx += 1 + + child = descendant if descendant is not None else child + if child is None: + child = el + + if not _recursive_call and child is not None: + target = el + while True: + if target is None: + assert child.next_element is None, \ + "Bad next_element\nNODE: {}\nNEXT {}\nEXPECTED {}".format( + child, child.next_element, None + ) + break + elif target.next_sibling is not None: + assert child.next_element is target.next_sibling, \ + "Bad next_element\nNODE: {}\nNEXT {}\nEXPECTED {}".format( + child, child.next_element, target.next_sibling + ) + break + target = target.parent + + # We are done, so nothing to return + return None + else: + # Return the child to the recursive caller + return child + + +class TreeBuilderSmokeTest(object): + # Tests that are common to HTML and XML tree 
+    # builders.
+
+    def test_fuzzed_input(self):
+        # This test centralizes in one place the various fuzz tests
+        # for Beautiful Soup created by the oss-fuzz project.
+
+        # These strings superficially resemble markup, but they
+        # generally can't be parsed into anything. The best we can
+        # hope for is that parsing these strings won't crash the
+        # parser.
+        #
+        # n.b. This markup is commented out because these fuzz tests
+        # _do_ crash the parser. However the crashes are due to bugs
+        # in html.parser, not Beautiful Soup -- otherwise I'd fix the
+        # bugs!
+
+        bad_markup = [
+            # https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=28873
+            # https://github.com/guidovranken/python-library-fuzzers/blob/master/corp-html/519e5b4269a01185a0d5e76295251921da2f0700
+            # https://bugs.python.org/issue37747
+            #
+            #b'
+        ]
+        for markup in bad_markup:
+            with warnings.catch_warnings():
+                warnings.simplefilter("ignore")
+                soup = self.soup(markup)
+
+
+class HTMLTreeBuilderSmokeTest(TreeBuilderSmokeTest):
+
+    """A basic test of a treebuilder's competence.
+
+    Any HTML treebuilder, present or future, should be able to pass
+    these tests. With invalid markup, there's room for interpretation,
+    and different parsers can handle it differently. But with the
+    markup in these tests, there's not much room for interpretation.
+    """
+
+    def test_special_string_containers(self):
+        soup = self.soup(
+            "<style>Some CSS</style><script>Some Javascript</script>"
+        )
+        assert isinstance(soup.style.string, Stylesheet)
+        assert isinstance(soup.script.string, Script)
+
+        soup = self.soup(
+            "<style><!--Some CSS--></style>"
+        )
+        assert isinstance(soup.style.string, Stylesheet)
+        # The contents of the style tag resemble an HTML comment, but
+        # it's not treated as a comment.
+        assert soup.style.string == "<!--Some CSS-->"
+        assert isinstance(soup.style.string, Stylesheet)
+
+    def test_pickle_and_unpickle_identity(self):
+        # Pickling a tree, then unpickling it, yields a tree identical
+        # to the original.
+        tree = self.soup("<a><b>foo</a>")
+        dumped = pickle.dumps(tree, 2)
+        loaded = pickle.loads(dumped)
+        assert loaded.__class__ == BeautifulSoup
+        assert loaded.decode() == tree.decode()
+
+    def assertDoctypeHandled(self, doctype_fragment):
+        """Assert that a given doctype string is handled correctly."""
+        doctype_str, soup = self._document_with_doctype(doctype_fragment)
+
+        # Make sure a Doctype object was created.
+        doctype = soup.contents[0]
+        assert doctype.__class__ == Doctype
+        assert doctype == doctype_fragment
+        assert soup.encode("utf8")[:len(doctype_str)] == doctype_str
+
+        # Make sure that the doctype was correctly associated with the
+        # parse tree and that the rest of the document parsed.
+        assert soup.p.contents[0] == 'foo'
+
+    def _document_with_doctype(self, doctype_fragment, doctype_string="DOCTYPE"):
+        """Generate and parse a document with the given doctype."""
+        doctype = '<!%s %s>' % (doctype_string, doctype_fragment)
+        markup = doctype + '\n<p>foo</p>'
+        soup = self.soup(markup)
+        return doctype.encode("utf8"), soup
+
+    def test_normal_doctypes(self):
+        """Make sure normal, everyday HTML doctypes are handled correctly."""
+        self.assertDoctypeHandled("html")
+        self.assertDoctypeHandled(
+            'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"')
+
+    def test_empty_doctype(self):
+        soup = self.soup("<!DOCTYPE>")
+        doctype = soup.contents[0]
+        assert "" == doctype.strip()
+
+    def test_mixed_case_doctype(self):
+        # A lowercase or mixed-case doctype becomes a Doctype.
+        for doctype_fragment in ("doctype", "DocType"):
+            doctype_str, soup = self._document_with_doctype(
+                "html", doctype_fragment
+            )
+
+            # Make sure a Doctype object was created and that the DOCTYPE
+            # is uppercase.
+            doctype = soup.contents[0]
+            assert doctype.__class__ == Doctype
+            assert doctype == "html"
+            assert soup.encode("utf8")[:len(doctype_str)] == b"<!DOCTYPE html>"
+
+            # Make sure that the doctype was correctly associated with the
+            # parse tree and that the rest of the document parsed.
+            assert soup.p.contents[0] == 'foo'
+
+    def test_public_doctype_with_url(self):
+        doctype = 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"'
+        self.assertDoctypeHandled(doctype)
+
+    def test_system_doctype(self):
+        self.assertDoctypeHandled('foo SYSTEM "http://www.example.com/"')
+
+    def test_namespaced_system_doctype(self):
+        # We can handle a namespaced doctype with a system ID.
+        self.assertDoctypeHandled('xsl:stylesheet SYSTEM "htmlent.dtd"')
+
+    def test_namespaced_public_doctype(self):
+        # Test a namespaced doctype with a public id.
+        self.assertDoctypeHandled('xsl:stylesheet PUBLIC "htmlent.dtd"')
+
+    def test_real_xhtml_document(self):
+        """A real XHTML document should come out more or less the same as it went in."""
+        markup = b"""<?xml version="1.0" encoding="utf-8"?>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head><title>Hello.</title></head>
+<body>Goodbye.</body>
+</html>"""
+        soup = self.soup(markup)
+        assert soup.encode("utf-8").replace(b"\n", b"") == markup.replace(b"\n", b"")
+
+    def test_namespaced_html(self):
+        """When a namespaced XML document is parsed as HTML it should
+        be treated as HTML with weird tag names.
+        """
+        markup = b"""<ns1:foo>content</ns1:foo><ns1:foo/><ns2:foo/>"""
+        soup = self.soup(markup)
+        assert 2 == len(soup.find_all("ns1:foo"))
+
+    def test_processing_instruction(self):
+        # We test both Unicode and bytestring to verify that
+        # process_markup correctly sets processing_instruction_class
+        # even when the markup is already Unicode and there is no
+        # need to process anything.
+        markup = """<?PITarget PIContent?>"""
+        soup = self.soup(markup)
+        assert markup == soup.decode()
+
+        markup = b"""<?PITarget PIContent?>"""
+        soup = self.soup(markup)
+        assert markup == soup.encode("utf8")
+
+    def test_deepcopy(self):
+        """Make sure you can copy the tree builder.
+
+        This is important because the builder is part of a
+        BeautifulSoup object, and we want to be able to copy that.
+        """
+        copy.deepcopy(self.default_builder)
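+    # A quick sketch of the copy behavior exercised above (illustrative,
+    # assuming the default html.parser builder):
+    #
+    #   >>> import copy
+    #   >>> soup = BeautifulSoup("<p>foo</p>", "html.parser")
+    #   >>> copy.deepcopy(soup).p.string
+    #   'foo'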
+    def test_p_tag_is_never_empty_element(self):
+        """A <p> tag is never designated as an empty-element tag.
+
+        Even if the markup shows it as an empty-element tag, it
+        shouldn't be presented that way.
+        """
+        soup = self.soup("<p/>")
+        assert not soup.p.is_empty_element
+        assert str(soup.p) == "<p></p>"
+
+    def test_unclosed_tags_get_closed(self):
+        """A tag that's not closed by the end of the document should be closed.
+
+        This applies to all tags except empty-element tags.
+        """
+        self.assert_soup("<p>", "<p></p>")
+        self.assert_soup("<b>", "<b></b>")
+
+        self.assert_soup("<br>", "<br/>")
+
+    def test_br_is_always_empty_element_tag(self):
+        """A <br> tag is designated as an empty-element tag.
+
+        Some parsers treat <br></br> as one <br/> tag, some parsers as
+        two tags, but it should always be an empty-element tag.
+        """
+        soup = self.soup("<br></br>")
+        assert soup.br.is_empty_element
+        assert str(soup.br) == "<br/>"
+
+    def test_nested_formatting_elements(self):
+        self.assert_soup("<em><em></em></em>")
+
+    def test_double_head(self):
+        html = '''<!DOCTYPE html>
+<html>
+<head>
+<title>Ordinary HEAD element test</title>
+</head>
+<script type="text/javascript">
+alert("Help!")
+</script>
+<body>
+Hello, world!
+</body>
+</html>
+'''
+        soup = self.soup(html)
+        assert "text/javascript" == soup.find('script')['type']
+
+    def test_comment(self):
+        # Comments are represented as Comment objects.
+        markup = "<p>foo<!--foobar-->baz</p>"
+        self.assert_soup(markup)
+
+        soup = self.soup(markup)
+        comment = soup.find(text="foobar")
+        assert comment.__class__ == Comment
+
+        # The comment is properly integrated into the tree.
+        foo = soup.find(text="foo")
+        assert comment == foo.next_element
+        baz = soup.find(text="baz")
+        assert comment == baz.previous_element
+
+    def test_preserved_whitespace_in_pre_and_textarea(self):
+        """Whitespace must be preserved in <pre> and <textarea> tags,
+        even if that would mean not prettifying the markup.
+        """
+        pre_markup = "<pre>   </pre>"
+        textarea_markup = "<textarea> woo\nwoo  </textarea>"
+        self.assert_soup(pre_markup)
+        self.assert_soup(textarea_markup)
+
+        soup = self.soup(pre_markup)
+        assert soup.pre.prettify() == pre_markup
+
+        soup = self.soup(textarea_markup)
+        assert soup.textarea.prettify() == textarea_markup
+
+        soup = self.soup("<textarea></textarea>")
+        assert soup.textarea.prettify() == "<textarea></textarea>"
+
+    def test_nested_inline_elements(self):
+        """Inline elements can be nested indefinitely."""
+        b_tag = "<b>Inside a B tag</b>"
+        self.assert_soup(b_tag)
+
+        nested_b_tag = "<p>A <i>nested <b>tag</b></i></p>"
+        self.assert_soup(nested_b_tag)
+
+        double_nested_b_tag = "<p>A <a>doubly <i>nested <b>tag</b></i></a></p>"
+        self.assert_soup(double_nested_b_tag)
+    def test_nested_block_level_elements(self):
+        """Block elements can be nested."""
+        soup = self.soup('<blockquote><p><b>Foo</b></p></blockquote>')
+        blockquote = soup.blockquote
+        assert blockquote.p.b.string == 'Foo'
+        assert blockquote.b.string == 'Foo'
+
+    def test_correctly_nested_tables(self):
+        """One table can go inside another one."""
+        markup = ('<table id="1">'
+                  '<tr>'
+                  "<td>Here's another table:"
+                  '<table id="2">'
+                  '<tr><td>foo</td></tr>'
+                  '</table></td>')
+
+        self.assert_soup(
+            markup,
+            '<table id="1"><tr><td>Here\'s another table:'
+            '<table id="2"><tr><td>foo</td></tr></table>'
+            '</td></tr></table>')
+
+        self.assert_soup(
+            "<table><thead><tr><td>Foo</td></tr></thead>"
+            "<tbody><tr><td>Bar</td></tr></tbody>"
+            "<tfoot><tr><td>Baz</td></tr></tfoot></table>")
+
+    def test_multivalued_attribute_with_whitespace(self):
+        # Whitespace separating the values of a multi-valued attribute
+        # should be ignored.
+
+        markup = '<div class=" foo bar  "></a>'
+        soup = self.soup(markup)
+        assert ['foo', 'bar'] == soup.div['class']
+
+        # If you search by the literal name of the class it's like the whitespace
+        # wasn't there.
+        assert soup.div == soup.find('div', class_="foo bar")
+
+    def test_deeply_nested_multivalued_attribute(self):
+        # html5lib can set the attributes of the same tag many times
+        # as it rearranges the tree. This has caused problems with
+        # multivalued attributes.
+        markup = '<table><div><div class="css"></div></div></table>'
+        soup = self.soup(markup)
+        assert ["css"] == soup.div.div['class']
+
+    def test_multivalued_attribute_on_html(self):
+        # html5lib uses a different API to set the attributes of the
+        # <html> tag. This has caused problems with multivalued
+        # attributes.
+        markup = '<html class="a b"></html>'
+        soup = self.soup(markup)
+        assert ["a", "b"] == soup.html['class']
+
+    def test_angle_brackets_in_attribute_values_are_escaped(self):
+        self.assert_soup('<a b="<a>"></a>', '<a b="&lt;a&gt;"></a>')
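+    # Multi-valued attributes, sketched (mirrors the assertions above):
+    #
+    #   >>> BeautifulSoup('<div class=" foo bar  "></div>', 'html.parser').div['class']
+    #   ['foo', 'bar']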
+    def test_strings_resembling_character_entity_references(self):
+        # "&T" and "&p" look like incomplete character entities, but they are
+        # not.
+        self.assert_soup(
+            "<p>&bull; AT&T is in the s&p 500</p>",
+            "<p>\u2022 AT&T is in the s&p 500</p>"
+        )
+
+    def test_apos_entity(self):
+        self.assert_soup(
+            "<p>Bob&apos;s Bar</p>",
+            "<p>Bob's Bar</p>",
+        )
+
+    def test_entities_in_foreign_document_encoding(self):
+        # &#147; and &#148; are invalid numeric entities referencing
+        # Windows-1252 characters. &#45; references a character common
+        # to Windows-1252 and Unicode, and &#9731; references a
+        # character only found in Unicode.
+        #
+        # All of these entities should be converted to Unicode
+        # characters.
+        markup = "<p>&#147;Hello&#148; &#45;&#9731;</p>"
+        soup = self.soup(markup)
+        assert "“Hello” -☃" == soup.p.string
+
+    def test_entities_in_attributes_converted_to_unicode(self):
+        expect = '<p id="pi\N{LATIN SMALL LETTER N WITH TILDE}ata"></p>'
+        self.assert_soup('<p id="pi&#241;ata"></p>', expect)
+        self.assert_soup('<p id="pi&#xf1;ata"></p>', expect)
+        self.assert_soup('<p id="pi&#Xf1;ata"></p>', expect)
+        self.assert_soup('<p id="pi&ntilde;ata"></p>', expect)
+
+    def test_entities_in_text_converted_to_unicode(self):
+        expect = '<p>pi\N{LATIN SMALL LETTER N WITH TILDE}ata</p>'
+        self.assert_soup("<p>pi&#241;ata</p>", expect)
+        self.assert_soup("<p>pi&#xf1;ata</p>", expect)
+        self.assert_soup("<p>pi&#Xf1;ata</p>", expect)
+        self.assert_soup("<p>pi&ntilde;ata</p>", expect)
+
+    def test_quot_entity_converted_to_quotation_mark(self):
+        self.assert_soup("<p>I said &quot;good day!&quot;</p>",
+                         '<p>I said "good day!"</p>')
+
+    def test_out_of_range_entity(self):
+        expect = "\N{REPLACEMENT CHARACTER}"
+        self.assert_soup("&#10000000000000;", expect)
+        self.assert_soup("&#x10000000000000;", expect)
+        self.assert_soup("&#1000000000;", expect)
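+    # Entity handling in one line (an illustrative sketch): entities are
+    # decoded to Unicode at parse time, so searching and comparison work
+    # on the decoded text:
+    #
+    #   >>> BeautifulSoup("<p>Bob&apos;s Bar</p>", "html.parser").p.string
+    #   "Bob's Bar"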
+    def test_multipart_strings(self):
+        "Mostly to prevent a recurrence of a bug in the html5lib treebuilder."
+        soup = self.soup("<html><h2>\nfoo</h2><p></p></html>")
+        assert "p" == soup.h2.string.next_element.name
+        assert "p" == soup.p.name
+        self.assertConnectedness(soup)
+
+    def test_empty_element_tags(self):
+        """Verify consistent handling of empty-element tags,
+        no matter how they come in through the markup.
+        """
+        self.assert_soup('<br/>', "<br/>")
+        self.assert_soup('<br />', "<br/>")
+
+    def test_head_tag_between_head_and_body(self):
+        "Prevent recurrence of a bug in the html5lib treebuilder."
+        content = """<html><head></head>
+  <link></link>
+  <body>foo</body>
+</html>
+"""
+        soup = self.soup(content)
+        assert soup.html.body is not None
+        self.assertConnectedness(soup)
+
+    def test_multiple_copies_of_a_tag(self):
+        "Prevent recurrence of a bug in the html5lib treebuilder."
+        content = """<!DOCTYPE html>
+<html>
+ <body>
+   <article id="a" >
+   <div><a href="1"></div>
+   <footer>
+     <a href="2"></a>
+   </footer>
+   <a href="3">
+  </article>
+  </body>
+</html>
+"""
+        soup = self.soup(content)
+        self.assertConnectedness(soup.article)
+
+    def test_basic_namespaces(self):
+        """Parsers don't need to *understand* namespaces, but at the
+        very least they should not choke on namespaces or lose
+        data."""
+
+        markup = b'<html xmlns="http://www.w3.org/1999/xhtml" xmlns:mathml="http://www.w3.org/1998/Math/MathML" xmlns:svg="http://www.w3.org/2000/svg"><head></head><body><mathml:msqrt>4</mathml:msqrt><b svg:fill="red"></b></body></html>'
+        soup = self.soup(markup)
+        assert markup == soup.encode()
+        html = soup.html
+        assert 'http://www.w3.org/1999/xhtml' == soup.html['xmlns']
+        assert 'http://www.w3.org/1998/Math/MathML' == soup.html['xmlns:mathml']
+        assert 'http://www.w3.org/2000/svg' == soup.html['xmlns:svg']
+
+    def test_multivalued_attribute_value_becomes_list(self):
+        markup = b'<a class="foo bar">'
+        soup = self.soup(markup)
+        assert ['foo', 'bar'] == soup.a['class']
+
+    #
+    # Generally speaking, tests below this point are more tests of
+    # Beautiful Soup than tests of the tree builders. But parsers are
+    # weird, so we run these tests separately for every tree builder
+    # to detect any differences between them.
+    #
+
+    def test_can_parse_unicode_document(self):
+        # A seemingly innocuous document... but it's in Unicode! And
+        # it contains characters that can't be represented in the
+        # encoding found in the declaration! The horror!
+        markup = '<html><head><meta encoding="euc-jp"></head><body>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</body></html>'
+        soup = self.soup(markup)
+        assert 'Sacr\xe9 bleu!' == soup.body.string
+
+    def test_soupstrainer(self):
+        """Parsers should be able to work with SoupStrainers."""
+        strainer = SoupStrainer("b")
+        soup = self.soup("A <b>bold</b> statement",
+                         parse_only=strainer)
+        assert soup.decode() == "<b>bold</b>"
+
+    def test_single_quote_attribute_values_become_double_quotes(self):
+        self.assert_soup("<foo attr='bar'></foo>",
+                         '<foo attr="bar"></foo>')
+
+    def test_attribute_values_with_nested_quotes_are_left_alone(self):
+        text = """<foo attr='bar "brawls" happen'>a</foo>"""
+        self.assert_soup(text)
+
+    def test_attribute_values_with_double_nested_quotes_get_quoted(self):
+        text = """<foo attr='bar "brawls" happen'>a</foo>"""
+        soup = self.soup(text)
+        soup.foo['attr'] = 'Brawls happen at "Bob\'s Bar"'
+        self.assert_soup(
+            soup.foo.decode(),
+            """<foo attr="Brawls happen at &quot;Bob\'s Bar&quot;">a</foo>""")
+
+    def test_ampersand_in_attribute_value_gets_escaped(self):
+        self.assert_soup('<this is="really messed up & stuff"></this>',
+                         '<this is="really messed up &amp; stuff"></this>')
+
+        self.assert_soup(
+            '<a href="http://example.org?a=1&b=2;3">foo</a>',
+            '<a href="http://example.org?a=1&amp;b=2;3">foo</a>')
+
+    def test_escaped_ampersand_in_attribute_value_is_left_alone(self):
+        self.assert_soup('<a href="http://example.org?a=1&amp;b=2;3"></a>')
+    def test_entities_in_strings_converted_during_parsing(self):
+        # Both XML and HTML entities are converted to Unicode characters
+        # during parsing.
+        text = "<p>&lt;&lt;sacr&eacute; bleu!&gt;&gt;</p>"
+        expected = "<p>&lt;&lt;sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</p>"
+        self.assert_soup(text, expected)
+
+    def test_smart_quotes_converted_on_the_way_in(self):
+        # Microsoft smart quotes are converted to Unicode characters during
+        # parsing.
+        quote = b"<p>\x91Foo\x92</p>"
+        soup = self.soup(quote)
+        assert soup.p.string == "\N{LEFT SINGLE QUOTATION MARK}Foo\N{RIGHT SINGLE QUOTATION MARK}"
+
+    def test_non_breaking_spaces_converted_on_the_way_in(self):
+        soup = self.soup("<a>&nbsp;&nbsp;</a>")
+        assert soup.a.string == "\N{NO-BREAK SPACE}" * 2
+
+    def test_entities_converted_on_the_way_out(self):
+        text = "<p>&lt;&lt;sacr&eacute; bleu!&gt;&gt;</p>"
+        expected = "<p>&lt;&lt;sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</p>".encode("utf-8")
+        soup = self.soup(text)
+        assert soup.p.encode("utf-8") == expected
+
+    def test_real_iso_latin_document(self):
+        # Smoke test of interrelated functionality, using an
+        # easy-to-understand document.
+
+        # Here it is in Unicode. Note that it claims to be in ISO-Latin-1.
+        unicode_html = '<html><head><meta content="text/html; charset=ISO-Latin-1" http-equiv="Content-type"/></head><body><p>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</p></body></html>'
+
+        # That's because we're going to encode it into ISO-Latin-1, and use
+        # that to test.
+        iso_latin_html = unicode_html.encode("iso-8859-1")
+
+        # Parse the ISO-Latin-1 HTML.
+        soup = self.soup(iso_latin_html)
+        # Encode it to UTF-8.
+        result = soup.encode("utf-8")
+
+        # What do we expect the result to look like? Well, it would
+        # look like unicode_html, except that the META tag would say
+        # UTF-8 instead of ISO-Latin-1.
+        expected = unicode_html.replace("ISO-Latin-1", "utf-8")
+
+        # And, of course, it would be in UTF-8, not Unicode.
+        expected = expected.encode("utf-8")
+
+        # Ta-da!
+        assert result == expected
+
+    def test_real_shift_jis_document(self):
+        # Smoke test to make sure the parser can handle a document in
+        # Shift-JIS encoding, without choking.
+        shift_jis_html = (
+            b'<html><head></head><body><pre>'
+            b'\x82\xb1\x82\xea\x82\xcdShift-JIS\x82\xc5\x83R\x81[\x83f'
+            b'\x83B\x83\x93\x83O\x82\xb3\x82\xea\x82\xbd\x93\xfa\x96{\x8c'
+            b'\xea\x82\xcc\x83t\x83@\x83C\x83\x8b\x82\xc5\x82\xb7\x81B'
+            b'</pre></body></html>')
+        unicode_html = shift_jis_html.decode("shift-jis")
+        soup = self.soup(unicode_html)
+
+        # Make sure the parse tree is correctly encoded to various
+        # encodings.
+        assert soup.encode("utf-8") == unicode_html.encode("utf-8")
+        assert soup.encode("euc_jp") == unicode_html.encode("euc_jp")
+
+    def test_real_hebrew_document(self):
+        # A real-world test to make sure we can convert ISO-8859-8 (a
+        # Hebrew encoding) to UTF-8.
+        hebrew_document = b'<html><head><title>Hebrew (ISO 8859-8) in Visual Directionality</title></head><body><h1>Hebrew (ISO 8859-8) in Visual Directionality</h1>\xed\xe5\xec\xf9</body></html>'
+        soup = self.soup(
+            hebrew_document, from_encoding="iso8859-8")
+        # Some tree builders call it iso8859-8, others call it iso-8859-8.
+        # That's not a difference we really care about.
+        assert soup.original_encoding in ('iso8859-8', 'iso-8859-8')
+        assert soup.encode('utf-8') == (
+            hebrew_document.decode("iso8859-8").encode("utf-8")
+        )
+
+    def test_meta_tag_reflects_current_encoding(self):
+        # Here's the <meta> tag saying that a document is
+        # encoded in Shift-JIS.
+        meta_tag = ('<meta content="text/html; charset=x-sjis" '
+                    'http-equiv="Content-type"/>')
+
+        # Here's a document incorporating that meta tag.
+        shift_jis_html = (
+            '<html><head>\n%s\n'
+            '<meta http-equiv="Content-language" content="ja"/>'
+            '</head><body>Shift-JIS markup goes here.') % meta_tag
+        soup = self.soup(shift_jis_html)
+
+        # Parse the document, and the charset is seemingly unaffected.
+        parsed_meta = soup.find('meta', {'http-equiv': 'Content-type'})
+        content = parsed_meta['content']
+        assert 'text/html; charset=x-sjis' == content
+
+        # But that value is actually a ContentMetaAttributeValue object.
+        assert isinstance(content, ContentMetaAttributeValue)
+
+        # And it will take on a value that reflects its current
+        # encoding.
+        assert 'text/html; charset=utf8' == content.encode("utf8")
+
+        # For the rest of the story, see TestSubstitutions in
+        # test_tree.py.
+
+    def test_html5_style_meta_tag_reflects_current_encoding(self):
+        # Here's the <meta> tag saying that a document is
+        # encoded in Shift-JIS.
+        meta_tag = ('<meta id="encoding" charset="x-sjis" />')
+
+        # Here's a document incorporating that meta tag.
+        shift_jis_html = (
+            '<html><head>\n%s\n'
+            '<meta http-equiv="Content-language" content="ja"/>'
+            '</head><body>Shift-JIS markup goes here.') % meta_tag
+        soup = self.soup(shift_jis_html)
+
+        # Parse the document, and the charset is seemingly unaffected.
+        parsed_meta = soup.find('meta', id="encoding")
+        charset = parsed_meta['charset']
+        assert 'x-sjis' == charset
+
+        # But that value is actually a CharsetMetaAttributeValue object.
+        assert isinstance(charset, CharsetMetaAttributeValue)
+
+        # And it will take on a value that reflects its current
+        # encoding.
+        assert 'utf8' == charset.encode("utf8")
+
+    def test_python_specific_encodings_not_used_in_charset(self):
+        # You can encode an HTML document using a Python-specific
+        # encoding, but that encoding won't be mentioned _inside_ the
+        # resulting document. Instead, the document will appear to
+        # have no encoding.
+        for markup in [
+            b'<meta charset="utf8"></head>'
+            b'<meta id="encoding" charset="utf-8" />'
+        ]:
+            soup = self.soup(markup)
+            for encoding in PYTHON_SPECIFIC_ENCODINGS:
+                if encoding in (
+                    'idna', 'mbcs', 'oem', 'undefined',
+                    'string_escape', 'string-escape'
+                ):
+                    # For one reason or another, these will raise an
+                    # exception if we actually try to use them, so don't
+                    # bother.
+                    continue
+                encoded = soup.encode(encoding)
+                assert b'meta charset=""' in encoded
+                assert encoding.encode("ascii") not in encoded
+
+    def test_tag_with_no_attributes_can_have_attributes_added(self):
+        data = self.soup("<a>text</a>")
+        data.a['foo'] = 'bar'
+        assert '<a foo="bar">text</a>' == data.a.decode()
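+    # Attribute mutation, sketched (mirrors the assertion above):
+    #
+    #   >>> data = BeautifulSoup("<a>text</a>", "html.parser")
+    #   >>> data.a['foo'] = 'bar'
+    #   >>> data.a.decode()
+    #   '<a foo="bar">text</a>'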
+    def test_closing_tag_with_no_opening_tag(self):
+        # Without BeautifulSoup.open_tag_counter, the </html> tag will
+        # cause _popToTag to be called over and over again as we look
+        # for a <html> tag that wasn't there. The result is that 'text2'
+        # will show up outside the body of the document.
+        soup = self.soup("<body><div><p>text1</p></html>text2</div></body>")
+        assert "<body><div><p>text1</p>text2</div></body>" == soup.body.decode()
+
+    def test_worst_case(self):
+        """Test the worst case (currently) for linking issues."""
+
+        soup = self.soup(BAD_DOCUMENT)
+        self.linkage_validator(soup)
+
+
+class XMLTreeBuilderSmokeTest(TreeBuilderSmokeTest):
+
+    def test_pickle_and_unpickle_identity(self):
+        # Pickling a tree, then unpickling it, yields a tree identical
+        # to the original.
+        tree = self.soup("<a><b>foo</a>")
+        dumped = pickle.dumps(tree, 2)
+        loaded = pickle.loads(dumped)
+        assert loaded.__class__ == BeautifulSoup
+        assert loaded.decode() == tree.decode()
+
+    def test_docstring_generated(self):
+        soup = self.soup("<root/>")
+        assert soup.encode() == b'<?xml version="1.0" encoding="utf-8"?>\n<root/>'
+
+    def test_xml_declaration(self):
+        markup = b"""<?xml version="1.0" encoding="utf8"?>\n<foo/>"""
+        soup = self.soup(markup)
+        assert markup == soup.encode("utf8")
+
+    def test_python_specific_encodings_not_used_in_xml_declaration(self):
+        # You can encode an XML document using a Python-specific
+        # encoding, but that encoding won't be mentioned _inside_ the
+        # resulting document.
+        markup = b"""<?xml version="1.0"?>\n<foo/>"""
+        soup = self.soup(markup)
+        for encoding in PYTHON_SPECIFIC_ENCODINGS:
+            if encoding in (
+                'idna', 'mbcs', 'oem', 'undefined',
+                'string_escape', 'string-escape'
+            ):
+                # For one reason or another, these will raise an
+                # exception if we actually try to use them, so don't
+                # bother.
+                continue
+            encoded = soup.encode(encoding)
+            assert b'<?xml version="1.0"?>' in encoded
+            assert encoding.encode("ascii") not in encoded
+
+    def test_processing_instruction(self):
+        markup = b"""<?xml version="1.0" encoding="utf8"?>\n<?PITarget PIContent?>"""
+        soup = self.soup(markup)
+        assert markup == soup.encode("utf8")
+
+    def test_real_xhtml_document(self):
+        """A real XHTML document should come out *exactly* the same as it went in."""
+        markup = b"""<?xml version="1.0" encoding="utf-8"?>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head><title>Hello.</title></head>
+<body>Goodbye.</body>
+</html>"""
+        soup = self.soup(markup)
+        assert soup.encode("utf-8") == markup
+
+    def test_nested_namespaces(self):
+        doc = b"""<?xml version="1.0" encoding="utf-8"?>
+<parent xmlns="http://ns1/">
+<child xmlns="http://ns2/" xmlns:ns3="http://ns3/">
+<grandchild ns3:attr="value" xmlns="http://ns4/"/>
+</child>
+</parent>"""
+        soup = self.soup(doc)
+        assert doc == soup.encode()
+
+    def test_formatter_processes_script_tag_for_xml_documents(self):
+        doc = """
+  <script type="text/javascript">
+  </script>
+"""
+        soup = BeautifulSoup(doc, "lxml-xml")
+        # lxml would have stripped this while parsing, but we can add
+        # it later.
+        soup.script.string = 'console.log("< < hey > > ");'
+        encoded = soup.encode()
+        assert b"&lt; &lt; hey &gt; &gt;" in encoded
+
+    def test_can_parse_unicode_document(self):
+        markup = '<?xml version="1.0" encoding="euc-jp"><root>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</root>'
+        soup = self.soup(markup)
+        assert 'Sacr\xe9 bleu!' == soup.root.string
+
+    def test_popping_namespaced_tag(self):
+        markup = '<rss xmlns:dc="foo"><dc:creator>b</dc:creator><dc:date>2012-07-02T20:33:42Z</dc:date><dc:rights>c</dc:rights><image>d</image></rss>'
+        soup = self.soup(markup)
+        assert str(soup.rss) == markup
+
+    def test_docstring_includes_correct_encoding(self):
+        soup = self.soup("<root/>")
+        assert soup.encode("latin1") == b'<?xml version="1.0" encoding="latin1"?>\n<root/>'
+
+    def test_large_xml_document(self):
+        """A large XML document should come out the same as it went in."""
+        markup = (b'<?xml version="1.0" encoding="utf-8"?>\n<root>'
+                  + b'0' * (2**12)
+                  + b'</root>')
+        soup = self.soup(markup)
+        assert soup.encode("utf-8") == markup
+
+    def test_tags_are_empty_element_if_and_only_if_they_are_empty(self):
+        self.assert_soup("<p>", "<p/>")
+        self.assert_soup("<p>foo</p>")
+
+    def test_namespaces_are_preserved(self):
+        markup = '<root xmlns:a="http://example.com/" xmlns:b="http://example.net/"><a:foo>This tag is in the a namespace</a:foo><b:foo>This tag is in the b namespace</b:foo></root>'
+        soup = self.soup(markup)
+        root = soup.root
+        assert "http://example.com/" == root['xmlns:a']
+        assert "http://example.net/" == root['xmlns:b']
+
+    def test_closing_namespaced_tag(self):
+        markup = '<p xmlns:dc="http://purl.org/dc/elements/1.1/"><dc:date>20010504</dc:date></p>'
+        soup = self.soup(markup)
+        assert str(soup.p) == markup
+
+    def test_namespaced_attributes(self):
+        markup = '<foo xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><bar xsi:schemaLocation="http://www.example.com"/></foo>'
+        soup = self.soup(markup)
+        assert str(soup.foo) == markup
+
+    def test_namespaced_attributes_xml_namespace(self):
+        markup = '<foo xml:lang="fr">bar</foo>'
+        soup = self.soup(markup)
+        assert str(soup.foo) == markup
+
+    def test_find_by_prefixed_name(self):
+        doc = """<?xml version="1.0" encoding="utf-8"?>
+<Document xmlns="http://example.com/ns0"
+     xmlns:ns1="http://example.com/ns1"
+     xmlns:ns2="http://example.com/ns2">
+  <ns1:tag>foo</ns1:tag>
+  <ns1:tag>bar</ns1:tag>
+  <ns2:tag key="value">baz</ns2:tag>
+</Document>
+"""
+        soup = self.soup(doc)
+
+        # There are three <tag> tags.
+        assert 3 == len(soup.find_all('tag'))
+
+        # But two of them are ns1:tag and one of them is ns2:tag.
+        assert 2 == len(soup.find_all('ns1:tag'))
+        assert 1 == len(soup.find_all('ns2:tag'))
+
+        assert 1 == len(soup.find_all('ns2:tag', key='value'))
+        assert 3 == len(soup.find_all(['ns1:tag', 'ns2:tag']))
+
+    def test_copy_tag_preserves_namespace(self):
+        xml = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<w:document xmlns:w="http://example.com/ns0"/>"""
+
+        soup = self.soup(xml)
+        tag = soup.document
+        duplicate = copy.copy(tag)
+
+        # The two tags have the same namespace prefix.
+        assert tag.prefix == duplicate.prefix
+
+    def test_worst_case(self):
+        """Test the worst case (currently) for linking issues."""
+
+        soup = self.soup(BAD_DOCUMENT)
+        self.linkage_validator(soup)
+
+
+class HTML5TreeBuilderSmokeTest(HTMLTreeBuilderSmokeTest):
+    """Smoke test for a tree builder that supports HTML5."""
+
+    def test_real_xhtml_document(self):
+        # Since XHTML is not HTML5, HTML5 parsers are not tested to handle
+        # XHTML documents in any particular way.
+        pass
+
+    def test_html_tags_have_namespace(self):
+        markup = "<a>"
+        soup = self.soup(markup)
+        assert "http://www.w3.org/1999/xhtml" == soup.a.namespace
+
+    def test_svg_tags_have_namespace(self):
+        markup = '<svg><circle/></svg>'
+        soup = self.soup(markup)
+        namespace = "http://www.w3.org/2000/svg"
+        assert namespace == soup.svg.namespace
+        assert namespace == soup.circle.namespace
+
+    def test_mathml_tags_have_namespace(self):
+        markup = '<math><msqrt>5</msqrt></math>'
+        soup = self.soup(markup)
+        namespace = 'http://www.w3.org/1998/Math/MathML'
+        assert namespace == soup.math.namespace
+        assert namespace == soup.msqrt.namespace
+
+    def test_xml_declaration_becomes_comment(self):
+        markup = '<?xml version="1.0" encoding="utf-8"?><html></html>'
+        soup = self.soup(markup)
+        assert isinstance(soup.contents[0], Comment)
+        assert soup.contents[0] == '?xml version="1.0" encoding="utf-8"?'
+        assert "html" == soup.contents[0].next_element.name
+
+
+def skipIf(condition, reason):
+    def nothing(test, *args, **kwargs):
+        return None
+
+    def decorator(test_item):
+        if condition:
+            return nothing
+        else:
+            return test_item
+
+    return decorator
diff --git a/bs4/tests/test_formatter.py b/bs4/tests/test_formatter.py
index 9b59177..12327ef 100644
--- a/bs4/tests/test_formatter.py
+++ b/bs4/tests/test_formatter.py
@@ -4,7 +4,7 @@ from bs4.formatter import (
     HTMLFormatter,
     XMLFormatter,
 )
-from .testing import SoupTest
+from . import SoupTest
 
 
 class TestFormatter(SoupTest):
diff --git a/bs4/tests/test_html5lib.py b/bs4/tests/test_html5lib.py
index ae87e4c..b32ab30 100644
--- a/bs4/tests/test_html5lib.py
+++ b/bs4/tests/test_html5lib.py
@@ -8,7 +8,7 @@ try:
 except ImportError as e:
     HTML5LIB_PRESENT = False
 from bs4.element import SoupStrainer
-from .testing import (
+from . import (
     HTML5TreeBuilderSmokeTest,
     SoupTest,
     skipIf,
diff --git a/bs4/tests/test_htmlparser.py b/bs4/tests/test_htmlparser.py
index b6bf0fc..5912cf5 100644
--- a/bs4/tests/test_htmlparser.py
+++ b/bs4/tests/test_htmlparser.py
@@ -6,7 +6,7 @@ import pickle
 import warnings
 from bs4.builder import HTMLParserTreeBuilder
 from bs4.builder._htmlparser import BeautifulSoupHTMLParser
-from .testing import SoupTest, HTMLTreeBuilderSmokeTest
+from . import SoupTest, HTMLTreeBuilderSmokeTest
 
 
 class TestHTMLParserTreeBuilder(SoupTest, HTMLTreeBuilderSmokeTest):
diff --git a/bs4/tests/test_lxml.py b/bs4/tests/test_lxml.py
index 31cb94e..1269e57 100644
--- a/bs4/tests/test_lxml.py
+++ b/bs4/tests/test_lxml.py
@@ -19,8 +19,7 @@ from bs4 import (
     BeautifulStoneSoup,
 )
 from bs4.element import Comment, Doctype, SoupStrainer
-from . import test_htmlparser
-from .testing import (
+from . import (
     HTMLTreeBuilderSmokeTest,
     XMLTreeBuilderSmokeTest,
     SoupTest,
diff --git a/bs4/tests/test_navigablestring.py b/bs4/tests/test_navigablestring.py
index e1d7508..37cb86a 100644
--- a/bs4/tests/test_navigablestring.py
+++ b/bs4/tests/test_navigablestring.py
@@ -1,4 +1,3 @@
-from .testing import SoupTest
 from bs4.element import (
     CData,
     Comment,
@@ -10,6 +9,8 @@ from bs4.element import (
     TemplateString,
 )
 
+from . import SoupTest
+
 
 class TestNavigableString(SoupTest):
 
     def test_text_acquisition_methods(self):
diff --git a/bs4/tests/test_soup.py b/bs4/tests/test_soup.py
index a67d1d9..0233e38 100644
--- a/bs4/tests/test_soup.py
+++ b/bs4/tests/test_soup.py
@@ -33,7 +33,7 @@ from bs4.dammit import (
     EntitySubstitution,
     UnicodeDammit,
 )
-from .testing import (
+from . import (
     default_builder,
     SoupTest,
     skipIf,
diff --git a/bs4/tests/test_tree.py b/bs4/tests/test_tree.py
index c21a254..776bcca 100644
--- a/bs4/tests/test_tree.py
+++ b/bs4/tests/test_tree.py
@@ -34,7 +34,7 @@ from bs4.element import (
     Tag,
     TemplateString,
 )
-from .testing import (
+from . import (
     SoupTest,
     skipIf,
 )
diff --git a/bs4/tests/testing.py b/bs4/tests/testing.py
deleted file mode 100644
index 97914c0..0000000
--- a/bs4/tests/testing.py
+++ /dev/null
@@ -1,1118 +0,0 @@
-
HTML5 does allow CDATA sections in SVG
-
A tag
-
A
tag that supposedly has contents.
-
AT&T
-
-
-
This numeric entity is missing the final semicolon:
-
-
a
-
This document contains (do you see it?)
-
This document ends with That attribute value was bogus
-The doctype is invalid because it contains extra whitespace -
That boolean attribute had no value
-
Here's a nonexistent entity: &#foo; (do you see it?)
-
This document ends before the entity finishes: > -

Paragraphs shouldn't contain block display elements, but this one does:

you see?

-Multiple values for the same attribute. -
Here's a table
-
-
This tag contains nothing but whitespace:
-

This p tag is cut off by

the end of the blockquote tag
-
Here's a nested table:
foo
This table contains bare markup
- -
This document contains a surprise doctype
- -
Tag name contains Unicode characters
- - -""" - - -class SoupTest(object): - - @property - def default_builder(self): - return default_builder - - def soup(self, markup, **kwargs): - """Build a Beautiful Soup object from markup.""" - builder = kwargs.pop('builder', self.default_builder) - return BeautifulSoup(markup, builder=builder, **kwargs) - - def document_for(self, markup, **kwargs): - """Turn an HTML fragment into a document. - - The details depend on the builder. - """ - return self.default_builder(**kwargs).test_fragment_to_document(markup) - - def assert_soup(self, to_parse, compare_parsed_to=None): - """Parse some markup using Beautiful Soup and verify that - the output markup is as expected. - """ - builder = self.default_builder - obj = BeautifulSoup(to_parse, builder=builder) - if compare_parsed_to is None: - compare_parsed_to = to_parse - - # Verify that the documents come out the same. - assert obj.decode() == self.document_for(compare_parsed_to) - - # Also run some checks on the BeautifulSoup object itself: - - # Verify that every tag that was opened was eventually closed. - - # There are no tags in the open tag counter. - assert all(v==0 for v in list(obj.open_tag_counter.values())) - - # The only tag in the tag stack is the one for the root - # document. - assert [obj.ROOT_TAG_NAME] == [x.name for x in obj.tagStack] - - assertSoupEquals = assert_soup - - def assertConnectedness(self, element): - """Ensure that next_element and previous_element are properly - set for all descendants of the given element. - """ - earlier = None - for e in element.descendants: - if earlier: - assert e == earlier.next_element - assert earlier == e.previous_element - earlier = e - - def linkage_validator(self, el, _recursive_call=False): - """Ensure proper linkage throughout the document.""" - descendant = None - # Document element should have no previous element or previous sibling. - # It also shouldn't have a next sibling. 
- if el.parent is None: - assert el.previous_element is None,\ - "Bad previous_element\nNODE: {}\nPREV: {}\nEXPECTED: {}".format( - el, el.previous_element, None - ) - assert el.previous_sibling is None,\ - "Bad previous_sibling\nNODE: {}\nPREV: {}\nEXPECTED: {}".format( - el, el.previous_sibling, None - ) - assert el.next_sibling is None,\ - "Bad next_sibling\nNODE: {}\nNEXT: {}\nEXPECTED: {}".format( - el, el.next_sibling, None - ) - - idx = 0 - child = None - last_child = None - last_idx = len(el.contents) - 1 - for child in el.contents: - descendant = None - - # Parent should link next element to their first child - # That child should have no previous sibling - if idx == 0: - if el.parent is not None: - assert el.next_element is child,\ - "Bad next_element\nNODE: {}\nNEXT: {}\nEXPECTED: {}".format( - el, el.next_element, child - ) - assert child.previous_element is el,\ - "Bad previous_element\nNODE: {}\nPREV: {}\nEXPECTED: {}".format( - child, child.previous_element, el - ) - assert child.previous_sibling is None,\ - "Bad previous_sibling\nNODE: {}\nPREV {}\nEXPECTED: {}".format( - child, child.previous_sibling, None - ) - - # If not the first child, previous index should link as sibling to this index - # Previous element should match the last index or the last bubbled up descendant - else: - assert child.previous_sibling is el.contents[idx - 1],\ - "Bad previous_sibling\nNODE: {}\nPREV {}\nEXPECTED {}".format( - child, child.previous_sibling, el.contents[idx - 1] - ) - assert el.contents[idx - 1].next_sibling is child,\ - "Bad next_sibling\nNODE: {}\nNEXT {}\nEXPECTED {}".format( - el.contents[idx - 1], el.contents[idx - 1].next_sibling, child - ) - - if last_child is not None: - assert child.previous_element is last_child,\ - "Bad previous_element\nNODE: {}\nPREV {}\nEXPECTED {}\nCONTENTS {}".format( - child, child.previous_element, last_child, child.parent.contents - ) - assert last_child.next_element is child,\ - "Bad next_element\nNODE: {}\nNEXT {}\nEXPECTED {}".format( - last_child, last_child.next_element, child - ) - - if isinstance(child, Tag) and child.contents: - descendant = self.linkage_validator(child, True) - # A bubbled up descendant should have no next siblings - assert descendant.next_sibling is None,\ - "Bad next_sibling\nNODE: {}\nNEXT {}\nEXPECTED {}".format( - descendant, descendant.next_sibling, None - ) - - # Mark last child as either the bubbled up descendant or the current child - if descendant is not None: - last_child = descendant - else: - last_child = child - - # If last child, there are non next siblings - if idx == last_idx: - assert child.next_sibling is None,\ - "Bad next_sibling\nNODE: {}\nNEXT {}\nEXPECTED {}".format( - child, child.next_sibling, None - ) - idx += 1 - - child = descendant if descendant is not None else child - if child is None: - child = el - - if not _recursive_call and child is not None: - target = el - while True: - if target is None: - assert child.next_element is None, \ - "Bad next_element\nNODE: {}\nNEXT {}\nEXPECTED {}".format( - child, child.next_element, None - ) - break - elif target.next_sibling is not None: - assert child.next_element is target.next_sibling, \ - "Bad next_element\nNODE: {}\nNEXT {}\nEXPECTED {}".format( - child, child.next_element, target.next_sibling - ) - break - target = target.parent - - # We are done, so nothing to return - return None - else: - # Return the child to the recursive caller - return child - - -class TreeBuilderSmokeTest(object): - # Tests that are common to HTML and XML tree 
builders. - - def test_fuzzed_input(self): - # This test centralizes in one place the various fuzz tests - # for Beautiful Soup created by the oss-fuzz project. - - # These strings superficially resemble markup, but they - # generally can't be parsed into anything. The best we can - # hope for is that parsing these strings won't crash the - # parser. - # - # n.b. This markup is commented out because these fuzz tests - # _do_ crash the parser. However the crashes are due to bugs - # in html.parser, not Beautiful Soup -- otherwise I'd fix the - # bugs! - - bad_markup = [ - # https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=28873 - # https://github.com/guidovranken/python-library-fuzzers/blob/master/corp-html/519e5b4269a01185a0d5e76295251921da2f0700 - # https://bugs.python.org/issue37747 - # - #b'\nSome CSS" - ) - assert isinstance(soup.style.string, Stylesheet) - assert isinstance(soup.script.string, Script) - - soup = self.soup( - "" - ) - assert isinstance(soup.style.string, Stylesheet) - # The contents of the style tag resemble an HTML comment, but - # it's not treated as a comment. - assert soup.style.string == "" - assert isinstance(soup.style.string, Stylesheet) - - def test_pickle_and_unpickle_identity(self): - # Pickling a tree, then unpickling it, yields a tree identical - # to the original. - tree = self.soup("foo") - dumped = pickle.dumps(tree, 2) - loaded = pickle.loads(dumped) - assert loaded.__class__ == BeautifulSoup - assert loaded.decode() == tree.decode() - - def assertDoctypeHandled(self, doctype_fragment): - """Assert that a given doctype string is handled correctly.""" - doctype_str, soup = self._document_with_doctype(doctype_fragment) - - # Make sure a Doctype object was created. - doctype = soup.contents[0] - assert doctype.__class__ == Doctype - assert doctype == doctype_fragment - assert soup.encode("utf8")[:len(doctype_str)] == doctype_str - - # Make sure that the doctype was correctly associated with the - # parse tree and that the rest of the document parsed. - assert soup.p.contents[0] == 'foo' - - def _document_with_doctype(self, doctype_fragment, doctype_string="DOCTYPE"): - """Generate and parse a document with the given doctype.""" - doctype = '' % (doctype_string, doctype_fragment) - markup = doctype + '\n

foo

' - soup = self.soup(markup) - return doctype.encode("utf8"), soup - - def test_normal_doctypes(self): - """Make sure normal, everyday HTML doctypes are handled correctly.""" - self.assertDoctypeHandled("html") - self.assertDoctypeHandled( - 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"') - - def test_empty_doctype(self): - soup = self.soup("") - doctype = soup.contents[0] - assert "" == doctype.strip() - - def test_mixed_case_doctype(self): - # A lowercase or mixed-case doctype becomes a Doctype. - for doctype_fragment in ("doctype", "DocType"): - doctype_str, soup = self._document_with_doctype( - "html", doctype_fragment - ) - - # Make sure a Doctype object was created and that the DOCTYPE - # is uppercase. - doctype = soup.contents[0] - assert doctype.__class__ == Doctype - assert doctype == "html" - assert soup.encode("utf8")[:len(doctype_str)] == b"" - - # Make sure that the doctype was correctly associated with the - # parse tree and that the rest of the document parsed. - assert soup.p.contents[0] == 'foo' - - def test_public_doctype_with_url(self): - doctype = 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"' - self.assertDoctypeHandled(doctype) - - def test_system_doctype(self): - self.assertDoctypeHandled('foo SYSTEM "http://www.example.com/"') - - def test_namespaced_system_doctype(self): - # We can handle a namespaced doctype with a system ID. - self.assertDoctypeHandled('xsl:stylesheet SYSTEM "htmlent.dtd"') - - def test_namespaced_public_doctype(self): - # Test a namespaced doctype with a public id. - self.assertDoctypeHandled('xsl:stylesheet PUBLIC "htmlent.dtd"') - - def test_real_xhtml_document(self): - """A real XHTML document should come out more or less the same as it went in.""" - markup = b""" - - -Hello. -Goodbye. -""" - soup = self.soup(markup) - assert soup.encode("utf-8").replace(b"\n", b"") == markup.replace(b"\n", b"") - - def test_namespaced_html(self): - """When a namespaced XML document is parsed as HTML it should - be treated as HTML with weird tag names. - """ - markup = b"""content""" - soup = self.soup(markup) - assert 2 == len(soup.find_all("ns1:foo")) - - def test_processing_instruction(self): - # We test both Unicode and bytestring to verify that - # process_markup correctly sets processing_instruction_class - # even when the markup is already Unicode and there is no - # need to process anything. - markup = """""" - soup = self.soup(markup) - assert markup == soup.decode() - - markup = b"""""" - soup = self.soup(markup) - assert markup == soup.encode("utf8") - - def test_deepcopy(self): - """Make sure you can copy the tree builder. - - This is important because the builder is part of a - BeautifulSoup object, and we want to be able to copy that. - """ - copy.deepcopy(self.default_builder) - - def test_p_tag_is_never_empty_element(self): - """A

tag is never designated as an empty-element tag. - - Even if the markup shows it as an empty-element tag, it - shouldn't be presented that way. - """ - soup = self.soup("

") - assert not soup.p.is_empty_element - assert str(soup.p) == "

" - - def test_unclosed_tags_get_closed(self): - """A tag that's not closed by the end of the document should be closed. - - This applies to all tags except empty-element tags. - """ - self.assert_soup("

", "

") - self.assert_soup("", "") - - self.assert_soup("
", "
") - - def test_br_is_always_empty_element_tag(self): - """A
tag is designated as an empty-element tag. - - Some parsers treat

as one
tag, some parsers as - two tags, but it should always be an empty-element tag. - """ - soup = self.soup("

") - assert soup.br.is_empty_element - assert str(soup.br) == "
" - - def test_nested_formatting_elements(self): - self.assert_soup("") - - def test_double_head(self): - html = ''' - - -Ordinary HEAD element test - - - -Hello, world! - - -''' - soup = self.soup(html) - assert "text/javascript" == soup.find('script')['type'] - - def test_comment(self): - # Comments are represented as Comment objects. - markup = "

foobaz

" - self.assert_soup(markup) - - soup = self.soup(markup) - comment = soup.find(text="foobar") - assert comment.__class__ == Comment - - # The comment is properly integrated into the tree. - foo = soup.find(text="foo") - assert comment == foo.next_element - baz = soup.find(text="baz") - assert comment == baz.previous_element - - def test_preserved_whitespace_in_pre_and_textarea(self): - """Whitespace must be preserved in
 and "
-        self.assert_soup(pre_markup)
-        self.assert_soup(textarea_markup)
-
-        soup = self.soup(pre_markup)
-        assert soup.pre.prettify() == pre_markup
-
-        soup = self.soup(textarea_markup)
-        assert soup.textarea.prettify() == textarea_markup
-
-        soup = self.soup("")
-        assert soup.textarea.prettify() == ""
-
-    def test_nested_inline_elements(self):
-        """Inline elements can be nested indefinitely."""
-        b_tag = "Inside a B tag"
-        self.assert_soup(b_tag)
-
-        nested_b_tag = "

A nested tag

" - self.assert_soup(nested_b_tag) - - double_nested_b_tag = "

A doubly nested tag

" - self.assert_soup(nested_b_tag) - - def test_nested_block_level_elements(self): - """Block elements can be nested.""" - soup = self.soup('

Foo

') - blockquote = soup.blockquote - assert blockquote.p.b.string == 'Foo' - assert blockquote.b.string == 'Foo' - - def test_correctly_nested_tables(self): - """One table can go inside another one.""" - markup = ('' - '' - "') - - self.assert_soup( - markup, - '
Here's another table:" - '' - '' - '
foo
Here\'s another table:' - '
foo
' - '
') - - self.assert_soup( - "" - "" - "
Foo
Bar
Baz
") - - def test_multivalued_attribute_with_whitespace(self): - # Whitespace separating the values of a multi-valued attribute - # should be ignored. - - markup = '
' - soup = self.soup(markup) - assert ['foo', 'bar'] == soup.div['class'] - - # If you search by the literal name of the class it's like the whitespace - # wasn't there. - assert soup.div == soup.find('div', class_="foo bar") - - def test_deeply_nested_multivalued_attribute(self): - # html5lib can set the attributes of the same tag many times - # as it rearranges the tree. This has caused problems with - # multivalued attributes. - markup = '
' - soup = self.soup(markup) - assert ["css"] == soup.div.div['class'] - - def test_multivalued_attribute_on_html(self): - # html5lib uses a different API to set the attributes ot the - # tag. This has caused problems with multivalued - # attributes. - markup = '' - soup = self.soup(markup) - assert ["a", "b"] == soup.html['class'] - - def test_angle_brackets_in_attribute_values_are_escaped(self): - self.assert_soup('', '') - - def test_strings_resembling_character_entity_references(self): - # "&T" and "&p" look like incomplete character entities, but they are - # not. - self.assert_soup( - "

• AT&T is in the s&p 500

", - "

\u2022 AT&T is in the s&p 500

" - ) - - def test_apos_entity(self): - self.assert_soup( - "

Bob's Bar

", - "

Bob's Bar

", - ) - - def test_entities_in_foreign_document_encoding(self): - # “ and ” are invalid numeric entities referencing - # Windows-1252 characters. - references a character common - # to Windows-1252 and Unicode, and ☃ references a - # character only found in Unicode. - # - # All of these entities should be converted to Unicode - # characters. - markup = "

“Hello” -☃

" - soup = self.soup(markup) - assert "“Hello” -☃" == soup.p.string - - def test_entities_in_attributes_converted_to_unicode(self): - expect = '

' - self.assert_soup('

', expect) - self.assert_soup('

', expect) - self.assert_soup('

', expect) - self.assert_soup('

', expect) - - def test_entities_in_text_converted_to_unicode(self): - expect = '

pi\N{LATIN SMALL LETTER N WITH TILDE}ata

' - self.assert_soup("

piñata

", expect) - self.assert_soup("

piñata

", expect) - self.assert_soup("

piñata

", expect) - self.assert_soup("

piñata

", expect) - - def test_quot_entity_converted_to_quotation_mark(self): - self.assert_soup("

I said "good day!"

", - '

I said "good day!"

') - - def test_out_of_range_entity(self): - expect = "\N{REPLACEMENT CHARACTER}" - self.assert_soup("�", expect) - self.assert_soup("�", expect) - self.assert_soup("�", expect) - - def test_multipart_strings(self): - "Mostly to prevent a recurrence of a bug in the html5lib treebuilder." - soup = self.soup("

\nfoo

") - assert "p" == soup.h2.string.next_element.name - assert "p" == soup.p.name - self.assertConnectedness(soup) - - def test_empty_element_tags(self): - """Verify consistent handling of empty-element tags, - no matter how they come in through the markup. - """ - self.assert_soup('


', "


") - self.assert_soup('


', "


") - - def test_head_tag_between_head_and_body(self): - "Prevent recurrence of a bug in the html5lib treebuilder." - content = """ - - foo - -""" - soup = self.soup(content) - assert soup.html.body is not None - self.assertConnectedness(soup) - - def test_multiple_copies_of_a_tag(self): - "Prevent recurrence of a bug in the html5lib treebuilder." - content = """ - - - - - -""" - soup = self.soup(content) - self.assertConnectedness(soup.article) - - def test_basic_namespaces(self): - """Parsers don't need to *understand* namespaces, but at the - very least they should not choke on namespaces or lose - data.""" - - markup = b'4' - soup = self.soup(markup) - assert markup == soup.encode() - html = soup.html - assert 'http://www.w3.org/1999/xhtml' == soup.html['xmlns'] - assert 'http://www.w3.org/1998/Math/MathML' == soup.html['xmlns:mathml'] - assert 'http://www.w3.org/2000/svg' == soup.html['xmlns:svg'] - - def test_multivalued_attribute_value_becomes_list(self): - markup = b'' - soup = self.soup(markup) - assert ['foo', 'bar'] == soup.a['class'] - - # - # Generally speaking, tests below this point are more tests of - # Beautiful Soup than tests of the tree builders. But parsers are - # weird, so we run these tests separately for every tree builder - # to detect any differences between them. - # - - def test_can_parse_unicode_document(self): - # A seemingly innocuous document... but it's in Unicode! And - # it contains characters that can't be represented in the - # encoding found in the declaration! The horror! - markup = 'Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!' - soup = self.soup(markup) - assert 'Sacr\xe9 bleu!' == soup.body.string - - def test_soupstrainer(self): - """Parsers should be able to work with SoupStrainers.""" - strainer = SoupStrainer("b") - soup = self.soup("A bold statement", - parse_only=strainer) - assert soup.decode() == "bold" - - def test_single_quote_attribute_values_become_double_quotes(self): - self.assert_soup("", - '') - - def test_attribute_values_with_nested_quotes_are_left_alone(self): - text = """a""" - self.assert_soup(text) - - def test_attribute_values_with_double_nested_quotes_get_quoted(self): - text = """a""" - soup = self.soup(text) - soup.foo['attr'] = 'Brawls happen at "Bob\'s Bar"' - self.assert_soup( - soup.foo.decode(), - """a""") - - def test_ampersand_in_attribute_value_gets_escaped(self): - self.assert_soup('', - '') - - self.assert_soup( - 'foo', - 'foo') - - def test_escaped_ampersand_in_attribute_value_is_left_alone(self): - self.assert_soup('') - - def test_entities_in_strings_converted_during_parsing(self): - # Both XML and HTML entities are converted to Unicode characters - # during parsing. - text = "

-
-    def test_entities_in_strings_converted_during_parsing(self):
-        # Both XML and HTML entities are converted to Unicode characters
-        # during parsing.
-        text = "<p>&lt;&lt;sacr&eacute;&#32;bleu!&gt;&gt;</p>"
-        expected = "<p>&lt;&lt;sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</p>"
-        self.assert_soup(text, expected)
-
-    def test_smart_quotes_converted_on_the_way_in(self):
-        # Microsoft smart quotes are converted to Unicode characters during
-        # parsing.
-        quote = b"<p>\x91Foo\x92</p>"
-        soup = self.soup(quote)
-        assert soup.p.string == "\N{LEFT SINGLE QUOTATION MARK}Foo\N{RIGHT SINGLE QUOTATION MARK}"

-
-    def test_non_breaking_spaces_converted_on_the_way_in(self):
-        soup = self.soup("<a>&nbsp;&nbsp;</a>")
-        assert soup.a.string == "\N{NO-BREAK SPACE}" * 2
-
-    def test_entities_converted_on_the_way_out(self):
-        text = "<p>&lt;&lt;sacr&eacute;&#32;bleu!&gt;&gt;</p>"
-        expected = "<p>&lt;&lt;sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</p>".encode("utf-8")
-        soup = self.soup(text)
-        assert soup.p.encode("utf-8") == expected
-
-    def test_real_iso_latin_document(self):
-        # Smoke test of interrelated functionality, using an
-        # easy-to-understand document.
-
-        # Here it is in Unicode. Note that it claims to be in ISO-Latin-1.
-        unicode_html = '<html><head><meta content="text/html; charset=ISO-Latin-1" http-equiv="Content-type"/></head><body><p>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</p></body></html>'
-
-        # That's because we're going to encode it into ISO-Latin-1, and use
-        # that to test.
-        iso_latin_html = unicode_html.encode("iso-8859-1")
-
-        # Parse the ISO-Latin-1 HTML.
-        soup = self.soup(iso_latin_html)
-        # Encode it to UTF-8.
-        result = soup.encode("utf-8")
-
-        # What do we expect the result to look like? Well, it would
-        # look like unicode_html, except that the META tag would say
-        # UTF-8 instead of ISO-Latin-1.
-        expected = unicode_html.replace("ISO-Latin-1", "utf-8")
-
-        # And, of course, it would be in UTF-8, not Unicode.
-        expected = expected.encode("utf-8")
-
-        # Ta-da!
-        assert result == expected
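The same round trip works as a standalone script; a sketch, again assuming
the stdlib backend:

    from bs4 import BeautifulSoup

    latin1 = ('<html><head><meta content="text/html; charset=ISO-Latin-1" '
              'http-equiv="Content-type"/></head><body><p>Sacr\xe9 bleu!</p>'
              '</body></html>').encode("iso-8859-1")
    soup = BeautifulSoup(latin1, "html.parser")

    utf8 = soup.encode("utf-8")
    assert b"charset=utf-8" in utf8                  # meta tag now says UTF-8
    assert "Sacr\xe9 bleu!".encode("utf-8") in utf8  # body really is UTF-8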
-
-    def test_real_shift_jis_document(self):
-        # Smoke test to make sure the parser can handle a document in
-        # Shift-JIS encoding, without choking.
-        shift_jis_html = (
-            b'<html><head></head><body><pre>'
-            b'\x82\xb1\x82\xea\x82\xcdShift-JIS\x82\xc5\x83R\x81[\x83f'
-            b'\x83B\x83\x93\x83O\x82\xb3\x82\xea\x82\xbd\x93\xfa\x96{\x8c'
-            b'\xea\x82\xcc\x83t\x83@\x83C\x83\x8b\x82\xc5\x82\xb7\x81B'
-            b'</pre></body></html>')
-        unicode_html = shift_jis_html.decode("shift-jis")
-        soup = self.soup(unicode_html)
-
-        # Make sure the parse tree is correctly encoded to various
-        # encodings.
-        assert soup.encode("utf-8") == unicode_html.encode("utf-8")
-        assert soup.encode("euc_jp") == unicode_html.encode("euc_jp")
-
-    def test_real_hebrew_document(self):
-        # A real-world test to make sure we can convert ISO-8859-8 (a
-        # Hebrew encoding) to UTF-8.
-        hebrew_document = b'<html><head><title>Hebrew (ISO 8859-8) in Visual Directionality</title></head><body><h1>Hebrew (ISO 8859-8) in Visual Directionality</h1>\xed\xe5\xec\xf9</body></html>'
-        soup = self.soup(
-            hebrew_document, from_encoding="iso8859-8")
-        # Some tree builders call it iso8859-8, others call it iso-8859-8.
-        # That's not a difference we really care about.
-        assert soup.original_encoding in ('iso8859-8', 'iso-8859-8')
-        assert soup.encode('utf-8') == (
-            hebrew_document.decode("iso8859-8").encode("utf-8")
-        )
-
-    def test_meta_tag_reflects_current_encoding(self):
-        # Here's the <meta> tag saying that a document is
-        # encoded in Shift-JIS.
-        meta_tag = ('<meta content="text/html; charset=x-sjis" '
-                    'http-equiv="Content-type"/>')
-
-        # Here's a document incorporating that meta tag.
-        shift_jis_html = (
-            '<html><head>\n%s\n'
-            '<meta http-equiv="Content-language" content="ja"/>'
-            '</head><body>Shift-JIS markup goes here.') % meta_tag
-        soup = self.soup(shift_jis_html)
-
-        # Parse the document, and the charset is seemingly unaffected.
-        parsed_meta = soup.find('meta', {'http-equiv': 'Content-type'})
-        content = parsed_meta['content']
-        assert 'text/html; charset=x-sjis' == content
-
-        # But that value is actually a ContentMetaAttributeValue object.
-        assert isinstance(content, ContentMetaAttributeValue)
-
-        # And it will take on a value that reflects its current
-        # encoding.
-        assert 'text/html; charset=utf8' == content.encode("utf8")
-
-        # For the rest of the story, see TestSubstitutions in
-        # test_tree.py.
-
-    def test_html5_style_meta_tag_reflects_current_encoding(self):
-        # Here's the <meta> tag saying that a document is
-        # encoded in Shift-JIS.
-        meta_tag = ('<meta id="encoding" charset="x-sjis" />')
-
-        # Here's a document incorporating that meta tag.
-        shift_jis_html = (
-            '<html><head>\n%s\n'
-            '<meta http-equiv="Content-language" content="ja"/>'
-            '</head><body>Shift-JIS markup goes here.') % meta_tag
-        soup = self.soup(shift_jis_html)
-
-        # Parse the document, and the charset is seemingly unaffected.
-        parsed_meta = soup.find('meta', id="encoding")
-        charset = parsed_meta['charset']
-        assert 'x-sjis' == charset
-
-        # But that value is actually a CharsetMetaAttributeValue object.
-        assert isinstance(charset, CharsetMetaAttributeValue)
-
-        # And it will take on a value that reflects its current
-        # encoding.
-        assert 'utf8' == charset.encode("utf8")
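A sketch of the encoding-aware attribute values these two tests describe,
assuming the stdlib backend:

    from bs4 import BeautifulSoup
    from bs4.element import CharsetMetaAttributeValue

    soup = BeautifulSoup('<html><head><meta charset="x-sjis"></head></html>',
                         'html.parser')
    charset = soup.meta['charset']
    assert charset == 'x-sjis'     # compares like a plain string...
    assert isinstance(charset, CharsetMetaAttributeValue)
    # ...but is rewritten to match whatever encoding the tree is output in.
    assert b'charset="utf8"' in soup.encode('utf8')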

-
-    def test_python_specific_encodings_not_used_in_charset(self):
-        # You can encode an HTML document using a Python-specific
-        # encoding, but that encoding won't be mentioned _inside_ the
-        # resulting document. Instead, the document will appear to
-        # have no encoding.
-        for markup in [
-            b'<meta charset="utf8"></head>'
-            b'<meta id="encoding" charset="utf-8" />'
-        ]:
-            soup = self.soup(markup)
-            for encoding in PYTHON_SPECIFIC_ENCODINGS:
-                if encoding in (
-                    'idna', 'mbcs', 'oem', 'undefined',
-                    'string_escape', 'string-escape'
-                ):
-                    # For one reason or another, these will raise an
-                    # exception if we actually try to use them, so don't
-                    # bother.
-                    continue
-                encoded = soup.encode(encoding)
-                assert b'meta charset=""' in encoded
-                assert encoding.encode("ascii") not in encoded
-
-    def test_tag_with_no_attributes_can_have_attributes_added(self):
-        data = self.soup("<a>text</a>")
-        data.a['foo'] = 'bar'
-        assert '<a foo="bar">text</a>' == data.a.decode()
-
-    def test_closing_tag_with_no_opening_tag(self):
-        # Without BeautifulSoup.open_tag_counter, the </span> tag will
-        # cause _popToTag to be called over and over again as we look
-        # for a <span> tag that wasn't there. The result is that 'text2'
-        # will show up outside the body of the document.
-        soup = self.soup("<body><div><p>text1</p></span>text2</div></body>")
-        assert "<body><div><p>text1</p>text2</div></body>" == soup.body.decode()
-
-    def test_worst_case(self):
-        """Test the worst case (currently) for linking issues."""
-
-        soup = self.soup(BAD_DOCUMENT)
-        self.linkage_validator(soup)
-
-
-class XMLTreeBuilderSmokeTest(TreeBuilderSmokeTest):
-
-    def test_pickle_and_unpickle_identity(self):
-        # Pickling a tree, then unpickling it, yields a tree identical
-        # to the original.
-        tree = self.soup("<a><b>foo</a>")
-        dumped = pickle.dumps(tree, 2)
-        loaded = pickle.loads(dumped)
-        assert loaded.__class__ == BeautifulSoup
-        assert loaded.decode() == tree.decode()
-
-    def test_docstring_generated(self):
-        soup = self.soup("<root/>")
-        assert soup.encode() == b'<?xml version="1.0" encoding="utf-8"?>\n<root/>'
-
-    def test_xml_declaration(self):
-        markup = b"""<?xml version="1.0" encoding="utf8"?>\n<foo/>"""
-        soup = self.soup(markup)
-        assert markup == soup.encode("utf8")

", "

") - self.assert_soup("

foo

") - - def test_namespaces_are_preserved(self): - markup = 'This tag is in the a namespaceThis tag is in the b namespace' - soup = self.soup(markup) - root = soup.root - assert "http://example.com/" == root['xmlns:a'] - assert "http://example.net/" == root['xmlns:b'] - - def test_closing_namespaced_tag(self): - markup = '

-
-    def test_closing_namespaced_tag(self):
-        markup = '<p xmlns:dc="http://purl.org/dc/elements/1.1/"><dc:date>20010504</dc:date></p>'
-        soup = self.soup(markup)
-        assert str(soup.p) == markup
-
-    def test_namespaced_attributes(self):
-        markup = '<foo xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><bar xsi:schemaLocation="http://www.example.com"/></foo>'
-        soup = self.soup(markup)
-        assert str(soup.foo) == markup
-
-    def test_namespaced_attributes_xml_namespace(self):
-        markup = '<foo xml:lang="fr">bar</foo>'
-        soup = self.soup(markup)
-        assert str(soup.foo) == markup
-
-    def test_find_by_prefixed_name(self):
-        doc = """<?xml version="1.0" encoding="utf-8"?>
-<Document xmlns="http://example.com/ns0"
-     xmlns:ns1="http://example.com/ns1"
-     xmlns:ns2="http://example.com/ns2"
-     <ns1:tag>foo</ns1:tag>
-     <ns1:tag>bar</ns1:tag>
-     <ns2:tag key="value">baz</ns2:tag>
-</Document>
-"""
-        soup = self.soup(doc)
-
-        # There are three <tag> tags.
-        assert 3 == len(soup.find_all('tag'))
-
-        # But two of them are ns1:tag and one of them is ns2:tag.
-        assert 2 == len(soup.find_all('ns1:tag'))
-        assert 1 == len(soup.find_all('ns2:tag'))
-
-        assert 1 == len(soup.find_all('ns2:tag', key='value'))
-        assert 3 == len(soup.find_all(['ns1:tag', 'ns2:tag']))
-
-    def test_copy_tag_preserves_namespace(self):
-        xml = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<w:document xmlns:w="http://schemas.openxmlformats.org/wordprocessingml/2006/main"></w:document>"""
-
-        soup = self.soup(xml)
-        tag = soup.document
-        duplicate = copy.copy(tag)
-
-        # The two tags have the same namespace prefix.
-        assert tag.prefix == duplicate.prefix
-
-    def test_worst_case(self):
-        """Test the worst case (currently) for linking issues."""
-
-        soup = self.soup(BAD_DOCUMENT)
-        self.linkage_validator(soup)
-
-
-class HTML5TreeBuilderSmokeTest(HTMLTreeBuilderSmokeTest):
-    """Smoke test for a tree builder that supports HTML5."""
-
-    def test_real_xhtml_document(self):
-        # Since XHTML is not HTML5, HTML5 parsers are not tested to handle
-        # XHTML documents in any particular way.
-        pass
-
-    def test_html_tags_have_namespace(self):
-        markup = "<a>"
-        soup = self.soup(markup)
-        assert "http://www.w3.org/1999/xhtml" == soup.a.namespace
-
-    def test_svg_tags_have_namespace(self):
-        markup = '<svg><circle/></svg>'
-        soup = self.soup(markup)
-        namespace = "http://www.w3.org/2000/svg"
-        assert namespace == soup.svg.namespace
-        assert namespace == soup.circle.namespace
-
-    def test_mathml_tags_have_namespace(self):
-        markup = '<math><msqrt>5</msqrt></math>'
-        soup = self.soup(markup)
-        namespace = 'http://www.w3.org/1998/Math/MathML'
-        assert namespace == soup.math.namespace
-        assert namespace == soup.msqrt.namespace
-
-    def test_xml_declaration_becomes_comment(self):
-        markup = '<?xml version="1.0" encoding="utf-8"?><html></html>'
-        soup = self.soup(markup)
-        assert isinstance(soup.contents[0], Comment)
-        assert soup.contents[0] == '?xml version="1.0" encoding="utf-8"?'
-        assert "html" == soup.contents[0].next_element.name
-
-def skipIf(condition, reason):
-    def nothing(test, *args, **kwargs):
-        return None
-
-    def decorator(test_item):
-        if condition:
-            return nothing
-        else:
-            return test_item
-
-    return decorator
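skipIf predates the suite's pytest migration: when the condition holds, the
decorated test is swapped for a function that does nothing, so it silently
passes. A hypothetical usage sketch (the reason string documents intent but
is never read by skipIf itself):

    import sys

    @skipIf(sys.version_info < (3, 6), "needs f-string support")
    def test_formatting(self):
        assert f"{2 + 2}" == "4"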

-- 
cgit v1.2.3