author     Leonard Richardson <leonard.richardson@canonical.com>  2012-01-20 14:22:42 -0500
committer  Leonard Richardson <leonard.richardson@canonical.com>  2012-01-20 14:22:42 -0500
commit     df26dc64d868875d7cd8ca550f1a174d68dd7c67 (patch)
tree       d843b35d5d06ee78c2e46016610ac4153994606e
parent     0954d5fb94b887349f754e2f5415df4a76e586b7 (diff)

Replaced assertEquals with assertEqual to get rid of deprecation notice.
-rw-r--r--  bs4/testing.py                      |   2
-rw-r--r--  bs4/tests/test_builder_registry.py  |  49
-rw-r--r--  bs4/tests/test_html5lib.py          |  52
-rw-r--r--  bs4/tests/test_htmlparser.py        | 126
-rw-r--r--  bs4/tests/test_lxml.py              | 106
-rw-r--r--  bs4/tests/test_soup.py              |  48
-rw-r--r--  bs4/tests/test_tree.py              | 160

7 files changed, 334 insertions, 209 deletions
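
Why the rename matters, as a minimal sketch (not part of this commit; the class and test names below are illustrative): unittest keeps assertEquals only as a deprecated alias of assertEqual, so calling it emits a PendingDeprecationWarning on Python 2.7 and a DeprecationWarning on Python 3.2+ (the alias was removed entirely in 3.12). That warning is the deprecation notice this commit silences.

import unittest
import warnings

class AliasDemo(unittest.TestCase):
    def test_deprecated_alias_warns(self):
        # Old spelling: still passes on interpreters that keep the alias,
        # but it triggers the deprecation notice named in the commit message.
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter("always")
            self.assertEquals(1 + 1, 2)
        self.assertTrue(any(
            issubclass(w.category, (DeprecationWarning, PendingDeprecationWarning))
            for w in caught))

    def test_preferred_spelling_is_quiet(self):
        # Preferred spelling: same assertion, no warning.
        self.assertEqual(1 + 1, 2)

if __name__ == '__main__':
    unittest.main()
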
diff --git a/bs4/testing.py b/bs4/testing.py
index 91c623a..c374a29 100644
--- a/bs4/testing.py
+++ b/bs4/testing.py
@@ -30,4 +30,4 @@ class SoupTest(unittest.TestCase):
if compare_parsed_to is None:
compare_parsed_to = to_parse
- self.assertEquals(obj.decode(), self.document_for(compare_parsed_to))
+ self.assertEqual(obj.decode(), self.document_for(compare_parsed_to))
diff --git a/bs4/tests/test_builder_registry.py b/bs4/tests/test_builder_registry.py
index 17caace..860639d 100644
--- a/bs4/tests/test_builder_registry.py
+++ b/bs4/tests/test_builder_registry.py
@@ -12,9 +12,8 @@ from bs4.builder import (
)
try:
- from bs4.builder import (
- HTML5TreeBuilder,
- )
+ from bs4.builder import HTML5TreeBuilder
+ HTML5LIB_PRESENT = True
except ImportError:
HTML5LIB_PRESENT = False
@@ -23,37 +22,37 @@ class BuiltInRegistryTest(unittest.TestCase):
"""Test the built-in registry with the default builders registered."""
def test_combination(self):
- self.assertEquals(registry.lookup('fast', 'html'),
+ self.assertEqual(registry.lookup('fast', 'html'),
LXMLTreeBuilder)
- self.assertEquals(registry.lookup('permissive', 'xml'),
+ self.assertEqual(registry.lookup('permissive', 'xml'),
LXMLTreeBuilderForXML)
- self.assertEquals(registry.lookup('strict', 'html'),
+ self.assertEqual(registry.lookup('strict', 'html'),
HTMLParserTreeBuilder)
if HTML5LIB_PRESENT:
- self.assertEquals(registry.lookup('permissive', 'html'),
+ self.assertEqual(registry.lookup('permissive', 'html'),
HTML5TreeBuilder)
def test_lookup_by_markup_type(self):
if HTML5LIB_PRESENT:
- self.assertEquals(registry.lookup('html'), HTML5TreeBuilder)
+ self.assertEqual(registry.lookup('html'), HTML5TreeBuilder)
else:
- self.assertEquals(registry.lookup('html'), LXMLTreeBuilder)
- self.assertEquals(registry.lookup('xml'), LXMLTreeBuilderForXML)
+ self.assertEqual(registry.lookup('html'), LXMLTreeBuilder)
+ self.assertEqual(registry.lookup('xml'), LXMLTreeBuilderForXML)
def test_named_library(self):
- self.assertEquals(registry.lookup('lxml', 'xml'),
+ self.assertEqual(registry.lookup('lxml', 'xml'),
LXMLTreeBuilderForXML)
- self.assertEquals(registry.lookup('lxml', 'html'),
+ self.assertEqual(registry.lookup('lxml', 'html'),
LXMLTreeBuilder)
if HTML5LIB_PRESENT:
- self.assertEquals(registry.lookup('html5lib'),
+ self.assertEqual(registry.lookup('html5lib'),
HTML5TreeBuilder)
- self.assertEquals(registry.lookup('html.parser'),
+ self.assertEqual(registry.lookup('html.parser'),
HTMLParserTreeBuilder)
def test_unimplemented_combinations(self):
- self.assertEquals(registry.lookup('fast', 'permissive', 'html'),
+ self.assertEqual(registry.lookup('fast', 'permissive', 'html'),
None)
def test_beautifulsoup_constructor_does_lookup(self):
@@ -85,28 +84,28 @@ class RegistryTest(unittest.TestCase):
# Since the builder advertises no features, you can't find it
# by looking up features.
- self.assertEquals(self.registry.lookup('foo'), None)
+ self.assertEqual(self.registry.lookup('foo'), None)
# But you can find it by doing a lookup with no features, if
# this happens to be the only registered builder.
- self.assertEquals(self.registry.lookup(), builder)
+ self.assertEqual(self.registry.lookup(), builder)
def test_register_with_features_makes_lookup_succeed(self):
builder = self.builder_for_features('foo', 'bar')
- self.assertEquals(self.registry.lookup('foo'), builder)
- self.assertEquals(self.registry.lookup('bar'), builder)
+ self.assertEqual(self.registry.lookup('foo'), builder)
+ self.assertEqual(self.registry.lookup('bar'), builder)
def test_lookup_fails_when_no_builder_implements_feature(self):
builder = self.builder_for_features('foo', 'bar')
- self.assertEquals(self.registry.lookup('baz'), None)
+ self.assertEqual(self.registry.lookup('baz'), None)
def test_lookup_gets_most_recent_registration_when_no_feature_specified(self):
builder1 = self.builder_for_features('foo')
builder2 = self.builder_for_features('bar')
- self.assertEquals(self.registry.lookup(), builder2)
+ self.assertEqual(self.registry.lookup(), builder2)
def test_lookup_fails_when_no_tree_builders_registered(self):
- self.assertEquals(self.registry.lookup(), None)
+ self.assertEqual(self.registry.lookup(), None)
def test_lookup_gets_most_recent_builder_supporting_all_features(self):
has_one = self.builder_for_features('foo')
@@ -118,14 +117,14 @@ class RegistryTest(unittest.TestCase):
# There are two builders featuring 'foo' and 'bar', but
# the one that also features 'quux' was registered later.
- self.assertEquals(self.registry.lookup('foo', 'bar'),
+ self.assertEqual(self.registry.lookup('foo', 'bar'),
has_both_late)
# There is only one builder featuring 'foo', 'bar', and 'baz'.
- self.assertEquals(self.registry.lookup('foo', 'bar', 'baz'),
+ self.assertEqual(self.registry.lookup('foo', 'bar', 'baz'),
has_both_early)
def test_lookup_fails_when_cannot_reconcile_requested_features(self):
builder1 = self.builder_for_features('foo', 'bar')
builder2 = self.builder_for_features('foo', 'baz')
- self.assertEquals(self.registry.lookup('bar', 'baz'), None)
+ self.assertEqual(self.registry.lookup('bar', 'baz'), None)
diff --git a/bs4/tests/test_html5lib.py b/bs4/tests/test_html5lib.py
index 85cedbf..b136ba0 100644
--- a/bs4/tests/test_html5lib.py
+++ b/bs4/tests/test_html5lib.py
@@ -27,7 +27,7 @@ class TestHTML5Builder(TestLXMLBuilder):
markup = "<p>A <b>bold</b> statement.</p>"
soup = self.soup(markup,
parse_only=strainer)
- self.assertEquals(
+ self.assertEqual(
soup.decode(), self.document_for(markup))
def test_bare_string(self):
@@ -60,7 +60,7 @@ class TestHTML5Builder(TestLXMLBuilder):
def test_literal_in_textarea(self):
markup = '<textarea>Junk like <b> tags and <&<&amp;</textarea>'
soup = self.soup(markup)
- self.assertEquals(
+ self.assertEqual(
soup.textarea.contents, ["Junk like <b> tags and <&<&"])
def test_collapsed_whitespace(self):
@@ -122,17 +122,17 @@ class TestHTML5BuilderInvalidMarkup(TestLXMLBuilderInvalidMarkup):
markup = "<div><![CDATA[foo]]>"
soup = self.soup(markup)
data = soup.find(text="[CDATA[foo]]")
- self.assertEquals(data.__class__, Comment)
+ self.assertEqual(data.__class__, Comment)
def test_nonsensical_declaration(self):
# Declarations that don't make any sense are turned into comments.
soup = self.soup('<! Foo = -8><p>a</p>')
- self.assertEquals(str(soup),
+ self.assertEqual(str(soup),
("<!-- Foo = -8-->"
"<html><head></head><body><p>a</p></body></html>"))
soup = self.soup('<p>a</p><! Foo = -8>')
- self.assertEquals(str(soup),
+ self.assertEqual(str(soup),
("<html><head></head><body><p>a</p>"
"<!-- Foo = -8--></body></html>"))
@@ -141,7 +141,7 @@ class TestHTML5BuilderInvalidMarkup(TestLXMLBuilderInvalidMarkup):
soup = self.soup((
'<! DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">'
'<p>foo</p>'))
- self.assertEquals(
+ self.assertEqual(
str(soup),
('<!-- DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"-->'
'<html><head></head><body><p>foo</p></body></html>'))
@@ -154,70 +154,70 @@ class TestHTML5BuilderInvalidMarkup(TestLXMLBuilderInvalidMarkup):
# Let's spell that out a little more explicitly.
soup = self.soup(markup)
str1, comment, str2 = soup.body.contents
- self.assertEquals(str1, 'a')
- self.assertEquals(comment.__class__, Comment)
- self.assertEquals(comment, 'b <p')
- self.assertEquals(str2, 'c')
+ self.assertEqual(str1, 'a')
+ self.assertEqual(comment.__class__, Comment)
+ self.assertEqual(comment, 'b <p')
+ self.assertEqual(str2, 'c')
def test_document_starts_with_bogus_declaration(self):
soup = self.soup('<! Foo >a')
# 'Foo' becomes a comment that appears before the HTML.
comment = soup.contents[0]
self.assertTrue(isinstance(comment, Comment))
- self.assertEquals(comment, 'Foo')
+ self.assertEqual(comment, 'Foo')
- self.assertEquals(self.find(text="a") == "a")
+ self.assertEqual(self.find(text="a") == "a")
def test_attribute_value_was_closed_by_subsequent_tag(self):
markup = """<a href="foo</a>, </a><a href="bar">baz</a>"""
soup = self.soup(markup)
# The string between the first and second quotes was interpreted
# as the value of the 'href' attribute.
- self.assertEquals(soup.a['href'], 'foo</a>, </a><a href=')
+ self.assertEqual(soup.a['href'], 'foo</a>, </a><a href=')
#The string after the second quote (bar"), was treated as an
#empty attribute called bar".
- self.assertEquals(soup.a['bar"'], '')
- self.assertEquals(soup.a.string, "baz")
+ self.assertEqual(soup.a['bar"'], '')
+ self.assertEqual(soup.a.string, "baz")
def test_document_starts_with_bogus_declaration(self):
soup = self.soup('<! Foo ><p>a</p>')
# The declaration becomes a comment.
comment = soup.contents[0]
self.assertTrue(isinstance(comment, Comment))
- self.assertEquals(comment, ' Foo ')
- self.assertEquals(soup.p.string, 'a')
+ self.assertEqual(comment, ' Foo ')
+ self.assertEqual(soup.p.string, 'a')
def test_document_ends_with_incomplete_declaration(self):
soup = self.soup('<p>a<!b')
# This becomes a string 'a'. The incomplete declaration is ignored.
# Compare html5lib, which turns it into a comment.
s, comment = soup.p.contents
- self.assertEquals(s, 'a')
+ self.assertEqual(s, 'a')
self.assertTrue(isinstance(comment, Comment))
- self.assertEquals(comment, 'b')
+ self.assertEqual(comment, 'b')
def test_entity_was_not_finished(self):
soup = self.soup("<p>&lt;Hello&gt")
# Compare html5lib, which completes the entity.
- self.assertEquals(soup.p.string, "<Hello>")
+ self.assertEqual(soup.p.string, "<Hello>")
def test_nonexistent_entity(self):
soup = self.soup("<p>foo&#bar;baz</p>")
- self.assertEquals(soup.p.string, "foo&#bar;baz")
+ self.assertEqual(soup.p.string, "foo&#bar;baz")
# Compare a real entity.
soup = self.soup("<p>foo&#100;baz</p>")
- self.assertEquals(soup.p.string, "foodbaz")
+ self.assertEqual(soup.p.string, "foodbaz")
def test_entity_out_of_range(self):
# An entity that's out of range will be converted to
# REPLACEMENT CHARACTER.
soup = self.soup("<p>&#10000000000000;</p>")
- self.assertEquals(soup.p.string, u"\N{REPLACEMENT CHARACTER}")
+ self.assertEqual(soup.p.string, u"\N{REPLACEMENT CHARACTER}")
soup = self.soup("<p>&#x1000000000000;</p>")
- self.assertEquals(soup.p.string, u"\N{REPLACEMENT CHARACTER}")
+ self.assertEqual(soup.p.string, u"\N{REPLACEMENT CHARACTER}")
@unittest.skipIf(
@@ -233,7 +233,7 @@ class TestHTML5LibEncodingConversion(TestLXMLBuilderEncodingConversion):
# Hebrew encoding) to UTF-8.
soup = self.soup(self.HEBREW_DOCUMENT,
from_encoding="iso-8859-8")
- self.assertEquals(soup.original_encoding, 'iso8859-8')
- self.assertEquals(
+ self.assertEqual(soup.original_encoding, 'iso8859-8')
+ self.assertEqual(
soup.encode('utf-8'),
self.HEBREW_DOCUMENT.decode("iso-8859-8").encode("utf-8"))
diff --git a/bs4/tests/test_htmlparser.py b/bs4/tests/test_htmlparser.py
new file mode 100644
index 0000000..d2db38e
--- /dev/null
+++ b/bs4/tests/test_htmlparser.py
@@ -0,0 +1,126 @@
+from HTMLParser import HTMLParseError
+from bs4.builder import HTMLParserTreeBuilder
+from bs4.element import CData
+from test_lxml import (
+ TestLXMLBuilder,
+ TestLXMLBuilderEncodingConversion,
+ TestLXMLBuilderInvalidMarkup,
+ )
+
+class TestHTMLParserTreeBuilder(TestLXMLBuilder):
+ """See `BuilderSmokeTest`."""
+
+ @property
+ def default_builder(self):
+ return HTMLParserTreeBuilder()
+
+ def test_bare_string(self):
+ # A bare string is turned into some kind of HTML document or
+ # fragment recognizable as the original string.
+ #
+ # HTMLParser does not modify the bare string at all.
+ self.assertSoupEquals("A bare string")
+
+ def test_cdata_where_its_ok(self):
+ # HTMLParser recognizes CDATA sections and passes them through.
+ markup = "<svg><![CDATA[foobar]]></svg>"
+ self.assertSoupEquals(markup)
+ soup = self.soup(markup)
+ string = soup.svg.string
+ self.assertEqual(string, "foobar")
+ self.assertTrue(isinstance(string, CData))
+
+ # These are tests that could be 'fixed' by improving the
+ # HTMLParserTreeBuilder, but I don't think it's worth it. Users
+ # will have fewer headaches if they use one of the other tree
+ # builders.
+
+ def test_empty_element(self):
+ # HTML's empty-element tags are not recognized as such
+ # unless they are presented as empty-element tags.
+ self.assertSoupEquals(
+ "<p>A <meta> tag</p>", "<p>A <meta> tag</meta></p>")
+
+ self.assertSoupEquals(
+ "<p>Foo<br/>bar</p>", "<p>Foo<br />bar</p>")
+
+ def test_entities_in_attribute_values_converted_during_parsing(self):
+
+ # The numeric entity isn't recognized without the closing
+ # semicolon.
+ text = '<x t="pi&#241ata">'
+ expected = u"pi\N{LATIN SMALL LETTER N WITH TILDE}ata"
+ soup = self.soup(text)
+ self.assertEqual(soup.x['t'], "pi&#241ata")
+
+ text = '<x t="pi&#241;ata">'
+ expected = u"pi\N{LATIN SMALL LETTER N WITH TILDE}ata"
+ soup = self.soup(text)
+ self.assertEqual(soup.x['t'], u"pi\xf1ata")
+
+ text = '<x t="pi&#xf1;ata">'
+ soup = self.soup(text)
+ self.assertEqual(soup.x['t'], expected)
+
+ text = '<x t="sacr&eacute; bleu">'
+ soup = self.soup(text)
+ self.assertEqual(
+ soup.x['t'],
+ u"sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu")
+
+ # This can cause valid HTML to become invalid.
+ valid_url = '<a href="http://example.org?a=1&amp;b=2;3">foo</a>'
+ soup = self.soup(valid_url)
+ self.assertEqual(soup.a['href'], "http://example.org?a=1&b=2;3")
+
+ # I think it would be very difficult to 'fix' these tests, judging
+ # from my experience with previous versions of Beautiful Soup.
+ def test_naked_ampersands(self):
+ # Ampersands are treated as entities.
+ text = "<p>AT&T</p>"
+ soup = self.soup(text)
+ self.assertEqual(soup.p.string, "AT&T;")
+
+ def test_literal_in_textarea(self):
+ # Anything inside a <textarea> is supposed to be treated as
+ # the literal value of the field, (XXX citation
+ # needed). html5lib does this correctly. But, HTMLParser does its
+ # best to parse the contents of a <textarea> as HTML.
+ text = '<textarea>Junk like <b> tags and <&<&amp;</textarea>'
+ soup = self.soup(text)
+ self.assertEqual(len(soup.textarea.contents), 2)
+ self.assertEqual(soup.textarea.contents[0], u"Junk like ")
+ self.assertEqual(soup.textarea.contents[1].name, 'b')
+ self.assertEqual(soup.textarea.b.string, u" tags and <&<&")
+
+ def test_literal_in_script(self):
+ # The contents of a <script> tag are supposed to be treated as
+ # a literal string, even if that string contains HTML. But
+ # HTMLParser attempts to parse some of the HTML, causing much
+ # pain.
+ javascript = 'if (i < 2) { alert("<b>foo</b>"); }'
+ soup = self.soup('<script>%s</script>' % javascript)
+ self.assertEqual(soup.script.contents,
+ ['if (i < 2) { alert("<b>foo',
+ '"); }'])
+
+ # Namespaced doctypes cause an HTMLParseError
+ def test_namespaced_system_doctype(self):
+ self.assertRaises(HTMLParseError, self._test_doctype,
+ 'xsl:stylesheet SYSTEM "htmlent.dtd"')
+
+ def test_namespaced_public_doctype(self):
+ self.assertRaises(HTMLParseError, self._test_doctype,
+ 'xsl:stylesheet PUBLIC "htmlent.dtd"')
+
+
+class TestHTMLParserTreeBuilderInvalidMarkup(TestLXMLBuilderInvalidMarkup):
+ # Oddly enough, HTMLParser seems to handle invalid markup exactly
+ # the same as lxml.
+ pass
+
+
+class TestHTMLParserTreeBuilderEncodingConversion(
+ TestLXMLBuilderEncodingConversion):
+ # Re-run the lxml tests for HTMLParser
+ pass
diff --git a/bs4/tests/test_lxml.py b/bs4/tests/test_lxml.py
index 0eafab0..d1f4276 100644
--- a/bs4/tests/test_lxml.py
+++ b/bs4/tests/test_lxml.py
@@ -55,7 +55,7 @@ class TestLXMLBuilder(SoupTest):
soup = self.soup(markup)
comment = soup.find(text="foobar")
- self.assertEquals(comment.__class__, Comment)
+ self.assertEqual(comment.__class__, Comment)
def test_nested_inline_elements(self):
# Inline tags can be nested indefinitely.
@@ -138,28 +138,28 @@ class TestLXMLBuilder(SoupTest):
# best to parse the contents of a <textarea> as HTML.
text = '<textarea>Junk like <b> tags and <&<&amp;</textarea>'
soup = self.soup(text)
- self.assertEquals(len(soup.textarea.contents), 2)
- self.assertEquals(soup.textarea.contents[0], u"Junk like ")
- self.assertEquals(soup.textarea.contents[1].name, 'b')
- self.assertEquals(soup.textarea.b.string, u" tags and ")
+ self.assertEqual(len(soup.textarea.contents), 2)
+ self.assertEqual(soup.textarea.contents[0], u"Junk like ")
+ self.assertEqual(soup.textarea.contents[1].name, 'b')
+ self.assertEqual(soup.textarea.b.string, u" tags and ")
def test_literal_in_script(self):
# The contents of a <script> tag are treated as a literal string,
# even if that string contains HTML.
javascript = 'if (i < 2) { alert("<b>foo</b>"); }'
soup = self.soup('<script>%s</script>' % javascript)
- self.assertEquals(soup.script.string, javascript)
+ self.assertEqual(soup.script.string, javascript)
def test_naked_ampersands(self):
# Ampersands are left alone.
text = "<p>AT&T</p>"
soup = self.soup(text)
- self.assertEquals(soup.p.string, "AT&T")
+ self.assertEqual(soup.p.string, "AT&T")
# Even if they're in attribute values.
invalid_url = '<a href="http://example.org?a=1&b=2;3">foo</a>'
soup = self.soup(invalid_url)
- self.assertEquals(soup.a['href'], "http://example.org?a=1&b=2;3")
+ self.assertEqual(soup.a['href'], "http://example.org?a=1&b=2;3")
def test_entities_in_strings_converted_during_parsing(self):
# Both XML and HTML entities are converted to Unicode characters
@@ -173,13 +173,13 @@ class TestLXMLBuilder(SoupTest):
# parsing.
quote = b"<p>\x91Foo\x92</p>"
soup = self.soup(quote)
- self.assertEquals(
+ self.assertEqual(
soup.p.string,
u"\N{LEFT SINGLE QUOTATION MARK}Foo\N{RIGHT SINGLE QUOTATION MARK}")
def test_non_breaking_spaces_converted_on_the_way_in(self):
soup = self.soup("<a>&nbsp;&nbsp;</a>")
- self.assertEquals(soup.a.string, u"\N{NO-BREAK SPACE}" * 2)
+ self.assertEqual(soup.a.string, u"\N{NO-BREAK SPACE}" * 2)
def test_cdata_where_its_ok(self):
# lxml strips CDATA sections, no matter where they occur.
@@ -192,13 +192,13 @@ class TestLXMLBuilder(SoupTest):
markup = doctype_str + '<p>foo</p>'
soup = self.soup(markup)
doctype = soup.contents[0]
- self.assertEquals(doctype.__class__, Doctype)
- self.assertEquals(doctype, doctype_fragment)
- self.assertEquals(str(soup)[:len(doctype_str)], doctype_str)
+ self.assertEqual(doctype.__class__, Doctype)
+ self.assertEqual(doctype, doctype_fragment)
+ self.assertEqual(str(soup)[:len(doctype_str)], doctype_str)
# Make sure that the doctype was correctly associated with the
# parse tree and that the rest of the document parsed.
- self.assertEquals(soup.p.contents[0], 'foo')
+ self.assertEqual(soup.p.contents[0], 'foo')
def test_doctype(self):
# Test a normal HTML doctype you'll commonly see in a real document.
@@ -238,7 +238,7 @@ class TestLXMLBuilder(SoupTest):
expected = expected.encode("utf-8")
# Ta-da!
- self.assertEquals(result, expected)
+ self.assertEqual(result, expected)
def test_real_shift_jis_document(self):
# Smoke test to make sure the parser can handle a document in
@@ -254,8 +254,8 @@ class TestLXMLBuilder(SoupTest):
# Make sure the parse tree is correctly encoded to various
# encodings.
- self.assertEquals(soup.encode("utf-8"), unicode_html.encode("utf-8"))
- self.assertEquals(soup.encode("euc_jp"), unicode_html.encode("euc_jp"))
+ self.assertEqual(soup.encode("utf-8"), unicode_html.encode("utf-8"))
+ self.assertEqual(soup.encode("euc_jp"), unicode_html.encode("euc_jp"))
# Tests below this line need work.
@@ -275,9 +275,9 @@ class TestLXMLBuilder(SoupTest):
# Parse the document, and the charset is replaced with a
# generic value.
parsed_meta = soup.find('meta', {'http-equiv': 'Content-type'})
- self.assertEquals(parsed_meta['content'],
+ self.assertEqual(parsed_meta['content'],
'text/html; charset=%SOUP-ENCODING%')
- self.assertEquals(parsed_meta.contains_substitutions, True)
+ self.assertEqual(parsed_meta.contains_substitutions, True)
# For the rest of the story, see TestSubstitutions in
# test_tree.py.
@@ -287,25 +287,25 @@ class TestLXMLBuilder(SoupTest):
expected = u"&lt;&lt;sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;".encode("utf-8")
soup = self.soup(text)
str = soup.p.string
- #self.assertEquals(str.encode("utf-8"), expected)
+ #self.assertEqual(str.encode("utf-8"), expected)
def test_br_tag_is_empty_element(self):
"""A <br> tag is designated as an empty-element tag."""
soup = self.soup("<br></br>")
self.assertTrue(soup.br.is_empty_element)
- self.assertEquals(str(soup.br), "<br />")
+ self.assertEqual(str(soup.br), "<br />")
def test_p_tag_is_not_empty_element(self):
"""A <p> tag is not designated as an empty-element tag."""
soup = self.soup("<p />")
self.assertFalse(soup.p.is_empty_element)
- self.assertEquals(str(soup.p), "<p></p>")
+ self.assertEqual(str(soup.p), "<p></p>")
def test_soupstrainer(self):
strainer = SoupStrainer("b")
soup = self.soup("A <b>bold</b> <meta /> <i>statement</i>",
parse_only=strainer)
- self.assertEquals(soup.decode(), "<b>bold</b>")
+ self.assertEqual(soup.decode(), "<b>bold</b>")
class TestLXMLBuilderInvalidMarkup(SoupTest):
@@ -348,7 +348,7 @@ class TestLXMLBuilderInvalidMarkup(SoupTest):
def test_boolean_attribute_with_no_value_gets_empty_value(self):
soup = self.soup("<table><td nowrap>foo</td></table>")
- self.assertEquals(soup.table.td['nowrap'], '')
+ self.assertEqual(soup.table.td['nowrap'], '')
def test_incorrectly_nested_tables(self):
self.assertSoupEquals(
@@ -359,7 +359,7 @@ class TestLXMLBuilderInvalidMarkup(SoupTest):
markup = self.soup("<p>this is the definition:"
"<dl><dt>first case</dt>")
# The <p> tag is closed before the <dl> tag begins.
- self.assertEquals(markup.p.contents, ["this is the definition:"])
+ self.assertEqual(markup.p.contents, ["this is the definition:"])
def test_empty_element_tag_with_contents(self):
self.assertSoupEquals("<br>foo</br>", "<br />foo")
@@ -391,7 +391,7 @@ class TestLXMLBuilderInvalidMarkup(SoupTest):
def test_attribute_value_never_got_closed(self):
markup = '<a href="http://foo.com/</a> and blah and blah'
soup = self.soup(markup)
- self.assertEquals(
+ self.assertEqual(
soup.a['href'], "http://foo.com/</a> and blah and blah")
def test_attribute_value_was_closed_by_subsequent_tag(self):
@@ -399,28 +399,28 @@ class TestLXMLBuilderInvalidMarkup(SoupTest):
soup = self.soup(markup)
# The string between the first and second quotes was interpreted
# as the value of the 'href' attribute.
- self.assertEquals(soup.a['href'], 'foo</a>, </a><a href=')
+ self.assertEqual(soup.a['href'], 'foo</a>, </a><a href=')
#The string after the second quote (bar"), was treated as an
#empty attribute called bar.
- self.assertEquals(soup.a['bar'], '')
- self.assertEquals(soup.a.string, "baz")
+ self.assertEqual(soup.a['bar'], '')
+ self.assertEqual(soup.a.string, "baz")
def test_unquoted_attribute_value(self):
soup = self.soup('<a style={height:21px;}></a>')
- self.assertEquals(soup.a['style'], '{height:21px;}')
+ self.assertEqual(soup.a['style'], '{height:21px;}')
def test_attribute_value_with_embedded_brackets(self):
soup = self.soup('<a b="<a>">')
- self.assertEquals(soup.a['b'], '<a>')
+ self.assertEqual(soup.a['b'], '<a>')
def test_nonexistent_entity(self):
soup = self.soup("<p>foo&#bar;baz</p>")
- self.assertEquals(soup.p.string, "foobar;baz")
+ self.assertEqual(soup.p.string, "foobar;baz")
# Compare a real entity.
soup = self.soup("<p>foo&#100;baz</p>")
- self.assertEquals(soup.p.string, "foodbaz")
+ self.assertEqual(soup.p.string, "foodbaz")
# Also compare html5lib, which preserves the &# before the
# entity name.
@@ -428,27 +428,27 @@ class TestLXMLBuilderInvalidMarkup(SoupTest):
def test_entity_out_of_range(self):
# An entity that's out of range will be ignored.
soup = self.soup("<p>&#10000000000000;</p>")
- self.assertEquals(soup.p.string, None)
+ self.assertEqual(soup.p.string, None)
soup = self.soup("<p>&#x1000000000000;</p>")
- self.assertEquals(soup.p.string, None)
+ self.assertEqual(soup.p.string, None)
def test_entity_was_not_finished(self):
soup = self.soup("<p>&lt;Hello&gt")
# Compare html5lib, which completes the entity.
- self.assertEquals(soup.p.string, "<Hello&gt")
+ self.assertEqual(soup.p.string, "<Hello&gt")
def test_document_ends_with_incomplete_declaration(self):
soup = self.soup('<p>a<!b')
# This becomes a string 'a'. The incomplete declaration is ignored.
# Compare html5lib, which turns it into a comment.
- self.assertEquals(soup.p.contents, ['a'])
+ self.assertEqual(soup.p.contents, ['a'])
def test_document_starts_with_bogus_declaration(self):
soup = self.soup('<! Foo ><p>a</p>')
# The declaration is ignored altogether.
- self.assertEquals(soup.encode(), b"<html><body><p>a</p></body></html>")
+ self.assertEqual(soup.encode(), b"<html><body><p>a</p></body></html>")
def test_tag_name_contains_unicode(self):
# Unicode characters in tag names are stripped.
@@ -475,28 +475,28 @@ class TestLXMLBuilderEncodingConversion(SoupTest):
soup_from_ascii = self.soup(ascii)
unicode_output = soup_from_ascii.decode()
self.assertTrue(isinstance(unicode_output, unicode))
- self.assertEquals(unicode_output, self.document_for(ascii.decode()))
- self.assertEquals(soup_from_ascii.original_encoding, "ascii")
+ self.assertEqual(unicode_output, self.document_for(ascii.decode()))
+ self.assertEqual(soup_from_ascii.original_encoding, "ascii")
def test_unicode_in_unicode_out(self):
# Unicode input is left alone. The original_encoding attribute
# is not set.
soup_from_unicode = self.soup(self.unicode_data)
- self.assertEquals(soup_from_unicode.decode(), self.unicode_data)
- self.assertEquals(soup_from_unicode.foo.string, u'Sacr\xe9 bleu!')
- self.assertEquals(soup_from_unicode.original_encoding, None)
+ self.assertEqual(soup_from_unicode.decode(), self.unicode_data)
+ self.assertEqual(soup_from_unicode.foo.string, u'Sacr\xe9 bleu!')
+ self.assertEqual(soup_from_unicode.original_encoding, None)
def test_utf8_in_unicode_out(self):
# UTF-8 input is converted to Unicode. The original_encoding
# attribute is set.
soup_from_utf8 = self.soup(self.utf8_data)
- self.assertEquals(soup_from_utf8.decode(), self.unicode_data)
- self.assertEquals(soup_from_utf8.foo.string, u'Sacr\xe9 bleu!')
+ self.assertEqual(soup_from_utf8.decode(), self.unicode_data)
+ self.assertEqual(soup_from_utf8.foo.string, u'Sacr\xe9 bleu!')
def test_utf8_out(self):
# The internal data structures can be encoded as UTF-8.
soup_from_unicode = self.soup(self.unicode_data)
- self.assertEquals(soup_from_unicode.encode('utf-8'), self.utf8_data)
+ self.assertEqual(soup_from_unicode.encode('utf-8'), self.utf8_data)
HEBREW_DOCUMENT = b'<html><head><title>Hebrew (ISO 8859-8) in Visual Directionality</title></head><body><h1>Hebrew (ISO 8859-8) in Visual Directionality</h1>\xed\xe5\xec\xf9</body></html>'
@@ -505,8 +505,8 @@ class TestLXMLBuilderEncodingConversion(SoupTest):
# Hebrew encoding) to UTF-8.
soup = self.soup(self.HEBREW_DOCUMENT,
from_encoding="iso-8859-8")
- self.assertEquals(soup.original_encoding, 'iso-8859-8')
- self.assertEquals(
+ self.assertEqual(soup.original_encoding, 'iso-8859-8')
+ self.assertEqual(
soup.encode('utf-8'),
self.HEBREW_DOCUMENT.decode("iso-8859-8").encode("utf-8"))
@@ -539,7 +539,7 @@ class TestLXMLXMLBuilder(SoupTest):
markup = "<foo><![CDATA[iamcdata]]></foo>"
soup = self.soup(markup)
cdata = soup.foo.contents[0]
- self.assertEquals(cdata.__class__.__name__, 'NavigableString')
+ self.assertEqual(cdata.__class__.__name__, 'NavigableString')
def test_can_handle_invalid_xml(self):
@@ -562,20 +562,20 @@ class TestLXMLXMLBuilder(SoupTest):
self.assertTrue(soup.bar.is_empty_element)
soup.bar.insert(1, "Contents")
self.assertFalse(soup.bar.is_empty_element)
- self.assertEquals(str(soup), self.document_for("<bar>Contents</bar>"))
+ self.assertEqual(str(soup), self.document_for("<bar>Contents</bar>"))
def test_designated_empty_element_tag_has_no_closing_tag(self):
builder = LXMLTreeBuilderForXML(empty_element_tags=['bar'])
soup = BeautifulSoup(builder=builder, markup="<bar></bar>")
self.assertTrue(soup.bar.is_empty_element)
- self.assertEquals(str(soup), self.document_for("<bar />"))
+ self.assertEqual(str(soup), self.document_for("<bar />"))
def test_empty_tag_not_in_empty_element_tag_list_has_closing_tag(self):
builder = LXMLTreeBuilderForXML(empty_element_tags=['bar'])
soup = BeautifulSoup(builder=builder, markup="<foo />")
self.assertFalse(soup.foo.is_empty_element)
- self.assertEquals(str(soup), self.document_for("<foo></foo>"))
+ self.assertEqual(str(soup), self.document_for("<foo></foo>"))
def test_designated_empty_element_tag_does_not_change_parser_behavior(self):
# The designated list of empty-element tags only affects how
@@ -583,4 +583,4 @@ class TestLXMLXMLBuilder(SoupTest):
# parsed--that's the parser's job.
builder = LXMLTreeBuilderForXML(empty_element_tags=['bar'])
soup = BeautifulSoup(builder=builder, markup="<bar>contents</bar>")
- self.assertEquals(str(soup), self.document_for("<bar>contents</bar>"))
+ self.assertEqual(str(soup), self.document_for("<bar>contents</bar>"))
diff --git a/bs4/tests/test_soup.py b/bs4/tests/test_soup.py
index 3c5d742..404a468 100644
--- a/bs4/tests/test_soup.py
+++ b/bs4/tests/test_soup.py
@@ -13,7 +13,7 @@ class TestSelectiveParsing(SoupTest):
markup = "No<b>Yes</b><a>No<b>Yes <c>Yes</c></b>"
strainer = SoupStrainer("b")
soup = self.soup(markup, parse_only=strainer)
- self.assertEquals(soup.encode(), b"<b>Yes</b><b>Yes <c>Yes</c></b>")
+ self.assertEqual(soup.encode(), b"<b>Yes</b><b>Yes <c>Yes</c></b>")
class TestEntitySubstitution(unittest.TestCase):
@@ -25,7 +25,7 @@ class TestEntitySubstitution(unittest.TestCase):
# Unicode characters corresponding to named HTML entites
# are substituted, and no others.
s = u"foo\u2200\N{SNOWMAN}\u00f5bar"
- self.assertEquals(self.sub.substitute_html(s),
+ self.assertEqual(self.sub.substitute_html(s),
u"foo&forall;\N{SNOWMAN}&otilde;bar")
def test_smart_quote_substitution(self):
@@ -33,51 +33,51 @@ class TestEntitySubstitution(unittest.TestCase):
# give them a special test.
quotes = b"\x91\x92foo\x93\x94"
dammit = UnicodeDammit(quotes)
- self.assertEquals(self.sub.substitute_html(dammit.markup),
+ self.assertEqual(self.sub.substitute_html(dammit.markup),
"&lsquo;&rsquo;foo&ldquo;&rdquo;")
def test_xml_converstion_includes_no_quotes_if_make_quoted_attribute_is_false(self):
s = 'Welcome to "my bar"'
- self.assertEquals(self.sub.substitute_xml(s, False), s)
+ self.assertEqual(self.sub.substitute_xml(s, False), s)
def test_xml_attribute_quoting_normally_uses_double_quotes(self):
- self.assertEquals(self.sub.substitute_xml("Welcome", True),
+ self.assertEqual(self.sub.substitute_xml("Welcome", True),
'"Welcome"')
- self.assertEquals(self.sub.substitute_xml("Bob's Bar", True),
+ self.assertEqual(self.sub.substitute_xml("Bob's Bar", True),
'"Bob\'s Bar"')
def test_xml_attribute_quoting_uses_single_quotes_when_value_contains_double_quotes(self):
s = 'Welcome to "my bar"'
- self.assertEquals(self.sub.substitute_xml(s, True),
+ self.assertEqual(self.sub.substitute_xml(s, True),
"'Welcome to \"my bar\"'")
def test_xml_attribute_quoting_escapes_single_quotes_when_value_contains_both_single_and_double_quotes(self):
s = 'Welcome to "Bob\'s Bar"'
- self.assertEquals(
+ self.assertEqual(
self.sub.substitute_xml(s, True),
'"Welcome to &quot;Bob\'s Bar&quot;"')
def test_xml_quotes_arent_escaped_when_value_is_not_being_quoted(self):
quoted = 'Welcome to "Bob\'s Bar"'
- self.assertEquals(self.sub.substitute_xml(quoted), quoted)
+ self.assertEqual(self.sub.substitute_xml(quoted), quoted)
def test_xml_quoting_handles_angle_brackets(self):
- self.assertEquals(
+ self.assertEqual(
self.sub.substitute_xml("foo<bar>"),
"foo&lt;bar&gt;")
def test_xml_quoting_handles_ampersands(self):
- self.assertEquals(self.sub.substitute_xml("AT&T"), "AT&amp;T")
+ self.assertEqual(self.sub.substitute_xml("AT&T"), "AT&amp;T")
def test_xml_quoting_ignores_ampersands_when_they_are_part_of_an_entity(self):
- self.assertEquals(
+ self.assertEqual(
self.sub.substitute_xml("&Aacute;T&T"),
"&Aacute;T&amp;T")
def test_quotes_not_html_substituted(self):
"""There's no need to do this except inside attribute values."""
text = 'Bob\'s "bar"'
- self.assertEquals(self.sub.substitute_html(text), text)
+ self.assertEqual(self.sub.substitute_html(text), text)
class TestUnicodeDammit(unittest.TestCase):
"""Standalone tests of Unicode, Dammit."""
@@ -85,46 +85,46 @@ class TestUnicodeDammit(unittest.TestCase):
def test_smart_quotes_to_unicode(self):
markup = b"<foo>\x91\x92\x93\x94</foo>"
dammit = UnicodeDammit(markup)
- self.assertEquals(
+ self.assertEqual(
dammit.unicode_markup, u"<foo>\u2018\u2019\u201c\u201d</foo>")
def test_smart_quotes_to_xml_entities(self):
markup = b"<foo>\x91\x92\x93\x94</foo>"
dammit = UnicodeDammit(markup, smart_quotes_to="xml")
- self.assertEquals(
+ self.assertEqual(
dammit.unicode_markup, "<foo>&#x2018;&#x2019;&#x201C;&#x201D;</foo>")
def test_smart_quotes_to_html_entities(self):
markup = b"<foo>\x91\x92\x93\x94</foo>"
dammit = UnicodeDammit(markup, smart_quotes_to="html")
- self.assertEquals(
+ self.assertEqual(
dammit.unicode_markup, "<foo>&lsquo;&rsquo;&ldquo;&rdquo;</foo>")
def test_detect_utf8(self):
utf8 = b"\xc3\xa9"
dammit = UnicodeDammit(utf8)
- self.assertEquals(dammit.unicode_markup, u'\xe9')
- self.assertEquals(dammit.original_encoding, 'utf-8')
+ self.assertEqual(dammit.unicode_markup, u'\xe9')
+ self.assertEqual(dammit.original_encoding, 'utf-8')
def test_convert_hebrew(self):
hebrew = b"\xed\xe5\xec\xf9"
dammit = UnicodeDammit(hebrew, ["iso-8859-8"])
- self.assertEquals(dammit.original_encoding, 'iso-8859-8')
- self.assertEquals(dammit.unicode_markup, u'\u05dd\u05d5\u05dc\u05e9')
+ self.assertEqual(dammit.original_encoding, 'iso-8859-8')
+ self.assertEqual(dammit.unicode_markup, u'\u05dd\u05d5\u05dc\u05e9')
def test_dont_see_smart_quotes_where_there_are_none(self):
utf_8 = b"\343\202\261\343\203\274\343\202\277\343\202\244 Watch"
dammit = UnicodeDammit(utf_8)
- self.assertEquals(dammit.original_encoding, 'utf-8')
- self.assertEquals(dammit.unicode_markup.encode("utf-8"), utf_8)
+ self.assertEqual(dammit.original_encoding, 'utf-8')
+ self.assertEqual(dammit.unicode_markup.encode("utf-8"), utf_8)
def test_ignore_inappropriate_codecs(self):
utf8_data = u"Räksmörgås".encode("utf-8")
dammit = UnicodeDammit(utf8_data, ["iso-8859-8"])
- self.assertEquals(dammit.original_encoding, 'utf-8')
+ self.assertEqual(dammit.original_encoding, 'utf-8')
def test_ignore_invalid_codecs(self):
utf8_data = u"Räksmörgås".encode("utf-8")
for bad_encoding in ['.utf8', '...', 'utF---16.!']:
dammit = UnicodeDammit(utf8_data, [bad_encoding])
- self.assertEquals(dammit.original_encoding, 'utf-8')
+ self.assertEqual(dammit.original_encoding, 'utf-8')
diff --git a/bs4/tests/test_tree.py b/bs4/tests/test_tree.py
index 865ac68..f8a55e0 100644
--- a/bs4/tests/test_tree.py
+++ b/bs4/tests/test_tree.py
@@ -279,16 +279,16 @@ class TestParentOperations(TreeTest):
def test_parent(self):
- self.assertEquals(self.start.parent['id'], 'bottom')
- self.assertEquals(self.start.parent.parent['id'], 'middle')
- self.assertEquals(self.start.parent.parent.parent['id'], 'top')
+ self.assertEqual(self.start.parent['id'], 'bottom')
+ self.assertEqual(self.start.parent.parent['id'], 'middle')
+ self.assertEqual(self.start.parent.parent.parent['id'], 'top')
def test_parent_of_top_tag_is_soup_object(self):
top_tag = self.tree.contents[0]
- self.assertEquals(top_tag.parent, self.tree)
+ self.assertEqual(top_tag.parent, self.tree)
def test_soup_object_has_no_parent(self):
- self.assertEquals(None, self.tree.parent)
+ self.assertEqual(None, self.tree.parent)
def test_find_parents(self):
self.assertSelectsIDs(
@@ -297,20 +297,20 @@ class TestParentOperations(TreeTest):
self.start.find_parents('ul', id="middle"), ['middle'])
def test_find_parent(self):
- self.assertEquals(self.start.find_parent('ul')['id'], 'bottom')
+ self.assertEqual(self.start.find_parent('ul')['id'], 'bottom')
def test_parent_of_text_element(self):
text = self.tree.find(text="Start here")
- self.assertEquals(text.parent.name, 'b')
+ self.assertEqual(text.parent.name, 'b')
def test_text_element_find_parent(self):
text = self.tree.find(text="Start here")
- self.assertEquals(text.find_parent('ul')['id'], 'bottom')
+ self.assertEqual(text.find_parent('ul')['id'], 'bottom')
def test_parent_generator(self):
parents = [parent['id'] for parent in self.start.parents
if parent is not None and 'id' in parent.attrs]
- self.assertEquals(parents, ['bottom', 'middle', 'top'])
+ self.assertEqual(parents, ['bottom', 'middle', 'top'])
class ProximityTest(TreeTest):
@@ -328,16 +328,16 @@ class TestNextOperations(ProximityTest):
self.start = self.tree.b
def test_next(self):
- self.assertEquals(self.start.next_element, "One")
- self.assertEquals(self.start.next_element.next_element['id'], "2")
+ self.assertEqual(self.start.next_element, "One")
+ self.assertEqual(self.start.next_element.next_element['id'], "2")
def test_next_of_last_item_is_none(self):
last = self.tree.find(text="Three")
- self.assertEquals(last.next_element, None)
+ self.assertEqual(last.next_element, None)
def test_next_of_root_is_none(self):
# The document root is outside the next/previous chain.
- self.assertEquals(self.tree.next_element, None)
+ self.assertEqual(self.tree.next_element, None)
def test_find_all_next(self):
self.assertSelects(self.start.find_all_next('b'), ["Two", "Three"])
@@ -345,12 +345,12 @@ class TestNextOperations(ProximityTest):
self.assertSelects(self.start.find_all_next(id=3), ["Three"])
def test_find_next(self):
- self.assertEquals(self.start.find_next('b')['id'], '2')
- self.assertEquals(self.start.find_next(text="Three"), "Three")
+ self.assertEqual(self.start.find_next('b')['id'], '2')
+ self.assertEqual(self.start.find_next(text="Three"), "Three")
def test_find_next_for_text_element(self):
text = self.tree.find(text="One")
- self.assertEquals(text.find_next("b").string, "Two")
+ self.assertEqual(text.find_next("b").string, "Two")
self.assertSelects(text.find_all_next("b"), ["Two", "Three"])
def test_next_generator(self):
@@ -359,9 +359,9 @@ class TestNextOperations(ProximityTest):
# There are two successors: the final <b> tag and its text contents.
# Then we go off the end.
tag, contents, none = successors
- self.assertEquals(tag['id'], '3')
- self.assertEquals(contents, "Three")
- self.assertEquals(none, None)
+ self.assertEqual(tag['id'], '3')
+ self.assertEqual(contents, "Three")
+ self.assertEqual(none, None)
# XXX Should next_elements really return None? Seems like it
# should just stop.
@@ -374,17 +374,17 @@ class TestPreviousOperations(ProximityTest):
self.end = self.tree.find(text="Three")
def test_previous(self):
- self.assertEquals(self.end.previous_element['id'], "3")
- self.assertEquals(self.end.previous_element.previous_element, "Two")
+ self.assertEqual(self.end.previous_element['id'], "3")
+ self.assertEqual(self.end.previous_element.previous_element, "Two")
def test_previous_of_first_item_is_none(self):
first = self.tree.find('html')
- self.assertEquals(first.previous_element, None)
+ self.assertEqual(first.previous_element, None)
def test_previous_of_root_is_none(self):
# The document root is outside the next/previous chain.
# XXX This is broken!
- #self.assertEquals(self.tree.previous_element, None)
+ #self.assertEqual(self.tree.previous_element, None)
pass
def test_find_all_previous(self):
@@ -396,12 +396,12 @@ class TestPreviousOperations(ProximityTest):
self.assertSelects(self.end.find_all_previous(id=1), ["One"])
def test_find_previous(self):
- self.assertEquals(self.end.find_previous('b')['id'], '3')
- self.assertEquals(self.end.find_previous(text="One"), "One")
+ self.assertEqual(self.end.find_previous('b')['id'], '3')
+ self.assertEqual(self.end.find_previous(text="One"), "One")
def test_find_previous_for_text_element(self):
text = self.tree.find(text="Three")
- self.assertEquals(text.find_previous("b").string, "Three")
+ self.assertEqual(text.find_previous("b").string, "Three")
self.assertSelects(
text.find_all_previous("b"), ["Three", "Two", "One"])
@@ -413,11 +413,11 @@ class TestPreviousOperations(ProximityTest):
# the <body> tag, the <head> tag, and the <html> tag. Then we
# go off the end.
b, body, head, html, none = predecessors
- self.assertEquals(b['id'], '1')
- self.assertEquals(body.name, "body")
- self.assertEquals(head.name, "head")
- self.assertEquals(html.name, "html")
- self.assertEquals(none, None)
+ self.assertEqual(b['id'], '1')
+ self.assertEqual(body.name, "body")
+ self.assertEqual(head.name, "head")
+ self.assertEqual(html.name, "html")
+ self.assertEqual(none, None)
# Again, we shouldn't be returning None.
@@ -451,26 +451,26 @@ class TestNextSibling(SiblingTest):
self.start = self.tree.find(id="1")
def test_next_sibling_of_root_is_none(self):
- self.assertEquals(self.tree.next_sibling, None)
+ self.assertEqual(self.tree.next_sibling, None)
def test_next_sibling(self):
- self.assertEquals(self.start.next_sibling['id'], '2')
- self.assertEquals(self.start.next_sibling.next_sibling['id'], '3')
+ self.assertEqual(self.start.next_sibling['id'], '2')
+ self.assertEqual(self.start.next_sibling.next_sibling['id'], '3')
# Note the difference between next_sibling and next_element.
- self.assertEquals(self.start.next_element['id'], '1.1')
+ self.assertEqual(self.start.next_element['id'], '1.1')
def test_next_sibling_may_not_exist(self):
- self.assertEquals(self.tree.html.next_sibling, None)
+ self.assertEqual(self.tree.html.next_sibling, None)
nested_span = self.tree.find(id="1.1")
- self.assertEquals(nested_span.next_sibling, None)
+ self.assertEqual(nested_span.next_sibling, None)
last_span = self.tree.find(id="4")
- self.assertEquals(last_span.next_sibling, None)
+ self.assertEqual(last_span.next_sibling, None)
def test_find_next_sibling(self):
- self.assertEquals(self.start.find_next_sibling('span')['id'], '2')
+ self.assertEqual(self.start.find_next_sibling('span')['id'], '2')
def test_next_siblings(self):
self.assertSelectsIDs(self.start.find_next_siblings("span"),
@@ -481,12 +481,12 @@ class TestNextSibling(SiblingTest):
def test_next_sibling_for_text_element(self):
soup = self.soup("Foo<b>bar</b>baz")
start = soup.find(text="Foo")
- self.assertEquals(start.next_sibling.name, 'b')
- self.assertEquals(start.next_sibling.next_sibling, 'baz')
+ self.assertEqual(start.next_sibling.name, 'b')
+ self.assertEqual(start.next_sibling.next_sibling, 'baz')
self.assertSelects(start.find_next_siblings('b'), ['bar'])
- self.assertEquals(start.find_next_sibling(text="baz"), "baz")
- self.assertEquals(start.find_next_sibling(text="nonesuch"), None)
+ self.assertEqual(start.find_next_sibling(text="baz"), "baz")
+ self.assertEqual(start.find_next_sibling(text="nonesuch"), None)
class TestPreviousSibling(SiblingTest):
@@ -496,26 +496,26 @@ class TestPreviousSibling(SiblingTest):
self.end = self.tree.find(id="4")
def test_previous_sibling_of_root_is_none(self):
- self.assertEquals(self.tree.previous_sibling, None)
+ self.assertEqual(self.tree.previous_sibling, None)
def test_previous_sibling(self):
- self.assertEquals(self.end.previous_sibling['id'], '3')
- self.assertEquals(self.end.previous_sibling.previous_sibling['id'], '2')
+ self.assertEqual(self.end.previous_sibling['id'], '3')
+ self.assertEqual(self.end.previous_sibling.previous_sibling['id'], '2')
# Note the difference between previous_sibling and previous_element.
- self.assertEquals(self.end.previous_element['id'], '3.1')
+ self.assertEqual(self.end.previous_element['id'], '3.1')
def test_previous_sibling_may_not_exist(self):
- self.assertEquals(self.tree.html.previous_sibling, None)
+ self.assertEqual(self.tree.html.previous_sibling, None)
nested_span = self.tree.find(id="1.1")
- self.assertEquals(nested_span.previous_sibling, None)
+ self.assertEqual(nested_span.previous_sibling, None)
first_span = self.tree.find(id="1")
- self.assertEquals(first_span.previous_sibling, None)
+ self.assertEqual(first_span.previous_sibling, None)
def test_find_previous_sibling(self):
- self.assertEquals(self.end.find_previous_sibling('span')['id'], '3')
+ self.assertEqual(self.end.find_previous_sibling('span')['id'], '3')
def test_previous_siblings(self):
self.assertSelectsIDs(self.end.find_previous_siblings("span"),
@@ -526,12 +526,12 @@ class TestPreviousSibling(SiblingTest):
def test_previous_sibling_for_text_element(self):
soup = self.soup("Foo<b>bar</b>baz")
start = soup.find(text="baz")
- self.assertEquals(start.previous_sibling.name, 'b')
- self.assertEquals(start.previous_sibling.previous_sibling, 'Foo')
+ self.assertEqual(start.previous_sibling.name, 'b')
+ self.assertEqual(start.previous_sibling.previous_sibling, 'Foo')
self.assertSelects(start.find_previous_siblings('b'), ['bar'])
- self.assertEquals(start.find_previous_sibling(text="Foo"), "Foo")
- self.assertEquals(start.find_previous_sibling(text="nonesuch"), None)
+ self.assertEqual(start.find_previous_sibling(text="Foo"), "Foo")
+ self.assertEqual(start.find_previous_sibling(text="nonesuch"), None)
class TestTreeModification(SoupTest):
@@ -580,7 +580,7 @@ class TestTreeModification(SoupTest):
soup = self.soup(text)
c = soup.c
soup.c.replace_with(c)
- self.assertEquals(soup.decode(), self.document_for(text))
+ self.assertEqual(soup.decode(), self.document_for(text))
def test_replace_final_node(self):
soup = self.soup("<b>Argh!</b>")
@@ -650,22 +650,22 @@ class TestTreeModification(SoupTest):
# I'm letting this succeed for now.
soup = self.soup("<br />")
soup.br.insert(1, "Contents")
- self.assertEquals(str(soup.br), "<br>Contents</br>")
+ self.assertEqual(str(soup.br), "<br>Contents</br>")
def test_replace_with(self):
soup = self.soup(
"<p>There's <b>no</b> business like <b>show</b> business</p>")
no, show = soup.find_all('b')
show.replace_with(no)
- self.assertEquals(
+ self.assertEqual(
soup.decode(),
self.document_for(
"<p>There's business like <b>no</b> business</p>"))
- self.assertEquals(show.parent, None)
- self.assertEquals(no.parent, soup.p)
- self.assertEquals(no.next_element, "no")
- self.assertEquals(no.next_sibling, " business")
+ self.assertEqual(show.parent, None)
+ self.assertEqual(no.parent, soup.p)
+ self.assertEqual(no.next_element, "no")
+ self.assertEqual(no.next_sibling, " business")
def test_nested_tag_replace_with(self):
soup = self.soup(
@@ -731,10 +731,10 @@ class TestTreeModification(SoupTest):
# The gap where the extracted tag used to be has been mended.
content_1 = soup.find(text="Some content. ")
content_2 = soup.find(text=" More content.")
- self.assertEquals(content_1.next_element, content_2)
- self.assertEquals(content_1.next_sibling, content_2)
- self.assertEquals(content_2.previous_element, content_1)
- self.assertEquals(content_2.previous_sibling, content_1)
+ self.assertEqual(content_1.next_element, content_2)
+ self.assertEqual(content_1.next_sibling, content_2)
+ self.assertEqual(content_2.previous_element, content_1)
+ self.assertEqual(content_2.previous_sibling, content_1)
def test_clear(self):
"""Tag.clear()"""
@@ -768,13 +768,13 @@ class TestElementObjects(SoupTest):
# The BeautifulSoup object itself contains one element: the
# <top> tag.
- self.assertEquals(len(soup.contents), 1)
- self.assertEquals(len(soup), 1)
+ self.assertEqual(len(soup.contents), 1)
+ self.assertEqual(len(soup), 1)
# The <top> tag contains three elements: the text node "1", the
# <b> tag, and the text node "3".
- self.assertEquals(len(soup.top), 3)
- self.assertEquals(len(soup.top.contents), 3)
+ self.assertEqual(len(soup.top), 3)
+ self.assertEqual(len(soup.top.contents), 3)
def test_member_access_invokes_find(self):
"""Accessing a Python member .foo or .fooTag invokes find('foo')"""
@@ -810,7 +810,7 @@ class TestElementObjects(SoupTest):
# A tag that contains only a text node makes that node
# available as .string.
soup = self.soup("<b>foo</b>")
- self.assertEquals(soup.b.string, 'foo')
+ self.assertEqual(soup.b.string, 'foo')
def test_empty_tag_has_no_string(self):
# A tag with no children has no .stirng.
@@ -906,14 +906,14 @@ class TestSubstitutions(SoupTest):
soup = self.soup(
u"<b>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</b>")
decoded = soup.decode(substitute_html_entities=True)
- self.assertEquals(decoded,
+ self.assertEqual(decoded,
self.document_for("<b>Sacr&eacute; bleu!</b>"))
def test_html_entity_substitution_off_by_default(self):
markup = u"<b>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</b>"
soup = self.soup(markup)
encoded = soup.b.encode("utf-8")
- self.assertEquals(encoded, markup.encode('utf-8'))
+ self.assertEqual(encoded, markup.encode('utf-8'))
def test_encoding_substitution(self):
# Here's the <meta> tag saying that a document is
@@ -924,7 +924,7 @@ class TestSubstitutions(SoupTest):
# Parse the document, and the charset is replaced with a
# generic value.
- self.assertEquals(soup.meta['content'],
+ self.assertEqual(soup.meta['content'],
'text/html; charset=%SOUP-ENCODING%')
# Encode the document into some encoding, and the encoding is
@@ -950,7 +950,7 @@ class TestSubstitutions(SoupTest):
# sure that doesn't happen.
strainer = SoupStrainer('pre')
soup = self.soup(markup, parse_only=strainer)
- self.assertEquals(soup.contents[0].name, 'pre')
+ self.assertEqual(soup.contents[0].name, 'pre')
class TestEncoding(SoupTest):
@@ -959,13 +959,13 @@ class TestEncoding(SoupTest):
def test_unicode_string_can_be_encoded(self):
html = u"<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
- self.assertEquals(soup.b.string.encode("utf-8"),
+ self.assertEqual(soup.b.string.encode("utf-8"),
u"\N{SNOWMAN}".encode("utf-8"))
def test_tag_containing_unicode_string_can_be_encoded(self):
html = u"<b>\N{SNOWMAN}</b>"
soup = self.soup(html)
- self.assertEquals(
+ self.assertEqual(
soup.b.encode("utf-8"), html.encode("utf-8"))
@@ -977,6 +977,6 @@ class TestNavigableStringSubclasses(SoupTest):
soup = self.soup("")
cdata = CData("foo")
soup.insert(1, cdata)
- self.assertEquals(str(soup), "<![CDATA[foo]]>")
- self.assertEquals(soup.find(text="foo"), "foo")
- self.assertEquals(soup.contents[0], "foo")
+ self.assertEqual(str(soup), "<![CDATA[foo]]>")
+ self.assertEqual(soup.find(text="foo"), "foo")
+ self.assertEqual(soup.contents[0], "foo")
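
The change above is purely mechanical. A hypothetical helper for reproducing the same alias-to-preferred-name rewrite across the bs4/ tree might look like the sketch below; the file layout, the RENAMES table, and the function names are assumptions for illustration, not part of this commit.

import os
import re

# Hypothetical mapping: deprecated unittest alias -> preferred name.
RENAMES = {"assertEquals": "assertEqual"}

def rewrite_file(path):
    # Rewrite one file in place; return True if anything changed.
    with open(path) as f:
        source = f.read()
    updated = source
    for old, new in RENAMES.items():
        # Word boundaries keep identifiers like assertEqualsFoo untouched.
        updated = re.sub(r"\b%s\b" % old, new, updated)
    if updated != source:
        with open(path, "w") as f:
            f.write(updated)
        return True
    return False

def rewrite_tree(root="bs4"):
    # Walk the package and rewrite every Python file.
    changed = []
    for dirpath, _, filenames in os.walk(root):
        for name in filenames:
            if name.endswith(".py"):
                path = os.path.join(dirpath, name)
                if rewrite_file(path):
                    changed.append(path)
    return changed

if __name__ == "__main__":
    for path in rewrite_tree():
        print("rewrote", path)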