Merge pull request #10458 from sinhrks/assert_isinstance
TST/CLN: remove assert_isinstance
jreback committed Jun 30, 2015
2 parents 01995b2 + 572510c commit 71ac2eb
Showing 37 changed files with 344 additions and 333 deletions.
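The change is mechanical: every call to the custom test helper tm.assert_isinstance is replaced by the unittest-style tm.assertIsInstance, with the arguments left untouched. The snippet below is a minimal sketch of that pattern, not an excerpt from the diff; it assumes tm refers to pandas.util.testing (the usual alias in these tests, though the import itself is not shown in the hunks below) and a pandas release contemporary with this commit, since both helpers have been removed from modern pandas.

# Illustrative sketch of the substitution applied across the test suite.
# Assumes a 2015-era pandas where pandas.util.testing still exposes
# assertIsInstance (and, before this commit, assert_isinstance).
import pandas.util.testing as tm
from pandas import DataFrame

df = DataFrame({'A': [1, 2, 3]})

# Before this commit:
#     tm.assert_isinstance(df, DataFrame)
# After this commit:
tm.assertIsInstance(df, DataFrame)

assertIsInstance mirrors the unittest.TestCase method name, which presumably motivated dropping the redundant snake_case alias.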
30 changes: 15 additions & 15 deletions pandas/io/tests/test_excel.py
@@ -424,27 +424,27 @@ def test_reader_converters(self):
for path in (xls_path, xlsx_path):
actual = read_excel(path, 'Sheet1', converters=converters)
tm.assert_frame_equal(actual, expected)

def test_reading_all_sheets(self):
# Test reading all sheetnames by setting sheetname to None,
# Ensure a dict is returned.
# See PR #9450

_skip_if_no_xlrd()

dfs = read_excel(self.multisheet,sheetname=None)
expected_keys = ['Alpha','Beta','Charlie']
tm.assert_contains_all(expected_keys,dfs.keys())

def test_reading_multiple_specific_sheets(self):
# Test reading specific sheetnames by specifying a mixed list
# Test reading specific sheetnames by specifying a mixed list
# of integers and strings, and confirm that duplicated sheet
# references (positions/names) are removed properly.

# Ensure a dict is returned
# See PR #9450
_skip_if_no_xlrd()

#Explicitly request duplicates. Only the set should be returned.
expected_keys = [2,'Charlie','Charlie']
dfs = read_excel(self.multisheet,sheetname=expected_keys)
@@ -456,27 +456,27 @@ def test_creating_and_reading_multiple_sheets(self):
# Test reading multiple sheets, from a runtime created excel file
# with multiple sheets.
# See PR #9450

_skip_if_no_xlrd()
_skip_if_no_xlwt()

def tdf(sheetname):
d, i = [11,22,33], [1,2,3]
return DataFrame(d,i,columns=[sheetname])

sheets = ['AAA','BBB','CCC']

dfs = [tdf(s) for s in sheets]
dfs = dict(zip(sheets,dfs))

with ensure_clean('.xlsx') as pth:
with ExcelWriter(pth) as ew:
for sheetname, df in iteritems(dfs):
df.to_excel(ew,sheetname)
dfs_returned = pd.read_excel(pth,sheetname=sheets)
for s in sheets:
tm.assert_frame_equal(dfs[s],dfs_returned[s])

def test_reader_seconds(self):
# Test reading times with and without milliseconds. GH5945.
_skip_if_no_xlrd()
@@ -1575,12 +1575,12 @@ def test_ExcelWriter_dispatch(self):

with ensure_clean('.xlsx') as path:
writer = ExcelWriter(path)
tm.assert_isinstance(writer, writer_klass)
tm.assertIsInstance(writer, writer_klass)

_skip_if_no_xlwt()
with ensure_clean('.xls') as path:
writer = ExcelWriter(path)
tm.assert_isinstance(writer, _XlwtWriter)
tm.assertIsInstance(writer, _XlwtWriter)

def test_register_writer(self):
# some awkward mocking to test out dispatch and such actually works
@@ -1608,7 +1608,7 @@ def check_called(func):

register_writer(DummyClass)
writer = ExcelWriter('something.test')
tm.assert_isinstance(writer, DummyClass)
tm.assertIsInstance(writer, DummyClass)
df = tm.makeCustomDataframe(1, 1)
panel = tm.makePanel()
func = lambda: df.to_excel('something.test')
40 changes: 20 additions & 20 deletions pandas/io/tests/test_html.py
@@ -159,12 +159,12 @@ def test_spam_with_types(self):
def test_spam_no_match(self):
dfs = self.read_html(self.spam_data)
for df in dfs:
tm.assert_isinstance(df, DataFrame)
tm.assertIsInstance(df, DataFrame)

def test_banklist_no_match(self):
dfs = self.read_html(self.banklist_data, attrs={'id': 'table'})
for df in dfs:
tm.assert_isinstance(df, DataFrame)
tm.assertIsInstance(df, DataFrame)

def test_spam_header(self):
df = self.read_html(self.spam_data, '.*Water.*', header=1)[0]
@@ -307,9 +307,9 @@ def test_file_url(self):
url = self.banklist_data
dfs = self.read_html(file_path_to_url(url), 'First',
attrs={'id': 'table'})
tm.assert_isinstance(dfs, list)
tm.assertIsInstance(dfs, list)
for df in dfs:
tm.assert_isinstance(df, DataFrame)
tm.assertIsInstance(df, DataFrame)

@slow
def test_invalid_table_attrs(self):
@@ -325,44 +325,44 @@ def _bank_data(self, *args, **kwargs):
@slow
def test_multiindex_header(self):
df = self._bank_data(header=[0, 1])[0]
tm.assert_isinstance(df.columns, MultiIndex)
tm.assertIsInstance(df.columns, MultiIndex)

@slow
def test_multiindex_index(self):
df = self._bank_data(index_col=[0, 1])[0]
tm.assert_isinstance(df.index, MultiIndex)
tm.assertIsInstance(df.index, MultiIndex)

@slow
def test_multiindex_header_index(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1])[0]
tm.assert_isinstance(df.columns, MultiIndex)
tm.assert_isinstance(df.index, MultiIndex)
tm.assertIsInstance(df.columns, MultiIndex)
tm.assertIsInstance(df.index, MultiIndex)

@slow
def test_multiindex_header_skiprows_tuples(self):
df = self._bank_data(header=[0, 1], skiprows=1, tupleize_cols=True)[0]
tm.assert_isinstance(df.columns, Index)
tm.assertIsInstance(df.columns, Index)

@slow
def test_multiindex_header_skiprows(self):
df = self._bank_data(header=[0, 1], skiprows=1)[0]
tm.assert_isinstance(df.columns, MultiIndex)
tm.assertIsInstance(df.columns, MultiIndex)

@slow
def test_multiindex_header_index_skiprows(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1], skiprows=1)[0]
tm.assert_isinstance(df.index, MultiIndex)
tm.assert_isinstance(df.columns, MultiIndex)
tm.assertIsInstance(df.index, MultiIndex)
tm.assertIsInstance(df.columns, MultiIndex)

@slow
def test_regex_idempotency(self):
url = self.banklist_data
dfs = self.read_html(file_path_to_url(url),
match=re.compile(re.compile('Florida')),
attrs={'id': 'table'})
tm.assert_isinstance(dfs, list)
tm.assertIsInstance(dfs, list)
for df in dfs:
tm.assert_isinstance(df, DataFrame)
tm.assertIsInstance(df, DataFrame)

def test_negative_skiprows(self):
with tm.assertRaisesRegexp(ValueError,
@@ -426,10 +426,10 @@ def test_empty_tables(self):
res1 = self.read_html(StringIO(data1))
res2 = self.read_html(StringIO(data2))
assert_framelist_equal(res1, res2)

def test_tfoot_read(self):
"""
Make sure that read_html reads tfoot, containing td or th.
Make sure that read_html reads tfoot, containing td or th.
Ignores empty tfoot
"""
data_template = '''<table>
Expand All @@ -452,10 +452,10 @@ def test_tfoot_read(self):

data1 = data_template.format(footer = "")
data2 = data_template.format(footer ="<tr><td>footA</td><th>footB</th></tr>")

d1 = {'A': ['bodyA'], 'B': ['bodyB']}
d2 = {'A': ['bodyA', 'footA'], 'B': ['bodyB', 'footB']}

tm.assert_frame_equal(self.read_html(data1)[0], DataFrame(d1))
tm.assert_frame_equal(self.read_html(data2)[0], DataFrame(d2))

@@ -721,8 +721,8 @@ def test_data_fail(self):
def test_works_on_valid_markup(self):
filename = os.path.join(DATA_PATH, 'valid_markup.html')
dfs = self.read_html(filename, index_col=0)
tm.assert_isinstance(dfs, list)
tm.assert_isinstance(dfs[0], DataFrame)
tm.assertIsInstance(dfs, list)
tm.assertIsInstance(dfs[0], DataFrame)

@slow
def test_fallback_success(self):
12 changes: 6 additions & 6 deletions pandas/io/tests/test_parsers.py
@@ -276,7 +276,7 @@ def test_squeeze(self):
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assert_isinstance(result, Series)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)

def test_squeeze_no_view(self):
@@ -1016,7 +1016,7 @@ def test_parse_dates_column_list(self):
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assert_isinstance(expected['aux_date'][0], datetime)
tm.assertIsInstance(expected['aux_date'][0], datetime)

df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
@@ -1117,7 +1117,7 @@ def test_read_csv_infer_compression(self):
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assert_isinstance(df1[0].values[0], compat.text_type)
tm.assertIsInstance(df1[0].values[0], compat.text_type)

def test_read_table_wrong_num_columns(self):
# too few!
@@ -1300,7 +1300,7 @@ def test_iterator(self):

treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assert_isinstance(treader, TextFileReader)
tm.assertIsInstance(treader, TextFileReader)

# stopping iteration when on chunksize is specified, GH 3967
data = """A,B,C
@@ -1601,7 +1601,7 @@ def test_converters(self):
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)

tm.assert_isinstance(result['D'][0], (datetime, Timestamp))
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)

@@ -2727,7 +2727,7 @@ def test_iterator(self):

treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assert_isinstance(treader, TextFileReader)
tm.assertIsInstance(treader, TextFileReader)

# stopping iteration when on chunksize is specified, GH 3967
data = """A,B,C
8 changes: 4 additions & 4 deletions pandas/sparse/tests/test_array.py
@@ -129,19 +129,19 @@ def _check_op(op, first, second):
res = op(first, second)
exp = SparseArray(op(first.values, second.values),
fill_value=first.fill_value)
tm.assert_isinstance(res, SparseArray)
tm.assertIsInstance(res, SparseArray)
assert_almost_equal(res.values, exp.values)

res2 = op(first, second.values)
tm.assert_isinstance(res2, SparseArray)
tm.assertIsInstance(res2, SparseArray)
assert_sp_array_equal(res, res2)

res3 = op(first.values, second)
tm.assert_isinstance(res3, SparseArray)
tm.assertIsInstance(res3, SparseArray)
assert_sp_array_equal(res, res3)

res4 = op(first, 4)
tm.assert_isinstance(res4, SparseArray)
tm.assertIsInstance(res4, SparseArray)

# ignore this if the actual op raises (e.g. pow)
try:
2 changes: 1 addition & 1 deletion pandas/sparse/tests/test_libsparse.py
@@ -287,7 +287,7 @@ def _check_case(xloc, xlen, yloc, ylen, eloc, elen):
# see if survive the round trip
xbindex = xindex.to_int_index().to_block_index()
ybindex = yindex.to_int_index().to_block_index()
tm.assert_isinstance(xbindex, BlockIndex)
tm.assertIsInstance(xbindex, BlockIndex)
self.assertTrue(xbindex.equals(xindex))
self.assertTrue(ybindex.equals(yindex))
check_cases(_check_case)
(Diffs for the remaining 32 changed files are not shown here.)
