diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt
index ce0d40d327c15..76c5da0bb855b 100644
--- a/doc/source/whatsnew/v0.21.0.txt
+++ b/doc/source/whatsnew/v0.21.0.txt
@@ -54,6 +54,7 @@ Backwards incompatible API changes
 - :class:`pandas.HDFStore`'s string representation is now faster and less detailed. For the previous behavior, use ``pandas.HDFStore.info()``. (:issue:`16503`).
 - Compression defaults in HDF stores now follow pytable standards. Default is no compression and if ``complib`` is missing and ``complevel`` > 0 ``zlib`` is used (:issue:`15943`)
+
 .. _whatsnew_0210.api:

 Other API Changes
@@ -97,6 +98,9 @@ Bug Fixes
 Conversion
 ^^^^^^^^^^

+- Bug in assignment against datetime-like data with ``int``, where the value may be incorrectly converted to datetime-like (:issue:`14145`)
+- Bug in assignment against ``int64`` data with a ``np.ndarray`` of ``float64`` dtype, where the result may incorrectly keep the ``int64`` dtype (:issue:`14001`)
+

 Indexing
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py
index 16b0a5c8a74ca..9b5641b72efda 100644
--- a/pandas/core/dtypes/cast.py
+++ b/pandas/core/dtypes/cast.py
@@ -272,7 +272,7 @@ def maybe_promote(dtype, fill_value=np.nan):
     else:
         if issubclass(dtype.type, np.datetime64):
             try:
-                fill_value = lib.Timestamp(fill_value).value
+                fill_value = Timestamp(fill_value).value
             except:
                 # the proper thing to do here would probably be to upcast
                 # to object (but numpy 1.6.1 doesn't do this properly)
@@ -349,9 +349,9 @@ def infer_dtype_from_scalar(val, pandas_dtype=False):

     # a 1-element ndarray
     if isinstance(val, np.ndarray):
+        msg = "invalid ndarray passed to _infer_dtype_from_scalar"
         if val.ndim != 0:
-            raise ValueError(
-                "invalid ndarray passed to _infer_dtype_from_scalar")
+            raise ValueError(msg)

         dtype = val.dtype
         val = val.item()
@@ -552,7 +552,7 @@ def conv(r, dtype):
         if isnull(r):
             pass
         elif dtype == _NS_DTYPE:
-            r = lib.Timestamp(r)
+            r = Timestamp(r)
         elif dtype == _TD_DTYPE:
             r = _coerce_scalar_to_timedelta_type(r)
         elif dtype == np.bool_:
@@ -1026,3 +1026,19 @@ def find_common_type(types):
         return np.object

     return np.find_common_type(types, [])
+
+
+def _cast_scalar_to_array(shape, value, dtype=None):
+    """
+    create np.ndarray of specified shape and dtype, filled with value
+    """
+
+    if dtype is None:
+        dtype, fill_value = infer_dtype_from_scalar(value)
+    else:
+        fill_value = value
+
+    values = np.empty(shape, dtype=dtype)
+    values.fill(fill_value)
+
+    return values
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 2b2e7be62427b..a9c71c613256e 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -59,6 +59,7 @@
     is_named_tuple)
 from pandas.core.dtypes.missing import isnull, notnull

+
 from pandas.core.common import (_try_sort,
                                 _default_index,
                                 _values_from_object,
@@ -385,15 +386,10 @@ def __init__(self, data=None, index=None, columns=None, dtype=None,
                     raise_with_traceback(exc)

             if arr.ndim == 0 and index is not None and columns is not None:
-                if isinstance(data, compat.string_types) and dtype is None:
-                    dtype = np.object_
-                if dtype is None:
-                    dtype, data = infer_dtype_from_scalar(data)
-
-                values = np.empty((len(index), len(columns)), dtype=dtype)
-                values.fill(data)
-                mgr = self._init_ndarray(values, index, columns, dtype=dtype,
-                                         copy=False)
+                values = _cast_scalar_to_array((len(index), len(columns)),
+                                               data, dtype=dtype)
+                mgr = self._init_ndarray(values, index, columns,
+                                         dtype=values.dtype, copy=False)
             else:
                 raise ValueError('DataFrame constructor not properly called!')

@@ -507,7 +503,7 @@ def _get_axes(N, K, index=index,
                       columns=columns):
         values = _prep_ndarray(values, copy=copy)

         if dtype is not None:
-            if values.dtype != dtype:
+            if not is_dtype_equal(values.dtype, dtype):
                 try:
                     values = values.astype(dtype)
                 except Exception as orig:
@@ -2683,9 +2679,8 @@ def reindexer(value):

         else:
             # upcast the scalar
-            dtype, value = infer_dtype_from_scalar(value)
-            value = np.repeat(value, len(self.index)).astype(dtype)
-            value = maybe_cast_to_datetime(value, dtype)
+            value = _cast_scalar_to_array(len(self.index), value)
+            value = maybe_cast_to_datetime(value, value.dtype)

         # return internal types directly
         if is_extension_type(value):
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index f2a7ac76481d4..106e00859105c 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -388,7 +388,8 @@ def fillna(self, value, limit=None, inplace=False, downcast=None,

         # fillna, but if we cannot coerce, then try again as an ObjectBlock
         try:
-            values, _, value, _ = self._try_coerce_args(self.values, value)
+            values, _, _, _ = self._try_coerce_args(self.values, value)
+            # value may be converted to an internal representation, so drop it
            blocks = self.putmask(mask, value, inplace=inplace)
             blocks = [b.make_block(values=self._try_coerce_result(b.values))
                       for b in blocks]
@@ -682,8 +683,43 @@ def setitem(self, indexer, value, mgr=None):
             if self.is_numeric:
                 value = np.nan

-        # coerce args
-        values, _, value, _ = self._try_coerce_args(self.values, value)
+        # coerce if block dtype can store value
+        values = self.values
+        try:
+            values, _, value, _ = self._try_coerce_args(values, value)
+            # can keep its own dtype
+            if hasattr(value, 'dtype') and is_dtype_equal(values.dtype,
+                                                          value.dtype):
+                dtype = self.dtype
+            else:
+                dtype = 'infer'
+
+        except (TypeError, ValueError):
+            # current dtype cannot store value, coerce to common dtype
+            find_dtype = False
+
+            if hasattr(value, 'dtype'):
+                dtype = value.dtype
+                find_dtype = True
+
+            elif is_scalar(value):
+                if isnull(value):
+                    # NaN promotion is handled in a later path
+                    dtype = False
+                else:
+                    dtype, _ = infer_dtype_from_scalar(value,
+                                                       pandas_dtype=True)
+                    find_dtype = True
+            else:
+                dtype = 'infer'
+
+            if find_dtype:
+                dtype = find_common_type([values.dtype, dtype])
+                if not is_dtype_equal(self.dtype, dtype):
+                    b = self.astype(dtype)
+                    return b.setitem(indexer, value, mgr=mgr)
+
+        # value must be storable at this moment
         arr_value = np.array(value)

         # cast the values to a type that can hold nan (if necessary)
@@ -713,19 +749,8 @@ def setitem(self, indexer, value, mgr=None):
                 raise ValueError("cannot set using a slice indexer with a "
                                  "different length than the value")

-        try:
-
-            def _is_scalar_indexer(indexer):
-                # return True if we are all scalar indexers
-
-                if arr_value.ndim == 1:
-                    if not isinstance(indexer, tuple):
-                        indexer = tuple([indexer])
-                    return all([is_scalar(idx) for idx in indexer])
-                return False
-
-            def _is_empty_indexer(indexer):
-                # return a boolean if we have an empty indexer
+        def _is_scalar_indexer(indexer):
+            # return True if we are all scalar indexers

             if arr_value.ndim == 1:
                 if not isinstance(indexer, tuple):
                     indexer = tuple([indexer])
                 return all([is_scalar(idx) for idx in indexer])
             return False

@@ -777,23 +802,43 @@ def _is_empty_indexer(indexer):
             raise

         except TypeError:
-            # cast to the passed dtype if possible
-            # otherwise raise the original error
-            try:
-                # e.g.
we are uint32 and our value is uint64 - # this is for compat with older numpies - block = self.make_block(transf(values.astype(value.dtype))) - return block.setitem(indexer=indexer, value=value, mgr=mgr) + def _is_empty_indexer(indexer): + # return a boolean if we have an empty indexer - except: - pass - - raise + if arr_value.ndim == 1: + if not isinstance(indexer, tuple): + indexer = tuple([indexer]) + return any(isinstance(idx, np.ndarray) and len(idx) == 0 + for idx in indexer) + return False - except Exception: + # empty indexers + # 8669 (empty) + if _is_empty_indexer(indexer): pass - return [self] + # setting a single element for each dim and with a rhs that could + # be say a list + # GH 6043 + elif _is_scalar_indexer(indexer): + values[indexer] = value + + # if we are an exact match (ex-broadcasting), + # then use the resultant dtype + elif (len(arr_value.shape) and + arr_value.shape[0] == values.shape[0] and + np.prod(arr_value.shape) == np.prod(values.shape)): + values[indexer] = value + values = values.astype(arr_value.dtype) + + # set + else: + values[indexer] = value + + # coerce and try to infer the dtypes of the result + values = self._try_coerce_and_cast_result(values, dtype) + block = self.make_block(transf(values), fastpath=True) + return block def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False, mgr=None): @@ -1264,6 +1309,7 @@ def func(cond, values, other): values, values_mask, other, other_mask = self._try_coerce_args( values, other) + try: return self._try_coerce_result(expressions.where( cond, values, other, raise_on_error=True)) @@ -1543,6 +1589,7 @@ def putmask(self, mask, new, align=True, inplace=False, axis=0, new = new[mask] mask = _safe_reshape(mask, new_values.shape) + new_values[mask] = new new_values = self._try_coerce_result(new_values) return [self.make_block(values=new_values)] @@ -1712,7 +1759,7 @@ def fillna(self, value, **kwargs): # allow filling with integers to be # interpreted as seconds - if not isinstance(value, np.timedelta64) and is_integer(value): + if not isinstance(value, np.timedelta64): value = Timedelta(value, unit='s') return super(TimeDeltaBlock, self).fillna(value, **kwargs) @@ -1949,6 +1996,15 @@ def _maybe_downcast(self, blocks, downcast=None): def _can_hold_element(self, element): return True + def _try_coerce_args(self, values, other): + """ provide coercion to our input arguments """ + + if isinstance(other, ABCDatetimeIndex): + # to store DatetimeTZBlock as object + other = other.asobject.values + + return values, False, other, False + def _try_cast(self, element): return element @@ -2288,8 +2344,6 @@ def _try_coerce_args(self, values, other): "naive Block") other_mask = isnull(other) other = other.asm8.view('i8') - elif hasattr(other, 'dtype') and is_integer_dtype(other): - other = other.view('i8') else: try: other = np.asarray(other) @@ -2466,6 +2520,8 @@ def _try_coerce_args(self, values, other): raise ValueError("incompatible or non tz-aware value") other_mask = isnull(other) other = other.value + else: + raise TypeError return values, values_mask, other, other_mask diff --git a/pandas/core/panel.py b/pandas/core/panel.py index d1f5b4587059c..5a47258fa782e 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -178,11 +178,9 @@ def _init_data(self, data, copy, dtype, **kwargs): copy = False dtype = None elif is_scalar(data) and all(x is not None for x in passed_axes): - if dtype is None: - dtype, data = infer_dtype_from_scalar(data) - values = np.empty([len(x) for x in passed_axes], 
                                         dtype=dtype)
-            values.fill(data)
-            mgr = self._init_matrix(values, passed_axes, dtype=dtype,
+            values = _cast_scalar_to_array([len(x) for x in passed_axes],
+                                           data, dtype=dtype)
+            mgr = self._init_matrix(values, passed_axes, dtype=values.dtype,
                                     copy=False)
             copy = False
         else:  # pragma: no cover
@@ -582,9 +580,7 @@ def __setitem__(self, key, value):
                                  shape[1:], tuple(map(int, value.shape))))
             mat = np.asarray(value)
         elif is_scalar(value):
-            dtype, value = infer_dtype_from_scalar(value)
-            mat = np.empty(shape[1:], dtype=dtype)
-            mat.fill(value)
+            mat = _cast_scalar_to_array(shape[1:], value)
         else:
             raise TypeError('Cannot set item of type: %s' % str(type(value)))
diff --git a/pandas/tests/dtypes/test_cast.py b/pandas/tests/dtypes/test_cast.py
index 767e99d98cf29..33fab26408784 100644
--- a/pandas/tests/dtypes/test_cast.py
+++ b/pandas/tests/dtypes/test_cast.py
@@ -332,3 +332,100 @@ def test_period_dtype(self):
                       np.dtype('datetime64[ns]'), np.object, np.int64]:
             assert find_common_type([dtype, dtype2]) == np.object
             assert find_common_type([dtype2, dtype]) == np.object
+
+
+class TestInferDtype2(tm.TestCase):
+
+    def test_infer_dtype_from_scalar(self):
+        # Test that infer_dtype_from_scalar returns the correct dtype for
+        # int and float scalars.
+
+        for dtypec in [np.uint8, np.int8, np.uint16, np.int16, np.uint32,
+                       np.int32, np.uint64, np.int64]:
+            data = dtypec(12)
+            dtype, val = infer_dtype_from_scalar(data)
+            self.assertEqual(dtype, type(data))
+
+        data = 12
+        dtype, val = infer_dtype_from_scalar(data)
+        self.assertEqual(dtype, np.int64)
+
+        for dtypec in [np.float16, np.float32, np.float64]:
+            data = dtypec(12)
+            dtype, val = infer_dtype_from_scalar(data)
+            self.assertEqual(dtype, dtypec)
+
+        data = np.float(12)
+        dtype, val = infer_dtype_from_scalar(data)
+        self.assertEqual(dtype, np.float64)
+
+        for data in [True, False]:
+            dtype, val = infer_dtype_from_scalar(data)
+            self.assertEqual(dtype, np.bool_)
+
+        for data in [np.complex64(1), np.complex128(1)]:
+            dtype, val = infer_dtype_from_scalar(data)
+            self.assertEqual(dtype, np.complex_)
+
+        import datetime
+        for data in [np.datetime64(1, 'ns'), Timestamp(1),
+                     datetime.datetime(2000, 1, 1, 0, 0)]:
+            dtype, val = infer_dtype_from_scalar(data)
+            self.assertEqual(dtype, 'M8[ns]')
+
+        for data in [np.timedelta64(1, 'ns'), Timedelta(1),
+                     datetime.timedelta(1)]:
+            dtype, val = infer_dtype_from_scalar(data)
+            self.assertEqual(dtype, 'm8[ns]')
+
+        for tz in ['UTC', 'US/Eastern', 'Asia/Tokyo']:
+            dt = Timestamp(1, tz=tz)
+            dtype, val = infer_dtype_from_scalar(dt, pandas_dtype=True)
+            self.assertEqual(dtype, 'datetime64[ns, {0}]'.format(tz))
+            self.assertEqual(val, dt.value)
+
+            dtype, val = infer_dtype_from_scalar(dt)
+            self.assertEqual(dtype, np.object_)
+            self.assertEqual(val, dt)
+
+        for freq in ['M', 'D']:
+            p = Period('2011-01-01', freq=freq)
+            dtype, val = infer_dtype_from_scalar(p, pandas_dtype=True)
+            self.assertEqual(dtype, 'period[{0}]'.format(freq))
+            self.assertEqual(val, p.ordinal)
+
+            dtype, val = infer_dtype_from_scalar(p)
+            self.assertEqual(dtype, np.object_)
+            self.assertEqual(val, p)
+
+        for data in [datetime.date(2000, 1, 1), 'foo']:
+            dtype, val = infer_dtype_from_scalar(data)
+            self.assertEqual(dtype, np.object_)
+
+    def test_cast_scalar_to_array(self):
+        arr = _cast_scalar_to_array((3, 2), 1, dtype=np.int64)
+        exp = np.ones((3, 2), dtype=np.int64)
+        tm.assert_numpy_array_equal(arr, exp)
+
+        arr = _cast_scalar_to_array((3, 2), 1.1)
+        exp = np.empty((3, 2), dtype=np.float64)
+        exp.fill(1.1)
+        tm.assert_numpy_array_equal(arr, exp)
+
+        arr = _cast_scalar_to_array((2, 3), Timestamp('2011-01-01'))
+        exp = np.empty((2, 3), dtype='datetime64[ns]')
+        exp.fill(np.datetime64('2011-01-01'))
+        tm.assert_numpy_array_equal(arr, exp)
+
+        # pandas dtype is stored as object dtype
+        obj = Timestamp('2011-01-01', tz='US/Eastern')
+        arr = _cast_scalar_to_array((2, 3), obj)
+        exp = np.empty((2, 3), dtype=np.object)
+        exp.fill(obj)
+        tm.assert_numpy_array_equal(arr, exp)
+
+        obj = Period('2011-01-01', freq='D')
+        arr = _cast_scalar_to_array((2, 3), obj)
+        exp = np.empty((2, 3), dtype=np.object)
+        exp.fill(obj)
+        tm.assert_numpy_array_equal(arr, exp)
diff --git a/pandas/tests/dtypes/test_convert.py b/pandas/tests/dtypes/test_convert.py
deleted file mode 100644
index e69de29bb2d1d..0000000000000
diff --git a/pandas/tests/indexing/test_coercion.py b/pandas/tests/indexing/test_coercion.py
index 25cc810299678..92d06950e61d2 100644
--- a/pandas/tests/indexing/test_coercion.py
+++ b/pandas/tests/indexing/test_coercion.py
@@ -105,6 +105,19 @@ def test_setitem_series_int64(self):
         exp = pd.Series([1, 1, 3, 4])
         self._assert_setitem_series_conversion(obj, True, exp, np.int64)

+    def test_setitem_series_int8(self):
+        # integer dtype coercion (no change)
+        obj = pd.Series([1, 2, 3, 4], dtype=np.int8)
+        self.assertEqual(obj.dtype, np.int8)
+
+        exp = pd.Series([1, 1, 3, 4], dtype=np.int8)
+        self._assert_setitem_series_conversion(obj, np.int32(1), exp, np.int8)
+
+        # BUG: it must be Series([1, 512, 3, 4], dtype=np.int16)
+        exp = pd.Series([1, 0, 3, 4], dtype=np.int8)
+        self._assert_setitem_series_conversion(obj, np.int16(2**9), exp,
+                                               np.int8)
+
     def test_setitem_series_float64(self):
         obj = pd.Series([1.1, 2.2, 3.3, 4.4])
         assert obj.dtype == np.float64
@@ -205,6 +218,13 @@ def test_setitem_series_datetime64(self):
                          pd.Timestamp('2011-01-04')])
         self._assert_setitem_series_conversion(obj, 1, exp, 'datetime64[ns]')

+        # datetime64 + object -> object
+        exp = pd.Series([pd.Timestamp('2011-01-01'),
+                         'x',
+                         pd.Timestamp('2011-01-03'),
+                         pd.Timestamp('2011-01-04')])
+        self._assert_setitem_series_conversion(obj, 'x', exp, np.object)
+
         # ToDo: add more tests once the above issue has been fixed

     def test_setitem_series_datetime64tz(self):
@@ -224,19 +244,62 @@ def test_setitem_series_datetime64tz(self):
         self._assert_setitem_series_conversion(obj, value, exp,
                                                'datetime64[ns, US/Eastern]')

+        # datetime64tz + datetime64tz (different tz) -> object
+        exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
+                         pd.Timestamp('2012-01-01', tz='US/Pacific'),
+                         pd.Timestamp('2011-01-03', tz=tz),
+                         pd.Timestamp('2011-01-04', tz=tz)])
+        value = pd.Timestamp('2012-01-01', tz='US/Pacific')
+        self._assert_setitem_series_conversion(obj, value, exp, np.object)
+
+        # datetime64tz + datetime64 -> object
+        exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
+                         pd.Timestamp('2012-01-01'),
+                         pd.Timestamp('2011-01-03', tz=tz),
+                         pd.Timestamp('2011-01-04', tz=tz)])
+        value = pd.Timestamp('2012-01-01')
+        self._assert_setitem_series_conversion(obj, value, exp, np.object)
+
         # datetime64 + int -> object
-        # ToDo: The result must be object
         exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
-                         pd.Timestamp(1, tz=tz),
+                         1,
                          pd.Timestamp('2011-01-03', tz=tz),
                          pd.Timestamp('2011-01-04', tz=tz)])
-        self._assert_setitem_series_conversion(obj, 1, exp,
-                                               'datetime64[ns, US/Eastern]')
+        self._assert_setitem_series_conversion(obj, 1, exp, np.object)

         # ToDo: add more tests once the above issue has been fixed

     def test_setitem_series_timedelta64(self):
-        pass
+        obj = pd.Series([pd.Timedelta('1 day'),
pd.Timedelta('2 day'), + pd.Timedelta('3 day'), + pd.Timedelta('4 day')]) + self.assertEqual(obj.dtype, 'timedelta64[ns]') + + # timedelta64 + timedelta64 -> timedelta64 + exp = pd.Series([pd.Timedelta('1 day'), + pd.Timedelta('12 day'), + pd.Timedelta('3 day'), + pd.Timedelta('4 day')]) + self._assert_setitem_series_conversion(obj, pd.Timedelta('12 day'), + exp, 'timedelta64[ns]') + + # timedelta64 + int -> object + # ToDo: The result must be object + exp = pd.Series([pd.Timedelta('1 day'), + pd.Timedelta(1), + pd.Timedelta('3 day'), + pd.Timedelta('4 day')]) + self._assert_setitem_series_conversion(obj, 1, exp, 'timedelta64[ns]') + + # timedelta64 + object -> object + exp = pd.Series([pd.Timedelta('1 day'), + 'x', + pd.Timedelta('3 day'), + pd.Timedelta('4 day')]) + self._assert_setitem_series_conversion(obj, 'x', exp, np.object) + + # ToDo: add more tests once the above issue has been fixed def test_setitem_series_period(self): pass @@ -1033,14 +1096,12 @@ def test_fillna_series_datetime64tz(self): value = pd.Timestamp('2012-01-01', tz='Asia/Tokyo') self._assert_fillna_conversion(obj, value, exp, np.object) - # datetime64tz + int => datetime64tz - # ToDo: must be object + # datetime64tz + int => object exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz), - pd.Timestamp(1, tz=tz), + 1, pd.Timestamp('2011-01-03', tz=tz), pd.Timestamp('2011-01-04', tz=tz)]) - self._assert_fillna_conversion(obj, 1, exp, - 'datetime64[ns, US/Eastern]') + self._assert_fillna_conversion(obj, 1, exp, np.object) # datetime64tz + object => object exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz), diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index 9fa677eb624ae..c3fb35b56a9a7 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -354,6 +354,12 @@ def test_multi_assign(self): tm.assert_frame_equal(df2, expected) # with an ndarray on rhs + # coerces to float64 because values has float64 dtype + # GH 14001 + expected = DataFrame({'FC': ['a', np.nan, 'a', 'b', 'a', 'b'], + 'PF': [0, 0, 0, 0, 1, 1], + 'col1': [0., 1., 4., 6., 8., 10.], + 'col2': [12, 7, 16, np.nan, 20, 22]}) df2 = df.copy() df2.loc[mask, cols] = dft.loc[mask, cols].values tm.assert_frame_equal(df2, expected) diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 749af1c56a7f0..528748d123aac 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -1033,11 +1033,11 @@ def test_clip_with_datetimes(self): # naive and tz-aware datetimes t = Timestamp('2015-12-01 09:30:30') - s = Series([Timestamp('2015-12-01 09:30:00'), Timestamp( - '2015-12-01 09:31:00')]) + s = Series([Timestamp('2015-12-01 09:30:00'), + Timestamp('2015-12-01 09:31:00')]) result = s.clip(upper=t) - expected = Series([Timestamp('2015-12-01 09:30:00'), Timestamp( - '2015-12-01 09:30:30')]) + expected = Series([Timestamp('2015-12-01 09:30:00'), + Timestamp('2015-12-01 09:30:30')]) assert_series_equal(result, expected) t = Timestamp('2015-12-01 09:30:30', tz='US/Eastern') diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index 8e73c17684a16..2409d0a77d54b 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -52,14 +52,14 @@ class TestSeriesMissingData(TestData): def test_timedelta_fillna(self): # GH 3371 - s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp( - '20130102'), Timestamp('20130103 9:01:01')]) + s = 
Series([Timestamp('20130101'), Timestamp('20130101'), + Timestamp('20130102'), Timestamp('20130103 9:01:01')]) td = s.diff() # reg fillna result = td.fillna(0) - expected = Series([timedelta(0), timedelta(0), timedelta(1), timedelta( - days=1, seconds=9 * 3600 + 60 + 1)]) + expected = Series([timedelta(0), timedelta(0), timedelta(1), + timedelta(days=1, seconds=9 * 3600 + 60 + 1)]) assert_series_equal(result, expected) # interprested as seconds @@ -69,8 +69,9 @@ def test_timedelta_fillna(self): assert_series_equal(result, expected) result = td.fillna(timedelta(days=1, seconds=1)) - expected = Series([timedelta(days=1, seconds=1), timedelta( - 0), timedelta(1), timedelta(days=1, seconds=9 * 3600 + 60 + 1)]) + expected = Series([timedelta(days=1, seconds=1), timedelta(0), + timedelta(1), + timedelta(days=1, seconds=9 * 3600 + 60 + 1)]) assert_series_equal(result, expected) result = td.fillna(np.timedelta64(int(1e9)))
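
Reviewer note: the behavior pinned down by the tests above can be summarized in a short script. This is a minimal sketch, assuming a pandas build with this branch applied; it restates expectations already encoded in test_coercion.py and test_indexing.py rather than introducing any new behavior.

import numpy as np
import pandas as pd

# datetime64 + object -> object: assigning a string no longer coerces
# it into a Timestamp (per test_setitem_series_datetime64 above)
s = pd.Series(pd.date_range('2011-01-01', periods=4))
s[1] = 'x'
assert s.dtype == np.object_

# datetime64tz + int -> object: an int is no longer reinterpreted as a
# nanosecond Timestamp in the series' tz (GH 14145)
s = pd.Series(pd.date_range('2011-01-01', periods=4, tz='US/Eastern'))
s[1] = 1
assert s.dtype == np.object_ and s[1] == 1

# int64 + float64 ndarray -> float64: assigning a float ndarray no longer
# silently truncates it to the existing int64 dtype (GH 14001)
df = pd.DataFrame({'a': [0, 2, 4, 6]})
df.loc[[0, 2], 'a'] = np.array([0.5, 4.5])
assert df['a'].dtype == np.float64

The promotion path is Block.setitem catching the coercion failure, computing a common dtype via find_common_type in pandas/core/dtypes/cast.py, and re-dispatching onto the upcast block through self.astype(dtype).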