Skip to content

Commit

Permalink
apache GH-41978: [Python] Fix pandas tests to follow downstream datetime64 unit changes (apache#41979)
Browse files Browse the repository at this point in the history

### Rationale for this change

Pandas changed the default unit used in certain cases for the `datetime64[unit]` dtype. This causes some failures in our test suite when tested with pandas 3.0.0.dev

* GitHub Issue: apache#41978

Authored-by: Joris Van den Bossche <[email protected]>
Signed-off-by: Joris Van den Bossche <[email protected]>
  • Loading branch information
jorisvandenbossche authored Jun 18, 2024
1 parent e6da396 commit 370818a
Show file tree
Hide file tree
Showing 4 changed files with 14 additions and 5 deletions.
11 changes: 8 additions & 3 deletions python/pyarrow/pandas_compat.py
Original file line number Diff line number Diff line change
Expand Up @@ -109,9 +109,10 @@ def get_logical_type_from_numpy(pandas_collection):
except KeyError:
if hasattr(pandas_collection.dtype, 'tz'):
return 'datetimetz'
# See https://github.com/pandas-dev/pandas/issues/24739
if str(pandas_collection.dtype) == 'datetime64[ns]':
return 'datetime64[ns]'
# See https://github.com/pandas-dev/pandas/issues/24739 (infer_dtype will
# result in "datetime64" without unit, while pandas astype requires a unit)
if str(pandas_collection.dtype).startswith('datetime64'):
return str(pandas_collection.dtype)
result = _pandas_api.infer_dtype(pandas_collection)
if result == 'string':
return 'unicode'
Expand Down Expand Up @@ -1107,6 +1108,10 @@ def _reconstruct_columns_from_metadata(columns, column_indexes):
tz = pa.lib.string_to_tzinfo(
column_indexes[0]['metadata']['timezone'])
level = pd.to_datetime(level, utc=True).tz_convert(tz)
if _pandas_api.is_ge_v3():
# with pandas 3+, to_datetime returns a unit depending on the string
# data, so we restore it to the original unit from the metadata
level = level.as_unit(np.datetime_data(dtype)[0])
# GH-41503: if the column index was decimal, restore to decimal
elif pandas_dtype == "decimal":
level = _pandas_api.pd.Index([decimal.Decimal(i) for i in level])
Expand Down
6 changes: 4 additions & 2 deletions python/pyarrow/tests/interchange/test_conversion.py
Original file line number Diff line number Diff line change
Expand Up @@ -335,8 +335,10 @@ def test_pandas_to_pyarrow_with_missing(np_float):
np_array = np.array([0, np.nan, 2], dtype=np_float)
datetime_array = [None, dt(2007, 7, 14), dt(2007, 7, 15)]
df = pd.DataFrame({
"a": np_array, # float, ColumnNullType.USE_NAN
"dt": datetime_array # ColumnNullType.USE_SENTINEL
# float, ColumnNullType.USE_NAN
"a": np_array,
# ColumnNullType.USE_SENTINEL
"dt": np.array(datetime_array, dtype="datetime64[ns]")
})
expected = pa.table({
"a": pa.array(np_array, from_pandas=True),
Expand Down
1 change: 1 addition & 0 deletions python/pyarrow/tests/parquet/test_datetime.py
Original file line number Diff line number Diff line change
Expand Up @@ -331,6 +331,7 @@ def get_table(pq_reader_method, filename, **kwargs):
pq_reader_method, filename, coerce_int96_timestamp_unit="s"
)
df_correct = tab_correct.to_pandas(timestamp_as_object=True)
df["a"] = df["a"].astype(object)
tm.assert_frame_equal(df, df_correct)


Expand Down
1 change: 1 addition & 0 deletions python/pyarrow/tests/test_pandas.py
Original file line number Diff line number Diff line change
Expand Up @@ -4754,6 +4754,7 @@ def make_df_with_timestamps():
np.datetime64('2050-05-03 15:42', 'ns'),
],
})
df['dateTimeMs'] = df['dateTimeMs'].astype('object')
# Not part of what we're testing, just ensuring that the inputs are what we
# expect.
assert (df.dateTimeMs.dtype, df.dateTimeNs.dtype) == (
Expand Down

0 comments on commit 370818a

Please sign in to comment.