Skip to content

Commit 92d8442

Browse files
committed
Use `isnull` instead of `np.isnan` when detecting missing group labels
1 parent b045b62 commit 92d8442

File tree

1 file changed

+5
-5
lines changed

1 file changed

+5
-5
lines changed

flox/core.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -182,7 +182,7 @@ def find_group_cohorts(labels, chunks, merge=True, method="cohorts"):
182182
# We always drop NaN; np.unique also considers every NaN to be different so
183183
# it's really important we get rid of them.
184184
raveled = labels.ravel()
185-
unique_labels = np.unique(raveled[~np.isnan(raveled)])
185+
unique_labels = np.unique(raveled[~isnull(raveled)])
186186
# these are chunks where a label is present
187187
label_chunks = {lab: tuple(np.unique(which_chunk[raveled == lab])) for lab in unique_labels}
188188
# These invert the label_chunks mapping so we know which labels occur together.
@@ -363,7 +363,7 @@ def reindex_(
363363
raise ValueError("Filling is required. fill_value cannot be None.")
364364
indexer[axis] = idx == -1
365365
# This allows us to match xarray's type promotion rules
366-
if fill_value is xrdtypes.NA or np.isnan(fill_value):
366+
if fill_value is xrdtypes.NA or isnull(fill_value):
367367
new_dtype, fill_value = xrdtypes.maybe_promote(reindexed.dtype)
368368
reindexed = reindexed.astype(new_dtype, copy=False)
369369
reindexed[tuple(indexer)] = fill_value
@@ -425,7 +425,7 @@ def factorize_(
425425
else:
426426
sorter = None
427427
idx = np.searchsorted(expect, groupvar.ravel(), sorter=sorter)
428-
mask = np.isnan(groupvar.ravel())
428+
mask = isnull(groupvar.ravel())
429429
# TODO: optimize?
430430
idx[mask] = -1
431431
if not sort:
@@ -501,7 +501,7 @@ def chunk_argreduce(
501501
engine=engine,
502502
sort=sort,
503503
)
504-
if not np.isnan(results["groups"]).all():
504+
if not isnull(results["groups"]).all():
505505
# will not work for empty groups...
506506
# glorious
507507
idx = np.broadcast_to(idx, array.shape)
@@ -833,7 +833,7 @@ def _grouped_combine(
833833
# reindexing is unnecessary
834834
# I bet we can minimize the amount of reindexing for mD reductions too, but it's complicated
835835
unique_groups = np.unique(tuple(flatten(deepmap(listify_groups, x_chunk))))
836-
unique_groups = unique_groups[~np.isnan(unique_groups)]
836+
unique_groups = unique_groups[~isnull(unique_groups)]
837837
if len(unique_groups) == 0:
838838
unique_groups = [np.nan]
839839

0 commit comments

Comments (0)