Commit 0c18cc6

REF: remove take_1d alias of take_nd (#39731)
1 parent 2332161 commit 0c18cc6

15 files changed: +39 -42 lines changed

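The change is mechanical: the module-level alias take_1d = take_nd is deleted from pandas/core/algorithms.py, and every internal caller now uses the surviving name take_nd. Both names referred to the same indexer-based take, where an indexer entry of -1 marks a missing position that is filled with fill_value. A minimal NumPy sketch of that behaviour (an illustration of the semantics only, not pandas' Cython-backed implementation; the helper name take_with_fill is invented here):

import numpy as np

def take_with_fill(arr, indexer, fill_value):
    # Sketch only: return arr[indexer], mapping indexer == -1 to fill_value
    # (the "-1 means NA" convention used throughout this commit).
    arr = np.asarray(arr)
    indexer = np.asarray(indexer, dtype=np.intp)
    out = arr.take(indexer)          # -1 wraps to the last element here ...
    out[indexer == -1] = fill_value  # ... so overwrite those slots with the fill value
    return out

print(take_with_fill(np.array([10, 20, 30]), np.array([2, 0, -1, 1]), fill_value=-1))
# [30 10 -1 20]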

pandas/_testing/asserters.py

Lines changed: 2 additions & 2 deletions
@@ -29,7 +29,7 @@
     Series,
     TimedeltaIndex,
 )
-from pandas.core.algorithms import safe_sort, take_1d
+from pandas.core.algorithms import safe_sort, take_nd
 from pandas.core.arrays import (
     DatetimeArray,
     ExtensionArray,
@@ -309,7 +309,7 @@ def _get_ilevel_values(index, level):
         # accept level number only
         unique = index.levels[level]
         level_codes = index.codes[level]
-        filled = take_1d(unique._values, level_codes, fill_value=unique._na_value)
+        filled = take_nd(unique._values, level_codes, fill_value=unique._na_value)
         return unique._shallow_copy(filled, name=index.names[level])

     if check_less_precise is not no_default:

pandas/core/algorithms.py

Lines changed: 3 additions & 6 deletions
@@ -1652,7 +1652,7 @@ def take(arr, indices, axis: int = 0, allow_fill: bool = False, fill_value=None)
     if allow_fill:
         # Pandas style, -1 means NA
         validate_indices(indices, arr.shape[axis])
-        result = take_1d(
+        result = take_nd(
             arr, indices, axis=axis, allow_fill=True, fill_value=fill_value
         )
     else:
@@ -1783,9 +1783,6 @@ def take_nd(
     return out


-take_1d = take_nd
-
-
 def take_2d_multi(arr, indexer, fill_value=np.nan):
     """
     Specialized Cython take which sets NaN values in one pass.
@@ -2169,9 +2166,9 @@ def safe_sort(
         sorter = ensure_platform_int(t.lookup(ordered))

     if na_sentinel == -1:
-        # take_1d is faster, but only works for na_sentinels of -1
+        # take_nd is faster, but only works for na_sentinels of -1
         order2 = sorter.argsort()
-        new_codes = take_1d(order2, codes, fill_value=-1)
+        new_codes = take_nd(order2, codes, fill_value=-1)
         if verify:
             mask = (codes < -len(values)) | (codes >= len(values))
     else:
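
In safe_sort, the fast path above relies on the same -1 convention: order2 maps each original position to its position after sorting, and take_nd(order2, codes, fill_value=-1) rewrites the codes into sorted order while letting the -1 NA sentinel pass through. A small worked example with made-up arrays (np.where stands in for the take_nd call):

import numpy as np

values = np.array(["b", "a", "c"])
codes = np.array([0, 2, -1, 1])    # "b", "c", NA, "a" in terms of `values`

sorter = values.argsort()          # [1, 0, 2]: positions that sort `values`
order2 = sorter.argsort()          # old position -> new (sorted) position

new_codes = np.where(codes == -1, -1, order2[codes])
print(values[sorter])              # ['a' 'b' 'c']
print(new_codes)                   # [ 1  2 -1  0] -> still "b", "c", NA, "a"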

pandas/core/arrays/categorical.py

Lines changed: 5 additions & 5 deletions
@@ -59,7 +59,7 @@
 from pandas.core import ops
 from pandas.core.accessor import PandasDelegate, delegate_names
 import pandas.core.algorithms as algorithms
-from pandas.core.algorithms import factorize, get_data_algo, take_1d, unique1d
+from pandas.core.algorithms import factorize, get_data_algo, take_nd, unique1d
 from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
 from pandas.core.base import ExtensionArray, NoNewAttributesMixin, PandasObject
 import pandas.core.common as com
@@ -475,7 +475,7 @@ def astype(self, dtype: Dtype, copy: bool = True) -> ArrayLike:
             msg = f"Cannot cast {self.categories.dtype} dtype to {dtype}"
             raise ValueError(msg)

-        result = take_1d(new_cats, libalgos.ensure_platform_int(self._codes))
+        result = take_nd(new_cats, libalgos.ensure_platform_int(self._codes))

         return result

@@ -1310,7 +1310,7 @@ def __array__(self, dtype: Optional[NpDtype] = None) -> np.ndarray:
         if dtype==None (default), the same dtype as
         categorical.categories.dtype.
         """
-        ret = take_1d(self.categories._values, self._codes)
+        ret = take_nd(self.categories._values, self._codes)
         if dtype and not is_dtype_equal(dtype, self.categories.dtype):
             return np.asarray(ret, dtype)
         # When we're a Categorical[ExtensionArray], like Interval,
@@ -2349,7 +2349,7 @@ def _str_map(self, f, na_value=np.nan, dtype=np.dtype(object)):
         categories = self.categories
         codes = self.codes
         result = PandasArray(categories.to_numpy())._str_map(f, na_value, dtype)
-        return take_1d(result, codes, fill_value=na_value)
+        return take_nd(result, codes, fill_value=na_value)

     def _str_get_dummies(self, sep="|"):
         # sep may not be in categories. Just bail on this.
@@ -2600,7 +2600,7 @@ def recode_for_categories(
     indexer = coerce_indexer_dtype(
         new_categories.get_indexer(old_categories), new_categories
     )
-    new_codes = take_1d(indexer, codes, fill_value=-1)
+    new_codes = take_nd(indexer, codes, fill_value=-1)
     return new_codes

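recode_for_categories, changed above, follows the same pattern: the indexer records each old category's position among the new categories, and taking it with the old codes (fill_value=-1) yields codes against the new categories. A sketch with made-up categories, again using np.where in place of the internal take_nd call:

import numpy as np
import pandas as pd

old_categories = pd.Index(["a", "b", "c"])
new_categories = pd.Index(["c", "a", "b"])
codes = np.array([0, 2, -1, 1])                        # "a", "c", NA, "b"

indexer = new_categories.get_indexer(old_categories)   # [1, 2, 0]
new_codes = np.where(codes == -1, -1, indexer[codes])
print(new_codes)                                       # [ 1  0 -1  2] -> "a", "c", NA, "b"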

pandas/core/base.py

Lines changed: 1 addition & 1 deletion
@@ -926,7 +926,7 @@ def _map_values(self, mapper, na_action=None):
             values = self._values

             indexer = mapper.index.get_indexer(values)
-            new_values = algorithms.take_1d(mapper._values, indexer)
+            new_values = algorithms.take_nd(mapper._values, indexer)

             return new_values

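At the public level, this _map_values fast path is what Series.map does with a Series mapper: each value is looked up in the mapper's index, and values that are not found come back as missing (the -1 indexer slot filled with NA). For example:

import pandas as pd

mapper = pd.Series({"cat": "kitten", "dog": "puppy"})
s = pd.Series(["cat", "dog", "fish"])
print(s.map(mapper))
# 0    kitten
# 1     puppy
# 2       NaN
# dtype: object
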
pandas/core/dtypes/concat.py

Lines changed: 2 additions & 2 deletions
@@ -272,9 +272,9 @@ def _maybe_unwrap(x):
             categories = categories.sort_values()
             indexer = categories.get_indexer(first.categories)

-            from pandas.core.algorithms import take_1d
+            from pandas.core.algorithms import take_nd

-            new_codes = take_1d(indexer, new_codes, fill_value=-1)
+            new_codes = take_nd(indexer, new_codes, fill_value=-1)
     elif ignore_order or all(not c.ordered for c in to_union):
         # different categories - union and recode
         cats = first.categories.append([c.categories for c in to_union[1:]])

pandas/core/groupby/generic.py

Lines changed: 2 additions & 2 deletions
@@ -565,7 +565,7 @@ def _transform_fast(self, result) -> Series:
         """
         ids, _, ngroup = self.grouper.group_info
         result = result.reindex(self.grouper.result_index, copy=False)
-        out = algorithms.take_1d(result._values, ids)
+        out = algorithms.take_nd(result._values, ids)
         return self.obj._constructor(out, index=self.obj.index, name=self.obj.name)

     def filter(self, func, dropna=True, *args, **kwargs):
@@ -1413,7 +1413,7 @@ def _transform_fast(self, result: DataFrame) -> DataFrame:
         ids, _, ngroup = self.grouper.group_info
         result = result.reindex(self.grouper.result_index, copy=False)
         output = [
-            algorithms.take_1d(result.iloc[:, i].values, ids)
+            algorithms.take_nd(result.iloc[:, i].values, ids)
             for i, _ in enumerate(result.columns)
         ]

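Both _transform_fast variants compute one value per group and broadcast it back to the original rows; the internal take_nd(result._values, ids) is what does the broadcasting. The public-API equivalent:

import pandas as pd

df = pd.DataFrame({"key": ["a", "b", "a", "b"], "val": [1, 2, 3, 4]})
print(df.groupby("key")["val"].transform("mean"))
# 0    2.0
# 1    3.0
# 2    2.0
# 3    3.0
# Name: val, dtype: float64
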
pandas/core/indexes/interval.py

Lines changed: 3 additions & 3 deletions
@@ -42,7 +42,7 @@
 )
 from pandas.core.dtypes.dtypes import IntervalDtype

-from pandas.core.algorithms import take_1d, unique
+from pandas.core.algorithms import take_nd, unique
 from pandas.core.arrays.interval import IntervalArray, _interval_shared_docs
 import pandas.core.common as com
 from pandas.core.indexers import is_valid_positional_slice
@@ -671,9 +671,9 @@ def _get_indexer(
             indexer = np.where(left_indexer == right_indexer, left_indexer, -1)
         elif is_categorical_dtype(target.dtype):
             target = cast("CategoricalIndex", target)
-            # get an indexer for unique categories then propagate to codes via take_1d
+            # get an indexer for unique categories then propagate to codes via take_nd
             categories_indexer = self.get_indexer(target.categories)
-            indexer = take_1d(categories_indexer, target.codes, fill_value=-1)
+            indexer = take_nd(categories_indexer, target.codes, fill_value=-1)
         elif not is_object_dtype(target):
             # homogeneous scalar index: use IntervalTree
             target = self._maybe_convert_i8(target)

pandas/core/indexes/multi.py

Lines changed: 3 additions & 3 deletions
@@ -1349,7 +1349,7 @@ def format(
                 # weird all NA case
                 formatted = [
                     pprint_thing(na if isna(x) else x, escape_chars=("\t", "\r", "\n"))
-                    for x in algos.take_1d(lev._values, level_codes)
+                    for x in algos.take_nd(lev._values, level_codes)
                 ]
             stringified_levels.append(formatted)

@@ -1638,7 +1638,7 @@ def _get_level_values(self, level: int, unique: bool = False) -> Index:
         name = self._names[level]
         if unique:
             level_codes = algos.unique(level_codes)
-        filled = algos.take_1d(lev._values, level_codes, fill_value=lev._na_value)
+        filled = algos.take_nd(lev._values, level_codes, fill_value=lev._na_value)
         return lev._shallow_copy(filled, name=name)

     def get_level_values(self, level):
@@ -1922,7 +1922,7 @@ def _sort_levels_monotonic(self) -> MultiIndex:
                 # indexer to reorder the level codes
                 indexer = ensure_int64(indexer)
                 ri = lib.get_reverse_indexer(indexer, len(indexer))
-                level_codes = algos.take_1d(ri, level_codes)
+                level_codes = algos.take_nd(ri, level_codes)

             new_levels.append(lev)
             new_codes.append(level_codes)

pandas/core/internals/concat.py

Lines changed: 2 additions & 2 deletions
@@ -138,8 +138,8 @@ def _get_mgr_concatenation_plan(mgr: BlockManager, indexers: Dict[int, np.ndarra

     if 0 in indexers:
         ax0_indexer = indexers.pop(0)
-        blknos = algos.take_1d(mgr.blknos, ax0_indexer, fill_value=-1)
-        blklocs = algos.take_1d(mgr.blklocs, ax0_indexer, fill_value=-1)
+        blknos = algos.take_nd(mgr.blknos, ax0_indexer, fill_value=-1)
+        blklocs = algos.take_nd(mgr.blklocs, ax0_indexer, fill_value=-1)
     else:

         if mgr.is_single_block:

pandas/core/internals/construction.py

Lines changed: 1 addition & 1 deletion
@@ -645,7 +645,7 @@ def _list_of_series_to_arrays(
             indexer = indexer_cache[id(index)] = index.get_indexer(columns)

         values = extract_array(s, extract_numpy=True)
-        aligned_values.append(algorithms.take_1d(values, indexer))
+        aligned_values.append(algorithms.take_nd(values, indexer))

     content = np.vstack(aligned_values)
