diff --git a/doc/source/whatsnew/v3.0.0.rst b/doc/source/whatsnew/v3.0.0.rst
index ed0836233553b..321005272817d 100644
--- a/doc/source/whatsnew/v3.0.0.rst
+++ b/doc/source/whatsnew/v3.0.0.rst
@@ -203,6 +203,67 @@ In cases with mixed-resolution inputs, the highest resolution is used:
    In [2]: pd.to_datetime([pd.Timestamp("2024-03-22 11:43:01"), "2024-03-22 11:43:01.002"]).dtype
    Out[2]: dtype('<M8[ms]')
         >>> df.groupby("gender").value_counts()
         gender  education  country
-        female  high       FR         1
-                           US         1
+        female  high       US         1
+                           FR         1
         male    low        FR         2
                            US         1
                 medium     FR         1
@@ -2682,8 +2688,8 @@ def value_counts(
 
         >>> df.groupby("gender").value_counts(ascending=True)
         gender  education  country
-        female  high       FR         1
-                           US         1
+        female  high       US         1
+                           FR         1
         male    low        US         1
                 medium     FR         1
                 low        FR         2
@@ -2691,8 +2697,8 @@ def value_counts(
 
         >>> df.groupby("gender").value_counts(normalize=True)
         gender  education  country
-        female  high       FR         0.50
-                           US         0.50
+        female  high       US         0.50
+                           FR         0.50
         male    low        FR         0.50
                            US         0.25
                 medium     FR         0.25
@@ -2700,16 +2706,16 @@ def value_counts(
 
         >>> df.groupby("gender", as_index=False).value_counts()
            gender education country  count
-        0  female      high      FR      1
-        1  female      high      US      1
+        0  female      high      US      1
+        1  female      high      FR      1
         2    male       low      FR      2
         3    male       low      US      1
         4    male    medium      FR      1
 
         >>> df.groupby("gender", as_index=False).value_counts(normalize=True)
            gender education country  proportion
-        0  female      high      FR        0.50
-        1  female      high      US        0.50
+        0  female      high      US        0.50
+        1  female      high      FR        0.50
         2    male       low      FR        0.50
         3    male       low      US        0.25
         4    male    medium      FR        0.25
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py
index e2410788ea95e..68314567d1b5e 100644
--- a/pandas/core/groupby/groupby.py
+++ b/pandas/core/groupby/groupby.py
@@ -2519,7 +2519,7 @@ def _value_counts(
         grouper, _, _ = get_grouper(
             df,
             key=key,
-            sort=self.sort,
+            sort=False,
             observed=False,
             dropna=dropna,
         )
@@ -2528,7 +2528,7 @@ def _value_counts(
         # Take the size of the overall columns
         gb = df.groupby(
             groupings,
-            sort=self.sort,
+            sort=False,
             observed=self.observed,
             dropna=self.dropna,
         )
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py
index 0e99178642715..a82e77140d274 100644
--- a/pandas/core/groupby/ops.py
+++ b/pandas/core/groupby/ops.py
@@ -755,6 +755,7 @@ def result_index_and_ids(self) -> tuple[Index, npt.NDArray[np.intp]]:
         obs = [
             ping._observed or not ping._passed_categorical for ping in self.groupings
         ]
+        sorts = [ping._sort for ping in self.groupings]
         # When passed a categorical grouping, keep all categories
         for k, (ping, level) in enumerate(zip(self.groupings, levels)):
             if ping._passed_categorical:
@@ -765,7 +766,9 @@ def result_index_and_ids(self) -> tuple[Index, npt.NDArray[np.intp]]:
             result_index.name = self.names[0]
             ids = ensure_platform_int(self.codes[0])
         elif all(obs):
-            result_index, ids = self._ob_index_and_ids(levels, self.codes, self.names)
+            result_index, ids = self._ob_index_and_ids(
+                levels, self.codes, self.names, sorts
+            )
         elif not any(obs):
             result_index, ids = self._unob_index_and_ids(levels, self.codes, self.names)
         else:
@@ -778,6 +781,7 @@ def result_index_and_ids(self) -> tuple[Index, npt.NDArray[np.intp]]:
                 levels=[levels[idx] for idx in ob_indices],
                 codes=[codes[idx] for idx in ob_indices],
                 names=[names[idx] for idx in ob_indices],
+                sorts=[sorts[idx] for idx in ob_indices],
             )
             unob_index, unob_ids = self._unob_index_and_ids(
                 levels=[levels[idx] for idx in unob_indices],
@@ -800,9 +804,18 @@ def result_index_and_ids(self) -> tuple[Index, npt.NDArray[np.intp]]:
             ).reorder_levels(index)
             ids = len(unob_index) * ob_ids + unob_ids
 
-            if self._sort:
+            if any(sorts):
                 # Sort result_index and recode ids using the new order
-                sorter = result_index.argsort()
+                n_levels = len(sorts)
+                drop_levels = [
+                    n_levels - idx
+                    for idx, sort in enumerate(reversed(sorts), 1)
+                    if not sort
+                ]
+                if len(drop_levels) > 0:
+                    sorter = result_index._drop_level_numbers(drop_levels).argsort()
+                else:
+                    sorter = result_index.argsort()
                 result_index = result_index.take(sorter)
                 _, index = np.unique(sorter, return_index=True)
                 ids = ensure_platform_int(ids)
@@ -837,10 +850,13 @@ def _ob_index_and_ids(
         levels: list[Index],
         codes: list[npt.NDArray[np.intp]],
         names: list[Hashable],
+        sorts: list[bool],
     ) -> tuple[MultiIndex, npt.NDArray[np.intp]]:
+        consistent_sorting = all(sorts[0] == sort for sort in sorts[1:])
+        sort_in_compress = sorts[0] if consistent_sorting else False
         shape = tuple(len(level) for level in levels)
         group_index = get_group_index(codes, shape, sort=True, xnull=True)
-        ob_ids, obs_group_ids = compress_group_index(group_index, sort=self._sort)
+        ob_ids, obs_group_ids = compress_group_index(group_index, sort=sort_in_compress)
         ob_ids = ensure_platform_int(ob_ids)
         ob_index_codes = decons_obs_group_ids(
             ob_ids, obs_group_ids, shape, codes, xnull=True
@@ -851,6 +867,21 @@ def _ob_index_and_ids(
             names=names,
             verify_integrity=False,
         )
+        if not consistent_sorting:
+            # Sort by the levels where the corresponding sort argument is True
+            n_levels = len(sorts)
+            drop_levels = [
+                n_levels - idx
+                for idx, sort in enumerate(reversed(sorts), 1)
+                if not sort
+            ]
+            if len(drop_levels) > 0:
+                sorter = ob_index._drop_level_numbers(drop_levels).argsort()
+            else:
+                sorter = ob_index.argsort()
+            ob_index = ob_index.take(sorter)
+            _, index = np.unique(sorter, return_index=True)
+            ob_ids = np.where(ob_ids == -1, -1, index.take(ob_ids))
         ob_ids = ensure_platform_int(ob_ids)
         return ob_index, ob_ids
diff --git a/pandas/tests/frame/methods/test_value_counts.py b/pandas/tests/frame/methods/test_value_counts.py
index 7670b53f23173..de5029b9f18b2 100644
--- a/pandas/tests/frame/methods/test_value_counts.py
+++ b/pandas/tests/frame/methods/test_value_counts.py
@@ -128,7 +128,7 @@ def test_data_frame_value_counts_dropna_true(nulls_fixture):
     expected = pd.Series(
         data=[1, 1],
         index=pd.MultiIndex.from_arrays(
-            [("Beth", "John"), ("Louise", "Smith")], names=["first_name", "middle_name"]
+            [("John", "Beth"), ("Smith", "Louise")], names=["first_name", "middle_name"]
         ),
         name="count",
     )
@@ -156,7 +156,7 @@ def test_data_frame_value_counts_dropna_false(nulls_fixture):
                 pd.Index(["Anne", "Beth", "John"]),
                 pd.Index(["Louise", "Smith", np.nan]),
             ],
-            codes=[[0, 1, 2, 2], [2, 0, 1, 2]],
+            codes=[[2, 0, 2, 1], [1, 2, 2, 0]],
             names=["first_name", "middle_name"],
         ),
         name="count",
diff --git a/pandas/tests/groupby/methods/test_value_counts.py b/pandas/tests/groupby/methods/test_value_counts.py
index 8f8f7f64aba75..8f3022fbe551c 100644
--- a/pandas/tests/groupby/methods/test_value_counts.py
+++ b/pandas/tests/groupby/methods/test_value_counts.py
@@ -255,10 +255,10 @@ def test_basic(education_df, request):
         index=MultiIndex.from_tuples(
             [
                 ("FR", "male", "low"),
-                ("FR", "female", "high"),
                 ("FR", "male", "medium"),
-                ("US", "female", "high"),
+                ("FR", "female", "high"),
                 ("US", "male", "low"),
+                ("US", "female", "high"),
             ],
             names=["country", "gender", "education"],
         ),
@@ -472,11 +472,11 @@ def test_data_frame_value_counts(
         (
             False,
             False,
-            [0, 1, 3, 5, 7, 6, 8, 2, 4],
+            [0, 1, 3, 5, 6, 7, 8, 2, 4],
             [0.5, 0.5, 1.0, 0.25, 0.25, 0.25, 0.25, 1.0, 1.0],
         ),
         (False, True, [0, 1, 3, 5, 2, 4], [0.5, 0.5, 1.0, 1.0, 1.0, 1.0]),
-        (True, False, [0, 1, 5, 7, 6, 8], [0.5, 0.5, 0.25, 0.25, 0.25, 0.25]),
+        (True, False, [0, 1, 5, 6, 7, 8], [0.5, 0.5, 0.25, 0.25, 0.25, 0.25]),
         (True, True, [0, 1, 5], [0.5, 0.5, 1.0]),
     ],
 )
@@ -518,7 +518,7 @@ def test_dropna_combinations(
             True,
             [1, 1],
             MultiIndex.from_arrays(
-                [(1, 1), ("Beth", "John"), ("Louise", "Smith")],
+                [(1, 1), ("John", "Beth"), ("Smith", "Louise")],
                 names=["key", "first_name", "middle_name"],
             ),
         ),
@@ -531,7 +531,7 @@ def test_dropna_combinations(
                     Index(["Anne", "Beth", "John"]),
                     Index(["Louise", "Smith", np.nan]),
                 ],
-                codes=[[0, 0, 0, 0], [0, 1, 2, 2], [2, 0, 1, 2]],
+                codes=[[0, 0, 0, 0], [2, 0, 2, 1], [1, 2, 2, 0]],
                 names=["key", "first_name", "middle_name"],
             ),
         ),
@@ -609,17 +609,17 @@ def test_categorical_single_grouper_with_only_observed_categories(
     expected_index = MultiIndex.from_tuples(
         [
             ("FR", "male", "low"),
-            ("FR", "female", "high"),
             ("FR", "male", "medium"),
+            ("FR", "female", "high"),
+            ("FR", "male", "high"),
             ("FR", "female", "low"),
             ("FR", "female", "medium"),
-            ("FR", "male", "high"),
-            ("US", "female", "high"),
             ("US", "male", "low"),
+            ("US", "female", "high"),
+            ("US", "male", "medium"),
+            ("US", "male", "high"),
             ("US", "female", "low"),
             ("US", "female", "medium"),
-            ("US", "male", "high"),
-            ("US", "male", "medium"),
         ],
         names=["country", "gender", "education"],
     )
@@ -711,17 +711,17 @@ def test_categorical_single_grouper_observed_true(
     expected_index = [
         ("FR", "male", "low"),
-        ("FR", "female", "high"),
         ("FR", "male", "medium"),
+        ("FR", "female", "high"),
+        ("FR", "male", "high"),
         ("FR", "female", "low"),
         ("FR", "female", "medium"),
-        ("FR", "male", "high"),
-        ("US", "female", "high"),
         ("US", "male", "low"),
+        ("US", "female", "high"),
+        ("US", "male", "medium"),
+        ("US", "male", "high"),
         ("US", "female", "low"),
         ("US", "female", "medium"),
-        ("US", "male", "high"),
-        ("US", "male", "medium"),
     ]
 
     assert_categorical_single_grouper(
@@ -791,23 +791,23 @@ def test_categorical_single_grouper_observed_false(
     expected_index = [
         ("FR", "male", "low"),
-        ("FR", "female", "high"),
         ("FR", "male", "medium"),
+        ("FR", "female", "high"),
+        ("FR", "male", "high"),
         ("FR", "female", "low"),
         ("FR", "female", "medium"),
-        ("FR", "male", "high"),
-        ("US", "female", "high"),
         ("US", "male", "low"),
+        ("US", "female", "high"),
+        ("US", "male", "medium"),
+        ("US", "male", "high"),
         ("US", "female", "low"),
         ("US", "female", "medium"),
-        ("US", "male", "high"),
-        ("US", "male", "medium"),
-        ("ASIA", "female", "high"),
-        ("ASIA", "female", "low"),
-        ("ASIA", "female", "medium"),
-        ("ASIA", "male", "high"),
         ("ASIA", "male", "low"),
         ("ASIA", "male", "medium"),
+        ("ASIA", "male", "high"),
+        ("ASIA", "female", "low"),
+        ("ASIA", "female", "medium"),
+        ("ASIA", "female", "high"),
     ]
 
     assert_categorical_single_grouper(
@@ -837,8 +837,8 @@ def test_categorical_single_grouper_observed_false(
                 ("US", "high", "male"),
                 ("US", "low", "male"),
                 ("US", "low", "female"),
-                ("US", "medium", "female"),
                 ("US", "medium", "male"),
+                ("US", "medium", "female"),
             ],
         ),
         (
@@ -949,17 +949,17 @@ def test_categorical_non_groupers(
     expected_index = [
         ("FR", "male", "low"),
-        ("FR", "female", "high"),
         ("FR", "male", "medium"),
+        ("FR", "female", "high"),
+        ("FR", "male", "high"),
         ("FR", "female", "low"),
         ("FR", "female", "medium"),
-        ("FR", "male", "high"),
-        ("US", "female", "high"),
         ("US", "male", "low"),
+        ("US", "female", "high"),
+        ("US", "male", "medium"),
+        ("US", "male", "high"),
         ("US", "female", "low"),
         ("US", "female", "medium"),
-        ("US", "male", "high"),
-        ("US", "male", "medium"),
     ]
 
     expected_series = Series(
         data=expected_data,
@@ -1178,7 +1178,7 @@ def test_value_counts_sort(sort, vc_sort, normalize):
     if sort and vc_sort:
         taker = [0, 1, 2]
     elif sort and not vc_sort:
-        taker = [0, 1, 2]
+        taker = [1, 0, 2]
     elif not sort and vc_sort:
         taker = [0, 2, 1]
     else: