3 changes: 3 additions & 0 deletions bigframes/core/groupby/dataframe_group_by.py
@@ -177,6 +177,9 @@ def __iter__(self) -> Iterable[Tuple[blocks.Label, df.DataFrame]]:
             filtered_df = df.DataFrame(filtered_block)
             yield group_keys, filtered_df
 
+    def __len__(self) -> int:
+        return len(self.agg([]))
+
     def size(self) -> typing.Union[df.DataFrame, series.Series]:
        agg_block, _ = self._block.aggregate_size(
            by_column_ids=self._by_col_ids,
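For context, a minimal usage sketch of what the new `__len__` enables (not part of the PR; the local-dict `bpd.DataFrame` constructor call and the group values are illustrative assumptions):

import bigframes.pandas as bpd

# Hypothetical example data, for illustration only.
df = bpd.DataFrame({"key": ["a", "b", "a", "c"], "val": [1, 2, 3, 4]})
gb = df.groupby("key")

# __len__ delegates to self.agg([]): an empty aggregation still produces one
# row per group key, so its length is the number of groups.
assert len(gb) == 3  # groups: "a", "b", "c"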
9 changes: 6 additions & 3 deletions bigframes/core/groupby/series_group_by.py
@@ -108,6 +108,9 @@ def __iter__(self) -> Iterable[Tuple[blocks.Label, series.Series]]:
             filtered_series.name = self._value_name
             yield group_keys, filtered_series
 
+    def __len__(self) -> int:
+        return len(self.agg([]))
+
     def all(self) -> series.Series:
         return self._aggregate(agg_ops.all_op)
 
@@ -275,9 +278,9 @@ def agg(self, func=None) -> typing.Union[df.DataFrame, series.Series]:
         if column_names:
             agg_block = agg_block.with_column_labels(column_names)
 
-        if len(aggregations) > 1:
-            return df.DataFrame(agg_block)
-        return series.Series(agg_block)
+        if len(aggregations) == 1:
+            return series.Series(agg_block)
+        return df.DataFrame(agg_block)
 
     aggregate = agg
 
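The return-type change above is what makes `__len__` work: with the old `> 1` check, `agg([])` (zero aggregations) fell into the Series branch; with `== 1`, only a single aggregation returns a Series, while zero or several return a DataFrame. A hedged sketch of the expected behavior, reusing fixture names from the tests below (not part of the PR):

grouped = scalars_df_index.groupby("bool_col")["int64_col"]

# Exactly one aggregation -> Series.
s = grouped.agg(["sum"])

# Several aggregations -> DataFrame with one column per aggregation.
d = grouped.agg(["sum", "mean"])

# Zero aggregations -> DataFrame with one row per group and no value columns,
# which is what the new SeriesGroupBy.__len__ counts via len(self.agg([])).
n_groups = len(grouped.agg([]))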
16 changes: 16 additions & 0 deletions tests/system/small/test_groupby.py
@@ -61,6 +61,15 @@ def test_dataframe_groupby_head(scalars_df_index, scalars_pandas_df_index):
     pd.testing.assert_frame_equal(pd_result, bf_result, check_dtype=False)
 
 
+def test_dataframe_groupby_len(scalars_df_index, scalars_pandas_df_index):
+    col_names = ["int64_too", "float64_col", "int64_col", "bool_col", "string_col"]
+
+    bf_result = len(scalars_df_index[col_names].groupby("bool_col"))
+    pd_result = len(scalars_pandas_df_index[col_names].groupby("bool_col"))
+
+    assert bf_result == pd_result
+
+
 def test_dataframe_groupby_median(scalars_df_index, scalars_pandas_df_index):
     col_names = ["int64_too", "float64_col", "int64_col", "bool_col", "string_col"]
     bf_result = (
@@ -668,6 +677,13 @@ def test_dataframe_groupby_last(
 # ==============
 
 
+def test_series_groupby_len(scalars_df_index, scalars_pandas_df_index):
+    bf_result = len(scalars_df_index.groupby("bool_col")["int64_col"])
+    pd_result = len(scalars_pandas_df_index.groupby("bool_col")["int64_col"])
+
+    assert bf_result == pd_result
+
+
 @pytest.mark.parametrize(
     ("agg"),
     [