diff --git a/doc/nextgen/demo.ipynb b/doc/nextgen/demo.ipynb index 91233a3d94..fc655fdfec 100644 --- a/doc/nextgen/demo.ipynb +++ b/doc/nextgen/demo.ipynb @@ -250,7 +250,7 @@ "id": "ae0e288e-74cf-461c-8e68-786e364032a1", "metadata": {}, "source": [ - "### Data transformation: the Stat\n", + "### Data transformations: the Stat\n", "\n", "\n", "Built-in statistical transformations are one of seaborn's key features. But currently, they are tied up with the different visual representations. E.g., you can aggregate data in `lineplot`, but not in `scatterplot`.\n", @@ -273,7 +273,7 @@ "id": "1788d935-5ad5-4262-993f-8d48c66631b9", "metadata": {}, "source": [ - "The `Stat` is computed on subsets of data defined by the semantic mappings:" + "A `Stat` is computed on subsets of data defined by the semantic mappings:" ] }, { @@ -323,7 +323,7 @@ "outputs": [], "source": [ "class PeakAnnotation(so.Mark):\n", - " def plot(self, split_generator, scales, orient):\n", + " def _plot(self, split_generator, scales, orient):\n", " for keys, data, ax in split_generator():\n", " ix = data[\"y\"].idxmax()\n", " ax.annotate(\n", @@ -388,7 +388,7 @@ "source": [ "(\n", " so.Plot(tips, \"day\", \"total_bill\", color=\"time\")\n", - " .add(so.Bar(), so.Agg(), move=so.Dodge())\n", + " .add(so.Dot(), so.Dodge())\n", ")" ] }, @@ -409,7 +409,7 @@ "source": [ "(\n", " so.Plot(tips, \"day\", \"total_bill\", color=\"time\")\n", - " .add(so.Bar(), so.Agg(), move=so.Dodge(empty=\"fill\", gap=.1))\n", + " .add(so.Bar(), so.Agg(), so.Dodge(empty=\"fill\", gap=.1))\n", ")" ] }, @@ -430,7 +430,7 @@ "source": [ "(\n", " so.Plot(tips, \"day\", \"total_bill\", color=\"time\", alpha=\"sex\")\n", - " .add(so.Bar(), so.Agg(), move=so.Dodge())\n", + " .add(so.Bar(), so.Agg(), so.Dodge())\n", ")" ] }, @@ -451,7 +451,7 @@ "source": [ "(\n", " so.Plot(tips, \"day\", \"total_bill\", color=\"time\", alpha=\"smoker\")\n", - " .add(so.Dot(), move=so.Dodge(by=[\"color\"]))\n", + " .add(so.Dot(), so.Dodge(by=[\"color\"]))\n", ")" ] }, @@ -460,7 +460,7 @@ "id": "c001004a-6771-46eb-b231-6accf88fe330", "metadata": {}, "source": [ - "It's also possible to stack multiple moves or kinds of moves by passing a list:" + "It's also possible to stack multiple moves or kinds of moves:" ] }, { @@ -472,10 +472,7 @@ "source": [ "(\n", " so.Plot(tips, \"day\", \"total_bill\", color=\"time\", alpha=\"smoker\")\n", - " .add(\n", - " so.Dot(),\n", - " move=[so.Dodge(by=[\"color\"]), so.Jitter(.5)]\n", - " )\n", + " .add(so.Dot(), so.Dodge(by=[\"color\"]), so.Jitter(.5))\n", ")" ] }, @@ -568,8 +565,8 @@ " so.Plot(planets, x=\"mass\", y=\"distance\", color=\"orbital_period\")\n", " .scale(\n", " x=\"log\",\n", - " y=so.Continuous(transform=\"log\").tick(at=[3, 10, 30, 100, 300]),\n", - " color=so.Continuous(\"rocket\", transform=\"log\"),\n", + " y=so.Continuous(trans=\"log\").tick(at=[3, 10, 30, 100, 300]),\n", + " color=so.Continuous(\"rocket\", trans=\"log\"),\n", " )\n", " .add(so.Dots())\n", ")" diff --git a/seaborn/_core/moves.py b/seaborn/_core/moves.py index 14469be83d..d1247aeaa7 100644 --- a/seaborn/_core/moves.py +++ b/seaborn/_core/moves.py @@ -6,6 +6,7 @@ from pandas import DataFrame from seaborn._core.groupby import GroupBy +from seaborn._core.scales import Scale @dataclass @@ -13,7 +14,9 @@ class Move: group_by_orient: ClassVar[bool] = True - def __call__(self, data: DataFrame, groupby: GroupBy, orient: str) -> DataFrame: + def __call__( + self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale], + ) -> DataFrame: raise 
NotImplementedError @@ -31,7 +34,9 @@ class Jitter(Move): # TODO what is the best way to have a reasonable default? # The problem is that "reasonable" seems dependent on the mark - def __call__(self, data: DataFrame, groupby: GroupBy, orient: str) -> DataFrame: + def __call__( + self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale], + ) -> DataFrame: # TODO is it a problem that GroupBy is not used for anything here? # Should we type it as optional? @@ -68,7 +73,9 @@ class Dodge(Move): # TODO should the default be an "all" singleton? by: Optional[list[str]] = None - def __call__(self, data: DataFrame, groupby: GroupBy, orient: str) -> DataFrame: + def __call__( + self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale], + ) -> DataFrame: grouping_vars = [v for v in groupby.order if v in data] groups = groupby.agg(data, {"width": "max"}) @@ -138,7 +145,9 @@ def _stack(self, df, orient): return df - def __call__(self, data: DataFrame, groupby: GroupBy, orient: str) -> DataFrame: + def __call__( + self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale], + ) -> DataFrame: # TODO where to ensure that other semantic variables are sorted properly? # TODO why are we not using the passed in groupby here? @@ -154,7 +163,9 @@ class Shift(Move): x: float = 0 y: float = 0 - def __call__(self, data: DataFrame, groupby: GroupBy, orient: str) -> DataFrame: + def __call__( + self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale], + ) -> DataFrame: data = data.copy(deep=False) data["x"] = data["x"] + self.x @@ -188,7 +199,9 @@ def _norm(self, df, var): return df - def __call__(self, data: DataFrame, groupby: GroupBy, orient: str) -> DataFrame: + def __call__( + self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale], + ) -> DataFrame: other = {"x": "y", "y": "x"}[orient] return groupby.apply(data, self._norm, other) diff --git a/seaborn/_core/plot.py b/seaborn/_core/plot.py index 1a96d3c6cb..9380e0dd40 100644 --- a/seaborn/_core/plot.py +++ b/seaborn/_core/plot.py @@ -11,7 +11,7 @@ from contextlib import contextmanager from collections import abc from collections.abc import Callable, Generator, Hashable -from typing import Any, cast +from typing import Any, List, Optional, cast from cycler import cycler import pandas as pd @@ -338,16 +338,14 @@ def on(self, target: Axes | SubFigure | Figure) -> Plot: def add( self, mark: Mark, - stat: Stat | None = None, - move: Move | list[Move] | None = None, - *, + *transforms: Stat | Mark, orient: str | None = None, legend: bool = True, data: DataSource = None, **variables: VariableSpec, ) -> Plot: """ - Define a layer of the visualization. + Define a layer of the visualization in terms of mark and data transform(s). This is the main method for specifying how the data should be visualized. It can be called multiple times with different arguments to define @@ -357,48 +355,63 @@ def add( ---------- mark : :class:`seaborn.objects.Mark` The visual representation of the data to use in this layer. - stat : :class:`seaborn.objects.Stat` - A transformation applied to the data before plotting. - move : :class:`seaborn.objects.Move` - Additional transformation(s) to handle over-plotting. - legend : bool - Option to suppress the mark/mappings for this layer from the legend. + transforms : :class:`seaborn.objects.Stat` or :class:`seaborn.objects.Move` + Objects representing transforms to be applied before plotting the data. 
+ Currently, at most one :class:`seaborn.objects.Stat` can be used, and it + must be passed first. This constraint will be relaxed in the future. orient : "x", "y", "v", or "h" The orientation of the mark, which affects how the stat is computed. Typically corresponds to the axis that defines groups for aggregation. The "v" (vertical) and "h" (horizontal) options are synonyms for "x" / "y", but may be more intuitive with some marks. When not provided, an orientation will be inferred from characteristics of the data and scales. + legend : bool + Option to suppress the mark/mappings for this layer from the legend. data : DataFrame or dict Data source to override the global source provided in the constructor. variables : data vectors or identifiers Additional layer-specific variables, including variables that will be - passed directly to the stat without scaling. + passed directly to the transforms without scaling. """ if not isinstance(mark, Mark): msg = f"mark must be a Mark instance, not {type(mark)!r}." raise TypeError(msg) - if stat is not None and not isinstance(stat, Stat): - msg = f"stat must be a Stat instance, not {type(stat)!r}." + # TODO This API for transforms was a late decision, and previously Plot.add + # accepted 0 or 1 Stat instances and 0, 1, or a list of Move instances. + # It will take some work to refactor the internals so that Stat and Move are + # treated identically, and until then we'll need to "unpack" the transforms + # here and enforce limitations on the order / types. + + stat: Optional[Stat] + move: Optional[List[Move]] + error = False + if not transforms: + stat, move = None, None + elif isinstance(transforms[0], Stat): + stat = transforms[0] + move = [m for m in transforms[1:] if isinstance(m, Move)] + error = len(move) != len(transforms) - 1 + else: + stat = None + move = [m for m in transforms if isinstance(m, Move)] + error = len(move) != len(transforms) + + if error: + msg = " ".join([ + "Transforms must have at most one Stat type (in the first position),", + "and all others must be a Move type. Given transform type(s):", + ", ".join(str(type(t).__name__) for t in transforms) + "." + ]) raise TypeError(msg) - # TODO decide how to allow Mark to have default Stat/Move - # if stat is None and hasattr(mark, "default_stat"): - # stat = mark.default_stat() - - # TODO it doesn't work to supply scalars to variables, but that would be nice - - # TODO accept arbitrary variables defined by the stat (/move?) here - # (but not in the Plot constructor) - # Should stat variables ever go in the constructor, or just in the add call? 
- new = self._clone() new._layers.append({ "mark": mark, "stat": stat, "move": move, + # TODO it doesn't work to supply scalars to variables, but it should "vars": variables, "source": data, "legend": legend, @@ -1232,7 +1245,7 @@ def get_order(var): move_groupers.insert(0, orient) order = {var: get_order(var) for var in move_groupers} groupby = GroupBy(order) - df = move_step(df, groupby, orient) + df = move_step(df, groupby, orient, scales) df = self._unscale_coords(subplots, df, orient) diff --git a/tests/_core/test_moves.py b/tests/_core/test_moves.py index a9d221da19..bac86d12e2 100644 --- a/tests/_core/test_moves.py +++ b/tests/_core/test_moves.py @@ -83,7 +83,7 @@ def test_width(self, df): width = .4 orient = "x" groupby = self.get_groupby(df, orient) - res = Jitter(width=width)(df, groupby, orient) + res = Jitter(width=width)(df, groupby, orient, {}) self.check_same(res, df, "y", "grp2", "width") self.check_pos(res, df, "x", width * df["width"]) @@ -92,7 +92,7 @@ def test_x(self, df): val = .2 orient = "x" groupby = self.get_groupby(df, orient) - res = Jitter(x=val)(df, groupby, orient) + res = Jitter(x=val)(df, groupby, orient, {}) self.check_same(res, df, "y", "grp2", "width") self.check_pos(res, df, "x", val) @@ -101,7 +101,7 @@ def test_y(self, df): val = .2 orient = "x" groupby = self.get_groupby(df, orient) - res = Jitter(y=val)(df, groupby, orient) + res = Jitter(y=val)(df, groupby, orient, {}) self.check_same(res, df, "x", "grp2", "width") self.check_pos(res, df, "y", val) @@ -110,8 +110,8 @@ def test_seed(self, df): kws = dict(width=.2, y=.1, seed=0) orient = "x" groupby = self.get_groupby(df, orient) - res1 = Jitter(**kws)(df, groupby, orient) - res2 = Jitter(**kws)(df, groupby, orient) + res1 = Jitter(**kws)(df, groupby, orient, {}) + res2 = Jitter(**kws)(df, groupby, orient, {}) for var in "xy": assert_series_equal(res1[var], res2[var]) @@ -123,7 +123,7 @@ class TestDodge(MoveFixtures): def test_default(self, toy_df): groupby = GroupBy(["x", "grp"]) - res = Dodge()(toy_df, groupby, "x") + res = Dodge()(toy_df, groupby, "x", {}) assert_array_equal(res["y"], [1, 2, 3]), assert_array_almost_equal(res["x"], [-.2, .2, 1.2]) @@ -132,7 +132,7 @@ def test_default(self, toy_df): def test_fill(self, toy_df): groupby = GroupBy(["x", "grp"]) - res = Dodge(empty="fill")(toy_df, groupby, "x") + res = Dodge(empty="fill")(toy_df, groupby, "x", {}) assert_array_equal(res["y"], [1, 2, 3]), assert_array_almost_equal(res["x"], [-.2, .2, 1]) @@ -141,7 +141,7 @@ def test_fill(self, toy_df): def test_drop(self, toy_df): groupby = GroupBy(["x", "grp"]) - res = Dodge("drop")(toy_df, groupby, "x") + res = Dodge("drop")(toy_df, groupby, "x", {}) assert_array_equal(res["y"], [1, 2, 3]) assert_array_almost_equal(res["x"], [-.2, .2, 1]) @@ -150,7 +150,7 @@ def test_drop(self, toy_df): def test_gap(self, toy_df): groupby = GroupBy(["x", "grp"]) - res = Dodge(gap=.25)(toy_df, groupby, "x") + res = Dodge(gap=.25)(toy_df, groupby, "x", {}) assert_array_equal(res["y"], [1, 2, 3]) assert_array_almost_equal(res["x"], [-.2, .2, 1.2]) @@ -159,7 +159,7 @@ def test_gap(self, toy_df): def test_widths_default(self, toy_df_widths): groupby = GroupBy(["x", "grp"]) - res = Dodge()(toy_df_widths, groupby, "x") + res = Dodge()(toy_df_widths, groupby, "x", {}) assert_array_equal(res["y"], [1, 2, 3]) assert_array_almost_equal(res["x"], [-.08, .32, 1.1]) @@ -168,7 +168,7 @@ def test_widths_default(self, toy_df_widths): def test_widths_fill(self, toy_df_widths): groupby = GroupBy(["x", "grp"]) - res = 
Dodge(empty="fill")(toy_df_widths, groupby, "x") + res = Dodge(empty="fill")(toy_df_widths, groupby, "x", {}) assert_array_equal(res["y"], [1, 2, 3]) assert_array_almost_equal(res["x"], [-.08, .32, 1]) @@ -177,7 +177,7 @@ def test_widths_fill(self, toy_df_widths): def test_widths_drop(self, toy_df_widths): groupby = GroupBy(["x", "grp"]) - res = Dodge(empty="drop")(toy_df_widths, groupby, "x") + res = Dodge(empty="drop")(toy_df_widths, groupby, "x", {}) assert_array_equal(res["y"], [1, 2, 3]) assert_array_almost_equal(res["x"], [-.08, .32, 1]) @@ -186,7 +186,7 @@ def test_widths_drop(self, toy_df_widths): def test_faceted_default(self, toy_df_facets): groupby = GroupBy(["x", "grp", "col"]) - res = Dodge()(toy_df_facets, groupby, "x") + res = Dodge()(toy_df_facets, groupby, "x", {}) assert_array_equal(res["y"], [1, 2, 3, 1, 2, 3]) assert_array_almost_equal(res["x"], [-.2, .2, .8, .2, .8, 2.2]) @@ -195,7 +195,7 @@ def test_faceted_default(self, toy_df_facets): def test_faceted_fill(self, toy_df_facets): groupby = GroupBy(["x", "grp", "col"]) - res = Dodge(empty="fill")(toy_df_facets, groupby, "x") + res = Dodge(empty="fill")(toy_df_facets, groupby, "x", {}) assert_array_equal(res["y"], [1, 2, 3, 1, 2, 3]) assert_array_almost_equal(res["x"], [-.2, .2, 1, 0, 1, 2]) @@ -204,7 +204,7 @@ def test_faceted_fill(self, toy_df_facets): def test_faceted_drop(self, toy_df_facets): groupby = GroupBy(["x", "grp", "col"]) - res = Dodge(empty="drop")(toy_df_facets, groupby, "x") + res = Dodge(empty="drop")(toy_df_facets, groupby, "x", {}) assert_array_equal(res["y"], [1, 2, 3, 1, 2, 3]) assert_array_almost_equal(res["x"], [-.2, .2, 1, 0, 1, 2]) @@ -215,7 +215,7 @@ def test_orient(self, toy_df): df = toy_df.assign(x=toy_df["y"], y=toy_df["x"]) groupby = GroupBy(["y", "grp"]) - res = Dodge("drop")(df, groupby, "y") + res = Dodge("drop")(df, groupby, "y", {}) assert_array_equal(res["x"], [1, 2, 3]) assert_array_almost_equal(res["y"], [-.2, .2, 1]) @@ -227,7 +227,7 @@ def test_orient(self, toy_df): def test_single_semantic(self, df, grp): groupby = GroupBy(["x", grp]) - res = Dodge()(df, groupby, "x") + res = Dodge()(df, groupby, "x", {}) levels = categorical_order(df[grp]) w, n = 0.8, len(levels) @@ -245,7 +245,7 @@ def test_single_semantic(self, df, grp): def test_two_semantics(self, df): groupby = GroupBy(["x", "grp2", "grp3"]) - res = Dodge()(df, groupby, "x") + res = Dodge()(df, groupby, "x", {}) levels = categorical_order(df["grp2"]), categorical_order(df["grp3"]) w, n = 0.8, len(levels[0]) * len(levels[1]) @@ -266,7 +266,7 @@ class TestStack(MoveFixtures): def test_basic(self, toy_df): groupby = GroupBy(["color", "group"]) - res = Stack()(toy_df, groupby, "x") + res = Stack()(toy_df, groupby, "x", {}) assert_array_equal(res["x"], [0, 0, 1]) assert_array_equal(res["y"], [1, 3, 3]) @@ -275,7 +275,7 @@ def test_basic(self, toy_df): def test_faceted(self, toy_df_facets): groupby = GroupBy(["color", "group"]) - res = Stack()(toy_df_facets, groupby, "x") + res = Stack()(toy_df_facets, groupby, "x", {}) assert_array_equal(res["x"], [0, 0, 1, 0, 1, 2]) assert_array_equal(res["y"], [1, 3, 3, 1, 2, 3]) @@ -288,7 +288,7 @@ def test_misssing_data(self, toy_df): "y": [2, np.nan, 1], "baseline": [0, 0, 0], }) - res = Stack()(df, None, "x") + res = Stack()(df, None, "x", {}) assert_array_equal(res["y"], [2, np.nan, 3]) assert_array_equal(res["baseline"], [0, np.nan, 2]) @@ -299,7 +299,7 @@ def test_baseline_homogeneity_check(self, toy_df): move = Stack() err = "Stack move cannot be used when baselines" with 
pytest.raises(RuntimeError, match=err): - move(toy_df, groupby, "x") + move(toy_df, groupby, "x", {}) class TestShift(MoveFixtures): @@ -307,7 +307,7 @@ class TestShift(MoveFixtures): def test_default(self, toy_df): gb = GroupBy(["color", "group"]) - res = Shift()(toy_df, gb, "x") + res = Shift()(toy_df, gb, "x", {}) for col in toy_df: assert_series_equal(toy_df[col], res[col]) @@ -315,7 +315,7 @@ def test_default(self, toy_df): def test_moves(self, toy_df, x, y): gb = GroupBy(["color", "group"]) - res = Shift(x=x, y=y)(toy_df, gb, "x") + res = Shift(x=x, y=y)(toy_df, gb, "x", {}) assert_array_equal(res["x"], toy_df["x"] + x) assert_array_equal(res["y"], toy_df["y"] + y) @@ -327,7 +327,7 @@ def test_default_no_groups(self, df, orient): other = {"x": "y", "y": "x"}[orient] gb = GroupBy(["null"]) - res = Norm()(df, gb, orient) + res = Norm()(df, gb, orient, {}) assert res[other].max() == pytest.approx(1) @pytest.mark.parametrize("orient", ["x", "y"]) @@ -335,24 +335,24 @@ def test_default_groups(self, df, orient): other = {"x": "y", "y": "x"}[orient] gb = GroupBy(["grp2"]) - res = Norm()(df, gb, orient) + res = Norm()(df, gb, orient, {}) for _, grp in res.groupby("grp2"): assert grp[other].max() == pytest.approx(1) def test_sum(self, df): gb = GroupBy(["null"]) - res = Norm("sum")(df, gb, "x") + res = Norm("sum")(df, gb, "x", {}) assert res["y"].sum() == pytest.approx(1) def test_where(self, df): gb = GroupBy(["null"]) - res = Norm(where="x == 2")(df, gb, "x") + res = Norm(where="x == 2")(df, gb, "x", {}) assert res.loc[res["x"] == 2, "y"].max() == pytest.approx(1) def test_percent(self, df): gb = GroupBy(["null"]) - res = Norm(percent=True)(df, gb, "x") + res = Norm(percent=True)(df, gb, "x", {}) assert res["y"].max() == pytest.approx(100) diff --git a/tests/_core/test_plot.py b/tests/_core/test_plot.py index 14c40672fa..d4c6b9a909 100644 --- a/tests/_core/test_plot.py +++ b/tests/_core/test_plot.py @@ -18,6 +18,7 @@ from seaborn._core.scales import Nominal, Continuous from seaborn._core.rules import categorical_order from seaborn._core.moves import Move, Shift, Dodge +from seaborn._stats.aggregation import Agg from seaborn._marks.base import Mark from seaborn._stats.base import Stat from seaborn.external.version import Version @@ -269,7 +270,7 @@ def __call__(self, data, groupby, orient, scales): return data class MockMoveTrackOrient(Move): - def __call__(self, data, groupby, orient): + def __call__(self, data, groupby, orient, scales): self.orient_at_call = orient return data @@ -313,9 +314,20 @@ def test_type_checks(self): class MockStat(Stat): pass - with pytest.raises(TypeError, match="stat must be a Stat instance"): + class MockMove(Move): + pass + + err = "Transforms must have at most one Stat type" + + with pytest.raises(TypeError, match=err): p.add(MockMark(), MockStat) + with pytest.raises(TypeError, match=err): + p.add(MockMark(), MockMove(), MockStat()) + + with pytest.raises(TypeError, match=err): + p.add(MockMark(), MockMark(), MockStat()) + class TestScaling: @@ -878,36 +890,70 @@ def test_theme_error(self): with pytest.raises(TypeError, match=r"theme\(\) takes 1 positional"): p.theme("arg1", "arg2") + def test_stat(self, long_df): + + orig_df = long_df.copy(deep=True) + + m = MockMark() + Plot(long_df, x="a", y="z").add(m, Agg()).plot() + + expected = long_df.groupby("a", sort=False)["z"].mean().reset_index(drop=True) + assert_vector_equal(m.passed_data[0]["y"], expected) + + assert_frame_equal(long_df, orig_df) # Test data was not mutated + def test_move(self, long_df): 
orig_df = long_df.copy(deep=True) m = MockMark() - Plot(long_df, x="z", y="z").add(m, move=Shift(x=1)).plot() + Plot(long_df, x="z", y="z").add(m, Shift(x=1)).plot() assert_vector_equal(m.passed_data[0]["x"], long_df["z"] + 1) assert_vector_equal(m.passed_data[0]["y"], long_df["z"]) assert_frame_equal(long_df, orig_df) # Test data was not mutated + def test_stat_and_move(self, long_df): + + m = MockMark() + Plot(long_df, x="a", y="z").add(m, Agg(), Shift(y=1)).plot() + + expected = long_df.groupby("a", sort=False)["z"].mean().reset_index(drop=True) + assert_vector_equal(m.passed_data[0]["y"], expected + 1) + + def test_stat_log_scale(self, long_df): + + orig_df = long_df.copy(deep=True) + + m = MockMark() + Plot(long_df, x="a", y="z").add(m, Agg()).scale(y="log").plot() + + x = long_df["a"] + y = np.log10(long_df["z"]) + expected = y.groupby(x, sort=False).mean().reset_index(drop=True) + assert_vector_equal(m.passed_data[0]["y"], 10 ** expected) + + assert_frame_equal(long_df, orig_df) # Test data was not mutated + def test_move_log_scale(self, long_df): m = MockMark() Plot( long_df, x="z", y="z" - ).scale(x="log").add(m, move=Shift(x=-1)).plot() + ).scale(x="log").add(m, Shift(x=-1)).plot() assert_vector_equal(m.passed_data[0]["x"], long_df["z"] / 10) def test_multi_move(self, long_df): m = MockMark() move_stack = [Shift(1), Shift(2)] - Plot(long_df, x="x", y="y").add(m, move=move_stack).plot() + Plot(long_df, x="x", y="y").add(m, *move_stack).plot() assert_vector_equal(m.passed_data[0]["x"], long_df["x"] + 3) def test_multi_move_with_pairing(self, long_df): m = MockMark() move_stack = [Shift(1), Shift(2)] - Plot(long_df, x="x").pair(y=["y", "z"]).add(m, move=move_stack).plot() + Plot(long_df, x="x").pair(y=["y", "z"]).add(m, *move_stack).plot() for frame in m.passed_data: assert_vector_equal(frame["x"], long_df["x"] + 3) @@ -919,7 +965,7 @@ def test_move_with_range(self, long_df): ymax = np.arange(6) * 2 m = MockMark() - Plot(x=x, group=group, ymin=ymin, ymax=ymax).add(m, move=Dodge()).plot() + Plot(x=x, group=group, ymin=ymin, ymax=ymax).add(m, Dodge()).plot() signs = [-1, +1] for i, df in m.passed_data[0].groupby("group"): @@ -1515,14 +1561,14 @@ def test_orient_inference(self, long_df): orient_list = [] class CaptureOrientMove(Move): - def __call__(self, data, groupby, orient): + def __call__(self, data, groupby, orient, scales): orient_list.append(orient) return data ( Plot(long_df, x="x") .pair(y=["b", "z"]) - .add(MockMark(), move=CaptureOrientMove()) + .add(MockMark(), CaptureOrientMove()) .plot() )
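
Reviewer note: below is a minimal usage sketch of the reworked API, not part of the patch. It assumes a seaborn build that includes this branch; the `IdentityMove` class and the toy DataFrame are made up for illustration, and `seaborn._core.moves.Move` is a private base class imported here only to show the new `scales` argument to `Move.__call__`.

```python
import pandas as pd
import seaborn.objects as so
from seaborn._core.moves import Move  # private base class, shown only for the new signature


class IdentityMove(Move):
    # Moves now receive the fitted scales as a fourth argument (unused here).
    def __call__(self, data, groupby, orient, scales):
        return data


# Toy stand-in for the tips dataset used in the demo notebook.
df = pd.DataFrame({
    "day": ["Thur", "Thur", "Fri", "Fri", "Sat", "Sat"],
    "total_bill": [10.0, 12.0, 15.0, 9.0, 20.0, 17.0],
    "time": ["Lunch", "Dinner", "Lunch", "Dinner", "Lunch", "Dinner"],
})

p = (
    so.Plot(df, "day", "total_bill", color="time")
    # Old API: .add(so.Bar(), so.Agg(), move=so.Dodge())
    # New API: an optional Stat first, then any number of Moves, all positional.
    .add(so.Bar(), so.Agg(), so.Dodge(), IdentityMove())
)
p.plot()  # compiles the figure; use p.show() in an interactive session
```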