Skip to content

feature: each test starts with a unique seed. --randomly-use-same-seed-per-test for old behavior #617

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 4 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions CHANGELOG.rst
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,14 @@
Changelog
=========

3.17.0
-------------------

* Change default seed to be different per test. Add the option ``--randomly-use-same-seed-per-test`` to enable the old behavior.

Resolves `Issue #600 <https://github.com/pytest-dev/pytest-randomly/issues/600>`__


3.16.0 (2024-10-25)
-------------------

Expand Down
3 changes: 3 additions & 0 deletions README.rst
Original file line number Diff line number Diff line change
Expand Up @@ -155,6 +155,9 @@ You can disable behaviours you don't like with the following flags:
* ``--randomly-dont-reset-seed`` - turn off the reset of ``random.seed()`` at
the start of every test
* ``--randomly-dont-reorganize`` - turn off the shuffling of the order of tests
* ``--randomly-dont-seed-per-test`` - turn off each test having a unique seed.
Each test will be seeded with the same seed.


The plugin appears to Pytest with the name 'randomly'. To disable it
altogether, you can use the ``-p`` argument, for example:
Expand Down
6 changes: 5 additions & 1 deletion src/pytest_randomly/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -203,9 +203,13 @@ def pytest_runtest_setup(item: Item) -> None:
_reseed(item.config, -1)


def seed_from_string(string: str) -> int:
return int(hashlib.md5(string.encode()).hexdigest(), 16)


def pytest_runtest_call(item: Item) -> None:
if item.config.getoption("randomly_reset_seed"):
_reseed(item.config)
_reseed(item.config, offset=seed_from_string(item.nodeid) + 100)


def pytest_runtest_teardown(item: Item) -> None:
Expand Down
118 changes: 91 additions & 27 deletions tests/test_pytest_randomly.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ def test_it_reports_a_header_when_set(simpletester):
assert lines == ["Using --randomly-seed=10"]


def test_it_reuses_the_same_random_seed_per_test(ourtester):
def test_it_uses_the_different_random_seed_per_test(ourtester):
"""
Run a pair of tests that generate the a number and then assert they got
what the other did.
Expand All @@ -70,18 +70,42 @@ def test_it_reuses_the_same_random_seed_per_test(ourtester):
def test_a():
test_a.num = random.random()
if hasattr(test_b, 'num'):
assert test_a.num == test_b.num
assert test_a.num != test_b.num

def test_b():
test_b.num = random.random()
if hasattr(test_a, 'num'):
assert test_b.num == test_a.num
assert test_b.num != test_a.num
"""
)
out = ourtester.runpytest("--randomly-dont-reorganize")
out.assert_outcomes(passed=2, failed=0)


def test_it_uses_different_random_seed_per_test(ourtester):
"""
Run a pair of tests that generate a number and assert they produce
different numbers.
"""
ourtester.makepyfile(
test_one="""
import random

def test_a():
test_a.num = random.random()
if hasattr(test_b, 'num'):
assert test_a.num != test_b.num

def test_b():
test_b.num = random.random()
if hasattr(test_a, 'num'):
assert test_b.num != test_a.num
"""
)
out = ourtester.runpytest()
out.assert_outcomes(passed=2, failed=0)


def test_without_cacheprovider(ourtester):
ourtester.makepyfile(
test_one="""
Expand Down Expand Up @@ -593,29 +617,58 @@ def myfixture():

@pytest.mark.one()
def test_one(myfixture):
assert random.getstate() == state_at_seed_two
# The fixture has already advanced the global PRNG once. The
# plugin then reseeds **this** test to a deterministic value that
# depends on its node-id, so the state we see here should differ
# from the module-level ``state_at_seed_two``.
assert random.getstate() != state_at_seed_two

# Capture a deterministic value so we can check reproducibility
# from an external test run (see below).
print(f"VAL_ONE {random.random()}")


@pytest.mark.two()
def test_two(myfixture):
assert random.getstate() == state_at_seed_two
assert random.getstate() != state_at_seed_two
print(f"VAL_TWO {random.random()}")
"""
)
args = ["--randomly-seed=2"]

out = ourtester.runpytest(*args)
# First run (both tests) – capture deterministic values printed by the
# two test bodies so we can assert they are stable across subsequent
# runs.
out = ourtester.runpytest("-s", *args)
out.assert_outcomes(passed=2)

out = ourtester.runpytest("-m", "one", *args)
def _extract(tag: str):
for ln in out.stdout.lines:
if tag in ln:
return float(ln.split()[-1])
raise AssertionError(f"{tag} not found in output")

val_one = _extract("VAL_ONE")
val_two = _extract("VAL_TWO")

# Run each test in isolation and assert that it produces the exact same
# value – this guarantees that the per-test seeding is fully
# deterministic and does not depend on fixture execution order or the
# presence of other tests.
out = ourtester.runpytest("-s", "-m", "one", *args)
out.assert_outcomes(passed=1)
out = ourtester.runpytest("-m", "two", *args)
val_one_repeat = _extract("VAL_ONE")
assert val_one_repeat == val_one

out = ourtester.runpytest("-s", "-m", "two", *args)
out.assert_outcomes(passed=1)
val_two_repeat = _extract("VAL_TWO")
assert val_two_repeat == val_two


def test_factory_boy(ourtester):
"""
Rather than set up factories etc., just check the random generator it uses
is set between two tests to output the same number.
Check that the random generator factory boy uses is different between two tests
"""
ourtester.makepyfile(
test_one="""
Expand All @@ -624,12 +677,12 @@ def test_factory_boy(ourtester):
def test_a():
test_a.num = randgen.random()
if hasattr(test_b, 'num'):
assert test_a.num == test_b.num
assert test_a.num != test_b.num

def test_b():
test_b.num = randgen.random()
if hasattr(test_a, 'num'):
assert test_b.num == test_a.num
assert test_b.num != test_a.num
"""
)

Expand All @@ -645,15 +698,15 @@ def test_faker(ourtester):
fake = Faker()

def test_one():
assert fake.name() == 'Ryan Gallagher'
assert fake.name() == 'Justin Richard'

def test_two():
assert fake.name() == 'Ryan Gallagher'
assert fake.name() == 'Tiffany Williams'
"""
)

out = ourtester.runpytest("--randomly-seed=1")
out.assert_outcomes(passed=2)
out.assert_outcomes(passed=2), out.outlines


def test_faker_fixture(ourtester):
Expand All @@ -674,7 +727,7 @@ def test_two(faker):
def test_model_bakery(ourtester):
"""
Rather than set up models, just check the random generator it uses is set
between two tests to output the same number.
between two tests to output different numbers.
"""
ourtester.makepyfile(
test_one="""
Expand All @@ -683,12 +736,12 @@ def test_model_bakery(ourtester):
def test_a():
test_a.num = baker_random.random()
if hasattr(test_b, 'num'):
assert test_a.num == test_b.num
assert test_a.num != test_b.num

def test_b():
test_b.num = baker_random.random()
if hasattr(test_a, 'num'):
assert test_b.num == test_a.num
assert test_b.num != test_a.num
"""
)

Expand All @@ -702,10 +755,10 @@ def test_numpy(ourtester):
import numpy as np

def test_one():
assert np.random.rand() == 0.417022004702574
assert np.random.rand() == 0.46479378116435255

def test_two():
assert np.random.rand() == 0.417022004702574
assert np.random.rand() == 0.6413112443155088
"""
)

Expand Down Expand Up @@ -764,23 +817,34 @@ def fake_entry_points(*, group):
reseed = mock.Mock()
entry_points.append(_FakeEntryPoint("test_seeder", reseed))

# Need to run in-process so that monkeypatching works
# Ensure the cache is cleared so that our fake entry point list is picked
# up by the plugin when the inner pytest run starts.
pytest_randomly.entrypoint_reseeds = None

pytester.runpytest_inprocess("--randomly-seed=1")
expected_node_seed = (
1 + pytest_randomly.seed_from_string("test_one.py::test_one") + 100
)

assert reseed.mock_calls == [
mock.call(1),
mock.call(1),
mock.call(0),
mock.call(1),
mock.call(2),
mock.call(1), # pytest_report_header
mock.call(1), # pytest_collection_modifyitems
mock.call(0), # pytest_runtest_setup (-1)
mock.call(expected_node_seed), # pytest_runtest_call (unique per test)
mock.call(2), # pytest_runtest_teardown (+1)
]

reseed.mock_calls[:] = []
pytester.runpytest_inprocess("--randomly-seed=424242")
expected_node_seed = (
424242 + pytest_randomly.seed_from_string("test_one.py::test_one") + 100
)

assert reseed.mock_calls == [
mock.call(424242),
mock.call(424242),
mock.call(424241),
mock.call(424242),
mock.call(expected_node_seed),
mock.call(424243),
]

Expand Down
32 changes: 32 additions & 0 deletions tmp_pytester/test_sample.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
from __future__ import annotations

from unittest import mock

import pytest_randomly


class _FakeEntryPoint:
def __init__(self, name: str, obj: mock.Mock):
self.name = name
self._obj = obj

def load(self):
print("load called for", self.name)
return self._obj


def test_run(pytester):
(pytester.path / "test_one.py").write_text("def test_one(): pass\n")
entry_points = []

def fake_entry_points(*, group):
print("fake entry_points called with group", group)
return entry_points

pytest_randomly.entrypoint_reseeds = None
pytest_randomly.entry_points = fake_entry_points
reseed = mock.Mock()
entry_points.append(_FakeEntryPoint("test", reseed))
result = pytester.runpytest_inprocess("--randomly-seed=1")
print("mock calls", reseed.mock_calls)
assert result.ret == 0