Skip to content

Class peagen.plugins.evaluators.pytest_perf_regression.PytestPerfRegressionEvaluator

peagen.plugins.evaluators.pytest_perf_regression.PytestPerfRegressionEvaluator

PytestPerfRegressionEvaluator(baseline)

Bases: Evaluator

Measure speedup versus a baseline commit using pytest-perf.

Source code in peagen/plugins/evaluators/pytest_perf_regression.py
23
24
25
def __init__(self, baseline: str) -> None:
    """Create an evaluator that compares performance against *baseline*.

    Parameters
    ----------
    baseline:
        Commit reference whose performance serves as the control for
        pytest-perf's ``--control`` option.
    """
    super().__init__()
    # Remember which commit pytest-perf should treat as the control.
    self.baseline = baseline

baseline instance-attribute

baseline = baseline

last_result instance-attribute

last_result = None

run

run(workspace, bench_cmd, runs=1, **kw)
Source code in peagen/plugins/evaluators/pytest_perf_regression.py
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
def run(self, workspace: Path, bench_cmd: str, runs: int = 1, **kw: Any) -> float:
    """Benchmark *workspace* with pytest-perf and return the best speedup.

    Invokes ``pytest --json-report --control <baseline>`` in *workspace*
    and reads the resulting ``.report.json``. The benchmark is repeated
    *runs* times (previously this parameter was accepted but ignored) and
    the maximum observed speedup is returned, reducing measurement noise.

    Parameters
    ----------
    workspace:
        Directory to benchmark; used as the subprocess working directory.
    bench_cmd:
        Extra arguments appended to the pytest invocation, whitespace-split;
        may be empty.
    runs:
        Number of benchmark repetitions; values below 1 are treated as 1.
    **kw:
        Ignored; accepted for interface compatibility with other evaluators.

    Returns
    -------
    float
        Best speedup versus ``self.baseline``; ``0.0`` when no report was
        produced or it could not be parsed. Also stored in
        ``self.last_result`` alongside the baseline reference.
    """
    cmd = [
        "pytest",
        "--json-report",
        "--control",
        self.baseline,
    ]
    if bench_cmd:
        cmd.extend(bench_cmd.split())

    report_path = workspace / ".report.json"
    best = 0.0
    for _ in range(max(1, runs)):
        # Best-effort: a failing pytest run simply produces no report,
        # leaving this repetition's contribution at 0.0.
        subprocess.run(cmd, cwd=workspace, check=False)

        if report_path.exists():
            try:
                data = json.loads(report_path.read_text())
            except ValueError:
                # Corrupt/partial report: treat like a missing one rather
                # than crashing, consistent with check=False above.
                data = {}
            finally:
                # Always remove the report so the next repetition (or next
                # call) cannot pick up stale results.
                report_path.unlink()
            perf = data.get("perf", {})
            best = max(best, float(perf.get("speedup", 0.0)))

    self.last_result = {"baseline": self.baseline, "speedup": best}
    return best