Class peagen.plugins.evaluators.pytest_profiling.PytestProfilingEvaluator

PytestProfilingEvaluator(**_)

Bases: Evaluator

Fitness based on CPU time recorded by pytest-profiling.

Source code in peagen/plugins/evaluators/base.py
def __init__(self, **_: Any) -> None:
    self.last_result = None

last_result instance-attribute

last_result = None
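
After run() completes, last_result holds the aggregated profiling summary, or an error marker when no profile file was produced. A sketch of its shape with illustrative values; the top-level keys come from the run source below, and any extra per-run keys are whatever pytest-json-report places in its "summary" section:

{
    "runs": 3,                        # number of runs that produced a profile
    "median_cpu_s": 0.42,             # median CPU time in seconds
    "runs_detail": [                  # per-run CPU time plus the pytest-json-report summary
        {"cpu_s": 0.40, "passed": 12, "total": 12},
    ],
}

When no prof/combined.prof is found for any run, last_result is {"error": "profiling_failed"} and run() returns 0.0.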

run

run(workspace, bench_cmd, runs=1, **kw)
Source code in peagen/plugins/evaluators/pytest_profiling.py
def run(self, workspace: Path, bench_cmd: str, runs: int = 1, **kw: Any) -> float:
    cmd_base = shlex.split(bench_cmd)
    scores: List[float] = []
    details: List[Dict[str, Any]] = []

    for _ in range(max(1, runs)):
        # Run the benchmark command with profiling and JSON reporting enabled.
        cmd = cmd_base + ["--profile", "--json-report", "-q"]
        subprocess.run(cmd, cwd=workspace, capture_output=True, text=True)

        # pytest-profiling writes the combined profile to prof/combined.prof.
        prof_path = workspace / "prof" / "combined.prof"
        if not prof_path.exists():
            continue
        stats = pstats.Stats(str(prof_path))
        cpu_s = stats.total_tt
        scores.append(cpu_s)

        # Merge in the pytest-json-report summary when available.
        report_file = workspace / ".report.json"
        if report_file.exists():
            report_data = json.loads(report_file.read_text())
            summary = report_data.get("summary", {})
            details.append({"cpu_s": cpu_s, **summary})
            report_file.unlink()
        else:
            details.append({"cpu_s": cpu_s})

        # Clean up profiling artifacts between runs.
        prof_path.unlink()
        prof_path.parent.rmdir()

    if not scores:
        self.last_result = {"error": "profiling_failed"}
        return 0.0

    # Fitness is the negated median CPU time, so faster suites score higher.
    median_cpu = median(scores)
    self.last_result = {
        "runs": len(scores),
        "median_cpu_s": median_cpu,
        "runs_detail": details,
    }
    return -median_cpu
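
A minimal usage sketch; the workspace path and bench command are illustrative, and bench_cmd is assumed to invoke pytest with the pytest-profiling and pytest-json-report plugins installed, since run() appends --profile and --json-report to it:

from pathlib import Path

from peagen.plugins.evaluators.pytest_profiling import PytestProfilingEvaluator

evaluator = PytestProfilingEvaluator()

# Fitness is the negated median CPU time, so faster suites score higher.
fitness = evaluator.run(Path("/path/to/project"), "pytest tests", runs=3)

print(fitness)                # e.g. -0.42
print(evaluator.last_result)  # summary dict described above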