# Source code for pipeline.Evaluation.reporting

import logging

import montetracko as mt
import montetracko.lhcb as mtb
from utils.commonutils.config import cdirs


def report_evaluation(
    trackEvaluator: mt.TrackEvaluator,
    allen_report: bool = True,
    table_report: bool = True,
    output_path: str | None = None,
    detector: str | None = None,
) -> str | None:
    """Perform the evaluation and produce reports.

    Args:
        trackEvaluator: :py:class:`montetracko.TrackEvaluator` object,
            output of the matching
        allen_report: whether to generate the Allen report
        table_report: whether to generate the table reports
        output_path: Output path where to save the report
        detector: detector name used to select the report categories;
            defaults to the first entry of ``cdirs.detectors``

    Returns:
        The concatenated report string, or ``None`` when both
        ``allen_report`` and ``table_report`` are disabled.
    """
    if detector is None:
        # Fall back to the first configured detector.
        detector = cdirs.detectors[0]

    if not (allen_report or table_report):
        # Nothing was requested: return None explicitly instead of
        # falling through to an unbound `total_report` local.
        return None

    list_reports: list[str] = []

    if allen_report:
        allen_report_str = trackEvaluator.report(
            # Auto-numbering only applies to the velo detector.
            reporter=mt.AllenReporter(auto_numbering=detector == "velo"),
            categories=mtb.category.detector_to_extended_categories[detector],
        )
        list_reports.append(allen_report_str)

    if table_report:
        table_report_str = trackEvaluator.report(
            reporter=mt.TabReporter(
                [
                    "n_particles",
                    "efficiency",
                    "efficiency_per_event",
                    "clone_rate",
                    "hit_purity_per_candidate",
                    "hit_efficiency_per_candidate",
                ],
                mode="markdown",
                # tablefmt="grid",
            ),
            categories=mtb.category.detector_to_table_categories[detector],
        )
        list_reports.append(table_report_str)

    # Global (category-independent) ghost-rate summary, appended whenever
    # any report was requested.
    table_report_global = trackEvaluator.report(
        reporter=mt.TabReporter(
            metric_names=["n_ghosts", "n_tracks", "ghost_rate"],
            mode="markdown",
            # tablefmt="grid",
        ),
    )
    list_reports.append(table_report_global)

    total_report = "\n\n".join(list_reports)
    print(total_report)

    if output_path is not None:
        with open(output_path, "w") as report_file:
            report_file.write(total_report)
        # Lazy %-args: message is only built when INFO logging is enabled.
        logging.info("Report was saved in %s", output_path)

    return total_report