# Source code for pipeline.utils.modelutils.metrics

"""Module to compute metrics to evaluate the classification performance.
"""
import typing
import torch


def compute_efficiency_purity(
    n_true_positives: int,
    n_truths: int,
    n_positives: int,
) -> typing.Tuple[float, float]:
    """Compute efficiency and purity from raw classification counts.

    Args:
        n_true_positives: number of examples that are both predicted
            positive and truly positive
        n_truths: total number of truly positive examples
        n_positives: total number of predicted-positive examples

    Returns:
        ``(efficiency, purity)``. Each ratio falls back to ``1.0`` when
        its denominator is zero (nothing to recover / nothing predicted),
        so the metric degrades gracefully instead of raising.
    """
    # Efficiency (a.k.a. recall): fraction of the true examples recovered.
    eff = n_true_positives / n_truths if n_truths != 0.0 else 1.0
    # Purity (a.k.a. precision): fraction of the predictions that are correct.
    pur = n_true_positives / n_positives if n_positives != 0.0 else 1.0
    return eff, pur
def compute_classification_efficiency_purity(
    predictions: torch.Tensor,
    truths: torch.Tensor,
) -> typing.Tuple[float, float]:
    """Compute the efficiency and purity of predictions.

    Args:
        predictions: tensor of predictions indicating whether each example
            is genuine (``True``) or fake (``False``)
        truths: what the ``predictions`` should be to be exact

    Returns:
        efficiency and purity of the predictions.
    """
    # ``int(... .item())`` yields plain Python ints, so the declared
    # ``Tuple[float, float]`` return type actually holds downstream;
    # the previous ``.cpu().numpy()`` produced 0-d numpy scalars and
    # needlessly required numpy. ``.item()`` also handles the device
    # transfer for CUDA tensors.
    n_positives = int(predictions.sum().item())
    n_truths = int(truths.sum().item())
    # A true positive is an example that is truly genuine AND predicted genuine.
    n_true_positives = int((truths.bool() & predictions).sum().item())
    return compute_efficiency_purity(
        n_true_positives=n_true_positives,
        n_truths=n_truths,
        n_positives=n_positives,
    )