Source code for secml.ml.peval.metrics.c_metric_fnr_at_th

"""
.. module:: MetricFNRatTH
   :synopsis: Performance Metric: False Negative Rate @ ROC Threshold

.. moduleauthor:: Marco Melis <marco.melis@unica.it>

"""
from secml.array import CArray
from secml.ml.peval.metrics import CMetric
from secml.core.type_utils import is_list


class CMetricFNRatTH(CMetric):
    """Performance evaluation metric: False Negative Rate @ ROC Threshold.

    The metric uses:
     - y_true (true ground truth labels)
     - score (estimated target values)

    Parameters
    ----------
    th : float or list
        ROC Threshold to use for computing the False Negative Rate.
        Default 0. This can be a list of multiple values.

    Attributes
    ----------
    class_type : 'fnr-at-th'

    Examples
    --------
    >>> from secml.ml.peval.metrics import CMetricFNRatTH
    >>> from secml.array import CArray

    >>> peval = CMetricFNRatTH(th=1.7)
    >>> peval.performance_score(CArray([1, 1, 0, 0]), score=CArray([1.6, 2, 0.5, -1]))
    0.5

    """
    __class_type = 'fnr-at-th'
    best_value = 0.0  # FNR is an error rate, so its best value is 0

    def __init__(self, th=0.0):
        self.th = th if is_list(th) else float(th)

    def _performance_score(self, y_true, score, rep_idx=0):
        """Computes the False Negative Rate @ ROC Threshold.

        Parameters
        ----------
        y_true : CArray
            Ground truth (true) labels or target scores.
        score : CArray
            Flat array with target scores for each pattern; can either be
            probability estimates of the positive class or confidence values.
        rep_idx : int, optional
            Index of the th value to use. Default 0.

        Returns
        -------
        metric : float
            Returns metric value as float.

        """
        th = self.th[rep_idx] if is_list(self.th) else self.th
        p = CArray(y_true == 1)  # Mask of true positives
        # FNR = 1 - TPR: fraction of positives whose score falls below th
        return 1 - (float(CArray(score[p] - th >= 0).sum()) / p.sum())
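
For context, the snippet below is a minimal standalone sketch (not part of the secml source) that reproduces the same computation with plain numpy, making the formula explicit: FNR = 1 - (# positives with score >= th) / (# positives). The function name and the sample values are illustrative only.

import numpy as np

def fnr_at_th(y_true, score, th=0.0):
    """False Negative Rate at a fixed decision threshold (illustrative sketch).

    A positive sample counts as detected when its score is >= th;
    FNR is the fraction of true positives that fall below the threshold.
    """
    y_true = np.asarray(y_true)
    score = np.asarray(score)
    pos = y_true == 1                    # mask of true positives
    detected = (score[pos] >= th).sum()  # positives scoring at or above th
    return 1.0 - detected / pos.sum()

# Mirrors the doctest above: two positives, one of which scores below 1.7
print(fnr_at_th([1, 1, 0, 0], [1.6, 2, 0.5, -1], th=1.7))  # 0.5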