core.utils.metrics
    def accuracy(tp, fp, fn, tn):
        """
        Returns accuracy as float given true-positives ``tp``, false-positives ``fp``,
        false-negatives ``fn``, true-negatives ``tn``.
        """
        return (tp + tn) / (tp + fp + fn + tn) if (tp + fp + fn + tn) > 0 else -1


    def precision(tp, fp, fn, tn):
        """
        Returns precision as float given true-positives ``tp``, false-positives ``fp``,
        false-negatives ``fn``, true-negatives ``tn``.
        """
        return tp / (tp + fp) if (tp + fp) > 0 else -1


    def recall(tp, fp, fn, tn):
        """
        Returns recall as float given true-positives ``tp``, false-positives ``fp``,
        false-negatives ``fn``, true-negatives ``tn``.
        """
        return tp / (tp + fn) if (tp + fn) > 0 else -1


    def f1(tp, fp, fn, tn):
        """
        Returns f1-score as float given true-positives ``tp``, false-positives ``fp``,
        false-negatives ``fn``, true-negatives ``tn``.
        """
        return (2 * tp) / (2 * tp + fp + fn) if (2 * tp + fp + fn) > 0 else -1


    def calc_metrics(labels, predictions):
        """
        Calculate the metrics accuracy, precision, recall, and f1-score.

        Args:
            labels (numpy.ndarray): The correct labels (int or bool)
            predictions (numpy.ndarray): The predicted labels (int or bool)

        Returns:
            dictionary containing the calculated values
        """
        tp, fp, fn, tn = 0, 0, 0, 0
        for l, p in zip(labels, predictions):
            if l and p:
                tp += 1
            elif l and not p:
                fn += 1
            elif not l and p:
                fp += 1
            else:
                tn += 1
        return {
            'accuracy': accuracy(tp, fp, fn, tn),
            'precision': precision(tp, fp, fn, tn),
            'recall': recall(tp, fp, fn, tn),
            'f1': f1(tp, fp, fn, tn),
            'num': len(predictions),
            'num_positive_labels': labels.sum(),
            'num_negative_labels': ((labels - 1) * -1).sum(),
            'num_positive_predictions': predictions.sum(),
            'num_negative_predictions': ((predictions - 1) * -1).sum()
        }


    def _interval_similarity_or(start_i, end_i, start_j, end_j):
        if start_j <= start_i and end_j >= end_i:
            # [ j -- ( i == i ) -- j ]
            i_intersect = end_i - start_i + 1
        elif start_i <= start_j and end_i >= end_j:
            # ( i -- [ j == j ] -- i )
            i_intersect = end_j - start_j + 1
        elif start_i <= start_j and end_i <= end_j and end_i >= start_j:
            # ( i -- [ j == i ) -- j ]
            i_intersect = end_i - start_j + 1
        elif start_i >= start_j and end_i >= end_j and end_j >= start_i:
            # [ j -- ( i == j ] -- i )
            i_intersect = end_j - start_i + 1
        else:
            # no overlap
            i_intersect = 0
        return i_intersect / (end_i - start_i + 1)


    def interval_similarity(start_i, end_i, start_j, end_j):
        """
        Computes the similarity between two intervals (of natural numbers).

        Args:
            start_i (int): The start of interval i
            end_i (int): The end of interval i
            start_j (int): The start of interval j
            end_j (int): The end of interval j

        Returns:
            float in [0-1]
            S_{OR}([start_i, end_i], [start_j, end_j])

        See [Novel similarity measure for interval-valued data based on overlapping ratio](https://ieeexplore.ieee.org/document/8015623)
        for the paper ([PDF](https://www.researchgate.net/publication/318311193_Novel_similarity_measure_for_interval-valued_data_based_on_overlapping_ratio)).
        Uses the [T_min Norm](https://en.wikipedia.org/wiki/T-norm#Prominent_examples)
        """
        if start_i > end_i:
            start_i, end_i = end_i, start_i
        if start_j > end_j:
            start_j, end_j = end_j, start_j
        return min(
            _interval_similarity_or(start_i, end_i, start_j, end_j),
            _interval_similarity_or(start_j, end_j, start_i, end_i)
        )
    def accuracy(tp, fp, fn, tn):
        """
        Returns accuracy as float given true-positives ``tp``, false-positives ``fp``,
        false-negatives ``fn``, true-negatives ``tn``.
        """
        return (tp + tn) / (tp + fp + fn + tn) if (tp + fp + fn + tn) > 0 else -1

Returns accuracy as float given true-positives tp, false-positives fp, false-negatives fn, true-negatives tn.
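Not part of the module, just a quick usage sketch with made-up counts, assuming the module is importable as core.utils.metrics:

    from core.utils.metrics import accuracy

    # 50 TP, 5 FP, 10 FN, 35 TN -> (50 + 35) / 100
    print(accuracy(50, 5, 10, 35))   # 0.85
    # degenerate case, no samples at all -> sentinel value
    print(accuracy(0, 0, 0, 0))      # -1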
    def precision(tp, fp, fn, tn):
        """
        Returns precision as float given true-positives ``tp``, false-positives ``fp``,
        false-negatives ``fn``, true-negatives ``tn``.
        """
        return tp / (tp + fp) if (tp + fp) > 0 else -1

Returns precision as float given true-positives tp, false-positives fp, false-negatives fn, true-negatives tn.
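Again a hypothetical call with invented counts, mainly to show that precision only uses tp and fp and falls back to the -1 sentinel when nothing was predicted positive:

    from core.utils.metrics import precision

    # 50 TP, 5 FP -> 50 / 55
    print(precision(50, 5, 10, 35))  # 0.909...
    # no positive predictions (tp + fp == 0) -> sentinel value
    print(precision(0, 0, 10, 35))   # -1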
    def recall(tp, fp, fn, tn):
        """
        Returns recall as float given true-positives ``tp``, false-positives ``fp``,
        false-negatives ``fn``, true-negatives ``tn``.
        """
        return tp / (tp + fn) if (tp + fn) > 0 else -1

Returns recall as float given true-positives tp, false-positives fp, false-negatives fn, true-negatives tn.
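A similar hypothetical call for recall, which only uses tp and fn and returns the -1 sentinel when there are no positive labels at all:

    from core.utils.metrics import recall

    # 50 TP, 10 FN -> 50 / 60
    print(recall(50, 5, 10, 35))   # 0.833...
    # no positive labels (tp + fn == 0) -> sentinel value
    print(recall(0, 5, 0, 35))     # -1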
    def f1(tp, fp, fn, tn):
        """
        Returns f1-score as float given true-positives ``tp``, false-positives ``fp``,
        false-negatives ``fn``, true-negatives ``tn``.
        """
        return (2 * tp) / (2 * tp + fp + fn) if (2 * tp + fp + fn) > 0 else -1

Returns f1-score as float given true-positives tp, false-positives fp, false-negatives fn, true-negatives tn.
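One more sketch with the same invented counts; the f1-score here equals the harmonic mean of precision and recall, which the second print cross-checks:

    from core.utils.metrics import f1, precision, recall

    tp, fp, fn, tn = 50, 5, 10, 35
    # 2*50 / (2*50 + 5 + 10) = 100 / 115
    print(f1(tp, fp, fn, tn))        # 0.8695...
    # same value as the harmonic mean of precision and recall
    p, r = precision(tp, fp, fn, tn), recall(tp, fp, fn, tn)
    print(2 * p * r / (p + r))       # 0.8695...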
    def calc_metrics(labels, predictions):
        """
        Calculate the metrics accuracy, precision, recall, and f1-score.

        Args:
            labels (numpy.ndarray): The correct labels (int or bool)
            predictions (numpy.ndarray): The predicted labels (int or bool)

        Returns:
            dictionary containing the calculated values
        """
        tp, fp, fn, tn = 0, 0, 0, 0
        for l, p in zip(labels, predictions):
            if l and p:
                tp += 1
            elif l and not p:
                fn += 1
            elif not l and p:
                fp += 1
            else:
                tn += 1
        return {
            'accuracy': accuracy(tp, fp, fn, tn),
            'precision': precision(tp, fp, fn, tn),
            'recall': recall(tp, fp, fn, tn),
            'f1': f1(tp, fp, fn, tn),
            'num': len(predictions),
            'num_positive_labels': labels.sum(),
            'num_negative_labels': ((labels - 1) * -1).sum(),
            'num_positive_predictions': predictions.sum(),
            'num_negative_predictions': ((predictions - 1) * -1).sum()
        }

Calculate the metrics accuracy, precision, recall, and f1-score.

Args
- labels (numpy.ndarray): The correct labels (int or bool); must support element-wise arithmetic and .sum()
- predictions (numpy.ndarray): The predicted labels (int or bool); must support element-wise arithmetic and .sum()

Returns
dictionary containing the calculated values
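A hypothetical end-to-end call. The count fields in the returned dictionary are computed with .sum() and element-wise arithmetic, so numpy arrays rather than plain lists are assumed as inputs:

    import numpy as np
    from core.utils.metrics import calc_metrics

    labels      = np.array([1, 1, 1, 0, 0, 0])
    predictions = np.array([1, 1, 0, 1, 0, 0])
    m = calc_metrics(labels, predictions)
    # tp=2, fp=1, fn=1, tn=2
    print(m['accuracy'], m['precision'], m['recall'], m['f1'])
    # 0.666... 0.666... 0.666... 0.666...
    print(m['num'], m['num_positive_labels'], m['num_negative_predictions'])
    # 6 3 3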
    def interval_similarity(start_i, end_i, start_j, end_j):
        """
        Computes the similarity between two intervals (of natural numbers).

        Args:
            start_i (int): The start of interval i
            end_i (int): The end of interval i
            start_j (int): The start of interval j
            end_j (int): The end of interval j

        Returns:
            float in [0-1]
            S_{OR}([start_i, end_i], [start_j, end_j])

        See [Novel similarity measure for interval-valued data based on overlapping ratio](https://ieeexplore.ieee.org/document/8015623)
        for the paper ([PDF](https://www.researchgate.net/publication/318311193_Novel_similarity_measure_for_interval-valued_data_based_on_overlapping_ratio)).
        Uses the [T_min Norm](https://en.wikipedia.org/wiki/T-norm#Prominent_examples)
        """
        if start_i > end_i:
            start_i, end_i = end_i, start_i
        if start_j > end_j:
            start_j, end_j = end_j, start_j
        return min(
            _interval_similarity_or(start_i, end_i, start_j, end_j),
            _interval_similarity_or(start_j, end_j, start_i, end_i)
        )

Computes the similarity between two intervals (of natural numbers).

Args
- start_i (int): The start of interval i
- end_i (int): The end of interval i
- start_j (int): The start of interval j
- end_j (int): The end of interval j

Returns
float in [0-1]: S_{OR}([start_i, end_i], [start_j, end_j])

See [Novel similarity measure for interval-valued data based on overlapping ratio](https://ieeexplore.ieee.org/document/8015623) for the paper ([PDF](https://www.researchgate.net/publication/318311193_Novel_similarity_measure_for_interval-valued_data_based_on_overlapping_ratio)). Uses the [T_min Norm](https://en.wikipedia.org/wiki/T-norm#Prominent_examples).
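An illustrative sketch with invented intervals: each overlap ratio divides the number of shared integer points by the length of one of the two intervals, and the final score is the minimum of the two ratios (the T_min norm), so a short interval inside a much longer one still scores low:

    from core.utils.metrics import interval_similarity

    # identical intervals are maximally similar
    print(interval_similarity(1, 10, 1, 10))   # 1.0
    # [1, 10] and [6, 15] share the 5 points 6..10;
    # both overlap ratios are 5/10, and the minimum keeps 0.5
    print(interval_similarity(1, 10, 6, 15))   # 0.5
    # a short interval inside a long one: min(5/5, 5/100) = 0.05
    print(interval_similarity(3, 7, 1, 100))   # 0.05
    # disjoint intervals
    print(interval_similarity(1, 5, 10, 20))   # 0.0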