# ------------------------------------------------------------ #
#
# file : metrics.py
# author : CM
# Metrics for evaluation
#
# ------------------------------------------------------------ #
from keras import backend as K

# Dice coefficient (soft version, kept commented out)
'''
def dice_coef(y_true, y_pred, smooth=1.):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
'''

# The DeepLab version of the dice coefficient: predictions are
# thresholded at 0.5 before computing the overlap (hard dice)
def dice_coef(y_true, y_pred):
    smooth = 0.00001
    y_true_f = K.flatten(y_true)
    y_pred = K.cast(y_pred, 'float32')
    # binarise predictions: 1.0 where p > 0.5, else 0.0
    y_pred_f = K.cast(K.greater(K.flatten(y_pred), 0.5), 'float32')
    intersection = y_true_f * y_pred_f
    score = (2. * K.sum(intersection) + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
    return score
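
# Worked example (illustrative values, not from the original file):
# for y_true = [1, 1, 0, 0] and y_pred = [0.9, 0.4, 0.2, 0.1], the
# threshold gives y_pred_f = [1, 0, 0, 0]; the intersection sums to 1,
# so dice = (2*1 + smooth) / (2 + 1 + smooth) ≈ 0.667.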

# Recall (true positive rate)
def recall(truth, prediction):
    TP = K.sum(K.round(K.clip(truth * prediction, 0, 1)))
    P = K.sum(K.round(K.clip(truth, 0, 1)))
    return TP / (P + K.epsilon())

# Specificity (true negative rate)
def specificity(truth, prediction):
    TN = K.sum(K.round(K.clip((1 - truth) * (1 - prediction), 0, 1)))
    N = K.sum(K.round(K.clip(1 - truth, 0, 1)))
    return TN / (N + K.epsilon())

# Precision (positive predictive value)
def precision(truth, prediction):
    TP = K.sum(K.round(K.clip(truth * prediction, 0, 1)))
    FP = K.sum(K.round(K.clip((1 - truth) * prediction, 0, 1)))
    return TP / (TP + FP + K.epsilon())
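
# Note (added clarification): recall, specificity and precision above
# assume a binary ground truth and predictions in [0, 1] (e.g. sigmoid
# outputs); K.round then effectively thresholds predictions at 0.5
# before counting.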

# F1 score (harmonic mean of precision and recall)
def f1(y_true, y_pred):
    def recall(y_true, y_pred):
        """Recall metric.

        Computes the recall, a metric for multi-label classification of
        how many relevant items are selected. Only computes a batch-wise
        average of recall.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        return true_positives / (possible_positives + K.epsilon())

    def precision(y_true, y_pred):
        """Precision metric.

        Computes the precision, a metric for multi-label classification of
        how many selected items are relevant. Only computes a batch-wise
        average of precision.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
        return true_positives / (predicted_positives + K.epsilon())

    precision = precision(y_true, y_pred)
    recall = recall(y_true, y_pred)
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
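
# ------------------------------------------------------------ #
# Minimal usage sketch (an assumption, not part of the original file):
# all of the functions above follow the Keras metric signature
# (y_true, y_pred), so they could be passed to
# model.compile(..., metrics=[dice_coef, recall, specificity, precision, f1]).
# The smoke test below additionally assumes a TensorFlow-backed Keras
# where K.constant / K.eval can build and evaluate backend tensors.
if __name__ == '__main__':
    import numpy as np

    y_true = K.constant(np.array([1., 1., 0., 0.], dtype='float32'))
    y_pred = K.constant(np.array([0.9, 0.4, 0.2, 0.1], dtype='float32'))

    # Thresholding y_pred at 0.5 gives [1, 0, 0, 0]: 1 TP, 1 FN, 2 TN, 0 FP.
    print('dice        :', K.eval(dice_coef(y_true, y_pred)))    # ~0.667
    print('recall      :', K.eval(recall(y_true, y_pred)))       # 0.5
    print('specificity :', K.eval(specificity(y_true, y_pred)))  # 1.0
    print('precision   :', K.eval(precision(y_true, y_pred)))    # ~1.0
    print('f1          :', K.eval(f1(y_true, y_pred)))           # ~0.667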