# ------------------------------------------------------------ #
#
# file   : losses.py
# author : CM
# Loss functions
#
# ------------------------------------------------------------ #

import keras.backend as K

def dice_coef(y_true, y_pred, smooth=1):
    # Soerensen-Dice coefficient on the flattened masks, smoothed so that
    # empty masks do not cause a division by zero.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coef_loss(y_true, y_pred):
    # Negative Dice coefficient: minimising the loss maximises the overlap.
    return -dice_coef(y_true, y_pred)

# Jaccard distance
def jaccard_distance_loss(y_true, y_pred, smooth=100):
    """
    Jaccard = (|X & Y|) / (|X| + |Y| - |X & Y|)
            = sum(|A*B|) / (sum(|A|) + sum(|B|) - sum(|A*B|))

    The Jaccard distance loss is useful for unbalanced datasets. It has been
    shifted so it converges on 0 and is smoothed to avoid exploding or
    vanishing gradients.

    Ref: https://en.wikipedia.org/wiki/Jaccard_index
    @url: https://gist.github.com/wassname/f1452b748efcbeb4cb9b1d059dce6f96
    @author: wassname
    """
    intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
    sum_ = K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1)
    jac = (intersection + smooth) / (sum_ - intersection + smooth)
    return (1 - jac) * smooth

def dice_coef_(y_true, y_pred, smooth=1):
    """
    Dice = (2*|X & Y|) / (|X| + |Y|)
         = 2*sum(|A*B|) / (sum(A^2) + sum(B^2))

    ref: https://arxiv.org/pdf/1606.04797v1.pdf
    """
    intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
    return (2. * intersection + smooth) / (K.sum(K.square(y_true), -1) + K.sum(K.square(y_pred), -1) + smooth)

def dice_coef_loss_(y_true, y_pred):
    return 1 - dice_coef_(y_true, y_pred)

'''
def dice_loss(y_true, y_pred, smooth=1e-6):
    """ Loss function based on the Dice coefficient.

    Parameters
    ----------
    y_true : keras tensor
        tensor containing target mask.
    y_pred : keras tensor
        tensor containing predicted mask.
    smooth : float
        small real value used for avoiding division by zero error.

    Returns
    -------
    keras tensor
        tensor containing dice loss.
    """
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    answer = (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
    return -answer
'''

# The DeepLab version of dice_loss: returns 1 - Dice, so the loss stays in [0, 1].
def dice_loss(y_true, y_pred):
    smooth = 1.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = y_true_f * y_pred_f
    score = (2. * K.sum(intersection) + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
    return 1. - score

def tversky_loss(y_true, y_pred, alpha=0.3, beta=0.7, smooth=1e-10):
    """ Tversky loss function.

    Parameters
    ----------
    y_true : keras tensor
        tensor containing target mask.
    y_pred : keras tensor
        tensor containing predicted mask.
    alpha : float
        real value, weight of false positives (errors on the '0' class).
    beta : float
        real value, weight of false negatives (errors on the '1' class).
    smooth : float
        small real value used for avoiding division by zero error.

    Returns
    -------
    keras tensor
        tensor containing tversky loss.
    """
    y_true = K.flatten(y_true)
    y_pred = K.flatten(y_pred)
    truepos = K.sum(y_true * y_pred)
    fp_and_fn = alpha * K.sum(y_pred * (1 - y_true)) + beta * K.sum((1 - y_pred) * y_true)
    answer = (truepos + smooth) / ((truepos + smooth) + fp_and_fn)
    return -answer

def jaccard_coef_logloss(y_true, y_pred, smooth=1e-10):
    """ Loss function based on the Jaccard coefficient.

    Parameters
    ----------
    y_true : keras tensor
        tensor containing target mask.
    y_pred : keras tensor
        tensor containing predicted mask.
    smooth : float
        small real value used for avoiding division by zero error.

    Returns
    -------
    keras tensor
        tensor containing negative logarithm of jaccard coefficient.
    """
    y_true = K.flatten(y_true)
    y_pred = K.flatten(y_pred)
    truepos = K.sum(y_true * y_pred)
    falsepos = K.sum(y_pred) - truepos
    falseneg = K.sum(y_true) - truepos
    jaccard = (truepos + smooth) / (smooth + truepos + falseneg + falsepos)
    return -K.log(jaccard + smooth)
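
# ------------------------------------------------------------ #
# Minimal usage sketch (an assumption, not part of the original file):
# evaluates the losses above on a small constant mask pair and shows how
# one of them would typically be passed to `model.compile`. Only standard
# `keras.backend` calls are used; the compile line is illustrative and
# `model` stands for a hypothetical Keras segmentation model.
# ------------------------------------------------------------ #
if __name__ == "__main__":
    import numpy as np

    y_true = K.constant(np.array([[0., 1., 1., 0.]]))
    y_pred = K.constant(np.array([[0.1, 0.9, 0.8, 0.2]]))

    print("dice_coef            :", K.eval(dice_coef(y_true, y_pred)))
    print("dice_loss            :", K.eval(dice_loss(y_true, y_pred)))
    print("tversky_loss         :", K.eval(tversky_loss(y_true, y_pred)))
    print("jaccard_coef_logloss :", K.eval(jaccard_coef_logloss(y_true, y_pred)))

    # Typical use during training (illustrative):
    # model.compile(optimizer="adam", loss=dice_coef_loss, metrics=[dice_coef])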