modelUnetVGG19.py
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.applications import VGG19
from tensorflow.keras.layers import (Activation, BatchNormalization, Concatenate,
                                     Conv2D, Conv2DTranspose, Input)
from tensorflow.keras.models import Model

def tversky(y_true, y_pred):
    """Tversky index: a generalisation of the Dice coefficient that weights
    false negatives (alpha) and false positives (1 - alpha) differently."""
    smooth = 1e-15
    y_true_pos = K.flatten(y_true)
    y_pred_pos = K.flatten(y_pred)
    true_pos = K.sum(y_true_pos * y_pred_pos)
    false_neg = K.sum(y_true_pos * (1 - y_pred_pos))
    false_pos = K.sum((1 - y_true_pos) * y_pred_pos)
    alpha = 0.7
    return (true_pos + smooth) / (true_pos + alpha * false_neg + (1 - alpha) * false_pos + smooth)


def tversky_loss(y_true, y_pred):
    return 1 - tversky(y_true, y_pred)


def focal_tversky(y_true, y_pred):
    """Focal Tversky loss: raises (1 - Tversky index) to the power gamma to
    focus training on harder examples."""
    pt_1 = tversky(y_true, y_pred)
    gamma = 0.75
    return K.pow((1 - pt_1), gamma)

def dice_coef(y_true, y_pred):
    """Dice coefficient: 2 * |A ∩ B| / (|A| + |B|), smoothed to avoid division by zero."""
    smooth = 1.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    score = (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
    return score


def dice_loss(y_true, y_pred):
    loss = 1 - dice_coef(y_true, y_pred)
    return loss


def log_cosh_dice_loss(y_true, y_pred):
    """Log-cosh of the Dice loss: a smoother, less steep variant of the Dice loss."""
    x = dice_loss(y_true, y_pred)
    return tf.math.log((tf.exp(x) + tf.exp(-x)) / 2.0)

# https://github.com/nikhilroxtomar/Polyp-Segmentation-using-UNET-in-TensorFlow-2.0/blob/master/train.py
def iou(y_true, y_pred):
    """Intersection-over-Union metric, computed in NumPy via tf.numpy_function."""
    def f(y_true, y_pred):
        intersection = (y_true * y_pred).sum()
        union = y_true.sum() + y_pred.sum() - intersection
        x = (intersection + 1e-15) / (union + 1e-15)
        x = x.astype(np.float32)
        return x
    return tf.numpy_function(f, [y_true, y_pred], tf.float32)

def conv_block(input, num_filters):
    """Two 3x3 convolutions, each followed by batch normalisation and ReLU."""
    x = Conv2D(num_filters, 3, padding="same")(input)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(num_filters, 3, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    return x


def decoder_block(input, skip_features, num_filters):
    """Upsample by 2 with a transposed convolution, concatenate the encoder skip
    features, then apply a conv_block."""
    x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(input)
    x = Concatenate()([x, skip_features])
    x = conv_block(x, num_filters)
    return x

def build_vgg19_unet():
    """ Input """
    inputs = Input(shape=(960, 480, 3))

    """ Pre-trained VGG19 Model """
    vgg19 = VGG19(include_top=False, weights="imagenet", input_tensor=inputs)

    """ Encoder """
    s1 = vgg19.get_layer("block1_conv2").output  ## (960 x 480)
    s2 = vgg19.get_layer("block2_conv2").output  ## (480 x 240)
    s3 = vgg19.get_layer("block3_conv4").output  ## (240 x 120)
    s4 = vgg19.get_layer("block4_conv4").output  ## (120 x 60)

    """ Bridge """
    b1 = vgg19.get_layer("block5_conv4").output  ## (60 x 30)

    """ Decoder """
    d1 = decoder_block(b1, s4, 512)  ## (120 x 60)
    d2 = decoder_block(d1, s3, 256)  ## (240 x 120)
    d3 = decoder_block(d2, s2, 128)  ## (480 x 240)
    d4 = decoder_block(d3, s1, 64)   ## (960 x 480)

    """ Output """
    outputs = Conv2D(4, 1, padding="same", activation="softmax")(d4)

    model = Model(inputs, outputs, name="VGG19_U-Net")
    return model
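

# A minimal usage sketch, not part of the original file: it builds the model and
# compiles it with the focal Tversky loss and the Dice/IoU metrics defined above.
# The Adam optimizer and the 1e-4 learning rate are assumptions for illustration,
# not values taken from the original training script.
if __name__ == "__main__":
    model = build_vgg19_unet()
    model.summary()
    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
        loss=focal_tversky,
        metrics=[dice_coef, iou],
    )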