# -*- coding: utf-8 -*-
"""test_heatmap_lucie.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1g_KwiPQK5pd214cUTmdySUhelfCAdr4p

# Testing the EDA implementations: heatmaps

## A Detailed Look At CNN-based Approaches In Facial Landmark Detection
"""
# Shell command from the original notebook (run it in Colab or a terminal to
# fetch the reference implementation):
# !git clone https://github.com/chihfanhsu/fl_detection.git
import tensorflow_datasets as tfds
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import cv2
# Check TF version & make sure it is running on GPU
print(tf.__version__)
print(tf.test.gpu_device_name())
"""### Get AFLW2000"""
ds, info = tfds.load('aflw2k3d', with_info = True, split=['train[:90%]', 'train[90%:]'])
ds_train = ds[0] # 90% (1800 images)
ds_test = ds[1]
len(ds_train), len(ds_test)
ds_train
img_size = 224
"""### HEATMAPS"""
def generate_heatmap(heatmap_size, center_point, sigma):
    def _generate_gaussian_map(sigma):
        """Generate a Gaussian distribution whose center value equals 1."""
        heat_range = 2 * sigma * 3 + 1
        xs = np.arange(0, heat_range, 1, np.float32)
        ys = xs[:, np.newaxis]
        x_core = y_core = heat_range // 2
        gaussian = np.exp(-((xs - x_core) ** 2 + (ys - y_core) ** 2)
                          / (2 * sigma ** 2))
        return gaussian

    # Check whether any part of the Gaussian is in-bounds.
    map_height, map_width = heatmap_size
    x, y = int(center_point[0]), int(center_point[1])
    radius = sigma * 3
    x0, y0 = x - radius, y - radius
    x1, y1 = x + radius + 1, y + radius + 1

    # If the distribution falls entirely outside the map, return an empty map.
    if x0 >= map_width or y0 >= map_height or x1 < 0 or y1 < 0:
        return np.zeros(heatmap_size)

    # Generate a Gaussian map.
    gaussian = _generate_gaussian_map(sigma)

    # Crop the Gaussian map to its intersection with the heatmap.
    x_gauss = max(0, -x0), min(x1, map_width) - x0
    y_gauss = max(0, -y0), min(y1, map_height) - y0
    gaussian = gaussian[y_gauss[0]:y_gauss[1], x_gauss[0]:x_gauss[1]]

    # Pad the cropped Gaussian with zeros to get the full-size heatmap.
    pad_width = np.max(
        [[0, 0, 0, 0], [y0, map_height - y1, x0, map_width - x1]],
        axis=0).reshape([2, 2])
    heatmap = np.pad(gaussian, pad_width, mode='constant')
    return heatmap
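# Quick sanity check (illustrative addition, not in the original notebook):
# a landmark at the center of a 64x64 map should give a peak of 1.0 at
# (32, 32), and a far out-of-bounds landmark should give an all-zero map.
_hm = generate_heatmap((64, 64), (32, 32), sigma=3)
assert _hm.shape == (64, 64) and _hm[32, 32] == 1.0
assert not generate_heatmap((64, 64), (500, 500), sigma=3).any()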
def generate_heatmaps(norm_marks, map_size=(64, 64), sigma=3):
    """Generate one heatmap per mark and stack them channel-last."""
    maps = []
    width, height = map_size
    for norm_mark in norm_marks:
        # Marks are normalized to [0, 1]; scale them to map coordinates.
        x = width * norm_mark[0]
        y = height * norm_mark[1]
        heatmap = generate_heatmap(map_size, (x, y), sigma)
        maps.append(heatmap)
    maps = np.array(maps, dtype=np.float32)
    # Transpose from (n_marks, h, w) to (h, w, n_marks).
    return np.einsum("kij->ijk", maps)
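# Shape check (illustrative): 68 normalized marks produce a channel-last
# (64, 64, 68) stack with the default map size.
assert generate_heatmaps(np.random.rand(68, 2)).shape == (64, 64, 68)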
def map_gaussian(landmarks):
    heatmaps = generate_heatmaps(landmarks, (img_size, img_size))
    return heatmaps

def tf_map_gaussian(ex):
    return tf.py_function(map_gaussian, [ex], tf.float32)
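# Note (assumed TF 2.x behavior): inside a dataset .map(), tf.py_function
# returns a tensor with an unknown static shape, which is why the pipelines
# below wrap tf_map_gaussian in tf.reshape to reassert
# (img_size, img_size, 68) before batching.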
# for image, heatmap in ds_train.take(2):
#     plt.imshow(np.sum(heatmap.numpy(), axis=0))
resize_and_rescale = tf.keras.Sequential([
    tf.keras.layers.experimental.preprocessing.Resizing(img_size, img_size),
    tf.keras.layers.experimental.preprocessing.Rescaling(1. / 255)
])
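# Quick check (illustrative addition): applied to a raw uint8 image, the
# pipeline should yield a float tensor of shape (224, 224, 3) with values
# in [0, 1]. `_img` is just a throwaway sample.
_img = next(iter(ds_train))['image']
_out = resize_and_rescale(_img)
print(_out.shape, float(tf.reduce_min(_out)), float(tf.reduce_max(_out)))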
ds_train = ds_train.map(
    lambda x: (resize_and_rescale(x['image']),
               tf.reshape(tf_map_gaussian(x['landmarks_68_3d_xy_normalized']),
                          (img_size, img_size, 68))),
    num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(100)
ds_train = ds_train.batch(16)
ds_train = ds_train.prefetch(tf.data.experimental.AUTOTUNE)
print(ds_train)
# Visualize one ground-truth heatmap channel from a training batch.
for image, landmarks in ds_train.take(1):
    training = landmarks
plt.imshow(training[0, :, :, 9])
ds_test = ds_test.map(
    lambda x: (resize_and_rescale(x['image']),
               tf.reshape(tf_map_gaussian(x['landmarks_68_3d_xy_normalized']),
                          (img_size, img_size, 68))),
    num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_test = ds_test.batch(16)
ds_test = ds_test.cache()
ds_test = ds_test.prefetch(tf.data.experimental.AUTOTUNE)
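# Shape sanity check (illustrative addition): each batch should pair
# (None, 224, 224, 3) images with (None, 224, 224, 68) heatmap stacks.
print(ds_test.element_spec)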
"""### Modèle"""
# Helper function for building the model.
def conv_block(x, nconvs, n_filters, block_name, wd=None):
    for i in range(nconvs):
        x = tf.keras.layers.Conv2D(n_filters, kernel_size=(3, 3), strides=1,
                                   padding='same', activation='relu',
                                   kernel_regularizer=wd,
                                   name=block_name + "_conv" + str(i + 1))(x)
    x = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=2, padding='valid',
                                     name=block_name + "_pool")(x)
    return x
# Builds one stage of the model: a VGG-style backbone with FCN-8s-style skip
# connections from pool3 and pool4, ending in full-resolution heatmaps.
def stages(x, stage_num, num_keypoints=68):
    # Block 1
    x = conv_block(x, nconvs=2, n_filters=64, block_name="block1_stage{}".format(stage_num))
    # Block 2
    x = conv_block(x, nconvs=2, n_filters=128, block_name="block2_stage{}".format(stage_num))
    # Block 3
    pool3 = conv_block(x, nconvs=3, n_filters=256, block_name="block3_stage{}".format(stage_num))
    # Block 4
    pool4 = conv_block(pool3, nconvs=3, n_filters=512, block_name="block4_stage{}".format(stage_num))
    # Block 5
    x = conv_block(pool4, nconvs=3, n_filters=512, block_name="block5_stage{}".format(stage_num))
    # Convolution 6
    x = tf.keras.layers.Conv2D(4096, kernel_size=(1, 1), strides=1, padding="same", activation="relu",
                               name="conv6_stage{}".format(stage_num))(x)
    # Convolution 7
    x = tf.keras.layers.Conv2D(68, kernel_size=(1, 1), strides=1, padding="same", activation="relu",
                               name="conv7_stage{}".format(stage_num))(x)
    # Upsampling and skip-connection fusion
    preds_pool3 = tf.keras.layers.Conv2D(68, kernel_size=(1, 1), strides=1, padding="same",
                                         name="preds_pool3_stage{}".format(stage_num))(pool3)
    preds_pool4 = tf.keras.layers.Conv2D(68, kernel_size=(1, 1), strides=1, padding="same",
                                         name="preds_pool4_stage{}".format(stage_num))(pool4)
    up_pool4 = tf.keras.layers.Conv2DTranspose(filters=68, kernel_size=2, strides=2, activation='relu',
                                               name="ConvT_pool4_stage{}".format(stage_num))(preds_pool4)
    up_conv7 = tf.keras.layers.Conv2DTranspose(filters=68, kernel_size=4, strides=4, activation='relu',
                                               name="ConvT_conv7_stage{}".format(stage_num))(x)
    fusion = tf.keras.layers.add([preds_pool3, up_pool4, up_conv7])
    heatmaps = tf.keras.layers.Conv2DTranspose(filters=68, kernel_size=8, strides=8, activation='relu',
                                               name="convT_fusion_stage{}".format(stage_num))(fusion)
    # num_keypoints is only used in this final 1x1 convolution.
    heatmaps = tf.keras.layers.Conv2D(num_keypoints, kernel_size=(1, 1), strides=1, padding="same",
                                      activation="linear", name="output_stage{}".format(stage_num))(heatmaps)
    return heatmaps
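# Spatial bookkeeping for one stage (a sketch assuming img_size = 224):
#   blocks 1-5 each halve the resolution: 224 -> 112 -> 56 -> 28 (pool3)
#   -> 14 (pool4) -> 7 (block5, conv6, conv7)
#   up_pool4 (stride-2 transpose):  14 -> 28
#   up_conv7 (stride-4 transpose):   7 -> 28
#   preds_pool3 stays at 28, so the three maps fuse at 28x28; the final
#   stride-8 transpose conv restores 28 -> 224, one channel per landmark.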
def build_model(input_shape):
    outputs = []
    img = tf.keras.Input(shape=input_shape, name="Input_stage")
    ### Stage 1 ###
    heatmaps1 = stages(img, 1)
    outputs.append(heatmaps1)
    ### Stage 2: refines stage-1 predictions from the image + heatmaps ###
    x = tf.keras.layers.concatenate([img, heatmaps1])
    heatmaps2 = stages(x, 2)
    outputs.append(heatmaps2)
    model = tf.keras.Model(inputs=img, outputs=outputs, name="FCN_Final")
    return model
model = build_model((img_size, img_size, 3))
optimizer = tf.keras.optimizers.Adam()
loss_object = tf.keras.losses.mean_squared_error
model.compile(optimizer, loss_object)
model.summary()
history = model.fit(ds_train, epochs=40, validation_data=ds_test)
# Save the training history (pandas is already imported above).
pd.DataFrame.from_dict(history.history).to_csv('history_lucie.csv', index=False)
model.save_weights('model_heatmaps_lucie.h5')
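# Illustrative follow-up (a sketch, not part of the original notebook): decode
# predicted heatmaps back into (x, y) pixel coordinates with a per-channel
# argmax, then overlay them on a test image. Only the final stage's output
# (index 1) is used; `decode_heatmaps` is a hypothetical helper.
def decode_heatmaps(heatmaps):
    """Return an array of shape (num_keypoints, 2) with (x, y) coordinates."""
    coords = []
    for k in range(heatmaps.shape[-1]):
        y, x = np.unravel_index(np.argmax(heatmaps[:, :, k]), heatmaps.shape[:2])
        coords.append((x, y))
    return np.array(coords)

for images, _ in ds_test.take(1):
    preds = model.predict(images)         # list of [stage-1, stage-2] outputs
    marks = decode_heatmaps(preds[1][0])  # final stage, first image in batch
    plt.imshow(images[0])
    plt.scatter(marks[:, 0], marks[:, 1], s=8, c='r')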