# main - 3.train classification model predict probability and generate saliency map.py

import os

import cv2
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
from tensorflow.keras.models import Sequential
from tf_keras_vis.saliency import Saliency
from tf_keras_vis.utils import normalize
def score_function(inputs):
    # The model ends in a single sigmoid unit, so the output has shape (batch, 1);
    # reduce_max over axis 1 simply returns that predicted probability as the score
    # that tf-keras-vis differentiates with respect to the input pixels.
    return tf.reduce_max(inputs, axis=1)
def loadImages(path, urls, target):
    """Read every file listed in `urls` under `path` and pair it with `target` (1 = patient, 0 = control)."""
    images = []
    labels = []
    for i in range(len(urls)):
        img_path = os.path.join(path, urls[i])
        img = cv2.imread(img_path)
        # img = img * 255.0
        # img = cv2.resize(img, (100, 100))
        images.append(img)
        labels.append(target)
    images = np.asarray(images)
    return images, labels
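# Note (added): the resize/rescale lines above are commented out in the original
# script, so every image on disk is assumed to already be 205 x 101 pixels with
# 3 channels, matching the input shape declared for the first Conv2D layer below.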
# Load and preprocess the data
covid_path = "H:/WFH/AI/data/patient"
covid_urls = os.listdir(covid_path)
covidImages, covidTargets = loadImages(covid_path, covid_urls, 1)
normal_path = "H:/WFH/AI/data/control"
normal_urls = os.listdir(normal_path)
normalImages, normalTargets = loadImages(normal_path, normal_urls, 0)
data = np.concatenate((covidImages, normalImages), axis=0)
targets = np.concatenate((covidTargets, normalTargets), axis=0)
x_train, x_test, y_train, y_test = train_test_split(data, targets, test_size=0.8)
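# Optional sanity check (added, not in the original script): confirm the array
# shapes before training; a None returned by cv2.imread or a mismatched image
# size would surface here as an object-dtype array or an unexpected shape.
print("x_train:", x_train.shape, "x_test:", x_test.shape)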
# Build the model
model = Sequential([
    Conv2D(32, 3, input_shape=(205, 101, 3), activation='relu'),
    MaxPooling2D(),
    Conv2D(16, 3, activation='relu'),
    MaxPooling2D(),
    Conv2D(16, 3, activation='relu'),
    MaxPooling2D(),
    Flatten(),
    Dense(512, activation='relu'),
    Dense(256, activation='relu'),
    Dense(1, activation='sigmoid')
])
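# Optional (added): print the architecture so the flattened feature size and
# parameter count are visible before training.
model.summary()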
print(len(x_test))
print(len(y_test))
# Compile and train the model
model.compile(optimizer='adam', loss=tf.keras.losses.BinaryCrossentropy(), metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=32, epochs=10, validation_data=(x_test, y_test))
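# Optional cross-check (added): Keras' own held-out loss and accuracy, for
# comparison with the sklearn accuracy computed further below.
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)
print(f"Test loss: {test_loss:.4f}, test accuracy: {test_acc:.4f}")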
# Save the trained model
model.save("H:/WFH/AI/data/trained_model.keras")
# Load the saved model
loaded_model = tf.keras.models.load_model("H:/WFH/AI/data/trained_model.keras")
# Perform model predictions
predictions = loaded_model.predict(x_test)
# Extract the prediction probabilities
prediction_probabilities = predictions.squeeze() # Squeeze to remove extra dimensions
# Create a histogram of prediction probabilities
plt.hist(prediction_probabilities, bins=20, edgecolor='black')
plt.xlabel('Prediction Probabilities')
plt.ylabel('Frequency')
plt.title('Distribution of Prediction Probabilities')
plt.show()
# Convert probabilities to binary predictions (0 or 1)
binary_predictions = (predictions >= 0.5).astype(int)
# Calculate accuracy
accuracy = accuracy_score(y_test, binary_predictions)
# Print the accuracy percentage
accuracy_percentage = accuracy * 100
print(f"Accuracy: {accuracy_percentage:.2f}%")
# Define the file names of the selected samples
file_names = ['control_1.png', 'control_2.png', 'patient_11.png', 'patient_15.png']
# Use a diverging color map; with RdYlGn reversed, high-saliency (important) regions
# appear red and low-saliency regions appear green
cmap = plt.cm.RdYlGn_r
# Create a subplot grid for displaying the images, saliency maps, and overlays
fig, ax = plt.subplots(4, 3, figsize=(16, 24))
# Generate and display the images and saliency maps
# Create a single Saliency instance for the loaded model; the module-level
# score_function defined above is reused here
saliency = Saliency(loaded_model, model_modifier=None)
for i, file_name in enumerate(file_names):
    # Load and preprocess the input image
    img_path = 'H:/WFH/AI/data/' + ('patient' if 'patient' in file_name else 'control') + '/' + file_name
    input_image = cv2.imread(img_path)
    # Convert BGR to RGB for display (note: the training images were loaded in BGR
    # without this conversion)
    input_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB)
    # input_image = cv2.resize(input_image, (100, 100)) / 255.0
    input_image_preprocessed = np.expand_dims(input_image, axis=0)
    input_image_preprocessed = tf.cast(input_image_preprocessed, tf.float32)
    # Generate the saliency map
    saliency_map = saliency(score_function, input_image_preprocessed)
    # Normalize the saliency map to the [0, 1] range
    saliency_map = normalize(saliency_map)
    # Display the original image in the left panel
    ax[i, 0].imshow(input_image)
    ax[i, 0].axis('off')
    ax[i, 0].set_title(file_name)
    # Display the saliency map in the middle panel with the custom color map
    ax[i, 1].imshow(saliency_map[0], cmap=cmap)
    ax[i, 1].axis('off')
    ax[i, 1].set_title('Saliency Map')
    # Blend the saliency map and the original image in the right panel
    ax[i, 2].imshow(saliency_map[0], cmap=cmap, alpha=0.5)
    ax[i, 2].imshow(input_image, alpha=0.5)
    ax[i, 2].axis('off')
    ax[i, 2].set_title('Overlay')
# Adjust the spacing between subplots
plt.tight_layout()
# Save the plot as a JPEG file
plt.savefig('H:/WFH/AI/Classicses saliency_map_with_overlay.jpg', dpi=600, bbox_inches='tight', pad_inches=0)
# Show the plot
plt.show()