# Install dependencies (each is a no-op if the package is already present)
!pip install tensorflow
!pip install keras
!pip install git+https://github.com/raghakot/keras-vis.git --upgrade
# Import packages
%matplotlib inline
import os
import re
import zipfile
import numpy as np
from PIL import Image
from skimage.color import gray2rgb
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.metrics import confusion_matrix, roc_auc_score
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras.preprocessing import image
from keras.models import Sequential, Model, model_from_json
from keras.layers import Dense, Dropout, Activation, Flatten, GaussianNoise, BatchNormalization, GlobalAveragePooling2D
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import Adam
from keras import activations
from keras import backend as K
from keras.backend.tensorflow_backend import set_session
from vis.visualization import visualize_cam, visualize_saliency, overlay
(pip output trimmed: tensorflow 1.12.0 and keras 2.2.4 with their dependencies were already installed; keras-vis 0.4.1 was built from the GitHub source and reinstalled. Keras reports "Using TensorFlow backend.")
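set_session is imported above but never exercised in this excerpt; a minimal sketch of its typical use under TensorFlow 1.x, assuming a CUDA-capable GPU, would be:
# Optional (assumed setup, not part of the original run): let TensorFlow
# allocate GPU memory on demand instead of reserving it all at once
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))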
These images come from a 12-year-old male with TSC. In total, there are 44 images: 30 consecutive axial T2 MRI slices and 14 consecutive axial FLAIR MRI slices.
# Set the figure size
mpl.rcParams['figure.figsize'] = (16, 10)
# Unzip the test images
with zipfile.ZipFile("TestCaseVIT2.zip", "r") as zip_ref:
    zip_ref.extractall()
with zipfile.ZipFile("TestCaseVIFLAIR.zip", "r") as zip_ref:
    zip_ref.extractall()
# Paths to the folders with the original images
pathtoimagesT2test = './TestCaseVIT2/'
pathtoimagesFLAIRtest = './TestCaseVIFLAIR/'
# Helper functions to sort file names that contain numbers in natural (human) order
def atoi(text):
    return int(text) if text.isdigit() else text

def natural_keys(text):
    return [atoi(c) for c in re.split(r'(\d+)', text)]
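A quick sanity check of the natural sort, on illustrative file names that are not from the dataset, shows why plain lexicographic sorting would misorder the slices:
# Lexicographic sorting would yield ['slice1.png', 'slice10.png', 'slice2.png'];
# natural_keys restores the intended slice order
sorted(['slice10.png', 'slice2.png', 'slice1.png'], key=natural_keys)
# ['slice1.png', 'slice2.png', 'slice10.png']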
## T2
# Define the image size
image_size = (224, 224)
# Read in the test images for T2
T2test_images = []
T2test_dir = pathtoimagesT2test
T2test_files = os.listdir(T2test_dir)
T2test_files.sort(key=natural_keys)
# For each image
for f in T2test_files:
    # Open the image
    img = Image.open(T2test_dir + f)
    # Resize the image so that it has a size 224x224
    img = img.resize(image_size)
    # Transform into a numpy array
    img_arr = np.array(img)
    # If the image is single-channel (224x224), replicate it into 3 channels (224x224x3)
    if img_arr.shape == image_size:
        img_arr = gray2rgb(img_arr)
    # Add the image to the list of images
    T2test_images.append(img_arr)
# After having transformed all images, turn the list into a numpy array
T2test_X = np.array(T2test_images)
# Create an array of labels (as read by the radiologist)
T2test_y = np.array([[0], [1], [1], [1], [1], [1], [0], [0], [0], [1],
[1], [1], [1], [1], [1], [1], [1], [1], [1], [1],
[1], [1], [1], [1], [1], [1], [1], [1], [1], [1]])
# GPU expects values to be 32-bit floats
T2test_X = T2test_X.astype(np.float32)
# Rescale the values to be between 0 and 1
T2test_X /= 255.
T2test_X.shape
(30, 224, 224, 3)
# Example of an image to make sure they were converted right
plt.imshow(T2test_X[0])
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.show()
T2test_y.shape
(30, 1)
T2test_y[0]
array([0])
## FLAIR
# Define the image size
image_size = (224, 224)
# Read in the test images for FLAIR
FLAIRtest_images = []
FLAIRtest_dir = pathtoimagesFLAIRtest
FLAIRtest_files = os.listdir(FLAIRtest_dir)
FLAIRtest_files.sort(key=natural_keys)
# For each image
for f in FLAIRtest_files:
    # Open the image
    img = Image.open(FLAIRtest_dir + f)
    # Resize the image so that it has a size 224x224
    img = img.resize(image_size)
    # Transform into a numpy array
    img_arr = np.array(img)
    # If the image is single-channel (224x224), replicate it into 3 channels (224x224x3)
    if img_arr.shape == image_size:
        img_arr = gray2rgb(img_arr)
    # Add the image to the list of images
    FLAIRtest_images.append(img_arr)
# After having transformed all images, turn the list into a numpy array
FLAIRtest_X = np.array(FLAIRtest_images)
# Create an array of labels (as read by the radiologist)
FLAIRtest_y = np.array([[1], [0], [0], [1], [1], [1], [1], [1], [1], [1],
[1], [1], [1], [1]])
# GPU expects values to be 32-bit floats
FLAIRtest_X = FLAIRtest_X.astype(np.float32)
# Rescale the values to be between 0 and 1
FLAIRtest_X /= 255.
FLAIRtest_X.shape
(14, 224, 224, 3)
# Example of an image to make sure they were converted right
plt.imshow(FLAIRtest_X[0])
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.show()
FLAIRtest_y.shape
(14, 1)
FLAIRtest_y[0]
array([1])
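The T2 and FLAIR loading blocks above are identical apart from the folder and the labels; a minimal sketch of a shared helper (hypothetical name load_mri_folder, same grayscale-input assumption as the loops above) could replace both:
# Read every image in `path` in natural order, resize it, replicate grayscale
# into 3 channels, and rescale to [0, 1] float32
def load_mri_folder(path, image_size=(224, 224)):
    files = sorted(os.listdir(path), key=natural_keys)
    images = []
    for f in files:
        img = Image.open(os.path.join(path, f)).resize(image_size)
        arr = np.array(img)
        if arr.shape == image_size:
            arr = gray2rgb(arr)
        images.append(arr)
    return np.array(images).astype(np.float32) / 255.
# e.g. T2test_X = load_mri_folder(pathtoimagesT2test)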
# Load the model architecture from JSON
with open('InceptionV3.json', 'r') as json_file:
    loaded_model_json = json_file.read()
model = model_from_json(loaded_model_json)
# Load the weights into the new model
model.load_weights("InceptionV3.h5")
# Compile the model
model.compile(optimizer=Adam(lr=0.00025), loss='binary_crossentropy', metrics=['accuracy'])
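For reference, the JSON-plus-weights round trip above can be replaced by a single HDF5 file, but only if the full model was saved with model.save; the file name below is hypothetical:
# Single-file alternative (assumes 'InceptionV3_full.h5' was written by model.save)
# from keras.models import load_model
# model = load_model('InceptionV3_full.h5')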
# Generate predictions on test data in the form of probabilities for T2
testInceptionV3T2 = model.predict(T2test_X, batch_size = 16)
testInceptionV3T2
array([[8.91966596e-02], [3.25329930e-01], [9.81550336e-01], [2.72877961e-02], [9.41658139e-01], [9.93410885e-01], [7.26149321e-01], [8.71696293e-01], [9.94991243e-01], [1.16672097e-02], [3.69621303e-05], [3.21406493e-04], [2.22215918e-03], [1.79489632e-03], [1.11742975e-05], [2.89743889e-06], [1.08731096e-04], [5.01255225e-03], [1.39816939e-05], [8.86273980e-01], [6.67993486e-01], [7.01079607e-01], [1.69479602e-03], [5.21564595e-02], [5.93621377e-03], [2.68984456e-02], [9.44773614e-01], [5.36338806e-01], [5.97211421e-01], [8.56907248e-01]], dtype=float32)
# Generate predictions on test data in the form of probabilities for FLAIR
testInceptionV3FLAIR = model.predict(FLAIRtest_X, batch_size = 16)
testInceptionV3FLAIR
array([[4.1748704e-06], [1.0746115e-06], [1.3767136e-05], [7.0781003e-05], [1.9624995e-05], [1.1489106e-05], [3.4413172e-06], [6.8435603e-01], [9.9228424e-01], [3.1245395e-07], [1.6181253e-05], [9.9991477e-01], [1.7182721e-05], [3.6751451e-05]], dtype=float32)
# Create the confusion matrix for T2
y_trueT2 = T2test_y
y_predInceptionV3T2 = testInceptionV3T2 > 0.5
confusion_matrix(y_trueT2, y_predInceptionV3T2, labels=[0,1])
array([[ 1, 3], [16, 10]], dtype=int64)
# Create the confusion matrix for FLAIR
y_trueFLAIR = FLAIRtest_y
y_predInceptionV3FLAIR = testInceptionV3FLAIR > 0.5
confusion_matrix(y_trueFLAIR, y_predInceptionV3FLAIR)
array([[2, 0], [9, 3]], dtype=int64)
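With labels=[0, 1], sklearn lays the matrix out as [[tn, fp], [fn, tp]], so per-class performance can be read off directly; as an illustration (not part of the original analysis), the FLAIR matrix above gives sensitivity 3/12 = 0.25 and specificity 2/2 = 1.0:
# Unpack the 2x2 confusion matrix into named counts (rows: true, columns: predicted)
tn, fp, fn, tp = confusion_matrix(y_trueFLAIR, y_predInceptionV3FLAIR, labels=[0, 1]).ravel()
sensitivity = tp / (tp + fn)  # fraction of tuber slices correctly flagged
specificity = tn / (tn + fp)  # fraction of tuber-free slices correctly cleared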
# Calculate accuracy for T2 (correct predictions over all predictions)
cmT2 = confusion_matrix(y_trueT2, y_predInceptionV3T2, labels=[0, 1])
accuracy_InceptionV3T2 = np.trace(cmT2) / cmT2.sum()
print('The accuracy in the test set is {}.'.format(accuracy_InceptionV3T2))
The accuracy in the test set is 0.36666666666666664.
# Calculate accuracy for FLAIR (correct predictions over all predictions)
cmFLAIR = confusion_matrix(y_trueFLAIR, y_predInceptionV3FLAIR)
accuracy_InceptionV3FLAIR = np.trace(cmFLAIR) / cmFLAIR.sum()
print('The accuracy in the test set is {}.'.format(accuracy_InceptionV3FLAIR))
The accuracy in the test set is 0.35714285714285715.
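Since roc_auc_score is already imported, the raw probabilities can also be scored without committing to the 0.5 threshold; an illustrative use (these values are not reported in the original run):
# Threshold-free ranking metric computed on the raw sigmoid outputs
aucT2 = roc_auc_score(y_trueT2.ravel(), testInceptionV3T2.ravel())
aucFLAIR = roc_auc_score(y_trueFLAIR.ravel(), testInceptionV3FLAIR.ravel())
print('AUC T2: {:.3f}, AUC FLAIR: {:.3f}'.format(aucT2, aucFLAIR))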
# Visualize the structure and layers of the model
model.layers
[<keras.engine.input_layer.InputLayer at 0xf99e048>, <keras.layers.convolutional.Conv2D at 0xf99e0b8>, <keras.layers.normalization.BatchNormalization at 0xf99e630>, <keras.layers.core.Activation at 0xf99e4a8>, ..., <keras.layers.pooling.GlobalAveragePooling2D at 0xfbb8080>, <keras.layers.core.Dense at 0xfbb80f0>, <keras.layers.core.Dense at 0xfbb8240>]
(full InceptionV3 layer listing trimmed: several hundred Conv2D/BatchNormalization/Activation/pooling/Concatenate layers, ending in a GlobalAveragePooling2D layer followed by two Dense layers)
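The layer_idx=300 passed to visualize_cam and visualize_saliency below is a position in this list; printing indices next to layer names makes the choice auditable:
# Map layer indices to layer names to see which layer layer_idx=300 points at
for idx, layer in enumerate(model.layers):
    print(idx, layer.name)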
# Iterate through the MRIs in T2
print('\n \n' + '\033[1m' + 'EACH ORIGINAL IMAGE IS ANALYZED WITH TWO METHODS: CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)' + '\033[0m' + '\n')
print('\033[1m' + 'FOR EACH METHOD, THE FIRST IMAGE IS THE ORIGINAL IMAGE, THE SECOND IMAGE IS THE MAP, AND THE THIRD IMAGE IS THE MAP SUPERIMPOSED ON THE ORIGINAL IMAGE WITH A TRANSPARENCY THAT IS PROPORTIONAL TO THE ESTIMATED PROBABILITY OF THE IMAGE HAVING TUBER(S) (HIGHER ESTIMATED PROBABILITIES PRODUCE CLEARLY SEEN MAPS OVERLAID ON THE ORIGINAL IMAGE AND LOWER ESTIMATED PROBABILITIES PRODUCE VERY TRANSPARENT MAPS OVERLAID ON THE ORIGINAL IMAGE)' + '\033[0m' + '\n \n \n \n')
for i in range(T2test_X.shape[0]):
    # Print spaces to separate from the next image
    print('\n \n \n \n \n \n \n \n')
    # Print the real classification of the image
    print('\033[1m' + 'REAL CLASSIFICATION OF THE IMAGE: {}'.format('TUBER(S)' if y_trueT2[i][0] == 1 else 'NO TUBER(S)') + '\033[0m')
    # Print the model classification and the estimated probability of tuber(s)
    print('Model classification of this image: {} \nEstimated probability of tuber(s): {} \n'.format('TUBER(S)' if testInceptionV3T2[i][0] > 0.5 else 'NO TUBER(S)', testInceptionV3T2[i][0]))
    # Print the title
    print('\033[1m' + 'CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)' + '\033[0m')
    # Original image
    plt.subplot(2, 3, 1)
    plt.imshow(T2test_X[i])
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    # Class activation map
    plt.subplot(2, 3, 2)
    heat_map = visualize_cam(model, layer_idx=300, filter_indices=None, seed_input=T2test_X[i])
    plt.imshow(heat_map)
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    # Class activation map superimposed on the original image
    plt.subplot(2, 3, 3)
    plt.imshow(T2test_X[i])
    plt.imshow(heat_map, alpha=0.8 * testInceptionV3T2[i][0])
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    # Original image
    plt.subplot(2, 3, 4)
    plt.imshow(T2test_X[i])
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    # Saliency map
    heat_map = visualize_saliency(model, layer_idx=300, filter_indices=None, seed_input=T2test_X[i])
    plt.subplot(2, 3, 5)
    plt.imshow(heat_map)
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    # Saliency map superimposed on the original image
    plt.subplot(2, 3, 6)
    plt.imshow(T2test_X[i])
    plt.imshow(heat_map, alpha=0.8 * testInceptionV3T2[i][0])
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    # Show the figure and close it
    plt.show()
    plt.close()
EACH ORIGINAL IMAGE IS ANALYZED WITH TWO METHODS: CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW) FOR EACH METHOD, THE FIRST IMAGE IS THE ORIGINAL IMAGE, THE SECOND IMAGE IS THE MAP, AND THE THIRD IMAGE IS THE MAP SUPERIMPOSED ON THE ORIGINAL IMAGE WITH A TRANSPARENCY THAT IS PROPORTIONAL TO THE ESTIMATED PROBABILITY OF THE IMAGE HAVING TUBER(S) (HIGHER ESTIMATED PROBABILITIES PRODUCE CLEARLY SEEN MAPS OVERLAID ON THE ORIGINAL IMAGE AND LOWER ESTIMATED PROBABILITIES PRODUCE VERY TRANSPARENT MAPS OVERLAID ON THE ORIGINAL IMAGE) REAL CLASSIFICATION OF THE IMAGE: NO TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 0.08919665962457657 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 0.3253299295902252 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9815503358840942 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 0.027287796139717102 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9416581392288208 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9934108853340149 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: NO TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.726149320602417 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: NO TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.8716962933540344 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: NO TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9949912428855896 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 0.011667209677398205 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 3.696213025250472e-05 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 0.0003214064927306026 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 0.0022221591789275408 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 0.0017948963213711977 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 1.1174297469551675e-05 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 2.8974388897040626e-06 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 0.00010873109567910433 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 0.0050125522539019585 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 1.3981693882669788e-05 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.886273980140686 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.6679934859275818 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.7010796070098877 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 0.0016947960248216987 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 0.05215645954012871 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 0.00593621376901865 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 0.026898445561528206 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9447736144065857 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.5363388061523438 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.5972114205360413 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.8569072484970093 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
# Iterate through the MRIs in FLAIR
print('\n \n' + '\033[1m' + 'EACH ORIGINAL IMAGE IS ANALYZED WITH TWO METHODS: CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)' + '\033[0m' + '\n')
print('\033[1m' + 'FOR EACH METHOD, THE FIRST IMAGE IS THE ORIGINAL IMAGE, THE SECOND IMAGE IS THE MAP, AND THE THIRD IMAGE IS THE MAP SUPERIMPOSED ON THE ORIGINAL IMAGE WITH A TRANSPARENCY THAT IS PROPORTIONAL TO THE ESTIMATED PROBABILITY OF THE IMAGE HAVING TUBER(S) (HIGHER ESTIMATED PROBABILITIES PRODUCE CLEARLY SEEN MAPS OVERLAID ON THE ORIGINAL IMAGE AND LOWER ESTIMATED PROBABILITIES PRODUCE VERY TRANSPARENT MAPS OVERLAID ON THE ORIGINAL IMAGE)' + '\033[0m' + '\n \n \n \n')
for i in range(FLAIRtest_X.shape[0]):
    # Print spaces to separate from the next image
    print('\n \n \n \n \n \n \n \n')
    # Print the real classification of the image
    print('\033[1m' + 'REAL CLASSIFICATION OF THE IMAGE: {}'.format('TUBER(S)' if y_trueFLAIR[i][0] == 1 else 'NO TUBER(S)') + '\033[0m')
    # Print the model classification and the estimated probability of tuber(s)
    print('Model classification of this image: {} \nEstimated probability of tuber(s): {} \n'.format('TUBER(S)' if testInceptionV3FLAIR[i][0] > 0.5 else 'NO TUBER(S)', testInceptionV3FLAIR[i][0]))
    # Print the title
    print('\033[1m' + 'CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)' + '\033[0m')
    # Original image
    plt.subplot(2, 3, 1)
    plt.imshow(FLAIRtest_X[i])
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    # Class activation map
    plt.subplot(2, 3, 2)
    heat_map = visualize_cam(model, layer_idx=300, filter_indices=None, seed_input=FLAIRtest_X[i])
    plt.imshow(heat_map)
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    # Class activation map superimposed on the original image
    plt.subplot(2, 3, 3)
    plt.imshow(FLAIRtest_X[i])
    plt.imshow(heat_map, alpha=0.8 * testInceptionV3FLAIR[i][0])
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    # Original image
    plt.subplot(2, 3, 4)
    plt.imshow(FLAIRtest_X[i])
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    # Saliency map
    heat_map = visualize_saliency(model, layer_idx=300, filter_indices=None, seed_input=FLAIRtest_X[i])
    plt.subplot(2, 3, 5)
    plt.imshow(heat_map)
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    # Saliency map superimposed on the original image
    plt.subplot(2, 3, 6)
    plt.imshow(FLAIRtest_X[i])
    plt.imshow(heat_map, alpha=0.8 * testInceptionV3FLAIR[i][0])
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    # Show the figure and close it
    plt.show()
    plt.close()
EACH ORIGINAL IMAGE IS ANALYZED WITH TWO METHODS: CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW) FOR EACH METHOD, THE FIRST IMAGE IS THE ORIGINAL IMAGE, THE SECOND IMAGE IS THE MAP, AND THE THIRD IMAGE IS THE MAP SUPERIMPOSED ON THE ORIGINAL IMAGE WITH A TRANSPARENCY THAT IS PROPORTIONAL TO THE ESTIMATED PROBABILITY OF THE IMAGE HAVING TUBER(S) (HIGHER ESTIMATED PROBABILITIES PRODUCE CLEARLY SEEN MAPS OVERLAID ON THE ORIGINAL IMAGE AND LOWER ESTIMATED PROBABILITIES PRODUCE VERY TRANSPARENT MAPS OVERLAID ON THE ORIGINAL IMAGE) REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 4.174870355200255e-06 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: NO TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 1.074611532203562e-06 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: NO TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 1.3767135897069238e-05 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 7.078100315993652e-05 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 1.9624994820333086e-05 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 1.148910632764455e-05 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 3.4413171761116246e-06 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.6843560338020325 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9922842383384705 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 3.1245394893630873e-07 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 1.618125315872021e-05 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9999147653579712 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 1.7182721421704628e-05 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 3.675145126180723e-05 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
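As a closing note, the keras-vis documentation recommends swapping the network's final output activation for a linear one before computing saliency and CAM visualizations, so gradients are not squashed by the sigmoid; a minimal sketch of that step (not applied in the runs above):
# Optional keras-vis preprocessing (assumed, not used in this notebook):
# replace the last layer's sigmoid with a linear activation and rebuild the model
from vis.utils import utils
model.layers[-1].activation = activations.linear
model = utils.apply_modifications(model)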