# Import packages
%matplotlib inline
from PIL import Image
import numpy as np
import os
import re
from skimage.color import gray2rgb
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
!pip install tensorflow
!pip install keras
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, GaussianNoise, BatchNormalization, GlobalAveragePooling2D
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import Adam
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
from keras.preprocessing import image
from keras.models import Model
from keras import backend as K
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_auc_score
!pip install git+https://github.com/raghakot/keras-vis.git --upgrade
from vis.visualization import visualize_cam, visualize_saliency, overlay
from keras import activations
import matplotlib.cm as cm
import zipfile
from keras.models import model_from_json
import matplotlib as mpl
Requirement already satisfied: tensorflow in c:\programdata\anaconda3\lib\site-packages (1.12.0)
Requirement already satisfied: keras in c:\programdata\anaconda3\lib\site-packages (2.2.4)
Successfully installed keras-vis-0.4.1
Using TensorFlow backend.
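The imports above bring in set_session, which is never called in this notebook. A typical TF 1.x use (an optional sketch, not part of the original run) is to let TensorFlow allocate GPU memory on demand rather than reserving the whole card:
# Optional GPU setup sketch for TF 1.x / Keras 2.2: grow GPU memory
# on demand instead of reserving it all at session creation.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))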
These images come from a 3-year-old male with TSC. In total, there are 45 images: 30 consecutive axial T2 MRI slices and 15 consecutive axial FLAIR MRI slices.
# Set the figure size
mpl.rcParams['figure.figsize'] = (16,10)
# Unzip files
with zipfile.ZipFile("TestCaseIIIT2.zip","r") as zip_ref:
zip_ref.extractall()
with zipfile.ZipFile("TestCaseIIIFLAIR.zip","r") as zip_ref:
zip_ref.extractall()
# Path to the folder with the original images
pathtoimagesT2test = './TestCaseIIIT2/'
pathtoimagesFLAIRtest = './TestCaseIIIFLAIR/'
# Helper functions for natural sorting of filenames that contain numbers
def atoi(text):
    return int(text) if text.isdigit() else text

def natural_keys(text):
    return [atoi(c) for c in re.split(r'(\d+)', text)]
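As a quick check (with hypothetical filenames), natural_keys sorts numbered names numerically rather than lexicographically:
# Example: natural sort puts 'slice10' after 'slice2'
files = ['slice10.png', 'slice2.png', 'slice1.png']
files.sort(key=natural_keys)
print(files)  # ['slice1.png', 'slice2.png', 'slice10.png']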
## T2
# Define the image size
image_size = (224, 224)
# Read in the test images for T2
T2test_images = []
T2test_dir = pathtoimagesT2test
T2test_files = os.listdir(T2test_dir)
T2test_files.sort(key=natural_keys)
# For each image
for f in T2test_files:
    # Open the image
    img = Image.open(T2test_dir + f)
    # Resize the image so that it has a size 224x224
    img = img.resize(image_size)
    # Transform into a numpy array
    img_arr = np.array(img)
    # If the image is grayscale (224x224), stack it into 3 channels (224x224x3)
    if img_arr.shape == image_size:
        img_arr = gray2rgb(img_arr)
    # Add the image to the list of images
    T2test_images.append(img_arr)
# Convert the list of images into a numpy array
T2test_X = np.array(T2test_images)
# Create an array of labels (as read by the radiologist)
T2test_y = np.ones((30, 1), dtype=int)
# GPU expects values to be 32-bit floats
T2test_X = T2test_X.astype(np.float32)
# Rescale the values to be between 0 and 1
T2test_X /= 255.
T2test_X.shape
(30, 224, 224, 3)
# Example of an image to make sure they were converted right
plt.imshow(T2test_X[0])
plt.grid(b=None)
plt.xticks([])
plt.yticks([])
plt.show()
T2test_y.shape
(30, 1)
T2test_y[0]
array([1])
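The FLAIR slices below are loaded with exactly the same steps; a small helper (a sketch consolidating the logic above, not part of the original run) would avoid repeating the loop:
# Sketch: reusable loader for a folder of MRI slices (same logic as above)
def load_mri_folder(path, image_size=(224, 224)):
    images = []
    for f in sorted(os.listdir(path), key=natural_keys):
        img_arr = np.array(Image.open(path + f).resize(image_size))
        if img_arr.shape == image_size:
            # Grayscale slice: replicate it into 3 identical RGB channels
            img_arr = gray2rgb(img_arr)
        images.append(img_arr)
    return np.array(images).astype(np.float32) / 255.
With this helper, FLAIRtest_X = load_mri_folder(pathtoimagesFLAIRtest) would reproduce the block below.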
## FLAIR
# Define the image size
image_size = (224, 224)
# Read in the test images for FLAIR
FLAIRtest_images = []
FLAIRtest_dir = pathtoimagesFLAIRtest
FLAIRtest_files = os.listdir(FLAIRtest_dir)
FLAIRtest_files.sort(key=natural_keys)
# For each image
for f in FLAIRtest_files:
    # Open the image
    img = Image.open(FLAIRtest_dir + f)
    # Resize the image so that it has a size 224x224
    img = img.resize(image_size)
    # Transform into a numpy array
    img_arr = np.array(img)
    # If the image is grayscale (224x224), stack it into 3 channels (224x224x3)
    if img_arr.shape == image_size:
        img_arr = gray2rgb(img_arr)
    # Add the image to the list of images
    FLAIRtest_images.append(img_arr)
# Convert the list of images into a numpy array
FLAIRtest_X = np.array(FLAIRtest_images)
# Create an array of labels (as read by the radiologist)
FLAIRtest_y = np.ones((15, 1), dtype=int)
# GPU expects values to be 32-bit floats
FLAIRtest_X = FLAIRtest_X.astype(np.float32)
# Rescale the values to be between 0 and 1
FLAIRtest_X /= 255.
FLAIRtest_X.shape
(15, 224, 224, 3)
# Example of an image to make sure they were converted right
plt.imshow(FLAIRtest_X[0])
plt.grid(b=None)
plt.xticks([])
plt.yticks([])
plt.show()
FLAIRtest_y.shape
(15, 1)
FLAIRtest_y[0]
array([1])
# Load the model architecture from JSON
with open('InceptionV3.json', 'r') as json_file:
    loaded_model_json = json_file.read()
model = model_from_json(loaded_model_json)
# Load the weights into the model
model.load_weights("InceptionV3.h5")
# Compile model
model.compile(optimizer = Adam(lr = 0.00025), loss = 'binary_crossentropy', metrics = ['accuracy'])
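For context, a JSON/H5 pair like InceptionV3.json and InceptionV3.h5 is typically written from the training notebook with the standard Keras serialization calls (a sketch; the training code itself is not shown here):
# Presumed save-side counterpart of the loading code above (Keras 2.x API)
with open('InceptionV3.json', 'w') as json_file:
    json_file.write(model.to_json())   # architecture only
model.save_weights('InceptionV3.h5')   # weights only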
# Generate predictions on test data in the form of probabilities for T2
testInceptionV3T2 = model.predict(T2test_X, batch_size = 16)
testInceptionV3T2
array([[0.97128016], [0.98313373], [0.9936854 ], [0.8826302 ], [0.9887059 ], [0.9701818 ], [0.99304694], [0.9985353 ], [0.991055 ], [0.99696606], [0.9991079 ], [0.99964345], [0.99947494], [0.06948267], [0.7276965 ], [0.9915318 ], [0.9998078 ], [0.99777645], [0.95671356], [0.9988028 ], [0.7611516 ], [0.93883866], [0.99327374], [0.99999714], [0.99999964], [0.9999666 ], [0.9995834 ], [0.99939716], [0.96550536], [0.99648064]], dtype=float32)
# Generate predictions on test data in the form of probabilities for FLAIR
testInceptionV3FLAIR = model.predict(FLAIRtest_X, batch_size = 16)
testInceptionV3FLAIR
array([[2.3385956e-06], [6.1750497e-06], [8.7732601e-01], [1.2183644e-05], [2.7003624e-02], [9.9812394e-01], [1.8968593e-04], [8.9711766e-06], [9.9560213e-01], [4.3499611e-05], [6.5420043e-01], [2.1050316e-04], [9.9999952e-01], [9.9999940e-01], [9.9999952e-01]], dtype=float32)
# Create the confusion matrix for T2
y_trueT2 = T2test_y
y_predInceptionV3T2 = testInceptionV3T2 > 0.5
confusion_matrix(y_trueT2, y_predInceptionV3T2, labels=[0,1])
array([[ 0, 0], [ 1, 29]], dtype=int64)
# Create the confusion matrix for FLAIR
y_trueFLAIR = FLAIRtest_y
y_predInceptionV3FLAIR = testInceptionV3FLAIR > 0.5
confusion_matrix(y_trueFLAIR, y_predInceptionV3FLAIR)
array([[0, 0], [8, 7]], dtype=int64)
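Since every slice in this test case is labeled positive, the top row of each confusion matrix is all zeros and accuracy coincides with sensitivity (recall). A sketch of that calculation for FLAIR:
# Sensitivity = TP / (TP + FN); cm.ravel() returns (tn, fp, fn, tp)
cm = confusion_matrix(y_trueFLAIR, y_predInceptionV3FLAIR, labels=[0, 1])
tn, fp, fn, tp = cm.ravel()
print('Sensitivity: {:.3f}'.format(tp / (tp + fn)))  # 7 / 15 = 0.467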
# Calculate accuracy for T2
cmT2 = confusion_matrix(y_trueT2, y_predInceptionV3T2, labels=[0, 1])
accuracy_InceptionV3T2 = (cmT2[0, 0] + cmT2[1, 1]) / cmT2.sum()
print('The accuracy in the test set is {}.'.format(accuracy_InceptionV3T2))
The accuracy in the test set is 0.9666666666666667.
# Calculate accuracy for FLAIR
cmFLAIR = confusion_matrix(y_trueFLAIR, y_predInceptionV3FLAIR, labels=[0, 1])
accuracy_InceptionV3FLAIR = (cmFLAIR[0, 0] + cmFLAIR[1, 1]) / cmFLAIR.sum()
print('The accuracy in the test set is {}.'.format(accuracy_InceptionV3FLAIR))
The accuracy in the test set is 0.4666666666666667.
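Equivalently, scikit-learn's accuracy_score computes the same numbers in one call:
from sklearn.metrics import accuracy_score
print(accuracy_score(y_trueT2, y_predInceptionV3T2))        # 0.9666666666666667
print(accuracy_score(y_trueFLAIR, y_predInceptionV3FLAIR))  # 0.4666666666666667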
# Visualize the structure and layers of the model
model.layers
[<keras.engine.input_layer.InputLayer at 0xf7b1240>, <keras.layers.convolutional.Conv2D at 0xf7b12b0>, <keras.layers.normalization.BatchNormalization at 0xf7b17f0>, <keras.layers.core.Activation at 0xf7b1668>, ..., <keras.layers.pooling.GlobalAveragePooling2D at 0xff19240>, <keras.layers.core.Dense at 0xff192b0>, <keras.layers.core.Dense at 0xff19400>]
(Output truncated: the full list runs through the InceptionV3 stack of Conv2D, BatchNormalization, Activation, pooling, and Concatenate layers, ending in a GlobalAveragePooling2D layer followed by two Dense layers.)
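The visualizations below hard-code layer_idx=300 for visualize_cam and visualize_saliency. keras-vis can also resolve the index from a layer name, which is less brittle if the architecture changes ('dense_2' is an assumed name for the final Dense layer here, not taken from the original run):
from vis.utils import utils
# Resolve a layer index by name instead of hard-coding it
# ('dense_2' is an assumption about how the final layer was named).
layer_idx = utils.find_layer_idx(model, 'dense_2')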
# Iterate through the MRIs in T2
print('\n \n' + '\033[1m' + 'EACH ORIGINAL IMAGE IS ANALYZED WITH TWO METHODS: CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)' + '\033[0m' + '\n')
print('\033[1m' + 'FOR EACH METHOD, THE FIRST IMAGE IS THE ORIGINAL IMAGE, THE SECOND IMAGE IS THE MAP, AND THE THIRD IMAGE IS THE MAP SUPERIMPOSED ON THE ORIGINAL IMAGE WITH A TRANSPARENCY THAT IS PROPORTIONAL TO THE ESTIMATED PROBABILITY OF THE IMAGE HAVING TUBER(S) (HIGHER ESTIMATED PROBABILITIES PRODUCE CLEARLY SEEN MAPS OVERLAID ON THE ORIGINAL IMAGE AND LOWER ESTIMATED PROBABILITIES PRODUCE VERY TRANSPARENT MAPS OVERLAID ON THE ORIGINAL IMAGE)' + '\033[0m' + '\n \n \n \n')
for i in range(T2test_X.shape[0]):
    # Print spaces to separate from the next image
    print('\n \n \n \n \n \n \n \n')
    # Print the real classification of the image
    print('\033[1m' + 'REAL CLASSIFICATION OF THE IMAGE: {}'.format('TUBER(S)' if y_trueT2[i][0]==1 else 'NO TUBER(S)') + '\033[0m')
    # Print the model classification and the estimated probability of tuber(s)
    print('Model classification of this image: {} \nEstimated probability of tuber(s): {} \n'.format('TUBER(S)' if testInceptionV3T2[i][0]>0.5 else 'NO TUBER(S)', testInceptionV3T2[i][0]))
    # Print title
    print('\033[1m' + 'CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)' + '\033[0m')
    # Original image
    plt.subplot(2, 3, 1)
    plt.imshow(T2test_X[i])
    plt.grid(b=None)
    plt.xticks([])
    plt.yticks([])
    # Class activation map
    plt.subplot(2, 3, 2)
    heat_map = visualize_cam(model, layer_idx=300, filter_indices=None, seed_input=T2test_X[i])
    plt.imshow(heat_map)
    plt.grid(b=None)
    plt.xticks([])
    plt.yticks([])
    # Class activation map superimposed on the original image
    plt.subplot(2, 3, 3)
    plt.imshow(T2test_X[i])
    plt.imshow(heat_map, alpha=0.8 * testInceptionV3T2[i][0])
    plt.grid(b=None)
    plt.xticks([])
    plt.yticks([])
    # Original image
    plt.subplot(2, 3, 4)
    plt.imshow(T2test_X[i])
    plt.grid(b=None)
    plt.xticks([])
    plt.yticks([])
    # Saliency map
    heat_map = visualize_saliency(model, layer_idx=300, filter_indices=None, seed_input=T2test_X[i])
    plt.subplot(2, 3, 5)
    plt.imshow(heat_map)
    plt.grid(b=None)
    plt.xticks([])
    plt.yticks([])
    # Saliency map superimposed on the original image
    plt.subplot(2, 3, 6)
    plt.imshow(T2test_X[i])
    plt.imshow(heat_map, alpha=0.8 * testInceptionV3T2[i][0])
    plt.grid(b=None)
    plt.xticks([])
    plt.yticks([])
    # Show the figure and close it
    plt.show()
    plt.close()
EACH ORIGINAL IMAGE IS ANALYZED WITH TWO METHODS: CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
FOR EACH METHOD, THE FIRST IMAGE IS THE ORIGINAL IMAGE, THE SECOND IMAGE IS THE MAP, AND THE THIRD IMAGE IS THE MAP SUPERIMPOSED ON THE ORIGINAL IMAGE WITH A TRANSPARENCY THAT IS PROPORTIONAL TO THE ESTIMATED PROBABILITY OF THE IMAGE HAVING TUBER(S) (HIGHER ESTIMATED PROBABILITIES PRODUCE CLEARLY SEEN MAPS OVERLAID ON THE ORIGINAL IMAGE AND LOWER ESTIMATED PROBABILITIES PRODUCE VERY TRANSPARENT MAPS OVERLAID ON THE ORIGINAL IMAGE)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9712801575660706 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9831337332725525 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9936854243278503 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.8826302289962769 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9887058734893799 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9701818227767944 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9930469393730164 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9985352754592896 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9910550117492676 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9969660639762878 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9991078972816467 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9996434450149536 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9994749426841736 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 0.06948266923427582 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.7276964783668518 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9915317893028259 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9998077750205994 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.997776448726654 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9567135572433472 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9988027811050415 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.7611516118049622 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9388386607170105 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9932737350463867 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9999971389770508 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9999996423721313 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9999666213989258 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9995834231376648 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9993971586227417 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9655053615570068 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9964806437492371 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
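The FLAIR loop below repeats the T2 loop line for line; a small helper of this shape (a sketch, not part of the original run) would render any one of the six panels:
# Sketch: draw one subplot, optionally overlaying a map on the image
def show_panel(position, img, overlay_map=None, alpha=1.0):
    plt.subplot(2, 3, position)
    plt.imshow(img)
    if overlay_map is not None:
        plt.imshow(overlay_map, alpha=alpha)
    plt.grid(b=None)
    plt.xticks([])
    plt.yticks([])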
# Iterate through the MRIs in FLAIR
print('\n \n' + '\033[1m' + 'EACH ORIGINAL IMAGE IS ANALYZED WITH TWO METHODS: CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)' + '\033[0m' + '\n')
print('\033[1m' + 'FOR EACH METHOD, THE FIRST IMAGE IS THE ORIGINAL IMAGE, THE SECOND IMAGE IS THE MAP, AND THE THIRD IMAGE IS THE MAP SUPERIMPOSED ON THE ORIGINAL IMAGE WITH A TRANSPARENCY THAT IS PROPORTIONAL TO THE ESTIMATED PROBABILITY OF THE IMAGE HAVING TUBER(S) (HIGHER ESTIMATED PROBABILITIES PRODUCE CLEARLY SEEN MAPS OVERLAID ON THE ORIGINAL IMAGE AND LOWER ESTIMATED PROBABILITIES PRODUCE VERY TRANSPARENT MAPS OVERLAID ON THE ORIGINAL IMAGE)' + '\033[0m' + '\n \n \n \n')
for i in range(FLAIRtest_X.shape[0]):
    # Print spaces to separate from the next image
    print('\n \n \n \n \n \n \n \n')
    # Print the real classification of the image
    print('\033[1m' + 'REAL CLASSIFICATION OF THE IMAGE: {}'.format('TUBER(S)' if y_trueFLAIR[i][0]==1 else 'NO TUBER(S)') + '\033[0m')
    # Print the model classification and the estimated probability of tuber(s)
    print('Model classification of this image: {} \nEstimated probability of tuber(s): {} \n'.format('TUBER(S)' if testInceptionV3FLAIR[i][0]>0.5 else 'NO TUBER(S)', testInceptionV3FLAIR[i][0]))
    # Print title
    print('\033[1m' + 'CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)' + '\033[0m')
    # Original image
    plt.subplot(2, 3, 1)
    plt.imshow(FLAIRtest_X[i])
    plt.grid(b=None)
    plt.xticks([])
    plt.yticks([])
    # Class activation map
    plt.subplot(2, 3, 2)
    heat_map = visualize_cam(model, layer_idx=300, filter_indices=None, seed_input=FLAIRtest_X[i])
    plt.imshow(heat_map)
    plt.grid(b=None)
    plt.xticks([])
    plt.yticks([])
    # Class activation map superimposed on the original image
    plt.subplot(2, 3, 3)
    plt.imshow(FLAIRtest_X[i])
    plt.imshow(heat_map, alpha=0.8 * testInceptionV3FLAIR[i][0])
    plt.grid(b=None)
    plt.xticks([])
    plt.yticks([])
    # Original image
    plt.subplot(2, 3, 4)
    plt.imshow(FLAIRtest_X[i])
    plt.grid(b=None)
    plt.xticks([])
    plt.yticks([])
    # Saliency map
    heat_map = visualize_saliency(model, layer_idx=300, filter_indices=None, seed_input=FLAIRtest_X[i])
    plt.subplot(2, 3, 5)
    plt.imshow(heat_map)
    plt.grid(b=None)
    plt.xticks([])
    plt.yticks([])
    # Saliency map superimposed on the original image
    plt.subplot(2, 3, 6)
    plt.imshow(FLAIRtest_X[i])
    plt.imshow(heat_map, alpha=0.8 * testInceptionV3FLAIR[i][0])
    plt.grid(b=None)
    plt.xticks([])
    plt.yticks([])
    # Show the figure and close it
    plt.show()
    plt.close()
EACH ORIGINAL IMAGE IS ANALYZED WITH TWO METHODS: CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
FOR EACH METHOD, THE FIRST IMAGE IS THE ORIGINAL IMAGE, THE SECOND IMAGE IS THE MAP, AND THE THIRD IMAGE IS THE MAP SUPERIMPOSED ON THE ORIGINAL IMAGE WITH A TRANSPARENCY THAT IS PROPORTIONAL TO THE ESTIMATED PROBABILITY OF THE IMAGE HAVING TUBER(S) (HIGHER ESTIMATED PROBABILITIES PRODUCE CLEARLY SEEN MAPS OVERLAID ON THE ORIGINAL IMAGE AND LOWER ESTIMATED PROBABILITIES PRODUCE VERY TRANSPARENT MAPS OVERLAID ON THE ORIGINAL IMAGE)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 2.3385955500998534e-06 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 6.175049747980665e-06 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.8773260116577148 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 1.218364377564285e-05 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 0.02700362354516983 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9981239438056946 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 0.00018968593212775886 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 8.97117661224911e-06 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9956021308898926 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 4.349961091065779e-05 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.6542004346847534 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: NO TUBER(S) Estimated probability of tuber(s): 0.00021050315990578383 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9999995231628418 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9999994039535522 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)
REAL CLASSIFICATION OF THE IMAGE: TUBER(S) Model classification of this image: TUBER(S) Estimated probability of tuber(s): 0.9999995231628418 CLASS ACTIVATION MAP (UPPER ROW) AND SALIENCY MAP (LOWER ROW)