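"""Gradio app for brain tumor classification from MRI scans.

Classifies an uploaded scan as Glioma, Meningioma, No Tumor, or Pituitary using
either a fine-tuned Xception model or a custom CNN, and displays a gradient
saliency map, per-class probabilities, and a short textual explanation.
"""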
import gradio as gr
import tensorflow as tf
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
import numpy as np
import cv2
import PIL.Image
import traceback

# Function to generate a simple explanation based on the saliency map and prediction
def generate_explanation(model_prediction, confidence):
    explanation = (
        f"The model predicts the tumor type is '{model_prediction}' with a confidence of {confidence * 100:.2f}%. "
        "This prediction is based on the highlighted regions of the MRI scan which contributed most to the decision."
    )
    return explanation

def generate_saliency_map(model, img_array, class_index, img_size):
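    """Build a gradient-based saliency overlay for the predicted class.

    Computes |d(class score)/d(pixel)|, reduces over the channel axis, restricts
    the result to a circular brain mask, keeps values above the 80th percentile,
    smooths them, and blends the resulting JET heatmap onto the original image.
    """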
    with tf.GradientTape() as tape:
        img_tensor = tf.convert_to_tensor(img_array)
        tape.watch(img_tensor)
        predictions = model(img_tensor)
        target_class = predictions[:, class_index]

    gradients = tape.gradient(target_class, img_tensor)
    gradients = tf.math.abs(gradients)
    gradients = tf.reduce_max(gradients, axis=-1)
    gradients = gradients.numpy().squeeze()

    gradients = cv2.resize(gradients, img_size)

    # Create a circular mask to focus on the brain region
    center_y, center_x = gradients.shape[0] // 2, gradients.shape[1] // 2
    radius = min(center_y, center_x) - 10
    y, x = np.ogrid[:gradients.shape[0], :gradients.shape[1]]
    mask = (y - center_y)**2 + (x - center_x)**2 <= radius**2

    gradients = gradients * mask

    # Normalize the gradients within the brain region
    brain_gradients = gradients[mask]
    if brain_gradients.max() > brain_gradients.min():
        brain_gradients = (brain_gradients - brain_gradients.min()) / (brain_gradients.max() - brain_gradients.min())
    gradients[mask] = brain_gradients

    # Apply thresholding to highlight important regions
    threshold = np.percentile(gradients[mask], 80)
    gradients[gradients < threshold] = 0

    # Apply Gaussian blur to smooth the saliency map
    gradients = cv2.GaussianBlur(gradients, (11, 11), 0)

    # Create a heatmap from the gradients
    heatmap = cv2.applyColorMap(np.uint8(255 * gradients), cv2.COLORMAP_JET)
    heatmap = cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB)

    heatmap = cv2.resize(heatmap, img_size)

    # Superimpose the heatmap on the original image
    original_img = image.img_to_array(PIL.Image.fromarray((img_array[0] * 255).astype(np.uint8)))
    superimposed_img = heatmap * 0.7 + original_img * 0.3
    superimposed_img = superimposed_img.astype(np.uint8)

    return superimposed_img

def load_xception_model(model_path):
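    """Rebuild the Xception-based classifier head and load its trained weights from model_path."""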
    img_shape = (299, 299, 3)
    base_model = tf.keras.applications.Xception(include_top=False, weights="imagenet", input_shape=img_shape, pooling='max')

    model = tf.keras.Sequential([
        base_model,
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dropout(rate=0.3),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dropout(rate=0.25),
        tf.keras.layers.Dense(4, activation='softmax')
    ])

    model.compile(optimizer=tf.keras.optimizers.Adamax(learning_rate=0.001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy', tf.keras.metrics.Precision(), tf.keras.metrics.Recall()])
    model.load_weights(model_path)
    return model

def classify_brain_tumor(image_file, model_choice):
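    """Classify an uploaded MRI scan and return the six Gradio outputs.

    The return order [prediction, confidence, saliency map, explanation, logs,
    class probabilities] matches the output components declared in main().
    """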
    try:
        # Load the selected model
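        # Weight files are resolved relative to the working directory; the model
        # is reloaded on every request, which keeps the app simple but adds latency.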
        if model_choice == "Transfer Learning - Xception":
            model = load_xception_model('xception_model.weights.h5')
            img_size = (299, 299)
        else:
            model = load_model('cnn_model.h5')
            img_size = (224, 224)

        labels = ['Glioma', 'Meningioma', 'No Tumor', 'Pituitary']

        # Preprocess the input image
        img = image.load_img(image_file, target_size=img_size)
        img_array = image.img_to_array(img)
        img_array = np.expand_dims(img_array, axis=0)
        img_array /= 255.0

        # Make the prediction
        prediction = model.predict(img_array)
        class_index = np.argmax(prediction[0])
        result = labels[class_index]
        confidence = float(prediction[0][class_index])

        # Generate the saliency map
        saliency_map = generate_saliency_map(model, img_array, class_index, img_size)

        # Generate the explanation
        explanation = generate_explanation(result, confidence)

        # Prepare probabilities for all classes
        probabilities = prediction[0]
        prob_dict = {label: float(prob) for label, prob in zip(labels, probabilities)}

        # Return the outputs in the expected order
        return [
            result,
            confidence,
            saliency_map,
            explanation,
            "",  # Empty string for Logs
            prob_dict  # For displaying probabilities
        ]
    except Exception as e:
        # Return error information
        return [
            "Error",
            0.0,
            None,
            "",
            f"Error: {str(e)}\nTraceback:\n{traceback.format_exc()}",
            {}  # Empty probabilities
        ]

def main():
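    """Build the Gradio interface and launch the app."""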
    # Define the interface
    interface = gr.Interface(
        fn=classify_brain_tumor,
        inputs=[
            gr.Image(type="filepath"),
            gr.Radio(choices=["Transfer Learning - Xception", "Custom CNN"], label="Select Model")
        ],
        outputs=[
            gr.Textbox(label="Prediction"),
            gr.Number(label="Confidence", precision=2),
            gr.Image(type="numpy", label="Saliency Map"),
            gr.Textbox(label="Explanation"),
            gr.Textbox(label="Logs"),
            gr.Label(num_top_classes=4, label="Class Probabilities")
        ],
        title="Brain Tumor Classification",
        description="Upload an MRI scan image to classify the tumor and view saliency maps with model explanations.",
    )
    interface.launch()

if __name__ == "__main__":
    main()