# %%
import pandas as pd
import tensorflow as tf
from keras import backend as K
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
import cv2
from tensorflow.keras.layers import SpatialDropout2D, Dense, Activation, Flatten, Dropout, GlobalAveragePooling2D, GlobalMaxPooling2D, Conv2D, BatchNormalization, MaxPooling2D, Input, Concatenate, ReLU, AveragePooling2D, UpSampling2D
# from tensorflow.keras.applications import DenseNet201, InceptionResNetV2, MobileNetV2, EfficientNetB3, Xception, VGG19, InceptionV3, EfficientNetB0, EfficientNetB2
from tensorflow.keras import regularizers, Model
from tensorflow.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint
from tensorflow.keras.utils import Sequence
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from tensorflow.keras.optimizers import SGD, Adam
import numpy as np
import random
import shutil
# %%
from tensorflow.keras.applications import MobileNetV3Small, MobileNetV3Large
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.metrics import AUC, Precision, Recall
# %%
from tensorflow.keras.applications.mobilenet_v3 import preprocess_input
target = 256  # square input size fed to the network
# inject Gaussian noise but keep the dark (background) parts of the image black
def addNoise(image):
    # binary mask of non-black pixels, so the background can be re-zeroed later
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY).astype(np.uint8)
    ret, mask = cv2.threshold(gray, 5, 255, cv2.THRESH_BINARY)
    # random noise strength per image; mean of -randStd slightly darkens the image
    randStd = random.uniform(0, 10.0)  # 15
    gaussian = np.random.normal(randStd * -1, randStd, (target, target, 3))
    noisy_image = image + gaussian
    image = np.clip(noisy_image, 0, 255).astype(np.uint8)
    image[mask == 0] = [0, 0, 0]  # restore pure black background
    image = preprocess_input(image)
    return image
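# %%
# Illustrative sanity check (an assumption, not part of the original gist):
# run addNoise on a synthetic image and confirm that the shape is preserved
# and that background pixels stay black after noise injection.
_demo = np.zeros((target, target, 3), dtype=np.uint8)
_demo[64:192, 64:192] = 200  # bright square on a black background
_out = addNoise(_demo)
print(_out.shape)   # (256, 256, 3)
print(_out[0, 0])   # background pixel should remain [0, 0, 0]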
# %%
# def preprocess_image(image):
#     # Convert image to LAB color space
#     lab = cv2.cvtColor(image, cv2.COLOR_RGB2LAB)
#     # Split the LAB image into channels
#     l, a, b = cv2.split(lab)
#     # Convert the L channel to the appropriate data type
#     l = l.astype(np.uint8)  # or l = l.astype(np.uint16) for 16-bit images
#     # Apply CLAHE to the L channel
#     clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
#     cl = clahe.apply(l)
#     # Check if dimensions match
#     if cl.shape[:2] != a.shape[:2] or cl.shape[:2] != b.shape[:2]:
#         raise ValueError("Dimensions of CLAHE-enhanced L channel and A/B channels do not match.")
#     # Convert A and B channels to 8-bit
#     a = a.astype(np.uint8)
#     b = b.astype(np.uint8)
#     # Merge the CLAHE-enhanced L channel with the original A and B channels
#     merged = cv2.merge((cl, a, b))
#     # Convert the LAB image back to RGB color space
#     preprocessed_image = cv2.cvtColor(merged, cv2.COLOR_LAB2RGB)
#     return preprocessed_image
# %%
# def addNoise(image, target=256):
#     # input_image = preprocess_image(image)
#     # gray = cv2.cvtColor(input_image, cv2.COLOR_RGB2GRAY).astype(np.uint8)
#     gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY).astype(np.uint8)
#     # Use adaptive thresholding for a more refined mask
#     mask = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
#                                  cv2.THRESH_BINARY, 11, 2)
#     randStd = random.uniform(0, 10.0)  # Adjust the upper limit as needed
#     gaussian = np.random.normal(randStd * -1, randStd, (target, target, 3))
#     noisy_image = image + gaussian
#     image = np.clip(noisy_image, 0, 255).astype(np.uint8)
#     # Apply mask to keep dark parts of the image black
#     image[mask == 0] = [0, 0, 0]
#     # Preprocess the image for the model
#     image = preprocess_input(image)
#     return image
# %%
dataPath = 'final/'
# %%
# combine two generators (different shuffle seeds) that share the noise-injection pipeline
batchSize = 4
trainDataGen = ImageDataGenerator(preprocessing_function=addNoise, horizontal_flip=True, vertical_flip=True, rotation_range=0, brightness_range=(0.95, 1.05))
trainGen1 = trainDataGen.flow_from_directory(batch_size=batchSize, shuffle=True, class_mode="categorical", target_size=(target, target), directory=dataPath + 'train', color_mode='rgb', seed=0)
trainGen2 = trainDataGen.flow_from_directory(batch_size=batchSize, shuffle=True, class_mode="categorical", target_size=(target, target), directory=dataPath + 'train', color_mode='rgb', seed=42)
def combine_gen(*gens):
    # interleave batches from the given generators indefinitely
    while True:
        for g in gens:
            yield next(g)
trainGen = combine_gen(trainGen1, trainGen2)
valDataGen = ImageDataGenerator(preprocessing_function=preprocess_input)
valGen = valDataGen.flow_from_directory(batch_size=1, class_mode="categorical", target_size=(target, target), directory=dataPath + 'validation', color_mode='rgb')
testDataGen = ImageDataGenerator(preprocessing_function=preprocess_input)
# shuffle=False so predictions stay aligned with testGen.classes in the
# ROC and confusion-matrix cells below (flow_from_directory shuffles by default)
testGen = testDataGen.flow_from_directory(batch_size=1, shuffle=False, class_mode="categorical", target_size=(target, target), directory=dataPath + 'test', color_mode='rgb')
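# %%
# Illustrative sanity check (not in the original gist): pull one batch from
# the combined generator and confirm shapes — images (batchSize, 256, 256, 3)
# and one-hot labels (batchSize, num_classes).
xb, yb = next(trainGen)
print(xb.shape, yb.shape)
print(trainGen1.class_indices)  # directory-name -> label-index mapping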
# %%
# simple model that uses a MobileNetV3 backbone
def getModel(image_size, num_classes):
    model_input = Input(shape=(image_size, image_size, 3))
    transfer = MobileNetV3Large(
        weights='imagenet', include_top=False, input_tensor=model_input, minimalistic=False
    )
    # truncate the backbone at an intermediate block and add a small classification head
    x = transfer.get_layer(index=142).output
    x = SpatialDropout2D(0.15)(x)
    x = Conv2D(filters=64, kernel_size=1, activation='swish', kernel_regularizer=regularizers.L1L2(l1=1e-1))(x)
    x = GlobalMaxPooling2D()(x)
    x = Dropout(0.20)(x)
    model_output = Dense(num_classes, activation='softmax')(x)
    return Model(inputs=model_input, outputs=model_output)
model = getModel(image_size=target, num_classes=4)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.85, patience=2, min_lr=1e-5)
model_checkpoint_callback = ModelCheckpoint(
    filepath='superultra.h5',
    monitor='val_loss',
    mode='min',
    save_best_only=True)
# %%
model.summary()
# %%
model.compile(optimizer=Adam(1e-4), loss='categorical_crossentropy', metrics=['accuracy'])
# steps_per_epoch is doubled because trainGen interleaves two generators
history = model.fit(trainGen, steps_per_epoch=len(trainGen1) * 2, validation_data=valGen, validation_steps=len(valGen), epochs=30, callbacks=[reduce_lr, model_checkpoint_callback])
# %%
# restore the best (lowest val_loss) checkpoint before evaluating
model.load_weights('superultra.h5')
# %%
model.evaluate(testGen)
# %%
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
with open("model3.tflite", 'wb') as f:
    f.write(tflite_model)
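# %%
# Illustrative check (not in the original gist): load the converted model with
# the TFLite interpreter and run a dummy inference to verify the export.
interpreter = tf.lite.Interpreter(model_path="model3.tflite")
interpreter.allocate_tensors()
inp_details = interpreter.get_input_details()[0]
out_details = interpreter.get_output_details()[0]
dummy = np.zeros(inp_details['shape'], dtype=inp_details['dtype'])
interpreter.set_tensor(inp_details['index'], dummy)
interpreter.invoke()
print(interpreter.get_tensor(out_details['index']).shape)  # expect (1, 4)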
# %%
from sklearn.metrics import roc_curve, auc
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import label_binarize
from sklearn.metrics import roc_auc_score
# Predict probabilities on the test set
y_score = model.predict(testGen, steps=len(testGen), verbose=1)
# Get true labels (valid because testGen was created with shuffle=False)
y_true = testGen.classes
# Binarize the labels for multiclass ROC-AUC
y_true_bin = label_binarize(y_true, classes=[0, 1, 2, 3])
# Compute ROC curve and AUC for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(4):
    fpr[i], tpr[i], _ = roc_curve(y_true_bin[:, i], y_score[:, i])
    roc_auc[i] = auc(fpr[i], tpr[i])
# Plot one-vs-rest ROC curves for each class
plt.figure(figsize=(10, 6))
for i in range(4):
    plt.plot(fpr[i], tpr[i], label=f'Class {i} (AUC = {roc_auc[i]:.2f})')
plt.plot([0, 1], [0, 1], linestyle='--', color='black', label='Random')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve for Multiclass Classification')
plt.legend()
plt.show()
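# %%
# Illustrative summary (not in the original gist): roc_auc_score was imported
# above but never used; compute the macro-averaged one-vs-rest AUC from the
# same binarized labels and predicted probabilities.
macro_auc = roc_auc_score(y_true_bin, y_score, average='macro')
print(f'Macro-average ROC AUC: {macro_auc:.4f}')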
# %%
from sklearn.metrics import confusion_matrix
import seaborn as sns
import numpy as np
# Predict class probabilities on the test set
y_pred_prob = model.predict(testGen, steps=len(testGen), verbose=1)
# Get predicted class labels
y_pred = np.argmax(y_pred_prob, axis=1)
# Create confusion matrix for multiclass
cm = confusion_matrix(y_true, y_pred)
# Plot confusion matrix using seaborn heatmap
plt.figure(figsize=(8, 6))
sns.heatmap(cm, annot=True, fmt="d", cmap="Blues", xticklabels=["ARMD", "DR", "RG", "NRG"], yticklabels=["ARMD", "DR", "RG", "NRG"])
plt.xlabel('Predicted')
plt.ylabel('True')
plt.title('Confusion Matrix for Multiclass Classification')
plt.show()
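# %%
# Illustrative addition (not in the original gist): a per-class
# precision/recall/F1 report, using the same class-name ordering as the
# heatmap axes above (assumed to match testGen.class_indices).
from sklearn.metrics import classification_report
print(classification_report(y_true, y_pred, target_names=["ARMD", "DR", "RG", "NRG"]))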
# %%
plt.figure(figsize=(10, 6))
plt.plot(history.history['loss'], label='Training Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Model Loss Over Epochs')
plt.legend()
plt.show()
# %%
from sklearn.metrics import precision_recall_curve, average_precision_score
import matplotlib.pyplot as plt
import numpy as np
# Compute per-class precision-recall curves, reusing y_true_bin and y_score
# from the ROC cell above; average precision (AP) summarizes each curve.
precision = dict()
recall = dict()
pr_auc = dict()
for i in range(4):
    precision[i], recall[i], _ = precision_recall_curve(y_true_bin[:, i], y_score[:, i])
    pr_auc[i] = average_precision_score(y_true_bin[:, i], y_score[:, i])
# Plot Precision-Recall curves
plt.figure(figsize=(10, 6))
for i in range(4):
    plt.plot(recall[i], precision[i], label=f'Class {i} (AP = {pr_auc[i]:.2f})')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Precision-Recall Curve for Multiclass Classification')
plt.legend()
plt.show()
# %%
# Plot the training and validation accuracy
plt.figure(figsize=(10, 6))
plt.plot(history.history['accuracy'], label='Training Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.title('Model Accuracy Over Epochs')
plt.legend()
plt.show()
# %%
# Evaluate the model on the test set
test_loss, test_accuracy = model.evaluate(testGen)
# %%
# Plot the training, validation, and test accuracy
plt.figure(figsize=(10, 6))
plt.plot(history.history['accuracy'], label='Training Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.axhline(y=test_accuracy, color='r', linestyle='--', label=f'Test Accuracy = {test_accuracy:.4f}')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.title('Model Accuracy Over Epochs')
plt.legend()
plt.show()
print(f'Test Loss: {test_loss:.4f}')
print(f'Test Accuracy: {test_accuracy:.4f}')
# %%
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
with open("superultra.tflite", 'wb') as f:
    f.write(tflite_model)
# %%