import itertools

import matplotlib.pyplot as plt
import numpy as np


def plot_confusion_matrix(cm, classes, normalize=False,
                          title='Confusion matrix', cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    # Normalize before plotting so the heatmap colors and the cell
    # annotations show the same (row-normalized) values.
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    # Annotate each cell, switching text color on dark backgrounds.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.ylabel('True label')
    plt.xlabel('Predicted label')
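As a usage sketch, assuming Y_pred holds the model's predicted class probabilities on the validation set and Y_val the one-hot true labels (names from earlier in the notebook, not defined in this snippet), the matrix can be computed with scikit-learn and passed in:

from sklearn.metrics import confusion_matrix

# Convert probability vectors and one-hot vectors to class indices.
Y_pred_classes = np.argmax(Y_pred, axis=1)
Y_true = np.argmax(Y_val, axis=1)

confusion_mtx = confusion_matrix(Y_true, Y_pred_classes)
plot_confusion_matrix(confusion_mtx, classes=range(10))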
def display_errors(errors_index, img_errors, pred_errors, obs_errors):
    """ This function shows 6 images with their predicted and real labels"""
    n = 0
    nrows = 2
    ncols = 3
    fig, ax = plt.subplots(nrows, ncols, sharex=True, sharey=True)
    for row in range(nrows):
        for col in range(ncols):
            error = errors_index[n]
            ax[row, col].imshow((img_errors[error]).reshape((28, 28)))
            ax[row, col].set_title("Predicted label :{}\nTrue label :{}".format(
                pred_errors[error], obs_errors[error]))
            n += 1
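The error arrays used below are not defined in this snippet; a minimal sketch of how they could be built, assuming Y_pred, Y_pred_classes, Y_true, and X_val exist from the evaluation step above:

# Boolean mask of misclassified validation samples (assumed names).
errors = (Y_pred_classes - Y_true != 0)

Y_pred_classes_errors = Y_pred_classes[errors]  # wrong predicted labels
Y_pred_errors = Y_pred[errors]                  # probability vectors of the errors
Y_true_errors = Y_true[errors]                  # true labels of the errors
X_val_errors = X_val[errors]                    # the misclassified images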
# Probabilities of the wrongly predicted numbers
Y_pred_errors_prob = np.max(Y_pred_errors, axis=1)
# Predicted probabilities of the true values in the error set
true_prob_errors = np.diagonal(np.take(Y_pred_errors, Y_true_errors, axis=1))
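Note that np.take with axis=1 materializes an (n_errors x n_errors) matrix whose diagonal holds, for each error i, the probability assigned to its true label. With the same assumed arrays, integer fancy indexing gives the identical result without the intermediate matrix:

# Same result, without building the full n_errors x n_errors matrix.
true_prob_errors = Y_pred_errors[np.arange(len(Y_true_errors)), Y_true_errors]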
# Difference between the probability of the predicted label and the true label
delta_pred_true_errors = Y_pred_errors_prob - true_prob_errors
# Sorted list of the delta prob errors (np.argsort sorts ascending,
# so the largest deltas come last)
sorted_delta_errors = np.argsort(delta_pred_true_errors)

# Top 6 errors: the cases where the network was most confidently wrong
most_important_errors = sorted_delta_errors[-6:]
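For a large error set, a full sort is unnecessary for a top-k selection; np.argpartition is a cheaper alternative (the six returned indices are not ordered among themselves, which does not matter for display):

# Alternative top-6 selection in O(n) instead of O(n log n).
most_important_errors = np.argpartition(delta_pred_true_errors, -6)[-6:]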
# Show the top 6 errors
display_errors(most_important_errors, X_val_errors, Y_pred_classes_errors, Y_true_errors)