
Building a Simple CNN with Keras for the Kaggle Digit Recognizer Competition

Posted: 2019-12-27 10:10:25


Introduction

Digit Recognizer is a practice competition on Kaggle.

Small as the task is, it exercises the whole pipeline. To improve the score I went through several techniques and multiple rounds of model refinement, and the accuracy score climbed from roughly 0.96 to 0.98, then 0.99, and finally to the current 1.0.

This is the notebook that reached 100% test accuracy; it is shared for reference and discussion.

(Of course, this notebook builds on earlier work; thanks to everyone who has shared notebooks and solution write-ups on Kaggle and elsewhere.)

Now, on to the code.

# Import the necessary libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import seaborn as sns
%matplotlib inline

np.random.seed(2)

from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import itertools

from keras.utils.np_utils import to_categorical  # convert to one-hot encoding
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, BatchNormalization
from keras.optimizers import RMSprop
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau
from keras.datasets import mnist

sns.set(style='white', context='notebook', palette='deep')

Using TensorFlow backend.

Dataset download link: /s/1SmBFmWp4iynF-t0O01_Psg

Extraction code: mgm8

# Load the data
train = pd.read_csv("../input/train.csv")
test = pd.read_csv("../input/test.csv")
Y_train = train["label"]
X_train = train.drop(labels=["label"], axis=1)
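As a quick sanity check (a minimal sketch, assuming the cells above have run), the standard Digit Recognizer split has 42,000 labelled training rows (1 label column plus 784 pixel columns) and 28,000 unlabelled test rows:

# Quick shape check of the freshly loaded data
print(train.shape)    # expected: (42000, 785)
print(test.shape)     # expected: (28000, 784)
print(X_train.shape)  # expected: (42000, 784)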

# Load extra data: without it, validation accuracy tops out around 0.9964;
# with it, validation accuracy can reach 0.9985
(x_train1, y_train1), (x_test1, y_test1) = mnist.load_data()
train1 = np.concatenate([x_train1, x_test1], axis=0)
y_train1 = np.concatenate([y_train1, y_test1], axis=0)
Y_train1 = y_train1
X_train1 = train1.reshape(-1, 28*28)

Downloading data from /img-datasets/mnist.npz
11493376/11490434 [==============================] - 1s 0us/step

# Plot the label distribution
g = sns.countplot(Y_train)

# Normalize the data so the CNN trains faster
X_train = X_train / 255.0
test = test / 255.0
X_train1 = X_train1 / 255.0

# Merge the Kaggle training data with the extra MNIST data
X_train = np.concatenate((X_train.values, X_train1))
Y_train = np.concatenate((Y_train, Y_train1))

# Reshape the images into a 3D array (height = 28px, width = 28px, channel = 1)
X_train = X_train.reshape(-1, 28, 28, 1)
test = test.values.reshape(-1, 28, 28, 1)

# Convert the labels to one-hot vectors (e.g. 2 -> [0,0,1,0,0,0,0,0,0,0])
Y_train = to_categorical(Y_train, num_classes=10)
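For intuition, here is a minimal sketch of what to_categorical produces; the two sample labels are chosen purely for illustration:

import numpy as np
from keras.utils.np_utils import to_categorical

sample_labels = np.array([2, 0])  # illustrative labels only
print(to_categorical(sample_labels, num_classes=10))
# [[0. 0. 1. 0. 0. 0. 0. 0. 0. 0.]
#  [1. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]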

# Split the data into training and validation sets
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.1, random_state=2)

# Plot one example from the training set
g = plt.imshow(X_train[0][:, :, 0])

# Build the CNN model
# Architecture:
# [[Conv2D->relu -> BatchNormalization]*2 -> MaxPool2D -> Dropout]*2 ->
# Conv2D->relu -> BatchNormalization -> Dropout ->
# Flatten -> Dense -> BatchNormalization -> Dropout -> Out
model = Sequential()

model.add(Conv2D(filters=64, kernel_size=(5,5), padding='Same', activation='relu', input_shape=(28,28,1)))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(5,5), padding='Same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.25))

model.add(Conv2D(filters=64, kernel_size=(3,3), padding='Same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(3,3), padding='Same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.25))

model.add(Conv2D(filters=64, kernel_size=(3,3), padding='Same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(256, activation="relu"))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(Dense(10, activation="softmax"))

# Visualize the model
from keras.utils import plot_model
plot_model(model, to_file='model.png', show_shapes=True, show_layer_names=True)
from IPython.display import Image
Image("model.png")

# Define the optimizer
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)

# Compile the model
model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"])

# Reduce the learning rate dynamically when validation accuracy stops improving
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001)
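With factor=0.5 and min_lr=1e-5, each plateau halves the learning rate until the floor is reached. As a minimal illustration (the actual reduction points depend on when val_acc plateaus, as the training log further below shows):

# Learning-rate values the callback can step through, starting from lr=0.001
lr = 0.001
schedule = []
while lr > 1e-5:
    lr = max(lr * 0.5, 1e-5)
    schedule.append(lr)
print(schedule)  # [0.0005, 0.00025, 0.000125, 6.25e-05, 3.125e-05, 1.5625e-05, 1e-05]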

epochs = 50
batch_size = 128

# Use data augmentation to prevent overfitting
datagen = ImageDataGenerator(
    featurewise_center=False,             # set input mean to 0 over the dataset
    samplewise_center=False,              # set each sample mean to 0
    featurewise_std_normalization=False,  # divide inputs by std of the dataset
    samplewise_std_normalization=False,   # divide each input by its std
    zca_whitening=False,                  # apply ZCA whitening
    rotation_range=10,                    # randomly rotate images in the range (degrees, 0 to 180)
    zoom_range=0.1,                       # randomly zoom images
    width_shift_range=0.1,                # randomly shift images horizontally (fraction of total width)
    height_shift_range=0.1,               # randomly shift images vertically (fraction of total height)
    horizontal_flip=False,                # do not flip horizontally (digits are orientation-sensitive)
    vertical_flip=False)                  # do not flip vertically

datagen.fit(X_train)
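If you want to see what the augmentation actually does, here is an optional sketch (not part of the original notebook) that previews a batch of augmented digits, assuming datagen, X_train and Y_train are defined as above:

import matplotlib.pyplot as plt

# Preview nine augmented training digits
aug_iter = datagen.flow(X_train, Y_train, batch_size=9)
images, _ = next(aug_iter)
fig, axes = plt.subplots(3, 3, figsize=(4, 4))
for img, axis in zip(images, axes.flatten()):
    axis.imshow(img[:, :, 0], cmap='gray')
    axis.axis('off')
plt.show()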

# Train the model
history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),
                              epochs=epochs,
                              validation_data=(X_val, Y_val),
                              verbose=2,
                              steps_per_epoch=X_train.shape[0] // batch_size,
                              callbacks=[learning_rate_reduction])

Epoch 1/50 - 47s - loss: 0.1388 - acc: 0.9564 - val_loss: 0.0434 - val_acc: 0.9852
Epoch 2/50 - 43s - loss: 0.0496 - acc: 0.9845 - val_loss: 0.0880 - val_acc: 0.9767
Epoch 3/50 - 43s - loss: 0.0384 - acc: 0.9884 - val_loss: 0.0230 - val_acc: 0.9933
Epoch 4/50 - 44s - loss: 0.0331 - acc: 0.9898 - val_loss: 0.0224 - val_acc: 0.9942
Epoch 5/50 - 42s - loss: 0.0300 - acc: 0.9910 - val_loss: 0.0209 - val_acc: 0.9933
Epoch 6/50 - 42s - loss: 0.0257 - acc: 0.9924 - val_loss: 0.0167 - val_acc: 0.9953
Epoch 7/50 - 42s - loss: 0.0250 - acc: 0.9924 - val_loss: 0.0159 - val_acc: 0.9952
Epoch 8/50 - 43s - loss: 0.0248 - acc: 0.9928 - val_loss: 0.0149 - val_acc: 0.9951
Epoch 9/50 - 42s - loss: 0.0218 - acc: 0.9934 - val_loss: 0.0170 - val_acc: 0.9954
Epoch 00009: ReduceLROnPlateau reducing learning rate to 0.0005000000237487257.
Epoch 10/50 - 42s - loss: 0.0176 - acc: 0.9947 - val_loss: 0.0106 - val_acc: 0.9965
Epoch 11/50 - 43s - loss: 0.0149 - acc: 0.9956 - val_loss: 0.0101 - val_acc: 0.9969
Epoch 12/50 - 42s - loss: 0.0152 - acc: 0.9953 - val_loss: 0.0084 - val_acc: 0.9973
Epoch 13/50 - 42s - loss: 0.0146 - acc: 0.9958 - val_loss: 0.0079 - val_acc: 0.9980
Epoch 14/50 - 43s - loss: 0.0134 - acc: 0.9959 - val_loss: 0.0129 - val_acc: 0.9962
Epoch 15/50 - 42s - loss: 0.0135 - acc: 0.9959 - val_loss: 0.0093 - val_acc: 0.9971
Epoch 16/50 - 43s - loss: 0.0129 - acc: 0.9960 - val_loss: 0.0085 - val_acc: 0.9974
Epoch 00016: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
Epoch 17/50 - 43s - loss: 0.0109 - acc: 0.9968 - val_loss: 0.0064 - val_acc: 0.9980
Epoch 18/50 - 44s - loss: 0.0107 - acc: 0.9966 - val_loss: 0.0068 - val_acc: 0.9984
Epoch 19/50 - 43s - loss: 0.0104 - acc: 0.9969 - val_loss: 0.0065 - val_acc: 0.9986
Epoch 20/50 - 43s - loss: 0.0097 - acc: 0.9969 - val_loss: 0.0057 - val_acc: 0.9985
Epoch 21/50 - 43s - loss: 0.0092 - acc: 0.9971 - val_loss: 0.0073 - val_acc: 0.9981
Epoch 22/50 - 43s - loss: 0.0097 - acc: 0.9970 - val_loss: 0.0068 - val_acc: 0.9982
Epoch 00022: ReduceLROnPlateau reducing learning rate to 0.0001250000059371814.
Epoch 23/50 - 43s - loss: 0.0083 - acc: 0.9975 - val_loss: 0.0064 - val_acc: 0.9984
Epoch 24/50 - 43s - loss: 0.0085 - acc: 0.9974 - val_loss: 0.0061 - val_acc: 0.9985
Epoch 25/50 - 43s - loss: 0.0081 - acc: 0.9976 - val_loss: 0.0058 - val_acc: 0.9988
Epoch 26/50 - 43s - loss: 0.0080 - acc: 0.9977 - val_loss: 0.0065 - val_acc: 0.9986
Epoch 27/50 - 43s - loss: 0.0078 - acc: 0.9977 - val_loss: 0.0066 - val_acc: 0.9984
Epoch 28/50 - 44s - loss: 0.0088 - acc: 0.9975 - val_loss: 0.0060 - val_acc: 0.9988
Epoch 00028: ReduceLROnPlateau reducing learning rate to 6.25000029685907e-05.
Epoch 29/50 - 44s - loss: 0.0077 - acc: 0.9975 - val_loss: 0.0056 - val_acc: 0.9988
Epoch 30/50 - 43s - loss: 0.0063 - acc: 0.9980 - val_loss: 0.0054 - val_acc: 0.9988
Epoch 31/50 - 44s - loss: 0.0069 - acc: 0.9980 - val_loss: 0.0056 - val_acc: 0.9988
Epoch 00031: ReduceLROnPlateau reducing learning rate to 3.125000148429535e-05.
Epoch 32/50 - 44s - loss: 0.0068 - acc: 0.9980 - val_loss: 0.0055 - val_acc: 0.9986
Epoch 33/50 - 43s - loss: 0.0066 - acc: 0.9981 - val_loss: 0.0055 - val_acc: 0.9987
Epoch 34/50 - 43s - loss: 0.0069 - acc: 0.9979 - val_loss: 0.0055 - val_acc: 0.9988
Epoch 00034: ReduceLROnPlateau reducing learning rate to 1.5625000742147677e-05.
Epoch 35/50 - 43s - loss: 0.0065 - acc: 0.9979 - val_loss: 0.0055 - val_acc: 0.9988
Epoch 36/50 - 42s - loss: 0.0069 - acc: 0.9980 - val_loss: 0.0054 - val_acc: 0.9988
Epoch 37/50 - 43s - loss: 0.0064 - acc: 0.9980 - val_loss: 0.0054 - val_acc: 0.9988
Epoch 00037: ReduceLROnPlateau reducing learning rate to 1e-05.
Epoch 38/50 - 42s - loss: 0.0067 - acc: 0.9979 - val_loss: 0.0054 - val_acc: 0.9989
Epoch 39/50 - 43s - loss: 0.0067 - acc: 0.9979 - val_loss: 0.0055 - val_acc: 0.9988
Epoch 40/50 - 43s - loss: 0.0060 - acc: 0.9983 - val_loss: 0.0055 - val_acc: 0.9988
Epoch 41/50 - 42s - loss: 0.0056 - acc: 0.9983 - val_loss: 0.0055 - val_acc: 0.9988
Epoch 42/50 - 43s - loss: 0.0064 - acc: 0.9981 - val_loss: 0.0055 - val_acc: 0.9988
Epoch 43/50 - 42s - loss: 0.0060 - acc: 0.9982 - val_loss: 0.0054 - val_acc: 0.9988
Epoch 44/50 - 42s - loss: 0.0062 - acc: 0.9981 - val_loss: 0.0054 - val_acc: 0.9989
Epoch 45/50 - 42s - loss: 0.0061 - acc: 0.9980 - val_loss: 0.0055 - val_acc: 0.9989
Epoch 46/50 - 42s - loss: 0.0059 - acc: 0.9983 - val_loss: 0.0056 - val_acc: 0.9989
Epoch 47/50 - 42s - loss: 0.0065 - acc: 0.9980 - val_loss: 0.0054 - val_acc: 0.9989
Epoch 48/50 - 43s - loss: 0.0069 - acc: 0.9980 - val_loss: 0.0055 - val_acc: 0.9989
Epoch 49/50 - 42s - loss: 0.0068 - acc: 0.9980 - val_loss: 0.0055 - val_acc: 0.9989
Epoch 50/50 - 42s - loss: 0.0065 - acc: 0.9981 - val_loss: 0.0054 - val_acc: 0.9988
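At roughly 43 s per epoch, the 50 epochs above take about half an hour, so it can be worth saving the trained model and reloading it later instead of retraining. A minimal sketch using the standard Keras save/load API (the file name is arbitrary, not part of the original notebook):

# Save the trained model (architecture + weights + optimizer state)
model.save("cnn_mnist.h5")

# ...and reload it later without retraining
from keras.models import load_model
model = load_model("cnn_mnist.h5")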

# Plot the loss and accuracy curves for the training and validation sets
# to check for underfitting or overfitting
fig, ax = plt.subplots(2, 1)
ax[0].plot(history.history['loss'], color='b', label="Training loss")
ax[0].plot(history.history['val_loss'], color='r', label="Validation loss", axes=ax[0])
legend = ax[0].legend(loc='best', shadow=True)
ax[1].plot(history.history['acc'], color='b', label="Training accuracy")
ax[1].plot(history.history['val_acc'], color='r', label="Validation accuracy")
legend = ax[1].legend(loc='best', shadow=True)

# Plot the confusion matrix to see which digits are misclassified most often
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')

# Predict the values from the validation dataset
Y_pred = model.predict(X_val)
# Convert the predicted probabilities to class labels
Y_pred_classes = np.argmax(Y_pred, axis=1)
# Convert the one-hot validation labels back to class labels
Y_true = np.argmax(Y_val, axis=1)
# Compute the confusion matrix
confusion_mtx = confusion_matrix(Y_true, Y_pred_classes)
# Plot the confusion matrix
plot_confusion_matrix(confusion_mtx, classes=range(10))

# Display some misclassified examples with their predicted and true labels
errors = (Y_pred_classes - Y_true != 0)
Y_pred_classes_errors = Y_pred_classes[errors]
Y_pred_errors = Y_pred[errors]
Y_true_errors = Y_true[errors]
X_val_errors = X_val[errors]

def display_errors(errors_index, img_errors, pred_errors, obs_errors):
    """ This function shows 6 images with their predicted and real labels """
    n = 0
    nrows = 2
    ncols = 3
    fig, ax = plt.subplots(nrows, ncols, sharex=True, sharey=True)
    for row in range(nrows):
        for col in range(ncols):
            error = errors_index[n]
            ax[row, col].imshow((img_errors[error]).reshape((28, 28)))
            ax[row, col].set_title("Predicted label :{}\nTrue label :{}".format(pred_errors[error], obs_errors[error]))
            n += 1

# Probabilities of the wrongly predicted digits
Y_pred_errors_prob = np.max(Y_pred_errors, axis=1)
# Predicted probabilities of the true labels in the error set
true_prob_errors = np.diagonal(np.take(Y_pred_errors, Y_true_errors, axis=1))
# Difference between the probability of the predicted label and the true label
delta_pred_true_errors = Y_pred_errors_prob - true_prob_errors
# Sorted list of the delta prob errors
sorted_dela_errors = np.argsort(delta_pred_true_errors)
# Top 6 errors
most_important_errors = sorted_dela_errors[-6:]
# Show the top 6 errors
display_errors(most_important_errors, X_val_errors, Y_pred_classes_errors, Y_true_errors)

# Predict on the test set
results = model.predict(test)
# Convert the predicted one-hot vectors to digit labels
results = np.argmax(results, axis=1)
results = pd.Series(results, name="Label")

# Save the final submission
submission = pd.concat([pd.Series(range(1, 28001), name="ImageId"), results], axis=1)
submission.to_csv("cnn_mnist_submission.csv", index=False)
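Before uploading, it does not hurt to confirm the file has the two columns and the 28,000 rows the competition expects; a minimal sketch:

import pandas as pd

# Sanity-check the submission file before uploading to Kaggle
sub = pd.read_csv("cnn_mnist_submission.csv")
assert list(sub.columns) == ["ImageId", "Label"]
assert len(sub) == 28000
print(sub.head())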
