"""Train a dropout-regularized dense classifier on Fashion-MNIST.

A dropout layer with a rate of 0.5 follows the first dense layer to help
prevent overfitting: it randomly drops half of the activations during
training, which discourages the model from memorizing the training data.
After training, the loss and accuracy curves for both training and
validation data are plotted, and the final test accuracy is printed.
"""
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt

# Load the Fashion-MNIST dataset (28x28 grayscale images, 10 classes).
fashion_mnist = keras.datasets.fashion_mnist
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()

# Scale pixel values from [0, 255] to [0, 1].
x_train = x_train / 255.0
x_test = x_test / 255.0

# Define the Sequential model with dropout regularization.
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dropout(0.5),  # drop half the activations during training only
    keras.layers.Dense(10, activation='softmax'),
])

# Compile with sparse categorical cross-entropy (integer labels, not one-hot)
# and track accuracy.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# Train the model for 25 epochs with a batch size of 32.
# NOTE(review): the test set is used as validation data, which leaks it into
# model selection — a separate held-out validation split would be cleaner.
history = model.fit(x_train, y_train, epochs=25, batch_size=32,
                    validation_data=(x_test, y_test))

# Plot the loss and accuracy curves for training and validation data.
plt.figure(figsize=(10, 5))

plt.subplot(1, 2, 1)
plt.plot(history.history['loss'], label='Training Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend()

plt.subplot(1, 2, 2)
plt.plot(history.history['accuracy'], label='Training Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend()

plt.show()

# Evaluate the model on the test dataset and print the test accuracy.
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=2)
print('\nTest accuracy:', test_acc)