#
# An example of minimizer usage in TensorFlow.
# The loss function is plotted, along with the line produced by the
# minimized parameters.
#
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

# Define the training data.
# train_X is the model input; train_Y holds the target (truth) values.
# The model will receive train_X and make predictions; the difference
# between those predictions and the actual targets train_Y is used to
# update the weights and minimize the loss function.
train_X = np.asarray([3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167,
                      7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1])
train_Y = np.asarray([1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221,
                      2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3])

# A simple linear regression would be a single Dense layer with no
# activation function, taking all train_X points as input:
# model = tf.keras.models.Sequential([
#     tf.keras.layers.Dense(1, input_shape=[1])
# ])
# The model actually built below has 2 dense layers: the first uses relu
# activation, and the 2nd has 1 output unit with the default linear
# activation function.
# Build the model: a 17-unit relu hidden layer followed by a single
# linear output unit.
model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(17, activation='relu', input_shape=[1]),
    # tf.keras.layers.Dropout(0.1),
    tf.keras.layers.Dense(1),
])

# Compile the model.  Alternative optimizers kept for experimentation:
# model.compile(optimizer=tf.keras.optimizers.Adam(0.01), loss='mean_squared_error')
# model.compile(optimizer=tf.keras.optimizers.SGD(0.01), loss='mean_squared_error')
# model.compile(optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.01), loss='mean_squared_error')
# model.compile(optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.01), loss='mean_squared_error')
model.compile(optimizer=tf.keras.optimizers.Ftrl(learning_rate=0.015),
              loss='mean_squared_error')

# Train the model and access training parameters.
history = model.fit(train_X, train_Y, epochs=60)
print(history.params)

# Get the weights of the first Dense layer and print the weight matrix
# and bias vector.
weights = model.layers[0].get_weights()
print('Weight matrix shape:', weights[0].shape)
print('Bias vector shape:', weights[1].shape)
print(weights[0])

# Plot the loss function over the training epochs.
plt.plot(history.history['loss'])
plt.title("Loss Function")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.show()

# Plot the input data and the predicted values.  The x values are sorted
# before the prediction plot so the line is drawn left-to-right instead
# of zig-zagging through the unsorted training order.
order = np.argsort(train_X)
plt.plot(train_X, train_Y, 'ro', label="Original Data")
plt.plot(train_X[order], model.predict(train_X[order]), label="Predicted")
plt.legend()
plt.show()