We will start with a simple example to introduce deep learning: fitting a noisy saddle-shaped surface with a small neural network.
Saddle function: $$ Z = 2X^2 - 3Y^2 + 5 + e $$
import numpy as np
import pandas as pd
import keras
import matplotlib.pyplot as plt # Visualisation Library
%matplotlib inline
# Build a 2-D grid over [-1, 1) with step 0.01 and sample the saddle
# surface Z = 2*X^2 - 3*Y^2 + 5 + e, where e is small uniform noise.
x = np.arange(-1, 1, 0.01)
y = np.arange(-1, 1, 0.01)
X, Y = np.meshgrid(x, y)
# Derive the shapes from the grid instead of hard-coding 200x200, so
# changing the step size above cannot cause a silent shape mismatch.
c = np.ones(X.shape)
e = np.random.rand(*X.shape) * 0.1  # noise in [0, 0.1)
Z = 2*X*X - 3*Y*Y + 5*c + e
# Make modules in the parent directory importable (notebook convenience).
import sys
sys.path.append("../")
from mpl_toolkits.mplot3d import Axes3D
def plot3d(X, Y, Z, color='y'):
    """Render Z = f(X, Y) as a 3-D surface plot.

    Parameters
    ----------
    X, Y : 2-D arrays (e.g. from np.meshgrid) defining the grid.
    Z : 2-D array of surface heights, same shape as X and Y.
    color : matplotlib color for the surface; defaults to 'y'
        (yellow), matching the original hard-coded value.
    """
    fig = plt.figure(figsize=(8, 8))
    # projection='3d' is registered by the mpl_toolkits.mplot3d import.
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(X, Y, Z, color=color)
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    plt.show()
# Visualise the noisy saddle surface before fitting a model to it.
plot3d(X, Y, Z)
from keras.models import Sequential
from keras.layers import Dense
# Flatten the meshgrid into an (n_samples, 2) feature matrix and a
# matching 1-D target vector — the usual (X, y) layout for model.fit.
input_xy = np.column_stack((X.ravel(), Y.ravel()))  ### X input in ML
output_z = Z.ravel()  ### y output in ML
input_xy.shape, output_z.shape
# A small fully-connected regression network: 2 -> 4 -> 2 -> 1.
model = Sequential()
model.add(Dense(4, input_dim=2, activation="linear"))
# input_dim is only meaningful on the first layer; the original
# input_dim=2 here was ignored by Keras (this layer receives 4 inputs),
# so it has been removed to avoid misleading readers.
model.add(Dense(2, activation="linear"))
model.add(Dense(1))
# NOTE(review): a stack of purely linear layers collapses to one linear
# map, so this model cannot capture the quadratic saddle shape —
# presumably intentional for the tutorial; try activation="relu" to fit it.
model.summary()
# Draw the layer graph with tensor shapes (needs pydot + graphviz installed).
from keras.utils import plot_model
plot_model(model, show_layer_names=True, show_shapes=True)
# Mean-squared-error loss with plain SGD; also track MSE as a metric.
model.compile(loss="mean_squared_error", optimizer="sgd", metrics=["mse"])
%%time
# Hold out the last 20% of samples for validation; shuffle each epoch.
output = model.fit(input_xy, output_z, epochs=10, validation_split=0.2, shuffle=True, verbose=1)
# fit() returns a History object; tabulate its per-epoch metrics.
output_df = pd.DataFrame(output.history)
output_df.head()
# Training vs. validation loss curves across the 10 epochs.
output_df.plot.line(y=["val_loss", "loss"]);
# Predict over the full grid and reshape back to the grid's own shape
# (derived from X rather than the hard-coded 200x200, so a changed grid
# step cannot cause a reshape error here).
Z_pred = model.predict(input_xy).reshape(X.shape)
# The fitted model's surface, for visual comparison with the true Z.
plot3d(X, Y, Z_pred)
# IPython introspection: display the Dense layer's signature and docstring.
Dense?