# Import constant from TensorFlow
from tensorflow import constant
# Convert the credit_numpy array into a tensorflow constant
credit_constant = constant(credit_numpy)
# Print constant datatype
print('\n The datatype is:', credit_constant.dtype)
# Print constant shape
print('\n The shape is:', credit_constant.shape)
# Import Variable from TensorFlow and define the 1-dimensional variable A1
from tensorflow import Variable
A1 = Variable([1, 2, 3, 4])
# Print the variable A1
print('\n A1: ', A1)
# Convert A1 to a numpy array and assign it to B1
B1 = A1.numpy()
# Print B1
print('\n B1: ', B1)
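A quick contrast with constants (a sketch, not part of the exercise): a Variable can be updated in place, while a constant cannot.
# Sketch: Variables support in-place updates; constants do not
A1.assign([5, 6, 7, 8])      # overwrite the values of A1
A1.assign_add([1, 1, 1, 1])  # element-wise in-place addition
print(A1.numpy())            # [6 7 8 9]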
# Import elementwise ops and define tensors A1 and A23 as constants
from tensorflow import ones_like, multiply
A1 = constant([1, 2, 3, 4])
A23 = constant([[1, 2, 3], [1, 6, 4]])
# Define B1 and B23 as all-ones tensors with the same shapes as A1 and A23
B1 = ones_like(A1)
B23 = ones_like(A23)
# Perform element-wise multiplication
C1 = multiply(A1, B1)
C23 = multiply(A23, B23)
# Print the tensors C1 and C23
print('\n C1: {}'.format(C1.numpy()))
print('\n C23: {}'.format(C23.numpy()))
# Import matmul and define features, params, and bill as constants
from tensorflow import matmul
features = constant([[2, 24], [2, 26], [2, 57], [1, 37]])
params = constant([[1000], [150]])
bill = constant([[3913], [2682], [8617], [64400]])
# Compute billpred using features and params
billpred = matmul(features, params)
# Compute and print the error
error = bill - billpred
print(error.numpy())
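A possible follow-up (a sketch using cast and reduce_mean from tensorflow): collapsing the error vector into a scalar mean squared error.
# Sketch: summarize the prediction error as a scalar MSE
from tensorflow import cast, reduce_mean, float32
mse = reduce_mean(cast(error, float32) ** 2)
print(mse.numpy())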
In [2]:
print(wealth)
tf.Tensor(
[[11 50]
 [ 7  2]
 [ 4 60]
 [ 3  0]
 [25 10]], shape=(5, 2), dtype=int32)
In [4]:
reduce_sum(wealth, 0)
Out[4]:
<tf.Tensor: shape=(2,), dtype=int32, numpy=array([ 50, 122], dtype=int32)>
In [5]:
reduce_sum(wealth, 1)
Out[5]:
<tf.Tensor: shape=(5,), dtype=int32, numpy=array([61, 9, 64, 3, 35], dtype=int32)>
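The axis argument is easy to mix up; a minimal sketch: axis=0 collapses rows (one sum per column), axis=1 collapses columns (one sum per row).
# Sketch: axis semantics of reduce_sum
from tensorflow import constant, reduce_sum
t = constant([[1, 2], [3, 4]])
print(reduce_sum(t, 0).numpy())  # [4 6] -> column sums
print(reduce_sum(t, 1).numpy())  # [3 7] -> row sums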
from tensorflow import reshape
# Reshape the grayscale image tensor into a vector
gray_vector = reshape(gray_tensor, (784, 1))
# Reshape the color image tensor into a vector
color_vector = reshape(color_tensor, (2352, 1))
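These target shapes assume a 28x28 grayscale image (784 pixels) and a 28x28x3 color image (2352 values); a self-contained sketch with synthetic tensors:
# Sketch: reshape synthetic images with the assumed input shapes
import tensorflow as tf
gray = tf.random.uniform([28, 28])
color = tf.random.uniform([28, 28, 3])
print(tf.reshape(gray, (784, 1)).shape)    # (784, 1)
print(tf.reshape(color, (2352, 1)).shape)  # (2352, 1)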
Test the gradient of y = x^2
from tensorflow import GradientTape, Variable, multiply
def compute_gradient(x0):
    # Define x as a variable with an initial value of x0
    x = Variable(x0)
    with GradientTape() as tape:
        tape.watch(x)
        # Define y using the multiply operation
        y = multiply(x, x)
    # Return the gradient of y with respect to x
    return tape.gradient(y, x).numpy()
# Compute and print gradients at x = -1, 1, and 0
print(compute_gradient(-1.0))
print(compute_gradient(1.0))
print(compute_gradient(0.0))
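As a sanity check (a sketch, not part of the exercise): the analytic derivative of y = x^2 is 2x, so the three calls above should print -2.0, 2.0, and 0.0.
# Sketch: the tape gradient should match the analytic derivative 2*x
for x0 in [-1.0, 1.0, 0.0]:
    assert compute_gradient(x0) == 2 * x0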
Letter recognition: letter is a 3x3 tensor, model is a 1x3 tensor
# Reshape model from a 1x3 to a 3x1 tensor
model = reshape(model, (3, 1))
# Multiply letter by model
output = matmul(letter, model)
# Sum over output and print prediction using the numpy method
prediction = reduce_sum(output)
print(prediction.numpy())
# Import numpy and tensorflow with their standard aliases
import numpy as np
import tensorflow as tf
# Use a numpy array to define price as a 32-bit float
price = np.array(housing['price'], np.float32)
# Define waterfront as a Boolean using cast
waterfront = tf.cast(housing['waterfront'], tf.bool)
# Print price and waterfront
print(price)
print(waterfront)
# Import the keras module from tensorflow
from tensorflow import keras
# Compute the mean squared error (mse)
loss = keras.losses.mse(price, predictions)
# Compute the mean absolute error (mae)
loss = keras.losses.mae(price, predictions)
# Print the mean absolute error (mae)
print(loss.numpy())
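The difference matters: MSE squares the errors and so punishes outliers much more heavily than MAE. A self-contained sketch on toy values:
# Sketch: one large error dominates MSE but not MAE
import numpy as np
from tensorflow import keras
y_true = np.array([1.0, 2.0, 10.0], np.float32)
y_pred = np.array([1.0, 2.0, 4.0], np.float32)
print(keras.losses.mse(y_true, y_pred).numpy())  # 12.0  ((0 + 0 + 36) / 3)
print(keras.losses.mae(y_true, y_pred).numpy())  # 2.0   ((0 + 0 + 6) / 3)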
# Initialize a variable named scalar
scalar = Variable(1.0, dtype=float32)
# Define the model
def model(scalar, features=features):
    return scalar * features
# Define a loss function
def loss_function(scalar, features=features, targets=targets):
    # Compute the predicted values
    predictions = model(scalar, features)
    # Return the mean absolute error loss
    return keras.losses.mae(targets, predictions)
# Evaluate the loss function and print the loss
print(loss_function(scalar).numpy())
# Define a linear regression model
def linear_regression(intercept, slope, features=size_log):
    return intercept + slope * features
# Set loss_function() to take the variables as arguments
def loss_function(intercept, slope, features=size_log, targets=price_log):
    # Set the predicted values
    predictions = linear_regression(intercept, slope, features)
    # Return the mean squared error loss
    return keras.losses.mse(targets, predictions)
# Compute the loss for different slope and intercept values
print(loss_function(0.1, 0.1).numpy())
print(loss_function(0.1, 0.5).numpy())
# Initialize an Adam optimizer
opt = keras.optimizers.Adam(0.5)
for j in range(100):
    # Apply minimize, pass the loss function, and supply the variables
    opt.minimize(lambda: loss_function(intercept, slope), var_list=[intercept, slope])
    # Print every 10th value of the loss
    if j % 10 == 0:
        print(loss_function(intercept, slope).numpy())
# Plot data and regression line
plot_results(intercept, slope)
# Define the linear regression model
def linear_regression(params, feature1=size_log, feature2=bedrooms):
    return params[0] + feature1 * params[1] + feature2 * params[2]
# Define the loss function
def loss_function(params, targets=price_log, feature1=size_log, feature2=bedrooms):
    # Set the predicted values
    predictions = linear_regression(params, feature1, feature2)
    # Use the mean absolute error loss
    return keras.losses.mae(targets, predictions)
# Define the optimize operation
opt = keras.optimizers.Adam()
# Perform minimization and print trainable variables
for j in range(10):
    opt.minimize(lambda: loss_function(params), var_list=[params])
    print_results(params)
# Define the intercept and slope
intercept = Variable(10.0, dtype=float32)
slope = Variable(0.5, dtype=float32)
# Define the model
def linear_regression(intercept, slope, features):
    # Define the predicted values
    return intercept + slope * features
# Define the loss function
def loss_function(intercept, slope, targets, features):
    # Define the predicted values
    predictions = linear_regression(intercept, slope, features)
    # Define the MSE loss
    return keras.losses.mse(targets, predictions)
# Initialize Adam optimizer
opt = keras.optimizers.Adam()
# Load data in batches and train on each batch
for batch in pd.read_csv('kc_house_data.csv', chunksize=100):
    # Extract the lot size values for the current batch
    size_batch = np.array(batch['sqft_lot'], np.float32)
    # Extract the price values for the current batch
    price_batch = np.array(batch['price'], np.float32)
    # Complete the loss, fill in the variable list, and minimize
    opt.minimize(lambda: loss_function(intercept, slope, price_batch, size_batch),
                 var_list=[intercept, slope])
# Print trained parameters
print(intercept.numpy(), slope.numpy())
# From previous step
bias1 = Variable(1.0)
weights1 = Variable(ones((3, 2)))
product1 = matmul(borrower_features, weights1)
dense1 = keras.activations.sigmoid(product1 + bias1)
# Initialize bias2 and weights2
bias2 = Variable(1.0)
weights2 = Variable(ones((2, 1)))
# Perform matrix multiplication of dense1 and weights2
product2 = matmul(dense1, weights2)
# Apply activation to product2 + bias2 and print the prediction
prediction = keras.activations.sigmoid(product2 + bias2)
print('\n prediction: {}'.format(prediction.numpy()[0,0]))
print('\n actual: 1')
# Compute the product of borrower_features and weights1
products1 = matmul(borrower_features, weights1)
# Apply a sigmoid activation function to products1 + bias1
dense1 = keras.activations.sigmoid(products1+bias1)
# Print the shapes of borrower_features, weights1, bias1, and dense1
print('\n shape of borrower_features: ', borrower_features.shape)
print('\n shape of weights1: ', weights1.shape)
print('\n shape of bias1: ', bias1.shape)
print('\n shape of dense1: ', dense1.shape)
# Define the first dense layer
dense1 = keras.layers.Dense(7, activation='sigmoid')(borrower_features)
# Define a dense layer with 3 output nodes
dense2 = keras.layers.Dense(3, activation='sigmoid')(dense1)
# Define a dense layer with 1 output node
predictions = keras.layers.Dense(1, activation='sigmoid')(dense2)
# Print the shapes of dense1, dense2, and predictions
print('\n shape of dense1: ', dense1.shape)
print('\n shape of dense2: ', dense2.shape)
print('\n shape of predictions: ', predictions.shape)
# Construct input layer from features
inputs = constant(bill_amounts, float32)
# Define first dense layer
dense1 = keras.layers.Dense(3, activation='relu')(inputs)
# Define second dense layer
dense2 = keras.layers.Dense(2, activation='relu')(dense1)
# Define output layer
outputs = keras.layers.Dense(1, activation='sigmoid')(dense2)
# Print error for first five examples
error = default[:5] - outputs.numpy()[:5]
print(error)
Multiclass classification:
# Construct input layer from borrower features
inputs = constant(borrower_features, float32)
# Define first dense layer
dense1 = keras.layers.Dense(10, activation='sigmoid')(inputs)
# Define second dense layer
dense2 = keras.layers.Dense(8, activation='relu')(dense1)
# Define output layer
outputs = keras.layers.Dense(6, activation='softmax')(dense2)
# Print first five predictions
print(outputs.numpy()[:5])
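A quick sanity check (a sketch): softmax turns each row into a probability distribution, so every row of the predictions should sum to roughly 1.
# Sketch: each softmax row should sum to ~1.0
print(outputs.numpy()[:5].sum(axis=1))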
# Initialize x_1 and x_2
x_1 = Variable(6.0, dtype=float32)
x_2 = Variable(0.3, dtype=float32)
# Define the optimization operation
opt = keras.optimizers.SGD(learning_rate=0.01)
for j in range(100):
    # Perform minimization using the loss function and x_1
    opt.minimize(lambda: loss_function(x_1), var_list=[x_1])
    # Perform minimization using the loss function and x_2
    opt.minimize(lambda: loss_function(x_2), var_list=[x_2])
# Print x_1 and x_2 as numpy arrays
print(x_1.numpy(), x_2.numpy())
# Initialize x_1 and x_2
x_1 = Variable(0.05, dtype=float32)
x_2 = Variable(0.05, dtype=float32)
# Define the optimization operation for opt_1 and opt_2
opt_1 = keras.optimizers.RMSprop(learning_rate=0.01, momentum=0.99)
opt_2 = keras.optimizers.RMSprop(learning_rate=0.01, momentum=0.00)
for j in range(100):
    opt_1.minimize(lambda: loss_function(x_1), var_list=[x_1])
    # Define the minimization operation for opt_2
    opt_2.minimize(lambda: loss_function(x_2), var_list=[x_2])
# Print x_1 and x_2 as numpy arrays
print(x_1.numpy(), x_2.numpy())
# Import initializers and define the layer 1 weights
from tensorflow import random, ones
w1 = Variable(random.normal([23, 7]))
# Initialize the layer 1 bias
b1 = Variable(ones([7]))
# Define the layer 2 weights
w2 = Variable(random.normal([7, 1]))
# Define the layer 2 bias
b2 = Variable(0.0)
# Define the model
def model(w1, b1, w2, b2, features=borrower_features):
    # Apply relu activation functions to layer 1
    layer1 = keras.activations.relu(matmul(features, w1) + b1)
    # Apply dropout with rate 0.25; called directly like this, Dropout only
    # drops units when training=True is passed (it is a no-op at inference)
    dropout = keras.layers.Dropout(0.25)(layer1)
    return keras.activations.sigmoid(matmul(dropout, w2) + b2)
# Define the loss function
def loss_function(w1, b1, w2, b2, features=borrower_features, targets=default):
    predictions = model(w1, b1, w2, b2)
    # Pass targets and predictions to the cross entropy loss
    return keras.losses.binary_crossentropy(targets, predictions)
# Train the model (opt is assumed to be a Keras optimizer, e.g. Adam)
opt = keras.optimizers.Adam()
for j in range(100):
    opt.minimize(lambda: loss_function(w1, b1, w2, b2),
                 var_list=[w1, b1, w2, b2])
# Make predictions with model using test features
model_predictions = model(w1, b1, w2, b2, test_features)
# Construct the confusion matrix
confusion_matrix(test_targets, model_predictions)
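confusion_matrix here is a course-provided helper; a minimal sketch of the same idea with scikit-learn (an assumption, not the course's code), thresholding the sigmoid outputs at 0.5:
# Sketch: binarize sigmoid outputs, then build a standard confusion matrix
from sklearn.metrics import confusion_matrix as sk_confusion_matrix
binary_predictions = (model_predictions.numpy().ravel() > 0.5).astype(int)
print(sk_confusion_matrix(test_targets, binary_predictions))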
# Define a Keras sequential model
model = keras.Sequential()
# Define the first dense layer
model.add(keras.layers.Dense(16, activation='relu', input_shape=(784,)))
# Define the second dense layer
model.add(keras.layers.Dense(8, activation='relu', input_shape=(16,)))
# Define the output layer
model.add(keras.layers.Dense(4, activation='softmax', input_shape=(4,)))
# Print the model architecture
print(model.summary())
# Define a Keras sequential model
model = keras.Sequential()
# Define the first dense layer
model.add(keras.layers.Dense(16, activation='sigmoid', input_shape=(784,)))
# Apply dropout to the first layer's output
model.add(keras.layers.Dropout(0.25))
# Define the output layer
model.add(keras.layers.Dense(4, activation='softmax', input_shape=(8,)))
# Compile the model
model.compile('adam', loss='categorical_crossentropy')
# Print a model summary
print(model.summary())
# For model 1, pass the input layer to layer 1 and layer 1 to layer 2
m1_layer1 = keras.layers.Dense(12, activation='sigmoid')(m1_inputs)
m1_layer2 = keras.layers.Dense(4, activation='softmax')(m1_layer1)
# For model 2, pass the input layer to layer 1 and layer 1 to layer 2
m2_layer1 = keras.layers.Dense(12, activation='relu')(m2_inputs)
m2_layer2 = keras.layers.Dense(4, activation='softmax')(m2_layer1)
# Merge model outputs and define a functional model
merged = keras.layers.add([m1_layer2, m2_layer2])
model = keras.Model(inputs=[m1_inputs, m2_inputs], outputs=merged)
# Print a model summary
print(model.summary())
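keras.layers.add() sums the two 4-node branch outputs element-wise, so both branches must have identical output shapes; keras.layers.concatenate() is the usual alternative when the branch features should be kept side by side (a sketch):
# Sketch: concatenation keeps both branches' features instead of summing them
merged_concat = keras.layers.concatenate([m1_layer2, m2_layer2])  # 8 features
alt_model = keras.Model(inputs=[m1_inputs, m2_inputs], outputs=merged_concat)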
# Define a sequential model
model = keras.Sequential()
# Define a hidden layer
model.add(keras.layers.Dense(16, activation='relu', input_shape=(784,)))
# Define the output layer
model.add(keras.layers.Dense(4, activation='softmax', input_shape=(16,)))
# Compile the model
model.compile('SGD', loss='categorical_crossentropy')
# Complete the fitting operation
model.fit(sign_language_features, sign_language_labels, epochs=5)
# Define sequential model
model = keras.Sequential()
# Define the first layer
model.add(keras.layers.Dense(32, activation='sigmoid', input_shape=(784,)))
# Add activation function to classifier
model.add(keras.layers.Dense(4, activation='softmax'))
# Set the optimizer, loss function, and metrics
model.compile(optimizer='RMSprop', loss='categorical_crossentropy', metrics=['accuracy'])
# Add the number of epochs and the validation split
model.fit(sign_language_features, sign_language_labels, epochs=10, validation_split=0.1)
Overfitting detection
# Finish the model compilation
model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001),
              loss='categorical_crossentropy', metrics=['accuracy'])
# Complete the model fit operation
model.fit(sign_language_features, sign_language_labels, epochs=50, validation_split=0.5)
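model.fit returns a History object; a minimal sketch for spotting overfitting is to capture that call and watch the validation loss rise while the training loss keeps falling:
# Sketch: compare train vs. validation loss per epoch from the History object
history = model.fit(sign_language_features, sign_language_labels,
                    epochs=50, validation_split=0.5)
print(history.history['loss'][-1], history.history['val_loss'][-1])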
Evaluate model
# Evaluate the small model using the train data
small_train = small_model.evaluate(train_features, train_labels)
# Evaluate the small model using the test data
small_test = small_model.evaluate(test_features, test_labels)
# Evaluate the large model using the train data
large_train = large_model.evaluate(train_features, train_labels)
# Evaluate the large model using the test data
large_test = large_model.evaluate(test_features, test_labels)
# Print losses
print('\n Small - Train: {}, Test: {}'.format(small_train, small_test))
print('Large - Train: {}, Test: {}'.format(large_train, large_test))
Use estimators
# Import feature_column and estimator; define columns for bedrooms and bathrooms
from tensorflow import feature_column, estimator
bedrooms = feature_column.numeric_column("bedrooms")
bathrooms = feature_column.numeric_column("bathrooms")
# Define the list of feature columns
feature_list = [bedrooms, bathrooms]
def input_fn():
    # Define the labels
    labels = np.array(housing['price'])
    # Define the features
    features = {'bedrooms': np.array(housing['bedrooms']),
                'bathrooms': np.array(housing['bathrooms'])}
    return features, labels
# Define the model and set the number of steps
model = estimator.DNNRegressor(feature_columns=feature_list, hidden_units=[2,2])
model.train(input_fn, steps=1)
# Define the model and set the number of steps
model = estimator.LinearRegressor(feature_columns=feature_list)
model.train(input_fn, steps=2)
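Note that tf.estimator is deprecated in recent TensorFlow releases. Once trained, predictions can be drawn with the same input_fn; predict() returns a generator, and the labels from input_fn are ignored during prediction (a sketch):
# Sketch: estimator.predict yields one prediction dict per example
predictions = model.predict(input_fn)
print(next(iter(predictions)))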