# NOTE: load_boston was removed in scikit-learn 1.2; this example assumes scikit-learn < 1.2.
from sklearn.datasets import load_boston
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Column 5 of the Boston housing data is RM, the average number of rooms per
# dwelling; we regress the median house price ('target') on it.
boston = load_boston()
data = pd.DataFrame(boston.data)
data['target'] = boston.target

# Quick scatter plot of rooms vs. price.
plt.plot(data[5], data['target'], '.')
plt.show()
def reg(x, y, alpha, step, theta0, theta1):
    """Fit y = theta0 + theta1 * x by batch gradient descent."""
    m = len(x)
    assert m == len(y)
    for i in range(step):
        # Residuals of the current fit; both parameter updates reuse them,
        # so theta0 and theta1 are updated simultaneously.
        temp = theta0 + theta1 * x - y
        theta0 = theta0 - alpha / m * sum(temp)
        theta1 = theta1 - alpha / m * sum(temp * x)
    return theta0, theta1
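
# A small helper added as a sketch (not in the original): the loop above
# descends the usual squared-error cost J = 1/(2m) * sum((theta0 + theta1*x - y)^2),
# so this can be called between runs to monitor convergence.
def cost(x, y, theta0, theta1):
    m = len(x)
    return sum((theta0 + theta1 * x - y) ** 2) / (2 * m)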
def plot_data(x, y, theta0, theta1):
    plt.plot(x, y, '.')
    plt.plot(x, theta0 + theta1 * x)
    plt.show()
theta0 = 0
theta1 = 0
theta0, theta1 = reg(data[5], data['target'], 0.02, 20000, theta0, theta1)
print(theta0, theta1)
plot_data(data[5], data['target'], theta0, theta1)
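
# A sanity check added as a sketch (not in the original): for one feature the
# least-squares optimum has a closed form, which gradient descent should
# approach: theta1* = cov(x, y) / var(x), theta0* = mean(y) - theta1* * mean(x).
x_arr = data[5].values
y_arr = data['target'].values
t1 = np.cov(x_arr, y_arr, bias=True)[0, 1] / np.var(x_arr)
t0 = y_arr.mean() - t1 * x_arr.mean()
print('closed form:', t0, t1)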
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(data[5].values.reshape(-1, 1), data['target'].values.reshape(-1, 1))
y = model.predict(data[5].values.reshape(-1, 1))
plt.plot(data[5], data['target'], '.', label='data')
plt.plot(data[5], y, label='sklearn')
plt.plot(data[5], theta0 + data[5] * theta1, label='myself')
plt.legend()
plt.show()
# R^2 of the sklearn fit, plus its parameters (they should be close to theta0, theta1).
print(model.score(data[5].values.reshape(-1, 1), data['target'].values.reshape(-1, 1)))
print(model.intercept_, model.coef_)
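
# The same R^2 for the hand-rolled fit, added as a sketch (not in the
# original), computed directly as 1 - SS_res / SS_tot for comparison:
residuals = data['target'] - (theta0 + theta1 * data[5])
ss_res = (residuals ** 2).sum()
ss_tot = ((data['target'] - data['target'].mean()) ** 2).sum()
print('my R^2:', 1 - ss_res / ss_tot)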
import tensorflow as tf
# NOTE: this is the TensorFlow 1.x API (placeholders and sessions); under
# TensorFlow 2.x it needs tf.compat.v1 with eager execution disabled.

X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
w = tf.Variable(0.0, name='weights')
b = tf.Variable(0.0, name='biases')
y = X * w + b
cost = tf.square(Y - y)  # per-sample squared error
optim = tf.train.GradientDescentOptimizer(0.02).minimize(cost)

with tf.Session() as sess:
    init = tf.global_variables_initializer()
    sess.run(init)
    # Stochastic gradient descent, one sample per step; note that only the
    # first 100 of the 506 samples are used here.
    for j in range(200):
        for i in range(100):
            sess.run(optim, feed_dict={X: data[5].values[i], Y: data['target'].values[i]})
    print(sess.run(w))
    print(sess.run(b))
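
# A batch-gradient-descent variant, added as a sketch (not in the original):
# because the placeholders above are unshaped, the whole column can be fed at
# once, and reducing the per-sample cost to a mean gives the batch objective.
batch_cost = tf.reduce_mean(tf.square(Y - y))
batch_optim = tf.train.GradientDescentOptimizer(0.02).minimize(batch_cost)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for j in range(20000):
        sess.run(batch_optim, feed_dict={X: data[5].values,
                                         Y: data['target'].values})
    print(sess.run(w), sess.run(b))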
learning_rate = 0.02
training_epochs = 200
display_step = 50

train_X = data[5].values
train_y = data['target'].values
n_samples = train_X.shape[0]

# Same model again, this time with randomly initialized parameters; the
# explicit dtype keeps the variables float32 to match the placeholders.
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
W = tf.Variable(np.random.randn(), dtype=tf.float32)
b = tf.Variable(np.random.randn(), dtype=tf.float32)
activation = W * X + b
cost = tf.square(Y - activation)
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(training_epochs):
        for i in range(n_samples):  # use all samples, not a hard-coded 100
            sess.run(optimizer, feed_dict={X: train_X[i], Y: train_y[i]})
        if epoch % display_step == 0:
            # Mean squared error over the whole training set.
            c = np.mean(sess.run(cost, feed_dict={X: train_X, Y: train_y}))
            print('Epoch:%d' % (epoch + 1), 'cost:', c,
                  'W:', sess.run(W), 'b:', sess.run(b))
    print("Optimization Finished!")
    print('Epoch:%d' % (epoch + 1),
          'W:', sess.run(W), 'b:', sess.run(b))

    plt.scatter(train_X, train_y, c='r', marker='o')
    plt.plot(train_X, sess.run(W) * train_X + sess.run(b))
    plt.show()
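
    # A closing comparison, added as a sketch (not in the original): save the
    # learned parameters before the session closes.
    W_final, b_final = sess.run([W, b])

# Intercept and slope from each of the three approaches should agree closely.
print('manual :', theta0, theta1)
print('sklearn:', model.intercept_, model.coef_)
print('tf     :', b_final, W_final)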