Code demo

This is a short code demo: simple linear regression on the Iris dataset with TensorFlow 1.x, predicting sepal length from petal width.

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
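
# NOTE (added): this script uses the TensorFlow 1.x graph API (tf.placeholder,
# tf.Session), which is not available in the default namespace of TensorFlow 2.x.
# If you only have TF 2.x installed, one workaround is the compat shim:
#   import tensorflow.compat.v1 as tf
#   tf.disable_v2_behavior()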

# iris.data rows are (Sepal Length, Sepal Width, Petal Length, Petal Width)
# feature x: Petal Width (column 3), target y: Sepal Length (column 0)
iris = datasets.load_iris()
x_train = np.array([row[3] for row in iris.data])
y_train = np.array([row[0] for row in iris.data])


# Placeholders for a mini-batch of inputs and targets (column vectors)
x = tf.placeholder(shape=[None, 1], dtype=tf.float32)
y = tf.placeholder(shape=[None, 1], dtype=tf.float32)
# Model parameters: slope W and intercept b for the line y_ = x * W + b
W = tf.Variable(tf.random_normal(shape=[1, 1]))
b = tf.Variable(tf.zeros(shape=[1, 1]))
y_ = tf.add(tf.matmul(x, W), b)
# Mean squared error (L2) loss
loss = tf.reduce_mean(tf.square(y - y_))
optimizer = tf.train.GradientDescentOptimizer(0.05).minimize(loss)
# Alternative: optimizer = tf.train.AdamOptimizer(learning_rate=0.05).minimize(loss)

loss_vec = []
batch_size = 25
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(100):
        # Sample a random mini-batch (with replacement) and reshape each
        # 1-D slice to a [batch_size, 1] column vector for the placeholders
        index = np.random.choice(len(x_train), size=batch_size)
        x_batch = np.transpose([x_train[index]])
        y_batch = np.transpose([y_train[index]])
        # One gradient step, then record the batch loss
        sess.run(optimizer, feed_dict={x: x_batch, y: y_batch})
        loss_batch = sess.run(loss, feed_dict={x: x_batch, y: y_batch})
        loss_vec.append(loss_batch)
        if (i + 1) % 20 == 0:
            print("Step: " + str(i + 1) + ", W: " + str(sess.run(W)) + ", b: " + str(sess.run(b)))
            print("Loss: " + str(loss_batch))

# Plot the L2 loss recorded over the 100 training iterations
plt.plot(loss_vec, 'k-')
plt.title('L2 Loss per Generation')
plt.xlabel('Generation')
plt.ylabel('L2 Loss')
plt.show()
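
As an optional extension (not part of the original demo), the sketch below plots the training data together with the fitted regression line; it relies on the slope and intercept values fetched inside the session above.

# (Added) plot the data points and the fitted line y = slope * x + intercept
plt.plot(x_train, y_train, 'o', label='Data')
best_fit = [slope * xi + intercept for xi in x_train]
plt.plot(x_train, best_fit, 'r-', label='Best fit line')
plt.xlabel('Petal Width')
plt.ylabel('Sepal Length')
plt.legend(loc='upper left')
plt.show()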

