"""Linear regression on the Boston housing dataset via manual gradient descent.

Trains a 13-feature linear model (no bias term) with tf.GradientTape and
plots the training/test loss curves.
"""
import tensorflow as tf
import matplotlib.pylab as plt
import numpy as np

# Load the dataset (downloaded on first use).
boston_house = tf.keras.datasets.boston_housing
(train_x, train_y), (test_x, test_y) = boston_house.load_data(test_split=0.1)
print(train_x.shape, train_y.shape)

# Min-max normalize the features. FIX: the test set must be scaled with the
# TRAINING set's statistics — scaling it with its own min/max (as the original
# code did) applies an inconsistent transform and leaks test-set information.
x_min = train_x.min(axis=0)
x_range = train_x.max(axis=0) - x_min
X = (train_x - x_min) / x_range    # training features, each column in [0, 1]
t_X = (test_x - x_min) / x_range   # test features, scaled with TRAIN stats
print('X:', X, X.shape)

# Reshape house prices into column vectors to match the (n, 1) predictions.
Y = train_y.reshape(-1, 1)
t_Y = test_y.reshape(-1, 1)
print('Y.shape:', Y.shape)

# Hyperparameters. (Renamed from 'iter', which shadowed the builtin.)
num_iters = 20000   # gradient-descent steps
learn_rate = 0.01   # learning rate

# Model parameter: one weight per feature, shape (13, 1). No bias term.
W = tf.Variable(np.random.randn(13, 1))

loss_list = []       # training loss (MSE/2) per step, as plain floats
test_loss_list = []  # test loss (MSE/2) per step, as plain floats

for i in range(num_iters):
    # Only the training loss needs to be recorded on the tape.
    with tf.GradientTape() as tape:
        pre_price = tf.matmul(X, W)
        loss = 0.5 * tf.reduce_mean(tf.square(Y - pre_price))

    # Test loss is evaluation only — keep it off the tape.
    pre_test = tf.matmul(t_X, W)
    test_loss = 0.5 * tf.reduce_mean(tf.square(t_Y - pre_test))

    # Store plain floats so the plot lists don't retain 20k TF tensors.
    loss_list.append(float(loss))
    test_loss_list.append(float(test_loss))

    dloss_dw = tape.gradient(loss, W)  # (13, 1) gradient w.r.t. the weights
    W.assign_sub(learn_rate * dloss_dw)

    if i % 1000 == 0:
        print('训练集:i:{},loss:{},W:{}'.format(i, loss, W))
        print('测试集:i:{},test_loss:{},W:{}'.format(i, test_loss, W))

# Final test loss after training.
pre_test = tf.matmul(t_X, W)
test_loss = 0.5 * tf.reduce_mean(tf.square(t_Y - pre_test))
print('测试集loss为:', test_loss)

# Plot the two loss curves.
plt.rcParams["font.family"] = 'SimHei'      # font that can render Chinese labels
plt.rcParams['axes.unicode_minus'] = False  # keep the minus sign renderable with that font
plt.subplot(221)
plt.plot(loss_list, label='训练集损失值')
plt.legend()
plt.subplot(223)
plt.plot(test_loss_list, label='测试集损失值')
plt.legend()
plt.show()