1. Import packages and the dataset: cifar-10-batches-py

import tensorflow as tf
import os
import pickle as pk
import numpy as np

CIFAR_DIR = "./cifar-10-batches-py/"
print(os.listdir(CIFAR_DIR))  # os.listdir returns a list of the names of the files and folders in the given directory
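If the archive has been extracted correctly, the listing should look roughly like this (ordering may vary):

['batches.meta', 'data_batch_1', 'data_batch_2', 'data_batch_3', 'data_batch_4', 'data_batch_5', 'readme.html', 'test_batch']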

2. Read and preprocess the data

def load_data(filename):
    '''Read data from a CIFAR-10 batch file.'''
    with open(filename, 'rb') as f:
        data = pk.load(f, encoding='bytes')
        return data[b'data'], data[b'labels']
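Each batch file holds 10000 examples. A quick peek at what load_data returns (shapes follow the standard CIFAR-10 python format; the variable names below are just for illustration):

sample_data, sample_labels = load_data(os.path.join(CIFAR_DIR, 'data_batch_1'))
print(sample_data.shape, sample_data.dtype)  # (10000, 3072) uint8: each row is one 32x32 image, R/G/B planes concatenated
print(len(sample_labels))                    # 10000 integer labels in 0..9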
    
class CifarData:
    '''Data processing'''
    def __init__(self, filenames, need_shuffle):
        # need_shuffle: shuffle the dataset so that batches carry no ordering dependence between samples
        
        all_data = []
        all_labels = []
        
        for filename in filenames:
            data, labels = load_data(filename)
            all_data.append(data)
            all_labels.append(labels)
                    
        self._data = np.vstack(all_data)  # stack all batches vertically into one matrix
        self._data = self._data / 127.5 - 1  # normalize to roughly [-1, 1], since pixel values range over 0-255
        self._labels = np.hstack(all_labels)
        print(self._data.shape)
        print(self._labels.shape)
        
        self._num_examples = self._data.shape[0]
        self._need_shuffle = need_shuffle
        self._indicator = 0  # current position within the dataset traversal
        if self._need_shuffle:
            self._shuffle_data()
    
    def _shuffle_data(self):
        '''Shuffle the data in place.'''
        p = np.random.permutation(self._num_examples)  # random permutation, e.g. [0,1,2,3,4,5] -> [5,3,2,4,0,1]
        self._data = self._data[p]
        self._labels = self._labels[p]
        
    def next_batch(self, batch_size):
        '''Return the next batch_size examples.'''
        end_indicator = self._indicator + batch_size
        if end_indicator > self._num_examples:
            if self._need_shuffle:
                self._shuffle_data()
                self._indicator = 0
                end_indicator = batch_size
            else:
                raise Exception('no more examples left')
        if end_indicator > self._num_examples:
            raise Exception('batch size is larger than all examples')
        batch_data = self._data[self._indicator:end_indicator]
        batch_labels = self._labels[self._indicator:end_indicator]
        self._indicator = end_indicator
        return batch_data, batch_labels
    
train_filenames = [os.path.join(CIFAR_DIR, 'data_batch_%d' % i) for i in range(1,6)]
test_data_filenames = [os.path.join(CIFAR_DIR,'test_batch')]

train_data = CifarData(train_filenames, True)
test_data = CifarData(test_data_filenames, False)

# batch_data, batch_labels = train_data.next_batch(10)
# print(batch_data)    # shape (10, 3072), values in [-1, 1]
# print(batch_labels)  # shape (10,), integer labels 0-9

3. Build the computation graph

x = tf.placeholder(tf.float32, [None, 3072])  # placeholder; None means the number of input samples is not fixed

x_image = tf.reshape(x, [-1, 3, 32, 32])  # unflatten each row into a 3-channel image (channel-first)
x_image = tf.transpose(x_image, perm=[0, 2, 3, 1])  # reorder the axes to channels-last
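A minimal numpy sketch of what this axis reordering does (CIFAR-10 stores images channel-first, while tf.layers.conv2d defaults to channels-last):

demo = np.zeros((1, 3, 32, 32))                # [batch, channel, height, width]
print(np.transpose(demo, (0, 2, 3, 1)).shape)  # (1, 32, 32, 3): [batch, height, width, channel]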


# y -- shape [None], ground-truth labels, e.g. [0, 5, 6, 3]
y = tf.placeholder(tf.int64, [None])

# conv1: neuron maps / feature maps / output images; each conv uses a ReLU activation
# first convolutional block
conv1_1 = tf.layers.conv2d(x_image,
                           32,
                           (3, 3),
                           padding='same',
                           activation=tf.nn.relu,
                           name='conv1_1')
conv1_2 = tf.layers.conv2d(conv1_1,
                           32,
                           (3, 3),
                           padding='same',
                           activation=tf.nn.relu,
                           name='conv1_2')
# max pooling, output: 16 x 16
pooling1 = tf.layers.max_pooling2d(conv1_2,
                                   (2, 2),
                                   (2, 2),
                                   name='pool1')
# second convolutional block
conv2_1 = tf.layers.conv2d(pooling1,
                           32,
                           (3, 3),
                           padding='same',
                           activation=tf.nn.relu,
                           name='conv2_1')
conv2_2 = tf.layers.conv2d(conv2_1,
                           32,
                           (3, 3),
                           padding='same',
                           activation=tf.nn.relu,
                           name='conv2_2')
# max pooling, output: 8 x 8
pooling2 = tf.layers.max_pooling2d(conv2_2,
                                   (2, 2),
                                   (2, 2),
                                   name='pool2')
# third convolutional block
conv3_1 = tf.layers.conv2d(pooling2,
                           32,
                           (3, 3),
                           padding='same',
                           activation=tf.nn.relu,
                           name='conv3_1')
conv3_2 = tf.layers.conv2d(conv3_1,
                           32,
                           (3, 3),
                           padding='same',
                           activation=tf.nn.relu,
                           name='conv3_2')
# max pooling, output: 4 x 4 x 32
pooling3 = tf.layers.max_pooling2d(conv3_2,
                                   (2, 2),
                                   (2, 2),
                                   name='pool3')
# flatten to 2-D: [None, 4 * 4 * 32]
flatten = tf.layers.flatten(pooling3)

# fully connected layer mapping the features to the 10 classes
y_ = tf.layers.dense(flatten, 10)
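For reference, the shape at each stage ('same' padding keeps the spatial size; each 2x2/stride-2 pooling halves it; batch dimension omitted):

# input:   32 x 32 x 3
# block 1: conv 32 x 32 x 32 -> pool1 16 x 16 x 32
# block 2: conv 16 x 16 x 32 -> pool2  8 x  8 x 32
# block 3: conv  8 x  8 x 32 -> pool3  4 x  4 x 32
# flatten: 512 (= 4 * 4 * 32)
# dense:   10 logits, one per CIFAR-10 class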


# cross-entropy loss
# 1. y_ -> softmax
# 2. y -> one-hot
# 3. loss = cross entropy, y * log(y_)
loss = tf.losses.sparse_softmax_cross_entropy(labels=y, logits=y_)
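The fused op above is what you would use in practice, but the three steps can also be spelled out by hand; a sketch of the equivalent computation (assumes TF 1.5+ for softmax_cross_entropy_with_logits_v2):

y_one_hot = tf.one_hot(y, 10)                    # step 2: one-hot encode the integer labels
loss_manual = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(  # steps 1 + 3: softmax, then cross entropy
        labels=y_one_hot, logits=y_))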


'''
# Alternative: mean-squared-error loss on sigmoid outputs
# p_y_1 -- shape [None, 1]
p_y_1 = tf.nn.sigmoid(y_)  # convert to probabilities

# y_reshape -- shape [None, 1]
y_reshape = tf.reshape(y, (-1, 1))
y_reshape_float = tf.cast(y_reshape, tf.float32)

loss = tf.reduce_mean(tf.square(y_reshape_float - p_y_1))
'''

# index of the largest logit = predicted class
predict = tf.argmax(y_, 1)

correct_prediction = tf.equal(predict, y)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float64))

# define the gradient-descent training op (Adam optimizer)
with tf.name_scope('train_op'):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)

4. Define the variables shown on the TensorBoard dashboard

def variable_summary(var, name):
    '''Utility: build summaries for several statistics of a variable.
        var: the variable to summarize
        name: a name scope, to avoid naming conflicts
    '''
    with tf.name_scope(name):
        mean = tf.reduce_mean(var)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('mean', mean)
        tf.summary.scalar('stddev', stddev)
        tf.summary.scalar('min', tf.reduce_min(var))
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.histogram('histogram', var)
        # histogram: records the distribution of the variable's values
        
with tf.name_scope('summary'):
    '''Apply variable_summary to each conv layer's output'''
    variable_summary(conv1_1,'conv1_1')
    variable_summary(conv1_2,'conv1_2')
    variable_summary(conv2_1,'conv2_1')
    variable_summary(conv2_2,'conv2_2')
    variable_summary(conv3_1,'conv3_1')
    variable_summary(conv3_2,'conv3_2')

loss_summary = tf.summary.scalar('loss', loss)
# tf.summary.scalar: records a scalar time series of a node's intermediate values
# e.g. 'loss': <10, 1.1>, <20, 1.08>, ..., <step, value>

accuracy_summary = tf.summary.scalar('accuracy', accuracy)

source_image = (x_image + 1) * 127.5  # undo the normalization, back to 0-255 pixel values
inputs_summary = tf.summary.image('inputs_image', source_image)
# tf.summary.image: logs input images; values are expected to be pixels in 0-255

merged_summary = tf.summary.merge_all()
merged_summary_test = tf.summary.merge([loss_summary, accuracy_summary])
# merge_all: merges every summary defined so far into a single op

LOG_DIR = '.'
# base log directory
run_label = 'run_vgg_tensorboard'
# subdirectory for this run
run_dir = os.path.join(LOG_DIR, run_label)
if not os.path.exists(run_dir):
    os.mkdir(run_dir)
train_log_dir = os.path.join(run_dir,'train')
test_log_dir = os.path.join(run_dir,'test')
if not os.path.exists(train_log_dir):
    os.mkdir(train_log_dir)
if not os.path.exists(test_log_dir):
    os.mkdir(test_log_dir)
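After this block the log directories look like:

run_vgg_tensorboard/
    train/   (training summaries and the graph)
    test/    (test summaries)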

5. Compute the summaries during training and write them to the log files

init = tf.global_variables_initializer()  # variable-initialization op
batch_size = 20
train_steps = 10000
test_steps = 100

output_summary_every_steps = 100
# compute the summaries every 100 steps

# session: execute the graph
with tf.Session() as sess:
    sess.run(init)
    train_writer = tf.summary.FileWriter(train_log_dir, sess.graph)
    # sess.graph: also log the computation graph
    # FileWriter: opens a log file and writes data into it
    test_writer = tf.summary.FileWriter(test_log_dir)
    
    fixed_test_batch_data, fixed_test_batch_labels = test_data.next_batch(batch_size)
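    # reusing one fixed test batch for the periodic test summaries keeps successive points comparable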
    
    for i in range(train_steps):
        batch_data, batch_labels = train_data.next_batch(batch_size)
        eval_ops = [loss, accuracy, train_op]
        should_output_summary = ((i+1) % output_summary_every_steps == 0)
        
        if should_output_summary:
            eval_ops.append(merged_summary)
            
        eval_ops_results = sess.run(
            eval_ops,
            feed_dict = {
                x : batch_data,
                y : batch_labels}) 
        loss_val, accu_val = eval_ops_results[0:2]
        if should_output_summary:
            train_summary_str = eval_ops_results[-1]
            train_writer.add_summary(train_summary_str, i+1)
            # i+1: the global step at which to record the summary
            test_summary_str = sess.run([merged_summary_test],
                                       feed_dict={
                                           x:fixed_test_batch_data,
                                           y:fixed_test_batch_labels,
                                       })[0]
            test_writer.add_summary(test_summary_str,i+1)
            
        # with train_op in eval_ops the run performs a training step; otherwise only the values are computed
        
        if (i+1) % 500 == 0:
            print('[Train] step: %d, loss: %4.5f, accuracy: %4.5f' % (i+1, loss_val, accu_val))
        if (i+1) % 5000 == 0:
            test_data = CifarData(test_data_filenames, False)
            all_test_acc_val = []
            for j in range(test_steps):
                test_batch_data, test_batch_labels = test_data.next_batch(batch_size)
                test_acc_val = sess.run(
                    [accuracy] , feed_dict = {
                        x : test_batch_data,
                        y : test_batch_labels
                    }
                )
                all_test_acc_val.append(test_acc_val)
            test_acc = np.mean(all_test_acc_val)
            print('[Test] step: %d, accuracy: %4.5f' % (i+1, test_acc))

6. Parsing the log files

tensorboard --logdir=train:"train",test:"test"

Note: use double quotes here; with single quotes I got an error.
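Alternatively, recent TensorBoard versions can simply be pointed at the parent run directory and will pick up the train/ and test/ subfolders as separate runs:

tensorboard --logdir=run_vgg_tensorboard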

7. Results

[Screenshots: the VGG run viewed in TensorBoard]
