0. This article gives the complete code with a detailed line-by-line explanation.

Environment: Jupyter Notebook, Python 3.6, TensorFlow 1.12
The dataset is a portion of the Tsinghua news dataset extracted by the author. Baidu Cloud download link:
Link: https://pan.baidu.com/s/1-Rm9PU7ekU3zx_7gTB9ibw  extraction code: 3d3o
Put the dataset and the code in the same folder. For the preceding part of this series, see: [link]https://blog.csdn.net/weixin_43435675/article/details/88129137
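If you want to confirm your environment matches before running anything, a quick version check like this can help (optional; your exact patch versions may differ):

import sys
import tensorflow as tf
print(sys.version)     # expect 3.6.x
print(tf.__version__)  # expect 1.12.x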

# The following collects all sample file paths
import os
def getFilePathList(rootDir):
    filePath_list = []
    for walk in os.walk(rootDir):  # walk[0] is the directory, walk[2] the filenames inside it
        part_filePath_list = [os.path.join(walk[0], file) for file in walk[2]]
        filePath_list.extend(part_filePath_list)
    return filePath_list
filePath_list = getFilePathList('part_cnews')
len(filePath_list)  # 1400 .txt files in total make up this list

Run result: 1400

filePath_list[5]

Run result: (screenshot omitted)

# Gather the label of every sample into the list label_list.
# Note: Windows and Linux use different path separators, so split on os.sep
label_list = []
for filePath in filePath_list:
    label = filePath.split(os.sep)[1]
    label_list.append(label)
len(label_list)

Run result: 1400
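Since the separator note above matters in practice, here is a sketch of a separator-independent alternative using pathlib (a hypothetical refactor, not the original code):

from pathlib import Path
# Path.parts splits on the correct separator on any OS,
# e.g. ('part_cnews', '<label>', 'xxx.txt') -> parts[1] is the label
label_list = [Path(filePath).parts[1] for filePath in filePath_list]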

# Count how many samples each label has
import pandas as pd 
pd.value_counts(label_list)

Run result: (screenshot omitted)

# Save label_list with the pickle library
import pickle
with open('label_list.pickle','wb') as file:
    pickle.dump(label_list,file)
# Next, read the content of every sample and save it as content_list.
# pickle.dump persists a Python object as a binary file, and binary files load very fast. To avoid running out of memory, dump once after every fixed number of files is read.
import time
import pickle
import re

def getFile(filePath):
    with open(filePath, encoding='utf8') as file:
        fileStr = ''.join(file.readlines(1000))
    return fileStr
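Note the 1000 passed to readlines: it is a size hint, not a line count. Reading stops at the first line boundary after roughly 1000 characters, so getFile keeps only the beginning of each article. A toy illustration (with a hypothetical demo file):

with open('demo.txt', 'w', encoding='utf8') as file:
    file.write('line one\nline two\nline three\n')
with open('demo.txt', encoding='utf8') as file:
    print(file.readlines(10))  # ['line one\n', 'line two\n'] -- stops once 10 characters are exceeded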

interval = 100
n_samples = len(label_list)
startTime = time.time()
directory_name = 'content_list'
if not os.path.isdir(directory_name):
    os.mkdir(directory_name)
# Read the files in chunks of `interval` and pickle each chunk separately
for i in range(0, n_samples, interval):
    startIndex = i
    endIndex = i + interval
    content_list = []
    print('%06d-%06d start' %(startIndex, endIndex))
    for filePath in filePath_list[startIndex:endIndex]:
        fileStr = getFile(filePath)
        content = re.sub(r'\s+', ' ', fileStr)  # collapse whitespace runs into single spaces
        content_list.append(content)
    save_fileName = directory_name + '/%06d-%06d.pickle' %(startIndex, endIndex)
    with open(save_fileName, 'wb') as file:
        pickle.dump(content_list, file)
    used_time = time.time() - startTime
    print('%06d-%06d used time: %.2f seconds' %(startIndex, endIndex, used_time))

Run result: (screenshot omitted)

# The code above acquires the raw data and preprocesses it. The content_list folder, the label_list.pickle file, and the code file must all sit in the same directory.
# Load the data
import time
import pickle
import os

def getFilePathList(rootDir):
    filePath_list = []
    for walk in os.walk(rootDir):
        part_filePath_list = [os.path.join(walk[0], file) for file in walk[2]]
        filePath_list.extend(part_filePath_list)
    return filePath_list

startTime = time.time()
contentListPath_list = getFilePathList('content_list')
content_list = []
for filePath in contentListPath_list:
    with open(filePath, 'rb') as file:
        part_content_list = pickle.load(file)
    content_list.extend(part_content_list)
with open('label_list.pickle', 'rb') as file:
    label_list = pickle.load(file)
used_time = time.time() - startTime
print('used time: %.2f seconds' %used_time)
sample_size = len(content_list)
print('length of content_list, i.e. number of samples: %d' %sample_size)

Run result: (screenshot omitted)

len(content_list)

Run result: 1400

# Build the vocabulary: each element of content_list is the text of one article (a string).
# Count every character across all articles and keep the 5000 most frequent ones as vocabulary_list.
from collections import Counter
def getVocabularyList(content_list, vocabulary_size):
    allContent_str = ''.join(content_list)
    counter = Counter(allContent_str)
    vocabulary_list = [k[0] for k in counter.most_common(vocabulary_size)]
    return ['PAD'] + vocabulary_list  # reserve index 0 for the padding token
startTime = time.time()
vocabulary_list = getVocabularyList(content_list, 5000)
used_time = time.time() - startTime
print('used time: %.2f seconds' %used_time)

Run result: (screenshot omitted)
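To see what getVocabularyList returns, here is a toy run (hypothetical input); it also shows why index 0 is reserved: 'PAD' gets id 0, so real characters get ids starting from 1.

print(getVocabularyList(['abab', 'abc'], 3))  # ['PAD', 'a', 'b', 'c']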

# Save the vocabulary
import pickle 

with open('vocabulary_list.pickle', 'wb') as file:
    pickle.dump(vocabulary_list, file)
# Load the vocabulary: once it has been built and saved, later runs can load the saved file directly, saving the time of rebuilding it.
import pickle

with open('vocabulary_list.pickle', 'rb') as file:
    vocabulary_list = pickle.load(file)
# Data preparation
# Jupyter tip: press Esc in a code cell to enter command mode (the bar on the cell's left turns blue), then press L to toggle line numbers
import time
startTime = time.time()  # record the start time of this section
from sklearn.model_selection import train_test_split
train_X, test_X, train_y, test_y = train_test_split(content_list, label_list)
train_content_list = train_X
train_label_list = train_y
test_content_list = test_X
test_label_list = test_y
used_time = time.time() - startTime  # how long this section has taken so far
print('train_test_split used time : %.2f seconds' %used_time)
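Note that train_test_split defaults to a 75/25 split with random shuffling. If you want every news category represented in the same proportion in both sets, sklearn's stratify parameter does that (an optional variant, not used in the original run):

train_X, test_X, train_y, test_y = train_test_split(
    content_list, label_list, test_size=0.25, stratify=label_list)  # keep label ratios identical in both splits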

vocabulary_size = 10000  # vocabulary size (an upper bound here; only ids 0-5000 are actually used)
sequence_length = 600  # sequence length
embedding_size = 64  # embedding dimension
num_filters = 256  # number of convolution kernels
filter_size = 5  # convolution kernel size
num_fc_units = 128  # number of fully connected units
dropout_keep_probability = 0.5  # dropout keep probability
learning_rate = 1e-3  # learning rate
batch_size = 64  # training batch size

word2id_dict = dict([(b, a) for a, b in enumerate(vocabulary_list)])  # map each character to its id via a list comprehension, then convert the pairs to a dict
content2idList = lambda content : [word2id_dict[word] for word in content if word in word2id_dict]  # convert every character of an article to its id; characters outside the vocabulary are dropped
train_idlist_list = [content2idList(content) for content in train_content_list]  # a list of lists: each element is the id list of one article
used_time = time.time() - startTime  # how long the program has taken to reach this point
print('content2idList used time : %.2f seconds' %used_time)
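A toy illustration of content2idList (with a hypothetical three-entry vocabulary): characters found in the dict map to their ids, and everything else is silently dropped.

toy_dict = {'PAD': 0, '足': 1, '球': 2}
toy2id = lambda content: [toy_dict[word] for word in content if word in toy_dict]
print(toy2id('足球比赛'))  # [1, 2] -- '比' and '赛' are not in the toy vocabulary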

import numpy as np
num_classes = np.unique(label_list).shape[0]  # number of label classes; in this article it is 14
# The next lines build the feature matrix and the target values used for training
import tensorflow.contrib.keras as kr
train_X = kr.preprocessing.sequence.pad_sequences(train_idlist_list, sequence_length)  # pad/truncate every sample to sequence_length, i.e. the 600 set above
from sklearn.preprocessing import LabelEncoder  # import the LabelEncoder class from sklearn.preprocessing
labelEncoder = LabelEncoder()  # instantiate a LabelEncoder object
train_y = labelEncoder.fit_transform(train_label_list)  # encode the string labels as integers
train_Y = kr.utils.to_categorical(train_y, num_classes)  # one-hot encode the integer labels with keras.utils.to_categorical
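A toy illustration of the two encoding steps (hypothetical labels), using the LabelEncoder and kr imported above: fit_transform sorts the class names and assigns integer ids, then to_categorical turns those ids into one-hot rows.

toy_encoder = LabelEncoder()
toy_ids = toy_encoder.fit_transform(['体育', '财经', '体育'])
print(toy_ids)                              # [0 1 0] -- classes are sorted, so '体育' -> 0
print(kr.utils.to_categorical(toy_ids, 2))  # [[1. 0.] [0. 1.] [1. 0.]]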

import tensorflow as tf
tf.reset_default_graph()  # reset the TensorFlow graph so this cell can be re-run safely
X_holder = tf.placeholder(tf.int32, [None, sequence_length])
Y_holder = tf.placeholder(tf.float32, [None, num_classes])
used_time = time.time() - startTime
print('data preparation used time : %.2f seconds' %used_time)

Run result: (screenshot omitted)

list(word2id_dict.items())[:20]

Run result: (screenshot omitted)

# Build the neural network
embedding = tf.get_variable('embedding',
                            [vocabulary_size, embedding_size])  # trainable embedding matrix of shape vocabulary_size x embedding_size, i.e. 10000 x 64
embedding_inputs = tf.nn.embedding_lookup(embedding,  # embed the input ids; the result has shape batch_size x sequence_length x embedding_size, i.e. 64 x 600 x 64
                                          X_holder)
conv = tf.layers.conv1d(embedding_inputs,  # tf.layers.conv1d takes 3 arguments: the input data, the number of kernels num_filters, and the kernel size filter_size
                        num_filters,       # the result has shape batch_size x 596 x num_filters, where 596 = 600 - 5 + 1
                        filter_size)
max_pooling = tf.reduce_max(conv,  # max over the time dimension (axis 1); the result has shape batch_size x num_filters, i.e. 64 x 256
                            [1])
full_connect = tf.layers.dense(max_pooling,  # fully connected layer; the result has shape batch_size x num_fc_units, i.e. 64 x 128
                               num_fc_units)
full_connect_dropout = tf.contrib.layers.dropout(full_connect,  # tf.contrib.layers.dropout takes 2 arguments: the input data and the keep probability
                                                 keep_prob=dropout_keep_probability)
full_connect_activate = tf.nn.relu(full_connect_dropout)  # activation function
softmax_before = tf.layers.dense(full_connect_activate,  # fully connected layer; the result has shape batch_size x num_classes, i.e. 64 x 14
                                 num_classes)
predict_Y = tf.nn.softmax(softmax_before)  # tf.nn.softmax turns the logits into predicted probabilities
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y_holder,
                                                           logits=softmax_before)
loss = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate)
train = optimizer.minimize(loss)
isCorrect = tf.equal(tf.argmax(Y_holder, 1), tf.argmax(predict_Y, 1))
accuracy = tf.reduce_mean(tf.cast(isCorrect, tf.float32))
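Before initializing the parameters, you can verify the shape arithmetic in the comments above by printing the static shape of each tensor (a quick sanity check, not part of the original post); the first dimension is the batch and stays unknown until runtime:

print(embedding_inputs.shape)  # (?, 600, 64)
print(conv.shape)              # (?, 596, 256)
print(max_pooling.shape)       # (?, 256)
print(full_connect.shape)      # (?, 128)
print(softmax_before.shape)    # (?, 14)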
# Parameter initialization: what matters in a neural network model is its parameters, which must be initialized before training starts
init = tf.global_variables_initializer()
session = tf.Session()
session.run(init)
# Train the model
test_idlist_list = [content2idList(content) for content in test_content_list]  # convert the test set the same way as the training set
test_X = kr.preprocessing.sequence.pad_sequences(test_idlist_list, sequence_length)
test_y = labelEncoder.transform(test_label_list)
test_Y = kr.utils.to_categorical(test_y, num_classes)
import random
for i in range(100):  # train for 100 iterations
    selected_index = random.sample(list(range(len(train_y))), k=batch_size)  # draw batch_size, i.e. 64, random samples from the training set for mini-batch gradient descent
    batch_X = train_X[selected_index]
    batch_Y = train_Y[selected_index]
    session.run(train, {X_holder:batch_X, Y_holder:batch_Y})  # each run is one training step
    step = i + 1
    if step % 10 == 0:  # print every 10 steps
        selected_index = random.sample(list(range(len(test_y))), k=100)  # draw 100 random samples from the test set
        batch_X = test_X[selected_index]
        batch_Y = test_Y[selected_index]
        loss_value, accuracy_value = session.run([loss, accuracy], {X_holder:batch_X, Y_holder:batch_Y})
        print('step:%d loss:%.4f accuracy:%.4f' %(step, loss_value, accuracy_value))

Run result: (screenshot omitted)

import warnings
warnings.filterwarnings("ignore")
def predict(input_content):
    # idList must be a list (of id lists); otherwise
    # kr.preprocessing.sequence.pad_sequences raises an error
    idList = [content2idList(input_content)]
    X = kr.preprocessing.sequence.pad_sequences(idList, sequence_length)
    Y = session.run(predict_Y, {X_holder:X})
    y = np.argmax(Y, axis=1)
    label = labelEncoder.inverse_transform(y)[0]
    return label

selected_index = random.sample(range(len(test_content_list)), k=1)[0]
selected_sample = test_content_list[selected_index]
true_label = test_label_list[selected_index]
predict_label = predict(selected_sample)

print('selected_sample :', selected_sample)
print('true_label :', true_label)
print('predict_label :', predict_label, '\n')
print('predict whatever you want, for example:')
input_content = "足球裁判打人"  # "football referee hits someone" -- any Chinese text works here
print('predict("%s") :' %input_content, predict(input_content))

Run result: (screenshot omitted)

# Confusion matrix
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix

def predictAll(test_X, batch_size=100):
    predict_value_list = []
    for i in range(0, len(test_X), batch_size):
        selected_X = test_X[i: i + batch_size]
        predict_value = session.run(predict_Y, {X_holder:selected_X})
        predict_value_list.extend(predict_value)
    return np.array(predict_value_list)

Y = predictAll(test_X)
y = np.argmax(Y, axis=1)
predict_label_list = labelEncoder.inverse_transform(y)
pd.DataFrame(confusion_matrix(test_label_list, predict_label_list), 
             columns=labelEncoder.classes_,
             index=labelEncoder.classes_ )

Run result: (screenshot omitted)

# Classification report
import numpy as np
import pandas as pd
from sklearn.metrics import precision_recall_fscore_support

def eval_model(y_true, y_pred, labels):
    # per-class precision, recall, F1 and support
    p, r, f1, s = precision_recall_fscore_support(y_true, y_pred)
    # support-weighted overall precision, recall and F1
    tot_p = np.average(p, weights=s)
    tot_r = np.average(r, weights=s)
    tot_f1 = np.average(f1, weights=s)
    tot_s = np.sum(s)
    res1 = pd.DataFrame({
        u'Label': labels,
        u'Precision': p,
        u'Recall': r,
        u'F1': f1,
        u'Support': s
    })
    res2 = pd.DataFrame({
        u'Label': ['Overall'],
        u'Precision': [tot_p],
        u'Recall': [tot_r],
        u'F1': [tot_f1],
        u'Support': [tot_s]
    })
    res2.index = [999]
    res = pd.concat([res1, res2])
    return res[['Label', 'Precision', 'Recall', 'F1', 'Support']]

eval_model(test_label_list, predict_label_list, labelEncoder.classes_)

Run result: (screenshot omitted)
