import numpy as np
import pandas as pd
import tensorflow as tf

import data_helper

 7 n_class = 3
 8 learning_rate = 0.05
 9 s_limit_len = 10
10 word_embedding_size = 100
11 voc_size = 7000
12 
def get_weights(shape):
    """Create a weight Variable of `shape`, initialized from a truncated
    normal distribution with stddev 0.1 (standard small-random CNN init)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def get_bias(shape):
    """Create a bias Variable of `shape`, initialized to 0.1.

    Bug fix: the original ignored `shape` and returned a scalar constant,
    so every layer shared one 0-d bias. Passing shape= gives one bias per
    output unit, as the call sites intend.
    """
    return tf.Variable(tf.constant(0.1, shape=shape))
def conv2d(input_x, W):
    """2-D convolution with stride 1 in every dimension and SAME padding.

    input_x: assumed 4-D, [batch, height, width, channels] — TODO confirm
    W:       assumed 4-D filter, [f_h, f_w, in_channels, out_channels]
    Returns the convolved tensor (same spatial size, due to SAME padding).
    """
    return tf.nn.conv2d(input_x, W, strides=[1, 1, 1, 1], padding="SAME")
def maxpooling(x, ksize, strides):
    """Max-pool `x` with the given window (`ksize`) and `strides`, SAME padding.

    Fix: the parameter was misspelled `kszie`; renamed to the conventional
    `ksize` (all calls in this file pass it positionally).
    """
    return tf.nn.max_pool(x, ksize=ksize, strides=strides, padding="SAME")
# ---- TextCNN-style graph: parallel convolutions of widths 1/3/5/7 ----
inputs = tf.placeholder(tf.int32, [None, s_limit_len], name="inputs")
labels = tf.placeholder(tf.int32, [None, n_class], name="label_one-hot")

# Embedding lookup: [batch, s_limit_len] -> [batch, s_limit_len, word_embedding_size]
embedding_w = tf.Variable(
    tf.truncated_normal([voc_size, word_embedding_size], stddev=0.1, dtype=tf.float32))
embedding_layer = tf.nn.embedding_lookup(embedding_w, inputs)

# conv2d requires a 4-D input; add a trailing channel axis:
# [batch, s_limit_len, word_embedding_size, 1]
embedding_expanded = tf.expand_dims(embedding_layer, -1)

n_filters = 100  # feature maps produced per filter width

# Fixes vs. the original:
#  * filters must be rank-4 [h, w, in_ch, out_ch], not rank-2;
#  * tf.nn.conv2d was called without the required strides/padding —
#    route through the conv2d() helper defined above instead;
#  * maxpooling() was called without its required ksize/strides — pool
#    over the full sentence length so each filter yields one feature.
conv1_W = get_weights([1, word_embedding_size, 1, n_filters])
conv1 = conv2d(embedding_expanded, conv1_W)

conv3_W = get_weights([3, word_embedding_size, 1, n_filters])
conv3 = conv2d(embedding_expanded, conv3_W)

conv5_W = get_weights([5, word_embedding_size, 1, n_filters])
conv5 = conv2d(embedding_expanded, conv5_W)

conv7_W = get_weights([7, word_embedding_size, 1, n_filters])
conv7 = conv2d(embedding_expanded, conv7_W)

pool_ksize = [1, s_limit_len, 1, 1]   # pool across the whole sentence
pool_strides = [1, 1, 1, 1]
feature_map_1 = maxpooling(conv1, pool_ksize, pool_strides)
feature_map_3 = maxpooling(conv3, pool_ksize, pool_strides)
feature_map_5 = maxpooling(conv5, pool_ksize, pool_strides)
feature_map_7 = maxpooling(conv7, pool_ksize, pool_strides)

# tf.concat() was called with no arguments; join the four pooled maps
# along the channel axis.
features = tf.concat(
    [feature_map_1, feature_map_3, feature_map_5, feature_map_7], axis=-1)
# (blog-page residue, kept as comments)
# View Code
#
# Related articles: