text_fast.py
# -*- coding:utf-8 -*-
__author__ = 'Randolph'

import tensorflow as tf


class TextFAST(object):
    """A fastText model for text classification."""

    def __init__(
            self, sequence_length, vocab_size, embedding_type, embedding_size,
            num_classes, l2_reg_lambda=0.0, pretrained_embedding=None):

        # Placeholders for input, output, dropout_prob and training_tag
        self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")
        self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
        self.is_training = tf.placeholder(tf.bool, name="is_training")

        self.global_step = tf.Variable(0, trainable=False, name="Global_Step")

        def _linear(input_, output_size, scope="SimpleLinear"):
            """
            Linear map: output[k] = sum_i(Matrix[k, i] * input_[i]) + Bias[k].

            Args:
                input_: A 2D Tensor of shape [batch, n].
                output_size: int, second dimension of W.
                scope: VariableScope for the created subgraph; defaults to "SimpleLinear".
            Returns:
                A 2D Tensor of shape [batch, output_size] equal to input_ * W + b,
                where W and b are newly created variables.
            Raises:
                ValueError: if the input has an unspecified or wrong shape.
            """
            shape = input_.get_shape().as_list()
            if len(shape) != 2:
                raise ValueError("Linear is expecting 2D arguments: {0}".format(str(shape)))
            if not shape[1]:
                raise ValueError("Linear expects shape[1] to be defined: {0}".format(str(shape)))
            input_size = shape[1]

            # Now the computation.
            with tf.variable_scope(scope):
                W = tf.get_variable("W", [input_size, output_size], dtype=input_.dtype)
                b = tf.get_variable("b", [output_size], dtype=input_.dtype)

            return tf.nn.xw_plus_b(input_, W, b)
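        # For example, _linear on a [batch_size, embedding_size] tensor with
        # output_size=embedding_size yields a [batch_size, embedding_size] tensor
        # computed as input_ @ W + b; that is how the highway layer below uses it.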
        def _highway_layer(input_, size, num_layers=1, bias=-2.0, f=tf.nn.relu):
            """
            Highway Network (cf. http://arxiv.org/abs/1505.00387).

            t = sigmoid(W_T y + b_T)
            z = t * g(W y + b) + (1 - t) * y

            where g is a nonlinearity, t is the transform gate, and (1 - t) is
            the carry gate. The gate and the transform use separate weights,
            hence the two _linear calls with distinct scopes below.
            """
            for idx in range(num_layers):
                g = f(_linear(input_, size, scope=("highway_lin_{0}".format(idx))))
                t = tf.sigmoid(_linear(input_, size, scope=("highway_gate_{0}".format(idx))) + bias)
                output = t * g + (1. - t) * input_
                input_ = output

            return output
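        # Note: a negative gate bias (the -2.0 default) pushes sigmoid(...) toward 0
        # early in training, so the carry path (1 - t) * input_ dominates at first.
        # The call below passes bias=0, leaving the gate unbiased.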
        # Embedding Layer
        with tf.device("/cpu:0"), tf.name_scope("embedding"):
            # Use randomly initialized word vectors by default;
            # they can also be replaced by word vectors pretrained on our own corpus.
            if pretrained_embedding is None:
                self.embedding = tf.Variable(tf.random_uniform([vocab_size, embedding_size], minval=-1.0, maxval=1.0,
                                                               dtype=tf.float32), trainable=True, name="embedding")
            else:
                if embedding_type == 0:
                    self.embedding = tf.constant(pretrained_embedding, dtype=tf.float32, name="embedding")
                elif embedding_type == 1:
                    self.embedding = tf.Variable(pretrained_embedding, trainable=True,
                                                 dtype=tf.float32, name="embedding")
            self.embedded_sentence = tf.nn.embedding_lookup(self.embedding, self.input_x)

        # Average Vectors
        self.embedded_sentence_average = tf.reduce_mean(self.embedded_sentence, axis=1)  # [batch_size, embedding_size]
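        # Averaging the word embeddings over the sequence is the core fastText
        # sentence representation: order-insensitive, but cheap to compute.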
        # Highway Layer
        with tf.name_scope("highway"):
            self.highway = _highway_layer(self.embedded_sentence_average,
                                          self.embedded_sentence_average.get_shape()[1], num_layers=1, bias=0)

        # Add dropout
        with tf.name_scope("dropout"):
            self.h_drop = tf.nn.dropout(self.highway, self.dropout_keep_prob)
        # Final scores
        with tf.name_scope("output"):
            W = tf.Variable(tf.truncated_normal(shape=[embedding_size, num_classes],
                                                stddev=0.1, dtype=tf.float32), name="W")
            b = tf.Variable(tf.constant(value=0.1, shape=[num_classes], dtype=tf.float32), name="b")
            self.logits = tf.nn.xw_plus_b(self.h_drop, W, b, name="logits")
            self.scores = tf.sigmoid(self.logits, name="scores")
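            # A sigmoid per class (rather than a softmax over classes) scores each
            # label independently, i.e. this head is set up for multi-label
            # classification, matching the sigmoid cross-entropy loss below.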
        # Calculate mean cross-entropy loss, L2 loss
        with tf.name_scope("loss"):
            losses = tf.nn.sigmoid_cross_entropy_with_logits(labels=self.input_y, logits=self.logits)
            losses = tf.reduce_mean(tf.reduce_sum(losses, axis=1), name="sigmoid_losses")
            l2_losses = tf.add_n([tf.nn.l2_loss(tf.cast(v, tf.float32)) for v in tf.trainable_variables()],
                                 name="l2_losses") * l2_reg_lambda
            self.loss = tf.add(losses, l2_losses, name="loss")
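

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file): builds the graph and
# runs one forward pass on random data under TensorFlow 1.x. The hyperparameter
# values below are illustrative assumptions, not the repository's settings.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np

    model = TextFAST(sequence_length=100, vocab_size=5000, embedding_type=0,
                     embedding_size=128, num_classes=10, l2_reg_lambda=0.0)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        x = np.random.randint(0, 5000, size=(8, 100))                  # fake token ids
        y = np.random.randint(0, 2, size=(8, 10)).astype(np.float32)   # fake multi-hot labels
        scores, loss = sess.run(
            [model.scores, model.loss],
            feed_dict={model.input_x: x, model.input_y: y,
                       model.dropout_keep_prob: 1.0, model.is_training: False})
        print("scores:", scores.shape, "loss:", loss)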