Skip to content

Commit

Permalink
compatible with tensorflow 1.0
Browse files Browse the repository at this point in the history
  • Loading branch information
Matheus Della Croce Oliveira committed Mar 3, 2017
1 parent 757b3a7 commit aa50f41
Showing 1 changed file with 4 additions and 4 deletions.
8 changes: 4 additions & 4 deletions text_cnn.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,10 +21,10 @@ def __init__(

# Embedding layer
with tf.device('/cpu:0'), tf.name_scope("embedding"):
W = tf.Variable(
self.W = tf.Variable(
tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
name="W")
self.embedded_chars = tf.nn.embedding_lookup(W, self.input_x)
self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)
self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)

# Create a convolution + maxpool layer for each filter size
Expand Down Expand Up @@ -54,7 +54,7 @@ def __init__(

# Combine all the pooled features
num_filters_total = num_filters * len(filter_sizes)
self.h_pool = tf.concat(3, pooled_outputs)
self.h_pool = tf.concat(pooled_outputs, 3)
self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])

# Add dropout
Expand All @@ -75,7 +75,7 @@ def __init__(

# Calculate mean cross-entropy loss
with tf.name_scope("loss"):
losses = tf.nn.softmax_cross_entropy_with_logits(self.scores, self.input_y)
losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss

# Accuracy
Expand Down

0 comments on commit aa50f41

Please sign in to comment.