diff --git a/dlnd_language_translation.html b/dlnd_language_translation.html
new file mode 100644
index 0000000..dcf513f
--- /dev/null
+++ b/dlnd_language_translation.html
@@ -0,0 +1,13547 @@
+In this project, you're going to take a peek into the realm of neural network machine translation. You'll train a sequence-to-sequence model on a dataset of English and French sentences that can translate new sentences from English to French.
+Since training a model on the entire English-to-French corpus would take a very long time, we have provided you with a small portion of the English corpus.
+ +"""
+DON'T MODIFY ANYTHING IN THIS CELL
+"""
+import helper
+import problem_unittests as tests
+
+source_path = '/data/small_vocab_en'
+target_path = '/data/small_vocab_fr'
+source_text = helper.load_data(source_path)
+target_text = helper.load_data(target_path)
+
Play around with view_sentence_range to view different parts of the data.
+ +view_sentence_range = (0, 10)
+
+"""
+DON'T MODIFY ANYTHING IN THIS CELL
+"""
+import numpy as np
+
+print('Dataset Stats')
+print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()})))
+
+sentences = source_text.split('\n')
+word_counts = [len(sentence.split()) for sentence in sentences]
+print('Number of sentences: {}'.format(len(sentences)))
+print('Average number of words in a sentence: {}'.format(np.average(word_counts)))
+
+print()
+print('English sentences {} to {}:'.format(*view_sentence_range))
+print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
+print()
+print('French sentences {} to {}:'.format(*view_sentence_range))
+print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
+
As you did with other RNNs, you must turn the text into numbers so the computer can understand it. In the function text_to_ids(), you'll turn source_text and target_text from words to ids. However, you need to add the <EOS> word id at the end of target_text. This will help the neural network predict when the sentence should end.
You can get the <EOS> word id by doing:
target_vocab_to_int['<EOS>']
You can get other word ids using source_vocab_to_int and target_vocab_to_int.
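For intuition, here is a tiny worked example of the behavior the function below should have. The vocabularies and ids are made up purely for illustration; the real mappings come from the preprocessed data.

# Hypothetical vocabularies for illustration only
source_vocab_to_int = {'<PAD>': 0, '<EOS>': 1, '<UNK>': 2, '<GO>': 3, 'new': 4, 'jersey': 5}
target_vocab_to_int = {'<PAD>': 0, '<EOS>': 1, '<UNK>': 2, '<GO>': 3, 'new': 4, 'jersey': 5}

# Once text_to_ids is implemented, a call like this:
#   source_ids, target_ids = text_to_ids('new jersey', 'new jersey',
#                                         source_vocab_to_int, target_vocab_to_int)
# should produce:
#   source_ids == [[4, 5]]      # source lines keep their length
#   target_ids == [[4, 5, 1]]   # target lines get the <EOS> id appended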
def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
+ """
+ Convert source and target text to proper word ids
+ :param source_text: String that contains all the source text.
+ :param target_text: String that contains all the target text.
+ :param source_vocab_to_int: Dictionary to go from the source words to an id
+ :param target_vocab_to_int: Dictionary to go from the target words to an id
+ :return: A tuple of lists (source_id_text, target_id_text)
+ """
+
+ # Process source text: map each word to its id; words missing from the
+ # vocabulary fall back to the <UNK> id
+ source_word_ids = [[source_vocab_to_int.get(word, source_vocab_to_int['<UNK>']) for word in line.split()] for line in source_text.split('\n')]
+
+
+ # Process target text: same mapping, then append the <EOS> id to every line
+ target_word_ids = [[target_vocab_to_int.get(word, target_vocab_to_int['<UNK>']) for word in line.split()] + [target_vocab_to_int['<EOS>']] for line in target_text.split('\n')]
+
+
+ return source_word_ids, target_word_ids
+
+"""
+DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
+"""
+tests.test_text_to_ids(text_to_ids)
+
Running the code cell below will preprocess all the data and save it to file.
+ +"""
+DON'T MODIFY ANYTHING IN THIS CELL
+"""
+helper.preprocess_and_save_data(source_path, target_path, text_to_ids)
+
This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
+ +"""
+DON'T MODIFY ANYTHING IN THIS CELL
+"""
+import numpy as np
+import helper
+import problem_unittests as tests
+
+(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
+
This will check to make sure you have the correct version of TensorFlow and access to a GPU.
+ +"""
+DON'T MODIFY ANYTHING IN THIS CELL
+"""
+from distutils.version import LooseVersion
+import warnings
+import tensorflow as tf
+from tensorflow.python.layers.core import Dense
+
+# Check TensorFlow Version
+assert LooseVersion(tf.__version__) >= LooseVersion('1.1'), 'Please use TensorFlow version 1.1 or newer'
+print('TensorFlow Version: {}'.format(tf.__version__))
+
+# Check for a GPU
+if not tf.test.gpu_device_name():
+ warnings.warn('No GPU found. Please use a GPU to train your neural network.')
+else:
+ print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
+
You'll build the components necessary for a Sequence-to-Sequence model by implementing the following functions:
- model_inputs
- process_decoder_input
- encoding_layer
- decoding_layer_train
- decoding_layer_infer
- decoding_layer
- seq2seq_model

Implement the model_inputs() function to create TF Placeholders for the Neural Network. It should create placeholders for the input text, the targets, the learning rate, the dropout keep probability, the target sequence lengths, the maximum target sequence length, and the source sequence lengths.
Return the placeholders in the following tuple: (input, targets, learning rate, keep probability, target sequence length, max target sequence length, source sequence length)
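As a rough sketch of how these placeholders are consumed later (the names below mirror the training loop further down; the values are illustrative), each *_sequence_length placeholder holds one integer per example in the batch, which tells dynamic_rnn and the loss mask where each padded sequence really ends.

# Illustrative feed for one training step (see the actual training loop below):
# sess.run(train_op, {input_data: source_batch,                 # [batch_size, max_source_len] int ids
#                     targets: target_batch,                    # [batch_size, max_target_len] int ids
#                     lr: 0.001,
#                     target_sequence_length: targets_lengths,  # one length per example
#                     source_sequence_length: sources_lengths,  # one length per example
#                     keep_prob: 0.5})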
+ +def model_inputs():
+ """
+ Create TF Placeholders for input, targets, learning rate, and lengths of source and target sequences.
+ :return: Tuple (input, targets, learning rate, keep probability, target sequence length,
+ max target sequence length, source sequence length)
+ """
+
+ input_text = tf.placeholder(tf.int32, [None, None], name='input')
+ targets = tf.placeholder(tf.int32, [None, None], name='targets')
+ lr = tf.placeholder(tf.float32, name='learning_rate' )
+ keep = tf.placeholder(tf.float32, name='keep_prob')
+ target_seq_len = tf.placeholder(tf.int32, (None,), name='target_sequence_length')
+ max_target_seq_len = tf.reduce_max(target_seq_len, name='max_target_len')
+ source_seq_len = tf.placeholder(tf.int32, (None,), name='source_sequence_length')
+
+ return input_text, targets, lr, keep, target_seq_len, max_target_seq_len, source_seq_len
+
+
+"""
+DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
+"""
+tests.test_model_inputs(model_inputs)
+
Implement process_decoder_input by removing the last word id from each batch in target_data and concatenating the <GO> id to the beginning of each batch.
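A small worked example of what this function should produce (the ids are hypothetical, assuming the <GO> id is 3 and the <EOS> id is 1):

# target_data row:    [12,  7,  9,  1]   # ends with <EOS>
# decoder input row:  [ 3, 12,  7,  9]   # last id dropped, <GO> prepended
# At decoding step t the decoder is therefore fed the target word from step t-1.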
def process_decoder_input(target_data, target_vocab_to_int, batch_size):
+ """
+ Preprocess target data for decoding
+ :param target_data: Target Placeholder
+ :param target_vocab_to_int: Dictionary to go from the target words to an id
+ :param batch_size: Batch Size
+ :return: Preprocessed target data
+ """
+ # Remove the last word id from every sequence in the batch...
+ ending = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
+ # ...and prepend the <GO> id to every sequence
+ dec_input = tf.concat([tf.fill([batch_size, 1], target_vocab_to_int['<GO>']), ending], 1)
+
+ return dec_input
+
+"""
+DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
+"""
+tests.test_process_encoding_input(process_decoder_input)
+
Implement encoding_layer() to create an Encoder RNN layer:
- Embed the encoder input using tf.contrib.layers.embed_sequence
- Construct a stacked tf.contrib.rnn.LSTMCell wrapped in a tf.contrib.rnn.DropoutWrapper
- Pass the cell and the embedded input to tf.nn.dynamic_rnn()
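Before implementing it, a quick sketch of the tensor shapes involved (assuming the default batch-major layout):

# rnn_inputs:      [batch_size, max_source_len]             int32 word ids
# enc_embed_input: [batch_size, max_source_len, embed_size] after embed_sequence
# enc_output:      [batch_size, max_source_len, rnn_size]   per-timestep outputs
# enc_state:       final LSTM state of every layer -- this is what the decoder consumes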
from imp import reload
+reload(tests)
+
+def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob,
+ source_sequence_length, source_vocab_size,
+ encoding_embedding_size):
+ """
+ Create encoding layer
+ :param rnn_inputs: Inputs for the RNN
+ :param rnn_size: RNN Size
+ :param num_layers: Number of layers
+ :param keep_prob: Dropout keep probability
+ :param source_sequence_length: a list of the lengths of each sequence in the batch
+ :param source_vocab_size: vocabulary size of source data
+ :param encoding_embedding_size: embedding size of source data
+ :return: tuple (RNN output, RNN state)
+ """
+
+ enc_embed_input = tf.contrib.layers.embed_sequence(rnn_inputs, source_vocab_size, encoding_embedding_size)
+
+ # RNN cell
+ def make_cell(rnn_size):
+ cell = tf.contrib.rnn.LSTMCell(rnn_size,
+ initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))
+ # add dropout layer
+ enc_cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)
+ return enc_cell
+
+ enc_cell = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size) for _ in range(num_layers)])
+
+ enc_output, enc_state = tf.nn.dynamic_rnn(enc_cell, enc_embed_input, sequence_length=source_sequence_length, dtype=tf.float32)
+
+
+ return enc_output, enc_state
+
+"""
+DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
+"""
+tests.test_encoding_layer(encoding_layer)
+
Create a training decoding layer using:
- tf.contrib.seq2seq.TrainingHelper
- tf.contrib.seq2seq.BasicDecoder
- tf.contrib.seq2seq.dynamic_decode
+
+def decoding_layer_train(encoder_state, dec_cell, dec_embed_input,
+ target_sequence_length, max_summary_length,
+ output_layer, keep_prob):
+ """
+ Create a decoding layer for training
+ :param encoder_state: Encoder State
+ :param dec_cell: Decoder RNN Cell
+ :param dec_embed_input: Decoder embedded input
+ :param target_sequence_length: The lengths of each sequence in the target batch
+ :param max_summary_length: The length of the longest sequence in the batch
+ :param output_layer: Function to apply the output layer
+ :param keep_prob: Dropout keep probability
+ :return: BasicDecoderOutput containing training logits and sample_id
+ """
+ # Note: keep_prob is not applied here; dropout is already handled by the
+ # DropoutWrapper around the decoder cell built in decoding_layer().
+
+ # Helper for the training process. Used by BasicDecoder to read inputs.
+ training_helper = tf.contrib.seq2seq.TrainingHelper(inputs=dec_embed_input,
+ sequence_length=target_sequence_length,
+ time_major=False)
+
+
+ # Basic decoder
+ training_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell,
+ training_helper,
+ encoder_state,
+ output_layer)
+
+ # Perform dynamic decoding using the decoder
+ training_decoder_output = tf.contrib.seq2seq.dynamic_decode(training_decoder,
+ impute_finished=True,
+ maximum_iterations=max_summary_length)[0]
+
+ return training_decoder_output
+
+
+
+"""
+DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
+"""
+tests.test_decoding_layer_train(decoding_layer_train)
+
Create the inference decoder using:
- tf.contrib.seq2seq.GreedyEmbeddingHelper
- tf.contrib.seq2seq.BasicDecoder
- tf.contrib.seq2seq.dynamic_decode
def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id,
+ end_of_sequence_id, max_target_sequence_length,
+ vocab_size, output_layer, batch_size, keep_prob):
+ """
+ Create a decoding layer for inference
+ :param encoder_state: Encoder state
+ :param dec_cell: Decoder RNN Cell
+ :param dec_embeddings: Decoder embeddings
+ :param start_of_sequence_id: GO ID
+ :param end_of_sequence_id: EOS Id
+ :param max_target_sequence_length: Maximum length of target sequences
+ :param vocab_size: Size of decoder/target vocabulary
+ :param output_layer: Function to apply the output layer
+ :param batch_size: Batch size
+ :param keep_prob: Dropout keep probability
+ :return: BasicDecoderOutput containing inference logits and sample_id
+ """
+ # Start from GO
+ start_tokens = tf.tile(tf.constant([start_of_sequence_id], dtype=tf.int32), [batch_size], name='start_tokens')
+
+
+ # Helper for the inference process.
+ inference_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(dec_embeddings,
+ start_tokens,
+ end_of_sequence_id)
+
+ # Basic decoder
+ inference_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell,
+ inference_helper,
+ encoder_state,
+ output_layer)
+
+ # Perform dynamic decoding using the decoder
+ inference_decoder_output = tf.contrib.seq2seq.dynamic_decode(inference_decoder,
+ impute_finished=True,
+ maximum_iterations=max_target_sequence_length)[0]
+
+ return inference_decoder_output
+
+
+
+"""
+DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
+"""
+tests.test_decoding_layer_infer(decoding_layer_infer)
+
Implement decoding_layer() to create a Decoder RNN layer:
- Use your decoding_layer_train(encoder_state, dec_cell, dec_embed_input, target_sequence_length, max_target_sequence_length, output_layer, keep_prob) function to get the training logits.
- Use your decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id, max_target_sequence_length, vocab_size, output_layer, batch_size, keep_prob) function to get the inference logits.

Note: You'll need to use tf.variable_scope to share variables between training and inference.
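A minimal, self-contained sketch of the TF 1.x variable-sharing mechanism the note refers to (this snippet is independent of the project code):

import tensorflow as tf

with tf.variable_scope('demo'):
    w = tf.get_variable('w', shape=[2, 2])         # variable is created here

with tf.variable_scope('demo', reuse=True):
    w_shared = tf.get_variable('w', shape=[2, 2])  # the same underlying variable is returned

assert w is w_shared

The decoder below uses the same idea: the training and inference decoders are built inside the same 'decode' scope so they share one set of weights.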
+ +def decoding_layer(dec_input, encoder_state,
+ target_sequence_length, max_target_sequence_length,
+ rnn_size,
+ num_layers, target_vocab_to_int, target_vocab_size,
+ batch_size, keep_prob, decoding_embedding_size):
+ """
+ Create decoding layer
+ :param dec_input: Decoder input
+ :param encoder_state: Encoder state
+ :param target_sequence_length: The lengths of each sequence in the target batch
+ :param max_target_sequence_length: Maximum length of target sequences
+ :param rnn_size: RNN Size
+ :param num_layers: Number of layers
+ :param target_vocab_to_int: Dictionary to go from the target words to an id
+ :param target_vocab_size: Size of target vocabulary
+ :param batch_size: The size of the batch
+ :param keep_prob: Dropout keep probability
+ :param decoding_embedding_size: Decoding embedding size
+ :return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)
+ """
+
+ # 1. Decoder Embedding
+ dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, decoding_embedding_size]))
+ dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input)
+
+ # 2. Construct the decoder cell
+ def make_cell(rnn_size):
+ dec_cell = tf.contrib.rnn.LSTMCell(rnn_size,
+ initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))
+
+ # Add dropout layer
+ dec_cell = tf.contrib.rnn.DropoutWrapper(dec_cell, output_keep_prob=keep_prob)
+
+ return dec_cell
+
+ dec_cell = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size) for _ in range(num_layers)])
+
+ # 3. Dense layer to translate the decoder's output at each time
+ # step into a choice from the target vocabulary
+ output_layer = Dense(target_vocab_size,
+ kernel_initializer = tf.truncated_normal_initializer(mean = 0.0, stddev=0.1))
+
+
+ # 4. Get training and inference outputs
+
+ ## In training mode
+ with tf.variable_scope('decode'):
+
+ training_decoder_output = decoding_layer_train(encoder_state,
+ dec_cell,
+ dec_embed_input,
+ target_sequence_length,
+ max_target_sequence_length,
+ output_layer,
+ keep_prob)
+
+ ## In inference mode we reuse variables
+ with tf.variable_scope('decode') as scope:
+ scope.reuse_variables()
+
+ inference_decoder_output = decoding_layer_infer(encoder_state,
+ dec_cell,
+ dec_embeddings,
+ target_vocab_to_int['<GO>'], #start of seq ID
+ target_vocab_to_int['<EOS>'], # end of seq ID
+ max_target_sequence_length,
+ target_vocab_size,
+ output_layer,
+ batch_size,
+ keep_prob)
+
+
+
+ return training_decoder_output, inference_decoder_output
+
+
+
+"""
+DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
+"""
+tests.test_decoding_layer(decoding_layer)
+
Apply the functions you implemented above to:
- encode the input with your encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob, source_sequence_length, source_vocab_size, encoding_embedding_size) function,
- process the target data with your process_decoder_input(target_data, target_vocab_to_int, batch_size) function,
- and decode the encoded input with your decoding_layer(dec_input, enc_state, target_sequence_length, max_target_sentence_length, rnn_size, num_layers, target_vocab_to_int, target_vocab_size, batch_size, keep_prob, dec_embedding_size) function.
def seq2seq_model(input_data, target_data, keep_prob, batch_size,
+ source_sequence_length, target_sequence_length,
+ max_target_sentence_length,
+ source_vocab_size, target_vocab_size,
+ enc_embedding_size, dec_embedding_size,
+ rnn_size, num_layers, target_vocab_to_int):
+ """
+ Build the Sequence-to-Sequence part of the neural network
+ :param input_data: Input placeholder
+ :param target_data: Target placeholder
+ :param keep_prob: Dropout keep probability placeholder
+ :param batch_size: Batch Size
+ :param source_sequence_length: Sequence Lengths of source sequences in the batch
+ :param target_sequence_length: Sequence Lengths of target sequences in the batch
+ :param source_vocab_size: Source vocabulary size
+ :param target_vocab_size: Target vocabulary size
+ :param enc_embedding_size: Encoder embedding size
+ :param dec_embedding_size: Decoder embedding size
+ :param rnn_size: RNN Size
+ :param num_layers: Number of layers
+ :param target_vocab_to_int: Dictionary to go from the target words to an id
+ :return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)
+ """
+
+
+ # Pass the input data through the encoder. We'll ignore the encoder output, but use the state
+ _, enc_state = encoding_layer(input_data,
+ rnn_size,
+ num_layers,
+ keep_prob,
+ source_sequence_length,
+ source_vocab_size,
+ enc_embedding_size)
+
+ # Prepare the target sequences we'll feed to the decoder in training mode
+ dec_input = process_decoder_input(target_data, target_vocab_to_int, batch_size)
+
+
+ # Pass encoder state and decoder inputs to the decoders
+ training_decoder_output, inference_decoder_output = decoding_layer(dec_input,
+ enc_state,
+ target_sequence_length,
+ max_target_sentence_length,
+ rnn_size,
+ num_layers,
+ target_vocab_to_int,
+ target_vocab_size,
+ batch_size,
+ keep_prob,
+ dec_embedding_size)
+
+
+ return training_decoder_output, inference_decoder_output
+
+
+"""
+DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
+"""
+tests.test_seq2seq_model(seq2seq_model)
+
Tune the following parameters:
- Set epochs to the number of epochs.
- Set batch_size to the batch size.
- Set rnn_size to the size of the RNNs.
- Set num_layers to the number of layers.
- Set encoding_embedding_size to the size of the embedding for the encoder.
- Set decoding_embedding_size to the size of the embedding for the decoder.
- Set learning_rate to the learning rate.
- Set keep_probability to the Dropout keep probability.
- Set display_step to state how many steps between each debug output statement.
# Number of Epochs
+epochs = 5
+# Batch Size
+batch_size = 256
+# RNN Size
+rnn_size = 256
+# Number of Layers
+num_layers = 2
+# Embedding Size
+encoding_embedding_size = 260
+decoding_embedding_size = 260
+# Learning Rate
+learning_rate = 0.001
+# Dropout Keep Probability
+keep_probability = 0.5
+display_step = 10
+
RUN_NUMBER = 1
+LOG_DIR = '/output/run_{}/logs/'
+CHECKPOINT_DIR = '/output/run_{}/checkpoints/'
+CHECKPOINT_PATH = CHECKPOINT_DIR.format(RUN_NUMBER)
+LOG_PATH = LOG_DIR.format(RUN_NUMBER)
+
Build the graph using the neural network you implemented.
+ +"""
+DON'T MODIFY ANYTHING IN THIS CELL
+"""
+save_path = CHECKPOINT_PATH
+(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
+max_target_sentence_length = max([len(sentence) for sentence in source_int_text])
+
+train_graph = tf.Graph()
+with train_graph.as_default():
+ input_data, targets, lr, keep_prob, target_sequence_length, max_target_sequence_length, source_sequence_length = model_inputs()
+
+ #sequence_length = tf.placeholder_with_default(max_target_sentence_length, None, name='sequence_length')
+ input_shape = tf.shape(input_data)
+
+ train_logits, inference_logits = seq2seq_model(tf.reverse(input_data, [-1]),
+ targets,
+ keep_prob,
+ batch_size,
+ source_sequence_length,
+ target_sequence_length,
+ max_target_sequence_length,
+ len(source_vocab_to_int),
+ len(target_vocab_to_int),
+ encoding_embedding_size,
+ decoding_embedding_size,
+ rnn_size,
+ num_layers,
+ target_vocab_to_int)
+
+
+ training_logits = tf.identity(train_logits.rnn_output, name='logits')
+ inference_logits = tf.identity(inference_logits.sample_id, name='predictions')
+
+ masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, dtype=tf.float32, name='masks')
+
+ with tf.name_scope("optimization"):
+ # Loss function
+ cost = tf.contrib.seq2seq.sequence_loss(
+ training_logits,
+ targets,
+ masks)
+ tf.summary.scalar('cost', cost)
+
+ # Optimizer
+ optimizer = tf.train.AdamOptimizer(lr)
+
+ # Gradient Clipping
+ gradients = optimizer.compute_gradients(cost)
+ capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
+ train_op = optimizer.apply_gradients(capped_gradients)
+
+ merged = tf.summary.merge_all()
+
+
Batch and pad the source and target sequences.
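For intuition, padding a batch of id sequences to a common length looks like this (the ids are hypothetical, assuming the <PAD> id is 0):

# before padding: [[4, 5, 1], [7, 1]]      # two sentences of different length
# after padding:  [[4, 5, 1], [7, 1, 0]]   # shorter sentence padded to the batch maximum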
+ +"""
+DON'T MODIFY ANYTHING IN THIS CELL
+"""
+def pad_sentence_batch(sentence_batch, pad_int):
+ """Pad sentences with <PAD> so that each sentence of a batch has the same length"""
+ max_sentence = max([len(sentence) for sentence in sentence_batch])
+ return [sentence + [pad_int] * (max_sentence - len(sentence)) for sentence in sentence_batch]
+
+
+def get_batches(sources, targets, batch_size, source_pad_int, target_pad_int):
+ """Batch targets, sources, and the lengths of their sentences together"""
+ for batch_i in range(0, len(sources)//batch_size):
+ start_i = batch_i * batch_size
+
+ # Slice the right amount for the batch
+ sources_batch = sources[start_i:start_i + batch_size]
+ targets_batch = targets[start_i:start_i + batch_size]
+
+ # Pad
+ pad_sources_batch = np.array(pad_sentence_batch(sources_batch, source_pad_int))
+ pad_targets_batch = np.array(pad_sentence_batch(targets_batch, target_pad_int))
+
+ # Need the lengths for the _lengths parameters
+ pad_targets_lengths = []
+ for target in pad_targets_batch:
+ pad_targets_lengths.append(len(target))
+
+ pad_source_lengths = []
+ for source in pad_sources_batch:
+ pad_source_lengths.append(len(source))
+
+ yield pad_sources_batch, pad_targets_batch, pad_source_lengths, pad_targets_lengths
+
# write out the graph for tensorboard
+
+with tf.Session(graph=train_graph) as sess:
+ train_writer = tf.summary.FileWriter(LOG_PATH + '/train', sess.graph)
+ test_writer = tf.summary.FileWriter(LOG_PATH + '/test')
+
Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forums to see if anyone is having the same problem.
+ +"""
+DON'T MODIFY ANYTHING IN THIS CELL
+"""
+def get_accuracy(target, logits, _type):
+ """
+ Calculate accuracy
+ """
+ max_seq = max(target.shape[1], logits.shape[1])
+ if max_seq - target.shape[1]:
+ target = np.pad(
+ target,
+ [(0,0),(0,max_seq - target.shape[1])],
+ 'constant')
+ if max_seq - logits.shape[1]:
+ logits = np.pad(
+ logits,
+ [(0,0),(0,max_seq - logits.shape[1])],
+ 'constant')
+
+ acc = np.mean(np.equal(target, logits))
+ if _type == 'train':
+ with tf.name_scope('optimization'):
+ summary = tf.Summary(value=[tf.Summary.Value(tag="accuracy", simple_value=acc)])
+ else:
+ with tf.name_scope('validation'):
+ summary = tf.Summary(value=[tf.Summary.Value(tag="accuracy", simple_value=acc)])
+ return summary, acc
+
+# Split data to training and validation sets
+train_source = source_int_text[batch_size:]
+train_target = target_int_text[batch_size:]
+valid_source = source_int_text[:batch_size]
+valid_target = target_int_text[:batch_size]
+(valid_sources_batch, valid_targets_batch, valid_sources_lengths, valid_targets_lengths ) = next(get_batches(valid_source,
+ valid_target,
+ batch_size,
+ source_vocab_to_int['<PAD>'],
+ target_vocab_to_int['<PAD>']))
+with tf.Session(graph=train_graph) as sess:
+ sess.run(tf.global_variables_initializer())
+
+ saver = tf.train.Saver(keep_checkpoint_every_n_hours=0.5)
+
+
+ for epoch_i in range(epochs):
+
+
+ n_batches = len(train_source)//batch_size
+ for batch_i, (source_batch, target_batch, sources_lengths, targets_lengths) in enumerate(
+ get_batches(train_source, train_target, batch_size,
+ source_vocab_to_int['<PAD>'],
+ target_vocab_to_int['<PAD>'])):
+
+ iteration = epoch_i*n_batches + batch_i
+
+ summary, _, loss = sess.run(
+ [merged, train_op, cost],
+ {input_data: source_batch,
+ targets: target_batch,
+ lr: learning_rate,
+ target_sequence_length: targets_lengths,
+ source_sequence_length: sources_lengths,
+ keep_prob: keep_probability})
+
+ train_writer.add_summary(summary, iteration)
+
+ if epoch_i % 5 == 0:
+ saver.save(sess, save_path + 'ckpt', global_step=epoch_i)
+
+ if batch_i % display_step == 0 and batch_i > 0:
+
+
+ batch_train_logits = sess.run(
+ inference_logits,
+ {input_data: source_batch,
+ source_sequence_length: sources_lengths,
+ target_sequence_length: targets_lengths,
+ keep_prob: 1.0})
+
+
+ batch_valid_logits = sess.run(
+ inference_logits,
+ {input_data: valid_sources_batch,
+ source_sequence_length: valid_sources_lengths,
+ target_sequence_length: valid_targets_lengths,
+ keep_prob: 1.0})
+
+ train_acc_sum, train_acc = get_accuracy(target_batch, batch_train_logits, _type='train')
+
+ valid_acc_sum, valid_acc = get_accuracy(valid_targets_batch, batch_valid_logits, _type='test')
+
+
+ train_writer.add_summary(train_acc_sum, iteration)
+ test_writer.add_summary(valid_acc_sum, iteration)
+
+
+
+ print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.4f}, Validation Accuracy: {:>6.4f}, Loss: {:>6.4f}'
+ .format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss))
+
+
+ # Save Model
+ saver.save(sess, save_path + 'last-ckpt')
+ print('Model Trained and Saved')
+
Save the batch_size and save_path parameters for inference.
"""
+DON'T MODIFY ANYTHING IN THIS CELL
+"""
+# Save parameters for checkpoint
+helper.save_params(save_path)
+
"""
+DON'T MODIFY ANYTHING IN THIS CELL
+"""
+import tensorflow as tf
+import numpy as np
+import helper
+import problem_unittests as tests
+
+_, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess()
+load_path = helper.load_params()
+
To feed a sentence into the model for translation, you first need to preprocess it. Implement the function sentence_to_seq() to preprocess new sentences:
- Convert the sentence to lowercase
- Convert words into ids using vocab_to_int
- Convert words not in the vocabulary to the <UNK> word id
(A usage sketch appears after the test cell below.)
def sentence_to_seq(sentence, vocab_to_int):
+ """
+ Convert a sentence to a sequence of ids
+ :param sentence: String
+ :param vocab_to_int: Dictionary to go from the words to an id
+ :return: List of word ids
+ """
+
+
+ return [vocab_to_int.get(word, vocab_to_int['<UNK>']) for word in sentence.lower().split(' ')]
+
+
+
+"""
+DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
+"""
+tests.test_sentence_to_seq(sentence_to_seq)
+
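A quick usage sketch of the function above (the ids shown are hypothetical; any word missing from the vocabulary maps to the <UNK> id):

# sentence_to_seq('he saw a old yellow truck .', source_vocab_to_int)
# -> e.g. [30, 12, 8, 25, 41, 2, 5]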
This will translate translate_sentence from English to French.
translate_sentence = 'he saw a old yellow truck .'
+
+
+"""
+DON'T MODIFY ANYTHING IN THIS CELL
+"""
+translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int)
+
+loaded_graph = tf.Graph()
+with tf.Session(graph=loaded_graph) as sess:
+ # Load saved model
+ loader = tf.train.import_meta_graph(load_path + 'last-ckpt.meta')
+ loader.restore(sess, load_path + 'last-ckpt')
+
+ input_data = loaded_graph.get_tensor_by_name('input:0')
+ logits = loaded_graph.get_tensor_by_name('predictions:0')
+ target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0')
+ source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0')
+ keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
+
+ translate_logits = sess.run(logits, {input_data: [translate_sentence]*batch_size,
+ target_sequence_length: [len(translate_sentence)*2]*batch_size,
+ source_sequence_length: [len(translate_sentence)]*batch_size,
+ keep_prob: 1.0})[0]
+
+print('Input')
+print(' Word Ids: {}'.format([i for i in translate_sentence]))
+print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence]))
+
+print('\nPrediction')
+print(' Word Ids: {}'.format([i for i in translate_logits]))
+print(' French Words: {}'.format(" ".join([target_int_to_vocab[i] for i in translate_logits])))
+
You might notice that some sentences translate better than others. Since the dataset you're using only has a vocabulary of 227 English words out of the thousands used in everyday English, you're only going to see good results when the input uses those words. For this project, you don't need a perfect translation. However, if you want to create a better translation model, you'll need better data.
+You can train on the WMT10 French-English corpus. This dataset has a larger vocabulary and is richer in the topics it covers. However, it will take days to train on, so make sure you have a GPU and that the neural network is performing well on the dataset we provided. Just make sure you play with the WMT10 corpus after you've submitted this project.
+When submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as "dlnd_language_translation.ipynb" and save it as an HTML file under "File" -> "Download as". Include the "helper.py" and "problem_unittests.py" files in your submission.