设为首页 加入收藏

TOP

学习笔记TF035:实现基于LSTM语言模型(四)
2017-09-30 17:00:23 浏览:8154
Tags:学习 笔记 TF035: 实现 基于 LSTM 语言 模型
;        "Train using 16-bit floats instead of 32bit floats")
#FLAGS = flags.FLAGS #def data_type(): #  return tf.float16 if FLAGS.use_fp16 else tf.float32 class PTBInput(object):   """The input data."""   def __init__(self, config, data, name=None):     self.batch_size = batch_size = config.batch_size     self.num_steps = num_steps = config.num_steps     self.epoch_size = ((len(data) // batch_size) - 1) // num_steps     self.input_data, self.targets = reader.ptb_producer(         data, batch_size, num_steps, name=name) class PTBModel(object):   """The PTB model."""   def __init__(self, is_training, config, input_):     self._input = input_     batch_size = input_.batch_size     num_steps = input_.num_steps     size = config.hidden_size     vocab_size = config.vocab_size     # Slightly better results can be obtained with forget gate biases     # initialized to 1 but the hyperparameters of the model would need to be     # different than reported in the paper.     def lstm_cell():       return tf.contrib.rnn.BasicLSTMCell(           size, forget_bias=0.0, state_is_tuple=True)     attn_cell = lstm_cell     if is_training and config.keep_prob < 1:       def attn_cell():         return tf.contrib.rnn.DropoutWrapper(             lstm_cell(), output_keep_prob=config.keep_prob)     cell = tf.contrib.rnn.MultiRNNCell(         [attn_cell() for _ in range(config.num_layers)], state_is_tuple=True)     self._initial_state = cell.zero_state(batch_size, tf.float32)     with tf.device("/cpu:0"):       embedding = tf.get_variable(           "embedding", [vocab_size, size], dtype=tf.float32)       inputs = tf.nn.embedding_lookup(embedding, input_.input_data)     if is_training and config.keep_prob < 1:       inputs = tf.nn.dropout(inputs, config.keep_prob)     # Simplified version of models/tutorials/rnn/rnn.py's rnn().     # This builds an unrolled LSTM for tutorial purposes only.     # In general, use the rnn() or state_saving_rnn() from rnn.py.     
#     # The alternative version of the code below is:     #     # inputs = tf.unstack(inputs, num=num_steps, axis=1)     # outputs, state = tf.nn.rnn(cell, inputs,     #                            initial_state=self._initial_state)     outputs = []     state = self._initial_state     with tf.variable_scope("RNN"):       for time_step in range(num_steps):         if time_step > 0: tf.get_variable_scope().reuse_variables()         (cell_output, state) = cell(inputs[:, time_step, :], state)         outputs.append(cell_output)    
首页 上一页 1 2 3 4 5 6 下一页 尾页 4/6/6
【打印繁体】【投稿】【收藏】 【推荐】【举报】【评论】 【关闭】 【返回顶部
上一篇day5模块学习--re正则模块 下一篇Python 3 学习笔记(四)----字符..

最新文章

热门文章

Hot 文章

Python

C 语言

C++基础

大数据基础

linux编程基础

C/C++面试题目