设为首页 加入收藏

TOP

学习笔记TF035:实现基于LSTM语言模型(五)
2017-09-30 17:00:23 】 浏览:8153
Tags:学习 笔记 TF035: 实现 基于 LSTM 语言 模型
    # NOTE(review): this excerpt begins midway through PTBModel.__init__ —
    # the class header and the earlier graph-construction code (embedding,
    # LSTM cells, `outputs`/`state`) are on a previous page of the article.

    # Stack the per-step LSTM outputs into a [batch_size * num_steps, size]
    # matrix so one softmax projection scores every time step at once.
    output = tf.reshape(tf.concat(outputs, 1), [-1, size])
    softmax_w = tf.get_variable(
        "softmax_w", [size, vocab_size], dtype=tf.float32)
    softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype=tf.float32)
    # Unnormalized log-probabilities over the vocabulary for each position.
    logits = tf.matmul(output, softmax_w) + softmax_b
    # Per-target cross-entropy with uniform weight 1.0 on every position.
    loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
        [logits],
        [tf.reshape(input_.targets, [-1])],
        [tf.ones([batch_size * num_steps], dtype=tf.float32)])
    # Total loss is normalized by batch size only (not by num_steps).
    self._cost = cost = tf.reduce_sum(loss) / batch_size
    self._final_state = state

    # Evaluation/test graphs stop here; only training needs an optimizer.
    if not is_training:
      return

    # Learning rate is a non-trainable variable so it can be reassigned
    # between epochs via assign_lr() below.
    self._lr = tf.Variable(0.0, trainable=False)
    tvars = tf.trainable_variables()
    # Clip the global gradient norm to config.max_grad_norm to avoid
    # exploding gradients through the unrolled LSTM.
    grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),
                                      config.max_grad_norm)
    optimizer = tf.train.GradientDescentOptimizer(self._lr)
    self._train_op = optimizer.apply_gradients(
        zip(grads, tvars),
        global_step=tf.contrib.framework.get_or_create_global_step())

    # Placeholder + assign op used by assign_lr() to change the rate at run
    # time without rebuilding the graph.
    self._new_lr = tf.placeholder(
        tf.float32, shape=[], name="new_learning_rate")
    self._lr_update = tf.assign(self._lr, self._new_lr)

  def assign_lr(self, session, lr_value):
    """Set the model's learning rate to `lr_value` in the given session."""
    session.run(self._lr_update, feed_dict={self._new_lr: lr_value})

  # Read-only accessors exposing internal graph nodes to the training loop.
  @property
  def input(self):
    return self._input

  @property
  def initial_state(self):
    return self._initial_state

  @property
  def cost(self):
    return self._cost

  @property
  def final_state(self):
    return self._final_state

  @property
  def lr(self):
    return self._lr

  @property
  def train_op(self):
    return self._train_op


class SmallConfig(object):
  """Small config."""
  init_scale = 0.1      # scale of the uniform weight initializer
  learning_rate = 1.0   # initial SGD learning rate
  max_grad_norm = 5     # gradient-clipping threshold
  num_layers = 2        # number of stacked LSTM layers
  num_steps = 20        # unroll length (BPTT steps)
  hidden_size = 200     # LSTM hidden units
  max_epoch = 4         # epochs run at the initial learning rate
  max_max_epoch = 13    # total number of training epochs
  keep_prob = 1.0       # dropout keep probability (1.0 = no dropout)
  lr_decay = 0.5        # rate decay applied after max_epoch
  batch_size = 20
  vocab_size = 10000


class MediumConfig(object):
  """Medium config."""
  init_scale = 0.05
  learning_rate = 1.0
  max_grad_norm = 5
  num_layers = 2
  num_steps = 35
  hidden_size = 650
  max_epoch = 6
  max_max_epoch = 39
  keep_prob = 0.5
  lr_decay = 0.8
  batch_size = 20
  vocab_size = 10000


class LargeConfig(object):
  """Large config."""
  init_scale = 0.04
  learning_rate = 1.0
  max_grad_norm = 10
  num_layers = 2
  num_steps = 35
  hidden_size = 1500
  max_epoch = 14
  max_max_epoch = 55
  keep_prob = 0.35
  lr_decay = 1 / 1.15
  batch_size = 20
  vocab_size = 10000


# NOTE(review): the excerpt is cut off below — the next config class
# continues on the following page of the article.
class
首页 上一页 2 3 4 5 6 下一页 尾页 5/6/6
【打印繁体】【投稿】【收藏】 【推荐】【举报】【评论】 【关闭】 【返回顶部
上一篇day5模块学习--re正则模块 下一篇Python 3 学习笔记(四)----字符..

最新文章

热门文章

Hot 文章

Python

C 语言

C++基础

大数据基础

linux编程基础

C/C++面试题目