from __future__ import absolute_import
import tensorflow as tf
__author__ = 'buriburisuri@gmail.com'
# noinspection PyAbstractClass
class AdaMaxOptimizer(tf.train.Optimizer):
    r"""Optimizer that implements the AdaMax algorithm.

    See [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)
    ([pdf](http://arxiv.org/pdf/1412.6980.pdf)).

    Excerpted from https://github.com/openai/iaf/blob/master/tf_utils/adamax.py

    @@__init__
    """
    def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, use_locking=False, name="Adamax"):
        super(AdaMaxOptimizer, self).__init__(use_locking, name)
        self._lr = learning_rate
        self._beta1 = beta1
        self._beta2 = beta2
        # Tensor versions of the constructor arguments, created in _prepare().
        self._lr_t = None
        self._beta1_t = None
        self._beta2_t = None

    def _prepare(self):
        self._lr_t = tf.convert_to_tensor(self._lr, name="learning_rate")
        self._beta1_t = tf.convert_to_tensor(self._beta1, name="beta1")
        self._beta2_t = tf.convert_to_tensor(self._beta2, name="beta2")

    def _create_slots(self, var_list):
        # Create per-variable slots. Relative to the paper's notation, "v"
        # here stores the first moment and "m" the exponentially weighted
        # infinity norm, following the openai/iaf code.
        for v in var_list:
            self._zeros_slot(v, "m", self._name)
            self._zeros_slot(v, "v", self._name)

    def _apply_dense(self, grad, var):
        lr_t = tf.cast(self._lr_t, var.dtype.base_dtype)
        beta1_t = tf.cast(self._beta1_t, var.dtype.base_dtype)
        beta2_t = tf.cast(self._beta2_t, var.dtype.base_dtype)
        if var.dtype.base_dtype == tf.float16:
            eps = 1e-7  # Can't use 1e-8 due to float16 underflow.
        else:
            eps = 1e-8
        # First moment estimate: v_t = beta1 * v + (1 - beta1) * grad.
        v = self.get_slot(var, "v")
        v_t = v.assign(beta1_t * v + (1. - beta1_t) * grad)
        # Weighted infinity norm: m_t = max(beta2 * m + eps, |grad|).
        m = self.get_slot(var, "m")
        m_t = m.assign(tf.maximum(beta2_t * m + eps, tf.abs(grad)))
        # Parameter update: var <- var - lr * v_t / m_t.
        g_t = v_t / m_t
        var_update = tf.assign_sub(var, lr_t * g_t)
        return tf.group(*[var_update, m_t, v_t])

    def _apply_sparse(self, grad, var):
        # Fall back to the dense update path; TF implicitly converts the
        # IndexedSlices gradient to a dense tensor in the math ops above.
        return self._apply_dense(grad, var)
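
# A minimal usage sketch (assumes TF 1.x graph mode; `loss` and `num_steps`
# are illustrative placeholders, not part of this module):
#
#   train_op = AdaMaxOptimizer(learning_rate=0.001).minimize(loss)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       for _ in range(num_steps):
#           sess.run(train_op)
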
# noinspection PyAbstractClass
class MaxPropOptimizer(tf.train.Optimizer):
    r"""Optimizer that implements the MaxProp algorithm by buriburisuri@gmail.com.
    """
    def __init__(self, learning_rate=0.001, beta2=0.999, use_locking=False, name="MaxProp"):
        super(MaxPropOptimizer, self).__init__(use_locking, name)
        self._lr = learning_rate
        self._beta2 = beta2
        # Tensor versions of the constructor arguments, created in _prepare().
        self._lr_t = None
        self._beta2_t = None

    def _prepare(self):
        self._lr_t = tf.convert_to_tensor(self._lr, name="learning_rate")
        self._beta2_t = tf.convert_to_tensor(self._beta2, name="beta2")

    def _create_slots(self, var_list):
        # Create a slot for the exponentially weighted infinity norm.
        for v in var_list:
            self._zeros_slot(v, "m", self._name)

    def _apply_dense(self, grad, var):
        lr_t = tf.cast(self._lr_t, var.dtype.base_dtype)
        beta2_t = tf.cast(self._beta2_t, var.dtype.base_dtype)
        if var.dtype.base_dtype == tf.float16:
            eps = 1e-7  # Can't use 1e-8 due to float16 underflow.
        else:
            eps = 1e-8
        # Weighted infinity norm: m_t = max(beta2 * m + eps, |grad|).
        m = self.get_slot(var, "m")
        m_t = m.assign(tf.maximum(beta2_t * m + eps, tf.abs(grad)))
        # Parameter update: var <- var - lr * grad / m_t.
        g_t = grad / m_t
        var_update = tf.assign_sub(var, lr_t * g_t)
        return tf.group(*[var_update, m_t])

    def _apply_sparse(self, grad, var):
        # Fall back to the dense update path; TF implicitly converts the
        # IndexedSlices gradient to a dense tensor in the math ops above.
        return self._apply_dense(grad, var)
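
# As with AdaMaxOptimizer above, a minimal usage sketch (TF 1.x graph mode
# assumed; `loss` is an illustrative placeholder):
#
#   train_op = MaxPropOptimizer(learning_rate=0.001, beta2=0.999).minimize(loss)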