github.com/torch/optim.git
author     koray kavukcuoglu <koray@kavukcuoglu.org>  2015-04-17 12:24:50 +0300
committer  koray kavukcuoglu <koray@kavukcuoglu.org>  2015-04-17 12:24:50 +0300
commit     446de4f45f3e901adc889f4d6a49051028e437e8 (patch)
tree       5d44d511c5cd6a3bf24ccac182468ec6526a130f
parent     e97f00706f5ac4b38909ae64e2afeaddd6409cb0 (diff)
parent     695ef7b7765453b2a3056ff87fc9719e23392c40 (diff)
Merge pull request #57 from karpathy/master
rmsprop improved
-rw-r--r--  rmsprop.lua            33
-rw-r--r--  test/test_rmsprop.lua  23
2 files changed, 38 insertions, 18 deletions
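
For orientation (a summary of the diff below, not part of the original page): the removed code stepped with x:add(-lr, state.tmp), i.e. by the clamped inverse root of the mean square alone, without a gradient factor, while the new code implements the standard RMSProp update. Writing g for dfdx, \eta for the learning rate and \alpha for the smoothing constant, the patched update is:

    m \leftarrow \alpha\, m + (1 - \alpha)\, g^2
    x \leftarrow x - \eta\, \frac{g}{\sqrt{m} + \epsilon}
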
diff --git a/rmsprop.lua b/rmsprop.lua
index b6baf96..f0e6310 100644
--- a/rmsprop.lua
+++ b/rmsprop.lua
@@ -9,11 +9,10 @@ ARGS:
- 'config.learningRate' : learning rate
- 'config.alpha' : smoothing constant
- 'config.epsilon' : value with which to initialise m
-- 'config.epsilon2' : stablisation to prevent mean square going to zero
-- 'config.max_gain' : stabilisation to prevent lr multiplier exploding
-- 'config.min_gain' : stabilisation to prevent lr multiplier exploding
-- 'state = {m, dfdx_sq}' : a table describing the state of the optimizer; after each
- call the state is modified
+- 'state' : a table describing the state of the optimizer;
+ after each call the state is modified
+- 'state.m' : leaky sum of squares of parameter gradients,
+- 'state.tmp' : and the square root (with epsilon smoothing)
RETURN:
- `x` : the new x vector
@@ -25,29 +24,27 @@ function optim.rmsprop(opfunc, x, config, state)
-- (0) get/update state
local config = config or {}
local state = state or config
- local lr = config.learningRate or 1e-4
- local alpha = config.alpha or 0.998
+ local lr = config.learningRate or 1e-2
+ local alpha = config.alpha or 0.99
local epsilon = config.epsilon or 1e-8
- local epsilon2 = config.epsilon2 or 1e-8
- local max_gain = config.max_gain or 1000
- local min_gain = config.min_gain or 1e-8
-- (1) evaluate f(x) and df/dx
local fx, dfdx = opfunc(x)
-- (2) initialize mean square values and square gradient storage
- state.m = state.m or torch.Tensor():typeAs(dfdx):resizeAs(dfdx):fill(epsilon)
- state.tmp = state.tmp or x.new(dfdx:size()):zero()
+ if not state.m then
+ state.m = torch.Tensor():typeAs(x):resizeAs(dfdx):zero()
+ state.tmp = torch.Tensor():typeAs(x):resizeAs(dfdx)
+ end
- -- (3) calculate new mean squared values
+ -- (3) calculate new (leaky) mean squared values
state.m:mul(alpha)
- state.m:addcmul(1.0-alpha,dfdx,dfdx):add(epsilon2)
+ state.m:addcmul(1.0-alpha, dfdx, dfdx)
-- (4) perform update
- state.tmp:copy(state.m):pow(-0.5):clamp(min_gain, max_gain)
- x:add(-lr, state.tmp)
+ state.tmp:sqrt(state.m):add(epsilon)
+ x:addcdiv(-lr, dfdx, state.tmp)
-- return x*, f(x) before optimization
- return x, {fx}, state.tmp
+ return x, {fx}
end
-
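
Below is a minimal usage sketch of the updated routine (illustrative, not part of this commit): the quadratic objective, tensor size, and iteration count are arbitrary, and it assumes a working Torch7 install with the optim package on the path.

require 'torch'
require 'optim'

-- minimize f(x) = x . x, whose gradient is 2x
local function opfunc(x)
   return x:dot(x), x:clone():mul(2)
end

local x = torch.randn(5)
local config = {learningRate = 1e-2, alpha = 0.99, epsilon = 1e-8}
local state = {}
for i = 1, 500 do
   x = optim.rmsprop(opfunc, x, config, state)
end
print(x:norm())  -- should approach zero, up to oscillation at the scale of lr

Note that the same state table is passed on every call: it persists state.m and state.tmp across iterations, matching the docstring above, and is what lets the leaky mean square accumulate.
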
diff --git a/test/test_rmsprop.lua b/test/test_rmsprop.lua
new file mode 100644
index 0000000..069810f
--- /dev/null
+++ b/test/test_rmsprop.lua
@@ -0,0 +1,23 @@
+require 'torch'
+require 'optim'
+
+require 'rosenbrock'
+require 'l2'
+
+x = torch.Tensor(2):fill(0)
+fx = {}
+
+config = {learningRate=5e-4}
+for i = 1,10001 do
+ x,f=optim.rmsprop(rosenbrock,x,config)
+ if (i-1)%1000 == 0 then
+ table.insert(fx,f[1])
+ end
+end
+
+print()
+print('Rosenbrock test')
+print()
+print('x=');print(x)
+print('fx=')
+for i=1,#fx do print((i-1)*1000+1,fx[i]); end
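
Assuming a standard Torch7 install, the new test can be run with the th interpreter from the test/ directory (so that the relative require 'rosenbrock' and require 'l2' calls resolve), e.g. cd test && th test_rmsprop.lua; it prints the final iterate x and the objective value sampled every 1000 iterations.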