author    | koray kavukcuoglu <koray@kavukcuoglu.org> | 2015-10-01 18:27:45 +0300
committer | koray kavukcuoglu <koray@kavukcuoglu.org> | 2015-10-01 18:27:45 +0300
commit    | 7dfaad9019f7a1808b2f1516ebde1dd775a8c3c3 (patch)
tree      | 1935094f274b99e37e7f2c0ee314fcd1009489ab
parent    | d13bd2132f314f58e0f30c31e134bef729ab4b32 (diff)
parent    | 20bb3d623166a2cac369ec55664a9165b3aa27f2 (diff)
Merge pull request #76 from fidlej/topic_adamax
Add AdaMax.
-rw-r--r-- | adamax.lua           | 60
-rw-r--r-- | init.lua             |  1
-rw-r--r-- | test/test_adamax.lua | 23
3 files changed, 84 insertions, 0 deletions
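For context, AdaMax is the infinity-norm variant of Adam from the paper linked in the patch (Kingma & Ba, http://arxiv.org/pdf/1412.6980.pdf). In that paper's notation, with gradient $g_t$ and learning rate $\alpha$, the update this patch implements is:

$$m_t = \beta_1 m_{t-1} + (1 - \beta_1)\, g_t$$
$$u_t = \max(\beta_2\, u_{t-1},\ |g_t| + \epsilon)$$
$$x_t = x_{t-1} - \frac{\alpha}{1 - \beta_1^t}\, \frac{m_t}{u_t}$$

The $\epsilon$ term (added to $|g_t|$ before the max) is this implementation's guard against division by zero; the paper's formulation omits it.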
```diff
diff --git a/adamax.lua b/adamax.lua
new file mode 100644
index 0000000..7075345
--- /dev/null
+++ b/adamax.lua
@@ -0,0 +1,60 @@
+--[[ An implementation of AdaMax http://arxiv.org/pdf/1412.6980.pdf
+
+ARGS:
+
+- 'opfunc' : a function that takes a single input (X), the point
+             of evaluation, and returns f(X) and df/dX
+- 'x'      : the initial point
+- 'config' : a table with configuration parameters for the optimizer
+- 'config.learningRate' : learning rate
+- 'config.beta1'        : first moment coefficient
+- 'config.beta2'        : second moment coefficient
+- 'config.epsilon'      : for numerical stability
+- 'state'  : a table describing the state of the optimizer;
+             after each call the state is modified
+
+RETURN:
+- `x`    : the new x vector
+- `f(x)` : the function, evaluated before the update
+
+]]
+
+function optim.adamax(opfunc, x, config, state)
+   -- (0) get/update state
+   local config = config or {}
+   local state = state or config
+   local lr = config.learningRate or 0.002
+
+   local beta1 = config.beta1 or 0.9
+   local beta2 = config.beta2 or 0.999
+   local epsilon = config.epsilon or 1e-38
+
+   -- (1) evaluate f(x) and df/dx
+   local fx, dfdx = opfunc(x)
+
+   -- Initialization
+   state.t = state.t or 0
+   -- Exponential moving average of gradient values
+   state.m = state.m or x.new(dfdx:size()):zero()
+   -- Exponential moving average of the infinity norm
+   state.u = state.u or x.new(dfdx:size()):zero()
+   -- A tmp tensor to hold the input to max()
+   state.max = state.max or x.new(2, unpack(dfdx:size():totable())):zero()
+
+   state.t = state.t + 1
+
+   -- Update biased first moment estimate
+   state.m:mul(beta1):add(1-beta1, dfdx)
+   -- Update the exponentially weighted infinity norm
+   state.max[1]:copy(state.u):mul(beta2)
+   state.max[2]:copy(dfdx):abs():add(epsilon)
+   state.u:max(state.max, 1)
+
+   local biasCorrection1 = 1 - beta1^state.t
+   local stepSize = lr/biasCorrection1
+   -- (2) update x
+   x:addcdiv(-stepSize, state.m, state.u)
+
+   -- return x*, f(x) before optimization
+   return x, {fx}
+end
diff --git a/init.lua b/init.lua
--- a/init.lua
+++ b/init.lua
@@ -13,6 +13,7 @@
 torch.include('optim', 'lbfgs.lua')
 torch.include('optim', 'adagrad.lua')
 torch.include('optim', 'rprop.lua')
 torch.include('optim', 'adam.lua')
+torch.include('optim', 'adamax.lua')
 torch.include('optim', 'rmsprop.lua')
 torch.include('optim', 'adadelta.lua')
diff --git a/test/test_adamax.lua b/test/test_adamax.lua
new file mode 100644
index 0000000..a62a9a5
--- /dev/null
+++ b/test/test_adamax.lua
@@ -0,0 +1,23 @@
+
+require 'torch'
+require 'optim'
+require 'rosenbrock'
+require 'l2'
+
+x = torch.Tensor(2):fill(0)
+fx = {}
+state = {}
+config = {}
+for i = 1,10001 do
+   x, f = optim.adamax(rosenbrock, x, config, state)
+   if (i-1) % 1000 == 0 then
+      table.insert(fx, f[1])
+   end
+end
+
+print()
+print('Rosenbrock test')
+print()
+print('x='); print(x)
+print('fx=')
+for i = 1,#fx do print((i-1)*1000+1, fx[i]) end
```
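A minimal usage sketch, beyond the Rosenbrock test above (not part of the patch; it assumes a working Torch install with this package on the path, and the quadratic objective is a made-up illustration):

```lua
require 'torch'
require 'optim'

-- Illustrative objective: f(x) = ||x - target||^2 with gradient 2*(x - target).
local target = torch.ones(5)
local function quadratic(x)
   local diff = x - target
   return diff:dot(diff), diff * 2
end

local x = torch.zeros(5)
local config = {learningRate = 0.002, beta1 = 0.9, beta2 = 0.999}
local state = {}  -- m, u, and t accumulate here across calls

for i = 1, 2000 do
   x = optim.adamax(quadratic, x, config, state)
end

print(x)  -- should approach the all-ones target
```

As with the other optim routines, `x` is updated in place, and reusing the same `state` table between calls is what carries the moving averages `m` and `u` and the timestep `t` from one iteration to the next.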