
github.com/moses-smt/vowpal_wabbit.git
author    John Langford <jl@hunch.net>  2015-01-02 21:09:45 +0300
committer John Langford <jl@hunch.net>  2015-01-02 21:09:45 +0300
commit    8784ea7a1bb34e3dca02e6571cacda1ffe38aafa (patch)
tree      8e00ad57ca51a776372ed32b28608633acd863aa
parent    a51dac423995feb3d8ac57f63d7c42015fcce157 (diff)
simpler options
-rw-r--r--  vowpalwabbit/active.cc           | 14
-rw-r--r--  vowpalwabbit/active.h            |  4
-rw-r--r--  vowpalwabbit/autolink.cc         | 10
-rw-r--r--  vowpalwabbit/autolink.h          |  4
-rw-r--r--  vowpalwabbit/bfgs.cc             |  5
-rw-r--r--  vowpalwabbit/bfgs.h              |  4
-rw-r--r--  vowpalwabbit/binary.cc           |  8
-rw-r--r--  vowpalwabbit/binary.h            |  4
-rw-r--r--  vowpalwabbit/bs.cc               | 14
-rw-r--r--  vowpalwabbit/bs.h                |  2
-rw-r--r--  vowpalwabbit/cb_algs.cc          |  7
-rw-r--r--  vowpalwabbit/cb_algs.h           |  2
-rw-r--r--  vowpalwabbit/cbify.cc            |  7
-rw-r--r--  vowpalwabbit/cbify.h             |  4
-rw-r--r--  vowpalwabbit/csoaa.cc            | 17
-rw-r--r--  vowpalwabbit/csoaa.h             | 17
-rw-r--r--  vowpalwabbit/ect.cc              | 14
-rw-r--r--  vowpalwabbit/ect.h               |  5
-rw-r--r--  vowpalwabbit/ftrl_proximal.cc    | 14
-rw-r--r--  vowpalwabbit/ftrl_proximal.h     |  9
-rw-r--r--  vowpalwabbit/gd.cc               |  5
-rw-r--r--  vowpalwabbit/gd.h                |  2
-rw-r--r--  vowpalwabbit/gd_mf.cc            |  5
-rw-r--r--  vowpalwabbit/gd_mf.h             |  4
-rw-r--r--  vowpalwabbit/global_data.cc      | 27
-rw-r--r--  vowpalwabbit/global_data.h       | 11
-rw-r--r--  vowpalwabbit/kernel_svm.cc       |  5
-rw-r--r--  vowpalwabbit/kernel_svm.h        |  3
-rw-r--r--  vowpalwabbit/lda_core.cc         |  5
-rw-r--r--  vowpalwabbit/lda_core.h          |  4
-rw-r--r--  vowpalwabbit/log_multi.cc        |  7
-rw-r--r--  vowpalwabbit/log_multi.h         |  5
-rw-r--r--  vowpalwabbit/lrq.cc              |  7
-rw-r--r--  vowpalwabbit/lrq.h               |  4
-rw-r--r--  vowpalwabbit/mf.cc               | 10
-rw-r--r--  vowpalwabbit/mf.h                |  4
-rw-r--r--  vowpalwabbit/nn.cc               |  7
-rw-r--r--  vowpalwabbit/nn.h                |  5
-rw-r--r--  vowpalwabbit/noop.cc             |  4
-rw-r--r--  vowpalwabbit/noop.h              |  3
-rw-r--r--  vowpalwabbit/oaa.cc              | 20
-rw-r--r--  vowpalwabbit/oaa.h               |  3
-rw-r--r--  vowpalwabbit/parse_args.cc       | 77
-rw-r--r--  vowpalwabbit/parse_args.h        |  2
-rw-r--r--  vowpalwabbit/parse_regressor.cc  |  8
-rw-r--r--  vowpalwabbit/parse_regressor.h   |  2
-rw-r--r--  vowpalwabbit/parser.cc           | 18
-rw-r--r--  vowpalwabbit/parser.h            |  2
-rw-r--r--  vowpalwabbit/print.cc            |  6
-rw-r--r--  vowpalwabbit/print.h             |  3
-rw-r--r--  vowpalwabbit/scorer.cc           |  8
-rw-r--r--  vowpalwabbit/scorer.h            |  4
-rw-r--r--  vowpalwabbit/search.cc           | 16
-rw-r--r--  vowpalwabbit/search.h            |  2
-rw-r--r--  vowpalwabbit/sender.cc           | 10
-rw-r--r--  vowpalwabbit/sender.h            |  3
-rw-r--r--  vowpalwabbit/stagewise_poly.cc   |  7
-rw-r--r--  vowpalwabbit/stagewise_poly.h    |  5
-rw-r--r--  vowpalwabbit/topk.cc             | 10
-rw-r--r--  vowpalwabbit/topk.h              |  5
60 files changed, 256 insertions(+), 247 deletions(-)
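
The change is mechanical across every reduction: add_options() no longer returns a fresh po::variables_map but instead stores the parsed map in the new all.vm member, so each setup() drops its vm parameter and setup_base() its vm argument. A minimal sketch of the resulting pattern, assuming an illustrative DEMO reduction (the namespace, demo struct, and predict_or_learn below are not part of this commit):

	namespace DEMO {
	  struct demo { vw* all; uint32_t k; };

	  template <bool is_learn>
	  void predict_or_learn(demo&, LEARNER::base_learner& base, example& ec)
	  { if (is_learn) base.learn(ec); else base.predict(ec); }

	  LEARNER::base_learner* setup(vw& all)   // was: setup(vw& all, po::variables_map& vm)
	  {
	    po::options_description opts("Demo options");
	    opts.add_options()
	      ("demo", po::value<size_t>(), "use the demo reduction with <k> labels");
	    add_options(all, opts);                // was: vm = add_options(all, opts);
	    if (!all.vm.count("demo"))             // the parsed map now lives in all.vm
	      return NULL;                         // flag absent: defer to the next reduction

	    demo& data = calloc_or_die<demo>();
	    data.all = &all;
	    data.k = (uint32_t)all.vm["demo"].as<size_t>();
	    *all.file_options << " --demo " << data.k;  // persist the flag in the regressor

	    LEARNER::learner<demo>& l =
	      init_learner(&data, setup_base(all),      // was: setup_base(all, vm)
	                   predict_or_learn<true>, predict_or_learn<false>, data.k);
	    return make_base(l);
	  }
	}
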
diff --git a/vowpalwabbit/active.cc b/vowpalwabbit/active.cc
index 72b000dc..774b4696 100644
--- a/vowpalwabbit/active.cc
+++ b/vowpalwabbit/active.cc
@@ -151,29 +151,29 @@ namespace ACTIVE {
VW::finish_example(all,&ec);
}
- base_learner* setup(vw& all, po::variables_map& vm)
+ base_learner* setup(vw& all)
{//parse and set arguments
po::options_description opts("Active Learning options");
opts.add_options()
("active", "enable active learning")
("simulation", "active learning simulation mode")
("mellowness", po::value<float>(), "active learning mellowness parameter c_0. Default 8");
- vm = add_options(all, opts);
- if(!vm.count("active"))
+ add_options(all, opts);
+ if(!all.vm.count("active"))
return NULL;
active& data = calloc_or_die<active>();
data.active_c0 = 8;
data.all=&all;
- if (vm.count("mellowness"))
- data.active_c0 = vm["mellowness"].as<float>();
+ if (all.vm.count("mellowness"))
+ data.active_c0 = all.vm["mellowness"].as<float>();
- base_learner* base = setup_base(all,vm);
+ base_learner* base = setup_base(all);
//Create new learner
learner<active>* ret;
- if (vm.count("simulation"))
+ if (all.vm.count("simulation"))
ret = &init_learner(&data, base, predict_or_learn_simulation<true>,
predict_or_learn_simulation<false>);
else
diff --git a/vowpalwabbit/active.h b/vowpalwabbit/active.h
index d71950ab..b1145e8c 100644
--- a/vowpalwabbit/active.h
+++ b/vowpalwabbit/active.h
@@ -1,4 +1,2 @@
#pragma once
-namespace ACTIVE {
- LEARNER::base_learner* setup(vw& all, po::variables_map& vm);
-}
+namespace ACTIVE { LEARNER::base_learner* setup(vw& all); }
diff --git a/vowpalwabbit/autolink.cc b/vowpalwabbit/autolink.cc
index 44fc05ec..d78662c0 100644
--- a/vowpalwabbit/autolink.cc
+++ b/vowpalwabbit/autolink.cc
@@ -37,22 +37,22 @@ namespace ALINK {
ec.total_sum_feat_sq -= sum_sq;
}
- LEARNER::base_learner* setup(vw& all, po::variables_map& vm)
+ LEARNER::base_learner* setup(vw& all)
{
po::options_description opts("Autolink options");
opts.add_options()
("autolink", po::value<size_t>(), "create link function with polynomial d");
- vm = add_options(all,opts);
- if(!vm.count("autolink"))
+ add_options(all,opts);
+ if(!all.vm.count("autolink"))
return NULL;
autolink& data = calloc_or_die<autolink>();
- data.d = (uint32_t)vm["autolink"].as<size_t>();
+ data.d = (uint32_t)all.vm["autolink"].as<size_t>();
data.stride_shift = all.reg.stride_shift;
*all.file_options << " --autolink " << data.d;
- LEARNER::base_learner* base = setup_base(all,vm);
+ LEARNER::base_learner* base = setup_base(all);
LEARNER::learner<autolink>& ret = init_learner(&data, base, predict_or_learn<true>,
predict_or_learn<false>);
diff --git a/vowpalwabbit/autolink.h b/vowpalwabbit/autolink.h
index 3bb70dc1..e7bfcf51 100644
--- a/vowpalwabbit/autolink.h
+++ b/vowpalwabbit/autolink.h
@@ -1,4 +1,2 @@
#pragma once
-namespace ALINK {
- LEARNER::base_learner* setup(vw& all, po::variables_map& vm);
-}
+namespace ALINK { LEARNER::base_learner* setup(vw& all); }
diff --git a/vowpalwabbit/bfgs.cc b/vowpalwabbit/bfgs.cc
index 50306dd6..47be2d70 100644
--- a/vowpalwabbit/bfgs.cc
+++ b/vowpalwabbit/bfgs.cc
@@ -968,7 +968,7 @@ void save_load(bfgs& b, io_buf& model_file, bool read, bool text)
b.backstep_on = true;
}
-base_learner* setup(vw& all, po::variables_map& vm)
+base_learner* setup(vw& all)
{
po::options_description opts("LBFGS options");
opts.add_options()
@@ -977,7 +977,8 @@ base_learner* setup(vw& all, po::variables_map& vm)
("hessian_on", "use second derivative in line search")
("mem", po::value<uint32_t>()->default_value(15), "memory in bfgs")
("termination", po::value<float>()->default_value(0.001f),"Termination threshold");
- vm = add_options(all, opts);
+ add_options(all, opts);
+ po::variables_map& vm = all.vm;
if(!vm.count("bfgs") && !vm.count("conjugate_gradient"))
return NULL;
diff --git a/vowpalwabbit/bfgs.h b/vowpalwabbit/bfgs.h
index 1960662b..e66a0df0 100644
--- a/vowpalwabbit/bfgs.h
+++ b/vowpalwabbit/bfgs.h
@@ -4,6 +4,4 @@ individual contributors. All rights reserved. Released under a BSD
license as described in the file LICENSE.
*/
#pragma once
-namespace BFGS {
- LEARNER::base_learner* setup(vw& all, po::variables_map& vm);
-}
+namespace BFGS { LEARNER::base_learner* setup(vw& all); }
diff --git a/vowpalwabbit/binary.cc b/vowpalwabbit/binary.cc
index 74d894b6..2f0765db 100644
--- a/vowpalwabbit/binary.cc
+++ b/vowpalwabbit/binary.cc
@@ -28,18 +28,18 @@ namespace BINARY {
}
}
-LEARNER::base_learner* setup(vw& all, po::variables_map& vm)
+LEARNER::base_learner* setup(vw& all)
{//parse and set arguments
po::options_description opts("Binary options");
opts.add_options()
("binary", "report loss as binary classification on -1,1");
- vm = add_options(all,opts);
- if(!vm.count("binary"))
+ add_options(all,opts);
+ if(!all.vm.count("binary"))
return NULL;
//Create new learner
LEARNER::learner<char>& ret =
- LEARNER::init_learner<char>(NULL, setup_base(all,vm),
+ LEARNER::init_learner<char>(NULL, setup_base(all),
predict_or_learn<true>, predict_or_learn<false>);
return make_base(ret);
}
diff --git a/vowpalwabbit/binary.h b/vowpalwabbit/binary.h
index 609de90b..7b79946d 100644
--- a/vowpalwabbit/binary.h
+++ b/vowpalwabbit/binary.h
@@ -1,4 +1,2 @@
#pragma once
-namespace BINARY {
- LEARNER::base_learner* setup(vw& all, po::variables_map& vm);
-}
+namespace BINARY { LEARNER::base_learner* setup(vw& all); }
diff --git a/vowpalwabbit/bs.cc b/vowpalwabbit/bs.cc
index cff38145..1b35d972 100644
--- a/vowpalwabbit/bs.cc
+++ b/vowpalwabbit/bs.cc
@@ -239,31 +239,31 @@ namespace BS {
d.pred_vec.~vector();
}
- base_learner* setup(vw& all, po::variables_map& vm)
+ base_learner* setup(vw& all)
{
po::options_description opts("Bootstrap options");
opts.add_options()
("bootstrap,B", po::value<size_t>(), "bootstrap mode with k rounds by online importance resampling")
("bs_type", po::value<string>(), "prediction type {mean,vote}");
- vm = add_options(all, opts);
- if (!vm.count("bootstrap"))
+ add_options(all, opts);
+ if (!all.vm.count("bootstrap"))
return NULL;
bs& data = calloc_or_die<bs>();
data.ub = FLT_MAX;
data.lb = -FLT_MAX;
- data.B = (uint32_t)vm["bootstrap"].as<size_t>();
+ data.B = (uint32_t)all.vm["bootstrap"].as<size_t>();
//append bs with number of samples to options_from_file so it is saved to regressor later
*all.file_options << " --bootstrap " << data.B;
std::string type_string("mean");
- if (vm.count("bs_type"))
+ if (all.vm.count("bs_type"))
{
- type_string = vm["bs_type"].as<std::string>();
+ type_string = all.vm["bs_type"].as<std::string>();
if (type_string.compare("mean") == 0) {
data.bs_type = BS_TYPE_MEAN;
@@ -283,7 +283,7 @@ namespace BS {
data.pred_vec.reserve(data.B);
data.all = &all;
- learner<bs>& l = init_learner(&data, setup_base(all,vm), predict_or_learn<true>,
+ learner<bs>& l = init_learner(&data, setup_base(all), predict_or_learn<true>,
predict_or_learn<false>, data.B);
l.set_finish_example(finish_example);
l.set_finish(finish);
diff --git a/vowpalwabbit/bs.h b/vowpalwabbit/bs.h
index e05bcd05..062c195a 100644
--- a/vowpalwabbit/bs.h
+++ b/vowpalwabbit/bs.h
@@ -11,7 +11,7 @@ license as described in the file LICENSE.
namespace BS
{
- LEARNER::base_learner* setup(vw& all, po::variables_map& vm);
+ LEARNER::base_learner* setup(vw& all);
void print_result(int f, float res, float weight, v_array<char> tag, float lb, float ub);
void output_example(vw& all, example* ec, float lb, float ub);
diff --git a/vowpalwabbit/cb_algs.cc b/vowpalwabbit/cb_algs.cc
index c676f547..cf377a55 100644
--- a/vowpalwabbit/cb_algs.cc
+++ b/vowpalwabbit/cb_algs.cc
@@ -436,14 +436,15 @@ namespace CB_ALGS
VW::finish_example(all, &ec);
}
- base_learner* setup(vw& all, po::variables_map& vm)
+ base_learner* setup(vw& all)
{
po::options_description opts("CB options");
opts.add_options()
("cb", po::value<size_t>(), "Use contextual bandit learning with <k> costs")
("cb_type", po::value<string>(), "contextual bandit method to use in {ips,dm,dr}")
("eval", "Evaluate a policy rather than optimizing.");
- vm = add_options(all, opts);
+ add_options(all, opts);
+ po::variables_map& vm = all.vm;
if (!vm.count("cb"))
return NULL;
@@ -504,7 +505,7 @@ namespace CB_ALGS
all.args.push_back(ss.str());
}
- base_learner* base = setup_base(all,vm);
+ base_learner* base = setup_base(all);
if (eval)
all.p->lp = CB_EVAL::cb_eval;
else
diff --git a/vowpalwabbit/cb_algs.h b/vowpalwabbit/cb_algs.h
index 7756fc6f..0593fb6c 100644
--- a/vowpalwabbit/cb_algs.h
+++ b/vowpalwabbit/cb_algs.h
@@ -6,7 +6,7 @@ license as described in the file LICENSE.
#pragma once
//TODO: extend to handle CSOAA_LDF and WAP_LDF
namespace CB_ALGS {
- LEARNER::base_learner* setup(vw& all, po::variables_map& vm);
+ LEARNER::base_learner* setup(vw& all);
template <bool is_learn>
float get_cost_pred(vw& all, CB::cb_class* known_cost, example& ec, uint32_t index, uint32_t base)
diff --git a/vowpalwabbit/cbify.cc b/vowpalwabbit/cbify.cc
index 07e8fba1..02e7f748 100644
--- a/vowpalwabbit/cbify.cc
+++ b/vowpalwabbit/cbify.cc
@@ -371,7 +371,7 @@ namespace CBIFY {
void finish(cbify& data)
{ CB::cb_label.delete_label(&data.cb_label); }
- base_learner* setup(vw& all, po::variables_map& vm)
+ base_learner* setup(vw& all)
{//parse and set arguments
po::options_description opts("CBIFY options");
opts.add_options()
@@ -380,7 +380,8 @@ namespace CBIFY {
("epsilon",po::value<float>() ,"epsilon-greedy exploration")
("bag",po::value<size_t>() ,"bagging-based exploration")
("cover",po::value<size_t>() ,"bagging-based exploration");
- vm = add_options(all, opts);
+ add_options(all, opts);
+ po::variables_map& vm = all.vm;
if (!vm.count("cbify"))
return NULL;
@@ -397,7 +398,7 @@ namespace CBIFY {
ss << vm["cbify"].as<size_t>();
all.args.push_back(ss.str());
}
- base_learner* base = setup_base(all,vm);
+ base_learner* base = setup_base(all);
all.p->lp = MULTICLASS::mc_label;
diff --git a/vowpalwabbit/cbify.h b/vowpalwabbit/cbify.h
index aed26b75..2ea0a627 100644
--- a/vowpalwabbit/cbify.h
+++ b/vowpalwabbit/cbify.h
@@ -4,6 +4,4 @@ individual contributors. All rights reserved. Released under a BSD
license as described in the file LICENSE.
*/
#pragma once
-namespace CBIFY {
- LEARNER::base_learner* setup(vw& all, po::variables_map& vm);
-}
+namespace CBIFY { LEARNER::base_learner* setup(vw& all); }
diff --git a/vowpalwabbit/csoaa.cc b/vowpalwabbit/csoaa.cc
index f8cd55f3..1f2a1ce4 100644
--- a/vowpalwabbit/csoaa.cc
+++ b/vowpalwabbit/csoaa.cc
@@ -66,27 +66,27 @@ namespace CSOAA {
VW::finish_example(all, &ec);
}
- base_learner* setup(vw& all, po::variables_map& vm)
+ base_learner* setup(vw& all)
{
po::options_description opts("CSOAA options");
opts.add_options()
("csoaa", po::value<size_t>(), "Use one-against-all multiclass learning with <k> costs");
- vm = add_options(all, opts);
- if(!vm.count("csoaa"))
+ add_options(all, opts);
+ if(!all.vm.count("csoaa"))
return NULL;
csoaa& c = calloc_or_die<csoaa>();
c.all = &all;
//first parse for number of actions
uint32_t nb_actions = 0;
- nb_actions = (uint32_t)vm["csoaa"].as<size_t>();
+ nb_actions = (uint32_t)all.vm["csoaa"].as<size_t>();
//append csoaa with nb_actions to file_options so it is saved to regressor later
*all.file_options << " --csoaa " << nb_actions;
all.p->lp = cs_label;
all.sd->k = nb_actions;
- learner<csoaa>& l = init_learner(&c, setup_base(all,vm), predict_or_learn<true>,
+ learner<csoaa>& l = init_learner(&c, setup_base(all), predict_or_learn<true>,
predict_or_learn<false>, nb_actions);
l.set_finish_example(finish_example);
return make_base(l);
@@ -649,14 +649,15 @@ namespace LabelDict {
}
}
- base_learner* setup(vw& all, po::variables_map& vm)
+ base_learner* setup(vw& all)
{
po::options_description opts("LDF Options");
opts.add_options()
("csoaa_ldf", po::value<string>(), "Use one-against-all multiclass learning with label dependent features. Specify singleline or multiline.")
("wap_ldf", po::value<string>(), "Use weighted all-pairs multiclass learning with label dependent features. Specify singleline or multiline.")
("ldf_override", po::value<string>(), "Override singleline or multiline from csoaa_ldf or wap_ldf, eg if stored in file");
- vm = add_options(all, opts);
+ add_options(all, opts);
+ po::variables_map& vm = all.vm;
if(!vm.count("csoaa_ldf") && !vm.count("wap_ldf"))
return NULL;
@@ -714,7 +715,7 @@ namespace LabelDict {
ld.read_example_this_loop = 0;
ld.need_to_clear = false;
- learner<ldf>& l = init_learner(&ld, setup_base(all,vm), predict_or_learn<true>, predict_or_learn<false>);
+ learner<ldf>& l = init_learner(&ld, setup_base(all), predict_or_learn<true>, predict_or_learn<false>);
if (ld.is_singleline)
l.set_finish_example(finish_singleline_example);
else
diff --git a/vowpalwabbit/csoaa.h b/vowpalwabbit/csoaa.h
index 79f5e4d2..45e4edf2 100644
--- a/vowpalwabbit/csoaa.h
+++ b/vowpalwabbit/csoaa.h
@@ -4,17 +4,14 @@ individual contributors. All rights reserved. Released under a BSD
license as described in the file LICENSE.
*/
#pragma once
-namespace CSOAA {
- LEARNER::base_learner* setup(vw& all, po::variables_map& vm);
-}
+namespace CSOAA { LEARNER::base_learner* setup(vw& all); }
namespace CSOAA_AND_WAP_LDF {
- LEARNER::base_learner* setup(vw& all, po::variables_map& vm);
-
-namespace LabelDict {
- bool ec_is_example_header(example& ec); // example headers look like "0:-1" or just "shared"
- void add_example_namespaces_from_example(example& target, example& source);
- void del_example_namespaces_from_example(example& target, example& source);
-}
+ LEARNER::base_learner* setup(vw& all);
+ namespace LabelDict {
+ bool ec_is_example_header(example& ec);// example headers look like "0:-1" or just "shared"
+ void add_example_namespaces_from_example(example& target, example& source);
+ void del_example_namespaces_from_example(example& target, example& source);
+ }
}
diff --git a/vowpalwabbit/ect.cc b/vowpalwabbit/ect.cc
index a3a78ba5..6432bb35 100644
--- a/vowpalwabbit/ect.cc
+++ b/vowpalwabbit/ect.cc
@@ -362,24 +362,24 @@ namespace ECT
void finish_example(vw& all, ect&, example& ec) { MULTICLASS::finish_example(all, ec); }
- base_learner* setup(vw& all, po::variables_map& vm)
+ base_learner* setup(vw& all)
{
po::options_description opts("ECT options");
opts.add_options()
("ect", po::value<size_t>(), "Use error correcting tournament with <k> labels")
("error", po::value<size_t>(), "error in ECT");
- vm = add_options(all, opts);
- if (!vm.count("ect"))
+ add_options(all, opts);
+ if (!all.vm.count("ect"))
return NULL;
ect& data = calloc_or_die<ect>();
//first parse for number of actions
- data.k = (int)vm["ect"].as<size_t>();
+ data.k = (int)all.vm["ect"].as<size_t>();
//append ect with nb_actions to options_from_file so it is saved to regressor later
- if (vm.count("error")) {
- data.errors = (uint32_t)vm["error"].as<size_t>();
+ if (all.vm.count("error")) {
+ data.errors = (uint32_t)all.vm["error"].as<size_t>();
} else
data.errors = 0;
//append error flag to options_from_file so it is saved in regressor file later
@@ -389,7 +389,7 @@ namespace ECT
size_t wpp = create_circuit(all, data, data.k, data.errors+1);
data.all = &all;
- learner<ect>& l = init_learner(&data, setup_base(all,vm), learn, predict, wpp);
+ learner<ect>& l = init_learner(&data, setup_base(all), learn, predict, wpp);
l.set_finish_example(finish_example);
l.set_finish(finish);
diff --git a/vowpalwabbit/ect.h b/vowpalwabbit/ect.h
index 81129791..c02d3848 100644
--- a/vowpalwabbit/ect.h
+++ b/vowpalwabbit/ect.h
@@ -4,7 +4,4 @@ individual contributors. All rights reserved. Released under a BSD
license as described in the file LICENSE.
*/
#pragma once
-namespace ECT
-{
- LEARNER::base_learner* setup(vw&, po::variables_map&);
-}
+namespace ECT { LEARNER::base_learner* setup(vw&); }
diff --git a/vowpalwabbit/ftrl_proximal.cc b/vowpalwabbit/ftrl_proximal.cc
index 9c5410d7..f1b8918e 100644
--- a/vowpalwabbit/ftrl_proximal.cc
+++ b/vowpalwabbit/ftrl_proximal.cc
@@ -177,7 +177,7 @@ namespace FTRL {
ec.pred.scalar = ftrl_predict(*all,ec);
}
- base_learner* setup(vw& all, po::variables_map& vm)
+ base_learner* setup(vw& all)
{
po::options_description opts("FTRL options");
opts.add_options()
@@ -185,21 +185,21 @@ namespace FTRL {
("ftrl_alpha", po::value<float>()->default_value(0.0), "Learning rate for FTRL-proximal optimization")
("ftrl_beta", po::value<float>()->default_value(0.1), "FTRL beta")
("progressive_validation", po::value<string>()->default_value("ftrl.evl"), "File to record progressive validation for ftrl-proximal");
- vm = add_options(all, opts);
+ add_options(all, opts);
- if (!vm.count("ftrl"))
+ if (!all.vm.count("ftrl"))
return NULL;
ftrl& b = calloc_or_die<ftrl>();
b.all = &all;
- b.ftrl_beta = vm["ftrl_beta"].as<float>();
- b.ftrl_alpha = vm["ftrl_alpha"].as<float>();
+ b.ftrl_beta = all.vm["ftrl_beta"].as<float>();
+ b.ftrl_alpha = all.vm["ftrl_alpha"].as<float>();
all.reg.stride_shift = 2; // NOTE: for more parameter storage
b.progressive_validation = false;
- if (vm.count("progressive_validation")) {
- std::string filename = vm["progressive_validation"].as<string>();
+ if (all.vm.count("progressive_validation")) {
+ std::string filename = all.vm["progressive_validation"].as<string>();
b.fo = fopen(filename.c_str(), "w");
assert(b.fo != NULL);
b.progressive_validation = true;
diff --git a/vowpalwabbit/ftrl_proximal.h b/vowpalwabbit/ftrl_proximal.h
index 59bf4653..dd495d3e 100644
--- a/vowpalwabbit/ftrl_proximal.h
+++ b/vowpalwabbit/ftrl_proximal.h
@@ -3,10 +3,5 @@ Copyright (c) by respective owners including Yahoo!, Microsoft, and
individual contributors. All rights reserved. Released under a BSD
license as described in the file LICENSE.
*/
-#ifndef FTRL_PROXIMAL_H
-#define FTRL_PROXIMAL_H
-
-namespace FTRL {
- LEARNER::base_learner* setup(vw& all, po::variables_map& vm);
-}
-#endif
+#pragma once
+namespace FTRL { LEARNER::base_learner* setup(vw& all); }
diff --git a/vowpalwabbit/gd.cc b/vowpalwabbit/gd.cc
index ee6567c5..b09333e9 100644
--- a/vowpalwabbit/gd.cc
+++ b/vowpalwabbit/gd.cc
@@ -844,7 +844,7 @@ uint32_t ceil_log_2(uint32_t v)
return 1 + ceil_log_2(v >> 1);
}
-base_learner* setup(vw& all, po::variables_map& vm)
+base_learner* setup(vw& all)
{
po::options_description opts("Gradient Descent options");
opts.add_options()
@@ -853,7 +853,8 @@ base_learner* setup(vw& all, po::variables_map& vm)
("invariant", "use safe/importance aware updates.")
("normalized", "use per feature normalized updates")
("exact_adaptive_norm", "use current default invariant normalized adaptive update rule");
- vm = add_options(all, opts);
+ add_options(all, opts);
+ po::variables_map& vm = all.vm;
gd& g = calloc_or_die<gd>();
g.all = &all;
g.all->normalized_sum_norm_x = 0;
diff --git a/vowpalwabbit/gd.h b/vowpalwabbit/gd.h
index de3964eb..a5413eae 100644
--- a/vowpalwabbit/gd.h
+++ b/vowpalwabbit/gd.h
@@ -24,7 +24,7 @@ namespace GD{
void compute_update(example* ec);
void offset_train(regressor &reg, example* &ec, float update, size_t offset);
void train_one_example_single_thread(regressor& r, example* ex);
- LEARNER::base_learner* setup(vw& all, po::variables_map& vm);
+ LEARNER::base_learner* setup(vw& all);
void save_load_regressor(vw& all, io_buf& model_file, bool read, bool text);
void save_load_online_state(vw& all, io_buf& model_file, bool read, bool text);
void output_and_account_example(example* ec);
diff --git a/vowpalwabbit/gd_mf.cc b/vowpalwabbit/gd_mf.cc
index 7a09ef82..8cf5ff41 100644
--- a/vowpalwabbit/gd_mf.cc
+++ b/vowpalwabbit/gd_mf.cc
@@ -293,12 +293,13 @@ void sd_offset_update(weight* weights, size_t mask, feature* begin, feature* end
mf_train(d, ec);
}
- base_learner* setup(vw& all, po::variables_map& vm)
+ base_learner* setup(vw& all)
{
po::options_description opts("Gdmf options");
opts.add_options()
("rank", po::value<uint32_t>(), "rank for matrix factorization.");
- vm = add_options(all, opts);
+ add_options(all, opts);
+ po::variables_map& vm=all.vm;
if(!vm.count("rank"))
return NULL;
diff --git a/vowpalwabbit/gd_mf.h b/vowpalwabbit/gd_mf.h
index db093750..09623e61 100644
--- a/vowpalwabbit/gd_mf.h
+++ b/vowpalwabbit/gd_mf.h
@@ -4,6 +4,4 @@ individual contributors. All rights reserved. Released under a BSD
license as described in the file LICENSE.
*/
#pragma once
-namespace GDMF{
- LEARNER::base_learner* setup(vw& all, po::variables_map& vm);
-}
+namespace GDMF{ LEARNER::base_learner* setup(vw& all); }
diff --git a/vowpalwabbit/global_data.cc b/vowpalwabbit/global_data.cc
index 243e8657..4b8472ff 100644
--- a/vowpalwabbit/global_data.cc
+++ b/vowpalwabbit/global_data.cc
@@ -214,23 +214,42 @@ void compile_limits(vector<string> limits, uint32_t* dest, bool quiet)
}
}
-po::variables_map add_options(vw& all, po::options_description& opts)
+void add_options(vw& all, po::options_description& opts)
{
all.opts.add(opts);
po::variables_map new_vm;
-
//parse local opts once for notifications.
po::parsed_options parsed = po::command_line_parser(all.args).
style(po::command_line_style::default_style ^ po::command_line_style::allow_guessing).
options(opts).allow_unregistered().run();
po::store(parsed, new_vm);
po::notify(new_vm);
+
//parse all opts for a complete variable map.
parsed = po::command_line_parser(all.args).
style(po::command_line_style::default_style ^ po::command_line_style::allow_guessing).
options(all.opts).allow_unregistered().run();
po::store(parsed, new_vm);
- return new_vm;
+ all.vm = new_vm;
+}
+
+bool missing_required(vw& all, po::variables_map& vm)
+{
+ all.opts.add(*all.new_opts);//record required.
+ po::variables_map new_vm;
+ //parse local opts once for notifications.
+ po::parsed_options parsed = po::command_line_parser(all.args).
+ style(po::command_line_style::default_style ^ po::command_line_style::allow_guessing).
+ options(*all.new_opts).allow_unregistered().run();
+ po::store(parsed, new_vm);
+
+ if (new_vm.size() == 0) // required are missing;
+ {
+ delete all.new_opts;
+ return true;
+ }
+ else
+ return false;
}
vw::vw()
@@ -247,7 +266,7 @@ vw::vw()
reg_mode = 0;
current_pass = 0;
- reduction_stack=v_init<LEARNER::base_learner* (*)(vw&, po::variables_map&)>();
+ reduction_stack=v_init<LEARNER::base_learner* (*)(vw&)>();
data_filename = "";
diff --git a/vowpalwabbit/global_data.h b/vowpalwabbit/global_data.h
index 09ff643c..c3bb6b7f 100644
--- a/vowpalwabbit/global_data.h
+++ b/vowpalwabbit/global_data.h
@@ -198,6 +198,8 @@ struct vw {
double normalized_sum_norm_x;
po::options_description opts;
+ po::options_description* new_opts;
+ po::variables_map vm;
std::stringstream* file_options;
vector<std::string> args;
@@ -264,7 +266,7 @@ struct vw {
size_t length () { return ((size_t)1) << num_bits; };
- v_array<LEARNER::base_learner* (*)(vw&, po::variables_map&)> reduction_stack;
+ v_array<LEARNER::base_learner* (*)(vw&)> reduction_stack;
//Prediction output
v_array<int> final_prediction_sink; // set to send global predictions to.
@@ -311,4 +313,9 @@ void get_prediction(int sock, float& res, float& weight);
void compile_gram(vector<string> grams, uint32_t* dest, char* descriptor, bool quiet);
void compile_limits(vector<string> limits, uint32_t* dest, bool quiet);
int print_tag(std::stringstream& ss, v_array<char> tag);
-po::variables_map add_options(vw& all, po::options_description& opts);
+void add_options(vw& all, po::options_description& opts);
+inline po::options_description_easy_init new_options(vw& all, const char* name)
+{
+ all.new_opts = new po::options_description(name);
+ return all.new_opts->add_options();
+}
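
global_data.h also gains a new_options()/missing_required() pair that no reduction uses yet; the commented-out draft left in oaa.cc below hints at the intended two-phase shape. A speculative sketch assuming that draft (the body is a placeholder, not a working reduction):

	LEARNER::base_learner* setup(vw& all)
	{
	  // Phase 1: register only the required flag; new_options() returns the
	  // easy_init of the fresh all.new_opts description, so options chain on it.
	  new_options(all, "One-against-all options")
	    ("oaa", po::value<size_t>(), "Use one-against-all multiclass learning with <k> labels");
	  if (missing_required(all, all.vm))  // required flag absent: pass to the next reduction
	    return NULL;
	  // Phase 2: register any remaining options, then build the learner as usual.
	  return NULL;  // placeholder; a real reduction would return make_base(...)
	}
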
diff --git a/vowpalwabbit/kernel_svm.cc b/vowpalwabbit/kernel_svm.cc
index 272be528..34ebf23f 100644
--- a/vowpalwabbit/kernel_svm.cc
+++ b/vowpalwabbit/kernel_svm.cc
@@ -790,7 +790,7 @@ namespace KSVM
cerr<<"Done with finish \n";
}
- LEARNER::base_learner* setup(vw &all, po::variables_map& vm) {
+ LEARNER::base_learner* setup(vw &all) {
po::options_description opts("KSVM options");
opts.add_options()
("ksvm", "kernel svm")
@@ -806,7 +806,8 @@ namespace KSVM
("degree", po::value<int>(), "degree of poly kernel")
("lambda", po::value<double>(), "saving regularization for test time");
- vm = add_options(all, opts);
+ add_options(all, opts);
+ po::variables_map& vm = all.vm;
if (!vm.count("ksvm"))
return NULL;
string loss_function = "hinge";
diff --git a/vowpalwabbit/kernel_svm.h b/vowpalwabbit/kernel_svm.h
index 563d70e2..288e6729 100644
--- a/vowpalwabbit/kernel_svm.h
+++ b/vowpalwabbit/kernel_svm.h
@@ -4,5 +4,4 @@ individual contributors. All rights reserved. Released under a BSD (revised)
license as described in the file LICENSE.
*/
#pragma once
-namespace KSVM
-{ LEARNER::base_learner* setup(vw &all, po::variables_map& vm); }
+namespace KSVM { LEARNER::base_learner* setup(vw &all); }
diff --git a/vowpalwabbit/lda_core.cc b/vowpalwabbit/lda_core.cc
index 42cef9b5..ba7cbbc7 100644
--- a/vowpalwabbit/lda_core.cc
+++ b/vowpalwabbit/lda_core.cc
@@ -754,7 +754,7 @@ void end_examples(lda& l)
}
-base_learner* setup(vw&all, po::variables_map& vm)
+base_learner* setup(vw&all)
{
po::options_description opts("Lda options");
opts.add_options()
@@ -764,7 +764,8 @@ base_learner* setup(vw&all, po::variables_map& vm)
("lda_D", po::value<float>()->default_value(10000.), "Number of documents")
("lda_epsilon", po::value<float>()->default_value(0.001f), "Loop convergence threshold")
("minibatch", po::value<size_t>()->default_value(1), "Minibatch size, for LDA");
- vm = add_options(all, opts);
+ add_options(all, opts);
+ po::variables_map& vm= all.vm;
if(!vm.count("lda"))
return NULL;
else
diff --git a/vowpalwabbit/lda_core.h b/vowpalwabbit/lda_core.h
index 2a065783..e734fcad 100644
--- a/vowpalwabbit/lda_core.h
+++ b/vowpalwabbit/lda_core.h
@@ -4,6 +4,4 @@ individual contributors. All rights reserved. Released under a BSD
license as described in the file LICENSE.
*/
#pragma once
-namespace LDA{
- LEARNER::base_learner* setup(vw&, po::variables_map&);
-}
+namespace LDA{ LEARNER::base_learner* setup(vw&); }
diff --git a/vowpalwabbit/log_multi.cc b/vowpalwabbit/log_multi.cc
index c91907a8..a13f751e 100644
--- a/vowpalwabbit/log_multi.cc
+++ b/vowpalwabbit/log_multi.cc
@@ -498,14 +498,15 @@ namespace LOG_MULTI
void finish_example(vw& all, log_multi&, example& ec) { MULTICLASS::finish_example(all, ec); }
- base_learner* setup(vw& all, po::variables_map& vm) //learner setup
+ base_learner* setup(vw& all) //learner setup
{
po::options_description opts("Log Multi options");
opts.add_options()
("log_multi", po::value<size_t>(), "Use online tree for multiclass")
("no_progress", "disable progressive validation")
("swap_resistance", po::value<uint32_t>(), "higher = more resistance to swap, default=4");
- vm = add_options(all, opts);
+ add_options(all, opts);
+ po::variables_map& vm = all.vm;
if(!vm.count("log_multi"))
return NULL;
@@ -533,7 +534,7 @@ namespace LOG_MULTI
data.max_predictors = data.k - 1;
- learner<log_multi>& l = init_learner(&data, setup_base(all,vm), learn, predict, data.max_predictors);
+ learner<log_multi>& l = init_learner(&data, setup_base(all), learn, predict, data.max_predictors);
l.set_save_load(save_load_tree);
l.set_finish_example(finish_example);
l.set_finish(finish);
diff --git a/vowpalwabbit/log_multi.h b/vowpalwabbit/log_multi.h
index 5e1ee3bf..0660334a 100644
--- a/vowpalwabbit/log_multi.h
+++ b/vowpalwabbit/log_multi.h
@@ -4,7 +4,4 @@ individual contributors. All rights reserved. Released under a BSD
license as described in the file LICENSE.
*/
#pragma once
-namespace LOG_MULTI
-{
- LEARNER::base_learner* setup(vw& all, po::variables_map& vm);
-}
+namespace LOG_MULTI { LEARNER::base_learner* setup(vw& all); }
diff --git a/vowpalwabbit/lrq.cc b/vowpalwabbit/lrq.cc
index 99427ead..8b72f7dc 100644
--- a/vowpalwabbit/lrq.cc
+++ b/vowpalwabbit/lrq.cc
@@ -187,13 +187,14 @@ namespace LRQ {
}
}
- base_learner* setup(vw& all, po::variables_map& vm)
+ base_learner* setup(vw& all)
{//parse and set arguments
po::options_description opts("Lrq options");
opts.add_options()
("lrq", po::value<vector<string> > (), "use low rank quadratic features")
("lrqdropout", "use dropout training for low rank quadratic features");
- vm = add_options(all, opts);
+ add_options(all, opts);
+ po::variables_map& vm = all.vm;
if(!vm.count("lrq"))
return NULL;
@@ -251,7 +252,7 @@ namespace LRQ {
cerr<<endl;
all.wpp = all.wpp * (1 + maxk);
- learner<LRQstate>& l = init_learner(&lrq, setup_base(all,vm), predict_or_learn<true>,
+ learner<LRQstate>& l = init_learner(&lrq, setup_base(all), predict_or_learn<true>,
predict_or_learn<false>, 1 + maxk);
l.set_end_pass(reset_seed);
diff --git a/vowpalwabbit/lrq.h b/vowpalwabbit/lrq.h
index 376bd6e5..b08e24f4 100644
--- a/vowpalwabbit/lrq.h
+++ b/vowpalwabbit/lrq.h
@@ -1,4 +1,2 @@
#pragma once
-namespace LRQ {
- LEARNER::base_learner* setup(vw& all, po::variables_map& vm);
-}
+namespace LRQ { LEARNER::base_learner* setup(vw& all); }
diff --git a/vowpalwabbit/mf.cc b/vowpalwabbit/mf.cc
index 4c1899f3..52e5e53d 100644
--- a/vowpalwabbit/mf.cc
+++ b/vowpalwabbit/mf.cc
@@ -188,19 +188,19 @@ void finish(mf& o) {
o.sub_predictions.delete_v();
}
- base_learner* setup(vw& all, po::variables_map& vm) {
+ base_learner* setup(vw& all) {
po::options_description opts("MF options");
opts.add_options()
("new_mf", po::value<size_t>(), "rank for reduction-based matrix factorization");
- vm = add_options(all, opts);
- if(!vm.count("new_mf"))
+ add_options(all, opts);
+ if(!all.vm.count("new_mf"))
return NULL;
mf& data = calloc_or_die<mf>();
// copy global data locally
data.all = &all;
- data.rank = (uint32_t)vm["new_mf"].as<size_t>();
+ data.rank = (uint32_t)all.vm["new_mf"].as<size_t>();
// store global pairs in local data structure and clear global pairs
// for eventual calls to base learner
@@ -209,7 +209,7 @@ void finish(mf& o) {
all.random_positive_weights = true;
- learner<mf>& l = init_learner(&data, setup_base(all,vm), learn, predict<false>, 2*data.rank+1);
+ learner<mf>& l = init_learner(&data, setup_base(all), learn, predict<false>, 2*data.rank+1);
l.set_finish(finish);
return make_base(l);
}
diff --git a/vowpalwabbit/mf.h b/vowpalwabbit/mf.h
index 90ddc33a..6d4be4f3 100644
--- a/vowpalwabbit/mf.h
+++ b/vowpalwabbit/mf.h
@@ -4,6 +4,4 @@ individual contributors. All rights reserved. Released under a BSD
license as described in the file LICENSE.
*/
#pragma once
-namespace MF{
- LEARNER::base_learner* setup(vw& all, po::variables_map& vm);
-}
+namespace MF{ LEARNER::base_learner* setup(vw& all); }
diff --git a/vowpalwabbit/nn.cc b/vowpalwabbit/nn.cc
index 44876e67..c5107fa9 100644
--- a/vowpalwabbit/nn.cc
+++ b/vowpalwabbit/nn.cc
@@ -308,7 +308,7 @@ CONVERSE: // That's right, I'm using goto. So sue me.
free (n.output_layer.atomics[nn_output_namespace].begin);
}
- base_learner* setup(vw& all, po::variables_map& vm)
+ base_learner* setup(vw& all)
{
po::options_description opts("NN options");
opts.add_options()
@@ -316,7 +316,8 @@ CONVERSE: // That's right, I'm using goto. So sue me.
("inpass", "Train or test sigmoidal feedforward network with input passthrough.")
("dropout", "Train or test sigmoidal feedforward network using dropout.")
("meanfield", "Train or test sigmoidal feedforward network using mean field.");
- vm = add_options(all, opts);
+ add_options(all, opts);
+ po::variables_map& vm = all.vm;
if(!vm.count("nn"))
return NULL;
@@ -366,7 +367,7 @@ CONVERSE: // That's right, I'm using goto. So sue me.
n.save_xsubi = n.xsubi;
- base_learner* base = setup_base(all,vm);
+ base_learner* base = setup_base(all);
n.increment = base->increment;//Indexing of output layer is odd.
learner<nn>& l = init_learner(&n, base, predict_or_learn<true>,
predict_or_learn<false>, n.k+1);
diff --git a/vowpalwabbit/nn.h b/vowpalwabbit/nn.h
index 820157d7..3b000433 100644
--- a/vowpalwabbit/nn.h
+++ b/vowpalwabbit/nn.h
@@ -4,7 +4,4 @@ individual contributors. All rights reserved. Released under a BSD
license as described in the file LICENSE.
*/
#pragma once
-namespace NN
-{
- LEARNER::base_learner* setup(vw& all, po::variables_map& vm);
-}
+namespace NN { LEARNER::base_learner* setup(vw& all); }
diff --git a/vowpalwabbit/noop.cc b/vowpalwabbit/noop.cc
index 065ff777..e6d659d2 100644
--- a/vowpalwabbit/noop.cc
+++ b/vowpalwabbit/noop.cc
@@ -10,13 +10,13 @@ license as described in the file LICENSE.
namespace NOOP {
void learn(char&, LEARNER::base_learner&, example&) {}
- LEARNER::base_learner* setup(vw& all, po::variables_map& vm)
+ LEARNER::base_learner* setup(vw& all)
{
po::options_description opts("Noop options");
opts.add_options()
("noop","do no learning");
add_options(all, opts);
- if(!vm.count("noop"))
+ if(!all.vm.count("noop"))
return NULL;
return &LEARNER::init_learner<char>(NULL, learn, 1); }
diff --git a/vowpalwabbit/noop.h b/vowpalwabbit/noop.h
index ac8842e9..ed660870 100644
--- a/vowpalwabbit/noop.h
+++ b/vowpalwabbit/noop.h
@@ -4,5 +4,4 @@ individual contributors. All rights reserved. Released under a BSD
license as described in the file LICENSE.
*/
#pragma once
-namespace NOOP
-{ LEARNER::base_learner* setup(vw& all, po::variables_map& vm);}
+namespace NOOP { LEARNER::base_learner* setup(vw& all); }
diff --git a/vowpalwabbit/oaa.cc b/vowpalwabbit/oaa.cc
index 2cc78da3..f29cbcb4 100644
--- a/vowpalwabbit/oaa.cc
+++ b/vowpalwabbit/oaa.cc
@@ -62,24 +62,34 @@ namespace OAA {
void finish_example(vw& all, oaa&, example& ec) { MULTICLASS::finish_example(all, ec); }
- LEARNER::base_learner* setup(vw& all, po::variables_map& vm)
+ /*
+{
+ new_options(all, "One-against-all options")
+ ("oaa", po::value<size_t>(), "Use one-against-all multiclass learning with <k> labels");
+ if (missing_required(all,vm)) return NULL;
+ options(all)
+ ...;
+ add_options(all)
+ }*/
+
+ LEARNER::base_learner* setup(vw& all)
{
po::options_description opts("One-against-all options");
opts.add_options()
("oaa", po::value<size_t>(), "Use one-against-all multiclass learning with <k> labels");
- vm = add_options(all, opts);
- if(!vm.count("oaa"))
+ add_options(all, opts);
+ if(!all.vm.count("oaa"))
return NULL;
oaa& data = calloc_or_die<oaa>();
- data.k = vm["oaa"].as<size_t>();
+ data.k = all.vm["oaa"].as<size_t>();
data.shouldOutput = all.raw_prediction > 0;
data.all = &all;
*all.file_options << " --oaa " << data.k;
all.p->lp = MULTICLASS::mc_label;
- LEARNER::learner<oaa>& l = init_learner(&data, setup_base(all,vm), predict_or_learn<true>,
+ LEARNER::learner<oaa>& l = init_learner(&data, setup_base(all), predict_or_learn<true>,
predict_or_learn<false>, data.k);
l.set_finish_example(finish_example);
return make_base(l);
diff --git a/vowpalwabbit/oaa.h b/vowpalwabbit/oaa.h
index de1b08ab..2bc46649 100644
--- a/vowpalwabbit/oaa.h
+++ b/vowpalwabbit/oaa.h
@@ -4,5 +4,4 @@ individual contributors. All rights reserved. Released under a BSD
license as described in the file LICENSE.
*/
#pragma once
-namespace OAA
-{ LEARNER::base_learner* setup(vw& all, po::variables_map& vm); }
+namespace OAA { LEARNER::base_learner* setup(vw& all); }
diff --git a/vowpalwabbit/parse_args.cc b/vowpalwabbit/parse_args.cc
index 00b2a1d6..bc55cccb 100644
--- a/vowpalwabbit/parse_args.cc
+++ b/vowpalwabbit/parse_args.cc
@@ -178,7 +178,7 @@ void parse_affix_argument(vw&all, string str) {
free(cstr);
}
-void parse_diagnostics(vw& all, po::variables_map& vm, int argc)
+void parse_diagnostics(vw& all, int argc)
{
po::options_description diag_opt("Diagnostic options");
@@ -189,7 +189,9 @@ void parse_diagnostics(vw& all, po::variables_map& vm, int argc)
("quiet", "Don't output disgnostics and progress updates")
("help,h","Look here: http://hunch.net/~vw/ and click on Tutorial.");
- vm = add_options(all, diag_opt);
+ add_options(all, diag_opt);
+
+ po::variables_map& vm = all.vm;
if (vm.count("version")) {
/* upon direct query for version -- spit it out to stdout */
@@ -246,7 +248,7 @@ void parse_diagnostics(vw& all, po::variables_map& vm, int argc)
}
}
-void parse_source(vw& all, po::variables_map& vm)
+void parse_source(vw& all)
{
po::options_description in_opt("Input options");
@@ -263,18 +265,18 @@ void parse_source(vw& all, po::variables_map& vm)
("compressed", "use gzip format whenever possible. If a cache file is being created, this option creates a compressed cache file. A mixture of raw-text & compressed inputs are supported with autodetection.")
("no_stdin", "do not default to reading from stdin");
- vm = add_options(all, in_opt);
+ add_options(all, in_opt);
// Be friendly: if -d was left out, treat positional param as data file
po::positional_options_description p;
p.add("data", -1);
- vm = po::variables_map();
po::parsed_options pos = po::command_line_parser(all.args).
style(po::command_line_style::default_style ^ po::command_line_style::allow_guessing).
options(all.opts).positional(p).run();
- vm = po::variables_map();
- po::store(pos, vm);
+ all.vm = po::variables_map();
+ po::store(pos, all.vm);
+ po::variables_map& vm = all.vm;
//begin input source
if (vm.count("no_stdin"))
@@ -316,7 +318,7 @@ void parse_source(vw& all, po::variables_map& vm)
}
}
-void parse_feature_tweaks(vw& all, po::variables_map& vm)
+void parse_feature_tweaks(vw& all)
{
po::options_description feature_opt("Feature options");
feature_opt.add_options()
@@ -337,7 +339,8 @@ void parse_feature_tweaks(vw& all, po::variables_map& vm)
("cubic", po::value< vector<string> > (),
"Create and use cubic features");
- vm = add_options(all, feature_opt);
+ add_options(all, feature_opt);
+ po::variables_map& vm = all.vm;
//feature manipulation
string hash_function("strings");
@@ -546,7 +549,7 @@ void parse_feature_tweaks(vw& all, po::variables_map& vm)
all.add_constant = false;
}
-void parse_example_tweaks(vw& all, po::variables_map& vm)
+void parse_example_tweaks(vw& all)
{
po::options_description opts("Example options");
@@ -567,7 +570,8 @@ void parse_example_tweaks(vw& all, po::variables_map& vm)
("l1", po::value<float>(&(all.l1_lambda)), "l_1 lambda")
("l2", po::value<float>(&(all.l2_lambda)), "l_2 lambda");
- vm = add_options(all, opts);
+ add_options(all, opts);
+ po::variables_map& vm = all.vm;
if (vm.count("testonly") || all.eta == 0.)
{
@@ -622,7 +626,7 @@ void parse_example_tweaks(vw& all, po::variables_map& vm)
}
}
-void parse_output_preds(vw& all, po::variables_map& vm)
+void parse_output_preds(vw& all)
{
po::options_description out_opt("Output options");
@@ -631,7 +635,8 @@ void parse_output_preds(vw& all, po::variables_map& vm)
("raw_predictions,r", po::value< string >(), "File to output unnormalized predictions to")
;
- vm = add_options(all, out_opt);
+ add_options(all, out_opt);
+ po::variables_map& vm = all.vm;
if (vm.count("predictions")) {
if (!all.quiet)
@@ -677,7 +682,7 @@ void parse_output_preds(vw& all, po::variables_map& vm)
}
}
-void parse_output_model(vw& all, po::variables_map& vm)
+void parse_output_model(vw& all)
{
po::options_description output_model("Output model");
@@ -690,7 +695,8 @@ void parse_output_model(vw& all, po::variables_map& vm)
("output_feature_regularizer_binary", po::value< string >(&(all.per_feature_regularizer_output)), "Per feature regularization output file")
("output_feature_regularizer_text", po::value< string >(&(all.per_feature_regularizer_text)), "Per feature regularization output file, in text");
- vm = add_options(all, output_model);
+ add_options(all, output_model);
+ po::variables_map& vm = all.vm;
if (vm.count("final_regressor")) {
all.final_regressor_name = vm["final_regressor"].as<string>();
@@ -715,22 +721,22 @@ void parse_output_model(vw& all, po::variables_map& vm)
all.save_resume = true;
}
-void load_input_model(vw& all, po::variables_map& vm, io_buf& io_temp)
+void load_input_model(vw& all, io_buf& io_temp)
{
// Need to see if we have to load feature mask first or second.
// -i and -mask are from same file, load -i file first so mask can use it
- if (vm.count("feature_mask") && vm.count("initial_regressor")
- && vm["feature_mask"].as<string>() == vm["initial_regressor"].as< vector<string> >()[0]) {
+ if (all.vm.count("feature_mask") && all.vm.count("initial_regressor")
+ && all.vm["feature_mask"].as<string>() == all.vm["initial_regressor"].as< vector<string> >()[0]) {
// load rest of regressor
all.l->save_load(io_temp, true, false);
io_temp.close_file();
// set the mask, which will reuse -i file we just loaded
- parse_mask_regressor_args(all, vm);
+ parse_mask_regressor_args(all);
}
else {
// load mask first
- parse_mask_regressor_args(all, vm);
+ parse_mask_regressor_args(all);
// load rest of regressor
all.l->save_load(io_temp, true, false);
@@ -738,16 +744,16 @@ void load_input_model(vw& all, po::variables_map& vm, io_buf& io_temp)
}
}
-LEARNER::base_learner* setup_base(vw& all, po::variables_map& vm)
+LEARNER::base_learner* setup_base(vw& all)
{
- LEARNER::base_learner* ret = all.reduction_stack.pop()(all,vm);
+ LEARNER::base_learner* ret = all.reduction_stack.pop()(all);
if (ret == NULL)
- return setup_base(all,vm);
+ return setup_base(all);
else
return ret;
}
-void parse_reductions(vw& all, po::variables_map& vm)
+void parse_reductions(vw& all)
{
//Base algorithms
all.reduction_stack.push_back(GD::setup);
@@ -782,7 +788,7 @@ void parse_reductions(vw& all, po::variables_map& vm)
all.reduction_stack.push_back(Search::setup);
all.reduction_stack.push_back(BS::setup);
- all.l = setup_base(all,vm);
+ all.l = setup_base(all);
}
void add_to_args(vw& all, int argc, char* argv[])
@@ -838,11 +844,12 @@ vw* parse_args(int argc, char *argv[])
.add(weight_opt)
.add(cluster_opt);
- po::variables_map vm = add_options(*all, desc);
+ add_options(*all, desc);
+ po::variables_map& vm = all->vm;
msrand48(random_seed);
- parse_diagnostics(*all, vm, argc);
+ parse_diagnostics(*all, argc);
all->sd->weighted_unlabeled_examples = all->sd->t;
all->initial_t = (float)all->sd->t;
@@ -868,13 +875,13 @@ vw* parse_args(int argc, char *argv[])
po::notify(vm);
all->file_options->str("");
- parse_feature_tweaks(*all, vm); //feature tweaks
+ parse_feature_tweaks(*all); //feature tweaks
- parse_example_tweaks(*all, vm); //example manipulation
+ parse_example_tweaks(*all); //example manipulation
- parse_output_model(*all, vm);
+ parse_output_model(*all);
- parse_reductions(*all, vm);
+ parse_reductions(*all);
if (!all->quiet)
{
@@ -886,13 +893,13 @@ vw* parse_args(int argc, char *argv[])
cerr << "decay_learning_rate = " << all->eta_decay_rate << endl;
}
- parse_output_preds(*all, vm);
+ parse_output_preds(*all);
- load_input_model(*all, vm, io_temp);
+ load_input_model(*all, io_temp);
- parse_source(*all, vm);
+ parse_source(*all);
- enable_sources(*all, vm, all->quiet,all->numpasses);
+ enable_sources(*all, all->quiet, all->numpasses);
// force wpp to be a power of 2 to avoid 32-bit overflow
uint32_t i = 0;
diff --git a/vowpalwabbit/parse_args.h b/vowpalwabbit/parse_args.h
index 9be0b2c5..e86fe664 100644
--- a/vowpalwabbit/parse_args.h
+++ b/vowpalwabbit/parse_args.h
@@ -7,4 +7,4 @@ license as described in the file LICENSE.
#include "global_data.h"
vw* parse_args(int argc, char *argv[]);
-LEARNER::base_learner* setup_base(vw& all, po::variables_map& vm);
+LEARNER::base_learner* setup_base(vw& all);
diff --git a/vowpalwabbit/parse_regressor.cc b/vowpalwabbit/parse_regressor.cc
index 6e7cf27e..7cb6a21b 100644
--- a/vowpalwabbit/parse_regressor.cc
+++ b/vowpalwabbit/parse_regressor.cc
@@ -307,19 +307,19 @@ void parse_regressor_args(vw& all, po::variables_map& vm, io_buf& io_temp)
save_load_header(all, io_temp, true, false);
}
-void parse_mask_regressor_args(vw& all, po::variables_map& vm){
-
+void parse_mask_regressor_args(vw& all)
+{
+ po::variables_map& vm = all.vm;
if (vm.count("feature_mask")) {
size_t length = ((size_t)1) << all.num_bits;
string mask_filename = vm["feature_mask"].as<string>();
if (vm.count("initial_regressor")){
vector<string> init_filename = vm["initial_regressor"].as< vector<string> >();
if(mask_filename == init_filename[0]){//-i and -mask are from same file, just generate mask
-
return;
}
}
-
+
//all other cases, including from different file, or -i does not exist, need to read in the mask file
io_buf io_temp_mask;
io_temp_mask.open_file(mask_filename.c_str(), false, io_buf::READ);
diff --git a/vowpalwabbit/parse_regressor.h b/vowpalwabbit/parse_regressor.h
index b76b5cdc..069dd6a2 100644
--- a/vowpalwabbit/parse_regressor.h
+++ b/vowpalwabbit/parse_regressor.h
@@ -20,4 +20,4 @@ void initialize_regressor(vw& all);
void save_predictor(vw& all, std::string reg_name, size_t current_pass);
void save_load_header(vw& all, io_buf& model_file, bool read, bool text);
-void parse_mask_regressor_args(vw& all, po::variables_map& vm);
+void parse_mask_regressor_args(vw& all);
diff --git a/vowpalwabbit/parser.cc b/vowpalwabbit/parser.cc
index 65596402..c71f2956 100644
--- a/vowpalwabbit/parser.cc
+++ b/vowpalwabbit/parser.cc
@@ -405,10 +405,10 @@ void parse_cache(vw& all, po::variables_map &vm, string source,
# define MAP_ANONYMOUS MAP_ANON
#endif
-void enable_sources(vw& all, po::variables_map& vm, bool quiet, size_t passes)
+void enable_sources(vw& all, bool quiet, size_t passes)
{
all.p->input->current = 0;
- parse_cache(all, vm, all.data_filename, quiet);
+ parse_cache(all, all.vm, all.data_filename, quiet);
if (all.daemon || all.active)
{
@@ -431,8 +431,8 @@ void enable_sources(vw& all, po::variables_map& vm, bool quiet, size_t passes)
address.sin_family = AF_INET;
address.sin_addr.s_addr = htonl(INADDR_ANY);
short unsigned int port = 26542;
- if (vm.count("port"))
- port = (uint16_t)vm["port"].as<size_t>();
+ if (all.vm.count("port"))
+ port = (uint16_t)all.vm["port"].as<size_t>();
address.sin_port = htons(port);
// attempt to bind to socket
@@ -449,7 +449,7 @@ void enable_sources(vw& all, po::variables_map& vm, bool quiet, size_t passes)
}
// write port file
- if (vm.count("port_file"))
+ if (all.vm.count("port_file"))
{
socklen_t address_size = sizeof(address);
if (getsockname(all.p->bound_sock, (sockaddr*)&address, &address_size) < 0)
@@ -457,7 +457,7 @@ void enable_sources(vw& all, po::variables_map& vm, bool quiet, size_t passes)
cerr << "getsockname: " << strerror(errno) << endl;
}
ofstream port_file;
- port_file.open(vm["port_file"].as<string>().c_str());
+ port_file.open(all.vm["port_file"].as<string>().c_str());
if (!port_file.is_open())
{
cerr << "error writing port file" << endl;
@@ -474,10 +474,10 @@ void enable_sources(vw& all, po::variables_map& vm, bool quiet, size_t passes)
throw exception();
}
// write pid file
- if (vm.count("pid_file"))
+ if (all.vm.count("pid_file"))
{
ofstream pid_file;
- pid_file.open(vm["pid_file"].as<string>().c_str());
+ pid_file.open(all.vm["pid_file"].as<string>().c_str());
if (!pid_file.is_open())
{
cerr << "error writing pid file" << endl;
@@ -597,7 +597,7 @@ void enable_sources(vw& all, po::variables_map& vm, bool quiet, size_t passes)
}
all.p->resettable = all.p->write_cache || all.daemon;
}
- else // was: else if (vm.count("data"))
+ else
{
if (all.p->input->files.size() > 0)
{
diff --git a/vowpalwabbit/parser.h b/vowpalwabbit/parser.h
index c2d271d1..1c5b8f50 100644
--- a/vowpalwabbit/parser.h
+++ b/vowpalwabbit/parser.h
@@ -58,7 +58,7 @@ struct parser {
parser* new_parser();
-void enable_sources(vw& all, po::variables_map& vm, bool quiet, size_t passes);
+void enable_sources(vw& all, bool quiet, size_t passes);
bool examples_to_finish();
diff --git a/vowpalwabbit/print.cc b/vowpalwabbit/print.cc
index d8d263be..dd8cac99 100644
--- a/vowpalwabbit/print.cc
+++ b/vowpalwabbit/print.cc
@@ -41,13 +41,13 @@ namespace PRINT
cout << endl;
}
- LEARNER::base_learner* setup(vw& all, po::variables_map& vm)
+ LEARNER::base_learner* setup(vw& all)
{
po::options_description opts("Print options");
opts.add_options()
("print","print examples");
- vm = add_options(all, opts);
- if(!vm.count("print"))
+ add_options(all, opts);
+ if(!all.vm.count("print"))
return NULL;
print& p = calloc_or_die<print>();
diff --git a/vowpalwabbit/print.h b/vowpalwabbit/print.h
index 2c855eaa..affd09e8 100644
--- a/vowpalwabbit/print.h
+++ b/vowpalwabbit/print.h
@@ -4,5 +4,4 @@ individual contributors. All rights reserved. Released under a BSD
license as described in the file LICENSE.
*/
#pragma once
-namespace PRINT
-{ LEARNER::base_learner* setup(vw& all, po::variables_map& vm);}
+namespace PRINT { LEARNER::base_learner* setup(vw& all); }
diff --git a/vowpalwabbit/scorer.cc b/vowpalwabbit/scorer.cc
index c7e4519b..fa9ea417 100644
--- a/vowpalwabbit/scorer.cc
+++ b/vowpalwabbit/scorer.cc
@@ -31,17 +31,17 @@ namespace Scorer {
float id(float in) { return in; }
- LEARNER::base_learner* setup(vw& all, po::variables_map& vm)
+ LEARNER::base_learner* setup(vw& all)
{
po::options_description opts("Link options");
opts.add_options()
("link", po::value<string>()->default_value("identity"), "Specify the link function: identity, logistic or glf1");
- vm = add_options(all, opts);
-
+ add_options(all, opts);
+ po::variables_map& vm = all.vm;
scorer& s = calloc_or_die<scorer>();
s.all = &all;
- LEARNER::base_learner* base = setup_base(all,vm);
+ LEARNER::base_learner* base = setup_base(all);
LEARNER::learner<scorer>* l;
string link = vm["link"].as<string>();
diff --git a/vowpalwabbit/scorer.h b/vowpalwabbit/scorer.h
index 2d0ec294..efd95e9a 100644
--- a/vowpalwabbit/scorer.h
+++ b/vowpalwabbit/scorer.h
@@ -1,4 +1,2 @@
#pragma once
-namespace Scorer {
- LEARNER::base_learner* setup(vw& all, po::variables_map& vm);
-}
+namespace Scorer { LEARNER::base_learner* setup(vw& all); }
diff --git a/vowpalwabbit/search.cc b/vowpalwabbit/search.cc
index 5bc7c797..1275cdfa 100644
--- a/vowpalwabbit/search.cc
+++ b/vowpalwabbit/search.cc
@@ -1668,14 +1668,15 @@ namespace Search {
ret = false;
}
- void handle_condition_options(vw& vw, auto_condition_settings& acset, po::variables_map& vm) {
+ void handle_condition_options(vw& vw, auto_condition_settings& acset) {
po::options_description condition_options("Search Auto-conditioning Options");
condition_options.add_options()
("search_max_bias_ngram_length", po::value<size_t>(), "add a \"bias\" feature for each ngram up to and including this length. eg., if it's 1 (default), then you get a single feature for each conditional")
("search_max_quad_ngram_length", po::value<size_t>(), "add bias *times* input features for each ngram up to and including this length (def: 0)")
("search_condition_feature_value", po::value<float> (), "how much weight should the conditional features get? (def: 1.)");
- vm = add_options(vw, condition_options);
+ add_options(vw, condition_options);
+ po::variables_map& vm = vw.vm;
check_option<size_t>(acset.max_bias_ngram_length, vw, vm, "search_max_bias_ngram_length", false, size_equal,
"warning: you specified a different value for --search_max_bias_ngram_length than the one loaded from regressor. proceeding with loaded value: ", "");
@@ -1763,7 +1764,7 @@ namespace Search {
delete[] cstr;
}
- base_learner* setup(vw&all, po::variables_map& vm) {
+ base_learner* setup(vw&all) {
po::options_description opts("Search Options");
opts.add_options()
("search", po::value<size_t>(), "use search-based structured prediction, argument=maximum action id or 0 for LDF")
@@ -1791,7 +1792,8 @@ namespace Search {
("search_beam", po::value<size_t>(), "use beam search (arg = beam size, default 0 = no beam)")
("search_kbest", po::value<size_t>(), "size of k-best list to produce (must be <= beam size)")
;
- vm = add_options(all, opts);
+ add_options(all, opts);
+ po::variables_map& vm = all.vm;
if (!vm.count("search"))
return NULL;
@@ -1966,7 +1968,7 @@ namespace Search {
ss << vm["search"].as<size_t>();
all.args.push_back(ss.str());
}
- base_learner* base = setup_base(all,vm);
+ base_learner* base = setup_base(all);
// default to OAA labels unless the task wants to override this (which they can do in initialize)
all.p->lp = MC::mc_label;
@@ -1977,7 +1979,7 @@ namespace Search {
// set up auto-history if they want it
if (priv.auto_condition_features) {
- handle_condition_options(all, priv.acset, vm);
+ handle_condition_options(all, priv.acset);
// turn off auto-condition if it's irrelevant
if (((priv.acset.max_bias_ngram_length == 0) && (priv.acset.max_quad_ngram_length == 0)) ||
@@ -2077,7 +2079,7 @@ namespace Search {
void search::set_num_learners(size_t num_learners) { this->priv->num_learners = num_learners; }
- void search::add_program_options(po::variables_map& vm, po::options_description& opts) { vm = add_options( *this->priv->all, opts ); }
+ void search::add_program_options(po::variables_map& vw, po::options_description& opts) { add_options( *this->priv->all, opts ); }
size_t search::get_mask() { return this->priv->all->reg.weight_mask;}
size_t search::get_stride_shift() { return this->priv->all->reg.stride_shift;}
diff --git a/vowpalwabbit/search.h b/vowpalwabbit/search.h
index 08633c5e..54c35259 100644
--- a/vowpalwabbit/search.h
+++ b/vowpalwabbit/search.h
@@ -241,5 +241,5 @@ namespace Search {
bool size_equal(size_t a, size_t b);
// our interface within VW
- LEARNER::base_learner* setup(vw&, po::variables_map&);
+ LEARNER::base_learner* setup(vw&);
}
diff --git a/vowpalwabbit/sender.cc b/vowpalwabbit/sender.cc
index b943caec..0fe64fa0 100644
--- a/vowpalwabbit/sender.cc
+++ b/vowpalwabbit/sender.cc
@@ -96,20 +96,20 @@ void end_examples(sender& s)
delete s.buf;
}
- LEARNER::base_learner* setup(vw& all, po::variables_map& vm)
+ LEARNER::base_learner* setup(vw& all)
{
po::options_description opts("Sender options");
opts.add_options()
("sendto", po::value< vector<string> >(), "send examples to <host>");
- vm = add_options(all, opts);
- if(!vm.count("sendto"))
+ add_options(all, opts);
+ if(!all.vm.count("sendto"))
return NULL;
sender& s = calloc_or_die<sender>();
s.sd = -1;
- if (vm.count("sendto"))
+ if (all.vm.count("sendto"))
{
- vector<string> hosts = vm["sendto"].as< vector<string> >();
+ vector<string> hosts = all.vm["sendto"].as< vector<string> >();
open_sockets(s, hosts[0]);
}
diff --git a/vowpalwabbit/sender.h b/vowpalwabbit/sender.h
index 55f10754..b8199bf6 100644
--- a/vowpalwabbit/sender.h
+++ b/vowpalwabbit/sender.h
@@ -4,5 +4,4 @@ individual contributors. All rights reserved. Released under a BSD
license as described in the file LICENSE.
*/
#pragma once
-namespace SENDER
-{ LEARNER::base_learner* setup(vw& all, po::variables_map& vm); }
+namespace SENDER { LEARNER::base_learner* setup(vw& all); }
diff --git a/vowpalwabbit/stagewise_poly.cc b/vowpalwabbit/stagewise_poly.cc
index a40b765c..e5a46b24 100644
--- a/vowpalwabbit/stagewise_poly.cc
+++ b/vowpalwabbit/stagewise_poly.cc
@@ -656,7 +656,7 @@ namespace StagewisePoly
//#endif //DEBUG
}
- base_learner *setup(vw &all, po::variables_map &vm)
+ base_learner *setup(vw &all)
{
po::options_description opts("Stagewise poly options");
opts.add_options()
@@ -668,7 +668,8 @@ namespace StagewisePoly
("magic_argument", po::value<float>(), "magical feature flag")
#endif //MAGIC_ARGUMENT
;
- vm = add_options(all, opts);
+ add_options(all, opts);
+ po::variables_map &vm = all.vm;
if (!vm.count("stage_poly"))
return NULL;
@@ -700,7 +701,7 @@ namespace StagewisePoly
//following is so that saved models know to load us.
*all.file_options << " --stage_poly";
- learner<stagewise_poly>& l = init_learner(&poly, setup_base(all,vm), learn, predict);
+ learner<stagewise_poly>& l = init_learner(&poly, setup_base(all), learn, predict);
l.set_finish(finish);
l.set_save_load(save_load);
l.set_finish_example(finish_example);
diff --git a/vowpalwabbit/stagewise_poly.h b/vowpalwabbit/stagewise_poly.h
index 983b4382..60478e81 100644
--- a/vowpalwabbit/stagewise_poly.h
+++ b/vowpalwabbit/stagewise_poly.h
@@ -4,7 +4,4 @@ individual contributors. All rights reserved. Released under a BSD
license as described in the file LICENSE.
*/
#pragma once
-namespace StagewisePoly
-{
- LEARNER::base_learner *setup(vw &all, po::variables_map &vm);
-}
+namespace StagewisePoly { LEARNER::base_learner *setup(vw &all); }
diff --git a/vowpalwabbit/topk.cc b/vowpalwabbit/topk.cc
index e9d9fd2b..118b1d74 100644
--- a/vowpalwabbit/topk.cc
+++ b/vowpalwabbit/topk.cc
@@ -100,20 +100,20 @@ namespace TOPK {
VW::finish_example(all, &ec);
}
- LEARNER::base_learner* setup(vw& all, po::variables_map& vm)
+ LEARNER::base_learner* setup(vw& all)
{
po::options_description opts("TOP K options");
opts.add_options()
("top", po::value<size_t>(), "top k recommendation");
- vm = add_options(all,opts);
- if(!vm.count("top"))
+ add_options(all,opts);
+ if(!all.vm.count("top"))
return NULL;
topk& data = calloc_or_die<topk>();
- data.B = (uint32_t)vm["top"].as<size_t>();
+ data.B = (uint32_t)all.vm["top"].as<size_t>();
data.all = &all;
- LEARNER::learner<topk>& l = init_learner(&data, setup_base(all,vm), predict_or_learn<true>,
+ LEARNER::learner<topk>& l = init_learner(&data, setup_base(all), predict_or_learn<true>,
predict_or_learn<false>);
l.set_finish_example(finish_example);
diff --git a/vowpalwabbit/topk.h b/vowpalwabbit/topk.h
index 964ff618..6e9973ad 100644
--- a/vowpalwabbit/topk.h
+++ b/vowpalwabbit/topk.h
@@ -4,7 +4,4 @@ individual contributors. All rights reserved. Released under a BSD
license as described in the file LICENSE.
*/
#pragma once
-namespace TOPK
-{
- LEARNER::base_learner* setup(vw& all, po::variables_map& vm);
-}
+namespace TOPK { LEARNER::base_learner* setup(vw& all); }