github.com/moses-smt/vowpal_wabbit.git
Diffstat (limited to 'vowpalwabbit/mf.cc')
-rw-r--r--  vowpalwabbit/mf.cc  82
1 file changed, 41 insertions, 41 deletions
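
The diff below replaces pointer parameters with references in the mf reduction's predict, learn and finish entry points, so every data->field and ec->field access becomes data.field / ec.field. A minimal standalone sketch of the pointer-to-reference pattern, with stand-in types rather than VW's real mf, learner and example:

#include <iostream>

struct mf      { float rank; };               // stand-in for the reduction state
struct example { float partial_prediction; }; // stand-in for VW's example

// before:  void predict(mf* data, example* ec) { ec->partial_prediction = data->rank; }
// after, as in this diff:
void predict(mf& data, example& ec) { ec.partial_prediction = data.rank; }

int main() {
  mf data{2.f};
  example ec{0.f};
  predict(data, ec);                          // no address-of needed at the call site
  std::cout << ec.partial_prediction << "\n"; // prints 2
  return 0;
}
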
diff --git a/vowpalwabbit/mf.cc b/vowpalwabbit/mf.cc
index ab6b8988..d0236ed0 100644
--- a/vowpalwabbit/mf.cc
+++ b/vowpalwabbit/mf.cc
@@ -53,53 +53,53 @@ struct mf {
};
template <bool cache_sub_predictions>
-void predict(mf *data, learner& base, example* ec) {
- vw* all = data->all;
+void predict(mf& data, learner& base, example& ec) {
+ vw* all = data.all;
float prediction = 0;
if (cache_sub_predictions)
- data->sub_predictions.resize(2*all->rank+1, true);
+ data.sub_predictions.resize(2*all->rank+1, true);
// predict from linear terms
base.predict(ec);
// store linear prediction
if (cache_sub_predictions)
- data->sub_predictions[0] = ec->partial_prediction;
- prediction += ec->partial_prediction;
+ data.sub_predictions[0] = ec.partial_prediction;
+ prediction += ec.partial_prediction;
// store namespace indices
- copy_array(data->indices, ec->indices);
+ copy_array(data.indices, ec.indices);
// erase indices
- ec->indices.erase();
- ec->indices.push_back(0);
+ ec.indices.erase();
+ ec.indices.push_back(0);
// add interaction terms to prediction
- for (vector<string>::iterator i = data->pairs.begin(); i != data->pairs.end(); i++) {
+ for (vector<string>::iterator i = data.pairs.begin(); i != data.pairs.end(); i++) {
int left_ns = (int) (*i)[0];
int right_ns = (int) (*i)[1];
- if (ec->atomics[left_ns].size() > 0 && ec->atomics[right_ns].size() > 0) {
+ if (ec.atomics[left_ns].size() > 0 && ec.atomics[right_ns].size() > 0) {
for (size_t k = 1; k <= all->rank; k++) {
- ec->indices[0] = left_ns;
+ ec.indices[0] = left_ns;
// compute l^k * x_l using base learner
base.predict(ec, k);
- float x_dot_l = ec->partial_prediction;
+ float x_dot_l = ec.partial_prediction;
if (cache_sub_predictions)
- data->sub_predictions[2*k-1] = x_dot_l;
+ data.sub_predictions[2*k-1] = x_dot_l;
// set example to right namespace only
- ec->indices[0] = right_ns;
+ ec.indices[0] = right_ns;
// compute r^k * x_r using base learner
base.predict(ec, k + all->rank);
- float x_dot_r = ec->partial_prediction;
+ float x_dot_r = ec.partial_prediction;
if (cache_sub_predictions)
- data->sub_predictions[2*k] = x_dot_r;
+ data.sub_predictions[2*k] = x_dot_r;
// accumulate prediction
prediction += (x_dot_l * x_dot_r);
@@ -107,15 +107,15 @@ void predict(mf *data, learner& base, example* ec) {
}
}
// restore namespace indices and label
- copy_array(ec->indices, data->indices);
+ copy_array(ec.indices, data.indices);
// finalize prediction
- ec->partial_prediction = prediction;
- ec->final_prediction = GD::finalize_prediction(*(data->all), ec->partial_prediction);
+ ec.partial_prediction = prediction;
+ ec.final_prediction = GD::finalize_prediction(*(data.all), ec.partial_prediction);
}
-void learn(mf* data, learner& base, example* ec) {
- vw* all = data->all;
+void learn(mf& data, learner& base, example& ec) {
+ vw* all = data.all;
// predict with current weights
predict<true>(data, base, ec);
@@ -124,71 +124,71 @@ void learn(mf* data, learner& base, example* ec) {
base.update(ec);
// store namespace indices
- copy_array(data->indices, ec->indices);
+ copy_array(data.indices, ec.indices);
// erase indices
- ec->indices.erase();
- ec->indices.push_back(0);
+ ec.indices.erase();
+ ec.indices.push_back(0);
// update interaction terms
// looping over all pairs of non-empty namespaces
- for (vector<string>::iterator i = data->pairs.begin(); i != data->pairs.end(); i++) {
+ for (vector<string>::iterator i = data.pairs.begin(); i != data.pairs.end(); i++) {
int left_ns = (int) (*i)[0];
int right_ns = (int) (*i)[1];
- if (ec->atomics[left_ns].size() > 0 && ec->atomics[right_ns].size() > 0) {
+ if (ec.atomics[left_ns].size() > 0 && ec.atomics[right_ns].size() > 0) {
// set example to left namespace only
- ec->indices[0] = left_ns;
+ ec.indices[0] = left_ns;
// store feature values in left namespace
- copy_array(data->temp_features, ec->atomics[left_ns]);
+ copy_array(data.temp_features, ec.atomics[left_ns]);
for (size_t k = 1; k <= all->rank; k++) {
// multiply features in left namespace by r^k * x_r
- for (feature* f = ec->atomics[left_ns].begin; f != ec->atomics[left_ns].end; f++)
- f->x *= data->sub_predictions[2*k];
+ for (feature* f = ec.atomics[left_ns].begin; f != ec.atomics[left_ns].end; f++)
+ f->x *= data.sub_predictions[2*k];
// update l^k using base learner
base.update(ec, k);
// restore left namespace features (undoing multiply)
- copy_array(ec->atomics[left_ns], data->temp_features);
+ copy_array(ec.atomics[left_ns], data.temp_features);
}
// set example to right namespace only
- ec->indices[0] = right_ns;
+ ec.indices[0] = right_ns;
// store feature values for right namespace
- copy_array(data->temp_features, ec->atomics[right_ns]);
+ copy_array(data.temp_features, ec.atomics[right_ns]);
for (size_t k = 1; k <= all->rank; k++) {
// multiply features in right namespace by l^k * x_l
- for (feature* f = ec->atomics[right_ns].begin; f != ec->atomics[right_ns].end; f++)
- f->x *= data->sub_predictions[2*k-1];
+ for (feature* f = ec.atomics[right_ns].begin; f != ec.atomics[right_ns].end; f++)
+ f->x *= data.sub_predictions[2*k-1];
// update r^k using base learner
base.update(ec, k + all->rank);
// restore right namespace features
- copy_array(ec->atomics[right_ns], data->temp_features);
+ copy_array(ec.atomics[right_ns], data.temp_features);
}
}
}
// restore namespace indices
- copy_array(ec->indices, data->indices);
+ copy_array(ec.indices, data.indices);
}
-void finish(mf* o) {
+void finish(mf& o) {
// restore global pairs
- o->all->pairs = o->pairs;
+ o.all->pairs = o.pairs;
// clean up local v_arrays
- o->indices.delete_v();
- o->sub_predictions.delete_v();
+ o.indices.delete_v();
+ o.sub_predictions.delete_v();
}
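
For reference, the quantity predict() accumulates is a linear term plus, for each pair of interacting namespaces, a sum over k = 1..rank of (l^k · x_left)(r^k · x_right); learn() then scales each namespace's features by the cached partner dot product before calling base.update, since that product is the gradient factor for the corresponding factor weights. A self-contained sketch of that arithmetic with plain std::vector weights (hypothetical helper names; this is not the VW base-learner API):

#include <cstddef>
#include <iostream>
#include <vector>

static float dot(const std::vector<float>& w, const std::vector<float>& x) {
  float s = 0.f;
  for (size_t i = 0; i < x.size(); ++i) s += w[i] * x[i];
  return s;
}

// prediction = linear + sum_k (l^k . x_left) * (r^k . x_right),
// mirroring the k = 1..rank loop in predict() above.
float mf_predict(float linear,
                 const std::vector<std::vector<float>>& L,  // rank left factors
                 const std::vector<std::vector<float>>& R,  // rank right factors
                 const std::vector<float>& x_left,
                 const std::vector<float>& x_right) {
  float prediction = linear;
  for (size_t k = 0; k < L.size(); ++k) {
    float x_dot_l = dot(L[k], x_left);   // analogue of base.predict(ec, k)
    float x_dot_r = dot(R[k], x_right);  // analogue of base.predict(ec, k + rank)
    prediction += x_dot_l * x_dot_r;     // accumulate the interaction term
  }
  return prediction;
}

// One plain SGD step on the factors, mirroring learn(): d/d(l^k) of the
// interaction term is x_left scaled by x_dot_r, which is why mf.cc multiplies
// the left-namespace features by sub_predictions[2k] before base.update(ec, k),
// and symmetrically for the right namespace.
void mf_update(std::vector<std::vector<float>>& L,
               std::vector<std::vector<float>>& R,
               const std::vector<float>& x_left,
               const std::vector<float>& x_right,
               float dloss_dpred,  // loss derivative at the current prediction
               float eta) {        // learning rate
  for (size_t k = 0; k < L.size(); ++k) {
    float x_dot_l = dot(L[k], x_left);   // cached before either side changes,
    float x_dot_r = dot(R[k], x_right);  // like sub_predictions in learn()
    for (size_t i = 0; i < x_left.size(); ++i)
      L[k][i] -= eta * dloss_dpred * x_dot_r * x_left[i];
    for (size_t i = 0; i < x_right.size(); ++i)
      R[k][i] -= eta * dloss_dpred * x_dot_l * x_right[i];
  }
}

int main() {
  std::vector<std::vector<float>> L = {{0.1f, 0.2f}, {0.3f, -0.1f}};  // rank = 2
  std::vector<std::vector<float>> R = {{0.4f, 0.0f}, {0.2f, 0.5f}};
  std::vector<float> xl = {1.f, 2.f}, xr = {3.f, 1.f};
  float pred = mf_predict(0.5f, L, R, xl, xr);
  std::cout << "prediction: " << pred << "\n";
  mf_update(L, R, xl, xr, /*dloss_dpred=*/pred - 1.f, /*eta=*/0.05f);
  std::cout << "after update: " << mf_predict(0.5f, L, R, xl, xr) << "\n";
  return 0;
}
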