github.com/moses-smt/vowpal_wabbit.git
author     John Langford <jl@hunch.net>    2014-01-21 21:51:04 +0400
committer  John Langford <jl@hunch.net>    2014-01-21 21:51:04 +0400
commit     a0fcc75614751b4bb3e4235a218198646cf399d9 (patch)
tree       d3b0249dab5761ce9b0c659ad0a32522eedc9306 /vowpalwabbit/mf.cc
parent     8b6fe928a1c8595e158a69bcd52490d874b38297 (diff)
changed pointers to references
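The change is mechanical: predict and learn now take the example by reference (example& ec) instead of by pointer (example* ec), so every member access switches from ec-> to ec. and callers pass the object (or *ptr) rather than the pointer. A minimal standalone sketch of the same pattern, using a hypothetical Example struct rather than VW's real types:

#include <iostream>

struct Example {                 // hypothetical stand-in for VW's example
  float partial_prediction;
};

// before: void predict(Example* ec) { ec->partial_prediction = 0.f; }
void predict(Example& ec) {      // after: take the example by reference
  ec.partial_prediction = 0.f;   // '->' becomes '.'
}

int main() {
  Example e{1.f};
  predict(e);                    // caller passes the object (or *ptr) directly
  std::cout << e.partial_prediction << "\n";   // prints 0
  return 0;
}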
Diffstat (limited to 'vowpalwabbit/mf.cc')
-rw-r--r--  vowpalwabbit/mf.cc  56
1 file changed, 28 insertions(+), 28 deletions(-)
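For orientation while reading the hunks below: predict combines a linear term with low-rank interaction terms over namespace pairs, accumulating, for each latent dimension k up to all->rank, the product of the left projection l^k * x_l and the right projection r^k * x_r (the accumulation line itself falls between the hunks shown, so the product form is my reading of the surrounding comments). A rough standalone sketch of that structure, with hypothetical names and plain vectors in place of VW's example/atomics machinery:

#include <cstddef>
#include <vector>

// Hypothetical per-pair sub-predictions: for each latent dimension k,
// the left projection l^k * x_l and the right projection r^k * x_r.
struct PairProjections {
  std::vector<float> x_dot_l;
  std::vector<float> x_dot_r;
};

// Sketch of the prediction structure: linear part plus, for every
// interacting namespace pair, the sum over k of (l^k * x_l) * (r^k * x_r).
float mf_predict_sketch(float linear_prediction,
                        const std::vector<PairProjections>& pairs) {
  float prediction = linear_prediction;
  for (const PairProjections& p : pairs)
    for (std::size_t k = 0; k < p.x_dot_l.size(); ++k)
      prediction += p.x_dot_l[k] * p.x_dot_r[k];
  return prediction;
}

int main() {
  PairProjections p{{0.5f, 0.25f}, {2.0f, 4.0f}};       // rank 2 pair
  // 1.0 + (0.5*2.0 + 0.25*4.0) = 3.0
  return mf_predict_sketch(1.0f, {p}) == 3.0f ? 0 : 1;  // exits 0 on success
}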
diff --git a/vowpalwabbit/mf.cc b/vowpalwabbit/mf.cc
index ab6b8988..bbeedb6f 100644
--- a/vowpalwabbit/mf.cc
+++ b/vowpalwabbit/mf.cc
@@ -53,7 +53,7 @@ struct mf {
};
template <bool cache_sub_predictions>
-void predict(mf *data, learner& base, example* ec) {
+void predict(mf *data, learner& base, example& ec) {
vw* all = data->all;
float prediction = 0;
@@ -65,15 +65,15 @@ void predict(mf *data, learner& base, example* ec) {
// store linear prediction
if (cache_sub_predictions)
- data->sub_predictions[0] = ec->partial_prediction;
- prediction += ec->partial_prediction;
+ data->sub_predictions[0] = ec.partial_prediction;
+ prediction += ec.partial_prediction;
// store namespace indices
- copy_array(data->indices, ec->indices);
+ copy_array(data->indices, ec.indices);
// erase indices
- ec->indices.erase();
- ec->indices.push_back(0);
+ ec.indices.erase();
+ ec.indices.push_back(0);
// add interaction terms to prediction
for (vector<string>::iterator i = data->pairs.begin(); i != data->pairs.end(); i++) {
@@ -81,23 +81,23 @@ void predict(mf *data, learner& base, example* ec) {
int left_ns = (int) (*i)[0];
int right_ns = (int) (*i)[1];
- if (ec->atomics[left_ns].size() > 0 && ec->atomics[right_ns].size() > 0) {
+ if (ec.atomics[left_ns].size() > 0 && ec.atomics[right_ns].size() > 0) {
for (size_t k = 1; k <= all->rank; k++) {
- ec->indices[0] = left_ns;
+ ec.indices[0] = left_ns;
// compute l^k * x_l using base learner
base.predict(ec, k);
- float x_dot_l = ec->partial_prediction;
+ float x_dot_l = ec.partial_prediction;
if (cache_sub_predictions)
data->sub_predictions[2*k-1] = x_dot_l;
// set example to right namespace only
- ec->indices[0] = right_ns;
+ ec.indices[0] = right_ns;
// compute r^k * x_r using base learner
base.predict(ec, k + all->rank);
- float x_dot_r = ec->partial_prediction;
+ float x_dot_r = ec.partial_prediction;
if (cache_sub_predictions)
data->sub_predictions[2*k] = x_dot_r;
@@ -107,14 +107,14 @@ void predict(mf *data, learner& base, example* ec) {
}
}
// restore namespace indices and label
- copy_array(ec->indices, data->indices);
+ copy_array(ec.indices, data->indices);
// finalize prediction
- ec->partial_prediction = prediction;
- ec->final_prediction = GD::finalize_prediction(*(data->all), ec->partial_prediction);
+ ec.partial_prediction = prediction;
+ ec.final_prediction = GD::finalize_prediction(*(data->all), ec.partial_prediction);
}
-void learn(mf* data, learner& base, example* ec) {
+void learn(mf* data, learner& base, example& ec) {
vw* all = data->all;
// predict with current weights
@@ -124,11 +124,11 @@ void learn(mf* data, learner& base, example* ec) {
base.update(ec);
// store namespace indices
- copy_array(data->indices, ec->indices);
+ copy_array(data->indices, ec.indices);
// erase indices
- ec->indices.erase();
- ec->indices.push_back(0);
+ ec.indices.erase();
+ ec.indices.push_back(0);
// update interaction terms
// looping over all pairs of non-empty namespaces
@@ -137,49 +137,49 @@ void learn(mf* data, learner& base, example* ec) {
int left_ns = (int) (*i)[0];
int right_ns = (int) (*i)[1];
- if (ec->atomics[left_ns].size() > 0 && ec->atomics[right_ns].size() > 0) {
+ if (ec.atomics[left_ns].size() > 0 && ec.atomics[right_ns].size() > 0) {
// set example to left namespace only
- ec->indices[0] = left_ns;
+ ec.indices[0] = left_ns;
// store feature values in left namespace
- copy_array(data->temp_features, ec->atomics[left_ns]);
+ copy_array(data->temp_features, ec.atomics[left_ns]);
for (size_t k = 1; k <= all->rank; k++) {
// multiply features in left namespace by r^k * x_r
- for (feature* f = ec->atomics[left_ns].begin; f != ec->atomics[left_ns].end; f++)
+ for (feature* f = ec.atomics[left_ns].begin; f != ec.atomics[left_ns].end; f++)
f->x *= data->sub_predictions[2*k];
// update l^k using base learner
base.update(ec, k);
// restore left namespace features (undoing multiply)
- copy_array(ec->atomics[left_ns], data->temp_features);
+ copy_array(ec.atomics[left_ns], data->temp_features);
}
// set example to right namespace only
- ec->indices[0] = right_ns;
+ ec.indices[0] = right_ns;
// store feature values for right namespace
- copy_array(data->temp_features, ec->atomics[right_ns]);
+ copy_array(data->temp_features, ec.atomics[right_ns]);
for (size_t k = 1; k <= all->rank; k++) {
// multiply features in right namespace by l^k * x_l
- for (feature* f = ec->atomics[right_ns].begin; f != ec->atomics[right_ns].end; f++)
+ for (feature* f = ec.atomics[right_ns].begin; f != ec.atomics[right_ns].end; f++)
f->x *= data->sub_predictions[2*k-1];
// update r^k using base learner
base.update(ec, k + all->rank);
// restore right namespace features
- copy_array(ec->atomics[right_ns], data->temp_features);
+ copy_array(ec.atomics[right_ns], data->temp_features);
}
}
}
// restore namespace indices
- copy_array(ec->indices, data->indices);
+ copy_array(ec.indices, data->indices);
}
void finish(mf* o) {