Welcome to the mirror list, hosted at ThFree Co, Russian Federation.

github.com/moses-smt/vowpal_wabbit.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
path: root/test
diff options
context:
space:
mode:
authorHal Daume III <me@hal3.name>2014-09-21 19:30:36 +0400
committerHal Daume III <me@hal3.name>2014-09-21 19:30:36 +0400
commit93cca9f82039b7fc375d81e92e4389c0658c0441 (patch)
tree7a687b79e1464bf07455ddf8f57efd6c631d9f8a /test
parent2f0b934a669cade6d7e169b047aaec3eb3dcda99 (diff)
integrated new version of search, updated relevant tests (and removed ones that use beam, since beam is still not supported)
Diffstat (limited to 'test')
-rwxr-xr-xtest/RunTests105
-rw-r--r--test/train-sets/ref/argmax_data.stderr20
-rw-r--r--test/train-sets/ref/searn_small.stderr0
-rw-r--r--test/train-sets/ref/searn_small.stdout0
-rw-r--r--test/train-sets/ref/searn_wsj.stderr0
-rw-r--r--test/train-sets/ref/searn_wsj.stdout0
-rw-r--r--test/train-sets/ref/searn_wsj2.dat.stderr0
-rw-r--r--test/train-sets/ref/searn_wsj2.dat.stdout0
-rw-r--r--test/train-sets/ref/sequence_data.ldf.test.stderr8
-rw-r--r--test/train-sets/ref/sequence_data.ldf.train.stderr22
-rw-r--r--test/train-sets/ref/sequence_data.nonldf.test.stderr8
-rw-r--r--test/train-sets/ref/sequence_data.nonldf.train.stderr16
-rw-r--r--test/train-sets/ref/sequencespan_data.nonldf-bilou.test.stderr8
-rw-r--r--test/train-sets/ref/sequencespan_data.nonldf-bilou.train.stderr18
-rw-r--r--test/train-sets/ref/sequencespan_data.nonldf.test.stderr8
-rw-r--r--test/train-sets/ref/sequencespan_data.nonldf.train.stderr18
16 files changed, 89 insertions, 142 deletions
diff --git a/test/RunTests b/test/RunTests
index 20ea2de8..048c4815 100755
--- a/test/RunTests
+++ b/test/RunTests
@@ -798,14 +798,14 @@ __DATA__
{VW} -k --ect 10 --error 3 -c --passes 10 --invariant train-sets/multiclass --holdout_off
train-sets/ref/multiclass.stderr
-# Test 13: Run searn on wsj_small for 12 passes, 4 passes per policy, extra features
-{VW} -k -c -d train-sets/wsj_small.dat.gz --passes 12 --invariant --search_passes_per_policy 4 --search_task sequence --search 5 --search_history 2 --search_bigrams --search_features 1 --quiet --holdout_off
- train-sets/ref/searn_wsj.stderr
+# Test 13: Run search (dagger) on wsj_small for 6 passes extra features
+{VW} -k -c -d train-sets/wsj_small.dat.gz --passes 6 --search_task sequence --search 45 --search_alpha 1e-6 --search_max_bias_ngram_length 2 --search_max_quad_ngram_length 1 --holdout_off
+ train-sets/ref/search_wsj.stderr
-# Test 14: Run searn (wap) on wsj_small for 2 passes, 1 pass per policy, extra features
-{VW} -k -b 19 -c -d train-sets/wsj_small.dat.gz --passes 2 --invariant --search_passes_per_policy 1 --search_task sequence --search 5 --wap 5 --search_history 2 --search_bigrams --search_features 1 --quiet --holdout_off
- train-sets/ref/searn_wsj2.dat.stdout
- train-sets/ref/searn_wsj2.dat.stderr
+# Test 14: Run search (searn) on wsj_small for 6 passes extra features
+{VW} -k -c -d train-sets/wsj_small.dat.gz --passes 6 --search_task sequence --search 45 --search_alpha 1e-6 --search_max_bias_ngram_length 2 --search_max_quad_ngram_length 1 --holdout_off --search_passes_per_policy 3 --search_interpolation policy
+ train-sets/ref/search_wsj2.dat.stdout
+ train-sets/ref/search_wsj2.dat.stderr
# Test 15: LBFGS on zero derivative input
{VW} -k -c -d train-sets/zero.dat --loss_function=squared -b 20 --bfgs --mem 7 --passes 5 --l2 1.0 --holdout_off
@@ -821,9 +821,9 @@ __DATA__
{LDA} -k --lda 100 --lda_alpha 0.01 --lda_rho 0.01 --lda_D 1000 -l 1 -b 13 --minibatch 128 --invariant train-sets/wiki1K.dat
train-sets/ref/wiki1K.stderr
-# Test 18: Run searn on seq_small for 12 passes, 4 passes per policy
-{VW} -k -c -d train-sets/seq_small --passes 12 --invariant --search_passes_per_policy 4 --search 4 --search_task sequence --quiet --holdout_off
- train-sets/ref/searn_small.stderr
+# Test 18: Run search on seq_small for 12 passes, 4 passes per policy
+{VW} -k -c -d train-sets/seq_small --passes 12 --invariant --search 4 --search_task sequence --holdout_off
+ train-sets/ref/search_small.stderr
# Test 19: neural network 3-parity with 2 hidden units
{VW} -k -c -d train-sets/3parity --hash all --passes 3000 -b 16 --nn 2 -l 10 --invariant -f models/0021.model --random_seed 15 --quiet --holdout_off
@@ -932,141 +932,106 @@ __DATA__
{VW} -k -d train-sets/lda-2pass-hang.dat --lda 10 -c --passes 2 --holdout_off
train-sets/ref/lda-2pass-hang.stderr
-# Test 43: searn sequence labeling, non-ldf train
+# Test 43: search sequence labeling, non-ldf train
{VW} -k -c -d train-sets/sequence_data --passes 20 --invariant --search_rollout oracle --search_alpha 1e-8 --search_task sequence --search 5 --holdout_off -f models/sequence_data.model
train-sets/ref/sequence_data.nonldf.train.stderr
-# Test 44: searn sequence labeling, non-ldf test
+# Test 44: search sequence labeling, non-ldf test
{VW} -d train-sets/sequence_data -t -i models/sequence_data.model -p sequence_data.predict
train-sets/ref/sequence_data.nonldf.test.stderr
train-sets/ref/sequence_data.nonldf.test.predict
-# Test 45: searn sequence labeling, non-ldf test, beam 1
-{VW} -d train-sets/sequence_data -t -i models/sequence_data.model -p sequence_data.predict --search_beam 1
- train-sets/ref/sequence_data.nonldf.test-beam1.stderr
- train-sets/ref/sequence_data.nonldf.test-beam1.predict
+# Test 45: make sure that history works
+{VW} -k -c -d train-sets/seq_small2 --passes 4 --search 4 --search_task sequence --holdout_off
+ train-sets/ref/search_small2.stderr
-# Test 46: searn sequence labeling, non-ldf test, beam 20
-{VW} -d train-sets/sequence_data -t -i models/sequence_data.model -p sequence_data.predict --search_beam 20 --search_kbest 20
- train-sets/ref/sequence_data.nonldf.test-beam20.stderr
- train-sets/ref/sequence_data.nonldf.test-beam20.predict
-
-# Test 47: searn sequence labeling, ldf train
-{VW} -k -c -d train-sets/sequence_data --passes 20 --invariant --search_rollout oracle --search_alpha 1e-8 --search_task sequence_demoldf --csoaa_ldf m --search 5 --holdout_off -f models/sequence_data.model
+# Test 46: search sequence labeling, ldf train
+{VW} -k -c -d train-sets/sequence_data --passes 20 --search_rollout oracle --search_alpha 1e-8 --search_task sequence_demoldf --csoaa_ldf m --search 5 --holdout_off -f models/sequence_data.model
train-sets/ref/sequence_data.ldf.train.stderr
-# Test 48: searn sequence labeling, ldf test
+# Test 47: search sequence labeling, ldf test
{VW} -d train-sets/sequence_data -t -i models/sequence_data.model -p sequence_data.predict
train-sets/ref/sequence_data.ldf.test.stderr
train-sets/ref/sequence_data.ldf.test.predict
-# Test 49: searn sequence labeling, ldf test, beam 1
-{VW} -d train-sets/sequence_data -t -i models/sequence_data.model -p sequence_data.predict --search_beam 1
- train-sets/ref/sequence_data.ldf.test-beam1.stderr
- train-sets/ref/sequence_data.ldf.test-beam1.predict
-
-# Test 50: searn sequence labeling, ldf test, beam 20
-{VW} -d train-sets/sequence_data -t -i models/sequence_data.model -p sequence_data.predict --search_beam 20 --search_kbest 20
- train-sets/ref/sequence_data.ldf.test-beam20.stderr
- train-sets/ref/sequence_data.ldf.test-beam20.predict
-
-# Test 51: searn sequence SPAN labeling BIO, non-ldf train
+# Test 48: search sequence SPAN labeling BIO, non-ldf train
{VW} -k -c -d train-sets/sequencespan_data --passes 20 --invariant --search_rollout oracle --search_alpha 1e-8 --search_task sequencespan --search 7 --holdout_off -f models/sequencespan_data.model
train-sets/ref/sequencespan_data.nonldf.train.stderr
-# Test 52: searn sequence SPAN labeling BIO, non-ldf test
+# Test 49: search sequence SPAN labeling BIO, non-ldf test
{VW} -d train-sets/sequencespan_data -t -i models/sequencespan_data.model -p sequencespan_data.predict
train-sets/ref/sequencespan_data.nonldf.test.stderr
train-sets/ref/sequencespan_data.nonldf.test.predict
-# Test 53: searn sequence SPAN labeling BIO, non-ldf test, beam 1
-{VW} -d train-sets/sequencespan_data -t -i models/sequencespan_data.model -p sequencespan_data.predict --search_beam 1
- train-sets/ref/sequencespan_data.nonldf.test-beam1.stderr
- train-sets/ref/sequencespan_data.nonldf.test-beam1.predict
-
-# Test 54: searn sequence SPAN labeling BIO, non-ldf test, beam 20
-{VW} -d train-sets/sequencespan_data -t --search_span_bilou -i models/sequencespan_data.model --search_beam 20 --search_kbest 20 --quiet
- train-sets/ref/sequencespan_data.nonldf.test-beam20.stderr
-
-# Test 55: searn sequence SPAN labeling BILOU, non-ldf train
+# Test 50: search sequence SPAN labeling BILOU, non-ldf train
{VW} -k -c -d train-sets/sequencespan_data --passes 20 --invariant --search_rollout oracle --search_alpha 1e-8 --search_task sequencespan --search_span_bilou --search 7 --holdout_off -f models/sequencespan_data.model
train-sets/ref/sequencespan_data.nonldf-bilou.train.stderr
-# Test 56: searn sequence SPAN labeling BILOU, non-ldf test
+# Test 51: search sequence SPAN labeling BILOU, non-ldf test
{VW} -d train-sets/sequencespan_data -t --search_span_bilou -i models/sequencespan_data.model -p sequencespan_data.predict
train-sets/ref/sequencespan_data.nonldf-bilou.test.stderr
train-sets/ref/sequencespan_data.nonldf-bilou.test.predict
-# Test 57: searn sequence SPAN labeling BILOU, non-ldf test, beam 1
-{VW} -d train-sets/sequencespan_data -t --search_span_bilou -i models/sequencespan_data.model -p sequencespan_data.predict --search_beam 1
- train-sets/ref/sequencespan_data.nonldf-bilou.test-beam1.stderr
- train-sets/ref/sequencespan_data.nonldf-bilou.test-beam1.predict
-
-# Test 58: searn sequence SPAN labeling BILOU, non-ldf test, beam 20
-{VW} -d train-sets/sequencespan_data -t --search_span_bilou -i models/sequencespan_data.model -p sequencespan_data.predict --search_beam 20 --search_kbest 20
- train-sets/ref/sequencespan_data.nonldf-bilou.test-beam20.stderr
- train-sets/ref/sequencespan_data.nonldf-bilou.test-beam20.predict
-
-# Test 59: silly test for "argmax" task
+# Test 52: silly test for "argmax" task
{VW} -d train-sets/argmax_data -k -c --passes 20 --search_rollout oracle --search_alpha 1e-8 --search_task argmax --search 2 --holdout_off
train-sets/ref/argmax_data.stderr
-# Test 60: (holdout-broken regression)
+# Test 53: (holdout-broken regression)
# ensure we have no holdout loss of '0 h'
{VW} -k -c --passes 2 train-sets/0001.dat
train-sets/ref/holdout-loss-not-zero.stderr
-# Test 61: stagewise poly with exponent 0.25
+# Test 54: stagewise poly with exponent 0.25
####in the following stage_poly tests, there are minute differences in losses, which are not being fuzzy-diffed;
####thus the stderr is cleared (--quiet) and only comparing (fuzzy-diffed) predictions.
{VW} --stage_poly --sched_exponent 0.25 --batch_sz 1000 --batch_sz_no_doubling -d train-sets/rcv1_small.dat -p stage_poly.s025.predict --quiet
train-sets/ref/stage_poly.s025.stderr
train-sets/ref/stage_poly.s025.predict
-# Test 62: stagewise poly with exponent 1.0
+# Test 55: stagewise poly with exponent 1.0
{VW} --stage_poly --sched_exponent 1.0 --batch_sz 1000 --batch_sz_no_doubling -d train-sets/rcv1_small.dat --quiet
train-sets/ref/stage_poly.s100.stderr
-# Test 63: stagewise poly with exponent 0.25 and doubling batches
+# Test 56: stagewise poly with exponent 0.25 and doubling batches
{VW} --stage_poly --sched_exponent 0.25 --batch_sz 1000 -d train-sets/rcv1_small.dat -p stage_poly.s025.doubling.predict --quiet
train-sets/ref/stage_poly.s025.doubling.stderr
train-sets/ref/stage_poly.s025.doubling.predict
-# Test 64: stagewise poly with exponent 1.0 and doubling batches
+# Test 57: stagewise poly with exponent 1.0 and doubling batches
{VW} --stage_poly --sched_exponent 1.0 --batch_sz 1000 -d train-sets/rcv1_small.dat -p stage_poly.s100.doubling.predict --quiet
train-sets/ref/stage_poly.s100.doubling.stderr
train-sets/ref/stage_poly.s100.doubling.predict
-# Test 65: library test, train the initial model
+# Test 58: library test, train the initial model
{VW} -c -k -d train-sets/library_train -f models/library_train.w -q st --passes 100 --hash all --noconstant --csoaa_ldf m --holdout_off
train-sets/ref/library_train.stdout
train-sets/ref/library_train.stderr
-# Test 66: library test, run ezexample_predict
+# Test 59: library test, run ezexample_predict
../library/ezexample_predict models/library_train.w
train-sets/ref/ezexample_predict.stdout
train-sets/ref/ezexample_predict.stderr
-# Test 67: empty test, bad builds (without make clean)
+# Test 60: empty test, bad builds (without make clean)
# sometimes cause a SEGV even on empty input
{VW} /dev/null
train-sets/ref/empty-set.stderr
-# Test 68: daemon test
+# Test 61: daemon test
./daemon-test.sh
test-sets/ref/vw-daemon.stdout
-# Test 69: SVM linear kernel
+# Test 62: SVM linear kernel
{VW} --ksvm --l2 1 --reprocess 5 -b 18 -p train-sets/ref/ksvm_train.linear.predict -d train-sets/rcv1_smaller.dat
train-sets/ref/ksvm_train.linear.stderr
train-sets/ref/ksvm_train.linear.predict
-# Test 70: SVM polynomial kernel
+# Test 63: SVM polynomial kernel
{VW} --ksvm --l2 1 --reprocess 5 -b 18 --kernel poly -p train-sets/ref/ksvm_train.poly.predict -d train-sets/rcv1_smaller.dat
train-sets/ref/ksvm_train.poly.stderr
train-sets/ref/ksvm_train.poly.predict
-# Test 71: SVM rbf kernel
+# Test 64: SVM rbf kernel
{VW} --ksvm --l2 1 --reprocess 5 -b 18 --kernel rbf -p train-sets/ref/ksvm_train.rbf.predict -d train-sets/rcv1_smaller.dat
train-sets/ref/ksvm_train.rbf.stderr
train-sets/ref/ksvm_train.rbf.predict
diff --git a/test/train-sets/ref/argmax_data.stderr b/test/train-sets/ref/argmax_data.stderr
index 6357dbac..84ca90df 100644
--- a/test/train-sets/ref/argmax_data.stderr
+++ b/test/train-sets/ref/argmax_data.stderr
@@ -6,17 +6,15 @@ decay_learning_rate = 1
creating cache_file = train-sets/argmax_data.cache
Reading datafile = train-sets/argmax_data
num sources = 1
-average since example example current current current
-loss last counter weight label predict features
-average since sequence example current label current predicted current cur cur predic. examples
-loss last counter weight sequence prefix sequence prefix features pass pol made gener.
-10.000000 10.000000 1 1.000000 [2 ] [1 ] 15 0 0 5 5
-5.500000 1.000000 2 2.000000 [1 ] [2 ] 12 0 0 9 9
-5.250000 5.000000 4 4.000000 [2 ] [1 ] 9 0 0 15 15
-2.875000 0.500000 8 8.000000 [2 ] [2 ] 9 1 0 30 30
-1.687500 0.500000 16 16.000000 [2 ] [2 ] 9 3 0 60 60
-1.093750 0.500000 32 32.000000 [2 ] [2 ] 9 7 0 120 120
-0.796875 0.500000 64 64.000000 [2 ] [2 ] 9 15 0 240 240
+average since instance current true current predicted cur cur predic cache examples
+loss last counter output prefix output prefix pass pol made hits gener beta
+10.000000 10.000000 1 [2 ] [1 ] 0 0 5 0 5 0.000000
+5.500000 1.000000 2 [1 ] [2 ] 0 0 9 0 9 0.000000
+5.250000 5.000000 4 [2 ] [1 ] 0 0 15 0 15 0.000000
+2.875000 0.500000 8 [2 ] [2 ] 1 0 30 0 30 0.000000
+1.687500 0.500000 16 [2 ] [2 ] 3 0 60 0 60 0.000001
+1.093750 0.500000 32 [2 ] [2 ] 7 0 120 0 120 0.000001
+0.796875 0.500000 64 [2 ] [2 ] 15 0 240 0 240 0.000002
finished run
number of examples per pass = 4
diff --git a/test/train-sets/ref/searn_small.stderr b/test/train-sets/ref/searn_small.stderr
deleted file mode 100644
index e69de29b..00000000
--- a/test/train-sets/ref/searn_small.stderr
+++ /dev/null
diff --git a/test/train-sets/ref/searn_small.stdout b/test/train-sets/ref/searn_small.stdout
deleted file mode 100644
index e69de29b..00000000
--- a/test/train-sets/ref/searn_small.stdout
+++ /dev/null
diff --git a/test/train-sets/ref/searn_wsj.stderr b/test/train-sets/ref/searn_wsj.stderr
deleted file mode 100644
index e69de29b..00000000
--- a/test/train-sets/ref/searn_wsj.stderr
+++ /dev/null
diff --git a/test/train-sets/ref/searn_wsj.stdout b/test/train-sets/ref/searn_wsj.stdout
deleted file mode 100644
index e69de29b..00000000
--- a/test/train-sets/ref/searn_wsj.stdout
+++ /dev/null
diff --git a/test/train-sets/ref/searn_wsj2.dat.stderr b/test/train-sets/ref/searn_wsj2.dat.stderr
deleted file mode 100644
index e69de29b..00000000
--- a/test/train-sets/ref/searn_wsj2.dat.stderr
+++ /dev/null
diff --git a/test/train-sets/ref/searn_wsj2.dat.stdout b/test/train-sets/ref/searn_wsj2.dat.stdout
deleted file mode 100644
index e69de29b..00000000
--- a/test/train-sets/ref/searn_wsj2.dat.stdout
+++ /dev/null
diff --git a/test/train-sets/ref/sequence_data.ldf.test.stderr b/test/train-sets/ref/sequence_data.ldf.test.stderr
index 3b66edd3..7deebd56 100644
--- a/test/train-sets/ref/sequence_data.ldf.test.stderr
+++ b/test/train-sets/ref/sequence_data.ldf.test.stderr
@@ -7,11 +7,9 @@ predictions = sequence_data.predict
using no cache
Reading datafile = train-sets/sequence_data
num sources = 1
-average since example example current current current
-loss last counter weight label predict features
-average since sequence example current label current predicted current cur cur predic. examples
-loss last counter weight sequence prefix sequence prefix features pass pol made gener.
-0.000000 0.000000 1 1.000000 [5 4 3 2 1 ] [5 4 3 2 1 ] 50 0 0 25 0
+average since instance current true current predicted cur cur predic cache examples
+loss last counter output prefix output prefix pass pol made hits gener beta
+0.000000 0.000000 1 [5 4 3 2 1 ] [5 4 3 2 1 ] 0 0 5 0 0 0.000000
finished run
number of examples per pass = 1
diff --git a/test/train-sets/ref/sequence_data.ldf.train.stderr b/test/train-sets/ref/sequence_data.ldf.train.stderr
index 243e7c71..052ada4a 100644
--- a/test/train-sets/ref/sequence_data.ldf.train.stderr
+++ b/test/train-sets/ref/sequence_data.ldf.train.stderr
@@ -1,21 +1,19 @@
final_regressor = models/sequence_data.model
Num weight bits = 18
-learning rate = 10
-initial_t = 1
+learning rate = 0.5
+initial_t = 0
power_t = 0.5
decay_learning_rate = 1
creating cache_file = train-sets/sequence_data.cache
Reading datafile = train-sets/sequence_data
num sources = 1
-average since example example current current current
-loss last counter weight label predict features
-average since sequence example current label current predicted current cur cur predic. examples
-loss last counter weight sequence prefix sequence prefix features pass pol made gener.
-4.000000 4.000000 1 1.000000 [5 4 3 2 1 ] [1 1 1 1 1 ] 50 0 0 25 5
-2.000000 0.000000 2 2.000000 [5 4 3 2 1 ] [5 4 3 2 1 ] 50 1 0 50 10
-1.000000 0.000000 4 4.000000 [5 4 3 2 1 ] [5 4 3 2 1 ] 50 3 0 100 20
-0.500000 0.000000 8 8.000000 [5 4 3 2 1 ] [5 4 3 2 1 ] 50 7 0 200 40
-0.250000 0.000000 16 16.000000 [5 4 3 2 1 ] [5 4 3 2 1 ] 50 15 0 400 80
+average since instance current true current predicted cur cur predic cache examples
+loss last counter output prefix output prefix pass pol made hits gener beta
+4.000000 4.000000 1 [5 4 3 2 1 ] [1 1 1 1 1 ] 0 0 5 0 25 0.000000
+2.000000 0.000000 2 [5 4 3 2 1 ] [5 4 3 2 1 ] 1 0 10 0 50 0.000000
+1.000000 0.000000 4 [5 4 3 2 1 ] [5 4 3 2 1 ] 3 0 20 0 100 0.000001
+0.500000 0.000000 8 [5 4 3 2 1 ] [5 4 3 2 1 ] 7 0 40 0 200 0.000002
+0.250000 0.000000 16 [5 4 3 2 1 ] [5 4 3 2 1 ] 15 0 80 0 400 0.000004
finished run
number of examples per pass = 1
@@ -23,5 +21,5 @@ passes used = 20
weighted example sum = 20
weighted label sum = 0
average loss = 0.2
-best constant = -0.0526316
+best constant = 0
total feature number = 1000
diff --git a/test/train-sets/ref/sequence_data.nonldf.test.stderr b/test/train-sets/ref/sequence_data.nonldf.test.stderr
index f2d1cd1c..c3409e1c 100644
--- a/test/train-sets/ref/sequence_data.nonldf.test.stderr
+++ b/test/train-sets/ref/sequence_data.nonldf.test.stderr
@@ -7,11 +7,9 @@ predictions = sequence_data.predict
using no cache
Reading datafile = train-sets/sequence_data
num sources = 1
-average since example example current current current
-loss last counter weight label predict features
-average since sequence example current label current predicted current cur cur predic. examples
-loss last counter weight sequence prefix sequence prefix features pass pol made gener.
-0.000000 0.000000 1 1.000000 [5 4 3 2 1 ] [5 4 3 2 1 ] 15 0 0 5 0
+average since instance current true current predicted cur cur predic cache examples
+loss last counter output prefix output prefix pass pol made hits gener beta
+0.000000 0.000000 1 [5 4 3 2 1 ] [5 4 3 2 1 ] 0 0 5 0 0 0.000000
finished run
number of examples per pass = 1
diff --git a/test/train-sets/ref/sequence_data.nonldf.train.stderr b/test/train-sets/ref/sequence_data.nonldf.train.stderr
index d170aa86..2dbab703 100644
--- a/test/train-sets/ref/sequence_data.nonldf.train.stderr
+++ b/test/train-sets/ref/sequence_data.nonldf.train.stderr
@@ -7,15 +7,13 @@ decay_learning_rate = 1
creating cache_file = train-sets/sequence_data.cache
Reading datafile = train-sets/sequence_data
num sources = 1
-average since example example current current current
-loss last counter weight label predict features
-average since sequence example current label current predicted current cur cur predic. examples
-loss last counter weight sequence prefix sequence prefix features pass pol made gener.
-4.000000 4.000000 1 1.000000 [5 4 3 2 1 ] [1 1 1 1 1 ] 15 0 0 5 5
-4.000000 4.000000 2 2.000000 [5 4 3 2 1 ] [1 1 1 1 1 ] 15 1 0 10 10
-2.000000 0.000000 4 4.000000 [5 4 3 2 1 ] [5 4 3 2 1 ] 15 3 0 20 20
-1.000000 0.000000 8 8.000000 [5 4 3 2 1 ] [5 4 3 2 1 ] 15 7 0 40 40
-0.500000 0.000000 16 16.000000 [5 4 3 2 1 ] [5 4 3 2 1 ] 15 15 0 80 80
+average since instance current true current predicted cur cur predic cache examples
+loss last counter output prefix output prefix pass pol made hits gener beta
+4.000000 4.000000 1 [5 4 3 2 1 ] [1 1 1 1 1 ] 0 0 5 0 5 0.000000
+4.000000 4.000000 2 [5 4 3 2 1 ] [1 1 1 1 1 ] 1 0 10 0 10 0.000000
+2.000000 0.000000 4 [5 4 3 2 1 ] [5 4 3 2 1 ] 3 0 20 0 20 0.000000
+1.000000 0.000000 8 [5 4 3 2 1 ] [5 4 3 2 1 ] 7 0 40 0 40 0.000000
+0.500000 0.000000 16 [5 4 3 2 1 ] [5 4 3 2 1 ] 15 0 80 0 80 0.000001
finished run
number of examples per pass = 1
diff --git a/test/train-sets/ref/sequencespan_data.nonldf-bilou.test.stderr b/test/train-sets/ref/sequencespan_data.nonldf-bilou.test.stderr
index 7e008ec0..53c6b6ec 100644
--- a/test/train-sets/ref/sequencespan_data.nonldf-bilou.test.stderr
+++ b/test/train-sets/ref/sequencespan_data.nonldf-bilou.test.stderr
@@ -8,11 +8,9 @@ switching to BILOU encoding for sequence span labeling
using no cache
Reading datafile = train-sets/sequencespan_data
num sources = 1
-average since example example current current current
-loss last counter weight label predict features
-average since sequence example current label current predicted current cur cur predic. examples
-loss last counter weight sequence prefix sequence prefix features pass pol made gener.
-0.000000 0.000000 1 1.000000 [2 1 1 2 2 1 6 7 7 ..] [2 1 1 2 2 1 6 7 7 ..] 45 0 0 15 0
+average since instance current true current predicted cur cur predic cache examples
+loss last counter output prefix output prefix pass pol made hits gener beta
+0.000000 0.000000 1 [2 1 1 2 2 1 6 7 7 ..] [2 1 1 2 2 1 6 7 7 ..] 0 0 15 0 0 0.000000
finished run
number of examples per pass = 1
diff --git a/test/train-sets/ref/sequencespan_data.nonldf-bilou.train.stderr b/test/train-sets/ref/sequencespan_data.nonldf-bilou.train.stderr
index bff3a58f..6b957e0e 100644
--- a/test/train-sets/ref/sequencespan_data.nonldf-bilou.train.stderr
+++ b/test/train-sets/ref/sequencespan_data.nonldf-bilou.train.stderr
@@ -8,21 +8,19 @@ switching to BILOU encoding for sequence span labeling
creating cache_file = train-sets/sequencespan_data.cache
Reading datafile = train-sets/sequencespan_data
num sources = 1
-average since example example current current current
-loss last counter weight label predict features
-average since sequence example current label current predicted current cur cur predic. examples
-loss last counter weight sequence prefix sequence prefix features pass pol made gener.
-10.000000 10.000000 1 1.000000 [2 1 1 2 2 1 6 7 7 ..] [1 1 1 1 1 1 1 1 1 ..] 45 0 0 15 15
-7.500000 5.000000 2 2.000000 [2 1 1 2 2 1 6 7 7 ..] [2 1 1 6 4 1 6 7 7 ..] 45 1 0 30 30
-3.750000 0.000000 4 4.000000 [2 1 1 2 2 1 6 7 7 ..] [2 1 1 2 2 1 6 7 7 ..] 45 3 0 60 60
-1.875000 0.000000 8 8.000000 [2 1 1 2 2 1 6 7 7 ..] [2 1 1 2 2 1 6 7 7 ..] 45 7 0 120 120
-0.937500 0.000000 16 16.000000 [2 1 1 2 2 1 6 7 7 ..] [2 1 1 2 2 1 6 7 7 ..] 45 15 0 240 240
+average since instance current true current predicted cur cur predic cache examples
+loss last counter output prefix output prefix pass pol made hits gener beta
+6.000000 6.000000 1 [2 1 1 2 2 1 6 7 7 ..] [1 1 1 1 1 1 1 1 1 ..] 0 0 15 0 15 0.000000
+5.000000 4.000000 2 [2 1 1 2 2 1 6 7 7 ..] [2 1 1 6 4 1 6 7 7 ..] 1 0 30 0 30 0.000000
+2.500000 0.000000 4 [2 1 1 2 2 1 6 7 7 ..] [2 1 1 2 2 1 6 7 7 ..] 3 0 60 0 60 0.000001
+1.250000 0.000000 8 [2 1 1 2 2 1 6 7 7 ..] [2 1 1 2 2 1 6 7 7 ..] 7 0 120 0 120 0.000001
+0.625000 0.000000 16 [2 1 1 2 2 1 6 7 7 ..] [2 1 1 2 2 1 6 7 7 ..] 15 0 240 0 240 0.000002
finished run
number of examples per pass = 1
passes used = 20
weighted example sum = 20
weighted label sum = 0
-average loss = 0.75
+average loss = 0.5
best constant = -0.0526316
total feature number = 900
diff --git a/test/train-sets/ref/sequencespan_data.nonldf.test.stderr b/test/train-sets/ref/sequencespan_data.nonldf.test.stderr
index d52ea578..b21bbf15 100644
--- a/test/train-sets/ref/sequencespan_data.nonldf.test.stderr
+++ b/test/train-sets/ref/sequencespan_data.nonldf.test.stderr
@@ -7,11 +7,9 @@ predictions = sequencespan_data.predict
using no cache
Reading datafile = train-sets/sequencespan_data
num sources = 1
-average since example example current current current
-loss last counter weight label predict features
-average since sequence example current label current predicted current cur cur predic. examples
-loss last counter weight sequence prefix sequence prefix features pass pol made gener.
-0.000000 0.000000 1 1.000000 [2 1 1 2 2 1 6 7 7 ..] [2 1 1 2 2 1 6 7 7 ..] 45 0 0 15 0
+average since instance current true current predicted cur cur predic cache examples
+loss last counter output prefix output prefix pass pol made hits gener beta
+0.000000 0.000000 1 [2 1 1 2 2 1 6 7 7 ..] [2 1 1 2 2 1 6 7 7 ..] 0 0 15 0 0 0.000000
finished run
number of examples per pass = 1
diff --git a/test/train-sets/ref/sequencespan_data.nonldf.train.stderr b/test/train-sets/ref/sequencespan_data.nonldf.train.stderr
index 649e61a5..1d7db07c 100644
--- a/test/train-sets/ref/sequencespan_data.nonldf.train.stderr
+++ b/test/train-sets/ref/sequencespan_data.nonldf.train.stderr
@@ -7,21 +7,19 @@ decay_learning_rate = 1
creating cache_file = train-sets/sequencespan_data.cache
Reading datafile = train-sets/sequencespan_data
num sources = 1
-average since example example current current current
-loss last counter weight label predict features
-average since sequence example current label current predicted current cur cur predic. examples
-loss last counter weight sequence prefix sequence prefix features pass pol made gener.
-10.000000 10.000000 1 1.000000 [2 1 1 2 2 1 6 7 7 ..] [1 1 1 1 1 1 1 1 1 ..] 45 0 0 15 15
-11.500000 13.000000 2 2.000000 [2 1 1 2 2 1 6 7 7 ..] [2 1 6 4 1 6 4 1 6 ..] 45 1 0 30 30
-7.500000 3.500000 4 4.000000 [2 1 1 2 2 1 6 7 7 ..] [2 1 1 2 2 1 6 7 7 ..] 45 3 0 60 60
-3.750000 0.000000 8 8.000000 [2 1 1 2 2 1 6 7 7 ..] [2 1 1 2 2 1 6 7 7 ..] 45 7 0 120 120
-1.875000 0.000000 16 16.000000 [2 1 1 2 2 1 6 7 7 ..] [2 1 1 2 2 1 6 7 7 ..] 45 15 0 240 240
+average since instance current true current predicted cur cur predic cache examples
+loss last counter output prefix output prefix pass pol made hits gener beta
+6.000000 6.000000 1 [2 1 1 2 2 1 6 7 7 ..] [1 1 1 1 1 1 1 1 1 ..] 0 0 15 0 15 0.000000
+8.500000 11.000000 2 [2 1 1 2 2 1 6 7 7 ..] [2 1 6 4 1 6 4 1 6 ..] 1 0 30 0 30 0.000000
+6.000000 3.500000 4 [2 1 1 2 2 1 6 7 7 ..] [2 1 1 2 2 1 6 7 7 ..] 3 0 60 0 60 0.000001
+3.000000 0.000000 8 [2 1 1 2 2 1 6 7 7 ..] [2 1 1 2 2 1 6 7 7 ..] 7 0 120 0 120 0.000001
+1.500000 0.000000 16 [2 1 1 2 2 1 6 7 7 ..] [2 1 1 2 2 1 6 7 7 ..] 15 0 240 0 240 0.000002
finished run
number of examples per pass = 1
passes used = 20
weighted example sum = 20
weighted label sum = 0
-average loss = 1.5
+average loss = 1.2
best constant = -0.0526316
total feature number = 900