github.com/moses-smt/mosesdecoder.git
author     phikoehn <pkoehn@inf.ed.ac.uk>    2012-05-26 03:06:34 +0400
committer  phikoehn <pkoehn@inf.ed.ac.uk>    2012-05-26 03:06:34 +0400
commit     180dd773f6507829c551c5512aaad7128f958385 (patch)
tree       51a4597b4e77c738d997e88d6f29d52edb9b7549 /scripts/ems
parent     480b2f2fcddd2f08bad1cb577bfae4517475991a (diff)
bolt specific settings
Diffstat (limited to 'scripts/ems')
 -rw-r--r--  scripts/ems/experiment.meta | 16
 1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/scripts/ems/experiment.meta b/scripts/ems/experiment.meta
index 51ac0f67a..aed6049ea 100644
--- a/scripts/ems/experiment.meta
+++ b/scripts/ems/experiment.meta
@@ -738,6 +738,20 @@ ibm-bleu-c
ignore-unless: ibm-bleu-c
rerun-on-change: ibm-bleu-c
template: $ibm-bleu-c -s $input-sgm -r IN1 -t IN > OUT
+bolt-bleu
+ in: detokenized-output
+ out: bolt-bleu-score
+ default-name: evaluation/bolt-bleu
+ ignore-unless: bolt-bleu
+ rerun-on-change: bolt-bleu
+ template: $bolt-bleu IN > OUT
+bolt-bleu-c
+ in: detokenized-output
+ out: bolt-bleu-c-score
+ default-name: evaluation/bolt-bleu-c
+ ignore-unless: bolt-bleu-c
+ rerun-on-change: bolt-bleu-c
+ template: $bolt-bleu-c IN > OUT
multi-bleu
in: cleaned-output reference
out: multi-bleu-score
@@ -793,6 +807,6 @@ analysis-precision
[REPORTING] single
report
- in: EVALUATION:nist-bleu-score EVALUATION:nist-bleu-c-score EVALUATION:multi-bleu-score EVALUATION:multi-bleu-c-score EVALUATION:meteor-score EVALUATION:ter-score EVALUATION:wer-score EVALUATION:ibm-bleu-score EVALUATION:ibm-bleu-c-score EVALUATION:analysis EVALUATION:analysis-coverage EVALUATION:analysis-prec TRAINING:biconcor-model
+ in: EVALUATION:nist-bleu-score EVALUATION:nist-bleu-c-score EVALUATION:bolt-bleu-score EVALUATION:bolt-bleu-c-score EVALUATION:multi-bleu-score EVALUATION:multi-bleu-c-score EVALUATION:meteor-score EVALUATION:ter-score EVALUATION:wer-score EVALUATION:ibm-bleu-score EVALUATION:ibm-bleu-c-score EVALUATION:analysis EVALUATION:analysis-coverage EVALUATION:analysis-prec TRAINING:biconcor-model
out: report
default-name: evaluation/report
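
For context, the two new bolt-bleu and bolt-bleu-c steps are guarded by ignore-unless, so EMS skips them unless the matching variables are set in the experiment configuration; the step templates then expand those variables into the scoring command ($bolt-bleu IN > OUT, with IN bound to the detokenized output and OUT to the score file). Below is a minimal sketch of how a user might enable them, assuming a hypothetical scorer script; the paths, script names, and the [EVALUATION:testset] section name are illustrative and not part of this commit.

[EVALUATION:testset]

# Hypothetical scorer commands; EMS substitutes them into the step
# templates "$bolt-bleu IN > OUT" and "$bolt-bleu-c IN > OUT".
bolt-bleu = /path/to/bolt-bleu-scorer
bolt-bleu-c = /path/to/bolt-bleu-scorer-cased

With these variables set, the resulting bolt-bleu-score and bolt-bleu-c-score outputs are picked up by the extended REPORTING report step above.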