
github.com/moses-smt/mosesdecoder.git - Moses, the statistical machine translation system.
author     Hieu Hoang <hieuhoang@gmail.com>    2015-05-03 13:07:12 +0300
committer  Hieu Hoang <hieuhoang@gmail.com>    2015-05-03 13:07:12 +0300
commit     d456d9229e20582b9e5a4e86d9f4666eef904940 (patch)
tree       50c8f388cde87c1f84cfa5577484a24ffa83107b /scripts
parent     e5f76ee99e1da3f9ef32338243ef046154f282d9 (diff)
add multi-bleu-detok. Like multi-bleu scoring but will detokenize/post-process before scoring
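The new evaluation steps follow the same stanza layout as the existing multi-bleu entries in experiment.meta: each names its inputs and output, a default file name, an ignore-unless/rerun-on-change guard, and a command template in which IN and IN1 are bound to the listed inputs in order (here the detokenized system output and the tokenized reference) and OUT to the score file. As a minimal sketch of the command EMS would run once the step is enabled (the concrete file paths are assumptions, not part of this commit):

    # hypothetical expansion of the multi-bleu-detok template
    $multi-bleu-detok evaluation/reference.tok < evaluation/cleaned-output.detok > evaluation/multi-bleu-detok-score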
Diffstat (limited to 'scripts')
-rw-r--r--  scripts/ems/experiment.meta  20
1 file changed, 19 insertions(+), 1 deletion(-)
diff --git a/scripts/ems/experiment.meta b/scripts/ems/experiment.meta
index d1448ef44..aa9a457bb 100644
--- a/scripts/ems/experiment.meta
+++ b/scripts/ems/experiment.meta
@@ -1350,6 +1350,24 @@ multi-bleu-c
rerun-on-change: multi-bleu-c
template: $multi-bleu-c IN1 < IN > OUT
final-model: yes
+
+multi-bleu-detok
+ in: detokenized-output tokenized-reference
+ out: multi-bleu-detok-score
+ default-name: evaluation/multi-bleu-detok
+ ignore-unless: multi-bleu-detok
+ rerun-on-change: multi-bleu-detok
+ template: $multi-bleu-detok IN1 < IN > OUT
+ final-model: yes
+multi-bleu-c-detok
+ in: detokenized-output tokenized-reference
+ out: multi-bleu-c-detok-score
+ default-name: evaluation/multi-bleu-c-detok
+ ignore-unless: multi-bleu-c-detok
+ rerun-on-change: multi-bleu-c-detok
+ template: $multi-bleu-c-detok IN1 < IN > OUT
+ final-model: yes
+
ter
in: wrapped-output reference-sgm
out: ter-score
@@ -1397,6 +1415,6 @@ analysis-precision
[REPORTING] single
report
- in: EVALUATION:nist-bleu-score EVALUATION:nist-bleu-c-score EVALUATION:bolt-bleu-score EVALUATION:bolt-bleu-c-score EVALUATION:multi-bleu-score EVALUATION:multi-bleu-c-score EVALUATION:meteor-score EVALUATION:ter-score EVALUATION:wer-score EVALUATION:ibm-bleu-score EVALUATION:ibm-bleu-c-score EVALUATION:analysis EVALUATION:analysis-coverage EVALUATION:analysis-prec TRAINING:biconcor-model EVALUATION:wade-analysis
+ in: EVALUATION:nist-bleu-score EVALUATION:nist-bleu-c-score EVALUATION:bolt-bleu-score EVALUATION:bolt-bleu-c-score EVALUATION:multi-bleu-score EVALUATION:multi-bleu-c-score EVALUATION:multi-bleu-detok-score EVALUATION:multi-bleu-c-detok-score EVALUATION:meteor-score EVALUATION:ter-score EVALUATION:wer-score EVALUATION:ibm-bleu-score EVALUATION:ibm-bleu-c-score EVALUATION:analysis EVALUATION:analysis-coverage EVALUATION:analysis-prec TRAINING:biconcor-model EVALUATION:wade-analysis
out: report
default-name: evaluation/report
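To actually produce the new scores, the user's experiment configuration must define the variable that each template references, e.g. in its [EVALUATION] section. A minimal sketch, assuming the detokenizing BLEU wrapper lives under scripts/generic (the script path is an assumption; this commit only touches experiment.meta):

    [EVALUATION]
    # assumed location of the detokenizing BLEU wrapper; adjust to your installation
    multi-bleu-detok = $moses-script-dir/generic/multi-bleu-detok.perl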