Welcome to mirror list, hosted at ThFree Co, Russian Federation.

github.com/moses-smt/mosesdecoder.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author	phikoehn <pkoehn@inf.ed.ac.uk>	2014-07-23 18:44:55 +0400
committer	phikoehn <pkoehn@inf.ed.ac.uk>	2014-07-23 18:44:55 +0400
commit	573076976f7d24f67a60e3a8e4190110517f1f91 (patch)
tree	02ca9fc99941310043605991c0ceb783867d3670
parent	2d11fe39161e8c5ce0fa7bb66d3aa82ef6919b28 (diff)
added transliteration into ems example config, minor fixes
-rwxr-xr-x	scripts/Transliteration/post-decoding-transliteration.pl	9
-rwxr-xr-x	scripts/Transliteration/train-transliteration-module.pl	12
-rw-r--r--	scripts/ems/example/config.basic	15
-rw-r--r--	scripts/ems/example/config.factored	15
-rw-r--r--	scripts/ems/example/config.hierarchical	15
-rw-r--r--	scripts/ems/example/config.syntax	15
-rw-r--r--	scripts/ems/example/config.toy	15
7 files changed, 69 insertions(+), 27 deletions(-)
diff --git a/scripts/Transliteration/post-decoding-transliteration.pl b/scripts/Transliteration/post-decoding-transliteration.pl
index 8aca3460d..69fd8bf46 100755
--- a/scripts/Transliteration/post-decoding-transliteration.pl
+++ b/scripts/Transliteration/post-decoding-transliteration.pl
@@ -21,12 +21,12 @@ die("ERROR: wrong syntax when invoking postDecodingTransliteration.perl")
'transliteration-model-dir=s' => \$TRANSLIT_MODEL,
'input-extension=s' => \$INPUT_EXTENSION,
'output-extension=s' => \$OUTPUT_EXTENSION,
- 'decoder=s' => \$DECODER,
+ 'decoder=s' => \$DECODER,
'oov-file=s' => \$OOV_FILE,
'input-file=s' => \$INPUT_FILE,
'output-file=s' => \$OUTPUT_FILE,
'verbose' => \$VERBOSE,
- 'language-model=s' => \$LM_FILE);
+ 'language-model=s' => \$LM_FILE);
# check if the files are in place
die("ERROR: you need to define --moses-src-dir --external-bin-dir, --transliteration-model-dir, --oov-file, --output-file --input-extension, --output-extension, and --language-model")
@@ -38,6 +38,11 @@ die("ERROR: you need to define --moses-src-dir --external-bin-dir, --translitera
defined($INPUT_FILE)&&
defined($EXTERNAL_BIN_DIR)&&
defined($LM_FILE));
+if (! -e $LM_FILE) {
+ my $LM_FILE_WORD = `ls $LM_FILE*word*`;
+ chop($LM_FILE_WORD);
+ $LM_FILE = $LM_FILE_WORD if $LM_FILE_WORD ne "";
+}
die("ERROR: could not find Language Model '$LM_FILE'")
unless -e $LM_FILE;
die("ERROR: could not find Transliteration Model '$TRANSLIT_MODEL'")
diff --git a/scripts/Transliteration/train-transliteration-module.pl b/scripts/Transliteration/train-transliteration-module.pl
index 355232222..7739e2a2b 100755
--- a/scripts/Transliteration/train-transliteration-module.pl
+++ b/scripts/Transliteration/train-transliteration-module.pl
@@ -13,7 +13,7 @@ print STDERR "Training Transliteration Module - Start\n".`date`;
my $ORDER = 5;
my $OUT_DIR = "/tmp/Transliteration-Model.$$";
my $___FACTOR_DELIMITER = "|";
-my ($MOSES_SRC_DIR,$CORPUS_F,$CORPUS_E,$ALIGNMENT,$SRILM_DIR,$FACTOR,$EXTERNAL_BIN_DIR,$INPUT_EXTENSION, $OUTPUT_EXTENSION, $SOURCE_SYNTAX, $TARGET_SYNTAX);
+my ($MOSES_SRC_DIR,$CORPUS_F,$CORPUS_E,$ALIGNMENT,$SRILM_DIR,$FACTOR,$EXTERNAL_BIN_DIR,$INPUT_EXTENSION, $OUTPUT_EXTENSION, $SOURCE_SYNTAX, $TARGET_SYNTAX,$DECODER);
# utilities
my $ZCAT = "gzip -cd";
@@ -31,8 +31,9 @@ die("ERROR: wrong syntax when invoking train-transliteration-module.perl")
'factor=s' => \$FACTOR,
'srilm-dir=s' => \$SRILM_DIR,
'out-dir=s' => \$OUT_DIR,
- 'source-syntax' => \$SOURCE_SYNTAX,
- 'target-syntax' => \$TARGET_SYNTAX);
+ 'decoder=s' => \$DECODER,
+ 'source-syntax' => \$SOURCE_SYNTAX,
+ 'target-syntax' => \$TARGET_SYNTAX);
# check if the files are in place
die("ERROR: you need to define --corpus-e, --corpus-f, --alignment, --srilm-dir, --moses-src-dir --external-bin-dir, --input-extension and --output-extension")
@@ -48,8 +49,9 @@ die("ERROR: could not find input corpus file '$CORPUS_F'")
unless -e $CORPUS_F;
die("ERROR: could not find output corpus file '$CORPUS_E'")
unless -e $CORPUS_E;
-die("ERROR: could not find algnment file '$ALIGNMENT'")
+die("ERROR: could not find alignment file '$ALIGNMENT'")
unless -e $ALIGNMENT;
+$DECODER = "$MOSES_SRC_DIR/bin/moses" unless defined($DECODER);
`mkdir $OUT_DIR`;
@@ -184,7 +186,7 @@ sub train_transliteration_module{
`$MOSES_SRC_DIR/scripts/ems/support/substitute-filtered-tables.perl $OUT_DIR/tuning/filtered/moses.ini < $OUT_DIR/model/moses.ini > $OUT_DIR/tuning/moses.filtered.ini`;
- `$MOSES_SRC_DIR/scripts/training/mert-moses.pl $OUT_DIR/tuning/input $OUT_DIR/tuning/reference $MOSES_SRC_DIR/bin/moses $OUT_DIR/tuning/moses.filtered.ini --nbest 100 --working-dir $OUT_DIR/tuning/tmp --decoder-flags "-threads 16 -drop-unknown -v 0 -distortion-limit 0" --rootdir $MOSES_SRC_DIR/scripts -mertdir $MOSES_SRC_DIR/mert -threads=16 --no-filter-phrase-table`;
+ `$MOSES_SRC_DIR/scripts/training/mert-moses.pl $OUT_DIR/tuning/input $OUT_DIR/tuning/reference $DECODER $OUT_DIR/tuning/moses.filtered.ini --nbest 100 --working-dir $OUT_DIR/tuning/tmp --decoder-flags "-threads 16 -drop-unknown -v 0 -distortion-limit 0" --rootdir $MOSES_SRC_DIR/scripts -mertdir $MOSES_SRC_DIR/mert -threads=16 --no-filter-phrase-table`;
`cp $OUT_DIR/tuning/tmp/moses.ini $OUT_DIR/tuning/moses.ini`;
diff --git a/scripts/ems/example/config.basic b/scripts/ems/example/config.basic
index 8a813777e..8421a8fa1 100644
--- a/scripts/ems/example/config.basic
+++ b/scripts/ems/example/config.basic
@@ -137,7 +137,7 @@ raw-stem = $wmt12-data/training/undoc.2000.$pair-extension
### tool to be used for language model training
# kenlm training
lm-training = "$moses-script-dir/ems/support/lmplz-wrapper.perl -bin $moses-bin-dir/lmplz"
-settings = "--prune '0 0 1' -T $working-dir/lm/tmp -S 50%"
+settings = "--prune '0 0 1' -T $working-dir/lm -S 20%"
# srilm
#lm-training = $srilm-dir/ngram-count
@@ -299,6 +299,7 @@ script = $moses-script-dir/training/train-model.perl
# * "-mgiza -mgiza-cpus 8" to use mgiza instead of giza
# * "-sort-buffer-size 8G -sort-compress gzip" to reduce on-disk sorting
# * "-sort-parallel 8 -cores 8" to speed up phrase table building
+# * "-parallel" for parallel execution of mkcls and giza
#
#training-options = ""
@@ -388,11 +389,17 @@ alignment-symmetrization-method = grow-diag-final-and
#operation-sequence-model = "yes"
#operation-sequence-model-order = 5
#operation-sequence-model-settings = "-lmplz '$moses-src-dir/bin/lmplz -S 40% -T $working-dir/model/tmp'"
+#
+# if OSM training should be skipped, point to OSM Model
+#osm-model =
-### if OSM training should be skipped,
-# point to OSM Model
+### unsupervised transliteration module
+# Durrani, Sajjad, Hoang and Koehn (EACL, 2014).
+# "Integrating an Unsupervised Transliteration Model
+# into Statistical Machine Translation."
#
-# osm-model =
+#transliteration-module = "yes"
+#post-decoding-transliteration = "yes"
### lexicalized reordering: specify orientation type
# (default: only distance-based reordering model)
diff --git a/scripts/ems/example/config.factored b/scripts/ems/example/config.factored
index 2faa5de4c..9aff587ff 100644
--- a/scripts/ems/example/config.factored
+++ b/scripts/ems/example/config.factored
@@ -137,7 +137,7 @@ raw-stem = $wmt12-data/training/undoc.2000.$pair-extension
### tool to be used for language model training
# kenlm training
lm-training = "$moses-script-dir/ems/support/lmplz-wrapper.perl -bin $moses-bin-dir/lmplz"
-settings = "--prune '0 0 1' -T $working-dir/lm/tmp -S 50%"
+settings = "--prune '0 0 1' -T $working-dir/lm -S 20%"
# srilm
#lm-training = $srilm-dir/ngram-count
@@ -319,6 +319,7 @@ script = $moses-script-dir/training/train-model.perl
# * "-mgiza -mgiza-cpus 8" to use mgiza instead of giza
# * "-sort-buffer-size 8G -sort-compress gzip" to reduce on-disk sorting
# * "-sort-parallel 8 -cores 8" to speed up phrase table building
+# * "-parallel" for parallel execution of mkcls and giza
#
#training-options = ""
@@ -408,11 +409,17 @@ alignment-symmetrization-method = grow-diag-final-and
#operation-sequence-model = "yes"
#operation-sequence-model-order = 5
#operation-sequence-model-settings = "-lmplz '$moses-src-dir/bin/lmplz -S 40% -T $working-dir/model/tmp'"
+#
+# if OSM training should be skipped, point to OSM Model
+#osm-model =
-### if OSM training should be skipped,
-# point to OSM Model
+### unsupervised transliteration module
+# Durrani, Sajjad, Hoang and Koehn (EACL, 2014).
+# "Integrating an Unsupervised Transliteration Model
+# into Statistical Machine Translation."
#
-# osm-model =
+#transliteration-module = "yes"
+#post-decoding-transliteration = "yes"
### lexicalized reordering: specify orientation type
# (default: only distance-based reordering model)
diff --git a/scripts/ems/example/config.hierarchical b/scripts/ems/example/config.hierarchical
index 0494228ff..9d47aa001 100644
--- a/scripts/ems/example/config.hierarchical
+++ b/scripts/ems/example/config.hierarchical
@@ -137,7 +137,7 @@ raw-stem = $wmt12-data/training/undoc.2000.$pair-extension
### tool to be used for language model training
# kenlm training
lm-training = "$moses-script-dir/ems/support/lmplz-wrapper.perl -bin $moses-bin-dir/lmplz"
-settings = "--prune '0 0 1' -T $working-dir/lm/tmp -S 50%"
+settings = "--prune '0 0 1' -T $working-dir/lm -S 20%"
# srilm
#lm-training = $srilm-dir/ngram-count
@@ -299,6 +299,7 @@ script = $moses-script-dir/training/train-model.perl
# * "-mgiza -mgiza-cpus 8" to use mgiza instead of giza
# * "-sort-buffer-size 8G -sort-compress gzip" to reduce on-disk sorting
# * "-sort-parallel 8 -cores 8" to speed up phrase table building
+# * "-parallel" for parallel execution of mkcls and giza
#
#training-options = ""
@@ -388,11 +389,17 @@ alignment-symmetrization-method = grow-diag-final-and
#operation-sequence-model = "yes"
#operation-sequence-model-order = 5
#operation-sequence-model-settings = "-lmplz '$moses-src-dir/bin/lmplz -S 40% -T $working-dir/model/tmp'"
+#
+# if OSM training should be skipped, point to OSM Model
+#osm-model =
-### if OSM training should be skipped,
-# point to OSM Model
+### unsupervised transliteration module
+# Durrani, Sajjad, Hoang and Koehn (EACL, 2014).
+# "Integrating an Unsupervised Transliteration Model
+# into Statistical Machine Translation."
#
-# osm-model =
+#transliteration-module = "yes"
+#post-decoding-transliteration = "yes"
### lexicalized reordering: specify orientation type
# (default: only distance-based reordering model)
diff --git a/scripts/ems/example/config.syntax b/scripts/ems/example/config.syntax
index 93248c672..d874e74c0 100644
--- a/scripts/ems/example/config.syntax
+++ b/scripts/ems/example/config.syntax
@@ -141,7 +141,7 @@ raw-stem = $wmt12-data/training/undoc.2000.$pair-extension
### tool to be used for language model training
# kenlm training
lm-training = "$moses-script-dir/ems/support/lmplz-wrapper.perl -bin $moses-bin-dir/lmplz"
-settings = "--prune '0 0 1' -T $working-dir/lm/tmp -S 50%"
+settings = "--prune '0 0 1' -T $working-dir/lm -S 20%"
# srilm
#lm-training = $srilm-dir/ngram-count
@@ -303,6 +303,7 @@ script = $moses-script-dir/training/train-model.perl
# * "-mgiza -mgiza-cpus 8" to use mgiza instead of giza
# * "-sort-buffer-size 8G -sort-compress gzip" to reduce on-disk sorting
# * "-sort-parallel 8 -cores 8" to speed up phrase table building
+# * "-parallel" for parallel execution of mkcls and giza
#
#training-options = ""
@@ -392,11 +393,17 @@ alignment-symmetrization-method = grow-diag-final-and
#operation-sequence-model = "yes"
#operation-sequence-model-order = 5
#operation-sequence-model-settings = "-lmplz '$moses-src-dir/bin/lmplz -S 40% -T $working-dir/model/tmp'"
+#
+# if OSM training should be skipped, point to OSM Model
+#osm-model =
-### if OSM training should be skipped,
-# point to OSM Model
+### unsupervised transliteration module
+# Durrani, Sajjad, Hoang and Koehn (EACL, 2014).
+# "Integrating an Unsupervised Transliteration Model
+# into Statistical Machine Translation."
#
-# osm-model =
+#transliteration-module = "yes"
+#post-decoding-transliteration = "yes"
### lexicalized reordering: specify orientation type
# (default: only distance-based reordering model)
diff --git a/scripts/ems/example/config.toy b/scripts/ems/example/config.toy
index a89ea428f..195a89fa5 100644
--- a/scripts/ems/example/config.toy
+++ b/scripts/ems/example/config.toy
@@ -131,7 +131,7 @@ raw-stem = $toy-data/nc-5k
### tool to be used for language model training
# kenlm training
lm-training = "$moses-script-dir/ems/support/lmplz-wrapper.perl -bin $moses-bin-dir/lmplz"
-settings = "--prune '0 0 1' -T $working-dir/lm/tmp -S 50%"
+settings = "--prune '0 0 1' -T $working-dir/lm -S 20%"
# srilm
#lm-training = $srilm-dir/ngram-count
@@ -283,6 +283,7 @@ script = $moses-script-dir/training/train-model.perl
# * "-mgiza -mgiza-cpus 8" to use mgiza instead of giza
# * "-sort-buffer-size 8G -sort-compress gzip" to reduce on-disk sorting
# * "-sort-parallel 8 -cores 8" to speed up phrase table building
+# * "-parallel" for parallel execution of mkcls and giza
#
#training-options = ""
@@ -372,11 +373,17 @@ alignment-symmetrization-method = grow-diag-final-and
#operation-sequence-model = "yes"
#operation-sequence-model-order = 5
#operation-sequence-model-settings = ""
+#
+# if OSM training should be skipped, point to OSM Model
+#osm-model =
-### if OSM training should be skipped,
-# point to OSM Model
+### unsupervised transliteration module
+# Durrani, Sajjad, Hoang and Koehn (EACL, 2014).
+# "Integrating an Unsupervised Transliteration Model
+# into Statistical Machine Translation."
#
-# osm-model =
+#transliteration-module = "yes"
+#post-decoding-transliteration = "yes"
### lexicalized reordering: specify orientation type
# (default: only distance-based reordering model)