Welcome to the mirror list, hosted at ThFree Co, Russian Federation.

github.com/moses-smt/mosesdecoder.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
Diffstat (limited to 'scripts/ems/example/config.toy')
-rw-r--r--scripts/ems/example/config.toy15
1 file changed, 11 insertions, 4 deletions
diff --git a/scripts/ems/example/config.toy b/scripts/ems/example/config.toy
index a89ea428f..195a89fa5 100644
--- a/scripts/ems/example/config.toy
+++ b/scripts/ems/example/config.toy
@@ -131,7 +131,7 @@ raw-stem = $toy-data/nc-5k
### tool to be used for language model training
# kenlm training
lm-training = "$moses-script-dir/ems/support/lmplz-wrapper.perl -bin $moses-bin-dir/lmplz"
-settings = "--prune '0 0 1' -T $working-dir/lm/tmp -S 50%"
+settings = "--prune '0 0 1' -T $working-dir/lm -S 20%"
# srilm
#lm-training = $srilm-dir/ngram-count
@@ -283,6 +283,7 @@ script = $moses-script-dir/training/train-model.perl
# * "-mgiza -mgiza-cpus 8" to use mgiza instead of giza
# * "-sort-buffer-size 8G -sort-compress gzip" to reduce on-disk sorting
# * "-sort-parallel 8 -cores 8" to speed up phrase table building
+# * "-parallel" for parallel execution of mkcls and giza
#
#training-options = ""
@@ -372,11 +373,17 @@ alignment-symmetrization-method = grow-diag-final-and
#operation-sequence-model = "yes"
#operation-sequence-model-order = 5
#operation-sequence-model-settings = ""
+#
+# if OSM training should be skipped, point to OSM Model
+#osm-model =
-### if OSM training should be skipped,
-# point to OSM Model
+### unsupervised transliteration module
+# Durrani, Sajjad, Hoang and Koehn (EACL, 2014).
+# "Integrating an Unsupervised Transliteration Model
+# into Statistical Machine Translation."
#
-# osm-model =
+#transliteration-module = "yes"
+#post-decoding-transliteration = "yes"
### lexicalized reordering: specify orientation type
# (default: only distance-based reordering model)