github.com/marian-nmt/marian-regression-tests.git
author     Roman Grundkiewicz <rgrundki@exseed.ed.ac.uk>  2020-04-13 16:17:36 +0300
committer  Roman Grundkiewicz <rgrundki@exseed.ed.ac.uk>  2020-04-13 16:17:36 +0300
commit     2840456f969585cf36f6ef94c8efab63fe5a3e1a
tree       78b436308f107ab3dd46c8b2df51ab064d75005e  /tests
parent     6bb7d43ac2c8912f3a13bd1d335542e059eb98ee
Call python3 explicitly
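
The change is mechanical: every Python helper script in the affected tests is now run through the python3 interpreter, rather than executed directly (relying on the script's shebang) or through a bare python, which on current systems may resolve to Python 2 or not exist at all. A minimal sketch of the pattern, taken from the sacrebleu.py call in the WNGT19 tests below:

# Before: depends on the script's shebang/executable bit, or on whatever
# interpreter an unversioned `python` happens to resolve to.
$MRT_TOOLS/sacrebleu/sacrebleu.py newstest2014.ref < $prefix.out | tee $prefix.out.bleu

# After: name the interpreter explicitly, so the test behaves the same
# regardless of the system default.
python3 $MRT_TOOLS/sacrebleu/sacrebleu.py newstest2014.ref < $prefix.out | tee $prefix.out.bleu

The same substitution is applied to the export_embeddings.py and model_info.py calls throughout the suite.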
Diffstat (limited to 'tests')
-rw-r--r--  tests/models/wngt19/test_model_base_fbgemm_packed16.sh                               | 2
-rw-r--r--  tests/models/wngt19/test_model_base_fbgemm_packed8.sh                                | 2
-rw-r--r--  tests/sentencepiece/test_bleu_detok.sh                                               | 2
-rw-r--r--  tests/training/features/custom-embeddings/test_custom_embeddings.sh                  | 2
-rw-r--r--  tests/training/features/custom-embeddings/test_custom_embeddings_s2s_tied.sh         | 2
-rw-r--r--  tests/training/features/custom-embeddings/test_custom_embeddings_tied_srctrg.sh      | 2
-rw-r--r--  tests/training/features/custom-embeddings/test_custom_embeddings_transformer.sh      | 2
-rw-r--r--  tests/training/features/custom-embeddings/test_custom_embeddings_transformer_tied.sh | 2
-rw-r--r--  tests/training/models/amun/test_amun_has_decoder_c_tt.sh                             | 2
-rw-r--r--  tests/training/restoring/optimizer/test_adagrad_params.sh                            | 4
-rw-r--r--  tests/training/restoring/optimizer/test_adam_params.sh                               | 6
-rw-r--r--  tests/training/restoring/optimizer/test_adam_params_async.sh                         | 6
-rw-r--r--  tests/training/restoring/optimizer/test_adam_params_sync.sh                          | 6
-rwxr-xr-x  tests/training/validation/compare-trans.sh                                           | 2
14 files changed, 21 insertions, 21 deletions
diff --git a/tests/models/wngt19/test_model_base_fbgemm_packed16.sh b/tests/models/wngt19/test_model_base_fbgemm_packed16.sh
index f49373e..d2b5703 100644
--- a/tests/models/wngt19/test_model_base_fbgemm_packed16.sh
+++ b/tests/models/wngt19/test_model_base_fbgemm_packed16.sh
@@ -42,7 +42,7 @@ $MRT_MARIAN/marian-decoder \
--max-length 150 --max-length-crop --quiet-translation
# Print current and expected BLEU for debugging
-$MRT_TOOLS/sacrebleu/sacrebleu.py newstest2014.ref < $prefix.out | tee $prefix.out.bleu
+python3 $MRT_TOOLS/sacrebleu/sacrebleu.py newstest2014.ref < $prefix.out | tee $prefix.out.bleu
cat $prefix.$suffix.expected.bleu
# Compare with the expected output
diff --git a/tests/models/wngt19/test_model_base_fbgemm_packed8.sh b/tests/models/wngt19/test_model_base_fbgemm_packed8.sh
index b57e402..fceecbf 100644
--- a/tests/models/wngt19/test_model_base_fbgemm_packed8.sh
+++ b/tests/models/wngt19/test_model_base_fbgemm_packed8.sh
@@ -42,7 +42,7 @@ $MRT_MARIAN/marian-decoder \
--max-length 150 --max-length-crop --quiet-translation
# Print current and expected BLEU for debugging
-$MRT_TOOLS/sacrebleu/sacrebleu.py newstest2014.ref < $prefix.out | tee $prefix.out.bleu
+python3 $MRT_TOOLS/sacrebleu/sacrebleu.py newstest2014.ref < $prefix.out | tee $prefix.out.bleu
cat $prefix.$suffix.expected.bleu
# Compare with the expected output
diff --git a/tests/sentencepiece/test_bleu_detok.sh b/tests/sentencepiece/test_bleu_detok.sh
index 0f6ce90..00374ce 100644
--- a/tests/sentencepiece/test_bleu_detok.sh
+++ b/tests/sentencepiece/test_bleu_detok.sh
@@ -38,7 +38,7 @@ $MRT_TOOLS/diff.sh bleu-detok.bleu bleu-detok.bleu.expected > bleu-detok.bleu.di
# Run sacreBLEU removing the version information
-$MRT_TOOLS/sacrebleu/sacrebleu.py dev.en < bleu-detok.out | sed -r 's/.version[^ ]* / /' > bleu-detok.sacrebleu
+python3 $MRT_TOOLS/sacrebleu/sacrebleu.py dev.en < bleu-detok.out | sed -r 's/.version[^ ]* / /' > bleu-detok.sacrebleu
# Check BLEU from the validation translation output
$MRT_TOOLS/diff.sh bleu-detok.sacrebleu bleu-detok.sacrebleu.expected > bleu-detok.sacrebleu.diff
diff --git a/tests/training/features/custom-embeddings/test_custom_embeddings.sh b/tests/training/features/custom-embeddings/test_custom_embeddings.sh
index a78487b..66a510d 100644
--- a/tests/training/features/custom-embeddings/test_custom_embeddings.sh
+++ b/tests/training/features/custom-embeddings/test_custom_embeddings.sh
@@ -25,7 +25,7 @@ test -e custom_emb.log
grep -q "Loading embedding vectors from" custom_emb.log
# Check if embeddings in the saved model are very similar to the original vectors
-$MRT_MARIAN/../scripts/embeddings/export_embeddings.py -m custom_emb/model.npz -o custom_emb.all
+python3 $MRT_MARIAN/../scripts/embeddings/export_embeddings.py -m custom_emb/model.npz -o custom_emb.all
# The custom embeddings have been trained only for the first 100 words from each vocabulary
cat custom_emb.all.src | head -n 101 > custom_emb.src
diff --git a/tests/training/features/custom-embeddings/test_custom_embeddings_s2s_tied.sh b/tests/training/features/custom-embeddings/test_custom_embeddings_s2s_tied.sh
index d498327..ca03b77 100644
--- a/tests/training/features/custom-embeddings/test_custom_embeddings_s2s_tied.sh
+++ b/tests/training/features/custom-embeddings/test_custom_embeddings_s2s_tied.sh
@@ -25,7 +25,7 @@ test -e custom_emb_tied.log
grep -q "Loading embedding vectors from" custom_emb_tied.log
# Check if embeddings in the saved model are very similar to the original vectors
-$MRT_MARIAN/../scripts/embeddings/export_embeddings.py -m custom_emb_tied/model.npz -o custom_emb_tied.all
+python3 $MRT_MARIAN/../scripts/embeddings/export_embeddings.py -m custom_emb_tied/model.npz -o custom_emb_tied.all
# The custom embeddings have been trained only for the first 100 words from each vocabulary
cat custom_emb_tied.all.all | head -n 101 > custom_emb_tied.all
diff --git a/tests/training/features/custom-embeddings/test_custom_embeddings_tied_srctrg.sh b/tests/training/features/custom-embeddings/test_custom_embeddings_tied_srctrg.sh
index 9b278e3..0de46ed 100644
--- a/tests/training/features/custom-embeddings/test_custom_embeddings_tied_srctrg.sh
+++ b/tests/training/features/custom-embeddings/test_custom_embeddings_tied_srctrg.sh
@@ -25,7 +25,7 @@ test -e custom_emb_srctrg.log
grep -q "Loading embedding vectors from" custom_emb_srctrg.log
# Check if embeddings in the saved model are very similar to the original vectors
-$MRT_MARIAN/../scripts/embeddings/export_embeddings.py -m custom_emb_srctrg/model.npz -o custom_emb_srctrg.all
+python3 $MRT_MARIAN/../scripts/embeddings/export_embeddings.py -m custom_emb_srctrg/model.npz -o custom_emb_srctrg.all
# The custom embeddings have been trained only for the first 100 words from each vocabulary
cat custom_emb_srctrg.all.all | head -n 101 > custom_emb_srctrg.all
diff --git a/tests/training/features/custom-embeddings/test_custom_embeddings_transformer.sh b/tests/training/features/custom-embeddings/test_custom_embeddings_transformer.sh
index baa3505..9baf461 100644
--- a/tests/training/features/custom-embeddings/test_custom_embeddings_transformer.sh
+++ b/tests/training/features/custom-embeddings/test_custom_embeddings_transformer.sh
@@ -26,7 +26,7 @@ test -e custom_emb_transformer.log
grep -q "Loading embedding vectors from" custom_emb_transformer.log
# Check if embeddings in the saved model are very similar to the original vectors
-$MRT_MARIAN/../scripts/embeddings/export_embeddings.py -m custom_emb_transformer/model.npz -o custom_emb_transformer.all
+python3 $MRT_MARIAN/../scripts/embeddings/export_embeddings.py -m custom_emb_transformer/model.npz -o custom_emb_transformer.all
# The custom embeddings have been trained only for the first 100 words from each vocabulary
cat custom_emb_transformer.all.src | head -n 101 > custom_emb_transformer.src
diff --git a/tests/training/features/custom-embeddings/test_custom_embeddings_transformer_tied.sh b/tests/training/features/custom-embeddings/test_custom_embeddings_transformer_tied.sh
index fe412c7..8da401b 100644
--- a/tests/training/features/custom-embeddings/test_custom_embeddings_transformer_tied.sh
+++ b/tests/training/features/custom-embeddings/test_custom_embeddings_transformer_tied.sh
@@ -24,7 +24,7 @@ test -e custom_emb_transformer_tied.log
grep -q "Loading embedding vectors from" custom_emb_transformer_tied.log
# Check if embeddings in the saved model are very similar to the original vectors
-$MRT_MARIAN/../scripts/embeddings/export_embeddings.py -m custom_emb_transformer_tied/model.npz -o custom_emb_transformer_tied.all
+python3 $MRT_MARIAN/../scripts/embeddings/export_embeddings.py -m custom_emb_transformer_tied/model.npz -o custom_emb_transformer_tied.all
# The custom embeddings have been trained only for the first 100 words from each vocabulary
cat custom_emb_transformer_tied.all.all | head -n 101 > custom_emb_transformer_tied.all
diff --git a/tests/training/models/amun/test_amun_has_decoder_c_tt.sh b/tests/training/models/amun/test_amun_has_decoder_c_tt.sh
index cfb6af7..5aa3116 100644
--- a/tests/training/models/amun/test_amun_has_decoder_c_tt.sh
+++ b/tests/training/models/amun/test_amun_has_decoder_c_tt.sh
@@ -21,7 +21,7 @@ $MRT_MARIAN/marian \
test -e decoder_c_tt/model.npz
-$MRT_MARIAN/../scripts/contrib/model_info.py -m decoder_c_tt/model.npz > decoder_c_tt.out
+python3 $MRT_MARIAN/../scripts/contrib/model_info.py -m decoder_c_tt/model.npz > decoder_c_tt.out
grep -q "decoder_c_tt" decoder_c_tt.out
# Exit with success code
diff --git a/tests/training/restoring/optimizer/test_adagrad_params.sh b/tests/training/restoring/optimizer/test_adagrad_params.sh
index ac56fcc..4b5d420 100644
--- a/tests/training/restoring/optimizer/test_adagrad_params.sh
+++ b/tests/training/restoring/optimizer/test_adagrad_params.sh
@@ -20,10 +20,10 @@ test -e adagrad.log
$MRT_TOOLS/extract-costs.sh < adagrad.log > adagrad.costs.out
$MRT_TOOLS/diff-nums.py adagrad.costs.out adagrad.costs.expected -p 0.2 -o adagrad.costs.diff
-$MRT_MARIAN/../scripts/contrib/model_info.py -m adagrad/model.npz.optimizer.npz > adagrad.keys.out
+python3 $MRT_MARIAN/../scripts/contrib/model_info.py -m adagrad/model.npz.optimizer.npz > adagrad.keys.out
$MRT_TOOLS/diff.sh adagrad.keys.out adagrad.keys.expected > adagrad.keys.diff
-$MRT_MARIAN/../scripts/contrib/model_info.py -m adagrad/model.npz.optimizer.npz -k "adagrad_gt" > adagrad.gt.out
+python3 $MRT_MARIAN/../scripts/contrib/model_info.py -m adagrad/model.npz.optimizer.npz -k "adagrad_gt" > adagrad.gt.out
$MRT_TOOLS/diff-nums.py --numpy -p 0.001 adagrad.gt.out adagrad.gt.expected -o adagrad.gt.diff
# Exit with success code
diff --git a/tests/training/restoring/optimizer/test_adam_params.sh b/tests/training/restoring/optimizer/test_adam_params.sh
index 867f4f9..e70663f 100644
--- a/tests/training/restoring/optimizer/test_adam_params.sh
+++ b/tests/training/restoring/optimizer/test_adam_params.sh
@@ -20,12 +20,12 @@ test -e adam.log
$MRT_TOOLS/extract-costs.sh < adam.log > adam.costs.out
$MRT_TOOLS/diff-nums.py adam.costs.out adam.costs.expected -p 0.2 -o adam.costs.diff
-$MRT_MARIAN/../scripts/contrib/model_info.py -m adam/model.npz.optimizer.npz > adam.keys.out
+python3 $MRT_MARIAN/../scripts/contrib/model_info.py -m adam/model.npz.optimizer.npz > adam.keys.out
$MRT_TOOLS/diff.sh adam.keys.out adam.keys.expected > adam.keys.diff
-$MRT_MARIAN/../scripts/contrib/model_info.py -m adam/model.npz.optimizer.npz -k "adam_mt" > adam.mt.out
+python3 $MRT_MARIAN/../scripts/contrib/model_info.py -m adam/model.npz.optimizer.npz -k "adam_mt" > adam.mt.out
$MRT_TOOLS/diff-nums.py --numpy -p 0.0001 adam.mt.out adam.mt.expected -o adam.mt.diff
-$MRT_MARIAN/../scripts/contrib/model_info.py -m adam/model.npz.optimizer.npz -k "adam_vt" > adam.vt.out
+python3 $MRT_MARIAN/../scripts/contrib/model_info.py -m adam/model.npz.optimizer.npz -k "adam_vt" > adam.vt.out
$MRT_TOOLS/diff-nums.py --numpy -p 0.0001 adam.vt.out adam.vt.expected -o adam.vt.diff
# Exit with success code
diff --git a/tests/training/restoring/optimizer/test_adam_params_async.sh b/tests/training/restoring/optimizer/test_adam_params_async.sh
index 46420ac..cb335c0 100644
--- a/tests/training/restoring/optimizer/test_adam_params_async.sh
+++ b/tests/training/restoring/optimizer/test_adam_params_async.sh
@@ -27,11 +27,11 @@ test -e adam_async.log
$MRT_TOOLS/extract-costs.sh < adam_async.log > adam_async.costs.out
$MRT_TOOLS/diff-nums.py adam_async.costs.out adam_async.costs.expected -p 500.0 -o adam_async.costs.diff
-python $MRT_MARIAN/../scripts/contrib/model_info.py -m adam_async/model.npz.optimizer.npz > adam_async.keys.out
+python3 $MRT_MARIAN/../scripts/contrib/model_info.py -m adam_async/model.npz.optimizer.npz > adam_async.keys.out
$MRT_TOOLS/diff.sh adam_async.keys.out adam.keys.expected > adam_async.keys.diff
-python $MRT_MARIAN/../scripts/contrib/model_info.py -m adam_async/model.npz.optimizer.npz -k "adam_mt" > adam_async.mt.out
-python $MRT_MARIAN/../scripts/contrib/model_info.py -m adam_async/model.npz.optimizer.npz -k "adam_vt" > adam_async.vt.out
+python3 $MRT_MARIAN/../scripts/contrib/model_info.py -m adam_async/model.npz.optimizer.npz -k "adam_mt" > adam_async.mt.out
+python3 $MRT_MARIAN/../scripts/contrib/model_info.py -m adam_async/model.npz.optimizer.npz -k "adam_vt" > adam_async.vt.out
$MRT_TOOLS/diff-nums.py --numpy -a -p 0.02 adam_async.mt.out adam_async.mt.expected -o adam_async.mt.diff
$MRT_TOOLS/diff-nums.py --numpy -p 0.001 adam_async.vt.out adam_async.vt.expected -o adam_async.vt.diff
diff --git a/tests/training/restoring/optimizer/test_adam_params_sync.sh b/tests/training/restoring/optimizer/test_adam_params_sync.sh
index 7b42ea8..6697e9b 100644
--- a/tests/training/restoring/optimizer/test_adam_params_sync.sh
+++ b/tests/training/restoring/optimizer/test_adam_params_sync.sh
@@ -25,11 +25,11 @@ test -e adam_sync.log
$MRT_TOOLS/extract-costs.sh < adam_sync.log > adam_sync.costs.out
$MRT_TOOLS/diff-nums.py adam_sync.costs.out adam_sync.costs.expected -p 3.00 -n 2 -o adam_sync.costs.diff
-python $MRT_MARIAN/../scripts/contrib/model_info.py -m adam_sync/model.npz.optimizer.npz > adam_sync.keys.out
+python3 $MRT_MARIAN/../scripts/contrib/model_info.py -m adam_sync/model.npz.optimizer.npz > adam_sync.keys.out
$MRT_TOOLS/diff.sh adam_sync.keys.out adam.keys.expected > adam_sync.keys.diff
-python $MRT_MARIAN/../scripts/contrib/model_info.py -m adam_sync/model.npz.optimizer.npz -k "adam_mt" > adam_sync.mt.out
-python $MRT_MARIAN/../scripts/contrib/model_info.py -m adam_sync/model.npz.optimizer.npz -k "adam_vt" > adam_sync.vt.out
+python3 $MRT_MARIAN/../scripts/contrib/model_info.py -m adam_sync/model.npz.optimizer.npz -k "adam_mt" > adam_sync.mt.out
+python3 $MRT_MARIAN/../scripts/contrib/model_info.py -m adam_sync/model.npz.optimizer.npz -k "adam_vt" > adam_sync.vt.out
$MRT_TOOLS/diff-nums.py --numpy -p 0.3 adam_sync.mt.out adam_sync.mt.expected -o adam_sync.mt.diff
$MRT_TOOLS/diff-nums.py --numpy -p 0.3 adam_sync.vt.out adam_sync.vt.expected -o adam_sync.vt.diff
diff --git a/tests/training/validation/compare-trans.sh b/tests/training/validation/compare-trans.sh
index 57b6536..40a6c24 100755
--- a/tests/training/validation/compare-trans.sh
+++ b/tests/training/validation/compare-trans.sh
@@ -1,3 +1,3 @@
#!/bin/bash
-cat $2 | $1/sacrebleu/sacrebleu.py --tokenize none -b dev.bpe.de
+cat $2 | python3 $1/sacrebleu/sacrebleu.py --tokenize none -b dev.bpe.de