github.com/marian-nmt/marian-regression-tests.git
author     Roman Grundkiewicz <rgrundkiewicz@gmail.com>    2021-03-08 12:45:54 +0300
committer  Roman Grundkiewicz <rgrundkiewicz@gmail.com>    2021-03-08 12:45:54 +0300
commit     a54ad93763b580509b1444ee732c6dda3673f5b6 (patch)
tree       c0d2ff587664292865e61a3bf1ffe959529d4210
parent     c923a4c69676b0829324ac4b03acbfbecf3b3a63 (diff)
parent     243652dd6b3bc8ce2422b2719489832de4004571 (diff)

Merge branch 'mjd/fp16.2' into merge-fp16
-rw-r--r--  tests/interface/config/test_load_config.sh                    |  4
-rw-r--r--  tests/training/restoring/exp-smoothing/test_expsmooth.sh      |  6
-rw-r--r--  tests/training/restoring/exp-smoothing/test_expsmooth_s2s.sh  |  6
-rw-r--r--  tests/training/restoring/exp-smoothing/test_expsmooth_sync.sh |  6
-rw-r--r--  tests/training/restoring/optimizer/adagrad.keys.expected      |  1
-rw-r--r--  tests/training/restoring/optimizer/adam.keys.expected         |  1
-rw-r--r--  tests/training/scheduler/log_epoch_e.expected                 | 24
-rw-r--r--  tests/training/scheduler/log_epoch_t.expected                 | 18
-rw-r--r--  tests/training/scheduler/log_epoch_u.expected                 | 18
-rw-r--r--  tests/training/scheduler/test_logical_epoch.sh                |  2
-rw-r--r--  tests/training/scheduler/test_logical_epoch_labels.sh         |  2
-rw-r--r--  tests/training/scheduler/test_logical_epoch_updates.sh        |  2
12 files changed, 52 insertions(+), 38 deletions(-)
diff --git a/tests/interface/config/test_load_config.sh b/tests/interface/config/test_load_config.sh
index 0e66fd4..17fe51a 100644
--- a/tests/interface/config/test_load_config.sh
+++ b/tests/interface/config/test_load_config.sh
@@ -7,7 +7,7 @@ rm -rf load_config load_config.log no_config.log
mkdir -p load_config
# Run with no config file
-$MRT_MARIAN/marian --train-sets $MRT_DATA/europarl.de-en/corpus.bpe.{de,en} --model load_config/model.npz --vocabs vocab.de.yml vocab.en.yml --no-shuffle \
+$MRT_MARIAN/marian --train-sets $MRT_DATA/europarl.de-en/corpus.bpe.{de,en} --model load_config/model.npz --vocabs vocab.de.yml vocab.en.yml --no-shuffle --seed 2222 \
--type s2s --mini-batch 8 --dim-rnn 32 --dim-emb 16 --after-batches 2 --log load_config.log
test -e load_config/model.npz
@@ -19,7 +19,7 @@ rm -rf load_config
mkdir -p load_config
# Run with config file and the same options
-$MRT_MARIAN/marian --train-sets $MRT_DATA/europarl.de-en/corpus.bpe.{de,en} --model load_config/model.npz --vocabs vocab.de.yml vocab.en.yml --no-shuffle \
+$MRT_MARIAN/marian --train-sets $MRT_DATA/europarl.de-en/corpus.bpe.{de,en} --model load_config/model.npz --vocabs vocab.de.yml vocab.en.yml --no-shuffle --seed 2222 \
--config load_config.yml --log load_config.log
test -e load_config/model.npz
diff --git a/tests/training/restoring/exp-smoothing/test_expsmooth.sh b/tests/training/restoring/exp-smoothing/test_expsmooth.sh
index e7c7b6d..47040de 100644
--- a/tests/training/restoring/exp-smoothing/test_expsmooth.sh
+++ b/tests/training/restoring/exp-smoothing/test_expsmooth.sh
@@ -60,7 +60,8 @@ $MRT_MARIAN/marian \
--log expsmooth_1.log
test -e expsmooth/model.npz
-test -e expsmooth/model.npz.orig.npz
+# @TODO: check for averaged parameters in checkpoint file
+# test -e expsmooth/model.npz.orig.npz
test -e expsmooth_1.log
cat expsmooth_1.log | $MRT_TOOLS/strip-timestamps.sh | grep "Ep\. " | grep -v 'valid' | sed 's/ : Time.*//' > expsmooth.out
@@ -74,7 +75,8 @@ $MRT_MARIAN/marian \
--log expsmooth_2.log
test -e expsmooth/model.npz
-test -e expsmooth/model.npz.orig.npz
+# @TODO: check for averaged parameters in checkpoint file
+#test -e expsmooth/model.npz.orig.npz
test -e expsmooth_2.log
cat expsmooth_2.log | $MRT_TOOLS/strip-timestamps.sh | grep "Ep\. " | grep -v 'valid' | sed 's/ : Time.*//' >> expsmooth.out
diff --git a/tests/training/restoring/exp-smoothing/test_expsmooth_s2s.sh b/tests/training/restoring/exp-smoothing/test_expsmooth_s2s.sh
index 831ebec..6062a24 100644
--- a/tests/training/restoring/exp-smoothing/test_expsmooth_s2s.sh
+++ b/tests/training/restoring/exp-smoothing/test_expsmooth_s2s.sh
@@ -60,7 +60,8 @@ $MRT_MARIAN/marian \
--log expsmooth_s2s_1.log
test -e expsmooth_s2s/model.npz
-test -e expsmooth_s2s/model.npz.orig.npz
+# @TODO: test for smoothed parameters in checkpoint file
+# test -e expsmooth_s2s/model.npz.orig.npz
test -e expsmooth_s2s_1.log
cat expsmooth_s2s_1.log | $MRT_TOOLS/strip-timestamps.sh | grep "Ep\. " | grep -v 'valid' | sed 's/ : Time.*//' > expsmooth_s2s.out
@@ -74,7 +75,8 @@ $MRT_MARIAN/marian \
--log expsmooth_s2s_2.log
test -e expsmooth_s2s/model.npz
-test -e expsmooth_s2s/model.npz.orig.npz
+# @TODO: test for smoothed parameters in checkpoint file
+# test -e expsmooth_s2s/model.npz.orig.npz
test -e expsmooth_s2s_2.log
cat expsmooth_s2s_2.log | $MRT_TOOLS/strip-timestamps.sh | grep "Ep\. " | grep -v 'valid' | sed 's/ : Time.*//' >> expsmooth_s2s.out
diff --git a/tests/training/restoring/exp-smoothing/test_expsmooth_sync.sh b/tests/training/restoring/exp-smoothing/test_expsmooth_sync.sh
index eafc1cc..6ab255c 100644
--- a/tests/training/restoring/exp-smoothing/test_expsmooth_sync.sh
+++ b/tests/training/restoring/exp-smoothing/test_expsmooth_sync.sh
@@ -65,7 +65,8 @@ $MRT_MARIAN/marian \
--log expsmooth_sync_1.log
test -e expsmooth_sync/model.npz
-test -e expsmooth_sync/model.npz.orig.npz
+# @TODO: test for smoothed parameters in checkpoint file
+# test -e expsmooth_sync/model.npz.orig.npz
test -e expsmooth_sync_1.log
cat expsmooth_sync_1.log | $MRT_TOOLS/strip-timestamps.sh | grep "Ep\. " | grep -v 'valid' | sed 's/ : Time.*//' > expsmooth_sync.out
@@ -79,7 +80,8 @@ $MRT_MARIAN/marian \
--log expsmooth_sync_2.log
test -e expsmooth_sync/model.npz
-test -e expsmooth_sync/model.npz.orig.npz
+# @TODO: test for smoothed parameters in checkpoint file
+# test -e expsmooth_sync/model.npz.orig.npz
test -e expsmooth_sync_2.log
cat expsmooth_sync_2.log | $MRT_TOOLS/strip-timestamps.sh | grep "Ep\. " | grep -v 'valid' | sed 's/ : Time.*//' >> expsmooth_sync.out
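Note: the three exp-smoothing tests above no longer require a separate model.npz.orig.npz and instead leave a @TODO to verify the averaged parameters inside the training checkpoint. A minimal sketch of such a check, assuming the smoothed copies end up in the *.optimizer.npz checkpoint under some identifiable name (the "smooth" substring and the checkpoint path below are guesses for illustration, not taken from this diff):

    import numpy as np

    # Hypothetical follow-up for the @TODO above: inspect the checkpoint file
    # instead of requiring a separate model.npz.orig.npz. The path and the
    # "smooth" key pattern are assumptions, not repository facts.
    checkpoint = np.load("expsmooth/model.npz.optimizer.npz")
    smoothed = [name for name in checkpoint.files if "smooth" in name.lower()]
    assert smoothed, "no smoothed/averaged parameters found in checkpoint"
    print("smoothed parameter arrays:", smoothed)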
diff --git a/tests/training/restoring/optimizer/adagrad.keys.expected b/tests/training/restoring/optimizer/adagrad.keys.expected
index 2899089..f37412c 100644
--- a/tests/training/restoring/optimizer/adagrad.keys.expected
+++ b/tests/training/restoring/optimizer/adagrad.keys.expected
@@ -1 +1,2 @@
adagrad_gt
+master_parameters
diff --git a/tests/training/restoring/optimizer/adam.keys.expected b/tests/training/restoring/optimizer/adam.keys.expected
index fe9ecb1..e616f76 100644
--- a/tests/training/restoring/optimizer/adam.keys.expected
+++ b/tests/training/restoring/optimizer/adam.keys.expected
@@ -1,3 +1,4 @@
adam_mt
adam_vt
adam_denoms
+master_parameters
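Note: both adagrad.keys.expected and adam.keys.expected now also list master_parameters, i.e. the optimizer checkpoint stores one more array alongside the optimizer state. A sketch of how such a key list can be produced for comparison (illustrative only; this is not necessarily what the repository's test script does, and the path is assumed):

    import numpy as np

    # List the array names stored in the optimizer checkpoint; after this commit
    # "master_parameters" is expected to appear next to the Adam/Adagrad state.
    checkpoint = np.load("model.npz.optimizer.npz")
    with open("adam.keys.out", "w") as f:
        for name in checkpoint.files:
            f.write(name + "\n")
    # adam.keys.out can then be diffed against adam.keys.expected.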
diff --git a/tests/training/scheduler/log_epoch_e.expected b/tests/training/scheduler/log_epoch_e.expected
index 87cf167..c262c26 100644
--- a/tests/training/scheduler/log_epoch_e.expected
+++ b/tests/training/scheduler/log_epoch_e.expected
@@ -1,20 +1,22 @@
Training started
-Seen 1542 samples
+Parameter type float32, optimization type float32, casting types false
+Allocating memory for Adam-specific shards
+Seen 1,542 samples
Starting data epoch 2 in logical epoch 1.000
-Ep. 1.000 : Up. 10 : Sen. 768 : Cost 9.68880177 * 61,315 after 61,315
-Seen 1542 samples
+Ep. 1.000 : Up. 10 : Sen. 768 : Cost 9.68879700 * 61,315 @ 6,851 after 61,315
+Seen 1,542 samples
Starting data epoch 3 in logical epoch 1.500
-Ep. 1.500 : Up. 20 : Sen. 1,536 : Cost 9.67091751 * 61,279 after 122,594
-Seen 1542 samples
+Ep. 1.500 : Up. 20 : Sen. 1,536 : Cost 9.67091274 * 61,279 @ 6,585 after 122,594
+Seen 1,542 samples
Starting data epoch 4 in logical epoch 2.000
-Seen 1542 samples
+Seen 1,542 samples
Starting data epoch 5 in logical epoch 2.500
-Ep. 2.500 : Up. 30 : Sen. 512 : Cost 9.65089989 * 54,621 after 177,215
-Seen 1542 samples
+Ep. 2.500 : Up. 30 : Sen. 512 : Cost 9.65089798 * 54,621 @ 7,219 after 177,215
+Seen 1,542 samples
Starting data epoch 6 in logical epoch 3.000
-Ep. 3.000 : Up. 40 : Sen. 1,280 : Cost 9.63199997 * 61,545 after 238,760
-Seen 1542 samples
+Ep. 3.000 : Up. 40 : Sen. 1,280 : Cost 9.63199615 * 61,545 @ 6,916 after 238,760
+Seen 1,542 samples
Starting data epoch 7 in logical epoch 3.500
Training finished
Saving model to log_epoch_e/model.npz
-Saving Adam parameters to log_epoch_e/model.npz.optimizer.npz
+Saving Adam parameters
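Note: the updated expected logs add thousands separators ("Seen 1,542 samples"), an extra "@ N,NNN" value in every cost line, and a "Saving Adam parameters" message without the target path. A small parsing sketch for the new cost-line layout (the field names are guesses inferred from the lines shown here, not Marian documentation):

    import re

    # Split one of the new-format cost lines into its numeric fields. What the
    # value after '@' measures is not stated in this diff; "per_update" is a guess.
    line = "Ep. 1.000 : Up. 10 : Sen. 768 : Cost 9.68879700 * 61,315 @ 6,851 after 61,315"
    pattern = (r"Ep\. (?P<epoch>[\d.]+) : Up\. (?P<updates>[\d,]+) : "
               r"Sen\. (?P<sentences>[\d,]+) : Cost (?P<cost>[\d.]+) "
               r"\* (?P<labels>[\d,]+) @ (?P<per_update>[\d,]+) after (?P<total>[\d,]+)")
    m = re.match(pattern, line)
    print({key: value.replace(",", "") for key, value in m.groupdict().items()})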
diff --git a/tests/training/scheduler/log_epoch_t.expected b/tests/training/scheduler/log_epoch_t.expected
index 1f57c2e..0373755 100644
--- a/tests/training/scheduler/log_epoch_t.expected
+++ b/tests/training/scheduler/log_epoch_t.expected
@@ -1,12 +1,14 @@
Training started
-Ep. 2.258 : Up. 4 : Sen. 512 : Cost 9.69286919 * 13,547 after 13,547
-Ep. 3.400 : Up. 6 : Sen. 768 : Cost 9.68953419 * 6,851 after 20,398
-Ep. 5.131 : Up. 9 : Sen. 1,152 : Cost 9.68455887 * 10,387 after 30,785
-Ep. 6.793 : Up. 12 : Sen. 1,536 : Cost 9.68291855 * 9,975 after 40,760
-Seen 1542 samples
+Parameter type float32, optimization type float32, casting types false
+Allocating memory for Adam-specific shards
+Ep. 2.258 : Up. 4 : Sen. 512 : Cost 9.69286919 * 13,547 @ 3,630 after 13,547
+Ep. 3.400 : Up. 6 : Sen. 768 : Cost 9.68952084 * 6,851 @ 3,634 after 20,398
+Ep. 5.131 : Up. 9 : Sen. 1,152 : Cost 9.68455029 * 10,387 @ 3,526 after 30,785
+Ep. 6.793 : Up. 12 : Sen. 1,536 : Cost 9.68291855 * 9,975 @ 3,457 after 40,760
+Seen 1,542 samples
Starting data epoch 2 in logical epoch 6.819
-Ep. 8.472 : Up. 16 : Sen. 384 : Cost 9.67040443 * 10,074 after 50,834
-Ep. 10.219 : Up. 19 : Sen. 768 : Cost 9.66528606 * 10,481 after 61,315
+Ep. 8.472 : Up. 16 : Sen. 384 : Cost 9.67040443 * 10,074 @ 3,589 after 50,834
+Ep. 10.219 : Up. 19 : Sen. 768 : Cost 9.66527557 * 10,481 @ 3,634 after 61,315
Training finished
Saving model to log_epoch_t/model.npz
-Saving Adam parameters to log_epoch_t/model.npz.optimizer.npz
+Saving Adam parameters
diff --git a/tests/training/scheduler/log_epoch_u.expected b/tests/training/scheduler/log_epoch_u.expected
index a8855f2..14a3064 100644
--- a/tests/training/scheduler/log_epoch_u.expected
+++ b/tests/training/scheduler/log_epoch_u.expected
@@ -1,15 +1,17 @@
Training started
-Seen 1542 samples
+Parameter type float32, optimization type float32, casting types false
+Allocating memory for Adam-specific shards
+Seen 1,542 samples
Starting data epoch 2 in logical epoch 0.700
-Ep. 1.000 : Up. 10 : Sen. 768 : Cost 9.68880177 * 61,315 after 61,315
-Seen 1542 samples
+Ep. 1.000 : Up. 10 : Sen. 768 : Cost 9.68879700 * 61,315 @ 6,851 after 61,315
+Seen 1,542 samples
Starting data epoch 3 in logical epoch 1.400
-Ep. 2.000 : Up. 20 : Sen. 1,536 : Cost 9.67091751 * 61,279 after 122,594
-Seen 1542 samples
+Ep. 2.000 : Up. 20 : Sen. 1,536 : Cost 9.67091274 * 61,279 @ 6,585 after 122,594
+Seen 1,542 samples
Starting data epoch 4 in logical epoch 2.100
-Seen 1542 samples
+Seen 1,542 samples
Starting data epoch 5 in logical epoch 2.800
-Ep. 3.000 : Up. 30 : Sen. 512 : Cost 9.65089989 * 54,621 after 177,215
+Ep. 3.000 : Up. 30 : Sen. 512 : Cost 9.65089798 * 54,621 @ 7,219 after 177,215
Training finished
Saving model to log_epoch_u/model.npz
-Saving Adam parameters to log_epoch_u/model.npz.optimizer.npz
+Saving Adam parameters
diff --git a/tests/training/scheduler/test_logical_epoch.sh b/tests/training/scheduler/test_logical_epoch.sh
index fca9f52..dcfa95d 100644
--- a/tests/training/scheduler/test_logical_epoch.sh
+++ b/tests/training/scheduler/test_logical_epoch.sh
@@ -25,7 +25,7 @@ test -e log_epoch_e/model.npz
test -e log_epoch_e.log
# Compare actual and expected outputs
-cat log_epoch_e.log | $MRT_TOOLS/strip-timestamps.sh | grep -v '^\[' | sed 's/ : Time.*//' > log_epoch_e.out
+cat log_epoch_e.log | $MRT_TOOLS/strip-timestamps.sh | grep -v '^\[' | grep -v 'Synced' | sed 's/ : Time.*//' > log_epoch_e.out
$MRT_TOOLS/diff-nums.py log_epoch_e.out log_epoch_e.expected -p 0.01 -o log_epoch_e.diff
# Exit with success code
diff --git a/tests/training/scheduler/test_logical_epoch_labels.sh b/tests/training/scheduler/test_logical_epoch_labels.sh
index c37c0fa..acbeda6 100644
--- a/tests/training/scheduler/test_logical_epoch_labels.sh
+++ b/tests/training/scheduler/test_logical_epoch_labels.sh
@@ -25,7 +25,7 @@ test -e log_epoch_t/model.npz
test -e log_epoch_t.log
# Compare actual and expected outputs
-cat log_epoch_t.log | $MRT_TOOLS/strip-timestamps.sh | grep -v '^\[' | sed 's/ : Time.*//' > log_epoch_t.out
+cat log_epoch_t.log | $MRT_TOOLS/strip-timestamps.sh | grep -v '^\[' | grep -v 'Synced' | sed 's/ : Time.*//' > log_epoch_t.out
$MRT_TOOLS/diff-nums.py log_epoch_t.out log_epoch_t.expected -p 0.01 -o log_epoch_t.diff
# Exit with success code
diff --git a/tests/training/scheduler/test_logical_epoch_updates.sh b/tests/training/scheduler/test_logical_epoch_updates.sh
index 8582120..326d4a6 100644
--- a/tests/training/scheduler/test_logical_epoch_updates.sh
+++ b/tests/training/scheduler/test_logical_epoch_updates.sh
@@ -25,7 +25,7 @@ test -e log_epoch_u/model.npz
test -e log_epoch_u.log
# Compare actual and expected outputs
-cat log_epoch_u.log | $MRT_TOOLS/strip-timestamps.sh | grep -v '^\[' | sed 's/ : Time.*//' > log_epoch_u.out
+cat log_epoch_u.log | $MRT_TOOLS/strip-timestamps.sh | grep -v '^\[' | grep -v 'Synced' | sed 's/ : Time.*//' > log_epoch_u.out
$MRT_TOOLS/diff-nums.py log_epoch_u.out log_epoch_u.expected -p 0.01 -o log_epoch_u.diff
# Exit with success code
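Note: all three scheduler tests now also drop "Synced" lines before comparing the log against the expected output with $MRT_TOOLS/diff-nums.py -p 0.01. A minimal sketch of a token-wise numeric comparison at such a precision (an illustration of the idea only, not the actual diff-nums.py implementation):

    import re
    import sys

    # Compare two text files token by token; tokens that parse as numbers are
    # treated as equal when they agree within the given precision. This is only
    # a sketch of the idea behind diff-nums.py, not its actual code.
    def numeric_diff(out_path, expected_path, precision=0.01):
        def tokens(path):
            with open(path) as f:
                return f.read().split()

        a_toks, b_toks = tokens(out_path), tokens(expected_path)
        if len(a_toks) != len(b_toks):
            return "different number of tokens"
        number = re.compile(r"-?[\d,]*\.?\d+")
        for a, b in zip(a_toks, b_toks):
            if number.fullmatch(a) and number.fullmatch(b):
                x = float(a.replace(",", ""))
                y = float(b.replace(",", ""))
                if abs(x - y) > precision * max(1.0, abs(y)):
                    return f"numeric mismatch: {a} vs {b}"
            elif a != b:
                return f"token mismatch: {a} vs {b}"
        return None

    if __name__ == "__main__":
        error = numeric_diff(sys.argv[1], sys.argv[2])
        sys.exit(error if error else 0)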