github.com/TharinduDR/TransQuest.git

author    TharinduDR <rhtdranasinghe@gmail.com>  2020-07-07 19:51:35 +0300
committer TharinduDR <rhtdranasinghe@gmail.com>  2020-07-07 19:51:35 +0300
commit    8fa55f62b74b4f761754122f716bae582656da83
tree      0113d1fb6291154f64a01549c710b36eb54ff06e
parent    78aeebcd530247505e257d62bad443688d5a30d5

031: Updating libraries
-rw-r--r--  examples/wmt_2020/en_zh/transformer_config.py   5
-rw-r--r--  examples/wmt_2020/et_en/transformer_config.py  11
-rw-r--r--  examples/wmt_2020/ne_en/transformer_config.py  11
-rw-r--r--  examples/wmt_2020/ro_en/transformer_config.py   4
-rw-r--r--  examples/wmt_2020/ru_en/transformer_config.py   5
-rw-r--r--  examples/wmt_2020/si_en/transformer_config.py   9
6 files changed, 25 insertions(+), 20 deletions(-)
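
Every config in this commit gets the same core change: training runs for 6 epochs instead of 3, at a halved learning rate of 1e-5, and five of the six files gain a save_recent_only flag, presumably to keep only recent checkpoints and save disk space. A minimal sketch of that shared delta, using a hypothetical helper written only to summarise the hunks below:

# Hypothetical summary of this commit: the same update lands in each
# language pair's transformer_config dict (ro_en gets only the first two keys).
shared_update = {
    'num_train_epochs': 6,     # was 3
    'learning_rate': 1e-5,     # was 2e-5
    'save_recent_only': True,  # new flag; added everywhere except ro_en
}

def apply_commit(transformer_config: dict) -> dict:
    """Return a copy of a per-language config with this commit's changes applied."""
    updated = dict(transformer_config)
    updated.update(shared_update)
    return updated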
diff --git a/examples/wmt_2020/en_zh/transformer_config.py b/examples/wmt_2020/en_zh/transformer_config.py
index 9da0bca..f8140d9 100644
--- a/examples/wmt_2020/en_zh/transformer_config.py
+++ b/examples/wmt_2020/en_zh/transformer_config.py
@@ -21,9 +21,9 @@ transformer_config = {
     'train_batch_size': 8,
     'gradient_accumulation_steps': 1,
     'eval_batch_size': 8,
-    'num_train_epochs': 3,
+    'num_train_epochs': 6,
     'weight_decay': 0,
-    'learning_rate': 2e-5,
+    'learning_rate': 1e-5,
     'adam_epsilon': 1e-8,
     'warmup_ratio': 0.06,
     'warmup_steps': 0,
@@ -34,6 +34,7 @@ transformer_config = {
     'save_steps': 300,
     "no_cache": False,
     'save_model_every_epoch': True,
+    'save_recent_only': True,
     'n_fold': 3,
     'evaluate_during_training': True,
     'evaluate_during_training_steps': 300,
diff --git a/examples/wmt_2020/et_en/transformer_config.py b/examples/wmt_2020/et_en/transformer_config.py
index b5aae72..f8140d9 100644
--- a/examples/wmt_2020/et_en/transformer_config.py
+++ b/examples/wmt_2020/et_en/transformer_config.py
@@ -5,10 +5,10 @@ TEMP_DIRECTORY = "temp/data"
 RESULT_FILE = "result.tsv"
 RESULT_IMAGE = "result.jpg"
 SUBMISSION_FILE = "predictions.txt"
-GOOGLE_DRIVE = True
-DRIVE_FILE_ID = "1-VvMh_Pto0idpSm18yfoBfxUT3Orl3DJ"
+GOOGLE_DRIVE = False
+DRIVE_FILE_ID = None
 MODEL_TYPE = "xlmroberta"
-MODEL_NAME = "transquest/ro-en"
+MODEL_NAME = "xlm-roberta-large"
 
 transformer_config = {
     'output_dir': 'temp/outputs/',
@@ -21,9 +21,9 @@ transformer_config = {
     'train_batch_size': 8,
     'gradient_accumulation_steps': 1,
     'eval_batch_size': 8,
-    'num_train_epochs': 3,
+    'num_train_epochs': 6,
     'weight_decay': 0,
-    'learning_rate': 2e-5,
+    'learning_rate': 1e-5,
     'adam_epsilon': 1e-8,
     'warmup_ratio': 0.06,
     'warmup_steps': 0,
@@ -34,6 +34,7 @@ transformer_config = {
     'save_steps': 300,
     "no_cache": False,
     'save_model_every_epoch': True,
+    'save_recent_only': True,
     'n_fold': 3,
     'evaluate_during_training': True,
     'evaluate_during_training_steps': 300,
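
Besides the shared hyperparameter update, et_en stops downloading a pre-fine-tuned model from Google Drive (GOOGLE_DRIVE = False, DRIVE_FILE_ID = None) and instead fine-tunes from the stock xlm-roberta-large checkpoint; the old MODEL_NAME "transquest/ro-en" pointed at a Romanian-English model. A sketch of how a MODEL_TYPE/MODEL_NAME pair like this is typically resolved; TransQuest's own loader is not part of this diff, so the plain Hugging Face Transformers calls here are an assumption:

# Assumption: illustrating name resolution with plain transformers APIs,
# not TransQuest's actual entry point (which this diff does not show).
from transformers import XLMRobertaModel, XLMRobertaTokenizer

MODEL_NAME = "xlm-roberta-large"  # from the updated et_en config

tokenizer = XLMRobertaTokenizer.from_pretrained(MODEL_NAME)
model = XLMRobertaModel.from_pretrained(MODEL_NAME)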
diff --git a/examples/wmt_2020/ne_en/transformer_config.py b/examples/wmt_2020/ne_en/transformer_config.py
index 5fc7efa..42e2192 100644
--- a/examples/wmt_2020/ne_en/transformer_config.py
+++ b/examples/wmt_2020/ne_en/transformer_config.py
@@ -21,22 +21,23 @@ transformer_config = {
     'train_batch_size': 8,
     'gradient_accumulation_steps': 1,
     'eval_batch_size': 8,
-    'num_train_epochs': 3,
+    'num_train_epochs': 6,
     'weight_decay': 0,
-    'learning_rate': 2e-5,
+    'learning_rate': 1e-5,
     'adam_epsilon': 1e-8,
     'warmup_ratio': 0.06,
     'warmup_steps': 0,
     'max_grad_norm': 1.0,
     'do_lower_case': False,
-    'logging_steps': 5,
-    'save_steps': 5,
+    'logging_steps': 300,
+    'save_steps': 300,
     "no_cache": False,
     'save_model_every_epoch': True,
+    'save_recent_only': True,
     'n_fold': 3,
     'evaluate_during_training': True,
-    'evaluate_during_training_steps': 5,
+    'evaluate_during_training_steps': 300,
     "evaluate_during_training_verbose": True,
     'use_cached_eval_features': False,
     'save_eval_checkpoints': True,
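
ne_en previously logged, saved, and evaluated every 5 steps; this commit brings it in line with the other configs at 300. A back-of-envelope check of the checkpoint cadence, assuming a training split of roughly 7,000 segments as in the WMT 2020 QE shared task (an assumption; the dataset size is not in this diff):

# Rough cadence check. train_examples is an assumption (~7,000 segments
# in the WMT 2020 QE training split); batch settings come from the config above.
train_examples = 7000
train_batch_size = 8
gradient_accumulation_steps = 1

steps_per_epoch = train_examples // (train_batch_size * gradient_accumulation_steps)  # 875

for save_steps in (5, 300):  # before vs. after this commit
    print(save_steps, steps_per_epoch // save_steps)  # 5 -> 175 saves/epoch, 300 -> 2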
diff --git a/examples/wmt_2020/ro_en/transformer_config.py b/examples/wmt_2020/ro_en/transformer_config.py
index 0d09521..5ee79d0 100644
--- a/examples/wmt_2020/ro_en/transformer_config.py
+++ b/examples/wmt_2020/ro_en/transformer_config.py
@@ -21,9 +21,9 @@ transformer_config = {
     'train_batch_size': 8,
     'gradient_accumulation_steps': 1,
     'eval_batch_size': 8,
-    'num_train_epochs': 3,
+    'num_train_epochs': 6,
     'weight_decay': 0,
-    'learning_rate': 2e-5,
+    'learning_rate': 1e-5,
     'adam_epsilon': 1e-8,
     'warmup_ratio': 0.1,
     'warmup_steps': 0,
diff --git a/examples/wmt_2020/ru_en/transformer_config.py b/examples/wmt_2020/ru_en/transformer_config.py
index 40250d3..c00e0ee 100644
--- a/examples/wmt_2020/ru_en/transformer_config.py
+++ b/examples/wmt_2020/ru_en/transformer_config.py
@@ -21,9 +21,9 @@ transformer_config = {
     'train_batch_size': 8,
     'gradient_accumulation_steps': 1,
     'eval_batch_size': 8,
-    'num_train_epochs': 3,
+    'num_train_epochs': 6,
     'weight_decay': 0,
-    'learning_rate': 2e-5,
+    'learning_rate': 1e-5,
     'adam_epsilon': 1e-8,
     'warmup_ratio': 0.06,
     'warmup_steps': 0,
@@ -34,6 +34,7 @@ transformer_config = {
     'save_steps': 300,
     "no_cache": False,
     'save_model_every_epoch': True,
+    'save_recent_only': True,
     'n_fold': 3,
     'evaluate_during_training': True,
     'evaluate_during_training_steps': 300,
diff --git a/examples/wmt_2020/si_en/transformer_config.py b/examples/wmt_2020/si_en/transformer_config.py
index 5ac3353..fcc85d5 100644
--- a/examples/wmt_2020/si_en/transformer_config.py
+++ b/examples/wmt_2020/si_en/transformer_config.py
@@ -21,19 +21,20 @@ transformer_config = {
     'train_batch_size': 8,
     'gradient_accumulation_steps': 1,
     'eval_batch_size': 8,
-    'num_train_epochs': 3,
+    'num_train_epochs': 6,
     'weight_decay': 0,
-    'learning_rate': 2e-5,
+    'learning_rate': 1e-5,
     'adam_epsilon': 1e-8,
     'warmup_ratio': 0.06,
     'warmup_steps': 0,
     'max_grad_norm': 1.0,
     'do_lower_case': False,
-    'logging_steps': 50,
-    'save_steps': 50,
+    'logging_steps': 300,
+    'save_steps': 300,
     "no_cache": False,
     'save_model_every_epoch': True,
+    'save_recent_only': True,
     'n_fold': 3,
     'evaluate_during_training': True,
     'evaluate_during_training_steps': 50,