from multiprocessing import cpu_count

# General experiment settings
SEED = 777
TEMP_DIRECTORY = "temp/data"
RESULT_FILE = "result.tsv"
SUBMISSION_FILE = "predictions.txt"
RESULT_IMAGE = "result.jpg"

# Optional Google Drive download of data/model (set DRIVE_FILE_ID if enabled)
GOOGLE_DRIVE = False
DRIVE_FILE_ID = None

# Pretrained transformer to fine-tune
MODEL_TYPE = "xlmroberta"
MODEL_NAME = "xlm-roberta-large"
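

# Hypothetical helper (an assumption based on the flags above, not confirmed by
# the original project): GOOGLE_DRIVE / DRIVE_FILE_ID suggest that data or a
# pretrained model can be fetched from Google Drive. The project may ship its
# own download utility; this minimal sketch uses the third-party `gdown`
# package instead and is only defined here, not called.
def download_from_drive(file_id, output_path):
    """Download a file from Google Drive by id (requires `pip install gdown`)."""
    import gdown  # lazy import so this config module has no hard dependency on gdown
    gdown.download(f"https://drive.google.com/uc?id={file_id}", output_path, quiet=False)

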
siamesetransquest_config = {
    # Output, cache and checkpoint locations
    'output_dir': 'temp/outputs/',
    "best_model_dir": "temp/outputs/best_model",
    'cache_dir': 'temp/cache_dir/',

    # Mixed precision (disabled)
    'fp16': False,
    'fp16_opt_level': 'O1',

    # Core training hyperparameters
    'max_seq_length': 80,
    'train_batch_size': 8,
    'gradient_accumulation_steps': 1,
    'eval_batch_size': 8,
    'num_train_epochs': 6,
    'weight_decay': 0,
    'learning_rate': 1e-5,
    'adam_epsilon': 1e-8,
    'warmup_ratio': 0.06,
    'warmup_steps': 0,
    'max_grad_norm': 1.0,
    'do_lower_case': False,

    # Logging, checkpointing and in-training evaluation
    'logging_steps': 300,
    'save_steps': 300,
    "no_cache": False,
    'save_model_every_epoch': True,
    'n_fold': 1,
    'evaluate_during_training': True,
    'evaluate_during_training_steps': 300,
    "evaluate_during_training_verbose": True,
    'use_cached_eval_features': False,
    'save_eval_checkpoints': True,
    'tensorboard_dir': None,

    # Task type and data handling
    'regression': True,
    'overwrite_output_dir': True,
    'reprocess_input_data': True,

    # Hardware and parallelism
    'process_count': cpu_count() - 2 if cpu_count() > 2 else 1,
    'n_gpu': 1,
    'use_multiprocessing': True,
    'silent': False,

    # Weights & Biases logging (disabled)
    'wandb_project': None,
    'wandb_kwargs': {},

    # Early stopping on the evaluation loss
    "use_early_stopping": True,
    "early_stopping_patience": 10,
    "early_stopping_delta": 0,
    "early_stopping_metric": "eval_loss",
    "early_stopping_metric_minimize": True,

    # Reproducibility and text encoding
    "manual_seed": SEED,
    "encoding": None,
}
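

# Illustrative sanity check (an assumption, not required by the surrounding
# training scripts): running this module directly prints the effective
# configuration, which is handy before launching a long fine-tuning run.
if __name__ == "__main__":
    import json

    print(json.dumps(
        {
            "seed": SEED,
            "model_type": MODEL_TYPE,
            "model_name": MODEL_NAME,
            "args": siamesetransquest_config,
        },
        indent=2,
    ))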