github.com/stanfordnlp/stanza.git
commit 19e229565f001da003a1949517ca1f9bafd24920
tree   becf94c76e07795dc9cc636abd3b71e1e821aa10
parent 6027ab101ec2a6aa113a6182833c69020466bdd8
author    John Bauer <horatio@gmail.com>  2022-09-08 21:17:40 +0300
committer John Bauer <horatio@gmail.com>  2022-09-08 21:17:40 +0300

    relearn_structure should reuse the foundation_cache if possible
 stanza/models/constituency/trainer.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/stanza/models/constituency/trainer.py b/stanza/models/constituency/trainer.py
index 797d9231..af703cf5 100644
--- a/stanza/models/constituency/trainer.py
+++ b/stanza/models/constituency/trainer.py
@@ -357,7 +357,7 @@ def build_trainer(args, train_trees, dev_trees, foundation_cache, model_load_fil
# remove the pattn & lattn layers unless the saved model had them
temp_args.pop('pattn_num_layers', None)
temp_args.pop('lattn_d_proj', None)
- trainer = Trainer.load(model_load_file, temp_args, load_optimizer=False)
+ trainer = Trainer.load(model_load_file, temp_args, load_optimizer=False, foundation_cache=foundation_cache)
# using the model's current values works for if the new
# dataset is the same or smaller
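Before this change, the Trainer.load call in build_trainer did not receive the foundation_cache, so rebuilding a trainer (as relearn_structure does) would reload pretrained word vectors and similar foundation resources from disk even when they were already in memory. Passing the existing cache through lets the reload reuse them. A minimal sketch of that caching pattern follows; the names (SimpleFoundationCache, load_pretrain, expensive_load) are illustrative placeholders, not the exact stanza API.

```python
class SimpleFoundationCache:
    """Toy cache that loads large 'foundation' resources once and
    returns the same object on repeated requests."""

    def __init__(self):
        self._pretrains = {}

    def load_pretrain(self, filename):
        # Only the first request for a given file touches the disk;
        # later requests (e.g. a Trainer being rebuilt) reuse the object.
        if filename not in self._pretrains:
            self._pretrains[filename] = expensive_load(filename)
        return self._pretrains[filename]


def expensive_load(filename):
    # Stand-in for reading a large pretrained embedding file from disk.
    print(f"loading {filename} from disk")
    return object()


if __name__ == "__main__":
    cache = SimpleFoundationCache()
    first = cache.load_pretrain("wordvec.pt")   # hits the disk
    second = cache.load_pretrain("wordvec.pt")  # reuses the cached object
    assert first is second
```

The one-line patch above applies the same idea: threading a shared cache through Trainer.load avoids a redundant, slow reload when the surrounding code already holds the resources.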