diff options
author | John Bauer <horatio@gmail.com> | 2022-09-14 01:53:49 +0300 |
---|---|---|
committer | John Bauer <horatio@gmail.com> | 2022-09-14 01:53:49 +0300 |
commit | b7fda48e773c17580d5615fdfaafd13d05f09290 (patch) | |
tree | ba083f77bb2a78389c4f5dce964cc8b488fb01bf /stanza | |
parent | c7099238c2e388ee13e0b8cb793a64eb0cfd63b1 (diff) |
Lower log level on some messages we don't want written to the pipeline
Diffstat (limited to 'stanza')
-rw-r--r-- | stanza/models/classifiers/trainer.py | 8 |
1 file changed, 4 insertions, 4 deletions
diff --git a/stanza/models/classifiers/trainer.py b/stanza/models/classifiers/trainer.py index 089ee656..ff689c08 100644 --- a/stanza/models/classifiers/trainer.py +++ b/stanza/models/classifiers/trainer.py @@ -146,17 +146,17 @@ class Trainer: else: raise RuntimeError("TODO: need to get the wv type back from get_wordvec_file") - logger.info("Looking for pretrained vectors in {}".format(pretrain_file)) + logger.debug("Looking for pretrained vectors in {}".format(pretrain_file)) if os.path.exists(pretrain_file): return load_pretrain(pretrain_file, foundation_cache) elif args.wordvec_raw_file: vec_file = args.wordvec_raw_file - logger.info("Pretrain not found. Looking in {}".format(vec_file)) + logger.debug("Pretrain not found. Looking in {}".format(vec_file)) else: vec_file = utils.get_wordvec_file(args.wordvec_dir, args.shorthand, args.wordvec_type.name.lower()) - logger.info("Pretrain not found. Looking in {}".format(vec_file)) + logger.debug("Pretrain not found. Looking in {}".format(vec_file)) pretrain = Pretrain(pretrain_file, vec_file, args.pretrain_max_vocab) - logger.info("Embedding shape: %s" % str(pretrain.emb.shape)) + logger.debug("Embedding shape: %s" % str(pretrain.emb.shape)) return pretrain |