Welcome to the mirror list, hosted at ThFree Co, Russian Federation.

github.com/stanfordnlp/stanza.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJohn Bauer <horatio@gmail.com>2022-09-14 23:42:23 +0300
committerJohn Bauer <horatio@gmail.com>2022-09-14 23:42:23 +0300
commit1f60beb947c59b191eb08e23c446ffbf2b19cf22 (patch)
tree38e7c37f915b757ac47ec065171de2d2610c207e
parentb3a38ae317268fb82b4082a1986af1a8ab91d833 (diff)
DO NOT MERGE - this keeps a large object on the GPU between tests (branch: ordered_dict)
-rw-r--r--stanza/tests/langid/test_multilingual.py13
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/stanza/tests/langid/test_multilingual.py b/stanza/tests/langid/test_multilingual.py
index 29e5c442..3fe4dee6 100644
--- a/stanza/tests/langid/test_multilingual.py
+++ b/stanza/tests/langid/test_multilingual.py
@@ -10,7 +10,7 @@ from stanza.tests import TEST_MODELS_DIR
pytestmark = [pytest.mark.pipeline, pytest.mark.travis]
-def run_multilingual_pipeline(**kwargs):
+def run_multilingual_pipeline(nlp):
english_text = "This is an English sentence."
english_deps_gold = "\n".join((
"('This', 5, 'nsubj')",
@@ -31,7 +31,6 @@ def run_multilingual_pipeline(**kwargs):
"('.', 4, 'punct')"
))
- nlp = MultilingualPipeline(model_dir=TEST_MODELS_DIR, **kwargs)
docs = [english_text, french_text]
docs = nlp(docs)
@@ -40,15 +39,19 @@ def run_multilingual_pipeline(**kwargs):
assert docs[1].lang == "fr"
assert docs[1].sentences[0].dependencies_string() == french_deps_gold
+@pytest.fixture(scope="module")
+def basic_multilingual():
+ return MultilingualPipeline(model_dir=TEST_MODELS_DIR)
-def test_multilingual_pipeline():
+def test_multilingual_pipeline(basic_multilingual):
"""
Basic test of multilingual pipeline
"""
- run_multilingual_pipeline()
+ run_multilingual_pipeline(basic_multilingual)
def test_multilingual_pipeline_small_cache():
"""
Test with the cache size 1
"""
- run_multilingual_pipeline(max_cache_size=1)
+ nlp = MultilingualPipeline(model_dir=TEST_MODELS_DIR, max_cache_size=1)
+ run_multilingual_pipeline(nlp)