Commit: fixed pep8 issues

tmbo committed Jan 24, 2019
1 parent 4c6b1e3 commit 34b6170
Showing 9 changed files with 99 additions and 81 deletions.
5 changes: 3 additions & 2 deletions rasa_nlu/classifiers/embedding_intent_classifier.py
@@ -312,8 +312,9 @@ def _create_tf_embed(self,
name='b')
return emb_a, emb_b

-    def _tf_sim(self, a: tf.Tensor, b: tf.Tensor) -> Tuple[
-            tf.Tensor, tf.Tensor]:
+    def _tf_sim(self,
+                a: tf.Tensor,
+                b: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
"""Define similarity
in two cases:
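The rewrapped signature follows PEP 8's continuation style: one parameter per line, aligned with the opening parenthesis, so every line stays under the 80-character limit configured in setup.cfg. A minimal runnable sketch of the same pattern (the names below are illustrative, not from the codebase):

from typing import Tuple


def scaled_pair(a: float,
                b: float) -> Tuple[float, float]:
    # Each parameter sits on its own line, aligned with the opening
    # parenthesis, so no line exceeds the configured limit.
    return 2 * a, 2 * b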
1 change: 1 addition & 0 deletions rasa_nlu/training_data/formats/wit.py
@@ -14,6 +14,7 @@ class WitReader(JsonTrainingDataReader):

def read_from_json(self, js: Dict[Text, Any], **kwargs: Any):
"""Loads training data stored in the WIT.ai data format."""
+        from rasa_nlu.training_data import Message, TrainingData

training_examples = []

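The added import lives inside read_from_json rather than at module scope; deferring an import this way keeps module loading cheap and is a common way to sidestep circular-import problems. A minimal sketch of the pattern, using a standard-library module as a stand-in:

def parse(payload):
    # Deferred import: it runs only when parse() is called, so loading
    # this module stays cheap and cannot create an import cycle.
    from json import loads
    return loads(payload)


print(parse('{"text": "hello"}'))  # {'text': 'hello'}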
1 change: 1 addition & 0 deletions setup.cfg
@@ -3,6 +3,7 @@
pep8maxlinelength = 80
pep8ignore =
    docs/conf.py ALL
    *.py E251
    *.py W503
    *.py E126

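These pytest-pep8 settings cap lines at 80 characters and skip selected pycodestyle checks: E251 (unexpected spaces around a keyword/parameter equals), W503 (line break before a binary operator), and E126 (continuation line over-indented for a hanging indent). With W503 ignored, for example, operator-first wrapping passes the checker; a minimal sketch:

first_total, second_total = 40, 2
# Breaking before the operator would normally raise W503; the config
# above suppresses it, so this wrapping style is accepted.
combined = (first_total
            + second_total)
assert combined == 42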
19 changes: 12 additions & 7 deletions tests/base/test_project.py
@@ -15,7 +15,7 @@ def mocked_init(*args, **kwargs):
with mock.patch.object(Project, "__init__", mocked_init):
project = Project()

-        project._models = (MODEL_NAME, )
+        project._models = (MODEL_NAME,)

project.pull_models = None

@@ -31,10 +31,11 @@ def mocked_init(*args, **kwargs):
return None

def mocked_search_for_models(self):
-        self._models = (MODEL_NAME, )
+        self._models = (MODEL_NAME,)

with mock.patch.object(Project, "__init__", mocked_init):
-        with mock.patch.object(Project, '_search_for_models', mocked_search_for_models):
+        with mock.patch.object(Project, '_search_for_models',
+                               mocked_search_for_models):
project = Project()

project._models = ()
@@ -59,8 +60,10 @@ def mocked_latest_project_model(self):
return LATEST_MODEL_NAME

with mock.patch.object(Project, "__init__", mocked_init):
-        with mock.patch.object(Project, "_search_for_models", mocked_search_for_models):
-            with mock.patch.object(Project, "_latest_project_model", mocked_latest_project_model):
+        with mock.patch.object(Project, "_search_for_models",
+                               mocked_search_for_models):
+            with mock.patch.object(Project, "_latest_project_model",
+                                   mocked_latest_project_model):
project = Project()

project._models = ()
@@ -85,8 +88,10 @@ def mocked_latest_project_model(self):
return LATEST_MODEL_NAME

with mock.patch.object(Project, "__init__", mocked_init):
-        with mock.patch.object(Project, "_search_for_models", mocked_search_for_models):
-            with mock.patch.object(Project, "_latest_project_model", mocked_latest_project_model):
+        with mock.patch.object(Project, "_search_for_models",
+                               mocked_search_for_models):
+            with mock.patch.object(Project, "_latest_project_model",
+                                   mocked_latest_project_model):
project = Project()

project._models = ()
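The (MODEL_NAME, ) → (MODEL_NAME,) change only drops the space before the closing parenthesis (flagged by pycodestyle as whitespace before ')'); the trailing comma is what makes the value a one-element tuple. A quick illustration with a hypothetical model name:

MODEL_NAME = "model_20190124"
# The comma, not the parentheses, creates the tuple.
assert (MODEL_NAME,) == tuple([MODEL_NAME])
assert (MODEL_NAME) == MODEL_NAME  # no comma: just a parenthesized string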
65 changes: 33 additions & 32 deletions tests/base/test_server.py
@@ -1,11 +1,11 @@
# -*- coding: utf-8 -*-
-import time

import io
import json
-import tempfile

import pytest
+import time
import ruamel.yaml as yaml
+import tempfile
from treq.testing import StubTreq

from rasa_nlu.data_router import DataRouter
@@ -65,25 +65,25 @@ def test_version(app):

@pytest.mark.parametrize("response_test", [
    ResponseTest(
        "http://dummy-uri/parse?q=hello",
        {'project': 'default', 'entities': [], 'model': 'fallback',
         'intent': {'confidence': 1.0, 'name': 'greet'}, 'text': 'hello'}
),
    ResponseTest(
        "http://dummy-uri/parse?query=hello",
        {'project': 'default', 'entities': [], 'model': 'fallback',
         'intent': {'confidence': 1.0, 'name': 'greet'}, 'text': 'hello'}
),
    ResponseTest(
        "http://dummy-uri/parse?q=hello ńöñàśçií",
        {'project': 'default', 'entities': [], 'model': 'fallback',
         'intent': {'confidence': 1.0, 'name': 'greet'},
         'text': 'hello ńöñàśçií'}
),
    ResponseTest(
        "http://dummy-uri/parse?q=",
        {'project': 'default', 'entities': [], 'model': 'fallback',
         'intent': {'confidence': 0.0, 'name': None}, 'text': ''}
),
])
@pytest.inlineCallbacks
@@ -99,25 +99,25 @@ def test_get_parse(app, response_test):

@pytest.mark.parametrize("response_test", [
    ResponseTest(
        "http://dummy-uri/parse",
        {'project': 'default', 'entities': [], 'model': 'fallback',
         'intent': {'confidence': 1.0, 'name': 'greet'},
         'text': 'hello'},
        payload={"q": "hello"}
),
    ResponseTest(
        "http://dummy-uri/parse",
        {'project': 'default', 'entities': [], 'model': 'fallback',
         'intent': {'confidence': 1.0, 'name': 'greet'},
         'text': 'hello'},
        payload={"query": "hello"}
),
    ResponseTest(
        "http://dummy-uri/parse",
        {'project': 'default', 'entities': [], 'model': 'fallback',
         'intent': {'confidence': 1.0, 'name': 'greet'},
         'text': 'hello ńöñàśçií'},
        payload={"q": "hello ńöñàśçií"}
),
])
@pytest.inlineCallbacks
@@ -243,7 +243,8 @@ def test_unload_model_error(app):
response = yield app.delete(model_err)
rjs = yield response.json()
assert response.code == 500, "Model not found"
-    assert rjs['error'] == "Failed to unload model my_model for project default."
+    assert rjs['error'] == ("Failed to unload model my_model for project "
+                            "default.")


@pytest.inlineCallbacks
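The rewrapped assertion relies on implicit concatenation: adjacent string literals are joined at compile time, so splitting a long message across parenthesized lines leaves the value unchanged. A minimal check:

# Two adjacent literals compile to a single string.
error = ("Failed to unload model my_model for project "
         "default.")
assert error == "Failed to unload model my_model for project default."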
26 changes: 14 additions & 12 deletions tests/base/test_tokenizers.py
@@ -7,24 +7,26 @@ def test_whitespace():
from rasa_nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer
tk = WhitespaceTokenizer()

-    assert [t.text for t in tk.tokenize("Forecast for lunch")] == \
-           ['Forecast', 'for', 'lunch']
+    assert ([t.text for t in tk.tokenize("Forecast for lunch")] ==
+            ['Forecast', 'for', 'lunch'])

-    assert [t.offset for t in tk.tokenize("Forecast for lunch")] == \
-           [0, 9, 13]
+    assert ([t.offset for t in tk.tokenize("Forecast for lunch")] ==
+            [0, 9, 13])

# we ignore .,!?
-    assert [t.text for t in tk.tokenize("hey ńöñàśçií how're you?")] == \
-           ['hey', 'ńöñàśçií', 'how\'re', 'you']
+    assert ([t.text for t in tk.tokenize("hey ńöñàśçií how're you?")] ==
+            ['hey', 'ńöñàśçií', 'how\'re', 'you'])

-    assert [t.offset for t in tk.tokenize("hey ńöñàśçií how're you?")] == \
-           [0, 4, 13, 20]
+    assert ([t.offset for t in tk.tokenize("hey ńöñàśçií how're you?")] ==
+            [0, 4, 13, 20])

-    assert [t.text for t in tk.tokenize("привет! 10.000, ńöñàśçií. how're you?")] == \
-           ['привет', '10.000', 'ńöñàśçií', 'how\'re', 'you']
+    assert ([t.text
+             for t in tk.tokenize("привет! 10.000, ńöñàśçií. how're you?")] ==
+            ['привет', '10.000', 'ńöñàśçií', 'how\'re', 'you'])

-    assert [t.offset for t in tk.tokenize("привет! 10.000, ńöñàśçií. how're you?")] == \
-           [0, 8, 16, 26, 33]
+    assert ([t.offset
+             for t in tk.tokenize("привет! 10.000, ńöñàśçií. how're you?")] ==
+            [0, 8, 16, 26, 33])


def test_spacy(spacy_nlp):
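PEP 8 prefers wrapping long expressions in parentheses over backslash continuation, which breaks if trailing whitespace sneaks in after the backslash; the rewritten asserts above follow that advice. A minimal sketch of the pattern:

words = "Forecast for lunch".split()
# Inside parentheses the expression may wrap at any natural point;
# no fragile trailing backslash is needed.
assert (words ==
        ['Forecast', 'for', 'lunch'])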
31 changes: 18 additions & 13 deletions tests/base/test_training_data.py
@@ -1,12 +1,10 @@
# -*- coding: utf-8 -*-

-import tempfile
-
import pytest
+import tempfile
from jsonschema import ValidationError

-from rasa_nlu import training_data
-from rasa_nlu import utils
+from rasa_nlu import training_data, utils
from rasa_nlu.convert import convert_training_data
from rasa_nlu.extractors.mitie_entity_extractor import MitieEntityExtractor
from rasa_nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer
@@ -65,7 +63,8 @@ def test_dialogflow_data():
assert len(td.training_examples) == 24
assert td.intents == {"affirm", "goodbye", "hi", "inform"}
assert td.entities == {"cuisine", "location"}
-    non_trivial_synonyms = {k: v for k, v in td.entity_synonyms.items() if k != v}
+    non_trivial_synonyms = {k: v
+                            for k, v in td.entity_synonyms.items() if k != v}
assert non_trivial_synonyms == {"mexico": "mexican",
"china": "chinese",
"india": "indian"}
@@ -74,7 +73,7 @@ def test_dialogflow_data():
def test_lookup_table_json():
lookup_fname = 'data/test/lookup_tables/plates.txt'
td_lookup = training_data.load_data(
        'data/test/lookup_tables/lookup_table.json')
assert td_lookup.lookup_tables[0]['name'] == 'plates'
assert td_lookup.lookup_tables[0]['elements'] == lookup_fname
assert td_lookup.lookup_tables[1]['name'] == 'drinks'
@@ -85,15 +84,16 @@ def test_lookup_table_json():
def test_lookup_table_md():
lookup_fname = 'data/test/lookup_tables/plates.txt'
td_lookup = training_data.load_data(
        'data/test/lookup_tables/lookup_table.md')
assert td_lookup.lookup_tables[0]['name'] == 'plates'
assert td_lookup.lookup_tables[0]['elements'] == lookup_fname
assert td_lookup.lookup_tables[1]['name'] == 'drinks'
assert td_lookup.lookup_tables[1]['elements'] == [
'mojito', 'lemonade', 'sweet berry wine', 'tea', 'club mate']


-@pytest.mark.parametrize("filename", ["data/examples/rasa/demo-rasa.json", 'data/examples/rasa/demo-rasa.md'])
+@pytest.mark.parametrize("filename", ["data/examples/rasa/demo-rasa.json",
+                                      'data/examples/rasa/demo-rasa.md'])
def test_demo_data(filename):
td = training_data.load_data(filename)
assert td.intents == {"affirm", "greet", "restaurant_search", "goodbye"}
@@ -126,8 +126,10 @@ def test_train_test_split(filename):
assert len(td_test.training_examples) == 10


-@pytest.mark.parametrize("files", [('data/examples/rasa/demo-rasa.json', 'data/test/multiple_files_json'),
-                                   ('data/examples/rasa/demo-rasa.md', 'data/test/multiple_files_markdown')])
+@pytest.mark.parametrize("files", [('data/examples/rasa/demo-rasa.json',
+                                    'data/test/multiple_files_json'),
+                                   ('data/examples/rasa/demo-rasa.md',
+                                    'data/test/multiple_files_markdown')])
def test_data_merging(files):
td_reference = training_data.load_data(files[0])
td = training_data.load_data(files[1])
@@ -141,10 +143,13 @@


def test_markdown_single_sections():
-    td_regex_only = training_data.load_data('data/test/markdown_single_sections/regex_only.md')
-    assert td_regex_only.regex_features == [{"name": "greet", "pattern": "hey[^\s]*"}]
+    td_regex_only = training_data.load_data(
+        'data/test/markdown_single_sections/regex_only.md')
+    assert td_regex_only.regex_features == \
+        [{"name": "greet", "pattern": "hey[^\s]*"}]

-    td_syn_only = training_data.load_data('data/test/markdown_single_sections/synonyms_only.md')
+    td_syn_only = training_data.load_data(
+        'data/test/markdown_single_sections/synonyms_only.md')
assert td_syn_only.entity_synonyms == {'Chines': 'chinese',
'Chinese': 'chinese'}

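When a @pytest.mark.parametrize argument list outgrows the line limit, each parameter tuple can be wrapped element by element, as done above for the file pairs. A self-contained sketch with placeholder values rather than the real data files:

import pytest


@pytest.mark.parametrize("files", [('reference-a.json',
                                    'dir_a'),
                                   ('reference-b.md',
                                    'dir_b')])
def test_pairs(files):
    # Each parametrized case arrives as one (reference, directory) tuple.
    reference, directory = files
    assert reference != directory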
21 changes: 11 additions & 10 deletions tests/base/test_utils.py
@@ -4,14 +4,14 @@
import json
import os
import pickle
+import tempfile

import pytest
-import tempfile
from httpretty import httpretty

from rasa_nlu import utils
from rasa_nlu.utils import (
-    relative_normpath, create_dir, is_url, ordered, is_model_dir, remove_model,
-    write_json_to_file, write_to_file, EndpointConfig)
+    EndpointConfig, create_dir, is_model_dir, is_url, ordered,
+    relative_normpath, remove_model, write_json_to_file, write_to_file)


@pytest.fixture
@@ -59,12 +59,13 @@ def test_ordered():
assert ordered(target) == [('a', [1, 2, 3]), ('b', 1), ('c', 'a')]


-@pytest.mark.parametrize(("model_dir", "expected"),
-                         [("test_models/test_model_mitie/model_20170628-002704", True),
-                          ("test_models/test_model_mitie_sklearn/model_20170628-002712", True),
-                          ("test_models/test_model_spacy_sklearn/model_20170628-002705", True),
-                          ("test_models/", False),
-                          ("test_models/nonexistent_for_sure_123", False)])
+@pytest.mark.parametrize(
+    ("model_dir", "expected"),
+    [("test_models/test_model_mitie/model_20170628-002704", True),
+     ("test_models/test_model_mitie_sklearn/model_20170628-002712", True),
+     ("test_models/test_model_spacy_sklearn/model_20170628-002705", True),
+     ("test_models/", False),
+     ("test_models/nonexistent_for_sure_123", False)])
def test_is_model_dir(model_dir, expected):
assert is_model_dir(model_dir) == expected

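Re-ordering the parenthesized import list alphabetically (the convention tools like isort enforce) makes long import blocks easier to scan and reduces merge conflicts. A sketch of the style using the standard library rather than rasa_nlu.utils:

# Alphabetized, parenthesized multi-line import.
from os.path import (abspath, basename, dirname,
                     join, normpath, splitext)

assert basename(join("models", "latest.tar.gz")) == "latest.tar.gz"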
11 changes: 6 additions & 5 deletions tests/training/test_train.py
@@ -2,14 +2,14 @@

import os
import pytest
-from rasa_nlu.config import RasaNLUModelConfig

-from tests.conftest import DEFAULT_DATA_PATH
from rasa_nlu import registry, train
-from rasa_nlu.model import Trainer, Interpreter
+from rasa_nlu.config import RasaNLUModelConfig
+from rasa_nlu.model import Interpreter, Trainer
from rasa_nlu.train import create_persistor
from rasa_nlu.training_data import TrainingData
from tests import utilities
+from tests.conftest import DEFAULT_DATA_PATH


def as_pipeline(*components):
@@ -82,11 +82,12 @@ def test_train_model(pipeline_template, component_builder, tmpdir):

@utilities.slowtest
def test_random_seed(component_builder, tmpdir):
-    '''test if train result is the same for two runs of tf embedding'''
+    """test if train result is the same for two runs of tf embedding"""

_config = utilities.base_test_conf("tensorflow_embedding")
# set fixed random seed to 1
-    _config.set_component_attr("intent_classifier_tensorflow_embedding", random_seed=1)
+    _config.set_component_attr("intent_classifier_tensorflow_embedding",
+                               random_seed=1)
# first run
(trained_a, _, persisted_path_a) = train.do_train(
_config,
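Switching the docstring from ''' to """ follows PEP 257, which recommends triple double quotes for all docstrings so tooling sees one consistent style. A minimal sketch:

def train_twice():
    """Train the same config twice and compare the results."""
    # PEP 257: docstrings use triple double quotes, even one-liners.
    return "ok"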
