Fixed E124 flake8 errors

closing bracket does not match visual indentation
This commit is contained in:
Andras Tim 2017-07-17 01:25:07 +02:00
parent 4b20b9d8d9
commit 051d76a63f
8 changed files with 13 additions and 13 deletions

View File

@@ -122,11 +122,11 @@ def pytest_configure(config):
"decorated test function, one with arg1=1 and another with arg1=2."
"see http://pytest.org/latest/parametrize.html for more info and "
"examples."
)
config.addinivalue_line("markers",
"usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
"all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures "
)
@hookimpl(trylast=True)

View File

@@ -40,14 +40,14 @@ def pytest_configure(config):
"skip(reason=None): skip the given test function with an optional reason. "
"Example: skip(reason=\"no way of currently testing this\") skips the "
"test."
)
config.addinivalue_line("markers",
"skipif(condition): skip the given test function if eval(condition) "
"results in a True value. Evaluation happens within the "
"module global context. Example: skipif('sys.platform == \"win32\"') "
"skips the test if we are on the win32 platform. see "
"http://pytest.org/latest/skipping.html"
)
config.addinivalue_line("markers",
"xfail(condition, reason=None, run=True, raises=None, strict=False): "
"mark the test function as an expected failure if eval(condition) "
@@ -56,7 +56,7 @@ def pytest_configure(config):
"If only specific exception(s) are expected, you can list them in "
"raises, and if the test fails in other ways, it will be reported as "
"a true failure. See http://pytest.org/latest/skipping.html"
)
class XFailed(fail.Exception):

View File

@@ -410,7 +410,7 @@ class TestGeneralUsage(object):
def test_stuff(r):
pass
"""
)
res = testdir.runpytest(p)
res.stdout.fnmatch_lines([
'*1 passed*'

View File

@@ -934,7 +934,7 @@ raise ValueError()
@pytest.mark.parametrize('reproptions', [
{'style': style, 'showlocals': showlocals,
'funcargs': funcargs, 'tbfilter': tbfilter
} for style in ("long", "short", "no")
for showlocals in (True, False)
for tbfilter in (True, False)
for funcargs in (True, False)])

View File

@@ -279,7 +279,7 @@ class TestMetafunc(object):
assert result == ["10.0-IndexError()",
"20-KeyError()",
"three-b2",
]
@pytest.mark.issue351
def test_idmaker_idfn_unique_names(self):
@@ -291,11 +291,11 @@ class TestMetafunc(object):
result = idmaker(("a", "b"), [pytest.param(10.0, IndexError()),
pytest.param(20, KeyError()),
pytest.param("three", [1, 2, 3]),
], idfn=ids)
assert result == ["a-a0",
"a-a1",
"a-a2",
]
@pytest.mark.issue351
def test_idmaker_idfn_exception(self):

View File

@@ -609,7 +609,7 @@ def test_rewritten():
def test_optimized():
"hello"
assert test_optimized.__doc__ is None"""
)
p = py.path.local.make_numbered_dir(prefix="runpytest-", keep=None,
rootdir=testdir.tmpdir)
tmp = "--basetemp=%s" % p

View File

@@ -415,7 +415,7 @@ class TestTerminalFunctional(object):
def test_three():
pass
"""
)
result = testdir.runpytest("-k", "test_two:", testpath)
result.stdout.fnmatch_lines([
"*test_deselected.py ..",

View File

@@ -196,6 +196,6 @@ filterwarnings =
ignore:.*inspect.getargspec.*deprecated, use inspect.signature.*:DeprecationWarning
[flake8]
-ignore = E124,E125,E126,E127,E128,E129,E131,E201,E202,E203,E221,E222,E225,E226,E231,E241,E251,E261,E262,E265,E271,E272,E293,E301,E302,E303,E401,E402,E501,E701,E702,E704,E712,E731
+ignore = E125,E126,E127,E128,E129,E131,E201,E202,E203,E221,E222,E225,E226,E231,E241,E251,E261,E262,E265,E271,E272,E293,E301,E302,E303,E401,E402,E501,E701,E702,E704,E712,E731
max-line-length = 120
exclude = _pytest/vendored_packages/pluggy.py