import logging

import pytest

log = logging.getLogger(__name__)

# `known_providers` and `configs_from_name` are assumed to be defined
# elsewhere in this module.


def configs_from_keyword(kw):
    # Crude approximation of pytest's -k keyword evaluation routine.
    provs = []
    # Namespace in which every known provider name evaluates to False.
    false = {}
    for known_prov in known_providers:
        false[known_prov] = False
    for known_prov in known_providers:
        if known_prov == kw or '[' + known_prov + ']' == kw:
            # Exact match on the provider name or its bracketed param id.
            ok = True
        else:
            # Evaluate the keyword expression with only this provider True.
            ids = false.copy()
            ids[known_prov] = True
            try:
                ok = eval(kw, {}, ids)
            except NameError:
                # The expression references a name we know nothing about.
                ok = False
            except Exception as e:
                log.error("%s %s", type(e), e)
                ok = False
            if type(ok) is list:
                ok = any(ok)
        if ok:
            provs += configs_from_name(known_prov)
    return provs
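
# Worked example of the approximation above (a sketch; the provider names are
# hypothetical and known_providers is assumed to contain them):
#   kw = "aws or gcp", known_providers = ["aws", "gcp", "mock"]
#     trying "aws":  eval(kw, {}, {"aws": True,  "gcp": False, "mock": False}) -> True
#     trying "gcp":  eval(kw, {}, {"aws": False, "gcp": True,  "mock": False}) -> True
#     trying "mock": eval(kw, {}, {"aws": False, "gcp": False, "mock": False}) -> False
# so configs_from_keyword("aws or gcp") collects the aws and gcp configs only.
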
_registered = False


def pytest_generate_tests(metafunc):
    global _registered
    if not _registered:
        # Register every provider name as a marker so pytest does not warn
        # about unknown marks.
        for known_prov in known_providers:
            metafunc.config.addinivalue_line("markers", known_prov)
        _registered = True
    if "provider_config" in metafunc.fixturenames:
        provs = []
        if not provs:
            # Let a -k expression select which provider configs to run.
            kw = metafunc.config.getoption("keyword", "")
            if kw:
                provs += configs_from_keyword(kw)
        if not provs:
            # Nothing selected: fall back to the mock provider.
            provs += configs_from_name("mock")
        ids = [p.param_id for p in provs]
        marks = [
            pytest.param(p, marks=[getattr(pytest.mark, p.name)])
            for p in provs
        ]
        metafunc.parametrize("provider_config", marks, ids=ids)
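
# Example usage (a sketch, not part of the original paste): with this hook in
# conftest.py, any test that requests `provider_config` is generated once per
# selected provider, and each generated test carries that provider's mark.
#
#   # test_providers.py (hypothetical)
#   def test_provider_smoke(provider_config):
#       assert provider_config.name
#
#   $ pytest -k "aws or gcp"   # parametrizes over the aws and gcp configs
#   $ pytest                   # no keyword given: falls back to the mock config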