Source: pytest-bdd
Version: 3.2.1-1
Severity: important
User: pyt...@packages.debian.org
Usertags: pytest-v6

Hi,

pytest-bdd FTBFS (fails to build from source) with pytest 6 in experimental.
The pytest changelog may offer clues as to why:

    https://docs.pytest.org/en/stable/changelog.html

The error log below has more details. In short, two tests fail: one that
compares the gherkin terminal reporter's output with the regular reporter's,
and one that constructs collection items directly in its conftest and now
trips pytest's Node.from_parent deprecation. A couple of notes follow after
the log.

> I: pybuild base:232: python3.9 -m pytest
> ============================= test session starts 
> ==============================
> platform linux -- Python 3.9.1, pytest-6.0.2, py-1.9.0, pluggy-0.13.0 -- 
> /usr/bin/python3.9
> cachedir: .pytest_cache
> rootdir: /<<PKGBUILDDIR>>, configfile: pytest.ini
> plugins: bdd-3.2.1
> collecting ... collected 152 items
> 
> tests/args/cfparse/test_args.py::test_steps <- pytest_bdd/scenario.py PASSED 
> [  0%]
> tests/args/cfparse/test_args.py::test_argument_in_when_step_1 <- 
> pytest_bdd/scenario.py PASSED [  1%]
> tests/args/cfparse/test_args.py::test_argument_in_when_step_2 <- 
> pytest_bdd/scenario.py PASSED [  1%]
> tests/args/parse/test_args.py::test_steps <- pytest_bdd/scenario.py PASSED [  
> 2%]
> tests/args/parse/test_args.py::test_argument_in_when_step_1 <- 
> pytest_bdd/scenario.py PASSED [  3%]
> tests/args/parse/test_args.py::test_argument_in_when_step_2 <- 
> pytest_bdd/scenario.py PASSED [  3%]
> tests/args/regex/test_args.py::test_steps <- pytest_bdd/scenario.py PASSED [  
> 4%]
> tests/args/regex/test_args.py::test_argument_in_when_step_1 <- 
> pytest_bdd/scenario.py PASSED [  5%]
> tests/args/regex/test_args.py::test_argument_in_when_step_2 <- 
> pytest_bdd/scenario.py PASSED [  5%]
> tests/args/subfolder/test_args.py::test_steps <- pytest_bdd/scenario.py 
> PASSED [  6%]
> tests/feature/test_alias.py::test_steps <- pytest_bdd/scenario.py PASSED [  
> 7%]
> tests/feature/test_gherkin_terminal_reporter.py::test_Should_default_output_be_the_same_as_regular_terminal_reporter
>  <- pytest_bdd/scenario.py FAILED [  7%]
> tests/feature/test_gherkin_terminal_reporter.py::test_Should_verbose_mode_enable_displaying_feature_and_scenario_names_rather_than_test_names_in_a_single_line
>  <- pytest_bdd/scenario.py PASSED [  8%]
> tests/feature/test_gherkin_terminal_reporter.py::test_Should_verbose_mode_preserve_displaying_of_regular_tests_as_usual
>  <- pytest_bdd/scenario.py PASSED [  9%]
> tests/feature/test_gherkin_terminal_reporter.py::test_Should_double_verbose_mode_enable_displaying_of_full_gherkin_scenario_description
>  <- pytest_bdd/scenario.py PASSED [  9%]
> tests/feature/test_gherkin_terminal_reporter.py::test_Should_error_message_be_displayed_when_no_scenario_is_found[compact
>  mode] <- pytest_bdd/scenario.py PASSED [ 10%]
> tests/feature/test_gherkin_terminal_reporter.py::test_Should_error_message_be_displayed_when_no_scenario_is_found[line
>  per test] <- pytest_bdd/scenario.py PASSED [ 11%]
> tests/feature/test_gherkin_terminal_reporter.py::test_Should_error_message_be_displayed_when_no_scenario_is_found[verbose]
>  <- pytest_bdd/scenario.py PASSED [ 11%]
> tests/feature/test_gherkin_terminal_reporter.py::test_Should_error_message_be_displayed_when_no_step_is_found[compact
>  mode] <- pytest_bdd/scenario.py PASSED [ 12%]
> tests/feature/test_gherkin_terminal_reporter.py::test_Should_error_message_be_displayed_when_no_step_is_found[line
>  per test] <- pytest_bdd/scenario.py PASSED [ 13%]
> tests/feature/test_gherkin_terminal_reporter.py::test_Should_error_message_be_displayed_when_no_step_is_found[verbose]
>  <- pytest_bdd/scenario.py PASSED [ 13%]
> tests/feature/test_gherkin_terminal_reporter.py::test_Should_error_message_be_displayed_when_error_occurs_during_test_execution[compact
>  mode] <- pytest_bdd/scenario.py PASSED [ 14%]
> tests/feature/test_gherkin_terminal_reporter.py::test_Should_error_message_be_displayed_when_error_occurs_during_test_execution[line
>  per test] <- pytest_bdd/scenario.py PASSED [ 15%]
> tests/feature/test_gherkin_terminal_reporter.py::test_Should_error_message_be_displayed_when_error_occurs_during_test_execution[verbose]
>  <- pytest_bdd/scenario.py PASSED [ 15%]
> tests/feature/test_gherkin_terminal_reporter.py::test_Should_local_variables_be_displayed_when___showlocals_option_is_used
>  <- pytest_bdd/scenario.py PASSED [ 16%]
> tests/feature/test_gherkin_terminal_reporter.py::test_Should_step_parameters_be_replaced_by_their_values
>  <- pytest_bdd/scenario.py PASSED [ 17%]
> tests/feature/test_outline.py::test_outlined[12-5.0-7] <- 
> pytest_bdd/scenario.py PASSED [ 17%]
> tests/feature/test_outline.py::test_outlined[5-4.0-1] <- 
> pytest_bdd/scenario.py PASSED [ 18%]
> tests/feature/test_outline.py::test_outlined_with_other_fixtures[1-12-5.0-7] 
> <- pytest_bdd/scenario.py PASSED [ 19%]
> tests/feature/test_outline.py::test_outlined_with_other_fixtures[1-5-4.0-1] 
> <- pytest_bdd/scenario.py PASSED [ 19%]
> tests/feature/test_outline.py::test_outlined_with_other_fixtures[2-12-5.0-7] 
> <- pytest_bdd/scenario.py PASSED [ 20%]
> tests/feature/test_outline.py::test_outlined_with_other_fixtures[2-5-4.0-1] 
> <- pytest_bdd/scenario.py PASSED [ 21%]
> tests/feature/test_outline.py::test_outlined_with_other_fixtures[3-12-5.0-7] 
> <- pytest_bdd/scenario.py PASSED [ 21%]
> tests/feature/test_outline.py::test_outlined_with_other_fixtures[3-5-4.0-1] 
> <- pytest_bdd/scenario.py PASSED [ 22%]
> tests/feature/test_outline.py::test_vertical_example[12-5.0-7] <- 
> pytest_bdd/scenario.py PASSED [ 23%]
> tests/feature/test_outline.py::test_vertical_example[2-1.0-1] <- 
> pytest_bdd/scenario.py PASSED [ 23%]
> tests/feature/test_outline.py::test_outlined_feature[12-5.0-7-oranges] <- 
> pytest_bdd/scenario.py PASSED [ 24%]
> tests/feature/test_outline.py::test_outlined_feature[12-5.0-7-apples] <- 
> pytest_bdd/scenario.py PASSED [ 25%]
> tests/feature/test_outline.py::test_outlined_feature[5-4.0-1-oranges] <- 
> pytest_bdd/scenario.py PASSED [ 25%]
> tests/feature/test_outline.py::test_outlined_feature[5-4.0-1-apples] <- 
> pytest_bdd/scenario.py PASSED [ 26%]
> tests/feature/test_outline_empty_values.py::test_scenario_with_empty_example_values[#--]
>  <- pytest_bdd/scenario.py PASSED [ 26%]
> tests/feature/test_outline_empty_values.py::test_scenario_with_empty_example_values_vertical[#--]
>  <- pytest_bdd/scenario.py PASSED [ 27%]
> tests/feature/test_parametrized.py::test_parametrized[12-5-7] <- 
> pytest_bdd/scenario.py PASSED [ 28%]
> tests/feature/test_parametrized.py::test_parametrized_with_other_fixtures[1-12-5-7]
>  <- pytest_bdd/scenario.py PASSED [ 28%]
> tests/feature/test_parametrized.py::test_parametrized_with_other_fixtures[2-12-5-7]
>  <- pytest_bdd/scenario.py PASSED [ 29%]
> tests/feature/test_reuse.py::test_reuse <- pytest_bdd/scenario.py PASSED [ 
> 30%]
> tests/feature/test_same_function_name.py::test_when_function_name_same_as_step_name
>  <- pytest_bdd/scenario.py PASSED [ 30%]
> tests/feature/test_steps.py::test_steps <- pytest_bdd/scenario.py PASSED [ 
> 31%]
> tests/feature/test_steps.py::test_when_first <- pytest_bdd/scenario.py PASSED 
> [ 32%]
> tests/feature/test_steps.py::test_then_after_given <- pytest_bdd/scenario.py 
> PASSED [ 32%]
> tests/feature/test_steps.py::test_conftest <- pytest_bdd/scenario.py PASSED [ 
> 33%]
> tests/steps/test_given.py::test_given_with_fixture <- pytest_bdd/scenario.py 
> PASSED [ 34%]
> tests/steps/test_given.py::test_root_alias <- pytest_bdd/scenario.py PASSED [ 
> 34%]
> tests/steps/test_given.py::test_session_given <- pytest_bdd/scenario.py 
> PASSED [ 35%]
> tests/steps/test_given.py::test_given_injection <- pytest_bdd/scenario.py 
> PASSED [ 36%]
> tests/steps/test_unicode.py::test_steps_in_feature_file_have_unicode <- 
> pytest_bdd/scenario.py PASSED [ 36%]
> tests/steps/test_unicode.py::test_steps_in_py_file_have_unicode <- 
> pytest_bdd/scenario.py PASSED [ 37%]
> tests/args/cfparse/test_args.py::test_multiple_given PASSED              [ 
> 38%]
> tests/args/parse/test_args.py::test_multiple_given PASSED                [ 
> 38%]
> tests/args/regex/test_args.py::test_multiple_given PASSED                [ 
> 39%]
> tests/args/test_arg_fixture_mix.py::test_arg_fixture_mix PASSED          [ 
> 40%]
> tests/feature/test_background.py::test_background_basic PASSED           [ 
> 40%]
> tests/feature/test_background.py::test_background_check_order PASSED     [ 
> 41%]
> tests/feature/test_cucumber_json.py::test_step_trace PASSED              [ 
> 42%]
> tests/feature/test_cucumber_json.py::test_step_trace_with_expand_option 
> PASSED [ 42%]
> tests/feature/test_description.py::test_description PASSED               [ 
> 43%]
> tests/feature/test_feature_base_dir.py::test_feature_path_not_found[.] PASSED 
> [ 44%]
> tests/feature/test_feature_base_dir.py::test_feature_path_not_found[/does/not/exist/]
>  PASSED [ 44%]
> tests/feature/test_feature_base_dir.py::test_feature_path_ok PASSED      [ 
> 45%]
> tests/feature/test_feature_base_dir.py::test_feature_path_by_param_not_found 
> PASSED [ 46%]
> tests/feature/test_feature_base_dir.py::test_feature_path_by_param_ok[.] 
> PASSED [ 46%]
> tests/feature/test_feature_base_dir.py::test_feature_path_by_param_ok[/does/not/exist/]
>  PASSED [ 47%]
> tests/feature/test_gherkin_terminal_reporter.py::test_scenario_in_expanded_mode[./steps/unicode.feature-./steps/test_unicode.py-test_steps_in_feature_file_have_unicode]
>  PASSED [ 48%]
> tests/feature/test_multiline.py::test_multiline[\nScenario: Multiline step 
> using sub indentation\n    Given I have a step with:\n        Some\n\n        
> Extra\n        Lines\n    Then the text should be parsed with correct 
> indentation\n-Some\n\nExtra\nLines] PASSED [ 48%]
> tests/feature/test_multiline.py::test_multiline[\nScenario: Multiline step 
> using sub indentation\n    Given I have a step with:\n        Some\n\n      
> Extra\n     Lines\n\n    Then the text should be parsed with correct 
> indentation\n-   Some\n\n Extra\nLines] PASSED [ 49%]
> tests/feature/test_multiline.py::test_multiline[\nFeature:\nScenario: 
> Multiline step using sub indentation\n    Given I have a step with:\n        
> Some\n        Extra\n        Lines\n\n-Some\nExtra\nLines] PASSED [ 50%]
> tests/feature/test_multiline.py::test_multiline_wrong_indent PASSED      [ 
> 50%]
> tests/feature/test_no_scenario.py::test_no_scenarios PASSED              [ 
> 51%]
> tests/feature/test_no_scenario.py::test_only_background_strict_mode PASSED [ 
> 51%]
> tests/feature/test_no_sctrict_gherkin.py::test_background_no_strict_gherkin 
> PASSED [ 52%]
> tests/feature/test_no_sctrict_gherkin.py::test_scenario_no_strict_gherkin 
> PASSED [ 53%]
> tests/feature/test_outline.py::test_wrongly_outlined PASSED              [ 
> 53%]
> tests/feature/test_outline.py::test_wrong_vertical_examples_scenario PASSED [ 
> 54%]
> tests/feature/test_outline.py::test_wrong_vertical_examples_feature PASSED [ 
> 55%]
> tests/feature/test_report.py::test_step_trace PASSED                     [ 
> 55%]
> tests/feature/test_report.py::test_complex_types PASSED                  [ 
> 56%]
> tests/feature/test_scenario.py::test_scenario_not_found PASSED           [ 
> 57%]
> tests/feature/test_scenario.py::test_scenario_comments PASSED            [ 
> 57%]
> tests/feature/test_scenario.py::test_scenario_not_decorator PASSED       [ 
> 58%]
> tests/feature/test_scenarios.py::test_scenarios PASSED                   [ 
> 59%]
> tests/feature/test_scenarios.py::test_scenarios_none_found PASSED        [ 
> 59%]
> tests/feature/test_steps.py::test_multiple_given PASSED                  [ 
> 60%]
> tests/feature/test_steps.py::test_step_hooks PASSED                      [ 
> 61%]
> tests/feature/test_steps.py::test_step_trace PASSED                      [ 
> 61%]
> tests/feature/test_tags.py::test_tags_selector PASSED                    [ 
> 62%]
> tests/feature/test_tags.py::test_tags_after_background_issue_160 PASSED  [ 
> 63%]
> tests/feature/test_tags.py::test_apply_tag_hook PASSED                   [ 
> 63%]
> tests/feature/test_tags.py::test_tag_with_spaces PASSED                  [ 
> 64%]
> tests/feature/test_tags.py::test_at_in_scenario PASSED                   [ 
> 65%]
> tests/feature/test_tags.py::test_get_tags[@foo @bar-expected0] PASSED    [ 
> 65%]
> tests/feature/test_tags.py::test_get_tags[@with spaces @bar-expected1] PASSED 
> [ 66%]
> tests/feature/test_tags.py::test_get_tags[@double @double-expected2] PASSED [ 
> 67%]
> tests/feature/test_tags.py::test_get_tags[    @indented-expected3] PASSED [ 
> 67%]
> tests/feature/test_tags.py::test_get_tags[None-expected4] PASSED         [ 
> 68%]
> tests/feature/test_tags.py::test_get_tags[foobar-expected5] PASSED       [ 
> 69%]
> tests/feature/test_tags.py::test_get_tags[-expected6] PASSED             [ 
> 69%]
> tests/feature/test_wrong.py::test_wrong[True-True-when_in_background.feature-When
>  in background] PASSED [ 70%]
> tests/feature/test_wrong.py::test_wrong[True-True-when_after_then.feature-When
>  after then] PASSED [ 71%]
> tests/feature/test_wrong.py::test_wrong[True-True-then_first.feature-Then 
> first] PASSED [ 71%]
> tests/feature/test_wrong.py::test_wrong[True-True-given_after_when.feature-Given
>  after When] PASSED [ 72%]
> tests/feature/test_wrong.py::test_wrong[True-True-given_after_then.feature-Given
>  after Then] PASSED [ 73%]
> tests/feature/test_wrong.py::test_wrong[True-False-when_in_background.feature-When
>  in background] PASSED [ 73%]
> tests/feature/test_wrong.py::test_wrong[True-False-when_after_then.feature-When
>  after then] PASSED [ 74%]
> tests/feature/test_wrong.py::test_wrong[True-False-then_first.feature-Then 
> first] PASSED [ 75%]
> tests/feature/test_wrong.py::test_wrong[True-False-given_after_when.feature-Given
>  after When] PASSED [ 75%]
> tests/feature/test_wrong.py::test_wrong[True-False-given_after_then.feature-Given
>  after Then] PASSED [ 76%]
> tests/feature/test_wrong.py::test_wrong[False-True-when_in_background.feature-When
>  in background] PASSED [ 76%]
> tests/feature/test_wrong.py::test_wrong[False-True-when_after_then.feature-When
>  after then] PASSED [ 77%]
> tests/feature/test_wrong.py::test_wrong[False-True-then_first.feature-Then 
> first] PASSED [ 78%]
> tests/feature/test_wrong.py::test_wrong[False-True-given_after_when.feature-Given
>  after When] PASSED [ 78%]
> tests/feature/test_wrong.py::test_wrong[False-True-given_after_then.feature-Given
>  after Then] PASSED [ 79%]
> tests/feature/test_wrong.py::test_wrong[False-False-when_in_background.feature-When
>  in background] PASSED [ 80%]
> tests/feature/test_wrong.py::test_wrong[False-False-when_after_then.feature-When
>  after then] PASSED [ 80%]
> tests/feature/test_wrong.py::test_wrong[False-False-then_first.feature-Then 
> first] PASSED [ 81%]
> tests/feature/test_wrong.py::test_wrong[False-False-given_after_when.feature-Given
>  after When] PASSED [ 82%]
> tests/feature/test_wrong.py::test_wrong[False-False-given_after_then.feature-Given
>  after Then] PASSED [ 82%]
> tests/feature/test_wrong.py::test_wrong_type_order[When in Given] PASSED [ 
> 83%]
> tests/feature/test_wrong.py::test_wrong_type_order[When in Then] PASSED  [ 
> 84%]
> tests/feature/test_wrong.py::test_wrong_type_order[Then in Given] PASSED [ 
> 84%]
> tests/feature/test_wrong.py::test_wrong_type_order[Given in When] PASSED [ 
> 85%]
> tests/feature/test_wrong.py::test_wrong_type_order[Given in Then] PASSED [ 
> 86%]
> tests/feature/test_wrong.py::test_wrong_type_order[Then in When] PASSED  [ 
> 86%]
> tests/feature/test_wrong.py::test_verbose_output PASSED                  [ 
> 87%]
> tests/feature/test_wrong.py::test_multiple_features_single_file PASSED   [ 
> 88%]
> tests/generation/test_generate_missing.py::test_python_name_generator PASSED 
> [ 88%]
> tests/generation/test_generate_missing.py::test_generate_missing PASSED  [ 
> 89%]
> tests/library/child/test_local_override.py::test_override PASSED         [ 
> 90%]
> tests/library/child/test_local_override.py::test_parent PASSED           [ 
> 90%]
> tests/library/child/test_parent_override.py::test_parent PASSED          [ 
> 91%]
> tests/library/child/test_parent_override.py::test_override PASSED        [ 
> 92%]
> tests/library/test_parent.py::test_parent PASSED                         [ 
> 92%]
> tests/library/test_parent.py::test_global_when_step PASSED               [ 
> 93%]
> tests/scripts/test_generate.py::test_generate PASSED                     [ 
> 94%]
> tests/scripts/test_main.py::test_main PASSED                             [ 
> 94%]
> tests/scripts/test_migrate.py::test_migrate PASSED                       [ 
> 95%]
> tests/steps/test_given.py::test_decorate_with_fixture PASSED             [ 
> 96%]
> tests/steps/test_steps.py::test_when_then PASSED                         [ 
> 96%]
> tests/steps/test_steps.py::test_preserve_decorator[given-Given] PASSED   [ 
> 97%]
> tests/steps/test_steps.py::test_preserve_decorator[when-When] PASSED     [ 
> 98%]
> tests/steps/test_steps.py::test_preserve_decorator[then-Then] PASSED     [ 
> 98%]
> tests/test_hooks.py::test_hooks PASSED                                   [ 
> 99%]
> tests/test_hooks.py::test_item_collection_does_not_break_on_non_function_items
>  FAILED [100%]
> 
> =================================== FAILURES 
> ===================================
> _____ test_Should_default_output_be_the_same_as_regular_terminal_reporter 
> ______
> 
> request = <FixtureRequest for <Function 
> test_Should_default_output_be_the_same_as_regular_terminal_reporter>>
> 
>     @pytest.mark.usefixtures(*function_args)
>     def scenario_wrapper(request):
>>       _execute_scenario(feature, scenario, request, encoding)
> 
> args       = []
> encoding   = 'utf-8'
> feature    = <pytest_bdd.feature.Feature object at 0x7f8d4f851850>
> fn         = <function 
> test_Should_default_output_be_the_same_as_regular_terminal_reporter at 
> 0x7f8d4f7d5160>
> request    = <FixtureRequest for <Function 
> test_Should_default_output_be_the_same_as_regular_terminal_reporter>>
> scenario   = <pytest_bdd.feature.Scenario object at 0x7f8d4f84dd00>
> 
> /<<PKGBUILDDIR>>/pytest_bdd/scenario.py:227: 
> _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
> _ 
> /<<PKGBUILDDIR>>/pytest_bdd/scenario.py:189: in _execute_scenario
>     _execute_step_function(request, scenario, step, step_func)
>         encoding   = 'utf-8'
>         feature    = <pytest_bdd.feature.Feature object at 0x7f8d4f851850>
>         givens     = {'gherkin_scenario'}
>         request    = <FixtureRequest for <Function 
> test_Should_default_output_be_the_same_as_regular_terminal_reporter>>
>         scenario   = <pytest_bdd.feature.Scenario object at 0x7f8d4f84dd00>
>         step       = <pytest_bdd.feature.Step object at 0x7f8d4f84d160>
>         step_func  = <function output_must_be_the_same_as_regular_reporter at 
> 0x7f8d4f725dc0>
> /<<PKGBUILDDIR>>/pytest_bdd/scenario.py:130: in _execute_step_function
>     step_func(**kwargs)
>         kw         = {'feature': <pytest_bdd.feature.Feature object at 
> 0x7f8d4f851850>,
>  'request': <FixtureRequest for <Function 
> test_Should_default_output_be_the_same_as_regular_terminal_reporter>>,
>  'scenario': <pytest_bdd.feature.Scenario object at 0x7f8d4f84dd00>,
>  'step': <pytest_bdd.feature.Step object at 0x7f8d4f84d160>,
>  'step_func': <function output_must_be_the_same_as_regular_reporter at 
> 0x7f8d4f725dc0>,
>  'step_func_args': {'test_execution': {'gherkin': <RunResult ret=ExitCode.OK 
> len(stdout.lines)=7 len(stderr.lines)=0 duration=0.03s>,
>                                        'regular': <RunResult ret=ExitCode.OK 
> len(stdout.lines)=9 len(stderr.lines)=0 duration=0.03s>}}}
>         kwargs     = {'test_execution': {'gherkin': <RunResult 
> ret=ExitCode.OK len(stdout.lines)=7 len(stderr.lines)=0 duration=0.03s>,
>                     'regular': <RunResult ret=ExitCode.OK len(stdout.lines)=9 
> len(stderr.lines)=0 duration=0.03s>}}
>         request    = <FixtureRequest for <Function 
> test_Should_default_output_be_the_same_as_regular_terminal_reporter>>
>         scenario   = <pytest_bdd.feature.Scenario object at 0x7f8d4f84dd00>
>         step       = <pytest_bdd.feature.Step object at 0x7f8d4f84d160>
>         step_func  = <function output_must_be_the_same_as_regular_reporter at 
> 0x7f8d4f725dc0>
> _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
> _ 
> 
> test_execution = {'gherkin': <RunResult ret=ExitCode.OK len(stdout.lines)=7 
> len(stderr.lines)=0 duration=0.03s>, 'regular': <RunResult ret=ExitCode.OK 
> len(stdout.lines)=9 len(stderr.lines)=0 duration=0.03s>}
> 
>     @then("output must be formatted the same way as regular one")
>     def output_must_be_the_same_as_regular_reporter(test_execution):
>         reg = test_execution['regular']
>         ghe = test_execution['gherkin']
>         assert reg.ret == 0
>         assert ghe.ret == 0
>         #  last line can be different because of test execution time is 
> printed
>         reg_lines = reg.stdout.lines if reg.stdout.lines[-1] else 
> reg.stdout.lines[:-2]
>         reg_lines[-1] = re.sub(r' \d+\.\d+ ', ' X ', reg_lines[-1])
>         ghe_lines = ghe.stdout.lines if ghe.stdout.lines[-1] else 
> ghe.stdout.lines[:-2]
>         ghe_lines[-1] = re.sub(r' \d+\.\d+ ', ' X ', ghe_lines[-1])
>         for l1, l2 in zip(reg_lines, ghe_lines):
>>           assert l1 == l2
> E           AssertionError: assert '' == ('.                                  
>                                       '\n '[100%]')
> E             - .                                                             
>            [100%]
> 
> ghe        = <RunResult ret=ExitCode.OK len(stdout.lines)=7 
> len(stderr.lines)=0 duration=0.03s>
> ghe_lines  = ['============================= test session starts '
>  '==============================',
>  'platform linux -- Python 3.9.1, pytest-6.0.2, py-1.9.0, pluggy-0.13.0',
>  'rootdir: /tmp/pytest-of-christian/pytest-0/scenario_wrapper0',
>  'plugins: bdd-3.2.1',
>  'collected 1 item',
>  '.                                                                        '
>  '[100%]',
>  '============================== 1 passed in 0.01s '
>  '===============================']
> l1         = ''
> l2         = ('.                                                              
>           '
>  '[100%]')
> reg        = <RunResult ret=ExitCode.OK len(stdout.lines)=9 
> len(stderr.lines)=0 duration=0.03s>
> reg_lines  = ['============================= test session starts '
>  '==============================',
>  'platform linux -- Python 3.9.1, pytest-6.0.2, py-1.9.0, pluggy-0.13.0',
>  'rootdir: /tmp/pytest-of-christian/pytest-0/scenario_wrapper0',
>  'plugins: bdd-3.2.1',
>  'collected 1 item',
>  '',
>  'test_gherkin.py .                                                        '
>  '[100%]',
>  '',
>  '============================== 1 passed in 0.01s '
>  '===============================']
> test_execution = {'gherkin': <RunResult ret=ExitCode.OK len(stdout.lines)=7 
> len(stderr.lines)=0 duration=0.03s>,
>  'regular': <RunResult ret=ExitCode.OK len(stdout.lines)=9 
> len(stderr.lines)=0 duration=0.03s>}
> 
> /<<PKGBUILDDIR>>/tests/feature/test_gherkin_terminal_reporter.py:173: 
> AssertionError
> ----------------------------- Captured stdout call 
> -----------------------------
> ============================= test session starts 
> ==============================
> platform linux -- Python 3.9.1, pytest-6.0.2, py-1.9.0, pluggy-0.13.0
> rootdir: /tmp/pytest-of-christian/pytest-0/scenario_wrapper0
> plugins: bdd-3.2.1
> collected 1 item
> 
> test_gherkin.py .                                                        
> [100%]
> 
> ============================== 1 passed in 0.01s 
> ===============================
> ============================= test session starts 
> ==============================
> platform linux -- Python 3.9.1, pytest-6.0.2, py-1.9.0, pluggy-0.13.0
> rootdir: /tmp/pytest-of-christian/pytest-0/scenario_wrapper0
> plugins: bdd-3.2.1
> collected 1 item
> .                                                                        
> [100%]
> ============================== 1 passed in 0.01s 
> ===============================
> __________ test_item_collection_does_not_break_on_non_function_items 
> ___________
> 
> testdir = <Testdir 
> local('/tmp/pytest-of-christian/pytest-0/test_item_collection_does_not_break_on_non_function_items0')>
> 
>     def test_item_collection_does_not_break_on_non_function_items(testdir):
>         """Regression test for 
> https://github.com/pytest-dev/pytest-bdd/issues/317"""
>         testdir.makeconftest("""
>         import pytest
>     
>         @pytest.mark.tryfirst
>         def pytest_collection_modifyitems(session, config, items):
>             items[:] = [CustomItem(name=item.name, parent=item.parent) for 
> item in items]
>     
>         class CustomItem(pytest.Item):
>             def runtest(self):
>                 assert True
>         """)
>     
>         testdir.makepyfile("""
>         def test_convert_me_to_custom_item_and_assert_true():
>             assert False
>         """)
>     
>         result = testdir.runpytest()
>>       result.assert_outcomes(passed=1)
> E       AssertionError: assert {'errors': 0,\n 'failed': 0,\n 'passed': 0,\n 
> 'skipped': 0,\n 'xfailed': 0,\n 'xpassed': 0} == {'errors': 0,\n 'failed': 
> 0,\n 'passed': 1,\n 'skipped': 0,\n 'xfailed': 0,\n 'xpassed': 0}
> E         Common items:
> E         {'errors': 0, 'failed': 0, 'skipped': 0, 'xfailed': 0, 'xpassed': 0}
> E         Differing items:
> E         {'passed': 0} != {'passed': 1}
> E         Full diff:
> E           {
> E            'errors': 0,
> E            'failed': 0,
> E         -  'passed': 1,
> E         ?            ^
> E         +  'passed': 0,
> E         ?            ^
> E            'skipped': 0,
> E            'xfailed': 0,
> E            'xpassed': 0,
> E           }
> 
> result     = <RunResult ret=ExitCode.INTERNAL_ERROR len(stdout.lines)=64 
> len(stderr.lines)=0 duration=0.03s>
> testdir    = <Testdir 
> local('/tmp/pytest-of-christian/pytest-0/test_item_collection_does_not_break_on_non_function_items0')>
> 
> /<<PKGBUILDDIR>>/tests/test_hooks.py:55: AssertionError
> ----------------------------- Captured stdout call 
> -----------------------------
> ============================= test session starts 
> ==============================
> platform linux -- Python 3.9.1, pytest-6.0.2, py-1.9.0, pluggy-0.13.0
> rootdir: 
> /tmp/pytest-of-christian/pytest-0/test_item_collection_does_not_break_on_non_function_items0
> plugins: bdd-3.2.1
> collected 1 item
> INTERNALERROR> Traceback (most recent call last):
> INTERNALERROR>   File "/usr/lib/python3/dist-packages/_pytest/main.py", line 
> 240, in wrap_session
> INTERNALERROR>     session.exitstatus = doit(config, session) or 0
> INTERNALERROR>   File "/usr/lib/python3/dist-packages/_pytest/main.py", line 
> 295, in _main
> INTERNALERROR>     config.hook.pytest_collection(session=session)
> INTERNALERROR>   File "/usr/lib/python3/dist-packages/pluggy/hooks.py", line 
> 286, in __call__
> INTERNALERROR>     return self._hookexec(self, self.get_hookimpls(), kwargs)
> INTERNALERROR>   File "/usr/lib/python3/dist-packages/pluggy/manager.py", 
> line 92, in _hookexec
> INTERNALERROR>     return self._inner_hookexec(hook, methods, kwargs)
> INTERNALERROR>   File "/usr/lib/python3/dist-packages/pluggy/manager.py", 
> line 335, in traced_hookexec
> INTERNALERROR>     return outcome.get_result()
> INTERNALERROR>   File "/usr/lib/python3/dist-packages/pluggy/callers.py", 
> line 80, in get_result
> INTERNALERROR>     raise ex[1].with_traceback(ex[2])
> INTERNALERROR>   File "/usr/lib/python3/dist-packages/pluggy/callers.py", 
> line 52, in from_call
> INTERNALERROR>     result = func()
> INTERNALERROR>   File "/usr/lib/python3/dist-packages/pluggy/manager.py", 
> line 333, in <lambda>
> INTERNALERROR>     outcome = _Result.from_call(lambda: oldcall(hook, 
> hook_impls, kwargs))
> INTERNALERROR>   File "/usr/lib/python3/dist-packages/pluggy/manager.py", 
> line 83, in <lambda>
> INTERNALERROR>     self._inner_hookexec = lambda hook, methods, kwargs: 
> hook.multicall(
> INTERNALERROR>   File "/usr/lib/python3/dist-packages/pluggy/callers.py", 
> line 208, in _multicall
> INTERNALERROR>     return outcome.get_result()
> INTERNALERROR>   File "/usr/lib/python3/dist-packages/pluggy/callers.py", 
> line 80, in get_result
> INTERNALERROR>     raise ex[1].with_traceback(ex[2])
> INTERNALERROR>   File "/usr/lib/python3/dist-packages/pluggy/callers.py", 
> line 187, in _multicall
> INTERNALERROR>     res = hook_impl.function(*args)
> INTERNALERROR>   File "/usr/lib/python3/dist-packages/_pytest/main.py", line 
> 306, in pytest_collection
> INTERNALERROR>     session.perform_collect()
> INTERNALERROR>   File "/usr/lib/python3/dist-packages/_pytest/main.py", line 
> 518, in perform_collect
> INTERNALERROR>     hook.pytest_collection_modifyitems(
> INTERNALERROR>   File "/usr/lib/python3/dist-packages/pluggy/hooks.py", line 
> 286, in __call__
> INTERNALERROR>     return self._hookexec(self, self.get_hookimpls(), kwargs)
> INTERNALERROR>   File "/usr/lib/python3/dist-packages/pluggy/manager.py", 
> line 92, in _hookexec
> INTERNALERROR>     return self._inner_hookexec(hook, methods, kwargs)
> INTERNALERROR>   File "/usr/lib/python3/dist-packages/pluggy/manager.py", 
> line 335, in traced_hookexec
> INTERNALERROR>     return outcome.get_result()
> INTERNALERROR>   File "/usr/lib/python3/dist-packages/pluggy/callers.py", 
> line 80, in get_result
> INTERNALERROR>     raise ex[1].with_traceback(ex[2])
> INTERNALERROR>   File "/usr/lib/python3/dist-packages/pluggy/callers.py", 
> line 52, in from_call
> INTERNALERROR>     result = func()
> INTERNALERROR>   File "/usr/lib/python3/dist-packages/pluggy/manager.py", 
> line 333, in <lambda>
> INTERNALERROR>     outcome = _Result.from_call(lambda: oldcall(hook, 
> hook_impls, kwargs))
> INTERNALERROR>   File "/usr/lib/python3/dist-packages/pluggy/manager.py", 
> line 83, in <lambda>
> INTERNALERROR>     self._inner_hookexec = lambda hook, methods, kwargs: 
> hook.multicall(
> INTERNALERROR>   File "/usr/lib/python3/dist-packages/pluggy/callers.py", 
> line 208, in _multicall
> INTERNALERROR>     return outcome.get_result()
> INTERNALERROR>   File "/usr/lib/python3/dist-packages/pluggy/callers.py", 
> line 80, in get_result
> INTERNALERROR>     raise ex[1].with_traceback(ex[2])
> INTERNALERROR>   File "/usr/lib/python3/dist-packages/pluggy/callers.py", 
> line 187, in _multicall
> INTERNALERROR>     res = hook_impl.function(*args)
> INTERNALERROR>   File 
> "/tmp/pytest-of-christian/pytest-0/test_item_collection_does_not_break_on_non_function_items0/conftest.py",
>  line 5, in pytest_collection_modifyitems
> INTERNALERROR>     items[:] = [CustomItem(name=item.name, parent=item.parent) 
> for item in items]
> INTERNALERROR>   File 
> "/tmp/pytest-of-christian/pytest-0/test_item_collection_does_not_break_on_non_function_items0/conftest.py",
>  line 5, in <listcomp>
> INTERNALERROR>     items[:] = [CustomItem(name=item.name, parent=item.parent) 
> for item in items]
> INTERNALERROR>   File "/usr/lib/python3/dist-packages/_pytest/nodes.py", line 
> 95, in __call__
> INTERNALERROR>     
> warnings.warn(NODE_USE_FROM_PARENT.format(name=self.__name__), stacklevel=2)
> INTERNALERROR> pytest.PytestDeprecationWarning: Direct construction of 
> CustomItem has been deprecated, please use CustomItem.from_parent.
> INTERNALERROR> See 
> https://docs.pytest.org/en/stable/deprecations.html#node-construction-changed-to-node-from-parent
>  for more details.
> 
> ============================ no tests ran in 0.01s 
> =============================
> =============================== warnings summary 
> ===============================
> /usr/lib/python3/dist-packages/_pytest/config/__init__.py:1148
>   /usr/lib/python3/dist-packages/_pytest/config/__init__.py:1148: 
> PytestConfigWarning: Unknown config ini key: pep8maxlinelength
>   
>     self._warn_or_fail_if_strict("Unknown config ini key: {}\n".format(key))
> 
> -- Docs: https://docs.pytest.org/en/stable/warnings.html
> =========================== short test summary info 
> ============================
> FAILED 
> tests/feature/test_gherkin_terminal_reporter.py::test_Should_default_output_be_the_same_as_regular_terminal_reporter
> FAILED 
> tests/test_hooks.py::test_item_collection_does_not_break_on_non_function_items
> =================== 2 failed, 150 passed, 1 warning in 3.84s 
> ===================
> E: pybuild pybuild:353: test: plugin custom failed with: exit code=1: 
> python3.9 -m pytest
> dh_auto_test: error: pybuild --test --test-pytest -i python{version} -p 3.9 
> --system=custom "--test-args={interpreter} -m pytest" returned exit code 13
> make[1]: *** [debian/rules:11: override_dh_auto_test] Error 25
> make: *** [debian/rules:6: binary] Error 2
> dpkg-buildpackage: error: debian/rules binary subprocess returned exit status 
> 2
> make[1]: Leaving directory '/<<PKGBUILDDIR>>'
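
Two notes on the failures, in case they help:

1. test_Should_default_output_be_the_same_as_regular_terminal_reporter: in
   the captured output, the regular reporter emits a blank line after
   "collected 1 item" plus a "test_gherkin.py .  [100%]" line (9 lines in
   total), while the gherkin reporter emits a bare ".  [100%]" with no blank
   line (7 lines). The test only trims lines when the last captured line is
   empty; here the last line is the non-empty summary, so nothing is trimmed
   and zip() pairs the regular reporter's blank line with the gherkin
   reporter's dot line, which is exactly the AssertionError shown. A minimal
   illustration, with line contents condensed from the captured stdout above
   (this is not the package's test code):

    # Condensed from the captured output above; illustrative only.
    reg_lines = [
        "collected 1 item",
        "",                                      # regular reporter: blank line
        "test_gherkin.py .                [100%]",
        "",
        "1 passed in 0.01s",
    ]
    ghe_lines = [
        "collected 1 item",
        ".                                [100%]",  # gherkin: no blank, no module name
        "1 passed in 0.01s",
    ]

    # The test keeps both lists unmodified (the last line is non-empty), so
    # the pairwise comparison misaligns on the second line.
    for l1, l2 in zip(reg_lines, ghe_lines):
        if l1 != l2:
            print("first mismatch: %r != %r" % (l1, l2))
            break

2. test_item_collection_does_not_break_on_non_function_items: the failure is
   in the test's own conftest, which constructs
   CustomItem(name=..., parent=...) directly; pytest now deprecates that
   (see the deprecation URL in the traceback), and here the warning surfaces
   as an internal error during collection. The message points to
   Node.from_parent. A hedged sketch of that conftest rewritten accordingly
   (my guess at the needed adaptation, not the upstream fix):

    import pytest

    class CustomItem(pytest.Item):
        def runtest(self):
            assert True

    @pytest.hookimpl(tryfirst=True)   # modern spelling of @pytest.mark.tryfirst
    def pytest_collection_modifyitems(session, config, items):
        # from_parent() replaces direct construction of Node subclasses.
        items[:] = [
            CustomItem.from_parent(item.parent, name=item.name) for item in items
        ]

   With that change the inner run should collect and pass a single
   CustomItem, which is what result.assert_outcomes(passed=1) expects.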
