From 1e5fef1a8c653f8be12b2aeadac3a818fa113f55 Mon Sep 17 00:00:00 2001
From: Marcel Telka <marcel@telka.sk>
Date: Wed, 03 Apr 2024 18:39:00 +0200
Subject: [PATCH] python/types-requests: update to 2.31.0.20240403

---
 make-rules/setup.py.mk | 131 ++++++++++++++++++++++++++++++++++---------
 1 files changed, 104 insertions(+), 27 deletions(-)

diff --git a/make-rules/setup.py.mk b/make-rules/setup.py.mk
index d459d67..9bbd73d 100644
--- a/make-rules/setup.py.mk
+++ b/make-rules/setup.py.mk
@@ -152,6 +152,7 @@
 PYTHON_ENV += CFLAGS="$(CFLAGS)"
 PYTHON_ENV += CXX="$(CXX)"
 PYTHON_ENV += CXXFLAGS="$(CXXFLAGS)"
+PYTHON_ENV += LDFLAGS="$(LDFLAGS)"
 PYTHON_ENV += PKG_CONFIG_PATH="$(PKG_CONFIG_PATH)"
 
 COMPONENT_BUILD_ENV += $(PYTHON_ENV)
@@ -233,8 +234,6 @@
 # See below for test style specific transforms.
 COMPONENT_TEST_TRANSFORMS += "-e 's|$(PYTHON_DIR)|\$$(PYTHON_DIR)|g'"
 
-# Make sure the test environment is prepared before we start tests
-COMPONENT_TEST_DEP += component-test-environment-prep
 # Testing depends on install target because we want to test installed modules
 COMPONENT_TEST_DEP += $(BUILD_DIR)/%/.installed
 # Point Python to the proto area so it is able to find installed modules there
@@ -306,7 +305,7 @@
 COMPONENT_TEST_ARGS = --current-env --no-provision
 COMPONENT_TEST_ARGS += --recreate
 COMPONENT_TEST_ARGS += $(TOX_TESTENV)
-COMPONENT_TEST_TARGETS =
+COMPONENT_TEST_TARGETS = $(if $(strip $(TOX_POSARGS)),-- $(TOX_POSARGS))
 
 TOX_TESTENV = -e py$(subst .,,$(PYTHON_VERSION))
 
@@ -345,8 +344,8 @@
 COMPONENT_TEST_TRANSFORMS += "-e 's/^\( congratulations :)\) (.* seconds)$$/\1/'"
 
 # Remove useless lines from the "coverage combine" output
-COMPONENT_TEST_TRANSFORMS += "-e '/^Combined data file \.tox\/\.coverage\.py/d'"
-COMPONENT_TEST_TRANSFORMS += "-e '/^Skipping duplicate data \.tox\/\.coverage\.py/d'"
+COMPONENT_TEST_TRANSFORMS += "-e '/^Combined data file .*\.coverage/d'"
+COMPONENT_TEST_TRANSFORMS += "-e '/^Skipping duplicate data .*\.coverage/d'"
 
 # sort list of Sphinx doctest results
 COMPONENT_TEST_TRANSFORMS += \
@@ -411,27 +410,48 @@
 # Force pytest to not use colored output so the results normalization is unaffected
 PYTEST_ADDOPTS += --color=no
 
-#
-# Some pytest plugins are enabled automatically and could affect test results
-# or test output. In a case a component does not expect such a plugin
-# installed (it is neither in REQUIRED_PACKAGES nor in TEST_REQUIRED_PACKAGES)
-# we simply disable the plugin to get consistent test results.
-#
+# Avoid loading unexpected pytest plugins.
 define disable-pytest-plugin
-PYTEST_ADDOPTS += $$(if $$(filter library/python/$(2)-$$(subst .,,$$(PYTHON_VERSION)), $$(REQUIRED_PACKAGES) $$(TEST_REQUIRED_PACKAGES) $$(COMPONENT_FMRI)-$$(subst .,,$$(PYTHON_VERSION))),,-p no:$(1))
+PYTEST_ADDOPTS += $$(if $$(filter library/python/$(2)-$$(subst .,,$$(PYTHON_VERSION)), $$(REQUIRED_PACKAGES) $$(TEST_REQUIRED_PACKAGES) $$(COMPONENT_FMRI)-$$(subst .,,$$(PYTHON_VERSION))),,-p 'no:$(1)')
 endef
 
+$(eval $(call disable-pytest-plugin,anyio,anyio))
 $(eval $(call disable-pytest-plugin,asyncio,pytest-asyncio)) # adds line to test report header
 $(eval $(call disable-pytest-plugin,benchmark,pytest-benchmark)) # adds line to test report header; adds benchmark report
-$(eval $(call disable-pytest-plugin,black,pytest-black))
-$(eval $(call disable-pytest-plugin,checkdocs,pytest-checkdocs))
+$(eval $(call disable-pytest-plugin,black,pytest-black)) # runs extra test(s)
+$(eval $(call disable-pytest-plugin,check,pytest-check))
+$(eval $(call disable-pytest-plugin,checkdocs,pytest-checkdocs)) # runs extra test(s)
+$(eval $(call disable-pytest-plugin,console-scripts,pytest-console-scripts))
 $(eval $(call disable-pytest-plugin,cov,pytest-cov))
+$(eval $(call disable-pytest-plugin,custom_exit_code,pytest-custom-exit-code))
+$(eval $(call disable-pytest-plugin,enabler,pytest-enabler))
+$(eval $(call disable-pytest-plugin,env,pytest-env))
+$(eval $(call disable-pytest-plugin,faker,faker))
+$(eval $(call disable-pytest-plugin,flake8,pytest-flake8))
 $(eval $(call disable-pytest-plugin,flaky,flaky))
+$(eval $(call disable-pytest-plugin,freezer,pytest-freezer))
+$(eval $(call disable-pytest-plugin,helpers_namespace,pytest-helpers-namespace))
 $(eval $(call disable-pytest-plugin,hypothesispytest,hypothesis)) # adds line to test report header
+$(eval $(call disable-pytest-plugin,jaraco.test.http,jaraco-test))
+$(eval $(call disable-pytest-plugin,kgb,kgb))
 $(eval $(call disable-pytest-plugin,metadata,pytest-metadata)) # adds line to test report header
-$(eval $(call disable-pytest-plugin,mypy,pytest-mypy))
-$(eval $(call disable-pytest-plugin,randomly,pytest-randomly))
-$(eval $(call disable-pytest-plugin,relaxed,pytest-relaxed)) # produces different test report
+$(eval $(call disable-pytest-plugin,mypy,pytest-mypy)) # runs extra test(s)
+$(eval $(call disable-pytest-plugin,perf,pytest-perf)) # https://github.com/jaraco/pytest-perf/issues/9
+$(eval $(call disable-pytest-plugin,pytest home,pytest-home))
+$(eval $(call disable-pytest-plugin,pytest-datadir,pytest-datadir))
+$(eval $(call disable-pytest-plugin,pytest-mypy-plugins,pytest-mypy-plugins)) # could cause tests to fail
+$(eval $(call disable-pytest-plugin,pytest-teamcity,teamcity-messages))
+$(eval $(call disable-pytest-plugin,pytest_expect,pytest-expect))
+$(eval $(call disable-pytest-plugin,pytest_fakefs,pyfakefs))
+$(eval $(call disable-pytest-plugin,pytest_forked,pytest-forked))
+$(eval $(call disable-pytest-plugin,pytest_httpserver,pytest-httpserver))
+$(eval $(call disable-pytest-plugin,pytest_ignore_flaky,pytest-ignore-flaky))
+$(eval $(call disable-pytest-plugin,pytest_lazyfixture,pytest-lazy-fixtures))
+$(eval $(call disable-pytest-plugin,pytest_mock,pytest-mock))
+$(eval $(call disable-pytest-plugin,randomly,pytest-randomly)) # reorders tests
+$(eval $(call disable-pytest-plugin,regressions,pytest-regressions))
+$(eval $(call disable-pytest-plugin,relaxed,pytest-relaxed)) # runs extra test(s); produces different test report
 $(eval $(call disable-pytest-plugin,reporter,pytest-reporter)) # https://github.com/christiansandberg/pytest-reporter/issues/8
+$(eval $(call disable-pytest-plugin,rerunfailures,pytest-rerunfailures))
 $(eval $(call disable-pytest-plugin,salt-factories,pytest-salt-factories)) # requires salt
 $(eval $(call disable-pytest-plugin,salt-factories-event-listener,pytest-salt-factories)) # requires salt
 $(eval $(call disable-pytest-plugin,salt-factories-factories,pytest-salt-factories)) # requires salt
@@ -440,7 +460,18 @@
 $(eval $(call disable-pytest-plugin,salt-factories-markers,pytest-salt-factories)) # requires salt
 $(eval $(call disable-pytest-plugin,salt-factories-sysinfo,pytest-salt-factories)) # requires salt
 $(eval $(call disable-pytest-plugin,salt-factories-sysstats,pytest-salt-factories)) # requires salt
-$(eval $(call disable-pytest-plugin,tempdir,pytest-tempdir)) # adds line to test report header
+$(eval $(call disable-pytest-plugin,shell-utilities,pytest-shell-utilities))
+$(eval $(call disable-pytest-plugin,skip-markers,pytest-skip-markers))
+$(eval $(call disable-pytest-plugin,socket,pytest-socket))
+$(eval $(call disable-pytest-plugin,subprocess,pytest-subprocess))
+$(eval $(call disable-pytest-plugin,subtests,pytest-subtests))
+$(eval $(call disable-pytest-plugin,system-statistics,pytest-system-statistics))
+$(eval $(call disable-pytest-plugin,time_machine,time-machine))
+$(eval $(call disable-pytest-plugin,timeout,pytest-timeout))
+$(eval $(call disable-pytest-plugin,travis-fold,pytest-travis-fold))
+$(eval $(call disable-pytest-plugin,typeguard,typeguard))
+$(eval $(call disable-pytest-plugin,xdist,pytest-xdist))
+$(eval $(call disable-pytest-plugin,xdist.looponfail,pytest-xdist))
 $(eval $(call disable-pytest-plugin,xprocess,pytest-xprocess)) # adds a reminder line to test output
 
 # By default we are not interested in full list of test failures so exit on
@@ -449,13 +480,20 @@
 PYTEST_FASTFAIL = -x
 PYTEST_ADDOPTS += $(PYTEST_FASTFAIL)
 
+# By default we are not interested in seeing the long default tracebacks.
+# Detailed tracebacks are shown for failures and xfails only. We expect the
+# testing to pass, so there should be no failures. Since xfails are expected
+# failures, we are not interested in their detailed tracebacks either, as they
+# could contain random data like pointers, temporary file names, etc.
+PYTEST_TRACEBACK = --tb=line
+PYTEST_ADDOPTS += $(PYTEST_TRACEBACK)
+
 # Normalize pytest test results. The pytest framework could be used either
 # directly or via tox or setup.py so add these transforms for all test styles
 # unconditionally.
 COMPONENT_TEST_TRANSFORMS += \
 	"-e 's/^\(platform sunos5 -- Python \)$(shell echo $(PYTHON_VERSION) | $(GSED) -e 's/\./\\./g')\.[0-9]\{1,\}.*\( -- .*\)/\1\$$(PYTHON_VERSION).X\2/'"
 COMPONENT_TEST_TRANSFORMS += "-e '/^Using --randomly-seed=[0-9]\{1,\}$$/d'" # this is random
-COMPONENT_TEST_TRANSFORMS += "-e '/^benchmark: /d'" # line with version details
 COMPONENT_TEST_TRANSFORMS += "-e '/^plugins: /d'" # order of listed plugins could vary
 COMPONENT_TEST_TRANSFORMS += "-e '/^-\{1,\} coverage: /,/^$$/d'" # remove coverage report
 # sort list of pytest unit tests and drop percentage
@@ -469,19 +507,54 @@
 COMPONENT_TEST_TRANSFORMS += \
 	"-e 's/^=\{1,\} \(.*\) in [0-9]\{1,\}\.[0-9]\{1,\}s \(([^)]*) \)\?=\{1,\}$$/======== \1 ========/'" # remove timing
 # Remove slowest durations report for projects that run pytest with --durations option
-COMPONENT_TEST_TRANSFORMS += "-e '/^=\{1,\} slowest [0-9]\{1,\} durations =\{1,\}$$/,/^=/{/^=/!d}'"
+COMPONENT_TEST_TRANSFORMS += "-e '/^=\{1,\} slowest [0-9 ]*durations =\{1,\}$$/,/^=/{/^=/!d}'"
 # Remove short test summary info for projects that run pytest with -r option
 COMPONENT_TEST_TRANSFORMS += "-e '/^=\{1,\} short test summary info =\{1,\}$$/,/^=/{/^=/!d}'"
+
+# Normalize test results produced by pytest-benchmark
+COMPONENT_TEST_TRANSFORMS += \
+	$(if $(filter library/python/pytest-benchmark-$(subst .,,$(PYTHON_VERSION)), $(REQUIRED_PACKAGES) $(TEST_REQUIRED_PACKAGES)),"| ( \
+		$(GSED) -e '/^-\{1,\} benchmark/,/^=/{/^=/!d}' \
+	) | $(COMPONENT_TEST_TRANSFORMER) -e ''")
+
+# Normalize test results produced by pytest-xdist
+COMPONENT_TEST_TRANSFORMS += \
+	$(if $(filter library/python/pytest-xdist-$(subst .,,$(PYTHON_VERSION)), $(REQUIRED_PACKAGES) $(TEST_REQUIRED_PACKAGES)),"| ( \
+		$(GSED) -u \
+			-e '/^created: .* workers$$/d' \
+			-e 's/^[0-9]\{1,\}\( workers \[[0-9]\{1,\} items\]\)$$/X\1/' \
+			-e '/^scheduling tests via /q' ; \
+		$(GSED) -u -e '/^$$/q' ; \
+		$(GSED) -u -n -e '/^\[gw/p' -e '/^$$/Q' | ( $(GSED) \
+			-e 's/^\[gw[0-9]\{1,\}\] \[...%\] //' \
+			-e 's/ *$$//' \
+			-e 's/\([^ ]\{1,\}\) \(.*\)$$/\2 \1/' \
+			| $(SORT) | $(NAWK) '{print}END{if(NR>0)printf(\"\\\\n\")}' ; \
+		) ; \
+		$(CAT) \
+	) | $(COMPONENT_TEST_TRANSFORMER) -e ''")
+
+# Normalize stestr test results
+USE_STESTR = $(filter library/python/stestr-$(subst .,,$(PYTHON_VERSION)), $(REQUIRED_PACKAGES) $(TEST_REQUIRED_PACKAGES))
+COMPONENT_TEST_TRANSFORMS += \
+	$(if $(strip $(USE_STESTR)),"| ( \
+		$(GSED) -e '0,/^{[0-9]\{1,\}}/{//i\'\$$'\\\n{0}\\\n}' \
+			-e 's/^\(Ran: [0-9]\{1,\} tests\{0,1\}\) in .*\$$/\1/' \
+			-e '/^Sum of execute time for each test/d' \
+			-e '/^ - Worker /d' \
+	) | ( \
+		$(GSED) -u -e '/^{0}\$$/Q' ; \
+		$(GSED) -u -e 's/^{[0-9]\{1,\}} //' \
+			-e 's/\[[.0-9]\{1,\}s\] \.\.\./.../' \
+			-e '/^\$$/Q' | $(SORT) | $(GSED) -e '\$$a\'\$$'\\\n\\\n' ; \
+		$(CAT) \
+	) | $(COMPONENT_TEST_TRANSFORMER) -e ''")
 
 # Normalize setup.py test results. The setup.py testing could be used either
 # directly or via tox so add these transforms for all test styles
 # unconditionally.
 COMPONENT_TEST_TRANSFORMS += "-e '/SetuptoolsDeprecationWarning:/,+1d'" # depends on Python version and is useless
 COMPONENT_TEST_TRANSFORMS += "-e 's/^\(Ran [0-9]\{1,\} tests\{0,1\}\) in .*$$/\1/'" # delete timing from test results
 
-# Filter out message produced by setuptools-declarative-requirements.
-# See also https://github.com/s0undt3ch/setuptools-declarative-requirements/issues/12
-COMPONENT_TEST_TRANSFORMS += \
-	$(if $(filter library/python/setuptools-declarative-requirements-$(subst .,,$$(PYTHON_VERSION)), $(REQUIRED_PACKAGES) $(TEST_REQUIRED_PACKAGES)),,"-e '/^No '\''requirements-files'\'' section was found\. Nothing to do\.\$$/d'")
 
 COMPONENT_TEST_DIR = $(@D)$(COMPONENT_SUBDIR:%=/%)
@@ -523,6 +596,7 @@
 COMPONENT_PRE_TEST_ACTION += \
 	for f in $(PROTOUSRBINDIR)/*-$(PYTHON_VERSION) ; do \
 		[ -f $$f ] || continue ; \
+		[ -L $${f%%-$(PYTHON_VERSION)} ] && $(RM) $${f%%-$(PYTHON_VERSION)} ; \
 		[ -e $${f%%-$(PYTHON_VERSION)} ] && continue ; \
 		$(SYMLINK) $$(basename $$f) $${f%%-$(PYTHON_VERSION)} ; \
 	done ;
@@ -575,7 +649,7 @@
 COMPONENT_POST_INSTALL_ACTION += \
 	cd $(@D)$(COMPONENT_SUBDIR:%=/%) ; \
 	( for f in $(TEST_REQUIREMENTS) ; do \
-		$(CAT) $$f ; \
+		$(CAT) $$f | $(DOS2UNIX) -ascii ; \
 	done ; \
 	for e in $(TEST_REQUIREMENTS_EXTRAS) ; do \
 		PYTHONPATH=$(PROTO_DIR)/$(PYTHON_DIR)/site-packages:$(PROTO_DIR)/$(PYTHON_LIB) \
@@ -586,10 +660,13 @@
 		| $(PYTHON) $(WS_TOOLS)/python-requires - >> $(@D)/.depend-test ;
 
 # Convert raw per version lists of test dependencies to single list of
-# TEST_REQUIRED_PACKAGES entries
+# TEST_REQUIRED_PACKAGES entries. Some Python projects list their own project
+# as a test dependency, so filter that out here too.
 $(BUILD_DIR)/META.depend-test.required: $(INSTALL_$(MK_BITS))
 	$(CAT) $(INSTALL_$(MK_BITS):%.installed=%.depend-test) | $(SORT) -u \
-		| $(GSED) -e 's/.*/TEST_REQUIRED_PACKAGES.python += library\/python\/&/' > $@
+		| $(GSED) -e 's/.*/TEST_REQUIRED_PACKAGES.python += library\/python\/&/' \
+		| ( $(GNU_GREP) -v ' $(COMPONENT_FMRI)$$' || true ) \
+		> $@
 
 # Add META.depend-test.required to the generated list of REQUIRED_PACKAGES
 REQUIRED_PACKAGES_TRANSFORM += -e '$$r $(BUILD_DIR)/META.depend-test.required'
--
Gitblit v1.9.3
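
Below is a minimal standalone sketch (not part of the patch) of how the disable-pytest-plugin macro above decides whether to pass -p 'no:<plugin>' to pytest. The define block is copied from the patch; every other variable value and the file name sketch.mk are hypothetical. Running `gmake -f sketch.mk show` with GNU make should print a PYTEST_ADDOPTS value that disables the xdist plugin but leaves cov alone, because pytest-cov is declared in TEST_REQUIRED_PACKAGES while pytest-xdist is not declared anywhere.

# sketch.mk -- hypothetical values, for illustration only
PYTHON_VERSION = 3.9
REQUIRED_PACKAGES = library/python/pytest-39
TEST_REQUIRED_PACKAGES = library/python/pytest-cov-39
COMPONENT_FMRI = library/python/example

define disable-pytest-plugin
PYTEST_ADDOPTS += $$(if $$(filter library/python/$(2)-$$(subst .,,$$(PYTHON_VERSION)), $$(REQUIRED_PACKAGES) $$(TEST_REQUIRED_PACKAGES) $$(COMPONENT_FMRI)-$$(subst .,,$$(PYTHON_VERSION))),,-p 'no:$(1)')
endef

# pytest-cov is listed above, so the cov plugin stays enabled;
# pytest-xdist is not listed anywhere, so it is disabled with -p 'no:xdist'.
$(eval $(call disable-pytest-plugin,cov,pytest-cov))
$(eval $(call disable-pytest-plugin,xdist,pytest-xdist))

show:
	@echo "PYTEST_ADDOPTS =$(PYTEST_ADDOPTS)"

The single quotes around no:$(1) matter for plugin names that contain a space (for example the "pytest home" plugin disabled in the patch), which is presumably why the patch changes the bare -p no:$(1) to -p 'no:$(1)'.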