| | |
# Pass the standard compiler/linker knobs to Python builds so extension
# modules are compiled with the component's flags.
# NOTE(review): CC="$(CC)" is presumably added just above this chunk — confirm.
PYTHON_ENV += CFLAGS="$(CFLAGS)"
PYTHON_ENV += CXX="$(CXX)"
PYTHON_ENV += CXXFLAGS="$(CXXFLAGS)"
PYTHON_ENV += LDFLAGS="$(LDFLAGS)"
PYTHON_ENV += PKG_CONFIG_PATH="$(PKG_CONFIG_PATH)"

COMPONENT_BUILD_ENV += $(PYTHON_ENV)
# We need to set GIT_DIR to workaround the nasty poetry bug:
# https://github.com/python-poetry/poetry/issues/5547. Technically, we should
# set this on per-project basis, but we would need to check every newly
# integrated project to see if it is built by the poetry backend or not. We
# would also need to do a similar check on every version bump, because any
# project could switch to poetry anytime. Since this would be a lot of work we
# simply opted to set GIT_DIR for every python project.
COMPONENT_BUILD_ENV += GIT_DIR=$(BUILD_DIR)
COMPONENT_INSTALL_ENV += $(PYTHON_ENV)
COMPONENT_TEST_ENV += $(PYTHON_ENV)
| | | |
| | |
# Rename binaries in /usr/bin to contain version number.
# For every regular file in the proto /usr/bin: skip it if its name already
# ends with any supported Python version, otherwise append -$(PYTHON_VERSION).
# (The extraction had both the old "[[ ]]" and the newer "[ ]" test variants
# merged together; only the POSIX "[ ]" variants are kept.)
COMPONENT_POST_INSTALL_ACTION += \
	for f in $(PROTOUSRBINDIR)/* ; do \
		[ -f $$f ] || continue ; \
		for v in $(PYTHON_VERSIONS) ; do \
			[ "$$f" == "$${f%%$$v}" ] || continue 2 ; \
		done ; \
		$(MV) $$f $$f-$(PYTHON_VERSION) ; \
	done ;
| | |
# Testing requires the component to be installed into the proto area first
COMPONENT_TEST_DEP += $(BUILD_DIR)/%/.installed
# Point Python to the proto area so it is able to find installed modules there
COMPONENT_TEST_ENV += PYTHONPATH=$(PROTO_DIR)/$(PYTHON_LIB)
# Make sure testing is able to find own installed executables (if any)
COMPONENT_TEST_ENV += PATH=$(PROTOUSRBINDIR):$(PATH)
| | | |
# determine the type of tests we want to run.
ifeq ($(strip $(wildcard $(COMPONENT_TEST_RESULTS_DIR)/results-*.master)),)

# Default to tox-style testing unless the component overrides TEST_STYLE
TEST_STYLE ?= tox
ifeq ($(strip $(TEST_STYLE)),tox)
# tox needs the PATH environment variable - see
# https://github.com/tox-dev/tox/issues/2538
# We already added it to the test environment - see above.  (A stray duplicate
# "PATH=$(PATH)" entry was removed here; it would have clobbered the
# proto-bin-first PATH set earlier.)
COMPONENT_TEST_ENV += PYTEST_ADDOPTS="$(PYTEST_ADDOPTS)"
COMPONENT_TEST_ENV += NOSE_VERBOSE=2
COMPONENT_TEST_CMD = $(TOX)

COMPONENT_TEST_ARGS += $(TOX_TESTENV)
COMPONENT_TEST_TARGETS =
| | | |
# Run only the tox testenv matching the current Python version, e.g. py313
# for 3.13.  Use $(subst) instead of forking a shell for "tr -d ." (the old
# $(shell ...) variant was a superseded duplicate).
TOX_TESTENV = -e py$(subst .,,$(PYTHON_VERSION))

# Make sure following tools are called indirectly to properly support tox-current-env
TOX_CALL_INDIRECTLY += py.test

COMPONENT_PRE_TEST_ACTION += true ;
| | | |
# Normalize tox test results.
COMPONENT_TEST_TRANSFORMS += "-e 's/py$(subst .,,$(PYTHON_VERSION))/py\$$(PYV)/g'" # normalize PYV
COMPONENT_TEST_TRANSFORMS += "-e '/^py\$$(PYV) installed:/d'" # depends on set of installed packages
COMPONENT_TEST_TRANSFORMS += "-e '/PYTHONHASHSEED/d'" # this is random

# Remove timing for tox 4 test results
COMPONENT_TEST_TRANSFORMS += "-e 's/^\( py\$$(PYV): OK\) (.* seconds)$$/\1/'"
COMPONENT_TEST_TRANSFORMS += "-e 's/^\( congratulations :)\) (.* seconds)$$/\1/'"

# Remove useless lines from the "coverage combine" output
COMPONENT_TEST_TRANSFORMS += "-e '/^Combined data file .*\.coverage/d'"
COMPONENT_TEST_TRANSFORMS += "-e '/^Skipping duplicate data .*\.coverage/d'"

# sort list of Sphinx doctest results
# NOTE(review): the multi-line transform that sorted Sphinx doctest results
# appears to have been lost (dangling line continuation in the original);
# restore it from upstream.

# Force pytest to not use colored output so the results normalization is unaffected
PYTEST_ADDOPTS += --color=no
| | | |
#
# Some pytest plugins are enabled automatically and could affect test results
# or test output. In a case a component does not expect such a plugin
# installed (it is neither in REQUIRED_PACKAGES nor in TEST_REQUIRED_PACKAGES)
# we simply disable the plugin to get consistent test results.
#
# (The eight hard-coded PYTEST_ADDOPTS lines that used to live here were
# superseded duplicates of the disable-pytest-plugin calls below, which cover
# black, checkdocs, cov, flaky, mypy, randomly, relaxed and reporter.)
#
# Avoid loading of unexpected pytest plugins.
# $(1) is the plugin name as pytest knows it (used as "-p no:NAME"); $(2) is
# the package name under library/python/.  The plugin is left enabled when the
# package is a (test) dependency of the component, or when the component
# itself is that package (the $(COMPONENT_FMRI) word in the filter text).
define disable-pytest-plugin
PYTEST_ADDOPTS += $$(if $$(filter library/python/$(2)-$$(subst .,,$$(PYTHON_VERSION)), $$(REQUIRED_PACKAGES) $$(TEST_REQUIRED_PACKAGES) $$(COMPONENT_FMRI)-$$(subst .,,$$(PYTHON_VERSION))),,-p 'no:$(1)')
endef
$(eval $(call disable-pytest-plugin,anyio,anyio))
$(eval $(call disable-pytest-plugin,asyncio,pytest-asyncio))			# adds line to test report header
$(eval $(call disable-pytest-plugin,benchmark,pytest-benchmark))		# adds line to test report header; adds benchmark report
$(eval $(call disable-pytest-plugin,black,pytest-black))			# runs extra test(s)
$(eval $(call disable-pytest-plugin,check,pytest-check))
$(eval $(call disable-pytest-plugin,checkdocs,pytest-checkdocs))		# runs extra test(s)
$(eval $(call disable-pytest-plugin,console-scripts,pytest-console-scripts))
$(eval $(call disable-pytest-plugin,cov,pytest-cov))
$(eval $(call disable-pytest-plugin,custom_exit_code,pytest-custom-exit-code))
$(eval $(call disable-pytest-plugin,enabler,pytest-enabler))
$(eval $(call disable-pytest-plugin,env,pytest-env))
$(eval $(call disable-pytest-plugin,faker,faker))
$(eval $(call disable-pytest-plugin,flake8,pytest-flake8))
$(eval $(call disable-pytest-plugin,flaky,flaky))
$(eval $(call disable-pytest-plugin,freezegun,pytest-freezegun))
$(eval $(call disable-pytest-plugin,freezer,pytest-freezer))
$(eval $(call disable-pytest-plugin,helpers_namespace,pytest-helpers-namespace))
$(eval $(call disable-pytest-plugin,hypothesispytest,hypothesis))		# adds line to test report header
$(eval $(call disable-pytest-plugin,jaraco.test.http,jaraco-test))
$(eval $(call disable-pytest-plugin,kgb,kgb))
$(eval $(call disable-pytest-plugin,lazy-fixture,pytest-lazy-fixture))
$(eval $(call disable-pytest-plugin,metadata,pytest-metadata))			# adds line to test report header
$(eval $(call disable-pytest-plugin,mypy,pytest-mypy))				# runs extra test(s)
$(eval $(call disable-pytest-plugin,perf,pytest-perf))				# https://github.com/jaraco/pytest-perf/issues/9
$(eval $(call disable-pytest-plugin,pytest-datadir,pytest-datadir))
$(eval $(call disable-pytest-plugin,pytest-mypy-plugins,pytest-mypy-plugins))	# could cause tests to fail
$(eval $(call disable-pytest-plugin,pytest-teamcity,teamcity-messages))
$(eval $(call disable-pytest-plugin,pytest_expect,pytest-expect))
$(eval $(call disable-pytest-plugin,pytest_fakefs,pyfakefs))
$(eval $(call disable-pytest-plugin,pytest_forked,pytest-forked))
$(eval $(call disable-pytest-plugin,pytest_httpserver,pytest-httpserver))
$(eval $(call disable-pytest-plugin,pytest_ignore_flaky,pytest-ignore-flaky))
$(eval $(call disable-pytest-plugin,pytest_mock,pytest-mock))
$(eval $(call disable-pytest-plugin,randomly,pytest-randomly))			# reorders tests
$(eval $(call disable-pytest-plugin,regressions,pytest-regressions))
$(eval $(call disable-pytest-plugin,relaxed,pytest-relaxed))			# runs extra test(s); produces different test report
$(eval $(call disable-pytest-plugin,reporter,pytest-reporter))			# https://github.com/christiansandberg/pytest-reporter/issues/8
$(eval $(call disable-pytest-plugin,rerunfailures,pytest-rerunfailures))
$(eval $(call disable-pytest-plugin,salt-factories,pytest-salt-factories))	# requires salt
$(eval $(call disable-pytest-plugin,salt-factories-event-listener,pytest-salt-factories))	# requires salt
$(eval $(call disable-pytest-plugin,salt-factories-factories,pytest-salt-factories))		# requires salt
$(eval $(call disable-pytest-plugin,salt-factories-loader-mock,pytest-salt-factories))		# requires salt
$(eval $(call disable-pytest-plugin,salt-factories-log-server,pytest-salt-factories))		# requires salt
$(eval $(call disable-pytest-plugin,salt-factories-markers,pytest-salt-factories))		# requires salt
$(eval $(call disable-pytest-plugin,salt-factories-sysinfo,pytest-salt-factories))		# requires salt
$(eval $(call disable-pytest-plugin,salt-factories-sysstats,pytest-salt-factories))		# requires salt
$(eval $(call disable-pytest-plugin,shell-utilities,pytest-shell-utilities))
$(eval $(call disable-pytest-plugin,skip-markers,pytest-skip-markers))
$(eval $(call disable-pytest-plugin,socket,pytest-socket))
$(eval $(call disable-pytest-plugin,subprocess,pytest-subprocess))
$(eval $(call disable-pytest-plugin,subtests,pytest-subtests))
$(eval $(call disable-pytest-plugin,tempdir,pytest-tempdir))			# adds line to test report header
$(eval $(call disable-pytest-plugin,time_machine,time-machine))
$(eval $(call disable-pytest-plugin,timeout,pytest-timeout))
$(eval $(call disable-pytest-plugin,travis-fold,pytest-travis-fold))
$(eval $(call disable-pytest-plugin,typeguard,typeguard))
$(eval $(call disable-pytest-plugin,unittest_mock,backports-unittest-mock))
$(eval $(call disable-pytest-plugin,xdist,pytest-xdist))
$(eval $(call disable-pytest-plugin,xdist.looponfail,pytest-xdist))
$(eval $(call disable-pytest-plugin,xprocess,pytest-xprocess))			# adds a reminder line to test output
| | | |
# By default we are not interested in full list of test failures so exit on
# first failure to save time. This could be easily overridden from environment.
# NOTE(review): the option line that implemented this appears to have been
# lost in extraction; restore it from upstream.

# Normalize the pytest platform header line (hide the micro version)
COMPONENT_TEST_TRANSFORMS += \
	"-e 's/^\(platform sunos5 -- Python \)$(shell echo $(PYTHON_VERSION) | $(GSED) -e 's/\./\\./g')\.[0-9]\{1,\}.*\( -- .*\)/\1\$$(PYTHON_VERSION).X\2/'"
COMPONENT_TEST_TRANSFORMS += "-e '/^Using --randomly-seed=[0-9]\{1,\}$$/d'" # this is random
COMPONENT_TEST_TRANSFORMS += "-e '/^benchmark: /d'" # line with version details
COMPONENT_TEST_TRANSFORMS += "-e '/^plugins: /d'" # order of listed plugins could vary
COMPONENT_TEST_TRANSFORMS += "-e '/^-\{1,\} coverage: /,/^$$/d'" # remove coverage report
# sort list of pytest unit tests and drop percentage
# NOTE(review): the multi-line transform implementing this appears to have
# been lost (comment with no value); restore it from upstream.
# Drop the contents of the "slowest N durations" section (timings vary)
COMPONENT_TEST_TRANSFORMS += "-e '/^=\{1,\} slowest [0-9]\{1,\} durations =\{1,\}$$/,/^=/{/^=/!d}'"
# Remove short test summary info for projects that run pytest with -r option
COMPONENT_TEST_TRANSFORMS += "-e '/^=\{1,\} short test summary info =\{1,\}$$/,/^=/{/^=/!d}'"
| | | |
# Normalize test results produced by pytest-benchmark: drop the benchmark
# table section (only when the plugin is actually a dependency).
COMPONENT_TEST_TRANSFORMS += \
	$(if $(filter library/python/pytest-benchmark-$(subst .,,$(PYTHON_VERSION)), $(REQUIRED_PACKAGES) $(TEST_REQUIRED_PACKAGES)),"| ( \
		$(GSED) -e '/^-\{1,\} benchmark/,/^=/{/^=/!d}' \
	) | $(COMPONENT_TEST_TRANSFORMER) -e ''")

# Normalize test results produced by pytest-xdist: hide the worker count,
# then sort the inherently unordered per-worker "[gwN]" result lines so the
# output is stable across runs.
COMPONENT_TEST_TRANSFORMS += \
	$(if $(filter library/python/pytest-xdist-$(subst .,,$(PYTHON_VERSION)), $(REQUIRED_PACKAGES) $(TEST_REQUIRED_PACKAGES)),"| ( \
		$(GSED) -u \
			-e '/^created: .* workers$$/d' \
			-e 's/^[0-9]\{1,\}\( workers \[[0-9]\{1,\} items\]\)$$/X\1/' \
			-e '/^scheduling tests via /q' ; \
		$(GSED) -u -e '/^$$/q' ; \
		$(GSED) -u -n -e '/^\[gw/p' -e '/^$$/Q' | ( $(GSED) \
			-e 's/^\[gw[0-9]\{1,\}\] \[...%\] //' \
			-e 's/ *$$//' \
			-e 's/\([^ ]\{1,\}\) \(.*\)$$/\2 \1/' \
			| $(SORT) | $(NAWK) '{print}END{if(NR>0)printf(\"\\\\n\")}' ; \
		) ; \
		$(CAT) \
	) | $(COMPONENT_TEST_TRANSFORMER) -e ''")
| | | |
| | | # Normalize setup.py test results. The setup.py testing could be used either |
| | | # directly or via tox so add these transforms for all test styles |
| | |
| | | $(COMPONENT_TEST_CLEANUP) |
| | | $(TOUCH) $@ |
| | | |
ifeq ($(strip $(SINGLE_PYTHON_VERSION)),no)
# Temporarily create symlinks for renamed binaries so tests can find the
# unversioned names; skip entries that are not regular files or that would
# overwrite an existing unversioned path.
COMPONENT_PRE_TEST_ACTION += \
	for f in $(PROTOUSRBINDIR)/*-$(PYTHON_VERSION) ; do \
		[ -f $$f ] || continue ; \
		[ -e $${f%%-$(PYTHON_VERSION)} ] && continue ; \
		$(SYMLINK) $$(basename $$f) $${f%%-$(PYTHON_VERSION)} ; \
	done ;

# Cleanup of temporary symlinks (only remove them when they are symlinks,
# never a real file that was already there)
COMPONENT_POST_TEST_ACTION += \
	for f in $(PROTOUSRBINDIR)/*-$(PYTHON_VERSION) ; do \
		[ -f $$f ] || continue ; \
		[ ! -L $${f%%-$(PYTHON_VERSION)} ] || $(RM) $${f%%-$(PYTHON_VERSION)} ; \
	done ;
endif
| | | |
| | | |
ifeq ($(strip $(SINGLE_PYTHON_VERSION)),no)
# We need to add -$(PYV) to package fmri
# NOTE(review): the assignment that implemented this appears to have been lost
# in extraction; restore it from upstream.

# Generate raw lists of test dependencies per Python version.  The extraction
# contained an older plain-"cat" requirements loop merged on top of the newer
# parenthesized loop below; the superseded duplicate was removed.
# NOTE(review): the tail of this pipeline may still carry merged remnants of
# two revisions — verify against upstream.
COMPONENT_POST_INSTALL_ACTION += \
	cd $(@D)$(COMPONENT_SUBDIR:%=/%) ; \
	( for f in $(TEST_REQUIREMENTS) ; do \
		$(CAT) $$f | $(DOS2UNIX) -ascii ; \
	done ; \
	for e in $(TEST_REQUIREMENTS_EXTRAS) ; do \
		PYTHONPATH=$(PROTO_DIR)/$(PYTHON_DIR)/site-packages:$(PROTO_DIR)/$(PYTHON_LIB) \
		$(PYTHON) $(WS_TOOLS)/python-requires $(COMPONENT_NAME) $$e ; \
	done ) | $(WS_TOOLS)/python-resolve-deps \
		PYTHONPATH=$(PROTO_DIR)/$(PYTHON_DIR)/site-packages:$(PROTO_DIR)/$(PYTHON_LIB) \
		$(PYTHON) $(WS_TOOLS)/python-requires $(COMPONENT_NAME) \
		| $(PYTHON) $(WS_TOOLS)/python-requires - >> $(@D)/.depend-test ;