From 4325c82a563491d25a1f48db6e125ec04b750554 Mon Sep 17 00:00:00 2001
From: Marcel Telka <marcel@telka.sk>
Date: Sat, 23 Mar 2024 06:40:53 +0100
Subject: [PATCH] setup.py.mk: do not fail on empty META.depend-test.required

---
 make-rules/setup.py.mk |  201 ++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 169 insertions(+), 32 deletions(-)

diff --git a/make-rules/setup.py.mk b/make-rules/setup.py.mk
index decf0fe..028e955 100644
--- a/make-rules/setup.py.mk
+++ b/make-rules/setup.py.mk
@@ -150,17 +150,12 @@
 
 PYTHON_ENV =	CC="$(CC)"
 PYTHON_ENV +=	CFLAGS="$(CFLAGS)"
+PYTHON_ENV +=	CXX="$(CXX)"
+PYTHON_ENV +=	CXXFLAGS="$(CXXFLAGS)"
+PYTHON_ENV +=	LDFLAGS="$(LDFLAGS)"
 PYTHON_ENV +=	PKG_CONFIG_PATH="$(PKG_CONFIG_PATH)"
 
 COMPONENT_BUILD_ENV += $(PYTHON_ENV)
-# We need to set GIT_DIR to workaround the nasty poetry bug:
-# https://github.com/python-poetry/poetry/issues/5547.  Technically, we should
-# set this on per-project basis, but we would need to check every newly
-# integrated project to see if it is build by poetry backend or not.  We would
-# also need to do similar check on every version bump, because any project
-# could switch to poetry anytime.  Since this would be a lot of work we simply
-# opted to set GIT_DIR for every python project.
-COMPONENT_BUILD_ENV += GIT_DIR=$(BUILD_DIR)
 COMPONENT_INSTALL_ENV += $(PYTHON_ENV)
 COMPONENT_TEST_ENV += $(PYTHON_ENV)
 
@@ -214,9 +209,9 @@
 # Rename binaries in /usr/bin to contain version number
 COMPONENT_POST_INSTALL_ACTION += \
 	for f in $(PROTOUSRBINDIR)/* ; do \
-		[[ -f $$f ]] || continue ; \
+		[ -f $$f ] || continue ; \
 		for v in $(PYTHON_VERSIONS) ; do \
-			[[ "$$f" == "$${f%%$$v}" ]] || continue 2 ; \
+			[ "$$f" == "$${f%%$$v}" ] || continue 2 ; \
 		done ; \
 		$(MV) $$f $$f-$(PYTHON_VERSION) ; \
 	done ;
@@ -239,12 +234,12 @@
 # See below for test style specific transforms.
 COMPONENT_TEST_TRANSFORMS += "-e 's|$(PYTHON_DIR)|\$$(PYTHON_DIR)|g'"
 
-# Make sure the test environment is prepared before we start tests
-COMPONENT_TEST_DEP +=	component-test-environment-prep
 # Testing depends on install target because we want to test installed modules
 COMPONENT_TEST_DEP +=	$(BUILD_DIR)/%/.installed
 # Point Python to the proto area so it is able to find installed modules there
 COMPONENT_TEST_ENV +=	PYTHONPATH=$(PROTO_DIR)/$(PYTHON_LIB)
+# Make sure testing is able to find its own installed executables (if any)
+COMPONENT_TEST_ENV +=	PATH=$(PROTOUSRBINDIR):$(PATH)
 
 # determine the type of tests we want to run.
 ifeq ($(strip $(wildcard $(COMPONENT_TEST_RESULTS_DIR)/results-*.master)),)
@@ -302,16 +297,17 @@
 
 TEST_STYLE ?= tox
 ifeq ($(strip $(TEST_STYLE)),tox)
-COMPONENT_TEST_ENV +=		PATH=$(PATH)	# https://github.com/tox-dev/tox/issues/2538
+# tox needs PATH environment variable - see https://github.com/tox-dev/tox/issues/2538
+# We already added it to the test environment - see above
 COMPONENT_TEST_ENV +=		PYTEST_ADDOPTS="$(PYTEST_ADDOPTS)"
 COMPONENT_TEST_ENV +=		NOSE_VERBOSE=2
 COMPONENT_TEST_CMD =		$(TOX)
 COMPONENT_TEST_ARGS =		--current-env --no-provision
 COMPONENT_TEST_ARGS +=		--recreate
 COMPONENT_TEST_ARGS +=		$(TOX_TESTENV)
-COMPONENT_TEST_TARGETS =
+COMPONENT_TEST_TARGETS =	$(if $(strip $(TOX_POSARGS)),-- $(TOX_POSARGS))
 
-TOX_TESTENV = -e py$(shell echo $(PYTHON_VERSION) | tr -d .)
+TOX_TESTENV = -e py$(subst .,,$(PYTHON_VERSION))
 
 # Make sure following tools are called indirectly to properly support tox-current-env
 TOX_CALL_INDIRECTLY += py.test
@@ -335,7 +331,7 @@
 COMPONENT_PRE_TEST_ACTION += true ;
 
 # Normalize tox test results.
-COMPONENT_TEST_TRANSFORMS += "-e 's/py$(shell echo $(PYTHON_VERSION) | tr -d .)/py\$$(PYV)/g'"	# normalize PYV
+COMPONENT_TEST_TRANSFORMS += "-e 's/py$(subst .,,$(PYTHON_VERSION))/py\$$(PYV)/g'"	# normalize PYV
 COMPONENT_TEST_TRANSFORMS += "-e '/^py\$$(PYV) installed:/d'"		# depends on set of installed packages
 COMPONENT_TEST_TRANSFORMS += "-e '/PYTHONHASHSEED/d'"			# this is random
 
@@ -346,6 +342,10 @@
 # Remove timing for tox 4 test results
 COMPONENT_TEST_TRANSFORMS += "-e 's/^\(  py\$$(PYV): OK\) (.* seconds)$$/\1/'"
 COMPONENT_TEST_TRANSFORMS += "-e 's/^\(  congratulations :)\) (.* seconds)$$/\1/'"
+
+# Remove useless lines from the "coverage combine" output
+COMPONENT_TEST_TRANSFORMS += "-e '/^Combined data file .*\.coverage/d'"
+COMPONENT_TEST_TRANSFORMS += "-e '/^Skipping duplicate data .*\.coverage/d'"
 
 # sort list of Sphinx doctest results
 COMPONENT_TEST_TRANSFORMS += \
@@ -389,9 +389,6 @@
 COMPONENT_TEST_ARGS =		$(PYTEST_ADDOPTS)
 COMPONENT_TEST_TARGETS =
 
-# Force pytest to not use colored output so the results normalization is unaffected
-PYTEST_ADDOPTS += --color=no
-
 USERLAND_TEST_REQUIRED_PACKAGES += library/python/pytest
 else ifeq ($(strip $(TEST_STYLE)),unittest)
 COMPONENT_TEST_CMD =		$(PYTHON) -m unittest
@@ -410,13 +407,94 @@
 # Run pytest verbose to get separate line per test in results output
 PYTEST_ADDOPTS += --verbose
 
+# Force pytest to not use colored output so the results normalization is unaffected
+PYTEST_ADDOPTS += --color=no
+
+# Avoid loading of unexpected pytest plugins.
+define disable-pytest-plugin
+PYTEST_ADDOPTS += $$(if $$(filter library/python/$(2)-$$(subst .,,$$(PYTHON_VERSION)), $$(REQUIRED_PACKAGES) $$(TEST_REQUIRED_PACKAGES) $$(COMPONENT_FMRI)-$$(subst .,,$$(PYTHON_VERSION))),,-p 'no:$(1)')
+endef
+$(eval $(call disable-pytest-plugin,anyio,anyio))
+$(eval $(call disable-pytest-plugin,asyncio,pytest-asyncio))		# adds line to test report header
+$(eval $(call disable-pytest-plugin,benchmark,pytest-benchmark))	# adds line to test report header; adds benchmark report
+$(eval $(call disable-pytest-plugin,black,pytest-black))		# runs extra test(s)
+$(eval $(call disable-pytest-plugin,check,pytest-check))
+$(eval $(call disable-pytest-plugin,checkdocs,pytest-checkdocs))	# runs extra test(s)
+$(eval $(call disable-pytest-plugin,console-scripts,pytest-console-scripts))
+$(eval $(call disable-pytest-plugin,cov,pytest-cov))
+$(eval $(call disable-pytest-plugin,custom_exit_code,pytest-custom-exit-code))
+$(eval $(call disable-pytest-plugin,enabler,pytest-enabler))
+$(eval $(call disable-pytest-plugin,env,pytest-env))
+$(eval $(call disable-pytest-plugin,faker,faker))
+$(eval $(call disable-pytest-plugin,flake8,pytest-flake8))
+$(eval $(call disable-pytest-plugin,flaky,flaky))
+$(eval $(call disable-pytest-plugin,freezer,pytest-freezer))
+$(eval $(call disable-pytest-plugin,helpers_namespace,pytest-helpers-namespace))
+$(eval $(call disable-pytest-plugin,hypothesispytest,hypothesis))	# adds line to test report header
+$(eval $(call disable-pytest-plugin,jaraco.test.http,jaraco-test))
+$(eval $(call disable-pytest-plugin,kgb,kgb))
+$(eval $(call disable-pytest-plugin,metadata,pytest-metadata))		# adds line to test report header
+$(eval $(call disable-pytest-plugin,mypy,pytest-mypy))			# runs extra test(s)
+$(eval $(call disable-pytest-plugin,perf,pytest-perf))			# https://github.com/jaraco/pytest-perf/issues/9
+$(eval $(call disable-pytest-plugin,pytest home,pytest-home))
+$(eval $(call disable-pytest-plugin,pytest-datadir,pytest-datadir))
+$(eval $(call disable-pytest-plugin,pytest-mypy-plugins,pytest-mypy-plugins))	# could cause tests to fail
+$(eval $(call disable-pytest-plugin,pytest-teamcity,teamcity-messages))
+$(eval $(call disable-pytest-plugin,pytest_expect,pytest-expect))
+$(eval $(call disable-pytest-plugin,pytest_fakefs,pyfakefs))
+$(eval $(call disable-pytest-plugin,pytest_forked,pytest-forked))
+$(eval $(call disable-pytest-plugin,pytest_httpserver,pytest-httpserver))
+$(eval $(call disable-pytest-plugin,pytest_ignore_flaky,pytest-ignore-flaky))
+$(eval $(call disable-pytest-plugin,pytest_lazyfixture,pytest-lazy-fixtures))
+$(eval $(call disable-pytest-plugin,pytest_mock,pytest-mock))
+$(eval $(call disable-pytest-plugin,randomly,pytest-randomly))		# reorders tests
+$(eval $(call disable-pytest-plugin,regressions,pytest-regressions))
+$(eval $(call disable-pytest-plugin,relaxed,pytest-relaxed))		# runs extra test(s); produces different test report
+$(eval $(call disable-pytest-plugin,reporter,pytest-reporter))		# https://github.com/christiansandberg/pytest-reporter/issues/8
+$(eval $(call disable-pytest-plugin,rerunfailures,pytest-rerunfailures))
+$(eval $(call disable-pytest-plugin,salt-factories,pytest-salt-factories))			# requires salt
+$(eval $(call disable-pytest-plugin,salt-factories-event-listener,pytest-salt-factories))	# requires salt
+$(eval $(call disable-pytest-plugin,salt-factories-factories,pytest-salt-factories))		# requires salt
+$(eval $(call disable-pytest-plugin,salt-factories-loader-mock,pytest-salt-factories))		# requires salt
+$(eval $(call disable-pytest-plugin,salt-factories-log-server,pytest-salt-factories))		# requires salt
+$(eval $(call disable-pytest-plugin,salt-factories-markers,pytest-salt-factories))		# requires salt
+$(eval $(call disable-pytest-plugin,salt-factories-sysinfo,pytest-salt-factories))		# requires salt
+$(eval $(call disable-pytest-plugin,salt-factories-sysstats,pytest-salt-factories))		# requires salt
+$(eval $(call disable-pytest-plugin,shell-utilities,pytest-shell-utilities))
+$(eval $(call disable-pytest-plugin,skip-markers,pytest-skip-markers))
+$(eval $(call disable-pytest-plugin,socket,pytest-socket))
+$(eval $(call disable-pytest-plugin,subprocess,pytest-subprocess))
+$(eval $(call disable-pytest-plugin,subtests,pytest-subtests))
+$(eval $(call disable-pytest-plugin,tempdir,pytest-tempdir))		# adds line to test report header
+$(eval $(call disable-pytest-plugin,time_machine,time-machine))
+$(eval $(call disable-pytest-plugin,timeout,pytest-timeout))
+$(eval $(call disable-pytest-plugin,travis-fold,pytest-travis-fold))
+$(eval $(call disable-pytest-plugin,typeguard,typeguard))
+$(eval $(call disable-pytest-plugin,unittest_mock,backports-unittest-mock))
+$(eval $(call disable-pytest-plugin,xdist,pytest-xdist))
+$(eval $(call disable-pytest-plugin,xdist.looponfail,pytest-xdist))
+$(eval $(call disable-pytest-plugin,xprocess,pytest-xprocess))		# adds a reminder line to test output
+
+# By default we are not interested in full list of test failures so exit on
+# first failure to save time.  This could be easily overridden from environment
+# if needed (for example to debug test failures) or in per-component Makefile.
+PYTEST_FASTFAIL = -x
+PYTEST_ADDOPTS += $(PYTEST_FASTFAIL)
+
+# By default we are not interested to see the default long tracebacks.
+# Detailed tracebacks are shown either for failures or xfails.  We aim to see
+# testing passed so there should be no failures.  Since xfails are expected
+# failures we are not interested in detailed tracebacks here at all since they
+# could contain random data, like pointers, temporary file names, etc.
+PYTEST_TRACEBACK = --tb=line
+PYTEST_ADDOPTS += $(PYTEST_TRACEBACK)
+
 # Normalize pytest test results.  The pytest framework could be used either
 # directly or via tox or setup.py so add these transforms for all test styles
 # unconditionally.
 COMPONENT_TEST_TRANSFORMS += \
 	"-e 's/^\(platform sunos5 -- Python \)$(shell echo $(PYTHON_VERSION) | $(GSED) -e 's/\./\\./g')\.[0-9]\{1,\}.*\( -- .*\)/\1\$$(PYTHON_VERSION).X\2/'"
 COMPONENT_TEST_TRANSFORMS += "-e '/^Using --randomly-seed=[0-9]\{1,\}$$/d'"	# this is random
-COMPONENT_TEST_TRANSFORMS += "-e '/^benchmark: /d'"				# line with version details
 COMPONENT_TEST_TRANSFORMS += "-e '/^plugins: /d'"				# order of listed plugins could vary
 COMPONENT_TEST_TRANSFORMS += "-e '/^-\{1,\} coverage: /,/^$$/d'"		# remove coverage report
 # sort list of pytest unit tests and drop percentage
@@ -430,9 +508,48 @@
 COMPONENT_TEST_TRANSFORMS += \
 	"-e 's/^=\{1,\} \(.*\) in [0-9]\{1,\}\.[0-9]\{1,\}s \(([^)]*) \)\?=\{1,\}$$/======== \1 ========/'"	# remove timing
 # Remove slowest durations report for projects that run pytest with --durations option
-COMPONENT_TEST_TRANSFORMS += "-e '/^=\{1,\} slowest [0-9]\{1,\} durations =\{1,\}$$/,/^=/{/^=/!d}'"
+COMPONENT_TEST_TRANSFORMS += "-e '/^=\{1,\} slowest [0-9 ]*durations =\{1,\}$$/,/^=/{/^=/!d}'"
 # Remove short test summary info for projects that run pytest with -r option
 COMPONENT_TEST_TRANSFORMS += "-e '/^=\{1,\} short test summary info =\{1,\}$$/,/^=/{/^=/!d}'"
+
+# Normalize test results produced by pytest-benchmark
+COMPONENT_TEST_TRANSFORMS += \
+	$(if $(filter library/python/pytest-benchmark-$(subst .,,$(PYTHON_VERSION)), $(REQUIRED_PACKAGES) $(TEST_REQUIRED_PACKAGES)),"| ( \
+		$(GSED) -e '/^-\{1,\} benchmark/,/^=/{/^=/!d}' \
+	) | $(COMPONENT_TEST_TRANSFORMER) -e ''")
+
+# Normalize test results produced by pytest-xdist
+COMPONENT_TEST_TRANSFORMS += \
+	$(if $(filter library/python/pytest-xdist-$(subst .,,$(PYTHON_VERSION)), $(REQUIRED_PACKAGES) $(TEST_REQUIRED_PACKAGES)),"| ( \
+		$(GSED) -u \
+			-e '/^created: .* workers$$/d' \
+			-e 's/^[0-9]\{1,\}\( workers \[[0-9]\{1,\} items\]\)$$/X\1/' \
+			-e '/^scheduling tests via /q' ; \
+		$(GSED) -u -e '/^$$/q' ; \
+		$(GSED) -u -n -e '/^\[gw/p' -e '/^$$/Q' | ( $(GSED) \
+			-e 's/^\[gw[0-9]\{1,\}\] \[...%\] //' \
+			-e 's/ *$$//' \
+			-e 's/\([^ ]\{1,\}\) \(.*\)$$/\2 \1/' \
+			| $(SORT) | $(NAWK) '{print}END{if(NR>0)printf(\"\\\\n\")}' ; \
+		) ; \
+		$(CAT) \
+	) | $(COMPONENT_TEST_TRANSFORMER) -e ''")
+
+# Normalize stestr test results
+USE_STESTR = $(filter library/python/stestr-$(subst .,,$(PYTHON_VERSION)), $(REQUIRED_PACKAGES) $(TEST_REQUIRED_PACKAGES))
+COMPONENT_TEST_TRANSFORMS += \
+	$(if $(strip $(USE_STESTR)),"| ( \
+			$(GSED) -e '0,/^{[0-9]\{1,\}}/{//i\'\$$'\\\n{0}\\\n}' \
+				-e 's/^\(Ran: [0-9]\{1,\} tests\{0,1\}\) in .*\$$/\1/' \
+				-e '/^Sum of execute time for each test/d' \
+				-e '/^ - Worker /d' \
+		) | ( \
+			$(GSED) -u -e '/^{0}\$$/Q' ; \
+			$(GSED) -u -e 's/^{[0-9]\{1,\}} //' \
+				-e 's/\[[.0-9]\{1,\}s\] \.\.\./.../' \
+				-e '/^\$$/Q' | $(SORT) | $(GSED) -e '\$$a\'\$$'\\\n\\\n' ; \
+			$(CAT) \
+		) | $(COMPONENT_TEST_TRANSFORMER) -e ''")
 
 # Normalize setup.py test results.  The setup.py testing could be used either
 # directly or via tox so add these transforms for all test styles
@@ -475,6 +592,24 @@
 	$(COMPONENT_TEST_CLEANUP)
 	$(TOUCH) $@
 
+ifeq ($(strip $(SINGLE_PYTHON_VERSION)),no)
+# Temporarily create symlinks for renamed binaries
+COMPONENT_PRE_TEST_ACTION += \
+	for f in $(PROTOUSRBINDIR)/*-$(PYTHON_VERSION) ; do \
+		[ -f $$f ] || continue ; \
+		[ -L $${f%%-$(PYTHON_VERSION)} ] && $(RM) $${f%%-$(PYTHON_VERSION)} ; \
+		[ -e $${f%%-$(PYTHON_VERSION)} ] && continue ; \
+		$(SYMLINK) $$(basename $$f) $${f%%-$(PYTHON_VERSION)} ; \
+	done ;
+
+# Cleanup of temporary symlinks
+COMPONENT_POST_TEST_ACTION += \
+	for f in $(PROTOUSRBINDIR)/*-$(PYTHON_VERSION) ; do \
+		[ -f $$f ] || continue ; \
+		[ ! -L $${f%%-$(PYTHON_VERSION)} ] || $(RM) $${f%%-$(PYTHON_VERSION)} ; \
+	done ;
+endif
+
 
 ifeq ($(strip $(SINGLE_PYTHON_VERSION)),no)
 # We need to add -$(PYV) to package fmri
@@ -514,32 +649,34 @@
 # Generate raw lists of test dependencies per Python version
 COMPONENT_POST_INSTALL_ACTION += \
 	cd $(@D)$(COMPONENT_SUBDIR:%=/%) ; \
-	for f in $(TEST_REQUIREMENTS) ; do \
-		$(CAT) $$f ; \
-	done | $(WS_TOOLS)/python-resolve-deps \
+	( for f in $(TEST_REQUIREMENTS) ; do \
+		$(CAT) $$f | $(DOS2UNIX) -ascii ; \
+	done ; \
+	for e in $(TEST_REQUIREMENTS_EXTRAS) ; do \
+		PYTHONPATH=$(PROTO_DIR)/$(PYTHON_DIR)/site-packages:$(PROTO_DIR)/$(PYTHON_LIB) \
+			$(PYTHON) $(WS_TOOLS)/python-requires $(COMPONENT_NAME) $$e ; \
+	done ) | $(WS_TOOLS)/python-resolve-deps \
 		PYTHONPATH=$(PROTO_DIR)/$(PYTHON_DIR)/site-packages:$(PROTO_DIR)/$(PYTHON_LIB) \
 		$(PYTHON) $(WS_TOOLS)/python-requires $(COMPONENT_NAME) \
 	| $(PYTHON) $(WS_TOOLS)/python-requires - >> $(@D)/.depend-test ;
 
 # Convert raw per version lists of test dependencies to single list of
-# TEST_REQUIRED_PACKAGES entries
+# TEST_REQUIRED_PACKAGES entries.  Some Python projects list their own project
+# as a test dependency so filter this out here too.
 $(BUILD_DIR)/META.depend-test.required:	$(INSTALL_$(MK_BITS))
 	$(CAT) $(INSTALL_$(MK_BITS):%.installed=%.depend-test) | $(SORT) -u \
-		| $(GSED) -e 's/.*/TEST_REQUIRED_PACKAGES.python += library\/python\/&/' > $@
+		| $(GSED) -e 's/.*/TEST_REQUIRED_PACKAGES.python += library\/python\/&/' \
+		| ( $(GNU_GREP) -v ' $(COMPONENT_FMRI)$$' || true ) \
+		> $@
 
 # Add META.depend-test.required to the generated list of REQUIRED_PACKAGES
 REQUIRED_PACKAGES_TRANSFORM += -e '$$r $(BUILD_DIR)/META.depend-test.required'
-
-# The python-requires script requires importlib_metadata for Python 3.7 to
-# provide useful output.  Since we do fake bootstrap for Python 3.7 we require
-# the package here unconditionally.
-USERLAND_REQUIRED_PACKAGES += library/python/importlib-metadata-37
 
 # The python-requires script requires packaging to provide useful output but
 # packaging might be unavailable during bootstrap until we reach bootstrap
 # checkpoint 2.  So require it conditionally.
 ifeq ($(filter $(strip $(COMPONENT_NAME)),$(PYTHON_BOOTSTRAP_CHECKPOINT_2)),)
-PYTHON_USERLAND_REQUIRED_PACKAGES += library/python/packaging
+USERLAND_REQUIRED_PACKAGES.python += library/python/packaging
 endif
 
 

--
Gitblit v1.9.3