Marcel Telka
2023-10-16 5ea30ad8664e94f0cfeee9ae3c3d4b2fb86ec263
gfx-drm: fix build

5 files added
2 files modified
745 lines changed
components/openindiana/gfx-drm/Makefile (39 lines)
components/openindiana/gfx-drm/packages.ignore.in
components/openindiana/gfx-drm/patches/0001-userland-fetch-should-be-PEP8-compliant.patch (468 lines)
components/openindiana/gfx-drm/patches/0002-Convert-oi-userland-tools-to-Python-3.5.patch (225 lines)
components/openindiana/gfx-drm/pkg5 (1 line)
components/openindiana/gfx-drm/pkg5.complete.fmris (6 lines)
components/openindiana/gfx-drm/pkg5.fmris (6 lines)
components/openindiana/gfx-drm/Makefile
@@ -34,6 +34,9 @@
CLEAN_PATHS += $(BUILD_DIR)
CLOBBER_PATHS += $(SOURCE_DIR)
COMPONENT_PREP_GIT=no
include $(WS_MAKE_RULES)/prep.mk
$(SOURCE_DIR)/.downloaded: 
    @[ -d $(SOURCE_DIR) ] || \
    $(GIT) clone -b $(GIT_BRANCH) $(GIT_REPO) $(SOURCE_DIR)
@@ -48,19 +51,6 @@
      $(GIT) log -1 --format=%H > .downloaded
download:: $(SOURCE_DIR)/.downloaded
PATCH_DIR?= patches
PATCH_PATTERN?= *.patch
PATCHES= # please don't patch -- update gfx-drm instead
$(SOURCE_DIR)/.patched:    $(SOURCE_DIR)/.downloaded $(PATCHES)
    $(MKDIR) $(@D)
    cd $(SOURCE_DIR) && \
        $(GIT) checkout -f && \
              $(GIT) clean -f
    $(TOUCH) $@
prep::    $(SOURCE_DIR)/.patched
$(BUILD_DIR)/$(MACH)/.built: $(SOURCE_DIR)/.patched
    $(MKDIR) $(@D)
@@ -135,12 +125,23 @@
clobber::       clean
    $(RM) -r $(CLOBBER_PATHS)
PACKAGE_NAMES  = driver/graphics/agpgart
PACKAGE_NAMES += driver/graphics/drm
PACKAGE_NAMES += system/header/header-agp
PACKAGE_NAMES += system/header/header-drm
PACKAGE_NAMES += system/test/libdrm
PACKAGE_NAMES += x11/library/libdrm
#
# Ensure that when printing packages delivered by gfx-drm, we ignore the
# packages that will be dropped when publishing to the userland repository.
#
# Note that the "pkg5.complete.fmris" cache file should contain all packages
# delivered by the gate, so that "packages.ignore.in" can be modified to apply
# a new filter without rebuilding the gate or the cached list.
#
print-package-names:
    if test -d $(GFX_DRM_REPO); then \
        pkgrepo list -H -F tsv -s $(GFX_DRM_REPO) \
            | cut -f 2 \
            | LANG=C LC_ALL=C sort -u \
            > pkg5.complete.fmris; \
    fi
    grep -v -x -F -f packages.ignore.in pkg5.complete.fmris > pkg5.fmris
    cat pkg5.fmris
REQUIRED_PACKAGES += developer/build/onbld
REQUIRED_PACKAGES += diagnostic/scanpci
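
Note: the print-package-names rule above works in two steps. It first caches the complete package list from $(GFX_DRM_REPO) in pkg5.complete.fmris, then drops any exact lines listed in packages.ignore.in to produce pkg5.fmris. The following Python sketch mirrors that grep -v -x -F -f filtering step; the file names are taken from the Makefile, but the script itself is illustrative only and is not part of this commit.

#!/usr/bin/python3
# filter_fmris.py -- illustrative sketch only, not part of the gfx-drm commit.
# Mirrors: grep -v -x -F -f packages.ignore.in pkg5.complete.fmris > pkg5.fmris
def filter_fmris(complete='pkg5.complete.fmris',
                 ignore='packages.ignore.in',
                 out='pkg5.fmris'):
    with open(ignore) as f:
        # whole-line, fixed-string matches are dropped (grep -v -x -F)
        ignored = {line.rstrip('\n') for line in f if line.strip()}
    with open(complete) as f:
        kept = [line.rstrip('\n') for line in f
                if line.strip() and line.rstrip('\n') not in ignored]
    with open(out, 'w') as f:
        f.write('\n'.join(kept) + '\n')
    return kept

if __name__ == '__main__':
    for fmri in filter_fmris():
        print(fmri)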
components/openindiana/gfx-drm/packages.ignore.in
components/openindiana/gfx-drm/patches/0001-userland-fetch-should-be-PEP8-compliant.patch
New file
@@ -0,0 +1,468 @@
From 0360486a6e330a7cfef86c6db8ffa01a98deda0a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Adam=20=C5=A0tevko?= <adam.stevko@gmail.com>
Date: Thu, 20 Oct 2016 22:36:17 +0200
Subject: [PATCH 1/2] userland-fetch should be PEP8 compliant
---
 usr/src/tools/userland-fetch | 426 ++++++++++++++++++-----------------
 1 file changed, 216 insertions(+), 210 deletions(-)
diff --git a/usr/src/tools/userland-fetch b/usr/src/tools/userland-fetch
index 47cc672..ac93d10 100755
--- a/usr/src/tools/userland-fetch
+++ b/usr/src/tools/userland-fetch
@@ -36,238 +36,244 @@ from urllib2 import urlopen
 from urllib2 import Request
 import hashlib
+
 def printIOError(e, txt):
-    """ Function to decode and print IOError type exception """
-    print "I/O Error: " + txt + ": "
-    try:
-        (code, message) = e
-        print str(message) + " (" + str(code) + ")"
-    except:
-        print str(e)
-
+    """ Function to decode and print IOError type exception """
+    print "I/O Error: " + txt + ": "
+    try:
+        (code, message) = e
+        print str(message) + " (" + str(code) + ")"
+    except:
+        print str(e)
+
+
 def validate(file, hash):
-    algorithm, hashvalue = hash.split(':')
+    algorithm, hashvalue = hash.split(':')
+
+    # force migration away from sha1
+    if algorithm == "sha1":
+        algorithm = "sha256"
+    try:
+        m = hashlib.new(algorithm)
+    except ValueError:
+        return False
-    # force migration away from sha1
-    if algorithm == "sha1":
-        algorithm = "sha256"
-    try:
-        m = hashlib.new(algorithm)
-    except ValueError:
-        return False
+    while True:
+        try:
+            block = file.read()
+        except IOError, err:
+            print str(err),
+            break
-    while True:
-        try:
-            block = file.read()
-        except IOError, err:
-            print str(err),
-            break
+        m.update(block)
+        if block == '':
+            break
-        m.update(block)
-        if block == '':
-            break
+    return "%s:%s" % (algorithm, m.hexdigest())
-    return "%s:%s" % (algorithm, m.hexdigest())
 def validate_container(filename, hash):
-    try:
-        file = open(filename, 'r')
-    except IOError as e:
-        printIOError(e, "Can't open file " + filename)
-        return False
-    return validate(file, hash)
+    try:
+        file = open(filename, 'r')
+    except IOError as e:
+        printIOError(e, "Can't open file " + filename)
+        return False
+    return validate(file, hash)
 def validate_payload(filename, hash):
-    import re
-    import gzip
-    import bz2
-
-    expr_bz = re.compile('.+\.bz2$', re.IGNORECASE)
-    expr_gz = re.compile('.+\.gz$', re.IGNORECASE)
-    expr_tgz = re.compile('.+\.tgz$', re.IGNORECASE)
-
-    try:
-        if expr_bz.match(filename):
-            file = bz2.BZ2File(filename, 'r')
-        elif expr_gz.match(filename):
-            file = gzip.GzipFile(filename, 'r')
-        elif expr_tgz.match(filename):
-            file = gzip.GzipFile(filename, 'r')
-        else:
-            return False
-    except IOError as e:
-        printIOError(e, "Can't open archive " + filename)
-        return False
-    return validate(file, hash)
-
-
-def download(url, filename = None, user_agent_arg = None):
-    src = None
-
-    try:
-        req = Request(url)
-        if user_agent_arg != None:
-            req.add_header("User-Agent", user_agent_arg)
-        src = urlopen(req)
-    except IOError as e:
-        printIOError(e, "Can't open url " + url)
-        return None
-
-    # 3xx, 4xx and 5xx (f|ht)tp codes designate unsuccessfull action
-    if src.getcode() and (3 <= int(src.getcode()/100) <= 5):
-        print "Error code: " + str(src.getcode())
-        return None
-
-    if filename == None:
-        filename = src.geturl().split('/')[-1]
-
-    try:
-        dst = open(filename, 'wb');
-    except IOError as e:
-        printIOError(e, "Can't open file " + filename + " for writing")
-        src.close()
-        return None
-
-    while True:
-        block = src.read()
-        if block == '':
-            break;
-        dst.write(block)
-
-    src.close()
-    dst.close()
-
-    # return the name of the file that we downloaded the data to.
-    return filename
+    import re
+    import gzip
+    import bz2
+
+    expr_bz = re.compile('.+\.bz2$', re.IGNORECASE)
+    expr_gz = re.compile('.+\.gz$', re.IGNORECASE)
+    expr_tgz = re.compile('.+\.tgz$', re.IGNORECASE)
+
+    try:
+        if expr_bz.match(filename):
+            file = bz2.BZ2File(filename, 'r')
+        elif expr_gz.match(filename):
+            file = gzip.GzipFile(filename, 'r')
+        elif expr_tgz.match(filename):
+            file = gzip.GzipFile(filename, 'r')
+        else:
+            return False
+    except IOError as e:
+        printIOError(e, "Can't open archive " + filename)
+        return False
+    return validate(file, hash)
+
+
+def download(url, filename=None, user_agent_arg=None):
+    src = None
+
+    try:
+        req = Request(url)
+        if user_agent_arg != None:
+            req.add_header("User-Agent", user_agent_arg)
+        src = urlopen(req)
+    except IOError as e:
+        printIOError(e, "Can't open url " + url)
+        return None
+
+    # 3xx, 4xx and 5xx (f|ht)tp codes designate unsuccessfull action
+    if src.getcode() and (3 <= int(src.getcode() / 100) <= 5):
+        print "Error code: " + str(src.getcode())
+        return None
+
+    if filename == None:
+        filename = src.geturl().split('/')[-1]
+
+    try:
+        dst = open(filename, 'wb')
+    except IOError as e:
+        printIOError(e, "Can't open file " + filename + " for writing")
+        src.close()
+        return None
+
+    while True:
+        block = src.read()
+        if block == '':
+            break
+        dst.write(block)
+
+    src.close()
+    dst.close()
+
+    # return the name of the file that we downloaded the data to.
+    return filename
+
 def download_paths(search, filename, url):
-    urls = list()
+    urls = list()
+
+    if filename != None:
+        tmp = os.getenv('DOWNLOAD_SEARCH_PATH')
+        if tmp:
+            search += tmp.split(' ')
-    if filename != None:
-        tmp = os.getenv('DOWNLOAD_SEARCH_PATH')
-        if tmp:
-            search += tmp.split(' ')
+        file = os.path.basename(filename)
-        file = os.path.basename(filename)
+        urls = [base + '/' + file for base in search]
-        urls = [ base + '/' + file for base in search ]
+        # filename should always be first
+        if filename in urls:
+            urls.remove(filename)
+        urls.insert(0, filename)
-        # filename should always be first
-        if filename in urls:
-            urls.remove(filename)
-        urls.insert(0, filename)
+    # command line url is a fallback, so it's last
+    if url != None and url not in urls:
+        urls.append(url)
-    # command line url is a fallback, so it's last
-    if url != None and url not in urls:
-        urls.append(url)
+    return urls
-    return urls
 def usage():
-    print "Usage: %s [-a|--user-agent (user-agent)] [-f|--file (file)] " \
-        "[-l|--link] [-h|--hash (hash)] [-s|--search (search-dir)] " \
-        "-u|--url (url)" % (sys.argv[0].split('/')[-1])
-    sys.exit(1)
+    print "Usage: %s [-a|--user-agent (user-agent)] [-f|--file (file)] " \
+        "[-l|--link] [-h|--hash (hash)] [-s|--search (search-dir)] " \
+        "-u|--url (url)" % (sys.argv[0].split('/')[-1])
+    sys.exit(1)
+
 def main():
-    import getopt
-
-    # FLUSH STDOUT
-    sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
-
-    user_agent_arg = None
-    file_arg = None
-    link_arg = False
-    keep_arg = False
-    hash_arg = None
-    url_arg = None
-    search_list = list()
-
-    try:
-        opts, args = getopt.getopt(sys.argv[1:], "a:f:h:lks:u:",
-            ["file=", "link", "keep", "hash=", "search=", "url=",
-            "user-agent="])
-    except getopt.GetoptError, err:
-        print str(err)
-        usage()
-
-    for opt, arg in opts:
-        if opt in [ "-a", "--user-agent" ]:
-            user_agent_arg = arg
-        elif opt in [ "-f", "--file" ]:
-            file_arg = arg
-        elif opt in [ "-l", "--link" ]:
-            link_arg = True
-        elif opt in [ "-k", "--keep" ]:
-            keep_arg = True
-        elif opt in [ "-h", "--hash" ]:
-            hash_arg = arg
-        elif opt in [ "-s", "--search" ]:
-            search_list.append(arg)
-        elif opt in [ "-u", "--url" ]:
-            url_arg = arg
-        else:
-            assert False, "unknown option"
-
-    if url_arg == None:
-        usage()
-
-    for url in download_paths(search_list, file_arg, url_arg):
-        print "Source %s..." % url,
-
-        scheme, path = splittype(url)
-        name = file_arg
-
-        if scheme in [ None, 'file' ]:
-            if os.path.exists(path) == False:
-                print "not found, skipping file copy"
-                continue
-            elif name != path:
-                if link_arg == False:
-                    print "\n    copying..."
-                    shutil.copy2(path, name)
-                else:
-                    print "\n    linking..."
-                    os.symlink(path, name)
-            else:
-                pass
-        elif scheme in [ 'http', 'https', 'ftp' ]:
-            print "\n    downloading...",
-            name = download(url, file_arg, user_agent_arg)
-            if name == None:
-                print "failed"
-                continue
-
-        print "\n    validating...",
-        if hash_arg == None:
-            print "skipping (no hash)"
-            sys.exit(0)
-
-        realhash = validate_container(name, hash_arg)
-        if realhash == hash_arg:
-            print "ok"
-            sys.exit(0)
-        else:
-            payloadhash = validate_payload(name, hash_arg)
-            if payloadhash == hash_arg:
-                print "ok"
-                sys.exit(0)
-            print "corruption detected"
-            print "    expected: %s" % hash_arg
-            print "    actual:   %s" % realhash
-            print "    payload:  %s" % payloadhash
-
-        if keep_arg == False:
-            try:
-                print "\nWARN: Removing the corrupt downloaded file"
-                os.remove(name)
-            except OSError:
-                pass
-        else:
-            print "\nINFO: Keeping the downloaded file because asked to"
-
-    sys.exit(1)
+    import getopt
+
+    # FLUSH STDOUT
+    sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
+
+    user_agent_arg = None
+    file_arg = None
+    link_arg = False
+    keep_arg = False
+    hash_arg = None
+    url_arg = None
+    search_list = list()
+
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], "a:f:h:lks:u:",
+                                   ["file=", "link", "keep", "hash=", "search=", "url=",
+                                    "user-agent="])
+    except getopt.GetoptError, err:
+        print str(err)
+        usage()
+
+    for opt, arg in opts:
+        if opt in ["-a", "--user-agent"]:
+            user_agent_arg = arg
+        elif opt in ["-f", "--file"]:
+            file_arg = arg
+        elif opt in ["-l", "--link"]:
+            link_arg = True
+        elif opt in ["-k", "--keep"]:
+            keep_arg = True
+        elif opt in ["-h", "--hash"]:
+            hash_arg = arg
+        elif opt in ["-s", "--search"]:
+            search_list.append(arg)
+        elif opt in ["-u", "--url"]:
+            url_arg = arg
+        else:
+            assert False, "unknown option"
+
+    if url_arg == None:
+        usage()
+
+    for url in download_paths(search_list, file_arg, url_arg):
+        print "Source %s..." % url,
+
+        scheme, path = splittype(url)
+        name = file_arg
+
+        if scheme in [None, 'file']:
+            if os.path.exists(path) == False:
+                print "not found, skipping file copy"
+                continue
+            elif name != path:
+                if link_arg == False:
+                    print "\n    copying..."
+                    shutil.copy2(path, name)
+                else:
+                    print "\n    linking..."
+                    os.symlink(path, name)
+            else:
+                pass
+        elif scheme in ['http', 'https', 'ftp']:
+            print "\n    downloading...",
+            name = download(url, file_arg, user_agent_arg)
+            if name == None:
+                print "failed"
+                continue
+
+        print "\n    validating...",
+        if hash_arg == None:
+            print "skipping (no hash)"
+            sys.exit(0)
+
+        realhash = validate_container(name, hash_arg)
+        if realhash == hash_arg:
+            print "ok"
+            sys.exit(0)
+        else:
+            payloadhash = validate_payload(name, hash_arg)
+            if payloadhash == hash_arg:
+                print "ok"
+                sys.exit(0)
+            print "corruption detected"
+            print "    expected: %s" % hash_arg
+            print "    actual:   %s" % realhash
+            print "    payload:  %s" % payloadhash
+
+        if keep_arg == False:
+            try:
+                print "\nWARN: Removing the corrupt downloaded file"
+                os.remove(name)
+            except OSError:
+                pass
+        else:
+            print "\nINFO: Keeping the downloaded file because asked to"
+
+    sys.exit(1)
 if __name__ == "__main__":
-    main()
+    main()
components/openindiana/gfx-drm/patches/0002-Convert-oi-userland-tools-to-Python-3.5.patch
New file
@@ -0,0 +1,225 @@
From 22bd08d19c456bb0b359bbefb6f5f638cc77865d Mon Sep 17 00:00:00 2001
From: Alexander Pyhalov <apyhalov@gmail.com>
Date: Thu, 8 Aug 2019 09:13:24 +0300
Subject: [PATCH 2/2] Convert oi-userland tools to Python 3.5
---
 usr/src/tools/userland-fetch | 90 ++++++++++++++++--------------------
 1 file changed, 41 insertions(+), 49 deletions(-)
diff --git a/usr/src/tools/userland-fetch b/usr/src/tools/userland-fetch
index ac93d10..d89391b 100755
--- a/usr/src/tools/userland-fetch
+++ b/usr/src/tools/userland-fetch
@@ -22,7 +22,7 @@
 # Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
 #
 #
-# fetch.py - a file download utility
+# userland-fetch - a file download utility
 #
 #  A simple program similiar to wget(1), but handles local file copy, ignores
 #  directories, and verifies file hashes.
@@ -31,20 +31,20 @@
 import os
 import sys
 import shutil
-from urllib import splittype
-from urllib2 import urlopen
-from urllib2 import Request
+from urllib.parse import splittype
+from urllib.request import urlopen
+from urllib.request import Request
 import hashlib
 def printIOError(e, txt):
     """ Function to decode and print IOError type exception """
-    print "I/O Error: " + txt + ": "
+    print("I/O Error: " + txt + ": ")
     try:
         (code, message) = e
-        print str(message) + " (" + str(code) + ")"
+        print(str(message) + " (" + str(code) + ")")
     except:
-        print str(e)
+        print(str(e))
 def validate(file, hash):
@@ -58,23 +58,20 @@ def validate(file, hash):
     except ValueError:
         return False
-    while True:
-        try:
-            block = file.read()
-        except IOError, err:
-            print str(err),
-            break
-
+    try:
+        block = file.read()
         m.update(block)
-        if block == '':
-            break
+        return "%s:%s" % (algorithm, m.hexdigest())
+    except IOError as err:
+        print(str(err), end=' ')
+
+    return "%s:" % (algorithm)
-    return "%s:%s" % (algorithm, m.hexdigest())
 def validate_container(filename, hash):
     try:
-        file = open(filename, 'r')
+        file = open(filename, 'rb')
     except IOError as e:
         printIOError(e, "Can't open file " + filename)
         return False
@@ -92,11 +89,11 @@ def validate_payload(filename, hash):
     try:
         if expr_bz.match(filename):
-            file = bz2.BZ2File(filename, 'r')
+            file = bz2.BZ2File(filename, 'rb')
         elif expr_gz.match(filename):
-            file = gzip.GzipFile(filename, 'r')
+            file = gzip.GzipFile(filename, 'rb')
         elif expr_tgz.match(filename):
-            file = gzip.GzipFile(filename, 'r')
+            file = gzip.GzipFile(filename, 'rb')
         else:
             return False
     except IOError as e:
@@ -119,7 +116,7 @@ def download(url, filename=None, user_agent_arg=None):
     # 3xx, 4xx and 5xx (f|ht)tp codes designate unsuccessfull action
     if src.getcode() and (3 <= int(src.getcode() / 100) <= 5):
-        print "Error code: " + str(src.getcode())
+        print("Error code: " + str(src.getcode()))
         return None
     if filename == None:
@@ -132,11 +129,7 @@ def download(url, filename=None, user_agent_arg=None):
         src.close()
         return None
-    while True:
-        block = src.read()
-        if block == '':
-            break
-        dst.write(block)
+    dst.write(src.read())
     src.close()
     dst.close()
@@ -170,17 +163,16 @@ def download_paths(search, filename, url):
 def usage():
-    print "Usage: %s [-a|--user-agent (user-agent)] [-f|--file (file)] " \
+    print("Usage: %s [-a|--user-agent (user-agent)] [-f|--file (file)] " \
         "[-l|--link] [-h|--hash (hash)] [-s|--search (search-dir)] " \
-        "-u|--url (url)" % (sys.argv[0].split('/')[-1])
+        "-u|--url (url)" % (sys.argv[0].split('/')[-1]))
     sys.exit(1)
 def main():
     import getopt
-    # FLUSH STDOUT
-    sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
+    sys.stdout.flush()
     user_agent_arg = None
     file_arg = None
@@ -194,8 +186,8 @@ def main():
         opts, args = getopt.getopt(sys.argv[1:], "a:f:h:lks:u:",
                                    ["file=", "link", "keep", "hash=", "search=", "url=",
                                     "user-agent="])
-    except getopt.GetoptError, err:
-        print str(err)
+    except getopt.GetoptError as err:
+        print(str(err))
         usage()
     for opt, arg in opts:
@@ -220,58 +212,58 @@ def main():
         usage()
     for url in download_paths(search_list, file_arg, url_arg):
-        print "Source %s..." % url,
+        print("Source %s..." % url, end=' ')
         scheme, path = splittype(url)
         name = file_arg
         if scheme in [None, 'file']:
             if os.path.exists(path) == False:
-                print "not found, skipping file copy"
+                print("not found, skipping file copy")
                 continue
             elif name != path:
                 if link_arg == False:
-                    print "\n    copying..."
+                    print("\n    copying...")
                     shutil.copy2(path, name)
                 else:
-                    print "\n    linking..."
+                    print("\n    linking...")
                     os.symlink(path, name)
             else:
                 pass
         elif scheme in ['http', 'https', 'ftp']:
-            print "\n    downloading...",
+            print("\n    downloading...", end=' ')
             name = download(url, file_arg, user_agent_arg)
             if name == None:
-                print "failed"
+                print("failed")
                 continue
-        print "\n    validating...",
+        print("\n    validating...", end=' ')
         if hash_arg == None:
-            print "skipping (no hash)"
+            print("skipping (no hash)")
             sys.exit(0)
         realhash = validate_container(name, hash_arg)
         if realhash == hash_arg:
-            print "ok"
+            print("ok")
             sys.exit(0)
         else:
             payloadhash = validate_payload(name, hash_arg)
             if payloadhash == hash_arg:
-                print "ok"
+                print("ok")
                 sys.exit(0)
-            print "corruption detected"
-            print "    expected: %s" % hash_arg
-            print "    actual:   %s" % realhash
-            print "    payload:  %s" % payloadhash
+            print("corruption detected")
+            print("    expected: %s" % hash_arg)
+            print("    actual:   %s" % realhash)
+            print("    payload:  %s" % payloadhash)
         if keep_arg == False:
             try:
-                print "\nWARN: Removing the corrupt downloaded file"
+                print("\nWARN: Removing the corrupt downloaded file")
                 os.remove(name)
             except OSError:
                 pass
         else:
-            print "\nINFO: Keeping the downloaded file because asked to"
+            print("\nINFO: Keeping the downloaded file because asked to")
     sys.exit(1)
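
Note: after this patch, validate() reads the whole payload in binary mode in one pass and returns the digest as "algorithm:hexdigest". A self-contained Python 3 sketch of that validation idiom follows; the function names here are illustrative, and the real logic lives in usr/src/tools/userland-fetch.

#!/usr/bin/python3
# hash_check.py -- illustrative sketch of the post-patch validation idiom.
import hashlib

def checksum(path, algorithm='sha256'):
    # Open in binary mode ('rb'), as the patched validate_container() and
    # validate_payload() do, hash the whole file, and return "algo:hexdigest".
    m = hashlib.new(algorithm)
    with open(path, 'rb') as f:
        m.update(f.read())
    return '%s:%s' % (algorithm, m.hexdigest())

def matches(path, expected):
    # expected has the form "algorithm:hexdigest"; like the tool, a sha1
    # hash is recomputed as sha256 to force migration away from sha1.
    algorithm, _ = expected.split(':', 1)
    if algorithm == 'sha1':
        algorithm = 'sha256'
    return checksum(path, algorithm) == expected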
components/openindiana/gfx-drm/pkg5
@@ -1,6 +1,5 @@
{
    "dependencies": [
        "SUNWcs",
        "developer/build/onbld",
        "developer/debug/mdb",
        "diagnostic/scanpci",
components/openindiana/gfx-drm/pkg5.complete.fmris
New file
@@ -0,0 +1,6 @@
driver/graphics/agpgart
driver/graphics/drm
system/header/header-agp
system/header/header-drm
system/test/libdrm
x11/library/libdrm
components/openindiana/gfx-drm/pkg5.fmris
New file
@@ -0,0 +1,6 @@
driver/graphics/agpgart
driver/graphics/drm
system/header/header-agp
system/header/header-drm
system/test/libdrm
x11/library/libdrm