From 7b9ced3fbcd6ddc95c0c0aee45d5feb51d78f79b Mon Sep 17 00:00:00 2001
From: Archzfs Buildbot <34833957+archzfs-bot@users.noreply.github.com>
Date: Mon, 17 Aug 2020 00:57:00 +0000
Subject: [PATCH] Semi-automated update for zfs 0.8.4

---
 PKGBUILD                         |  12 +-
 linux-5.8-compat-__vmalloc.patch | 209 +++++++++++++++++++++++++++++++
 2 files changed, 219 insertions(+), 2 deletions(-)
 create mode 100644 linux-5.8-compat-__vmalloc.patch

diff --git a/PKGBUILD b/PKGBUILD
index 1948b1e..b6f91a2 100644
--- a/PKGBUILD
+++ b/PKGBUILD
@@ -13,14 +13,22 @@ pkgrel=1
 makedepends=()
 arch=("x86_64")
 url="https://zfsonlinux.org/"
-source=("https://github.com/zfsonlinux/zfs/releases/download/zfs-${pkgver}/zfs-${pkgver}.tar.gz")
-sha256sums=("2b988f5777976f09d08083f6bebf6e67219c4c4c183c1f33033fb7e5e5eacafb")
+source=("https://github.com/zfsonlinux/zfs/releases/download/zfs-${pkgver}/zfs-${pkgver}.tar.gz"
+        "linux-5.8-compat-__vmalloc.patch"
+)
+sha256sums=("2b988f5777976f09d08083f6bebf6e67219c4c4c183c1f33033fb7e5e5eacafb"
+            "264728b1e4f7f7509fde76b6049c93033aa813ae6324f37609ff95db8c9e8959"
+)
 license=("CDDL")
 depends=("zfs-utils=${pkgver}" "lsb-release" "dkms")
 provides=("zfs" "zfs-headers" "spl" "spl-headers")
 groups=("archzfs-dkms")
 conflicts=("zfs" "zfs-headers" "spl" "spl-headers")
 replaces=("spl-dkms")
+prepare() {
+    cd "${srcdir}/zfs-${pkgver}"
+    patch -Np1 -i "${srcdir}/linux-5.8-compat-__vmalloc.patch"
+}
 build() {
     cd "${srcdir}/zfs-${pkgver}"
diff --git a/linux-5.8-compat-__vmalloc.patch b/linux-5.8-compat-__vmalloc.patch
new file mode 100644
index 0000000..b73e9fc
--- /dev/null
+++ b/linux-5.8-compat-__vmalloc.patch
@@ -0,0 +1,209 @@
+diff --git a/config/kernel-kmem.m4 b/config/kernel-kmem.m4
+new file mode 100644
+index 0000000..43f9e72
+--- /dev/null
++++ b/config/kernel-kmem.m4
+@@ -0,0 +1,108 @@
++dnl #
++dnl # Enabled by default it provides a minimal level of memory tracking.
++dnl # A total count of bytes allocated is kept for each alloc and free.
++dnl # Then at module unload time a report to the console will be printed
++dnl # if memory was leaked.
++dnl #
++AC_DEFUN([SPL_AC_DEBUG_KMEM], [
++	AC_ARG_ENABLE([debug-kmem],
++		[AS_HELP_STRING([--enable-debug-kmem],
++		[Enable basic kmem accounting @<:@default=no@:>@])],
++		[],
++		[enable_debug_kmem=no])
++
++	AS_IF([test "x$enable_debug_kmem" = xyes],
++	[
++		KERNELCPPFLAGS="${KERNELCPPFLAGS} -DDEBUG_KMEM"
++		DEBUG_KMEM="_with_debug_kmem"
++		AC_DEFINE([DEBUG_KMEM], [1],
++		[Define to 1 to enable basic kmem accounting])
++	], [
++		DEBUG_KMEM="_without_debug_kmem"
++	])
++
++	AC_SUBST(DEBUG_KMEM)
++	AC_MSG_CHECKING([whether basic kmem accounting is enabled])
++	AC_MSG_RESULT([$enable_debug_kmem])
++])
++
++dnl #
++dnl # Disabled by default it provides detailed memory tracking. This
++dnl # feature also requires --enable-debug-kmem to be set. When enabled
++dnl # not only will total bytes be tracked but also the location of every
++dnl # alloc and free. When the SPL module is unloaded a list of all leaked
++dnl # addresses and where they were allocated will be dumped to the console.
++dnl # Enabling this feature has a significant impact on performance but it
++dnl # makes finding memory leaks pretty straight forward.
++dnl #
++AC_DEFUN([SPL_AC_DEBUG_KMEM_TRACKING], [
++	AC_ARG_ENABLE([debug-kmem-tracking],
++		[AS_HELP_STRING([--enable-debug-kmem-tracking],
++		[Enable detailed kmem tracking @<:@default=no@:>@])],
++		[],
++		[enable_debug_kmem_tracking=no])
++
++	AS_IF([test "x$enable_debug_kmem_tracking" = xyes],
++	[
++		KERNELCPPFLAGS="${KERNELCPPFLAGS} -DDEBUG_KMEM_TRACKING"
++		DEBUG_KMEM_TRACKING="_with_debug_kmem_tracking"
++		AC_DEFINE([DEBUG_KMEM_TRACKING], [1],
++		[Define to 1 to enable detailed kmem tracking])
++	], [
++		DEBUG_KMEM_TRACKING="_without_debug_kmem_tracking"
++	])
++
++	AC_SUBST(DEBUG_KMEM_TRACKING)
++	AC_MSG_CHECKING([whether detailed kmem tracking is enabled])
++	AC_MSG_RESULT([$enable_debug_kmem_tracking])
++])
++
++dnl #
++dnl # 4.12 API,
++dnl # Added kvmalloc allocation strategy
++dnl #
++AC_DEFUN([ZFS_AC_KERNEL_SRC_KVMALLOC], [
++	ZFS_LINUX_TEST_SRC([kvmalloc], [
++		#include <linux/mm.h>
++	],[
++		void *p __attribute__ ((unused));
++
++		p = kvmalloc(0, GFP_KERNEL);
++	])
++])
++
++AC_DEFUN([ZFS_AC_KERNEL_KVMALLOC], [
++	AC_MSG_CHECKING([whether kvmalloc(ptr, flags) is available])
++	ZFS_LINUX_TEST_RESULT([kvmalloc], [
++		AC_MSG_RESULT(yes)
++		AC_DEFINE(HAVE_KVMALLOC, 1, [kvmalloc exists])
++	],[
++		AC_MSG_RESULT(no)
++	])
++])
++
++dnl #
++dnl # 5.8 API,
++dnl # __vmalloc PAGE_KERNEL removal
++dnl #
++AC_DEFUN([ZFS_AC_KERNEL_SRC_VMALLOC_PAGE_KERNEL], [
++	ZFS_LINUX_TEST_SRC([__vmalloc], [
++		#include <linux/mm.h>
++		#include <linux/vmalloc.h>
++	],[
++		void *p __attribute__ ((unused));
++
++		p = __vmalloc(0, GFP_KERNEL, PAGE_KERNEL);
++	])
++])
++
++AC_DEFUN([ZFS_AC_KERNEL_VMALLOC_PAGE_KERNEL], [
++	AC_MSG_CHECKING([whether __vmalloc(ptr, flags, pageflags) is available])
++	ZFS_LINUX_TEST_RESULT([__vmalloc], [
++		AC_MSG_RESULT(yes)
++		AC_DEFINE(HAVE_VMALLOC_PAGE_KERNEL, 1, [__vmalloc page flags exists])
++	],[
++		AC_MSG_RESULT(no)
++	])
++])
++-
+\ No newline at end of file
+diff --git a/config/kernel.m4 b/config/kernel.m4
+index b67fcef..23edfdc 100644
+--- a/config/kernel.m4
++++ b/config/kernel.m4
+@@ -45,6 +45,7 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_SRC], [
+ 	ZFS_AC_KERNEL_SRC_SCHED
+ 	ZFS_AC_KERNEL_SRC_USLEEP_RANGE
+ 	ZFS_AC_KERNEL_SRC_KMEM_CACHE
++	ZFS_AC_KERNEL_SRC_VMALLOC_PAGE_KERNEL
+ 	ZFS_AC_KERNEL_SRC_WAIT
+ 	ZFS_AC_KERNEL_SRC_INODE_TIMES
+ 	ZFS_AC_KERNEL_SRC_INODE_LOCK
+@@ -163,6 +164,7 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_RESULT], [
+ 	ZFS_AC_KERNEL_SCHED
+ 	ZFS_AC_KERNEL_USLEEP_RANGE
+ 	ZFS_AC_KERNEL_KMEM_CACHE
++	ZFS_AC_KERNEL_VMALLOC_PAGE_KERNEL
+ 	ZFS_AC_KERNEL_WAIT
+ 	ZFS_AC_KERNEL_INODE_TIMES
+ 	ZFS_AC_KERNEL_INODE_LOCK
+diff --git a/include/spl/sys/kmem.h b/include/spl/sys/kmem.h
+index 72d3a77..62bb36e 100644
+--- a/include/spl/sys/kmem.h
++++ b/include/spl/sys/kmem.h
+@@ -169,6 +169,15 @@ extern void *spl_kmem_alloc(size_t sz, int fl, const char *func, int line);
+ extern void *spl_kmem_zalloc(size_t sz, int fl, const char *func, int line);
+ extern void spl_kmem_free(const void *ptr, size_t sz);
+ 
++/*
++ * 5.8 API change, pgprot_t argument removed.
++ */
++#ifdef HAVE_VMALLOC_PAGE_KERNEL
++#define	spl_vmalloc(size, flags)	__vmalloc(size, flags, PAGE_KERNEL)
++#else
++#define	spl_vmalloc(size, flags)	__vmalloc(size, flags)
++#endif
++
+ /*
+  * The following functions are only available for internal use.
+ */ +diff --git a/module/spl/spl-kmem-cache.c b/module/spl/spl-kmem-cache.c +index d71b4b3..4866b29 100644 +--- a/module/spl/spl-kmem-cache.c ++++ b/module/spl/spl-kmem-cache.c +@@ -203,7 +203,7 @@ kv_alloc(spl_kmem_cache_t *skc, int size, int flags) + ASSERT(ISP2(size)); + ptr = (void *)__get_free_pages(lflags, get_order(size)); + } else { +- ptr = __vmalloc(size, lflags | __GFP_HIGHMEM, PAGE_KERNEL); ++ ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM); + } + + /* Resulting allocated memory will be page aligned */ +@@ -1242,7 +1242,7 @@ spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj) + * allocation. + * + * However, this can't be applied to KVM_VMEM due to a bug that +- * __vmalloc() doesn't honor gfp flags in page table allocation. ++ * spl_vmalloc() doesn't honor gfp flags in page table allocation. + */ + if (!(skc->skc_flags & KMC_VMEM)) { + rc = __spl_cache_grow(skc, flags | KM_NOSLEEP); +diff --git a/module/spl/spl-kmem.c b/module/spl/spl-kmem.c +index cee69ad..c1ddb06 100644 +--- a/module/spl/spl-kmem.c ++++ b/module/spl/spl-kmem.c +@@ -172,7 +172,7 @@ spl_kmem_alloc_impl(size_t size, int flags, int node) + * kmem_zalloc() callers. + * + * For vmem_alloc() and vmem_zalloc() callers it is permissible +- * to use __vmalloc(). However, in general use of __vmalloc() ++ * to use spl_vmalloc(). However, in general use of spl_vmalloc() + * is strongly discouraged because a global lock must be + * acquired. Contention on this lock can significantly + * impact performance so frequently manipulating the virtual +@@ -180,8 +180,7 @@ spl_kmem_alloc_impl(size_t size, int flags, int node) + */ + if ((size > spl_kmem_alloc_max) || use_vmem) { + if (flags & KM_VMEM) { +- ptr = __vmalloc(size, lflags | __GFP_HIGHMEM, +- PAGE_KERNEL); ++ ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM); + } else { + return (NULL); + } +@@ -194,7 +193,7 @@ spl_kmem_alloc_impl(size_t size, int flags, int node) + + /* + * For vmem_alloc() and vmem_zalloc() callers retry immediately +- * using __vmalloc() which is unlikely to fail. ++ * using spl_vmalloc() which is unlikely to fail. + */ + if ((flags & KM_VMEM) && (use_vmem == 0)) { + use_vmem = 1;
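
Note on the approach (illustration only, not part of the commit above): Linux 5.8 removed the
pgprot_t argument from __vmalloc(), so the backported upstream change probes for the old
three-argument form at configure time (the ZFS_AC_KERNEL_VMALLOC_PAGE_KERNEL check defines
HAVE_VMALLOC_PAGE_KERNEL when that signature still compiles) and routes every caller through
the spl_vmalloc() macro added in the kmem.h hunk. The sketch below restates that shim as one
self-contained C fragment; example_alloc() is a hypothetical caller added purely for
illustration and does not appear in the patch.

/*
 * Minimal sketch of the spl_vmalloc() compatibility shim, assuming
 * HAVE_VMALLOC_PAGE_KERNEL is defined by the configure probe when the
 * pre-5.8 three-argument __vmalloc() signature compiles.
 */
#include <linux/mm.h>
#include <linux/vmalloc.h>

#ifdef HAVE_VMALLOC_PAGE_KERNEL
/* Kernels before 5.8: __vmalloc() still takes an explicit pgprot_t. */
#define	spl_vmalloc(size, flags)	__vmalloc(size, flags, PAGE_KERNEL)
#else
/* Kernels 5.8 and later: the pgprot_t argument is gone. */
#define	spl_vmalloc(size, flags)	__vmalloc(size, flags)
#endif

/* Hypothetical caller: a large, page-aligned allocation from vmalloc space. */
static inline void *
example_alloc(size_t size)
{
	return (spl_vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM));
}

Probing the signature at configure time, rather than keying on LINUX_VERSION_CODE, keeps the
shim correct on distribution kernels that backport the 5.8 change.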