Diffstat (limited to 'gnu/packages/patches')
-rw-r--r--  gnu/packages/patches/antlr3-3_1-fix-java8-compilation.patch | 35
-rw-r--r--  gnu/packages/patches/antlr3-3_3-fix-java8-compilation.patch | 35
-rw-r--r--  gnu/packages/patches/ceph-disable-cpu-optimizations.patch | 64
-rw-r--r--  gnu/packages/patches/ceph-disable-unittest-throttle.patch | 52
-rw-r--r--  gnu/packages/patches/ceph-rocksdb-compat.patch | 63
-rw-r--r--  gnu/packages/patches/dblatex-remove-multirow.patch | 30
-rw-r--r--  gnu/packages/patches/dovecot-CVE-2017-15132.patch | 36
-rw-r--r--  gnu/packages/patches/gd-CVE-2018-5711.patch | 61
-rw-r--r--  gnu/packages/patches/glibc-allow-kernel-2.6.32.patch | 46
-rw-r--r--  gnu/packages/patches/guile-linux-syscalls.patch | 50
-rw-r--r--  gnu/packages/patches/icecat-bug-1348660-pt5.patch | 727
-rw-r--r--  gnu/packages/patches/icecat-bug-1414945.patch | 73
-rw-r--r--  gnu/packages/patches/icecat-bug-1415133.patch | 40
-rw-r--r--  gnu/packages/patches/icecat-bug-1424373-pt2.patch | 183
-rw-r--r--  gnu/packages/patches/java-jeromq-fix-tests.patch | 253
-rw-r--r--  gnu/packages/patches/lrzip-CVE-2017-8842.patch | 23
-rw-r--r--  gnu/packages/patches/perl-gd-options-passthrough-and-fontconfig.patch | 53
-rw-r--r--  gnu/packages/patches/python-mox3-python3.6-compat.patch | 43
-rw-r--r--  gnu/packages/patches/python-parse-too-many-fields.patch | 52
-rw-r--r--  gnu/packages/patches/thefuck-test-environ.patch | 20
-rw-r--r--  gnu/packages/patches/wavpack-CVE-2018-6767.patch | 119
-rw-r--r--  gnu/packages/patches/wxmaxima-do-not-use-old-gnuplot-parameters.patch | 26
-rw-r--r--  gnu/packages/patches/zsh-CVE-2018-7548.patch | 48
-rw-r--r--  gnu/packages/patches/zsh-CVE-2018-7549.patch | 56
24 files changed, 891 insertions(+), 1297 deletions(-)
diff --git a/gnu/packages/patches/antlr3-3_1-fix-java8-compilation.patch b/gnu/packages/patches/antlr3-3_1-fix-java8-compilation.patch
new file mode 100644
index 0000000000..0c4deb024b
--- /dev/null
+++ b/gnu/packages/patches/antlr3-3_1-fix-java8-compilation.patch
@@ -0,0 +1,35 @@
+Based on the upstream fix for the Java 8 compilation issue.
+This is a simplified patch; the upstream version does not
+apply to this source tree.
+
+In Java 8 it is an error to pass null to removeAll, which
+results in a NullPointerException, whereas the Java 7
+behaviour was to return the list unmodified.
+
+From db2a350c6d90efaa8dde949fa76005c2c5af45c4 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?G=C3=A1bor=20Boskovits?= <boskovits@gmail.com>
+Date: Fri, 5 Jan 2018 17:05:31 +0100
+Subject: [PATCH] Fix java8 compilation.
+
+---
+ src/org/antlr/tool/CompositeGrammar.java | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/src/org/antlr/tool/CompositeGrammar.java b/src/org/antlr/tool/CompositeGrammar.java
+index f1408e7..7e02431 100644
+--- a/src/org/antlr/tool/CompositeGrammar.java
++++ b/src/org/antlr/tool/CompositeGrammar.java
+@@ -218,7 +218,9 @@ public class CompositeGrammar {
+ public List<Grammar> getIndirectDelegates(Grammar g) {
+ List<Grammar> direct = getDirectDelegates(g);
+ List<Grammar> delegates = getDelegates(g);
+- delegates.removeAll(direct);
++ if (direct != null) {
++ delegates.removeAll(direct);
++ }
+ return delegates;
+ }
+
+--
+2.15.1
+
diff --git a/gnu/packages/patches/antlr3-3_3-fix-java8-compilation.patch b/gnu/packages/patches/antlr3-3_3-fix-java8-compilation.patch
new file mode 100644
index 0000000000..a7d6be9b6b
--- /dev/null
+++ b/gnu/packages/patches/antlr3-3_3-fix-java8-compilation.patch
@@ -0,0 +1,35 @@
+Based on the upstream fix for the Java 8 compilation issue.
+This is a simplified patch; the upstream version does not
+apply to this source tree.
+
+In Java 8 it is an error to pass null to removeAll, which
+results in a NullPointerException, whereas the Java 7
+behaviour was to return the list unmodified.
+
+From 43867d50c05d1c06ab7220eb974a8874ae10c308 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?G=C3=A1bor=20Boskovits?= <boskovits@gmail.com>
+Date: Fri, 5 Jan 2018 19:08:24 +0100
+Subject: [PATCH] Fix java8 complilation error.
+
+---
+ tool/src/main/java/org/antlr/tool/CompositeGrammar.java | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/tool/src/main/java/org/antlr/tool/CompositeGrammar.java b/tool/src/main/java/org/antlr/tool/CompositeGrammar.java
+index f34ea73..63740a6 100644
+--- a/tool/src/main/java/org/antlr/tool/CompositeGrammar.java
++++ b/tool/src/main/java/org/antlr/tool/CompositeGrammar.java
+@@ -226,7 +226,9 @@ public class CompositeGrammar {
+ public List<Grammar> getIndirectDelegates(Grammar g) {
+ List<Grammar> direct = getDirectDelegates(g);
+ List<Grammar> delegates = getDelegates(g);
+- delegates.removeAll(direct);
++ if(direct != null) {
++ delegates.removeAll(direct);
++ }
+ return delegates;
+ }
+
+--
+2.15.1
+
diff --git a/gnu/packages/patches/ceph-disable-cpu-optimizations.patch b/gnu/packages/patches/ceph-disable-cpu-optimizations.patch
index f33eb629d6..421b1d4fe1 100644
--- a/gnu/packages/patches/ceph-disable-cpu-optimizations.patch
+++ b/gnu/packages/patches/ceph-disable-cpu-optimizations.patch
@@ -1,5 +1,4 @@
-Disable CPU optimizations not supported by all x86_64 systems. Also
-don't add anything for i686.
+Disable CPU optimizations not supported by all x86_64 systems.
--- a/cmake/modules/SIMDExt.cmake 2017-03-23 22:22:58.254071694 +0100
+++ b/cmake/modules/SIMDExt.cmake 2017-03-23 22:23:22.446848845 +0100
@@ -15,39 +14,30 @@ don't add anything for i686.
#
# SIMD_COMPILE_FLAGS
#
-@@ -73,7 +68,7 @@
- set(SIMD_COMPILE_FLAGS "${SIMD_COMPILE_FLAGS} -mfpu=neon")
- endif()
-
--elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "i386|i686|amd64|x86_64|AMD64")
-+elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "amd64|x86_64|AMD64")
- set(HAVE_INTEL 1)
- CHECK_C_COMPILER_FLAG(-msse HAVE_INTEL_SSE)
- if(HAVE_INTEL_SSE)
-@@ -83,26 +78,6 @@
- if(HAVE_INTEL_SSE2)
- set(SIMD_COMPILE_FLAGS "${SIMD_COMPILE_FLAGS} -msse2")
- endif()
-- CHECK_C_COMPILER_FLAG(-msse3 HAVE_INTEL_SSE3)
-- if(HAVE_INTEL_SSE3)
-- set(SIMD_COMPILE_FLAGS "${SIMD_COMPILE_FLAGS} -msse3")
-- endif()
-- CHECK_C_COMPILER_FLAG(-mssse3 HAVE_INTEL_SSSE3)
-- if(HAVE_INTEL_SSSE3)
-- set(SIMD_COMPILE_FLAGS "${SIMD_COMPILE_FLAGS} -mssse3")
-- endif()
-- CHECK_C_COMPILER_FLAG(-mpclmul HAVE_INTEL_PCLMUL)
-- if(HAVE_INTEL_PCLMUL)
-- set(SIMD_COMPILE_FLAGS "${SIMD_COMPILE_FLAGS} -mpclmul")
-- endif()
-- CHECK_C_COMPILER_FLAG(-msse4.1 HAVE_INTEL_SSE4_1)
-- if(HAVE_INTEL_SSE4_1)
-- set(SIMD_COMPILE_FLAGS "${SIMD_COMPILE_FLAGS} -msse4.1")
-- endif()
-- CHECK_C_COMPILER_FLAG(-msse4.2 HAVE_INTEL_SSE4_2)
-- if(HAVE_INTEL_SSE4_2)
-- set(SIMD_COMPILE_FLAGS "${SIMD_COMPILE_FLAGS} -msse4.2")
-- endif()
+@@ -85,26 +80,6 @@
+ if(HAVE_INTEL_SSE2)
+ set(SIMD_COMPILE_FLAGS "${SIMD_COMPILE_FLAGS} -msse2")
+ endif()
+- CHECK_C_COMPILER_FLAG(-msse3 HAVE_INTEL_SSE3)
+- if(HAVE_INTEL_SSE3)
+- set(SIMD_COMPILE_FLAGS "${SIMD_COMPILE_FLAGS} -msse3")
+- endif()
+- CHECK_C_COMPILER_FLAG(-mssse3 HAVE_INTEL_SSSE3)
+- if(HAVE_INTEL_SSSE3)
+- set(SIMD_COMPILE_FLAGS "${SIMD_COMPILE_FLAGS} -mssse3")
+- endif()
+- CHECK_C_COMPILER_FLAG(-mpclmul HAVE_INTEL_PCLMUL)
+- if(HAVE_INTEL_PCLMUL)
+- set(SIMD_COMPILE_FLAGS "${SIMD_COMPILE_FLAGS} -mpclmul")
+- endif()
+- CHECK_C_COMPILER_FLAG(-msse4.1 HAVE_INTEL_SSE4_1)
+- if(HAVE_INTEL_SSE4_1)
+- set(SIMD_COMPILE_FLAGS "${SIMD_COMPILE_FLAGS} -msse4.1")
+- endif()
+- CHECK_C_COMPILER_FLAG(-msse4.2 HAVE_INTEL_SSE4_2)
+- if(HAVE_INTEL_SSE4_2)
+- set(SIMD_COMPILE_FLAGS "${SIMD_COMPILE_FLAGS} -msse4.2")
+- endif()
+ endif(CMAKE_SYSTEM_PROCESSOR MATCHES "amd64|x86_64|AMD64")
+ endif(CMAKE_SYSTEM_PROCESSOR MATCHES "i686|amd64|x86_64|AMD64")
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "(powerpc|ppc)64le")
- set(HAVE_PPC64LE 1)
- message(STATUS " we are ppc64le")
diff --git a/gnu/packages/patches/ceph-disable-unittest-throttle.patch b/gnu/packages/patches/ceph-disable-unittest-throttle.patch
deleted file mode 100644
index 08ae5f9e24..0000000000
--- a/gnu/packages/patches/ceph-disable-unittest-throttle.patch
+++ /dev/null
@@ -1,52 +0,0 @@
-FIXME: This test broke after the gcc-5/glibc-2.25 core-updates merge.
-Not sure what's going on here, it hangs after spawning the first thread.
-
-diff --git a/src/test/common/Throttle.cc b/src/test/common/Throttle.cc
-index 5b6d73217d..40a477b2a3 100644
---- a/src/test/common/Throttle.cc
-+++ b/src/test/common/Throttle.cc
-@@ -216,44 +216,6 @@ TEST_F(ThrottleTest, wait) {
- } while(!waited);
- }
-
--TEST_F(ThrottleTest, destructor) {
-- Thread_get *t;
-- {
-- int64_t throttle_max = 10;
-- Throttle *throttle = new Throttle(g_ceph_context, "throttle", throttle_max);
--
-- ASSERT_FALSE(throttle->get(5));
--
-- t = new Thread_get(*throttle, 7);
-- t->create("t_throttle");
-- bool blocked;
-- useconds_t delay = 1;
-- do {
-- usleep(delay);
-- if (throttle->get_or_fail(1)) {
-- throttle->put(1);
-- blocked = false;
-- } else {
-- blocked = true;
-- }
-- delay *= 2;
-- } while(!blocked);
-- delete throttle;
-- }
--
-- { //
-- // The thread is left hanging, otherwise it will abort().
-- // Deleting the Throttle on which it is waiting creates a
-- // inconsistency that will be detected: the Throttle object that
-- // it references no longer exists.
-- //
-- pthread_t id = t->get_thread_id();
-- ASSERT_EQ(pthread_kill(id, 0), 0);
-- delete t;
-- ASSERT_EQ(pthread_kill(id, 0), 0);
-- }
--}
--
- std::pair<double, std::chrono::duration<double> > test_backoff(
- double low_threshhold,
- double high_threshhold,
diff --git a/gnu/packages/patches/ceph-rocksdb-compat.patch b/gnu/packages/patches/ceph-rocksdb-compat.patch
new file mode 100644
index 0000000000..1a3c6b0b23
--- /dev/null
+++ b/gnu/packages/patches/ceph-rocksdb-compat.patch
@@ -0,0 +1,63 @@
+Fix compatibility with newer versions of RocksDB.
+
+Adapted from this upstream patch, with some additional changes for 12.2:
+https://github.com/ceph/ceph/commit/9d73a7121fdb1ae87cb1aa6f7d9d7a13f329ae68
+
+diff --git a/src/kv/RocksDBStore.cc b/src/kv/RocksDBStore.cc
+index 8660afe1886d..bc0de79e23cb 100644
+--- a/src/kv/RocksDBStore.cc
++++ b/src/kv/RocksDBStore.cc
+@@ -505,7 +505,7 @@
+ // considering performance overhead, default is disabled
+ if (g_conf->rocksdb_perf) {
+ rocksdb::SetPerfLevel(rocksdb::PerfLevel::kEnableTimeExceptForMutex);
+- rocksdb::perf_context.Reset();
++ rocksdb::get_perf_context()->Reset();
+ }
+
+ RocksDBTransactionImpl * _t =
+@@ -532,13 +532,13 @@
+ utime_t write_wal_time;
+ utime_t write_pre_and_post_process_time;
+ write_wal_time.set_from_double(
+- static_cast<double>(rocksdb::perf_context.write_wal_time)/1000000000);
++ static_cast<double>(rocksdb::get_perf_context()->write_wal_time)/1000000000);
+ write_memtable_time.set_from_double(
+- static_cast<double>(rocksdb::perf_context.write_memtable_time)/1000000000);
++ static_cast<double>(rocksdb::get_perf_context()->write_memtable_time)/1000000000);
+ write_delay_time.set_from_double(
+- static_cast<double>(rocksdb::perf_context.write_delay_time)/1000000000);
++ static_cast<double>(rocksdb::get_perf_context()->write_delay_time)/1000000000);
+ write_pre_and_post_process_time.set_from_double(
+- static_cast<double>(rocksdb::perf_context.write_pre_and_post_process_time)/1000000000);
++ static_cast<double>(rocksdb::get_perf_context()->write_pre_and_post_process_time)/1000000000);
+ logger->tinc(l_rocksdb_write_memtable_time, write_memtable_time);
+ logger->tinc(l_rocksdb_write_delay_time, write_delay_time);
+ logger->tinc(l_rocksdb_write_wal_time, write_wal_time);
+@@ -558,7 +558,7 @@
+ // considering performance overhead, default is disabled
+ if (g_conf->rocksdb_perf) {
+ rocksdb::SetPerfLevel(rocksdb::PerfLevel::kEnableTimeExceptForMutex);
+- rocksdb::perf_context.Reset();
++ rocksdb::get_perf_context()->Reset();
+ }
+
+ RocksDBTransactionImpl * _t =
+@@ -586,13 +586,13 @@
+ utime_t write_wal_time;
+ utime_t write_pre_and_post_process_time;
+ write_wal_time.set_from_double(
+- static_cast<double>(rocksdb::perf_context.write_wal_time)/1000000000);
++ static_cast<double>(rocksdb::get_perf_context()->write_wal_time)/1000000000);
+ write_memtable_time.set_from_double(
+- static_cast<double>(rocksdb::perf_context.write_memtable_time)/1000000000);
++ static_cast<double>(rocksdb::get_perf_context()->write_memtable_time)/1000000000);
+ write_delay_time.set_from_double(
+- static_cast<double>(rocksdb::perf_context.write_delay_time)/1000000000);
++ static_cast<double>(rocksdb::get_perf_context()->write_delay_time)/1000000000);
+ write_pre_and_post_process_time.set_from_double(
+- static_cast<double>(rocksdb::perf_context.write_pre_and_post_process_time)/1000000000);
++ static_cast<double>(rocksdb::get_perf_context()->write_pre_and_post_process_time)/1000000000);
+ logger->tinc(l_rocksdb_write_memtable_time, write_memtable_time);
+ logger->tinc(l_rocksdb_write_delay_time, write_delay_time);
+ logger->tinc(l_rocksdb_write_wal_time, write_wal_time);
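For reference, the API change this patch adapts to is RocksDB's replacement of
the global rocksdb::perf_context object with a rocksdb::get_perf_context()
accessor. The following is a minimal sketch of the new-style usage (standalone,
not Ceph code; it assumes RocksDB headers recent enough to provide the
accessor):

    // Sketch only: collect RocksDB perf counters via the accessor that newer
    // RocksDB releases provide in place of the old global rocksdb::perf_context.
    #include <cstdio>
    #include <rocksdb/perf_context.h>
    #include <rocksdb/perf_level.h>

    void report_write_wal_time()
    {
      rocksdb::SetPerfLevel(rocksdb::PerfLevel::kEnableTimeExceptForMutex);
      rocksdb::get_perf_context()->Reset();     // was: rocksdb::perf_context.Reset()

      // ... perform some writes through a rocksdb::DB handle here ...

      // Counters are reported in nanoseconds; Ceph converts them to seconds.
      double wal_seconds =
        static_cast<double>(rocksdb::get_perf_context()->write_wal_time) / 1000000000;
      std::printf("write_wal_time: %f s\n", wal_seconds);
    }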
diff --git a/gnu/packages/patches/dblatex-remove-multirow.patch b/gnu/packages/patches/dblatex-remove-multirow.patch
deleted file mode 100644
index 62d1c64e08..0000000000
--- a/gnu/packages/patches/dblatex-remove-multirow.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-This patch is needed to fix dblatex with newer versions of TeX Live.
-It was taken from https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=840189
-
---- a/latex/style/dbk_table.sty Sun Oct 02 23:21:03 2016 +0200
-+++ b/latex/style/dbk_table.sty Mon Oct 10 21:31:00 2016 +0200
-@@ -9,7 +9,6 @@
- \usepackage{longtable}
- \usepackage{lscape}
- \usepackage{colortbl}
--\usepackage{multirow}
- \usepackage{calc}
- \usepackage{hhline}
-
-@@ -21,16 +20,6 @@
- \expandafter[\expandafter3\expandafter]\expandafter{%
- \multicolumn{#1}{#2}{#3}}
-
--% Make \@xmultirow long
--\expandafter\long\expandafter\def%
--\expandafter\@xmultirow%
--\expandafter#\expandafter1%
--\expandafter[\expandafter#\expandafter2\expandafter]%
--\expandafter#\expandafter3%
--\expandafter[\expandafter#\expandafter4\expandafter]%
--\expandafter#\expandafter5%
--\expandafter{\@xmultirow{#1}[#2]{#3}[#4]{#5}}
--
- % For the newtbl code
- \newdimen\newtblstarfactor%
- \newdimen\newtblsparewidth%
diff --git a/gnu/packages/patches/dovecot-CVE-2017-15132.patch b/gnu/packages/patches/dovecot-CVE-2017-15132.patch
deleted file mode 100644
index 32666b8557..0000000000
--- a/gnu/packages/patches/dovecot-CVE-2017-15132.patch
+++ /dev/null
@@ -1,36 +0,0 @@
-Fix CVE-2017-15132:
-
-https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-15132
-
-Patch copied from upstream source repository:
-
-https://github.com/dovecot/core/commit/1a29ed2f96da1be22fa5a4d96c7583aa81b8b060
-
-From 1a29ed2f96da1be22fa5a4d96c7583aa81b8b060 Mon Sep 17 00:00:00 2001
-From: Timo Sirainen <timo.sirainen@dovecot.fi>
-Date: Mon, 18 Dec 2017 16:50:51 +0200
-Subject: [PATCH] lib-auth: Fix memory leak in auth_client_request_abort()
-
-This caused memory leaks when authentication was aborted. For example
-with IMAP:
-
-a AUTHENTICATE PLAIN
-*
-
-Broken by 9137c55411aa39d41c1e705ddc34d5bd26c65021
----
- src/lib-auth/auth-client-request.c | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/src/lib-auth/auth-client-request.c b/src/lib-auth/auth-client-request.c
-index 480fb42b30..046f7c307d 100644
---- a/src/lib-auth/auth-client-request.c
-+++ b/src/lib-auth/auth-client-request.c
-@@ -186,6 +186,7 @@ void auth_client_request_abort(struct auth_client_request **_request)
-
- auth_client_send_cancel(request->conn->client, request->id);
- call_callback(request, AUTH_REQUEST_STATUS_ABORT, NULL, NULL);
-+ pool_unref(&request->pool);
- }
-
- unsigned int auth_client_request_get_id(struct auth_client_request *request)
diff --git a/gnu/packages/patches/gd-CVE-2018-5711.patch b/gnu/packages/patches/gd-CVE-2018-5711.patch
new file mode 100644
index 0000000000..83b12cde63
--- /dev/null
+++ b/gnu/packages/patches/gd-CVE-2018-5711.patch
@@ -0,0 +1,61 @@
+This patch is adapted from commit a11f47475e6443b7f32d21f2271f28f417e2ac04 and
+fixes CVE-2018-5711.
+
+From a11f47475e6443b7f32d21f2271f28f417e2ac04 Mon Sep 17 00:00:00 2001
+From: "Christoph M. Becker" <cmbecker69@gmx.de>
+Date: Wed, 29 Nov 2017 19:37:38 +0100
+Subject: [PATCH] Fix #420: Potential infinite loop in gdImageCreateFromGifCtx
+
+Due to a signedness confusion in `GetCode_` a corrupt GIF file can
+trigger an infinite loop. Furthermore we make sure that a GIF without
+any palette entries is treated as invalid *after* open palette entries
+have been removed.
+
+CVE-2018-5711
+
+See also https://bugs.php.net/bug.php?id=75571.
+---
+ src/gd_gif_in.c | 12 ++++++------
+ 1 file changed, 38 insertions(+), 6 deletions(-)
+
+diff --git a/src/gd_gif_in.c b/src/gd_gif_in.c
+index daf26e7..0a8bd71 100644
+--- a/src/gd_gif_in.c
++++ b/src/gd_gif_in.c
+@@ -335,11 +335,6 @@ terminated:
+ return 0;
+ }
+
+- if(!im->colorsTotal) {
+- gdImageDestroy(im);
+- return 0;
+- }
+-
+ /* Check for open colors at the end, so
+ * we can reduce colorsTotal and ultimately
+ * BitsPerPixel */
+@@ -351,6 +346,11 @@ terminated:
+ }
+ }
+
++ if(!im->colorsTotal) {
++ gdImageDestroy(im);
++ return 0;
++ }
++
+ return im;
+ }
+
+@@ -447,7 +447,7 @@ static int
+ GetCode_(gdIOCtx *fd, CODE_STATIC_DATA *scd, int code_size, int flag, int *ZeroDataBlockP)
+ {
+ int i, j, ret;
+- unsigned char count;
++ int count;
+
+ if(flag) {
+ scd->curbit = 0;
+
+--
+2.13.6
+
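To make the signedness confusion concrete: the block-reading helper signals
failure with a negative value, but once that value is stored in an unsigned
char it becomes 255, so a non-positive error test can never fire and the
decoder keeps looping on a corrupt GIF. A self-contained illustration (not
libgd code):

    // -1 stored in an unsigned char wraps to 255, so an error check of the
    // form `count <= 0` never succeeds and the surrounding read loop spins.
    #include <iostream>

    static int failing_read() { return -1; }   // stand-in for a read that fails

    int main()
    {
      unsigned char narrow = failing_read();   // wraps to 255
      int           wide   = failing_read();   // keeps the -1 sentinel

      std::cout << "narrow <= 0 ? " << (narrow <= 0) << '\n';   // prints 0
      std::cout << "wide   <= 0 ? " << (wide   <= 0) << '\n';   // prints 1
      return 0;
    }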
diff --git a/gnu/packages/patches/glibc-allow-kernel-2.6.32.patch b/gnu/packages/patches/glibc-allow-kernel-2.6.32.patch
new file mode 100644
index 0000000000..c03807323f
--- /dev/null
+++ b/gnu/packages/patches/glibc-allow-kernel-2.6.32.patch
@@ -0,0 +1,46 @@
+This patch allows libc to be used with the heavily-patched kernel found
+on CentOS 6, which identifies itself as 2.6.32.
+
+See <https://lists.gnu.org/archive/html/guix-devel/2018-02/msg00392.html>.
+
+Patch taken from Nixpkgs.
+
+diff --git a/sysdeps/unix/sysv/linux/configure b/sysdeps/unix/sysv/linux/configure
+index cace758c01..38fe7fe0b0 100644
+--- a/sysdeps/unix/sysv/linux/configure
++++ b/sysdeps/unix/sysv/linux/configure
+@@ -69,7 +69,7 @@ fi
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for kernel header at least $minimum_kernel" >&5
+ $as_echo_n "checking for kernel header at least $minimum_kernel... " >&6; }
+ decnum=`echo "$minimum_kernel.0.0.0" | sed 's/\([0-9]*\)\.\([0-9]*\)\.\([0-9]*\).*/(\1 * 65536 + \2 * 256 + \3)/'`;
+-abinum=`echo "$minimum_kernel.0.0.0" | sed 's/\([0-9]*\)\.\([0-9]*\)\.\([0-9]*\).*/\1,\2,\3/'`;
++abinum=`echo "2.6.32.0.0.0" | sed 's/\([0-9]*\)\.\([0-9]*\)\.\([0-9]*\).*/\1,\2,\3/'`;
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h. */
+ #include <linux/version.h>
+diff --git a/sysdeps/unix/sysv/linux/configure.ac b/sysdeps/unix/sysv/linux/configure.ac
+index 13abda0a51..6abc12eaed 100644
+--- a/sysdeps/unix/sysv/linux/configure.ac
++++ b/sysdeps/unix/sysv/linux/configure.ac
+@@ -50,7 +50,7 @@ fi
+ AC_MSG_CHECKING(for kernel header at least $minimum_kernel)
+ changequote(,)dnl
+ decnum=`echo "$minimum_kernel.0.0.0" | sed 's/\([0-9]*\)\.\([0-9]*\)\.\([0-9]*\).*/(\1 * 65536 + \2 * 256 + \3)/'`;
+-abinum=`echo "$minimum_kernel.0.0.0" | sed 's/\([0-9]*\)\.\([0-9]*\)\.\([0-9]*\).*/\1,\2,\3/'`;
++abinum=`echo "2.6.32.0.0.0" | sed 's/\([0-9]*\)\.\([0-9]*\)\.\([0-9]*\).*/\1,\2,\3/'`;
+ changequote([,])dnl
+ AC_TRY_COMPILE([#include <linux/version.h>
+ #if LINUX_VERSION_CODE < $decnum
+diff --git a/sysdeps/unix/sysv/linux/dl-osinfo.h b/sysdeps/unix/sysv/linux/dl-osinfo.h
+index 823cd8224d..482caaeeec 100644
+--- a/sysdeps/unix/sysv/linux/dl-osinfo.h
++++ b/sysdeps/unix/sysv/linux/dl-osinfo.h
+@@ -39,7 +39,7 @@
+ GLRO(dl_osversion) = version; \
+ \
+ /* Now we can test with the required version. */ \
+- if (__LINUX_KERNEL_VERSION > 0 && version < __LINUX_KERNEL_VERSION) \
++ if (__LINUX_KERNEL_VERSION > 0 && version < __LINUX_KERNEL_VERSION && version != 0x020620) \
+ /* Not sufficent. */ \
+ FATAL ("FATAL: kernel too old\n"); \
+ } \
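The magic constant 0x020620 in the dl-osinfo.h hunk is simply 2.6.32 packed
with the major*65536 + minor*256 + patch formula used by the configure
snippets above (the LINUX_VERSION_CODE encoding). A quick standalone check:

    // The kernel version encoding used above: 2.6.32 packs to 0x020620, the
    // value the patch exempts from the "kernel too old" check.
    #include <cstdio>

    constexpr unsigned version_code(unsigned major, unsigned minor, unsigned patch)
    {
      return major * 65536 + minor * 256 + patch;
    }

    int main()
    {
      static_assert(version_code(2, 6, 32) == 0x020620, "2.6.32 encodes to 0x020620");
      std::printf("2.6.32 -> %#x\n", version_code(2, 6, 32));
      return 0;
    }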
diff --git a/gnu/packages/patches/guile-linux-syscalls.patch b/gnu/packages/patches/guile-linux-syscalls.patch
index 69970a3e60..12cddff47b 100644
--- a/gnu/packages/patches/guile-linux-syscalls.patch
+++ b/gnu/packages/patches/guile-linux-syscalls.patch
@@ -3,17 +3,21 @@ This patch adds bindings to Linux syscalls for which glibc has symbols.
Using the FFI would have been nice, but that's not an option when using
a statically-linked Guile in an initrd that doesn't have libc.so around.
---- guile-2.0.11/libguile/posix.c.orig 2014-02-28 15:01:27.000000000 -0500
-+++ guile-2.0.11/libguile/posix.c 2015-06-21 14:28:23.624251038 -0400
-@@ -2245,6 +2245,295 @@
+diff --git a/libguile/posix.c b/libguile/posix.c
+index b0fcad5fd..1343186e3 100644
+--- a/libguile/posix.c
++++ b/libguile/posix.c
+@@ -2341,6 +2341,335 @@ scm_init_popen (void)
}
- #endif
+ #endif /* HAVE_START_CHILD */
+
+/* Linux! */
+#ifdef __linux__
+
+#include <sys/mount.h>
++#include <sys/syscall.h>
++
+#include "libguile/foreign.h"
+#include "libguile/bytevectors.h"
+
@@ -91,6 +95,16 @@ a statically-linked Guile in an initrd that doesn't have libc.so around.
+ ARGS, a space-separated list of options. */
+extern long init_module (void *module, unsigned long len, const char *args);
+
++/* Load a kernel module from FD. FLAGS must be a bitwise or of
++ MODULE_INIT_* constants. The GNU libc doesn't provide a wrapper for
++ this one so we use 'syscall'. */
++static int
++finit_module (int fd, const char *args, int flags)
++{
++ return syscall (SYS_finit_module, fd, args, flags);
++}
++
++
+SCM_DEFINE (scm_load_linux_module, "load-linux-module", 1, 1, 0,
+ (SCM data, SCM options),
+ "Load the Linux kernel module whose contents are in bytevector "
@@ -121,6 +135,34 @@ a statically-linked Guile in an initrd that doesn't have libc.so around.
+}
+#undef FUNC_NAME
+
++SCM_DEFINE (scm_load_linux_module_fd, "load-linux-module/fd", 1, 2, 0,
++ (SCM fd, SCM options, SCM flags),
++ "Load the Linux kernel module from the file at FD, "
++ "with the arguments from the OPTIONS string, and "
++ "optionally the given FLAGS.")
++#define FUNC_NAME s_scm_load_linux_module_fd
++{
++ long err;
++ int c_fd, c_flags;
++ char *c_options;
++
++ c_fd = scm_to_int (fd);
++ c_options =
++ scm_to_locale_string (SCM_UNBNDP (options) ? scm_nullstr : options);
++ c_flags = SCM_UNBNDP (flags) ? 0 : scm_to_int (flags);
++
++ err = finit_module (c_fd, c_options, c_flags);
++
++ free (c_options);
++
++ if (err != 0)
++ SCM_SYSERROR;
++
++ return SCM_UNSPECIFIED;
++}
++#undef FUNC_NAME
++
++
+/* Rebooting, halting, and all that. */
+
+#include <sys/reboot.h>
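The finit_module binding above follows the usual pattern for syscalls that
glibc does not wrap: call them through syscall(2) with the SYS_* number. A
standalone sketch of that pattern (not part of the patch; it assumes a Linux
system whose headers define SYS_finit_module):

    // Sketch: invoking a syscall that has no glibc wrapper via syscall(2).
    #include <cstdio>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int my_finit_module(int fd, const char *args, int flags)
    {
      return syscall(SYS_finit_module, fd, args, flags);
    }

    int main()
    {
      // Called with an invalid fd just to show the plumbing; expect EBADF,
      // or EPERM when not running as root.
      if (my_finit_module(-1, "", 0) != 0)
        std::perror("finit_module");
      return 0;
    }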
diff --git a/gnu/packages/patches/icecat-bug-1348660-pt5.patch b/gnu/packages/patches/icecat-bug-1348660-pt5.patch
deleted file mode 100644
index b0bede3b38..0000000000
--- a/gnu/packages/patches/icecat-bug-1348660-pt5.patch
+++ /dev/null
@@ -1,727 +0,0 @@
-This is a subset of the following changeset from upstream:
- https://hg.mozilla.org/releases/mozilla-esr52/raw-rev/5e07bd37ac61
-
-This excludes all test code from that changeset, including a GIT binary patch
-that is not supported by Guix's patch-and-repack mechanism.
-
-# HG changeset patch
-# User Jan Varga <jan.varga@gmail.com>
-# Date 1490181244 -3600
-# Node ID 5e07bd37ac6162f218dfe03ed83b5dcca9653b68
-# Parent 28934912eede9e14895baf4af7575ca9639f59ee
-Bug 1348660 - Part 5: Implement a method to retrieve usage data for all origins at once. r=btseng, a=lizzard
-
-diff --git a/dom/quota/ActorsChild.cpp b/dom/quota/ActorsChild.cpp
---- a/dom/quota/ActorsChild.cpp
-+++ b/dom/quota/ActorsChild.cpp
-@@ -137,16 +137,52 @@ QuotaUsageRequestChild::HandleResponse(n
- AssertIsOnOwningThread();
- MOZ_ASSERT(NS_FAILED(aResponse));
- MOZ_ASSERT(mRequest);
-
- mRequest->SetError(aResponse);
- }
-
- void
-+QuotaUsageRequestChild::HandleResponse(const nsTArray<OriginUsage>& aResponse)
-+{
-+ AssertIsOnOwningThread();
-+ MOZ_ASSERT(mRequest);
-+
-+ RefPtr<nsVariant> variant = new nsVariant();
-+
-+ if (aResponse.IsEmpty()) {
-+ variant->SetAsEmptyArray();
-+ } else {
-+ nsTArray<RefPtr<UsageResult>> usageResults;
-+
-+ const uint32_t count = aResponse.Length();
-+
-+ usageResults.SetCapacity(count);
-+
-+ for (uint32_t index = 0; index < count; index++) {
-+ auto& originUsage = aResponse[index];
-+
-+ RefPtr<UsageResult> usageResult = new UsageResult(originUsage.origin(),
-+ originUsage.persisted(),
-+ originUsage.usage());
-+
-+ usageResults.AppendElement(usageResult.forget());
-+ }
-+
-+ variant->SetAsArray(nsIDataType::VTYPE_INTERFACE_IS,
-+ &NS_GET_IID(nsIQuotaUsageResult),
-+ usageResults.Length(),
-+ static_cast<void*>(usageResults.Elements()));
-+ }
-+
-+ mRequest->SetResult(variant);
-+}
-+
-+void
- QuotaUsageRequestChild::HandleResponse(const OriginUsageResponse& aResponse)
- {
- AssertIsOnOwningThread();
- MOZ_ASSERT(mRequest);
-
- RefPtr<OriginUsageResult> result =
- new OriginUsageResult(aResponse.usage(),
- aResponse.fileUsage(),
-@@ -177,16 +213,20 @@ QuotaUsageRequestChild::Recv__delete__(c
- AssertIsOnOwningThread();
- MOZ_ASSERT(mRequest);
-
- switch (aResponse.type()) {
- case UsageRequestResponse::Tnsresult:
- HandleResponse(aResponse.get_nsresult());
- break;
-
-+ case UsageRequestResponse::TAllUsageResponse:
-+ HandleResponse(aResponse.get_AllUsageResponse().originUsages());
-+ break;
-+
- case UsageRequestResponse::TOriginUsageResponse:
- HandleResponse(aResponse.get_OriginUsageResponse());
- break;
-
- default:
- MOZ_CRASH("Unknown response type!");
- }
-
-diff --git a/dom/quota/ActorsChild.h b/dom/quota/ActorsChild.h
---- a/dom/quota/ActorsChild.h
-+++ b/dom/quota/ActorsChild.h
-@@ -93,16 +93,19 @@ private:
-
- // Only destroyed by QuotaChild.
- ~QuotaUsageRequestChild();
-
- void
- HandleResponse(nsresult aResponse);
-
- void
-+ HandleResponse(const nsTArray<OriginUsage>& aResponse);
-+
-+ void
- HandleResponse(const OriginUsageResponse& aResponse);
-
- // IPDL methods are only called by IPDL.
- virtual void
- ActorDestroy(ActorDestroyReason aWhy) override;
-
- virtual bool
- Recv__delete__(const UsageRequestResponse& aResponse) override;
-diff --git a/dom/quota/ActorsParent.cpp b/dom/quota/ActorsParent.cpp
---- a/dom/quota/ActorsParent.cpp
-+++ b/dom/quota/ActorsParent.cpp
-@@ -1039,16 +1039,42 @@ private:
- // IPDL methods.
- void
- ActorDestroy(ActorDestroyReason aWhy) override;
-
- bool
- RecvCancel() override;
- };
-
-+class GetUsageOp final
-+ : public QuotaUsageRequestBase
-+{
-+ nsTArray<OriginUsage> mOriginUsages;
-+ nsDataHashtable<nsCStringHashKey, uint32_t> mOriginUsagesIndex;
-+
-+ bool mGetAll;
-+
-+public:
-+ explicit GetUsageOp(const UsageRequestParams& aParams);
-+
-+private:
-+ ~GetUsageOp()
-+ { }
-+
-+ nsresult
-+ TraverseRepository(QuotaManager* aQuotaManager,
-+ PersistenceType aPersistenceType);
-+
-+ nsresult
-+ DoDirectoryWork(QuotaManager* aQuotaManager) override;
-+
-+ void
-+ GetResponse(UsageRequestResponse& aResponse) override;
-+};
-+
- class GetOriginUsageOp final
- : public QuotaUsageRequestBase
- {
- // If mGetGroupUsage is false, we use mUsageInfo to record the origin usage
- // and the file usage. Otherwise, we use it to record the group usage and the
- // limit.
- UsageInfo mUsageInfo;
-
-@@ -5693,16 +5719,20 @@ PQuotaUsageRequestParent*
- Quota::AllocPQuotaUsageRequestParent(const UsageRequestParams& aParams)
- {
- AssertIsOnBackgroundThread();
- MOZ_ASSERT(aParams.type() != UsageRequestParams::T__None);
-
- RefPtr<QuotaUsageRequestBase> actor;
-
- switch (aParams.type()) {
-+ case UsageRequestParams::TAllUsageParams:
-+ actor = new GetUsageOp(aParams);
-+ break;
-+
- case UsageRequestParams::TOriginUsageParams:
- actor = new GetOriginUsageOp(aParams);
- break;
-
- default:
- MOZ_CRASH("Should never get here!");
- }
-
-@@ -6033,16 +6063,189 @@ QuotaUsageRequestBase::RecvCancel()
- if (mCanceled.exchange(true)) {
- NS_WARNING("Canceled more than once?!");
- return false;
- }
-
- return true;
- }
-
-+GetUsageOp::GetUsageOp(const UsageRequestParams& aParams)
-+ : mGetAll(aParams.get_AllUsageParams().getAll())
-+{
-+ AssertIsOnOwningThread();
-+ MOZ_ASSERT(aParams.type() == UsageRequestParams::TAllUsageParams);
-+}
-+
-+nsresult
-+GetUsageOp::TraverseRepository(QuotaManager* aQuotaManager,
-+ PersistenceType aPersistenceType)
-+{
-+ AssertIsOnIOThread();
-+ MOZ_ASSERT(aQuotaManager);
-+
-+ nsresult rv;
-+
-+ nsCOMPtr<nsIFile> directory =
-+ do_CreateInstance(NS_LOCAL_FILE_CONTRACTID, &rv);
-+ if (NS_WARN_IF(NS_FAILED(rv))) {
-+ return rv;
-+ }
-+
-+ rv = directory->InitWithPath(aQuotaManager->GetStoragePath(aPersistenceType));
-+ if (NS_WARN_IF(NS_FAILED(rv))) {
-+ return rv;
-+ }
-+
-+ bool exists;
-+ rv = directory->Exists(&exists);
-+ if (NS_WARN_IF(NS_FAILED(rv))) {
-+ return rv;
-+ }
-+
-+ if (!exists) {
-+ return NS_OK;
-+ }
-+
-+ nsCOMPtr<nsISimpleEnumerator> entries;
-+ rv = directory->GetDirectoryEntries(getter_AddRefs(entries));
-+ if (NS_WARN_IF(NS_FAILED(rv))) {
-+ return rv;
-+ }
-+
-+ bool persistent = aPersistenceType == PERSISTENCE_TYPE_PERSISTENT;
-+
-+ bool hasMore;
-+ while (NS_SUCCEEDED((rv = entries->HasMoreElements(&hasMore))) &&
-+ hasMore && !mCanceled) {
-+ nsCOMPtr<nsISupports> entry;
-+ rv = entries->GetNext(getter_AddRefs(entry));
-+ if (NS_WARN_IF(NS_FAILED(rv))) {
-+ return rv;
-+ }
-+
-+ nsCOMPtr<nsIFile> originDir = do_QueryInterface(entry);
-+ MOZ_ASSERT(originDir);
-+
-+ bool isDirectory;
-+ rv = originDir->IsDirectory(&isDirectory);
-+ if (NS_WARN_IF(NS_FAILED(rv))) {
-+ return rv;
-+ }
-+
-+ if (!isDirectory) {
-+ nsString leafName;
-+ rv = originDir->GetLeafName(leafName);
-+ if (NS_WARN_IF(NS_FAILED(rv))) {
-+ return rv;
-+ }
-+
-+ if (!leafName.EqualsLiteral(DSSTORE_FILE_NAME)) {
-+ QM_WARNING("Something (%s) in the repository that doesn't belong!",
-+ NS_ConvertUTF16toUTF8(leafName).get());
-+ }
-+ continue;
-+ }
-+
-+ int64_t timestamp;
-+ nsCString suffix;
-+ nsCString group;
-+ nsCString origin;
-+ bool isApp;
-+ rv = aQuotaManager->GetDirectoryMetadata2WithRestore(originDir,
-+ persistent,
-+ &timestamp,
-+ suffix,
-+ group,
-+ origin,
-+ &isApp);
-+ if (NS_WARN_IF(NS_FAILED(rv))) {
-+ return rv;
-+ }
-+
-+ if (!mGetAll &&
-+ aQuotaManager->IsOriginWhitelistedForPersistentStorage(origin)) {
-+ continue;
-+ }
-+
-+ OriginUsage* originUsage;
-+
-+ // We can't store pointers to OriginUsage objects in the hashtable
-+ // since AppendElement() reallocates its internal array buffer as number
-+ // of elements grows.
-+ uint32_t index;
-+ if (mOriginUsagesIndex.Get(origin, &index)) {
-+ originUsage = &mOriginUsages[index];
-+ } else {
-+ index = mOriginUsages.Length();
-+
-+ originUsage = mOriginUsages.AppendElement();
-+
-+ originUsage->origin() = origin;
-+ originUsage->persisted() = false;
-+ originUsage->usage() = 0;
-+
-+ mOriginUsagesIndex.Put(origin, index);
-+ }
-+
-+ UsageInfo usageInfo;
-+ rv = GetUsageForOrigin(aQuotaManager,
-+ aPersistenceType,
-+ group,
-+ origin,
-+ isApp,
-+ &usageInfo);
-+ if (NS_WARN_IF(NS_FAILED(rv))) {
-+ return rv;
-+ }
-+
-+ originUsage->usage() = originUsage->usage() + usageInfo.TotalUsage();
-+ }
-+ if (NS_WARN_IF(NS_FAILED(rv))) {
-+ return rv;
-+ }
-+
-+ return NS_OK;
-+}
-+
-+nsresult
-+GetUsageOp::DoDirectoryWork(QuotaManager* aQuotaManager)
-+{
-+ AssertIsOnIOThread();
-+
-+ PROFILER_LABEL("Quota", "GetUsageOp::DoDirectoryWork",
-+ js::ProfileEntry::Category::OTHER);
-+
-+ nsresult rv;
-+
-+ for (const PersistenceType type : kAllPersistenceTypes) {
-+ rv = TraverseRepository(aQuotaManager, type);
-+ if (NS_WARN_IF(NS_FAILED(rv))) {
-+ return rv;
-+ }
-+ }
-+
-+ return NS_OK;
-+}
-+
-+void
-+GetUsageOp::GetResponse(UsageRequestResponse& aResponse)
-+{
-+ AssertIsOnOwningThread();
-+
-+ aResponse = AllUsageResponse();
-+
-+ if (!mOriginUsages.IsEmpty()) {
-+ nsTArray<OriginUsage>& originUsages =
-+ aResponse.get_AllUsageResponse().originUsages();
-+
-+ mOriginUsages.SwapElements(originUsages);
-+ }
-+}
-+
- GetOriginUsageOp::GetOriginUsageOp(const UsageRequestParams& aParams)
- : mParams(aParams.get_OriginUsageParams())
- , mGetGroupUsage(aParams.get_OriginUsageParams().getGroupUsage())
- {
- AssertIsOnOwningThread();
- MOZ_ASSERT(aParams.type() == UsageRequestParams::TOriginUsageParams);
- }
-
-diff --git a/dom/quota/PQuota.ipdl b/dom/quota/PQuota.ipdl
---- a/dom/quota/PQuota.ipdl
-+++ b/dom/quota/PQuota.ipdl
-@@ -12,24 +12,30 @@ include "mozilla/dom/quota/Serialization
-
- using mozilla::dom::quota::PersistenceType
- from "mozilla/dom/quota/PersistenceType.h";
-
- namespace mozilla {
- namespace dom {
- namespace quota {
-
-+struct AllUsageParams
-+{
-+ bool getAll;
-+};
-+
- struct OriginUsageParams
- {
- PrincipalInfo principalInfo;
- bool getGroupUsage;
- };
-
- union UsageRequestParams
- {
-+ AllUsageParams;
- OriginUsageParams;
- };
-
- struct ClearOriginParams
- {
- PrincipalInfo principalInfo;
- PersistenceType persistenceType;
- bool persistenceTypeIsExplicit;
-diff --git a/dom/quota/PQuotaUsageRequest.ipdl b/dom/quota/PQuotaUsageRequest.ipdl
---- a/dom/quota/PQuotaUsageRequest.ipdl
-+++ b/dom/quota/PQuotaUsageRequest.ipdl
-@@ -3,26 +3,39 @@
- * You can obtain one at http://mozilla.org/MPL/2.0/. */
-
- include protocol PQuota;
-
- namespace mozilla {
- namespace dom {
- namespace quota {
-
-+struct OriginUsage
-+{
-+ nsCString origin;
-+ bool persisted;
-+ uint64_t usage;
-+};
-+
-+struct AllUsageResponse
-+{
-+ OriginUsage[] originUsages;
-+};
-+
- struct OriginUsageResponse
- {
- uint64_t usage;
- uint64_t fileUsage;
- uint64_t limit;
- };
-
- union UsageRequestResponse
- {
- nsresult;
-+ AllUsageResponse;
- OriginUsageResponse;
- };
-
- protocol PQuotaUsageRequest
- {
- manager PQuota;
-
- parent:
-diff --git a/dom/quota/QuotaManagerService.cpp b/dom/quota/QuotaManagerService.cpp
---- a/dom/quota/QuotaManagerService.cpp
-+++ b/dom/quota/QuotaManagerService.cpp
-@@ -490,16 +490,41 @@ QuotaManagerService::RemoveIdleObserver(
-
- NS_IMPL_ADDREF(QuotaManagerService)
- NS_IMPL_RELEASE_WITH_DESTROY(QuotaManagerService, Destroy())
- NS_IMPL_QUERY_INTERFACE(QuotaManagerService,
- nsIQuotaManagerService,
- nsIObserver)
-
- NS_IMETHODIMP
-+QuotaManagerService::GetUsage(nsIQuotaUsageCallback* aCallback,
-+ bool aGetAll,
-+ nsIQuotaUsageRequest** _retval)
-+{
-+ MOZ_ASSERT(NS_IsMainThread());
-+ MOZ_ASSERT(aCallback);
-+
-+ RefPtr<UsageRequest> request = new UsageRequest(aCallback);
-+
-+ AllUsageParams params;
-+
-+ params.getAll() = aGetAll;
-+
-+ nsAutoPtr<PendingRequestInfo> info(new UsageRequestInfo(request, params));
-+
-+ nsresult rv = InitiateRequest(info);
-+ if (NS_WARN_IF(NS_FAILED(rv))) {
-+ return rv;
-+ }
-+
-+ request.forget(_retval);
-+ return NS_OK;
-+}
-+
-+NS_IMETHODIMP
- QuotaManagerService::GetUsageForPrincipal(nsIPrincipal* aPrincipal,
- nsIQuotaUsageCallback* aCallback,
- bool aGetGroupUsage,
- nsIQuotaUsageRequest** _retval)
- {
- MOZ_ASSERT(NS_IsMainThread());
- MOZ_ASSERT(aPrincipal);
- MOZ_ASSERT(aCallback);
-diff --git a/dom/quota/QuotaRequests.cpp b/dom/quota/QuotaRequests.cpp
---- a/dom/quota/QuotaRequests.cpp
-+++ b/dom/quota/QuotaRequests.cpp
-@@ -86,16 +86,25 @@ RequestBase::GetResultCode(nsresult* aRe
- if (!mHaveResultOrErrorCode) {
- return NS_ERROR_FAILURE;
- }
-
- *aResultCode = mResultCode;
- return NS_OK;
- }
-
-+UsageRequest::UsageRequest(nsIQuotaUsageCallback* aCallback)
-+ : mCallback(aCallback)
-+ , mBackgroundActor(nullptr)
-+ , mCanceled(false)
-+{
-+ AssertIsOnOwningThread();
-+ MOZ_ASSERT(aCallback);
-+}
-+
- UsageRequest::UsageRequest(nsIPrincipal* aPrincipal,
- nsIQuotaUsageCallback* aCallback)
- : RequestBase(aPrincipal)
- , mCallback(aCallback)
- , mBackgroundActor(nullptr)
- , mCanceled(false)
- {
- AssertIsOnOwningThread();
-diff --git a/dom/quota/QuotaRequests.h b/dom/quota/QuotaRequests.h
---- a/dom/quota/QuotaRequests.h
-+++ b/dom/quota/QuotaRequests.h
-@@ -73,16 +73,18 @@ class UsageRequest final
-
- nsCOMPtr<nsIVariant> mResult;
-
- QuotaUsageRequestChild* mBackgroundActor;
-
- bool mCanceled;
-
- public:
-+ explicit UsageRequest(nsIQuotaUsageCallback* aCallback);
-+
- UsageRequest(nsIPrincipal* aPrincipal,
- nsIQuotaUsageCallback* aCallback);
-
- void
- SetBackgroundActor(QuotaUsageRequestChild* aBackgroundActor);
-
- void
- ClearBackgroundActor()
-diff --git a/dom/quota/QuotaResults.cpp b/dom/quota/QuotaResults.cpp
---- a/dom/quota/QuotaResults.cpp
-+++ b/dom/quota/QuotaResults.cpp
-@@ -5,16 +5,53 @@
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
- #include "QuotaResults.h"
-
- namespace mozilla {
- namespace dom {
- namespace quota {
-
-+UsageResult::UsageResult(const nsACString& aOrigin,
-+ bool aPersisted,
-+ uint64_t aUsage)
-+ : mOrigin(aOrigin)
-+ , mUsage(aUsage)
-+ , mPersisted(aPersisted)
-+{
-+}
-+
-+NS_IMPL_ISUPPORTS(UsageResult,
-+ nsIQuotaUsageResult)
-+
-+NS_IMETHODIMP
-+UsageResult::GetOrigin(nsACString& aOrigin)
-+{
-+ aOrigin = mOrigin;
-+ return NS_OK;
-+}
-+
-+NS_IMETHODIMP
-+UsageResult::GetPersisted(bool* aPersisted)
-+{
-+ MOZ_ASSERT(aPersisted);
-+
-+ *aPersisted = mPersisted;
-+ return NS_OK;
-+}
-+
-+NS_IMETHODIMP
-+UsageResult::GetUsage(uint64_t* aUsage)
-+{
-+ MOZ_ASSERT(aUsage);
-+
-+ *aUsage = mUsage;
-+ return NS_OK;
-+}
-+
- OriginUsageResult::OriginUsageResult(uint64_t aUsage,
- uint64_t aFileUsage,
- uint64_t aLimit)
- : mUsage(aUsage)
- , mFileUsage(aFileUsage)
- , mLimit(aLimit)
- {
- }
-diff --git a/dom/quota/QuotaResults.h b/dom/quota/QuotaResults.h
---- a/dom/quota/QuotaResults.h
-+++ b/dom/quota/QuotaResults.h
-@@ -8,16 +8,36 @@
- #define mozilla_dom_quota_QuotaResults_h
-
- #include "nsIQuotaResults.h"
-
- namespace mozilla {
- namespace dom {
- namespace quota {
-
-+class UsageResult
-+ : public nsIQuotaUsageResult
-+{
-+ nsCString mOrigin;
-+ uint64_t mUsage;
-+ bool mPersisted;
-+
-+public:
-+ UsageResult(const nsACString& aOrigin,
-+ bool aPersisted,
-+ uint64_t aUsage);
-+
-+private:
-+ virtual ~UsageResult()
-+ { }
-+
-+ NS_DECL_ISUPPORTS
-+ NS_DECL_NSIQUOTAUSAGERESULT
-+};
-+
- class OriginUsageResult
- : public nsIQuotaOriginUsageResult
- {
- uint64_t mUsage;
- uint64_t mFileUsage;
- uint64_t mLimit;
-
- public:
-diff --git a/dom/quota/nsIQuotaManagerService.idl b/dom/quota/nsIQuotaManagerService.idl
---- a/dom/quota/nsIQuotaManagerService.idl
-+++ b/dom/quota/nsIQuotaManagerService.idl
-@@ -10,16 +10,31 @@ interface nsIPrincipal;
- interface nsIQuotaRequest;
- interface nsIQuotaUsageCallback;
- interface nsIQuotaUsageRequest;
-
- [scriptable, builtinclass, uuid(1b3d0a38-8151-4cf9-89fa-4f92c2ef0e7e)]
- interface nsIQuotaManagerService : nsISupports
- {
- /**
-+ * Schedules an asynchronous callback that will inspect all origins and
-+ * return the total amount of disk space being used by storages for each
-+ * origin separately.
-+ *
-+ * @param aCallback
-+ * The callback that will be called when the usage is available.
-+ * @param aGetAll
-+ * An optional boolean to indicate inspection of all origins,
-+ * including internal ones.
-+ */
-+ [must_use] nsIQuotaUsageRequest
-+ getUsage(in nsIQuotaUsageCallback aCallback,
-+ [optional] in boolean aGetAll);
-+
-+ /**
- * Schedules an asynchronous callback that will return the total amount of
- * disk space being used by storages for the given origin.
- *
- * @param aPrincipal
- * A principal for the origin whose usage is being queried.
- * @param aCallback
- * The callback that will be called when the usage is available.
- * @param aGetGroupUsage
-diff --git a/dom/quota/nsIQuotaRequests.idl b/dom/quota/nsIQuotaRequests.idl
---- a/dom/quota/nsIQuotaRequests.idl
-+++ b/dom/quota/nsIQuotaRequests.idl
-@@ -18,16 +18,17 @@ interface nsIQuotaRequestBase : nsISuppo
-
- [must_use] readonly attribute nsresult resultCode;
- };
-
- [scriptable, uuid(166e28e6-cf6d-4927-a6d7-b51bca9d3469)]
- interface nsIQuotaUsageRequest : nsIQuotaRequestBase
- {
- // The result can contain one of these types:
-+ // array of nsIQuotaUsageResult
- // nsIQuotaOriginUsageResult
- [must_use] readonly attribute nsIVariant result;
-
- attribute nsIQuotaUsageCallback callback;
-
- [must_use] void
- cancel();
- };
-diff --git a/dom/quota/nsIQuotaResults.idl b/dom/quota/nsIQuotaResults.idl
---- a/dom/quota/nsIQuotaResults.idl
-+++ b/dom/quota/nsIQuotaResults.idl
-@@ -1,16 +1,26 @@
- /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
- /* vim: set ts=2 et sw=2 tw=80: */
- /* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
- #include "nsISupports.idl"
-
-+[scriptable, function, uuid(d8c9328b-9aa8-4f5d-90e6-482de4a6d5b8)]
-+interface nsIQuotaUsageResult : nsISupports
-+{
-+ readonly attribute ACString origin;
-+
-+ readonly attribute boolean persisted;
-+
-+ readonly attribute unsigned long long usage;
-+};
-+
- [scriptable, function, uuid(96df03d2-116a-493f-bb0b-118c212a6b32)]
- interface nsIQuotaOriginUsageResult : nsISupports
- {
- readonly attribute unsigned long long usage;
-
- readonly attribute unsigned long long fileUsage;
-
- readonly attribute unsigned long long limit;
-
diff --git a/gnu/packages/patches/icecat-bug-1414945.patch b/gnu/packages/patches/icecat-bug-1414945.patch
deleted file mode 100644
index 3638ace1c4..0000000000
--- a/gnu/packages/patches/icecat-bug-1414945.patch
+++ /dev/null
@@ -1,73 +0,0 @@
-Based on:
- https://hg.mozilla.org/releases/mozilla-esr52/raw-rev/d303b3bb88c3
-
-Adapted to apply cleanly to IceCat.
-
-# HG changeset patch
-# User Philip Chimento <philip.chimento@gmail.com>
-# Date 1510012155 28800
-# Node ID d303b3bb88c3345d3a089901e2b6fe883d148e44
-# Parent 0152d097672f7e99504815cf7b06d9f303419fba
-Bug 1414945 - Don't use TimeDuration in static initializer. r=jandem, a=ritu
-
-On Darwin this would cause a race between two static initializers.
-
-diff --git a/js/src/shell/js.cpp b/js/src/shell/js.cpp
---- a/js/src/shell/js.cpp
-+++ b/js/src/shell/js.cpp
-@@ -138,17 +138,17 @@ static const size_t gMaxStackSize = 2 *
- #else
- static const size_t gMaxStackSize = 128 * sizeof(size_t) * 1024;
- #endif
-
- /*
- * Limit the timeout to 30 minutes to prevent an overflow on platfoms
- * that represent the time internally in microseconds using 32-bit int.
- */
--static const TimeDuration MAX_TIMEOUT_INTERVAL = TimeDuration::FromSeconds(1800.0);
-+static const double MAX_TIMEOUT_SECONDS = 1800.0;
-
- // SharedArrayBuffer and Atomics settings track IceCat. Choose a custom setting
- // with --shared-memory={on,off}.
- #ifndef RELEASE_OR_BETA
- # define SHARED_MEMORY_DEFAULT 1
- #else
- # define SHARED_MEMORY_DEFAULT 0
- #endif
-@@ -3518,16 +3518,17 @@ Sleep_fn(JSContext* cx, unsigned argc, V
- if (!ToNumber(cx, args[0], &t_secs))
- return false;
- if (mozilla::IsNaN(t_secs)) {
- JS_ReportErrorASCII(cx, "sleep interval is not a number");
- return false;
- }
-
- duration = TimeDuration::FromSeconds(Max(0.0, t_secs));
-+ const TimeDuration MAX_TIMEOUT_INTERVAL = TimeDuration::FromSeconds(MAX_TIMEOUT_SECONDS);
- if (duration > MAX_TIMEOUT_INTERVAL) {
- JS_ReportErrorASCII(cx, "Excessive sleep interval");
- return false;
- }
- }
- {
- LockGuard<Mutex> guard(sc->watchdogLock);
- TimeStamp toWakeup = TimeStamp::Now() + duration;
-@@ -3675,16 +3676,17 @@ CancelExecution(JSContext* cx)
-
- static bool
- SetTimeoutValue(JSContext* cx, double t)
- {
- if (mozilla::IsNaN(t)) {
- JS_ReportErrorASCII(cx, "timeout is not a number");
- return false;
- }
-+ const TimeDuration MAX_TIMEOUT_INTERVAL = TimeDuration::FromSeconds(MAX_TIMEOUT_SECONDS);
- if (TimeDuration::FromSeconds(t) > MAX_TIMEOUT_INTERVAL) {
- JS_ReportErrorASCII(cx, "Excessive timeout value");
- return false;
- }
- GetShellContext(cx)->timeoutInterval = t;
- if (!ScheduleWatchdog(cx, t)) {
- JS_ReportErrorASCII(cx, "Failed to create the watchdog");
- return false;
-
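The underlying issue, in general C++ terms: a namespace-scope constant built
by a non-trivial constructor (here TimeDuration::FromSeconds) requires a
dynamic initializer, and the order of dynamic initializers across translation
units is unspecified, which is the race the commit message mentions on
Darwin. Keeping only a plain double at file scope and constructing the object
at the point of use avoids it. A rough sketch with a stand-in type (not
Mozilla code):

    // Stand-in for mozilla::TimeDuration, for illustration only.
    #include <chrono>

    struct Duration {
      std::chrono::duration<double> v;
      static Duration FromSeconds(double s) { return { std::chrono::duration<double>(s) }; }
    };

    // Risky: this needs a dynamic initializer that runs at start-up, in an
    // unspecified order relative to other translation units:
    //   static const Duration MAX_TIMEOUT_INTERVAL = Duration::FromSeconds(1800.0);

    // Safe: a constant-initialized double; build the object where it is used.
    static const double MAX_TIMEOUT_SECONDS = 1800.0;

    bool excessive_timeout(double t_secs)
    {
      const Duration max = Duration::FromSeconds(MAX_TIMEOUT_SECONDS);
      return Duration::FromSeconds(t_secs).v > max.v;
    }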
diff --git a/gnu/packages/patches/icecat-bug-1415133.patch b/gnu/packages/patches/icecat-bug-1415133.patch
deleted file mode 100644
index 4e322d21fb..0000000000
--- a/gnu/packages/patches/icecat-bug-1415133.patch
+++ /dev/null
@@ -1,40 +0,0 @@
-Based on:
- https://hg.mozilla.org/releases/mozilla-esr52/raw-rev/22fe3ff3f923
-
-Adapted to apply cleanly to IceCat.
-
-# HG changeset patch
-# User Marco Bonardo <mbonardo@mozilla.com>
-# Date 1510052455 -3600
-# Node ID 22fe3ff3f92358596521f7155ddc512006022207
-# Parent 2909ba991f3134f9fbf4859cf08582f1c9845594
-Bug 1415133 - Downgrades from 55+ to ESR lose bookmarks. r=past a=lizzard
-
-MozReview-Commit-ID: 44Rw7m1FP4h
-
-diff --git a/toolkit/components/places/Database.cpp b/toolkit/components/places/Database.cpp
---- a/toolkit/components/places/Database.cpp
-+++ b/toolkit/components/places/Database.cpp
-@@ -761,16 +761,21 @@ Database::InitSchema(bool* aDatabaseMigr
- // 2. implement a method that performs upgrade to your version from the
- // previous one.
- //
- // NOTE: The downgrade process is pretty much complicated by the fact old
- // versions cannot know what a new version is going to implement.
- // The only thing we will do for downgrades is setting back the schema
- // version, so that next upgrades will run again the migration step.
-
-+ if (currentSchemaVersion > 36) {
-+ // These versions are not downgradable.
-+ return NS_ERROR_FILE_CORRUPTED;
-+ }
-+
- if (currentSchemaVersion < DATABASE_SCHEMA_VERSION) {
- *aDatabaseMigrated = true;
-
- if (currentSchemaVersion < 11) {
- // These are versions older than IceCat 4 that are not supported
- // anymore. In this case it's safer to just replace the database.
- return NS_ERROR_FILE_CORRUPTED;
- }
-
diff --git a/gnu/packages/patches/icecat-bug-1424373-pt2.patch b/gnu/packages/patches/icecat-bug-1424373-pt2.patch
deleted file mode 100644
index cdc7226bd3..0000000000
--- a/gnu/packages/patches/icecat-bug-1424373-pt2.patch
+++ /dev/null
@@ -1,183 +0,0 @@
-Based on:
- https://hg.mozilla.org/releases/mozilla-esr52/raw-rev/19ea736e7e3d
-
-Adapted to apply cleanly to IceCat.
-
-# HG changeset patch
-# User Mike Conley <mconley@mozilla.com>
-# Date 1513892173 18000
-# Node ID 19ea736e7e3d20555ee6633b9d7803c1225979e1
-# Parent 320032aaa06899f5585dcd0288059e5342118714
-Bug 1424373 - Rename crash report submission pref. r=Mossop a=jcristau
-
-diff --git a/browser/app/profile/icecat.js b/browser/app/profile/icecat.js
---- a/browser/app/profile/icecat.js
-+++ b/browser/app/profile/icecat.js
-@@ -1557,15 +1557,15 @@ pref("browser.crashReports.unsubmittedCh
- pref("browser.crashReports.unsubmittedCheck.enabled", false);
- #endif
-
- // chancesUntilSuppress is how many times we'll show the unsubmitted
- // crash report notification across different days and shutdown
- // without a user choice before we suppress the notification for
- // some number of days.
- pref("browser.crashReports.unsubmittedCheck.chancesUntilSuppress", 4);
--pref("browser.crashReports.unsubmittedCheck.autoSubmit", false);
-+pref("browser.crashReports.unsubmittedCheck.autoSubmit2", false);
-
- #ifdef NIGHTLY_BUILD
- // Enable the (fairly costly) client/server validation on nightly only. The other prefs
- // controlling validation are located in /services/sync/services-sync.js
- pref("services.sync.validation.enabled", true);
- #endif
-diff --git a/browser/base/content/test/tabcrashed/browser_autoSubmitRequest.js b/browser/base/content/test/tabcrashed/browser_autoSubmitRequest.js
---- a/browser/base/content/test/tabcrashed/browser_autoSubmitRequest.js
-+++ b/browser/base/content/test/tabcrashed/browser_autoSubmitRequest.js
-@@ -1,12 +1,12 @@
- "use strict";
-
- const PAGE = "data:text/html,<html><body>A%20regular,%20everyday,%20normal%20page.";
--const AUTOSUBMIT_PREF = "browser.crashReports.unsubmittedCheck.autoSubmit";
-+const AUTOSUBMIT_PREF = "browser.crashReports.unsubmittedCheck.autoSubmit2";
-
- const {TabStateFlusher} =
- Cu.import("resource:///modules/sessionstore/TabStateFlusher.jsm", {});
-
- // On debug builds, crashing tabs results in much thinking, which
- // slows down the test and results in intermittent test timeouts,
- // so we'll pump up the expected timeout for this test.
- requestLongerTimeout(2);
-diff --git a/browser/components/preferences/in-content/advanced.xul b/browser/components/preferences/in-content/advanced.xul
---- a/browser/components/preferences/in-content/advanced.xul
-+++ b/browser/components/preferences/in-content/advanced.xul
-@@ -51,18 +51,18 @@
- #ifdef MOZ_TELEMETRY_REPORTING
- <preference id="toolkit.telemetry.enabled"
- name="toolkit.telemetry.enabled"
- type="bool"/>
- #endif
-
- <!-- Data Choices tab -->
- #ifdef MOZ_CRASHREPORTER
-- <preference id="browser.crashReports.unsubmittedCheck.autoSubmit"
-- name="browser.crashReports.unsubmittedCheck.autoSubmit"
-+ <preference id="browser.crashReports.unsubmittedCheck.autoSubmit2"
-+ name="browser.crashReports.unsubmittedCheck.autoSubmit2"
- type="bool"/>
- #endif
-
- <!-- Network tab -->
- <preference id="browser.cache.disk.capacity"
- name="browser.cache.disk.capacity"
- type="int"/>
- <preference id="browser.offline-apps.notify"
-@@ -232,17 +232,17 @@
- </hbox>
- </vbox>
- </groupbox>
- #endif
- #ifdef MOZ_CRASHREPORTER
- <groupbox>
- <caption>
- <checkbox id="automaticallySubmitCrashesBox"
-- preference="browser.crashReports.unsubmittedCheck.autoSubmit"
-+ preference="browser.crashReports.unsubmittedCheck.autoSubmit2"
- label="&alwaysSubmitCrashReports.label;"
- accesskey="&alwaysSubmitCrashReports.accesskey;"/>
- </caption>
- <hbox class="indent">
- <label flex="1">&crashReporterDesc2.label;</label>
- <spacer flex="10"/>
- <label id="crashReporterLearnMore"
- class="text-link">&crashReporterLearnMore.label;</label>
-diff --git a/browser/components/sessionstore/test/browser_background_tab_crash.js b/browser/components/sessionstore/test/browser_background_tab_crash.js
---- a/browser/components/sessionstore/test/browser_background_tab_crash.js
-+++ b/browser/components/sessionstore/test/browser_background_tab_crash.js
-@@ -142,17 +142,17 @@ add_task(function* test_background_crash
- /**
- * Tests that if a content process crashes taking down only
- * background tabs, and the user is configured to send backlogged
- * crash reports automatically, that the tab crashed page is not
- * shown.
- */
- add_task(function* test_background_crash_autosubmit_backlogged() {
- yield SpecialPowers.pushPrefEnv({
-- set: [["browser.crashReports.unsubmittedCheck.autoSubmit", true]],
-+ set: [["browser.crashReports.unsubmittedCheck.autoSubmit2", true]],
- });
-
- yield setupBackgroundTabs(function*([tab1, tab2]) {
- // Let's crash one of those background tabs now...
- yield crashBackgroundTabs([tab1, tab2]);
-
- // Selecting the first tab should restore it.
- let tabRestored = promiseTabRestored(tab1);
-diff --git a/browser/modules/ContentCrashHandlers.jsm b/browser/modules/ContentCrashHandlers.jsm
---- a/browser/modules/ContentCrashHandlers.jsm
-+++ b/browser/modules/ContentCrashHandlers.jsm
-@@ -865,21 +865,21 @@ this.UnsubmittedCrashHandler = {
- return nb.appendNotification(message, notificationID,
- "chrome://browser/skin/tab-crashed.svg",
- nb.PRIORITY_INFO_HIGH, buttons,
- eventCallback);
- },
-
- get autoSubmit() {
- return Services.prefs
-- .getBoolPref("browser.crashReports.unsubmittedCheck.autoSubmit");
-+ .getBoolPref("browser.crashReports.unsubmittedCheck.autoSubmit2");
- },
-
- set autoSubmit(val) {
-- Services.prefs.setBoolPref("browser.crashReports.unsubmittedCheck.autoSubmit",
-+ Services.prefs.setBoolPref("browser.crashReports.unsubmittedCheck.autoSubmit2",
- val);
- },
-
- /**
- * Attempt to submit reports to the crash report server. Each
- * report will have the "SubmittedFromInfobar" extra key set
- * to true.
- *
-diff --git a/browser/modules/test/browser_UnsubmittedCrashHandler.js b/browser/modules/test/browser_UnsubmittedCrashHandler.js
---- a/browser/modules/test/browser_UnsubmittedCrashHandler.js
-+++ b/browser/modules/test/browser_UnsubmittedCrashHandler.js
-@@ -344,17 +344,17 @@ add_task(function* test_can_submit_sever
- clearPendingCrashReports();
- });
-
- /**
- * Tests that choosing "Send Always" flips the autoSubmit pref
- * and sends the pending crash reports.
- */
- add_task(function* test_can_submit_always() {
-- let pref = "browser.crashReports.unsubmittedCheck.autoSubmit";
-+ let pref = "browser.crashReports.unsubmittedCheck.autoSubmit2";
- Assert.equal(Services.prefs.getBoolPref(pref), false,
- "We should not be auto-submitting by default");
-
- let reportIDs = yield createPendingCrashReports(1);
- let notification =
- yield UnsubmittedCrashHandler.checkForUnsubmittedCrashReports();
- Assert.ok(notification, "There should be a notification");
-
-@@ -388,17 +388,17 @@ add_task(function* test_can_submit_alway
-
- /**
- * Tests that if the user has chosen to automatically send
- * crash reports that no notification is displayed to the
- * user.
- */
- add_task(function* test_can_auto_submit() {
- yield SpecialPowers.pushPrefEnv({ set: [
-- ["browser.crashReports.unsubmittedCheck.autoSubmit", true],
-+ ["browser.crashReports.unsubmittedCheck.autoSubmit2", true],
- ]});
-
- let reportIDs = yield createPendingCrashReports(3);
- let promiseReports = waitForSubmittedReports(reportIDs);
- let notification =
- yield UnsubmittedCrashHandler.checkForUnsubmittedCrashReports();
- Assert.equal(notification, null, "There should be no notification");
- info("Waiting on reports to be received.");
-
diff --git a/gnu/packages/patches/java-jeromq-fix-tests.patch b/gnu/packages/patches/java-jeromq-fix-tests.patch
new file mode 100644
index 0000000000..5466b92707
--- /dev/null
+++ b/gnu/packages/patches/java-jeromq-fix-tests.patch
@@ -0,0 +1,253 @@
+From 5803aadd3f209eba1ffbb2cf7bf16778019dbee1 Mon Sep 17 00:00:00 2001
+From: fredoboulo <fredoboulo@users.noreply.github.com>
+Date: Fri, 23 Feb 2018 23:55:57 +0100
+Subject: [PATCH] Fix #524 : V1 and V2 protocol downgrades handle received data
+ in handshake buffer
+
+This patch is an upstream pull request; see:
+https://github.com/zeromq/jeromq/pull/527.
+
+It was merged upstream as commit c2afa9c, so we can drop it
+when updating to the 0.4.4 release.
+
+---
+ src/main/java/zmq/io/StreamEngine.java | 21 ++++++++++--
+ src/test/java/zmq/io/AbstractProtocolVersion.java | 41 +++++++++++++----------
+ src/test/java/zmq/io/V0ProtocolTest.java | 12 +++++++
+ src/test/java/zmq/io/V1ProtocolTest.java | 16 +++++++--
+ src/test/java/zmq/io/V2ProtocolTest.java | 16 +++++++--
+ 5 files changed, 81 insertions(+), 25 deletions(-)
+
+diff --git a/src/main/java/zmq/io/StreamEngine.java b/src/main/java/zmq/io/StreamEngine.java
+index b8933c92..fe2f2d8d 100644
+--- a/src/main/java/zmq/io/StreamEngine.java
++++ b/src/main/java/zmq/io/StreamEngine.java
+@@ -816,9 +816,7 @@ private boolean handshake()
+ assert (bufferSize == headerSize);
+
+ // Make sure the decoder sees the data we have already received.
+- greetingRecv.flip();
+- inpos = greetingRecv;
+- insize = greetingRecv.limit();
++ decodeDataAfterHandshake(0);
+
+ // To allow for interoperability with peers that do not forward
+ // their subscriptions, we inject a phantom subscription message
+@@ -846,6 +844,8 @@ else if (greetingRecv.get(revisionPos) == Protocol.V1.revision) {
+ }
+ encoder = new V1Encoder(errno, Config.OUT_BATCH_SIZE.getValue());
+ decoder = new V1Decoder(errno, Config.IN_BATCH_SIZE.getValue(), options.maxMsgSize, options.allocator);
++
++ decodeDataAfterHandshake(V2_GREETING_SIZE);
+ }
+ else if (greetingRecv.get(revisionPos) == Protocol.V2.revision) {
+ // ZMTP/2.0 framing.
+@@ -859,6 +859,8 @@ else if (greetingRecv.get(revisionPos) == Protocol.V2.revision) {
+ }
+ encoder = new V2Encoder(errno, Config.OUT_BATCH_SIZE.getValue());
+ decoder = new V2Decoder(errno, Config.IN_BATCH_SIZE.getValue(), options.maxMsgSize, options.allocator);
++
++ decodeDataAfterHandshake(V2_GREETING_SIZE);
+ }
+ else {
+ zmtpVersion = Protocol.V3;
+@@ -904,6 +906,19 @@ else if (greetingRecv.get(revisionPos) == Protocol.V2.revision) {
+ return true;
+ }
+
++ private void decodeDataAfterHandshake(int greetingSize)
++ {
++ final int pos = greetingRecv.position();
++ if (pos > greetingSize) {
++ // data is present after handshake
++ greetingRecv.position(greetingSize).limit(pos);
++
++ // Make sure the decoder sees this extra data.
++ inpos = greetingRecv;
++ insize = greetingRecv.remaining();
++ }
++ }
++
+ private Msg identityMsg()
+ {
+ Msg msg = new Msg(options.identitySize);
+diff --git a/src/test/java/zmq/io/AbstractProtocolVersion.java b/src/test/java/zmq/io/AbstractProtocolVersion.java
+index e60db403..aa06b4a7 100644
+--- a/src/test/java/zmq/io/AbstractProtocolVersion.java
++++ b/src/test/java/zmq/io/AbstractProtocolVersion.java
+@@ -18,15 +18,18 @@
+ import zmq.SocketBase;
+ import zmq.ZError;
+ import zmq.ZMQ;
++import zmq.ZMQ.Event;
+ import zmq.util.Utils;
+
+ public abstract class AbstractProtocolVersion
+ {
++ protected static final int REPETITIONS = 1000;
++
+ static class SocketMonitor extends Thread
+ {
+- private final Ctx ctx;
+- private final String monitorAddr;
+- private final List<ZMQ.Event> events = new ArrayList<>();
++ private final Ctx ctx;
++ private final String monitorAddr;
++ private final ZMQ.Event[] events = new ZMQ.Event[1];
+
+ public SocketMonitor(Ctx ctx, String monitorAddr)
+ {
+@@ -41,15 +44,15 @@ public void run()
+ boolean rc = s.connect(monitorAddr);
+ assertThat(rc, is(true));
+ // Only some of the exceptional events could fire
+- while (true) {
+- ZMQ.Event event = ZMQ.Event.read(s);
+- if (event == null && s.errno() == ZError.ETERM) {
+- break;
+- }
+- assertThat(event, notNullValue());
+-
+- events.add(event);
++
++ ZMQ.Event event = ZMQ.Event.read(s);
++ if (event == null && s.errno() == ZError.ETERM) {
++ s.close();
++ return;
+ }
++ assertThat(event, notNullValue());
++
++ events[0] = event;
+ s.close();
+ }
+ }
+@@ -69,11 +72,12 @@ public void run()
+ boolean rc = ZMQ.setSocketOption(receiver, ZMQ.ZMQ_LINGER, 0);
+ assertThat(rc, is(true));
+
+- SocketMonitor monitor = new SocketMonitor(ctx, "inproc://monitor");
+- monitor.start();
+ rc = ZMQ.monitorSocket(receiver, "inproc://monitor", ZMQ.ZMQ_EVENT_HANDSHAKE_PROTOCOL);
+ assertThat(rc, is(true));
+
++ SocketMonitor monitor = new SocketMonitor(ctx, "inproc://monitor");
++ monitor.start();
++
+ rc = ZMQ.bind(receiver, host);
+ assertThat(rc, is(true));
+
+@@ -81,17 +85,18 @@ public void run()
+ OutputStream out = sender.getOutputStream();
+ for (ByteBuffer raw : raws) {
+ out.write(raw.array());
+- ZMQ.msleep(100);
+ }
+
+ Msg msg = ZMQ.recv(receiver, 0);
+ assertThat(msg, notNullValue());
+ assertThat(new String(msg.data(), ZMQ.CHARSET), is(payload));
+
+- ZMQ.msleep(500);
+- assertThat(monitor.events.size(), is(1));
+- assertThat(monitor.events.get(0).event, is(ZMQ.ZMQ_EVENT_HANDSHAKE_PROTOCOL));
+- assertThat((Integer) monitor.events.get(0).arg, is(version));
++ monitor.join();
++
++ final Event event = monitor.events[0];
++ assertThat(event, notNullValue());
++ assertThat(event.event, is(ZMQ.ZMQ_EVENT_HANDSHAKE_PROTOCOL));
++ assertThat((Integer) event.arg, is(version));
+
+ InputStream in = sender.getInputStream();
+ byte[] data = new byte[255];
+diff --git a/src/test/java/zmq/io/V0ProtocolTest.java b/src/test/java/zmq/io/V0ProtocolTest.java
+index bd547d23..1a5b7aef 100644
+--- a/src/test/java/zmq/io/V0ProtocolTest.java
++++ b/src/test/java/zmq/io/V0ProtocolTest.java
+@@ -10,6 +10,18 @@
+
+ public class V0ProtocolTest extends AbstractProtocolVersion
+ {
++ @Test
++ public void testFixIssue524() throws IOException, InterruptedException
++ {
++ for (int idx = 0; idx < REPETITIONS; ++idx) {
++ if (idx % 100 == 0) {
++ System.out.print(idx + " ");
++ }
++ testProtocolVersion0short();
++ }
++ System.out.println();
++ }
++
+ @Test(timeout = 2000)
+ public void testProtocolVersion0short() throws IOException, InterruptedException
+ {
+diff --git a/src/test/java/zmq/io/V1ProtocolTest.java b/src/test/java/zmq/io/V1ProtocolTest.java
+index e1045f34..764159d0 100644
+--- a/src/test/java/zmq/io/V1ProtocolTest.java
++++ b/src/test/java/zmq/io/V1ProtocolTest.java
+@@ -10,7 +10,19 @@
+
+ public class V1ProtocolTest extends AbstractProtocolVersion
+ {
+- @Test(timeout = 2000)
++ @Test
++ public void testFixIssue524() throws IOException, InterruptedException
++ {
++ for (int idx = 0; idx < REPETITIONS; ++idx) {
++ if (idx % 100 == 0) {
++ System.out.print(idx + " ");
++ }
++ testProtocolVersion1short();
++ }
++ System.out.println();
++ }
++
++ @Test
+ public void testProtocolVersion1short() throws IOException, InterruptedException
+ {
+ List<ByteBuffer> raws = raws(0);
+@@ -25,7 +37,7 @@ public void testProtocolVersion1short() throws IOException, InterruptedException
+ assertProtocolVersion(1, raws, "abcdefg");
+ }
+
+- @Test(timeout = 2000)
++ @Test
+ public void testProtocolVersion1long() throws IOException, InterruptedException
+ {
+ List<ByteBuffer> raws = raws(0);
+diff --git a/src/test/java/zmq/io/V2ProtocolTest.java b/src/test/java/zmq/io/V2ProtocolTest.java
+index d5e64bce..7fda31bc 100644
+--- a/src/test/java/zmq/io/V2ProtocolTest.java
++++ b/src/test/java/zmq/io/V2ProtocolTest.java
+@@ -21,7 +21,19 @@ protected ByteBuffer identity()
+ .put((byte) 0);
+ }
+
+- @Test(timeout = 2000)
++ @Test
++ public void testFixIssue524() throws IOException, InterruptedException
++ {
++ for (int idx = 0; idx < REPETITIONS; ++idx) {
++ if (idx % 100 == 0) {
++ System.out.print(idx + " ");
++ }
++ testProtocolVersion2short();
++ }
++ System.out.println();
++ }
++
++ @Test
+ public void testProtocolVersion2short() throws IOException, InterruptedException
+ {
+ List<ByteBuffer> raws = raws(1);
+@@ -38,7 +50,7 @@ public void testProtocolVersion2short() throws IOException, InterruptedException
+ assertProtocolVersion(2, raws, "abcdefg");
+ }
+
+- @Test(timeout = 2000)
++ @Test
+ public void testProtocolVersion2long() throws IOException, InterruptedException
+ {
+ List<ByteBuffer> raws = raws(1);
diff --git a/gnu/packages/patches/lrzip-CVE-2017-8842.patch b/gnu/packages/patches/lrzip-CVE-2017-8842.patch
new file mode 100644
index 0000000000..89b4f2f5d9
--- /dev/null
+++ b/gnu/packages/patches/lrzip-CVE-2017-8842.patch
@@ -0,0 +1,23 @@
+From 38386bd482c0a8102a79958cb3eddcb97a167ca3 Mon Sep 17 00:00:00 2001
+From: Con Kolivas <kernel@kolivas.org>
+Date: Fri, 9 Mar 2018 17:39:40 +1100
+Subject: [PATCH] CVE-2017-8842 Fix divide-by-zero in bufRead::get
+
+---
+ libzpaq/libzpaq.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/libzpaq/libzpaq.h b/libzpaq/libzpaq.h
+index 93387da..cbe211d 100644
+--- a/libzpaq/libzpaq.h
++++ b/libzpaq/libzpaq.h
+@@ -465,7 +465,8 @@ struct bufRead: public libzpaq::Reader {
+
+ int get() {
+ if (progress && !(*s_len % 128)) {
+- int pct = (total_len - *s_len) * 100 / total_len;
++ int pct = (total_len > 0) ?
++ (total_len - *s_len) * 100 / total_len : 100;
+
+ if (pct / 10 != *last_pct / 10) {
+ int i;
diff --git a/gnu/packages/patches/perl-gd-options-passthrough-and-fontconfig.patch b/gnu/packages/patches/perl-gd-options-passthrough-and-fontconfig.patch
deleted file mode 100644
index b2ff43c0d3..0000000000
--- a/gnu/packages/patches/perl-gd-options-passthrough-and-fontconfig.patch
+++ /dev/null
@@ -1,53 +0,0 @@
-This patch (adapted from nixpkgs for Build.PL) configures Getopt::Long to pass
-options so they will be available at the second GetOptions call.
-
-Also an option to specify the search path for libfontconfig is added.
-
---- GD-2.56/Build.PL 2014-10-27 20:34:54.000000000 -0500
-+++ GD-2.56/Build.PL 2015-02-22 10:08:12.569973657 -0600
-@@ -2,14 +2,14 @@
-
- use strict;
- use Module::Build;
--use Getopt::Long;
-+use Getopt::Long qw(:config pass_through);
- use Config;
-
- # =====> PATHS: CHECK AND ADJUST <=====
- my (@INC,@LIBPATH,@LIBS);
- my $AUTOCONFIG = 0; # global set by try_to_autoconfigure() below
-
--my ($options,$lib_gd_path,$lib_ft_path,$lib_png_path,$lib_jpeg_path,$lib_xpm_path,$lib_zlib_path);
-+my ($options,$lib_gd_path,$lib_ft_path,$lib_png_path,$lib_jpeg_path,$lib_xpm_path,$lib_zlib_path,$lib_fontconfig_path);
-
- unless (try_to_autoconfigure(\$options,\$lib_gd_path,\@INC,\@LIBPATH,\@LIBS)) {
- die <<END;
-@@ -38,6 +38,7 @@
- "lib_jpeg_path=s" => \$lib_jpeg_path,
- "lib_xpm_path=s" => \$lib_xpm_path,
- "lib_zlib_path=s" => \$lib_zlib_path,
-+ "lib_fontconfig_path=s" => \$lib_fontconfig_path,
- );
- unless ($result) {
- die <<END;
-@@ -53,6 +54,7 @@
- -lib_jpeg_path path path to libjpeg
- -lib_xpm_path path path to libxpm
- -lib_zlib_path path path to libpng
-+ -lib_fontconfig_path path path to fontconfig
- -ignore_missing_gd Ignore missing or old libgd installations and try to compile anyway
-
- If no options are passed on the command line. The program will
-@@ -100,6 +102,12 @@
- @INC = ("-I$lib_zlib_path/include", @INC);
- @LIBPATH = ("-L$lib_zlib_path/lib", @LIBPATH);
- }
-+if( defined($lib_fontconfig_path) )
-+{
-+ print "Fontconfig library used from: $lib_fontconfig_path\n";
-+ @INC = ("-I$lib_fontconfig_path/include", @INC);
-+ @LIBPATH = ("-L$lib_fontconfig_path/lib", @LIBPATH);
-+}
- #############################################################################################
-
- if ($^O eq 'VMS'){
diff --git a/gnu/packages/patches/python-mox3-python3.6-compat.patch b/gnu/packages/patches/python-mox3-python3.6-compat.patch
new file mode 100644
index 0000000000..0426d07cf9
--- /dev/null
+++ b/gnu/packages/patches/python-mox3-python3.6-compat.patch
@@ -0,0 +1,43 @@
+Fix regex so that it works with Python 3.6.
+
+See <https://docs.python.org/3/library/re.html#re.LOCALE>.
+
+Copied from upstream bug report:
+https://bugs.launchpad.net/python-mox3/+bug/1665266
+
+From 05064cdb6ea7a16450c6beae2b6f7c6074212a69 Mon Sep 17 00:00:00 2001
+From: Zac Medico <zmedico@gentoo.org>
+Date: Thu, 16 Feb 2017 00:24:10 -0800
+Subject: [PATCH] RegexTest: python3.6 compatibility
+
+These fixes are backward-compatible with older python versions:
+
+* raw strings fix invalid escape sequences
+* flags=8 fixes ValueError: cannot use LOCALE flag with a str pattern
+---
+ mox3/tests/test_mox.py | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/mox3/tests/test_mox.py b/mox3/tests/test_mox.py
+index 15ac565..3a1af17 100644
+--- a/mox3/tests/test_mox.py
++++ b/mox3/tests/test_mox.py
+@@ -312,12 +312,12 @@ class RegexTest(testtools.TestCase):
+ def testReprWithoutFlags(self):
+ """repr should return the regular expression pattern."""
+ self.assertTrue(
+- repr(mox.Regex(r"a\s+b")) == "<regular expression 'a\s+b'>")
++ repr(mox.Regex(r"a\s+b")) == r"<regular expression 'a\s+b'>")
+
+ def testReprWithFlags(self):
+ """repr should return the regular expression pattern and flags."""
+- self.assertTrue(repr(mox.Regex(r"a\s+b", flags=4)) ==
+- "<regular expression 'a\s+b', flags=4>")
++ self.assertTrue(repr(mox.Regex(r"a\s+b", flags=8)) ==
++ r"<regular expression 'a\s+b', flags=8>")
+
+
+ class IsTest(testtools.TestCase):
+--
+2.10.2
+
diff --git a/gnu/packages/patches/python-parse-too-many-fields.patch b/gnu/packages/patches/python-parse-too-many-fields.patch
deleted file mode 100644
index 9db6b91a7f..0000000000
--- a/gnu/packages/patches/python-parse-too-many-fields.patch
+++ /dev/null
@@ -1,52 +0,0 @@
-From 32f15cfefb7c7b6476360ac65cba807aa3dfccfa Mon Sep 17 00:00:00 2001
-From: David King <dking@redhat.com>
-Date: Mon, 14 Dec 2015 09:58:19 +0000
-Subject: [PATCH] Fix test_too_many_fields with Python 3.5
-
-taken from https://github.com/r1chardj0n3s/parse/pull/34
-
-Python versions before 3.5 had a limit of 100 groups in regular
-expressions. This limit was removed during 3.5 development:
-
-http://bugs.python.org/issue22437
-https://hg.python.org/cpython/rev/0b85ea4bd1af
-
-The test_too_many_fields test asserts that the limit exists by
-attempting to parse a string with 15 fields, which triggers the 100
-named groups limit.
-
-Adjust the test so that if first checks to see whether the limit of 100
-named groups exists, and only assert that parsing 15 fields fails if
-that is the case.
----
- test_parse.py | 10 ++++++++--
- 1 file changed, 8 insertions(+), 2 deletions(-)
-
-diff --git a/test_parse.py b/test_parse.py
-index c524349..1d50568 100755
---- a/test_parse.py
-+++ b/test_parse.py
-@@ -6,6 +6,7 @@
-
- import unittest
- from datetime import datetime, time
-+import re
-
- import parse
-
-@@ -624,8 +625,13 @@ def test_mixed_type_variant(self):
- self.assertEqual(r.fixed[21], 'spam')
-
- def test_too_many_fields(self):
-- p = parse.compile('{:ti}' * 15)
-- self.assertRaises(parse.TooManyFields, p.parse, '')
-+ # Python 3.5 removed the limit of 100 named groups in a regular expression,
-+ # so only test for the exception if the limit exists.
-+ try:
-+ re.compile("".join("(?P<n{n}>{n}-)".format(n=i) for i in range(101)))
-+ except AssertionError:
-+ p = parse.compile('{:ti}' * 15)
-+ self.assertRaises(parse.TooManyFields, p.parse, '')
-
-
- class TestSearch(unittest.TestCase):
diff --git a/gnu/packages/patches/thefuck-test-environ.patch b/gnu/packages/patches/thefuck-test-environ.patch
index 0c602a38c4..54194d8928 100644
--- a/gnu/packages/patches/thefuck-test-environ.patch
+++ b/gnu/packages/patches/thefuck-test-environ.patch
@@ -1,14 +1,14 @@
Retain environment setting of "HOME" for tests that need os.path.expanduser()
to return a readable directory in the build chroot.
---- thefuck-3.15/tests/test_conf.py
-+++ thefuck-3.15/tests/test_conf.py
-@@ -12,7 +12,7 @@
-
- @pytest.fixture
- def environ(monkeypatch):
-- data = {}
-+ data = {"HOME": os.environ.get("HOME")}
- monkeypatch.setattr('thefuck.conf.os.environ', data)
- return data
+--- thefuck-3.25/tests/conftest.py
++++ thefuck-3.25/tests/conftest.py
+@@ -64,6 +64,7 @@ def set_shell(monkeypatch):
+ @pytest.fixture(autouse=True)
+ def os_environ(monkeypatch):
+- env = {'PATH': os.environ['PATH']}
++ env = {'PATH': os.environ['PATH'],
++ 'HOME': os.environ['HOME']}
+ monkeypatch.setattr('os.environ', env)
+ return env
diff --git a/gnu/packages/patches/wavpack-CVE-2018-6767.patch b/gnu/packages/patches/wavpack-CVE-2018-6767.patch
new file mode 100644
index 0000000000..f49a8f0fd6
--- /dev/null
+++ b/gnu/packages/patches/wavpack-CVE-2018-6767.patch
@@ -0,0 +1,119 @@
+Fix CVE-2018-6767:
+https://nvd.nist.gov/vuln/detail/CVE-2018-6767
+https://security-tracker.debian.org/tracker/CVE-2018-6767
+
+Patch copied from upstream source repository:
+https://github.com/dbry/WavPack/commit/d5bf76b5a88d044a1be1d5656698e3ba737167e5
+
+From d5bf76b5a88d044a1be1d5656698e3ba737167e5 Mon Sep 17 00:00:00 2001
+From: David Bryant <david@wavpack.com>
+Date: Sun, 4 Feb 2018 11:28:15 -0800
+Subject: [PATCH] issue #27, do not overwrite stack on corrupt RF64 file
+
+---
+ cli/riff.c | 39 ++++++++++++++++++++++++++++++++-------
+ 1 file changed, 32 insertions(+), 7 deletions(-)
+
+diff --git a/cli/riff.c b/cli/riff.c
+index 8b1af45..de98c1e 100644
+--- a/cli/riff.c
++++ b/cli/riff.c
+@@ -42,6 +42,7 @@ typedef struct {
+
+ #pragma pack(pop)
+
++#define CS64ChunkFormat "4D"
+ #define DS64ChunkFormat "DDDL"
+
+ #define WAVPACK_NO_ERROR 0
+@@ -101,13 +102,13 @@ int ParseRiffHeaderConfig (FILE *infile, char *infilename, char *fourcc, Wavpack
+
+ if (!strncmp (chunk_header.ckID, "ds64", 4)) {
+ if (chunk_header.ckSize < sizeof (DS64Chunk) ||
+- !DoReadFile (infile, &ds64_chunk, chunk_header.ckSize, &bcount) ||
+- bcount != chunk_header.ckSize) {
++ !DoReadFile (infile, &ds64_chunk, sizeof (DS64Chunk), &bcount) ||
++ bcount != sizeof (DS64Chunk)) {
+ error_line ("%s is not a valid .WAV file!", infilename);
+ return WAVPACK_SOFT_ERROR;
+ }
+ else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
+- !WavpackAddWrapper (wpc, &ds64_chunk, chunk_header.ckSize)) {
++ !WavpackAddWrapper (wpc, &ds64_chunk, sizeof (DS64Chunk))) {
+ error_line ("%s", WavpackGetErrorMessage (wpc));
+ return WAVPACK_SOFT_ERROR;
+ }
+@@ -315,10 +316,11 @@ int ParseRiffHeaderConfig (FILE *infile, char *infilename, char *fourcc, Wavpack
+
+ int WriteRiffHeader (FILE *outfile, WavpackContext *wpc, int64_t total_samples, int qmode)
+ {
+- int do_rf64 = 0, write_junk = 1;
++ int do_rf64 = 0, write_junk = 1, table_length = 0;
+ ChunkHeader ds64hdr, datahdr, fmthdr;
+ RiffChunkHeader riffhdr;
+ DS64Chunk ds64_chunk;
++ CS64Chunk cs64_chunk;
+ JunkChunk junkchunk;
+ WaveHeader wavhdr;
+ uint32_t bcount;
+@@ -380,6 +382,7 @@ int WriteRiffHeader (FILE *outfile, WavpackContext *wpc, int64_t total_samples,
+ strncpy (riffhdr.formType, "WAVE", sizeof (riffhdr.formType));
+ total_riff_bytes = sizeof (riffhdr) + wavhdrsize + sizeof (datahdr) + ((total_data_bytes + 1) & ~(int64_t)1);
+ if (do_rf64) total_riff_bytes += sizeof (ds64hdr) + sizeof (ds64_chunk);
++ total_riff_bytes += table_length * sizeof (CS64Chunk);
+ if (write_junk) total_riff_bytes += sizeof (junkchunk);
+ strncpy (fmthdr.ckID, "fmt ", sizeof (fmthdr.ckID));
+ strncpy (datahdr.ckID, "data", sizeof (datahdr.ckID));
+@@ -394,11 +397,12 @@ int WriteRiffHeader (FILE *outfile, WavpackContext *wpc, int64_t total_samples,
+
+ if (do_rf64) {
+ strncpy (ds64hdr.ckID, "ds64", sizeof (ds64hdr.ckID));
+- ds64hdr.ckSize = sizeof (ds64_chunk);
++ ds64hdr.ckSize = sizeof (ds64_chunk) + (table_length * sizeof (CS64Chunk));
+ CLEAR (ds64_chunk);
+ ds64_chunk.riffSize64 = total_riff_bytes;
+ ds64_chunk.dataSize64 = total_data_bytes;
+ ds64_chunk.sampleCount64 = total_samples;
++ ds64_chunk.tableLength = table_length;
+ riffhdr.ckSize = (uint32_t) -1;
+ datahdr.ckSize = (uint32_t) -1;
+ WavpackNativeToLittleEndian (&ds64hdr, ChunkHeaderFormat);
+@@ -409,6 +413,14 @@ int WriteRiffHeader (FILE *outfile, WavpackContext *wpc, int64_t total_samples,
+ datahdr.ckSize = (uint32_t) total_data_bytes;
+ }
+
++ // this "table" is just a dummy placeholder for testing (normally not written)
++
++ if (table_length) {
++ strncpy (cs64_chunk.ckID, "dmmy", sizeof (cs64_chunk.ckID));
++ cs64_chunk.chunkSize64 = 12345678;
++ WavpackNativeToLittleEndian (&cs64_chunk, CS64ChunkFormat);
++ }
++
+ // write the RIFF chunks up to just before the data starts
+
+ WavpackNativeToLittleEndian (&riffhdr, ChunkHeaderFormat);
+@@ -418,8 +430,21 @@ int WriteRiffHeader (FILE *outfile, WavpackContext *wpc, int64_t total_samples,
+
+ if (!DoWriteFile (outfile, &riffhdr, sizeof (riffhdr), &bcount) || bcount != sizeof (riffhdr) ||
+ (do_rf64 && (!DoWriteFile (outfile, &ds64hdr, sizeof (ds64hdr), &bcount) || bcount != sizeof (ds64hdr))) ||
+- (do_rf64 && (!DoWriteFile (outfile, &ds64_chunk, sizeof (ds64_chunk), &bcount) || bcount != sizeof (ds64_chunk))) ||
+- (write_junk && (!DoWriteFile (outfile, &junkchunk, sizeof (junkchunk), &bcount) || bcount != sizeof (junkchunk))) ||
++ (do_rf64 && (!DoWriteFile (outfile, &ds64_chunk, sizeof (ds64_chunk), &bcount) || bcount != sizeof (ds64_chunk)))) {
++ error_line ("can't write .WAV data, disk probably full!");
++ return FALSE;
++ }
++
++ // again, this is normally not written except for testing
++
++ while (table_length--)
++ if (!DoWriteFile (outfile, &cs64_chunk, sizeof (cs64_chunk), &bcount) || bcount != sizeof (cs64_chunk)) {
++ error_line ("can't write .WAV data, disk probably full!");
++ return FALSE;
++ }
++
++
++ if ((write_junk && (!DoWriteFile (outfile, &junkchunk, sizeof (junkchunk), &bcount) || bcount != sizeof (junkchunk))) ||
+ !DoWriteFile (outfile, &fmthdr, sizeof (fmthdr), &bcount) || bcount != sizeof (fmthdr) ||
+ !DoWriteFile (outfile, &wavhdr, wavhdrsize, &bcount) || bcount != wavhdrsize ||
+ !DoWriteFile (outfile, &datahdr, sizeof (datahdr), &bcount) || bcount != sizeof (datahdr)) {
diff --git a/gnu/packages/patches/wxmaxima-do-not-use-old-gnuplot-parameters.patch b/gnu/packages/patches/wxmaxima-do-not-use-old-gnuplot-parameters.patch
new file mode 100644
index 0000000000..345101bd3a
--- /dev/null
+++ b/gnu/packages/patches/wxmaxima-do-not-use-old-gnuplot-parameters.patch
@@ -0,0 +1,26 @@
+This fixes the wxplot2d plotting issue found at
+https://github.com/andrejv/wxmaxima/issues/973.
+
+From 5a0693c97ceaa4935b908f1e478126896952f399 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Gunter=20K=C3=B6nigsmann?= <gunter@peterpall.de>
+Date: Mon, 19 Feb 2018 05:37:35 +0100
+Subject: [PATCH] Seems I accidentally made wxMaxima to default to parameters
+ for old gnuplots. Resolves #973
+
+---
+ data/wxmathml.lisp.in | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/data/wxmathml.lisp.in b/data/wxmathml.lisp.in
+index a32e3fc3..4e19acaf 100644
+--- a/data/wxmathml.lisp.in
++++ b/data/wxmathml.lisp.in
+@@ -43,7 +43,7 @@
+ (defvar $wxsubscripts t
+ "Recognize TeX-style subscripts")
+ (defvar $wxplot_pngcairo nil "Use gnuplot's pngcairo terminal for new plots?")
+-(defmvar $wxplot_old_gnuplot t)
++(defmvar $wxplot_old_gnuplot nil)
+
+ (defun $wxstatusbar (status)
+ (format t "<statusbar>~a</statusbar>~%" status))
diff --git a/gnu/packages/patches/zsh-CVE-2018-7548.patch b/gnu/packages/patches/zsh-CVE-2018-7548.patch
new file mode 100644
index 0000000000..1ee15fad73
--- /dev/null
+++ b/gnu/packages/patches/zsh-CVE-2018-7548.patch
@@ -0,0 +1,48 @@
+Fix CVE-2018-7548:
+
+https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-7548
+
+Patch copied from upstream source repository:
+
+https://sourceforge.net/p/zsh/code/ci/110b13e1090bc31ac1352b28adc2d02b6d25a102
+
+From 110b13e1090bc31ac1352b28adc2d02b6d25a102 Mon Sep 17 00:00:00 2001
+From: Joey Pabalinas <joeypabalinas@gmail.com>
+Date: Tue, 23 Jan 2018 22:28:08 -0800
+Subject: [PATCH] 42313: avoid null-pointer deref when using ${(PA)...} on an
+ empty array result
+
+---
+ ChangeLog | 5 +++++
+ Src/subst.c | 2 +-
+ 2 files changed, 6 insertions(+), 1 deletion(-)
+
+#diff --git a/ChangeLog b/ChangeLog
+#index d2ba94afc..3037edda4 100644
+#--- a/ChangeLog
+#+++ b/ChangeLog
+#@@ -1,3 +1,8 @@
+#+2018-01-23 Barton E. Schaefer <schaefer@zsh.org>
+#+
+#+ * Joey Pabalinas: 42313: Src/subst.c: avoid null-pointer deref
+#+ when using ${(PA)...} on an empty array result
+#+
+# 2018-01-23 Oliver Kiddle <okiddle@yahoo.co.uk>
+#
+# * 42317: Completion/Linux/Command/_cryptsetup,
+diff --git a/Src/subst.c b/Src/subst.c
+index d027e3d83..a265a187e 100644
+--- a/Src/subst.c
++++ b/Src/subst.c
+@@ -2430,7 +2430,7 @@ paramsubst(LinkList l, LinkNode n, char **str, int qt, int pf_flags,
+ val = aval[0];
+ isarr = 0;
+ }
+- s = dyncat(val, s);
++ s = val ? dyncat(val, s) : dupstring(s);
+ /* Now behave po-faced as if it was always like that... */
+ subexp = 0;
+ /*
+--
+2.16.2
+
diff --git a/gnu/packages/patches/zsh-CVE-2018-7549.patch b/gnu/packages/patches/zsh-CVE-2018-7549.patch
new file mode 100644
index 0000000000..abefcdf2f9
--- /dev/null
+++ b/gnu/packages/patches/zsh-CVE-2018-7549.patch
@@ -0,0 +1,56 @@
+Fix CVE-2018-7549:
+
+https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-7549
+
+Patch copied from upstream source repository:
+
+https://sourceforge.net/p/zsh/code/ci/c2cc8b0fbefc9868fa83537f5b6d90fc1ec438dd
+
+From c2cc8b0fbefc9868fa83537f5b6d90fc1ec438dd Mon Sep 17 00:00:00 2001
+From: Stephane Chazelas <stephane.chazelas@gmail.com>
+Date: Fri, 22 Dec 2017 22:17:09 +0000
+Subject: [PATCH] Avoid crash copying empty hash table.
+
+Visible with typeset -p.
+---
+ ChangeLog | 2 ++
+ Src/params.c | 11 +++++++----
+ 2 files changed, 9 insertions(+), 4 deletions(-)
+
+#diff --git a/ChangeLog b/ChangeLog
+#index f74c26b88..e3628cfa7 100644
+#--- a/ChangeLog
+#+++ b/ChangeLog
+#@@ -1,5 +1,7 @@
+# 2018-01-04 Peter Stephenson <p.stephenson@samsung.com>
+#
+#+ * Stephane: 42159: Src/params.c: avoid crash copying empty hash table.
+#+
+# * Sebastian: 42188: Src/Modules/system.c: It is necessary to
+# close the lock descriptor in some failure cases.
+#
+diff --git a/Src/params.c b/Src/params.c
+index 31ff0445b..de7730ae7 100644
+--- a/Src/params.c
++++ b/Src/params.c
+@@ -549,10 +549,13 @@ scancopyparams(HashNode hn, UNUSED(int flags))
+ HashTable
+ copyparamtable(HashTable ht, char *name)
+ {
+- HashTable nht = newparamtable(ht->hsize, name);
+- outtable = nht;
+- scanhashtable(ht, 0, 0, 0, scancopyparams, 0);
+- outtable = NULL;
++ HashTable nht = 0;
++ if (ht) {
++ nht = newparamtable(ht->hsize, name);
++ outtable = nht;
++ scanhashtable(ht, 0, 0, 0, scancopyparams, 0);
++ outtable = NULL;
++ }
+ return nht;
+ }
+
+--
+2.16.2
+