Diffstat (limited to 'multimedia/mythtv/files/patch-CVE-2017-15186')
-rw-r--r--  multimedia/mythtv/files/patch-CVE-2017-15186 | 78
1 file changed, 78 insertions, 0 deletions
diff --git a/multimedia/mythtv/files/patch-CVE-2017-15186 b/multimedia/mythtv/files/patch-CVE-2017-15186
new file mode 100644
index 000000000000..4dbc51f1e254
--- /dev/null
+++ b/multimedia/mythtv/files/patch-CVE-2017-15186
@@ -0,0 +1,78 @@
+From 0eb0b21c7f4f2b6a3a74d2d252f95b81a4d472c3 Mon Sep 17 00:00:00 2001
+From: Michael Niedermayer <michael@niedermayer.cc>
+Date: Sat, 30 Sep 2017 00:20:09 +0200
+Subject: [PATCH] avcodec/x86/lossless_videoencdsp: Fix handling of small
+ widths
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Fixes out of array access
+Fixes: crash-huf.avi
+
+Regression since: 6b41b4414934cc930468ccd5db598dd6ef643987
+
+This could also be fixed by adding checks in the C code that calls the dsp
+
+Found-by: Zhibin Hu and 连一汉 <lianyihan@360.cn>
+Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
+(cherry picked from commit df62b70de8aaa285168e72fe8f6e740843ca91fa)
+Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
+---
+ libavcodec/x86/huffyuvencdsp.asm | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+diff --git libavcodec/x86/huffyuvencdsp.asm libavcodec/x86/huffyuvencdsp.asm
+index a55a1de65de..7a1ce2e839e 100644
+--- external/FFmpeg/libavcodec/x86/huffyuvencdsp.asm
++++ external/FFmpeg/libavcodec/x86/huffyuvencdsp.asm
+@@ -42,10 +42,11 @@ cglobal diff_bytes, 4,5,2, dst, src1, src2, w
+ %define i t0q
+ %endmacro
+
+-; label to jump to if w < regsize
+-%macro DIFF_BYTES_LOOP_PREP 1
++; labels to jump to if w < regsize and w < 0
++%macro DIFF_BYTES_LOOP_PREP 2
+ mov i, wq
+ and i, -2 * regsize
++ js %2
+ jz %1
+ add dstq, i
+ add src1q, i
+@@ -87,7 +88,7 @@ cglobal diff_bytes, 4,5,2, dst, src1, src2, w
+ %if mmsize > 16
+ ; fall back to narrower xmm
+ %define regsize mmsize / 2
+- DIFF_BYTES_LOOP_PREP .setup_loop_gpr_aa
++ DIFF_BYTES_LOOP_PREP .setup_loop_gpr_aa, .end_aa
+ .loop2_%1%2:
+ DIFF_BYTES_LOOP_CORE %1, %2, xm0, xm1
+ add i, 2 * regsize
+@@ -114,7 +115,7 @@ cglobal diff_bytes, 4,5,2, dst, src1, src2, w
+ INIT_MMX mmx
+ DIFF_BYTES_PROLOGUE
+ %define regsize mmsize
+- DIFF_BYTES_LOOP_PREP .skip_main_aa
++ DIFF_BYTES_LOOP_PREP .skip_main_aa, .end_aa
+ DIFF_BYTES_BODY a, a
+ %undef i
+ %endif
+@@ -122,7 +123,7 @@ DIFF_BYTES_PROLOGUE
+ INIT_XMM sse2
+ DIFF_BYTES_PROLOGUE
+ %define regsize mmsize
+- DIFF_BYTES_LOOP_PREP .skip_main_aa
++ DIFF_BYTES_LOOP_PREP .skip_main_aa, .end_aa
+ test dstq, regsize - 1
+ jnz .loop_uu
+ test src1q, regsize - 1
+@@ -138,7 +139,7 @@ DIFF_BYTES_PROLOGUE
+ %define regsize mmsize
+ ; Directly using unaligned SSE2 version is marginally faster than
+ ; branching based on arguments.
+- DIFF_BYTES_LOOP_PREP .skip_main_uu
++ DIFF_BYTES_LOOP_PREP .skip_main_uu, .end_uu
+ test dstq, regsize - 1
+ jnz .loop_uu
+ test src1q, regsize - 1
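
Note on the fix: the commit message above observes that the out-of-array access could also have been prevented by adding checks in the C code that calls the dsp, instead of hardening the asm. The asm fix works because "and i, -2 * regsize" preserves the sign bit of w, so the new "js %2" branch jumps to an end label whenever w is negative and the loops are never entered with a bogus byte count. Below is a minimal sketch of what the caller-side alternative might look like. The function-pointer signature is modeled on FFmpeg's diff_bytes dsp entry point; the wrapper name and everything around it are illustrative assumptions, not part of the patch.

#include <stdint.h>

/* Signature modeled on the diff_bytes member of FFmpeg's lossless
 * video encoder dsp context. */
typedef void (*diff_bytes_fn)(uint8_t *dst, const uint8_t *src1,
                              const uint8_t *src2, intptr_t w);

/* Hypothetical caller-side guard: reject non-positive widths before
 * the asm runs, which is the alternative fix the commit message
 * mentions. The patched asm now performs the equivalent sign check
 * itself via the added 'js' branch. */
static void diff_bytes_checked(diff_bytes_fn diff_bytes, uint8_t *dst,
                               const uint8_t *src1, const uint8_t *src2,
                               intptr_t w)
{
    if (w <= 0)
        return;
    diff_bytes(dst, src1, src2, w);
}

Fixing it once in the asm keeps every existing caller safe without adding a branch at each call site, which is presumably why upstream chose that route over the C-side check.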