author     Josselin Poiret <[email protected]>    2024-01-16 11:32:22 +0100
committer  Ludovic Courtès <[email protected]>          2024-08-31 10:44:27 +0200
commit     ce21952f81ffe7f7674348437535e30e1fcd662b
tree       36bb74221aeeb0e380e53cb570f4c99c10f3f968 /gnu/packages/patches/ffmpeg-4-binutils-2.41.patch
parent     2b9c730833cb7287981e1cc4f3398de73373cbab
gnu: ffmpeg-4: Fix build with binutils ≥ 2.41.
* gnu/packages/patches/ffmpeg-4-binutils-2.41.patch: New patch.
* gnu/local.mk (dist_patch_DATA): Register it.
* gnu/packages/video.scm (ffmpeg-4): Use it.

Change-Id: I07ec3525edb220d85e086e145b9561ea3d084b6c
Diffstat (limited to 'gnu/packages/patches/ffmpeg-4-binutils-2.41.patch')
-rw-r--r--  gnu/packages/patches/ffmpeg-4-binutils-2.41.patch  76
1 file changed, 76 insertions(+), 0 deletions(-)
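Background on the fix (illustrative, not part of the commit): the helpers patched below hand the shift count to the assembler through a combined "ci" constraint, so GCC may encode it as an immediate. For NEG_USR32 that count is (uint8_t)(-s); already for s = 1 it is 255, which older assemblers truncated modulo 32 but binutils ≥ 2.41 rejects. A minimal sketch of the pre-patch pattern, assuming an x86 target; the name neg_usr32_old is hypothetical:

    #include <stdint.h>

    /* Pre-patch pattern: with the "ci" constraint GCC may emit the count
     * as an immediate, e.g. "shrl $255, %eax" for s = 1, which binutils
     * >= 2.41 no longer accepts. */
    static inline uint32_t neg_usr32_old(uint32_t a, int8_t s)
    {
        __asm__ ("shrl %1, %0\n\t"
                 : "+r" (a)
                 : "ic" ((uint8_t)(-s)));
        return a;
    }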
diff --git a/gnu/packages/patches/ffmpeg-4-binutils-2.41.patch b/gnu/packages/patches/ffmpeg-4-binutils-2.41.patch
new file mode 100644
index 0000000000..52a9310029
--- /dev/null
+++ b/gnu/packages/patches/ffmpeg-4-binutils-2.41.patch
@@ -0,0 +1,76 @@
+From effadce6c756247ea8bae32dc13bb3e6f464f0eb Mon Sep 17 00:00:00 2001
+From: =?utf8?q?R=C3=A9mi=20Denis-Courmont?= <[email protected]>
+Date: Sun, 16 Jul 2023 18:18:02 +0300
+Subject: [PATCH] avcodec/x86/mathops: clip constants used with shift
+ instructions within inline assembly
+
+Fixes assembling with binutil as >= 2.41
+
+Signed-off-by: James Almer <[email protected]>
+---
+ libavcodec/x86/mathops.h | 26 +++++++++++++++++++++++---
+ 1 file changed, 23 insertions(+), 3 deletions(-)
+
+diff --git a/libavcodec/x86/mathops.h b/libavcodec/x86/mathops.h
+index 6298f5ed19..ca7e2dffc1 100644
+--- a/libavcodec/x86/mathops.h
++++ b/libavcodec/x86/mathops.h
+@@ -35,12 +35,20 @@
+ static av_always_inline av_const int MULL(int a, int b, unsigned shift)
+ {
+ int rt, dummy;
++ if (__builtin_constant_p(shift))
+ __asm__ (
+ "imull %3 \n\t"
+ "shrdl %4, %%edx, %%eax \n\t"
+ :"=a"(rt), "=d"(dummy)
+- :"a"(a), "rm"(b), "ci"((uint8_t)shift)
++ :"a"(a), "rm"(b), "i"(shift & 0x1F)
+ );
++ else
++ __asm__ (
++ "imull %3 \n\t"
++ "shrdl %4, %%edx, %%eax \n\t"
++ :"=a"(rt), "=d"(dummy)
++ :"a"(a), "rm"(b), "c"((uint8_t)shift)
++ );
+ return rt;
+ }
+
+@@ -113,19 +121,31 @@ __asm__ volatile(\
+ // avoid +32 for shift optimization (gcc should do that ...)
+ #define NEG_SSR32 NEG_SSR32
+ static inline int32_t NEG_SSR32( int32_t a, int8_t s){
++ if (__builtin_constant_p(s))
+ __asm__ ("sarl %1, %0\n\t"
+ : "+r" (a)
+- : "ic" ((uint8_t)(-s))
++ : "i" (-s & 0x1F)
+ );
++ else
++ __asm__ ("sarl %1, %0\n\t"
++ : "+r" (a)
++ : "c" ((uint8_t)(-s))
++ );
+ return a;
+ }
+
+ #define NEG_USR32 NEG_USR32
+ static inline uint32_t NEG_USR32(uint32_t a, int8_t s){
++ if (__builtin_constant_p(s))
+ __asm__ ("shrl %1, %0\n\t"
+ : "+r" (a)
+- : "ic" ((uint8_t)(-s))
++ : "i" (-s & 0x1F)
+ );
++ else
++ __asm__ ("shrl %1, %0\n\t"
++ : "+r" (a)
++ : "c" ((uint8_t)(-s))
++ );
+ return a;
+ }
+
+--
+2.25.1
+
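The change follows one pattern across MULL, NEG_SSR32, and NEG_USR32: when the shift count is a compile-time constant, mask it into 0..31 and keep the "i" (immediate) constraint; otherwise force the "c" constraint so the count is passed in %cl, where the CPU masks it modulo 32 at run time. A self-contained sketch of that dispatch, assuming an x86 target; shr32 is an illustrative name, not an FFmpeg function:

    #include <stdint.h>

    static inline uint32_t shr32(uint32_t a, unsigned s)
    {
        if (__builtin_constant_p(s))
            /* Constant count: mask into 0..31 so the immediate is always
             * in range for the assembler. */
            __asm__ ("shrl %1, %0\n\t"
                     : "+r" (a)
                     : "i" (s & 0x1F));
        else
            /* Variable count: pass it in %cl; the CPU masks it mod 32. */
            __asm__ ("shrl %1, %0\n\t"
                     : "+r" (a)
                     : "c" ((uint8_t)s));
        return a;
    }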