ffmpeg: fix assembling with binutils v2.41

The fix addresses a failure when building ffmpeg with binutils v2.41,
documented upstream here: https://trac.ffmpeg.org/ticket/10405
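
For context: binutils >= 2.41 rejects shift instructions whose
immediate operand lies outside the encodable 0..31 range, where older
assemblers accepted and silently truncated the count. A minimal sketch
of the failing pattern and of the clipping applied by the patch below
(illustration only, not taken from the ffmpeg sources; the file name
and the constant are made up):

    /* repro.c -- compile with gcc -c repro.c on x86/x86_64 */
    #include <stdint.h>

    uint32_t broken(uint32_t a)
    {
        /* (uint8_t)-24 == 232: older binutils truncated the count to
         * 232 & 31 == 8, binutils 2.41 errors out on the operand */
        __asm__ ("shrl %1, %0" : "+r"(a) : "i"((uint8_t)-24));
        return a;
    }

    uint32_t fixed(uint32_t a)
    {
        /* masking keeps the immediate inside the range the
         * instruction encoding allows */
        __asm__ ("shrl %1, %0" : "+r"(a) : "i"(-24 & 0x1F));
        return a;
    }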

The build failure reported against this package is tracked here:
https://github.com/openwrt/packages/issues/22170

Remarks:
The patch has been merged into ffmpeg upstream and will be part of
the next ffmpeg release, so it applies only to the current version
and should be removed when the package is updated to the next
ffmpeg release.

Signed-off-by: Oskari Rauta <oskari.rauta@gmail.com>
Co-authored-by: Josef Schlehofer <pepe.schlehofer@gmail.com>

@@ -10,7 +10,7 @@ include $(TOPDIR)/rules.mk

 PKG_NAME:=ffmpeg
 PKG_VERSION:=5.1.3
-PKG_RELEASE:=1
+PKG_RELEASE:=2

 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
 PKG_SOURCE_URL:=https://ffmpeg.org/releases/

@@ -0,0 +1,71 @@
From: Rémi Denis-Courmont <remi@remlab.net>
Date: Sun, 16 Jul 2023 15:18:02 +0000 (+0300)
Subject: avcodec/x86/mathops: clip constants used with shift instructions within inline assembly
X-Git-Url: http://git.ffmpeg.org/gitweb/ffmpeg.git/commitdiff_plain/25cd95a9dc3510c3cc0d7aad6f9d83f6a1078c7e?hp=e5b5dd66535f444451e0fee59247b224d866f334
avcodec/x86/mathops: clip constants used with shift instructions within inline assembly
Fixes assembling with binutil as >= 2.41
Signed-off-by: James Almer <jamrial@gmail.com>
(cherry picked from commit effadce6c756247ea8bae32dc13bb3e6f464f0eb)
---
--- a/libavcodec/x86/mathops.h
+++ b/libavcodec/x86/mathops.h
@@ -35,12 +35,20 @@
static av_always_inline av_const int MULL(int a, int b, unsigned shift)
{
int rt, dummy;
+ if (__builtin_constant_p(shift))
__asm__ (
"imull %3 \n\t"
"shrdl %4, %%edx, %%eax \n\t"
:"=a"(rt), "=d"(dummy)
- :"a"(a), "rm"(b), "ci"((uint8_t)shift)
+ :"a"(a), "rm"(b), "i"(shift & 0x1F)
);
+ else
+ __asm__ (
+ "imull %3 \n\t"
+ "shrdl %4, %%edx, %%eax \n\t"
+ :"=a"(rt), "=d"(dummy)
+ :"a"(a), "rm"(b), "c"((uint8_t)shift)
+ );
return rt;
}
@@ -113,19 +121,31 @@ __asm__ volatile(\
// avoid +32 for shift optimization (gcc should do that ...)
#define NEG_SSR32 NEG_SSR32
static inline int32_t NEG_SSR32( int32_t a, int8_t s){
+ if (__builtin_constant_p(s))
__asm__ ("sarl %1, %0\n\t"
: "+r" (a)
- : "ic" ((uint8_t)(-s))
+ : "i" (-s & 0x1F)
);
+ else
+ __asm__ ("sarl %1, %0\n\t"
+ : "+r" (a)
+ : "c" ((uint8_t)(-s))
+ );
return a;
}
#define NEG_USR32 NEG_USR32
static inline uint32_t NEG_USR32(uint32_t a, int8_t s){
+ if (__builtin_constant_p(s))
__asm__ ("shrl %1, %0\n\t"
: "+r" (a)
- : "ic" ((uint8_t)(-s))
+ : "i" (-s & 0x1F)
);
+ else
+ __asm__ ("shrl %1, %0\n\t"
+ : "+r" (a)
+ : "c" ((uint8_t)(-s))
+ );
return a;
}
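
Note on the pattern used in the patch above: an immediate shift count
is only emitted when the compiler can prove the count is a constant,
and that constant is clipped into the encodable 0..31 range; a
non-constant count keeps going through %cl, where the CPU masks it
itself. A standalone sketch of the same dual-path idea (illustration
only, not the ffmpeg code; names are made up):

    /* sketch.c -- mirrors the __builtin_constant_p() split above */
    #include <stdint.h>

    static inline uint32_t shr_neg(uint32_t a, int8_t s)
    {
        if (__builtin_constant_p(s))
            /* constant count: emit it as an immediate, masked so
             * binutils >= 2.41 accepts the operand */
            __asm__ ("shrl %1, %0" : "+r"(a) : "i"((-s) & 0x1F));
        else
            /* run-time count: pass it in %cl */
            __asm__ ("shrl %1, %0" : "+r"(a) : "c"((uint8_t)(-s)));
        return a;
    }

    /* with optimization enabled the first caller assembles to
     * "shrl $8, ..." and the second to "shrl %cl, ..." */
    uint32_t use_const(uint32_t a)         { return shr_neg(a, -8); }
    uint32_t use_var(uint32_t a, int8_t s) { return shr_neg(a, s); }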