mirror of https://github.com/ossrs/srs.git
parent 4308f238c0
commit 37c84eccc0
28 changed files with 8441 additions and 50 deletions
trunk/3rdparty/ffmpeg-4.2-fit/libavutil/x86/Makefile (vendored, new file, 18 lines)
@@ -0,0 +1,18 @@
OBJS += x86/cpu.o \
        x86/fixed_dsp_init.o \
        x86/float_dsp_init.o \
        x86/imgutils_init.o \
        x86/lls_init.o \

OBJS-$(CONFIG_PIXELUTILS) += x86/pixelutils_init.o \

EMMS_OBJS_$(HAVE_MMX_INLINE)_$(HAVE_MMX_EXTERNAL)_$(HAVE_MM_EMPTY) = x86/emms.o

X86ASM-OBJS += x86/cpuid.o \
               $(EMMS_OBJS__yes_) \
               x86/fixed_dsp.o \
               x86/float_dsp.o \
               x86/imgutils.o \
               x86/lls.o \

X86ASM-OBJS-$(CONFIG_PIXELUTILS) += x86/pixelutils.o \
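The *_init.o objects listed under OBJS are the C-side dispatchers: at startup they probe the CPU (using the helpers in cpuid.asm below) and point the generic DSP function tables at the assembly kernels built from the X86ASM-OBJS entries. A minimal C sketch of that pattern follows; the context and function names are illustrative assumptions, not copied from the vendored init sources.

/* Runtime-dispatch sketch; names are illustrative assumptions. */
typedef struct FloatDSPContext {
    void (*vector_fmul)(float *dst, const float *src0,
                        const float *src1, int len);
} FloatDSPContext;

/* Kernel assembled from float_dsp.asm below (INIT_XMM sse + cglobal vector_fmul). */
void ff_vector_fmul_sse(float *dst, const float *src0,
                        const float *src1, int len);

void float_dsp_init_x86_sketch(FloatDSPContext *c, int cpu_has_sse)
{
    if (cpu_has_sse)                 /* flag obtained via the cpuid.asm helpers */
        c->vector_fmul = ff_vector_fmul_sse;
}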
trunk/3rdparty/ffmpeg-4.2-fit/libavutil/x86/cpuid.asm (vendored, new file, 91 lines)
@@ -0,0 +1,91 @@
;*****************************************************************************
;* Copyright (C) 2005-2010 x264 project
;*
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;*          Fiona Glaser <fiona@x264.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "x86util.asm"

SECTION .text

;-----------------------------------------------------------------------------
; void ff_cpu_cpuid(int index, int *eax, int *ebx, int *ecx, int *edx)
;-----------------------------------------------------------------------------
cglobal cpu_cpuid, 5,7
    push rbx
    push r4
    push r3
    push r2
    push r1
    mov eax, r0d
    xor ecx, ecx
    cpuid
    pop r4
    mov [r4], eax
    pop r4
    mov [r4], ebx
    pop r4
    mov [r4], ecx
    pop r4
    mov [r4], edx
    pop rbx
    RET

;-----------------------------------------------------------------------------
; void ff_cpu_xgetbv(int op, int *eax, int *edx)
;-----------------------------------------------------------------------------
cglobal cpu_xgetbv, 3,7
    push r2
    push r1
    mov ecx, r0d
    xgetbv
    pop r4
    mov [r4], eax
    pop r4
    mov [r4], edx
    RET

%if ARCH_X86_64 == 0
;-----------------------------------------------------------------------------
; int ff_cpu_cpuid_test(void)
; return 0 if unsupported
;-----------------------------------------------------------------------------
cglobal cpu_cpuid_test
    pushfd
    push ebx
    push ebp
    push esi
    push edi
    pushfd
    pop eax
    mov ebx, eax
    xor eax, 0x200000
    push eax
    popfd
    pushfd
    pop eax
    xor eax, ebx
    pop edi
    pop esi
    pop ebp
    pop ebx
    popfd
    ret
%endif
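Per the prototypes in the comments, the assembled symbols are ff_cpu_cpuid and ff_cpu_xgetbv. Below is a minimal C sketch of how a caller might use them for feature detection; the AVX check is an illustrative example, not the actual libavutil/cpu.c logic.

#include <stdio.h>

void ff_cpu_cpuid(int index, int *eax, int *ebx, int *ecx, int *edx);
void ff_cpu_xgetbv(int op, int *eax, int *edx);

int main(void)
{
    int eax, ebx, ecx, edx;
    ff_cpu_cpuid(1, &eax, &ebx, &ecx, &edx);   /* standard feature leaf */
    int has_avx     = (ecx >> 28) & 1;         /* CPUID.1:ECX.AVX */
    int has_osxsave = (ecx >> 27) & 1;         /* OS exposes XGETBV */

    int avx_usable = 0;
    if (has_avx && has_osxsave) {
        int lo, hi;
        ff_cpu_xgetbv(0, &lo, &hi);            /* read XCR0 */
        avx_usable = (lo & 0x6) == 0x6;        /* XMM and YMM state enabled */
    }
    printf("AVX usable: %d\n", avx_usable);
    return 0;
}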
trunk/3rdparty/ffmpeg-4.2-fit/libavutil/x86/fixed_dsp.asm (vendored, new file, 48 lines)
@@ -0,0 +1,48 @@
;*****************************************************************************
;* x86-optimized Float DSP functions
;*
;* Copyright 2016 James Almer
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "x86util.asm"

SECTION .text

;-----------------------------------------------------------------------------
; void ff_butterflies_fixed(float *src0, float *src1, int len);
;-----------------------------------------------------------------------------
INIT_XMM sse2
cglobal butterflies_fixed, 3,3,3, src0, src1, len
    shl lend, 2
    add src0q, lenq
    add src1q, lenq
    neg lenq

    align 16
.loop:
    mova m0, [src0q + lenq]
    mova m1, [src1q + lenq]
    mova m2, m0
    paddd m0, m1
    psubd m2, m1
    mova [src0q + lenq], m0
    mova [src1q + lenq], m2
    add lenq, mmsize
    jl .loop
    RET
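For reference, a scalar C sketch of what the SSE2 loop above computes. Despite the float * prototype in the comment, the kernel operates on 32-bit fixed-point samples with paddd/psubd; the sketch assumes int32_t buffers and a len that matches the four-samples-per-iteration layout, and is not the libavutil C fallback itself.

#include <stdint.h>

/* Scalar reference sketch of the butterfly. */
static void butterflies_fixed_c(int32_t *src0, int32_t *src1, int len)
{
    for (int i = 0; i < len; i++) {
        int32_t a = src0[i];
        int32_t b = src1[i];
        src0[i] = a + b;   /* paddd path */
        src1[i] = a - b;   /* psubd path */
    }
}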
trunk/3rdparty/ffmpeg-4.2-fit/libavutil/x86/float_dsp.asm (vendored, new file, 484 lines)
@@ -0,0 +1,484 @@
;*****************************************************************************
;* x86-optimized Float DSP functions
;*
;* Copyright 2006 Loren Merritt
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "x86util.asm"

SECTION_RODATA 32
pd_reverse: dd 7, 6, 5, 4, 3, 2, 1, 0

SECTION .text

;-----------------------------------------------------------------------------
; void vector_fmul(float *dst, const float *src0, const float *src1, int len)
;-----------------------------------------------------------------------------
%macro VECTOR_FMUL 0
cglobal vector_fmul, 4,4,2, dst, src0, src1, len
    lea lenq, [lend*4 - 64]
ALIGN 16
.loop:
%assign a 0
%rep 32/mmsize
    mova m0, [src0q + lenq + (a+0)*mmsize]
    mova m1, [src0q + lenq + (a+1)*mmsize]
    mulps m0, m0, [src1q + lenq + (a+0)*mmsize]
    mulps m1, m1, [src1q + lenq + (a+1)*mmsize]
    mova [dstq + lenq + (a+0)*mmsize], m0
    mova [dstq + lenq + (a+1)*mmsize], m1
%assign a a+2
%endrep

    sub lenq, 64
    jge .loop
    REP_RET
%endmacro

INIT_XMM sse
VECTOR_FMUL
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMUL
%endif

;-----------------------------------------------------------------------------
; void vector_dmul(double *dst, const double *src0, const double *src1, int len)
;-----------------------------------------------------------------------------
%macro VECTOR_DMUL 0
cglobal vector_dmul, 4,4,4, dst, src0, src1, len
    lea lend, [lenq*8 - mmsize*4]
ALIGN 16
.loop:
    movaps m0, [src0q + lenq + 0*mmsize]
    movaps m1, [src0q + lenq + 1*mmsize]
    movaps m2, [src0q + lenq + 2*mmsize]
    movaps m3, [src0q + lenq + 3*mmsize]
    mulpd m0, m0, [src1q + lenq + 0*mmsize]
    mulpd m1, m1, [src1q + lenq + 1*mmsize]
    mulpd m2, m2, [src1q + lenq + 2*mmsize]
    mulpd m3, m3, [src1q + lenq + 3*mmsize]
    movaps [dstq + lenq + 0*mmsize], m0
    movaps [dstq + lenq + 1*mmsize], m1
    movaps [dstq + lenq + 2*mmsize], m2
    movaps [dstq + lenq + 3*mmsize], m3

    sub lenq, mmsize*4
    jge .loop
    RET
%endmacro

INIT_XMM sse2
VECTOR_DMUL
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_DMUL
%endif

;------------------------------------------------------------------------------
; void ff_vector_fmac_scalar(float *dst, const float *src, float mul, int len)
;------------------------------------------------------------------------------

%macro VECTOR_FMAC_SCALAR 0
%if UNIX64
cglobal vector_fmac_scalar, 3,3,5, dst, src, len
%else
cglobal vector_fmac_scalar, 4,4,5, dst, src, mul, len
%endif
%if ARCH_X86_32
    VBROADCASTSS m0, mulm
%else
%if WIN64
    SWAP 0, 2
%endif
    shufps xm0, xm0, 0
%if cpuflag(avx)
    vinsertf128 m0, m0, xm0, 1
%endif
%endif
    lea lenq, [lend*4-64]
.loop:
%if cpuflag(fma3)
    mova m1, [dstq+lenq]
    mova m2, [dstq+lenq+1*mmsize]
    fmaddps m1, m0, [srcq+lenq], m1
    fmaddps m2, m0, [srcq+lenq+1*mmsize], m2
%else ; cpuflag
    mulps m1, m0, [srcq+lenq]
    mulps m2, m0, [srcq+lenq+1*mmsize]
%if mmsize < 32
    mulps m3, m0, [srcq+lenq+2*mmsize]
    mulps m4, m0, [srcq+lenq+3*mmsize]
%endif ; mmsize
    addps m1, m1, [dstq+lenq]
    addps m2, m2, [dstq+lenq+1*mmsize]
%if mmsize < 32
    addps m3, m3, [dstq+lenq+2*mmsize]
    addps m4, m4, [dstq+lenq+3*mmsize]
%endif ; mmsize
%endif ; cpuflag
    mova [dstq+lenq], m1
    mova [dstq+lenq+1*mmsize], m2
%if mmsize < 32
    mova [dstq+lenq+2*mmsize], m3
    mova [dstq+lenq+3*mmsize], m4
%endif ; mmsize
    sub lenq, 64
    jge .loop
    REP_RET
%endmacro

INIT_XMM sse
VECTOR_FMAC_SCALAR
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMAC_SCALAR
%endif
%if HAVE_FMA3_EXTERNAL
INIT_YMM fma3
VECTOR_FMAC_SCALAR
%endif

;------------------------------------------------------------------------------
; void ff_vector_fmul_scalar(float *dst, const float *src, float mul, int len)
;------------------------------------------------------------------------------

%macro VECTOR_FMUL_SCALAR 0
%if UNIX64
cglobal vector_fmul_scalar, 3,3,2, dst, src, len
%else
cglobal vector_fmul_scalar, 4,4,3, dst, src, mul, len
%endif
%if ARCH_X86_32
    movss m0, mulm
%elif WIN64
    SWAP 0, 2
%endif
    shufps m0, m0, 0
    lea lenq, [lend*4-mmsize]
.loop:
    mova m1, [srcq+lenq]
    mulps m1, m0
    mova [dstq+lenq], m1
    sub lenq, mmsize
    jge .loop
    REP_RET
%endmacro

INIT_XMM sse
VECTOR_FMUL_SCALAR

;------------------------------------------------------------------------------
; void ff_vector_dmac_scalar(double *dst, const double *src, double mul,
;                            int len)
;------------------------------------------------------------------------------

%macro VECTOR_DMAC_SCALAR 0
%if ARCH_X86_32
cglobal vector_dmac_scalar, 2,4,5, dst, src, mul, len, lenaddr
    mov lenq, lenaddrm
    VBROADCASTSD m0, mulm
%else
%if UNIX64
cglobal vector_dmac_scalar, 3,3,5, dst, src, len
%else
cglobal vector_dmac_scalar, 4,4,5, dst, src, mul, len
    SWAP 0, 2
%endif
    movlhps xm0, xm0
%if cpuflag(avx)
    vinsertf128 m0, m0, xm0, 1
%endif
%endif
    lea lenq, [lend*8-mmsize*4]
.loop:
%if cpuflag(fma3)
    movaps m1, [dstq+lenq]
    movaps m2, [dstq+lenq+1*mmsize]
    movaps m3, [dstq+lenq+2*mmsize]
    movaps m4, [dstq+lenq+3*mmsize]
    fmaddpd m1, m0, [srcq+lenq], m1
    fmaddpd m2, m0, [srcq+lenq+1*mmsize], m2
    fmaddpd m3, m0, [srcq+lenq+2*mmsize], m3
    fmaddpd m4, m0, [srcq+lenq+3*mmsize], m4
%else ; cpuflag
    mulpd m1, m0, [srcq+lenq]
    mulpd m2, m0, [srcq+lenq+1*mmsize]
    mulpd m3, m0, [srcq+lenq+2*mmsize]
    mulpd m4, m0, [srcq+lenq+3*mmsize]
    addpd m1, m1, [dstq+lenq]
    addpd m2, m2, [dstq+lenq+1*mmsize]
    addpd m3, m3, [dstq+lenq+2*mmsize]
    addpd m4, m4, [dstq+lenq+3*mmsize]
%endif ; cpuflag
    movaps [dstq+lenq], m1
    movaps [dstq+lenq+1*mmsize], m2
    movaps [dstq+lenq+2*mmsize], m3
    movaps [dstq+lenq+3*mmsize], m4
    sub lenq, mmsize*4
    jge .loop
    REP_RET
%endmacro

INIT_XMM sse2
VECTOR_DMAC_SCALAR
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_DMAC_SCALAR
%endif
%if HAVE_FMA3_EXTERNAL
INIT_YMM fma3
VECTOR_DMAC_SCALAR
%endif

;------------------------------------------------------------------------------
; void ff_vector_dmul_scalar(double *dst, const double *src, double mul,
;                            int len)
;------------------------------------------------------------------------------

%macro VECTOR_DMUL_SCALAR 0
%if ARCH_X86_32
cglobal vector_dmul_scalar, 3,4,3, dst, src, mul, len, lenaddr
    mov lenq, lenaddrm
%elif UNIX64
cglobal vector_dmul_scalar, 3,3,3, dst, src, len
%else
cglobal vector_dmul_scalar, 4,4,3, dst, src, mul, len
%endif
%if ARCH_X86_32
    VBROADCASTSD m0, mulm
%else
%if WIN64
    SWAP 0, 2
%endif
    movlhps xm0, xm0
%if cpuflag(avx)
    vinsertf128 ym0, ym0, xm0, 1
%endif
%endif
    lea lenq, [lend*8-2*mmsize]
.loop:
    mulpd m1, m0, [srcq+lenq]
    mulpd m2, m0, [srcq+lenq+mmsize]
    movaps [dstq+lenq], m1
    movaps [dstq+lenq+mmsize], m2
    sub lenq, 2*mmsize
    jge .loop
    REP_RET
%endmacro

INIT_XMM sse2
VECTOR_DMUL_SCALAR
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_DMUL_SCALAR
%endif

;-----------------------------------------------------------------------------
; vector_fmul_window(float *dst, const float *src0,
;                    const float *src1, const float *win, int len);
;-----------------------------------------------------------------------------
%macro VECTOR_FMUL_WINDOW 0
cglobal vector_fmul_window, 5, 6, 6, dst, src0, src1, win, len, len1
    shl lend, 2
    lea len1q, [lenq - mmsize]
    add src0q, lenq
    add dstq, lenq
    add winq, lenq
    neg lenq
.loop:
    mova m0, [winq + lenq]
    mova m4, [src0q + lenq]
%if cpuflag(sse)
    mova m1, [winq + len1q]
    mova m5, [src1q + len1q]
    shufps m1, m1, 0x1b
    shufps m5, m5, 0x1b
    mova m2, m0
    mova m3, m1
    mulps m2, m4
    mulps m3, m5
    mulps m1, m4
    mulps m0, m5
    addps m2, m3
    subps m1, m0
    shufps m2, m2, 0x1b
%else
    pswapd m1, [winq + len1q]
    pswapd m5, [src1q + len1q]
    mova m2, m0
    mova m3, m1
    pfmul m2, m4
    pfmul m3, m5
    pfmul m1, m4
    pfmul m0, m5
    pfadd m2, m3
    pfsub m1, m0
    pswapd m2, m2
%endif
    mova [dstq + lenq], m1
    mova [dstq + len1q], m2
    sub len1q, mmsize
    add lenq, mmsize
    jl .loop
%if mmsize == 8
    femms
%endif
    REP_RET
%endmacro

INIT_MMX 3dnowext
VECTOR_FMUL_WINDOW
INIT_XMM sse
VECTOR_FMUL_WINDOW

;-----------------------------------------------------------------------------
; vector_fmul_add(float *dst, const float *src0, const float *src1,
;                 const float *src2, int len)
;-----------------------------------------------------------------------------
%macro VECTOR_FMUL_ADD 0
cglobal vector_fmul_add, 5,5,4, dst, src0, src1, src2, len
    lea lenq, [lend*4 - 2*mmsize]
ALIGN 16
.loop:
    mova m0, [src0q + lenq]
    mova m1, [src0q + lenq + mmsize]
%if cpuflag(fma3)
    mova m2, [src2q + lenq]
    mova m3, [src2q + lenq + mmsize]
    fmaddps m0, m0, [src1q + lenq], m2
    fmaddps m1, m1, [src1q + lenq + mmsize], m3
%else
    mulps m0, m0, [src1q + lenq]
    mulps m1, m1, [src1q + lenq + mmsize]
    addps m0, m0, [src2q + lenq]
    addps m1, m1, [src2q + lenq + mmsize]
%endif
    mova [dstq + lenq], m0
    mova [dstq + lenq + mmsize], m1

    sub lenq, 2*mmsize
    jge .loop
    REP_RET
%endmacro

INIT_XMM sse
VECTOR_FMUL_ADD
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMUL_ADD
%endif
%if HAVE_FMA3_EXTERNAL
INIT_YMM fma3
VECTOR_FMUL_ADD
%endif

;-----------------------------------------------------------------------------
; void vector_fmul_reverse(float *dst, const float *src0, const float *src1,
;                          int len)
;-----------------------------------------------------------------------------
%macro VECTOR_FMUL_REVERSE 0
cglobal vector_fmul_reverse, 4,4,2, dst, src0, src1, len
%if cpuflag(avx2)
    movaps m2, [pd_reverse]
%endif
    lea lenq, [lend*4 - 2*mmsize]
ALIGN 16
.loop:
%if cpuflag(avx2)
    vpermps m0, m2, [src1q]
    vpermps m1, m2, [src1q+mmsize]
%elif cpuflag(avx)
    vmovaps xmm0, [src1q + 16]
    vinsertf128 m0, m0, [src1q], 1
    vshufps m0, m0, m0, q0123
    vmovaps xmm1, [src1q + mmsize + 16]
    vinsertf128 m1, m1, [src1q + mmsize], 1
    vshufps m1, m1, m1, q0123
%else
    mova m0, [src1q]
    mova m1, [src1q + mmsize]
    shufps m0, m0, q0123
    shufps m1, m1, q0123
%endif
    mulps m0, m0, [src0q + lenq + mmsize]
    mulps m1, m1, [src0q + lenq]
    movaps [dstq + lenq + mmsize], m0
    movaps [dstq + lenq], m1
    add src1q, 2*mmsize
    sub lenq, 2*mmsize
    jge .loop
    REP_RET
%endmacro

INIT_XMM sse
VECTOR_FMUL_REVERSE
%if HAVE_AVX_EXTERNAL
INIT_YMM avx
VECTOR_FMUL_REVERSE
%endif
%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
VECTOR_FMUL_REVERSE
%endif

; float scalarproduct_float_sse(const float *v1, const float *v2, int len)
INIT_XMM sse
cglobal scalarproduct_float, 3,3,2, v1, v2, offset
    shl offsetd, 2
    add v1q, offsetq
    add v2q, offsetq
    neg offsetq
    xorps xmm0, xmm0
.loop:
    movaps xmm1, [v1q+offsetq]
    mulps xmm1, [v2q+offsetq]
    addps xmm0, xmm1
    add offsetq, 16
    js .loop
    movhlps xmm1, xmm0
    addps xmm0, xmm1
    movss xmm1, xmm0
    shufps xmm0, xmm0, 1
    addss xmm0, xmm1
%if ARCH_X86_64 == 0
    movss r0m, xmm0
    fld dword r0m
%endif
    RET

;-----------------------------------------------------------------------------
; void ff_butterflies_float(float *src0, float *src1, int len);
;-----------------------------------------------------------------------------
INIT_XMM sse
cglobal butterflies_float, 3,3,3, src0, src1, len
    shl lend, 2
    add src0q, lenq
    add src1q, lenq
    neg lenq
.loop:
    mova m0, [src0q + lenq]
    mova m1, [src1q + lenq]
    subps m2, m0, m1
    addps m0, m0, m1
    mova [src1q + lenq], m2
    mova [src0q + lenq], m0
    add lenq, mmsize
    jl .loop
    REP_RET
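For reference, scalar C sketches of two of the kernels above, following the prototypes given in the comments. The SIMD loops assume aligned buffers and a len that is a multiple of the unroll width; these sketches show the math only and are not the libavutil C fallbacks themselves.

/* dst[i] += src[i] * mul; one fmaddps on FMA3, mulps + addps otherwise. */
static void vector_fmac_scalar_c(float *dst, const float *src,
                                 float mul, int len)
{
    for (int i = 0; i < len; i++)
        dst[i] += src[i] * mul;
}

/* In-place butterfly, the float counterpart of fixed_dsp.asm above. */
static void butterflies_float_c(float *src0, float *src1, int len)
{
    for (int i = 0; i < len; i++) {
        float t  = src0[i] - src1[i];   /* subps */
        src0[i] += src1[i];             /* addps */
        src1[i]  = t;
    }
}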
trunk/3rdparty/ffmpeg-4.2-fit/libavutil/x86/imgutils.asm (vendored, new file, 53 lines)
@@ -0,0 +1,53 @@
;*****************************************************************************
;* Copyright 2016 Anton Khirnov
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION .text

INIT_XMM sse4
cglobal image_copy_plane_uc_from, 6, 7, 4, dst, dst_linesize, src, src_linesize, bw, height, rowpos
    add dstq, bwq
    add srcq, bwq
    neg bwq

.row_start:
    mov rowposq, bwq

.loop:
    movntdqa m0, [srcq + rowposq + 0 * mmsize]
    movntdqa m1, [srcq + rowposq + 1 * mmsize]
    movntdqa m2, [srcq + rowposq + 2 * mmsize]
    movntdqa m3, [srcq + rowposq + 3 * mmsize]

    mova [dstq + rowposq + 0 * mmsize], m0
    mova [dstq + rowposq + 1 * mmsize], m1
    mova [dstq + rowposq + 2 * mmsize], m2
    mova [dstq + rowposq + 3 * mmsize], m3

    add rowposq, 4 * mmsize
    jnz .loop

    add srcq, src_linesizeq
    add dstq, dst_linesizeq
    dec heightd
    jnz .row_start

    RET
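The routine copies one image plane row by row, four aligned vectors per iteration, using streaming movntdqa loads that pay off when the source is uncached (write-combining) memory such as a GPU mapping. A plain C sketch of the same copy, with illustrative names and no streaming-load behavior:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Row-by-row plane copy; the asm moves 4 * mmsize bytes per inner iteration. */
static void image_copy_plane_c(uint8_t *dst, ptrdiff_t dst_linesize,
                               const uint8_t *src, ptrdiff_t src_linesize,
                               ptrdiff_t bytewidth, int height)
{
    for (int y = 0; y < height; y++) {
        memcpy(dst, src, bytewidth);
        dst += dst_linesize;
        src += src_linesize;
    }
}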
trunk/3rdparty/ffmpeg-4.2-fit/libavutil/x86/lls.asm (vendored, new file, 290 lines)
@@ -0,0 +1,290 @@
;******************************************************************************
;* linear least squares model
;*
;* Copyright (c) 2013 Loren Merritt
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "x86util.asm"

SECTION .text

%define MAX_VARS 32
%define MAX_VARS_ALIGN (MAX_VARS+4)
%define COVAR_STRIDE MAX_VARS_ALIGN*8
%define COVAR(x,y) [covarq + (x)*8 + (y)*COVAR_STRIDE]

struc LLSModel
    .covariance: resq MAX_VARS_ALIGN*MAX_VARS_ALIGN
    .coeff: resq MAX_VARS*MAX_VARS
    .variance: resq MAX_VARS
    .indep_count: resd 1
endstruc

%macro ADDPD_MEM 2
%if cpuflag(avx)
    vaddpd %2, %2, %1
%else
    addpd %2, %1
%endif
    mova %1, %2
%endmacro

INIT_XMM sse2
%define movdqa movaps
cglobal update_lls, 2,5,8, ctx, var, i, j, covar2
%define covarq ctxq
    mov id, [ctxq + LLSModel.indep_count]
    lea varq, [varq + iq*8]
    neg iq
    mov covar2q, covarq
.loopi:
    ; Compute all 3 pairwise products of a 2x2 block that lies on the diagonal
    mova m1, [varq + iq*8]
    mova m3, [varq + iq*8 + 16]
    pshufd m4, m1, q1010
    pshufd m5, m1, q3232
    pshufd m6, m3, q1010
    pshufd m7, m3, q3232
    mulpd m0, m1, m4
    mulpd m1, m1, m5
    lea covarq, [covar2q + 16]
    ADDPD_MEM COVAR(-2,0), m0
    ADDPD_MEM COVAR(-2,1), m1
    lea jq, [iq + 2]
    cmp jd, -2
    jg .skip4x4
.loop4x4:
    ; Compute all 16 pairwise products of a 4x4 block
    mulpd m0, m4, m3
    mulpd m1, m5, m3
    mulpd m2, m6, m3
    mulpd m3, m3, m7
    ADDPD_MEM COVAR(0,0), m0
    ADDPD_MEM COVAR(0,1), m1
    ADDPD_MEM COVAR(0,2), m2
    ADDPD_MEM COVAR(0,3), m3
    mova m3, [varq + jq*8 + 16]
    mulpd m0, m4, m3
    mulpd m1, m5, m3
    mulpd m2, m6, m3
    mulpd m3, m3, m7
    ADDPD_MEM COVAR(2,0), m0
    ADDPD_MEM COVAR(2,1), m1
    ADDPD_MEM COVAR(2,2), m2
    ADDPD_MEM COVAR(2,3), m3
    mova m3, [varq + jq*8 + 32]
    add covarq, 32
    add jq, 4
    cmp jd, -2
    jle .loop4x4
.skip4x4:
    test jd, jd
    jg .skip2x4
    mulpd m4, m3
    mulpd m5, m3
    mulpd m6, m3
    mulpd m7, m3
    ADDPD_MEM COVAR(0,0), m4
    ADDPD_MEM COVAR(0,1), m5
    ADDPD_MEM COVAR(0,2), m6
    ADDPD_MEM COVAR(0,3), m7
.skip2x4:
    add iq, 4
    add covar2q, 4*COVAR_STRIDE+32
    cmp id, -2
    jle .loopi
    test id, id
    jg .ret
    mov jq, iq
%define covarq covar2q
.loop2x1:
    movsd m0, [varq + iq*8]
    movlhps m0, m0
    mulpd m0, [varq + jq*8]
    ADDPD_MEM COVAR(0,0), m0
    inc iq
    add covarq, COVAR_STRIDE
    test id, id
    jle .loop2x1
.ret:
    REP_RET

%macro UPDATE_LLS 0
cglobal update_lls, 3,6,8, ctx, var, count, i, j, count2
%define covarq ctxq
    mov countd, [ctxq + LLSModel.indep_count]
    lea count2d, [countq-2]
    xor id, id
.loopi:
    ; Compute all 10 pairwise products of a 4x4 block that lies on the diagonal
    mova ymm1, [varq + iq*8]
    vbroadcastsd ymm4, [varq + iq*8]
    vbroadcastsd ymm5, [varq + iq*8 + 8]
    vbroadcastsd ymm6, [varq + iq*8 + 16]
    vbroadcastsd ymm7, [varq + iq*8 + 24]
    vextractf128 xmm3, ymm1, 1
%if cpuflag(fma3)
    mova ymm0, COVAR(iq ,0)
    mova xmm2, COVAR(iq+2,2)
    fmaddpd ymm0, ymm1, ymm4, ymm0
    fmaddpd xmm2, xmm3, xmm6, xmm2
    fmaddpd ymm1, ymm5, ymm1, COVAR(iq ,1)
    fmaddpd xmm3, xmm7, xmm3, COVAR(iq+2,3)
    mova COVAR(iq ,0), ymm0
    mova COVAR(iq ,1), ymm1
    mova COVAR(iq+2,2), xmm2
    mova COVAR(iq+2,3), xmm3
%else
    vmulpd ymm0, ymm1, ymm4
    vmulpd ymm1, ymm1, ymm5
    vmulpd xmm2, xmm3, xmm6
    vmulpd xmm3, xmm3, xmm7
    ADDPD_MEM COVAR(iq ,0), ymm0
    ADDPD_MEM COVAR(iq ,1), ymm1
    ADDPD_MEM COVAR(iq+2,2), xmm2
    ADDPD_MEM COVAR(iq+2,3), xmm3
%endif ; cpuflag(fma3)
    lea jd, [iq + 4]
    cmp jd, count2d
    jg .skip4x4
.loop4x4:
    ; Compute all 16 pairwise products of a 4x4 block
    mova ymm3, [varq + jq*8]
%if cpuflag(fma3)
    mova ymm0, COVAR(jq, 0)
    mova ymm1, COVAR(jq, 1)
    mova ymm2, COVAR(jq, 2)
    fmaddpd ymm0, ymm3, ymm4, ymm0
    fmaddpd ymm1, ymm3, ymm5, ymm1
    fmaddpd ymm2, ymm3, ymm6, ymm2
    fmaddpd ymm3, ymm7, ymm3, COVAR(jq,3)
    mova COVAR(jq, 0), ymm0
    mova COVAR(jq, 1), ymm1
    mova COVAR(jq, 2), ymm2
    mova COVAR(jq, 3), ymm3
%else
    vmulpd ymm0, ymm3, ymm4
    vmulpd ymm1, ymm3, ymm5
    vmulpd ymm2, ymm3, ymm6
    vmulpd ymm3, ymm3, ymm7
    ADDPD_MEM COVAR(jq,0), ymm0
    ADDPD_MEM COVAR(jq,1), ymm1
    ADDPD_MEM COVAR(jq,2), ymm2
    ADDPD_MEM COVAR(jq,3), ymm3
%endif ; cpuflag(fma3)
    add jd, 4
    cmp jd, count2d
    jle .loop4x4
.skip4x4:
    cmp jd, countd
    jg .skip2x4
    mova xmm3, [varq + jq*8]
%if cpuflag(fma3)
    mova xmm0, COVAR(jq, 0)
    mova xmm1, COVAR(jq, 1)
    mova xmm2, COVAR(jq, 2)
    fmaddpd xmm0, xmm3, xmm4, xmm0
    fmaddpd xmm1, xmm3, xmm5, xmm1
    fmaddpd xmm2, xmm3, xmm6, xmm2
    fmaddpd xmm3, xmm7, xmm3, COVAR(jq,3)
    mova COVAR(jq, 0), xmm0
    mova COVAR(jq, 1), xmm1
    mova COVAR(jq, 2), xmm2
    mova COVAR(jq, 3), xmm3
%else
    vmulpd xmm0, xmm3, xmm4
    vmulpd xmm1, xmm3, xmm5
    vmulpd xmm2, xmm3, xmm6
    vmulpd xmm3, xmm3, xmm7
    ADDPD_MEM COVAR(jq,0), xmm0
    ADDPD_MEM COVAR(jq,1), xmm1
    ADDPD_MEM COVAR(jq,2), xmm2
    ADDPD_MEM COVAR(jq,3), xmm3
%endif ; cpuflag(fma3)
.skip2x4:
    add id, 4
    add covarq, 4*COVAR_STRIDE
    cmp id, count2d
    jle .loopi
    cmp id, countd
    jg .ret
    mov jd, id
.loop2x1:
    vmovddup xmm0, [varq + iq*8]
%if cpuflag(fma3)
    mova xmm1, [varq + jq*8]
    fmaddpd xmm0, xmm1, xmm0, COVAR(jq,0)
    mova COVAR(jq,0), xmm0
%else
    vmulpd xmm0, [varq + jq*8]
    ADDPD_MEM COVAR(jq,0), xmm0
%endif ; cpuflag(fma3)
    inc id
    add covarq, COVAR_STRIDE
    cmp id, countd
    jle .loop2x1
.ret:
    REP_RET
%endmacro ; UPDATE_LLS

%if HAVE_AVX_EXTERNAL
INIT_YMM avx
UPDATE_LLS
%endif
%if HAVE_FMA3_EXTERNAL
INIT_YMM fma3
UPDATE_LLS
%endif

INIT_XMM sse2
cglobal evaluate_lls, 3,4,2, ctx, var, order, i
    ; This function is often called on the same buffer as update_lls, but with
    ; an offset. They can't both be aligned.
    ; Load halves rather than movu to avoid store-forwarding stalls, since the
    ; input was initialized immediately prior to this function using scalar math.
%define coefsq ctxq
    mov id, orderd
    imul orderd, MAX_VARS
    lea coefsq, [ctxq + LLSModel.coeff + orderq*8]
    movsd m0, [varq]
    movhpd m0, [varq + 8]
    mulpd m0, [coefsq]
    lea coefsq, [coefsq + iq*8]
    lea varq, [varq + iq*8]
    neg iq
    add iq, 2
.loop:
    movsd m1, [varq + iq*8]
    movhpd m1, [varq + iq*8 + 8]
    mulpd m1, [coefsq + iq*8]
    addpd m0, m1
    add iq, 2
    jl .loop
    jg .skip1
    movsd m1, [varq + iq*8]
    mulsd m1, [coefsq + iq*8]
    addpd m0, m1
.skip1:
    movhlps m1, m0
    addsd m0, m1
%if ARCH_X86_32
    movsd r0m, m0
    fld qword r0m
%endif
    RET
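The file provides the covariance update and model evaluation for a linear least squares solver, walking the symmetric covariance matrix in 2x2 and 4x4 tiles. Below is a scalar C sketch of what the two kernels compute, mirroring the struc layout declared above; the struct and function names are illustrative, and the real context lives in libavutil/lls.h, whose field declarations may differ in detail.

#define MAX_VARS       32
#define MAX_VARS_ALIGN (MAX_VARS + 4)

typedef struct LLSModelSketch {
    double covariance[MAX_VARS_ALIGN][MAX_VARS_ALIGN];
    double coeff[MAX_VARS][MAX_VARS];
    double variance[MAX_VARS];
    int    indep_count;
} LLSModelSketch;

/* update_lls: accumulate all pairwise products of one sample vector
 * into the upper triangle of the covariance matrix (the asm does the
 * same work in vectorized tiles). */
static void update_lls_c(LLSModelSketch *m, const double *var)
{
    for (int i = 0; i <= m->indep_count; i++)
        for (int j = i; j <= m->indep_count; j++)
            m->covariance[i][j] += var[i] * var[j];
}

/* evaluate_lls: dot product of one coefficient row with the variables. */
static double evaluate_lls_c(const LLSModelSketch *m, const double *var, int order)
{
    double sum = 0.0;
    for (int i = 0; i <= order; i++)
        sum += m->coeff[order][i] * var[i];
    return sum;
}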
trunk/3rdparty/ffmpeg-4.2-fit/libavutil/x86/x86inc.asm (vendored, new file, 1701 lines)
File diff suppressed because it is too large.
trunk/3rdparty/ffmpeg-4.2-fit/libavutil/x86/x86util.asm (vendored, new file, 1028 lines)
File diff suppressed because it is too large.