Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- From 8c99927a6546de40e8767d2eb3c4bf4cbbaee49e Mon Sep 17 00:00:00 2001
- From: Daniel Kang <daniel.d.kang@gmail.com>
- Date: Tue, 24 May 2011 15:14:38 -0400
- Subject: [PATCH 1/2] Add IDCT functions for 10-bit H.264.
- Ports the majority of IDCT functions for 10-bit H.264.
- Parts are inspired by 8-bit IDCT code in Libav; other parts ported from x264 with relicensing permission from the author.
- ---
- libavcodec/x86/Makefile | 3 +-
- libavcodec/x86/h264_idct_10bit.asm | 536 ++++++++++++++++++++++++++++++++++++
- libavcodec/x86/h264dsp_mmx.c | 59 ++++
- 3 files changed, 597 insertions(+), 1 deletions(-)
- create mode 100644 libavcodec/x86/h264_idct_10bit.asm
- diff --git a/libavcodec/x86/Makefile b/libavcodec/x86/Makefile
- index ba664ab..38b736e 100644
- --- a/libavcodec/x86/Makefile
- +++ b/libavcodec/x86/Makefile
- @@ -12,8 +12,9 @@ YASM-OBJS-$(CONFIG_FFT) += x86/fft_mmx.o \
- MMX-OBJS-$(CONFIG_H264DSP) += x86/h264dsp_mmx.o
- YASM-OBJS-$(CONFIG_H264DSP) += x86/h264_deblock.o \
- x86/h264_deblock_10bit.o \
- - x86/h264_weight.o \
- x86/h264_idct.o \
- + x86/h264_idct_10bit.o \
- + x86/h264_weight.o \
- YASM-OBJS-$(CONFIG_H264PRED) += x86/h264_intrapred.o
- MMX-OBJS-$(CONFIG_H264PRED) += x86/h264_intrapred_init.o
- diff --git a/libavcodec/x86/h264_idct_10bit.asm b/libavcodec/x86/h264_idct_10bit.asm
- new file mode 100644
- index 0000000..bfafc71
- --- /dev/null
- +++ b/libavcodec/x86/h264_idct_10bit.asm
- @@ -0,0 +1,536 @@
- +;*****************************************************************************
- +;* MMX/SSE2/AVX-optimized 10-bit H.264 iDCT code
- +;*****************************************************************************
- +;* Copyright (C) 2005-2011 x264 project
- +;*
- +;* Authors: Daniel Kang <daniel.d.kang@gmail.com>
- +;*
- +;* This file is part of Libav.
- +;*
- +;* Libav is free software; you can redistribute it and/or
- +;* modify it under the terms of the GNU Lesser General Public
- +;* License as published by the Free Software Foundation; either
- +;* version 2.1 of the License, or (at your option) any later version.
- +;*
- +;* Libav is distributed in the hope that it will be useful,
- +;* but WITHOUT ANY WARRANTY; without even the implied warranty of
- +;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- +;* Lesser General Public License for more details.
- +;*
- +;* You should have received a copy of the GNU Lesser General Public
- +;* License along with Libav; if not, write to the Free Software
- +;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- +;******************************************************************************
- +
- +%include "x86inc.asm"
- +%include "x86util.asm"
- +
- +SECTION_RODATA
- +
- +pw_pixel_max: times 8 dw ((1 << 10)-1)
- +pd_32: times 4 dd 32
- +scan8_mem: db 4+1*8, 5+1*8, 4+2*8, 5+2*8
- + db 6+1*8, 7+1*8, 6+2*8, 7+2*8
- + db 4+3*8, 5+3*8, 4+4*8, 5+4*8
- + db 6+3*8, 7+3*8, 6+4*8, 7+4*8
- + db 1+1*8, 2+1*8
- + db 1+2*8, 2+2*8
- + db 1+4*8, 2+4*8
- + db 1+5*8, 2+5*8
- +
- +%ifdef PIC
- +%define scan8 r11
- +%else
- +%define scan8 scan8_mem
- +%endif
- +
- +SECTION .text
- +
- +;-----------------------------------------------------------------------------
- +; void h264_idct_add(pixel *dst, dctcoef *block, int stride)
- +;-----------------------------------------------------------------------------
- +%macro STORE_DIFFx2 6
- + psrad %1, 6
- + psrad %2, 6
- + packssdw %1, %2
- + movq %3, [%5]
- + movhps %3, [%5+%6]
- + paddsw %1, %3
- + CLIPW %1, %4, [pw_pixel_max]
- + movq [%5], %1
- + movhps [%5+%6], %1
- +%endmacro
- +
- +;dst, in, stride
- +%macro IDCT4_ADD_10 3
- + mova m0, [%2+ 0]
- + mova m1, [%2+16]
- + mova m2, [%2+32]
- + mova m3, [%2+48]
- + IDCT4_1D d,0,1,2,3,4,5
- + TRANSPOSE4x4D 0,1,2,3,4
- + paddd m0, [pd_32]
- + IDCT4_1D d,0,1,2,3,4,5
- + pxor m5, m5
- + STORE_DIFFx2 m0, m1, m4, m5, %1, %3
- + lea %1, [%1+%3*2]
- + STORE_DIFFx2 m2, m3, m4, m5, %1, %3
- +%endmacro
- +
- +%macro IDCT_ADD_10 1
- +cglobal h264_idct_add_10_%1, 3,3
- + IDCT4_ADD_10 r0, r1, r2
- + RET
- +%endmacro
- +
- +INIT_XMM
- +IDCT_ADD_10 sse2
- +%ifdef HAVE_AVX
- +INIT_AVX
- +IDCT_ADD_10 avx
- +%endif
- +
- +;-----------------------------------------------------------------------------
- +; h264_idct_add16(pixel *dst, const int *block_offset, dctcoef *block, int stride, const uint8_t nnzc[6*8])
- +;-----------------------------------------------------------------------------
- +;;;;;;; NO FATE SAMPLES TRIGGER THIS
- +%macro ADD4x4IDCT 1
- +add4x4_idct_%1:
- + add r5, r0
- + mova m0, [r2+ 0]
- + mova m1, [r2+16]
- + mova m2, [r2+32]
- + mova m3, [r2+48]
- + IDCT4_1D d,0,1,2,3,4,5
- + TRANSPOSE4x4D 0,1,2,3,4
- + paddd m0, [pd_32]
- + IDCT4_1D d,0,1,2,3,4,5
- + pxor m5, m5
- + STORE_DIFFx2 m0, m1, m4, m5, r5, r3
- + lea r5, [r5+r3*2]
- + STORE_DIFFx2 m2, m3, m4, m5, r5, r3
- + ret
- +%endmacro
- +
- +INIT_XMM
- +ALIGN 16
- +ADD4x4IDCT sse2
- +%ifdef HAVE_AVX
- +INIT_AVX
- +ALIGN 16
- +ADD4x4IDCT avx
- +%endif
- +
- +%macro ADD16_OP 3
- + cmp byte [r4+%3], 0
- + jz .skipblock%2
- + mov r5d, dword [r1+%2*4]
- + call add4x4_idct_%1
- +.skipblock%2:
- +%if %2<15
- + add r2, 64
- +%endif
- +%endmacro
- +
- +%macro IDCT_ADD16_10 1
- +cglobal h264_idct_add16_10_%1, 5,6
- + ADD16_OP %1, 0, 4+1*8
- + ADD16_OP %1, 1, 5+1*8
- + ADD16_OP %1, 2, 4+2*8
- + ADD16_OP %1, 3, 5+2*8
- + ADD16_OP %1, 4, 6+1*8
- + ADD16_OP %1, 5, 7+1*8
- + ADD16_OP %1, 6, 6+2*8
- + ADD16_OP %1, 7, 7+2*8
- + ADD16_OP %1, 8, 4+3*8
- + ADD16_OP %1, 9, 5+3*8
- + ADD16_OP %1, 10, 4+4*8
- + ADD16_OP %1, 11, 5+4*8
- + ADD16_OP %1, 12, 6+3*8
- + ADD16_OP %1, 13, 7+3*8
- + ADD16_OP %1, 14, 6+4*8
- + ADD16_OP %1, 15, 7+4*8
- + RET
- +%endmacro
- +
- +INIT_XMM
- +IDCT_ADD16_10 sse2
- +%ifdef HAVE_AVX
- +INIT_AVX
- +IDCT_ADD16_10 avx
- +%endif
- +
- +;-----------------------------------------------------------------------------
- +; h264_idct_add16intra(pixel *dst, const int *block_offset, dctcoef *block, int stride, const uint8_t nnzc[6*8])
- +;-----------------------------------------------------------------------------
- +%macro ADD16_OP_INTRA 3
- + movzx r5, byte [r4+%3]
- + or r5d, dword [r2]
- + jz .skipblock%2
- + mov r5d, dword [r1+%2*4]
- + call add4x4_idct_%1
- +.skipblock%2:
- +%if %2<15
- + add r2, 64
- +%endif
- +%endmacro
- +
- +%macro IDCT_ADD16INTRA_10 1
- +cglobal h264_idct_add16intra_10_%1,5,6
- + ADD16_OP_INTRA %1, 0, 4+1*8
- + ADD16_OP_INTRA %1, 1, 5+1*8
- + ADD16_OP_INTRA %1, 2, 4+2*8
- + ADD16_OP_INTRA %1, 3, 5+2*8
- + ADD16_OP_INTRA %1, 4, 6+1*8
- + ADD16_OP_INTRA %1, 5, 7+1*8
- + ADD16_OP_INTRA %1, 6, 6+2*8
- + ADD16_OP_INTRA %1, 7, 7+2*8
- + ADD16_OP_INTRA %1, 8, 4+3*8
- + ADD16_OP_INTRA %1, 9, 5+3*8
- + ADD16_OP_INTRA %1, 10, 4+4*8
- + ADD16_OP_INTRA %1, 11, 5+4*8
- + ADD16_OP_INTRA %1, 12, 6+3*8
- + ADD16_OP_INTRA %1, 13, 7+3*8
- + ADD16_OP_INTRA %1, 14, 6+4*8
- + ADD16_OP_INTRA %1, 15, 7+4*8
- + RET
- +%endmacro
- +
- +INIT_XMM
- +IDCT_ADD16INTRA_10 sse2
- +%ifdef HAVE_AVX
- +INIT_AVX
- +IDCT_ADD16INTRA_10 avx
- +%endif
- +
- +;-----------------------------------------------------------------------------
- +; h264_idct_add8(pixel **dst, const int *block_offset, dctcoef *block, int stride, const uint8_t nnzc[6*8])
- +;-----------------------------------------------------------------------------
- +%macro IDCT_ADD8 1
- +cglobal h264_idct_add8_10_%1,5,7
- + mov r5, 16
- + add r2, 1024
- +%ifdef PIC
- + lea r11, [scan8_mem]
- +%endif
- +%ifdef ARCH_X86_64
- + mov r10, r0
- +%endif
- + call h264_idct_add8_10_plane_%1
- +%ifdef ARCH_X86_64
- + add r10, gprsize
- +%else
- + add r0mp, gprsize
- +%endif
- +%ifndef UNIX64
- + call h264_idct_add8_10_plane_%1
- + RET
- +%endif
- +
- +h264_idct_add8_10_plane_%1:
- +.nextblock:
- + movzx r6, byte [scan8+r5]
- + movzx r6, byte [r4+r6]
- + or r6d, dword [r2]
- + test r6, r6
- + jz .skipblock
- +%ifdef ARCH_X86_64
- + mov r0d, dword [r1+r5*4]
- + add r0, [r10]
- +%else
- + mov r0, r1m ; XXX r1m here is actually r0m of the calling func
- + mov r0, [r0]
- + add r0, dword [r1+r5*4]
- +%endif
- + IDCT4_ADD_10 r0, r2, r3
- +.skipblock:
- + inc r5
- + add r2, 64
- + test r5, 3
- + jnz .nextblock
- + rep ret
- +%endmacro ; IDCT_ADD8
- +
- +INIT_XMM
- +IDCT_ADD8 sse2
- +%ifdef HAVE_AVX
- +INIT_AVX
- +IDCT_ADD8 avx
- +%endif
- +
- +;-----------------------------------------------------------------------------
- +; void h264_idct_dc_add(pixel *dst, dctcoef *block, int stride)
- +;-----------------------------------------------------------------------------
- +%macro IDCT_DC_ADD_OP_10 0
- + mova m1, [r0+0 ]
- + mova m2, [r0+r2 ]
- + mova m3, [r0+r2*2]
- + mova m4, [r0+r1 ]
- + pxor m5, m5
- + paddw m1, m0
- + paddw m2, m0
- + paddw m3, m0
- + paddw m4, m0
- + CLIPW m1, m5, m6
- + CLIPW m2, m5, m6
- + CLIPW m3, m5, m6
- + CLIPW m4, m5, m6
- + mova [r0+0 ], m1
- + mova [r0+r2 ], m2
- + mova [r0+r2*2], m3
- + mova [r0+r1 ], m4
- +%endmacro
- +
- +INIT_MMX
- +cglobal h264_idct_dc_add_10_mmx2,3,3
- + movd m0, dword [r1]
- + paddd m0, [pd_32]
- + psrad m0, 6
- + lea r1, [r2*3]
- + pshufw m0, m0, 0
- + mova m6, [pw_pixel_max]
- + IDCT_DC_ADD_OP_10
- + RET
- +
- +;-----------------------------------------------------------------------------
- +; void h264_idct8_dc_add(pixel *dst, dctcoef *block, int stride)
- +;-----------------------------------------------------------------------------
- +%macro IDCT8_DC_ADD 1
- +cglobal h264_idct8_dc_add_10_%1,3,3,7
- + mov r1d, dword [r1]
- + add r1, 32
- + sar r1, 6
- + movd m0, r1d
- + lea r1, [r2*3]
- + SPLATW m0, m0, 0
- + mova m6, [pw_pixel_max]
- + IDCT_DC_ADD_OP_10
- + lea r0, [r0+r2*4]
- + IDCT_DC_ADD_OP_10
- + RET
- +%endmacro
- +
- +INIT_XMM
- +IDCT8_DC_ADD sse2
- +%ifdef HAVE_AVX
- +INIT_AVX
- +IDCT8_DC_ADD avx
- +%endif
- +
- +;-----------------------------------------------------------------------------
- +; void h264_idct8_add(pixel *dst, dctcoef *block, int stride)
- +;-----------------------------------------------------------------------------
- +%macro IDCT8_1D 2
- + SWAP 0, 1
- + psrad m4, m5, 1
- + psrad m1, m0, 1
- + paddd m4, m5
- + paddd m1, m0
- + paddd m4, m7
- + paddd m1, m5
- + psubd m4, m0
- + paddd m1, m3
- +
- + psubd m0, m3
- + psubd m5, m3
- + paddd m0, m7
- + psubd m5, m7
- + psrad m3, 1
- + psrad m7, 1
- + psubd m0, m3
- + psubd m5, m7
- +
- + SWAP 1, 7
- + psrad m1, m7, 2
- + psrad m3, m4, 2
- + paddd m3, m0
- + psrad m0, 2
- + paddd m1, m5
- + psrad m5, 2
- + psubd m0, m4
- + psubd m7, m5
- +
- + SWAP 5, 6
- + psrad m4, m2, 1
- + psrad m6, m5, 1
- + psubd m4, m5
- + paddd m6, m2
- +
- + mova m2, %1
- + mova m5, %2
- + SUMSUB_BA d, 5, 2
- + SUMSUB_BA d, 6, 5
- + SUMSUB_BA d, 4, 2
- + SUMSUB_BA d, 7, 6
- + SUMSUB_BA d, 0, 4
- + SUMSUB_BA d, 3, 2
- + SUMSUB_BA d, 1, 5
- + SWAP 7, 6, 4, 5, 2, 3, 1, 0 ; 70315246 -> 01234567
- +%endmacro
- +
- +%macro IDCT8_1D_FULL 1
- + mova m7, [%1+112*2]
- + mova m6, [%1+ 96*2]
- + mova m5, [%1+ 80*2]
- + mova m3, [%1+ 48*2]
- + mova m2, [%1+ 32*2]
- + mova m1, [%1+ 16*2]
- + IDCT8_1D [%1], [%1+ 64*2]
- +%endmacro
- +
- +; %1=int16_t *block, %2=int16_t *dstblock
- +%macro IDCT8_ADD_SSE_START 2
- +%ifdef ARCH_X86_64
- + mova m7, [%1+112*2]
- + mova m6, [%1+ 96*2]
- + mova m5, [%1+ 80*2]
- + mova m3, [%1+ 48*2]
- + mova m2, [%1+ 32*2]
- + mova m1, [%1+ 16*2]
- + IDCT8_1D [%1], [%1+ 64*2]
- + TRANSPOSE4x4D 0,1,2,3,8
- + mova [%2 ], m0
- + TRANSPOSE4x4D 4,5,6,7,8
- + mova [%2+8*2], m4
- +%else
- + IDCT8_1D_FULL %1
- + mova [%1], m7
- + TRANSPOSE4x4D 0,1,2,3,7
- + mova m7, [%1]
- + mova [%2 ], m0
- + mova [%2+16*2], m1
- + mova [%2+32*2], m2
- + mova [%2+48*2], m3
- + TRANSPOSE4x4D 4,5,6,7,3
- + mova [%2+ 8*2], m4
- + mova [%2+24*2], m5
- + mova [%2+40*2], m6
- + mova [%2+56*2], m7
- +%endif
- +%endmacro
- +
- +; %1=uint8_t *dst, %2=int16_t *block, %3=int stride
- +%macro IDCT8_ADD_SSE_END 3
- +%ifdef ARCH_X86_64
- + IDCT8_1D [%2], [%2+ 64*2]
- +
- + pxor m8, m8
- + STORE_DIFFx2 m0, m1, m15, m8, %1, %3
- + lea %1, [%1+%3*2]
- + STORE_DIFFx2 m2, m3, m15, m8, %1, %3
- + lea %1, [%1+%3*2]
- + STORE_DIFFx2 m4, m5, m15, m8, %1, %3
- + lea %1, [%1+%3*2]
- + STORE_DIFFx2 m6, m7, m15, m8, %1, %3
- +%else
- + IDCT8_1D_FULL %2
- + mova [%2 ], m5
- + mova [%2+16*2], m6
- + mova [%2+32*2], m7
- +
- + pxor m7, m7
- + STORE_DIFFx2 m0, m1, m5, m7, %1, %3
- + lea %1, [%1+%3*2]
- + STORE_DIFFx2 m2, m3, m5, m7, %1, %3
- + mova m0, [%2 ]
- + mova m1, [%2+16*2]
- + mova m2, [%2+32*2]
- + lea %1, [%1+%3*2]
- + STORE_DIFFx2 m4, m0, m5, m7, %1, %3
- + lea %1, [%1+%3*2]
- + STORE_DIFFx2 m1, m2, m5, m7, %1, %3
- +%endif ; ARCH_X86_64
- +%endmacro
- +
- +%macro IDCT8_ADD 1
- +cglobal h264_idct8_add_10_%1, 3,4,16
- +%ifndef UNIX64
- + %assign pad 16-gprsize-(stack_offset&15)
- + sub rsp, pad
- + call h264_idct8_add1_10_%1
- + add rsp, pad
- + RET
- +%endif
- +
- +ALIGN 16
- +; TODO: does not need to use stack
- +h264_idct8_add1_10_%1:
- +%assign pad 256+16-gprsize
- + sub rsp, pad
- +
- + add dword [r1], 32
- + IDCT8_ADD_SSE_START r1 , rsp
- +%ifdef ARCH_X86_64
- + SWAP 1, 9
- + SWAP 2, 10
- + SWAP 3, 11
- + SWAP 5, 12
- + SWAP 6, 13
- + SWAP 7, 14
- +%endif
- + IDCT8_ADD_SSE_START r1+16, rsp+128
- + lea r3, [r0+8]
- +%ifdef ARCH_X86_64
- + PERMUTE 1,9, 2,10, 3,11, 5,1, 6,2, 7,3, 9,12, 10,13, 11,14, 12,5, 13,6, 14,7
- +%endif
- + IDCT8_ADD_SSE_END r0 , rsp, r2
- +%ifdef ARCH_X86_64
- + SWAP 1, 9
- + SWAP 2, 10
- + SWAP 3, 11
- + SWAP 5, 12
- + SWAP 6, 13
- + SWAP 7, 14
- +%endif
- + IDCT8_ADD_SSE_END r3 , rsp+16, r2
- +
- + add rsp, pad
- + ret
- +%endmacro
- +
- +INIT_XMM
- +IDCT8_ADD sse2
- +%ifdef HAVE_AVX
- +INIT_AVX
- +IDCT8_ADD avx
- +%endif
- +
- +;-----------------------------------------------------------------------------
- +; h264_idct8_add4(pixel **dst, const int *block_offset, dctcoef *block, int stride, const uint8_t nnzc[6*8])
- +;-----------------------------------------------------------------------------
- +;;;;;;; NO FATE SAMPLES TRIGGER THIS
- +%macro IDCT8_ADD4_OP 3
- + cmp byte [r4+%3], 0
- + jz .skipblock%2
- + mov r0d, dword [r6+%2*4]
- + add r0, r5
- + call h264_idct8_add1_10_%1
- +.skipblock%2:
- +%if %2<12
- + add r1, 256
- +%endif
- +%endmacro
- +
- +%macro IDCT8_ADD4 1
- +cglobal h264_idct8_add4_10_%1, 0,7,16
- + %assign pad 16-gprsize-(stack_offset&15)
- + SUB rsp, pad
- + mov r5, r0mp
- + mov r6, r1mp
- + mov r1, r2mp
- + mov r2d, r3m
- + movifnidn r4, r4mp
- + IDCT8_ADD4_OP %1, 0, 4+1*8
- + IDCT8_ADD4_OP %1, 4, 6+1*8
- + IDCT8_ADD4_OP %1, 8, 4+3*8
- + IDCT8_ADD4_OP %1, 12, 6+3*8
- + ADD rsp, pad
- + RET
- +%endmacro ; IDCT8_ADD4
- +
- +INIT_XMM
- +IDCT8_ADD4 sse2
- +%ifdef HAVE_AVX
- +INIT_AVX
- +IDCT8_ADD4 avx
- +%endif
- diff --git a/libavcodec/x86/h264dsp_mmx.c b/libavcodec/x86/h264dsp_mmx.c
- index 1c07d14..d60fbd5 100644
- --- a/libavcodec/x86/h264dsp_mmx.c
- +++ b/libavcodec/x86/h264dsp_mmx.c
- @@ -27,6 +27,43 @@ DECLARE_ALIGNED(8, static const uint64_t, ff_pb_3_1 ) = 0x0103010301030103ULL;
- /***********************************/
- /* IDCT */
- +#define IDCT_ADD_FUNC(NUM, DEPTH, OPT) \
- +void ff_h264_idct ## NUM ## _add_ ## DEPTH ## _ ## OPT (uint8_t *dst, int16_t *block, int stride);
- +
- +IDCT_ADD_FUNC(, 10, sse2)
- +IDCT_ADD_FUNC(_dc, 10, mmx2)
- +IDCT_ADD_FUNC(8_dc, 10, sse2)
- +IDCT_ADD_FUNC(8, 10, sse2)
- +#if HAVE_AVX
- +IDCT_ADD_FUNC(, 10, avx)
- +IDCT_ADD_FUNC(8_dc, 10, avx)
- +IDCT_ADD_FUNC(8, 10, avx)
- +#endif
- +
- +
- +#define IDCT_ADD_REP_FUNC(NUM, REP, DEPTH, OPT) \
- +void ff_h264_idct ## NUM ## _add ## REP ## _ ## DEPTH ## _ ## OPT \
- + (uint8_t *dst, const int *block_offset, \
- + DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
- +
- +IDCT_ADD_REP_FUNC(8, 4, 10, sse2)
- +IDCT_ADD_REP_FUNC(8, 4, 10, avx)
- +IDCT_ADD_REP_FUNC(, 16, 10, sse2)
- +IDCT_ADD_REP_FUNC(, 16intra, 10, sse2)
- +#if HAVE_AVX
- +IDCT_ADD_REP_FUNC(, 16, 10, avx)
- +IDCT_ADD_REP_FUNC(, 16intra, 10, avx)
- +#endif
- +
- +
- +#define IDCT_ADD_REP_FUNC2(NUM, REP, DEPTH, OPT) \
- +void ff_h264_idct ## NUM ## _add ## REP ## _ ## DEPTH ## _ ## OPT \
- + (uint8_t **dst, const int *block_offset, \
- + DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
- +IDCT_ADD_REP_FUNC2(, 8, 10, sse2)
- +#if HAVE_AVX
- +IDCT_ADD_REP_FUNC2(, 8, 10, avx)
- +#endif
- void ff_h264_idct_add_mmx (uint8_t *dst, int16_t *block, int stride);
- void ff_h264_idct8_add_mmx (uint8_t *dst, int16_t *block, int stride);
- @@ -418,7 +455,17 @@ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth)
- c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_mmxext;
- c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_mmxext;
- #endif
- + c->h264_idct_dc_add= ff_h264_idct_dc_add_10_mmx2;
- if (mm_flags&AV_CPU_FLAG_SSE2) {
- + c->h264_idct_add = ff_h264_idct_add_10_sse2;
- + c->h264_idct8_dc_add = ff_h264_idct8_dc_add_10_sse2;
- + c->h264_idct8_add = ff_h264_idct8_add_10_sse2;
- +
- + c->h264_idct_add16 = ff_h264_idct_add16_10_sse2;
- + c->h264_idct8_add4 = ff_h264_idct8_add4_10_sse2;
- + c->h264_idct_add8 = ff_h264_idct_add8_10_sse2;
- + c->h264_idct_add16intra= ff_h264_idct_add16intra_10_sse2;
- +
- c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_10_sse2;
- c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_10_sse2;
- #if HAVE_ALIGNED_STACK
- @@ -428,7 +475,18 @@ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth)
- c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_sse2;
- #endif
- }
- +#if HAVE_AVX
- if (mm_flags&AV_CPU_FLAG_AVX) {
- + c->h264_idct_dc_add =
- + c->h264_idct_add = ff_h264_idct_add_10_avx;
- + c->h264_idct8_add = ff_h264_idct8_add_10_avx;
- + c->h264_idct8_dc_add = ff_h264_idct8_dc_add_10_avx;
- +
- + c->h264_idct_add16 = ff_h264_idct_add16_10_avx;
- + c->h264_idct8_add4 = ff_h264_idct8_add4_10_avx;
- + c->h264_idct_add8 = ff_h264_idct_add8_10_avx;
- + c->h264_idct_add16intra= ff_h264_idct_add16intra_10_avx;
- +
- c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_10_avx;
- c->h264_v_loop_filter_chroma_intra= ff_deblock_v_chroma_intra_10_avx;
- #if HAVE_ALIGNED_STACK
- @@ -438,6 +496,7 @@ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth)
- c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_avx;
- #endif
- }
- +#endif /* HAVE_AVX */
- }
- }
- #endif
- --
- 1.7.5.1
- From 35be6d85908338a67986d56520ffbc339ae35e80 Mon Sep 17 00:00:00 2001
- From: Daniel Kang <daniel.d.kang@gmail.com>
- Date: Tue, 24 May 2011 15:15:08 -0400
- Subject: [PATCH 2/2] Update 8-bit H.264 IDCT function names to reflect
- bit-depth.
- ---
- libavcodec/h264dsp.h | 1 -
- libavcodec/x86/h264_idct.asm | 38 +++++++++---------
- libavcodec/x86/h264dsp_mmx.c | 90 ++++++++++++++++++------------------------
- 3 files changed, 57 insertions(+), 72 deletions(-)
- diff --git a/libavcodec/h264dsp.h b/libavcodec/h264dsp.h
- index 87a1dd9..864c118 100644
- --- a/libavcodec/h264dsp.h
- +++ b/libavcodec/h264dsp.h
- @@ -66,7 +66,6 @@ typedef struct H264DSPContext{
- void (*h264_idct_dc_add)(uint8_t *dst/*align 4*/, DCTELEM *block/*align 16*/, int stride);
- void (*h264_idct8_dc_add)(uint8_t *dst/*align 8*/, DCTELEM *block/*align 16*/, int stride);
- - void (*h264_dct)(DCTELEM block[4][4]);
- void (*h264_idct_add16)(uint8_t *dst/*align 16*/, const int *blockoffset, DCTELEM *block/*align 16*/, int stride, const uint8_t nnzc[6*8]);
- void (*h264_idct8_add4)(uint8_t *dst/*align 16*/, const int *blockoffset, DCTELEM *block/*align 16*/, int stride, const uint8_t nnzc[6*8]);
- void (*h264_idct_add8)(uint8_t **dst/*align 16*/, const int *blockoffset, DCTELEM *block/*align 16*/, int stride, const uint8_t nnzc[6*8]);
- diff --git a/libavcodec/x86/h264_idct.asm b/libavcodec/x86/h264_idct.asm
- index ae70a30..f90f41c 100644
- --- a/libavcodec/x86/h264_idct.asm
- +++ b/libavcodec/x86/h264_idct.asm
- @@ -73,7 +73,7 @@ SECTION .text
- INIT_MMX
- ; ff_h264_idct_add_mmx(uint8_t *dst, int16_t *block, int stride)
- -cglobal h264_idct_add_mmx, 3, 3, 0
- +cglobal h264_idct_add_8_mmx, 3, 3, 0
- IDCT4_ADD r0, r1, r2
- RET
- @@ -125,7 +125,7 @@ cglobal h264_idct_add_mmx, 3, 3, 0
- SUMSUB_BA w, 0, 4
- SUMSUB_BA w, 3, 2
- SUMSUB_BA w, 1, 5
- - SWAP 7, 6, 4, 5, 2, 3, 1, 0 ; 70315246 -> 01234567
- + SWAP 7, 6, 4, 5, 2, 3, 1, 0 ; 70315246 -> 01234567
- %endmacro
- %macro IDCT8_1D_FULL 1
- @@ -177,7 +177,7 @@ cglobal h264_idct_add_mmx, 3, 3, 0
- INIT_MMX
- ; ff_h264_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
- -cglobal h264_idct8_add_mmx, 3, 4, 0
- +cglobal h264_idct8_add_8_mmx, 3, 4, 0
- %assign pad 128+4-(stack_offset&7)
- SUB rsp, pad
- @@ -237,7 +237,7 @@ cglobal h264_idct8_add_mmx, 3, 4, 0
- INIT_XMM
- ; ff_h264_idct8_add_sse2(uint8_t *dst, int16_t *block, int stride)
- -cglobal h264_idct8_add_sse2, 3, 4, 10
- +cglobal h264_idct8_add_8_sse2, 3, 4, 10
- IDCT8_ADD_SSE r0, r1, r2, r3
- RET
- @@ -261,7 +261,7 @@ cglobal h264_idct8_add_sse2, 3, 4, 10
- packuswb m1, m1
- %endmacro
- -%macro DC_ADD_MMX2_OP 3-4
- +%macro DC_ADD_MMX2_OP 4
- %1 m2, [%2 ]
- %1 m3, [%2+%3 ]
- %1 m4, [%2+%3*2]
- @@ -282,13 +282,13 @@ cglobal h264_idct8_add_sse2, 3, 4, 10
- INIT_MMX
- ; ff_h264_idct_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
- -cglobal h264_idct_dc_add_mmx2, 3, 3, 0
- +cglobal h264_idct_dc_add_8_mmx2, 3, 3, 0
- DC_ADD_MMX2_INIT r1, r2
- DC_ADD_MMX2_OP movh, r0, r2, r1
- RET
- ; ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
- -cglobal h264_idct8_dc_add_mmx2, 3, 3, 0
- +cglobal h264_idct8_dc_add_8_mmx2, 3, 3, 0
- DC_ADD_MMX2_INIT r1, r2
- DC_ADD_MMX2_OP mova, r0, r2, r1
- lea r0, [r0+r2*4]
- @@ -297,7 +297,7 @@ cglobal h264_idct8_dc_add_mmx2, 3, 3, 0
- ; ff_h264_idct_add16_mmx(uint8_t *dst, const int *block_offset,
- ; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
- -cglobal h264_idct_add16_mmx, 5, 7, 0
- +cglobal h264_idct_add16_8_mmx, 5, 7, 0
- xor r5, r5
- %ifdef PIC
- lea r11, [scan8_mem]
- @@ -319,7 +319,7 @@ cglobal h264_idct_add16_mmx, 5, 7, 0
- ; ff_h264_idct8_add4_mmx(uint8_t *dst, const int *block_offset,
- ; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
- -cglobal h264_idct8_add4_mmx, 5, 7, 0
- +cglobal h264_idct8_add4_8_mmx, 5, 7, 0
- %assign pad 128+4-(stack_offset&7)
- SUB rsp, pad
- @@ -351,7 +351,7 @@ cglobal h264_idct8_add4_mmx, 5, 7, 0
- ; ff_h264_idct_add16_mmx2(uint8_t *dst, const int *block_offset,
- ; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
- -cglobal h264_idct_add16_mmx2, 5, 7, 0
- +cglobal h264_idct_add16_8_mmx2, 5, 7, 0
- xor r5, r5
- %ifdef PIC
- lea r11, [scan8_mem]
- @@ -398,7 +398,7 @@ cglobal h264_idct_add16_mmx2, 5, 7, 0
- ; ff_h264_idct_add16intra_mmx(uint8_t *dst, const int *block_offset,
- ; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
- -cglobal h264_idct_add16intra_mmx, 5, 7, 0
- +cglobal h264_idct_add16intra_8_mmx, 5, 7, 0
- xor r5, r5
- %ifdef PIC
- lea r11, [scan8_mem]
- @@ -421,7 +421,7 @@ cglobal h264_idct_add16intra_mmx, 5, 7, 0
- ; ff_h264_idct_add16intra_mmx2(uint8_t *dst, const int *block_offset,
- ; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
- -cglobal h264_idct_add16intra_mmx2, 5, 7, 0
- +cglobal h264_idct_add16intra_8_mmx2, 5, 7, 0
- xor r5, r5
- %ifdef PIC
- lea r11, [scan8_mem]
- @@ -466,7 +466,7 @@ cglobal h264_idct_add16intra_mmx2, 5, 7, 0
- ; ff_h264_idct8_add4_mmx2(uint8_t *dst, const int *block_offset,
- ; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
- -cglobal h264_idct8_add4_mmx2, 5, 7, 0
- +cglobal h264_idct8_add4_8_mmx2, 5, 7, 0
- %assign pad 128+4-(stack_offset&7)
- SUB rsp, pad
- @@ -529,7 +529,7 @@ cglobal h264_idct8_add4_mmx2, 5, 7, 0
- INIT_XMM
- ; ff_h264_idct8_add4_sse2(uint8_t *dst, const int *block_offset,
- ; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
- -cglobal h264_idct8_add4_sse2, 5, 7, 10
- +cglobal h264_idct8_add4_8_sse2, 5, 7, 10
- xor r5, r5
- %ifdef PIC
- lea r11, [scan8_mem]
- @@ -607,7 +607,7 @@ h264_idct_add8_mmx_plane:
- ; ff_h264_idct_add8_mmx(uint8_t **dest, const int *block_offset,
- ; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
- -cglobal h264_idct_add8_mmx, 5, 7, 0
- +cglobal h264_idct_add8_8_mmx, 5, 7, 0
- mov r5, 16
- add r2, 512
- %ifdef PIC
- @@ -668,7 +668,7 @@ h264_idct_add8_mmx2_plane
- ; ff_h264_idct_add8_mmx2(uint8_t **dest, const int *block_offset,
- ; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
- -cglobal h264_idct_add8_mmx2, 5, 7, 0
- +cglobal h264_idct_add8_8_mmx2, 5, 7, 0
- mov r5, 16
- add r2, 512
- %ifdef ARCH_X86_64
- @@ -744,7 +744,7 @@ x264_add8x4_idct_sse2:
- ; ff_h264_idct_add16_sse2(uint8_t *dst, const int *block_offset,
- ; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
- -cglobal h264_idct_add16_sse2, 5, 5, 8
- +cglobal h264_idct_add16_8_sse2, 5, 5, 8
- %ifdef ARCH_X86_64
- mov r10, r0
- %endif
- @@ -791,7 +791,7 @@ cglobal h264_idct_add16_sse2, 5, 5, 8
- ; ff_h264_idct_add16intra_sse2(uint8_t *dst, const int *block_offset,
- ; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
- -cglobal h264_idct_add16intra_sse2, 5, 7, 8
- +cglobal h264_idct_add16intra_8_sse2, 5, 7, 8
- %ifdef ARCH_X86_64
- mov r10, r0
- %endif
- @@ -840,7 +840,7 @@ cglobal h264_idct_add16intra_sse2, 5, 7, 8
- ; ff_h264_idct_add8_sse2(uint8_t **dest, const int *block_offset,
- ; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
- -cglobal h264_idct_add8_sse2, 5, 7, 8
- +cglobal h264_idct_add8_8_sse2, 5, 7, 8
- add r2, 512
- %ifdef ARCH_X86_64
- mov r10, r0
- diff --git a/libavcodec/x86/h264dsp_mmx.c b/libavcodec/x86/h264dsp_mmx.c
- index d60fbd5..1a31e41 100644
- --- a/libavcodec/x86/h264dsp_mmx.c
- +++ b/libavcodec/x86/h264dsp_mmx.c
- @@ -30,9 +30,14 @@ DECLARE_ALIGNED(8, static const uint64_t, ff_pb_3_1 ) = 0x0103010301030103ULL;
- #define IDCT_ADD_FUNC(NUM, DEPTH, OPT) \
- void ff_h264_idct ## NUM ## _add_ ## DEPTH ## _ ## OPT (uint8_t *dst, int16_t *block, int stride);
- +IDCT_ADD_FUNC(, 8, mmx)
- IDCT_ADD_FUNC(, 10, sse2)
- +IDCT_ADD_FUNC(_dc, 8, mmx2)
- IDCT_ADD_FUNC(_dc, 10, mmx2)
- +IDCT_ADD_FUNC(8_dc, 8, mmx2)
- IDCT_ADD_FUNC(8_dc, 10, sse2)
- +IDCT_ADD_FUNC(8, 8, mmx)
- +IDCT_ADD_FUNC(8, 8, sse2)
- IDCT_ADD_FUNC(8, 10, sse2)
- #if HAVE_AVX
- IDCT_ADD_FUNC(, 10, avx)
- @@ -46,9 +51,18 @@ void ff_h264_idct ## NUM ## _add ## REP ## _ ## DEPTH ## _ ## OPT \
- (uint8_t *dst, const int *block_offset, \
- DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
- +IDCT_ADD_REP_FUNC(8, 4, 8, mmx)
- +IDCT_ADD_REP_FUNC(8, 4, 8, mmx2)
- +IDCT_ADD_REP_FUNC(8, 4, 8, sse2)
- IDCT_ADD_REP_FUNC(8, 4, 10, sse2)
- IDCT_ADD_REP_FUNC(8, 4, 10, avx)
- +IDCT_ADD_REP_FUNC(, 16, 8, mmx)
- +IDCT_ADD_REP_FUNC(, 16, 8, mmx2)
- +IDCT_ADD_REP_FUNC(, 16, 8, sse2)
- IDCT_ADD_REP_FUNC(, 16, 10, sse2)
- +IDCT_ADD_REP_FUNC(, 16intra, 8, mmx)
- +IDCT_ADD_REP_FUNC(, 16intra, 8, mmx2)
- +IDCT_ADD_REP_FUNC(, 16intra, 8, sse2)
- IDCT_ADD_REP_FUNC(, 16intra, 10, sse2)
- #if HAVE_AVX
- IDCT_ADD_REP_FUNC(, 16, 10, avx)
- @@ -60,42 +74,14 @@ IDCT_ADD_REP_FUNC(, 16intra, 10, avx)
- void ff_h264_idct ## NUM ## _add ## REP ## _ ## DEPTH ## _ ## OPT \
- (uint8_t **dst, const int *block_offset, \
- DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
- +IDCT_ADD_REP_FUNC2(, 8, 8, mmx)
- +IDCT_ADD_REP_FUNC2(, 8, 8, mmx2)
- +IDCT_ADD_REP_FUNC2(, 8, 8, sse2)
- IDCT_ADD_REP_FUNC2(, 8, 10, sse2)
- #if HAVE_AVX
- IDCT_ADD_REP_FUNC2(, 8, 10, avx)
- #endif
- -void ff_h264_idct_add_mmx (uint8_t *dst, int16_t *block, int stride);
- -void ff_h264_idct8_add_mmx (uint8_t *dst, int16_t *block, int stride);
- -void ff_h264_idct8_add_sse2 (uint8_t *dst, int16_t *block, int stride);
- -void ff_h264_idct_dc_add_mmx2 (uint8_t *dst, int16_t *block, int stride);
- -void ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride);
- -
- -void ff_h264_idct_add16_mmx (uint8_t *dst, const int *block_offset,
- - DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
- -void ff_h264_idct8_add4_mmx (uint8_t *dst, const int *block_offset,
- - DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
- -void ff_h264_idct_add16_mmx2 (uint8_t *dst, const int *block_offset,
- - DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
- -void ff_h264_idct_add16intra_mmx (uint8_t *dst, const int *block_offset,
- - DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
- -void ff_h264_idct_add16intra_mmx2(uint8_t *dst, const int *block_offset,
- - DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
- -void ff_h264_idct8_add4_mmx2 (uint8_t *dst, const int *block_offset,
- - DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
- -void ff_h264_idct8_add4_sse2 (uint8_t *dst, const int *block_offset,
- - DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
- -void ff_h264_idct_add8_mmx (uint8_t **dest, const int *block_offset,
- - DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
- -void ff_h264_idct_add8_mmx2 (uint8_t **dest, const int *block_offset,
- - DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
- -
- -void ff_h264_idct_add16_sse2 (uint8_t *dst, const int *block_offset, DCTELEM *block,
- - int stride, const uint8_t nnzc[6*8]);
- -void ff_h264_idct_add16intra_sse2(uint8_t *dst, const int *block_offset, DCTELEM *block,
- - int stride, const uint8_t nnzc[6*8]);
- -void ff_h264_idct_add8_sse2 (uint8_t **dest, const int *block_offset, DCTELEM *block,
- - int stride, const uint8_t nnzc[6*8]);
- void ff_h264_luma_dc_dequant_idct_mmx (DCTELEM *output, DCTELEM *input, int qmul);
- void ff_h264_luma_dc_dequant_idct_sse2(DCTELEM *output, DCTELEM *input, int qmul);
- @@ -350,24 +336,24 @@ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth)
- }
- #if HAVE_YASM
- if (mm_flags & AV_CPU_FLAG_MMX) {
- - c->h264_idct_dc_add=
- - c->h264_idct_add= ff_h264_idct_add_mmx;
- - c->h264_idct8_dc_add=
- - c->h264_idct8_add= ff_h264_idct8_add_mmx;
- -
- - c->h264_idct_add16 = ff_h264_idct_add16_mmx;
- - c->h264_idct8_add4 = ff_h264_idct8_add4_mmx;
- - c->h264_idct_add8 = ff_h264_idct_add8_mmx;
- - c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx;
- + c->h264_idct_dc_add =
- + c->h264_idct_add = ff_h264_idct_add_8_mmx;
- + c->h264_idct8_dc_add =
- + c->h264_idct8_add = ff_h264_idct8_add_8_mmx;
- +
- + c->h264_idct_add16 = ff_h264_idct_add16_8_mmx;
- + c->h264_idct8_add4 = ff_h264_idct8_add4_8_mmx;
- + c->h264_idct_add8 = ff_h264_idct_add8_8_mmx;
- + c->h264_idct_add16intra = ff_h264_idct_add16intra_8_mmx;
- c->h264_luma_dc_dequant_idct= ff_h264_luma_dc_dequant_idct_mmx;
- if (mm_flags & AV_CPU_FLAG_MMX2) {
- - c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2;
- - c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2;
- - c->h264_idct_add16 = ff_h264_idct_add16_mmx2;
- - c->h264_idct8_add4 = ff_h264_idct8_add4_mmx2;
- - c->h264_idct_add8 = ff_h264_idct_add8_mmx2;
- - c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx2;
- + c->h264_idct_dc_add = ff_h264_idct_dc_add_8_mmx2;
- + c->h264_idct8_dc_add = ff_h264_idct8_dc_add_8_mmx2;
- + c->h264_idct_add16 = ff_h264_idct_add16_8_mmx2;
- + c->h264_idct8_add4 = ff_h264_idct8_add4_8_mmx2;
- + c->h264_idct_add8 = ff_h264_idct_add8_8_mmx2;
- + c->h264_idct_add16intra= ff_h264_idct_add16intra_8_mmx2;
- c->h264_v_loop_filter_chroma= ff_deblock_v_chroma_8_mmxext;
- c->h264_h_loop_filter_chroma= ff_deblock_h_chroma_8_mmxext;
- @@ -398,8 +384,12 @@ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth)
- c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;
- if (mm_flags&AV_CPU_FLAG_SSE2) {
- - c->h264_idct8_add = ff_h264_idct8_add_sse2;
- - c->h264_idct8_add4= ff_h264_idct8_add4_sse2;
- + c->h264_idct8_add = ff_h264_idct8_add_8_sse2;
- +
- + c->h264_idct_add16 = ff_h264_idct_add16_8_sse2;
- + c->h264_idct8_add4 = ff_h264_idct8_add4_8_sse2;
- + c->h264_idct_add8 = ff_h264_idct_add8_8_sse2;
- + c->h264_idct_add16intra = ff_h264_idct_add16intra_8_sse2;
- c->h264_luma_dc_dequant_idct= ff_h264_luma_dc_dequant_idct_sse2;
- c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_sse2;
- @@ -420,10 +410,6 @@ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth)
- c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_sse2;
- c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_sse2;
- #endif
- -
- - c->h264_idct_add16 = ff_h264_idct_add16_sse2;
- - c->h264_idct_add8 = ff_h264_idct_add8_sse2;
- - c->h264_idct_add16intra = ff_h264_idct_add16intra_sse2;
- }
- if (mm_flags&AV_CPU_FLAG_SSSE3) {
- c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_ssse3;
- --
- 1.7.5.1
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement