From 9ed2267520aeadf1641972706a6e103e1d0e7147 Mon Sep 17 00:00:00 2001
From: Halvor Kielland-Gyrud <h.kiella@gmail.com>
Date: Thu, 12 May 2016 16:11:37 +0200
Subject: [PATCH] WIP: optimize ProRes encoder with AVX2

---
 libavcodec/fdctdsp.c            |   2 +-
 libavcodec/proresenc_anatoliy.c | 307 +++++++++++++++++++--
 libavcodec/utils.c              |   5 +-
 libavcodec/x86/Makefile         |   1 +
 libavcodec/x86/fdct10.c         | 581 ++++++++++++++++++++++++++++++++++++++++
 libavcodec/x86/fdct10.h         |  26 ++
 libavcodec/x86/fdctdsp_init.c   |   5 +
 7 files changed, 902 insertions(+), 25 deletions(-)
 create mode 100644 libavcodec/x86/fdct10.c
 create mode 100644 libavcodec/x86/fdct10.h

diff --git a/libavcodec/fdctdsp.c b/libavcodec/fdctdsp.c
index b9c2c86..cb984e2 100644
--- a/libavcodec/fdctdsp.c
+++ b/libavcodec/fdctdsp.c
@@ -46,5 +46,5 @@ av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
     if (ARCH_PPC)
         ff_fdctdsp_init_ppc(c, avctx, high_bit_depth);
     if (ARCH_X86)
-        ff_fdctdsp_init_x86(c, avctx, high_bit_depth);
+      ff_fdctdsp_init_x86(c, avctx, high_bit_depth);
 }
diff --git a/libavcodec/proresenc_anatoliy.c b/libavcodec/proresenc_anatoliy.c
index 0516066..da017cc 100644
--- a/libavcodec/proresenc_anatoliy.c
+++ b/libavcodec/proresenc_anatoliy.c
@@ -33,6 +33,11 @@
 #include "bytestream.h"
 #include "fdctdsp.h"
 
+#include <immintrin.h>
+#include <malloc.h>
+#include "libavutil/cpu.h"
+#include "libavutil/x86/cpu.h"
+
 #define DEFAULT_SLICE_MB_WIDTH 8
 
 #define FF_PROFILE_PRORES_PROXY     0
@@ -152,8 +157,12 @@ typedef struct {
 
     int qmat_luma[16][64];
     int qmat_chroma[16][64];
+
 } ProresContext;
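+
+/* Dispatch pointer for the AC coefficient encoder: prores_encode_init()
+ * points it at either the plain C or the AVX2 implementation, depending on
+ * the detected CPU flags. */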
 
+static void (*encode_ac_coeffs)(struct AVCodecContext*, struct PutBitContext*,
+                                int16_t*, int, int*);
+
 static void encode_codeword(PutBitContext *pb, int val, int codebook)
 {
     unsigned int rice_order, exp_order, switch_bits, first_exp, exp, zeros;
@@ -182,10 +191,56 @@ static void encode_codeword(PutBitContext *pb, int val, int codebook)
     }
 }
 
+
+#define IS_NEGATIVE(val) ((((val) >> 31) ^ -1) + 1)
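+/*
+ * Write out eight (run, level, sign) triples collected by the AVX2 path
+ * below: vals1 holds the run codes, vals2 the (level - 1) codes and vals3
+ * the quantized coefficients (only their sign bit is used). The Rice /
+ * exp-Golomb parameters for both codebooks are passed in already unpacked
+ * by the caller; the bit writing itself stays scalar because PutBitContext
+ * is inherently sequential.
+ */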
+static void encode_codeword_avx2(PutBitContext *pb, int *vals1, int *vals2, int *vals3, unsigned int *rice_orders,
+				 unsigned int *exp_orders, unsigned int *switch_bits, unsigned int *first_exps)
+{
+    unsigned int exp, zeros;
+    int i;
+    
+    for (i = 0; i < 8; i++) {
+      if (vals1[i] >= first_exps[0 + i]) {  // exp-Golomb
+        vals1[i] -= first_exps[0 + i];
+        vals1[i] += (1 << exp_orders[0 + i]);
+        exp = av_log2(vals1[i]);
+        zeros = exp - exp_orders[0 + i] + switch_bits[0 + i] + 1;
+        put_bits(pb, zeros, 0);
+        put_bits(pb, exp + 1, vals1[i]);
+      } else if (rice_orders[0 + i]) {
+        put_bits(pb, (vals1[i] >> rice_orders[0 + i]), 0);
+        put_bits(pb, 1, 1);
+        put_sbits(pb, rice_orders[0 + i], vals1[i]);
+      } else {
+        put_bits(pb, vals1[i], 0);
+        put_bits(pb, 1, 1);
+      }
+      
+      if (vals2[i] >= first_exps[8 + i]) {  // exp-Golomb
+        vals2[i] -= first_exps[8 + i];
+        vals2[i] += (1 << exp_orders[8 + i]);
+        exp = av_log2(vals2[i]);
+        zeros = exp - exp_orders[8 + i] + switch_bits[8 + i] + 1;
+        put_bits(pb, zeros, 0);
+        put_bits(pb, exp + 1, vals2[i]);
+      } else if (rice_orders[8 + i]) {
+        put_bits(pb, (vals2[i] >> rice_orders[8 + i]), 0);
+        put_bits(pb, 1, 1);
+        put_sbits(pb, rice_orders[8 + i], vals2[i]);
+      } else {
+        put_bits(pb, vals2[i], 0);
+        put_bits(pb, 1, 1);
+      }
+
+      put_bits(pb, 1, IS_NEGATIVE(vals3[i]));
+    }
+}
+
+
 #define QSCALE(qmat,ind,val) ((val) / ((qmat)[ind]))
 #define TO_GOLOMB(val) (((val) << 1) ^ ((val) >> 31))
 #define DIFF_SIGN(val, sign) (((val) >> 31) ^ (sign))
-#define IS_NEGATIVE(val) ((((val) >> 31) ^ -1) + 1)
 #define TO_GOLOMB2(val,sign) ((val)==0 ? 0 : ((val) << 1) + (sign))
 
 static av_always_inline int get_level(int val)
@@ -229,35 +284,225 @@ static const uint8_t run_to_cb[16] = { 0x06, 0x06, 0x05, 0x05, 0x04, 0x29,
 static const uint8_t lev_to_cb[10] = { 0x04, 0x0A, 0x05, 0x06, 0x04, 0x28,
         0x28, 0x28, 0x28, 0x4C };
 
-static void encode_ac_coeffs(AVCodecContext *avctx, PutBitContext *pb,
-        int16_t *in, int blocks_per_slice, int *qmat)
+#if HAVE_AVX2
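+/*
+ * AVX2 variant of the AC coefficient encoder: quantization and run/level
+ * collection are vectorized over eight blocks at a time, then the collected
+ * values are converted to codewords in a second pass further down. The
+ * bitstream itself is still written sequentially with put_bits().
+ */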
+static void encode_ac_coeffs_avx2(AVCodecContext *avctx, PutBitContext *pb,
+				  int16_t *in, int blocks_per_slice, int *qmat)
 {
     int prev_run = 4;
-    int prev_level = 2;
-
-    int run = 0, level, code, i, j;
+    int prev_level = 2;
+    int run = 0, i, j, k;
+
+    __m256i indp_vec, indexes_vec, indexes_shift;
+    __m256i temp_vec1, temp_vec2, temp_vec3;
+    __m256 factors, vals_vec;
+
+    int num_vals = 0;
+    float qmat_val;
+
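+    /* Vector constants: 'offsets' yields the per-lane block indices j..j+7;
+     * 15 and 9 clamp prev_run / prev_level exactly like the scalar FFMIN()
+     * calls; 1, 3 and 7 are used to unpack the codebook bytes (switch bits
+     * in bits 0-1, exp order in bits 2-4, rice order in bits 5-7). */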
+    __m256i offsets = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
+    __m256i one_vec = _mm256_set1_epi32(1);
+    __m256i three_vec = _mm256_set1_epi32(3);
+    __m256i seven_vec = _mm256_set1_epi32(7);
+    __m256i nine_vec = _mm256_set1_epi32(9);
+    __m256i fifteen_vec = _mm256_set1_epi32(15);
+
+    int32_t temp[8] __attribute__((aligned(32)));
+    uint32_t rice_orders[16] __attribute__((aligned(32)));
+    uint32_t exp_orders[16] __attribute__((aligned(32)));
+    uint32_t switch_bits[16] __attribute__((aligned(32)));
+    uint32_t first_exps[16] __attribute__((aligned(32)));
+
+    int32_t vals[64*16*8] __attribute__((aligned(32))); // FIXME: array sizes need tuning
+    int32_t run_vals[64*16*8] __attribute__((aligned(32)));
+    int32_t level_vals[64*16*8] __attribute__((aligned(32)));
+    int32_t prev_run_vals[64*16*8] __attribute__((aligned(32)));
+    int32_t prev_level_vals[64*16*8] __attribute__((aligned(32)));
+
     for (i = 1; i < 64; i++) {
         int indp = progressive_scan[i];
-        for (j = 0; j < blocks_per_slice; j++) {
-            int val = QSCALE(qmat, indp, in[(j << 6) + indp]);
-            if (val) {
-                encode_codeword(pb, run, run_to_cb[FFMIN(prev_run, 15)]);
 
-                prev_run   = run;
-                run        = 0;
-                level      = get_level(val);
-                code       = level - 1;
-
-                encode_codeword(pb, code, lev_to_cb[FFMIN(prev_level, 9)]);
+	indp_vec = _mm256_set1_epi32(indp);
+	
+	qmat_val = 1.f/qmat[indp];
+	factors = _mm256_set1_ps(qmat_val);
+
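+        /* Quantize eight blocks at a time: multiply by the reciprocal of the
+         * quantizer and truncate toward zero, mirroring the integer division
+         * in QSCALE() (float rounding may differ by one in rare borderline
+         * cases). */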
+        for (j = 0; j < blocks_per_slice - blocks_per_slice % 8; j += 8) {
+	  indexes_vec = _mm256_set1_epi32(j);
+	  indexes_vec = _mm256_add_epi32(indexes_vec, offsets);
+	  
+	  indexes_shift = _mm256_slli_epi32(indexes_vec, 6);
+	  indexes_vec = _mm256_add_epi32(indexes_shift, indp_vec);
+	  _mm256_store_si256((__m256i*)temp, indexes_vec);
+
+	  vals_vec = _mm256_set_ps(in[temp[7]], in[temp[6]], in[temp[5]], in[temp[4]],
+				   in[temp[3]], in[temp[2]], in[temp[1]], in[temp[0]]);
+	  
+	  vals_vec = _mm256_mul_ps(vals_vec, factors);
+	  vals_vec = _mm256_round_ps(vals_vec, ((_MM_FROUND_TO_ZERO |_MM_FROUND_NO_EXC)));
+
+	  temp_vec1 = _mm256_cvtps_epi32(vals_vec);
+
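+          /* All eight quantized coefficients are zero: extend the zero run
+           * in a single step. */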
+	  if (_mm256_testz_si256(temp_vec1, temp_vec1)) {
+	    run += 8;
+
+	  } else {
+	    _mm256_store_si256((__m256i*)temp, temp_vec1);
+	    
+	    for (k = 0; k < 8; k++) {
+	      if (temp[k]) {
+		vals[num_vals] = temp[k];
+		run_vals[num_vals] = run;
+		prev_run_vals[num_vals] = prev_run;
+		
+		prev_run = run;
+		run = 0;
+		
+		level_vals[num_vals] = get_level(vals[num_vals]);
+		
+		prev_level_vals[num_vals] = prev_level;
+		prev_level = level_vals[num_vals++];
+		
+              } else {
+		++run;
+	      }	      
+	    }
+	  }   
+	}
+
+	// Handle case where blocks_per_slice not divisible by 8
+	for (; j < blocks_per_slice; j++) {
+	  int val = QSCALE(qmat, indp, in[(j << 6) + indp]);
+	  if (val) {
+	    vals[num_vals] = val;
+	    run_vals[num_vals] = run;
+	    prev_run_vals[num_vals] = prev_run;
+	    
+	    prev_run = run;
+	    run = 0;
+		
+	    level_vals[num_vals] = get_level(vals[num_vals]);
+	    
+	    prev_level_vals[num_vals] = prev_level;
+	    prev_level = level_vals[num_vals++];
+
+	  } else {
+	    ++run;
+	  }
+	}
+    }
 
-                prev_level = level;
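+    /* Second pass: turn the collected (run, level, sign) triples into
+     * codewords eight at a time; the codebook parameters are unpacked with
+     * AVX2 and the bits are emitted by encode_codeword_avx2(). */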
+    for (k = 0; k < num_vals - num_vals % 8; k += 8) {
+
+      // First encode_codeword       
+      // prev_run_vals
+      temp_vec1 = _mm256_load_si256((__m256i*)&prev_run_vals[k]);
+      temp_vec2 = _mm256_min_epi32(temp_vec1, fifteen_vec);
+      _mm256_store_si256((__m256i*)temp, temp_vec2);
+
+      // codebook vals
+      temp_vec1 = _mm256_set_epi32(run_to_cb[temp[7]], run_to_cb[temp[6]],
+				   run_to_cb[temp[5]], run_to_cb[temp[4]],
+				   run_to_cb[temp[3]], run_to_cb[temp[2]],
+				   run_to_cb[temp[1]], run_to_cb[temp[0]]);
+      // exp_orders
+      temp_vec2 = _mm256_srli_epi32(temp_vec1, 2);
+      temp_vec3 = _mm256_and_si256(temp_vec2, seven_vec);
+      _mm256_store_si256((__m256i*)&exp_orders[0], temp_vec3);
+
+      // switch bits
+      temp_vec2 = _mm256_and_si256(temp_vec1, three_vec);
+      _mm256_store_si256((__m256i*)&switch_bits[0], temp_vec2);
+
+      // rice_orders
+      temp_vec3 = _mm256_srli_epi32(temp_vec1, 5);
+      _mm256_store_si256((__m256i*)&rice_orders[0], temp_vec3);
+
+      // first_exps
+      temp_vec1 = _mm256_add_epi32(temp_vec2, one_vec);
+      temp_vec2 = _mm256_sllv_epi32(temp_vec1, temp_vec3);
+      _mm256_store_si256((__m256i*)&first_exps[0], temp_vec2);	  
+	 
+
+      // Second encode_codeword 
+      // vals (levels-1)	  
+      temp_vec1 = _mm256_load_si256((__m256i*)&level_vals[k]);
+      temp_vec2 = _mm256_sub_epi32(temp_vec1, one_vec);
+      _mm256_store_si256((__m256i*)&level_vals[k], temp_vec2);
+
+	  
+      // prev_level_vals
+      temp_vec1 = _mm256_load_si256((__m256i*)&prev_level_vals[k]);
+      temp_vec2 = _mm256_min_epi32(temp_vec1, nine_vec);
+      _mm256_store_si256((__m256i*)temp, temp_vec2);
+
+      // codebook vals
+      temp_vec1 = _mm256_set_epi32(lev_to_cb[temp[7]], lev_to_cb[temp[6]],
+				   lev_to_cb[temp[5]], lev_to_cb[temp[4]],
+				   lev_to_cb[temp[3]], lev_to_cb[temp[2]],
+				   lev_to_cb[temp[1]], lev_to_cb[temp[0]]);
+      // exp_orders
+      temp_vec2 = _mm256_srli_epi32(temp_vec1, 2);
+      temp_vec3 = _mm256_and_si256(temp_vec2, seven_vec);
+      _mm256_store_si256((__m256i*)&exp_orders[8], temp_vec3);
+
+      // switch bits
+      temp_vec2 = _mm256_and_si256(temp_vec1, three_vec);
+      _mm256_store_si256((__m256i*)&switch_bits[8], temp_vec2);
+
+      // rice_orders
+      temp_vec3 = _mm256_srli_epi32(temp_vec1, 5);
+      _mm256_store_si256((__m256i*)&rice_orders[8], temp_vec3);
+
+      // first_exps
+      temp_vec1 = _mm256_add_epi32(temp_vec2, one_vec);
+      temp_vec2 = _mm256_sllv_epi32(temp_vec1, temp_vec3);
+      _mm256_store_si256((__m256i*)&first_exps[8], temp_vec2);	  
+
+      encode_codeword_avx2(pb, &run_vals[k], &level_vals[k], &vals[k], rice_orders,
+			   exp_orders, switch_bits, first_exps);
+    }
+    
+    // Handle case where num_vals not divisible by 8
+    for (; k < num_vals; k++) {
+      encode_codeword(pb, run_vals[k], run_to_cb[FFMIN(prev_run_vals[k], 15)]);
+      encode_codeword(pb, level_vals[k]-1, lev_to_cb[FFMIN(prev_level_vals[k], 9)]);
+      put_bits(pb, 1, IS_NEGATIVE(vals[k]));
+    }
+}
+#endif
 
-                put_bits(pb, 1, IS_NEGATIVE(val));
-            } else {
-                ++run;
-            }
-        }
+static void encode_ac_coeffs_std(AVCodecContext *avctx, PutBitContext *pb,
+        int16_t *in, int blocks_per_slice, int *qmat)
+{
+    int prev_run = 4;
+    int prev_level = 2;
+    int run = 0, level, code, i, j;
+
+    for (i = 1; i < 64; i++) {
+        int indp = progressive_scan[i];
+
+        for (j = 0; j < blocks_per_slice; j++) {
+            int val = QSCALE(qmat, indp, in[(j << 6) + indp]);
+
+            if (val) {
+                encode_codeword(pb, run, run_to_cb[FFMIN(prev_run, 15)]);
+
+                prev_run   = run;
+                run        = 0;
+                level      = get_level(val);
+                code       = level - 1;
+
+                encode_codeword(pb, code, lev_to_cb[FFMIN(prev_level, 9)]);
+
+                prev_level = level;
+
+                put_bits(pb, 1, IS_NEGATIVE(val));
+            } else {
+                ++run;
+            }
     }
+    }
 }
 
 static void get(uint8_t *pixels, int stride, int16_t* block)
@@ -387,6 +632,14 @@ static int encode_slice(AVCodecContext *avctx, const AVFrame *pic, int mb_x,
     dest_u = pic->data[1] + (mb_y << 4) * chroma_stride + (mb_x << 4);
     dest_v = pic->data[2] + (mb_y << 4) * chroma_stride + (mb_x << 4);
 
+    /*
+    if(mb_y == 67) {
+      printf("dest_y offset: %d\n", (mb_y << 4) * luma_stride   + (mb_x << 5));
+      printf("dest_u offset: %d\n", (mb_y << 4) * chroma_stride   + (mb_x << 4));
+      printf("dest_v offset: %d\n", (mb_y << 4) * chroma_stride   + (mb_x << 4));
+    }
+    */
+    
     if (unsafe) {
 
         subimage_with_fill((uint16_t *) pic->data[0], mb_x << 4, mb_y << 4,
@@ -493,7 +746,8 @@ static int prores_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
     int pic_size, ret;
     int frame_size = FFALIGN(avctx->width, 16) * FFALIGN(avctx->height, 16)*16 + 500 + AV_INPUT_BUFFER_MIN_SIZE; //FIXME choose tighter limit
 
-
+    //printf("Frame size: %d\n", frame_size);
+    //printf("AVFrame: %d\n", (avctx->height*(4*pict->linesize[0]+4*pict->linesize[1]+4*pict->linesize[2])));
     if ((ret = ff_alloc_packet2(avctx, pkt, frame_size + AV_INPUT_BUFFER_MIN_SIZE, 0)) < 0)
         return ret;
 
@@ -525,6 +779,8 @@ static int prores_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
     pkt->size = pic_size + 8 + header_size;
     *got_packet = 1;
 
+    //printf("Packet size: %d\n", pkt->size);
+
     return 0;
 }
 
@@ -539,6 +795,7 @@ static av_cold int prores_encode_init(AVCodecContext *avctx)
 {
     int i;
     ProresContext* ctx = avctx->priv_data;
+    int cpu_flags = av_get_cpu_flags();
 
     if (avctx->pix_fmt != AV_PIX_FMT_YUV422P10) {
         av_log(avctx, AV_LOG_ERROR, "need YUV422P10\n");
@@ -590,6 +847,10 @@ static av_cold int prores_encode_init(AVCodecContext *avctx)
         scale_mat(QMAT_CHROMA[avctx->profile], ctx->qmat_chroma[i - 1], i);
     }
 
+    encode_ac_coeffs = encode_ac_coeffs_std;
+#if HAVE_AVX2
+    if (INLINE_AVX2(cpu_flags))
+        encode_ac_coeffs = encode_ac_coeffs_avx2;
+#endif
+
     return 0;
 }
 
diff --git a/libavcodec/utils.c b/libavcodec/utils.c
index 8652b17..bd4a08d 100644
--- a/libavcodec/utils.c
+++ b/libavcodec/utils.c
@@ -1699,11 +1699,14 @@ int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64
             avpkt->data = avctx->internal->byte_buffer;
             avpkt->size = avctx->internal->byte_buffer_size;
         }
+	else {
+	  printf("If test not true\n");
+	}
     }
 
     if (avpkt->data) {
         AVBufferRef *buf = avpkt->buf;
-
+	
         if (avpkt->size < size) {
             av_log(avctx, AV_LOG_ERROR, "User packet is too small (%d < %"PRId64")\n", avpkt->size, size);
             return AVERROR(EINVAL);
diff --git a/libavcodec/x86/Makefile b/libavcodec/x86/Makefile
index 839b5bc..70ca132 100644
--- a/libavcodec/x86/Makefile
+++ b/libavcodec/x86/Makefile
@@ -70,6 +70,7 @@ OBJS-$(CONFIG_VP9_DECODER)             += x86/vp9dsp_init.o            \
                                           x86/vp9dsp_init_12bpp.o      \
                                           x86/vp9dsp_init_16bpp.o
 OBJS-$(CONFIG_WEBP_DECODER)            += x86/vp8dsp_init.o
+OBJS-$(CONFIG_FDCTDSP)                 += x86/fdct10.o
 
 
 # GCC inline assembly optimizations
diff --git a/libavcodec/x86/fdct10.c b/libavcodec/x86/fdct10.c
new file mode 100644
index 0000000..9406c6d
--- /dev/null
+++ b/libavcodec/x86/fdct10.c
@@ -0,0 +1,581 @@
+/**
+ * @file
+ * AVX2 forward DCT for 10-bit samples, based on jfdctint_template.c
+ * (Independent JPEG Group's slow & accurate DCT).
+ *
+ * FIXME: requires significant cleanup before submitting to FFmpeg.
+ */
+
+#include "libavutil/common.h"
+#include <immintrin.h>
+#include "fdct10.h"
+
+#ifndef BIT_DEPTH
+#define BIT_DEPTH 10
+#endif
+
+#define DCTSIZE 8
+#define BITS_IN_JSAMPLE BIT_DEPTH
+#define GLOBAL(x) x
+#define RIGHT_SHIFT(x, n) ((x) >> (n))
+#define MULTIPLY16C16(var,const) ((var)*(const))
+
+#if 1 //def USE_ACCURATE_ROUNDING
+#define DESCALE(x,n)  RIGHT_SHIFT((x) + (1 << ((n) - 1)), n)
+#else
+#define DESCALE(x,n)  RIGHT_SHIFT(x, n)
+#endif
+
+/*
+ * This module is specialized to the case DCTSIZE = 8.
+ */
+
+#if DCTSIZE != 8
+#error  "Sorry, this code only copes with 8x8 DCTs."
+#endif
+
+
+/*
+ * The poop on this scaling stuff is as follows:
+ *
+ * Each 1-D DCT step produces outputs which are a factor of sqrt(N)
+ * larger than the true DCT outputs.  The final outputs are therefore
+ * a factor of N larger than desired; since N=8 this can be cured by
+ * a simple right shift at the end of the algorithm.  The advantage of
+ * this arrangement is that we save two multiplications per 1-D DCT,
+ * because the y0 and y4 outputs need not be divided by sqrt(N).
+ * In the IJG code, this factor of 8 is removed by the quantization step
+ * (in jcdctmgr.c), NOT in this module.
+ *
+ * We have to do addition and subtraction of the integer inputs, which
+ * is no problem, and multiplication by fractional constants, which is
+ * a problem to do in integer arithmetic.  We multiply all the constants
+ * by CONST_SCALE and convert them to integer constants (thus retaining
+ * CONST_BITS bits of precision in the constants).  After doing a
+ * multiplication we have to divide the product by CONST_SCALE, with proper
+ * rounding, to produce the correct output.  This division can be done
+ * cheaply as a right shift of CONST_BITS bits.  We postpone shifting
+ * as long as possible so that partial sums can be added together with
+ * full fractional precision.
+ *
+ * The outputs of the first pass are scaled up by PASS1_BITS bits so that
+ * they are represented to better-than-integral precision.  These outputs
+ * require BITS_IN_JSAMPLE + PASS1_BITS + 3 bits; this fits in a 16-bit word
+ * with the recommended scaling.  (For 12-bit sample data, the intermediate
+ * array is int32_t anyway.)
+ *
+ * To avoid overflow of the 32-bit intermediate results in pass 2, we must
+ * have BITS_IN_JSAMPLE + CONST_BITS + PASS1_BITS <= 26.  Error analysis
+ * shows that the values given below are the most effective.
+ */
+
+#undef CONST_BITS
+#undef PASS1_BITS
+#undef OUT_SHIFT
+
+#if BITS_IN_JSAMPLE == 8
+#define CONST_BITS  13
+#define PASS1_BITS  4   /* set this to 2 if 16x16 multiplies are faster */
+#define OUT_SHIFT   PASS1_BITS
+#else
+#define CONST_BITS  13
+#define PASS1_BITS  1   /* lose a little precision to avoid overflow */
+#define OUT_SHIFT   (PASS1_BITS + 1)
+#endif
+
+/* Some C compilers fail to reduce "FIX(constant)" at compile time, thus
+ * causing a lot of useless floating-point operations at run time.
+ * To get around this we use the following pre-calculated constants.
+ * If you change CONST_BITS you may want to add appropriate values.
+ * (With a reasonable C compiler, you can just rely on the FIX() macro...)
+ */
+
+#if CONST_BITS == 13
+#define FIX_0_298631336  ((int32_t)  2446)      /* FIX(0.298631336) */
+#define FIX_0_390180644  ((int32_t)  3196)      /* FIX(0.390180644) */
+#define FIX_0_541196100  ((int32_t)  4433)      /* FIX(0.541196100) */
+#define FIX_0_765366865  ((int32_t)  6270)      /* FIX(0.765366865) */
+#define FIX_0_899976223  ((int32_t)  7373)      /* FIX(0.899976223) */
+#define FIX_1_175875602  ((int32_t)  9633)      /* FIX(1.175875602) */
+#define FIX_1_501321110  ((int32_t)  12299)     /* FIX(1.501321110) */
+#define FIX_1_847759065  ((int32_t)  15137)     /* FIX(1.847759065) */
+#define FIX_1_961570560  ((int32_t)  16069)     /* FIX(1.961570560) */
+#define FIX_2_053119869  ((int32_t)  16819)     /* FIX(2.053119869) */
+#define FIX_2_562915447  ((int32_t)  20995)     /* FIX(2.562915447) */
+#define FIX_3_072711026  ((int32_t)  25172)     /* FIX(3.072711026) */
+#else
+#define FIX_0_298631336  FIX(0.298631336)
+#define FIX_0_390180644  FIX(0.390180644)
+#define FIX_0_541196100  FIX(0.541196100)
+#define FIX_0_765366865  FIX(0.765366865)
+#define FIX_0_899976223  FIX(0.899976223)
+#define FIX_1_175875602  FIX(1.175875602)
+#define FIX_1_501321110  FIX(1.501321110)
+#define FIX_1_847759065  FIX(1.847759065)
+#define FIX_1_961570560  FIX(1.961570560)
+#define FIX_2_053119869  FIX(2.053119869)
+#define FIX_2_562915447  FIX(2.562915447)
+#define FIX_3_072711026  FIX(3.072711026)
+#endif
+
+
+/* Multiply an int32_t variable by an int32_t constant to yield an int32_t result.
+ * For 8-bit samples with the recommended scaling, all the variable
+ * and constant values involved are no more than 16 bits wide, so a
+ * 16x16->32 bit multiply can be used instead of a full 32x32 multiply.
+ * For 12-bit samples, a full 32-bit multiplication will be needed.
+ */
+
+#if BITS_IN_JSAMPLE == 8 && CONST_BITS<=13 && PASS1_BITS<=2
+#define MULTIPLY(var,const)  MULTIPLY16C16(var,const)
+#else
+#define MULTIPLY(var,const)  ((var) * (const))
+#endif
+
+
+#ifndef TRANSPOSE8x8
+#define TRANSPOSE8x8
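+/*
+ * Helpers for the 8x8 32-bit transpose below: interleave at 32-bit, 64-bit
+ * and 128-bit granularity.  The permute4x64 pre-step reorders the 64-bit
+ * lanes to compensate for unpacklo/unpackhi operating within 128-bit halves.
+ */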
+static inline void _mm256_merge_epi32(const __m256i v0, const __m256i v1, __m256i *vl, __m256i *vh)
+{
+  __m256i va = _mm256_permute4x64_epi64(v0, _MM_SHUFFLE(3, 1, 2, 0));
+  __m256i vb = _mm256_permute4x64_epi64(v1, _MM_SHUFFLE(3, 1, 2, 0));
+  *vl = _mm256_unpacklo_epi32(va, vb);
+  *vh = _mm256_unpackhi_epi32(va, vb);
+}
+
+static inline void _mm256_merge_epi64(const __m256i v0, const __m256i v1, __m256i *vl, __m256i *vh)
+{
+  __m256i va = _mm256_permute4x64_epi64(v0, _MM_SHUFFLE(3, 1, 2, 0));
+  __m256i vb = _mm256_permute4x64_epi64(v1, _MM_SHUFFLE(3, 1, 2, 0));
+  *vl = _mm256_unpacklo_epi64(va, vb);
+  *vh = _mm256_unpackhi_epi64(va, vb);
+}
+
+static inline void _mm256_merge_si128(const __m256i v0, const __m256i v1, __m256i *vl, __m256i *vh)
+{
+  *vl = _mm256_permute2x128_si256(v0, v1, _MM_SHUFFLE(0, 2, 0, 0));
+  *vh = _mm256_permute2x128_si256(v0, v1, _MM_SHUFFLE(0, 3, 0, 1));
+}
+
+/*
+ * Transpose_8_8
+ *
+ * in place transpose of 8 x 8 int array
+ */
+static void Transpose_8_8(__m256i *v0, __m256i *v1, __m256i *v2, __m256i *v3, __m256i *v4, __m256i *v5, __m256i *v6, __m256i *v7)
+{
+  __m256i w0, w1, w2, w3, w4, w5, w6, w7;
+  __m256i x0, x1, x2, x3, x4, x5, x6, x7;
+
+  _mm256_merge_epi32(*v0, *v1, &w0, &w1);
+  _mm256_merge_epi32(*v2, *v3, &w2, &w3);
+  _mm256_merge_epi32(*v4, *v5, &w4, &w5);
+  _mm256_merge_epi32(*v6, *v7, &w6, &w7);
+
+  _mm256_merge_epi64(w0, w2, &x0, &x1);
+  _mm256_merge_epi64(w1, w3, &x2, &x3);
+  _mm256_merge_epi64(w4, w6, &x4, &x5);
+  _mm256_merge_epi64(w5, w7, &x6, &x7);
+
+  _mm256_merge_si128(x0, x4, v0, v1);
+  _mm256_merge_si128(x1, x5, v2, v3);
+  _mm256_merge_si128(x2, x6, v4, v5);
+  _mm256_merge_si128(x3, x7, v6, v7);
+}
+#endif
+
+
+
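+/*
+ * Pass 1: row DCT.  The eight rows are widened to 32 bits and transposed so
+ * that each register holds one sample position of all eight rows, run through
+ * the jfdctint butterflies eight rows at a time, and written back as 16-bit
+ * values scaled up by 2**PASS1_BITS.
+ */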
+static av_always_inline void row_fdct10(int16_t *data)
+{
+  int32_t shift_val;
+
+  __m256i regs07, regs70; 
+  __m256i reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
+  __m256i reg07_add, reg16_add, reg25_add, reg34_add, reg07_sub, reg16_sub, reg25_sub, reg34_sub;
+
+
+  
+  /* Pass 1: process rows. */
+  /* Note results are scaled up by sqrt(8) compared to a true DCT; */
+  /* furthermore, we scale the results by 2**PASS1_BITS. */
+
+
+  reg0 = _mm256_cvtepi16_epi32(_mm_load_si128((__m128i*)&data[DCTSIZE*0]));
+  reg1 = _mm256_cvtepi16_epi32(_mm_load_si128((__m128i*)&data[DCTSIZE*1]));
+  reg2 = _mm256_cvtepi16_epi32(_mm_load_si128((__m128i*)&data[DCTSIZE*2]));
+  reg3 = _mm256_cvtepi16_epi32(_mm_load_si128((__m128i*)&data[DCTSIZE*3]));
+
+  reg4 = _mm256_cvtepi16_epi32(_mm_load_si128((__m128i*)&data[DCTSIZE*4]));
+  reg5 = _mm256_cvtepi16_epi32(_mm_load_si128((__m128i*)&data[DCTSIZE*5]));
+  reg6 = _mm256_cvtepi16_epi32(_mm_load_si128((__m128i*)&data[DCTSIZE*6]));
+  reg7 = _mm256_cvtepi16_epi32(_mm_load_si128((__m128i*)&data[DCTSIZE*7]));
+
+  Transpose_8_8(&reg0, &reg1, &reg2, &reg3, &reg4, &reg5, &reg6, &reg7);
+
+  /* tmp1 - tmp4 */
+  reg07_add = _mm256_add_epi32(reg0, reg7);
+  reg16_add = _mm256_add_epi32(reg1, reg6);
+  reg25_add = _mm256_add_epi32(reg2, reg5);
+  reg34_add = _mm256_add_epi32(reg3, reg4);
+
+  /* tmp4 - tmp 7*/
+  
+  reg34_sub = _mm256_sub_epi32(reg3, reg4);
+  reg25_sub = _mm256_sub_epi32(reg2, reg5);
+  reg16_sub = _mm256_sub_epi32(reg1, reg6);
+  reg07_sub = _mm256_sub_epi32(reg0, reg7);
+  
+
+
+  /* tmps[0 ... 3] */
+  reg0 = _mm256_add_epi32(reg07_add, reg34_add);
+  reg1 = _mm256_add_epi32(reg16_add, reg25_add);
+  reg2 = _mm256_sub_epi32(reg16_add, reg25_add);
+  reg3 = _mm256_sub_epi32(reg07_add, reg34_add);
+
+  
+  /*** Even values to be descaled ***/
+  /* DESCALE(x, n) (x + (1 << (n - 1))) >> n) */
+
+  /* Descaled values for data[DCTSIZE*0] */
+  reg07_add = _mm256_add_epi32(reg0, reg1);
+
+  /* In the row pass data[DCTSIZE*0] and data[DCTSIZE*4] are only scaled up
+   * by PASS1_BITS (no descale), implemented here as a multiply. */
+  regs07 = _mm256_set1_epi32(1 << PASS1_BITS);
+
+  reg5 = _mm256_mullo_epi32(reg07_add, regs07); 
+
+  regs70 = _mm256_set_epi64x(0, 0x0d0c090805040100, 0, 0x0d0c090805040100); // Shuffle mask
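+  // The mask keeps the low 16 bits of each 32-bit result within each 128-bit
+  // half; the following permute gathers the two packed halves into the low
+  // 128 bits so the eight int16 outputs can be stored with a single store.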
+    
+  reg4 = _mm256_shuffle_epi8(reg5, regs70);
+  reg5 = _mm256_permute4x64_epi64(reg4, 0b11011000);
+  _mm_store_si128((__m128i*)&data[DCTSIZE*0], _mm256_castsi256_si128(reg5)); // Descaled values DCTSIZE*0
+  
+  
+  /* Descaled values for data[DCTSIZE*4] */
+  reg07_add = _mm256_sub_epi32(reg0, reg1);
+
+  reg5 = _mm256_mullo_epi32(reg07_add, regs07);
+  
+  reg4 = _mm256_shuffle_epi8(reg5, regs70);
+  reg5 = _mm256_permute4x64_epi64(reg4, 0b11011000);
+  _mm_store_si128((__m128i*)&data[DCTSIZE*4], _mm256_castsi256_si128(reg5)); // Descaled values DCTSIZE*4
+
+
+  
+    /* z1 */
+  reg6 = _mm256_add_epi32(reg2, reg3);
+  reg7 = _mm256_mullo_epi32(reg6, _mm256_set1_epi32(FIX_0_541196100));
+
+  
+  /* Descaled values for data[DCTSIZE*2] */
+  reg16_add = _mm256_mullo_epi32(reg3, _mm256_set1_epi32(FIX_0_765366865)); // temp values
+  reg07_add = _mm256_add_epi32(reg7, reg16_add);
+
+  shift_val = (1 << (CONST_BITS - PASS1_BITS -1));
+  regs07 = _mm256_set1_epi32(shift_val);
+
+  reg4 = _mm256_add_epi32(reg07_add, regs07); // (x + (1 << (n-1))) //
+  reg5 = _mm256_srli_epi32(reg4, CONST_BITS - PASS1_BITS); // (x + (1 << (n-1))) >> n //
+
+  reg4 = _mm256_shuffle_epi8(reg5, regs70);
+  reg5 = _mm256_permute4x64_epi64(reg4, 0b11011000);
+  _mm_store_si128((__m128i*)&data[DCTSIZE*2], _mm256_castsi256_si128(reg5)); // Descaled values DCTSIZE*2
+ 
+  
+  /* Descale value for data[DCTSIZE*6] */  
+  reg16_add = _mm256_mullo_epi32(reg2, _mm256_set1_epi32(- FIX_1_847759065)); // temp values
+  reg07_add = _mm256_add_epi32(reg7, reg16_add);
+
+  reg4 = _mm256_add_epi32(reg07_add, regs07); // (x + (1 << (n-1))) //
+  reg5 = _mm256_srli_epi32(reg4, CONST_BITS - PASS1_BITS); // (x + (1 << (n-1))) >> n //
+
+  reg4 = _mm256_shuffle_epi8(reg5, regs70);
+  reg5 = _mm256_permute4x64_epi64(reg4, 0b11011000);
+  _mm_store_si128((__m128i*)&data[DCTSIZE*6], _mm256_castsi256_si128(reg5)); // Descaled values DCTSIZE*6
+
+
+    /*** Odd values to be descaled ***/
+  /* DESCALE(x, n) (x + (1 << (n - 1))) >> n) */
+
+  /* zs[0 ... 3] (z1 ... z4) */
+  reg07_add = _mm256_add_epi32(reg34_sub, reg07_sub);
+  reg16_add = _mm256_add_epi32(reg25_sub, reg16_sub);
+  reg25_add = _mm256_add_epi32(reg16_sub, reg34_sub);
+  reg34_add = _mm256_add_epi32(reg07_sub, reg25_sub);
+
+  /* tmp4 ... tmp7 multiply */
+  reg4 = _mm256_mullo_epi32(reg34_sub, _mm256_set1_epi32(FIX_0_298631336));
+  reg5 = _mm256_mullo_epi32(reg25_sub, _mm256_set1_epi32(FIX_2_053119869));
+  reg6 = _mm256_mullo_epi32(reg16_sub, _mm256_set1_epi32(FIX_3_072711026));
+  reg7 = _mm256_mullo_epi32(reg07_sub, _mm256_set1_epi32(FIX_1_501321110));
+
+  
+  /* z5 */
+  reg34_sub = _mm256_add_epi32(reg25_add, reg34_add);
+  reg07_sub = _mm256_mullo_epi32(reg34_sub, _mm256_set1_epi32(FIX_1_175875602));
+
+  /* zs[0 ... 3] (z1 ... z4) multiply */
+  reg0 = _mm256_mullo_epi32(reg07_add, _mm256_set1_epi32(- FIX_0_899976223));
+  reg1 = _mm256_mullo_epi32(reg16_add, _mm256_set1_epi32(- FIX_2_562915447));
+  reg2 = _mm256_mullo_epi32(reg25_add, _mm256_set1_epi32(- FIX_1_961570560));
+  reg3 = _mm256_mullo_epi32(reg34_add, _mm256_set1_epi32(- FIX_0_390180644));
+
+  reg2 = _mm256_add_epi32(reg2, reg07_sub);
+  reg3 = _mm256_add_epi32(reg3, reg07_sub);
+
+  
+
+
+  /* Descaled values for data[DCTSIZE*7] */
+  reg34_sub = _mm256_add_epi32(reg4, reg0);   // temp values for data[DCTSIZE*7]
+  reg07_sub = _mm256_add_epi32(reg34_sub, reg2);
+
+  reg07_add = _mm256_add_epi32(reg07_sub, regs07); // (x + (1 << (n-1))) //
+  reg16_add = _mm256_srli_epi32(reg07_add, CONST_BITS - PASS1_BITS); // (x + (1 << (n-1))) >> n //
+
+  reg07_add = _mm256_shuffle_epi8(reg16_add, regs70);
+  reg16_add = _mm256_permute4x64_epi64(reg07_add, 0b11011000);
+  _mm_store_si128((__m128i*)&data[DCTSIZE*7], _mm256_castsi256_si128(reg16_add)); // Descaled values DCTSIZE*7
+ 
+
+  
+  /* Descaled values for data[DCTSIZE*5] */
+  reg34_sub = _mm256_add_epi32(reg5, reg1);   // temp values for data[DCTSIZE*5]
+  reg07_sub = _mm256_add_epi32(reg34_sub, reg3);
+
+  reg07_add = _mm256_add_epi32(reg07_sub, regs07); // (x + (1 << (n-1))) //
+  reg16_add = _mm256_srli_epi32(reg07_add, CONST_BITS - PASS1_BITS); // (x + (1 << (n-1))) >> n //
+
+  reg07_add = _mm256_shuffle_epi8(reg16_add, regs70);
+  reg16_add = _mm256_permute4x64_epi64(reg07_add, 0b11011000);
+  _mm_store_si128((__m128i*)&data[DCTSIZE*5], _mm256_castsi256_si128(reg16_add)); // Descaled values DCTSIZE*5
+
+  
+  /* Descaled value for data[DCTSIZE*3] */
+  reg34_sub = _mm256_add_epi32(reg6, reg1);   // temp values for data[DCTSIZE*3]
+  reg07_sub = _mm256_add_epi32(reg34_sub, reg2);
+
+  reg07_add = _mm256_add_epi32(reg07_sub, regs07); // (x + (1 << (n-1))) //
+  reg16_add = _mm256_srli_epi32(reg07_add, CONST_BITS - PASS1_BITS); // (x + (1 << (n-1))) >> n //
+
+  reg07_add = _mm256_shuffle_epi8(reg16_add, regs70);
+  reg16_add = _mm256_permute4x64_epi64(reg07_add, 0b11011000);
+  _mm_store_si128((__m128i*)&data[DCTSIZE*3], _mm256_castsi256_si128(reg16_add)); // Descaled values DCTSIZE*3
+
+  
+  /* Descaled values for data[DCTSIZE*1] */
+  reg34_sub = _mm256_add_epi32(reg7, reg0);   // temp values for data[DCTSIZE*1]
+  reg07_sub = _mm256_add_epi32(reg34_sub, reg3);
+
+  reg07_add = _mm256_add_epi32(reg07_sub, regs07); // (x + (1 << (n-1))) //
+  reg16_add = _mm256_srli_epi32(reg07_add, CONST_BITS - PASS1_BITS); // (x + (1 << (n-1))) >> n //
+
+  reg07_add = _mm256_shuffle_epi8(reg16_add, regs70);
+  reg16_add = _mm256_permute4x64_epi64(reg07_add, 0b11011000);
+  _mm_store_si128((__m128i*)&data[DCTSIZE*1], _mm256_castsi256_si128(reg16_add)); // Descaled values DCTSIZE*1 
+}
+
+/*
+ * Perform the forward DCT on one block of samples.
+ */
+
+
+void ff_fdct10_avx2(int16_t *data)
+{
+  int32_t shift_val;
+   
+  __m256i regs07, regs70; 
+  __m256i reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
+  __m256i reg07_add, reg16_add, reg25_add, reg34_add, reg07_sub, reg16_sub, reg25_sub, reg34_sub; 
+
+  
+  row_fdct10(data);
+
+
+  /* Pass 2: process columns.
+   * We remove the PASS1_BITS scaling, but leave the results scaled up
+   * by an overall factor of 8.
+   */
+
+  reg0 = _mm256_cvtepi16_epi32(_mm_load_si128((__m128i*)&data[DCTSIZE*0]));
+  reg1 = _mm256_cvtepi16_epi32(_mm_load_si128((__m128i*)&data[DCTSIZE*1]));
+  reg2 = _mm256_cvtepi16_epi32(_mm_load_si128((__m128i*)&data[DCTSIZE*2]));
+  reg3 = _mm256_cvtepi16_epi32(_mm_load_si128((__m128i*)&data[DCTSIZE*3]));
+
+  reg4 = _mm256_cvtepi16_epi32(_mm_load_si128((__m128i*)&data[DCTSIZE*4]));
+  reg5 = _mm256_cvtepi16_epi32(_mm_load_si128((__m128i*)&data[DCTSIZE*5]));
+  reg6 = _mm256_cvtepi16_epi32(_mm_load_si128((__m128i*)&data[DCTSIZE*6]));
+  reg7 = _mm256_cvtepi16_epi32(_mm_load_si128((__m128i*)&data[DCTSIZE*7]));
+
+  Transpose_8_8(&reg0, &reg1, &reg2, &reg3, &reg4, &reg5, &reg6, &reg7);
+  
+
+  /* tmp1 - tmp4 */
+  reg07_add = _mm256_add_epi32(reg0, reg7);
+  reg16_add = _mm256_add_epi32(reg1, reg6);
+  reg25_add = _mm256_add_epi32(reg2, reg5);
+  reg34_add = _mm256_add_epi32(reg3, reg4);
+
+  /* tmp4 - tmp 7*/
+  reg34_sub = _mm256_sub_epi32(reg3, reg4);
+  reg25_sub = _mm256_sub_epi32(reg2, reg5);
+  reg16_sub = _mm256_sub_epi32(reg1, reg6);
+  reg07_sub = _mm256_sub_epi32(reg0, reg7);
+
+
+  /* tmps[0 ... 3] */
+  reg0 = _mm256_add_epi32(reg07_add, reg34_add);
+  reg1 = _mm256_add_epi32(reg16_add, reg25_add);
+  reg2 = _mm256_sub_epi32(reg16_add, reg25_add);
+  reg3 = _mm256_sub_epi32(reg07_add, reg34_add);
+
+
+
+  /*** Even values to be descaled ***/
+  /* DESCALE(x, n) (x + (1 << (n - 1))) >> n) */
+
+  /* Descaled values for data[DCTSIZE*0] */
+  reg07_add = _mm256_add_epi32(reg0, reg1);
+
+  shift_val = (1 << (OUT_SHIFT -1));
+  regs07 = _mm256_set1_epi32(shift_val);
+
+  reg4 = _mm256_add_epi32(reg07_add, regs07); // (x + (1 << (n-1)))
+  reg5 = _mm256_srli_epi32(reg4, OUT_SHIFT); // (x + (1 << (n-1))) >> n
+
+  regs70 = _mm256_set_epi64x(0, 0x0d0c090805040100, 0, 0x0d0c090805040100); // Shuffle mask
+    
+  reg4 = _mm256_shuffle_epi8(reg5, regs70);
+  reg5 = _mm256_permute4x64_epi64(reg4, 0b11011000);
+  _mm_store_si128((__m128i*)&data[DCTSIZE*0], _mm256_castsi256_si128(reg5)); // Descaled values DCTSIZE*0
+
+  
+  /* Descaled values for data[DCTSIZE*4] */
+  reg07_add = _mm256_sub_epi32(reg0, reg1);
+    
+  reg4 = _mm256_add_epi32(reg07_add, regs07); // (x + (1 << (n-1)))
+  reg5 = _mm256_srli_epi32(reg4, OUT_SHIFT); // (x + (1 << (n-1))) >> n
+  
+  reg4 = _mm256_shuffle_epi8(reg5, regs70);
+  reg5 = _mm256_permute4x64_epi64(reg4, 0b11011000);
+  _mm_store_si128((__m128i*)&data[DCTSIZE*4], _mm256_castsi256_si128(reg5)); // Descaled values DCTSIZE*4
+  
+
+  
+  /* z1 */
+  reg6 = _mm256_add_epi32(reg2, reg3);
+  reg7 = _mm256_mullo_epi32(reg6, _mm256_set1_epi32(FIX_0_541196100));
+
+  
+  /* Descaled values for data[DCTSIZE*2] */
+  reg16_add = _mm256_mullo_epi32(reg3, _mm256_set1_epi32(FIX_0_765366865)); // temp values
+  reg07_add = _mm256_add_epi32(reg7, reg16_add);
+
+  shift_val = (1 << (CONST_BITS + OUT_SHIFT -1));
+  regs07 = _mm256_set1_epi32(shift_val);
+
+  reg4 = _mm256_add_epi32(reg07_add, regs07); // (x + (1 << (n-1))) //
+  reg5 = _mm256_srli_epi32(reg4, CONST_BITS + OUT_SHIFT); // (x + (1 << (n-1))) >> n //
+
+  reg4 = _mm256_shuffle_epi8(reg5, regs70);
+  reg5 = _mm256_permute4x64_epi64(reg4, 0b11011000);
+  _mm_store_si128((__m128i*)&data[DCTSIZE*2], _mm256_castsi256_si128(reg5)); // Descaled values DCTSIZE*2
+ 
+  
+  /* Descale value for data[DCTSIZE*6] */  
+  reg16_add = _mm256_mullo_epi32(reg2, _mm256_set1_epi32(- FIX_1_847759065)); // temp values
+  reg07_add = _mm256_add_epi32(reg7, reg16_add);
+
+  reg4 = _mm256_add_epi32(reg07_add, regs07); // (x + (1 << (n-1))) //
+  reg5 = _mm256_srli_epi32(reg4, CONST_BITS + OUT_SHIFT); // (x + (1 << (n-1))) >> n //
+
+  reg4 = _mm256_shuffle_epi8(reg5, regs70);
+  reg5 = _mm256_permute4x64_epi64(reg4, 0b11011000);
+  _mm_store_si128((__m128i*)&data[DCTSIZE*6], _mm256_castsi256_si128(reg5)); // Descaled values DCTSIZE*6
+ 
+
+  
+  /*** Odd values to be descaled ***/
+  /* DESCALE(x, n) (x + (1 << (n - 1))) >> n) */
+
+  /* zs[0 ... 3] (z1 ... z4) */
+  reg07_add = _mm256_add_epi32(reg34_sub, reg07_sub);
+  reg16_add = _mm256_add_epi32(reg25_sub, reg16_sub);
+  reg25_add = _mm256_add_epi32(reg16_sub, reg34_sub);
+  reg34_add = _mm256_add_epi32(reg07_sub, reg25_sub);
+
+  /* tmp4 ... tmp7 multiply */
+  reg4 = _mm256_mullo_epi32(reg34_sub, _mm256_set1_epi32(FIX_0_298631336));
+  reg5 = _mm256_mullo_epi32(reg25_sub, _mm256_set1_epi32(FIX_2_053119869));
+  reg6 = _mm256_mullo_epi32(reg16_sub, _mm256_set1_epi32(FIX_3_072711026));
+  reg7 = _mm256_mullo_epi32(reg07_sub, _mm256_set1_epi32(FIX_1_501321110));
+
+
+  /* z5 */
+  reg34_sub = _mm256_add_epi32(reg25_add, reg34_add);
+  reg07_sub = _mm256_mullo_epi32(reg34_sub, _mm256_set1_epi32(FIX_1_175875602));
+
+  /* zs[0 ... 3] (z1 ... z4) multiply */
+  reg0 = _mm256_mullo_epi32(reg07_add, _mm256_set1_epi32(- FIX_0_899976223));
+  reg1 = _mm256_mullo_epi32(reg16_add, _mm256_set1_epi32(- FIX_2_562915447));
+  reg2 = _mm256_mullo_epi32(reg25_add, _mm256_set1_epi32(- FIX_1_961570560));
+  reg3 = _mm256_mullo_epi32(reg34_add, _mm256_set1_epi32(- FIX_0_390180644));
+
+  reg2 = _mm256_add_epi32(reg2, reg07_sub);
+  reg3 = _mm256_add_epi32(reg3, reg07_sub);
+
+  
+
+
+  /* Descaled values for data[DCTSIZE*7] */
+  reg34_sub = _mm256_add_epi32(reg4, reg0);   // temp values for data[DCTSIZE*7]
+  reg07_sub = _mm256_add_epi32(reg34_sub, reg2);
+
+  reg07_add = _mm256_add_epi32(reg07_sub, regs07); // (x + (1 << (n-1))) //
+  reg16_add = _mm256_srli_epi32(reg07_add, CONST_BITS + OUT_SHIFT); // (x + (1 << (n-1))) >> n //
+
+  reg07_add = _mm256_shuffle_epi8(reg16_add, regs70);
+  reg16_add = _mm256_permute4x64_epi64(reg07_add, 0b11011000);
+  _mm_store_si128((__m128i*)&data[DCTSIZE*7], _mm256_castsi256_si128(reg16_add)); // Descaled values DCTSIZE*7
+ 
+
+  
+  /* Descaled values for data[DCTSIZE*5] */
+  reg34_sub = _mm256_add_epi32(reg5, reg1);   // temp values for data[DCTSIZE*5]
+  reg07_sub = _mm256_add_epi32(reg34_sub, reg3);
+
+  reg07_add = _mm256_add_epi32(reg07_sub, regs07); // (x + (1 << (n-1))) //
+  reg16_add = _mm256_srli_epi32(reg07_add, CONST_BITS + OUT_SHIFT); // (x + (1 << (n-1))) >> n //
+
+  reg07_add = _mm256_shuffle_epi8(reg16_add, regs70);
+  reg16_add = _mm256_permute4x64_epi64(reg07_add, 0b11011000);
+  _mm_store_si128((__m128i*)&data[DCTSIZE*5], _mm256_castsi256_si128(reg16_add)); // Descaled values DCTSIZE*5
+
+  
+  /* Descaled value for data[DCTSIZE*3] */
+  reg34_sub = _mm256_add_epi32(reg6, reg1);   // temp values for data[DCTSIZE*3]
+  reg07_sub = _mm256_add_epi32(reg34_sub, reg2);
+
+  reg07_add = _mm256_add_epi32(reg07_sub, regs07); // (x + (1 << (n-1))) //
+  reg16_add = _mm256_srli_epi32(reg07_add, CONST_BITS + OUT_SHIFT); // (x + (1 << (n-1))) >> n //
+
+  reg07_add = _mm256_shuffle_epi8(reg16_add, regs70);
+  reg16_add = _mm256_permute4x64_epi64(reg07_add, 0b11011000);
+  _mm_store_si128((__m128i*)&data[DCTSIZE*3], _mm256_castsi256_si128(reg16_add)); // Descaled values DCTSIZE*3
+
+  
+  /* Descaled values for data[DCTSIZE*1] */
+  reg34_sub = _mm256_add_epi32(reg7, reg0);   // temp values for data[DCTSIZE*1]
+  reg07_sub = _mm256_add_epi32(reg34_sub, reg3);
+
+  reg07_add = _mm256_add_epi32(reg07_sub, regs07); // (x + (1 << (n-1))) //
+  reg16_add = _mm256_srli_epi32(reg07_add, CONST_BITS + OUT_SHIFT); // (x + (1 << (n-1))) >> n //
+
+  reg07_add = _mm256_shuffle_epi8(reg16_add, regs70);
+  reg16_add = _mm256_permute4x64_epi64(reg07_add, 0b11011000);
+  _mm_store_si128((__m128i*)&data[DCTSIZE*1], _mm256_castsi256_si128(reg16_add)); // Descaled values DCTSIZE*1 
+}
+
diff --git a/libavcodec/x86/fdct10.h b/libavcodec/x86/fdct10.h
new file mode 100644
index 0000000..bd4e048
--- /dev/null
+++ b/libavcodec/x86/fdct10.h
@@ -0,0 +1,26 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_X86_FDCT10_H
+#define AVCODEC_X86_FDCT10_H
+
+#include <stdint.h>
+
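+/**
+ * Forward 8x8 DCT for 9/10-bit samples, AVX2 implementation.
+ *
+ * @param data 64 int16_t coefficients, at least 16-byte aligned,
+ *             transformed in place
+ */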
+void ff_fdct10_avx2(int16_t *data);
+  
+#endif /* AVCODEC_X86_FDCT10_H */
diff --git a/libavcodec/x86/fdctdsp_init.c b/libavcodec/x86/fdctdsp_init.c
index 0cb5fd6..99b4e67 100644
--- a/libavcodec/x86/fdctdsp_init.c
+++ b/libavcodec/x86/fdctdsp_init.c
@@ -22,6 +22,7 @@
 #include "libavcodec/avcodec.h"
 #include "libavcodec/fdctdsp.h"
 #include "fdct.h"
+#include "fdct10.h"
 
 av_cold void ff_fdctdsp_init_x86(FDCTDSPContext *c, AVCodecContext *avctx,
                                  unsigned high_bit_depth)
@@ -41,4 +42,8 @@ av_cold void ff_fdctdsp_init_x86(FDCTDSPContext *c, AVCodecContext *avctx,
                 c->fdct = ff_fdct_sse2;
         }
     }
+    else {
+        if (INLINE_AVX2(cpu_flags) && (avctx->bits_per_raw_sample == 10 || avctx->bits_per_raw_sample == 9))
+            c->fdct = ff_fdct10_avx2;
+    }
 }
-- 
2.1.4

