author    Thomas Tsou <tom@tsou.cc>    2013-10-30 21:24:40 -0400
committer Thomas Tsou <tom@tsou.cc>    2013-11-04 09:15:55 -0800
commit    17bbb9b755b2b91c15114bc2dfb6f29fc7a66377 (patch)
tree      90d90d343f54037ed5cd8472d63fb1f4c2a7f1e1 /Transceiver52M/x86
parent    a1a3ab4babf591cf9c0d233d93b1188f747f5259 (diff)
Transceiver52M: Separate architecture specific files
Move x86 specific files into their own directory, as this area is about
to get crowded with the addition of ARM support.

Signed-off-by: Thomas Tsou <tom@tsou.cc>
Diffstat (limited to 'Transceiver52M/x86')
-rw-r--r--  Transceiver52M/x86/Makefile.am |   8
-rw-r--r--  Transceiver52M/x86/convert.c   | 200
-rw-r--r--  Transceiver52M/x86/convolve.c  | 601
3 files changed, 809 insertions(+), 0 deletions(-)
diff --git a/Transceiver52M/x86/Makefile.am b/Transceiver52M/x86/Makefile.am
new file mode 100644
index 0000000..0621b17
--- /dev/null
+++ b/Transceiver52M/x86/Makefile.am
@@ -0,0 +1,8 @@
+AM_CFLAGS = -Wall -std=gnu99 -march=native -I../common
+
+noinst_LTLIBRARIES = libarch.la
+
+libarch_la_SOURCES = \
+ ../common/convolve_base.c \
+ convert.c \
+ convolve.c
diff --git a/Transceiver52M/x86/convert.c b/Transceiver52M/x86/convert.c
new file mode 100644
index 0000000..1d2f208
--- /dev/null
+++ b/Transceiver52M/x86/convert.c
@@ -0,0 +1,200 @@
+/*
+ * SSE type conversions
+ * Copyright (C) 2013 Thomas Tsou <tom@tsou.cc>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <malloc.h>
+#include <string.h>
+#include "convert.h"
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#ifdef HAVE_SSE3
+#include <xmmintrin.h>
+#include <emmintrin.h>
+
+#ifdef HAVE_SSE4_1
+#include <smmintrin.h>
+
+/* Convert 16*N 16-bit signed integers to single-precision floats */
+static void _sse_convert_si16_ps_16n(float *restrict out,
+ short *restrict in,
+ int len)
+{
+ __m128i m0, m1, m2, m3, m4, m5;
+ __m128 m6, m7, m8, m9;
+
+ for (int i = 0; i < len / 16; i++) {
+ /* Load (unaligned) packed 16-bit integers */
+ m0 = _mm_loadu_si128((__m128i *) &in[16 * i + 0]);
+ m1 = _mm_loadu_si128((__m128i *) &in[16 * i + 8]);
+
+ /* Unpack: sign-extend the low and high halves to 32-bit integers */
+ m2 = _mm_cvtepi16_epi32(m0);
+ m4 = _mm_cvtepi16_epi32(m1);
+ m0 = _mm_shuffle_epi32(m0, _MM_SHUFFLE(1, 0, 3, 2));
+ m1 = _mm_shuffle_epi32(m1, _MM_SHUFFLE(1, 0, 3, 2));
+ m3 = _mm_cvtepi16_epi32(m0);
+ m5 = _mm_cvtepi16_epi32(m1);
+
+ /* Convert */
+ m6 = _mm_cvtepi32_ps(m2);
+ m7 = _mm_cvtepi32_ps(m3);
+ m8 = _mm_cvtepi32_ps(m4);
+ m9 = _mm_cvtepi32_ps(m5);
+
+ /* Store */
+ _mm_storeu_ps(&out[16 * i + 0], m6);
+ _mm_storeu_ps(&out[16 * i + 4], m7);
+ _mm_storeu_ps(&out[16 * i + 8], m8);
+ _mm_storeu_ps(&out[16 * i + 12], m9);
+ }
+}
+
+/* 16-bit signed integer conversion with scalar remainder handling */
+static void _sse_convert_si16_ps(float *restrict out,
+ short *restrict in,
+ int len)
+{
+ int start = len / 16 * 16;
+
+ _sse_convert_si16_ps_16n(out, in, len);
+
+ for (int i = 0; i < len % 16; i++)
+ out[start + i] = in[start + i];
+}
+#endif /* HAVE_SSE4_1 */
+
+/* Scale 8*N single-precision floats and convert to 16-bit signed integers */
+static void _sse_convert_scale_ps_si16_8n(short *restrict out,
+ float *restrict in,
+ float scale, int len)
+{
+ __m128 m0, m1, m2;
+ __m128i m4, m5;
+
+ for (int i = 0; i < len / 8; i++) {
+ /* Load (unaligned) packed floats */
+ m0 = _mm_loadu_ps(&in[8 * i + 0]);
+ m1 = _mm_loadu_ps(&in[8 * i + 4]);
+ m2 = _mm_load1_ps(&scale);
+
+ /* Scale */
+ m0 = _mm_mul_ps(m0, m2);
+ m1 = _mm_mul_ps(m1, m2);
+
+ /* Convert */
+ m4 = _mm_cvtps_epi32(m0);
+ m5 = _mm_cvtps_epi32(m1);
+
+ /* Pack with signed saturation and store */
+ m5 = _mm_packs_epi32(m4, m5);
+ _mm_storeu_si128((__m128i *) &out[8 * i], m5);
+ }
+}
+
+/* Scale and convert floats with scalar remainder handling */
+static void _sse_convert_scale_ps_si16(short *restrict out,
+ float *restrict in,
+ float scale, int len)
+{
+ int start = len / 8 * 8;
+
+ _sse_convert_scale_ps_si16_8n(out, in, scale, len);
+
+ for (int i = 0; i < len % 8; i++)
+ out[start + i] = in[start + i] * scale;
+}
+
+/* Scale 16*N single-precision floats and convert to 16-bit signed integers */
+static void _sse_convert_scale_ps_si16_16n(short *restrict out,
+ float *restrict in,
+ float scale, int len)
+{
+ __m128 m0, m1, m2, m3, m4;
+ __m128i m5, m6, m7, m8;
+
+ for (int i = 0; i < len / 16; i++) {
+ /* Load (unaligned) packed floats */
+ m0 = _mm_loadu_ps(&in[16 * i + 0]);
+ m1 = _mm_loadu_ps(&in[16 * i + 4]);
+ m2 = _mm_loadu_ps(&in[16 * i + 8]);
+ m3 = _mm_loadu_ps(&in[16 * i + 12]);
+ m4 = _mm_load1_ps(&scale);
+
+ /* Scale */
+ m0 = _mm_mul_ps(m0, m4);
+ m1 = _mm_mul_ps(m1, m4);
+ m2 = _mm_mul_ps(m2, m4);
+ m3 = _mm_mul_ps(m3, m4);
+
+ /* Convert */
+ m5 = _mm_cvtps_epi32(m0);
+ m6 = _mm_cvtps_epi32(m1);
+ m7 = _mm_cvtps_epi32(m2);
+ m8 = _mm_cvtps_epi32(m3);
+
+ /* Pack with signed saturation and store */
+ m5 = _mm_packs_epi32(m5, m6);
+ m7 = _mm_packs_epi32(m7, m8);
+ _mm_storeu_si128((__m128i *) &out[16 * i + 0], m5);
+ _mm_storeu_si128((__m128i *) &out[16 * i + 8], m7);
+ }
+}
+#else /* !HAVE_SSE3 */
+static void convert_scale_ps_si16(short *out, float *in, float scale, int len)
+{
+ for (int i = 0; i < len; i++)
+ out[i] = in[i] * scale;
+}
+#endif
+
+#ifndef HAVE_SSE4_1
+/* Fallback when the SSE4.1 conversion path is unavailable; guarding on
+ * HAVE_SSE3 here would leave convert_short_float() without a callee on
+ * machines that have SSE3 but not SSE4.1. */
+static void convert_si16_ps(float *out, short *in, int len)
+{
+ for (int i = 0; i < len; i++)
+ out[i] = in[i];
+}
+#endif
+
+void convert_float_short(short *out, float *in, float scale, int len)
+{
+#ifdef HAVE_SSE3
+ if (!(len % 16))
+ _sse_convert_scale_ps_si16_16n(out, in, scale, len);
+ else if (!(len % 8))
+ _sse_convert_scale_ps_si16_8n(out, in, scale, len);
+ else
+ _sse_convert_scale_ps_si16(out, in, scale, len);
+#else
+ convert_scale_ps_si16(out, in, scale, len);
+#endif
+}
+
+void convert_short_float(float *out, short *in, int len)
+{
+#ifdef HAVE_SSE4_1
+ if (!(len % 16))
+ _sse_convert_si16_ps_16n(out, in, len);
+ else
+ _sse_convert_si16_ps(out, in, len);
+#else
+ convert_si16_ps(out, in, len);
+#endif
+}
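
For reference, a scalar model of what the two entry points above compute.
This sketch is not part of the commit and the model_* names are
illustrative. Note that the SSE path rounds via _mm_cvtps_epi32 and
narrows through _mm_packs_epi32, which saturates to the 16-bit range,
while the scalar remainder and non-SSE fallbacks rely on the implicit
float-to-short conversion and do not clamp.

#include <limits.h>
#include <math.h>

/* Scalar model of convert_float_short(): scale, round to nearest,
 * then clamp to the range the SSE pack instruction saturates to. */
static void model_float_short(short *out, const float *in,
			      float scale, int len)
{
	for (int i = 0; i < len; i++) {
		long v = lrintf(in[i] * scale);

		if (v > SHRT_MAX)
			v = SHRT_MAX;
		else if (v < SHRT_MIN)
			v = SHRT_MIN;

		out[i] = (short) v;
	}
}

/* Scalar model of convert_short_float(): widen each 16-bit sample. */
static void model_short_float(float *out, const short *in, int len)
{
	for (int i = 0; i < len; i++)
		out[i] = (float) in[i];
}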
diff --git a/Transceiver52M/x86/convolve.c b/Transceiver52M/x86/convolve.c
new file mode 100644
index 0000000..ed85d97
--- /dev/null
+++ b/Transceiver52M/x86/convolve.c
@@ -0,0 +1,601 @@
+/*
+ * SSE Convolution
+ * Copyright (C) 2012, 2013 Thomas Tsou <tom@tsou.cc>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <malloc.h>
+#include <string.h>
+#include <stdio.h>
+#include "convolve.h"
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+/* Forward declarations from base implementation */
+int _base_convolve_real(float *x, int x_len,
+ float *h, int h_len,
+ float *y, int y_len,
+ int start, int len,
+ int step, int offset);
+
+int _base_convolve_complex(float *x, int x_len,
+ float *h, int h_len,
+ float *y, int y_len,
+ int start, int len,
+ int step, int offset);
+
+int bounds_check(int x_len, int h_len, int y_len,
+ int start, int len, int step);
+
+#ifdef HAVE_SSE3
+#include <xmmintrin.h>
+#include <pmmintrin.h>
+
+/* 4-tap SSE complex-real convolution */
+static void sse_conv_real4(float *restrict x,
+ float *restrict h,
+ float *restrict y,
+ int len)
+{
+ __m128 m0, m1, m2, m3, m4, m5, m6, m7;
+
+ /* Load (aligned) filter taps */
+ m0 = _mm_load_ps(&h[0]);
+ m1 = _mm_load_ps(&h[4]);
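+ /* The (0, 2, 0, 2) shuffle gathers the real parts of the four
+ * complex-format taps into one vector; the input below is split
+ * into real and imaginary lanes the same way, so corresponding
+ * lanes line up for the multiplies. */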
+ m7 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
+
+ for (int i = 0; i < len; i++) {
+ /* Load (unaligned) input data */
+ m0 = _mm_loadu_ps(&x[2 * i + 0]);
+ m1 = _mm_loadu_ps(&x[2 * i + 4]);
+ m2 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
+ m3 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
+
+ /* Quad multiply */
+ m4 = _mm_mul_ps(m2, m7);
+ m5 = _mm_mul_ps(m3, m7);
+
+ /* Sum and store */
+ m6 = _mm_hadd_ps(m4, m5);
+ m0 = _mm_hadd_ps(m6, m6);
+
+ _mm_store_ss(&y[2 * i + 0], m0);
+ m0 = _mm_shuffle_ps(m0, m0, _MM_SHUFFLE(0, 3, 2, 1));
+ _mm_store_ss(&y[2 * i + 1], m0);
+ }
+}
+
+/* 8-tap SSE complex-real convolution */
+static void sse_conv_real8(float *restrict x,
+ float *restrict h,
+ float *restrict y,
+ int len)
+{
+ __m128 m0, m1, m2, m3, m4, m5, m6, m7, m8, m9;
+
+ /* Load (aligned) filter taps */
+ m0 = _mm_load_ps(&h[0]);
+ m1 = _mm_load_ps(&h[4]);
+ m2 = _mm_load_ps(&h[8]);
+ m3 = _mm_load_ps(&h[12]);
+
+ m4 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
+ m5 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
+
+ for (int i = 0; i < len; i++) {
+ /* Load (unaligned) input data */
+ m0 = _mm_loadu_ps(&x[2 * i + 0]);
+ m1 = _mm_loadu_ps(&x[2 * i + 4]);
+ m2 = _mm_loadu_ps(&x[2 * i + 8]);
+ m3 = _mm_loadu_ps(&x[2 * i + 12]);
+
+ m6 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
+ m7 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
+ m8 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
+ m9 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(1, 3, 1, 3));
+
+ /* Quad multiply */
+ m6 = _mm_mul_ps(m6, m4);
+ m7 = _mm_mul_ps(m7, m4);
+ m8 = _mm_mul_ps(m8, m5);
+ m9 = _mm_mul_ps(m9, m5);
+
+ /* Sum and store */
+ m6 = _mm_add_ps(m6, m8);
+ m7 = _mm_add_ps(m7, m9);
+ m6 = _mm_hadd_ps(m6, m7);
+ m6 = _mm_hadd_ps(m6, m6);
+
+ _mm_store_ss(&y[2 * i + 0], m6);
+ m6 = _mm_shuffle_ps(m6, m6, _MM_SHUFFLE(0, 3, 2, 1));
+ _mm_store_ss(&y[2 * i + 1], m6);
+ }
+}
+
+/* 12-tap SSE complex-real convolution */
+static void sse_conv_real12(float *restrict x,
+ float *restrict h,
+ float *restrict y,
+ int len)
+{
+ __m128 m0, m1, m2, m3, m4, m5, m6, m7;
+ __m128 m8, m9, m10, m11, m12, m13, m14;
+
+ /* Load (aligned) filter taps */
+ m0 = _mm_load_ps(&h[0]);
+ m1 = _mm_load_ps(&h[4]);
+ m2 = _mm_load_ps(&h[8]);
+ m3 = _mm_load_ps(&h[12]);
+ m4 = _mm_load_ps(&h[16]);
+ m5 = _mm_load_ps(&h[20]);
+
+ m12 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
+ m13 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
+ m14 = _mm_shuffle_ps(m4, m5, _MM_SHUFFLE(0, 2, 0, 2));
+
+ for (int i = 0; i < len; i++) {
+ /* Load (unaligned) input data */
+ m0 = _mm_loadu_ps(&x[2 * i + 0]);
+ m1 = _mm_loadu_ps(&x[2 * i + 4]);
+ m2 = _mm_loadu_ps(&x[2 * i + 8]);
+ m3 = _mm_loadu_ps(&x[2 * i + 12]);
+
+ m4 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
+ m5 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
+ m6 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
+ m7 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(1, 3, 1, 3));
+
+ m0 = _mm_loadu_ps(&x[2 * i + 16]);
+ m1 = _mm_loadu_ps(&x[2 * i + 20]);
+
+ m8 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
+ m9 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
+
+ /* Quad multiply */
+ m0 = _mm_mul_ps(m4, m12);
+ m1 = _mm_mul_ps(m5, m12);
+ m2 = _mm_mul_ps(m6, m13);
+ m3 = _mm_mul_ps(m7, m13);
+ m4 = _mm_mul_ps(m8, m14);
+ m5 = _mm_mul_ps(m9, m14);
+
+ /* Sum and store */
+ m8 = _mm_add_ps(m0, m2);
+ m9 = _mm_add_ps(m1, m3);
+ m10 = _mm_add_ps(m8, m4);
+ m11 = _mm_add_ps(m9, m5);
+
+ m2 = _mm_hadd_ps(m10, m11);
+ m3 = _mm_hadd_ps(m2, m2);
+
+ _mm_store_ss(&y[2 * i + 0], m3);
+ m3 = _mm_shuffle_ps(m3, m3, _MM_SHUFFLE(0, 3, 2, 1));
+ _mm_store_ss(&y[2 * i + 1], m3);
+ }
+}
+
+/* 16-tap SSE complex-real convolution */
+static void sse_conv_real16(float *restrict x,
+ float *restrict h,
+ float *restrict y,
+ int len)
+{
+ __m128 m0, m1, m2, m3, m4, m5, m6, m7;
+ __m128 m8, m9, m10, m11, m12, m13, m14, m15;
+
+ /* Load (aligned) filter taps */
+ m0 = _mm_load_ps(&h[0]);
+ m1 = _mm_load_ps(&h[4]);
+ m2 = _mm_load_ps(&h[8]);
+ m3 = _mm_load_ps(&h[12]);
+
+ m4 = _mm_load_ps(&h[16]);
+ m5 = _mm_load_ps(&h[20]);
+ m6 = _mm_load_ps(&h[24]);
+ m7 = _mm_load_ps(&h[28]);
+
+ m12 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
+ m13 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
+ m14 = _mm_shuffle_ps(m4, m5, _MM_SHUFFLE(0, 2, 0, 2));
+ m15 = _mm_shuffle_ps(m6, m7, _MM_SHUFFLE(0, 2, 0, 2));
+
+ for (int i = 0; i < len; i++) {
+ /* Load (unaligned) input data */
+ m0 = _mm_loadu_ps(&x[2 * i + 0]);
+ m1 = _mm_loadu_ps(&x[2 * i + 4]);
+ m2 = _mm_loadu_ps(&x[2 * i + 8]);
+ m3 = _mm_loadu_ps(&x[2 * i + 12]);
+
+ m4 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
+ m5 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
+ m6 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
+ m7 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(1, 3, 1, 3));
+
+ m0 = _mm_loadu_ps(&x[2 * i + 16]);
+ m1 = _mm_loadu_ps(&x[2 * i + 20]);
+ m2 = _mm_loadu_ps(&x[2 * i + 24]);
+ m3 = _mm_loadu_ps(&x[2 * i + 28]);
+
+ m8 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
+ m9 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
+ m10 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
+ m11 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(1, 3, 1, 3));
+
+ /* Quad multiply */
+ m0 = _mm_mul_ps(m4, m12);
+ m1 = _mm_mul_ps(m5, m12);
+ m2 = _mm_mul_ps(m6, m13);
+ m3 = _mm_mul_ps(m7, m13);
+
+ m4 = _mm_mul_ps(m8, m14);
+ m5 = _mm_mul_ps(m9, m14);
+ m6 = _mm_mul_ps(m10, m15);
+ m7 = _mm_mul_ps(m11, m15);
+
+ /* Sum and store */
+ m8 = _mm_add_ps(m0, m2);
+ m9 = _mm_add_ps(m1, m3);
+ m10 = _mm_add_ps(m4, m6);
+ m11 = _mm_add_ps(m5, m7);
+
+ m0 = _mm_add_ps(m8, m10);
+ m1 = _mm_add_ps(m9, m11);
+ m2 = _mm_hadd_ps(m0, m1);
+ m3 = _mm_hadd_ps(m2, m2);
+
+ _mm_store_ss(&y[2 * i + 0], m3);
+ m3 = _mm_shuffle_ps(m3, m3, _MM_SHUFFLE(0, 3, 2, 1));
+ _mm_store_ss(&y[2 * i + 1], m3);
+ }
+}
+
+/* 20-tap SSE complex-real convolution */
+static void sse_conv_real20(float *restrict x,
+ float *restrict h,
+ float *restrict y,
+ int len)
+{
+ __m128 m0, m1, m2, m3, m4, m5, m6, m7;
+ __m128 m8, m9, m11, m12, m13, m14, m15;
+
+ /* Load (aligned) filter taps */
+ m0 = _mm_load_ps(&h[0]);
+ m1 = _mm_load_ps(&h[4]);
+ m2 = _mm_load_ps(&h[8]);
+ m3 = _mm_load_ps(&h[12]);
+ m4 = _mm_load_ps(&h[16]);
+ m5 = _mm_load_ps(&h[20]);
+ m6 = _mm_load_ps(&h[24]);
+ m7 = _mm_load_ps(&h[28]);
+ m8 = _mm_load_ps(&h[32]);
+ m9 = _mm_load_ps(&h[36]);
+
+ m11 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
+ m12 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
+ m13 = _mm_shuffle_ps(m4, m5, _MM_SHUFFLE(0, 2, 0, 2));
+ m14 = _mm_shuffle_ps(m6, m7, _MM_SHUFFLE(0, 2, 0, 2));
+ m15 = _mm_shuffle_ps(m8, m9, _MM_SHUFFLE(0, 2, 0, 2));
+
+ for (int i = 0; i < len; i++) {
+ /* Multiply-accumulate first 12 taps */
+ m0 = _mm_loadu_ps(&x[2 * i + 0]);
+ m1 = _mm_loadu_ps(&x[2 * i + 4]);
+ m2 = _mm_loadu_ps(&x[2 * i + 8]);
+ m3 = _mm_loadu_ps(&x[2 * i + 12]);
+ m4 = _mm_loadu_ps(&x[2 * i + 16]);
+ m5 = _mm_loadu_ps(&x[2 * i + 20]);
+
+ m6 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
+ m7 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
+ m8 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
+ m9 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(1, 3, 1, 3));
+ m0 = _mm_shuffle_ps(m4, m5, _MM_SHUFFLE(0, 2, 0, 2));
+ m1 = _mm_shuffle_ps(m4, m5, _MM_SHUFFLE(1, 3, 1, 3));
+
+ m2 = _mm_mul_ps(m6, m11);
+ m3 = _mm_mul_ps(m7, m11);
+ m4 = _mm_mul_ps(m8, m12);
+ m5 = _mm_mul_ps(m9, m12);
+ m6 = _mm_mul_ps(m0, m13);
+ m7 = _mm_mul_ps(m1, m13);
+
+ m0 = _mm_add_ps(m2, m4);
+ m1 = _mm_add_ps(m3, m5);
+ m8 = _mm_add_ps(m0, m6);
+ m9 = _mm_add_ps(m1, m7);
+
+ /* Multiply-accumulate last 8 taps */
+ m0 = _mm_loadu_ps(&x[2 * i + 24]);
+ m1 = _mm_loadu_ps(&x[2 * i + 28]);
+ m2 = _mm_loadu_ps(&x[2 * i + 32]);
+ m3 = _mm_loadu_ps(&x[2 * i + 36]);
+
+ m4 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
+ m5 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
+ m6 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
+ m7 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(1, 3, 1, 3));
+
+ m0 = _mm_mul_ps(m4, m14);
+ m1 = _mm_mul_ps(m5, m14);
+ m2 = _mm_mul_ps(m6, m15);
+ m3 = _mm_mul_ps(m7, m15);
+
+ m4 = _mm_add_ps(m0, m2);
+ m5 = _mm_add_ps(m1, m3);
+
+ /* Final sum and store */
+ m0 = _mm_add_ps(m8, m4);
+ m1 = _mm_add_ps(m9, m5);
+ m2 = _mm_hadd_ps(m0, m1);
+ m3 = _mm_hadd_ps(m2, m2);
+
+ _mm_store_ss(&y[2 * i + 0], m3);
+ m3 = _mm_shuffle_ps(m3, m3, _MM_SHUFFLE(0, 3, 2, 1));
+ _mm_store_ss(&y[2 * i + 1], m3);
+ }
+}
+
+/* 4*N-tap SSE complex-real convolution */
+static void sse_conv_real4n(float *x, float *h, float *y, int h_len, int len)
+{
+ __m128 m0, m1, m2, m4, m5, m6, m7;
+
+ for (int i = 0; i < len; i++) {
+ /* Zero */
+ m6 = _mm_setzero_ps();
+ m7 = _mm_setzero_ps();
+
+ for (int n = 0; n < h_len / 4; n++) {
+ /* Load (aligned) filter taps */
+ m0 = _mm_load_ps(&h[8 * n + 0]);
+ m1 = _mm_load_ps(&h[8 * n + 4]);
+ m2 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
+
+ /* Load (unaligned) input data */
+ m0 = _mm_loadu_ps(&x[2 * i + 8 * n + 0]);
+ m1 = _mm_loadu_ps(&x[2 * i + 8 * n + 4]);
+ m4 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
+ m5 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
+
+ /* Quad multiply */
+ m0 = _mm_mul_ps(m2, m4);
+ m1 = _mm_mul_ps(m2, m5);
+
+ /* Accumulate */
+ m6 = _mm_add_ps(m6, m0);
+ m7 = _mm_add_ps(m7, m1);
+ }
+
+ m0 = _mm_hadd_ps(m6, m7);
+ m0 = _mm_hadd_ps(m0, m0);
+
+ _mm_store_ss(&y[2 * i + 0], m0);
+ m0 = _mm_shuffle_ps(m0, m0, _MM_SHUFFLE(0, 3, 2, 1));
+ _mm_store_ss(&y[2 * i + 1], m0);
+ }
+}
+
+/* 4*N-tap SSE complex-complex convolution */
+static void sse_conv_cmplx_4n(float *x, float *h, float *y, int h_len, int len)
+{
+ __m128 m0, m1, m2, m3, m4, m5, m6, m7;
+
+ for (int i = 0; i < len; i++) {
+ /* Zero */
+ m6 = _mm_setzero_ps();
+ m7 = _mm_setzero_ps();
+
+ for (int n = 0; n < h_len / 4; n++) {
+ /* Load (aligned) filter taps */
+ m0 = _mm_load_ps(&h[8 * n + 0]);
+ m1 = _mm_load_ps(&h[8 * n + 4]);
+ m2 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
+ m3 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
+
+ /* Load (unaligned) input data */
+ m0 = _mm_loadu_ps(&x[2 * i + 8 * n + 0]);
+ m1 = _mm_loadu_ps(&x[2 * i + 8 * n + 4]);
+ m4 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
+ m5 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
+
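+ /* Complex multiply (xr + j*xi)*(hr + j*hi): m0 - m1 yields the
+ * real part xr*hr - xi*hi; after the second set of multiplies,
+ * m2 + m3 yields the imaginary part xr*hi + xi*hr. */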
+ /* Quad multiply */
+ m0 = _mm_mul_ps(m2, m4);
+ m1 = _mm_mul_ps(m3, m5);
+
+ m2 = _mm_mul_ps(m2, m5);
+ m3 = _mm_mul_ps(m3, m4);
+
+ /* Sum */
+ m0 = _mm_sub_ps(m0, m1);
+ m2 = _mm_add_ps(m2, m3);
+
+ /* Accumulate */
+ m6 = _mm_add_ps(m6, m0);
+ m7 = _mm_add_ps(m7, m2);
+ }
+
+ m0 = _mm_hadd_ps(m6, m7);
+ m0 = _mm_hadd_ps(m0, m0);
+
+ _mm_store_ss(&y[2 * i + 0], m0);
+ m0 = _mm_shuffle_ps(m0, m0, _MM_SHUFFLE(0, 3, 2, 1));
+ _mm_store_ss(&y[2 * i + 1], m0);
+ }
+}
+
+/* 8*N-tap SSE complex-complex convolution */
+static void sse_conv_cmplx_8n(float *x, float *h, float *y, int h_len, int len)
+{
+ __m128 m0, m1, m2, m3, m4, m5, m6, m7;
+ __m128 m8, m9, m10, m11, m12, m13, m14, m15;
+
+ for (int i = 0; i < len; i++) {
+ /* Zero */
+ m12 = _mm_setzero_ps();
+ m13 = _mm_setzero_ps();
+ m14 = _mm_setzero_ps();
+ m15 = _mm_setzero_ps();
+
+ for (int n = 0; n < h_len / 8; n++) {
+ /* Load (aligned) filter taps */
+ m0 = _mm_load_ps(&h[16 * n + 0]);
+ m1 = _mm_load_ps(&h[16 * n + 4]);
+ m2 = _mm_load_ps(&h[16 * n + 8]);
+ m3 = _mm_load_ps(&h[16 * n + 12]);
+
+ m4 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
+ m5 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
+ m6 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
+ m7 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(1, 3, 1, 3));
+
+ /* Load (unaligned) input data */
+ m0 = _mm_loadu_ps(&x[2 * i + 16 * n + 0]);
+ m1 = _mm_loadu_ps(&x[2 * i + 16 * n + 4]);
+ m2 = _mm_loadu_ps(&x[2 * i + 16 * n + 8]);
+ m3 = _mm_loadu_ps(&x[2 * i + 16 * n + 12]);
+
+ m8 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(0, 2, 0, 2));
+ m9 = _mm_shuffle_ps(m0, m1, _MM_SHUFFLE(1, 3, 1, 3));
+ m10 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(0, 2, 0, 2));
+ m11 = _mm_shuffle_ps(m2, m3, _MM_SHUFFLE(1, 3, 1, 3));
+
+ /* Quad multiply */
+ m0 = _mm_mul_ps(m4, m8);
+ m1 = _mm_mul_ps(m5, m9);
+ m2 = _mm_mul_ps(m6, m10);
+ m3 = _mm_mul_ps(m7, m11);
+
+ m4 = _mm_mul_ps(m4, m9);
+ m5 = _mm_mul_ps(m5, m8);
+ m6 = _mm_mul_ps(m6, m11);
+ m7 = _mm_mul_ps(m7, m10);
+
+ /* Sum */
+ m0 = _mm_sub_ps(m0, m1);
+ m2 = _mm_sub_ps(m2, m3);
+ m4 = _mm_add_ps(m4, m5);
+ m6 = _mm_add_ps(m6, m7);
+
+ /* Accumulate */
+ m12 = _mm_add_ps(m12, m0);
+ m13 = _mm_add_ps(m13, m2);
+ m14 = _mm_add_ps(m14, m4);
+ m15 = _mm_add_ps(m15, m6);
+ }
+
+ m0 = _mm_add_ps(m12, m13);
+ m1 = _mm_add_ps(m14, m15);
+ m2 = _mm_hadd_ps(m0, m1);
+ m2 = _mm_hadd_ps(m2, m2);
+
+ _mm_store_ss(&y[2 * i + 0], m2);
+ m2 = _mm_shuffle_ps(m2, m2, _MM_SHUFFLE(0, 3, 2, 1));
+ _mm_store_ss(&y[2 * i + 1], m2);
+ }
+}
+#endif
+
+/* API: Complex-real convolution (SSE path requires 16-byte aligned taps) */
+int convolve_real(float *x, int x_len,
+ float *h, int h_len,
+ float *y, int y_len,
+ int start, int len,
+ int step, int offset)
+{
+ void (*conv_func)(float *, float *, float *, int) = NULL;
+ void (*conv_func_n)(float *, float *, float *, int, int) = NULL;
+
+ if (bounds_check(x_len, h_len, y_len, start, len, step) < 0)
+ return -1;
+
+ memset(y, 0, len * 2 * sizeof(float));
+
+#ifdef HAVE_SSE3
+ if (step <= 4) {
+ switch (h_len) {
+ case 4:
+ conv_func = sse_conv_real4;
+ break;
+ case 8:
+ conv_func = sse_conv_real8;
+ break;
+ case 12:
+ conv_func = sse_conv_real12;
+ break;
+ case 16:
+ conv_func = sse_conv_real16;
+ break;
+ case 20:
+ conv_func = sse_conv_real20;
+ break;
+ default:
+ if (!(h_len % 4))
+ conv_func_n = sse_conv_real4n;
+ }
+ }
+#endif
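+ /* The SSE kernels walk the input forward, so hand them a pointer
+ * rewound by h_len - 1 complex samples: tap k then multiplies
+ * x[start - (h_len - 1) + i + k]. This matches a convolution on
+ * the assumption that the caller stores the taps reversed. */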
+ if (conv_func) {
+ conv_func(&x[2 * (-(h_len - 1) + start)],
+ h, y, len);
+ } else if (conv_func_n) {
+ conv_func_n(&x[2 * (-(h_len - 1) + start)],
+ h, y, h_len, len);
+ } else {
+ _base_convolve_real(x, x_len,
+ h, h_len,
+ y, y_len,
+ start, len, step, offset);
+ }
+
+ return len;
+}
+
+/* API: Complex-complex convolution (SSE path requires 16-byte aligned taps) */
+int convolve_complex(float *x, int x_len,
+ float *h, int h_len,
+ float *y, int y_len,
+ int start, int len,
+ int step, int offset)
+{
+ void (*conv_func)(float *, float *, float *, int, int) = NULL;
+
+ if (bounds_check(x_len, h_len, y_len, start, len, step) < 0)
+ return -1;
+
+ memset(y, 0, len * 2 * sizeof(float));
+
+#ifdef HAVE_SSE3
+ if (step <= 4) {
+ if (!(h_len % 8))
+ conv_func = sse_conv_cmplx_8n;
+ else if (!(h_len % 4))
+ conv_func = sse_conv_cmplx_4n;
+ }
+#endif
+ if (conv_func) {
+ conv_func(&x[2 * (-(h_len - 1) + start)],
+ h, y, h_len, len);
+ } else {
+ _base_convolve_complex(x, x_len,
+ h, h_len,
+ y, y_len,
+ start, len, step, offset);
+ }
+
+ return len;
+}
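
For reference, a scalar sketch of the arithmetic the complex-complex
kernels implement. It is not part of the commit, the function name is
illustrative, and it assumes interleaved I/Q floats with x already
pointing at the oldest sample, as in the dispatch code above.

/* Scalar model of sse_conv_cmplx_4n()/sse_conv_cmplx_8n(): a complex
 * multiply-accumulate over h_len taps per output sample. */
static void scalar_conv_cmplx(const float *x, const float *h,
			      float *y, int h_len, int len)
{
	for (int i = 0; i < len; i++) {
		float sum_re = 0.0f, sum_im = 0.0f;

		for (int n = 0; n < h_len; n++) {
			float xr = x[2 * (i + n) + 0];
			float xi = x[2 * (i + n) + 1];
			float hr = h[2 * n + 0];
			float hi = h[2 * n + 1];

			/* (xr + j*xi) * (hr + j*hi) */
			sum_re += xr * hr - xi * hi;
			sum_im += xr * hi + xi * hr;
		}

		y[2 * i + 0] = sum_re;
		y[2 * i + 1] = sum_im;
	}
}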