tesseract  4.0.0-1-g2a2b
dotproductsse.cpp
// File:        dotproductsse.cpp
// Description: Architecture-specific dot-product function.
// Author:      Ray Smith
// Created:     Wed Jul 22 10:57:45 PDT 2015
//
// (C) Copyright 2015, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#if !defined(__SSE4_1__)
// The SSE4.1 code below can't be compiled without "-msse4.1", so use dummy
// stubs.

#include "dotproductsse.h"
#include <cstdio>
#include <cstdlib>

namespace tesseract {
double DotProductSSE(const double* u, const double* v, int n) {
  fprintf(stderr, "DotProductSSE can't be used without SSE4.1 support\n");
  abort();
}
int32_t IntDotProductSSE(const int8_t* u, const int8_t* v, int n) {
  fprintf(stderr, "IntDotProductSSE can't be used without SSE4.1 support\n");
  abort();
}
}  // namespace tesseract

#else  // !defined(__SSE4_1__)
// SSE4.1 implementation below.

#include <emmintrin.h>
#include <smmintrin.h>
#include <cstdint>
#include "dotproductsse.h"

namespace tesseract {

// Computes and returns the dot product of the n-vectors u and v.
// Uses Intel SSE intrinsics to access the SIMD instruction set.
double DotProductSSE(const double* u, const double* v, int n) {
  int max_offset = n - 2;
  int offset = 0;
  // Accumulate a set of 2 sums in sum, by loading pairs of 2 values from u
  // and v, and multiplying them together in parallel.
  __m128d sum = _mm_setzero_pd();
  if (offset <= max_offset) {
    offset = 2;
    // Aligned load is reputedly faster, but requires 16-byte-aligned input.
    if ((reinterpret_cast<uintptr_t>(u) & 15) == 0 &&
        (reinterpret_cast<uintptr_t>(v) & 15) == 0) {
      // Use aligned load.
      sum = _mm_load_pd(u);
      __m128d floats2 = _mm_load_pd(v);
      // Multiply.
      sum = _mm_mul_pd(sum, floats2);
      while (offset <= max_offset) {
        __m128d floats1 = _mm_load_pd(u + offset);
        floats2 = _mm_load_pd(v + offset);
        offset += 2;
        floats1 = _mm_mul_pd(floats1, floats2);
        sum = _mm_add_pd(sum, floats1);
      }
    } else {
      // Use unaligned load.
      sum = _mm_loadu_pd(u);
      __m128d floats2 = _mm_loadu_pd(v);
      // Multiply.
      sum = _mm_mul_pd(sum, floats2);
      while (offset <= max_offset) {
        __m128d floats1 = _mm_loadu_pd(u + offset);
        floats2 = _mm_loadu_pd(v + offset);
        offset += 2;
        floats1 = _mm_mul_pd(floats1, floats2);
        sum = _mm_add_pd(sum, floats1);
      }
    }
  }
  // Add the 2 sums in sum horizontally.
  sum = _mm_hadd_pd(sum, sum);
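  // (Illustrative note: _mm_hadd_pd([a, b], [a, b]) = [a + b, a + b], so both
  // lanes of sum now hold the full total.)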
  // Extract the low result.
  double result = _mm_cvtsd_f64(sum);
  // Add on any left-over products.
  while (offset < n) {
    result += u[offset] * v[offset];
    ++offset;
  }
  return result;
}
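
// Usage sketch (illustrative, not from the original file): the SIMD result
// should match a plain scalar accumulation, up to floating-point rounding,
// since the vector version sums in a different order:
//   double naive = 0.0;
//   for (int i = 0; i < n; ++i) naive += u[i] * v[i];
//   // naive ~= DotProductSSE(u, v, n)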

// Computes and returns the dot product of the n-vectors u and v, where the
// elements are 8-bit signed integers.
// Uses Intel SSE intrinsics to access the SIMD instruction set.
int32_t IntDotProductSSE(const int8_t* u, const int8_t* v, int n) {
  int max_offset = n - 8;
  int offset = 0;
  // Accumulate a set of 4 32-bit sums in sum, by loading 8 pairs of 8-bit
  // values, extending to 16 bit, multiplying to make 32 bit results.
  __m128i sum = _mm_setzero_si128();
  if (offset <= max_offset) {
    offset = 8;
    __m128i packed1 = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(u));
    __m128i packed2 = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(v));
    sum = _mm_cvtepi8_epi16(packed1);
    packed2 = _mm_cvtepi8_epi16(packed2);
    // The magic _mm_madd_epi16 is perfect here. It multiplies 8 pairs of
    // 16-bit ints to make 32-bit results, which are then horizontally added
    // in pairs to make 4 32-bit results that still fit in a 128-bit register.
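    // For example (illustrative values): with 16-bit lanes
    // a = [1, 2, 3, 4, 5, 6, 7, 8] and b = [1, 1, 1, 1, 1, 1, 1, 1],
    // _mm_madd_epi16(a, b) yields the four 32-bit values
    // [1*1 + 2*1, 3*1 + 4*1, 5*1 + 6*1, 7*1 + 8*1] = [3, 7, 11, 15].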
    sum = _mm_madd_epi16(sum, packed2);
    while (offset <= max_offset) {
      packed1 = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(u + offset));
      packed2 = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(v + offset));
      offset += 8;
      packed1 = _mm_cvtepi8_epi16(packed1);
      packed2 = _mm_cvtepi8_epi16(packed2);
      packed1 = _mm_madd_epi16(packed1, packed2);
      sum = _mm_add_epi32(sum, packed1);
    }
  }
  // Sum the 4 packed 32-bit sums and extract the low result.
  sum = _mm_hadd_epi32(sum, sum);
  sum = _mm_hadd_epi32(sum, sum);
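  // (Illustrative note: two rounds of _mm_hadd_epi32 fold [a, b, c, d] into
  // [a+b, c+d, a+b, c+d] and then into [a+b+c+d, ...], so every lane holds
  // the total.)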
  int32_t result = _mm_cvtsi128_si32(sum);
  // Add on any left-over products.
  while (offset < n) {
    result += u[offset] * v[offset];
    ++offset;
  }
  return result;
}

}  // namespace tesseract

#endif  // !defined(__SSE4_1__)
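
Below is a minimal, hypothetical test driver, not part of Tesseract, sketching how both functions might be exercised against plain scalar loops. It assumes the file above is compiled with -msse4.1 and linked in; the vector values are invented for illustration.

// Hypothetical driver: compile with -msse4.1 and link against the file above.
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include "dotproductsse.h"

int main() {
  // Lengths that are not multiples of 2 (doubles) or 8 (int8) exercise the
  // scalar tail loops in both functions.
  const double du[5] = {1.0, 2.0, 3.0, 4.0, 5.0};
  const double dv[5] = {0.5, 0.5, 0.5, 0.5, 0.5};
  double naive = 0.0;
  for (int i = 0; i < 5; ++i) naive += du[i] * dv[i];
  assert(std::fabs(tesseract::DotProductSSE(du, dv, 5) - naive) < 1e-9);

  const int8_t iu[9] = {1, -2, 3, -4, 5, -6, 7, -8, 9};
  const int8_t iv[9] = {9, 8, 7, 6, 5, 4, 3, 2, 1};
  int32_t inaive = 0;
  for (int i = 0; i < 9; ++i) inaive += iu[i] * iv[i];
  assert(tesseract::IntDotProductSSE(iu, iv, 9) == inaive);

  printf("ok\n");
  return 0;
}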