Skip to content

Commit

Permalink
Optimize performance by using inline MatrixDotVectorInternal
Browse files Browse the repository at this point in the history
This improves performance for the "best" models because it
avoids function calls.

The compiler also knows the passed values for the parameters
add_bias_fwd and skip_bias_back.

Signed-off-by: Stefan Weil <[email protected]>
  • Loading branch information
stweil committed Nov 29, 2018
1 parent 685b136 commit e161501
Show file tree
Hide file tree
Showing 2 changed files with 22 additions and 33 deletions.
44 changes: 22 additions & 22 deletions src/lstm/weightmatrix.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,28 @@ const int kAdamCorrectionIterations = 200000;
// Epsilon in Adam to prevent division by zero.
const double kAdamEpsilon = 1e-8;

// Computes the matrix-vector product v = Wu.
// u has W.dim2() - add_bias_fwd elements and the output v has
// W.dim1() - skip_bias_back elements.
// If add_bias_fwd, u is treated as if it had a trailing element with value 1,
// which implements the bias weight.
// If skip_bias_back, we are actually performing the backwards product on a
// transposed matrix, so the v output corresponding to the last element in
// dim1 is dropped.
static inline void MatrixDotVectorInternal(const GENERIC_2D_ARRAY<double>& w,
                                           bool add_bias_fwd,
                                           bool skip_bias_back, const double* u,
                                           double* v) {
  // The bool flags shrink the effective dimensions by exactly one when set.
  const int num_results = w.dim1() - (skip_bias_back ? 1 : 0);
  const int extent = w.dim2() - (add_bias_fwd ? 1 : 0);
  for (int row = 0; row < num_results; ++row) {
    const double* weights = w[row];
    double sum = WeightMatrix::DotProduct(weights, u, extent);
    // The trailing weight is the bias, multiplied by an implicit 1 in u.
    if (add_bias_fwd) sum += weights[extent];
    v[row] = sum;
  }
}

// Copies the whole input transposed, converted to double, into *this.
void TransposedArray::Transpose(const GENERIC_2D_ARRAY<double>& input) {
int width = input.dim1();
Expand Down Expand Up @@ -401,26 +423,4 @@ void WeightMatrix::FloatToDouble(const GENERIC_2D_ARRAY<float>& wf,
}
}

// Computes matrix.vector v = Wu.
// u is of size W.dim2() - add_bias_fwd and the output v is of size
// W.dim1() - skip_bias_back.
// If add_bias_fwd, u is imagined to have an extra element at the end with value
// 1, to implement the bias weight.
// If skip_bias_back, we are actually performing the backwards product on a
// transposed matrix, so we need to drop the v output corresponding to the last
// element in dim1.
void WeightMatrix::MatrixDotVectorInternal(const GENERIC_2D_ARRAY<double>& w,
                                           bool add_bias_fwd,
                                           bool skip_bias_back, const double* u,
                                           double* v) {
  // bool converts to 0/1 here, trimming one output row (skip_bias_back) or
  // one input element (add_bias_fwd) when a bias is involved.
  int num_results = w.dim1() - skip_bias_back;
  int extent = w.dim2() - add_bias_fwd;
  for (int i = 0; i < num_results; ++i) {
    const double* wi = w[i];
    // Dot product of row i over the shared extent.
    double total = DotProduct(wi, u, extent);
    if (add_bias_fwd) total += wi[extent]; // The bias value.
    v[i] = total;
  }
}

} // namespace tesseract.
11 changes: 0 additions & 11 deletions src/lstm/weightmatrix.h
Original file line number Diff line number Diff line change
Expand Up @@ -159,17 +159,6 @@ class WeightMatrix {
static void FloatToDouble(const GENERIC_2D_ARRAY<float>& wf,
GENERIC_2D_ARRAY<double>* wd);

private:
// Computes matrix.vector v = Wu.
// u is of size starts.back()+extents.back() and the output v is of size
// starts.size().
// The weight matrix w, is of size starts.size()xMAX(extents)+add_bias_fwd.
// If add_bias_fwd, an extra element at the end of w[i] is the bias weight
// and is added to v[i].
static void MatrixDotVectorInternal(const GENERIC_2D_ARRAY<double>& w,
bool add_bias_fwd, bool skip_bias_back,
const double* u, double* v);

private:
// Choice between float and 8 bit int implementations.
GENERIC_2D_ARRAY<double> wf_;
Expand Down

0 comments on commit e161501

Please sign in to comment.