tesseract  5.0.0-alpha-619-ge9db
parallel.cpp
// File:        parallel.cpp
// Description: Runs networks in parallel on the same input.
// Author:      Ray Smith
// Created:     Thu May 02 08:06:06 PST 2013
//
// (C) Copyright 2013, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "parallel.h"

#ifdef _OPENMP
#include <omp.h>
#endif

#include "functions.h"  // For conditional undef of _OPENMP.
#include "networkscratch.h"

namespace tesseract {

// ni_ and no_ will be set by AddToStack.
Parallel::Parallel(const STRING& name, NetworkType type) : Plumbing(name) {
  type_ = type;
}

// Returns the shape output from the network given an input shape (which may
// be partially unknown, i.e. zero).
StaticShape Parallel::OutputShape(const StaticShape& input_shape) const {
  StaticShape result = stack_[0]->OutputShape(input_shape);
  int stack_size = stack_.size();
  for (int i = 1; i < stack_size; ++i) {
    StaticShape shape = stack_[i]->OutputShape(input_shape);
    result.set_depth(result.depth() + shape.depth());
  }
  return result;
}
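
// Illustrative sketch (depth values assumed, not from this file): with two
// sub-networks of output depth 64 and 32, OutputShape keeps batch, height
// and width from stack_[0] and concatenates along depth:
//   StaticShape out = parallel->OutputShape(input_shape);
//   // out.depth() == 64 + 32 == 96; other dimensions as for stack_[0].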

// Runs forward propagation of activations on the input line.
// See NetworkCpp for a detailed discussion of the arguments.
void Parallel::Forward(bool debug, const NetworkIO& input,
                       const TransposedArray* input_transpose,
                       NetworkScratch* scratch, NetworkIO* output) {
  bool parallel_debug = false;
  // If this parallel is a replicator of convolvers, or holds a 1-d LSTM pair,
  // or a 2-d LSTM quad, do debug locally, and don't pass the flag on.
  if (debug && type_ != NT_PARALLEL) {
    parallel_debug = true;
    debug = false;
  }
  int stack_size = stack_.size();
  if (type_ == NT_PAR_2D_LSTM) {
    // Special case, run parallel in parallel.
    GenericVector<NetworkScratch::IO> results;
    results.init_to_size(stack_size, NetworkScratch::IO());
    for (int i = 0; i < stack_size; ++i) {
      results[i].Resize(input, stack_[i]->NumOutputs(), scratch);
    }
#ifdef _OPENMP
#pragma omp parallel for num_threads(stack_size)
#endif
    for (int i = 0; i < stack_size; ++i) {
      stack_[i]->Forward(debug, input, nullptr, scratch, results[i]);
    }
    // Now pack all the results (serially) into the output.
    int out_offset = 0;
    output->Resize(*results[0], NumOutputs());
    for (int i = 0; i < stack_size; ++i) {
      out_offset = output->CopyPacking(*results[i], out_offset);
    }
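    // Hedged note on the packing: CopyPacking copies each result's features
    // into output starting at out_offset and returns the next offset, so for
    // sub-nets of depths d0 and d1 a timestep's feature vector ends up as
    //   [results[0][0..d0-1], results[1][0..d1-1]]
    // with out_offset stepping 0 -> d0 -> d0 + d1.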
  } else {
    // Revolving intermediate result.
    NetworkScratch::IO result(input, scratch);
    // Source for divided replicated.
    NetworkScratch::IO source_part;
    TransposedArray* src_transpose = nullptr;
    if (IsTraining() && type_ == NT_REPLICATED) {
      // Make a transposed copy of the input.
      input.Transpose(&transposed_input_);
      src_transpose = &transposed_input_;
    }
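    // Hedged note (an assumption about intent): sharing one transposed copy
    // of the input lets every NT_REPLICATED sub-network reuse it for its
    // weight-gradient computation in Backward, instead of each sub-network
    // transposing the same input again.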
    // Run each network, putting the outputs into result.
    int out_offset = 0;
    for (int i = 0; i < stack_size; ++i) {
      stack_[i]->Forward(debug, input, src_transpose, scratch, result);
      // All networks must have the same output width.
      if (i == 0) {
        output->Resize(*result, NumOutputs());
      } else {
        ASSERT_HOST(result->Width() == output->Width());
      }
      out_offset = output->CopyPacking(*result, out_offset);
    }
  }
  if (parallel_debug) {
    DisplayForward(*output);
  }
}
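
// A minimal usage sketch (hypothetical names; assumes the sub-networks were
// added via Plumbing::AddToStack and that `inputs` is an already-populated
// NetworkIO):
//   Parallel par("bidi", NT_PARALLEL);
//   par.AddToStack(forward_lstm);    // Network* sub-networks, assumed built.
//   par.AddToStack(reversed_lstm);
//   NetworkScratch scratch;
//   NetworkIO outputs;
//   par.Forward(false, inputs, nullptr, &scratch, &outputs);
//   // outputs now holds the depth-concatenation of both sub-networks.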

// Runs backward propagation of errors on the deltas line.
// See NetworkCpp for a detailed discussion of the arguments.
bool Parallel::Backward(bool debug, const NetworkIO& fwd_deltas,
                        NetworkScratch* scratch,
                        NetworkIO* back_deltas) {
  // If this parallel is a replicator of convolvers, or holds a 1-d LSTM pair,
  // or a 2-d LSTM quad, do debug locally, and don't pass the flag on.
  if (debug && type_ != NT_PARALLEL) {
    DisplayBackward(fwd_deltas);
    debug = false;
  }
  int stack_size = stack_.size();
  if (type_ == NT_PAR_2D_LSTM) {
    // Special case, run parallel in parallel.
    GenericVector<NetworkScratch::IO> in_deltas, out_deltas;
    in_deltas.init_to_size(stack_size, NetworkScratch::IO());
    out_deltas.init_to_size(stack_size, NetworkScratch::IO());
    // Split the forward deltas for each stack element.
    int feature_offset = 0;
    for (int i = 0; i < stack_size; ++i) {
      int num_features = stack_[i]->NumOutputs();
      in_deltas[i].Resize(fwd_deltas, num_features, scratch);
      out_deltas[i].Resize(fwd_deltas, stack_[i]->NumInputs(), scratch);
      in_deltas[i]->CopyUnpacking(fwd_deltas, feature_offset, num_features);
      feature_offset += num_features;
    }
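    // Hedged illustration: CopyUnpacking is the inverse of the CopyPacking
    // done in Forward. With sub-net depths d0 and d1, in_deltas[0] receives
    // features [0, d0) of fwd_deltas and in_deltas[1] receives features
    // [d0, d0 + d1), so each sub-network sees only its own slice of deltas.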
#ifdef _OPENMP
#pragma omp parallel for num_threads(stack_size)
#endif
    for (int i = 0; i < stack_size; ++i) {
      stack_[i]->Backward(debug, *in_deltas[i], scratch,
                          i == 0 ? back_deltas : out_deltas[i]);
    }
    if (needs_to_backprop_) {
      for (int i = 1; i < stack_size; ++i) {
        back_deltas->AddAllToFloat(*out_deltas[i]);
      }
    }
  } else {
    // Revolving partial deltas.
    NetworkScratch::IO in_deltas(fwd_deltas, scratch);
    // The sum of deltas from different sources, which will eventually go into
    // back_deltas.
    NetworkScratch::IO out_deltas;
    int feature_offset = 0;
    for (int i = 0; i < stack_size; ++i) {
      int num_features = stack_[i]->NumOutputs();
      in_deltas->CopyUnpacking(fwd_deltas, feature_offset, num_features);
      feature_offset += num_features;
      if (stack_[i]->Backward(debug, *in_deltas, scratch, back_deltas)) {
        if (i == 0) {
          out_deltas.ResizeFloat(*back_deltas, back_deltas->NumFeatures(),
                                 scratch);
          out_deltas->CopyAll(*back_deltas);
        } else if (back_deltas->NumFeatures() == out_deltas->NumFeatures()) {
          // Widths are allowed to be different going back, as we may have
          // input nets, so only accumulate the deltas if the widths are the
          // same.
          out_deltas->AddAllToFloat(*back_deltas);
        }
      }
    }
    if (needs_to_backprop_) back_deltas->CopyAll(*out_deltas);
  }
  if (needs_to_backprop_) back_deltas->ScaleFloatBy(1.0f / stack_size);
  return needs_to_backprop_;
}
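
// Note on the final scaling (numbers illustrative, intent assumed): dividing
// by stack_size averages the accumulated contributions, so the magnitude of
// back_deltas stays independent of how many sub-networks fed it, e.g. 4
// replicas each contributing a delta of 1.0 propagate 1.0 back, not 4.0.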

}  // namespace tesseract.