diff options
-rw-r--r-- | src/graphClasses.h | 4 | ||||
-rw-r--r-- | src/propagator.h | 13 |
2 files changed, 9 insertions, 8 deletions
diff --git a/src/graphClasses.h b/src/graphClasses.h index 8c90d4b..da5f1af 100644 --- a/src/graphClasses.h +++ b/src/graphClasses.h @@ -11,7 +11,7 @@ namespace nplm template <class X> class Node { public: - const X * param; //what parameter is this + X * param; //what parameter is this //vector <void *> children; //vector <void *> parents; Eigen::Matrix<double,Eigen::Dynamic,Eigen::Dynamic> fProp_matrix; @@ -21,7 +21,7 @@ class Node { public: Node() : param(NULL), minibatch_size(0) { } - Node(const X *input_param, int minibatch_size) + Node(X *input_param, int minibatch_size) : param(input_param), minibatch_size(minibatch_size) { diff --git a/src/propagator.h b/src/propagator.h index 40bf1d6..0619de6 100644 --- a/src/propagator.h +++ b/src/propagator.h @@ -30,12 +30,13 @@ public: propagator (const model &nn, int minibatch_size) : pnn(&nn), - input_layer_node(&nn.input_layer, minibatch_size), - first_hidden_linear_node(&nn.first_hidden_linear, minibatch_size), - first_hidden_activation_node(&nn.first_hidden_activation, minibatch_size), - second_hidden_linear_node(&nn.second_hidden_linear, minibatch_size), - second_hidden_activation_node(&nn.second_hidden_activation, minibatch_size), - output_layer_node(&nn.output_layer, minibatch_size), + // The model's layers are const here for query use, but training mutates them through these same Node pointers, so Node<X>::param must be a non-const X* — hence the const_casts below.
+ input_layer_node(const_cast<Input_word_embeddings*>(&nn.input_layer), minibatch_size), + first_hidden_linear_node(const_cast<Linear_layer*>(&nn.first_hidden_linear), minibatch_size), + first_hidden_activation_node(const_cast<Activation_function*>(&nn.first_hidden_activation), minibatch_size), + second_hidden_linear_node(const_cast<Linear_layer*>(&nn.second_hidden_linear), minibatch_size), + second_hidden_activation_node(const_cast<Activation_function*>(&nn.second_hidden_activation), minibatch_size), + output_layer_node(const_cast<Output_word_embeddings*>(&nn.output_layer), minibatch_size), minibatch_size(minibatch_size) { } |