#pragma once

#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <algorithm>
#include <cmath>
#include <boost/unordered_map.hpp>
#include <Eigen/Dense>

#include "maybe_omp.h"
#include "util.h"
#include "graphClasses.h"
#include "USCMatrix.h"

// classes for various kinds of layers
#include "SoftmaxLoss.h"
#include "Activation_function.h"

//#define EIGEN_DONT_PARALLELIZE
//#define EIGEN_DEFAULT_TO_ROW_MAJOR

using namespace std;

namespace nplm
{

// is this cheating?
using Eigen::Matrix;
using Eigen::Array;
using Eigen::MatrixBase;
using Eigen::Dynamic;

typedef boost::unordered_map<int,bool> int_map;

struct Clipper {
    user_data_t operator() (user_data_t x) const {
        return std::min(user_data_t(0.5), std::max(x, user_data_t(-0.5)));
        //return(x);
    }
};

class Linear_layer
{
private:
    Matrix<user_data_t,Dynamic,Dynamic> U;
    Matrix<user_data_t,Dynamic,Dynamic> U_gradient;
    Matrix<user_data_t,Dynamic,Dynamic> U_velocity;
    Matrix<user_data_t,Dynamic,Dynamic> U_running_gradient;
    Matrix<user_data_t,Dynamic,Dynamic> U_running_parameter_update;
    // Biases
    Matrix<user_data_t,Dynamic,1> b;
    Matrix<user_data_t,Dynamic,1> b_velocity;
    Matrix<user_data_t,Dynamic,1> b_running_gradient;
    Matrix<user_data_t,Dynamic,1> b_running_parameter_update;
    Matrix<user_data_t,Dynamic,1> b_gradient;

    friend class model;

public:
    Linear_layer() { }
    Linear_layer(int rows, int cols) { resize(rows, cols); }

    void resize(int rows, int cols)
    {
        U.setZero(rows, cols);
        U_gradient.setZero(rows, cols);
        //U_running_gradient.setZero(rows, cols);
        //U_running_parameter_updates.setZero(rows, cols);
        //U_velocity.setZero(rows, cols);
        b.resize(rows);
        b_gradient.setZero(rows);
        //b_running_gradient.resize(rows);
        //b_velocity.resize(rows);
    }

    void read_weights(std::ifstream &U_file) { readMatrix(U_file, U); }
    void write_weights(std::ofstream &U_file) { writeMatrix(U, U_file); }
    void read_biases(std::ifstream &b_file) { readMatrix(b_file, b); }
    void write_biases(std::ofstream &b_file) { writeMatrix(b, b_file); }

    template <typename Engine>
    void initialize(Engine &engine,
                    bool init_normal,
                    user_data_t init_range,
                    string &parameter_update,
                    user_data_t adagrad_epsilon)
    {
        if (parameter_update == "ADA") {
            U_running_gradient = Matrix<user_data_t,Dynamic,Dynamic>::Ones(U.rows(),U.cols())*adagrad_epsilon;
            b_running_gradient = Matrix<user_data_t,Dynamic,1>::Ones(b.size())*adagrad_epsilon;
        }
        if (parameter_update == "ADAD") {
            U_running_gradient.setZero(U.rows(),U.cols());
            b_running_gradient.setZero(b.size());
            U_running_parameter_update.setZero(U.rows(),U.cols());
            b_running_parameter_update.setZero(b.size());
        }

        initMatrix(engine, U, init_normal, init_range);
        initBias(engine, b, init_normal, init_range);
    }

    int n_inputs () const { return U.cols(); }
    int n_outputs () const { return U.rows(); }

    template <typename DerivedIn, typename DerivedOut>
    void fProp(const MatrixBase<DerivedIn> &input,
               const MatrixBase<DerivedOut> &output) const
    {
        UNCONST(DerivedOut, output, my_output);
        my_output.leftCols(input.cols()).noalias() = U*input;
        int num_examples = input.cols();
        for (int example = 0; example < num_examples; example++)
        {
            my_output.leftCols(input.cols()).col(example) += b;
        }
    }
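    // In the sparse overload below, the input arrives as a USCMatrix of
    // index/value pairs per column, so U*input can be accumulated by uscgemm
    // over the nonzero entries instead of being computed as a dense product;
    // the bias is then added column by column exactly as in the dense case.
    // (See USCMatrix.h for the storage layout.)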
    // Sparse input
    template <typename DerivedOut>
    void fProp(const USCMatrix<user_data_t> &input,
               const MatrixBase<DerivedOut> &output_const) const
    {
        UNCONST(DerivedOut, output_const, output);
        output.setZero();
        uscgemm(1.0, U, input, output.leftCols(input.cols()));
        // Each column corresponds to a training example. We
        // parallelize the adding of biases per dimension.
        int num_examples = input.cols();
        for (int example = 0; example < num_examples; example++)
        {
            output.leftCols(input.cols()).col(example) += b;
        }
    }

    template <typename DerivedGOut, typename DerivedGIn>
    void bProp(const MatrixBase<DerivedGOut> &input,
               MatrixBase<DerivedGIn> &output) const
    {
        UNCONST(DerivedGIn, output, my_output);
        my_output.noalias() = U.transpose()*input;
    }

    template <typename DerivedGOut, typename DerivedIn>
    void computeGradient(const MatrixBase<DerivedGOut> &bProp_input,
                         const MatrixBase<DerivedIn> &fProp_input,
                         user_data_t learning_rate, user_data_t momentum, user_data_t L2_reg)
    {
        U_gradient.noalias() = bProp_input*fProp_input.transpose();

        // get the bias gradient for all dimensions in parallel
        int size = b.size();
        b_gradient = bProp_input.rowwise().sum();
        // This used to be multithreaded, but there was no measurable difference
        if (L2_reg > 0.0)
        {
            U_gradient -= 2*L2_reg*U;
            b_gradient -= 2*L2_reg*b;
        }
        if (momentum > 0.0)
        {
            U_velocity = momentum*U_velocity + U_gradient;
            U += learning_rate * U_velocity;
            b_velocity = momentum*b_velocity + b_gradient;
            b += learning_rate * b_velocity;
        }
        else
        {
            U += learning_rate * U_gradient;
            b += learning_rate * b_gradient;
            /*
            //UPDATE CLIPPING
            U += (learning_rate*U_gradient).array().unaryExpr(Clipper()).matrix();
            b += (learning_rate*b_gradient).array().unaryExpr(Clipper()).matrix();
            //GRADIENT CLIPPING
            //U += learning_rate*(U_gradient.array().unaryExpr(Clipper())).matrix();
            //b += learning_rate*(b_gradient.array().unaryExpr(Clipper())).matrix();
            */
        }
    }

    template <typename DerivedGOut, typename DerivedIn>
    void computeGradientAdagrad(const MatrixBase<DerivedGOut> &bProp_input,
                                const MatrixBase<DerivedIn> &fProp_input,
                                user_data_t learning_rate,
                                user_data_t L2_reg)
    {
        U_gradient.noalias() = bProp_input*fProp_input.transpose();

        // get the bias gradient for all dimensions in parallel
        int size = b.size();
        b_gradient.noalias() = bProp_input.rowwise().sum();
        if (L2_reg != 0)
        {
            U_gradient -= 2*L2_reg*U;
            b_gradient -= 2*L2_reg*b;
        }

        // ignore momentum?
        #pragma omp parallel for
        for (int col=0; col<U.cols(); col++)
        {
            U_running_gradient.col(col) += U_gradient.col(col).array().square().matrix();
            U.col(col) += learning_rate * (U_gradient.col(col).array() /
                                           U_running_gradient.col(col).array().sqrt()).matrix();
        }
        b_running_gradient += b_gradient.array().square().matrix();
        b += learning_rate * (b_gradient.array()/b_running_gradient.array().sqrt()).matrix();
    }
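    // ADADELTA (Zeiler, 2012) as used below: keep a decaying average of squared
    // gradients and of squared parameter updates, and scale each gradient by the
    // ratio of the two RMS values,
    //   E[g^2]  = decay*E[g^2]  + (1-decay)*g^2
    //   delta   = sqrt(E[dx^2] + conditioning_constant) / sqrt(E[g^2] + conditioning_constant) * g
    //   E[dx^2] = decay*E[dx^2] + (1-decay)*delta^2
    // The extra multiplication by learning_rate when applying delta is specific
    // to this code; the published method effectively uses a rate of 1.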
    template <typename DerivedGOut, typename DerivedIn>
    void computeGradientAdadelta(const MatrixBase<DerivedGOut> &bProp_input,
                                 const MatrixBase<DerivedIn> &fProp_input,
                                 user_data_t learning_rate,
                                 user_data_t L2_reg,
                                 user_data_t conditioning_constant,
                                 user_data_t decay)
    {
        //cerr<<"decay is "<<decay<<endl;
        U_gradient.noalias() = bProp_input*fProp_input.transpose();

        Array<user_data_t,Dynamic,1> b_current_parameter_update;

        // get the bias gradient for all dimensions in parallel
        int size = b.size();
        b_gradient.noalias() = bProp_input.rowwise().sum();

        if (L2_reg != 0)
        {
            U_gradient -= 2*L2_reg*U;
            b_gradient -= 2*L2_reg*b;
        }

        // ignore momentum?
        #pragma omp parallel for
        //cerr<<"U gradient is "<<U_gradient<<endl;
        for (int col=0; col<U.cols(); col++)
        {
            Array<user_data_t,Dynamic,1> U_current_parameter_update;
            U_running_gradient.col(col) = decay*U_running_gradient.col(col) +
                (1-decay)*U_gradient.col(col).array().square().matrix();
            //cerr<<"U running gradient is "<<U_running_gradient.col(col)<<endl;
            U_current_parameter_update =
                ((U_running_parameter_update.col(col).array()+conditioning_constant).sqrt()/
                 (U_running_gradient.col(col).array()+conditioning_constant).sqrt())*
                U_gradient.col(col).array();
            U_running_parameter_update.col(col) = decay*U_running_parameter_update.col(col) +
                (1.-decay)*U_current_parameter_update.square().matrix();
            U.col(col) += learning_rate*U_current_parameter_update.matrix();
        }
        b_running_gradient = decay*b_running_gradient +
            (1.-decay)*b_gradient.array().square().matrix();
        b_current_parameter_update =
            ((b_running_parameter_update.array()+conditioning_constant).sqrt()/
             (b_running_gradient.array()+conditioning_constant).sqrt())*
            b_gradient.array();
        b_running_parameter_update = decay*b_running_parameter_update +
            (1.-decay)*b_current_parameter_update.square().matrix();
        b += learning_rate*b_current_parameter_update.matrix();
    }

    template <typename DerivedGOut, typename DerivedIn, typename DerivedGW>
    void computeGradientCheck(const MatrixBase<DerivedGOut> &bProp_input,
                              const MatrixBase<DerivedIn> &fProp_input,
                              const MatrixBase<DerivedGW> &gradient) const
    {
        UNCONST(DerivedGW, gradient, my_gradient);
        my_gradient.noalias() = bProp_input*fProp_input.transpose();
    }
};

class Output_word_embeddings
{
private:
    // row-major is better for uscgemm
    //Matrix<user_data_t,Dynamic,Dynamic,Eigen::RowMajor> W;
    // Having W be a pointer to a matrix allows ease of sharing
    // input and output word embeddings
    Matrix<user_data_t,Dynamic,Dynamic,Eigen::RowMajor> *W;
    std::vector<user_data_t> W_data;
    Matrix<user_data_t,Dynamic,1> b;
    Matrix<user_data_t,Dynamic,Dynamic,Eigen::RowMajor> W_running_gradient;
    Matrix<user_data_t,Dynamic,Dynamic,Eigen::RowMajor> W_gradient;
    Matrix<user_data_t,Dynamic,Dynamic,Eigen::RowMajor> W_running_parameter_update;
    Matrix<user_data_t,Dynamic,1> b_running_gradient;
    Matrix<user_data_t,Dynamic,1> b_gradient;
    Matrix<user_data_t,Dynamic,1> b_running_parameter_update;

public:
    Output_word_embeddings() { }
    Output_word_embeddings(int rows, int cols) { resize(rows, cols); }

    void resize(int rows, int cols)
    {
        W->setZero(rows, cols);
        b.setZero(rows);
    }
    void set_W(Matrix<user_data_t,Dynamic,Dynamic,Eigen::RowMajor> *input_W) {
        W = input_W;
    }
    void read_weights(std::ifstream &W_file) { readMatrix(W_file, *W); }
    void write_weights(std::ofstream &W_file) { writeMatrix(*W, W_file); }
    void read_biases(std::ifstream &b_file) { readMatrix(b_file, b); }
    void write_biases(std::ofstream &b_file) { writeMatrix(b, b_file); }

    template <typename Engine>
    void initialize(Engine &engine,
                    bool init_normal,
                    user_data_t init_range,
                    user_data_t init_bias,
                    string &parameter_update,
                    user_data_t adagrad_epsilon)
    {
        W_gradient.setZero(W->rows(),W->cols());
        b_gradient.setZero(b.size());
        if (parameter_update == "ADA") {
            W_running_gradient = Matrix<user_data_t,Dynamic,Dynamic>::Ones(W->rows(),W->cols())*adagrad_epsilon;
            b_running_gradient = Matrix<user_data_t,Dynamic,1>::Ones(b.size())*adagrad_epsilon;
            //W_gradient.setZero(W->rows(),W->cols());
            //b_gradient.setZero(b.size());
        }
        if (parameter_update == "ADAD") {
            W_running_gradient.setZero(W->rows(),W->cols());
            b_running_gradient.setZero(b.size());
            W_gradient.setZero(W->rows(),W->cols());
            //b_gradient.setZero(b.size());
            //W_running_parameter_update.setZero(W->rows(),W->cols());
            b_running_parameter_update.setZero(b.size());
        }

        initMatrix(engine, *W, init_normal, init_range);
        b.fill(init_bias);
    }

    int n_inputs () const { return W->cols(); }
    int n_outputs () const { return W->rows(); }

    template <typename DerivedIn, typename DerivedOut>
    void fProp(const MatrixBase<DerivedIn> &input,
               const MatrixBase<DerivedOut> &output) const
    {
        UNCONST(DerivedOut, output, my_output);
        my_output = ((*W) * input).colwise() + b;
        /* TODO: without EIGEN_NO_DEBUG - is this a bug?
           ProductBase.h:102: Eigen::ProductBase<...>::ProductBase(const Lhs&, const Rhs&)
           [with Derived = Eigen::GeneralProduct<...>; Lhs = Eigen::Matrix<...>; Rhs = Eigen::Matrix<...>]:
           Assertion `a_lhs.cols() == a_rhs.rows() && "invalid matrix product" &&
           "if you wanted a coeff-wise or a dot product use the respective explicit functions"' failed.
           (gdb) p a_lhs.cols()  $3 = 50
           (gdb) p a_rhs.rows()  $4 = 100
           (gdb) p a_lhs.rows()  $5 = 2
           (gdb) p a_rhs.cols()  $6 = 1
           from lookup_ngram normalization prop.skip_hidden in neuralNetwork.h:100
        */
    }
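    // The overload below scores only the words listed in `samples` (one column
    // of sampled word ids per training instance), as needed by a sampled
    // training loss such as NCE: biases are gathered per sample, and
    // uscgemm_masked fills in only the corresponding rows of W times input.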
    // Sparse output version
    template <typename DerivedIn, typename DerivedOutI, typename DerivedOutV>
    void fProp(const MatrixBase<DerivedIn> &input,
               const MatrixBase<DerivedOutI> &samples,
               const MatrixBase<DerivedOutV> &output) const
    {
        UNCONST(DerivedOutV, output, my_output);
        #pragma omp parallel for
        for (int instance_id = 0; instance_id < samples.cols(); instance_id++)
        {
            for (int sample_id = 0; sample_id < samples.rows(); sample_id++)
            {
                my_output(sample_id, instance_id) = b(samples(sample_id, instance_id));
            }
        }

        USCMatrix<user_data_t> sparse_output(W->rows(), samples, my_output);
        uscgemm_masked(1.0, *W, input, sparse_output);
        my_output = sparse_output.values; // too bad, so much copying
    }

    // Return single element of output matrix
    template <typename DerivedIn>
    user_data_t fProp(const MatrixBase<DerivedIn> &input,
                      int word,
                      int instance) const
    {
        return W->row(word).dot(input.col(instance)) + b(word);
    }

    // Dense versions (for log-likelihood loss)

    template <typename DerivedGOut, typename DerivedGIn>
    void bProp(const MatrixBase<DerivedGOut> &input_bProp_matrix,
               const MatrixBase<DerivedGIn> &bProp_matrix) const
    {
        // W is vocab_size x output_embedding_dimension
        // input_bProp_matrix is vocab_size x minibatch_size
        // bProp_matrix is output_embedding_dimension x minibatch_size
        UNCONST(DerivedGIn, bProp_matrix, my_bProp_matrix);
        my_bProp_matrix.leftCols(input_bProp_matrix.cols()).noalias() =
            W->transpose() * input_bProp_matrix;
    }

    template <typename DerivedIn, typename DerivedGOut>
    void computeGradient(const MatrixBase<DerivedIn> &predicted_embeddings,
                         const MatrixBase<DerivedGOut> &bProp_input,
                         user_data_t learning_rate,
                         user_data_t momentum) //not sure if we want to use momentum here
    {
        // W is vocab_size x output_embedding_dimension
        // b is vocab_size x 1
        // predicted_embeddings is output_embedding_dimension x minibatch_size
        // bProp_input is vocab_size x minibatch_size
        W->noalias() += learning_rate * bProp_input * predicted_embeddings.transpose();
        b += learning_rate * bProp_input.rowwise().sum();

        /*
        //GRADIENT CLIPPING
        W->noalias() += learning_rate *
            ((bProp_input * predicted_embeddings.transpose()).array().unaryExpr(Clipper())).matrix();
        b += learning_rate * (bProp_input.rowwise().sum().array().unaryExpr(Clipper())).matrix();
        //UPDATE CLIPPING
        W->noalias() += (learning_rate * (bProp_input * predicted_embeddings.transpose()))
            .array().unaryExpr(Clipper()).matrix();
        b += (learning_rate * (bProp_input.rowwise().sum())).array().unaryExpr(Clipper()).matrix();
        */
    }

    template <typename DerivedIn, typename DerivedGOut>
    void computeGradientAdagrad(const MatrixBase<DerivedIn> &predicted_embeddings,
                                const MatrixBase<DerivedGOut> &bProp_input,
                                user_data_t learning_rate) //not sure if we want to use momentum here
    {
        // W is vocab_size x output_embedding_dimension
        // b is vocab_size x 1
        // predicted_embeddings is output_embedding_dimension x minibatch_size
        // bProp_input is vocab_size x minibatch_size
        W_gradient.setZero(W->rows(), W->cols());
        b_gradient.setZero(b.size());
        W_gradient.noalias() = bProp_input * predicted_embeddings.transpose();
        b_gradient.noalias() = bProp_input.rowwise().sum();
        W_running_gradient += W_gradient.array().square().matrix();
        b_running_gradient += b_gradient.array().square().matrix();
        W->noalias() += learning_rate *
            (W_gradient.array()/W_running_gradient.array().sqrt()).matrix();
        b += learning_rate * (b_gradient.array()/b_running_gradient.array().sqrt()).matrix();
        /*
        //UPDATE CLIPPING
        *W += (learning_rate * (W_gradient.array()/W_running_gradient.array().sqrt())).unaryExpr(Clipper()).matrix();
        b += (learning_rate * (b_gradient.array()/b_running_gradient.array().sqrt())).unaryExpr(Clipper()).matrix();
        */
    }
    template <typename DerivedIn, typename DerivedGOut>
    void computeGradientAdadelta(const MatrixBase<DerivedIn> &predicted_embeddings,
                                 const MatrixBase<DerivedGOut> &bProp_input,
                                 user_data_t learning_rate,
                                 user_data_t conditioning_constant,
                                 user_data_t decay) //not sure if we want to use momentum here
    {
        // W is vocab_size x output_embedding_dimension
        // b is vocab_size x 1
        // predicted_embeddings is output_embedding_dimension x minibatch_size
        // bProp_input is vocab_size x minibatch_size
        Array<user_data_t,Dynamic,Dynamic> W_current_parameter_update;
        Array<user_data_t,Dynamic,1> b_current_parameter_update;
        W_gradient.setZero(W->rows(), W->cols());
        b_gradient.setZero(b.size());
        W_gradient.noalias() = bProp_input * predicted_embeddings.transpose();
        b_gradient.noalias() = bProp_input.rowwise().sum();
        W_running_gradient = decay*W_running_gradient +
            (1.-decay)*W_gradient.array().square().matrix();
        b_running_gradient = decay*b_running_gradient +
            (1.-decay)*b_gradient.array().square().matrix();
        W_current_parameter_update =
            ((W_running_parameter_update.array()+conditioning_constant).sqrt()/
             (W_running_gradient.array()+conditioning_constant).sqrt())*
            W_gradient.array();
        b_current_parameter_update =
            ((b_running_parameter_update.array()+conditioning_constant).sqrt()/
             (b_running_gradient.array()+conditioning_constant).sqrt())*
            b_gradient.array();
        W_running_parameter_update = decay*W_running_parameter_update +
            (1.-decay)*W_current_parameter_update.square().matrix();
        b_running_parameter_update = decay*b_running_parameter_update +
            (1.-decay)*b_current_parameter_update.square().matrix();
        *W += learning_rate*W_current_parameter_update.matrix();
        b += learning_rate*b_current_parameter_update.matrix();
    }

    // Sparse versions

    template <typename DerivedGOutI, typename DerivedGOutV, typename DerivedGIn>
    void bProp(const MatrixBase<DerivedGOutI> &samples,
               const MatrixBase<DerivedGOutV> &weights,
               const MatrixBase<DerivedGIn> &bProp_matrix) const
    {
        UNCONST(DerivedGIn, bProp_matrix, my_bProp_matrix);
        my_bProp_matrix.setZero();
        uscgemm(1.0,
                W->transpose(),
                USCMatrix<user_data_t>(W->rows(), samples, weights),
                my_bProp_matrix.leftCols(samples.cols())); // narrow bProp_matrix for possible short minibatch
    }

    template <typename DerivedIn, typename DerivedGOutI, typename DerivedGOutV>
    void computeGradient(const MatrixBase<DerivedIn> &predicted_embeddings,
                         const MatrixBase<DerivedGOutI> &samples,
                         const MatrixBase<DerivedGOutV> &weights,
                         user_data_t learning_rate, user_data_t momentum) //not sure if we want to use momentum here
    {
        //cerr<<"in gradient"<<endl;
        USCMatrix<user_data_t> gradient_output(W->rows(), samples, weights);
        uscgemm(learning_rate,
                gradient_output,
                predicted_embeddings.leftCols(gradient_output.cols()).transpose(),
                *W); // narrow predicted_embeddings for possible short minibatch
        uscgemv(learning_rate,
                gradient_output,
                Matrix<user_data_t,Dynamic,1>::Ones(gradient_output.cols()),
                b);

        /*
        //IN ORDER TO IMPLEMENT CLIPPING, WE HAVE TO COMPUTE THE GRADIENT
        //FIRST
        USCMatrix<user_data_t> gradient_output(W->rows(), samples, weights);
        uscgemm(1.0,
                gradient_output,
                predicted_embeddings.leftCols(samples.cols()).transpose(),
                W_gradient);
        uscgemv(1.0,
                gradient_output,
                Matrix<user_data_t,Dynamic,1>::Ones(weights.cols()),
                b_gradient);

        int_map update_map; //stores all the parameters that have been updated
        for (int sample_id=0; sample_id<samples.rows(); sample_id++)
            for (int train_id=0; train_id<samples.cols(); train_id++)
                update_map[samples(sample_id, train_id)] = 1;

        // Convert to std::vector for parallelization
        std::vector<int> update_items;
        for (int_map::iterator it = update_map.begin(); it != update_map.end(); ++it)
            update_items.push_back(it->first);
        int num_items = update_items.size();

        //#pragma omp parallel for
        for (int item_id=0; item_id<num_items; item_id++)
        {
            int update_item = update_items[item_id];
            //W->row(update_item) += learning_rate * W_gradient.row(update_item);
            //b(update_item) += learning_rate * b_gradient(update_item);
            //UPDATE CLIPPING
            W->row(update_item) += (learning_rate *
                W_gradient.row(update_item)).array().unaryExpr(Clipper()).matrix();
            user_data_t update = learning_rate * b_gradient(update_item);
            b(update_item) += std::min(0.5, std::max(update,-0.5));
            //GRADIENT CLIPPING
            W_gradient.row(update_item).setZero();
            b_gradient(update_item) = 0.;
        }
        */
        //cerr<<"Finished gradient"<<endl;
    }
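    // The sparse update routines below first collect, via update_map /
    // update_items, the set of output words that actually appear in this
    // minibatch's samples, and then update only those rows of W (and entries
    // of b) together with their per-row Adagrad/Adadelta statistics; the
    // scratch gradients for those rows are re-zeroed afterwards.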
    template <typename DerivedIn, typename DerivedGOutI, typename DerivedGOutV>
    void computeGradientAdagrad(const MatrixBase<DerivedIn> &predicted_embeddings,
                                const MatrixBase<DerivedGOutI> &samples,
                                const MatrixBase<DerivedGOutV> &weights,
                                user_data_t learning_rate) //not sure if we want to use momentum here
    {
        //W_gradient.setZero(W->rows(), W->cols());
        //b_gradient.setZero(b.size());
        //FOR CLIPPING, WE DO NOT MULTIPLY THE GRADIENT WITH THE LEARNING RATE
        USCMatrix<user_data_t> gradient_output(W->rows(), samples, weights);
        uscgemm(1.0,
                gradient_output,
                predicted_embeddings.leftCols(samples.cols()).transpose(),
                W_gradient);
        uscgemv(1.0,
                gradient_output,
                Matrix<user_data_t,Dynamic,1>::Ones(weights.cols()),
                b_gradient);

        int_map update_map; //stores all the parameters that have been updated
        for (int sample_id=0; sample_id<samples.rows(); sample_id++)
            for (int train_id=0; train_id<samples.cols(); train_id++)
                update_map[samples(sample_id, train_id)] = 1;

        // Convert to std::vector for parallelization
        std::vector<int> update_items;
        for (int_map::iterator it = update_map.begin(); it != update_map.end(); ++it)
            update_items.push_back(it->first);
        int num_items = update_items.size();

        //#pragma omp parallel for
        for (int item_id=0; item_id<num_items; item_id++)
        {
            int update_item = update_items[item_id];
            W_running_gradient.row(update_item) += W_gradient.row(update_item).array().square().matrix();
            b_running_gradient(update_item) += b_gradient(update_item)*b_gradient(update_item);
            W->row(update_item) += learning_rate * (W_gradient.row(update_item).array() /
                W_running_gradient.row(update_item).array().sqrt()).matrix();
            b(update_item) += learning_rate * b_gradient(update_item) / sqrt(b_running_gradient(update_item));
            /*
            //UPDATE CLIPPING
            W->row(update_item) += (learning_rate * (W_gradient.row(update_item).array() /
                W_running_gradient.row(update_item).array().sqrt())).unaryExpr(Clipper()).matrix();
            user_data_t update = learning_rate * b_gradient(update_item) / sqrt(b_running_gradient(update_item));
            b(update_item) += Clipper(update); //std::min(0.5, std::max(update,-0.5));
            */
            W_gradient.row(update_item).setZero();
            b_gradient(update_item) = 0.;
        }
    }

    template <typename DerivedIn, typename DerivedGOutI, typename DerivedGOutV>
    void computeGradientAdadelta(const MatrixBase<DerivedIn> &predicted_embeddings,
                                 const MatrixBase<DerivedGOutI> &samples,
                                 const MatrixBase<DerivedGOutV> &weights,
                                 user_data_t learning_rate,
                                 user_data_t conditioning_constant,
                                 user_data_t decay) //not sure if we want to use momentum here
    {
        //cerr<<"decay is "<<decay<<endl;
        //W_gradient.setZero(W->rows(), W->cols());
        //b_gradient.setZero(b.size());
        USCMatrix<user_data_t> gradient_output(W->rows(), samples, weights);
        uscgemm(1.0,
                gradient_output,
                predicted_embeddings.leftCols(samples.cols()).transpose(),
                W_gradient);
        uscgemv(1.0,
                gradient_output,
                Matrix<user_data_t,Dynamic,1>::Ones(weights.cols()),
                b_gradient);

        int_map update_map; //stores all the parameters that have been updated
        for (int sample_id=0; sample_id<samples.rows(); sample_id++)
            for (int train_id=0; train_id<samples.cols(); train_id++)
                update_map[samples(sample_id, train_id)] = 1;

        // Convert to std::vector for parallelization
        std::vector<int> update_items;
        for (int_map::iterator it = update_map.begin(); it != update_map.end(); ++it)
            update_items.push_back(it->first);
        int num_items = update_items.size();

        #pragma omp parallel for
        for (int item_id=0; item_id<num_items; item_id++)
        {
            Array<user_data_t,1,Dynamic> W_current_parameter_update;
            user_data_t b_current_parameter_update;
            int update_item = update_items[item_id];
            W_running_gradient.row(update_item) = decay*W_running_gradient.row(update_item) +
                (1.-decay)*W_gradient.row(update_item).array().square().matrix();
            b_running_gradient(update_item) = decay*b_running_gradient(update_item) +
                (1.-decay)*b_gradient(update_item)*b_gradient(update_item);
            //cerr<<"Output: W gradient is "<<W_gradient.row(update_item)<<endl;
            W_current_parameter_update =
                ((W_running_parameter_update.row(update_item).array()+conditioning_constant).sqrt()/
                 (W_running_gradient.row(update_item).array()+conditioning_constant).sqrt())*
                W_gradient.row(update_item).array();
            b_current_parameter_update =
                (sqrt(b_running_parameter_update(update_item)+conditioning_constant)/
                 sqrt(b_running_gradient(update_item)+conditioning_constant))*
                b_gradient(update_item);
            W_running_parameter_update.row(update_item) = decay*W_running_parameter_update.row(update_item) +
                (1.-decay)*W_current_parameter_update.square().matrix();
            b_running_parameter_update(update_item) = decay*b_running_parameter_update(update_item) +
                (1.-decay)*b_current_parameter_update*b_current_parameter_update;
            W->row(update_item) += learning_rate*W_current_parameter_update.matrix();
            b(update_item) += learning_rate*b_current_parameter_update;
            W_gradient.row(update_item).setZero();
            b_gradient(update_item) = 0.;
        }
    }
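    // computeGradientCheck accumulates the same sparse gradient as the update
    // routines above, but writes it into the caller-supplied gradient_W /
    // gradient_b instead of applying it, so it can be compared against a
    // finite-difference estimate when checking gradients.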
    template <typename DerivedIn, typename DerivedGOutI, typename DerivedGOutV,
              typename DerivedGW, typename DerivedGb>
    void computeGradientCheck(const MatrixBase<DerivedIn> &predicted_embeddings,
                              const MatrixBase<DerivedGOutI> &samples,
                              const MatrixBase<DerivedGOutV> &weights,
                              const MatrixBase<DerivedGW> &gradient_W,
                              const MatrixBase<DerivedGb> &gradient_b) const
    {
        UNCONST(DerivedGW, gradient_W, my_gradient_W);
        UNCONST(DerivedGb, gradient_b, my_gradient_b);
        my_gradient_W.setZero();
        my_gradient_b.setZero();
        USCMatrix<user_data_t> gradient_output(W->rows(), samples, weights);
        uscgemm(1.0,
                gradient_output,
                predicted_embeddings.leftCols(samples.cols()).transpose(),
                my_gradient_W);
        uscgemv(1.0,
                gradient_output,
                Matrix<user_data_t,Dynamic,1>::Ones(weights.cols()),
                my_gradient_b);
    }
};

class Input_word_embeddings
{
private:
    Matrix<user_data_t,Dynamic,Dynamic,Eigen::RowMajor> *W;
    int context_size, vocab_size;
    Matrix<user_data_t,Dynamic,Dynamic,Eigen::RowMajor> W_running_gradient;
    Matrix<user_data_t,Dynamic,Dynamic,Eigen::RowMajor> W_running_parameter_update;
    Matrix<user_data_t,Dynamic,Dynamic,Eigen::RowMajor> W_gradient;

    friend class model;

public:
    Input_word_embeddings() : context_size(0), vocab_size(0) { }
    Input_word_embeddings(int rows, int cols, int context) { resize(rows, cols, context); }

    void set_W(Matrix<user_data_t,Dynamic,Dynamic,Eigen::RowMajor> *input_W) {
        W = input_W;
    }

    void resize(int rows, int cols, int context)
    {
        context_size = context;
        vocab_size = rows;
        W->setZero(rows, cols);
    }

    void zero(int output_id) { W->row(output_id).setZero(); }

    void read(std::ifstream &W_file) { readMatrix(W_file, *W); }
    void write(std::ofstream &W_file) { writeMatrix(*W, W_file); }

    template <typename Engine>
    void initialize(Engine &engine,
                    bool init_normal,
                    user_data_t init_range,
                    string &parameter_update,
                    user_data_t adagrad_epsilon)
    {
        W_gradient.setZero(W->rows(),W->cols());

        if (parameter_update == "ADA") {
            W_running_gradient = Matrix<user_data_t,Dynamic,Dynamic>::Ones(W->rows(),W->cols())*adagrad_epsilon;
            //W_gradient.setZero(W->rows(),W->cols());
        }
        if (parameter_update == "ADAD") {
            W_running_gradient.setZero(W->rows(),W->cols());
            //W_gradient.setZero(W->rows(),W->cols());
            W_running_parameter_update.setZero(W->rows(),W->cols());
        }
        initMatrix(engine, *W, init_normal, init_range);
    }

    int n_inputs() const { return -1; }
    int n_outputs() const { return W->cols() * context_size; }

    // set output_id's embedding to the weighted average of all embeddings
    template <typename Dist>
    void average(const Dist &dist, int output_id)
    {
        W->row(output_id).setZero();
        for (int i=0; i < W->rows(); i++)
            if (i != output_id)
                W->row(output_id) += dist.prob(i) * W->row(i);
    }

    template <typename DerivedIn, typename DerivedOut>
    void fProp(const MatrixBase<DerivedIn> &input,
               const MatrixBase<DerivedOut> &output) const
    {
        int embedding_dimension = W->cols();

        // W is vocab_size x embedding_dimension
        // input is ngram_size*vocab_size x minibatch_size
        // output is ngram_size*embedding_dimension x minibatch_size

        /*
        // Dense version:
        for (int ngram=0; ngram<context_size; ngram++)
            output.middleRows(ngram*embedding_dimension, embedding_dimension) =
                W.transpose() * input.middleRows(ngram*vocab_size, vocab_size);
        */

        UNCONST(DerivedOut, output, my_output);
        my_output.setZero();
        for (int ngram=0; ngram<context_size; ngram++)
        {
            // narrow the output block to input.cols() in case of a short minibatch
            uscgemm(1.0,
                    W->transpose(),
                    USCMatrix<user_data_t>(W->rows(), input.middleRows(ngram, 1),
                                           Matrix<user_data_t,1,Dynamic>::Ones(input.cols())),
                    my_output.block(ngram*embedding_dimension, 0, embedding_dimension, input.cols()));
        }
    }
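    // Illustration (values invented for this comment): with context_size = 2
    // and vocab_size = 5, munge() below maps an input column holding word
    // indexes [3, 1] to sparse indexes [3, 6] (1 + 1*5), each with value 1.0,
    // that is, a stacked one-hot encoding of the context positions.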
    // When model is premultiplied, this layer doesn't get used,
    // but this method is used to get the input into a sparse matrix.
    // Hopefully this can get eliminated someday
    template <typename DerivedIn, typename ScalarOut>
    void munge(const MatrixBase<DerivedIn> &input, USCMatrix<ScalarOut> &output) const
    {
        output.resize(vocab_size*context_size, context_size, input.cols());
        for (int i=0; i < context_size; i++)
            output.indexes.row(i).array() = input.row(i).array() + i*vocab_size;
        output.values.fill(1.0);
    }

    template <typename DerivedGOut, typename DerivedIn>
    void computeGradient(const MatrixBase<DerivedGOut> &bProp_input,
                         const MatrixBase<DerivedIn> &input_words,
                         user_data_t learning_rate, user_data_t momentum, user_data_t L2_reg)
    {
        int embedding_dimension = W->cols();

        // W is vocab_size x embedding_dimension
        // input is ngram_size*vocab_size x minibatch_size
        // bProp_input is ngram_size*embedding_dimension x minibatch_size

        /*
        // Dense version:
        for (int ngram=0; ngram<context_size; ngram++)
            W += learning_rate * input_words.middleRows(ngram*vocab_size, vocab_size) *
                 bProp_input.middleRows(ngram*embedding_dimension, embedding_dimension).transpose();
        */

        for (int ngram=0; ngram<context_size; ngram++)
        {
            uscgemm(learning_rate,
                    USCMatrix<user_data_t>(W->rows(), input_words.middleRows(ngram, 1),
                                           Matrix<user_data_t,1,Dynamic>::Ones(input_words.cols())),
                    bProp_input.block(ngram*embedding_dimension, 0, embedding_dimension, input_words.cols()).transpose(),
                    *W);
        }

        /*
        //IF WE WANT TO DO GRADIENT CLIPPING, THEN WE FIRST COMPUTE THE GRADIENT AND THEN
        //PERFORM CLIPPING WHILE UPDATING

        for (int ngram=0; ngram<context_size; ngram++)
        {
            uscgemm(1.0,
                    USCMatrix<user_data_t>(W->rows(), input_words.middleRows(ngram, 1),
                                           Matrix<user_data_t,1,Dynamic>::Ones(input_words.cols())),
                    bProp_input.block(ngram*embedding_dimension, 0, embedding_dimension, input_words.cols()).transpose(),
                    W_gradient);
        }

        int_map update_map; //stores all the parameters that have been updated
        for (int ngram=0; ngram<context_size; ngram++)
        {
            for (int train_id=0; train_id<input_words.cols(); train_id++)
            {
                update_map[input_words(ngram, train_id)] = 1;
            }
        }

        // Convert to std::vector for parallelization
        std::vector<int> update_items;
        for (int_map::iterator it = update_map.begin(); it != update_map.end(); ++it)
        {
            update_items.push_back(it->first);
        }
        int num_items = update_items.size();

        #pragma omp parallel for
        for (int item_id=0; item_id<num_items; item_id++)
        {
            int update_item = update_items[item_id];
            //UPDATE CLIPPING
            W->row(update_item) += (learning_rate*
                W_gradient.row(update_item).array().unaryExpr(Clipper())).matrix();
            //GRADIENT CLIPPING
            //W->row(update_item) += learning_rate*
            //    W_gradient.row(update_item).array().unaryExpr(Clipper()).matrix();
            //SETTING THE GRADIENT TO ZERO
            W_gradient.row(update_item).setZero();
        }
        */
    }

    template <typename DerivedGOut, typename DerivedIn>
    void computeGradientAdagrad(const MatrixBase<DerivedGOut> &bProp_input,
                                const MatrixBase<DerivedIn> &input_words,
                                user_data_t learning_rate,
                                user_data_t L2_reg)
    {
        int embedding_dimension = W->cols();
        //W_gradient.setZero(W->rows(), W->cols());
        /*
        if (W_running_gradient.rows() != W->rows() || W_running_gradient.cols() != W->cols())
            W_running_gradient = Ones(W->rows(), W->cols())*adagrad_epsilon;
        */
        for (int ngram=0; ngram<context_size; ngram++)
        {
            uscgemm(1.0,
                    USCMatrix<user_data_t>(W->rows(), input_words.middleRows(ngram, 1),
                                           Matrix<user_data_t,1,Dynamic>::Ones(input_words.cols())),
                    bProp_input.block(ngram*embedding_dimension, 0, embedding_dimension, input_words.cols()).transpose(),
                    W_gradient);
        }

        int_map update_map; //stores all the parameters that have been updated
        for (int ngram=0; ngram<context_size; ngram++)
        {
            for (int train_id=0; train_id<input_words.cols(); train_id++)
            {
                update_map[input_words(ngram, train_id)] = 1;
            }
        }

        // Convert to std::vector for parallelization
        std::vector<int> update_items;
        for (int_map::iterator it = update_map.begin(); it != update_map.end(); ++it)
        {
            update_items.push_back(it->first);
        }
        int num_items = update_items.size();

        #pragma omp parallel for
        for (int item_id=0; item_id<num_items; item_id++)
        {
            int update_item = update_items[item_id];
            W_running_gradient.row(update_item) += W_gradient.row(update_item).array().square().matrix();
            W->row(update_item) += learning_rate *
                (W_gradient.row(update_item).array() / W_running_gradient.row(update_item).array().sqrt()).matrix();
            /*
            //UPDATE CLIPPING
            W->row(update_item) += (learning_rate *
                (W_gradient.row(update_item).array() / W_running_gradient.row(update_item).array().sqrt()))
                .unaryExpr(Clipper()).matrix();
            */
            W_gradient.row(update_item).setZero();
        }
    }
    template <typename DerivedGOut, typename DerivedIn>
    void computeGradientAdadelta(const MatrixBase<DerivedGOut> &bProp_input,
                                 const MatrixBase<DerivedIn> &input_words,
                                 user_data_t learning_rate,
                                 user_data_t L2_reg,
                                 user_data_t conditioning_constant,
                                 user_data_t decay)
    {
        int embedding_dimension = W->cols();
        //W_gradient.setZero(W->rows(), W->cols());
        /*
        if (W_running_gradient.rows() != W->rows() || W_running_gradient.cols() != W->cols())
            W_running_gradient = Ones(W->rows(), W->cols())*adagrad_epsilon;
        */
        for (int ngram=0; ngram<context_size; ngram++)
        {
            uscgemm(1.0,
                    USCMatrix<user_data_t>(W->rows(), input_words.middleRows(ngram, 1),
                                           Matrix<user_data_t,1,Dynamic>::Ones(input_words.cols())),
                    bProp_input.block(ngram*embedding_dimension, 0, embedding_dimension, input_words.cols()).transpose(),
                    W_gradient);
        }

        int_map update_map; //stores all the parameters that have been updated
        for (int ngram=0; ngram<context_size; ngram++)
        {
            for (int train_id=0; train_id<input_words.cols(); train_id++)
            {
                update_map[input_words(ngram, train_id)] = 1;
            }
        }

        // Convert to std::vector for parallelization
        std::vector<int> update_items;
        for (int_map::iterator it = update_map.begin(); it != update_map.end(); ++it)
        {
            update_items.push_back(it->first);
        }
        int num_items = update_items.size();

        #pragma omp parallel for
        for (int item_id=0; item_id<num_items; item_id++)
        {
            Array<user_data_t,1,Dynamic> W_current_parameter_update;
            int update_item = update_items[item_id];
            W_running_gradient.row(update_item) = decay*W_running_gradient.row(update_item) +
                (1.-decay)*W_gradient.row(update_item).array().square().matrix();
            W_current_parameter_update =
                ((W_running_parameter_update.row(update_item).array()+conditioning_constant).sqrt()/
                 (W_running_gradient.row(update_item).array()+conditioning_constant).sqrt())*
                W_gradient.row(update_item).array();
            //cerr<<"Input: W current parameter update is "<<W_current_parameter_update<<endl;
            W_running_parameter_update.row(update_item) = decay*W_running_parameter_update.row(update_item) +
                (1.-decay)*W_current_parameter_update.square().matrix();
            W->row(update_item) += learning_rate*W_current_parameter_update.matrix();
            //cerr<<"Input: After update, W is "<<W->row(update_item)<<endl;
            W_gradient.row(update_item).setZero();
        }
    }

    template <typename DerivedGOut, typename DerivedIn, typename DerivedGW>
    void computeGradientCheck(const MatrixBase<DerivedGOut> &bProp_input,
                              const MatrixBase<DerivedIn> &input_words,
                              int x, int minibatch_size,
                              const MatrixBase<DerivedGW> &gradient) const //not sure if we want to use momentum here
    {
        UNCONST(DerivedGW, gradient, my_gradient);
        int embedding_dimension = W->cols();
        my_gradient.setZero();
        for (int ngram=0; ngram<context_size; ngram++)
            uscgemm(1.0,
                    USCMatrix<user_data_t>(W->rows(), input_words.middleRows(ngram, 1),
                                           Matrix<user_data_t,1,Dynamic>::Ones(input_words.cols())),
                    bProp_input.block(ngram*embedding_dimension, 0, embedding_dimension, input_words.cols()).transpose(),
                    my_gradient);
    }
};

} // namespace nplm