author    | Kenneth Heafield <github@kheafield.com> | 2015-03-26 18:57:41 +0300
committer | Kenneth Heafield <github@kheafield.com> | 2015-03-26 18:57:41 +0300
commit    | 206d0c969885817521e941eaec879517e39a5b59 (patch)
tree      | 822028342de47622850a545444b0b646a4afc2dd
parent    | 8b323abbca16619bfbaf32d5f9b2dcaf63457d53 (diff)
Make NPLM wrapper compile again. Fixes #102.
-rw-r--r-- | lm/Jamfile          |  6
-rw-r--r-- | lm/wrappers/nplm.cc | 57
-rw-r--r-- | lm/wrappers/nplm.hh |  4
3 files changed, 48 insertions, 19 deletions
diff --git a/lm/Jamfile b/lm/Jamfile
index 227b22014..0b5bbf259 100644
--- a/lm/Jamfile
+++ b/lm/Jamfile
@@ -16,10 +16,10 @@ max-order += <dependency>$(ORDER-LOG) ;
 wrappers = ;
 local with-nplm = [ option.get "with-nplm" ] ;
 if $(with-nplm) {
-  lib neuralLM : : <search>$(with-nplm)/src ;
+  lib nplm : : <search>$(with-nplm)/src ;
   obj nplm.o : wrappers/nplm.cc : <include>.. <include>$(with-nplm)/src <cxxflags>-fopenmp ;
-  alias nplm : nplm.o neuralLM ..//boost_thread : : : <cxxflags>-fopenmp <linkflags>-fopenmp <define>WITH_NPLM <library>..//boost_thread ;
-  wrappers += nplm ;
+  alias nplm-all : nplm.o nplm ..//boost_thread : : : <cxxflags>-fopenmp <linkflags>-fopenmp <define>WITH_NPLM <library>..//boost_thread ;
+  wrappers += nplm-all ;
 }
 
 fakelib kenlm : $(wrappers) [ glob *.cc : *main.cc *test.cc ] ../util//kenutil : <include>.. $(max-order) : : <include>.. $(max-order) ;
diff --git a/lm/wrappers/nplm.cc b/lm/wrappers/nplm.cc
index 70622bd2b..44fd75a83 100644
--- a/lm/wrappers/nplm.cc
+++ b/lm/wrappers/nplm.cc
@@ -21,6 +21,26 @@ WordIndex Vocabulary::Index(const std::string &str) const {
   return vocab_.lookup_word(str);
 }
 
+class Backend {
+  public:
+    Backend(const nplm::neuralLM &from, const std::size_t cache_size) : lm_(from), ngram_(from.get_order()) {
+      lm_.set_cache(cache_size);
+    }
+
+    nplm::neuralLM &LM() { return lm_; }
+    const nplm::neuralLM &LM() const { return lm_; }
+
+    Eigen::Matrix<int,Eigen::Dynamic,1> &staging_ngram() { return ngram_; }
+
+    double lookup_from_staging() { return lm_.lookup_ngram(ngram_); }
+
+    int order() const { return lm_.get_order(); }
+
+  private:
+    nplm::neuralLM lm_;
+    Eigen::Matrix<int,Eigen::Dynamic,1> ngram_;
+};
+
 bool Model::Recognize(const std::string &name) {
   try {
     util::scoped_fd file(util::OpenReadOrThrow(name.c_str()));
@@ -31,10 +51,18 @@ bool Model::Recognize(const std::string &name) {
   } catch (const util::Exception &) {
     return false;
   }
-}
+}
+
+namespace {
+nplm::neuralLM *LoadNPLM(const std::string &file) {
+  util::scoped_ptr<nplm::neuralLM> ret(new nplm::neuralLM());
+  ret->read(file);
+  return ret.release();
+}
+} // namespace
 
 Model::Model(const std::string &file, std::size_t cache)
-  : base_instance_(new nplm::neuralLM(file)), vocab_(base_instance_->get_vocabulary()), cache_size_(cache) {
+  : base_instance_(LoadNPLM(file)), vocab_(base_instance_->get_vocabulary()), cache_size_(cache) {
   UTIL_THROW_IF(base_instance_->get_order() > NPLM_MAX_ORDER, util::Exception, "This NPLM has order " << (unsigned int)base_instance_->get_order() << " but the KenLM wrapper was compiled with " << NPLM_MAX_ORDER << ". Change the definition of NPLM_MAX_ORDER and recompile.");
   // log10 compatible with backoff models.
   base_instance_->set_log_base(10.0);
@@ -49,26 +77,25 @@ Model::Model(const std::string &file, std::size_t cache)
 Model::~Model() {}
 
 FullScoreReturn Model::FullScore(const State &from, const WordIndex new_word, State &out_state) const {
-  nplm::neuralLM *lm = backend_.get();
-  if (!lm) {
-    lm = new nplm::neuralLM(*base_instance_);
-    backend_.reset(lm);
-    lm->set_cache(cache_size_);
+  Backend *backend = backend_.get();
+  if (!backend) {
+    backend = new Backend(*base_instance_, cache_size_);
+    backend_.reset(backend);
   }
   // State is in natural word order.
   FullScoreReturn ret;
-  for (int i = 0; i < lm->get_order() - 1; ++i) {
-    lm->staging_ngram()(i) = from.words[i];
+  for (int i = 0; i < backend->order() - 1; ++i) {
+    backend->staging_ngram()(i) = from.words[i];
   }
-  lm->staging_ngram()(lm->get_order() - 1) = new_word;
-  ret.prob = lm->lookup_from_staging();
+  backend->staging_ngram()(backend->order() - 1) = new_word;
+  ret.prob = backend->lookup_from_staging();
   // Always say full order.
-  ret.ngram_length = lm->get_order();
+  ret.ngram_length = backend->order();
   // Shift everything down by one.
-  memcpy(out_state.words, from.words + 1, sizeof(WordIndex) * (lm->get_order() - 2));
-  out_state.words[lm->get_order() - 2] = new_word;
+  memcpy(out_state.words, from.words + 1, sizeof(WordIndex) * (backend->order() - 2));
+  out_state.words[backend->order() - 2] = new_word;
   // Fill in trailing words with zeros so state comparison works.
-  memset(out_state.words + lm->get_order() - 1, 0, sizeof(WordIndex) * (NPLM_MAX_ORDER - lm->get_order()));
+  memset(out_state.words + backend->order() - 1, 0, sizeof(WordIndex) * (NPLM_MAX_ORDER - backend->order()));
   return ret;
 }
diff --git a/lm/wrappers/nplm.hh b/lm/wrappers/nplm.hh
index b7dd4a21e..416281de2 100644
--- a/lm/wrappers/nplm.hh
+++ b/lm/wrappers/nplm.hh
@@ -49,6 +49,8 @@ struct State {
   WordIndex words[NPLM_MAX_ORDER - 1];
 };
 
+class Backend;
+
 class Model : public lm::base::ModelFacade<Model, State, Vocabulary> {
   private:
     typedef lm::base::ModelFacade<Model, State, Vocabulary> P;
@@ -68,7 +70,7 @@ class Model : public lm::base::ModelFacade<Model, State, Vocabulary> {
 
   private:
     boost::scoped_ptr<nplm::neuralLM> base_instance_;
-    mutable boost::thread_specific_ptr<nplm::neuralLM> backend_;
+    mutable boost::thread_specific_ptr<Backend> backend_;
 
     Vocabulary vocab_;
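The heart of the change is the thread-local Backend: each thread lazily copies the shared, read-only base model and keeps its own cache and staging buffer, so FullScore needs no locking. Below is a minimal sketch of that pattern in isolation. FakeLM is a hypothetical stand-in for nplm::neuralLM, and none of these names are part of the KenLM or NPLM APIs.

// Sketch of the lazy per-thread backend pattern used above (assumes Boost.Thread).
// FakeLM is a made-up placeholder; only the thread_specific_ptr idiom is the point.
#include <boost/scoped_ptr.hpp>
#include <boost/thread/tss.hpp>
#include <cstddef>
#include <iostream>

// Hypothetical stand-in for nplm::neuralLM: copyable, with per-copy mutable state.
class FakeLM {
  public:
    explicit FakeLM(int order) : order_(order) {}
    int order() const { return order_; }
  private:
    int order_;
};

// Owns one private copy of the model plus this thread's scratch state,
// mirroring the Backend class added in lm/wrappers/nplm.cc.
class Backend {
  public:
    Backend(const FakeLM &from, std::size_t cache_size)
      : lm_(from), cache_size_(cache_size) {}
    int order() const { return lm_.order(); }
  private:
    FakeLM lm_;
    std::size_t cache_size_;
};

class Model {
  public:
    explicit Model(int order) : base_instance_(new FakeLM(order)), cache_size_(1 << 20) {}

    // First call on each thread copies the shared base instance into a
    // thread-local Backend; thread_specific_ptr frees it at thread exit.
    int Order() const {
      Backend *backend = backend_.get();
      if (!backend) {
        backend = new Backend(*base_instance_, cache_size_);
        backend_.reset(backend);
      }
      return backend->order();
    }

  private:
    boost::scoped_ptr<FakeLM> base_instance_;             // shared, never mutated after construction
    mutable boost::thread_specific_ptr<Backend> backend_; // one copy per calling thread
    std::size_t cache_size_;
};

int main() {
  Model m(5);
  std::cout << m.Order() << "\n";  // prints 5; the Backend is built on first use
  return 0;
}

The copy in Backend's constructor is the design point: the mutable pieces (the cache and the staging n-gram in the real wrapper) live per thread, so concurrent lookups cannot race, while the base instance stays untouched after construction.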