github.com/moses-smt/mosesdecoder.git
path: root/lm
author     jiejiang <mail.jie.jiang@gmail.com>   2014-01-15 22:16:56 +0400
committer  jiejiang <mail.jie.jiang@gmail.com>   2014-01-15 22:16:56 +0400
commit     5f1217d793d5928a33c3d240e31e1164b8e2d083 (patch)
tree       cd68390712101e6dae3ee82f65eb353e7ccd0dfc /lm
parent     744376b3fbebc41c4a270bf549826d5eb9219ae0 (diff)
parent     df30085bbe8f514294668ce94f0ca4fe218362fe (diff)

merged upstream with origin for mingw
Diffstat (limited to 'lm')
-rw-r--r--  lm/build_binary_main.cc      7
-rw-r--r--  lm/builder/corpus_count.cc   6
-rw-r--r--  lm/builder/lmplz_main.cc    17
-rw-r--r--  lm/builder/pipeline.cc       1
-rw-r--r--  lm/facade.hh                19
-rw-r--r--  lm/filter/wrapper.hh        10
-rw-r--r--  lm/ngram_query.hh           17
-rw-r--r--  lm/query_main.cc            51
-rw-r--r--  lm/read_arpa.cc              2
-rw-r--r--  lm/search_trie.cc           18
-rw-r--r--  lm/state.hh                  4
-rw-r--r--  lm/virtual_interface.hh      3
12 files changed, 102 insertions, 53 deletions
diff --git a/lm/build_binary_main.cc b/lm/build_binary_main.cc
index 425a12342..15b421e9f 100644
--- a/lm/build_binary_main.cc
+++ b/lm/build_binary_main.cc
@@ -52,6 +52,7 @@ void Usage(const char *name, const char *default_mem) {
"-a compresses pointers using an array of offsets. The parameter is the\n"
" maximum number of bits encoded by the array. Memory is minimized subject\n"
" to the maximum, so pick 255 to minimize memory.\n\n"
+"-h print this help message.\n\n"
"Get a memory estimate by passing an ARPA file without an output file name.\n";
exit(1);
}
@@ -104,12 +105,15 @@ int main(int argc, char *argv[]) {
const char *default_mem = util::GuessPhysicalMemory() ? "80%" : "1G";
+ if (argc == 2 && !strcmp(argv[1], "--help"))
+ Usage(argv[0], default_mem);
+
try {
bool quantize = false, set_backoff_bits = false, bhiksha = false, set_write_method = false, rest = false;
lm::ngram::Config config;
config.building_memory = util::ParseSize(default_mem);
int opt;
- while ((opt = getopt(argc, argv, "q:b:a:u:p:t:T:m:S:w:sir:")) != -1) {
+ while ((opt = getopt(argc, argv, "q:b:a:u:p:t:T:m:S:w:sir:h")) != -1) {
switch(opt) {
case 'q':
config.prob_bits = ParseBitCount(optarg);
@@ -161,6 +165,7 @@ int main(int argc, char *argv[]) {
ParseFileList(optarg, config.rest_lower_files);
config.rest_function = Config::REST_LOWER;
break;
+ case 'h': // help
default:
Usage(argv[0], default_mem);
}
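
The -h flag reuses the existing fall-through to Usage(): an explicit help request and an unknown option take the same exit path, and the long form --help is caught before getopt() ever runs, since getopt() only understands short options. A minimal standalone sketch of the same pattern (the option set here is illustrative, not build_binary's full table):

    #include <unistd.h>    // getopt
    #include <cstring>     // strcmp
    #include <cstdlib>     // exit
    #include <iostream>

    static void Usage(const char *name) {
      std::cerr << "Usage: " << name << " [-h] [-q bits] input\n";
      std::exit(1);
    }

    int main(int argc, char *argv[]) {
      // getopt only sees short options, so --help must be caught up front.
      if (argc == 2 && !std::strcmp(argv[1], "--help")) Usage(argv[0]);
      int opt;
      while ((opt = getopt(argc, argv, "q:h")) != -1) {
        switch (opt) {
          case 'q':
            /* consume optarg here */
            break;
          case 'h':  // explicit help request...
          default:   // ...and unknown options share the same exit path.
            Usage(argv[0]);
        }
      }
      return 0;
    }
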
diff --git a/lm/builder/corpus_count.cc b/lm/builder/corpus_count.cc
index 3edd3216a..6ad91dde7 100644
--- a/lm/builder/corpus_count.cc
+++ b/lm/builder/corpus_count.cc
@@ -238,12 +238,14 @@ void CorpusCount::Run(const util::stream::ChainPosition &position) {
const WordIndex end_sentence = vocab.Lookup("</s>");
Writer writer(NGram::OrderFromSize(position.GetChain().EntrySize()), position, dedupe_mem_.get(), dedupe_mem_size_);
uint64_t count = 0;
- StringPiece delimiters("\0\t\r ", 4);
+ bool delimiters[256];
+ memset(delimiters, 0, sizeof(delimiters));
+ delimiters['\0'] = delimiters['\t'] = delimiters['\n'] = delimiters['\r'] = delimiters[' '] = true;
try {
while(true) {
StringPiece line(from_.ReadLine());
writer.StartSentence();
- for (util::TokenIter<util::AnyCharacter, true> w(line, delimiters); w; ++w) {
+ for (util::TokenIter<util::BoolCharacter, true> w(line, delimiters); w; ++w) {
WordIndex word = vocab.Lookup(*w);
UTIL_THROW_IF(word <= 2, FormatLoadException, "Special word " << *w << " is not allowed in the corpus. I plan to support models containing <unk> in the future.");
writer.Append(word);
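
The tokenizer change replaces a delimiter-string scan (util::AnyCharacter) with a 256-entry boolean table (util::BoolCharacter), so the per-byte delimiter test becomes a single array index instead of a search over the delimiter set. A self-contained sketch of the idea, without the KenLM TokenIter machinery:

    #include <cstring>
    #include <string>
    #include <vector>

    // Split on any byte marked true in the table; one array index per
    // character instead of a strchr-style scan over the delimiter set.
    std::vector<std::string> Split(const std::string &line) {
      bool delim[256];
      std::memset(delim, 0, sizeof(delim));
      delim['\0'] = delim['\t'] = delim['\n'] = delim['\r'] = delim[' '] = true;
      std::vector<std::string> out;
      std::string current;
      for (unsigned char c : line) {
        if (delim[c]) {
          if (!current.empty()) { out.push_back(current); current.clear(); }
        } else {
          current += c;
        }
      }
      if (!current.empty()) out.push_back(current);
      return out;
    }
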
diff --git a/lm/builder/lmplz_main.cc b/lm/builder/lmplz_main.cc
index 2e3002d12..2563deed8 100644
--- a/lm/builder/lmplz_main.cc
+++ b/lm/builder/lmplz_main.cc
@@ -36,6 +36,7 @@ int main(int argc, char *argv[]) {
std::string text, arpa;
options.add_options()
+ ("help", po::bool_switch(), "Show this help message")
("order,o", po::value<std::size_t>(&pipeline.order)
#if BOOST_VERSION >= 104200
->required()
@@ -52,7 +53,10 @@ int main(int argc, char *argv[]) {
("verbose_header", po::bool_switch(&pipeline.verbose_header), "Add a verbose header to the ARPA file that includes information such as token count, smoothing type, etc.")
("text", po::value<std::string>(&text), "Read text from a file instead of stdin")
("arpa", po::value<std::string>(&arpa), "Write ARPA to a file instead of stdout");
- if (argc == 1) {
+ po::variables_map vm;
+ po::store(po::parse_command_line(argc, argv, options), vm);
+
+ if (argc == 1 || vm["help"].as<bool>()) {
std::cerr <<
"Builds unpruned language models with modified Kneser-Ney smoothing.\n\n"
"Please cite:\n"
@@ -70,12 +74,17 @@ int main(int argc, char *argv[]) {
"setting the temporary file location (-T) and sorting memory (-S) is recommended.\n\n"
"Memory sizes are specified like GNU sort: a number followed by a unit character.\n"
"Valid units are \% for percentage of memory (supported platforms only) and (in\n"
- "increasing powers of 1024): b, K, M, G, T, P, E, Z, Y. Default is K (*1024).\n\n";
+ "increasing powers of 1024): b, K, M, G, T, P, E, Z, Y. Default is K (*1024).\n";
+ uint64_t mem = util::GuessPhysicalMemory();
+ if (mem) {
+ std::cerr << "This machine has " << mem << " bytes of memory.\n\n";
+ } else {
+ std::cerr << "Unable to determine the amount of memory on this machine.\n\n";
+ }
std::cerr << options << std::endl;
return 1;
}
- po::variables_map vm;
- po::store(po::parse_command_line(argc, argv, options), vm);
+
po::notify(vm);
// required() appeared in Boost 1.42.0.
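
The reordering here is the point: po::store(po::parse_command_line(...)) must run before the help check so that vm["help"] is populated, while po::notify(vm), which enforces required() options, must stay after it; otherwise a bare --help would be rejected for missing --order. A minimal sketch of that ordering with Boost.Program_options:

    #include <boost/program_options.hpp>
    #include <cstddef>
    #include <iostream>

    namespace po = boost::program_options;

    int main(int argc, char *argv[]) {
      std::size_t order;
      po::options_description options("Options");
      options.add_options()
        ("help", po::bool_switch(), "Show this help message")
        ("order,o", po::value<std::size_t>(&order)->required(), "Model order");
      po::variables_map vm;
      po::store(po::parse_command_line(argc, argv, options), vm);
      // Check --help before notify(): notify() enforces required() and
      // would otherwise reject a plain "--help" for lacking --order.
      if (argc == 1 || vm["help"].as<bool>()) {
        std::cerr << options << std::endl;
        return 1;
      }
      po::notify(vm);
      return 0;
    }
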
diff --git a/lm/builder/pipeline.cc b/lm/builder/pipeline.cc
index b89ea6ba5..44a2313c2 100644
--- a/lm/builder/pipeline.cc
+++ b/lm/builder/pipeline.cc
@@ -226,6 +226,7 @@ void CountText(int text_file /* input */, int vocab_file /* output */, Master &m
util::stream::Sort<SuffixOrder, AddCombiner> sorter(chain, config.sort, SuffixOrder(config.order), AddCombiner());
chain.Wait(true);
+ std::cerr << "Unigram tokens " << token_count << " types " << type_count << std::endl;
std::cerr << "=== 2/5 Calculating and sorting adjusted counts ===" << std::endl;
master.InitForAdjust(sorter, type_count);
}
diff --git a/lm/facade.hh b/lm/facade.hh
index 8b1860176..760e839e0 100644
--- a/lm/facade.hh
+++ b/lm/facade.hh
@@ -16,11 +16,6 @@ template <class Child, class StateT, class VocabularyT> class ModelFacade : publ
typedef StateT State;
typedef VocabularyT Vocabulary;
- // Default Score function calls FullScore. Model can override this.
- float Score(const State &in_state, const WordIndex new_word, State &out_state) const {
- return static_cast<const Child*>(this)->FullScore(in_state, new_word, out_state).prob;
- }
-
/* Translate from void* to State */
FullScoreReturn FullScore(const void *in_state, const WordIndex new_word, void *out_state) const {
return static_cast<const Child*>(this)->FullScore(
@@ -28,6 +23,20 @@ template <class Child, class StateT, class VocabularyT> class ModelFacade : publ
new_word,
*reinterpret_cast<State*>(out_state));
}
+
+ FullScoreReturn FullScoreForgotState(const WordIndex *context_rbegin, const WordIndex *context_rend, const WordIndex new_word, void *out_state) const {
+ return static_cast<const Child*>(this)->FullScoreForgotState(
+ context_rbegin,
+ context_rend,
+ new_word,
+ *reinterpret_cast<State*>(out_state));
+ }
+
+ // Default Score function calls FullScore. Model can override this.
+ float Score(const State &in_state, const WordIndex new_word, State &out_state) const {
+ return static_cast<const Child*>(this)->FullScore(in_state, new_word, out_state).prob;
+ }
+
float Score(const void *in_state, const WordIndex new_word, void *out_state) const {
return static_cast<const Child*>(this)->Score(
*reinterpret_cast<const State*>(in_state),
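
ModelFacade is a CRTP base: the untyped void* entry points cast to the concrete State type and forward to the derived model at compile time, and the moved block gives FullScoreForgotState the same treatment as FullScore. A stripped-down sketch of the dispatch shape (types abbreviated; not the real KenLM signatures):

    #include <iostream>

    struct Result { float prob; };

    // CRTP: the base knows the derived type at compile time and forwards
    // to it without virtual dispatch.
    template <class Child, class StateT> class Facade {
     public:
      typedef StateT State;

      // Untyped entry point: translate void* to State, then delegate.
      Result FullScore(const void *in, int word, void *out) const {
        return static_cast<const Child*>(this)->FullScore(
            *reinterpret_cast<const State*>(in), word,
            *reinterpret_cast<State*>(out));
      }

      // Default Score is just FullScore().prob; a Child may shadow it
      // with something cheaper.
      float Score(const State &in, int word, State &out) const {
        return static_cast<const Child*>(this)->FullScore(in, word, out).prob;
      }
    };

    struct MyState { int history; };

    struct MyModel : Facade<MyModel, MyState> {
      using Facade<MyModel, MyState>::FullScore;  // keep the void* overload visible
      Result FullScore(const MyState &, int, MyState &) const {
        return Result{-1.5f};  // a real model would look the n-gram up here
      }
    };

    int main() {
      MyModel model;
      MyState in = {0}, out = {0};
      std::cout << model.Score(in, 7, out) << '\n';  // prints -1.5
    }
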
diff --git a/lm/filter/wrapper.hh b/lm/filter/wrapper.hh
index 90b07a08f..eb6575010 100644
--- a/lm/filter/wrapper.hh
+++ b/lm/filter/wrapper.hh
@@ -39,17 +39,15 @@ template <class FilterT> class ContextFilter {
explicit ContextFilter(Filter &backend) : backend_(backend) {}
template <class Output> void AddNGram(const StringPiece &ngram, const StringPiece &line, Output &output) {
- pieces_.clear();
- // TODO: this copy could be avoided by a lookahead iterator.
- std::copy(util::TokenIter<util::SingleCharacter, true>(ngram, ' '), util::TokenIter<util::SingleCharacter, true>::end(), std::back_insert_iterator<std::vector<StringPiece> >(pieces_));
- backend_.AddNGram(pieces_.begin(), pieces_.end() - !pieces_.empty(), line, output);
+ // Find beginning of string or last space.
+ const char *last_space;
+ for (last_space = ngram.data() + ngram.size() - 1; last_space > ngram.data() && *last_space != ' '; --last_space) {}
+ backend_.AddNGram(StringPiece(ngram.data(), last_space - ngram.data()), line, output);
}
void Flush() const {}
private:
- std::vector<StringPiece> pieces_;
-
Filter backend_;
};
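
The rewrite drops the token vector entirely: the backend only needs the n-gram with its final word removed, so scanning backwards to the last space and passing a shorter StringPiece avoids both the copy and the allocation that the old TODO complained about. The same backwards scan, sketched standalone with std::string in place of StringPiece:

    #include <string>
    #include <iostream>

    // Return the n-gram with its last token removed, without tokenizing:
    // walk backwards to the last space (or the start of the string).
    // A single-token input yields the empty string, i.e. an empty context.
    std::string DropLastToken(const std::string &ngram) {
      if (ngram.empty()) return ngram;
      const char *begin = ngram.data();
      const char *last_space = begin + ngram.size() - 1;
      while (last_space > begin && *last_space != ' ') --last_space;
      return std::string(begin, last_space - begin);
    }

    int main() {
      std::cout << DropLastToken("in the beginning") << '\n';  // "in the"
    }
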
diff --git a/lm/ngram_query.hh b/lm/ngram_query.hh
index dfcda170e..ec2590f41 100644
--- a/lm/ngram_query.hh
+++ b/lm/ngram_query.hh
@@ -11,21 +11,25 @@
#include <istream>
#include <string>
+#include <math.h>
+
namespace lm {
namespace ngram {
template <class Model> void Query(const Model &model, bool sentence_context, std::istream &in_stream, std::ostream &out_stream) {
- std::cerr << "Loading statistics:\n";
- util::PrintUsage(std::cerr);
typename Model::State state, out;
lm::FullScoreReturn ret;
std::string word;
+ double corpus_total = 0.0;
+ uint64_t corpus_oov = 0;
+ uint64_t corpus_tokens = 0;
+
while (in_stream) {
state = sentence_context ? model.BeginSentenceState() : model.NullContextState();
float total = 0.0;
bool got = false;
- unsigned int oov = 0;
+ uint64_t oov = 0;
while (in_stream >> word) {
got = true;
lm::WordIndex vocab = model.GetVocabulary().Index(word);
@@ -33,6 +37,7 @@ template <class Model> void Query(const Model &model, bool sentence_context, std
ret = model.FullScore(state, vocab, out);
total += ret.prob;
out_stream << word << '=' << vocab << ' ' << static_cast<unsigned int>(ret.ngram_length) << ' ' << ret.prob << '\t';
+ ++corpus_tokens;
state = out;
char c;
while (true) {
@@ -50,12 +55,14 @@ template <class Model> void Query(const Model &model, bool sentence_context, std
if (sentence_context) {
ret = model.FullScore(state, model.GetVocabulary().EndSentence(), out);
total += ret.prob;
+ ++corpus_tokens;
out_stream << "</s>=" << model.GetVocabulary().EndSentence() << ' ' << static_cast<unsigned int>(ret.ngram_length) << ' ' << ret.prob << '\t';
}
out_stream << "Total: " << total << " OOV: " << oov << '\n';
+ corpus_total += total;
+ corpus_oov += oov;
}
- std::cerr << "After queries:\n";
- util::PrintUsage(std::cerr);
+ out_stream << "Perplexity " << pow(10.0, -(corpus_total / static_cast<double>(corpus_tokens))) << std::endl;
}
template <class M> void Query(const char *file, bool sentence_context, std::istream &in_stream, std::ostream &out_stream) {
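
The new corpus totals feed the standard perplexity formula: with log10 probabilities, perplexity is 10 raised to the negated average log probability per token. A small worked example with invented numbers:

    #include <cmath>
    #include <stdint.h>
    #include <iostream>

    int main() {
      // Suppose the model assigned these log10 probabilities to 4 tokens.
      double corpus_total = -2.0 + -1.5 + -3.0 + -1.5;  // sum of log10 p = -8.0
      uint64_t corpus_tokens = 4;
      // Perplexity: 10^(-average log10 probability) = 10^2 = 100.
      double ppl = pow(10.0, -(corpus_total / static_cast<double>(corpus_tokens)));
      std::cout << "Perplexity " << ppl << std::endl;
    }
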
diff --git a/lm/query_main.cc b/lm/query_main.cc
index 27d3a1a56..bd4fde62f 100644
--- a/lm/query_main.cc
+++ b/lm/query_main.cc
@@ -1,42 +1,65 @@
#include "lm/ngram_query.hh"
+#ifdef WITH_NPLM
+#include "lm/wrappers/nplm.hh"
+#endif
+
+#include <stdlib.h>
+
+void Usage(const char *name) {
+ std::cerr << "KenLM was compiled with maximum order " << KENLM_MAX_ORDER << "." << std::endl;
+ std::cerr << "Usage: " << name << " [-n] lm_file" << std::endl;
+ std::cerr << "Input is wrapped in <s> and </s> unless -n is passed." << std::endl;
+ exit(1);
+}
+
int main(int argc, char *argv[]) {
- if (!(argc == 2 || (argc == 3 && !strcmp(argv[2], "null")))) {
- std::cerr << "KenLM was compiled with maximum order " << KENLM_MAX_ORDER << "." << std::endl;
- std::cerr << "Usage: " << argv[0] << " lm_file [null]" << std::endl;
- std::cerr << "Input is wrapped in <s> and </s> unless null is passed." << std::endl;
- return 1;
+ bool sentence_context = true;
+ const char *file = NULL;
+ for (char **arg = argv + 1; arg != argv + argc; ++arg) {
+ if (!strcmp(*arg, "-n")) {
+ sentence_context = false;
+ } else if (!strcmp(*arg, "-h") || !strcmp(*arg, "--help") || file) {
+ Usage(argv[0]);
+ } else {
+ file = *arg;
+ }
}
+ if (!file) Usage(argv[0]);
try {
- bool sentence_context = (argc == 2);
using namespace lm::ngram;
ModelType model_type;
- if (RecognizeBinary(argv[1], model_type)) {
+ if (RecognizeBinary(file, model_type)) {
switch(model_type) {
case PROBING:
- Query<lm::ngram::ProbingModel>(argv[1], sentence_context, std::cin, std::cout);
+ Query<lm::ngram::ProbingModel>(file, sentence_context, std::cin, std::cout);
break;
case REST_PROBING:
- Query<lm::ngram::RestProbingModel>(argv[1], sentence_context, std::cin, std::cout);
+ Query<lm::ngram::RestProbingModel>(file, sentence_context, std::cin, std::cout);
break;
case TRIE:
- Query<TrieModel>(argv[1], sentence_context, std::cin, std::cout);
+ Query<TrieModel>(file, sentence_context, std::cin, std::cout);
break;
case QUANT_TRIE:
- Query<QuantTrieModel>(argv[1], sentence_context, std::cin, std::cout);
+ Query<QuantTrieModel>(file, sentence_context, std::cin, std::cout);
break;
case ARRAY_TRIE:
- Query<ArrayTrieModel>(argv[1], sentence_context, std::cin, std::cout);
+ Query<ArrayTrieModel>(file, sentence_context, std::cin, std::cout);
break;
case QUANT_ARRAY_TRIE:
- Query<QuantArrayTrieModel>(argv[1], sentence_context, std::cin, std::cout);
+ Query<QuantArrayTrieModel>(file, sentence_context, std::cin, std::cout);
break;
default:
std::cerr << "Unrecognized kenlm model type " << model_type << std::endl;
abort();
}
+#ifdef WITH_NPLM
+ } else if (lm::np::Model::Recognize(file)) {
+ lm::np::Model model(file);
+ Query(model, sentence_context, std::cin, std::cout);
+#endif
} else {
- Query<ProbingModel>(argv[1], sentence_context, std::cin, std::cout);
+ Query<ProbingModel>(file, sentence_context, std::cin, std::cout);
}
std::cerr << "Total time including destruction:\n";
util::PrintUsage(std::cerr);
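
The hand-rolled argument loop has one subtle trick: a second positional argument trips the same branch as -h/--help (because file is already set), so "query model1 model2" prints usage instead of silently ignoring the extra name. The same loop, reduced to a standalone sketch:

    #include <cstring>
    #include <cstdio>

    int main(int argc, char *argv[]) {
      bool sentence_context = true;
      const char *file = 0;
      for (char **arg = argv + 1; arg != argv + argc; ++arg) {
        if (!std::strcmp(*arg, "-n")) {
          sentence_context = false;
        } else if (!std::strcmp(*arg, "-h") || file) {
          // Help requests and extra positional arguments both land here.
          std::fprintf(stderr, "Usage: %s [-n] lm_file\n", argv[0]);
          return 1;
        } else {
          file = *arg;
        }
      }
      if (!file) { std::fprintf(stderr, "Usage: %s [-n] lm_file\n", argv[0]); return 1; }
      std::printf("file=%s sentence_context=%d\n", file, (int)sentence_context);
      return 0;
    }
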
diff --git a/lm/read_arpa.cc b/lm/read_arpa.cc
index 5ccba7147..fb8bbfa28 100644
--- a/lm/read_arpa.cc
+++ b/lm/read_arpa.cc
@@ -150,7 +150,7 @@ void PositiveProbWarn::Warn(float prob) {
case THROW_UP:
UTIL_THROW(FormatLoadException, "Positive log probability " << prob << " in the model. This is a bug in IRSTLM; you can set config.positive_log_probability = SILENT or pass -i to build_binary to substitute 0.0 for the log probability. Error");
case COMPLAIN:
- std::cerr << "There's a positive log probability " << prob << " in the APRA file, probably because of a bug in IRSTLM. This and subsequent entires will be mapepd to 0 log probability." << std::endl;
+ std::cerr << "There's a positive log probability " << prob << " in the ARPA file, probably because of a bug in IRSTLM. This and subsequent entries will be mapped to 0 log probability." << std::endl;

action_ = SILENT;
break;
case SILENT:
diff --git a/lm/search_trie.cc b/lm/search_trie.cc
index 1b0d9b263..27605e548 100644
--- a/lm/search_trie.cc
+++ b/lm/search_trie.cc
@@ -253,11 +253,6 @@ class FindBlanks {
++counts_.back();
}
- // Unigrams wrote one past.
- void Cleanup() {
- --counts_[0];
- }
-
const std::vector<uint64_t> &Counts() const {
return counts_;
}
@@ -310,8 +305,6 @@ template <class Quant, class Bhiksha> class WriteEntries {
typename Quant::LongestPointer(quant_, longest_.Insert(words[order_ - 1])).Write(reinterpret_cast<const Prob*>(words + order_)->prob);
}
- void Cleanup() {}
-
private:
RecordReader *contexts_;
const Quant &quant_;
@@ -385,14 +378,14 @@ template <class Doing> void RecursiveInsert(const unsigned char total_order, con
util::ErsatzProgress progress(unigram_count + 1, progress_out, message);
WordIndex unigram = 0;
std::priority_queue<Gram> grams;
- grams.push(Gram(&unigram, 1));
+ if (unigram_count) grams.push(Gram(&unigram, 1));
for (unsigned char i = 2; i <= total_order; ++i) {
if (input[i-2]) grams.push(Gram(reinterpret_cast<const WordIndex*>(input[i-2].Data()), i));
}
BlankManager<Doing> blank(total_order, doing);
- while (true) {
+ while (!grams.empty()) {
Gram top = grams.top();
grams.pop();
unsigned char order = top.end - top.begin;
@@ -400,8 +393,7 @@ template <class Doing> void RecursiveInsert(const unsigned char total_order, con
blank.Visit(&unigram, 1, doing.UnigramProb(unigram));
doing.Unigram(unigram);
progress.Set(unigram);
- if (++unigram == unigram_count + 1) break;
- grams.push(top);
+ if (++unigram < unigram_count) grams.push(top);
} else {
if (order == total_order) {
blank.Visit(top.begin, order, reinterpret_cast<const Prob*>(top.end)->prob);
@@ -414,8 +406,6 @@ template <class Doing> void RecursiveInsert(const unsigned char total_order, con
if (++reader) grams.push(top);
}
}
- assert(grams.empty());
- doing.Cleanup();
}
void SanityCheckCounts(const std::vector<uint64_t> &initial, const std::vector<uint64_t> &fixed) {
@@ -524,6 +514,8 @@ template <class Quant, class Bhiksha> void BuildTrie(SortedFiles &files, std::ve
{
WriteEntries<Quant, Bhiksha> writer(contexts, quant, unigrams, out.middle_begin_, out.longest_, counts.size(), sri);
RecursiveInsert(counts.size(), counts[0], inputs, config.ProgressMessages(), "Writing trie", writer);
+ // Write the last unigram entry, which is the end pointer for the bigrams.
+ writer.Unigram(counts[0]);
}
// Do not disable this error message or else too little state will be returned. Both WriteEntries::Middle and returning state based on found n-grams will need to be fixed to handle this situation.
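
RecursiveInsert is a k-way merge over per-order record streams driven by a priority queue; the patch swaps the while (true) loop and its sentinel-based unigram exit for the natural while (!grams.empty()) termination, which also handles an empty model (unigram_count == 0) because empty streams are never pushed. The same merge shape, sketched over plain vectors standing in for the record readers:

    #include <queue>
    #include <vector>
    #include <iostream>

    struct Cursor {
      const std::vector<int> *stream;
      std::size_t pos;
      bool operator<(const Cursor &other) const {
        // std::priority_queue is a max-heap; invert for an ascending merge.
        return (*stream)[pos] > (*other.stream)[other.pos];
      }
    };

    int main() {
      std::vector<int> a = {1, 4, 7}, b = {2, 5}, c = {};
      std::priority_queue<Cursor> queue;
      // Empty streams are simply never pushed -- the analogue of the
      // "if (unigram_count) grams.push(...)" guard above.
      for (const auto *s : {&a, &b, &c})
        if (!s->empty()) queue.push(Cursor{s, 0});
      while (!queue.empty()) {          // terminate when all streams drain
        Cursor top = queue.top();
        queue.pop();
        std::cout << (*top.stream)[top.pos] << ' ';
        if (++top.pos < top.stream->size()) queue.push(top);
      }
      std::cout << '\n';                // prints: 1 2 4 5 7
    }
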
diff --git a/lm/state.hh b/lm/state.hh
index d8e6c132b..543df37c9 100644
--- a/lm/state.hh
+++ b/lm/state.hh
@@ -91,7 +91,7 @@ inline uint64_t hash_value(const Left &left) {
}
struct ChartState {
- bool operator==(const ChartState &other) {
+ bool operator==(const ChartState &other) const {
return (right == other.right) && (left == other.left);
}
@@ -102,7 +102,7 @@ struct ChartState {
}
bool operator<(const ChartState &other) const {
- return Compare(other) == -1;
+ return Compare(other) < 0;
}
void ZeroRemaining() {
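
The operator< fix is a classic three-way-comparison pitfall: Compare(), like memcmp or strcmp, is only contracted to return some negative, zero, or positive value, so testing == -1 silently fails whenever it returns, say, -3. A standalone illustration:

    #include <cstring>
    #include <cassert>

    struct Key {
      char data[8];
      int Compare(const Key &other) const {
        // memcmp returns *some* negative/zero/positive value, not -1/0/1.
        return std::memcmp(data, other.data, sizeof(data));
      }
      bool operator<(const Key &other) const {
        return Compare(other) < 0;  // correct: test the sign, not a magic value
      }
    };

    int main() {
      Key a = {{'a'}}, b = {{'z'}};
      assert(a < b);  // would be unreliable with Compare(other) == -1
    }
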
diff --git a/lm/virtual_interface.hh b/lm/virtual_interface.hh
index 17f064b2c..ff4a388e7 100644
--- a/lm/virtual_interface.hh
+++ b/lm/virtual_interface.hh
@@ -130,6 +130,9 @@ class Model {
// Requires in_state != out_state
virtual FullScoreReturn FullScore(const void *in_state, const WordIndex new_word, void *out_state) const = 0;
+ // Prefer to use FullScore. The context words should be provided in reverse order.
+ virtual FullScoreReturn FullScoreForgotState(const WordIndex *context_rbegin, const WordIndex *context_rend, const WordIndex new_word, void *out_state) const = 0;
+
unsigned char Order() const { return order_; }
const Vocabulary &BaseVocabulary() const { return *base_vocab_; }
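
FullScoreForgotState takes its context in reverse order, most recent word first, matching KenLM's internal state layout. A hedged usage sketch against this abstract interface (model construction is elided, and the caller-supplied state buffer is an illustrative stand-in for Model::StateSize() bytes):

    #include "lm/virtual_interface.hh"

    // Fragment: score "dog" after the context "the quick" through the
    // abstract interface. Context words go most-recent-first, so "quick"
    // comes before "the".
    lm::FullScoreReturn ScoreDog(const lm::base::Model &model, void *state_mem) {
      lm::WordIndex context[2];
      context[0] = model.BaseVocabulary().Index("quick");  // most recent word
      context[1] = model.BaseVocabulary().Index("the");    // further back
      return model.FullScoreForgotState(
          context, context + 2,
          model.BaseVocabulary().Index("dog"),
          state_mem);  // caller supplies Model::StateSize() bytes
    }
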