Welcome to mirror list, hosted at ThFree Co, Russian Federation.

tokenize.hh « util - github.com/moses-smt/mosesdecoder.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
blob: 5d843022209e008a8c51aafd4e79bbab115a8cce (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
#ifndef TOKENIZE_H
#define TOKENIZE_H

#include <cstddef>
#include <string>
#include <vector>

namespace util
{

/** Split input text into a series of tokens.
 *
 * Splits on spaces and tabs, no other whitespace characters, and is not
 * locale-sensitive.
 *
 * The spaces themselves are not included.  A sequence of consecutive space/tab
 * characters count as one.
 *
 * @param input NUL-terminated character array; scanning stops at '\0'.
 * @return the tokens, in order of appearance; empty if input contains only
 *         separators (or is empty).
 */
inline std::vector<std::string> tokenize(const char input[])
{
  std::vector<std::string> token;
  bool betweenWords = true;   // true while inside a run of separators
  // size_t (not int): an int index would overflow for inputs >= 2 GiB and
  // mixes signedness in the i-start length computation below.
  std::size_t start = 0;      // index of the first char of the current token
  std::size_t i;
  for(i = 0; input[i] != '\0'; i++) {
    // Only ' ' and '\t' separate tokens; '\n', '\r' etc. are token chars.
    const bool isSpace = (input[i] == ' ' || input[i] == '\t');

    if (!isSpace && betweenWords) {
      start = i;              // a new token begins here
      betweenWords = false;
    } else if (isSpace && !betweenWords) {
      token.push_back( std::string( input+start, i-start ) );
      betweenWords = true;
    }
  }
  // Flush the last token when the input does not end in a separator.
  if (!betweenWords)
    token.push_back( std::string( input+start, i-start ) );
  return token;
}

/** Split input string into a series of tokens.
 *
 * Like tokenize(const char[]), but takes a std::string.  Delegates to the
 * C-string overload, so scanning stops at the first embedded '\0' (if any).
 */
inline std::vector<std::string> tokenize(const std::string &input)
{
  const char *text = input.c_str();
  return tokenize(text);
}

} // namespace util

#endif