//-*- c++ -*-
#pragma once
#include <stdint.h>
#include <vector>
#include <map>
#include <algorithm>
#include <boost/shared_ptr.hpp>
#include <boost/thread.hpp>
#include <boost/unordered_map.hpp>
#include <sys/time.h>


#ifndef sptr
#define sptr boost::shared_ptr
#endif

namespace lru_cache
{
  using namespace std;
  using namespace boost;

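  // A thread-safe LRU cache of fixed capacity. Records live in a flat
  // vector; the LRU order is a doubly-linked list threaded through the
  // records via the prev/next slot indices, with m_qfront pointing at
  // the least recently used slot (the next eviction victim) and m_qback
  // at the most recently used one. Lookups take a shared lock for the
  // index lookup and an exclusive lock only to update the LRU order.
  // The capacity (constructor argument or reserve()) should be >= 1.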
  template<typename KEY, typename VAL>
  class LRU_Cache
  {
  public:
    typedef boost::unordered_map<KEY,uint32_t> map_t;
  private:
    struct Record
    {
      uint32_t prev,next;
      KEY            key;
      // timeval      tstamp; // time stamp
      boost::shared_ptr<VAL> ptr; // cached shared ptr
    };

    mutable boost::shared_mutex m_lock;
    uint32_t m_qfront, m_qback;
    vector<Record> m_recs;
    map_t m_idx;

    void
    update_queue(KEY const& key, uint32_t const p)
    {
      // CALLER MUST HOLD THE EXCLUSIVE LOCK!
      // "Remove" the item in slot p from its current position in the
      // queue (which is independent of the slot position) and append it
      // at the back, i.e., mark it as most recently used. The record in
      // slot p must already be linked into the queue (a freshly created
      // record is linked to itself before this is called; see set()).
      Record& r = m_recs[p];
      if (m_recs.size() == 1)
	r.next = r.prev = m_qback = m_qfront = 0;

      if (r.key != key || p == m_qback) return;

      if (m_qfront == p)
	m_qfront = m_recs[r.next].prev = r.next;
      else
	{
	  m_recs[r.prev].next = r.next;
	  m_recs[r.next].prev = r.prev;
	}
      r.prev = m_qback;
      m_recs[r.prev].next = m_qback = r.next = p;
    }

  public:
    LRU_Cache(size_t capacity=1) : m_qfront(0), m_qback(0) { reserve(capacity); }
    size_t capacity() const { return m_recs.capacity(); }
    void reserve(size_t s) { m_recs.reserve(s); }

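    // Look up `key'. Returns an empty shared pointer on a miss; on a
    // hit, the entry is moved to the most recently used position.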
    sptr<VAL>
    get(KEY const& key)
    {
      uint32_t p;
      { // brackets needed for lock scoping
	boost::shared_lock<boost::shared_mutex> rlock(m_lock);
	typename map_t::const_iterator i = m_idx.find(key);
	if (i == m_idx.end()) return sptr<VAL>();
	p = i->second;
      }
      boost::lock_guard<boost::shared_mutex> guard(m_lock);
      // the entry may have been evicted by another thread between the
      // shared and the exclusive lock above; re-check before using it
      if (m_recs[p].key != key) return sptr<VAL>();
      update_queue(key,p);
      return m_recs[p].ptr;
    }

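    // Insert (or overwrite) the value cached for `key'. When the cache
    // is full, the least recently used entry is evicted and its slot
    // reused.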
    void
    set(KEY const& key, sptr<VAL> const& ptr)
    {
      boost::lock_guard<boost::shared_mutex> lock(m_lock);
      pair<typename map_t::iterator,bool> foo;
      foo = m_idx.insert(make_pair(key,m_recs.size()));

      uint32_t p = foo.first->second;
      if (foo.second) // key was not in the cache yet
        {
          if (m_recs.size() < m_recs.capacity())
            {
              // fresh slot: link the new record to itself so that the
              // unlink step in update_queue() cannot clobber the links
              // of other records
              m_recs.push_back(Record());
              m_recs[p].prev = m_recs[p].next = p;
            }
          else
            {
              // cache is full: recycle the least recently used slot
              foo.first->second = p = m_qfront;
              m_idx.erase(m_recs[p].key);
            }
          m_recs[p].key = key;
        }
      update_queue(key,p);
      m_recs[p].ptr = ptr;
    }
  };
}
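
// ----------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the original header; the
// key and value types below are arbitrary choices for the example):
//
//   #include "ug_lru_cache.h"
//   #include <iostream>
//   #include <string>
//
//   int main()
//   {
//     lru_cache::LRU_Cache<uint32_t, std::string> cache(2);
//     cache.set(1, sptr<std::string>(new std::string("one")));
//     cache.set(2, sptr<std::string>(new std::string("two")));
//     // with a typical vector::reserve(2), inserting a third entry
//     // evicts the least recently used one (key 1)
//     cache.set(3, sptr<std::string>(new std::string("three")));
//     sptr<std::string> v = cache.get(1);
//     std::cout << (v ? *v : std::string("miss")) << std::endl; // "miss"
//     v = cache.get(3);
//     std::cout << (v ? *v : std::string("miss")) << std::endl; // "three"
//   }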