Welcome to mirror list, hosted at ThFree Co, Russian Federation.

git.blender.org/blender.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAntony Riakiotakis <kalast@gmail.com>2015-02-05 17:06:13 +0300
committerAntony Riakiotakis <kalast@gmail.com>2015-02-05 17:06:25 +0300
commit15ff730b9b1d99f7b11a69628d9489546986e145 (patch)
tree4f65c4c67209c2eb387b851ad37a3022a8ad45c9 /intern/memutil
parentf6e798946092c5d30ad0c011bd8c2920971471b2 (diff)
Change movie cache to use vectors instead of lists.
Runtime costs were horrible. On gooseberry in some sequencer edits using proxies of small size, a cache with about 2000 elements would slow to about 6 fps once cache was full and system tried to find smallest element available. There are still improvements to be done here, like requesting a number of good candidates to avoid rerunning through the list, or even using some heap or ring buffer scheme to sort data, but nothing suits all needs so for now that should bring the cache back to usable state (25fps here at the studio)
Diffstat (limited to 'intern/memutil')
-rw-r--r--intern/memutil/MEM_CacheLimiter.h36
1 file changed, 19 insertions, 17 deletions
diff --git a/intern/memutil/MEM_CacheLimiter.h b/intern/memutil/MEM_CacheLimiter.h
index bbe6ace2456..3e8ae7e5a4d 100644
--- a/intern/memutil/MEM_CacheLimiter.h
+++ b/intern/memutil/MEM_CacheLimiter.h
@@ -130,7 +130,7 @@ private:
T * data;
int refcount;
- typename std::list<MEM_CacheLimiterHandle<T> *, MEM_Allocator<MEM_CacheLimiterHandle<T> *> >::iterator me;
+ int pos;
MEM_CacheLimiter<T> * parent;
};
@@ -146,29 +146,32 @@ public:
}
~MEM_CacheLimiter() {
- for (iterator it = queue.begin(); it != queue.end(); it++) {
- delete *it;
+ int i;
+ for (i = 0; i < queue.size(); i++) {
+ delete queue[i];
}
}
MEM_CacheLimiterHandle<T> *insert(T * elem) {
queue.push_back(new MEM_CacheLimiterHandle<T>(elem, this));
- iterator it = queue.end();
- --it;
- queue.back()->me = it;
+ queue.back()->pos = queue.size() - 1;
return queue.back();
}
void unmanage(MEM_CacheLimiterHandle<T> *handle) {
- queue.erase(handle->me);
+ int pos = handle->pos;
+ queue[pos] = queue.back();
+ queue[pos]->pos = pos;
+ queue.pop_back();
delete handle;
}
size_t get_memory_in_use() {
size_t size = 0;
if (data_size_func) {
- for (iterator it = queue.begin(); it != queue.end(); it++) {
- size += data_size_func((*it)->get()->get_data());
+ int i;
+ for (i = 0; i < queue.size(); i++) {
+ size += data_size_func(queue[i]->get()->get_data());
}
}
else {
@@ -226,11 +229,11 @@ public:
* least priority element anyway.
*/
if (item_priority_func == NULL) {
+ queue[handle->pos] = queue.back();
+ queue[handle->pos]->pos = handle->pos;
+ queue.pop_back();
queue.push_back(handle);
- queue.erase(handle->me);
- iterator it = queue.end();
- --it;
- handle->me = it;
+ handle->pos = queue.size() - 1;
}
}
@@ -244,7 +247,7 @@ public:
private:
typedef MEM_CacheLimiterHandle<T> *MEM_CacheElementPtr;
- typedef std::list<MEM_CacheElementPtr, MEM_Allocator<MEM_CacheElementPtr> > MEM_CacheQueue;
+ typedef std::vector<MEM_CacheElementPtr, MEM_Allocator<MEM_CacheElementPtr> > MEM_CacheQueue;
typedef typename MEM_CacheQueue::iterator iterator;
/* Check whether element can be destroyed when enforcing cache limits */
@@ -277,11 +280,10 @@ private:
}
else {
int best_match_priority = 0;
- iterator it;
int i;
- for (it = queue.begin(), i = 0; it != queue.end(); it++, i++) {
- MEM_CacheElementPtr elem = *it;
+ for (i = 0; i < queue.size(); i++) {
+ MEM_CacheElementPtr elem = queue[i];
if (!can_destroy_element(elem))
continue;