/*
    This file is part of darktable,
    copyright (c) 2014 johannes hanika.
    copyright (c) 2015 LebedevRI

    darktable is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    darktable is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with darktable.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "common/cache.h"
#include "common/darktable.h"
#include "common/dtpthread.h"

#include <assert.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

// this implements a concurrent LRU cache
31 32
void dt_cache_init(
    dt_cache_t *cache,
33
    size_t entry_size,
34
    size_t cost_quota)
35
{
36
  cache->cost = 0;
37
  cache->lru = 0;
38
  cache->entry_size = entry_size;
39
  cache->cost_quota = cost_quota;
40
  dt_pthread_mutex_init(&cache->lock, 0);
41 42 43 44 45
  cache->allocate = 0;
  cache->allocate_data = 0;
  cache->cleanup = 0;
  cache->cleanup_data = 0;
  cache->hashtable = g_hash_table_new(0, 0);
46 47
}

48
void dt_cache_cleanup(dt_cache_t *cache)
49
{
50
  g_hash_table_destroy(cache->hashtable);
51 52
  GList *l = cache->lru;
  while(l)
53
  {
54
    dt_cache_entry_t *entry = (dt_cache_entry_t *)l->data;
55

56
    if(cache->cleanup)
57 58 59 60
    {
      assert(entry->data_size);
      ASAN_UNPOISON_MEMORY_REGION(entry->data, entry->data_size);

61
      cache->cleanup(cache->cleanup_data, entry);
62
    }
63 64
    else
      dt_free_align(entry->data);
65

66
    dt_pthread_rwlock_destroy(&entry->lock);
67
    g_slice_free1(sizeof(*entry), entry);
68
    l = g_list_next(l);
69
  }
70
  g_list_free(cache->lru);
71
  dt_pthread_mutex_destroy(&cache->lock);
72 73
}

74
int32_t dt_cache_contains(dt_cache_t *cache, const uint32_t key)
75
{
76
  dt_pthread_mutex_lock(&cache->lock);
77
  int32_t result = g_hash_table_contains(cache->hashtable, GINT_TO_POINTER(key));
78
  dt_pthread_mutex_unlock(&cache->lock);
79
  return result;
80 81
}

82 83 84 85
int dt_cache_for_all(
    dt_cache_t *cache,
    int (*process)(const uint32_t key, const void *data, void *user_data),
    void *user_data)
86
{
87
  dt_pthread_mutex_lock(&cache->lock);
88 89
  GHashTableIter iter;
  gpointer key, value;
90

91 92
  g_hash_table_iter_init (&iter, cache->hashtable);
  while (g_hash_table_iter_next (&iter, &key, &value))
93
  {
94 95
    dt_cache_entry_t *entry = (dt_cache_entry_t *)value;
    const int err = process(GPOINTER_TO_INT(key), entry->data, user_data);
96
    if(err)
97
    {
98
      dt_pthread_mutex_unlock(&cache->lock);
99
      return err;
100
    }
101
  }
102
  dt_pthread_mutex_unlock(&cache->lock);
103 104 105
  return 0;
}

106 107
// return read locked bucket, or NULL if it's not already there.
// never attempt to allocate a new slot.
108
dt_cache_entry_t *dt_cache_testget(dt_cache_t *cache, const uint32_t key, char mode)
109
{
110
  gpointer orig_key, value;
111 112
  gboolean res;
  int result;
113
  double start = dt_get_wtime();
114 115
  dt_pthread_mutex_lock(&cache->lock);
  res = g_hash_table_lookup_extended(
116
      cache->hashtable, GINT_TO_POINTER(key), &orig_key, &value);
117 118 119 120
  if(res)
  {
    dt_cache_entry_t *entry = (dt_cache_entry_t *)value;
    // lock the cache entry
121 122 123 124 125 126
    if(mode == 'w') result = dt_pthread_rwlock_trywrlock(&entry->lock);
    else            result = dt_pthread_rwlock_tryrdlock(&entry->lock);
    if(result)
    { // need to give up mutex so other threads have a chance to get in between and
      // free the lock we're trying to acquire:
      dt_pthread_mutex_unlock(&cache->lock);
127
      return 0;
128
    }
129 130 131 132
    // bubble up in lru list:
    cache->lru = g_list_remove_link(cache->lru, entry->link);
    cache->lru = g_list_concat(cache->lru, entry->link);
    dt_pthread_mutex_unlock(&cache->lock);
133 134
    double end = dt_get_wtime();
    if(end - start > 0.1)
135
      fprintf(stderr, "try+ wait time %.06fs mode %c \n", end - start, mode);
136

137 138 139 140 141 142
    if(mode == 'w')
    {
      assert(entry->data_size);
      ASAN_POISON_MEMORY_REGION(entry->data, entry->data_size);
    }

143 144
    // WARNING: do *NOT* unpoison here. it must be done by the caller!

145 146
    return entry;
  }
147
  dt_pthread_mutex_unlock(&cache->lock);
148 149 150
  double end = dt_get_wtime();
  if(end - start > 0.1)
    fprintf(stderr, "try- wait time %.06fs\n", end - start);
151
  return 0;
152 153
}

154 155 156
// if found, the data void* is returned. if not, it is set to be
// the given *data and a new hash table entry is created, which can be
// found using the given key later on.
157
dt_cache_entry_t *dt_cache_get_with_caller(dt_cache_t *cache, const uint32_t key, char mode, const char *file, int line)
158
{
159
  gpointer orig_key, value;
160 161
  gboolean res;
  int result;
162
  double start = dt_get_wtime();
163 164 165
restart:
  dt_pthread_mutex_lock(&cache->lock);
  res = g_hash_table_lookup_extended(
166
      cache->hashtable, GINT_TO_POINTER(key), &orig_key, &value);
167
  if(res)
168 169
  { // yay, found. read lock and pass on.
    dt_cache_entry_t *entry = (dt_cache_entry_t *)value;
170 171 172 173 174 175
    if(mode == 'w') result = dt_pthread_rwlock_trywrlock_with_caller(&entry->lock, file, line);
    else            result = dt_pthread_rwlock_tryrdlock_with_caller(&entry->lock, file, line);
    if(result)
    { // need to give up mutex so other threads have a chance to get in between and
      // free the lock we're trying to acquire:
      dt_pthread_mutex_unlock(&cache->lock);
176
      g_usleep(5);
177 178
      goto restart;
    }
179 180 181
    // bubble up in lru list:
    cache->lru = g_list_remove_link(cache->lru, entry->link);
    cache->lru = g_list_concat(cache->lru, entry->link);
182
    dt_pthread_mutex_unlock(&cache->lock);
183 184 185 186 187

#ifdef _DEBUG
    const pthread_t writer = dt_pthread_rwlock_get_writer(&entry->lock);
    if(mode == 'w')
    {
Roman Lebedev's avatar
Roman Lebedev committed
188
      assert(pthread_equal(writer, pthread_self()));
189 190 191
    }
    else
    {
Roman Lebedev's avatar
Roman Lebedev committed
192
      assert(!pthread_equal(writer, pthread_self()));
193 194 195
    }
#endif

196 197 198 199 200 201
    if(mode == 'w')
    {
      assert(entry->data_size);
      ASAN_POISON_MEMORY_REGION(entry->data, entry->data_size);
    }

202 203
    // WARNING: do *NOT* unpoison here. it must be done by the caller!

204
    return entry;
205 206
  }

207 208 209
  // else, not found, need to allocate.

  // first try to clean up.
210 211 212 213 214 215 216
  // also wait if we can't free more than the requested fill ratio.
  if(cache->cost > 0.8f * cache->cost_quota)
  {
    // need to roll back all the way to get a consistent lock state:
    dt_cache_gc(cache, 0.8f);
  }

217
  // here dies your 32-bit system:
218
  dt_cache_entry_t *entry = (dt_cache_entry_t *)g_slice_alloc(sizeof(dt_cache_entry_t));
219 220
  int ret = dt_pthread_rwlock_init(&entry->lock, 0);
  if(ret) fprintf(stderr, "rwlock init: %d\n", ret);
221
  entry->data = 0;
222
  entry->data_size = cache->entry_size;
223
  entry->cost = 1;
224
  entry->link = g_list_append(0, entry);
225
  entry->key = key;
226
  entry->_lock_demoting = 0;
227

228
  g_hash_table_insert(cache->hashtable, GINT_TO_POINTER(key), entry);
229 230 231

  assert(cache->allocate || entry->data_size);

232
  if(cache->allocate)
233
    cache->allocate(cache->allocate_data, entry);
234
  else
235 236
    entry->data = dt_alloc_align(16, entry->data_size);

237 238 239
  assert(entry->data_size);
  ASAN_POISON_MEMORY_REGION(entry->data, entry->data_size);

240 241 242
  // if allocate callback is given, always return a write lock
  const int write = ((mode == 'w') || cache->allocate);

243
  // write lock in case the caller requests it:
244 245
  if(write) dt_pthread_rwlock_wrlock_with_caller(&entry->lock, file, line);
  else      dt_pthread_rwlock_rdlock_with_caller(&entry->lock, file, line);
246

247 248 249 250
  cache->cost += entry->cost;

  // put at end of lru list (most recently used):
  cache->lru = g_list_concat(cache->lru, entry->link);
251

252
  dt_pthread_mutex_unlock(&cache->lock);
253 254 255
  double end = dt_get_wtime();
  if(end - start > 0.1)
    fprintf(stderr, "wait time %.06fs\n", end - start);
256 257 258

  // WARNING: do *NOT* unpoison here. it must be done by the caller!

259
  return entry;
260
}
261

262
int dt_cache_remove(dt_cache_t *cache, const uint32_t key)
263
{
264 265 266 267 268
  gpointer orig_key, value;
  gboolean res;
  int result;
  dt_cache_entry_t *entry;
restart:
269
  dt_pthread_mutex_lock(&cache->lock);
270

271
  res = g_hash_table_lookup_extended(
272
      cache->hashtable, GINT_TO_POINTER(key), &orig_key, &value);
273
  entry = (dt_cache_entry_t *)value;
274
  if(!res)
275
  { // not found in cache, not deleting.
276 277
    dt_pthread_mutex_unlock(&cache->lock);
    return 1;
278
  }
279
  // need write lock to be able to delete:
280 281 282 283
  result = dt_pthread_rwlock_trywrlock(&entry->lock);
  if(result)
  {
    dt_pthread_mutex_unlock(&cache->lock);
284
    g_usleep(5);
285 286 287
    goto restart;
  }

288 289 290 291 292 293 294 295 296
  if(entry->_lock_demoting)
  {
    // oops, we are currently demoting (rw -> r) lock to this entry in some thread. do not touch!
    dt_pthread_rwlock_unlock(&entry->lock);
    dt_pthread_mutex_unlock(&cache->lock);
    g_usleep(5);
    goto restart;
  }

297
  gboolean removed = g_hash_table_remove(cache->hashtable, GINT_TO_POINTER(key));
298
  (void)removed; // make non-assert compile happy
299
  assert(removed);
300
  cache->lru = g_list_delete_link(cache->lru, entry->link);
301

302
  if(cache->cleanup)
303 304 305 306
  {
    assert(entry->data_size);
    ASAN_UNPOISON_MEMORY_REGION(entry->data, entry->data_size);

307
    cache->cleanup(cache->cleanup_data, entry);
308
  }
309 310
  else
    dt_free_align(entry->data);
311

312 313
  dt_pthread_rwlock_unlock(&entry->lock);
  dt_pthread_rwlock_destroy(&entry->lock);
314
  cache->cost -= entry->cost;
315
  g_slice_free1(sizeof(*entry), entry);
316

317 318
  dt_pthread_mutex_unlock(&cache->lock);
  return 0;
319 320
}

321 322
// best-effort garbage collection. never blocks, never fails. well, sometimes it just doesn't free anything.
void dt_cache_gc(dt_cache_t *cache, const float fill_ratio)
323
{
324
  GList *l = cache->lru;
325
  int cnt = 0;
326 327
  while(l)
  {
328
    cnt++;
329
    dt_cache_entry_t *entry = (dt_cache_entry_t *)l->data;
330
    assert(entry->link->data == entry);
331 332
    l = g_list_next(l); // we might remove this element, so walk to the next one while we still have the pointer..
    if(cache->cost < cache->cost_quota * fill_ratio) break;
333

334
    // if still locked by anyone else give up:
335
    if(dt_pthread_rwlock_trywrlock(&entry->lock)) continue;
336

337 338 339 340 341 342 343
    if(entry->_lock_demoting)
    {
      // oops, we are currently demoting (rw -> r) lock to this entry in some thread. do not touch!
      dt_pthread_rwlock_unlock(&entry->lock);
      continue;
    }

344
    // delete!
johannes hanika's avatar
johannes hanika committed
345
    g_hash_table_remove(cache->hashtable, GINT_TO_POINTER(entry->key));
346
    cache->lru = g_list_delete_link(cache->lru, entry->link);
347
    cache->cost -= entry->cost;
348

349
    if(cache->cleanup)
350 351 352 353
    {
      assert(entry->data_size);
      ASAN_UNPOISON_MEMORY_REGION(entry->data, entry->data_size);

354
      cache->cleanup(cache->cleanup_data, entry);
355
    }
356 357
    else
      dt_free_align(entry->data);
358

359 360
    dt_pthread_rwlock_unlock(&entry->lock);
    dt_pthread_rwlock_destroy(&entry->lock);
361
    g_slice_free1(sizeof(*entry), entry);
362
  }
363 364
}

365
void dt_cache_release_with_caller(dt_cache_t *cache, dt_cache_entry_t *entry, const char *file, int line)
366
{
367 368 369 370 371 372 373 374 375 376 377 378 379 380
#if((__has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)) && 1)
  // yes, this is *HIGHLY* unportable and is accessing implementation details.
#ifdef _DEBUG
  if(entry->lock.lock.__data.__nr_readers <= 1)
#else
  if(entry->lock.__data.__nr_readers <= 1)
#endif
  {
    // only if there are no other reades we may poison.
    assert(entry->data_size);
    ASAN_POISON_MEMORY_REGION(entry->data, entry->data_size);
  }
#endif

381
  dt_pthread_rwlock_unlock(&entry->lock);
382 383
}

// modelines: These editor modelines have been set for all relevant files by tools/update_modelines.sh
// vim: shiftwidth=2 expandtab tabstop=2 cindent
// kate: tab-indents: off; indent-width 2; replace-tabs on; indent-mode cstyle; remove-trailing-spaces modified;