/*
 * Various storage structures (pool allocation, vector, hash table)
 *
 * Copyright (C) 1993, Eric Youngdale.
 *               2004, Eric Pouech
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */


#include "config.h"
#include <assert.h>
#include <stdlib.h>
#include "wine/debug.h"

#include "dbghelp_private.h"
#ifdef USE_STATS
#include <math.h>
#endif

WINE_DEFAULT_DEBUG_CHANNEL(dbghelp);

struct pool_arena
{
37 38 39
    struct list entry;
    char       *current;
    char       *end;
40 41
};

42
void pool_init(struct pool* a, size_t arena_size)
43
{
44 45
    list_init( &a->arena_list );
    list_init( &a->arena_full );
46 47 48 49 50 51 52 53 54
    a->arena_size = arena_size;
}

void pool_destroy(struct pool* pool)
{
    struct pool_arena*  arena;
    struct pool_arena*  next;

#ifdef USE_STATS
55 56
    size_t alloc, used, num;

57
    alloc = used = num = 0;
58
    LIST_FOR_EACH_ENTRY( arena, &pool->arena_list, struct pool_arena, entry )
59
    {
60 61 62 63 64 65 66
        alloc += arena->end - (char *)arena;
        used += arena->current - (char*)arena;
        num++;
    }
    LIST_FOR_EACH_ENTRY( arena, &pool->arena_full, struct pool_arena, entry )
    {
        alloc += arena->end - (char *)arena;
67 68 69
        used += arena->current - (char*)arena;
        num++;
    }
70
    if (alloc == 0) alloc = 1;      /* avoid division by zero */
71 72 73
    FIXME("STATS: pool %p has allocated %u kbytes, used %u kbytes in %u arenas, non-allocation ratio: %.2f%%\n",
          pool, (unsigned)(alloc >> 10), (unsigned)(used >> 10), (unsigned)num,
          100.0 - (float)used / (float)alloc * 100.0);
74 75
#endif

76 77 78 79 80 81
    LIST_FOR_EACH_ENTRY_SAFE( arena, next, &pool->arena_list, struct pool_arena, entry )
    {
        list_remove( &arena->entry );
        HeapFree(GetProcessHeap(), 0, arena);
    }
    LIST_FOR_EACH_ENTRY_SAFE( arena, next, &pool->arena_full, struct pool_arena, entry )
82
    {
83
        list_remove( &arena->entry );
84 85 86 87
        HeapFree(GetProcessHeap(), 0, arena);
    }
}

88
void* pool_alloc(struct pool* pool, size_t len)
89 90 91
{
    struct pool_arena*  arena;
    void*               ret;
92
    size_t size;
93 94 95

    len = (len + 3) & ~3; /* round up size on DWORD boundary */

96
    LIST_FOR_EACH_ENTRY( arena, &pool->arena_list, struct pool_arena, entry )
97
    {
98
        if (arena->end - arena->current >= len)
99
        {
100 101
            ret = arena->current;
            arena->current += len;
102 103 104 105 106
            if (arena->current + 16 >= arena->end)
            {
                list_remove( &arena->entry );
                list_add_tail( &pool->arena_full, &arena->entry );
            }
107 108 109
            return ret;
        }
    }
110

111 112
    size = max( pool->arena_size, len );
    arena = HeapAlloc(GetProcessHeap(), 0, size + sizeof(struct pool_arena));
113
    if (!arena) return NULL;
114

115
    ret = arena + 1;
116
    arena->current = (char*)ret + len;
117
    arena->end = (char*)ret + size;
118 119 120 121
    if (arena->current + 16 >= arena->end)
        list_add_tail( &pool->arena_full, &arena->entry );
    else
        list_add_head( &pool->arena_list, &arena->entry );
122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151
    return ret;
}

/* Duplicate str into pool-owned storage; returns NULL on allocation failure. */
char* pool_strdup(struct pool* pool, const char* str)
{
    size_t size = strlen(str) + 1;
    char*  copy = pool_alloc(pool, size);

    if (copy) memcpy(copy, str, size);
    return copy;
}

/* Set up an empty vector of esz-byte elements, grouped in buckets of
 * bucket_sz elements.  bucket_sz must be a power of two in [2, 1024]. */
void vector_init(struct vector* v, unsigned esz, unsigned bucket_sz)
{
    unsigned shift;

    /* derive the shift from the (power of two) bucket size */
    for (shift = 1; shift <= 10; shift++)
        if (bucket_sz == (1u << shift)) break;
    assert(shift <= 10);

    v->buckets = NULL;
    v->elt_size = (esz + 3) & ~3;   /* align element size on DWORD boundary */
    v->shift = shift;
    v->num_buckets = 0;
    v->buckets_allocated = 0;
    v->num_elts = 0;
}

/* Number of elements currently stored in the vector. */
unsigned vector_length(const struct vector* v)
{
    return v->num_elts;
}

/* Return a pointer to element pos, or NULL when pos is out of range. */
void* vector_at(const struct vector* v, unsigned pos)
{
    unsigned slot;

    if (pos >= v->num_elts) return NULL;
    slot = pos & ((1u << v->shift) - 1);        /* index inside the bucket */
    return (char*)v->buckets[pos >> v->shift] + slot * v->elt_size;
}

/* Append one element to the vector, allocating new buckets (and growing the
 * bucket table) from pool as needed.  Returns a pointer to the uninitialized
 * storage for the new element, or NULL on allocation failure (in which case
 * the vector is left unchanged). */
void* vector_add(struct vector* v, struct pool* pool)
{
    unsigned    ncurr = v->num_elts++;

    /* check that we don't wrap around */
    assert(v->num_elts > ncurr);
    if (ncurr == (v->num_buckets << v->shift))
    {
        /* current bucket (if any) is full: we need a new one */
        if (v->num_buckets == v->buckets_allocated)
        {
            /* Double the bucket cache, so it scales well with big vectors. */
            unsigned    new_reserved;
            void**      new_buckets;

            new_reserved = 2 * v->buckets_allocated;
            if (new_reserved == 0) new_reserved = 1;

            /* Don't even try to resize memory.
               Pool datastructure is very inefficient with reallocs. */
            new_buckets = pool_alloc(pool, new_reserved * sizeof(void*));
            if (!new_buckets)
            {
                v->num_elts--;
                return NULL;
            }
            /* v->buckets is NULL on the very first growth; skip the copy */
            if (v->buckets_allocated)
                memcpy(new_buckets, v->buckets, v->buckets_allocated * sizeof(void*));
            v->buckets = new_buckets;
            v->buckets_allocated = new_reserved;
        }
        v->buckets[v->num_buckets] = pool_alloc(pool, v->elt_size << v->shift);
        if (!v->buckets[v->num_buckets])
        {
            v->num_elts--;
            return NULL;
        }
        return v->buckets[v->num_buckets++];
    }
    return vector_at(v, ncurr);
}

/* We construct the sparse array as two vectors (of equal size)
 * The first vector (key2index) is the lookup table between the key and
 * an index in the second vector (elements); it is kept sorted by key so
 * lookups can use a binary search.
 * When inserting an element, it's always appended in second vector (and
 * never moved in memory later on), only the first vector is reordered
 */
struct key2index
{
    unsigned long       key;    /* user-supplied key, kept in ascending order */
    unsigned            index;  /* position of the value in sparse_array.elements */
};

/* Prepare an empty sparse array holding elt_sz-byte values,
 * bucket_sz entries per vector bucket. */
void sparse_array_init(struct sparse_array* sa, unsigned elt_sz, unsigned bucket_sz)
{
    vector_init(&sa->elements, elt_sz, bucket_sz);
    vector_init(&sa->key2index, sizeof(struct key2index), bucket_sz);
}

/******************************************************************
219
 *		sparse_array_lookup
220 221 222
 *
 * Returns the first index which key is >= at passed key
 */
223 224
static struct key2index* sparse_array_lookup(const struct sparse_array* sa,
                                             unsigned long key, unsigned* idx)
225 226
{
    struct key2index*   pk2i;
227
    unsigned            low, high;
228

229
    if (!sa->elements.num_elts)
230
    {
231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256
        *idx = 0;
        return NULL;
    }
    high = sa->elements.num_elts;
    pk2i = vector_at(&sa->key2index, high - 1);
    if (pk2i->key < key)
    {
        *idx = high;
        return NULL;
    }
    if (pk2i->key == key)
    {
        *idx = high - 1;
        return pk2i;
    }
    low = 0;
    pk2i = vector_at(&sa->key2index, low);
    if (pk2i->key >= key)
    {
        *idx = 0;
        return pk2i;
    }
    /* now we have: sa(lowest key) < key < sa(highest key) */
    while (low < high)
    {
        *idx = (low + high) / 2;
257
        pk2i = vector_at(&sa->key2index, *idx);
258 259 260
        if (pk2i->key > key)            high = *idx;
        else if (pk2i->key < key)       low = *idx + 1;
        else                            return pk2i;
261
    }
262 263 264 265 266 267
    /* binary search could return exact item, we search for highest one
     * below the key
     */
    if (pk2i->key < key)
        pk2i = vector_at(&sa->key2index, ++(*idx));
    return pk2i;
268 269 270 271 272 273 274
}

void*   sparse_array_find(const struct sparse_array* sa, unsigned long key)
{
    unsigned            idx;
    struct key2index*   pk2i;

275
    if ((pk2i = sparse_array_lookup(sa, key, &idx)) && pk2i->key == key)
276 277 278 279 280 281 282 283 284 285 286
        return vector_at(&sa->elements, pk2i->index);
    return NULL;
}

void*   sparse_array_add(struct sparse_array* sa, unsigned long key, 
                         struct pool* pool)
{
    unsigned            idx, i;
    struct key2index*   pk2i;
    struct key2index*   to;

287
    pk2i = sparse_array_lookup(sa, key, &idx);
288 289
    if (pk2i && pk2i->key == key)
    {
290
        FIXME("re-adding an existing key\n");
291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317
        return NULL;
    }
    to = vector_add(&sa->key2index, pool);
    if (pk2i)
    {
        /* we need to shift vector's content... */
        /* let's do it brute force... (FIXME) */
        assert(sa->key2index.num_elts >= 2);
        for (i = sa->key2index.num_elts - 1; i > idx; i--)
        {
            pk2i = vector_at(&sa->key2index, i - 1);
            *to = *pk2i;
            to = pk2i;
        }
    }

    to->key = key;
    to->index = sa->elements.num_elts;

    return vector_add(&sa->elements, pool);
}

/* Number of elements stored in the sparse array. */
unsigned sparse_array_length(const struct sparse_array* sa)
{
    return sa->elements.num_elts;
}

/* One-at-a-time string hash (Jenkins), reduced modulo num_buckets. */
static unsigned hash_table_hash(const char* name, unsigned num_buckets)
{
    const char* p;
    unsigned    hash = 0;

    for (p = name; *p; p++)
    {
        hash += *p;
        hash += hash << 10;
        hash ^= hash >> 6;
    }
    /* final avalanche */
    hash += hash << 3;
    hash ^= hash >> 11;
    hash += hash << 15;
    return hash % num_buckets;
}

/* Set up an empty hash table; the bucket array itself is allocated lazily
 * from pool on the first hash_table_add(). */
void hash_table_init(struct pool* pool, struct hash_table* ht, unsigned num_buckets)
{
    ht->pool = pool;
    ht->num_buckets = num_buckets;
    ht->num_elts = 0;
    ht->buckets = NULL;
}

/* Tear down the table.  All memory lives in the pool, so there is nothing
 * to free here; when compiled with USE_STATS this dumps the bucket
 * distribution before returning. */
void hash_table_destroy(struct hash_table* ht)
{
#if defined(USE_STATS)
    unsigned                    i;      /* unsigned: matches num_buckets */
    unsigned                    len;
    unsigned                    min = 0xffffffff, max = 0, sq = 0;
    struct hash_table_elt*      elt;
    double                      mean, variance;

    if (!ht->buckets) return;           /* nothing was ever inserted */

    for (i = 0; i < ht->num_buckets; i++)
    {
        /* chains hang off buckets[i].first (see hash_table_add) */
        for (len = 0, elt = ht->buckets[i].first; elt; elt = elt->next) len++;
        if (len < min) min = len;
        if (len > max) max = len;
        sq += len * len;
    }
    mean = (double)ht->num_elts / ht->num_buckets;
    variance = (double)sq / ht->num_buckets - mean * mean;
    FIXME("STATS: elts[num:%-4u size:%u mean:%f] buckets[min:%-4u variance:%+f max:%-4u]\n",
          ht->num_elts, ht->num_buckets, mean, min, variance, max);

    for (i = 0; i < ht->num_buckets; i++)
    {
        for (len = 0, elt = ht->buckets[i].first; elt; elt = elt->next) len++;
        if (len == max)
        {
            FIXME("Longest bucket:\n");
            for (elt = ht->buckets[i].first; elt; elt = elt->next)
                FIXME("\t%s\n", elt->name);
            break;
        }
    }
#endif
}

void hash_table_add(struct hash_table* ht, struct hash_table_elt* elt)
{
379 380
    unsigned                    hash = hash_table_hash(elt->name, ht->num_buckets);

381 382
    if (!ht->buckets)
    {
383
        ht->buckets = pool_alloc(ht->pool, ht->num_buckets * sizeof(struct hash_table_bucket));
384
        assert(ht->buckets);
385
        memset(ht->buckets, 0, ht->num_buckets * sizeof(struct hash_table_bucket));
386 387
    }

388 389 390
    /* in some cases, we need to get back the symbols of same name in the order
     * in which they've been inserted. So insert new elements at the end of the list.
     */
391 392 393 394 395 396 397 398 399
    if (!ht->buckets[hash].first)
    {
        ht->buckets[hash].first = elt;
    }
    else
    {
        ht->buckets[hash].last->next = elt;
    }
    ht->buckets[hash].last = elt;
400
    elt->next = NULL;
401
    ht->num_elts++;
402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422
}

/* Prepare hti to walk either the single bucket that name hashes to, or,
 * when name is NULL, every bucket of the table. */
void hash_table_iter_init(const struct hash_table* ht, 
                          struct hash_table_iter* hti, const char* name)
{
    hti->ht = ht;
    hti->element = NULL;
    if (name)
    {
        /* restrict iteration to the one bucket matching name's hash */
        hti->last = hash_table_hash(name, ht->num_buckets);
        hti->index = hti->last - 1;
    }
    else
    {
        hti->last = ht->num_buckets - 1;
        hti->index = -1;
    }
}

void* hash_table_iter_up(struct hash_table_iter* hti)
{
423
    if (!hti->ht->buckets) return NULL;
424

425 426
    if (hti->element) hti->element = hti->element->next;
    while (!hti->element && hti->index < hti->last) 
427
        hti->element = hti->ht->buckets[++hti->index].first;
428 429
    return hti->element;
}