diff --git a/lib/hash.c b/lib/hash.c
index e9cf28ab6..d312a2644 100644
--- a/lib/hash.c
+++ b/lib/hash.c
@@ -1,789 +1,1233 @@
-/* Global assumptions:
-   - ANSI C
-   - a certain amount of library support, at least
-   - C ints are at least 32-bits long
-   */
+/* hash - hashing table processing.
-/* Things to do:
-   - add a sample do_all function for listing the hash table.
-   */
+   Copyright (C) 1998-2004, 2006-2007, 2009-2014 Free Software Foundation, Inc.
-#include <stdio.h>
-#include <stdlib.h>
-#include <assert.h>
+   Written by Jim Meyering, 1992.
-#include "hash.h"
+   This program is free software: you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3 of the License, or
+   (at your option) any later version.
-#ifdef USE_OBSTACK
-/* This macro assumes that there is an HT with an initialized
-   HT_OBSTACK in scope. */
-# define ZALLOC(n) obstack_alloc (&(ht->ht_obstack), (n))
-#else
-# define ZALLOC(n) malloc ((n))
-#endif
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+   GNU General Public License for more details.
-#define BUCKET_HEAD(ht, idx) ((ht)->hash_table[(idx)])
+   You should have received a copy of the GNU General Public License
+   along with this program. If not, see <http://www.gnu.org/licenses/>. */
-static void hash_free_0 (HT *, int);
+/* A generic hash table package. */
-static int
-is_prime (candidate)
-     unsigned long candidate;
-{
-  /* No even number and none less than 10 will be passed here. */
-  unsigned long divn = 3;
-  unsigned long sq = divn * divn;
+/* Define USE_OBSTACK to 1 if you want the allocator to use obstacks instead
+   of malloc. If you change USE_OBSTACK, you have to recompile! */
-  while (sq < candidate && (candidate % divn))
-    {
-      divn++;
-      sq += 4 * divn;
-      divn++;
-    }
+#include <config.h>
-  return (candidate % divn);
-}
+#include "hash.h"
-/* Round a given number up to the nearest prime. */
+#include "bitrotate.h"
+#include "xalloc-oversized.h"
-static unsigned long
-next_prime (candidate)
-     unsigned long candidate;
-{
-  /* Make it definitely odd. */
-  candidate |= 1;
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
-  while (!is_prime (candidate))
-    candidate += 2;
+#if USE_OBSTACK
+# include "obstack.h"
+# ifndef obstack_chunk_alloc
+#  define obstack_chunk_alloc malloc
+# endif
+# ifndef obstack_chunk_free
+#  define obstack_chunk_free free
+# endif
+#endif
-  return candidate;
-}
+struct hash_entry
+  {
+    void *data;
+    struct hash_entry *next;
+  };
-static void
-hash_free_entry (HT *ht, HASH_ENT *e)
-{
-  e->key = NULL;
-  e->next = ht->hash_free_entry_list;
-  ht->hash_free_entry_list = e;
-}
+struct hash_table
+  {
+    /* The array of buckets starts at BUCKET and extends to BUCKET_LIMIT-1,
+       for a possibility of N_BUCKETS. Among those, N_BUCKETS_USED buckets
+       are not empty; there are N_ENTRIES active entries in the table. */
+    struct hash_entry *bucket;
+    struct hash_entry const *bucket_limit;
+    size_t n_buckets;
+    size_t n_buckets_used;
+    size_t n_entries;
+
+    /* Tuning arguments, kept in a physically separate structure. */
+    const Hash_tuning *tuning;
+
+    /* Three functions are given to 'hash_initialize', see the documentation
+       block for this function.
In a word, HASHER randomizes a user entry
+       into a number from 0 up to some maximum minus 1; COMPARATOR returns
+       true if two user entries compare equally; and DATA_FREER is the cleanup
+       function for a user entry. */
+    Hash_hasher hasher;
+    Hash_comparator comparator;
+    Hash_data_freer data_freer;
+
+    /* A linked list of freed struct hash_entry structs. */
+    struct hash_entry *free_entry_list;
+
+#if USE_OBSTACK
+    /* Whenever obstacks are used, it is possible to allocate all overflowed
+       entries into a single stack, so they all can be freed in a single
+       operation. It is not clear if the speedup is worth the trouble. */
+    struct obstack entry_stack;
+#endif
+  };
+
+/* A hash table contains many internal entries, each holding a pointer to
+   some user-provided data (also called a user entry). An entry indistinctly
+   refers to both the internal entry and its associated user entry. A user
+   entry's contents may be hashed by a randomization function (the hashing
+   function, or just "hasher" for short) into a number (or "slot") between 0
+   and the current table size. At each slot position in the hash table starts
+   a linked chain of entries for which the user data all hash to this slot.
+   A bucket is the collection of all entries hashing to the same slot.
+
+   A good "hasher" function will distribute entries rather evenly in buckets.
+   In the ideal case, the length of each bucket is roughly the number of
+   entries divided by the table size. Finding the slot for a given datum is
+   usually done in constant time by the "hasher", and the later finding of a
+   precise entry is linear in time with the size of the bucket. Consequently,
+   a larger hash table size (that is, a larger number of buckets) is prone to
+   yielding shorter chains, *given* the "hasher" function behaves properly.
+
+   Long buckets slow down the lookup algorithm. One might use big hash table
+   sizes in the hope of reducing the average length of buckets, but this can
+   become wasteful, as unused slots in the hash table take some space. The
+   best bet is to make sure you are using a good "hasher" function (beware
+   that those are not that easy to write! :-), and to use a table size
+   larger than the actual number of entries. */
+
+/* If an insertion makes the ratio of nonempty buckets to table size larger
+   than the growth threshold (a number between 0.0 and 1.0), then increase
+   the table size by multiplying by the growth factor (a number greater than
+   1.0). The growth threshold defaults to 0.8, and the growth factor
+   defaults to 1.414, meaning that the table will have doubled its size
+   every second time 80% of the buckets get used. */
+#define DEFAULT_GROWTH_THRESHOLD 0.8f
+#define DEFAULT_GROWTH_FACTOR 1.414f
+
+/* If a deletion empties a bucket and causes the ratio of used buckets to
+   table size to become smaller than the shrink threshold (a number between
+   0.0 and 1.0), then shrink the table by multiplying by the shrink factor (a
+   number greater than the shrink threshold but smaller than 1.0). The shrink
+   threshold and factor default to 0.0 and 1.0, meaning that the table never
+   shrinks. */
+#define DEFAULT_SHRINK_THRESHOLD 0.0f
+#define DEFAULT_SHRINK_FACTOR 1.0f
+
+/* Use this to initialize or reset a TUNING structure to
+   some sensible values. */
+static const Hash_tuning default_tuning =
+  {
+    DEFAULT_SHRINK_THRESHOLD,
+    DEFAULT_SHRINK_FACTOR,
+    DEFAULT_GROWTH_THRESHOLD,
+    DEFAULT_GROWTH_FACTOR,
+    false
+  };
+
+/* Information and lookup.
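
   (A worked illustration of the tuning machinery above; the numbers are
   derived from the code in this patch, not stated by it: hash_initialize
   with CANDIDATE = 100 and a NULL TUNING allocates next_prime (100 / 0.8)
   = 127 buckets; hash_insert then triggers a rehash once more than
   0.8 * 127 of those buckets, that is 102 or more, are non-empty,
   multiplying the bucket count by roughly 1.414, so every second growth
   step doubles the table.)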
*/ + +/* The following few functions provide information about the overall hash + table organization: the number of entries, number of buckets and maximum + length of buckets. */ + +/* Return the number of buckets in the hash table. The table size, the total + number of buckets (used plus unused), or the maximum number of slots, are + the same quantity. */ -static HASH_ENT * -hash_allocate_entry (HT *ht) +size_t +hash_get_n_buckets (const Hash_table *table) { - HASH_ENT *new; - if (ht->hash_free_entry_list) - { - new = ht->hash_free_entry_list; - ht->hash_free_entry_list = new->next; - } - else - { - new = (HASH_ENT *) ZALLOC (sizeof (HASH_ENT)); - } - return new; + return table->n_buckets; } -unsigned int -hash_get_n_slots_used (const HT *ht) +/* Return the number of slots in use (non-empty buckets). */ + +size_t +hash_get_n_buckets_used (const Hash_table *table) { - return ht->hash_n_slots_used; + return table->n_buckets_used; } -/* FIXME-comment */ +/* Return the number of active entries. */ -int -hash_rehash (HT *ht, unsigned int new_table_size) +size_t +hash_get_n_entries (const Hash_table *table) { - HT *ht_new; - unsigned int i; - - if (ht->hash_table_size <= 0 || new_table_size == 0) - return 1; + return table->n_entries; +} - ht_new = hash_initialize (new_table_size, ht->hash_key_freer, - ht->hash_hash, ht->hash_key_comparator); +/* Return the length of the longest chain (bucket). */ - if (ht_new == NULL) - return 1; +size_t +hash_get_max_bucket_length (const Hash_table *table) +{ + struct hash_entry const *bucket; + size_t max_bucket_length = 0; - for (i = 0; i < ht->hash_table_size; i++) + for (bucket = table->bucket; bucket < table->bucket_limit; bucket++) { - HASH_ENT *p = BUCKET_HEAD (ht, i); - for ( /* empty */ ; p; p = p->next) - { - int failed; - const void *already_in_table; - already_in_table = hash_insert_if_absent (ht_new, p->key, &failed); - assert (failed == 0 && already_in_table == 0); - } - } + if (bucket->data) + { + struct hash_entry const *cursor = bucket; + size_t bucket_length = 1; - hash_free_0 (ht, 0); + while (cursor = cursor->next, cursor) + bucket_length++; -#ifdef TESTING - assert (hash_table_ok (ht_new)); -#endif - *ht = *ht_new; - free (ht_new); - - /* FIXME: fill in ht_new->n_slots_used and other statistics fields. */ + if (bucket_length > max_bucket_length) + max_bucket_length = bucket_length; + } + } - return 0; + return max_bucket_length; } -/* FIXME-comment */ +/* Do a mild validation of a hash table, by traversing it and checking two + statistics. */ -unsigned int -hash_get_max_chain_length (HT *ht) +bool +hash_table_ok (const Hash_table *table) { - unsigned int i; - unsigned int max_chain_length = 0; + struct hash_entry const *bucket; + size_t n_buckets_used = 0; + size_t n_entries = 0; - if (!ht->hash_dirty_max_chain_length) - return ht->hash_max_chain_length; - - for (i = 0; i < ht->hash_table_size; i++) + for (bucket = table->bucket; bucket < table->bucket_limit; bucket++) { - unsigned int chain_length = 0; - HASH_ENT *p = BUCKET_HEAD (ht, i); - for ( /* empty */ ; p; p = p->next) - ++chain_length; - if (chain_length > max_chain_length) - max_chain_length = chain_length; + if (bucket->data) + { + struct hash_entry const *cursor = bucket; + + /* Count bucket head. */ + n_buckets_used++; + n_entries++; + + /* Count bucket overflow. 
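             The comma expression below advances CURSOR first and then
             tests the advanced value, so the bucket head (already counted
             above) is skipped and each overflow entry is counted exactly
             once.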
*/ + while (cursor = cursor->next, cursor) + n_entries++; + } } - ht->hash_max_chain_length = max_chain_length; - ht->hash_dirty_max_chain_length = 0; - return ht->hash_max_chain_length; + if (n_buckets_used == table->n_buckets_used && n_entries == table->n_entries) + return true; + + return false; } -unsigned int -hash_get_n_keys (const HT *ht) +void +hash_print_statistics (const Hash_table *table, FILE *stream) { - return ht->hash_n_keys; + size_t n_entries = hash_get_n_entries (table); + size_t n_buckets = hash_get_n_buckets (table); + size_t n_buckets_used = hash_get_n_buckets_used (table); + size_t max_bucket_length = hash_get_max_bucket_length (table); + + fprintf (stream, "# entries: %lu\n", (unsigned long int) n_entries); + fprintf (stream, "# buckets: %lu\n", (unsigned long int) n_buckets); + fprintf (stream, "# buckets used: %lu (%.2f%%)\n", + (unsigned long int) n_buckets_used, + (100.0 * n_buckets_used) / n_buckets); + fprintf (stream, "max bucket length: %lu\n", + (unsigned long int) max_bucket_length); } -unsigned int -hash_get_table_size (const HT *ht) +/* Hash KEY and return a pointer to the selected bucket. + If TABLE->hasher misbehaves, abort. */ +static struct hash_entry * +safe_hasher (const Hash_table *table, const void *key) +{ + size_t n = table->hasher (key, table->n_buckets); + if (! (n < table->n_buckets)) + abort (); + return table->bucket + n; +} + +/* If ENTRY matches an entry already in the hash table, return the + entry from the table. Otherwise, return NULL. */ + +void * +hash_lookup (const Hash_table *table, const void *entry) { - return ht->hash_table_size; + struct hash_entry const *bucket = safe_hasher (table, entry); + struct hash_entry const *cursor; + + if (bucket->data == NULL) + return NULL; + + for (cursor = bucket; cursor; cursor = cursor->next) + if (entry == cursor->data || table->comparator (entry, cursor->data)) + return cursor->data; + + return NULL; } -/* CANDIDATE_TABLE_SIZE need not be prime. If WHEN_TO_REHASH is positive, when - that percentage of table entries have been used, the table is - deemed too small; then a new, larger table (GROW_FACTOR times - larger than the previous size) is allocated and all entries in - the old table are rehashed into the new, larger one. The old - table is freed. If WHEN_TO_REHASH is zero or negative, the - table is never resized. +/* Walking. */ - The function returns non-zero - - if TABLE_SIZE is zero or negative - - if EQUALITY_TESTER or HASH is null - - if it was unable to allocate sufficient storage for the hash table - - if WHEN_TO_REHASH is zero or negative - Otherwise it returns zero. +/* The functions in this page traverse the hash table and process the + contained entries. For the traversal to work properly, the hash table + should not be resized nor modified while any particular entry is being + processed. In particular, entries should not be added, and an entry + may be removed only if there is no shrink threshold and the entry being + removed has already been passed to hash_get_next. */ - FIXME: tell what happens to any existing hash table when this - function is called (e.g. a second time). */ +/* Return the first data in the table, or NULL if the table is empty. 
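   Together with hash_get_next below, this supports the usual traversal
   idiom; a sketch, where process_entry stands for arbitrary read-only
   per-entry work and is not part of this API:

     void *entry;
     for (entry = hash_get_first (table); entry != NULL;
          entry = hash_get_next (table, entry))
       process_entry (entry);

   Per the walking rules above, no entries may be added during such a loop.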
*/ -HT * -hash_initialize (unsigned int candidate_table_size, - Hash_key_freer_type key_freer, - unsigned int (*hash) (const void *, unsigned int), - int (*key_comparator) (const void *, const void *)) +void * +hash_get_first (const Hash_table *table) { - HT *ht; - unsigned int i; - unsigned int table_size; + struct hash_entry const *bucket; - if (candidate_table_size <= 0) + if (table->n_entries == 0) return NULL; - if (hash == NULL || key_comparator == NULL) - return NULL; + for (bucket = table->bucket; ; bucket++) + if (! (bucket < table->bucket_limit)) + abort (); + else if (bucket->data) + return bucket->data; +} - ht = (HT *) malloc (sizeof (HT)); - if (ht == NULL) - return NULL; +/* Return the user data for the entry following ENTRY, where ENTRY has been + returned by a previous call to either 'hash_get_first' or 'hash_get_next'. + Return NULL if there are no more entries. */ - table_size = next_prime (candidate_table_size); - ht->hash_table = (HASH_ENT **) malloc (table_size * sizeof (HASH_ENT *)); - if (ht->hash_table == NULL) - return NULL; +void * +hash_get_next (const Hash_table *table, const void *entry) +{ + struct hash_entry const *bucket = safe_hasher (table, entry); + struct hash_entry const *cursor; - for (i = 0; i < table_size; i++) + /* Find next entry in the same bucket. */ + cursor = bucket; + do { - BUCKET_HEAD (ht, i) = NULL; + if (cursor->data == entry && cursor->next) + return cursor->next->data; + cursor = cursor->next; } + while (cursor != NULL); - ht->hash_free_entry_list = NULL; - ht->hash_table_size = table_size; - ht->hash_hash = hash; - ht->hash_key_comparator = key_comparator; - ht->hash_key_freer = key_freer; - ht->hash_n_slots_used = 0; - ht->hash_max_chain_length = 0; - ht->hash_n_keys = 0; - ht->hash_dirty_max_chain_length = 0; -#ifdef USE_OBSTACK - obstack_init (&(ht->ht_obstack)); -#endif + /* Find first entry in any subsequent bucket. */ + while (++bucket < table->bucket_limit) + if (bucket->data) + return bucket->data; - return ht; + /* None found. */ + return NULL; } -/* This private function is used to help with insertion and deletion. - If E does *not* compare equal to the key of any entry in the table, - return NULL. - When E matches an entry in the table, return a pointer to the matching - entry. When DELETE is non-zero and E matches an entry in the table, - unlink the matching entry. Set *CHAIN_LENGTH to the number of keys - that have hashed to the bucket E hashed to. */ +/* Fill BUFFER with pointers to active user entries in the hash table, then + return the number of pointers copied. Do not copy more than BUFFER_SIZE + pointers. 
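   A typical caller sizes the buffer from the current entry count; a sketch
   (error handling kept minimal; all names used are from this API):

     size_t n = hash_get_n_entries (table);
     void **entries = malloc (n * sizeof *entries);
     if (entries != NULL)
       {
         size_t copied = hash_get_entries (table, entries, n);
         /* ... use entries[0] through entries[copied - 1] ... */
         free (entries);
       }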
*/ -static HASH_ENT * -hash_find_entry (HT *ht, const void *e, unsigned int *table_idx, - unsigned int *chain_length, int delete) +size_t +hash_get_entries (const Hash_table *table, void **buffer, + size_t buffer_size) { - unsigned int idx; - int found; - HASH_ENT *p, *prev; + size_t counter = 0; + struct hash_entry const *bucket; + struct hash_entry const *cursor; - idx = ht->hash_hash (e, ht->hash_table_size); - assert (idx < ht->hash_table_size); - - *table_idx = idx; - *chain_length = 0; + for (bucket = table->bucket; bucket < table->bucket_limit; bucket++) + { + if (bucket->data) + { + for (cursor = bucket; cursor; cursor = cursor->next) + { + if (counter >= buffer_size) + return counter; + buffer[counter++] = cursor->data; + } + } + } - prev = ht->hash_table[idx]; + return counter; +} - if (prev == NULL) - return NULL; +/* Call a PROCESSOR function for each entry of a hash table, and return the + number of entries for which the processor function returned success. A + pointer to some PROCESSOR_DATA which will be made available to each call to + the processor function. The PROCESSOR accepts two arguments: the first is + the user entry being walked into, the second is the value of PROCESSOR_DATA + as received. The walking continue for as long as the PROCESSOR function + returns nonzero. When it returns zero, the walking is interrupted. */ + +size_t +hash_do_for_each (const Hash_table *table, Hash_processor processor, + void *processor_data) +{ + size_t counter = 0; + struct hash_entry const *bucket; + struct hash_entry const *cursor; - *chain_length = 1; - if (ht->hash_key_comparator (e, prev->key) == 0) + for (bucket = table->bucket; bucket < table->bucket_limit; bucket++) { - if (delete) - ht->hash_table[idx] = prev->next; - return prev; + if (bucket->data) + { + for (cursor = bucket; cursor; cursor = cursor->next) + { + if (! processor (cursor->data, processor_data)) + return counter; + counter++; + } + } } - p = prev->next; - found = 0; - while (p) - { - ++(*chain_length); - if (ht->hash_key_comparator (e, p->key) == 0) - { - found = 1; - break; - } - prev = p; - p = p->next; - } + return counter; +} - if (!found) - return NULL; +/* Allocation and clean-up. */ - assert (p != NULL); - if (delete) - prev->next = p->next; +/* Return a hash index for a NUL-terminated STRING between 0 and N_BUCKETS-1. + This is a convenience routine for constructing other hashing functions. */ - return p; -} +#if USE_DIFF_HASH -/* Return non-zero if E is already in the table, zero otherwise. */ +/* About hashings, Paul Eggert writes to me (FP), on 1994-01-01: "Please see + B. J. McKenzie, R. Harries & T. Bell, Selecting a hashing algorithm, + Software--practice & experience 20, 2 (Feb 1990), 209-224. Good hash + algorithms tend to be domain-specific, so what's good for [diffutils'] io.c + may not be good for your application." 
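   Hence the two interchangeable implementations below, selected at compile
   time through USE_DIFF_HASH.

   Either variant makes hash_string convenient for hashing structures whose
   key is a string field; a sketch (struct item and item_hasher are
   illustrative, not part of this API):

     struct item { char *name; int value; };

     static size_t
     item_hasher (const void *e, size_t n_buckets)
     {
       return hash_string (((struct item const *) e)->name, n_buckets);
     }

   Such a hasher pairs with a comparator that compares the same field.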
*/ -int -hash_query_in_table (const HT *ht, const void *e) +size_t +hash_string (const char *string, size_t n_buckets) { - unsigned int idx; - HASH_ENT *p; +# define HASH_ONE_CHAR(Value, Byte) \ + ((Byte) + rotl_sz (Value, 7)) - idx = ht->hash_hash (e, ht->hash_table_size); - assert (idx < ht->hash_table_size); - for (p = BUCKET_HEAD (ht, idx); p != NULL; p = p->next) - if (ht->hash_key_comparator (e, p->key) == 0) - return 1; - return 0; -} + size_t value = 0; + unsigned char ch; -void * -hash_lookup (const HT *ht, const void *e) -{ - unsigned int idx; - HASH_ENT *p; + for (; (ch = *string); string++) + value = HASH_ONE_CHAR (value, ch); + return value % n_buckets; - idx = ht->hash_hash (e, ht->hash_table_size); - assert (idx < ht->hash_table_size); - for (p = BUCKET_HEAD (ht, idx); p != NULL; p = p->next) - if (ht->hash_key_comparator (e, p->key) == 0) - return p->key; - return NULL; +# undef HASH_ONE_CHAR } -/* If E matches an entry already in the hash table, don't modify the - table and return a pointer to the matched entry. If E does not - match any item in the table, insert E and return NULL. - If the storage required for insertion cannot be allocated - set *FAILED to non-zero and return NULL. */ - -void * -hash_insert_if_absent (HT *ht, const void *e, int *failed) -{ - const HASH_ENT *ent; - HASH_ENT *new; - unsigned int idx; - unsigned int chain_length; +#else /* not USE_DIFF_HASH */ - assert (e != NULL); /* Can't insert a NULL key. */ +/* This one comes from 'recode', and performs a bit better than the above as + per a few experiments. It is inspired from a hashing routine found in the + very old Cyber 'snoop', itself written in typical Greg Mansfield style. + (By the way, what happened to this excellent man? Is he still alive?) */ - *failed = 0; - ent = hash_find_entry (ht, e, &idx, &chain_length, 0); - if (ent != NULL) - { - /* E matches a key from an entry already in the table. */ - return ent->key; - } - - new = hash_allocate_entry (ht); - if (new == NULL) - { - *failed = 1; - return NULL; - } +size_t +hash_string (const char *string, size_t n_buckets) +{ + size_t value = 0; + unsigned char ch; - new->key = (void *) e; - new->next = BUCKET_HEAD (ht, idx); - BUCKET_HEAD (ht, idx) = new; + for (; (ch = *string); string++) + value = (value * 31 + ch) % n_buckets; + return value; +} - if (chain_length == 0) - ++(ht->hash_n_slots_used); +#endif /* not USE_DIFF_HASH */ - /* The insertion has just increased chain_length by 1. */ - ++chain_length; +/* Return true if CANDIDATE is a prime number. CANDIDATE should be an odd + number at least equal to 11. */ - if (chain_length > ht->hash_max_chain_length) - ht->hash_max_chain_length = chain_length; +static bool _GL_ATTRIBUTE_CONST +is_prime (size_t candidate) +{ + size_t divisor = 3; + size_t square = divisor * divisor; - ++(ht->hash_n_keys); - if ((double) ht->hash_n_keys / ht->hash_table_size > 0.80) + while (square < candidate && (candidate % divisor)) { - unsigned int new_size; - new_size = next_prime (2 * ht->hash_table_size + 1); - *failed = hash_rehash (ht, new_size); + divisor++; + square += 4 * divisor; + divisor++; } -#ifdef TESTING - assert (hash_table_ok (ht)); -#endif - - return NULL; + return (candidate % divisor ? true : false); } -/* If E is already in the table, remove it and return a pointer to - the just-deleted key (the user may want to deallocate its storage). - If E is not in the table, don't modify the table and return NULL. */ +/* Round a given CANDIDATE number up to the nearest prime, and return that + prime. 
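   For example, next_prime (125) probes 125 (divisible by 5) and
   returns 127.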
Primes lower than 10 are merely skipped. */ -void * -hash_delete_if_present (HT *ht, const void *e) +static size_t _GL_ATTRIBUTE_CONST +next_prime (size_t candidate) { - HASH_ENT *ent; - void *key; - unsigned int idx; - unsigned int chain_length; - - ent = hash_find_entry (ht, e, &idx, &chain_length, 1); - if (ent == NULL) - return NULL; + /* Skip small primes. */ + if (candidate < 10) + candidate = 10; - if (ent->next == NULL && chain_length == 1) - --(ht->hash_n_slots_used); - - key = ent->key; - - --(ht->hash_n_keys); - ht->hash_dirty_max_chain_length = 1; - if (ent->next == NULL && chain_length < ht->hash_max_chain_length) - ht->hash_dirty_max_chain_length = 0; + /* Make it definitely odd. */ + candidate |= 1; - hash_free_entry (ht, ent); + while (SIZE_MAX != candidate && !is_prime (candidate)) + candidate += 2; -#ifdef TESTING - assert (hash_table_ok (ht)); -#endif - return key; + return candidate; } void -hash_print_statistics (const HT *ht, FILE *stream) +hash_reset_tuning (Hash_tuning *tuning) { - unsigned int n_slots_used; - unsigned int n_keys; - unsigned int max_chain_length; - int err; - - err = hash_get_statistics (ht, &n_slots_used, &n_keys, &max_chain_length); - assert (err == 0); - fprintf (stream, "table size: %d\n", ht->hash_table_size); - fprintf (stream, "# slots used: %u (%.2f%%)\n", n_slots_used, - (100.0 * n_slots_used) / ht->hash_table_size); - fprintf (stream, "# keys: %u\n", n_keys); - fprintf (stream, "max chain length: %u\n", max_chain_length); + *tuning = default_tuning; } -/* If there is *NO* table (so, no meaningful stats) return non-zero - and don't reference the argument pointers. Otherwise compute the - performance statistics and return non-zero. */ - -int -hash_get_statistics (const HT *ht, - unsigned int *n_slots_used, - unsigned int *n_keys, - unsigned int *max_chain_length) +/* If the user passes a NULL hasher, we hash the raw pointer. */ +static size_t +raw_hasher (const void *data, size_t n) { - unsigned int i; + /* When hashing unique pointers, it is often the case that they were + generated by malloc and thus have the property that the low-order + bits are 0. As this tends to give poorer performance with small + tables, we rotate the pointer value before performing division, + in an attempt to improve hash quality. */ + size_t val = rotr_sz ((size_t) data, 3); + return val % n; +} - if (ht == NULL || ht->hash_table == NULL) - return 1; +/* If the user passes a NULL comparator, we use pointer comparison. */ +static bool +raw_comparator (const void *a, const void *b) +{ + return a == b; +} - *max_chain_length = 0; - *n_slots_used = 0; - *n_keys = 0; - for (i = 0; i < ht->hash_table_size; i++) - { - unsigned int chain_length = 0; - HASH_ENT *p; +/* For the given hash TABLE, check the user supplied tuning structure for + reasonable values, and return true if there is no gross error with it. + Otherwise, definitively reset the TUNING field to some acceptable default + in the hash table (that is, the user loses the right of further modifying + tuning arguments), and return false. */ - p = BUCKET_HEAD (ht, i); - if (p != NULL) - ++(*n_slots_used); +static bool +check_tuning (Hash_table *table) +{ + const Hash_tuning *tuning = table->tuning; + float epsilon; + if (tuning == &default_tuning) + return true; + + /* Be a bit stricter than mathematics would require, so that + rounding errors in size calculations do not cause allocations to + fail to grow or shrink as they should. 
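     Concretely, a growth or shrink factor too close to 1.0 could be
     swallowed entirely by next_prime's rounding, leaving the table size
     unchanged.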
The smallest allocation + is 11 (due to next_prime's algorithm), so an epsilon of 0.1 + should be good enough. */ + epsilon = 0.1f; + + if (epsilon < tuning->growth_threshold + && tuning->growth_threshold < 1 - epsilon + && 1 + epsilon < tuning->growth_factor + && 0 <= tuning->shrink_threshold + && tuning->shrink_threshold + epsilon < tuning->shrink_factor + && tuning->shrink_factor <= 1 + && tuning->shrink_threshold + epsilon < tuning->growth_threshold) + return true; + + table->tuning = &default_tuning; + return false; +} - for (; p; p = p->next) - ++chain_length; +/* Compute the size of the bucket array for the given CANDIDATE and + TUNING, or return 0 if there is no possible way to allocate that + many entries. */ - *n_keys += chain_length; - if (chain_length > *max_chain_length) - *max_chain_length = chain_length; +static size_t _GL_ATTRIBUTE_PURE +compute_bucket_size (size_t candidate, const Hash_tuning *tuning) +{ + if (!tuning->is_n_buckets) + { + float new_candidate = candidate / tuning->growth_threshold; + if (SIZE_MAX <= new_candidate) + return 0; + candidate = new_candidate; } - return 0; + candidate = next_prime (candidate); + if (xalloc_oversized (candidate, sizeof (struct hash_entry *))) + return 0; + return candidate; } -int -hash_table_ok (HT *ht) +/* Allocate and return a new hash table, or NULL upon failure. The initial + number of buckets is automatically selected so as to _guarantee_ that you + may insert at least CANDIDATE different user entries before any growth of + the hash table size occurs. So, if have a reasonably tight a-priori upper + bound on the number of entries you intend to insert in the hash table, you + may save some table memory and insertion time, by specifying it here. If + the IS_N_BUCKETS field of the TUNING structure is true, the CANDIDATE + argument has its meaning changed to the wanted number of buckets. + + TUNING points to a structure of user-supplied values, in case some fine + tuning is wanted over the default behavior of the hasher. If TUNING is + NULL, the default tuning parameters are used instead. If TUNING is + provided but the values requested are out of bounds or might cause + rounding errors, return NULL. + + The user-supplied HASHER function, when not NULL, accepts two + arguments ENTRY and TABLE_SIZE. It computes, by hashing ENTRY contents, a + slot number for that entry which should be in the range 0..TABLE_SIZE-1. + This slot number is then returned. + + The user-supplied COMPARATOR function, when not NULL, accepts two + arguments pointing to user data, it then returns true for a pair of entries + that compare equal, or false otherwise. This function is internally called + on entries which are already known to hash to the same bucket index, + but which are distinct pointers. + + The user-supplied DATA_FREER function, when not NULL, may be later called + with the user data as an argument, just before the entry containing the + data gets freed. This happens from within 'hash_free' or 'hash_clear'. + You should specify this function only if you want these functions to free + all of your 'data' data. This is typically the case when your data is + simply an auxiliary struct that you have malloc'd to aggregate several + values. 
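   A minimal setup for a table of malloc'd strings might read as follows
   (a sketch; string_hasher and string_comparator are illustrative names,
   not part of this API, and <string.h> is assumed for strcmp):

     static size_t
     string_hasher (const void *entry, size_t n_buckets)
     {
       return hash_string (entry, n_buckets);
     }

     static bool
     string_comparator (const void *a, const void *b)
     {
       return strcmp (a, b) == 0;
     }

     Hash_table *table = hash_initialize (31, NULL, string_hasher,
                                          string_comparator, free);

   Passing free as DATA_FREER lets hash_free and hash_clear release the
   strings themselves.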
*/ + +Hash_table * +hash_initialize (size_t candidate, const Hash_tuning *tuning, + Hash_hasher hasher, Hash_comparator comparator, + Hash_data_freer data_freer) { - int code; - unsigned int n_slots_used; - unsigned int n_keys; - unsigned int max_chain_length; + Hash_table *table; - if (ht == NULL || ht->hash_table == NULL) - return 1; + if (hasher == NULL) + hasher = raw_hasher; + if (comparator == NULL) + comparator = raw_comparator; - code = hash_get_statistics (ht, &n_slots_used, &n_keys, - &max_chain_length); + table = malloc (sizeof *table); + if (table == NULL) + return NULL; - if (code != 0 - || n_slots_used != ht->hash_n_slots_used - || n_keys != ht->hash_n_keys - || max_chain_length != hash_get_max_chain_length (ht)) - return 0; + if (!tuning) + tuning = &default_tuning; + table->tuning = tuning; + if (!check_tuning (table)) + { + /* Fail if the tuning options are invalid. This is the only occasion + when the user gets some feedback about it. Once the table is created, + if the user provides invalid tuning options, we silently revert to + using the defaults, and ignore further request to change the tuning + options. */ + goto fail; + } - return 1; -} + table->n_buckets = compute_bucket_size (candidate, tuning); + if (!table->n_buckets) + goto fail; -/* See hash_do_for_each_2 (below) for a variant. */ + table->bucket = calloc (table->n_buckets, sizeof *table->bucket); + if (table->bucket == NULL) + goto fail; + table->bucket_limit = table->bucket + table->n_buckets; + table->n_buckets_used = 0; + table->n_entries = 0; -void -hash_do_for_each (HT *ht, void (*f) (void *e, void *aux), void *aux) -{ - unsigned int i; + table->hasher = hasher; + table->comparator = comparator; + table->data_freer = data_freer; -#ifdef TESTING - assert (hash_table_ok (ht)); + table->free_entry_list = NULL; +#if USE_OBSTACK + obstack_init (&table->entry_stack); #endif + return table; - if (ht->hash_table == NULL) - return; - - for (i = 0; i < ht->hash_table_size; i++) - { - HASH_ENT *p; - for (p = BUCKET_HEAD (ht, i); p; p = p->next) - { - (*f) (p->key, aux); - } - } + fail: + free (table); + return NULL; } -/* Just like hash_do_for_each, except that function F returns an int - that can signal (when non-zero) we should return early. */ +/* Make all buckets empty, placing any chained entries on the free list. + Apply the user-specified function data_freer (if any) to the datas of any + affected entries. */ -int -hash_do_for_each_2 (HT *ht, int (*f) (void *e, void *aux), void *aux) +void +hash_clear (Hash_table *table) { - unsigned int i; + struct hash_entry *bucket; -#ifdef TESTING - assert (hash_table_ok (ht)); -#endif - - if (ht->hash_table == NULL) - return 0; - - for (i = 0; i < ht->hash_table_size; i++) + for (bucket = table->bucket; bucket < table->bucket_limit; bucket++) { - HASH_ENT *p; - for (p = BUCKET_HEAD (ht, i); p; p = p->next) - { - int return_code; - - return_code = (*f) (p->key, aux); - if (return_code != 0) - return return_code; - } + if (bucket->data) + { + struct hash_entry *cursor; + struct hash_entry *next; + + /* Free the bucket overflow. */ + for (cursor = bucket->next; cursor; cursor = next) + { + if (table->data_freer) + table->data_freer (cursor->data); + cursor->data = NULL; + + next = cursor->next; + /* Relinking is done one entry at a time, as it is to be expected + that overflows are either rare or short. */ + cursor->next = table->free_entry_list; + table->free_entry_list = cursor; + } + + /* Free the bucket head. 
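             Unlike the overflow entries just relinked above, the head
             lives inside the bucket array itself, so it is not returned
             to the free list; only its data and next pointers are
             cleared.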
*/ + if (table->data_freer) + table->data_freer (bucket->data); + bucket->data = NULL; + bucket->next = NULL; + } } - return 0; + + table->n_buckets_used = 0; + table->n_entries = 0; } -/* For each entry in the bucket addressed by BUCKET_KEY of the hash - table HT, invoke the function F. If F returns non-zero, stop - iterating and return that value. Otherwise, apply F to all entries - in the selected bucket and return zero. The AUX argument to this - function is passed as the last argument in each invocation of F. - The first argument to F is BUCKET_KEY, and the second is the key of - an entry in the selected bucket. */ +/* Reclaim all storage associated with a hash table. If a data_freer + function has been supplied by the user when the hash table was created, + this function applies it to the data of each entry before freeing that + entry. */ -int -hash_do_for_each_in_selected_bucket (HT *ht, const void *bucket_key, - int (*f) (const void *bucket_key, - void *e, void *aux), - void *aux) +void +hash_free (Hash_table *table) { - int idx; - HASH_ENT *p; + struct hash_entry *bucket; + struct hash_entry *cursor; + struct hash_entry *next; -#ifdef TESTING - assert (hash_table_ok (ht)); -#endif + /* Call the user data_freer function. */ + if (table->data_freer && table->n_entries) + { + for (bucket = table->bucket; bucket < table->bucket_limit; bucket++) + { + if (bucket->data) + { + for (cursor = bucket; cursor; cursor = cursor->next) + table->data_freer (cursor->data); + } + } + } - if (ht->hash_table == NULL) - return 0; +#if USE_OBSTACK - idx = ht->hash_hash (bucket_key, ht->hash_table_size); + obstack_free (&table->entry_stack, NULL); - for (p = BUCKET_HEAD (ht, idx); p != NULL; p = p->next) +#else + + /* Free all bucket overflowed entries. */ + for (bucket = table->bucket; bucket < table->bucket_limit; bucket++) { - int return_code; + for (cursor = bucket->next; cursor; cursor = next) + { + next = cursor->next; + free (cursor); + } + } - return_code = (*f) (bucket_key, p->key, aux); - if (return_code != 0) - return return_code; + /* Also reclaim the internal list of previously freed entries. */ + for (cursor = table->free_entry_list; cursor; cursor = next) + { + next = cursor->next; + free (cursor); } - return 0; +#endif + + /* Free the remainder of the hash table structure. */ + free (table->bucket); + free (table); } -/* Make all buckets empty, placing any chained entries on the free list. - As with hash_free, apply the user-specified function key_freer - (if it's not NULL) to the keys of any affected entries. */ +/* Insertion and deletion. */ -void -hash_clear (HT *ht) +/* Get a new hash entry for a bucket overflow, possibly by recycling a + previously freed one. If this is not possible, allocate a new one. */ + +static struct hash_entry * +allocate_entry (Hash_table *table) { - unsigned int i; - HASH_ENT *p; + struct hash_entry *new; - for (i = 0; i < ht->hash_table_size; i++) + if (table->free_entry_list) + { + new = table->free_entry_list; + table->free_entry_list = new->next; + } + else { - HASH_ENT *tail = NULL; - HASH_ENT *head = BUCKET_HEAD (ht, i); - - /* Free any keys and get tail pointer to last entry in chain. */ - for (p = head; p; p = p->next) - { - if (ht->hash_key_freer != NULL) - ht->hash_key_freer (p->key); - p->key = NULL; /* Make sure no one tries to use this key later. */ - tail = p; - } - BUCKET_HEAD (ht, i) = NULL; - - /* If there's a chain in this bucket, tack it onto the - beginning of the free list. 
*/ - if (head != NULL) - { - assert (tail != NULL && tail->next == NULL); - tail->next = ht->hash_free_entry_list; - ht->hash_free_entry_list = head; - } +#if USE_OBSTACK + new = obstack_alloc (&table->entry_stack, sizeof *new); +#else + new = malloc (sizeof *new); +#endif } - ht->hash_n_slots_used = 0; - ht->hash_max_chain_length = 0; - ht->hash_n_keys = 0; - ht->hash_dirty_max_chain_length = 0; + + return new; } -/* Free all storage associated with HT that functions in this package - have allocated. If a key_freer function has been supplied (when HT - was created), this function applies it to the key of each entry before - freeing that entry. */ +/* Free a hash entry which was part of some bucket overflow, + saving it for later recycling. */ static void -hash_free_0 (HT *ht, int free_user_data) +free_entry (Hash_table *table, struct hash_entry *entry) +{ + entry->data = NULL; + entry->next = table->free_entry_list; + table->free_entry_list = entry; +} + +/* This private function is used to help with insertion and deletion. When + ENTRY matches an entry in the table, return a pointer to the corresponding + user data and set *BUCKET_HEAD to the head of the selected bucket. + Otherwise, return NULL. When DELETE is true and ENTRY matches an entry in + the table, unlink the matching entry. */ + +static void * +hash_find_entry (Hash_table *table, const void *entry, + struct hash_entry **bucket_head, bool delete) { - if (free_user_data && ht->hash_key_freer != NULL) + struct hash_entry *bucket = safe_hasher (table, entry); + struct hash_entry *cursor; + + *bucket_head = bucket; + + /* Test for empty bucket. */ + if (bucket->data == NULL) + return NULL; + + /* See if the entry is the first in the bucket. */ + if (entry == bucket->data || table->comparator (entry, bucket->data)) { - unsigned int i; - - for (i = 0; i < ht->hash_table_size; i++) - { - HASH_ENT *p; - HASH_ENT *next; - - for (p = BUCKET_HEAD (ht, i); p; p = next) - { - next = p->next; - ht->hash_key_freer (p->key); - } - } + void *data = bucket->data; + + if (delete) + { + if (bucket->next) + { + struct hash_entry *next = bucket->next; + + /* Bump the first overflow entry into the bucket head, then save + the previous first overflow entry for later recycling. */ + *bucket = *next; + free_entry (table, next); + } + else + { + bucket->data = NULL; + } + } + + return data; } -#ifdef USE_OBSTACK - obstack_free (&(ht->ht_obstack), NULL); -#else - { - unsigned int i; - for (i = 0; i < ht->hash_table_size; i++) - { - HASH_ENT *p; - HASH_ENT *next; - - for (p = BUCKET_HEAD (ht, i); p; p = next) - { - next = p->next; - free (p); - } - } - } -#endif - ht->hash_free_entry_list = NULL; - free (ht->hash_table); + /* Scan the bucket overflow. */ + for (cursor = bucket; cursor->next; cursor = cursor->next) + { + if (entry == cursor->next->data + || table->comparator (entry, cursor->next->data)) + { + void *data = cursor->next->data; + + if (delete) + { + struct hash_entry *next = cursor->next; + + /* Unlink the entry to delete, then save the freed entry for later + recycling. */ + cursor->next = next->next; + free_entry (table, next); + } + + return data; + } + } + + /* No entry found. */ + return NULL; } -void -hash_free (HT *ht) +/* Internal helper, to move entries from SRC to DST. Both tables must + share the same free entry list. If SAFE, only move overflow + entries, saving bucket heads for later, so that no allocations will + occur. Return false if the free entry list is exhausted and an + allocation fails. 
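   Note that only moving a bucket head can force an allocation: overflow
   entries always travel with their own struct hash_entry, which is merely
   relinked or recycled.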
*/ + +static bool +transfer_entries (Hash_table *dst, Hash_table *src, bool safe) { - hash_free_0 (ht, 1); - free (ht); + struct hash_entry *bucket; + struct hash_entry *cursor; + struct hash_entry *next; + for (bucket = src->bucket; bucket < src->bucket_limit; bucket++) + if (bucket->data) + { + void *data; + struct hash_entry *new_bucket; + + /* Within each bucket, transfer overflow entries first and + then the bucket head, to minimize memory pressure. After + all, the only time we might allocate is when moving the + bucket head, but moving overflow entries first may create + free entries that can be recycled by the time we finally + get to the bucket head. */ + for (cursor = bucket->next; cursor; cursor = next) + { + data = cursor->data; + new_bucket = safe_hasher (dst, data); + + next = cursor->next; + + if (new_bucket->data) + { + /* Merely relink an existing entry, when moving from a + bucket overflow into a bucket overflow. */ + cursor->next = new_bucket->next; + new_bucket->next = cursor; + } + else + { + /* Free an existing entry, when moving from a bucket + overflow into a bucket header. */ + new_bucket->data = data; + dst->n_buckets_used++; + free_entry (dst, cursor); + } + } + /* Now move the bucket head. Be sure that if we fail due to + allocation failure that the src table is in a consistent + state. */ + data = bucket->data; + bucket->next = NULL; + if (safe) + continue; + new_bucket = safe_hasher (dst, data); + + if (new_bucket->data) + { + /* Allocate or recycle an entry, when moving from a bucket + header into a bucket overflow. */ + struct hash_entry *new_entry = allocate_entry (dst); + + if (new_entry == NULL) + return false; + + new_entry->data = data; + new_entry->next = new_bucket->next; + new_bucket->next = new_entry; + } + else + { + /* Move from one bucket header to another. */ + new_bucket->data = data; + dst->n_buckets_used++; + } + bucket->data = NULL; + src->n_buckets_used--; + } + return true; } -#ifdef TESTING +/* For an already existing hash table, change the number of buckets through + specifying CANDIDATE. The contents of the hash table are preserved. The + new number of buckets is automatically selected so as to _guarantee_ that + the table may receive at least CANDIDATE different user entries, including + those already in the table, before any other growth of the hash table size + occurs. If TUNING->IS_N_BUCKETS is true, then CANDIDATE specifies the + exact number of buckets desired. Return true iff the rehash succeeded. */ -void -hash_print (const HT *ht) +bool +hash_rehash (Hash_table *table, size_t candidate) { - int i; + Hash_table storage; + Hash_table *new_table; + size_t new_size = compute_bucket_size (candidate, table->tuning); + + if (!new_size) + return false; + if (new_size == table->n_buckets) + return true; + new_table = &storage; + new_table->bucket = calloc (new_size, sizeof *new_table->bucket); + if (new_table->bucket == NULL) + return false; + new_table->n_buckets = new_size; + new_table->bucket_limit = new_table->bucket + new_size; + new_table->n_buckets_used = 0; + new_table->n_entries = 0; + new_table->tuning = table->tuning; + new_table->hasher = table->hasher; + new_table->comparator = table->comparator; + new_table->data_freer = table->data_freer; + + /* In order for the transfer to successfully complete, we need + additional overflow entries when distinct buckets in the old + table collide into a common bucket in the new table. 
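   (For example, when two old bucket heads land in the same new bucket,
   the second head needs a freshly allocated overflow entry.)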
The worst + case possible is a hasher that gives a good spread with the old + size, but returns a constant with the new size; if we were to + guarantee table->n_buckets_used-1 free entries in advance, then + the transfer would be guaranteed to not allocate memory. + However, for large tables, a guarantee of no further allocation + introduces a lot of extra memory pressure, all for an unlikely + corner case (most rehashes reduce, rather than increase, the + number of overflow entries needed). So, we instead ensure that + the transfer process can be reversed if we hit a memory + allocation failure mid-transfer. */ + + /* Merely reuse the extra old space into the new table. */ +#if USE_OBSTACK + new_table->entry_stack = table->entry_stack; +#endif + new_table->free_entry_list = table->free_entry_list; - for (i = 0; i < ht->hash_table_size; i++) + if (transfer_entries (new_table, table, false)) { - HASH_ENT *p; + /* Entries transferred successfully; tie up the loose ends. */ + free (table->bucket); + table->bucket = new_table->bucket; + table->bucket_limit = new_table->bucket_limit; + table->n_buckets = new_table->n_buckets; + table->n_buckets_used = new_table->n_buckets_used; + table->free_entry_list = new_table->free_entry_list; + /* table->n_entries and table->entry_stack already hold their value. */ + return true; + } + + /* We've allocated new_table->bucket (and possibly some entries), + exhausted the free list, and moved some but not all entries into + new_table. We must undo the partial move before returning + failure. The only way to get into this situation is if new_table + uses fewer buckets than the old table, so we will reclaim some + free entries as overflows in the new table are put back into + distinct buckets in the old table. + + There are some pathological cases where a single pass through the + table requires more intermediate overflow entries than using two + passes. Two passes give worse cache performance and takes + longer, but at this point, we're already out of memory, so slow + and safe is better than failure. */ + table->free_entry_list = new_table->free_entry_list; + if (! (transfer_entries (table, new_table, true) + && transfer_entries (table, new_table, false))) + abort (); + /* table->n_entries already holds its value. */ + free (new_table->bucket); + return false; +} + +/* Insert ENTRY into hash TABLE if there is not already a matching entry. + + Return -1 upon memory allocation failure. + Return 1 if insertion succeeded. + Return 0 if there is already a matching entry in the table, + and in that case, if MATCHED_ENT is non-NULL, set *MATCHED_ENT + to that entry. + + This interface is easier to use than hash_insert when you must + distinguish between the latter two cases. More importantly, + hash_insert is unusable for some types of ENTRY values. When using + hash_insert, the only way to distinguish those cases is to compare + the return value and ENTRY. That works only when you can have two + different ENTRY values that point to data that compares "equal". Thus, + when the ENTRY value is a simple scalar, you must use + hash_insert_if_absent. ENTRY must not be NULL. */ +int +hash_insert_if_absent (Hash_table *table, void const *entry, + void const **matched_ent) +{ + void *data; + struct hash_entry *bucket; - if (BUCKET_HEAD (ht, i) != NULL) - printf ("%d:\n", i); + /* The caller cannot insert a NULL entry, since hash_lookup returns NULL + to indicate "not found", and hash_find_entry uses "bucket->data == NULL" + to indicate an empty bucket. */ + if (! 
entry) + abort (); - for (p = BUCKET_HEAD (ht, i); p; p = p->next) - { - char *s = (char *) p->key; - /* FIXME */ - printf (" %s\n", s); - } + /* If there's a matching entry already in the table, return that. */ + if ((data = hash_find_entry (table, entry, &bucket, false)) != NULL) + { + if (matched_ent) + *matched_ent = data; + return 0; } -} -#endif /* TESTING */ + /* If the growth threshold of the buckets in use has been reached, increase + the table size and rehash. There's no point in checking the number of + entries: if the hashing function is ill-conditioned, rehashing is not + likely to improve it. */ -void -hash_get_key_list (const HT *ht, unsigned int bufsize, void **buf) -{ - unsigned int i; - unsigned int c = 0; + if (table->n_buckets_used + > table->tuning->growth_threshold * table->n_buckets) + { + /* Check more fully, before starting real work. If tuning arguments + became invalid, the second check will rely on proper defaults. */ + check_tuning (table); + if (table->n_buckets_used + > table->tuning->growth_threshold * table->n_buckets) + { + const Hash_tuning *tuning = table->tuning; + float candidate = + (tuning->is_n_buckets + ? (table->n_buckets * tuning->growth_factor) + : (table->n_buckets * tuning->growth_factor + * tuning->growth_threshold)); + + if (SIZE_MAX <= candidate) + return -1; + + /* If the rehash fails, arrange to return NULL. */ + if (!hash_rehash (table, candidate)) + return -1; + + /* Update the bucket we are interested in. */ + if (hash_find_entry (table, entry, &bucket, false) != NULL) + abort (); + } + } + + /* ENTRY is not matched, it should be inserted. */ - for (i = 0; i < ht->hash_table_size; i++) + if (bucket->data) { - HASH_ENT *p; - - for (p = BUCKET_HEAD (ht, i); p; p = p->next) - { - if (c >= bufsize) - return; - buf[c++] = p->key; - } + struct hash_entry *new_entry = allocate_entry (table); + + if (new_entry == NULL) + return -1; + + /* Add ENTRY in the overflow of the bucket. */ + + new_entry->data = (void *) entry; + new_entry->next = bucket->next; + bucket->next = new_entry; + table->n_entries++; + return 1; } + + /* Add ENTRY right in the bucket head. */ + + bucket->data = (void *) entry; + table->n_entries++; + table->n_buckets_used++; + + return 1; +} + +/* hash_insert0 is the deprecated name for hash_insert_if_absent. + . */ +int +hash_insert0 (Hash_table *table, void const *entry, void const **matched_ent) +{ + return hash_insert_if_absent (table, entry, matched_ent); } -/* Return the first key in the table. If the table is empty, return NULL. */ +/* If ENTRY matches an entry already in the hash table, return the pointer + to the entry from the table. Otherwise, insert ENTRY and return ENTRY. + Return NULL if the storage required for insertion cannot be allocated. + This implementation does not support duplicate entries or insertion of + NULL. */ void * -hash_get_first (const HT *ht) +hash_insert (Hash_table *table, void const *entry) { - unsigned int idx; - HASH_ENT *p; + void const *matched_ent; + int err = hash_insert_if_absent (table, entry, &matched_ent); + return (err == -1 + ? NULL + : (void *) (err == 0 ? matched_ent : entry)); +} + +/* If ENTRY is already in the table, remove it and return the just-deleted + data (the user may want to deallocate its storage). If ENTRY is not in the + table, don't modify the table and return NULL. 
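   With a table of malloc'd strings (see the sketch near hash_initialize),
   a deletion therefore looks like:

     char *stored = hash_delete (table, "blue");
     free (stored);   /* free (NULL) is harmless if "blue" was absent. */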
*/ - if (ht->hash_n_keys == 0) +void * +hash_delete (Hash_table *table, const void *entry) +{ + void *data; + struct hash_entry *bucket; + + data = hash_find_entry (table, entry, &bucket, true); + if (!data) return NULL; - for (idx = 0; idx < ht->hash_table_size; idx++) + table->n_entries--; + if (!bucket->data) { - if ((p = BUCKET_HEAD (ht, idx)) != NULL) - return p->key; + table->n_buckets_used--; + + /* If the shrink threshold of the buckets in use has been reached, + rehash into a smaller table. */ + + if (table->n_buckets_used + < table->tuning->shrink_threshold * table->n_buckets) + { + /* Check more fully, before starting real work. If tuning arguments + became invalid, the second check will rely on proper defaults. */ + check_tuning (table); + if (table->n_buckets_used + < table->tuning->shrink_threshold * table->n_buckets) + { + const Hash_tuning *tuning = table->tuning; + size_t candidate = + (tuning->is_n_buckets + ? table->n_buckets * tuning->shrink_factor + : (table->n_buckets * tuning->shrink_factor + * tuning->growth_threshold)); + + if (!hash_rehash (table, candidate)) + { + /* Failure to allocate memory in an attempt to + shrink the table is not fatal. But since memory + is low, we can at least be kind and free any + spare entries, rather than keeping them tied up + in the free entry list. */ +#if ! USE_OBSTACK + struct hash_entry *cursor = table->free_entry_list; + struct hash_entry *next; + while (cursor) + { + next = cursor->next; + free (cursor); + cursor = next; + } + table->free_entry_list = NULL; +#endif + } + } + } } - abort (); + + return data; } -/* Return the key in the entry following the entry whose key matches E. - If there is the only one key in the table and that key matches E, - return the matching key. If E is not in the table, return NULL. */ +/* Testing. */ -void * -hash_get_next (const HT *ht, const void *e) +#if TESTING + +void +hash_print (const Hash_table *table) { - unsigned int idx; - HASH_ENT *p; + struct hash_entry *bucket = (struct hash_entry *) table->bucket; - idx = ht->hash_hash (e, ht->hash_table_size); - assert (idx < ht->hash_table_size); - for (p = BUCKET_HEAD (ht, idx); p != NULL; p = p->next) + for ( ; bucket < table->bucket_limit; bucket++) { - if (ht->hash_key_comparator (e, p->key) == 0) - { - if (p->next != NULL) - { - return p->next->key; - } - else - { - unsigned int bucket; - - /* E is the last or only key in the bucket chain. */ - if (ht->hash_n_keys == 1) - { - /* There is only one key in the table, and it matches E. */ - return p->key; - } - bucket = idx; - do - { - idx = (idx + 1) % ht->hash_table_size; - if ((p = BUCKET_HEAD (ht, idx)) != NULL) - return p->key; - } - while (idx != bucket); - } - } + struct hash_entry *cursor; + + if (bucket) + printf ("%lu:\n", (unsigned long int) (bucket - table->bucket)); + + for (cursor = bucket; cursor; cursor = cursor->next) + { + char const *s = cursor->data; + /* FIXME */ + if (s) + printf (" %s\n", s); + } } - - /* E is not in the table. */ - return NULL; } + +#endif /* TESTING */
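
Taken together, the post-patch interface composes as in the following
sketch (illustrative only, not part of the diff; it assumes a build where
"hash.h" is on the include path and strdup is available, and it keeps
error handling deliberately short, so a failed strdup, for instance,
would abort inside hash_insert_if_absent):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include "hash.h"

    static size_t
    str_hasher (const void *entry, size_t n_buckets)
    {
      return hash_string (entry, n_buckets);
    }

    static bool
    str_comparator (const void *a, const void *b)
    {
      return strcmp (a, b) == 0;
    }

    int
    main (void)
    {
      /* 16 is only a hint; the table grows as needed.  Passing free as
         DATA_FREER makes hash_free release the strdup'd strings below. */
      Hash_table *table = hash_initialize (16, NULL, str_hasher,
                                           str_comparator, free);
      if (!table)
        return EXIT_FAILURE;

      /* hash_insert returns the stored entry, or NULL on allocation
         failure; inserting an equal string returns the existing copy. */
      hash_insert (table, strdup ("alpha"));
      hash_insert (table, strdup ("beta"));

      printf ("lookup: %s\n", (char *) hash_lookup (table, "alpha"));
      printf ("entries: %lu\n",
              (unsigned long) hash_get_n_entries (table));

      hash_free (table);
      return EXIT_SUCCESS;
    }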