/*
 * Generic implementation of hash-based key value mappings.
 */
#include "cache.h"
#include "hashmap.h"

#define FNV32_BASE ((unsigned int) 0x811c9dc5)
#define FNV32_PRIME ((unsigned int) 0x01000193)

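/*
 * The hash functions below are 32-bit FNV-1: start from the FNV offset
 * basis and, for each byte, multiply by the FNV prime and xor in the byte.
 * The *i* variants fold ASCII 'a'..'z' to upper case before mixing.
 */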
unsigned int strhash(const char *str)
{
	unsigned int c, hash = FNV32_BASE;
	while ((c = (unsigned char) *str++))
		hash = (hash * FNV32_PRIME) ^ c;
	return hash;
}

unsigned int strihash(const char *str)
{
	unsigned int c, hash = FNV32_BASE;
	while ((c = (unsigned char) *str++)) {
		if (c >= 'a' && c <= 'z')
			c -= 'a' - 'A';
		hash = (hash * FNV32_PRIME) ^ c;
	}
	return hash;
}

unsigned int memhash(const void *buf, size_t len)
{
	unsigned int hash = FNV32_BASE;
	unsigned char *ucbuf = (unsigned char *) buf;
	while (len--) {
		unsigned int c = *ucbuf++;
		hash = (hash * FNV32_PRIME) ^ c;
	}
	return hash;
}

unsigned int memihash(const void *buf, size_t len)
{
	unsigned int hash = FNV32_BASE;
	unsigned char *ucbuf = (unsigned char *) buf;
	while (len--) {
		unsigned int c = *ucbuf++;
		if (c >= 'a' && c <= 'z')
			c -= 'a' - 'A';
		hash = (hash * FNV32_PRIME) ^ c;
	}
	return hash;
}

/*
 * Incorporate another chunk of data into a memihash
 * computation.
 */
unsigned int memihash_cont(unsigned int hash_seed, const void *buf, size_t len)
{
	unsigned int hash = hash_seed;
	unsigned char *ucbuf = (unsigned char *) buf;
	while (len--) {
		unsigned int c = *ucbuf++;
		if (c >= 'a' && c <= 'z')
			c -= 'a' - 'A';
		hash = (hash * FNV32_PRIME) ^ c;
	}
	return hash;
}

#define HASHMAP_INITIAL_SIZE 64
/* grow / shrink by 2^2 */
#define HASHMAP_RESIZE_BITS 2
/* load factor in percent */
#define HASHMAP_LOAD_FACTOR 80

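/*
 * A worked example of the thresholds computed by alloc_table() below
 * (illustrative only): for a table of size 256, grow_at is
 * 256 * 80 / 100 = 204 and shrink_at is 204 / 5 = 40.  Shrinking once the
 * count drops below 40 yields a table of size 64, whose grow_at is 51,
 * so the map does not immediately grow again.
 */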
static void alloc_table(struct hashmap *map, unsigned int size)
{
	map->tablesize = size;
	map->table = xcalloc(size, sizeof(struct hashmap_entry *));

	/* calculate resize thresholds for new size */
	map->grow_at = (unsigned int) ((uint64_t) size * HASHMAP_LOAD_FACTOR / 100);
	if (size <= HASHMAP_INITIAL_SIZE)
		map->shrink_at = 0;
	else
		/*
		 * The shrink-threshold must be slightly smaller than
		 * (grow-threshold / resize-factor) to prevent erratic resizing,
		 * thus we divide by (resize-factor + 1).
		 */
		map->shrink_at = map->grow_at / ((1 << HASHMAP_RESIZE_BITS) + 1);
}

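/*
 * Two entries are considered equal if they are the same object, or if
 * their hash codes match and the user-supplied comparison function
 * reports equality.  keydata is passed through so callers can compare
 * an entry against a bare key without building a full entry structure.
 */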
static inline int entry_equals(const struct hashmap *map,
		const struct hashmap_entry *e1, const struct hashmap_entry *e2,
		const void *keydata)
{
	return (e1 == e2) ||
	       (e1->hash == e2->hash &&
		!map->cmpfn(map->cmpfn_data, e1, e2, keydata));
}

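/* tablesize is always a power of two, so masking maps a hash to a slot */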
static inline unsigned int bucket(const struct hashmap *map,
		const struct hashmap_entry *key)
{
	return key->hash & (map->tablesize - 1);
}

int hashmap_bucket(const struct hashmap *map, unsigned int hash)
{
	return hash & (map->tablesize - 1);
}

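/* resize the table to newsize and redistribute all entries into the new buckets */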
static void rehash(struct hashmap *map, unsigned int newsize)
{
	unsigned int i, oldsize = map->tablesize;
	struct hashmap_entry **oldtable = map->table;

	alloc_table(map, newsize);
	for (i = 0; i < oldsize; i++) {
		struct hashmap_entry *e = oldtable[i];
		while (e) {
			struct hashmap_entry *next = e->next;
			unsigned int b = bucket(map, e);
			e->next = map->table[b];
			map->table[b] = e;
			e = next;
		}
	}
	free(oldtable);
}

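/*
 * Return the address of the pointer slot holding the first entry that
 * matches the key, or of the NULL that terminates the chain if there is
 * no match; hashmap_remove() uses the returned slot to unlink in place.
 */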
static inline struct hashmap_entry **find_entry_ptr(const struct hashmap *map,
		const struct hashmap_entry *key, const void *keydata)
{
	struct hashmap_entry **e = &map->table[bucket(map, key)];
	while (*e && !entry_equals(map, *e, key, keydata))
		e = &(*e)->next;
	return e;
}

static int always_equal(const void *unused_cmp_data,
			const void *unused1,
			const void *unused2,
			const void *unused_keydata)
{
	return 0;
}

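/*
 * A minimal usage sketch (illustrative only; "struct item", item_cmp()
 * and the literal "foo" are hypothetical, not part of this file).  The
 * embedded struct hashmap_entry must be the first member of an entry so
 * the void pointers used by this API can be cast back to it:
 *
 *	struct item {
 *		struct hashmap_entry ent;
 *		char *key;
 *	};
 *
 *	static int item_cmp(const void *cmpfn_data, const void *entry,
 *			    const void *entry_or_key, const void *keydata)
 *	{
 *		const struct item *a = entry, *b = entry_or_key;
 *		return strcmp(a->key, keydata ? keydata : b->key);
 *	}
 *
 *	struct hashmap map;
 *	struct item *item = xmalloc(sizeof(*item));
 *	item->key = xstrdup("foo");
 *	hashmap_init(&map, item_cmp, NULL, 0);
 *	hashmap_entry_init(item, strhash(item->key));
 *	hashmap_add(&map, item);
 */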
void hashmap_init(struct hashmap *map, hashmap_cmp_fn equals_function,
		const void *cmpfn_data, size_t initial_size)
{
	unsigned int size = HASHMAP_INITIAL_SIZE;

	memset(map, 0, sizeof(*map));

	map->cmpfn = equals_function ? equals_function : always_equal;
	map->cmpfn_data = cmpfn_data;

	/* calculate initial table size and allocate the table */
	initial_size = (unsigned int) ((uint64_t) initial_size * 100
			/ HASHMAP_LOAD_FACTOR);
	while (initial_size > size)
		size <<= HASHMAP_RESIZE_BITS;
	alloc_table(map, size);

	/*
	 * Keep track of the number of items in the map and
	 * allow the map to automatically grow as necessary.
	 */
	map->do_count_items = 1;
}

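/*
 * Free the table and, if free_entries is set, each entry as well; entries
 * are released with free(), so they must have been individually allocated.
 */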
void hashmap_free(struct hashmap *map, int free_entries)
{
	if (!map || !map->table)
		return;
	if (free_entries) {
		struct hashmap_iter iter;
		struct hashmap_entry *e;
		hashmap_iter_init(map, &iter);
		while ((e = hashmap_iter_next(&iter)))
			free(e);
	}
	free(map->table);
	memset(map, 0, sizeof(*map));
}

void *hashmap_get(const struct hashmap *map, const void *key, const void *keydata)
{
	return *find_entry_ptr(map, key, keydata);
}

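/*
 * Return the next entry in the same bucket chain that equals the given
 * entry, for maps that store duplicate keys via hashmap_add().
 */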
void *hashmap_get_next(const struct hashmap *map, const void *entry)
{
	struct hashmap_entry *e = ((struct hashmap_entry *) entry)->next;
	for (; e; e = e->next)
		if (entry_equals(map, entry, e, NULL))
			return e;
	return NULL;
}

void hashmap_add(struct hashmap *map, void *entry)
{
	unsigned int b = bucket(map, entry);

	/* add entry */
	((struct hashmap_entry *) entry)->next = map->table[b];
	map->table[b] = entry;

	/* fix size and rehash if appropriate */
	if (map->do_count_items) {
		map->private_size++;
		if (map->private_size > map->grow_at)
			rehash(map, map->tablesize << HASHMAP_RESIZE_BITS);
	}
}

void *hashmap_remove(struct hashmap *map, const void *key, const void *keydata)
{
	struct hashmap_entry *old;
	struct hashmap_entry **e = find_entry_ptr(map, key, keydata);
	if (!*e)
		return NULL;

	/* remove existing entry */
	old = *e;
	*e = old->next;
	old->next = NULL;

	/* fix size and rehash if appropriate */
	if (map->do_count_items) {
		map->private_size--;
		if (map->private_size < map->shrink_at)
			rehash(map, map->tablesize >> HASHMAP_RESIZE_BITS);
	}
	return old;
}

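/*
 * Add-or-replace: remove an existing entry equal to the new one, if any,
 * add the new entry, and return the replaced entry (or NULL).
 */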
void *hashmap_put(struct hashmap *map, void *entry)
{
	struct hashmap_entry *old = hashmap_remove(map, entry, NULL);
	hashmap_add(map, entry);
	return old;
}

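/*
 * Iteration visits buckets in table order and chains front to back; the
 * resulting order of entries is unspecified from the caller's point of view.
 */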
void hashmap_iter_init(struct hashmap *map, struct hashmap_iter *iter)
{
	iter->map = map;
	iter->tablepos = 0;
	iter->next = NULL;
}

void *hashmap_iter_next(struct hashmap_iter *iter)
{
	struct hashmap_entry *current = iter->next;
	for (;;) {
		if (current) {
			iter->next = current->next;
			return current;
		}

		if (iter->tablepos >= iter->map->tablesize)
			return NULL;

		current = iter->map->table[iter->tablepos++];
	}
}

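/*
 * String/memory interning pool: memintern() below keeps a private hashmap
 * of immutable copies, so equal byte ranges are returned as the same pointer.
 */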
struct pool_entry {
	struct hashmap_entry ent;
	size_t len;
	unsigned char data[FLEX_ARRAY];
};

static int pool_entry_cmp(const void *unused_cmp_data,
			  const struct pool_entry *e1,
			  const struct pool_entry *e2,
			  const unsigned char *keydata)
{
	return e1->data != keydata &&
	       (e1->len != e2->len || memcmp(e1->data, keydata, e1->len));
}

const void *memintern(const void *data, size_t len)
{
	static struct hashmap map;
	struct pool_entry key, *e;

	/* initialize string pool hashmap */
	if (!map.tablesize)
		hashmap_init(&map, (hashmap_cmp_fn) pool_entry_cmp, NULL, 0);

	/* lookup interned string in pool */
	hashmap_entry_init(&key, memhash(data, len));
	key.len = len;
	e = hashmap_get(&map, &key, data);
	if (!e) {
		/* not found: create it */
		FLEX_ALLOC_MEM(e, data, data, len);
		hashmap_entry_init(e, key.ent.hash);
		e->len = len;
		hashmap_add(&map, e);
	}
	return e->data;
}