StarPU Internal Handbook
uthash.h
Go to the documentation of this file.
1/*
2Copyright (c) 2003-2010, Troy D. Hanson http://uthash.sourceforge.net
3All rights reserved.
4
5Redistribution and use in source and binary forms, with or without
6modification, are permitted provided that the following conditions are met:
7
8 * Redistributions of source code must retain the above copyright
9 notice, this list of conditions and the following disclaimer.
10
11THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
13TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
14PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
15OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
16EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
17PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
18PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
19LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
20NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
21SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
22*/
23
24#ifndef UTHASH_H
25#define UTHASH_H
26
29#include <string.h> /* memcmp,strlen */
30#include <stddef.h> /* ptrdiff_t */
31
/* These macros use decltype or the earlier __typeof GNU extension.
 As decltype is only available in newer compilers (VS2010 or gcc 4.3+
 when compiling c++ source) this code uses whatever method is needed
 or, for VS2008 where neither is available, uses casting workarounds. */
/* DECLTYPE(x) expands to a parenthesized cast to x's type, or to nothing
 when no typeof-like facility exists; in that case NO_DECLTYPE is set and
 DECLTYPE_ASSIGN below falls back to assigning through a char*. */
#ifdef _MSC_VER /* MS compiler */
#if _MSC_VER >= 1600 && defined(__cplusplus) /* VS2010 or newer in C++ mode */
#define DECLTYPE(x) (decltype(x))
#else /* VS2008 or older (or VS2010 in C mode) */
#define NO_DECLTYPE
#define DECLTYPE(x)
#endif
#else /* GNU, Sun and other compilers */
#define DECLTYPE(x) (__typeof(x))
#endif
46
/* DECLTYPE_ASSIGN(dst,src): assign src to dst, casting src to dst's type.
 Without a typeof facility (NO_DECLTYPE) the assignment is funneled through
 a char**/char* pair so it still compiles for any object-pointer dst. */
#ifdef NO_DECLTYPE
#define DECLTYPE_ASSIGN(dst,src) \
do { \
 char **_da_dst = (char**)(&(dst)); \
 *_da_dst = (char*)(src); \
} while(0)
#else
#define DECLTYPE_ASSIGN(dst,src) \
do { \
 (dst) = DECLTYPE(dst)(src); \
} while(0)
#endif
59
/* Several of the hash functions below use uint32_t, which old MSVC
 (pre-VS2010) did not provide via <stdint.h>/<inttypes.h>; define it
 manually there.
 NOTE(review): on MSVC 2010+ this typedef may conflict with the
 compiler's own <stdint.h> if both end up included — confirm. */
#ifdef _MSC_VER
typedef unsigned int uint32_t;
#else
#include <inttypes.h> /* uint32_t */
#endif
66
#define UTHASH_VERSION 1.9.3

/* Memory-management and error hooks used by every uthash macro.
 NOTE(review): uthash_fatal calls exit() but <stdlib.h> is not included
 here — presumably the including file provides it; confirm. */
#define uthash_fatal(msg) exit(-1) /* fatal error (out of memory,etc) */
#define uthash_malloc(sz) malloc(sz) /* malloc fcn */
#define uthash_free(ptr,sz) free(ptr) /* free fcn */

#define uthash_noexpand_fyi(tbl) /* can be defined to log noexpand */
#define uthash_expand_fyi(tbl) /* can be defined to log expands */

/* Table sizing parameters. Bucket counts are always powers of two,
 since bucket selection masks with (num_buckets - 1). */
#define HASH_INITIAL_NUM_BUCKETS 32 /* initial number of buckets */
#define HASH_INITIAL_NUM_BUCKETS_LOG2 5 /* lg2 of initial number of buckets */
#define HASH_BKT_CAPACITY_THRESH 10 /* expand when bucket count reaches */
80
/* calculate the element whose hash handle address is hhp: subtract the
 handle offset (tbl->hho) from the handle pointer to recover a pointer
 to the enclosing user structure. */
#define ELMT_FROM_HH(tbl,hhp) ((void*)(((char*)(hhp)) - ((tbl)->hho)))
83
/* HASH_FIND(hh,head,keyptr,keylen,out): look up a key.
 Hashes the key, consults the bloom filter (when enabled) to skip
 definite misses, then scans the target bucket's chain. On exit, out
 points to the matching element or is NULL. */
#define HASH_FIND(hh,head,keyptr,keylen,out) \
do { \
 unsigned _hf_bkt=0,_hf_hashv=0; \
 out=NULL; \
 if (head) { \
 HASH_FCN(keyptr,keylen, (head)->hh.tbl->num_buckets, _hf_hashv, _hf_bkt); \
 if (HASH_BLOOM_TEST((head)->hh.tbl, _hf_hashv)) { \
 HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ], \
 keyptr,keylen,out); \
 } \
 } \
} while (0)
96
/* Optional bloom filter (define HASH_BLOOM as the log2 of the filter size
 * in bits). The filter lets HASH_FIND skip the bucket scan for most keys
 * that are not present. When HASH_BLOOM is not defined, the macros below
 * compile away and HASH_BLOOM_TEST always reports a (possible) hit.
 *
 * Fixes relative to the original:
 * - HASH_BLOOM_BYTELEN is now fully parenthesized; the old expansion
 *   "(a/8) + (b?1:0)" could mis-associate inside a larger expression;
 * - the stray semicolons after "while (0)" in HASH_BLOOM_MAKE and
 *   HASH_BLOOM_FREE were removed, so each macro is a single statement
 *   that is safe in unbraced if/else bodies. */
#ifdef HASH_BLOOM
#define HASH_BLOOM_BITLEN (1ULL << HASH_BLOOM)
#define HASH_BLOOM_BYTELEN ((HASH_BLOOM_BITLEN/8) + ((HASH_BLOOM_BITLEN%8) ? 1:0))
/* allocate and zero the bit vector, record its size and signature */
#define HASH_BLOOM_MAKE(tbl) \
do { \
 (tbl)->bloom_nbits = HASH_BLOOM; \
 (tbl)->bloom_bv = (uint8_t*)uthash_malloc(HASH_BLOOM_BYTELEN); \
 if (!((tbl)->bloom_bv)) { uthash_fatal( "out of memory"); } \
 memset((tbl)->bloom_bv, 0, HASH_BLOOM_BYTELEN); \
 (tbl)->bloom_sig = HASH_BLOOM_SIGNATURE; \
} while (0)

#define HASH_BLOOM_FREE(tbl) \
do { \
 uthash_free((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \
} while (0)

/* set/test a single bit of the filter; idx is already masked to bloom_nbits */
#define HASH_BLOOM_BITSET(bv,idx) (bv[(idx)/8] |= (1U << ((idx)%8)))
#define HASH_BLOOM_BITTEST(bv,idx) (bv[(idx)/8] & (1U << ((idx)%8)))

#define HASH_BLOOM_ADD(tbl,hashv) \
 HASH_BLOOM_BITSET((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1)))

#define HASH_BLOOM_TEST(tbl,hashv) \
 HASH_BLOOM_BITTEST((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1)))

#else
/* bloom filter disabled: no-ops, and TEST never rules a key out */
#define HASH_BLOOM_MAKE(tbl)
#define HASH_BLOOM_FREE(tbl)
#define HASH_BLOOM_ADD(tbl,hashv)
#define HASH_BLOOM_TEST(tbl,hashv) (1)
#endif
129
/* HASH_MAKE_TABLE(hh,head): allocate and initialize the shared
 UT_hash_table for a hash whose first element is head: zeroed bucket
 array of HASH_INITIAL_NUM_BUCKETS, tail pointer, handle offset (hho),
 optional bloom filter, and table signature. Called once, on the first
 insertion into an empty hash. */
#define HASH_MAKE_TABLE(hh,head) \
do { \
 (head)->hh.tbl = (UT_hash_table*)uthash_malloc( \
 sizeof(UT_hash_table)); \
 if (!((head)->hh.tbl)) { uthash_fatal( "out of memory"); } \
 memset((head)->hh.tbl, 0, sizeof(UT_hash_table)); \
 (head)->hh.tbl->tail = &((head)->hh); \
 (head)->hh.tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS; \
 (head)->hh.tbl->log2_num_buckets = HASH_INITIAL_NUM_BUCKETS_LOG2; \
 (head)->hh.tbl->hho = (char*)(&(head)->hh) - (char*)(head); \
 (head)->hh.tbl->buckets = (UT_hash_bucket*)uthash_malloc( \
 HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \
 if (! (head)->hh.tbl->buckets) { uthash_fatal( "out of memory"); } \
 memset((head)->hh.tbl->buckets, 0, \
 HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \
 HASH_BLOOM_MAKE((head)->hh.tbl); \
 (head)->hh.tbl->signature = HASH_SIGNATURE; \
} while(0)
148
/* HASH_ADD(hh,head,fieldname,keylen_in,add): add element add, keyed by
 the in-struct member fieldname of length keylen_in bytes. */
#define HASH_ADD(hh,head,fieldname,keylen_in,add) \
 HASH_ADD_KEYPTR(hh,head,&add->fieldname,keylen_in,add)
151
#ifdef STARPU_DEBUG
/* StarPU extension: in STARPU_DEBUG builds, assert at insertion time that
 the key is not already present in the hash (inserting a duplicate key
 corrupts lookups). Expands to nothing otherwise. Uses the GNU
 __typeof__ extension to declare the scratch lookup result. */
#define HASH_CHECK_KEY(hh,head,keyptr,keylen,out) \
do { \
 __typeof__(out) _out; \
 HASH_FIND(hh,head,keyptr,keylen,_out); \
 STARPU_ASSERT_MSG(!_out,"Cannot insert the same key twice"); \
} while(0)
#else
#define HASH_CHECK_KEY(hh,head,keyptr,keylen,out)
#endif
163
/* HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add): add an element whose key
 is stored outside the structure, at keyptr (the key bytes are NOT
 copied; they must stay valid while the element is in the hash).
 Appends add to the app-order doubly-linked list (creating the table on
 the first insertion), hashes the key, links the handle into its bucket
 (which may trigger bucket expansion), and updates the bloom filter. */
#define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add) \
do { \
 unsigned _ha_bkt=0; \
 HASH_CHECK_KEY(hh,head,keyptr,keylen_in,add); \
 (add)->hh.next = NULL; \
 (add)->hh.key = (char*)keyptr; \
 (add)->hh.keylen = keylen_in; \
 if (!(head)) { \
 head = (add); \
 (head)->hh.prev = NULL; \
 HASH_MAKE_TABLE(hh,head); \
 } else { \
 (head)->hh.tbl->tail->next = (add); \
 (add)->hh.prev = ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail); \
 (head)->hh.tbl->tail = &((add)->hh); \
 } \
 (head)->hh.tbl->num_items++; \
 (add)->hh.tbl = (head)->hh.tbl; \
 HASH_FCN(keyptr,keylen_in, (head)->hh.tbl->num_buckets, \
 (add)->hh.hashv, _ha_bkt); \
 HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt],&(add)->hh); \
 HASH_BLOOM_ADD((head)->hh.tbl,(add)->hh.hashv); \
 HASH_EMIT_KEY(hh,head,keyptr,keylen_in); \
 HASH_FSCK(hh,head); \
} while(0)
189
/* Map a hash value to a bucket index. The bucket count is always a
 * power of two, so masking with (num_bkts - 1) is the modulo. */
#define HASH_TO_BKT( hashv, num_bkts, bkt ) \
do { \
 bkt = (hashv) & ((num_bkts) - 1); \
} while(0)
194
/* delete "delptr" from the hash table.
 * "the usual" patch-up process for the app-order doubly-linked-list.
 * The use of _hd_hh_del below deserves special explanation.
 * These used to be expressed using (delptr) but that led to a bug
 * if someone used the same symbol for the head and deletee, like
 * HASH_DELETE(hh,users,users);
 * We want that to work, but by changing the head (users) below
 * we were forfeiting our ability to further refer to the deletee (users)
 * in the patch-up process. Solution: use scratch space to
 * copy the deletee pointer, then the latter references are via that
 * scratch pointer rather than through the repointed (users) symbol.
 */
/* If the deletee is the only element, the whole table (buckets, bloom
 * filter, table struct) is freed and head becomes NULL; otherwise the
 * app-order list, the tail pointer, the bucket chain and the item count
 * are all patched up. The element itself is never freed here. */
#define HASH_DELETE(hh,head,delptr) \
do { \
 unsigned _hd_bkt; \
 struct UT_hash_handle *_hd_hh_del; \
 if ( ((delptr)->hh.prev == NULL) && ((delptr)->hh.next == NULL) ) { \
 uthash_free((head)->hh.tbl->buckets, \
 (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket) ); \
 HASH_BLOOM_FREE((head)->hh.tbl); \
 uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
 head = NULL; \
 } else { \
 _hd_hh_del = &((delptr)->hh); \
 if ((delptr) == ELMT_FROM_HH((head)->hh.tbl,(head)->hh.tbl->tail)) { \
 (head)->hh.tbl->tail = \
 (UT_hash_handle*)((char*)((delptr)->hh.prev) + \
 (head)->hh.tbl->hho); \
 } \
 if ((delptr)->hh.prev) { \
 ((UT_hash_handle*)((char*)((delptr)->hh.prev) + \
 (head)->hh.tbl->hho))->next = (delptr)->hh.next; \
 } else { \
 DECLTYPE_ASSIGN(head,(delptr)->hh.next); \
 } \
 if (_hd_hh_del->next) { \
 ((UT_hash_handle*)((char*)_hd_hh_del->next + \
 (head)->hh.tbl->hho))->prev = \
 _hd_hh_del->prev; \
 } \
 HASH_TO_BKT( _hd_hh_del->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \
 HASH_DEL_IN_BKT(hh,(head)->hh.tbl->buckets[_hd_bkt], _hd_hh_del); \
 (head)->hh.tbl->num_items--; \
 } \
 HASH_FSCK(hh,head); \
} while (0)
241
242
/* convenience forms of HASH_FIND/HASH_ADD/HASH_DEL.
 The _STR forms key on the string contents up to (but excluding) the
 terminating NUL; the _INT/_PTR forms key on the raw bytes of the field.
 The _FIND forms take a pointer to the key value being sought. */
#define HASH_FIND_STR(head,findstr,out) \
 HASH_FIND(hh,head,findstr,strlen(findstr),out)
#define HASH_ADD_STR(head,strfield,add) \
 HASH_ADD(hh,head,strfield[0],strlen(add->strfield),add)
#define HASH_FIND_INT(head,findint,out) \
 HASH_FIND(hh,head,findint,sizeof(int),out)
#define HASH_ADD_INT(head,intfield,add) \
 HASH_ADD(hh,head,intfield,sizeof(int),add)
#define HASH_FIND_PTR(head,findptr,out) \
 HASH_FIND(hh,head,findptr,sizeof(void *),out)
#define HASH_ADD_PTR(head,ptrfield,add) \
 HASH_ADD(hh,head,ptrfield,sizeof(void *),add)
#define HASH_DEL(head,delptr) \
 HASH_DELETE(hh,head,delptr)
258
/* HASH_FSCK checks hash integrity on every add/delete when HASH_DEBUG is defined.
 * This is for uthash developer only; it compiles away if HASH_DEBUG isn't defined.
 * It verifies every bucket chain's hh_prev links and per-bucket count,
 * then walks the app-order list checking prev links; both walks must
 * agree with tbl->num_items. Any inconsistency aborts via HASH_OOPS.
 */
#ifdef HASH_DEBUG
#define HASH_OOPS(...) do { fprintf(stderr,__VA_ARGS__); exit(-1); } while (0)
#define HASH_FSCK(hh,head) \
do { \
 unsigned _bkt_i; \
 unsigned _count, _bkt_count; \
 char *_prev; \
 struct UT_hash_handle *_thh; \
 if (head) { \
 _count = 0; \
 for( _bkt_i = 0; _bkt_i < (head)->hh.tbl->num_buckets; _bkt_i++) { \
 _bkt_count = 0; \
 _thh = (head)->hh.tbl->buckets[_bkt_i].hh_head; \
 _prev = NULL; \
 while (_thh) { \
 if (_prev != (char*)(_thh->hh_prev)) { \
 HASH_OOPS("invalid hh_prev %p, actual %p\n", \
 _thh->hh_prev, _prev ); \
 } \
 _bkt_count++; \
 _prev = (char*)(_thh); \
 _thh = _thh->hh_next; \
 } \
 _count += _bkt_count; \
 if ((head)->hh.tbl->buckets[_bkt_i].count != _bkt_count) { \
 HASH_OOPS("invalid bucket count %u, actual %u\n", \
 (head)->hh.tbl->buckets[_bkt_i].count, _bkt_count); \
 } \
 } \
 if (_count != (head)->hh.tbl->num_items) { \
 HASH_OOPS("invalid hh item count %u, actual %u\n", \
 (head)->hh.tbl->num_items, _count ); \
 } \
 /* traverse hh in app order; check next/prev integrity, count */ \
 _count = 0; \
 _prev = NULL; \
 _thh = &(head)->hh; \
 while (_thh) { \
 _count++; \
 if (_prev !=(char*)(_thh->prev)) { \
 HASH_OOPS("invalid prev %p, actual %p\n", \
 _thh->prev, _prev ); \
 } \
 _prev = (char*)ELMT_FROM_HH((head)->hh.tbl, _thh); \
 _thh = ( _thh->next ? (UT_hash_handle*)((char*)(_thh->next) + \
 (head)->hh.tbl->hho) : NULL ); \
 } \
 if (_count != (head)->hh.tbl->num_items) { \
 HASH_OOPS("invalid app item count %u, actual %u\n", \
 (head)->hh.tbl->num_items, _count ); \
 } \
 } \
} while (0)
#else
#define HASH_FSCK(hh,head)
#endif
318
/* When compiled with -DHASH_EMIT_KEYS, length-prefixed keys are emitted to
 * the descriptor to which this macro is defined for tuning the hash function.
 * The app can #include <unistd.h> to get the prototype for write(2).
 * (write() return values are deliberately ignored; this is a tuning aid,
 * not production I/O.) */
#ifdef HASH_EMIT_KEYS
#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) \
do { \
 unsigned _klen = fieldlen; \
 write(HASH_EMIT_KEYS, &_klen, sizeof(_klen)); \
 write(HASH_EMIT_KEYS, keyptr, fieldlen); \
} while (0)
#else
#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen)
#endif
332
/* default to Bob Jenkins's hash unless overridden, e.g. -DHASH_FUNCTION=HASH_SAX */
#ifdef HASH_FUNCTION
#define HASH_FCN HASH_FUNCTION
#else
#define HASH_FCN HASH_JEN
#endif
339
/* The Bernstein hash function, used in Perl prior to v5.6:
 * h = h*33 + c over each key byte, then mask to pick the bucket. */
#define HASH_BER(key,keylen,num_bkts,hashv,bkt) \
do { \
 unsigned _hb_n = (keylen); \
 unsigned _hb_i; \
 char *_hb_p = (char*)(key); \
 (hashv) = 0; \
 for (_hb_i = 0; _hb_i < _hb_n; _hb_i++) { \
 (hashv) = ((hashv) * 33) + _hb_p[_hb_i]; \
 } \
 bkt = (hashv) & (num_bkts-1); \
} while (0)
349
350
/* Shift-add-xor (SAX) hash, a macro variant of the functions listed at
 * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx:
 * h ^= (h<<5) + (h>>2) + c for each key byte. */
#define HASH_SAX(key,keylen,num_bkts,hashv,bkt) \
do { \
 unsigned _sx_pos = 0; \
 char *_sx_data = (char*)(key); \
 hashv = 0; \
 while (_sx_pos < (keylen)) { \
 hashv ^= (hashv << 5) + (hashv >> 2) + _sx_data[_sx_pos]; \
 _sx_pos++; \
 } \
 bkt = hashv & (num_bkts-1); \
} while (0)
362
/* Fowler/Noll/Vo (FNV-1) hash: start from the 32-bit offset basis,
 * then for each key byte multiply by the FNV prime and xor the byte.
 * Fix: the stray semicolon after "while(0)" was removed, so the macro
 * is a single statement and is safe in unbraced if/else bodies. */
#define HASH_FNV(key,keylen,num_bkts,hashv,bkt) \
do { \
 unsigned _fn_i; \
 char *_hf_key=(char*)(key); \
 hashv = 2166136261UL; \
 for(_fn_i=0; _fn_i < keylen; _fn_i++) \
 hashv = (hashv * 16777619) ^ _hf_key[_fn_i]; \
 bkt = hashv & (num_bkts-1); \
} while(0)
372
/* Bob Jenkins' one-at-a-time hash: accumulate each byte with
 * shift-add-xor mixing, then apply a final avalanche before masking
 * down to a bucket index. */
#define HASH_OAT(key,keylen,num_bkts,hashv,bkt) \
do { \
 unsigned _oa_pos = 0; \
 char *_oa_data = (char*)(key); \
 hashv = 0; \
 while (_oa_pos < (keylen)) { \
 hashv += _oa_data[_oa_pos]; \
 hashv += (hashv << 10); \
 hashv ^= (hashv >> 6); \
 _oa_pos++; \
 } \
 hashv += (hashv << 3); \
 hashv ^= (hashv >> 11); \
 hashv += (hashv << 15); \
 bkt = hashv & (num_bkts-1); \
} while(0)
388
/* HASH_JEN_MIX(a,b,c): mixing step used by HASH_JEN below; stirs three
 32-bit accumulators together with subtractions, xors and shifts. */
#define HASH_JEN_MIX(a,b,c) \
do { \
 a -= b; a -= c; a ^= ( c >> 13 ); \
 b -= c; b -= a; b ^= ( a << 8 ); \
 c -= a; c -= b; c ^= ( b >> 13 ); \
 a -= b; a -= c; a ^= ( c >> 12 ); \
 b -= c; b -= a; b ^= ( a << 16 ); \
 c -= a; c -= b; c ^= ( b >> 5 ); \
 a -= b; a -= c; a ^= ( c >> 3 ); \
 b -= c; b -= a; b ^= ( a << 10 ); \
 c -= a; c -= b; c ^= ( b >> 15 ); \
} while (0)
401
/* HASH_JEN(key,keylen,num_bkts,hashv,bkt): Bob Jenkins's hash, the
 default HASH_FCN. Consumes 12 key bytes per iteration into three
 accumulators, mixes with HASH_JEN_MIX, then folds the trailing 0-11
 bytes in the switch (the case fallthroughs are intentional) and mixes
 once more.
 NOTE(review): bytes are read through plain char, so values >= 0x80
 sign-extend on signed-char platforms — presumably the intended
 historical behavior; confirm before changing. */
#define HASH_JEN(key,keylen,num_bkts,hashv,bkt) \
do { \
 unsigned _hj_i,_hj_j,_hj_k; \
 char *_hj_key=(char*)(key); \
 hashv = 0xfeedbeef; \
 _hj_i = _hj_j = 0x9e3779b9; \
 _hj_k = keylen; \
 while (_hj_k >= 12) { \
 _hj_i += (_hj_key[0] + ( (unsigned)_hj_key[1] << 8 ) \
 + ( (unsigned)_hj_key[2] << 16 ) \
 + ( (unsigned)_hj_key[3] << 24 ) ); \
 _hj_j += (_hj_key[4] + ( (unsigned)_hj_key[5] << 8 ) \
 + ( (unsigned)_hj_key[6] << 16 ) \
 + ( (unsigned)_hj_key[7] << 24 ) ); \
 hashv += (_hj_key[8] + ( (unsigned)_hj_key[9] << 8 ) \
 + ( (unsigned)_hj_key[10] << 16 ) \
 + ( (unsigned)_hj_key[11] << 24 ) ); \
 \
 HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
 \
 _hj_key += 12; \
 _hj_k -= 12; \
 } \
 hashv += keylen; \
 switch ( _hj_k ) { \
 case 11: hashv += ( (unsigned)_hj_key[10] << 24 ); \
 /* FALLTHRU */ \
 case 10: hashv += ( (unsigned)_hj_key[9] << 16 ); \
 /* FALLTHRU */ \
 case 9: hashv += ( (unsigned)_hj_key[8] << 8 ); \
 /* FALLTHRU */ \
 case 8: _hj_j += ( (unsigned)_hj_key[7] << 24 ); \
 /* FALLTHRU */ \
 case 7: _hj_j += ( (unsigned)_hj_key[6] << 16 ); \
 /* FALLTHRU */ \
 case 6: _hj_j += ( (unsigned)_hj_key[5] << 8 ); \
 /* FALLTHRU */ \
 case 5: _hj_j += _hj_key[4]; \
 /* FALLTHRU */ \
 case 4: _hj_i += ( (unsigned)_hj_key[3] << 24 ); \
 /* FALLTHRU */ \
 case 3: _hj_i += ( (unsigned)_hj_key[2] << 16 ); \
 /* FALLTHRU */ \
 case 2: _hj_i += ( (unsigned)_hj_key[1] << 8 ); \
 /* FALLTHRU */ \
 case 1: _hj_i += _hj_key[0]; \
 /* FALLTHRU */ \
 default: break; \
 } \
 HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
 bkt = hashv & (num_bkts-1); \
} while(0)
454
/* The Paul Hsieh hash function (SuperFastHash).
 * get16bits(d) reads a 16-bit value from d: as a direct (possibly
 * unaligned) load on architectures listed as tolerating that, otherwise
 * via two byte loads. */
#undef get16bits
#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \
 || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
#define get16bits(d) (*((const uint16_t *) (d)))
#endif

#if !defined (get16bits)
#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) \
 +(uint32_t)(((const uint8_t *)(d))[0]) )
#endif
/* HASH_SFH(key,keylen,num_bkts,hashv,bkt): hash the key 4 bytes (two
 * 16-bit halves) per round, fold the trailing 1-3 bytes, then apply the
 * final avalanche before masking down to a bucket index.
 * Fix: the stray semicolon after "while(0)" was removed so the macro is
 * a single statement, safe in unbraced if/else bodies. */
#define HASH_SFH(key,keylen,num_bkts,hashv,bkt) \
do { \
 char *_sfh_key=(char*)(key); \
 uint32_t _sfh_tmp, _sfh_len = keylen; \
 \
 int _sfh_rem = _sfh_len & 3; \
 _sfh_len >>= 2; \
 hashv = 0xcafebabe; \
 \
 /* Main loop */ \
 for (;_sfh_len > 0; _sfh_len--) { \
 hashv += get16bits (_sfh_key); \
 _sfh_tmp = (get16bits (_sfh_key+2) << 11) ^ hashv; \
 hashv = (hashv << 16) ^ _sfh_tmp; \
 _sfh_key += 2*sizeof (uint16_t); \
 hashv += hashv >> 11; \
 } \
 \
 /* Handle end cases */ \
 switch (_sfh_rem) { \
 case 3: hashv += get16bits (_sfh_key); \
 hashv ^= hashv << 16; \
 hashv ^= _sfh_key[sizeof (uint16_t)] << 18; \
 hashv += hashv >> 11; \
 break; \
 case 2: hashv += get16bits (_sfh_key); \
 hashv ^= hashv << 11; \
 hashv += hashv >> 17; \
 break; \
 case 1: hashv += *_sfh_key; \
 hashv ^= hashv << 10; \
 hashv += hashv >> 1; \
 break; \
 default: break; \
 } \
 \
 /* Force "avalanching" of final 127 bits */ \
 hashv ^= hashv << 3; \
 hashv += hashv >> 5; \
 hashv ^= hashv << 4; \
 hashv += hashv >> 17; \
 hashv ^= hashv << 25; \
 hashv += hashv >> 6; \
 bkt = hashv & (num_bkts-1); \
} while(0)
511
#ifdef HASH_USING_NO_STRICT_ALIASING
/* The MurmurHash exploits some CPU's (e.g. x86) tolerance for unaligned reads.
 * For other types of CPU's (e.g. Sparc) an unaligned read causes a bus error.
 * So MurmurHash comes in two versions, the faster unaligned one and the slower
 * aligned one. We only use the faster one on CPU's where we know it's safe.
 *
 * Note the preprocessor built-in defines can be emitted using:
 *
 * gcc -m64 -dM -E - < /dev/null (on gcc)
 * cc -## a.c (where a.c is a simple test file) (Sun Studio)
 */
/* HASH_MUR resolves to whichever variant is appropriate for the target. */
#if (defined(__i386__) || defined(__x86_64__))
#define HASH_MUR HASH_MUR_UNALIGNED
#else
#define HASH_MUR HASH_MUR_ALIGNED
#endif
528
/* Appleby's MurmurHash fast version for unaligned-tolerant archs like i386.
 * Reads 32-bit words straight out of the key (hence the strict-aliasing /
 * alignment caveats that gate this whole section), processes 4 bytes per
 * round and folds the trailing 1-3 bytes in the switch. */
#define HASH_MUR_UNALIGNED(key,keylen,num_bkts,hashv,bkt) \
do { \
 const unsigned int _mur_m = 0x5bd1e995; \
 const int _mur_r = 24; \
 hashv = 0xcafebabe ^ keylen; \
 char *_mur_key = (char *)(key); \
 uint32_t _mur_tmp, _mur_len = keylen; \
 \
 for (;_mur_len >= 4; _mur_len-=4) { \
 _mur_tmp = *(uint32_t *)_mur_key; \
 _mur_tmp *= _mur_m; \
 _mur_tmp ^= _mur_tmp >> _mur_r; \
 _mur_tmp *= _mur_m; \
 hashv *= _mur_m; \
 hashv ^= _mur_tmp; \
 _mur_key += 4; \
 } \
 \
 switch(_mur_len) \
 { \
 case 3: hashv ^= _mur_key[2] << 16; \
 /* FALLTHRU */ \
 case 2: hashv ^= _mur_key[1] << 8; \
 /* FALLTHRU */ \
 case 1: hashv ^= _mur_key[0]; \
 hashv *= _mur_m; \
 /* FALLTHRU */ \
 default: break; \
 }; \
 \
 hashv ^= hashv >> 13; \
 hashv *= _mur_m; \
 hashv ^= hashv >> 15; \
 \
 bkt = hashv & (num_bkts-1); \
} while(0)
566
/* Appleby's MurmurHash version for alignment-sensitive archs like Sparc.
 * Never performs a misaligned 32-bit load: it first folds the bytes
 * before the next aligned boundary into _mur_t, then reads aligned words
 * and recombines them with shifts (_mur_sr/_mur_sl) so the words hashed
 * match the logical byte stream. The result is therefore the same hash
 * for the same bytes regardless of the key's starting alignment.
 * Bug fix: in the tail (_mur_len >= _mur_align case) the original code
 * did "_mur_k += _mur_align", incrementing the dead scratch word instead
 * of the key pointer, so the final 1-3 byte switch hashed stale bytes
 * and the hash value depended on alignment. It now advances _mur_key. */
#define HASH_MUR_ALIGNED(key,keylen,num_bkts,hashv,bkt) \
do { \
 const unsigned int _mur_m = 0x5bd1e995; \
 const int _mur_r = 24; \
 hashv = 0xcafebabe ^ (keylen); \
 char *_mur_key = (char *)(key); \
 uint32_t _mur_len = keylen; \
 int _mur_align = (int)_mur_key & 3; \
 \
 if (_mur_align && (_mur_len >= 4)) { \
 unsigned _mur_t = 0, _mur_d = 0; \
 switch(_mur_align) { \
 case 1: _mur_t |= _mur_key[2] << 16; \
 /* FALLTHRU */ \
 case 2: _mur_t |= _mur_key[1] << 8; \
 /* FALLTHRU */ \
 case 3: _mur_t |= _mur_key[0]; \
 /* FALLTHRU */ \
 default: break; \
 } \
 _mur_t <<= (8 * _mur_align); \
 _mur_key += 4-_mur_align; \
 _mur_len -= 4-_mur_align; \
 int _mur_sl = 8 * (4-_mur_align); \
 int _mur_sr = 8 * _mur_align; \
 \
 for (;_mur_len >= 4; _mur_len-=4) { \
 _mur_d = *(unsigned *)_mur_key; \
 _mur_t = (_mur_t >> _mur_sr) | (_mur_d << _mur_sl); \
 unsigned _mur_k = _mur_t; \
 _mur_k *= _mur_m; \
 _mur_k ^= _mur_k >> _mur_r; \
 _mur_k *= _mur_m; \
 hashv *= _mur_m; \
 hashv ^= _mur_k; \
 _mur_t = _mur_d; \
 _mur_key += 4; \
 } \
 _mur_d = 0; \
 if(_mur_len >= _mur_align) { \
 switch(_mur_align) { \
 case 3: _mur_d |= _mur_key[2] << 16; \
 /* FALLTHRU */ \
 case 2: _mur_d |= _mur_key[1] << 8; \
 /* FALLTHRU */ \
 case 1: _mur_d |= _mur_key[0]; \
 /* FALLTHRU */ \
 default: break; \
 } \
 unsigned _mur_k = (_mur_t >> _mur_sr) | (_mur_d << _mur_sl); \
 _mur_k *= _mur_m; \
 _mur_k ^= _mur_k >> _mur_r; \
 _mur_k *= _mur_m; \
 hashv *= _mur_m; \
 hashv ^= _mur_k; \
 _mur_key += _mur_align; /* was "_mur_k += _mur_align": advance the key, not the scratch word */ \
 _mur_len -= _mur_align; \
 \
 switch(_mur_len) \
 { \
 case 3: hashv ^= _mur_key[2] << 16; \
 /* FALLTHRU */ \
 case 2: hashv ^= _mur_key[1] << 8; \
 /* FALLTHRU */ \
 case 1: hashv ^= _mur_key[0]; \
 hashv *= _mur_m; \
 /* FALLTHRU */ \
 default: break; \
 } \
 } else { \
 switch(_mur_len) \
 { \
 case 3: _mur_d ^= _mur_key[2] << 16; \
 /* FALLTHRU */ \
 case 2: _mur_d ^= _mur_key[1] << 8; \
 /* FALLTHRU */ \
 case 1: _mur_d ^= _mur_key[0]; \
 /* FALLTHRU */ \
 case 0: hashv ^= (_mur_t >> _mur_sr) | (_mur_d << _mur_sl); \
 hashv *= _mur_m; \
 /* FALLTHRU */ \
 default: break; \
 } \
 } \
 \
 hashv ^= hashv >> 13; \
 hashv *= _mur_m; \
 hashv ^= hashv >> 15; \
 } else { \
 for (;_mur_len >= 4; _mur_len-=4) { \
 unsigned _mur_k = *(unsigned*)_mur_key; \
 _mur_k *= _mur_m; \
 _mur_k ^= _mur_k >> _mur_r; \
 _mur_k *= _mur_m; \
 hashv *= _mur_m; \
 hashv ^= _mur_k; \
 _mur_key += 4; \
 } \
 switch(_mur_len) \
 { \
 case 3: hashv ^= _mur_key[2] << 16; \
 /* FALLTHRU */ \
 case 2: hashv ^= _mur_key[1] << 8; \
 /* FALLTHRU */ \
 case 1: hashv ^= _mur_key[0]; \
 hashv *= _mur_m; \
 /* FALLTHRU */ \
 default: break; \
 } \
 \
 hashv ^= hashv >> 13; \
 hashv *= _mur_m; \
 hashv ^= hashv >> 15; \
 } \
 bkt = hashv & (num_bkts-1); \
} while(0)
684#endif /* HASH_USING_NO_STRICT_ALIASING */
685
/* key comparison function; return 0 if keys equal (memcmp semantics:
 len is in bytes and both keys must be at least len bytes long) */
#define HASH_KEYCMP(a,b,len) memcmp(a,b,len)
688
/* iterate over items in a known bucket to find desired item:
 walks the bucket's hh_next chain and sets out to the first element
 whose keylen and key bytes both match (HASH_KEYCMP), or NULL. */
#define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,out) \
do { \
 if (head.hh_head) DECLTYPE_ASSIGN(out,ELMT_FROM_HH(tbl,head.hh_head)); \
 else out=NULL; \
 while (out) { \
 if (out->hh.keylen == keylen_in) { \
 if ((HASH_KEYCMP(out->hh.key,keyptr,keylen_in)) == 0) break; \
 } \
 if (out->hh.hh_next) DECLTYPE_ASSIGN(out,ELMT_FROM_HH(tbl,out->hh.hh_next)); \
 else out = NULL; \
 } \
} while(0)
702
/* add an item to a bucket: push addhh at the head of the bucket chain,
 then expand the table when the chain length reaches
 (expand_mult+1) * HASH_BKT_CAPACITY_THRESH, unless expansion has been
 disabled (tbl->noexpand) because it proved ineffective. */
#define HASH_ADD_TO_BKT(head,addhh) \
do { \
 head.count++; \
 (addhh)->hh_next = head.hh_head; \
 (addhh)->hh_prev = NULL; \
 if (head.hh_head) { (head).hh_head->hh_prev = (addhh); } \
 (head).hh_head=addhh; \
 if (head.count >= ((head.expand_mult+1) * HASH_BKT_CAPACITY_THRESH) \
 && (addhh)->tbl->noexpand != 1) { \
 HASH_EXPAND_BUCKETS((addhh)->tbl); \
 } \
} while(0)
716
/* remove an item from a given bucket: unlink hh_del from the bucket's
 * doubly-linked hh_prev/hh_next chain and decrement the bucket count.
 * The hh argument (handle name) is unused; it is kept for symmetry with
 * the other bucket operations.
 * Fix: the macro is now wrapped in do { } while(0) so it expands to a
 * single statement — previously its four statements broke when the
 * macro was used in an unbraced if/else body. */
#define HASH_DEL_IN_BKT(hh,head,hh_del) \
do { \
 (head).count--; \
 if ((head).hh_head == hh_del) { \
 (head).hh_head = hh_del->hh_next; \
 } \
 if (hh_del->hh_prev) { \
 hh_del->hh_prev->hh_next = hh_del->hh_next; \
 } \
 if (hh_del->hh_next) { \
 hh_del->hh_next->hh_prev = hh_del->hh_prev; \
 } \
} while(0)
729
/* Bucket expansion has the effect of doubling the number of buckets
 * and redistributing the items into the new buckets. Ideally the
 * items will distribute more or less evenly into the new buckets
 * (the extent to which this is true is a measure of the quality of
 * the hash function as it applies to the key domain).
 *
 * With the items distributed into more buckets, the chain length
 * (item count) in each bucket is reduced. Thus by expanding buckets
 * the hash keeps a bound on the chain length. This bounded chain
 * length is the essence of how a hash provides constant time lookup.
 *
 * The calculation of tbl->ideal_chain_maxlen below deserves some
 * explanation. First, keep in mind that we're calculating the ideal
 * maximum chain length based on the *new* (doubled) bucket count.
 * In fractions this is just n/b (n=number of items,b=new num buckets).
 * Since the ideal chain length is an integer, we want to calculate
 * ceil(n/b). We don't depend on floating point arithmetic in this
 * hash, so to calculate ceil(n/b) with integers we could write
 *
 * ceil(n/b) = (n/b) + ((n%b)?1:0)
 *
 * and in fact a previous version of this hash did just that.
 * But now we have improved things a bit by recognizing that b is
 * always a power of two. We keep its base 2 log handy (call it lb),
 * so now we can write this with a bit shift and logical AND:
 *
 * ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 1:0)
 *
 * Items landing in over-long chains are counted in nonideal_items;
 * if more than half of all items are non-ideal for two consecutive
 * expansions (ineff_expands > 1), further expansion is disabled
 * (tbl->noexpand) and uthash_noexpand_fyi is invoked.
 */
#define HASH_EXPAND_BUCKETS(tbl) \
do { \
 unsigned _he_bkt; \
 unsigned _he_bkt_i; \
 struct UT_hash_handle *_he_thh, *_he_hh_nxt; \
 UT_hash_bucket *_he_new_buckets, *_he_newbkt; \
 _he_new_buckets = (UT_hash_bucket*)uthash_malloc( \
 2 * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
 if (!_he_new_buckets) { uthash_fatal( "out of memory"); } \
 memset(_he_new_buckets, 0, \
 2 * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
 tbl->ideal_chain_maxlen = \
 (tbl->num_items >> (tbl->log2_num_buckets+1)) + \
 ((tbl->num_items & ((tbl->num_buckets*2)-1)) ? 1 : 0); \
 tbl->nonideal_items = 0; \
 for(_he_bkt_i = 0; _he_bkt_i < tbl->num_buckets; _he_bkt_i++) \
 { \
 _he_thh = tbl->buckets[ _he_bkt_i ].hh_head; \
 while (_he_thh) { \
 _he_hh_nxt = _he_thh->hh_next; \
 HASH_TO_BKT( _he_thh->hashv, tbl->num_buckets*2, _he_bkt); \
 _he_newbkt = &(_he_new_buckets[ _he_bkt ]); \
 if (++(_he_newbkt->count) > tbl->ideal_chain_maxlen) { \
 tbl->nonideal_items++; \
 _he_newbkt->expand_mult = _he_newbkt->count / \
 tbl->ideal_chain_maxlen; \
 } \
 _he_thh->hh_prev = NULL; \
 _he_thh->hh_next = _he_newbkt->hh_head; \
 if (_he_newbkt->hh_head) _he_newbkt->hh_head->hh_prev = \
 _he_thh; \
 _he_newbkt->hh_head = _he_thh; \
 _he_thh = _he_hh_nxt; \
 } \
 } \
 uthash_free( tbl->buckets, tbl->num_buckets*sizeof(struct UT_hash_bucket) ); \
 tbl->num_buckets *= 2; \
 tbl->log2_num_buckets++; \
 tbl->buckets = _he_new_buckets; \
 tbl->ineff_expands = (tbl->nonideal_items > (tbl->num_items >> 1)) ? \
 (tbl->ineff_expands+1) : 0; \
 if (tbl->ineff_expands > 1) { \
 tbl->noexpand=1; \
 uthash_noexpand_fyi(tbl); \
 } \
 uthash_expand_fyi(tbl); \
} while(0)
806
807
/* This is an adaptation of Simon Tatham's O(n log(n)) mergesort */
/* Note that HASH_SORT assumes the hash handle name to be hh.
 * HASH_SRT was added to allow the hash handle name to be passed in. */
/* Sorts the app-order list in place with a bottom-up mergesort: run
 * width (_hs_insize) doubles each pass until a pass performs at most
 * one merge. cmpfcn(a,b) receives element pointers and must return
 * <0/0/>0, strcmp-style; the merge takes from the first run when
 * cmpfcn(...) <= 0, which keeps equal elements in their original order
 * (stable). Only prev/next links, tail and head are rewritten; bucket
 * chains are untouched. */
#define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn)
#define HASH_SRT(hh,head,cmpfcn) \
do { \
 unsigned _hs_i; \
 unsigned _hs_looping,_hs_nmerges,_hs_insize,_hs_psize,_hs_qsize; \
 struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail; \
 if (head) { \
 _hs_insize = 1; \
 _hs_looping = 1; \
 _hs_list = &((head)->hh); \
 while (_hs_looping) { \
 _hs_p = _hs_list; \
 _hs_list = NULL; \
 _hs_tail = NULL; \
 _hs_nmerges = 0; \
 while (_hs_p) { \
 _hs_nmerges++; \
 _hs_q = _hs_p; \
 _hs_psize = 0; \
 for ( _hs_i = 0; _hs_i < _hs_insize; _hs_i++ ) { \
 _hs_psize++; \
 _hs_q = (UT_hash_handle*)((_hs_q->next) ? \
 ((void*)((char*)(_hs_q->next) + \
 (head)->hh.tbl->hho)) : NULL); \
 if (! (_hs_q) ) break; \
 } \
 _hs_qsize = _hs_insize; \
 while ((_hs_psize > 0) || ((_hs_qsize > 0) && _hs_q )) { \
 if (_hs_psize == 0) { \
 _hs_e = _hs_q; \
 _hs_q = (UT_hash_handle*)((_hs_q->next) ? \
 ((void*)((char*)(_hs_q->next) + \
 (head)->hh.tbl->hho)) : NULL); \
 _hs_qsize--; \
 } else if ( (_hs_qsize == 0) || !(_hs_q) ) { \
 _hs_e = _hs_p; \
 _hs_p = (UT_hash_handle*)((_hs_p->next) ? \
 ((void*)((char*)(_hs_p->next) + \
 (head)->hh.tbl->hho)) : NULL); \
 _hs_psize--; \
 } else if (( \
 cmpfcn(DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_p)), \
 DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_q))) \
 ) <= 0) { \
 _hs_e = _hs_p; \
 _hs_p = (UT_hash_handle*)((_hs_p->next) ? \
 ((void*)((char*)(_hs_p->next) + \
 (head)->hh.tbl->hho)) : NULL); \
 _hs_psize--; \
 } else { \
 _hs_e = _hs_q; \
 _hs_q = (UT_hash_handle*)((_hs_q->next) ? \
 ((void*)((char*)(_hs_q->next) + \
 (head)->hh.tbl->hho)) : NULL); \
 _hs_qsize--; \
 } \
 if ( _hs_tail ) { \
 _hs_tail->next = ((_hs_e) ? \
 ELMT_FROM_HH((head)->hh.tbl,_hs_e) : NULL); \
 } else { \
 _hs_list = _hs_e; \
 } \
 _hs_e->prev = ((_hs_tail) ? \
 ELMT_FROM_HH((head)->hh.tbl,_hs_tail) : NULL); \
 _hs_tail = _hs_e; \
 } \
 _hs_p = _hs_q; \
 } \
 _hs_tail->next = NULL; \
 if ( _hs_nmerges <= 1 ) { \
 _hs_looping=0; \
 (head)->hh.tbl->tail = _hs_tail; \
 DECLTYPE_ASSIGN(head,ELMT_FROM_HH((head)->hh.tbl, _hs_list)); \
 } \
 _hs_insize *= 2; \
 } \
 HASH_FSCK(hh,head); \
 } \
} while (0)
890
/* This function selects items from one hash into another hash.
 * The end result is that the selected items have dual presence
 * in both hashes. There is no copy of the items made; rather
 * they are added into the new hash through a secondary hash
 * handle that must be present in the structure.
 * cond is a function-like predicate applied to each element pointer;
 * elements for which it returns nonzero are linked into dst via the
 * hh_dst handle (key, keylen and hashv are copied from the source
 * handle, so the key is not re-hashed). */
#define HASH_SELECT(hh_dst, dst, hh_src, src, cond) \
do { \
 unsigned _src_bkt, _dst_bkt; \
 void *_last_elt=NULL, *_elt; \
 UT_hash_handle *_src_hh, *_dst_hh, *_last_elt_hh=NULL; \
 ptrdiff_t _dst_hho = ((char*)(&(dst)->hh_dst) - (char*)(dst)); \
 if (src) { \
 for(_src_bkt=0; _src_bkt < (src)->hh_src.tbl->num_buckets; _src_bkt++) { \
 for(_src_hh = (src)->hh_src.tbl->buckets[_src_bkt].hh_head; \
 _src_hh; \
 _src_hh = _src_hh->hh_next) { \
 _elt = ELMT_FROM_HH((src)->hh_src.tbl, _src_hh); \
 if (cond(_elt)) { \
 _dst_hh = (UT_hash_handle*)(((char*)_elt) + _dst_hho); \
 _dst_hh->key = _src_hh->key; \
 _dst_hh->keylen = _src_hh->keylen; \
 _dst_hh->hashv = _src_hh->hashv; \
 _dst_hh->prev = _last_elt; \
 _dst_hh->next = NULL; \
 if (_last_elt_hh) { _last_elt_hh->next = _elt; } \
 if (!dst) { \
 DECLTYPE_ASSIGN(dst,_elt); \
 HASH_MAKE_TABLE(hh_dst,dst); \
 } else { \
 _dst_hh->tbl = (dst)->hh_dst.tbl; \
 } \
 HASH_TO_BKT(_dst_hh->hashv, _dst_hh->tbl->num_buckets, _dst_bkt); \
 HASH_ADD_TO_BKT(_dst_hh->tbl->buckets[_dst_bkt],_dst_hh); \
 (dst)->hh_dst.tbl->num_items++; \
 _last_elt = _elt; \
 _last_elt_hh = _dst_hh; \
 } \
 } \
 } \
 } \
 HASH_FSCK(hh_dst,dst); \
} while (0)
933
/* HASH_CLEAR(hh,head): free all administrative storage of the hash (the
 * bucket array, the optional bloom filter, and the table struct) and set
 * head to NULL. The elements themselves are NOT freed; they belong to
 * the application.
 * Fix: the bloom filter is now released via HASH_BLOOM_FREE; previously
 * it leaked whenever HASH_BLOOM was defined (HASH_DELETE of the last
 * element already freed it, but HASH_CLEAR did not). */
#define HASH_CLEAR(hh,head) \
do { \
 if (head) { \
 HASH_BLOOM_FREE((head)->hh.tbl); \
 uthash_free((head)->hh.tbl->buckets, \
 (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket)); \
 uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
 (head)=NULL; \
 } \
} while(0)
943
#ifdef NO_DECLTYPE
/* HASH_ITER: deletion-safe traversal in app (insertion) order. `el` is the
 * current element; `tmp` caches the successor before the body runs, so the
 * body may delete or free `el`. This variant, for compilers with neither
 * decltype nor __typeof, stores the untyped next pointer into `tmp` through
 * a char** alias instead of a typed cast. */
#define HASH_ITER(hh,head,el,tmp)                                                \
for((el)=(head), (*(char**)(&(tmp)))=(char*)((head)?(head)->hh.next:NULL);       \
  el; (el)=(tmp),(*(char**)(&(tmp)))=(char*)((tmp)?(tmp)->hh.next:NULL))
#else
/* Typed variant: DECLTYPE(el) casts the void* app-order `next` link back to
 * the element's own pointer type. */
#define HASH_ITER(hh,head,el,tmp)                                                \
for((el)=(head),(tmp)=DECLTYPE(el)((head)?(head)->hh.next:NULL);                 \
  el; (el)=(tmp),(tmp)=DECLTYPE(el)((tmp)?(tmp)->hh.next:NULL))
#endif
953
/* obtain a count of items in the hash -- O(1): reads the table's running
 * num_items counter; yields 0 for a NULL head. */
#define HASH_COUNT(head) HASH_CNT(hh,head)  /* convenience form for the conventional handle name `hh` */
#define HASH_CNT(hh,head) ((head)?((head)->hh.tbl->num_items):0)
957
/* One bucket of the table: the head of a doubly-linked chain of hash
 * handles plus its occupancy bookkeeping. (Restored the closing
 * "} UT_hash_bucket;" lost to extraction garbling.) */
typedef struct UT_hash_bucket {
   struct UT_hash_handle *hh_head;  /* first hash handle in this bucket's chain */
   unsigned count;                  /* number of items currently chained here */

   /* expand_mult is normally set to 0. In this situation, the max chain length
    * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If
    * the bucket's chain exceeds this length, bucket expansion is triggered).
    * However, setting expand_mult to a non-zero value delays bucket expansion
    * (that would be triggered by additions to this particular bucket)
    * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH.
    * (The multiplier is simply expand_mult+1). The whole idea of this
    * multiplier is to reduce bucket expansions, since they are expensive, in
    * situations where we know that a particular bucket tends to be overused.
    * It is better to let its chain length grow to a longer yet-still-bounded
    * value, than to do an O(n) bucket expansion too often.
    */
   unsigned expand_mult;

} UT_hash_bucket;
/* random signature used only to find hash tables in external analysis */
#define HASH_SIGNATURE 0xa0111fe1        /* stamped into UT_hash_table.signature */
#define HASH_BLOOM_SIGNATURE 0xb12220f2  /* stamped into UT_hash_table.bloom_sig */
981
982typedef struct UT_hash_table {
983 UT_hash_bucket *buckets;
984 unsigned num_buckets, log2_num_buckets;
985 unsigned num_items;
986 struct UT_hash_handle *tail; /* tail hh in app order, for fast append */
987 ptrdiff_t hho; /* hash handle offset (byte pos of hash handle in element */
988
989 /* in an ideal situation (all buckets used equally), no bucket would have
990 * more than ceil(#items/#buckets) items. that's the ideal chain length. */
991 unsigned ideal_chain_maxlen;
992
993 /* nonideal_items is the number of items in the hash whose chain position
994 * exceeds the ideal chain maxlen. these items pay the penalty for an uneven
995 * hash distribution; reaching them in a chain traversal takes >ideal steps */
996 unsigned nonideal_items;
997
998 /* ineffective expands occur when a bucket doubling was performed, but
999 * afterward, more than half the items in the hash had nonideal chain
1000 * positions. If this happens on two consecutive expansions we inhibit any
1001 * further expansion, as it's not helping; this happens when the hash
1002 * function isn't a good fit for the key domain. When expansion is inhibited
1003 * the hash will still work, albeit no longer in constant time. */
1004 unsigned ineff_expands, noexpand;
1005
1006 uint32_t signature; /* used only to find hash tables in external analysis */
1007#ifdef HASH_BLOOM
1008 uint32_t bloom_sig; /* used only to test bloom exists in external analysis */
1009 uint8_t *bloom_bv;
1010 char bloom_nbits;
1011#endif
1012
1014
/* The hash handle embedded in each user structure. It links the element into
 * both orders maintained by uthash: application (insertion) order via
 * prev/next, and bucket-chain order via hh_prev/hh_next. (Restored the
 * closing "} UT_hash_handle;" lost to extraction garbling.) */
typedef struct UT_hash_handle {
   struct UT_hash_table *tbl;       /* table shared by all items in this hash */
   void *prev;                      /* prev element in app order */
   void *next;                      /* next element in app order */
   struct UT_hash_handle *hh_prev;  /* previous hh in bucket order */
   struct UT_hash_handle *hh_next;  /* next hh in bucket order */
   void *key;                       /* ptr to enclosing struct's key */
   unsigned keylen;                 /* enclosing struct's key len */
   unsigned hashv;                  /* result of hash-fcn(key) */
} UT_hash_handle;
1026#endif /* UTHASH_H */
/* Doxygen cross-reference residue (extraction artifact, preserved as a comment):
 * UT_hash_bucket -- definition at uthash.h:958
 * UT_hash_handle -- definition at uthash.h:1015
 * UT_hash_table  -- definition at uthash.h:982
 */