/*
 * name-hash.c
 *
 * Hashing names in the index state
 *
 * Copyright (C) 2008 Linus Torvalds
 */
#include "cache.h"
#include "thread-utils.h"

struct dir_entry {
	struct hashmap_entry ent;
	struct dir_entry *parent;
	int nr;
	unsigned int namelen;
	char name[FLEX_ARRAY];
};

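/*
 * dir_entry is a refcounted node in "istate->dir_hash", one per
 * directory component, stored without the trailing '/'.
 *
 * The comparison callback below follows the hashmap contract: it must
 * return 0 when two entries match. It serves both entry-vs-entry
 * comparisons (keydata == NULL, compare against e2->name) and
 * entry-vs-key lookups (keydata points at the name being sought).
 */
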
static int dir_entry_cmp(const void *unused_cmp_data,
			 const struct hashmap_entry *eptr,
			 const struct hashmap_entry *entry_or_key,
			 const void *keydata)
{
	const struct dir_entry *e1, *e2;
	const char *name = keydata;

	e1 = container_of(eptr, const struct dir_entry, ent);
	e2 = container_of(entry_or_key, const struct dir_entry, ent);

	return e1->namelen != e2->namelen || strncasecmp(e1->name,
			name ? name : e2->name, e1->namelen);
}

static struct dir_entry *find_dir_entry__hash(struct index_state *istate,
					      const char *name,
					      unsigned int namelen,
					      unsigned int hash)
{
	struct dir_entry key;
	hashmap_entry_init(&key.ent, hash);
	key.namelen = namelen;
	return hashmap_get_entry(&istate->dir_hash, &key, ent, name);
}

static struct dir_entry *find_dir_entry(struct index_state *istate,
					const char *name, unsigned int namelen)
{
	return find_dir_entry__hash(istate, name, namelen, memihash(name, namelen));
}

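/*
 * A worked example for hash_dir_entry() below: for a cache entry named
 * "lib/foo/bar.c", the first call strips "bar.c" and registers
 * "lib/foo"; the recursive call then registers "lib", whose own
 * recursion hits namelen == 0 and returns NULL, terminating the chain:
 *
 *	"lib/foo/bar.c"  ->  dir_entry "lib/foo"  ->  dir_entry "lib"  ->  NULL
 */
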
static struct dir_entry *hash_dir_entry(struct index_state *istate,
		struct cache_entry *ce, int namelen)
{
	/*
	 * Throw each directory component in the hash for quick lookup
	 * during a git status. Directory components are stored without their
	 * closing slash. Despite submodules being a directory, they never
	 * reach this point, because they are stored
	 * in index_state.name_hash (as ordinary cache_entries).
	 */
	struct dir_entry *dir;

	/* get length of parent directory */
	while (namelen > 0 && !is_dir_sep(ce->name[namelen - 1]))
		namelen--;
	if (namelen <= 0)
		return NULL;
	namelen--;

	/* lookup existing entry for that directory */
	dir = find_dir_entry(istate, ce->name, namelen);
	if (!dir) {
		/* not found, create it and add to hash table */
		FLEX_ALLOC_MEM(dir, name, ce->name, namelen);
		hashmap_entry_init(&dir->ent, memihash(ce->name, namelen));
		dir->namelen = namelen;
		hashmap_add(&istate->dir_hash, &dir->ent);

		/* recursively add missing parent directories */
		dir->parent = hash_dir_entry(istate, ce, namelen);
	}
	return dir;
}

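/*
 * add_dir_entry() and remove_dir_entry() below keep per-directory
 * reference counts. The increment walks toward the root only while a
 * count transitions 0 -> 1, and the decrement only while a count
 * transitions 1 -> 0, so a parent's count reflects its directly
 * contained live children, not every descendant. E.g. after adding
 * "src/a.c" and "src/b.c":
 *
 *	dir_entry "src": nr == 2
 *
 * Removing both entries drops nr back to 0 and frees the dir_entry.
 */
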
static void add_dir_entry(struct index_state *istate, struct cache_entry *ce)
{
	/* Add reference to the directory entry (and parents if 0). */
	struct dir_entry *dir = hash_dir_entry(istate, ce, ce_namelen(ce));
	while (dir && !(dir->nr++))
		dir = dir->parent;
}

static void remove_dir_entry(struct index_state *istate, struct cache_entry *ce)
{
	/*
	 * Release reference to the directory entry. If 0, remove and continue
	 * with parent directory.
	 */
	struct dir_entry *dir = hash_dir_entry(istate, ce, ce_namelen(ce));
	while (dir && !(--dir->nr)) {
		struct dir_entry *parent = dir->parent;
		hashmap_remove(&istate->dir_hash, &dir->ent, NULL);
		free(dir);
		dir = parent;
	}
}

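/*
 * Every cache_entry goes into "istate->name_hash"; "istate->dir_hash"
 * is only populated when ignore_case is set, since it exists to answer
 * case-insensitive directory lookups (see index_dir_exists() and
 * adjust_dirname_case() below).
 */
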
static void hash_index_entry(struct index_state *istate, struct cache_entry *ce)
{
	if (ce->ce_flags & CE_HASHED)
		return;
	ce->ce_flags |= CE_HASHED;
	hashmap_entry_init(&ce->ent, memihash(ce->name, ce_namelen(ce)));
	hashmap_add(&istate->name_hash, &ce->ent);

	if (ignore_case)
		add_dir_entry(istate, ce);
}

static int cache_entry_cmp(const void *unused_cmp_data,
			   const struct hashmap_entry *eptr,
			   const struct hashmap_entry *entry_or_key,
			   const void *remove)
{
	const struct cache_entry *ce1, *ce2;

	ce1 = container_of(eptr, const struct cache_entry, ent);
	ce2 = container_of(entry_or_key, const struct cache_entry, ent);

	/*
	 * For remove_name_hash, find the exact entry (pointer equality); for
	 * index_file_exists, find all entries with matching hash code and
	 * decide whether the entry matches in same_name.
	 */
	return remove ? !(ce1 == ce2) : 0;
}

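/*
 * Everything from here through threaded_lazy_init_name_hash() implements
 * a multi-threaded, two-phase build of the two hash tables:
 *
 *   phase 1: n "dir" threads each take a contiguous slice of
 *            istate->cache[], populate istate->dir_hash (guarded by a
 *            small array of mutexes), and record per-entry hash values
 *            in a shared lazy_entries[] array (lock-free, since the
 *            slices do not overlap);
 *
 *   phase 2: one "name" thread inserts every entry into
 *            istate->name_hash using the precomputed hashes, while the
 *            main thread fixes up the directory reference counts.
 */
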
static int lazy_try_threaded = 1;
static int lazy_nr_dir_threads;

/*
 * Set a minimum number of cache_entries that we will handle per
 * thread and use that to decide how many threads to run (up to
 * the number on the system).
 *
 * For guidance setting the lower per-thread bound, see:
 *     t/helper/test-lazy-init-name-hash --analyze
 */
#define LAZY_THREAD_COST (2000)

/*
 * We use n mutexes to guard n partitions of the "istate->dir_hash"
 * hashtable. Since "find" and "insert" operations will hash to a
 * particular bucket and modify/search a single chain, we can say
 * that "all chains mod n" are guarded by the same mutex -- rather
 * than having a single mutex to guard the entire table. (This does
 * require that we disable "rehashing" on the hashtable.)
 *
 * So, a larger value here decreases the probability of a collision
 * and the time that each thread must wait for the mutex.
 */
#define LAZY_MAX_MUTEX (32)

static pthread_mutex_t *lazy_dir_mutex_array;

/*
 * An array of lazy_entry items is used by the n threads in
 * the directory parse (first) phase to (lock-free) store the
 * intermediate results. These values are then referenced by
 * the 2 threads in the second phase.
 */
struct lazy_entry {
	struct dir_entry *dir;
	unsigned int hash_dir;
	unsigned int hash_name;
};

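/*
 * For an index entry "a/b/c.txt", the corresponding lazy_entry ends up
 * holding:
 *
 *	dir       -> the dir_entry for "a/b"
 *	hash_dir  == memihash("a/b")
 *	hash_name == the hash of the full path, computed incrementally
 *	             as memihash_cont(hash_dir, "/c.txt")
 */
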
/*
 * Decide if we want to use threads (if available) to load
 * the hash tables. We set "lazy_nr_dir_threads" to zero when
 * it is not worth it.
 */
static int lookup_lazy_params(struct index_state *istate)
{
	int nr_cpus;

	lazy_nr_dir_threads = 0;

	if (!lazy_try_threaded)
		return 0;

	/*
	 * If we are respecting case, just use the original
	 * code to build the "istate->name_hash". We don't
	 * need the complexity here.
	 */
	if (!ignore_case)
		return 0;

	nr_cpus = online_cpus();
	if (nr_cpus < 2)
		return 0;

	if (istate->cache_nr < 2 * LAZY_THREAD_COST)
		return 0;

	if (istate->cache_nr < nr_cpus * LAZY_THREAD_COST)
		nr_cpus = istate->cache_nr / LAZY_THREAD_COST;

	lazy_nr_dir_threads = nr_cpus;
	return lazy_nr_dir_threads;
}

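/*
 * Example: with LAZY_THREAD_COST == 2000 on an 8-CPU machine,
 * lookup_lazy_params() yields:
 *
 *	cache_nr ==   3000  ->  0 threads (below 2 * LAZY_THREAD_COST)
 *	cache_nr ==  10000  ->  5 threads (10000 / 2000 caps the CPU count)
 *	cache_nr == 100000  ->  8 threads (capped by online_cpus())
 */
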
/*
 * Initialize n mutexes for use when searching and inserting
 * into "istate->dir_hash". All "dir" threads are trying
 * to insert partial pathnames into the hash as they iterate
 * over their portions of the index, so lock contention is
 * high.
 *
 * However, the hashmap is going to put items into bucket
 * chains based on their hash values. Use that to create n
 * mutexes and lock on mutex[bucket(hash) % n]. This will
 * decrease the collision rate by (hopefully) a factor of n.
 */
static void init_dir_mutex(void)
{
	int j;

	lazy_dir_mutex_array = xcalloc(LAZY_MAX_MUTEX, sizeof(pthread_mutex_t));

	for (j = 0; j < LAZY_MAX_MUTEX; j++)
		init_recursive_mutex(&lazy_dir_mutex_array[j]);
}

static void cleanup_dir_mutex(void)
{
	int j;

	for (j = 0; j < LAZY_MAX_MUTEX; j++)
		pthread_mutex_destroy(&lazy_dir_mutex_array[j]);

	free(lazy_dir_mutex_array);
}

static void lock_dir_mutex(int j)
{
	pthread_mutex_lock(&lazy_dir_mutex_array[j]);
}

static void unlock_dir_mutex(int j)
{
	pthread_mutex_unlock(&lazy_dir_mutex_array[j]);
}

static inline int compute_dir_lock_nr(
	const struct hashmap *map,
	unsigned int hash)
{
	return hashmap_bucket(map, hash) % LAZY_MAX_MUTEX;
}

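/*
 * The locking pattern used throughout phase 1 is therefore:
 *
 *	lock_nr = compute_dir_lock_nr(&istate->dir_hash, hash);
 *	lock_dir_mutex(lock_nr);
 *	... search/insert the bucket chain for "hash" ...
 *	unlock_dir_mutex(lock_nr);
 *
 * Two threads contend only when their hashes land in bucket chains
 * that map to the same mutex (chains congruent mod LAZY_MAX_MUTEX).
 */
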
static struct dir_entry *hash_dir_entry_with_parent_and_prefix(
	struct index_state *istate,
	struct dir_entry *parent,
	struct strbuf *prefix)
{
	struct dir_entry *dir;
	unsigned int hash;
	int lock_nr;

	/*
	 * Either we have a parent directory and path with slash(es)
	 * or the directory is an immediate child of the root directory.
	 */
	assert((parent != NULL) ^ (strchr(prefix->buf, '/') == NULL));

	if (parent)
		hash = memihash_cont(parent->ent.hash,
			prefix->buf + parent->namelen,
			prefix->len - parent->namelen);
	else
		hash = memihash(prefix->buf, prefix->len);

	lock_nr = compute_dir_lock_nr(&istate->dir_hash, hash);
	lock_dir_mutex(lock_nr);

	dir = find_dir_entry__hash(istate, prefix->buf, prefix->len, hash);
	if (!dir) {
		FLEX_ALLOC_MEM(dir, name, prefix->buf, prefix->len);
		hashmap_entry_init(&dir->ent, hash);
		dir->namelen = prefix->len;
		dir->parent = parent;
		hashmap_add(&istate->dir_hash, &dir->ent);

		if (parent) {
			unlock_dir_mutex(lock_nr);

			/* All I really need here is an InterlockedIncrement(&(parent->nr)) */
			lock_nr = compute_dir_lock_nr(&istate->dir_hash, parent->ent.hash);
			lock_dir_mutex(lock_nr);
			parent->nr++;
		}
	}

	unlock_dir_mutex(lock_nr);

	return dir;
}

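/*
 * The incremental hash above avoids rescanning the leading directory
 * bytes for every child: because
 *
 *	memihash_cont(memihash("a/b"), "/c", 2) == memihash("a/b/c", 5)
 *
 * each new component only hashes its own bytes, reusing the parent's
 * already-computed hash value.
 */
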
/*
 * handle_range_1() and handle_range_dir() are derived from
 * clear_ce_flags_1() and clear_ce_flags_dir() in unpack-trees.c
 * and handle the iteration over the entire array of index entries.
 * They use recursion for adjacent entries in the same parent
 * directory.
 */
static int handle_range_1(
	struct index_state *istate,
	int k_start,
	int k_end,
	struct dir_entry *parent,
	struct strbuf *prefix,
	struct lazy_entry *lazy_entries);

static int handle_range_dir(
	struct index_state *istate,
	int k_start,
	int k_end,
	struct dir_entry *parent,
	struct strbuf *prefix,
	struct lazy_entry *lazy_entries,
	struct dir_entry **dir_new_out)
{
	int rc, k;
	int input_prefix_len = prefix->len;
	struct dir_entry *dir_new;

	dir_new = hash_dir_entry_with_parent_and_prefix(istate, parent, prefix);

	strbuf_addch(prefix, '/');

	/*
	 * Scan forward in the index array for index entries having the same
	 * path prefix (that are also in this directory).
	 */
	if (k_start + 1 >= k_end)
		k = k_end;
	else if (strncmp(istate->cache[k_start + 1]->name, prefix->buf, prefix->len) > 0)
		k = k_start + 1;
	else if (strncmp(istate->cache[k_end - 1]->name, prefix->buf, prefix->len) == 0)
		k = k_end;
	else {
		int begin = k_start;
		int end = k_end;
		assert(begin >= 0);
		while (begin < end) {
			int mid = begin + ((end - begin) >> 1);
			int cmp = strncmp(istate->cache[mid]->name, prefix->buf, prefix->len);
			if (cmp == 0) /* mid has same prefix; look in second part */
				begin = mid + 1;
			else if (cmp > 0) /* mid is past group; look in first part */
				end = mid;
			else
				die("cache entry out of order");
		}
		k = begin;
	}

	/*
	 * Recurse and process what we can of this subset [k_start, k).
	 */
	rc = handle_range_1(istate, k_start, k, dir_new, prefix, lazy_entries);

	strbuf_setlen(prefix, input_prefix_len);

	*dir_new_out = dir_new;
	return rc;
}

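/*
 * The index is sorted by path, so all entries under one directory form
 * a contiguous run; the scan above just finds the end of that run.
 * With prefix "src/" and
 *
 *	cache[10] = "src/a.c"
 *	cache[11] = "src/b.c"
 *	cache[12] = "zlib.c"
 *
 * the binary search yields k == 12, and [k_start, k) covers exactly
 * the "src/" entries.
 */
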
static int handle_range_1(
	struct index_state *istate,
	int k_start,
	int k_end,
	struct dir_entry *parent,
	struct strbuf *prefix,
	struct lazy_entry *lazy_entries)
{
	int input_prefix_len = prefix->len;
	int k = k_start;

	while (k < k_end) {
		struct cache_entry *ce_k = istate->cache[k];
		const char *name, *slash;

		if (prefix->len && strncmp(ce_k->name, prefix->buf, prefix->len))
			break;

		name = ce_k->name + prefix->len;
		slash = strchr(name, '/');

		if (slash) {
			int len = slash - name;
			int processed;
			struct dir_entry *dir_new;

			strbuf_add(prefix, name, len);
			processed = handle_range_dir(istate, k, k_end, parent, prefix, lazy_entries, &dir_new);
			if (processed) {
				k += processed;
				strbuf_setlen(prefix, input_prefix_len);
				continue;
			}

			strbuf_addch(prefix, '/');
			processed = handle_range_1(istate, k, k_end, dir_new, prefix, lazy_entries);
			k += processed;
			strbuf_setlen(prefix, input_prefix_len);
			continue;
		}

		/*
		 * It is too expensive to take a lock to insert "ce_k"
		 * into "istate->name_hash" and increment the ref-count
		 * on the "parent" dir. So we defer actually updating
		 * permanent data structures until phase 2 (where we
		 * can change the locking requirements) and simply
		 * accumulate our current results into the lazy_entries
		 * data array.
		 *
		 * We do not need to lock the lazy_entries array because
		 * we have exclusive access to the cells in the range
		 * [k_start,k_end) that this thread was given.
		 */
		lazy_entries[k].dir = parent;
		if (parent) {
			lazy_entries[k].hash_name = memihash_cont(
				parent->ent.hash,
				ce_k->name + parent->namelen,
				ce_namelen(ce_k) - parent->namelen);
			lazy_entries[k].hash_dir = parent->ent.hash;
		} else {
			lazy_entries[k].hash_name = memihash(ce_k->name, ce_namelen(ce_k));
		}

		k++;
	}

	return k - k_start;
}

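/*
 * Walking a small slice shows how the recursion nests. Given
 *
 *	cache[0] = "Makefile"
 *	cache[1] = "src/a.c"
 *	cache[2] = "src/b.c"
 *
 * the top-level call records "Makefile" directly (no slash), then sees
 * "src/a.c", pushes "src" onto the prefix, lets handle_range_dir()
 * bound the "src/" run, and recurses to record the two files against
 * the "src" dir_entry before popping the prefix again.
 */
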
struct lazy_dir_thread_data {
	pthread_t pthread;
	struct index_state *istate;
	struct lazy_entry *lazy_entries;
	int k_start;
	int k_end;
};

static void *lazy_dir_thread_proc(void *_data)
{
	struct lazy_dir_thread_data *d = _data;
	struct strbuf prefix = STRBUF_INIT;
	handle_range_1(d->istate, d->k_start, d->k_end, NULL, &prefix, d->lazy_entries);
	strbuf_release(&prefix);
	return NULL;
}

struct lazy_name_thread_data {
	pthread_t pthread;
	struct index_state *istate;
	struct lazy_entry *lazy_entries;
};

static void *lazy_name_thread_proc(void *_data)
{
	struct lazy_name_thread_data *d = _data;
	int k;

	for (k = 0; k < d->istate->cache_nr; k++) {
		struct cache_entry *ce_k = d->istate->cache[k];
		ce_k->ce_flags |= CE_HASHED;
		hashmap_entry_init(&ce_k->ent, d->lazy_entries[k].hash_name);
		hashmap_add(&d->istate->name_hash, &ce_k->ent);
	}

	return NULL;
}

static inline void lazy_update_dir_ref_counts(
	struct index_state *istate,
	struct lazy_entry *lazy_entries)
{
	int k;

	for (k = 0; k < istate->cache_nr; k++) {
		if (lazy_entries[k].dir)
			lazy_entries[k].dir->nr++;
	}
}

static void threaded_lazy_init_name_hash(
	struct index_state *istate)
{
	int err;
	int nr_each;
	int k_start;
	int t;
	struct lazy_entry *lazy_entries;
	struct lazy_dir_thread_data *td_dir;
	struct lazy_name_thread_data *td_name;

	if (!HAVE_THREADS)
		return;

	k_start = 0;
	nr_each = DIV_ROUND_UP(istate->cache_nr, lazy_nr_dir_threads);

	lazy_entries = xcalloc(istate->cache_nr, sizeof(struct lazy_entry));
	td_dir = xcalloc(lazy_nr_dir_threads, sizeof(struct lazy_dir_thread_data));
	td_name = xcalloc(1, sizeof(struct lazy_name_thread_data));

	init_dir_mutex();

	/*
	 * Phase 1:
	 * Build "istate->dir_hash" using n "dir" threads (and a read-only index).
	 */
	for (t = 0; t < lazy_nr_dir_threads; t++) {
		struct lazy_dir_thread_data *td_dir_t = td_dir + t;
		td_dir_t->istate = istate;
		td_dir_t->lazy_entries = lazy_entries;
		td_dir_t->k_start = k_start;
		k_start += nr_each;
		if (k_start > istate->cache_nr)
			k_start = istate->cache_nr;
		td_dir_t->k_end = k_start;
		err = pthread_create(&td_dir_t->pthread, NULL, lazy_dir_thread_proc, td_dir_t);
		if (err)
			die(_("unable to create lazy_dir thread: %s"), strerror(err));
	}
	for (t = 0; t < lazy_nr_dir_threads; t++) {
		struct lazy_dir_thread_data *td_dir_t = td_dir + t;
		if (pthread_join(td_dir_t->pthread, NULL))
			die("unable to join lazy_dir_thread");
	}

	/*
	 * Phase 2:
	 * Iterate over all index entries and add them to the "istate->name_hash"
	 * using a single "name" background thread.
	 * (Testing showed it wasn't worth running more than 1 thread for this.)
	 *
	 * Meanwhile, finish updating the parent directory ref-counts for each
	 * index entry using the current thread. (This step is very fast and
	 * doesn't need threading.)
	 */
	td_name->istate = istate;
	td_name->lazy_entries = lazy_entries;
	err = pthread_create(&td_name->pthread, NULL, lazy_name_thread_proc, td_name);
	if (err)
		die(_("unable to create lazy_name thread: %s"), strerror(err));

	lazy_update_dir_ref_counts(istate, lazy_entries);

	err = pthread_join(td_name->pthread, NULL);
	if (err)
		die(_("unable to join lazy_name thread: %s"), strerror(err));

	cleanup_dir_mutex();

	free(td_name);
	free(td_dir);
	free(lazy_entries);
}

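/*
 * To summarize the concurrency guarantees above: in phase 1 the index
 * itself is read-only, dir_hash inserts are serialized per bucket chain
 * by the mutex array, and lazy_entries[] needs no locking because the
 * [k_start, k_end) slices are disjoint. In phase 2 the name thread is
 * the only writer of name_hash and the main thread is the only writer
 * of the dir_entry ref-counts, so the two run safely in parallel.
 */
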
static void lazy_init_name_hash(struct index_state *istate)
{
	if (istate->name_hash_initialized)
		return;
	trace_performance_enter();
	hashmap_init(&istate->name_hash, cache_entry_cmp, NULL, istate->cache_nr);
	hashmap_init(&istate->dir_hash, dir_entry_cmp, NULL, istate->cache_nr);

	if (lookup_lazy_params(istate)) {
		/*
		 * Disable item counting and automatic rehashing because
		 * we do per-chain (mod n) locking rather than whole hashmap
		 * locking and we need to prevent the table-size from changing
		 * and bucket items from being redistributed.
		 */
		hashmap_disable_item_counting(&istate->dir_hash);
		threaded_lazy_init_name_hash(istate);
		hashmap_enable_item_counting(&istate->dir_hash);
	} else {
		int nr;
		for (nr = 0; nr < istate->cache_nr; nr++)
			hash_index_entry(istate, istate->cache[nr]);
	}

	istate->name_hash_initialized = 1;
	trace_performance_leave("initialize name hash");
}

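/*
 * Note that both tables are created with an initial size hint of
 * cache_nr, so the bulk insertions above should normally complete
 * without any rehashing, even after item counting is re-enabled.
 */
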
/*
 * A test routine for t/helper/ sources.
 *
 * Returns the number of threads used or 0 when
 * the non-threaded code path was used.
 *
 * Requesting threading WILL NOT override guards
 * in lookup_lazy_params().
 */
int test_lazy_init_name_hash(struct index_state *istate, int try_threaded)
{
	lazy_nr_dir_threads = 0;
	lazy_try_threaded = try_threaded;

	lazy_init_name_hash(istate);

	return lazy_nr_dir_threads;
}

void add_name_hash(struct index_state *istate, struct cache_entry *ce)
{
	if (istate->name_hash_initialized)
		hash_index_entry(istate, ce);
}

void remove_name_hash(struct index_state *istate, struct cache_entry *ce)
{
	if (!istate->name_hash_initialized || !(ce->ce_flags & CE_HASHED))
		return;
	ce->ce_flags &= ~CE_HASHED;
	hashmap_remove(&istate->name_hash, &ce->ent, ce);

	if (ignore_case)
		remove_dir_entry(istate, ce);
}

static int slow_same_name(const char *name1, int len1, const char *name2, int len2)
{
	if (len1 != len2)
		return 0;

	while (len1) {
		unsigned char c1 = *name1++;
		unsigned char c2 = *name2++;
		len1--;
		if (c1 != c2) {
			c1 = toupper(c1);
			c2 = toupper(c2);
			if (c1 != c2)
				return 0;
		}
	}
	return 1;
}

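/*
 * This folds case byte-by-byte, e.g.:
 *
 *	slow_same_name("Makefile", 8, "makefile", 8)  ->  1
 *	slow_same_name("Makefile", 8, "Makefile2", 9) ->  0  (length differs)
 */
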
static int same_name(const struct cache_entry *ce, const char *name, int namelen, int icase)
{
	int len = ce_namelen(ce);

	/*
	 * Always do exact compare, even if we want a case-ignoring comparison;
	 * we do the quick exact one first, because it will be the common case.
	 */
	if (len == namelen && !memcmp(name, ce->name, len))
		return 1;

	if (!icase)
		return 0;

	return slow_same_name(name, namelen, ce->name, len);
}

int index_dir_exists(struct index_state *istate, const char *name, int namelen)
{
	struct dir_entry *dir;

	lazy_init_name_hash(istate);
	dir = find_dir_entry(istate, name, namelen);
	return dir && dir->nr;
}

void adjust_dirname_case(struct index_state *istate, char *name)
{
	const char *startPtr = name;
	const char *ptr = startPtr;

	lazy_init_name_hash(istate);
	while (*ptr) {
		while (*ptr && *ptr != '/')
			ptr++;

		if (*ptr == '/') {
			struct dir_entry *dir;

			dir = find_dir_entry(istate, name, ptr - name);
			if (dir) {
				memcpy((void *)startPtr, dir->name + (startPtr - name), ptr - startPtr);
				startPtr = ptr + 1;
			}
			ptr++;
		}
	}
}

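/*
 * E.g. if the index records the directory as "Src/Lib", calling
 * adjust_dirname_case(istate, buf) on buf = "src/lib/FOO.c" rewrites
 * the buffer in place to "Src/Lib/FOO.c"; the final path component is
 * left untouched.
 */
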
struct cache_entry *index_file_exists(struct index_state *istate, const char *name, int namelen, int icase)
{
	struct cache_entry *ce;
	unsigned int hash = memihash(name, namelen);

	lazy_init_name_hash(istate);

	ce = hashmap_get_entry_from_hash(&istate->name_hash, hash, NULL,
					 struct cache_entry, ent);
	hashmap_for_each_entry_from(&istate->name_hash, ce, ent) {
		if (same_name(ce, name, namelen, icase))
			return ce;
	}
	return NULL;
}

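/*
 * Because memihash() is case-insensitive, "Makefile" and "makefile"
 * land in the same bucket; the icase flag then decides whether
 * same_name() accepts the case-folded match. Assuming the index
 * contains "Makefile" but not "makefile":
 *
 *	index_file_exists(istate, "makefile", 8, 1)  ->  the "Makefile" entry
 *	index_file_exists(istate, "makefile", 8, 0)  ->  NULL (exact match only)
 */
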
void free_name_hash(struct index_state *istate)
{
	if (!istate->name_hash_initialized)
		return;
	istate->name_hash_initialized = 0;

	hashmap_free(&istate->name_hash);
	hashmap_free_entries(&istate->dir_hash, struct dir_entry, ent);
}