#include "cache.h"
#include "lockfile.h"
#include "tree.h"
#include "tree-walk.h"
#include "cache-tree.h"
#include "object-store.h"
#include "replace-object.h"
#include "promisor-remote.h"

#ifndef DEBUG_CACHE_TREE
#define DEBUG_CACHE_TREE 0
#endif

struct cache_tree *cache_tree(void)
{
	struct cache_tree *it = xcalloc(1, sizeof(struct cache_tree));
	it->entry_count = -1;
	return it;
}

void cache_tree_free(struct cache_tree **it_p)
{
	int i;
	struct cache_tree *it = *it_p;

	if (!it)
		return;
	for (i = 0; i < it->subtree_nr; i++)
		if (it->down[i]) {
			cache_tree_free(&it->down[i]->cache_tree);
			free(it->down[i]);
		}
	free(it->down);
	free(it);
	*it_p = NULL;
}
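/*
 * Subtrees are ordered by subtree_name_cmp(): shorter names sort
 * first, ties are broken by memcmp().  subtree_pos() binary-searches
 * that order and follows the usual convention of returning the index
 * on a hit and -insertion_point-1 on a miss, so find_subtree() can
 * use the result for insertion as well.
 */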
static int subtree_name_cmp(const char *one, int onelen,
			    const char *two, int twolen)
{
	if (onelen < twolen)
		return -1;
	if (twolen < onelen)
		return 1;
	return memcmp(one, two, onelen);
}

static int subtree_pos(struct cache_tree *it, const char *path, int pathlen)
{
	struct cache_tree_sub **down = it->down;
	int lo, hi;

	lo = 0;
	hi = it->subtree_nr;
	while (lo < hi) {
		int mi = lo + (hi - lo) / 2;
		struct cache_tree_sub *mdl = down[mi];
		int cmp = subtree_name_cmp(path, pathlen,
					   mdl->name, mdl->namelen);
		if (!cmp)
			return mi;
		if (cmp < 0)
			hi = mi;
		else
			lo = mi + 1;
	}
	return -lo-1;
}

static struct cache_tree_sub *find_subtree(struct cache_tree *it,
					   const char *path,
					   int pathlen,
					   int create)
{
	struct cache_tree_sub *down;
	int pos = subtree_pos(it, path, pathlen);

	if (0 <= pos)
		return it->down[pos];
	if (!create)
		return NULL;

	pos = -pos-1;
	ALLOC_GROW(it->down, it->subtree_nr + 1, it->subtree_alloc);
	it->subtree_nr++;

	FLEX_ALLOC_MEM(down, name, path, pathlen);
	down->cache_tree = NULL;
	down->namelen = pathlen;

	if (pos < it->subtree_nr)
		MOVE_ARRAY(it->down + pos + 1, it->down + pos,
			   it->subtree_nr - pos - 1);
	it->down[pos] = down;
	return down;
}

struct cache_tree_sub *cache_tree_sub(struct cache_tree *it, const char *path)
{
	int pathlen = strlen(path);
	return find_subtree(it, path, pathlen, 1);
}

static int do_invalidate_path(struct cache_tree *it, const char *path)
{
	/* a/b/c
	 * ==> invalidate self
	 * ==> find "a", have it invalidate "b/c"
	 * a
	 * ==> invalidate self
	 * ==> if "a" exists as a subtree, remove it.
	 */
	const char *slash;
	int namelen;
	struct cache_tree_sub *down;

#if DEBUG_CACHE_TREE
	fprintf(stderr, "cache-tree invalidate <%s>\n", path);
#endif

	if (!it)
		return 0;
	slash = strchrnul(path, '/');
	namelen = slash - path;
	it->entry_count = -1;
	if (!*slash) {
		int pos;
		pos = subtree_pos(it, path, namelen);
		if (0 <= pos) {
			cache_tree_free(&it->down[pos]->cache_tree);
			free(it->down[pos]);
			/* 0 1 2 3 4 5
			 *       ^     ^subtree_nr = 6
			 *       pos
			 * move 4 and 5 up one place (2 entries)
			 * 2 = 6 - 3 - 1 = subtree_nr - pos - 1
			 */
			MOVE_ARRAY(it->down + pos, it->down + pos + 1,
				   it->subtree_nr - pos - 1);
			it->subtree_nr--;
		}
		return 1;
	}
	down = find_subtree(it, path, namelen, 0);
	if (down)
		do_invalidate_path(down->cache_tree, slash + 1);
	return 1;
}
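/*
 * Public entry point: invalidate every cache-tree level along "path"
 * (removing the subtree outright when "path" names it exactly), and
 * mark the index changed so the invalidation gets written out.
 */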
void cache_tree_invalidate_path(struct index_state *istate, const char *path)
{
	if (do_invalidate_path(istate->cache_tree, path))
		istate->cache_changed |= CACHE_TREE_CHANGED;
}

static int verify_cache(struct cache_entry **cache,
			int entries, int flags)
{
	int i, funny;
	int silent = flags & WRITE_TREE_SILENT;

	/* Verify that the tree is merged */
	funny = 0;
	for (i = 0; i < entries; i++) {
		const struct cache_entry *ce = cache[i];
		if (ce_stage(ce)) {
			if (silent)
				return -1;
			if (10 < ++funny) {
				fprintf(stderr, "...\n");
				break;
			}
			fprintf(stderr, "%s: unmerged (%s)\n",
				ce->name, oid_to_hex(&ce->oid));
		}
	}
	if (funny)
		return -1;

	/* Also verify that the cache does not have path and path/file
	 * at the same time.  At this point we know the cache has only
	 * stage 0 entries.
	 */
	funny = 0;
	for (i = 0; i < entries - 1; i++) {
		/* path/file always comes after path because of the way
		 * the cache is sorted.  Also path can appear only once,
		 * which means a conflicting one would immediately follow.
		 */
		const char *this_name = cache[i]->name;
		const char *next_name = cache[i+1]->name;
		int this_len = strlen(this_name);
		if (this_len < strlen(next_name) &&
		    strncmp(this_name, next_name, this_len) == 0 &&
		    next_name[this_len] == '/') {
			if (10 < ++funny) {
				fprintf(stderr, "...\n");
				break;
			}
			fprintf(stderr, "You have both %s and %s\n",
				this_name, next_name);
		}
	}
	if (funny)
		return -1;
	return 0;
}
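/*
 * Drop the subtrees whose "used" bit was left unset by the caller's
 * marking pass (see update_one()); the survivors are compacted to
 * the front of the down[] array.
 */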
static void discard_unused_subtrees(struct cache_tree *it)
{
	struct cache_tree_sub **down = it->down;
	int nr = it->subtree_nr;
	int dst, src;
	for (dst = src = 0; src < nr; src++) {
		struct cache_tree_sub *s = down[src];
		if (s->used)
			down[dst++] = s;
		else {
			cache_tree_free(&s->cache_tree);
			free(s);
			it->subtree_nr--;
		}
	}
}

int cache_tree_fully_valid(struct cache_tree *it)
{
	int i;
	if (!it)
		return 0;
	if (it->entry_count < 0 || !has_object_file(&it->oid))
		return 0;
	for (i = 0; i < it->subtree_nr; i++) {
		if (!cache_tree_fully_valid(it->down[i]->cache_tree))
			return 0;
	}
	return 1;
}
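/*
 * Recursively compute the tree object for the span of index entries
 * under "base", writing it out (or only hashing it, for dry runs) and
 * recording its object name in it->oid.  Returns the number of index
 * entries covered at this level, or a negative value on error;
 * *skip_count reports how many of those entries were skipped (e.g.
 * CE_REMOVE) and are therefore excluded from entry_count.
 */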
static int update_one(struct cache_tree *it,
		      struct cache_entry **cache,
		      int entries,
		      const char *base,
		      int baselen,
		      int *skip_count,
		      int flags)
{
	struct strbuf buffer;
	int missing_ok = flags & WRITE_TREE_MISSING_OK;
	int dryrun = flags & WRITE_TREE_DRY_RUN;
	int repair = flags & WRITE_TREE_REPAIR;
	int to_invalidate = 0;
	int i;

	assert(!(dryrun && repair));

	*skip_count = 0;

	if (0 <= it->entry_count && has_object_file(&it->oid))
		return it->entry_count;

	/*
	 * We first scan for subtrees and update them; we start by
	 * marking existing subtrees -- the ones that are unmarked
	 * should not be in the result.
	 */
	for (i = 0; i < it->subtree_nr; i++)
		it->down[i]->used = 0;

	/*
	 * Find the subtrees and update them.
	 */
	i = 0;
	while (i < entries) {
		const struct cache_entry *ce = cache[i];
		struct cache_tree_sub *sub;
		const char *path, *slash;
		int pathlen, sublen, subcnt, subskip;

		path = ce->name;
		pathlen = ce_namelen(ce);
		if (pathlen <= baselen || memcmp(base, path, baselen))
			break; /* at the end of this level */

		slash = strchr(path + baselen, '/');
		if (!slash) {
			i++;
			continue;
		}
		/*
		 * a/bbb/c (base = a/, slash = /c)
		 * ==>
		 * path+baselen = bbb/c, sublen = 3
		 */
		sublen = slash - (path + baselen);
		sub = find_subtree(it, path + baselen, sublen, 1);
		if (!sub->cache_tree)
			sub->cache_tree = cache_tree();
		subcnt = update_one(sub->cache_tree,
				    cache + i, entries - i,
				    path,
				    baselen + sublen + 1,
				    &subskip,
				    flags);
		if (subcnt < 0)
			return subcnt;
		if (!subcnt)
			die("index cache-tree records empty sub-tree");
		i += subcnt;
		sub->count = subcnt; /* to be used in the next loop */
		*skip_count += subskip;
		sub->used = 1;
	}

	discard_unused_subtrees(it);

	/*
	 * Then write out the tree object for this level.
	 */
	strbuf_init(&buffer, 8192);

	i = 0;
	while (i < entries) {
		const struct cache_entry *ce = cache[i];
		struct cache_tree_sub *sub = NULL;
		const char *path, *slash;
		int pathlen, entlen;
		const struct object_id *oid;
		unsigned mode;
		int expected_missing = 0;
		int contains_ita = 0;
		int ce_missing_ok;

		path = ce->name;
		pathlen = ce_namelen(ce);
		if (pathlen <= baselen || memcmp(base, path, baselen))
			break; /* at the end of this level */

		slash = strchr(path + baselen, '/');
		if (slash) {
			entlen = slash - (path + baselen);
			sub = find_subtree(it, path + baselen, entlen, 0);
			if (!sub)
				die("cache-tree.c: '%.*s' in '%s' not found",
				    entlen, path + baselen, path);
			i += sub->count;
			oid = &sub->cache_tree->oid;
			mode = S_IFDIR;
			contains_ita = sub->cache_tree->entry_count < 0;
			if (contains_ita) {
				to_invalidate = 1;
				expected_missing = 1;
			}
		}
		else {
			oid = &ce->oid;
			mode = ce->ce_mode;
			entlen = pathlen - baselen;
			i++;
		}

		ce_missing_ok = mode == S_IFGITLINK || missing_ok ||
			(has_promisor_remote() &&
			 ce_skip_worktree(ce));
		if (is_null_oid(oid) ||
		    (!ce_missing_ok && !has_object_file(oid))) {
			strbuf_release(&buffer);
			if (expected_missing)
				return -1;
			return error("invalid object %06o %s for '%.*s'",
				mode, oid_to_hex(oid), entlen+baselen, path);
		}

		/*
		 * CE_REMOVE entries are removed before the index is
		 * written to disk.  Skip them to remain consistent
		 * with the future on-disk index.
		 */
		if (ce->ce_flags & CE_REMOVE) {
			*skip_count = *skip_count + 1;
			continue;
		}

		/*
		 * CE_INTENT_TO_ADD entries exist in the on-disk index but
		 * they are not part of generated trees.  Invalidate up
		 * to the root to force cache-tree users to read elsewhere.
		 */
		if (!sub && ce_intent_to_add(ce)) {
			to_invalidate = 1;
			continue;
		}

		/*
		 * "sub" can be an empty tree if all subentries are i-t-a.
		 */
		if (contains_ita && is_empty_tree_oid(oid))
			continue;

		strbuf_grow(&buffer, entlen + 100);
		strbuf_addf(&buffer, "%o %.*s%c", mode, entlen, path + baselen, '\0');
		strbuf_add(&buffer, oid->hash, the_hash_algo->rawsz);

#if DEBUG_CACHE_TREE
		fprintf(stderr, "cache-tree update-one %o %.*s\n",
			mode, entlen, path + baselen);
#endif
	}

	if (repair) {
		struct object_id oid;
		hash_object_file(buffer.buf, buffer.len, tree_type, &oid);
		if (has_object_file_with_flags(&oid, OBJECT_INFO_SKIP_FETCH_OBJECT))
			oidcpy(&it->oid, &oid);
		else
			to_invalidate = 1;
	} else if (dryrun) {
		hash_object_file(buffer.buf, buffer.len, tree_type, &it->oid);
	} else if (write_object_file(buffer.buf, buffer.len, tree_type,
				     &it->oid)) {
		strbuf_release(&buffer);
		return -1;
	}

	strbuf_release(&buffer);
	it->entry_count = to_invalidate ? -1 : i - *skip_count;
#if DEBUG_CACHE_TREE
	fprintf(stderr, "cache-tree update-one (%d ent, %d subtree) %s\n",
		it->entry_count, it->subtree_nr,
		oid_to_hex(&it->oid));
#endif
	return i;
}
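/*
 * Bring the cache-tree of the whole index up to date, after first
 * verifying that the index is fully merged and free of path vs.
 * path/file conflicts.  Returns 0 on success, a negative value on
 * failure.
 */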
int cache_tree_update(struct index_state *istate, int flags)
{
	struct cache_tree *it = istate->cache_tree;
	struct cache_entry **cache = istate->cache;
	int entries = istate->cache_nr;
	int skip, i = verify_cache(cache, entries, flags);

	if (i)
		return i;
	trace_performance_enter();
	i = update_one(it, cache, entries, "", 0, &skip, flags);
	trace_performance_leave("cache_tree_update");
	if (i < 0)
		return i;
	istate->cache_changed |= CACHE_TREE_CHANGED;
	return 0;
}

static void write_one(struct strbuf *buffer, struct cache_tree *it,
		      const char *path, int pathlen)
{
	int i;

	/* One "cache-tree" entry consists of the following:
	 * path (NUL terminated)
	 * entry_count, subtree_nr ("%d %d\n")
	 * tree-sha1 (missing if invalid)
	 * subtree_nr "cache-tree" entries for subtrees.
	 */
	strbuf_grow(buffer, pathlen + 100);
	strbuf_add(buffer, path, pathlen);
	strbuf_addf(buffer, "%c%d %d\n", 0, it->entry_count, it->subtree_nr);

#if DEBUG_CACHE_TREE
	if (0 <= it->entry_count)
		fprintf(stderr, "cache-tree <%.*s> (%d ent, %d subtree) %s\n",
			pathlen, path, it->entry_count, it->subtree_nr,
			oid_to_hex(&it->oid));
	else
		fprintf(stderr, "cache-tree <%.*s> (%d subtree) invalid\n",
			pathlen, path, it->subtree_nr);
#endif

	if (0 <= it->entry_count) {
		strbuf_add(buffer, it->oid.hash, the_hash_algo->rawsz);
	}
	for (i = 0; i < it->subtree_nr; i++) {
		struct cache_tree_sub *down = it->down[i];
		if (i) {
			struct cache_tree_sub *prev = it->down[i-1];
			if (subtree_name_cmp(down->name, down->namelen,
					     prev->name, prev->namelen) <= 0)
				die("fatal - unsorted cache subtree");
		}
		write_one(buffer, down->cache_tree, down->name, down->namelen);
	}
}

void cache_tree_write(struct strbuf *sb, struct cache_tree *root)
{
	write_one(sb, root, "", 0);
}
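/*
 * Parse one cache-tree entry in the format produced by write_one()
 * above (NUL-terminated path, "%d %d\n" counts, then the raw hash
 * when the entry is valid), recursing into its subtrees.  Returns
 * NULL and frees any partial result if the buffer is truncated or
 * malformed.
 */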
static struct cache_tree *read_one(const char **buffer, unsigned long *size_p)
{
	const char *buf = *buffer;
	unsigned long size = *size_p;
	const char *cp;
	char *ep;
	struct cache_tree *it;
	int i, subtree_nr;
	const unsigned rawsz = the_hash_algo->rawsz;

	it = NULL;
	/* skip name, but make sure name exists */
	while (size && *buf) {
		size--;
		buf++;
	}
	if (!size)
		goto free_return;
	buf++; size--;
	it = cache_tree();

	cp = buf;
	it->entry_count = strtol(cp, &ep, 10);
	if (cp == ep)
		goto free_return;
	cp = ep;
	subtree_nr = strtol(cp, &ep, 10);
	if (cp == ep)
		goto free_return;
	while (size && *buf && *buf != '\n') {
		size--;
		buf++;
	}
	if (!size)
		goto free_return;
	buf++; size--;
	if (0 <= it->entry_count) {
		if (size < rawsz)
			goto free_return;
		oidread(&it->oid, (const unsigned char *)buf);
		buf += rawsz;
		size -= rawsz;
	}

#if DEBUG_CACHE_TREE
	if (0 <= it->entry_count)
		fprintf(stderr, "cache-tree <%s> (%d ent, %d subtree) %s\n",
			*buffer, it->entry_count, subtree_nr,
			oid_to_hex(&it->oid));
	else
		fprintf(stderr, "cache-tree <%s> (%d subtrees) invalid\n",
			*buffer, subtree_nr);
#endif

	/*
	 * Just a heuristic -- we do not add directories that often but
	 * we do not want to have to extend it immediately when we do,
	 * hence +2.
	 */
	it->subtree_alloc = subtree_nr + 2;
	it->down = xcalloc(it->subtree_alloc, sizeof(struct cache_tree_sub *));
	for (i = 0; i < subtree_nr; i++) {
		/* read each subtree */
		struct cache_tree *sub;
		struct cache_tree_sub *subtree;
		const char *name = buf;

		sub = read_one(&buf, &size);
		if (!sub)
			goto free_return;
		subtree = cache_tree_sub(it, name);
		subtree->cache_tree = sub;
	}
	if (subtree_nr != it->subtree_nr)
		die("cache-tree: internal error");
	*buffer = buf;
	*size_p = size;
	return it;

 free_return:
	cache_tree_free(&it);
	return NULL;
}

struct cache_tree *cache_tree_read(const char *buffer, unsigned long size)
{
	if (buffer[0])
		return NULL; /* not the whole tree */
	return read_one(&buffer, &size);
}
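/*
 * Walk "path" one component at a time, descending into the matching
 * subtree at each level; returns NULL as soon as a component is not
 * found.  A trailing slash (or a run of slashes) is tolerated.
 */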
static struct cache_tree *cache_tree_find(struct cache_tree *it, const char *path)
{
	if (!it)
		return NULL;
	while (*path) {
		const char *slash;
		struct cache_tree_sub *sub;

		slash = strchrnul(path, '/');
		/*
		 * Between path and slash is the name of the subtree
		 * to look for.
		 */
		sub = find_subtree(it, path, slash - path, 0);
		if (!sub)
			return NULL;
		it = sub->cache_tree;

		path = slash;
		while (*path == '/')
			path++;
	}
	return it;
}

static int write_index_as_tree_internal(struct object_id *oid,
					struct index_state *index_state,
					int cache_tree_valid,
					int flags,
					const char *prefix)
{
	if (flags & WRITE_TREE_IGNORE_CACHE_TREE) {
		cache_tree_free(&index_state->cache_tree);
		cache_tree_valid = 0;
	}

	if (!index_state->cache_tree)
		index_state->cache_tree = cache_tree();

	if (!cache_tree_valid && cache_tree_update(index_state, flags) < 0)
		return WRITE_TREE_UNMERGED_INDEX;

	if (prefix) {
		struct cache_tree *subtree;
		subtree = cache_tree_find(index_state->cache_tree, prefix);
		if (!subtree)
			return WRITE_TREE_PREFIX_ERROR;
		oidcpy(oid, &subtree->oid);
	}
	else
		oidcpy(oid, &index_state->cache_tree->oid);

	return 0;
}

struct tree* write_in_core_index_as_tree(struct repository *repo) {
	struct object_id o;
	int was_valid, ret;

	struct index_state *index_state = repo->index;
	was_valid = index_state->cache_tree &&
		    cache_tree_fully_valid(index_state->cache_tree);

	ret = write_index_as_tree_internal(&o, index_state, was_valid, 0, NULL);
	if (ret == WRITE_TREE_UNMERGED_INDEX) {
		int i;
		fprintf(stderr, "BUG: There are unmerged index entries:\n");
		for (i = 0; i < index_state->cache_nr; i++) {
			const struct cache_entry *ce = index_state->cache[i];
			if (ce_stage(ce))
				fprintf(stderr, "BUG: %d %.*s\n", ce_stage(ce),
					(int)ce_namelen(ce), ce->name);
		}
		BUG("unmerged index entries when writing inmemory index");
	}

	return lookup_tree(repo, &index_state->cache_tree->oid);
}
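/*
 * Illustrative sketch (not code from this file) of how a caller in
 * the spirit of "git write-tree" might use the function below: read
 * the on-disk index, reuse the cached tree when it is fully valid,
 * and print the resulting tree's object name.
 *
 *	struct object_id oid;
 *	int ret = write_index_as_tree(&oid, the_repository->index,
 *				      get_index_file(), 0, NULL);
 *	if (!ret)
 *		printf("%s\n", oid_to_hex(&oid));
 */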
int write_index_as_tree(struct object_id *oid, struct index_state *index_state, const char *index_path, int flags, const char *prefix)
{
	int entries, was_valid;
	struct lock_file lock_file = LOCK_INIT;
	int ret;

	hold_lock_file_for_update(&lock_file, index_path, LOCK_DIE_ON_ERROR);

	entries = read_index_from(index_state, index_path, get_git_dir());
	if (entries < 0) {
		ret = WRITE_TREE_UNREADABLE_INDEX;
		goto out;
	}

	was_valid = !(flags & WRITE_TREE_IGNORE_CACHE_TREE) &&
		    index_state->cache_tree &&
		    cache_tree_fully_valid(index_state->cache_tree);

	ret = write_index_as_tree_internal(oid, index_state, was_valid, flags,
					   prefix);
	if (!ret && !was_valid) {
		write_locked_index(index_state, &lock_file, COMMIT_LOCK);
		/* Not being able to write is fine -- we are only interested
		 * in updating the cache-tree part, and if the next caller
		 * ends up using the old index with unupdated cache-tree part
		 * it misses the work we did here, but that is just a
		 * performance penalty and not a big deal.
		 */
	}

out:
	rollback_lock_file(&lock_file);
	return ret;
}

static void prime_cache_tree_rec(struct repository *r,
				 struct cache_tree *it,
				 struct tree *tree)
{
	struct tree_desc desc;
	struct name_entry entry;
	int cnt;

	oidcpy(&it->oid, &tree->object.oid);
	init_tree_desc(&desc, tree->buffer, tree->size);
	cnt = 0;
	while (tree_entry(&desc, &entry)) {
		if (!S_ISDIR(entry.mode))
			cnt++;
		else {
			struct cache_tree_sub *sub;
			struct tree *subtree = lookup_tree(r, &entry.oid);
			if (!subtree->object.parsed)
				parse_tree(subtree);
			sub = cache_tree_sub(it, entry.path);
			sub->cache_tree = cache_tree();
			prime_cache_tree_rec(r, sub->cache_tree, subtree);
			cnt += sub->cache_tree->entry_count;
		}
	}
	it->entry_count = cnt;
}
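/*
 * Rebuild the index's cache-tree wholesale from an existing tree
 * object (e.g. after the index has been reset to match that tree),
 * so that subsequent tree writes can reuse it without rehashing
 * anything.
 */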
void prime_cache_tree(struct repository *r,
		      struct index_state *istate,
		      struct tree *tree)
{
	cache_tree_free(&istate->cache_tree);
	istate->cache_tree = cache_tree();
	prime_cache_tree_rec(r, istate->cache_tree, tree);
	istate->cache_changed |= CACHE_TREE_CHANGED;
}

/*
 * find the cache_tree that corresponds to the current level without
 * exploding the full path into textual form.  The root of the
 * cache tree is given as "root", and our current level is "info".
 * (1) When at root level, info->prev is NULL, so it is "root" itself.
 * (2) Otherwise, find the cache_tree that corresponds to one level
 *     above us, and find ourselves in there.
 */
static struct cache_tree *find_cache_tree_from_traversal(struct cache_tree *root,
							 struct traverse_info *info)
{
	struct cache_tree *our_parent;

	if (!info->prev)
		return root;
	our_parent = find_cache_tree_from_traversal(root, info->prev);
	return cache_tree_find(our_parent, info->name);
}

int cache_tree_matches_traversal(struct cache_tree *root,
				 struct name_entry *ent,
				 struct traverse_info *info)
{
	struct cache_tree *it;

	it = find_cache_tree_from_traversal(root, info);
	it = cache_tree_find(it, ent->path);
	if (it && it->entry_count > 0 && oideq(&ent->oid, &it->oid))
		return it->entry_count;
	return 0;
}
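/*
 * Recompute the tree object for each valid cache-tree level directly
 * from the index entries and BUG() if the cached object name
 * disagrees; this is the internal consistency check behind
 * cache_tree_verify().
 */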
static void verify_one(struct repository *r,
		       struct index_state *istate,
		       struct cache_tree *it,
		       struct strbuf *path)
{
	int i, pos, len = path->len;
	struct strbuf tree_buf = STRBUF_INIT;
	struct object_id new_oid;

	for (i = 0; i < it->subtree_nr; i++) {
		strbuf_addf(path, "%s/", it->down[i]->name);
		verify_one(r, istate, it->down[i]->cache_tree, path);
		strbuf_setlen(path, len);
	}

	if (it->entry_count < 0 ||
	    /* no verification on tests (t7003) that replace trees */
	    lookup_replace_object(r, &it->oid) != &it->oid)
		return;

	if (path->len) {
		pos = index_name_pos(istate, path->buf, path->len);
		pos = -pos - 1;
	} else {
		pos = 0;
	}

	i = 0;
	while (i < it->entry_count) {
		struct cache_entry *ce = istate->cache[pos + i];
		const char *slash;
		struct cache_tree_sub *sub = NULL;
		const struct object_id *oid;
		const char *name;
		unsigned mode;
		int entlen;

		if (ce->ce_flags & (CE_STAGEMASK | CE_INTENT_TO_ADD | CE_REMOVE))
			BUG("%s with flags 0x%x should not be in cache-tree",
			    ce->name, ce->ce_flags);
		name = ce->name + path->len;
		slash = strchr(name, '/');
		if (slash) {
			entlen = slash - name;
			sub = find_subtree(it, ce->name + path->len, entlen, 0);
			if (!sub || sub->cache_tree->entry_count < 0)
				BUG("bad subtree '%.*s'", entlen, name);
			oid = &sub->cache_tree->oid;
			mode = S_IFDIR;
			i += sub->cache_tree->entry_count;
		} else {
			oid = &ce->oid;
			mode = ce->ce_mode;
			entlen = ce_namelen(ce) - path->len;
			i++;
		}
		strbuf_addf(&tree_buf, "%o %.*s%c", mode, entlen, name, '\0');
		strbuf_add(&tree_buf, oid->hash, the_hash_algo->rawsz);
	}
	hash_object_file(tree_buf.buf, tree_buf.len, tree_type, &new_oid);
	if (!oideq(&new_oid, &it->oid))
		BUG("cache-tree for path %.*s does not match. "
		    "Expected %s got %s", len, path->buf,
		    oid_to_hex(&new_oid), oid_to_hex(&it->oid));
	strbuf_setlen(path, len);
	strbuf_release(&tree_buf);
}

void cache_tree_verify(struct repository *r, struct index_state *istate)
{
	struct strbuf path = STRBUF_INIT;

	if (!istate->cache_tree)
		return;
	verify_one(r, istate, istate->cache_tree, &path);
	strbuf_release(&path);
}