THIS IS A TEST INSTANCE ONLY! REPOSITORIES CAN BE DELETED AT ANY TIME!

Git Source Code Mirror - This is a publish-only repository and all pull requests are ignored. Please follow Documentation/SubmittingPatches procedure for any of your improvements.
git
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

746 lines
19KB

  1. #include "cache.h"
  2. #include "tempfile.h"
  3. #include "lockfile.h"
  4. #include "commit.h"
  5. #include "tag.h"
  6. #include "pkt-line.h"
  7. #include "remote.h"
  8. #include "refs.h"
  9. #include "sha1-array.h"
  10. #include "diff.h"
  11. #include "revision.h"
  12. #include "commit-slab.h"
  13. #include "revision.h"
  14. #include "list-objects.h"
/*
 * Cached answer of is_repository_shallow():
 * -1 = not determined yet, 0 = not shallow, 1 = shallow.
 */
static int is_shallow = -1;
/* Snapshot of the shallow file's stat data, to detect later changes. */
static struct stat_validity shallow_stat;
/* Overrides the default $GIT_DIR/shallow path when set (see setter below). */
static char *alternate_shallow_file;
  18. void set_alternate_shallow_file(const char *path, int override)
  19. {
  20. if (is_shallow != -1)
  21. BUG("is_repository_shallow must not be called before set_alternate_shallow_file");
  22. if (alternate_shallow_file && !override)
  23. return;
  24. free(alternate_shallow_file);
  25. alternate_shallow_file = xstrdup_or_null(path);
  26. }
  27. int register_shallow(const struct object_id *oid)
  28. {
  29. struct commit_graft *graft =
  30. xmalloc(sizeof(struct commit_graft));
  31. struct commit *commit = lookup_commit(oid);
  32. oidcpy(&graft->oid, oid);
  33. graft->nr_parent = -1;
  34. if (commit && commit->object.parsed)
  35. commit->parents = NULL;
  36. return register_commit_graft(graft, 0);
  37. }
  38. int is_repository_shallow(void)
  39. {
  40. FILE *fp;
  41. char buf[1024];
  42. const char *path = alternate_shallow_file;
  43. if (is_shallow >= 0)
  44. return is_shallow;
  45. if (!path)
  46. path = git_path_shallow();
  47. /*
  48. * fetch-pack sets '--shallow-file ""' as an indicator that no
  49. * shallow file should be used. We could just open it and it
  50. * will likely fail. But let's do an explicit check instead.
  51. */
  52. if (!*path || (fp = fopen(path, "r")) == NULL) {
  53. stat_validity_clear(&shallow_stat);
  54. is_shallow = 0;
  55. return is_shallow;
  56. }
  57. stat_validity_update(&shallow_stat, fileno(fp));
  58. is_shallow = 1;
  59. while (fgets(buf, sizeof(buf), fp)) {
  60. struct object_id oid;
  61. if (get_oid_hex(buf, &oid))
  62. die("bad shallow line: %s", buf);
  63. register_shallow(&oid);
  64. }
  65. fclose(fp);
  66. return is_shallow;
  67. }
/*
 * Compute the boundary commits when truncating history at "depth",
 * starting from each object in "heads".
 *
 * The walk tracks the minimum depth at which each commit was reached
 * in commit->util (an allocated int). A commit becomes a boundary if
 * the depth limit is reached, or if it is already a parent-less
 * shallow graft in a shallow repository. Boundary commits get
 * shallow_flag and are returned as a list; every other visited
 * commit gets not_shallow_flag.
 */
struct commit_list *get_shallow_commits(struct object_array *heads, int depth,
		int shallow_flag, int not_shallow_flag)
{
	int i = 0, cur_depth = 0;
	struct commit_list *result = NULL;
	struct object_array stack = OBJECT_ARRAY_INIT;
	struct commit *commit = NULL;
	struct commit_graft *graft;

	while (commit || i < heads->nr || stack.nr) {
		struct commit_list *p;
		if (!commit) {
			/* Pick the next start point: an unprocessed head first ... */
			if (i < heads->nr) {
				commit = (struct commit *)
					deref_tag(heads->objects[i++].item, NULL, 0);
				if (!commit || commit->object.type != OBJ_COMMIT) {
					commit = NULL;
					continue;
				}
				if (!commit->util)
					commit->util = xmalloc(sizeof(int));
				*(int *)commit->util = 0;	/* heads start at depth 0 */
				cur_depth = 0;
			} else {
				/* ... then a side branch queued on the stack. */
				commit = (struct commit *)
					object_array_pop(&stack);
				cur_depth = *(int *)commit->util;
			}
		}
		parse_commit_or_die(commit);
		cur_depth++;
		/* Boundary: depth limit hit, or an existing shallow graft. */
		if ((depth != INFINITE_DEPTH && cur_depth >= depth) ||
		    (is_repository_shallow() && !commit->parents &&
		     (graft = lookup_commit_graft(&commit->object.oid)) != NULL &&
		     graft->nr_parent < 0)) {
			commit_list_insert(commit, &result);
			commit->object.flags |= shallow_flag;
			commit = NULL;
			continue;
		}
		commit->object.flags |= not_shallow_flag;
		/* Walk depth-first into the last parent; queue the others. */
		for (p = commit->parents, commit = NULL; p; p = p->next) {
			if (!p->item->util) {
				int *pointer = xmalloc(sizeof(int));
				p->item->util = pointer;
				*pointer = cur_depth;
			} else {
				int *pointer = p->item->util;
				if (cur_depth >= *pointer)
					continue;	/* already reached at a shallower depth */
				*pointer = cur_depth;
			}
			if (p->next)
				add_object_array(&p->item->object,
						NULL, &stack);
			else {
				commit = p->item;
				cur_depth = *(int *)commit->util;
			}
		}
	}
	return result;
}
  130. static void show_commit(struct commit *commit, void *data)
  131. {
  132. commit_list_insert(commit, data);
  133. }
/*
 * Given rev-list arguments, run rev-list. All reachable commits
 * except border ones are marked with not_shallow_flag. Border commits
 * are marked with shallow_flag. The list of border/shallow commits
 * are also returned.
 */
struct commit_list *get_shallow_commits_by_rev_list(int ac, const char **av,
						    int shallow_flag,
						    int not_shallow_flag)
{
	struct commit_list *result = NULL, *p;
	struct commit_list *not_shallow_list = NULL;
	struct rev_info revs;
	int both_flags = shallow_flag | not_shallow_flag;

	/*
	 * SHALLOW (excluded) and NOT_SHALLOW (included) should not be
	 * set at this point. But better be safe than sorry.
	 */
	clear_object_flags(both_flags);

	is_repository_shallow(); /* make sure shallows are read */

	init_revisions(&revs, NULL);
	save_commit_buffer = 0;
	setup_revisions(ac, av, &revs, NULL);

	if (prepare_revision_walk(&revs))
		die("revision walk setup failed");
	/* Collect every reachable commit into not_shallow_list. */
	traverse_commit_list(&revs, show_commit, NULL, &not_shallow_list);

	/* Mark all reachable commits as NOT_SHALLOW */
	for (p = not_shallow_list; p; p = p->next)
		p->item->object.flags |= not_shallow_flag;

	/*
	 * mark border commits SHALLOW + NOT_SHALLOW.
	 * We cannot clear NOT_SHALLOW right now. Imagine border
	 * commit A is processed first, then commit B, whose parent is
	 * A, later. If NOT_SHALLOW on A is cleared at step 1, B
	 * itself is considered border at step 2, which is incorrect.
	 */
	for (p = not_shallow_list; p; p = p->next) {
		struct commit *c = p->item;
		struct commit_list *parent;

		if (parse_commit(c))
			die("unable to parse commit %s",
			    oid_to_hex(&c->object.oid));

		/* A commit is a border if any parent lies outside the walk. */
		for (parent = c->parents; parent; parent = parent->next)
			if (!(parent->item->object.flags & not_shallow_flag)) {
				c->object.flags |= shallow_flag;
				commit_list_insert(c, &result);
				break;
			}
	}
	free_commit_list(not_shallow_list);

	/*
	 * Now we can clean up NOT_SHALLOW on border commits. Having
	 * both flags set can confuse the caller.
	 */
	for (p = result; p; p = p->next) {
		struct object *o = &p->item->object;
		if ((o->flags & both_flags) == both_flags)
			o->flags &= ~not_shallow_flag;
	}
	return result;
}
  195. static void check_shallow_file_for_update(void)
  196. {
  197. if (is_shallow == -1)
  198. BUG("shallow must be initialized by now");
  199. if (!stat_validity_check(&shallow_stat, git_path_shallow()))
  200. die("shallow file has changed since we read it");
  201. }
#define SEEN_ONLY 1	/* only write grafts whose commit carries SEEN */
#define VERBOSE 2	/* with SEEN_ONLY: report entries being dropped */

/* State shared with the write_one_shallow() graft callback. */
struct write_shallow_data {
	struct strbuf *out;	/* accumulated output */
	int use_pack_protocol;	/* pkt-line framing instead of plain lines */
	int count;		/* number of entries written so far */
	unsigned flags;		/* SEEN_ONLY | VERBOSE */
};
  210. static int write_one_shallow(const struct commit_graft *graft, void *cb_data)
  211. {
  212. struct write_shallow_data *data = cb_data;
  213. const char *hex = oid_to_hex(&graft->oid);
  214. if (graft->nr_parent != -1)
  215. return 0;
  216. if (data->flags & SEEN_ONLY) {
  217. struct commit *c = lookup_commit(&graft->oid);
  218. if (!c || !(c->object.flags & SEEN)) {
  219. if (data->flags & VERBOSE)
  220. printf("Removing %s from .git/shallow\n",
  221. oid_to_hex(&c->object.oid));
  222. return 0;
  223. }
  224. }
  225. data->count++;
  226. if (data->use_pack_protocol)
  227. packet_buf_write(data->out, "shallow %s", hex);
  228. else {
  229. strbuf_addstr(data->out, hex);
  230. strbuf_addch(data->out, '\n');
  231. }
  232. return 0;
  233. }
  234. static int write_shallow_commits_1(struct strbuf *out, int use_pack_protocol,
  235. const struct oid_array *extra,
  236. unsigned flags)
  237. {
  238. struct write_shallow_data data;
  239. int i;
  240. data.out = out;
  241. data.use_pack_protocol = use_pack_protocol;
  242. data.count = 0;
  243. data.flags = flags;
  244. for_each_commit_graft(write_one_shallow, &data);
  245. if (!extra)
  246. return data.count;
  247. for (i = 0; i < extra->nr; i++) {
  248. strbuf_addstr(out, oid_to_hex(extra->oid + i));
  249. strbuf_addch(out, '\n');
  250. data.count++;
  251. }
  252. return data.count;
  253. }
  254. int write_shallow_commits(struct strbuf *out, int use_pack_protocol,
  255. const struct oid_array *extra)
  256. {
  257. return write_shallow_commits_1(out, use_pack_protocol, extra, 0);
  258. }
  259. const char *setup_temporary_shallow(const struct oid_array *extra)
  260. {
  261. struct tempfile *temp;
  262. struct strbuf sb = STRBUF_INIT;
  263. if (write_shallow_commits(&sb, 0, extra)) {
  264. temp = xmks_tempfile(git_path("shallow_XXXXXX"));
  265. if (write_in_full(temp->fd, sb.buf, sb.len) < 0 ||
  266. close_tempfile_gently(temp) < 0)
  267. die_errno("failed to write to %s",
  268. get_tempfile_path(temp));
  269. strbuf_release(&sb);
  270. return get_tempfile_path(temp);
  271. }
  272. /*
  273. * is_repository_shallow() sees empty string as "no shallow
  274. * file".
  275. */
  276. return "";
  277. }
  278. void setup_alternate_shallow(struct lock_file *shallow_lock,
  279. const char **alternate_shallow_file,
  280. const struct oid_array *extra)
  281. {
  282. struct strbuf sb = STRBUF_INIT;
  283. int fd;
  284. fd = hold_lock_file_for_update(shallow_lock, git_path_shallow(),
  285. LOCK_DIE_ON_ERROR);
  286. check_shallow_file_for_update();
  287. if (write_shallow_commits(&sb, 0, extra)) {
  288. if (write_in_full(fd, sb.buf, sb.len) < 0)
  289. die_errno("failed to write to %s",
  290. get_lock_file_path(shallow_lock));
  291. *alternate_shallow_file = get_lock_file_path(shallow_lock);
  292. } else
  293. /*
  294. * is_repository_shallow() sees empty string as "no
  295. * shallow file".
  296. */
  297. *alternate_shallow_file = "";
  298. strbuf_release(&sb);
  299. }
  300. static int advertise_shallow_grafts_cb(const struct commit_graft *graft, void *cb)
  301. {
  302. int fd = *(int *)cb;
  303. if (graft->nr_parent == -1)
  304. packet_write_fmt(fd, "shallow %s\n", oid_to_hex(&graft->oid));
  305. return 0;
  306. }
  307. void advertise_shallow_grafts(int fd)
  308. {
  309. if (!is_repository_shallow())
  310. return;
  311. for_each_commit_graft(advertise_shallow_grafts_cb, &fd);
  312. }
  313. /*
  314. * mark_reachable_objects() should have been run prior to this and all
  315. * reachable commits marked as "SEEN".
  316. */
  317. void prune_shallow(int show_only)
  318. {
  319. static struct lock_file shallow_lock;
  320. struct strbuf sb = STRBUF_INIT;
  321. int fd;
  322. if (show_only) {
  323. write_shallow_commits_1(&sb, 0, NULL, SEEN_ONLY | VERBOSE);
  324. strbuf_release(&sb);
  325. return;
  326. }
  327. fd = hold_lock_file_for_update(&shallow_lock, git_path_shallow(),
  328. LOCK_DIE_ON_ERROR);
  329. check_shallow_file_for_update();
  330. if (write_shallow_commits_1(&sb, 0, NULL, SEEN_ONLY)) {
  331. if (write_in_full(fd, sb.buf, sb.len) < 0)
  332. die_errno("failed to write to %s",
  333. get_lock_file_path(&shallow_lock));
  334. commit_lock_file(&shallow_lock);
  335. } else {
  336. unlink(git_path_shallow());
  337. rollback_lock_file(&shallow_lock);
  338. }
  339. strbuf_release(&sb);
  340. }
/* Trace key for GIT_TRACE_SHALLOW debugging output. */
struct trace_key trace_shallow = TRACE_KEY_INIT(SHALLOW);
  342. /*
  343. * Step 1, split sender shallow commits into "ours" and "theirs"
  344. * Step 2, clean "ours" based on .git/shallow
  345. */
  346. void prepare_shallow_info(struct shallow_info *info, struct oid_array *sa)
  347. {
  348. int i;
  349. trace_printf_key(&trace_shallow, "shallow: prepare_shallow_info\n");
  350. memset(info, 0, sizeof(*info));
  351. info->shallow = sa;
  352. if (!sa)
  353. return;
  354. ALLOC_ARRAY(info->ours, sa->nr);
  355. ALLOC_ARRAY(info->theirs, sa->nr);
  356. for (i = 0; i < sa->nr; i++) {
  357. if (has_object_file(sa->oid + i)) {
  358. struct commit_graft *graft;
  359. graft = lookup_commit_graft(&sa->oid[i]);
  360. if (graft && graft->nr_parent < 0)
  361. continue;
  362. info->ours[info->nr_ours++] = i;
  363. } else
  364. info->theirs[info->nr_theirs++] = i;
  365. }
  366. }
  367. void clear_shallow_info(struct shallow_info *info)
  368. {
  369. free(info->ours);
  370. free(info->theirs);
  371. }
  372. /* Step 4, remove non-existent ones in "theirs" after getting the pack */
  373. void remove_nonexistent_theirs_shallow(struct shallow_info *info)
  374. {
  375. struct object_id *oid = info->shallow->oid;
  376. int i, dst;
  377. trace_printf_key(&trace_shallow, "shallow: remove_nonexistent_theirs_shallow\n");
  378. for (i = dst = 0; i < info->nr_theirs; i++) {
  379. if (i != dst)
  380. info->theirs[dst] = info->theirs[i];
  381. if (has_object_file(oid + info->theirs[i]))
  382. dst++;
  383. }
  384. info->nr_theirs = dst;
  385. }
/* Per-commit slab mapping each commit to the bitmap of refs that reach it. */
define_commit_slab(ref_bitmap, uint32_t *);

#define POOL_SIZE (512 * 1024)

/* Bump-allocator state for the ref bitmaps handed out by paint_alloc(). */
struct paint_info {
	struct ref_bitmap ref_bitmap;	/* commit -> ref bitmap */
	unsigned nr_bits;		/* number of refs, i.e. bits per bitmap */
	char **pools;			/* all pool blocks, freed in bulk */
	char *free, *end;		/* unused tail of the current pool */
	unsigned pool_count;
};
  395. static uint32_t *paint_alloc(struct paint_info *info)
  396. {
  397. unsigned nr = DIV_ROUND_UP(info->nr_bits, 32);
  398. unsigned size = nr * sizeof(uint32_t);
  399. void *p;
  400. if (!info->pool_count || size > info->end - info->free) {
  401. if (size > POOL_SIZE)
  402. BUG("pool size too small for %d in paint_alloc()",
  403. size);
  404. info->pool_count++;
  405. REALLOC_ARRAY(info->pools, info->pool_count);
  406. info->free = xmalloc(POOL_SIZE);
  407. info->pools[info->pool_count - 1] = info->free;
  408. info->end = info->free + POOL_SIZE;
  409. }
  410. p = info->free;
  411. info->free += size;
  412. return p;
  413. }
/*
 * Given a commit SHA-1, walk down to parents until either SEEN,
 * UNINTERESTING or BOTTOM is hit. Set the id-th bit in ref_bitmap for
 * all walked commits.
 */
static void paint_down(struct paint_info *info, const struct object_id *oid,
		       unsigned int id)
{
	unsigned int i, nr;
	struct commit_list *head = NULL;
	int bitmap_nr = DIV_ROUND_UP(info->nr_bits, 32);
	size_t bitmap_size = st_mult(sizeof(uint32_t), bitmap_nr);
	struct commit *c = lookup_commit_reference_gently(oid, 1);
	uint32_t *tmp; /* to be freed before return */
	uint32_t *bitmap;

	if (!c)
		return;

	tmp = xmalloc(bitmap_size);
	/* Bitmap with only the id-th bit set, shared among walked commits. */
	bitmap = paint_alloc(info);
	memset(bitmap, 0, bitmap_size);
	bitmap[id / 32] |= (1U << (id % 32));

	commit_list_insert(c, &head);
	while (head) {
		struct commit_list *p;
		struct commit *c = pop_commit(&head);
		uint32_t **refs = ref_bitmap_at(&info->ref_bitmap, c);

		/* XXX check "UNINTERESTING" from pack bitmaps if available */
		if (c->object.flags & (SEEN | UNINTERESTING))
			continue;
		else
			c->object.flags |= SEEN;	/* SEEN = visited in this paint */

		if (*refs == NULL)
			*refs = bitmap;
		else {
			/* OR this ref's bit into the commit's existing bitmap. */
			memcpy(tmp, *refs, bitmap_size);
			for (i = 0; i < bitmap_nr; i++)
				tmp[i] |= bitmap[i];
			/* Pool bitmaps may be shared: copy-on-change only. */
			if (memcmp(tmp, *refs, bitmap_size)) {
				*refs = paint_alloc(info);
				memcpy(*refs, tmp, bitmap_size);
			}
		}

		if (c->object.flags & BOTTOM)
			continue;	/* do not walk past a marked bottom */

		if (parse_commit(c))
			die("unable to parse commit %s",
			    oid_to_hex(&c->object.oid));

		for (p = c->parents; p; p = p->next) {
			if (p->item->object.flags & SEEN)
				continue;
			commit_list_insert(p->item, &head);
		}
	}

	/* Clear the temporary SEEN marks left by the walk above. */
	nr = get_max_object_index();
	for (i = 0; i < nr; i++) {
		struct object *o = get_indexed_object(i);
		if (o && o->type == OBJ_COMMIT)
			o->flags &= ~SEEN;
	}

	free(tmp);
}
  475. static int mark_uninteresting(const char *refname, const struct object_id *oid,
  476. int flags, void *cb_data)
  477. {
  478. struct commit *commit = lookup_commit_reference_gently(oid, 1);
  479. if (!commit)
  480. return 0;
  481. commit->object.flags |= UNINTERESTING;
  482. mark_parents_uninteresting(commit);
  483. return 0;
  484. }
  485. static void post_assign_shallow(struct shallow_info *info,
  486. struct ref_bitmap *ref_bitmap,
  487. int *ref_status);
/*
 * Step 6(+7), associate shallow commits with new refs
 *
 * info->ref must be initialized before calling this function.
 *
 * If used is not NULL, it's an array of info->shallow->nr
 * bitmaps. The n-th bit set in the m-th bitmap if ref[n] needs the
 * m-th shallow commit from info->shallow.
 *
 * If used is NULL, "ours" and "theirs" are updated. And if ref_status
 * is not NULL it's an array of ref->nr ints. ref_status[i] is true if
 * the ref needs some shallow commits from either info->ours or
 * info->theirs.
 */
void assign_shallow_commits_to_refs(struct shallow_info *info,
				    uint32_t **used, int *ref_status)
{
	struct object_id *oid = info->shallow->oid;
	struct oid_array *ref = info->ref;
	unsigned int i, nr;
	int *shallow, nr_shallow = 0;
	struct paint_info pi;

	trace_printf_key(&trace_shallow, "shallow: assign_shallow_commits_to_refs\n");

	/* Collect indices of all candidate shallow commits, ours then theirs. */
	ALLOC_ARRAY(shallow, info->nr_ours + info->nr_theirs);
	for (i = 0; i < info->nr_ours; i++)
		shallow[nr_shallow++] = info->ours[i];
	for (i = 0; i < info->nr_theirs; i++)
		shallow[nr_shallow++] = info->theirs[i];

	/*
	 * Prepare the commit graph to track what refs can reach what
	 * (new) shallow commits.
	 */
	nr = get_max_object_index();
	for (i = 0; i < nr; i++) {
		struct object *o = get_indexed_object(i);
		if (!o || o->type != OBJ_COMMIT)
			continue;
		o->flags &= ~(UNINTERESTING | BOTTOM | SEEN);
	}

	memset(&pi, 0, sizeof(pi));
	init_ref_bitmap(&pi.ref_bitmap);
	pi.nr_bits = ref->nr;	/* one bit per ref */

	/*
	 * "--not --all" to cut short the traversal if new refs
	 * connect to old refs. If not (e.g. force ref updates) it'll
	 * have to go down to the current shallow commits.
	 */
	head_ref(mark_uninteresting, NULL);
	for_each_ref(mark_uninteresting, NULL);

	/* Mark potential bottoms so we won't go out of bound */
	for (i = 0; i < nr_shallow; i++) {
		struct commit *c = lookup_commit(&oid[shallow[i]]);
		c->object.flags |= BOTTOM;
	}

	/* Paint each ref's bit down its history. */
	for (i = 0; i < ref->nr; i++)
		paint_down(&pi, ref->oid + i, i);

	if (used) {
		int bitmap_size = DIV_ROUND_UP(pi.nr_bits, 32) * sizeof(uint32_t);
		memset(used, 0, sizeof(*used) * info->shallow->nr);
		for (i = 0; i < nr_shallow; i++) {
			const struct commit *c = lookup_commit(&oid[shallow[i]]);
			uint32_t **map = ref_bitmap_at(&pi.ref_bitmap, c);
			if (*map)
				used[shallow[i]] = xmemdupz(*map, bitmap_size);
		}
		/*
		 * unreachable shallow commits are not removed from
		 * "ours" and "theirs". The user is supposed to run
		 * step 7 on every ref separately and not trust "ours"
		 * and "theirs" any more.
		 */
	} else
		post_assign_shallow(info, &pi.ref_bitmap, ref_status);

	/* Release the bitmap slab, the bump-allocator pools, and scratch. */
	clear_ref_bitmap(&pi.ref_bitmap);
	for (i = 0; i < pi.pool_count; i++)
		free(pi.pools[i]);
	free(pi.pools);
	free(shallow);
}
/* Growable array of ref-tip commits, filled via the add_ref() callback. */
struct commit_array {
	struct commit **commits;
	int nr, alloc;
};
  571. static int add_ref(const char *refname, const struct object_id *oid,
  572. int flags, void *cb_data)
  573. {
  574. struct commit_array *ca = cb_data;
  575. ALLOC_GROW(ca->commits, ca->nr + 1, ca->alloc);
  576. ca->commits[ca->nr] = lookup_commit_reference_gently(oid, 1);
  577. if (ca->commits[ca->nr])
  578. ca->nr++;
  579. return 0;
  580. }
  581. static void update_refstatus(int *ref_status, int nr, uint32_t *bitmap)
  582. {
  583. unsigned int i;
  584. if (!ref_status)
  585. return;
  586. for (i = 0; i < nr; i++)
  587. if (bitmap[i / 32] & (1U << (i % 32)))
  588. ref_status[i]++;
  589. }
/*
 * Step 7, reachability test on "ours" at commit level
 */
static void post_assign_shallow(struct shallow_info *info,
				struct ref_bitmap *ref_bitmap,
				int *ref_status)
{
	struct object_id *oid = info->shallow->oid;
	struct commit *c;
	uint32_t **bitmap;
	int dst, i, j;
	int bitmap_nr = DIV_ROUND_UP(info->ref->nr, 32);
	struct commit_array ca;

	trace_printf_key(&trace_shallow, "shallow: post_assign_shallow\n");
	if (ref_status)
		memset(ref_status, 0, sizeof(*ref_status) * info->ref->nr);

	/* Remove unreachable shallow commits from "theirs" */
	for (i = dst = 0; i < info->nr_theirs; i++) {
		if (i != dst)
			info->theirs[dst] = info->theirs[i];
		c = lookup_commit(&oid[info->theirs[i]]);
		bitmap = ref_bitmap_at(ref_bitmap, c);
		if (!*bitmap)
			continue;	/* no ref reaches it: drop the entry */
		for (j = 0; j < bitmap_nr; j++)
			if (bitmap[0][j]) {
				/* some ref needs it: keep it (dst++) */
				update_refstatus(ref_status, info->ref->nr, *bitmap);
				dst++;
				break;
			}
	}
	info->nr_theirs = dst;

	/* Collect all ref tips for the merge-base reachability test below. */
	memset(&ca, 0, sizeof(ca));
	head_ref(add_ref, &ca);
	for_each_ref(add_ref, &ca);

	/* Remove unreachable shallow commits from "ours" */
	for (i = dst = 0; i < info->nr_ours; i++) {
		if (i != dst)
			info->ours[dst] = info->ours[i];
		c = lookup_commit(&oid[info->ours[i]]);
		bitmap = ref_bitmap_at(ref_bitmap, c);
		if (!*bitmap)
			continue;
		for (j = 0; j < bitmap_nr; j++)
			if (bitmap[0][j] &&
			    /* Step 7, reachability test at commit level */
			    !in_merge_bases_many(c, ca.nr, ca.commits)) {
				update_refstatus(ref_status, info->ref->nr, *bitmap);
				dst++;
				break;
			}
	}
	info->nr_ours = dst;

	free(ca.commits);
}
  645. /* (Delayed) step 7, reachability test at commit level */
  646. int delayed_reachability_test(struct shallow_info *si, int c)
  647. {
  648. if (si->need_reachability_test[c]) {
  649. struct commit *commit = lookup_commit(&si->shallow->oid[c]);
  650. if (!si->commits) {
  651. struct commit_array ca;
  652. memset(&ca, 0, sizeof(ca));
  653. head_ref(add_ref, &ca);
  654. for_each_ref(add_ref, &ca);
  655. si->commits = ca.commits;
  656. si->nr_commits = ca.nr;
  657. }
  658. si->reachable[c] = in_merge_bases_many(commit,
  659. si->nr_commits,
  660. si->commits);
  661. si->need_reachability_test[c] = 0;
  662. }
  663. return si->reachable[c];
  664. }