Git Source Code Mirror - This is a publish-only repository and all pull requests are ignored. Please follow Documentation/SubmittingPatches procedure for any of your improvements.

#include "cache.h"
#include "repository.h"
#include "config.h"
#include "lockfile.h"
#include "refs.h"
#include "pkt-line.h"
#include "commit.h"
#include "tag.h"
#include "exec-cmd.h"
#include "pack.h"
#include "sideband.h"
#include "fetch-pack.h"
#include "remote.h"
#include "run-command.h"
#include "connect.h"
#include "transport.h"
#include "version.h"
#include "sha1-array.h"
#include "oidset.h"
#include "packfile.h"
#include "object-store.h"
#include "connected.h"
#include "fetch-negotiator.h"
#include "fsck.h"

static int transfer_unpack_limit = -1;
static int fetch_unpack_limit = -1;
static int unpack_limit = 100;
static int prefer_ofs_delta = 1;
static int no_done;
static int deepen_since_ok;
static int deepen_not_ok;
static int fetch_fsck_objects = -1;
static int transfer_fsck_objects = -1;
static int agent_supported;
static int server_supports_filtering;
static struct lock_file shallow_lock;
static const char *alternate_shallow_file;
static struct strbuf fsck_msg_types = STRBUF_INIT;

/* Remember to update object flag allocation in object.h */
#define COMPLETE (1U << 0)
#define ALTERNATE (1U << 1)

/*
 * After sending this many "have"s if we do not get any new ACK , we
 * give up traversing our history.
 */
#define MAX_IN_VAIN 256

static int multi_ack, use_sideband;

/* Allow specifying sha1 if it is a ref tip. */
#define ALLOW_TIP_SHA1 01
/* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
#define ALLOW_REACHABLE_SHA1 02

static unsigned int allow_unadvertised_object_request;

__attribute__((format (printf, 2, 3)))
static inline void print_verbose(const struct fetch_pack_args *args,
				 const char *fmt, ...)
{
	va_list params;
	if (!args->verbose)
		return;
	va_start(params, fmt);
	vfprintf(stderr, fmt, params);
	va_end(params);
	fputc('\n', stderr);
}

struct alternate_object_cache {
	struct object **items;
	size_t nr, alloc;
};

static void cache_one_alternate(const struct object_id *oid,
				void *vcache)
{
	struct alternate_object_cache *cache = vcache;
	struct object *obj = parse_object(the_repository, oid);
	if (!obj || (obj->flags & ALTERNATE))
		return;
	obj->flags |= ALTERNATE;
	ALLOC_GROW(cache->items, cache->nr + 1, cache->alloc);
	cache->items[cache->nr++] = obj;
}

static void for_each_cached_alternate(struct fetch_negotiator *negotiator,
				      void (*cb)(struct fetch_negotiator *,
						 struct object *))
{
	static int initialized;
	static struct alternate_object_cache cache;
	size_t i;
	if (!initialized) {
		for_each_alternate_ref(cache_one_alternate, &cache);
		initialized = 1;
	}
	for (i = 0; i < cache.nr; i++)
		cb(negotiator, cache.items[i]);
}

static int rev_list_insert_ref(struct fetch_negotiator *negotiator,
			       const char *refname,
			       const struct object_id *oid)
{
	struct object *o = deref_tag(the_repository,
				     parse_object(the_repository, oid),
				     refname, 0);
	if (o && o->type == OBJ_COMMIT)
		negotiator->add_tip(negotiator, (struct commit *)o);
	return 0;
}

static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
				   int flag, void *cb_data)
{
	return rev_list_insert_ref(cb_data, refname, oid);
}

enum ack_type {
	NAK = 0,
	ACK,
	ACK_continue,
	ACK_common,
	ACK_ready
};

static void consume_shallow_list(struct fetch_pack_args *args,
				 struct packet_reader *reader)
{
	if (args->stateless_rpc && args->deepen) {
		/* If we sent a depth we will get back "duplicate"
		 * shallow and unshallow commands every time there
		 * is a block of have lines exchanged.
		 */
		while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
			if (starts_with(reader->line, "shallow "))
				continue;
			if (starts_with(reader->line, "unshallow "))
				continue;
			die(_("git fetch-pack: expected shallow list"));
		}
		if (reader->status != PACKET_READ_FLUSH)
			die(_("git fetch-pack: expected a flush packet after shallow list"));
	}
}

static enum ack_type get_ack(struct packet_reader *reader,
			     struct object_id *result_oid)
{
	int len;
	const char *arg;
	if (packet_reader_read(reader) != PACKET_READ_NORMAL)
		die(_("git fetch-pack: expected ACK/NAK, got a flush packet"));
	len = reader->pktlen;
	if (!strcmp(reader->line, "NAK"))
		return NAK;
	if (skip_prefix(reader->line, "ACK ", &arg)) {
		const char *p;
		if (!parse_oid_hex(arg, result_oid, &p)) {
			len -= p - reader->line;
			if (len < 1)
				return ACK;
			if (strstr(p, "continue"))
				return ACK_continue;
			if (strstr(p, "common"))
				return ACK_common;
			if (strstr(p, "ready"))
				return ACK_ready;
			return ACK;
		}
	}
	die(_("git fetch-pack: expected ACK/NAK, got '%s'"), reader->line);
}

static void send_request(struct fetch_pack_args *args,
			 int fd, struct strbuf *buf)
{
	if (args->stateless_rpc) {
		send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
		packet_flush(fd);
	} else {
		if (write_in_full(fd, buf->buf, buf->len) < 0)
			die_errno(_("unable to write to remote"));
	}
}

static void insert_one_alternate_object(struct fetch_negotiator *negotiator,
					struct object *obj)
{
	rev_list_insert_ref(negotiator, NULL, &obj->oid);
}

#define INITIAL_FLUSH 16
#define PIPESAFE_FLUSH 32
#define LARGE_FLUSH 16384

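/*
 * The flush window starts small and grows: it doubles until it reaches
 * PIPESAFE_FLUSH (or LARGE_FLUSH when talking over stateless RPC) and then
 * grows more slowly, so early rounds get quick responses while later rounds
 * batch many "have" lines per request.
 */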
static int next_flush(int stateless_rpc, int count)
{
	if (stateless_rpc) {
		if (count < LARGE_FLUSH)
			count <<= 1;
		else
			count = count * 11 / 10;
	} else {
		if (count < PIPESAFE_FLUSH)
			count <<= 1;
		else
			count += PIPESAFE_FLUSH;
	}
	return count;
}

static void mark_tips(struct fetch_negotiator *negotiator,
		      const struct oid_array *negotiation_tips)
{
	int i;
	if (!negotiation_tips) {
		for_each_ref(rev_list_insert_ref_oid, negotiator);
		return;
	}
	for (i = 0; i < negotiation_tips->nr; i++)
		rev_list_insert_ref(negotiator, NULL,
				    &negotiation_tips->oid[i]);
	return;
}

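/*
 * Protocol v0/v1 negotiation: send "want" lines (with capabilities on the
 * first one) and any shallow/deepen requests, then stream "have" lines in
 * increasingly large batches, reading ACK/NAK responses until the server
 * says it is ready, we run out of candidates, or MAX_IN_VAIN haves go
 * unacknowledged. Returns 1 when there is nothing to fetch, 0 when a common
 * commit was found (or the local repository is empty), and -1 otherwise.
 */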
static int find_common(struct fetch_negotiator *negotiator,
		       struct fetch_pack_args *args,
		       int fd[2], struct object_id *result_oid,
		       struct ref *refs)
{
	int fetching;
	int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
	const struct object_id *oid;
	unsigned in_vain = 0;
	int got_continue = 0;
	int got_ready = 0;
	struct strbuf req_buf = STRBUF_INIT;
	size_t state_len = 0;
	struct packet_reader reader;
	if (args->stateless_rpc && multi_ack == 1)
		die(_("--stateless-rpc requires multi_ack_detailed"));
	packet_reader_init(&reader, fd[0], NULL, 0,
			   PACKET_READ_CHOMP_NEWLINE |
			   PACKET_READ_DIE_ON_ERR_PACKET);
	if (!args->no_dependents) {
		mark_tips(negotiator, args->negotiation_tips);
		for_each_cached_alternate(negotiator, insert_one_alternate_object);
	}
	fetching = 0;
	for ( ; refs ; refs = refs->next) {
		struct object_id *remote = &refs->old_oid;
		const char *remote_hex;
		struct object *o;
		/*
		 * If that object is complete (i.e. it is an ancestor of a
		 * local ref), we tell them we have it but do not have to
		 * tell them about its ancestors, which they already know
		 * about.
		 *
		 * We use lookup_object here because we are only
		 * interested in the case we *know* the object is
		 * reachable and we have already scanned it.
		 *
		 * Do this only if args->no_dependents is false (if it is true,
		 * we cannot trust the object flags).
		 */
		if (!args->no_dependents &&
		    ((o = lookup_object(the_repository, remote)) != NULL) &&
		    (o->flags & COMPLETE)) {
			continue;
		}
		remote_hex = oid_to_hex(remote);
		if (!fetching) {
			struct strbuf c = STRBUF_INIT;
			if (multi_ack == 2) strbuf_addstr(&c, " multi_ack_detailed");
			if (multi_ack == 1) strbuf_addstr(&c, " multi_ack");
			if (no_done) strbuf_addstr(&c, " no-done");
			if (use_sideband == 2) strbuf_addstr(&c, " side-band-64k");
			if (use_sideband == 1) strbuf_addstr(&c, " side-band");
			if (args->deepen_relative) strbuf_addstr(&c, " deepen-relative");
			if (args->use_thin_pack) strbuf_addstr(&c, " thin-pack");
			if (args->no_progress) strbuf_addstr(&c, " no-progress");
			if (args->include_tag) strbuf_addstr(&c, " include-tag");
			if (prefer_ofs_delta) strbuf_addstr(&c, " ofs-delta");
			if (deepen_since_ok) strbuf_addstr(&c, " deepen-since");
			if (deepen_not_ok) strbuf_addstr(&c, " deepen-not");
			if (agent_supported) strbuf_addf(&c, " agent=%s",
							 git_user_agent_sanitized());
			if (args->filter_options.choice)
				strbuf_addstr(&c, " filter");
			packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
			strbuf_release(&c);
		} else
			packet_buf_write(&req_buf, "want %s\n", remote_hex);
		fetching++;
	}
	if (!fetching) {
		strbuf_release(&req_buf);
		packet_flush(fd[1]);
		return 1;
	}
	if (is_repository_shallow(the_repository))
		write_shallow_commits(&req_buf, 1, NULL);
	if (args->depth > 0)
		packet_buf_write(&req_buf, "deepen %d", args->depth);
	if (args->deepen_since) {
		timestamp_t max_age = approxidate(args->deepen_since);
		packet_buf_write(&req_buf, "deepen-since %"PRItime, max_age);
	}
	if (args->deepen_not) {
		int i;
		for (i = 0; i < args->deepen_not->nr; i++) {
			struct string_list_item *s = args->deepen_not->items + i;
			packet_buf_write(&req_buf, "deepen-not %s", s->string);
		}
	}
	if (server_supports_filtering && args->filter_options.choice) {
		const char *spec =
			expand_list_objects_filter_spec(&args->filter_options);
		packet_buf_write(&req_buf, "filter %s", spec);
	}
	packet_buf_flush(&req_buf);
	state_len = req_buf.len;
	if (args->deepen) {
		const char *arg;
		struct object_id oid;
		send_request(args, fd[1], &req_buf);
		while (packet_reader_read(&reader) == PACKET_READ_NORMAL) {
			if (skip_prefix(reader.line, "shallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid shallow line: %s"), reader.line);
				register_shallow(the_repository, &oid);
				continue;
			}
			if (skip_prefix(reader.line, "unshallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid unshallow line: %s"), reader.line);
				if (!lookup_object(the_repository, &oid))
					die(_("object not found: %s"), reader.line);
				/* make sure that it is parsed as shallow */
				if (!parse_object(the_repository, &oid))
					die(_("error in object: %s"), reader.line);
				if (unregister_shallow(&oid))
					die(_("no shallow found: %s"), reader.line);
				continue;
			}
			die(_("expected shallow/unshallow, got %s"), reader.line);
		}
	} else if (!args->stateless_rpc)
		send_request(args, fd[1], &req_buf);
	if (!args->stateless_rpc) {
		/* If we aren't using the stateless-rpc interface
		 * we don't need to retain the headers.
		 */
		strbuf_setlen(&req_buf, 0);
		state_len = 0;
	}
	trace2_region_enter("fetch-pack", "negotiation_v0_v1", the_repository);
	flushes = 0;
	retval = -1;
	if (args->no_dependents)
		goto done;
	while ((oid = negotiator->next(negotiator))) {
		packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
		print_verbose(args, "have %s", oid_to_hex(oid));
		in_vain++;
		if (flush_at <= ++count) {
			int ack;
			packet_buf_flush(&req_buf);
			send_request(args, fd[1], &req_buf);
			strbuf_setlen(&req_buf, state_len);
			flushes++;
			flush_at = next_flush(args->stateless_rpc, count);
			/*
			 * We keep one window "ahead" of the other side, and
			 * will wait for an ACK only on the next one
			 */
			if (!args->stateless_rpc && count == INITIAL_FLUSH)
				continue;
			consume_shallow_list(args, &reader);
			do {
				ack = get_ack(&reader, result_oid);
				if (ack)
					print_verbose(args, _("got %s %d %s"), "ack",
						      ack, oid_to_hex(result_oid));
				switch (ack) {
				case ACK:
					flushes = 0;
					multi_ack = 0;
					retval = 0;
					goto done;
				case ACK_common:
				case ACK_ready:
				case ACK_continue: {
					struct commit *commit =
						lookup_commit(the_repository,
							      result_oid);
					int was_common;
					if (!commit)
						die(_("invalid commit %s"), oid_to_hex(result_oid));
					was_common = negotiator->ack(negotiator, commit);
					if (args->stateless_rpc
					    && ack == ACK_common
					    && !was_common) {
						/* We need to replay the have for this object
						 * on the next RPC request so the peer knows
						 * it is in common with us.
						 */
						const char *hex = oid_to_hex(result_oid);
						packet_buf_write(&req_buf, "have %s\n", hex);
						state_len = req_buf.len;
						/*
						 * Reset in_vain because an ack
						 * for this commit has not been
						 * seen.
						 */
						in_vain = 0;
					} else if (!args->stateless_rpc
						   || ack != ACK_common)
						in_vain = 0;
					retval = 0;
					got_continue = 1;
					if (ack == ACK_ready)
						got_ready = 1;
					break;
					}
				}
			} while (ack);
			flushes--;
			if (got_continue && MAX_IN_VAIN < in_vain) {
				print_verbose(args, _("giving up"));
				break; /* give up */
			}
			if (got_ready)
				break;
		}
	}
done:
	trace2_region_leave("fetch-pack", "negotiation_v0_v1", the_repository);
	if (!got_ready || !no_done) {
		packet_buf_write(&req_buf, "done\n");
		send_request(args, fd[1], &req_buf);
	}
	print_verbose(args, _("done"));
	if (retval != 0) {
		multi_ack = 0;
		flushes++;
	}
	strbuf_release(&req_buf);
	if (!got_ready || !no_done)
		consume_shallow_list(args, &reader);
	while (flushes || multi_ack) {
		int ack = get_ack(&reader, result_oid);
		if (ack) {
			print_verbose(args, _("got %s (%d) %s"), "ack",
				      ack, oid_to_hex(result_oid));
			if (ack == ACK)
				return 0;
			multi_ack = 1;
			continue;
		}
		flushes--;
	}
	/* it is no error to fetch into a completely empty repo */
	return count ? retval : 0;
}

static struct commit_list *complete;

static int mark_complete(const struct object_id *oid)
{
	struct object *o = parse_object(the_repository, oid);
	while (o && o->type == OBJ_TAG) {
		struct tag *t = (struct tag *) o;
		if (!t->tagged)
			break; /* broken repository */
		o->flags |= COMPLETE;
		o = parse_object(the_repository, &t->tagged->oid);
	}
	if (o && o->type == OBJ_COMMIT) {
		struct commit *commit = (struct commit *)o;
		if (!(commit->object.flags & COMPLETE)) {
			commit->object.flags |= COMPLETE;
			commit_list_insert(commit, &complete);
		}
	}
	return 0;
}

static int mark_complete_oid(const char *refname, const struct object_id *oid,
			     int flag, void *cb_data)
{
	return mark_complete(oid);
}

static void mark_recent_complete_commits(struct fetch_pack_args *args,
					 timestamp_t cutoff)
{
	while (complete && cutoff <= complete->item->date) {
		print_verbose(args, _("Marking %s as complete"),
			      oid_to_hex(&complete->item->object.oid));
		pop_most_recent_commit(&complete, COMPLETE);
	}
}

static void add_refs_to_oidset(struct oidset *oids, struct ref *refs)
{
	for (; refs; refs = refs->next)
		oidset_insert(oids, &refs->old_oid);
}

static int is_unmatched_ref(const struct ref *ref)
{
	struct object_id oid;
	const char *p;
	return ref->match_status == REF_NOT_MATCHED &&
	       !parse_oid_hex(ref->name, &oid, &p) &&
	       *p == '\0' &&
	       oideq(&oid, &ref->old_oid);
}

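/*
 * Reduce the advertised ref list to the refs the caller asked for in
 * "sought" (plus everything when fetching all refs), and append requests
 * for unadvertised object ids only when the server allows them or the id
 * happens to match an advertised tip; anything else is marked
 * REF_UNADVERTISED_NOT_ALLOWED.
 */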
static void filter_refs(struct fetch_pack_args *args,
			struct ref **refs,
			struct ref **sought, int nr_sought)
{
	struct ref *newlist = NULL;
	struct ref **newtail = &newlist;
	struct ref *unmatched = NULL;
	struct ref *ref, *next;
	struct oidset tip_oids = OIDSET_INIT;
	int i;
	int strict = !(allow_unadvertised_object_request &
		       (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1));
	i = 0;
	for (ref = *refs; ref; ref = next) {
		int keep = 0;
		next = ref->next;
		if (starts_with(ref->name, "refs/") &&
		    check_refname_format(ref->name, 0)) {
			/*
			 * trash or a peeled value; do not even add it to
			 * unmatched list
			 */
			free_one_ref(ref);
			continue;
		} else {
			while (i < nr_sought) {
				int cmp = strcmp(ref->name, sought[i]->name);
				if (cmp < 0)
					break; /* definitely do not have it */
				else if (cmp == 0) {
					keep = 1; /* definitely have it */
					sought[i]->match_status = REF_MATCHED;
				}
				i++;
			}
			if (!keep && args->fetch_all &&
			    (!args->deepen || !starts_with(ref->name, "refs/tags/")))
				keep = 1;
		}
		if (keep) {
			*newtail = ref;
			ref->next = NULL;
			newtail = &ref->next;
		} else {
			ref->next = unmatched;
			unmatched = ref;
		}
	}
	if (strict) {
		for (i = 0; i < nr_sought; i++) {
			ref = sought[i];
			if (!is_unmatched_ref(ref))
				continue;
			add_refs_to_oidset(&tip_oids, unmatched);
			add_refs_to_oidset(&tip_oids, newlist);
			break;
		}
	}
	/* Append unmatched requests to the list */
	for (i = 0; i < nr_sought; i++) {
		ref = sought[i];
		if (!is_unmatched_ref(ref))
			continue;
		if (!strict || oidset_contains(&tip_oids, &ref->old_oid)) {
			ref->match_status = REF_MATCHED;
			*newtail = copy_ref(ref);
			newtail = &(*newtail)->next;
		} else {
			ref->match_status = REF_UNADVERTISED_NOT_ALLOWED;
		}
	}
	oidset_clear(&tip_oids);
	free_refs(unmatched);
	*refs = newlist;
}

static void mark_alternate_complete(struct fetch_negotiator *unused,
				    struct object *obj)
{
	mark_complete(&obj->oid);
}

struct loose_object_iter {
	struct oidset *loose_object_set;
	struct ref *refs;
};

/*
 * Mark recent commits available locally and reachable from a local ref as
 * COMPLETE. If args->no_dependents is false, also mark COMPLETE remote refs as
 * COMMON_REF (otherwise, we are not planning to participate in negotiation, and
 * thus do not need COMMON_REF marks).
 *
 * The cutoff time for recency is determined by this heuristic: it is the
 * earliest commit time of the objects in refs that are commits and that we know
 * the commit time of.
 */
static void mark_complete_and_common_ref(struct fetch_negotiator *negotiator,
					 struct fetch_pack_args *args,
					 struct ref **refs)
{
	struct ref *ref;
	int old_save_commit_buffer = save_commit_buffer;
	timestamp_t cutoff = 0;
	save_commit_buffer = 0;
	trace2_region_enter("fetch-pack", "parse_remote_refs_and_find_cutoff", NULL);
	for (ref = *refs; ref; ref = ref->next) {
		struct object *o;
		if (!has_object_file_with_flags(&ref->old_oid,
						OBJECT_INFO_QUICK |
						OBJECT_INFO_SKIP_FETCH_OBJECT))
			continue;
		o = parse_object(the_repository, &ref->old_oid);
		if (!o)
			continue;
		/*
		 * We already have it -- which may mean that we were
		 * in sync with the other side at some time after
		 * that (it is OK if we guess wrong here).
		 */
		if (o->type == OBJ_COMMIT) {
			struct commit *commit = (struct commit *)o;
			if (!cutoff || cutoff < commit->date)
				cutoff = commit->date;
		}
	}
	trace2_region_leave("fetch-pack", "parse_remote_refs_and_find_cutoff", NULL);
	/*
	 * This block marks all local refs as COMPLETE, and then recursively marks all
	 * parents of those refs as COMPLETE.
	 */
	trace2_region_enter("fetch-pack", "mark_complete_local_refs", NULL);
	if (!args->deepen) {
		for_each_ref(mark_complete_oid, NULL);
		for_each_cached_alternate(NULL, mark_alternate_complete);
		commit_list_sort_by_date(&complete);
		if (cutoff)
			mark_recent_complete_commits(args, cutoff);
	}
	trace2_region_leave("fetch-pack", "mark_complete_local_refs", NULL);
	/*
	 * Mark all complete remote refs as common refs.
	 * Don't mark them common yet; the server has to be told so first.
	 */
	trace2_region_enter("fetch-pack", "mark_common_remote_refs", NULL);
	for (ref = *refs; ref; ref = ref->next) {
		struct object *o = deref_tag(the_repository,
					     lookup_object(the_repository,
							   &ref->old_oid),
					     NULL, 0);
		if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
			continue;
		negotiator->known_common(negotiator,
					 (struct commit *)o);
	}
	trace2_region_leave("fetch-pack", "mark_common_remote_refs", NULL);
	save_commit_buffer = old_save_commit_buffer;
}

/*
 * Returns 1 if every object pointed to by the given remote refs is available
 * locally and reachable from a local ref, and 0 otherwise.
 */
static int everything_local(struct fetch_pack_args *args,
			    struct ref **refs)
{
	struct ref *ref;
	int retval;
	for (retval = 1, ref = *refs; ref ; ref = ref->next) {
		const struct object_id *remote = &ref->old_oid;
		struct object *o;
		o = lookup_object(the_repository, remote);
		if (!o || !(o->flags & COMPLETE)) {
			retval = 0;
			print_verbose(args, "want %s (%s)", oid_to_hex(remote),
				      ref->name);
			continue;
		}
		print_verbose(args, _("already have %s (%s)"), oid_to_hex(remote),
			      ref->name);
	}
	return retval;
}

static int sideband_demux(int in, int out, void *data)
{
	int *xd = data;
	int ret;
	ret = recv_sideband("fetch-pack", xd[0], out);
	close(out);
	return ret;
}

static void write_promisor_file(const char *keep_name,
				struct ref **sought, int nr_sought)
{
	struct strbuf promisor_name = STRBUF_INIT;
	int suffix_stripped;
	FILE *output;
	int i;
	strbuf_addstr(&promisor_name, keep_name);
	suffix_stripped = strbuf_strip_suffix(&promisor_name, ".keep");
	if (!suffix_stripped)
		BUG("name of pack lockfile should end with .keep (was '%s')",
		    keep_name);
	strbuf_addstr(&promisor_name, ".promisor");
	output = xfopen(promisor_name.buf, "w");
	for (i = 0; i < nr_sought; i++)
		fprintf(output, "%s %s\n", oid_to_hex(&sought[i]->old_oid),
			sought[i]->name);
	fclose(output);
	strbuf_release(&promisor_name);
}

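/*
 * Read the pack data sent by the server (demultiplexing the sideband when
 * one is in use) and feed it to either "index-pack" (when the pack is kept
 * or comes from a promisor remote) or "unpack-objects" (for small packs
 * below the unpack limit), passing along fsck, keep and promisor options
 * as configured.
 */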
static int get_pack(struct fetch_pack_args *args,
		    int xd[2], char **pack_lockfile,
		    struct ref **sought, int nr_sought)
{
	struct async demux;
	int do_keep = args->keep_pack;
	const char *cmd_name;
	struct pack_header header;
	int pass_header = 0;
	struct child_process cmd = CHILD_PROCESS_INIT;
	int ret;
	memset(&demux, 0, sizeof(demux));
	if (use_sideband) {
		/* xd[] is talking with upload-pack; subprocess reads from
		 * xd[0], spits out band#2 to stderr, and feeds us band#1
		 * through demux->out.
		 */
		demux.proc = sideband_demux;
		demux.data = xd;
		demux.out = -1;
		demux.isolate_sigpipe = 1;
		if (start_async(&demux))
			die(_("fetch-pack: unable to fork off sideband demultiplexer"));
	}
	else
		demux.out = xd[0];
	if (!args->keep_pack && unpack_limit) {
		if (read_pack_header(demux.out, &header))
			die(_("protocol error: bad pack header"));
		pass_header = 1;
		if (ntohl(header.hdr_entries) < unpack_limit)
			do_keep = 0;
		else
			do_keep = 1;
	}
	if (alternate_shallow_file) {
		argv_array_push(&cmd.args, "--shallow-file");
		argv_array_push(&cmd.args, alternate_shallow_file);
	}
	if (do_keep || args->from_promisor) {
		if (pack_lockfile)
			cmd.out = -1;
		cmd_name = "index-pack";
		argv_array_push(&cmd.args, cmd_name);
		argv_array_push(&cmd.args, "--stdin");
		if (!args->quiet && !args->no_progress)
			argv_array_push(&cmd.args, "-v");
		if (args->use_thin_pack)
			argv_array_push(&cmd.args, "--fix-thin");
		if (do_keep && (args->lock_pack || unpack_limit)) {
			char hostname[HOST_NAME_MAX + 1];
			if (xgethostname(hostname, sizeof(hostname)))
				xsnprintf(hostname, sizeof(hostname), "localhost");
			argv_array_pushf(&cmd.args,
					 "--keep=fetch-pack %"PRIuMAX " on %s",
					 (uintmax_t)getpid(), hostname);
		}
		if (args->check_self_contained_and_connected)
			argv_array_push(&cmd.args, "--check-self-contained-and-connected");
		/*
		 * If we're obtaining the filename of a lockfile, we'll use
		 * that filename to write a .promisor file with more
		 * information below. If not, we need index-pack to do it for
		 * us.
		 */
		if (!(do_keep && pack_lockfile) && args->from_promisor)
			argv_array_push(&cmd.args, "--promisor");
	}
	else {
		cmd_name = "unpack-objects";
		argv_array_push(&cmd.args, cmd_name);
		if (args->quiet || args->no_progress)
			argv_array_push(&cmd.args, "-q");
		args->check_self_contained_and_connected = 0;
	}
	if (pass_header)
		argv_array_pushf(&cmd.args, "--pack_header=%"PRIu32",%"PRIu32,
				 ntohl(header.hdr_version),
				 ntohl(header.hdr_entries));
	if (fetch_fsck_objects >= 0
	    ? fetch_fsck_objects
	    : transfer_fsck_objects >= 0
	    ? transfer_fsck_objects
	    : 0) {
		if (args->from_promisor)
			/*
			 * We cannot use --strict in index-pack because it
			 * checks both broken objects and links, but we only
			 * want to check for broken objects.
			 */
			argv_array_push(&cmd.args, "--fsck-objects");
		else
			argv_array_pushf(&cmd.args, "--strict%s",
					 fsck_msg_types.buf);
	}
	cmd.in = demux.out;
	cmd.git_cmd = 1;
	if (start_command(&cmd))
		die(_("fetch-pack: unable to fork off %s"), cmd_name);
	if (do_keep && pack_lockfile) {
		*pack_lockfile = index_pack_lockfile(cmd.out);
		close(cmd.out);
	}
	if (!use_sideband)
		/* Closed by start_command() */
		xd[0] = -1;
	ret = finish_command(&cmd);
	if (!ret || (args->check_self_contained_and_connected && ret == 1))
		args->self_contained_and_connected =
			args->check_self_contained_and_connected &&
			ret == 0;
	else
		die(_("%s failed"), cmd_name);
	if (use_sideband && finish_async(&demux))
		die(_("error in sideband demultiplexer"));
	/*
	 * Now that index-pack has succeeded, write the promisor file using the
	 * obtained .keep filename if necessary
	 */
	if (do_keep && pack_lockfile && args->from_promisor)
		write_promisor_file(*pack_lockfile, sought, nr_sought);
	return 0;
}

static int cmp_ref_by_name(const void *a_, const void *b_)
{
	const struct ref *a = *((const struct ref **)a_);
	const struct ref *b = *((const struct ref **)b_);
	return strcmp(a->name, b->name);
}

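/*
 * Protocol v0/v1 driver: parse the capabilities advertised alongside the
 * ref list (dying when a requested feature such as shallow or deepen-since
 * is missing), mark what we already have, negotiate a common base with
 * find_common(), and finally download the pack with get_pack().
 */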
static struct ref *do_fetch_pack(struct fetch_pack_args *args,
				 int fd[2],
				 const struct ref *orig_ref,
				 struct ref **sought, int nr_sought,
				 struct shallow_info *si,
				 char **pack_lockfile)
{
	struct repository *r = the_repository;
	struct ref *ref = copy_ref_list(orig_ref);
	struct object_id oid;
	const char *agent_feature;
	int agent_len;
	struct fetch_negotiator negotiator_alloc;
	struct fetch_negotiator *negotiator;
	if (args->no_dependents) {
		negotiator = NULL;
	} else {
		negotiator = &negotiator_alloc;
		fetch_negotiator_init(r, negotiator);
	}
	sort_ref_list(&ref, ref_compare_name);
	QSORT(sought, nr_sought, cmp_ref_by_name);
	if ((agent_feature = server_feature_value("agent", &agent_len))) {
		agent_supported = 1;
		if (agent_len)
			print_verbose(args, _("Server version is %.*s"),
				      agent_len, agent_feature);
	}
	if (server_supports("shallow"))
		print_verbose(args, _("Server supports %s"), "shallow");
	else if (args->depth > 0 || is_repository_shallow(r))
		die(_("Server does not support shallow clients"));
	if (args->depth > 0 || args->deepen_since || args->deepen_not)
		args->deepen = 1;
	if (server_supports("multi_ack_detailed")) {
		print_verbose(args, _("Server supports %s"), "multi_ack_detailed");
		multi_ack = 2;
		if (server_supports("no-done")) {
			print_verbose(args, _("Server supports %s"), "no-done");
			if (args->stateless_rpc)
				no_done = 1;
		}
	}
	else if (server_supports("multi_ack")) {
		print_verbose(args, _("Server supports %s"), "multi_ack");
		multi_ack = 1;
	}
	if (server_supports("side-band-64k")) {
		print_verbose(args, _("Server supports %s"), "side-band-64k");
		use_sideband = 2;
	}
	else if (server_supports("side-band")) {
		print_verbose(args, _("Server supports %s"), "side-band");
		use_sideband = 1;
	}
	if (server_supports("allow-tip-sha1-in-want")) {
		print_verbose(args, _("Server supports %s"), "allow-tip-sha1-in-want");
		allow_unadvertised_object_request |= ALLOW_TIP_SHA1;
	}
	if (server_supports("allow-reachable-sha1-in-want")) {
		print_verbose(args, _("Server supports %s"), "allow-reachable-sha1-in-want");
		allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
	}
	if (server_supports("thin-pack"))
		print_verbose(args, _("Server supports %s"), "thin-pack");
	else
		args->use_thin_pack = 0;
	if (server_supports("no-progress"))
		print_verbose(args, _("Server supports %s"), "no-progress");
	else
		args->no_progress = 0;
	if (server_supports("include-tag"))
		print_verbose(args, _("Server supports %s"), "include-tag");
	else
		args->include_tag = 0;
	if (server_supports("ofs-delta"))
		print_verbose(args, _("Server supports %s"), "ofs-delta");
	else
		prefer_ofs_delta = 0;
	if (server_supports("filter")) {
		server_supports_filtering = 1;
		print_verbose(args, _("Server supports %s"), "filter");
	} else if (args->filter_options.choice) {
		warning("filtering not recognized by server, ignoring");
	}
	if (server_supports("deepen-since")) {
		print_verbose(args, _("Server supports %s"), "deepen-since");
		deepen_since_ok = 1;
	} else if (args->deepen_since)
		die(_("Server does not support --shallow-since"));
	if (server_supports("deepen-not")) {
		print_verbose(args, _("Server supports %s"), "deepen-not");
		deepen_not_ok = 1;
	} else if (args->deepen_not)
		die(_("Server does not support --shallow-exclude"));
	if (server_supports("deepen-relative"))
		print_verbose(args, _("Server supports %s"), "deepen-relative");
	else if (args->deepen_relative)
		die(_("Server does not support --deepen"));
	if (!args->no_dependents) {
		mark_complete_and_common_ref(negotiator, args, &ref);
		filter_refs(args, &ref, sought, nr_sought);
		if (everything_local(args, &ref)) {
			packet_flush(fd[1]);
			goto all_done;
		}
	} else {
		filter_refs(args, &ref, sought, nr_sought);
	}
	if (find_common(negotiator, args, fd, &oid, ref) < 0)
		if (!args->keep_pack)
			/* When cloning, it is not unusual to have
			 * no common commit.
			 */
			warning(_("no common commits"));
	if (args->stateless_rpc)
		packet_flush(fd[1]);
	if (args->deepen)
		setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
					NULL);
	else if (si->nr_ours || si->nr_theirs)
		alternate_shallow_file = setup_temporary_shallow(si->shallow);
	else
		alternate_shallow_file = NULL;
	if (get_pack(args, fd, pack_lockfile, sought, nr_sought))
		die(_("git fetch-pack: fetch failed."));
all_done:
	if (negotiator)
		negotiator->release(negotiator);
	return ref;
}

static void add_shallow_requests(struct strbuf *req_buf,
				 const struct fetch_pack_args *args)
{
	if (is_repository_shallow(the_repository))
		write_shallow_commits(req_buf, 1, NULL);
	if (args->depth > 0)
		packet_buf_write(req_buf, "deepen %d", args->depth);
	if (args->deepen_since) {
		timestamp_t max_age = approxidate(args->deepen_since);
		packet_buf_write(req_buf, "deepen-since %"PRItime, max_age);
	}
	if (args->deepen_not) {
		int i;
		for (i = 0; i < args->deepen_not->nr; i++) {
			struct string_list_item *s = args->deepen_not->items + i;
			packet_buf_write(req_buf, "deepen-not %s", s->string);
		}
	}
	if (args->deepen_relative)
		packet_buf_write(req_buf, "deepen-relative\n");
}

static void add_wants(int no_dependents, const struct ref *wants, struct strbuf *req_buf)
{
	int use_ref_in_want = server_supports_feature("fetch", "ref-in-want", 0);
	for ( ; wants ; wants = wants->next) {
		const struct object_id *remote = &wants->old_oid;
		struct object *o;
		/*
		 * If that object is complete (i.e. it is an ancestor of a
		 * local ref), we tell them we have it but do not have to
		 * tell them about its ancestors, which they already know
		 * about.
		 *
		 * We use lookup_object here because we are only
		 * interested in the case we *know* the object is
		 * reachable and we have already scanned it.
		 *
		 * Do this only if args->no_dependents is false (if it is true,
		 * we cannot trust the object flags).
		 */
		if (!no_dependents &&
		    ((o = lookup_object(the_repository, remote)) != NULL) &&
		    (o->flags & COMPLETE)) {
			continue;
		}
		if (!use_ref_in_want || wants->exact_oid)
			packet_buf_write(req_buf, "want %s\n", oid_to_hex(remote));
		else
			packet_buf_write(req_buf, "want-ref %s\n", wants->name);
	}
}

static void add_common(struct strbuf *req_buf, struct oidset *common)
{
	struct oidset_iter iter;
	const struct object_id *oid;
	oidset_iter_init(common, &iter);
	while ((oid = oidset_iter_next(&iter))) {
		packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
	}
}

static int add_haves(struct fetch_negotiator *negotiator,
		     struct strbuf *req_buf,
		     int *haves_to_send, int *in_vain)
{
	int ret = 0;
	int haves_added = 0;
	const struct object_id *oid;
	while ((oid = negotiator->next(negotiator))) {
		packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
		if (++haves_added >= *haves_to_send)
			break;
	}
	*in_vain += haves_added;
	if (!haves_added || *in_vain >= MAX_IN_VAIN) {
		/* Send Done */
		packet_buf_write(req_buf, "done\n");
		ret = 1;
	}
	/* Increase haves to send on next round */
	*haves_to_send = next_flush(1, *haves_to_send);
	return ret;
}

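/*
 * Build and send one protocol v2 "fetch" command: capability lines (agent,
 * server-option), then arguments such as thin-pack, ofs-delta, shallow and
 * filter settings, the want/want-ref lines, previously discovered common
 * commits and a fresh batch of haves. Returns nonzero once "done" has been
 * sent, i.e. when the next server response should include the packfile.
 */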
static int send_fetch_request(struct fetch_negotiator *negotiator, int fd_out,
			      struct fetch_pack_args *args,
			      const struct ref *wants, struct oidset *common,
			      int *haves_to_send, int *in_vain,
			      int sideband_all)
{
	int ret = 0;
	struct strbuf req_buf = STRBUF_INIT;
	if (server_supports_v2("fetch", 1))
		packet_buf_write(&req_buf, "command=fetch");
	if (server_supports_v2("agent", 0))
		packet_buf_write(&req_buf, "agent=%s", git_user_agent_sanitized());
	if (args->server_options && args->server_options->nr &&
	    server_supports_v2("server-option", 1)) {
		int i;
		for (i = 0; i < args->server_options->nr; i++)
			packet_buf_write(&req_buf, "server-option=%s",
					 args->server_options->items[i].string);
	}
	packet_buf_delim(&req_buf);
	if (args->use_thin_pack)
		packet_buf_write(&req_buf, "thin-pack");
	if (args->no_progress)
		packet_buf_write(&req_buf, "no-progress");
	if (args->include_tag)
		packet_buf_write(&req_buf, "include-tag");
	if (prefer_ofs_delta)
		packet_buf_write(&req_buf, "ofs-delta");
	if (sideband_all)
		packet_buf_write(&req_buf, "sideband-all");
	/* Add shallow-info and deepen request */
	if (server_supports_feature("fetch", "shallow", 0))
		add_shallow_requests(&req_buf, args);
	else if (is_repository_shallow(the_repository) || args->deepen)
		die(_("Server does not support shallow requests"));
	/* Add filter */
	if (server_supports_feature("fetch", "filter", 0) &&
	    args->filter_options.choice) {
		const char *spec =
			expand_list_objects_filter_spec(&args->filter_options);
		print_verbose(args, _("Server supports filter"));
		packet_buf_write(&req_buf, "filter %s", spec);
	} else if (args->filter_options.choice) {
		warning("filtering not recognized by server, ignoring");
	}
	/* add wants */
	add_wants(args->no_dependents, wants, &req_buf);
	if (args->no_dependents) {
		packet_buf_write(&req_buf, "done");
		ret = 1;
	} else {
		/* Add all of the common commits we've found in previous rounds */
		add_common(&req_buf, common);
		/* Add initial haves */
		ret = add_haves(negotiator, &req_buf, haves_to_send, in_vain);
	}
	/* Send request */
	packet_buf_flush(&req_buf);
	if (write_in_full(fd_out, req_buf.buf, req_buf.len) < 0)
		die_errno(_("unable to write request to remote"));
	strbuf_release(&req_buf);
	return ret;
}

/*
 * Processes a section header in a server's response and checks if it matches
 * `section`. If the value of `peek` is 1, the header line will be peeked (and
 * not consumed); if 0, the line will be consumed and the function will die if
 * the section header doesn't match what was expected.
 */
static int process_section_header(struct packet_reader *reader,
				  const char *section, int peek)
{
	int ret;
	if (packet_reader_peek(reader) != PACKET_READ_NORMAL)
		die(_("error reading section header '%s'"), section);
	ret = !strcmp(reader->line, section);
	if (!peek) {
		if (!ret)
			die(_("expected '%s', received '%s'"),
			    section, reader->line);
		packet_reader_read(reader);
	}
	return ret;
}

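/*
 * Consume the v2 "acknowledgments" section: record each ACKed object id as
 * common (feeding it to the negotiator when one is in use) and note whether
 * the server said "ready".
 */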
static int process_acks(struct fetch_negotiator *negotiator,
			struct packet_reader *reader,
			struct oidset *common)
{
	/* received */
	int received_ready = 0;
	int received_ack = 0;
	process_section_header(reader, "acknowledgments", 0);
	while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
		const char *arg;
		if (!strcmp(reader->line, "NAK"))
			continue;
		if (skip_prefix(reader->line, "ACK ", &arg)) {
			struct object_id oid;
			if (!get_oid_hex(arg, &oid)) {
				struct commit *commit;
				oidset_insert(common, &oid);
				commit = lookup_commit(the_repository, &oid);
				if (negotiator)
					negotiator->ack(negotiator, commit);
			}
			continue;
		}
		if (!strcmp(reader->line, "ready")) {
			received_ready = 1;
			continue;
		}
		die(_("unexpected acknowledgment line: '%s'"), reader->line);
	}
	if (reader->status != PACKET_READ_FLUSH &&
	    reader->status != PACKET_READ_DELIM)
		die(_("error processing acks: %d"), reader->status);
	/*
	 * If an "acknowledgments" section is sent, a packfile is sent if and
	 * only if "ready" was sent in this section. The other sections
	 * ("shallow-info" and "wanted-refs") are sent only if a packfile is
	 * sent. Therefore, a DELIM is expected if "ready" is sent, and a FLUSH
	 * otherwise.
	 */
	if (received_ready && reader->status != PACKET_READ_DELIM)
		die(_("expected packfile to be sent after 'ready'"));
	if (!received_ready && reader->status != PACKET_READ_FLUSH)
		die(_("expected no other sections to be sent after no 'ready'"));
	/* return 0 if no common, 1 if there are common, or 2 if ready */
	return received_ready ? 2 : (received_ack ? 1 : 0);
}

static void receive_shallow_info(struct fetch_pack_args *args,
				 struct packet_reader *reader,
				 struct oid_array *shallows,
				 struct shallow_info *si)
{
	int unshallow_received = 0;
	process_section_header(reader, "shallow-info", 0);
	while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
		const char *arg;
		struct object_id oid;
		if (skip_prefix(reader->line, "shallow ", &arg)) {
			if (get_oid_hex(arg, &oid))
				die(_("invalid shallow line: %s"), reader->line);
			oid_array_append(shallows, &oid);
			continue;
		}
		if (skip_prefix(reader->line, "unshallow ", &arg)) {
			if (get_oid_hex(arg, &oid))
				die(_("invalid unshallow line: %s"), reader->line);
			if (!lookup_object(the_repository, &oid))
				die(_("object not found: %s"), reader->line);
			/* make sure that it is parsed as shallow */
			if (!parse_object(the_repository, &oid))
				die(_("error in object: %s"), reader->line);
			if (unregister_shallow(&oid))
				die(_("no shallow found: %s"), reader->line);
			unshallow_received = 1;
			continue;
		}
		die(_("expected shallow/unshallow, got %s"), reader->line);
	}
	if (reader->status != PACKET_READ_FLUSH &&
	    reader->status != PACKET_READ_DELIM)
		die(_("error processing shallow info: %d"), reader->status);
	if (args->deepen || unshallow_received) {
		/*
		 * Treat these as shallow lines caused by our depth settings.
		 * In v0, these lines cannot cause refs to be rejected; do the
		 * same.
		 */
		int i;
		for (i = 0; i < shallows->nr; i++)
			register_shallow(the_repository, &shallows->oid[i]);
		setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
					NULL);
		args->deepen = 1;
	} else if (shallows->nr) {
		/*
		 * Treat these as shallow lines caused by the remote being
		 * shallow. In v0, remote refs that reach these objects are
		 * rejected (unless --update-shallow is set); do the same.
		 */
		prepare_shallow_info(si, shallows);
		if (si->nr_ours || si->nr_theirs)
			alternate_shallow_file =
				setup_temporary_shallow(si->shallow);
		else
			alternate_shallow_file = NULL;
	} else {
		alternate_shallow_file = NULL;
	}
}

static int cmp_name_ref(const void *name, const void *ref)
{
	return strcmp(name, (*(struct ref **)ref)->name);
}

static void receive_wanted_refs(struct packet_reader *reader,
				struct ref **sought, int nr_sought)
{
	process_section_header(reader, "wanted-refs", 0);
	while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
		struct object_id oid;
		const char *end;
		struct ref **found;
		if (parse_oid_hex(reader->line, &oid, &end) || *end++ != ' ')
			die(_("expected wanted-ref, got '%s'"), reader->line);
		found = bsearch(end, sought, nr_sought, sizeof(*sought),
				cmp_name_ref);
		if (!found)
			die(_("unexpected wanted-ref: '%s'"), reader->line);
		oidcpy(&(*found)->old_oid, &oid);
	}
	if (reader->status != PACKET_READ_DELIM)
		die(_("error processing wanted refs: %d"), reader->status);
}

enum fetch_state {
	FETCH_CHECK_LOCAL = 0,
	FETCH_SEND_REQUEST,
	FETCH_PROCESS_ACKS,
	FETCH_GET_PACK,
	FETCH_DONE,
};

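/*
 * Protocol v2 driver, written as a small state machine: check what is
 * already available locally, then alternate between sending fetch requests
 * and processing acknowledgments until the server is ready to send a pack,
 * and finally read the shallow-info, wanted-refs and packfile sections.
 */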
static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
				    int fd[2],
				    const struct ref *orig_ref,
				    struct ref **sought, int nr_sought,
				    struct oid_array *shallows,
				    struct shallow_info *si,
				    char **pack_lockfile)
{
	struct repository *r = the_repository;
	struct ref *ref = copy_ref_list(orig_ref);
	enum fetch_state state = FETCH_CHECK_LOCAL;
	struct oidset common = OIDSET_INIT;
	struct packet_reader reader;
	int in_vain = 0, negotiation_started = 0;
	int haves_to_send = INITIAL_FLUSH;
	struct fetch_negotiator negotiator_alloc;
	struct fetch_negotiator *negotiator;
	if (args->no_dependents) {
		negotiator = NULL;
	} else {
		negotiator = &negotiator_alloc;
		fetch_negotiator_init(r, negotiator);
	}
	packet_reader_init(&reader, fd[0], NULL, 0,
			   PACKET_READ_CHOMP_NEWLINE |
			   PACKET_READ_DIE_ON_ERR_PACKET);
	if (git_env_bool("GIT_TEST_SIDEBAND_ALL", 1) &&
	    server_supports_feature("fetch", "sideband-all", 0)) {
		reader.use_sideband = 1;
		reader.me = "fetch-pack";
	}
	while (state != FETCH_DONE) {
		switch (state) {
		case FETCH_CHECK_LOCAL:
			sort_ref_list(&ref, ref_compare_name);
			QSORT(sought, nr_sought, cmp_ref_by_name);
			/* v2 supports these by default */
			allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
			use_sideband = 2;
			if (args->depth > 0 || args->deepen_since || args->deepen_not)
				args->deepen = 1;
			/* Filter 'ref' by 'sought' and those that aren't local */
			if (!args->no_dependents) {
				mark_complete_and_common_ref(negotiator, args, &ref);
				filter_refs(args, &ref, sought, nr_sought);
				if (everything_local(args, &ref))
					state = FETCH_DONE;
				else
					state = FETCH_SEND_REQUEST;
				mark_tips(negotiator, args->negotiation_tips);
				for_each_cached_alternate(negotiator,
							  insert_one_alternate_object);
			} else {
				filter_refs(args, &ref, sought, nr_sought);
				state = FETCH_SEND_REQUEST;
			}
			break;
		case FETCH_SEND_REQUEST:
			if (!negotiation_started) {
				negotiation_started = 1;
				trace2_region_enter("fetch-pack",
						    "negotiation_v2",
						    the_repository);
			}
			if (send_fetch_request(negotiator, fd[1], args, ref,
					       &common,
					       &haves_to_send, &in_vain,
					       reader.use_sideband))
				state = FETCH_GET_PACK;
			else
				state = FETCH_PROCESS_ACKS;
			break;
		case FETCH_PROCESS_ACKS:
			/* Process ACKs/NAKs */
			switch (process_acks(negotiator, &reader, &common)) {
			case 2:
				state = FETCH_GET_PACK;
				break;
			case 1:
				in_vain = 0;
				/* fallthrough */
			default:
				state = FETCH_SEND_REQUEST;
				break;
			}
			break;
		case FETCH_GET_PACK:
			trace2_region_leave("fetch-pack",
					    "negotiation_v2",
					    the_repository);
			/* Check for shallow-info section */
			if (process_section_header(&reader, "shallow-info", 1))
				receive_shallow_info(args, &reader, shallows, si);
			if (process_section_header(&reader, "wanted-refs", 1))
				receive_wanted_refs(&reader, sought, nr_sought);
			/* get the pack */
			process_section_header(&reader, "packfile", 0);
			if (get_pack(args, fd, pack_lockfile, sought, nr_sought))
				die(_("git fetch-pack: fetch failed."));
			state = FETCH_DONE;
			break;
		case FETCH_DONE:
			continue;
		}
	}
	if (negotiator)
		negotiator->release(negotiator);
	oidset_clear(&common);
	return ref;
}

static int fetch_pack_config_cb(const char *var, const char *value, void *cb)
{
	if (strcmp(var, "fetch.fsck.skiplist") == 0) {
		const char *path;
		if (git_config_pathname(&path, var, value))
			return 1;
		strbuf_addf(&fsck_msg_types, "%cskiplist=%s",
			fsck_msg_types.len ? ',' : '=', path);
		free((char *)path);
		return 0;
	}
	if (skip_prefix(var, "fetch.fsck.", &var)) {
		if (is_valid_msg_type(var, value))
			strbuf_addf(&fsck_msg_types, "%c%s=%s",
				fsck_msg_types.len ? ',' : '=', var, value);
		else
			warning("Skipping unknown msg id '%s'", var);
		return 0;
	}
	return git_default_config(var, value, cb);
}

static void fetch_pack_config(void)
{
	git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
	git_config_get_int("transfer.unpacklimit", &transfer_unpack_limit);
	git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
	git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
	git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);
	git_config(fetch_pack_config_cb, NULL);
}

static void fetch_pack_setup(void)
{
	static int did_setup;
	if (did_setup)
		return;
	fetch_pack_config();
	if (0 <= transfer_unpack_limit)
		unpack_limit = transfer_unpack_limit;
	else if (0 <= fetch_unpack_limit)
		unpack_limit = fetch_unpack_limit;
	did_setup = 1;
}

static int remove_duplicates_in_refs(struct ref **ref, int nr)
{
	struct string_list names = STRING_LIST_INIT_NODUP;
	int src, dst;
	for (src = dst = 0; src < nr; src++) {
		struct string_list_item *item;
		item = string_list_insert(&names, ref[src]->name);
		if (item->util)
			continue; /* already have it */
		item->util = ref[src];
		if (src != dst)
			ref[dst] = ref[src];
		dst++;
	}
	for (src = dst; src < nr; src++)
		ref[src] = NULL;
	string_list_clear(&names, 0);
	return dst;
}

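/*
 * After the pack has been fetched, decide how the local shallow state should
 * change: commit or roll back the .git/shallow lockfile for deepening
 * fetches, accept shallow roots wholesale when cloning or when
 * --update-shallow is given, and otherwise reject refs that would require
 * new shallow roots.
 */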
static void update_shallow(struct fetch_pack_args *args,
			   struct ref **sought, int nr_sought,
			   struct shallow_info *si)
{
	struct oid_array ref = OID_ARRAY_INIT;
	int *status;
	int i;
	if (args->deepen && alternate_shallow_file) {
		if (*alternate_shallow_file == '\0') { /* --unshallow */
			unlink_or_warn(git_path_shallow(the_repository));
			rollback_lock_file(&shallow_lock);
		} else
			commit_lock_file(&shallow_lock);
		alternate_shallow_file = NULL;
		return;
	}
	if (!si->shallow || !si->shallow->nr)
		return;
	if (args->cloning) {
		/*
		 * remote is shallow, but this is a clone, there are
		 * no objects in repo to worry about. Accept any
		 * shallow points that exist in the pack (iow in repo
		 * after get_pack() and reprepare_packed_git())
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		for (i = 0; i < si->shallow->nr; i++)
			if (has_object_file(&oid[i]))
				oid_array_append(&extra, &oid[i]);
		if (extra.nr) {
			setup_alternate_shallow(&shallow_lock,
						&alternate_shallow_file,
						&extra);
			commit_lock_file(&shallow_lock);
			alternate_shallow_file = NULL;
		}
		oid_array_clear(&extra);
		return;
	}
	if (!si->nr_ours && !si->nr_theirs)
		return;
	remove_nonexistent_theirs_shallow(si);
	if (!si->nr_ours && !si->nr_theirs)
		return;
	for (i = 0; i < nr_sought; i++)
		oid_array_append(&ref, &sought[i]->old_oid);
	si->ref = &ref;
	if (args->update_shallow) {
		/*
		 * remote is also shallow, .git/shallow may be updated
		 * so all refs can be accepted. Make sure we only add
		 * shallow roots that are actually reachable from new
		 * refs.
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		assign_shallow_commits_to_refs(si, NULL, NULL);
		if (!si->nr_ours && !si->nr_theirs) {
			oid_array_clear(&ref);
			return;
		}
		for (i = 0; i < si->nr_ours; i++)
			oid_array_append(&extra, &oid[si->ours[i]]);
		for (i = 0; i < si->nr_theirs; i++)
			oid_array_append(&extra, &oid[si->theirs[i]]);
		setup_alternate_shallow(&shallow_lock,
					&alternate_shallow_file,
					&extra);
		commit_lock_file(&shallow_lock);
		oid_array_clear(&extra);
		oid_array_clear(&ref);
		alternate_shallow_file = NULL;
		return;
	}
	/*
	 * remote is also shallow, check what ref is safe to update
	 * without updating .git/shallow
	 */
	status = xcalloc(nr_sought, sizeof(*status));
	assign_shallow_commits_to_refs(si, NULL, status);
	if (si->nr_ours || si->nr_theirs) {
		for (i = 0; i < nr_sought; i++)
			if (status[i])
				sought[i]->status = REF_STATUS_REJECT_SHALLOW;
	}
	free(status);
	oid_array_clear(&ref);
}

static int iterate_ref_map(void *cb_data, struct object_id *oid)
{
	struct ref **rm = cb_data;
	struct ref *ref = *rm;
	if (!ref)
		return -1; /* end of the list */
	*rm = ref->next;
	oidcpy(oid, &ref->old_oid);
	return 0;
}

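/*
 * Public entry point: deduplicate the sought refs, dispatch to the protocol
 * v0 or v2 code path, verify connectivity of what was received when
 * deepening, and update the local shallow state.
 */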
struct ref *fetch_pack(struct fetch_pack_args *args,
		       int fd[],
		       const struct ref *ref,
		       struct ref **sought, int nr_sought,
		       struct oid_array *shallow,
		       char **pack_lockfile,
		       enum protocol_version version)
{
	struct ref *ref_cpy;
	struct shallow_info si;
	struct oid_array shallows_scratch = OID_ARRAY_INIT;
	fetch_pack_setup();
	if (nr_sought)
		nr_sought = remove_duplicates_in_refs(sought, nr_sought);
	if (args->no_dependents && !args->filter_options.choice) {
		/*
		 * The protocol does not support requesting that only the
		 * wanted objects be sent, so approximate this by setting a
		 * "blob:none" filter if no filter is already set. This works
		 * for all object types: note that wanted blobs will still be
		 * sent because they are directly specified as a "want".
		 *
		 * NEEDSWORK: Add an option in the protocol to request that
		 * only the wanted objects be sent, and implement it.
		 */
		parse_list_objects_filter(&args->filter_options, "blob:none");
	}
	if (version != protocol_v2 && !ref) {
		packet_flush(fd[1]);
		die(_("no matching remote head"));
	}
	if (version == protocol_v2) {
		if (shallow->nr)
			BUG("Protocol V2 does not provide shallows at this point in the fetch");
		memset(&si, 0, sizeof(si));
		ref_cpy = do_fetch_pack_v2(args, fd, ref, sought, nr_sought,
					   &shallows_scratch, &si,
					   pack_lockfile);
	} else {
		prepare_shallow_info(&si, shallow);
		ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
					&si, pack_lockfile);
	}
	reprepare_packed_git(the_repository);
	if (!args->cloning && args->deepen) {
		struct check_connected_options opt = CHECK_CONNECTED_INIT;
		struct ref *iterator = ref_cpy;
		opt.shallow_file = alternate_shallow_file;
		if (args->deepen)
			opt.is_deepening_fetch = 1;
		if (check_connected(iterate_ref_map, &iterator, &opt)) {
			error(_("remote did not send all necessary objects"));
			free_refs(ref_cpy);
			ref_cpy = NULL;
			rollback_lock_file(&shallow_lock);
			goto cleanup;
		}
		args->connectivity_checked = 1;
	}
	update_shallow(args, sought, nr_sought, &si);
cleanup:
	clear_shallow_info(&si);
	oid_array_clear(&shallows_scratch);
	return ref_cpy;
}

int report_unmatched_refs(struct ref **sought, int nr_sought)
{
	int i, ret = 0;
	for (i = 0; i < nr_sought; i++) {
		if (!sought[i])
			continue;
		switch (sought[i]->match_status) {
		case REF_MATCHED:
			continue;
		case REF_NOT_MATCHED:
			error(_("no such remote ref %s"), sought[i]->name);
			break;
		case REF_UNADVERTISED_NOT_ALLOWED:
			error(_("Server does not allow request for unadvertised object %s"),
			      sought[i]->name);
			break;
		}
		ret = 1;
	}
	return ret;
}