THIS IS A TEST INSTANCE ONLY! REPOSITORIES CAN BE DELETED AT ANY TIME!

Git Source Code Mirror - This is a publish-only repository and all pull requests are ignored. Please follow Documentation/SubmittingPatches procedure for any of your improvements.
git
Vous ne pouvez pas sélectionner plus de 25 sujets Les noms de sujets doivent commencer par une lettre ou un nombre, peuvent contenir des tirets ('-') et peuvent comporter jusqu'à 35 caractères.

1793 lignes
49KB

  1. #include "cache.h"
  2. #include "repository.h"
  3. #include "config.h"
  4. #include "lockfile.h"
  5. #include "refs.h"
  6. #include "pkt-line.h"
  7. #include "commit.h"
  8. #include "tag.h"
  9. #include "exec-cmd.h"
  10. #include "pack.h"
  11. #include "sideband.h"
  12. #include "fetch-pack.h"
  13. #include "remote.h"
  14. #include "run-command.h"
  15. #include "connect.h"
  16. #include "transport.h"
  17. #include "version.h"
  18. #include "sha1-array.h"
  19. #include "oidset.h"
  20. #include "packfile.h"
  21. #include "object-store.h"
  22. #include "connected.h"
  23. #include "fetch-negotiator.h"
  24. #include "fsck.h"
/*
 * File-scope state shared by the fetch-pack machinery below.
 * The *_unpack_limit values start at -1 ("unset"); unpack_limit is the
 * effective threshold consulted in get_pack() to decide between
 * index-pack (keep) and unpack-objects.  Presumably the -1 defaults are
 * resolved from git config by code outside this chunk — TODO confirm.
 */
static int transfer_unpack_limit = -1;
static int fetch_unpack_limit = -1;
static int unpack_limit = 100;
static int prefer_ofs_delta = 1;	/* advertise "ofs-delta" unless server lacks it */
static int no_done;			/* server supports "no-done" (stateless RPC) */
static int deepen_since_ok;		/* server supports "deepen-since" */
static int deepen_not_ok;		/* server supports "deepen-not" */
static int fetch_fsck_objects = -1;	/* -1 = unset; checked in get_pack() */
static int transfer_fsck_objects = -1;	/* -1 = unset; fallback for the above */
static int agent_supported;		/* server advertised an "agent" capability */
static int server_supports_filtering;	/* server advertised "filter" */
static struct lock_file shallow_lock;
static const char *alternate_shallow_file;
static struct strbuf fsck_msg_types = STRBUF_INIT;
/* Remember to update object flag allocation in object.h */
#define COMPLETE	(1U << 0)	/* object known to be reachable from a local ref */
#define ALTERNATE	(1U << 1)	/* object seen while caching alternate refs */
/*
 * After sending this many "have"s if we do not get any new ACK , we
 * give up traversing our history.
 */
#define MAX_IN_VAIN 256
static int multi_ack, use_sideband;
/* Allow specifying sha1 if it is a ref tip. */
#define ALLOW_TIP_SHA1	01
/* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
#define ALLOW_REACHABLE_SHA1	02
static unsigned int allow_unadvertised_object_request;
/*
 * printf-style progress/trace output to stderr, emitted only when
 * args->verbose is set.  A trailing newline is always appended.
 */
__attribute__((format (printf, 2, 3)))
static inline void print_verbose(const struct fetch_pack_args *args,
				 const char *fmt, ...)
{
	va_list params;

	if (!args->verbose)
		return;
	va_start(params, fmt);
	vfprintf(stderr, fmt, params);
	va_end(params);
	fputc('\n', stderr);
}
/* Growable array of objects reachable from alternate-repository refs. */
struct alternate_object_cache {
	struct object **items;
	size_t nr, alloc;	/* used / allocated entry counts (ALLOC_GROW) */
};
/*
 * for_each_alternate_ref() callback: parse one alternate ref tip and
 * append it to the cache, using the ALTERNATE flag to deduplicate.
 */
static void cache_one_alternate(const struct object_id *oid,
				void *vcache)
{
	struct alternate_object_cache *cache = vcache;
	struct object *obj = parse_object(the_repository, oid);

	/* skip unparseable objects and ones already cached */
	if (!obj || (obj->flags & ALTERNATE))
		return;

	obj->flags |= ALTERNATE;
	ALLOC_GROW(cache->items, cache->nr + 1, cache->alloc);
	cache->items[cache->nr++] = obj;
}
/*
 * Invoke cb on every object reachable from an alternate ref.  The
 * alternate refs are enumerated only once per process (static cache);
 * subsequent calls replay the cached list.
 */
static void for_each_cached_alternate(struct fetch_negotiator *negotiator,
				      void (*cb)(struct fetch_negotiator *,
						 struct object *))
{
	static int initialized;
	static struct alternate_object_cache cache;
	size_t i;

	if (!initialized) {
		for_each_alternate_ref(cache_one_alternate, &cache);
		initialized = 1;
	}

	for (i = 0; i < cache.nr; i++)
		cb(negotiator, cache.items[i]);
}
/*
 * Feed one local ref tip to the negotiator.  Tags are peeled; only
 * commits are usable as negotiation tips, everything else is ignored.
 * Always returns 0 so it can serve as a for_each_ref-style callback.
 */
static int rev_list_insert_ref(struct fetch_negotiator *negotiator,
			       const char *refname,
			       const struct object_id *oid)
{
	struct object *o = deref_tag(the_repository,
				     parse_object(the_repository, oid),
				     refname, 0);

	if (o && o->type == OBJ_COMMIT)
		negotiator->add_tip(negotiator, (struct commit *)o);

	return 0;
}
/* for_each_ref() adapter: cb_data is the negotiator. */
static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
				   int flag, void *cb_data)
{
	return rev_list_insert_ref(cb_data, refname, oid);
}
/*
 * Server responses during negotiation.  NAK must stay 0: get_ack()'s
 * callers treat a zero return as "no ACK" in boolean context.
 */
enum ack_type {
	NAK = 0,
	ACK,		/* plain "ACK <oid>" (terminates negotiation) */
	ACK_continue,	/* "ACK <oid> continue" (multi_ack) */
	ACK_common,	/* "ACK <oid> common" (multi_ack_detailed) */
	ACK_ready	/* "ACK <oid> ready" (server can make the pack) */
};
/*
 * Drain a shallow/unshallow block from the server, up to and including
 * its flush packet.  Only needed over stateless RPC with deepening,
 * where the server re-sends the shallow info before every ACK block;
 * in all other modes this is a no-op.  Dies on any unexpected line.
 */
static void consume_shallow_list(struct fetch_pack_args *args,
				 struct packet_reader *reader)
{
	if (args->stateless_rpc && args->deepen) {
		/* If we sent a depth we will get back "duplicate"
		 * shallow and unshallow commands every time there
		 * is a block of have lines exchanged.
		 */
		while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
			if (starts_with(reader->line, "shallow "))
				continue;
			if (starts_with(reader->line, "unshallow "))
				continue;
			die(_("git fetch-pack: expected shallow list"));
		}
		if (reader->status != PACKET_READ_FLUSH)
			die(_("git fetch-pack: expected a flush packet after shallow list"));
	}
}
/*
 * Read one ACK/NAK packet.  On any ACK form, *result_oid is filled with
 * the acknowledged object id.  Dies on flush or on a malformed line.
 */
static enum ack_type get_ack(struct packet_reader *reader,
			     struct object_id *result_oid)
{
	int len;
	const char *arg;

	if (packet_reader_read(reader) != PACKET_READ_NORMAL)
		die(_("git fetch-pack: expected ACK/NAK, got a flush packet"));
	len = reader->pktlen;

	if (!strcmp(reader->line, "NAK"))
		return NAK;
	if (skip_prefix(reader->line, "ACK ", &arg)) {
		const char *p;
		if (!parse_oid_hex(arg, result_oid, &p)) {
			/*
			 * len now counts what remains after "ACK <oid>";
			 * if nothing follows, it is a plain ACK.
			 */
			len -= p - reader->line;
			if (len < 1)
				return ACK;
			/* detailed status keyword follows the oid */
			if (strstr(p, "continue"))
				return ACK_continue;
			if (strstr(p, "common"))
				return ACK_common;
			if (strstr(p, "ready"))
				return ACK_ready;
			return ACK;
		}
	}
	die(_("git fetch-pack: expected ACK/NAK, got '%s'"), reader->line);
}
/*
 * Write the accumulated request buffer to the server.  Over stateless
 * RPC the payload is sent in sideband-sized chunks followed by a flush;
 * otherwise it is written directly to the fd.
 */
static void send_request(struct fetch_pack_args *args,
			 int fd, struct strbuf *buf)
{
	if (args->stateless_rpc) {
		send_sideband(fd, -1, buf->buf, buf->len, LARGE_PACKET_MAX);
		packet_flush(fd);
	} else {
		if (write_in_full(fd, buf->buf, buf->len) < 0)
			die_errno(_("unable to write to remote"));
	}
}
/* for_each_cached_alternate() callback: treat the object as a tip. */
static void insert_one_alternate_object(struct fetch_negotiator *negotiator,
					struct object *obj)
{
	rev_list_insert_ref(negotiator, NULL, &obj->oid);
}
  179. #define INITIAL_FLUSH 16
  180. #define PIPESAFE_FLUSH 32
  181. #define LARGE_FLUSH 16384
  182. static int next_flush(int stateless_rpc, int count)
  183. {
  184. if (stateless_rpc) {
  185. if (count < LARGE_FLUSH)
  186. count <<= 1;
  187. else
  188. count = count * 11 / 10;
  189. } else {
  190. if (count < PIPESAFE_FLUSH)
  191. count <<= 1;
  192. else
  193. count += PIPESAFE_FLUSH;
  194. }
  195. return count;
  196. }
/*
 * Seed the negotiator with tips: either the caller-supplied list of
 * negotiation tips, or (when none was given) every local ref.
 */
static void mark_tips(struct fetch_negotiator *negotiator,
		      const struct oid_array *negotiation_tips)
{
	int i;

	if (!negotiation_tips) {
		for_each_ref(rev_list_insert_ref_oid, negotiator);
		return;
	}

	for (i = 0; i < negotiation_tips->nr; i++)
		rev_list_insert_ref(negotiator, NULL,
				    &negotiation_tips->oid[i]);
	return;
}
/*
 * Protocol v0/v1 negotiation.  Sends "want" lines (the first one
 * carrying the capability list) for every ref we do not already have,
 * handles shallow/deepen setup, then alternates batches of "have"
 * lines with reading the server's ACK/NAK responses until a common
 * base is found or we give up (MAX_IN_VAIN).
 *
 * Returns 1 when there is nothing to fetch, 0 when a common commit
 * was found (or the repo is empty), -1 otherwise.  *result_oid holds
 * the last acknowledged object id.
 */
static int find_common(struct fetch_negotiator *negotiator,
		       struct fetch_pack_args *args,
		       int fd[2], struct object_id *result_oid,
		       struct ref *refs)
{
	int fetching;
	int count = 0, flushes = 0, flush_at = INITIAL_FLUSH, retval;
	const struct object_id *oid;
	unsigned in_vain = 0;
	int got_continue = 0;
	int got_ready = 0;
	struct strbuf req_buf = STRBUF_INIT;
	size_t state_len = 0;	/* length of the resend prefix for stateless RPC */
	struct packet_reader reader;

	if (args->stateless_rpc && multi_ack == 1)
		die(_("--stateless-rpc requires multi_ack_detailed"));

	packet_reader_init(&reader, fd[0], NULL, 0,
			   PACKET_READ_CHOMP_NEWLINE |
			   PACKET_READ_DIE_ON_ERR_PACKET);

	if (!args->no_dependents) {
		mark_tips(negotiator, args->negotiation_tips);
		for_each_cached_alternate(negotiator, insert_one_alternate_object);
	}

	fetching = 0;
	for ( ; refs ; refs = refs->next) {
		struct object_id *remote = &refs->old_oid;
		const char *remote_hex;
		struct object *o;

		/*
		 * If that object is complete (i.e. it is an ancestor of a
		 * local ref), we tell them we have it but do not have to
		 * tell them about its ancestors, which they already know
		 * about.
		 *
		 * We use lookup_object here because we are only
		 * interested in the case we *know* the object is
		 * reachable and we have already scanned it.
		 *
		 * Do this only if args->no_dependents is false (if it is true,
		 * we cannot trust the object flags).
		 */
		if (!args->no_dependents &&
		    ((o = lookup_object(the_repository, remote)) != NULL) &&
		    (o->flags & COMPLETE)) {
			continue;
		}

		remote_hex = oid_to_hex(remote);
		if (!fetching) {
			/* only the first "want" carries the capability list */
			struct strbuf c = STRBUF_INIT;
			if (multi_ack == 2)     strbuf_addstr(&c, " multi_ack_detailed");
			if (multi_ack == 1)     strbuf_addstr(&c, " multi_ack");
			if (no_done)            strbuf_addstr(&c, " no-done");
			if (use_sideband == 2)  strbuf_addstr(&c, " side-band-64k");
			if (use_sideband == 1)  strbuf_addstr(&c, " side-band");
			if (args->deepen_relative) strbuf_addstr(&c, " deepen-relative");
			if (args->use_thin_pack) strbuf_addstr(&c, " thin-pack");
			if (args->no_progress)   strbuf_addstr(&c, " no-progress");
			if (args->include_tag)   strbuf_addstr(&c, " include-tag");
			if (prefer_ofs_delta)   strbuf_addstr(&c, " ofs-delta");
			if (deepen_since_ok)    strbuf_addstr(&c, " deepen-since");
			if (deepen_not_ok)      strbuf_addstr(&c, " deepen-not");
			if (agent_supported)    strbuf_addf(&c, " agent=%s",
							    git_user_agent_sanitized());
			if (args->filter_options.choice)
				strbuf_addstr(&c, " filter");
			packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
			strbuf_release(&c);
		} else
			packet_buf_write(&req_buf, "want %s\n", remote_hex);
		fetching++;
	}

	if (!fetching) {
		/* nothing missing: tell the server we are done */
		strbuf_release(&req_buf);
		packet_flush(fd[1]);
		return 1;
	}

	if (is_repository_shallow(the_repository))
		write_shallow_commits(&req_buf, 1, NULL);
	if (args->depth > 0)
		packet_buf_write(&req_buf, "deepen %d", args->depth);
	if (args->deepen_since) {
		timestamp_t max_age = approxidate(args->deepen_since);
		packet_buf_write(&req_buf, "deepen-since %"PRItime, max_age);
	}
	if (args->deepen_not) {
		int i;
		for (i = 0; i < args->deepen_not->nr; i++) {
			struct string_list_item *s = args->deepen_not->items + i;
			packet_buf_write(&req_buf, "deepen-not %s", s->string);
		}
	}
	if (server_supports_filtering && args->filter_options.choice) {
		const char *spec =
			expand_list_objects_filter_spec(&args->filter_options);
		packet_buf_write(&req_buf, "filter %s", spec);
	}
	packet_buf_flush(&req_buf);
	state_len = req_buf.len;

	if (args->deepen) {
		const char *arg;
		struct object_id oid;

		/* the server answers a deepen request with shallow/unshallow lines */
		send_request(args, fd[1], &req_buf);
		while (packet_reader_read(&reader) == PACKET_READ_NORMAL) {
			if (skip_prefix(reader.line, "shallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid shallow line: %s"), reader.line);
				register_shallow(the_repository, &oid);
				continue;
			}
			if (skip_prefix(reader.line, "unshallow ", &arg)) {
				if (get_oid_hex(arg, &oid))
					die(_("invalid unshallow line: %s"), reader.line);
				if (!lookup_object(the_repository, &oid))
					die(_("object not found: %s"), reader.line);
				/* make sure that it is parsed as shallow */
				if (!parse_object(the_repository, &oid))
					die(_("error in object: %s"), reader.line);
				if (unregister_shallow(&oid))
					die(_("no shallow found: %s"), reader.line);
				continue;
			}
			die(_("expected shallow/unshallow, got %s"), reader.line);
		}
	} else if (!args->stateless_rpc)
		send_request(args, fd[1], &req_buf);

	if (!args->stateless_rpc) {
		/* If we aren't using the stateless-rpc interface
		 * we don't need to retain the headers.
		 */
		strbuf_setlen(&req_buf, 0);
		state_len = 0;
	}

	trace2_region_enter("fetch-pack", "negotiation_v0_v1", the_repository);
	flushes = 0;
	retval = -1;
	if (args->no_dependents)
		goto done;
	while ((oid = negotiator->next(negotiator))) {
		packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
		print_verbose(args, "have %s", oid_to_hex(oid));
		in_vain++;
		if (flush_at <= ++count) {
			int ack;

			packet_buf_flush(&req_buf);
			send_request(args, fd[1], &req_buf);
			strbuf_setlen(&req_buf, state_len);
			flushes++;
			flush_at = next_flush(args->stateless_rpc, count);

			/*
			 * We keep one window "ahead" of the other side, and
			 * will wait for an ACK only on the next one
			 */
			if (!args->stateless_rpc && count == INITIAL_FLUSH)
				continue;

			consume_shallow_list(args, &reader);
			do {
				ack = get_ack(&reader, result_oid);
				if (ack)
					print_verbose(args, _("got %s %d %s"), "ack",
						      ack, oid_to_hex(result_oid));
				switch (ack) {
				case ACK:
					/* plain ACK ends negotiation immediately */
					flushes = 0;
					multi_ack = 0;
					retval = 0;
					goto done;
				case ACK_common:
				case ACK_ready:
				case ACK_continue: {
					struct commit *commit =
						lookup_commit(the_repository,
							      result_oid);
					int was_common;

					if (!commit)
						die(_("invalid commit %s"), oid_to_hex(result_oid));
					was_common = negotiator->ack(negotiator, commit);
					if (args->stateless_rpc
					 && ack == ACK_common
					 && !was_common) {
						/* We need to replay the have for this object
						 * on the next RPC request so the peer knows
						 * it is in common with us.
						 */
						const char *hex = oid_to_hex(result_oid);
						packet_buf_write(&req_buf, "have %s\n", hex);
						state_len = req_buf.len;
						/*
						 * Reset in_vain because an ack
						 * for this commit has not been
						 * seen.
						 */
						in_vain = 0;
					} else if (!args->stateless_rpc
						   || ack != ACK_common)
						in_vain = 0;
					retval = 0;
					got_continue = 1;
					if (ack == ACK_ready)
						got_ready = 1;
					break;
					}
				}
			} while (ack);
			flushes--;
			if (got_continue && MAX_IN_VAIN < in_vain) {
				print_verbose(args, _("giving up"));
				break; /* give up */
			}
			if (got_ready)
				break;
		}
	}
done:
	trace2_region_leave("fetch-pack", "negotiation_v0_v1", the_repository);
	if (!got_ready || !no_done) {
		packet_buf_write(&req_buf, "done\n");
		send_request(args, fd[1], &req_buf);
	}
	print_verbose(args, _("done"));
	if (retval != 0) {
		multi_ack = 0;
		flushes++;
	}
	strbuf_release(&req_buf);

	if (!got_ready || !no_done)
		consume_shallow_list(args, &reader);
	/* drain outstanding ACK blocks for the flushes still in flight */
	while (flushes || multi_ack) {
		int ack = get_ack(&reader, result_oid);
		if (ack) {
			print_verbose(args, _("got %s (%d) %s"), "ack",
				      ack, oid_to_hex(result_oid));
			if (ack == ACK)
				return 0;
			multi_ack = 1;
			continue;
		}
		flushes--;
	}
	/* it is no error to fetch into a completely empty repo */
	return count ? retval : 0;
}
/* Commits known COMPLETE, kept for date-ordered processing. */
static struct commit_list *complete;

/*
 * Mark the object (peeling any chain of tags, flagging each tag
 * COMPLETE along the way) and, if it resolves to a commit, flag it
 * COMPLETE and add it to the "complete" list.  Returns 0 always, so
 * it can back a ref-iteration callback.
 */
static int mark_complete(const struct object_id *oid)
{
	struct object *o = parse_object(the_repository, oid);

	while (o && o->type == OBJ_TAG) {
		struct tag *t = (struct tag *) o;
		if (!t->tagged)
			break; /* broken repository */
		o->flags |= COMPLETE;
		o = parse_object(the_repository, &t->tagged->oid);
	}
	if (o && o->type == OBJ_COMMIT) {
		struct commit *commit = (struct commit *)o;
		if (!(commit->object.flags & COMPLETE)) {
			commit->object.flags |= COMPLETE;
			commit_list_insert(commit, &complete);
		}
	}
	return 0;
}
/* for_each_ref() adapter for mark_complete(). */
static int mark_complete_oid(const char *refname, const struct object_id *oid,
			     int flag, void *cb_data)
{
	return mark_complete(oid);
}
/*
 * Walk the date-sorted "complete" list and propagate the COMPLETE flag
 * through ancestors, stopping once commits are older than the cutoff.
 */
static void mark_recent_complete_commits(struct fetch_pack_args *args,
					 timestamp_t cutoff)
{
	while (complete && cutoff <= complete->item->date) {
		print_verbose(args, _("Marking %s as complete"),
			      oid_to_hex(&complete->item->object.oid));
		pop_most_recent_commit(&complete, COMPLETE);
	}
}
  485. static void add_refs_to_oidset(struct oidset *oids, struct ref *refs)
  486. {
  487. for (; refs; refs = refs->next)
  488. oidset_insert(oids, &refs->old_oid);
  489. }
/*
 * True if this sought ref was not matched against the advertisement
 * AND its "name" is literally a full hex object id equal to its
 * old_oid — i.e. the user asked for a raw sha1/oid, not a ref name.
 */
static int is_unmatched_ref(const struct ref *ref)
{
	struct object_id oid;
	const char *p;
	return	ref->match_status == REF_NOT_MATCHED &&
		!parse_oid_hex(ref->name, &oid, &p) &&
		*p == '\0' &&
		oideq(&oid, &ref->old_oid);
}
/*
 * Reduce the advertised ref list (*refs) to the refs we actually want:
 * the ones matching "sought" (plus everything when fetch_all).  Both
 * *refs and sought[] are sorted by name, so a single merge-style pass
 * with index i suffices.  Sought-by-raw-oid entries are appended at
 * the end, subject to the server's allow-*-sha1-in-want policy
 * ("strict" when neither capability was advertised).
 *
 * On return *refs is the filtered list; match_status on sought[]
 * entries is updated (REF_MATCHED / REF_UNADVERTISED_NOT_ALLOWED).
 */
static void filter_refs(struct fetch_pack_args *args,
			struct ref **refs,
			struct ref **sought, int nr_sought)
{
	struct ref *newlist = NULL;
	struct ref **newtail = &newlist;
	struct ref *unmatched = NULL;
	struct ref *ref, *next;
	struct oidset tip_oids = OIDSET_INIT;
	int i;
	int strict = !(allow_unadvertised_object_request &
		       (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1));

	i = 0;
	for (ref = *refs; ref; ref = next) {
		int keep = 0;
		next = ref->next;

		if (starts_with(ref->name, "refs/") &&
		    check_refname_format(ref->name, 0)) {
			/*
			 * trash or a peeled value; do not even add it to
			 * unmatched list
			 */
			free_one_ref(ref);
			continue;
		} else {
			/* advance through sought[] until we pass ref->name */
			while (i < nr_sought) {
				int cmp = strcmp(ref->name, sought[i]->name);
				if (cmp < 0)
					break; /* definitely do not have it */
				else if (cmp == 0) {
					keep = 1; /* definitely have it */
					sought[i]->match_status = REF_MATCHED;
				}
				i++;
			}

			if (!keep && args->fetch_all &&
			    (!args->deepen || !starts_with(ref->name, "refs/tags/")))
				keep = 1;
		}

		if (keep) {
			*newtail = ref;
			ref->next = NULL;
			newtail = &ref->next;
		} else {
			ref->next = unmatched;
			unmatched = ref;
		}
	}

	if (strict) {
		/*
		 * Under the strict policy a raw-oid request is only
		 * honored if it equals some advertised tip; collect
		 * all tips once, lazily, if any such request exists.
		 */
		for (i = 0; i < nr_sought; i++) {
			ref = sought[i];
			if (!is_unmatched_ref(ref))
				continue;

			add_refs_to_oidset(&tip_oids, unmatched);
			add_refs_to_oidset(&tip_oids, newlist);
			break;
		}
	}

	/* Append unmatched requests to the list */
	for (i = 0; i < nr_sought; i++) {
		ref = sought[i];
		if (!is_unmatched_ref(ref))
			continue;

		if (!strict || oidset_contains(&tip_oids, &ref->old_oid)) {
			ref->match_status = REF_MATCHED;
			*newtail = copy_ref(ref);
			newtail = &(*newtail)->next;
		} else {
			ref->match_status = REF_UNADVERTISED_NOT_ALLOWED;
		}
	}

	oidset_clear(&tip_oids);
	free_refs(unmatched);

	*refs = newlist;
}
/* for_each_cached_alternate() callback; the negotiator arg is unused. */
static void mark_alternate_complete(struct fetch_negotiator *unused,
				    struct object *obj)
{
	mark_complete(&obj->oid);
}
/*
 * Pairs a set of loose-object ids with a ref list during iteration.
 * NOTE(review): not referenced anywhere in this chunk — presumably
 * used (or left over) elsewhere in the file; verify before removing.
 */
struct loose_object_iter {
	struct oidset *loose_object_set;
	struct ref *refs;
};
/*
 * Mark recent commits available locally and reachable from a local ref as
 * COMPLETE. If args->no_dependents is false, also mark COMPLETE remote refs as
 * COMMON_REF (otherwise, we are not planning to participate in negotiation, and
 * thus do not need COMMON_REF marks).
 *
 * The cutoff time for recency is determined by this heuristic: it is the
 * earliest commit time of the objects in refs that are commits and that we know
 * the commit time of.
 */
static void mark_complete_and_common_ref(struct fetch_negotiator *negotiator,
					 struct fetch_pack_args *args,
					 struct ref **refs)
{
	struct ref *ref;
	int old_save_commit_buffer = save_commit_buffer;
	timestamp_t cutoff = 0;

	/* avoid keeping commit buffers around; restored on exit */
	save_commit_buffer = 0;

	/* first pass: find the newest commit date among remote tips we have */
	for (ref = *refs; ref; ref = ref->next) {
		struct object *o;

		if (!has_object_file_with_flags(&ref->old_oid,
						OBJECT_INFO_QUICK))
			continue;
		o = parse_object(the_repository, &ref->old_oid);
		if (!o)
			continue;

		/* We already have it -- which may mean that we were
		 * in sync with the other side at some time after
		 * that (it is OK if we guess wrong here).
		 */
		if (o->type == OBJ_COMMIT) {
			struct commit *commit = (struct commit *)o;
			if (!cutoff || cutoff < commit->date)
				cutoff = commit->date;
		}
	}

	if (!args->deepen) {
		for_each_ref(mark_complete_oid, NULL);
		for_each_cached_alternate(NULL, mark_alternate_complete);
		commit_list_sort_by_date(&complete);
		if (cutoff)
			mark_recent_complete_commits(args, cutoff);
	}

	/*
	 * Mark all complete remote refs as common refs.
	 * Don't mark them common yet; the server has to be told so first.
	 */
	for (ref = *refs; ref; ref = ref->next) {
		struct object *o = deref_tag(the_repository,
					     lookup_object(the_repository,
					     &ref->old_oid),
					     NULL, 0);

		if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
			continue;

		negotiator->known_common(negotiator,
					 (struct commit *)o);
	}

	save_commit_buffer = old_save_commit_buffer;
}
  642. /*
  643. * Returns 1 if every object pointed to by the given remote refs is available
  644. * locally and reachable from a local ref, and 0 otherwise.
  645. */
  646. static int everything_local(struct fetch_pack_args *args,
  647. struct ref **refs)
  648. {
  649. struct ref *ref;
  650. int retval;
  651. for (retval = 1, ref = *refs; ref ; ref = ref->next) {
  652. const struct object_id *remote = &ref->old_oid;
  653. struct object *o;
  654. o = lookup_object(the_repository, remote);
  655. if (!o || !(o->flags & COMPLETE)) {
  656. retval = 0;
  657. print_verbose(args, "want %s (%s)", oid_to_hex(remote),
  658. ref->name);
  659. continue;
  660. }
  661. print_verbose(args, _("already have %s (%s)"), oid_to_hex(remote),
  662. ref->name);
  663. }
  664. return retval;
  665. }
/*
 * Async callback: demultiplex sideband data from the upload-pack fd
 * (xd[0]) onto "out" (band #1); band #2 goes to stderr inside
 * recv_sideband().  "in" is unused — the async API fixes the signature.
 */
static int sideband_demux(int in, int out, void *data)
{
	int *xd = data;
	int ret;

	ret = recv_sideband("fetch-pack", xd[0], out);
	close(out);
	return ret;
}
/*
 * Derive "<pack>.promisor" from the "<pack>.keep" lockfile name and
 * write one "<oid> <refname>" line per sought ref into it.  BUGs out
 * if keep_name does not end in ".keep".
 */
static void write_promisor_file(const char *keep_name,
				struct ref **sought, int nr_sought)
{
	struct strbuf promisor_name = STRBUF_INIT;
	int suffix_stripped;
	FILE *output;
	int i;

	strbuf_addstr(&promisor_name, keep_name);
	suffix_stripped = strbuf_strip_suffix(&promisor_name, ".keep");
	if (!suffix_stripped)
		BUG("name of pack lockfile should end with .keep (was '%s')",
		    keep_name);
	strbuf_addstr(&promisor_name, ".promisor");

	output = xfopen(promisor_name.buf, "w");
	for (i = 0; i < nr_sought; i++)
		fprintf(output, "%s %s\n", oid_to_hex(&sought[i]->old_oid),
			sought[i]->name);
	fclose(output);

	strbuf_release(&promisor_name);
}
/*
 * Receive the pack data on xd[] and feed it to index-pack (keeping the
 * pack, optionally recording its .keep lockfile name in *pack_lockfile)
 * or to unpack-objects, depending on keep_pack/unpack_limit.  When
 * sideband is in use, a demux subprocess splits the stream first.
 * Returns 0 on success; dies on failure.
 */
static int get_pack(struct fetch_pack_args *args,
		    int xd[2], char **pack_lockfile,
		    struct ref **sought, int nr_sought)
{
	struct async demux;
	int do_keep = args->keep_pack;
	const char *cmd_name;
	struct pack_header header;
	int pass_header = 0;
	struct child_process cmd = CHILD_PROCESS_INIT;
	int ret;

	memset(&demux, 0, sizeof(demux));
	if (use_sideband) {
		/* xd[] is talking with upload-pack; subprocess reads from
		 * xd[0], spits out band#2 to stderr, and feeds us band#1
		 * through demux->out.
		 */
		demux.proc = sideband_demux;
		demux.data = xd;
		demux.out = -1;
		demux.isolate_sigpipe = 1;
		if (start_async(&demux))
			die(_("fetch-pack: unable to fork off sideband demultiplexer"));
	}
	else
		demux.out = xd[0];

	if (!args->keep_pack && unpack_limit) {
		/* peek at the header to count objects; re-sent to the child below */
		if (read_pack_header(demux.out, &header))
			die(_("protocol error: bad pack header"));
		pass_header = 1;
		if (ntohl(header.hdr_entries) < unpack_limit)
			do_keep = 0;
		else
			do_keep = 1;
	}

	if (alternate_shallow_file) {
		argv_array_push(&cmd.args, "--shallow-file");
		argv_array_push(&cmd.args, alternate_shallow_file);
	}

	if (do_keep || args->from_promisor) {
		if (pack_lockfile)
			cmd.out = -1;
		cmd_name = "index-pack";
		argv_array_push(&cmd.args, cmd_name);
		argv_array_push(&cmd.args, "--stdin");
		if (!args->quiet && !args->no_progress)
			argv_array_push(&cmd.args, "-v");
		if (args->use_thin_pack)
			argv_array_push(&cmd.args, "--fix-thin");
		if (do_keep && (args->lock_pack || unpack_limit)) {
			char hostname[HOST_NAME_MAX + 1];
			if (xgethostname(hostname, sizeof(hostname)))
				xsnprintf(hostname, sizeof(hostname), "localhost");
			argv_array_pushf(&cmd.args,
					"--keep=fetch-pack %"PRIuMAX " on %s",
					(uintmax_t)getpid(), hostname);
		}
		if (args->check_self_contained_and_connected)
			argv_array_push(&cmd.args, "--check-self-contained-and-connected");
		/*
		 * If we're obtaining the filename of a lockfile, we'll use
		 * that filename to write a .promisor file with more
		 * information below. If not, we need index-pack to do it for
		 * us.
		 */
		if (!(do_keep && pack_lockfile) && args->from_promisor)
			argv_array_push(&cmd.args, "--promisor");
	}
	else {
		cmd_name = "unpack-objects";
		argv_array_push(&cmd.args, cmd_name);
		if (args->quiet || args->no_progress)
			argv_array_push(&cmd.args, "-q");
		args->check_self_contained_and_connected = 0;
	}

	if (pass_header)
		argv_array_pushf(&cmd.args, "--pack_header=%"PRIu32",%"PRIu32,
				 ntohl(header.hdr_version),
				 ntohl(header.hdr_entries));
	/* fetch.fsckObjects overrides transfer.fsckObjects; default is off */
	if (fetch_fsck_objects >= 0
	    ? fetch_fsck_objects
	    : transfer_fsck_objects >= 0
	    ? transfer_fsck_objects
	    : 0) {
		if (args->from_promisor)
			/*
			 * We cannot use --strict in index-pack because it
			 * checks both broken objects and links, but we only
			 * want to check for broken objects.
			 */
			argv_array_push(&cmd.args, "--fsck-objects");
		else
			argv_array_pushf(&cmd.args, "--strict%s",
					 fsck_msg_types.buf);
	}

	cmd.in = demux.out;
	cmd.git_cmd = 1;
	if (start_command(&cmd))
		die(_("fetch-pack: unable to fork off %s"), cmd_name);
	if (do_keep && pack_lockfile) {
		*pack_lockfile = index_pack_lockfile(cmd.out);
		close(cmd.out);
	}

	if (!use_sideband)
		/* Closed by start_command() */
		xd[0] = -1;

	ret = finish_command(&cmd);
	if (!ret || (args->check_self_contained_and_connected && ret == 1))
		args->self_contained_and_connected =
			args->check_self_contained_and_connected &&
			ret == 0;
	else
		die(_("%s failed"), cmd_name);
	if (use_sideband && finish_async(&demux))
		die(_("error in sideband demultiplexer"));

	/*
	 * Now that index-pack has succeeded, write the promisor file using the
	 * obtained .keep filename if necessary
	 */
	if (do_keep && pack_lockfile && args->from_promisor)
		write_promisor_file(*pack_lockfile, sought, nr_sought);

	return 0;
}
  817. static int cmp_ref_by_name(const void *a_, const void *b_)
  818. {
  819. const struct ref *a = *((const struct ref **)a_);
  820. const struct ref *b = *((const struct ref **)b_);
  821. return strcmp(a->name, b->name);
  822. }
/*
 * Protocol v0/v1 driver: negotiate capabilities against the server's
 * advertisement, mark local completeness, filter the wanted refs, run
 * find_common(), set up shallow bookkeeping, and receive the pack via
 * get_pack().  Returns the (filtered, copied) ref list that was fetched.
 */
static struct ref *do_fetch_pack(struct fetch_pack_args *args,
				 int fd[2],
				 const struct ref *orig_ref,
				 struct ref **sought, int nr_sought,
				 struct shallow_info *si,
				 char **pack_lockfile)
{
	struct repository *r = the_repository;
	struct ref *ref = copy_ref_list(orig_ref);
	struct object_id oid;
	const char *agent_feature;
	int agent_len;
	struct fetch_negotiator negotiator;
	fetch_negotiator_init(r, &negotiator);

	/* both lists sorted so filter_refs() can do a single merge pass */
	sort_ref_list(&ref, ref_compare_name);
	QSORT(sought, nr_sought, cmp_ref_by_name);

	if ((agent_feature = server_feature_value("agent", &agent_len))) {
		agent_supported = 1;
		if (agent_len)
			print_verbose(args, _("Server version is %.*s"),
				      agent_len, agent_feature);
	}

	if (server_supports("shallow"))
		print_verbose(args, _("Server supports %s"), "shallow");
	else if (args->depth > 0 || is_repository_shallow(r))
		die(_("Server does not support shallow clients"));
	if (args->depth > 0 || args->deepen_since || args->deepen_not)
		args->deepen = 1;
	if (server_supports("multi_ack_detailed")) {
		print_verbose(args, _("Server supports %s"), "multi_ack_detailed");
		multi_ack = 2;
		if (server_supports("no-done")) {
			print_verbose(args, _("Server supports %s"), "no-done");
			if (args->stateless_rpc)
				no_done = 1;
		}
	}
	else if (server_supports("multi_ack")) {
		print_verbose(args, _("Server supports %s"), "multi_ack");
		multi_ack = 1;
	}
	if (server_supports("side-band-64k")) {
		print_verbose(args, _("Server supports %s"), "side-band-64k");
		use_sideband = 2;
	}
	else if (server_supports("side-band")) {
		print_verbose(args, _("Server supports %s"), "side-band");
		use_sideband = 1;
	}
	if (server_supports("allow-tip-sha1-in-want")) {
		print_verbose(args, _("Server supports %s"), "allow-tip-sha1-in-want");
		allow_unadvertised_object_request |= ALLOW_TIP_SHA1;
	}
	if (server_supports("allow-reachable-sha1-in-want")) {
		print_verbose(args, _("Server supports %s"), "allow-reachable-sha1-in-want");
		allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
	}
	/* downgrade requested options the server cannot honor */
	if (server_supports("thin-pack"))
		print_verbose(args, _("Server supports %s"), "thin-pack");
	else
		args->use_thin_pack = 0;
	if (server_supports("no-progress"))
		print_verbose(args, _("Server supports %s"), "no-progress");
	else
		args->no_progress = 0;
	if (server_supports("include-tag"))
		print_verbose(args, _("Server supports %s"), "include-tag");
	else
		args->include_tag = 0;
	if (server_supports("ofs-delta"))
		print_verbose(args, _("Server supports %s"), "ofs-delta");
	else
		prefer_ofs_delta = 0;

	if (server_supports("filter")) {
		server_supports_filtering = 1;
		print_verbose(args, _("Server supports %s"), "filter");
	} else if (args->filter_options.choice) {
		warning("filtering not recognized by server, ignoring");
	}

	if (server_supports("deepen-since")) {
		print_verbose(args, _("Server supports %s"), "deepen-since");
		deepen_since_ok = 1;
	} else if (args->deepen_since)
		die(_("Server does not support --shallow-since"));
	if (server_supports("deepen-not")) {
		print_verbose(args, _("Server supports %s"), "deepen-not");
		deepen_not_ok = 1;
	} else if (args->deepen_not)
		die(_("Server does not support --shallow-exclude"));
	if (server_supports("deepen-relative"))
		print_verbose(args, _("Server supports %s"), "deepen-relative");
	else if (args->deepen_relative)
		die(_("Server does not support --deepen"));

	if (!args->no_dependents) {
		mark_complete_and_common_ref(&negotiator, args, &ref);
		filter_refs(args, &ref, sought, nr_sought);
		if (everything_local(args, &ref)) {
			/* nothing to fetch: just flush and finish */
			packet_flush(fd[1]);
			goto all_done;
		}
	} else {
		filter_refs(args, &ref, sought, nr_sought);
	}
	if (find_common(&negotiator, args, fd, &oid, ref) < 0)
		if (!args->keep_pack)
			/* When cloning, it is not unusual to have
			 * no common commit.
			 */
			warning(_("no common commits"));

	if (args->stateless_rpc)
		packet_flush(fd[1]);
	if (args->deepen)
		setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
					NULL);
	else if (si->nr_ours || si->nr_theirs)
		alternate_shallow_file = setup_temporary_shallow(si->shallow);
	else
		alternate_shallow_file = NULL;
	if (get_pack(args, fd, pack_lockfile, sought, nr_sought))
		die(_("git fetch-pack: fetch failed."));

 all_done:
	negotiator.release(&negotiator);
	return ref;
}
  947. static void add_shallow_requests(struct strbuf *req_buf,
  948. const struct fetch_pack_args *args)
  949. {
  950. if (is_repository_shallow(the_repository))
  951. write_shallow_commits(req_buf, 1, NULL);
  952. if (args->depth > 0)
  953. packet_buf_write(req_buf, "deepen %d", args->depth);
  954. if (args->deepen_since) {
  955. timestamp_t max_age = approxidate(args->deepen_since);
  956. packet_buf_write(req_buf, "deepen-since %"PRItime, max_age);
  957. }
  958. if (args->deepen_not) {
  959. int i;
  960. for (i = 0; i < args->deepen_not->nr; i++) {
  961. struct string_list_item *s = args->deepen_not->items + i;
  962. packet_buf_write(req_buf, "deepen-not %s", s->string);
  963. }
  964. }
  965. if (args->deepen_relative)
  966. packet_buf_write(req_buf, "deepen-relative\n");
  967. }
  968. static void add_wants(int no_dependents, const struct ref *wants, struct strbuf *req_buf)
  969. {
  970. int use_ref_in_want = server_supports_feature("fetch", "ref-in-want", 0);
  971. for ( ; wants ; wants = wants->next) {
  972. const struct object_id *remote = &wants->old_oid;
  973. struct object *o;
  974. /*
  975. * If that object is complete (i.e. it is an ancestor of a
  976. * local ref), we tell them we have it but do not have to
  977. * tell them about its ancestors, which they already know
  978. * about.
  979. *
  980. * We use lookup_object here because we are only
  981. * interested in the case we *know* the object is
  982. * reachable and we have already scanned it.
  983. *
  984. * Do this only if args->no_dependents is false (if it is true,
  985. * we cannot trust the object flags).
  986. */
  987. if (!no_dependents &&
  988. ((o = lookup_object(the_repository, remote)) != NULL) &&
  989. (o->flags & COMPLETE)) {
  990. continue;
  991. }
  992. if (!use_ref_in_want || wants->exact_oid)
  993. packet_buf_write(req_buf, "want %s\n", oid_to_hex(remote));
  994. else
  995. packet_buf_write(req_buf, "want-ref %s\n", wants->name);
  996. }
  997. }
  998. static void add_common(struct strbuf *req_buf, struct oidset *common)
  999. {
  1000. struct oidset_iter iter;
  1001. const struct object_id *oid;
  1002. oidset_iter_init(common, &iter);
  1003. while ((oid = oidset_iter_next(&iter))) {
  1004. packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
  1005. }
  1006. }
  1007. static int add_haves(struct fetch_negotiator *negotiator,
  1008. struct strbuf *req_buf,
  1009. int *haves_to_send, int *in_vain)
  1010. {
  1011. int ret = 0;
  1012. int haves_added = 0;
  1013. const struct object_id *oid;
  1014. while ((oid = negotiator->next(negotiator))) {
  1015. packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
  1016. if (++haves_added >= *haves_to_send)
  1017. break;
  1018. }
  1019. *in_vain += haves_added;
  1020. if (!haves_added || *in_vain >= MAX_IN_VAIN) {
  1021. /* Send Done */
  1022. packet_buf_write(req_buf, "done\n");
  1023. ret = 1;
  1024. }
  1025. /* Increase haves to send on next round */
  1026. *haves_to_send = next_flush(1, *haves_to_send);
  1027. return ret;
  1028. }
/*
 * Write one complete protocol v2 "fetch" request to fd_out: the
 * capability prefix (command/agent/server-option), a delimiter, then
 * the arguments (thin-pack, shallow, filter, wants, haves/done),
 * terminated by a flush packet.
 *
 * Returns 1 if "done" was sent (negotiation is over and the caller
 * should expect the packfile next), 0 if another acks round follows.
 */
static int send_fetch_request(struct fetch_negotiator *negotiator, int fd_out,
			      struct fetch_pack_args *args,
			      const struct ref *wants, struct oidset *common,
			      int *haves_to_send, int *in_vain,
			      int sideband_all)
{
	int ret = 0;
	struct strbuf req_buf = STRBUF_INIT;

	if (server_supports_v2("fetch", 1))
		packet_buf_write(&req_buf, "command=fetch");
	if (server_supports_v2("agent", 0))
		packet_buf_write(&req_buf, "agent=%s", git_user_agent_sanitized());
	if (args->server_options && args->server_options->nr &&
	    server_supports_v2("server-option", 1)) {
		int i;
		for (i = 0; i < args->server_options->nr; i++)
			packet_buf_write(&req_buf, "server-option=%s",
					 args->server_options->items[i].string);
	}

	/* Delimiter between the capability prefix and the arguments. */
	packet_buf_delim(&req_buf);
	if (args->use_thin_pack)
		packet_buf_write(&req_buf, "thin-pack");
	if (args->no_progress)
		packet_buf_write(&req_buf, "no-progress");
	if (args->include_tag)
		packet_buf_write(&req_buf, "include-tag");
	if (prefer_ofs_delta)
		packet_buf_write(&req_buf, "ofs-delta");
	if (sideband_all)
		packet_buf_write(&req_buf, "sideband-all");

	/* Add shallow-info and deepen request */
	if (server_supports_feature("fetch", "shallow", 0))
		add_shallow_requests(&req_buf, args);
	else if (is_repository_shallow(the_repository) || args->deepen)
		die(_("Server does not support shallow requests"));

	/* Add filter */
	if (server_supports_feature("fetch", "filter", 0) &&
	    args->filter_options.choice) {
		const char *spec =
			expand_list_objects_filter_spec(&args->filter_options);
		print_verbose(args, _("Server supports filter"));
		packet_buf_write(&req_buf, "filter %s", spec);
	} else if (args->filter_options.choice) {
		warning("filtering not recognized by server, ignoring");
	}

	/* add wants */
	add_wants(args->no_dependents, wants, &req_buf);

	if (args->no_dependents) {
		/* Only the wanted objects themselves; no negotiation rounds. */
		packet_buf_write(&req_buf, "done");
		ret = 1;
	} else {
		/* Add all of the common commits we've found in previous rounds */
		add_common(&req_buf, common);

		/* Add initial haves */
		ret = add_haves(negotiator, &req_buf, haves_to_send, in_vain);
	}

	/* Send request */
	packet_buf_flush(&req_buf);
	if (write_in_full(fd_out, req_buf.buf, req_buf.len) < 0)
		die_errno(_("unable to write request to remote"));

	strbuf_release(&req_buf);
	return ret;
}
  1092. /*
  1093. * Processes a section header in a server's response and checks if it matches
  1094. * `section`. If the value of `peek` is 1, the header line will be peeked (and
  1095. * not consumed); if 0, the line will be consumed and the function will die if
  1096. * the section header doesn't match what was expected.
  1097. */
  1098. static int process_section_header(struct packet_reader *reader,
  1099. const char *section, int peek)
  1100. {
  1101. int ret;
  1102. if (packet_reader_peek(reader) != PACKET_READ_NORMAL)
  1103. die(_("error reading section header '%s'"), section);
  1104. ret = !strcmp(reader->line, section);
  1105. if (!peek) {
  1106. if (!ret)
  1107. die(_("expected '%s', received '%s'"),
  1108. section, reader->line);
  1109. packet_reader_read(reader);
  1110. }
  1111. return ret;
  1112. }
  1113. static int process_acks(struct fetch_negotiator *negotiator,
  1114. struct packet_reader *reader,
  1115. struct oidset *common)
  1116. {
  1117. /* received */
  1118. int received_ready = 0;
  1119. int received_ack = 0;
  1120. process_section_header(reader, "acknowledgments", 0);
  1121. while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
  1122. const char *arg;
  1123. if (!strcmp(reader->line, "NAK"))
  1124. continue;
  1125. if (skip_prefix(reader->line, "ACK ", &arg)) {
  1126. struct object_id oid;
  1127. if (!get_oid_hex(arg, &oid)) {
  1128. struct commit *commit;
  1129. oidset_insert(common, &oid);
  1130. commit = lookup_commit(the_repository, &oid);
  1131. negotiator->ack(negotiator, commit);
  1132. }
  1133. continue;
  1134. }
  1135. if (!strcmp(reader->line, "ready")) {
  1136. received_ready = 1;
  1137. continue;
  1138. }
  1139. die(_("unexpected acknowledgment line: '%s'"), reader->line);
  1140. }
  1141. if (reader->status != PACKET_READ_FLUSH &&
  1142. reader->status != PACKET_READ_DELIM)
  1143. die(_("error processing acks: %d"), reader->status);
  1144. /*
  1145. * If an "acknowledgments" section is sent, a packfile is sent if and
  1146. * only if "ready" was sent in this section. The other sections
  1147. * ("shallow-info" and "wanted-refs") are sent only if a packfile is
  1148. * sent. Therefore, a DELIM is expected if "ready" is sent, and a FLUSH
  1149. * otherwise.
  1150. */
  1151. if (received_ready && reader->status != PACKET_READ_DELIM)
  1152. die(_("expected packfile to be sent after 'ready'"));
  1153. if (!received_ready && reader->status != PACKET_READ_FLUSH)
  1154. die(_("expected no other sections to be sent after no 'ready'"));
  1155. /* return 0 if no common, 1 if there are common, or 2 if ready */
  1156. return received_ready ? 2 : (received_ack ? 1 : 0);
  1157. }
/*
 * Read the protocol v2 "shallow-info" section: collect the server's
 * "shallow" lines into `shallows`, process "unshallow" lines against
 * the local shallow registry, and prepare the alternate shallow file
 * that pack indexing/connectivity checks will consult.
 *
 * Whether the shallow lines are treated as caused by our own deepening
 * options or by the remote itself being shallow mirrors the v0
 * behavior (see the branch comments below).
 */
static void receive_shallow_info(struct fetch_pack_args *args,
				 struct packet_reader *reader,
				 struct oid_array *shallows,
				 struct shallow_info *si)
{
	int unshallow_received = 0;

	process_section_header(reader, "shallow-info", 0);
	while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
		const char *arg;
		struct object_id oid;

		if (skip_prefix(reader->line, "shallow ", &arg)) {
			if (get_oid_hex(arg, &oid))
				die(_("invalid shallow line: %s"), reader->line);
			oid_array_append(shallows, &oid);
			continue;
		}
		if (skip_prefix(reader->line, "unshallow ", &arg)) {
			if (get_oid_hex(arg, &oid))
				die(_("invalid unshallow line: %s"), reader->line);
			if (!lookup_object(the_repository, &oid))
				die(_("object not found: %s"), reader->line);
			/* make sure that it is parsed as shallow */
			if (!parse_object(the_repository, &oid))
				die(_("error in object: %s"), reader->line);
			if (unregister_shallow(&oid))
				die(_("no shallow found: %s"), reader->line);
			unshallow_received = 1;
			continue;
		}
		die(_("expected shallow/unshallow, got %s"), reader->line);
	}

	if (reader->status != PACKET_READ_FLUSH &&
	    reader->status != PACKET_READ_DELIM)
		die(_("error processing shallow info: %d"), reader->status);

	if (args->deepen || unshallow_received) {
		/*
		 * Treat these as shallow lines caused by our depth settings.
		 * In v0, these lines cannot cause refs to be rejected; do the
		 * same.
		 */
		int i;

		for (i = 0; i < shallows->nr; i++)
			register_shallow(the_repository, &shallows->oid[i]);
		setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
					NULL);
		args->deepen = 1;
	} else if (shallows->nr) {
		/*
		 * Treat these as shallow lines caused by the remote being
		 * shallow. In v0, remote refs that reach these objects are
		 * rejected (unless --update-shallow is set); do the same.
		 */
		prepare_shallow_info(si, shallows);
		if (si->nr_ours || si->nr_theirs)
			alternate_shallow_file =
				setup_temporary_shallow(si->shallow);
		else
			alternate_shallow_file = NULL;
	} else {
		/* No shallow lines at all: nothing special to set up. */
		alternate_shallow_file = NULL;
	}
}
  1220. static int cmp_name_ref(const void *name, const void *ref)
  1221. {
  1222. return strcmp(name, (*(struct ref **)ref)->name);
  1223. }
  1224. static void receive_wanted_refs(struct packet_reader *reader,
  1225. struct ref **sought, int nr_sought)
  1226. {
  1227. process_section_header(reader, "wanted-refs", 0);
  1228. while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
  1229. struct object_id oid;
  1230. const char *end;
  1231. struct ref **found;
  1232. if (parse_oid_hex(reader->line, &oid, &end) || *end++ != ' ')
  1233. die(_("expected wanted-ref, got '%s'"), reader->line);
  1234. found = bsearch(end, sought, nr_sought, sizeof(*sought),
  1235. cmp_name_ref);
  1236. if (!found)
  1237. die(_("unexpected wanted-ref: '%s'"), reader->line);
  1238. oidcpy(&(*found)->old_oid, &oid);
  1239. }
  1240. if (reader->status != PACKET_READ_DELIM)
  1241. die(_("error processing wanted refs: %d"), reader->status);
  1242. }
/* States of the protocol v2 fetch state machine in do_fetch_pack_v2(). */
enum fetch_state {
	FETCH_CHECK_LOCAL = 0,	/* sort/filter refs; skip fetch if all local */
	FETCH_SEND_REQUEST,	/* send a fetch request (wants/haves/done) */
	FETCH_PROCESS_ACKS,	/* read the server's acknowledgments section */
	FETCH_GET_PACK,		/* read shallow-info/wanted-refs and the pack */
	FETCH_DONE,		/* terminal state */
};
/*
 * Protocol v2 fetch: drive the FETCH_* state machine — negotiate
 * (wants/haves vs. acks) until the server is ready, then read the
 * shallow-info/wanted-refs sections and the packfile itself.
 *
 * Returns a copy of orig_ref (filtered against sought) with any
 * server-reported wanted-ref oids filled into sought; the caller owns
 * the returned list.
 */
static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
				    int fd[2],
				    const struct ref *orig_ref,
				    struct ref **sought, int nr_sought,
				    struct oid_array *shallows,
				    struct shallow_info *si,
				    char **pack_lockfile)
{
	struct repository *r = the_repository;
	struct ref *ref = copy_ref_list(orig_ref);
	enum fetch_state state = FETCH_CHECK_LOCAL;
	struct oidset common = OIDSET_INIT;
	struct packet_reader reader;
	int in_vain = 0, negotiation_started = 0;
	int haves_to_send = INITIAL_FLUSH;
	struct fetch_negotiator negotiator;
	fetch_negotiator_init(r, &negotiator);
	packet_reader_init(&reader, fd[0], NULL, 0,
			   PACKET_READ_CHOMP_NEWLINE |
			   PACKET_READ_DIE_ON_ERR_PACKET);
	/* Demultiplex the entire response over sideband when supported. */
	if (git_env_bool("GIT_TEST_SIDEBAND_ALL", 1) &&
	    server_supports_feature("fetch", "sideband-all", 0)) {
		reader.use_sideband = 1;
		reader.me = "fetch-pack";
	}

	while (state != FETCH_DONE) {
		switch (state) {
		case FETCH_CHECK_LOCAL:
			/* Sorted order is needed by bsearch in receive_wanted_refs(). */
			sort_ref_list(&ref, ref_compare_name);
			QSORT(sought, nr_sought, cmp_ref_by_name);

			/* v2 supports these by default */
			allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
			use_sideband = 2;
			if (args->depth > 0 || args->deepen_since || args->deepen_not)
				args->deepen = 1;

			/* Filter 'ref' by 'sought' and those that aren't local */
			if (!args->no_dependents) {
				mark_complete_and_common_ref(&negotiator, args, &ref);
				filter_refs(args, &ref, sought, nr_sought);
				if (everything_local(args, &ref))
					state = FETCH_DONE;
				else
					state = FETCH_SEND_REQUEST;

				mark_tips(&negotiator, args->negotiation_tips);
				for_each_cached_alternate(&negotiator,
							  insert_one_alternate_object);
			} else {
				filter_refs(args, &ref, sought, nr_sought);
				state = FETCH_SEND_REQUEST;
			}
			break;
		case FETCH_SEND_REQUEST:
			if (!negotiation_started) {
				negotiation_started = 1;
				trace2_region_enter("fetch-pack",
						    "negotiation_v2",
						    the_repository);
			}
			/* send_fetch_request() returns 1 when "done" was sent. */
			if (send_fetch_request(&negotiator, fd[1], args, ref,
					       &common,
					       &haves_to_send, &in_vain,
					       reader.use_sideband))
				state = FETCH_GET_PACK;
			else
				state = FETCH_PROCESS_ACKS;
			break;
		case FETCH_PROCESS_ACKS:
			/* Process ACKs/NAKs */
			switch (process_acks(&negotiator, &reader, &common)) {
			case 2:
				/* Server sent "ready": the pack comes next. */
				state = FETCH_GET_PACK;
				break;
			case 1:
				/* Common commits found; keep negotiating. */
				in_vain = 0;
				/* fallthrough */
			default:
				state = FETCH_SEND_REQUEST;
				break;
			}
			break;
		case FETCH_GET_PACK:
			trace2_region_leave("fetch-pack",
					    "negotiation_v2",
					    the_repository);
			/* Check for shallow-info section */
			if (process_section_header(&reader, "shallow-info", 1))
				receive_shallow_info(args, &reader, shallows, si);

			if (process_section_header(&reader, "wanted-refs", 1))
				receive_wanted_refs(&reader, sought, nr_sought);

			/* get the pack */
			process_section_header(&reader, "packfile", 0);
			if (get_pack(args, fd, pack_lockfile, sought, nr_sought))
				die(_("git fetch-pack: fetch failed."));

			state = FETCH_DONE;
			break;
		case FETCH_DONE:
			continue;
		}
	}

	negotiator.release(&negotiator);
	oidset_clear(&common);
	return ref;
}
  1353. static int fetch_pack_config_cb(const char *var, const char *value, void *cb)
  1354. {
  1355. if (strcmp(var, "fetch.fsck.skiplist") == 0) {
  1356. const char *path;
  1357. if (git_config_pathname(&path, var, value))
  1358. return 1;
  1359. strbuf_addf(&fsck_msg_types, "%cskiplist=%s",
  1360. fsck_msg_types.len ? ',' : '=', path);
  1361. free((char *)path);
  1362. return 0;
  1363. }
  1364. if (skip_prefix(var, "fetch.fsck.", &var)) {
  1365. if (is_valid_msg_type(var, value))
  1366. strbuf_addf(&fsck_msg_types, "%c%s=%s",
  1367. fsck_msg_types.len ? ',' : '=', var, value);
  1368. else
  1369. warning("Skipping unknown msg id '%s'", var);
  1370. return 0;
  1371. }
  1372. return git_default_config(var, value, cb);
  1373. }
/*
 * Load the fetch-related configuration into the file-scope statics,
 * then run the generic config machinery with fetch_pack_config_cb
 * (which handles fetch.fsck.* and falls back to the defaults).
 * Called once, from fetch_pack_setup().
 */
static void fetch_pack_config(void)
{
	git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
	git_config_get_int("transfer.unpacklimit", &transfer_unpack_limit);
	git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
	git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
	git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);

	git_config(fetch_pack_config_cb, NULL);
}
  1383. static void fetch_pack_setup(void)
  1384. {
  1385. static int did_setup;
  1386. if (did_setup)
  1387. return;
  1388. fetch_pack_config();
  1389. if (0 <= transfer_unpack_limit)
  1390. unpack_limit = transfer_unpack_limit;
  1391. else if (0 <= fetch_unpack_limit)
  1392. unpack_limit = fetch_unpack_limit;
  1393. did_setup = 1;
  1394. }
  1395. static int remove_duplicates_in_refs(struct ref **ref, int nr)
  1396. {
  1397. struct string_list names = STRING_LIST_INIT_NODUP;
  1398. int src, dst;
  1399. for (src = dst = 0; src < nr; src++) {
  1400. struct string_list_item *item;
  1401. item = string_list_insert(&names, ref[src]->name);
  1402. if (item->util)
  1403. continue; /* already have it */
  1404. item->util = ref[src];
  1405. if (src != dst)
  1406. ref[dst] = ref[src];
  1407. dst++;
  1408. }
  1409. for (src = dst; src < nr; src++)
  1410. ref[src] = NULL;
  1411. string_list_clear(&names, 0);
  1412. return dst;
  1413. }
/*
 * After the pack has been received, reconcile .git/shallow with the
 * shallowness implied by this fetch: commit or roll back the lock file
 * taken earlier, adopt shallow roots sent by a shallow remote (clone or
 * --update-shallow), or mark refs that would require updating
 * .git/shallow as REF_STATUS_REJECT_SHALLOW.
 */
static void update_shallow(struct fetch_pack_args *args,
			   struct ref **sought, int nr_sought,
			   struct shallow_info *si)
{
	struct oid_array ref = OID_ARRAY_INIT;
	int *status;
	int i;

	if (args->deepen && alternate_shallow_file) {
		/* Shallowness came from our own deepening request. */
		if (*alternate_shallow_file == '\0') { /* --unshallow */
			unlink_or_warn(git_path_shallow(the_repository));
			rollback_lock_file(&shallow_lock);
		} else
			commit_lock_file(&shallow_lock);
		alternate_shallow_file = NULL;
		return;
	}

	if (!si->shallow || !si->shallow->nr)
		return;

	if (args->cloning) {
		/*
		 * remote is shallow, but this is a clone, there are
		 * no objects in repo to worry about. Accept any
		 * shallow points that exist in the pack (iow in repo
		 * after get_pack() and reprepare_packed_git())
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		for (i = 0; i < si->shallow->nr; i++)
			if (has_object_file(&oid[i]))
				oid_array_append(&extra, &oid[i]);
		if (extra.nr) {
			setup_alternate_shallow(&shallow_lock,
						&alternate_shallow_file,
						&extra);
			commit_lock_file(&shallow_lock);
			alternate_shallow_file = NULL;
		}
		oid_array_clear(&extra);
		return;
	}

	if (!si->nr_ours && !si->nr_theirs)
		return;
	/* Drop "theirs" shallow roots we did not actually receive. */
	remove_nonexistent_theirs_shallow(si);
	if (!si->nr_ours && !si->nr_theirs)
		return;
	for (i = 0; i < nr_sought; i++)
		oid_array_append(&ref, &sought[i]->old_oid);
	si->ref = &ref;

	if (args->update_shallow) {
		/*
		 * remote is also shallow, .git/shallow may be updated
		 * so all refs can be accepted. Make sure we only add
		 * shallow roots that are actually reachable from new
		 * refs.
		 */
		struct oid_array extra = OID_ARRAY_INIT;
		struct object_id *oid = si->shallow->oid;
		assign_shallow_commits_to_refs(si, NULL, NULL);
		if (!si->nr_ours && !si->nr_theirs) {
			oid_array_clear(&ref);
			return;
		}
		for (i = 0; i < si->nr_ours; i++)
			oid_array_append(&extra, &oid[si->ours[i]]);
		for (i = 0; i < si->nr_theirs; i++)
			oid_array_append(&extra, &oid[si->theirs[i]]);
		setup_alternate_shallow(&shallow_lock,
					&alternate_shallow_file,
					&extra);
		commit_lock_file(&shallow_lock);
		oid_array_clear(&extra);
		oid_array_clear(&ref);
		alternate_shallow_file = NULL;
		return;
	}

	/*
	 * remote is also shallow, check what ref is safe to update
	 * without updating .git/shallow
	 */
	status = xcalloc(nr_sought, sizeof(*status));
	assign_shallow_commits_to_refs(si, NULL, status);
	if (si->nr_ours || si->nr_theirs) {
		for (i = 0; i < nr_sought; i++)
			if (status[i])
				sought[i]->status = REF_STATUS_REJECT_SHALLOW;
	}
	free(status);
	oid_array_clear(&ref);
}
  1503. static int iterate_ref_map(void *cb_data, struct object_id *oid)
  1504. {
  1505. struct ref **rm = cb_data;
  1506. struct ref *ref = *rm;
  1507. if (!ref)
  1508. return -1; /* end of the list */
  1509. *rm = ref->next;
  1510. oidcpy(oid, &ref->old_oid);
  1511. return 0;
  1512. }
/*
 * Public entry point: fetch a pack from the remote over fd[] using the
 * negotiated protocol version, then update shallow state and — for
 * deepening fetches — verify connectivity of the received objects.
 *
 * Returns the fetched ref list (caller frees with free_refs()), or
 * NULL if the remote did not send all necessary objects.
 */
struct ref *fetch_pack(struct fetch_pack_args *args,
		       int fd[],
		       const struct ref *ref,
		       struct ref **sought, int nr_sought,
		       struct oid_array *shallow,
		       char **pack_lockfile,
		       enum protocol_version version)
{
	struct ref *ref_cpy;
	struct shallow_info si;
	struct oid_array shallows_scratch = OID_ARRAY_INIT;

	fetch_pack_setup();
	if (nr_sought)
		nr_sought = remove_duplicates_in_refs(sought, nr_sought);

	if (args->no_dependents && !args->filter_options.choice) {
		/*
		 * The protocol does not support requesting that only the
		 * wanted objects be sent, so approximate this by setting a
		 * "blob:none" filter if no filter is already set. This works
		 * for all object types: note that wanted blobs will still be
		 * sent because they are directly specified as a "want".
		 *
		 * NEEDSWORK: Add an option in the protocol to request that
		 * only the wanted objects be sent, and implement it.
		 */
		parse_list_objects_filter(&args->filter_options, "blob:none");
	}

	if (version != protocol_v2 && !ref) {
		packet_flush(fd[1]);
		die(_("no matching remote head"));
	}
	if (version == protocol_v2) {
		/* v2 delivers shallow info inside the fetch response instead. */
		if (shallow->nr)
			BUG("Protocol V2 does not provide shallows at this point in the fetch");
		memset(&si, 0, sizeof(si));
		ref_cpy = do_fetch_pack_v2(args, fd, ref, sought, nr_sought,
					   &shallows_scratch, &si,
					   pack_lockfile);
	} else {
		prepare_shallow_info(&si, shallow);
		ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
					&si, pack_lockfile);
	}
	reprepare_packed_git(the_repository);

	if (!args->cloning && args->deepen) {
		/*
		 * A deepening fetch receives history cut off at the new
		 * shallow boundary; verify everything reachable from the
		 * fetched refs actually arrived before trusting the result.
		 */
		struct check_connected_options opt = CHECK_CONNECTED_INIT;
		struct ref *iterator = ref_cpy;
		opt.shallow_file = alternate_shallow_file;
		if (args->deepen)
			opt.is_deepening_fetch = 1;
		if (check_connected(iterate_ref_map, &iterator, &opt)) {
			error(_("remote did not send all necessary objects"));
			free_refs(ref_cpy);
			ref_cpy = NULL;
			rollback_lock_file(&shallow_lock);
			goto cleanup;
		}
		args->connectivity_checked = 1;
	}

	update_shallow(args, sought, nr_sought, &si);
cleanup:
	clear_shallow_info(&si);
	oid_array_clear(&shallows_scratch);
	return ref_cpy;
}
  1578. int report_unmatched_refs(struct ref **sought, int nr_sought)
  1579. {
  1580. int i, ret = 0;
  1581. for (i = 0; i < nr_sought; i++) {
  1582. if (!sought[i])
  1583. continue;
  1584. switch (sought[i]->match_status) {
  1585. case REF_MATCHED:
  1586. continue;
  1587. case REF_NOT_MATCHED:
  1588. error(_("no such remote ref %s"), sought[i]->name);
  1589. break;
  1590. case REF_UNADVERTISED_NOT_ALLOWED:
  1591. error(_("Server does not allow request for unadvertised object %s"),
  1592. sought[i]->name);
  1593. break;
  1594. }
  1595. ret = 1;
  1596. }
  1597. return ret;
  1598. }