git (Git Source Code Mirror): this is a publish-only repository and all pull requests are ignored. Please follow the Documentation/SubmittingPatches procedure for any of your improvements.

tree-diff.c

/*
 * Helper functions for tree diff generation
 */
#include "cache.h"
#include "diff.h"
#include "diffcore.h"
#include "tree.h"

/*
 * internal mode marker, saying a tree entry != entry of tp[imin]
 * (see ll_diff_tree_paths for what it means there)
 *
 * we will update/use/emit entry for diff only with it unset.
 */
#define S_IFXMIN_NEQ	S_DIFFTREE_IFXMIN_NEQ
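
/*
 * Most diffs have one parent (a plain diff) or two (a merge), so the
 * per-parent scratch arrays below are small enough to live on the stack
 * via xalloca(); only octopus merges (nparent > 2) fall back to heap
 * allocation, and FAST_ARRAY_FREE() mirrors that by freeing heap
 * allocations only.
 */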
#define FAST_ARRAY_ALLOC(x, nr) do { \
	if ((nr) <= 2) \
		(x) = xalloca((nr) * sizeof(*(x))); \
	else \
		ALLOC_ARRAY((x), nr); \
} while(0)
#define FAST_ARRAY_FREE(x, nr) do { \
	if ((nr) > 2) \
		free((x)); \
} while(0)

static struct combine_diff_path *ll_diff_tree_paths(
	struct combine_diff_path *p, const struct object_id *oid,
	const struct object_id **parents_oid, int nparent,
	struct strbuf *base, struct diff_options *opt);
static int ll_diff_tree_oid(const struct object_id *old_oid,
			    const struct object_id *new_oid,
			    struct strbuf *base, struct diff_options *opt);

/*
 * Compare two tree entries, taking into account only path/S_ISDIR(mode),
 * but not their sha1's.
 *
 * NOTE files and directories *always* compare differently, even when having
 *      the same name - thanks to base_name_compare().
 *
 * NOTE empty (=invalid) descriptor(s) take part in comparison as +infty,
 *      so that they sort *after* valid tree entries.
 *
 *      Due to this convention, if trees are scanned in sorted order, all
 *      non-empty descriptors will be processed first.
 */
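/*
 * For example, base_name_compare() compares a directory "foo" as if its
 * name were "foo/", so a file "foo" sorts before a directory "foo", and
 * a file "foo.c" sorts before a directory "foo" (because '.' < '/').
 */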
static int tree_entry_pathcmp(struct tree_desc *t1, struct tree_desc *t2)
{
	struct name_entry *e1, *e2;
	int cmp;

	/* empty descriptors sort after valid tree entries */
	if (!t1->size)
		return t2->size ? 1 : 0;
	else if (!t2->size)
		return -1;

	e1 = &t1->entry;
	e2 = &t2->entry;
	cmp = base_name_compare(e1->path, tree_entry_len(e1), e1->mode,
				e2->path, tree_entry_len(e2), e2->mode);
	return cmp;
}

/*
 * convert path -> opt->diff_*() callbacks
 *
 * emits diff to first parent only, and tells diff tree-walker that we are done
 * with p and it can be freed.
 */
static int emit_diff_first_parent_only(struct diff_options *opt, struct combine_diff_path *p)
{
	struct combine_diff_parent *p0 = &p->parent[0];
	if (p->mode && p0->mode) {
		opt->change(opt, p0->mode, p->mode, &p0->oid, &p->oid,
			1, 1, p->path, 0, 0);
	}
	else {
		const struct object_id *oid;
		unsigned int mode;
		int addremove;

		if (p->mode) {
			addremove = '+';
			oid = &p->oid;
			mode = p->mode;
		} else {
			addremove = '-';
			oid = &p0->oid;
			mode = p0->mode;
		}

		opt->add_remove(opt, addremove, mode, oid, 1, p->path, 0);
	}

	return 0;	/* we are done with p */
}
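
/*
 * Note: the return value of a pathchange callback is a "keep" flag.
 * Returning 0, as above, tells emit_path() that the path has been fully
 * handled, so its combine_diff_path element is left on the list tail for
 * reuse instead of being kept on the result list.
 */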

/*
 * Make a new combine_diff_path from path/mode/sha1
 * and append it to paths list tail.
 *
 * Memory for created elements could be reused:
 *
 *	- if last->next == NULL, the memory is allocated;
 *
 *	- if last->next != NULL, it is assumed that p=last->next was returned
 *	  earlier by this function, and p->next was *not* modified.
 *	  The memory is then reused from p.
 *
 * so for clients,
 *
 * - if you do need to keep the element
 *
 *	p = path_appendnew(p, ...);
 *	process(p);
 *	p->next = NULL;
 *
 * - if you don't need to keep the element after processing
 *
 *	pprev = p;
 *	p = path_appendnew(p, ...);
 *	process(p);
 *	p = pprev;
 *	; don't forget to free tail->next in the end
 *
 * p->parent[] remains uninitialized.
 */
static struct combine_diff_path *path_appendnew(struct combine_diff_path *last,
	int nparent, const struct strbuf *base, const char *path, int pathlen,
	unsigned mode, const struct object_id *oid)
{
	struct combine_diff_path *p;
	size_t len = st_add(base->len, pathlen);
	size_t alloclen = combine_diff_path_size(nparent, len);

	/* if last->next is !NULL - it is a pre-allocated memory, we can reuse */
	p = last->next;
	if (p && (alloclen > (intptr_t)p->next)) {
		FREE_AND_NULL(p);
	}

	if (!p) {
		p = xmalloc(alloclen);

		/*
		 * until we go to it next round, .next holds how many bytes we
		 * allocated (for faster realloc - we don't need copying old data).
		 */
		p->next = (struct combine_diff_path *)(intptr_t)alloclen;
	}

	last->next = p;

	p->path = (char *)&(p->parent[nparent]);
	memcpy(p->path, base->buf, base->len);
	memcpy(p->path + base->len, path, pathlen);
	p->path[len] = 0;
	p->mode = mode;
	oidcpy(&p->oid, oid ? oid : &null_oid);

	return p;
}
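
/*
 * Note on the reuse protocol above: right after path_appendnew() returns,
 * p->next is not a usable pointer - it still encodes the allocation size.
 * emit_path() either resets it to NULL (path kept) or leaves the element
 * on the tail for reuse (path filtered/consumed), and diff_tree_paths()
 * frees whatever pre-allocated tail element is left over at the end.
 */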

/*
 * new path should be added to combine diff
 *
 * 3 cases on how/when it should be called and behaves:
 *
 *	 t, !tp		-> path added, all parents lack it
 *	!t,  tp		-> path removed from all parents
 *	 t,  tp		-> path modified/added
 *			   (M for tp[i]=tp[imin], A otherwise)
 */
static struct combine_diff_path *emit_path(struct combine_diff_path *p,
	struct strbuf *base, struct diff_options *opt, int nparent,
	struct tree_desc *t, struct tree_desc *tp,
	int imin)
{
	unsigned short mode;
	const char *path;
	const struct object_id *oid;
	int pathlen;
	int old_baselen = base->len;
	int i, isdir, recurse = 0, emitthis = 1;

	/* at least something has to be valid */
	assert(t || tp);

	if (t) {
		/* path present in resulting tree */
		oid = tree_entry_extract(t, &path, &mode);
		pathlen = tree_entry_len(&t->entry);
		isdir = S_ISDIR(mode);
	} else {
		/*
		 * a path was removed - take path from imin parent. Also take
		 * mode from that parent, to decide on recursion(1).
		 *
		 * 1) all modes for tp[i]=tp[imin] should be the same wrt
		 *    S_ISDIR, thanks to base_name_compare().
		 */
		tree_entry_extract(&tp[imin], &path, &mode);
		pathlen = tree_entry_len(&tp[imin].entry);

		isdir = S_ISDIR(mode);
		oid = NULL;
		mode = 0;
	}

	if (opt->flags.recursive && isdir) {
		recurse = 1;
		emitthis = opt->flags.tree_in_recursive;
	}

	if (emitthis) {
		int keep;
		struct combine_diff_path *pprev = p;
		p = path_appendnew(p, nparent, base, path, pathlen, mode, oid);

		for (i = 0; i < nparent; ++i) {
			/*
			 * tp[i] is valid, if present and if tp[i]==tp[imin] -
			 * otherwise, we should ignore it.
			 */
			int tpi_valid = tp && !(tp[i].entry.mode & S_IFXMIN_NEQ);

			const struct object_id *oid_i;
			unsigned mode_i;

			p->parent[i].status =
				!t ? DIFF_STATUS_DELETED :
					tpi_valid ?
						DIFF_STATUS_MODIFIED :
						DIFF_STATUS_ADDED;

			if (tpi_valid) {
				oid_i = &tp[i].entry.oid;
				mode_i = tp[i].entry.mode;
			}
			else {
				oid_i = &null_oid;
				mode_i = 0;
			}

			p->parent[i].mode = mode_i;
			oidcpy(&p->parent[i].oid, oid_i);
		}

		keep = 1;
		if (opt->pathchange)
			keep = opt->pathchange(opt, p);

		/*
		 * If a path was filtered or consumed - we don't need to add it
		 * to the list and can reuse its memory, leaving it as
		 * pre-allocated element on the tail.
		 *
		 * On the other hand, if path needs to be kept, we need to
		 * correct its .next to NULL, as it was pre-initialized to how
		 * much memory was allocated.
		 *
		 * see path_appendnew() for details.
		 */
		if (!keep)
			p = pprev;
		else
			p->next = NULL;
	}

	if (recurse) {
		const struct object_id **parents_oid;

		FAST_ARRAY_ALLOC(parents_oid, nparent);
		for (i = 0; i < nparent; ++i) {
			/* same rule as in emitthis */
			int tpi_valid = tp && !(tp[i].entry.mode & S_IFXMIN_NEQ);

			parents_oid[i] = tpi_valid ? &tp[i].entry.oid : NULL;
		}

		strbuf_add(base, path, pathlen);
		strbuf_addch(base, '/');
		p = ll_diff_tree_paths(p, oid, parents_oid, nparent, base, opt);
		FAST_ARRAY_FREE(parents_oid, nparent);
	}

	strbuf_setlen(base, old_baselen);
	return p;
}
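
/*
 * Note: when recursing above, parents whose entry is missing or differs
 * from p[imin] pass NULL as their subtree oid; fill_tree_descriptor()
 * then hands the recursion an empty descriptor for them, so everything
 * under the new subtree appears as added relative to those parents.
 */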

static void skip_uninteresting(struct tree_desc *t, struct strbuf *base,
			       struct diff_options *opt)
{
	enum interesting match;

	while (t->size) {
		match = tree_entry_interesting(opt->repo->index, &t->entry,
					       base, 0, &opt->pathspec);
		if (match) {
			if (match == all_entries_not_interesting)
				t->size = 0;
			break;
		}
		update_tree_entry(t);
	}
}

/*
 * generate paths for combined diff D(sha1,parents_oid[])
 *
 * Resulting paths are appended to combine_diff_path linked list, and also, are
 * emitted on the go via opt->pathchange() callback, so it is possible to
 * process the result as batch or incrementally.
 *
 * The paths are generated scanning new tree and all parents trees
 * simultaneously, similarly to what diff_tree() was doing for 2 trees.
 * The theory behind such scan is as follows:
 *
 *
 * D(T,P1...Pn) calculation scheme
 * -------------------------------
 *
 * D(T,P1...Pn) = D(T,P1) ^ ... ^ D(T,Pn)	(regarding resulting paths set)
 *
 *	D(T,Pj)		- diff between T..Pj
 *	D(T,P1...Pn)	- combined diff from T to parents P1,...,Pn
 *
 *
 * We start from all trees, which are sorted, and compare their entries in
 * lock-step:
 *
 *	 T     P1       Pn
 *	 -     -        -
 *	|t|   |p1|     |pn|
 *	|-|   |--| ... |--|	imin = argmin(p1...pn)
 *	| |   |  |     |  |
 *	|-|   |--|     |--|
 *	|.|   |. |     |. |
 *	 .     .        .
 *	 .     .        .
 *
 * at any time there could be 3 cases:
 *
 *	1)  t < p[imin];
 *	2)  t > p[imin];
 *	3)  t = p[imin].
 *
 * Schematic deduction of what every case means, and what to do, follows:
 *
 * 1)  t < p[imin]  ->  ∀j t ∉ Pj  ->  "+t" ∈ D(T,Pj)  ->  D += "+t";  t↓
 *
 * 2)  t > p[imin]
 *
 *     2.1) ∃j: pj > p[imin]  ->  "-p[imin]" ∉ D(T,Pj)  ->  D += ø;  ∀ pi=p[imin]  pi↓
 *     2.2) ∀i  pi = p[imin]  ->  pi ∉ T  ->  "-pi" ∈ D(T,Pi)  ->  D += "-p[imin]";  ∀i pi↓
 *
 * 3)  t = p[imin]
 *
 *     3.1) ∃j: pj > p[imin]  ->  "+t" ∈ D(T,Pj)  ->  only pi=p[imin] remains to investigate
 *     3.2) pi = p[imin]  ->  investigate δ(t,pi)
 *      |
 *      |
 *      v
 *
 *     3.1+3.2) looking at δ(t,pi) ∀i: pi=p[imin] - if all != ø  ->
 *
 *                       ⎧δ(t,pi)  - if pi=p[imin]
 *              ->  D += ⎨
 *                       ⎩"+t"     - if pi>p[imin]
 *
 *
 *     in any case t↓  ∀ pi=p[imin]  pi↓
 *
 *
 * ~~~~~~~~
 *
 * NOTE
 *
 *	Usual diff D(A,B) is by definition the same as combined diff D(A,[B]),
 *	so this diff paths generator can, and is used, for plain diffs
 *	generation too.
 *
 *	Please keep attention to the common D(A,[B]) case when working on the
 *	code, in order not to slow it down.
 *
 * NOTE
 *	nparent must be > 0.
 */
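
/*
 * A concrete instance of the scheme above: in a two-parent merge whose
 * walkers currently stand at
 *
 *	t = "b",   p1 = "a",   p2 = "b"    ->   imin = 1
 *
 * we have t > p[imin] and p2 > p[imin], i.e. case 2.1: "a" is present
 * neither in T nor in P2, so "-a" is not part of D(T,P2) and drops out of
 * the combined diff; only p1 advances.  On the next step t = p1 = p2 = "b"
 * (case 3), and "b" is emitted only if δ(t,pi) != ø for every parent
 * positioned at p[imin].
 */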

/* ∀ pi=p[imin]  pi↓ */
static inline void update_tp_entries(struct tree_desc *tp, int nparent)
{
	int i;
	for (i = 0; i < nparent; ++i)
		if (!(tp[i].entry.mode & S_IFXMIN_NEQ))
			update_tree_entry(&tp[i]);
}

static struct combine_diff_path *ll_diff_tree_paths(
	struct combine_diff_path *p, const struct object_id *oid,
	const struct object_id **parents_oid, int nparent,
	struct strbuf *base, struct diff_options *opt)
{
	struct tree_desc t, *tp;
	void *ttree, **tptree;
	int i;

	FAST_ARRAY_ALLOC(tp, nparent);
	FAST_ARRAY_ALLOC(tptree, nparent);

	/*
	 * load parents first, as they are probably already cached.
	 *
	 * ( log_tree_diff() parses commit->parent before calling here via
	 *   diff_tree_oid(parent, commit) )
	 */
	for (i = 0; i < nparent; ++i)
		tptree[i] = fill_tree_descriptor(opt->repo, &tp[i], parents_oid[i]);
	ttree = fill_tree_descriptor(opt->repo, &t, oid);

	/* Enable recursion indefinitely */
	opt->pathspec.recursive = opt->flags.recursive;

	for (;;) {
		int imin, cmp;

		if (diff_can_quit_early(opt))
			break;

		if (opt->pathspec.nr) {
			skip_uninteresting(&t, base, opt);
			for (i = 0; i < nparent; i++)
				skip_uninteresting(&tp[i], base, opt);
		}

		/* comparing is finished when all trees are done */
		if (!t.size) {
			int done = 1;
			for (i = 0; i < nparent; ++i)
				if (tp[i].size) {
					done = 0;
					break;
				}
			if (done)
				break;
		}

		/*
		 * lookup imin = argmin(p1...pn),
		 * mark entries whether they =p[imin] along the way
		 */
		imin = 0;
		tp[0].entry.mode &= ~S_IFXMIN_NEQ;

		for (i = 1; i < nparent; ++i) {
			cmp = tree_entry_pathcmp(&tp[i], &tp[imin]);
			if (cmp < 0) {
				imin = i;
				tp[i].entry.mode &= ~S_IFXMIN_NEQ;
			}
			else if (cmp == 0) {
				tp[i].entry.mode &= ~S_IFXMIN_NEQ;
			}
			else {
				tp[i].entry.mode |= S_IFXMIN_NEQ;
			}
		}

		/* fixup markings for entries before imin */
		for (i = 0; i < imin; ++i)
			tp[i].entry.mode |= S_IFXMIN_NEQ;	/* pi > p[imin] */

		/* compare t vs p[imin] */
		cmp = tree_entry_pathcmp(&t, &tp[imin]);

		/* t = p[imin] */
		if (cmp == 0) {
			/* are either pi > p[imin] or diff(t,pi) != ø ? */
			if (!opt->flags.find_copies_harder) {
				for (i = 0; i < nparent; ++i) {
					/* p[i] > p[imin] */
					if (tp[i].entry.mode & S_IFXMIN_NEQ)
						continue;

					/* diff(t,pi) != ø */
					if (!oideq(&t.entry.oid, &tp[i].entry.oid) ||
					    (t.entry.mode != tp[i].entry.mode))
						continue;

					goto skip_emit_t_tp;
				}
			}

			/* D += {δ(t,pi) if pi=p[imin];  "+a" if pi > p[imin]} */
			p = emit_path(p, base, opt, nparent,
					&t, tp, imin);

		skip_emit_t_tp:
			/* t↓,  ∀ pi=p[imin]  pi↓ */
			update_tree_entry(&t);
			update_tp_entries(tp, nparent);
		}

		/* t < p[imin] */
		else if (cmp < 0) {
			/* D += "+t" */
			p = emit_path(p, base, opt, nparent,
					&t, /*tp=*/NULL, -1);

			/* t↓ */
			update_tree_entry(&t);
		}

		/* t > p[imin] */
		else {
			/* ∀i pi=p[imin]  ->  D += "-p[imin]" */
			if (!opt->flags.find_copies_harder) {
				for (i = 0; i < nparent; ++i)
					if (tp[i].entry.mode & S_IFXMIN_NEQ)
						goto skip_emit_tp;
			}

			p = emit_path(p, base, opt, nparent,
					/*t=*/NULL, tp, imin);

		skip_emit_tp:
			/* ∀ pi=p[imin]  pi↓ */
			update_tp_entries(tp, nparent);
		}
	}

	free(ttree);
	for (i = nparent-1; i >= 0; i--)
		free(tptree[i]);
	FAST_ARRAY_FREE(tptree, nparent);
	FAST_ARRAY_FREE(tp, nparent);

	return p;
}

struct combine_diff_path *diff_tree_paths(
	struct combine_diff_path *p, const struct object_id *oid,
	const struct object_id **parents_oid, int nparent,
	struct strbuf *base, struct diff_options *opt)
{
	p = ll_diff_tree_paths(p, oid, parents_oid, nparent, base, opt);

	/*
	 * free pre-allocated last element, if any
	 * (see path_appendnew() for details about why)
	 */
	FREE_AND_NULL(p->next);

	return p;
}

/*
 * Does it look like the resulting diff might be due to a rename?
 * - single entry
 * - not a valid previous file
 */
static inline int diff_might_be_rename(void)
{
	return diff_queued_diff.nr == 1 &&
		!DIFF_FILE_VALID(diff_queued_diff.queue[0]->one);
}
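
/*
 * When --follow is in effect and the diff for the single requested path
 * came out as a plain creation (see diff_might_be_rename() above),
 * diff_tree_oid() calls the helper below to redo the tree diff with
 * rename/copy detection over the whole tree and, if a source is found,
 * rewrite opt->pathspec to point at that source path.
 */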

static void try_to_follow_renames(const struct object_id *old_oid,
				  const struct object_id *new_oid,
				  struct strbuf *base, struct diff_options *opt)
{
	struct diff_options diff_opts;
	struct diff_queue_struct *q = &diff_queued_diff;
	struct diff_filepair *choice;
	int i;

	/*
	 * follow-rename code is very specific, we need exactly one
	 * path. Magic that matches more than one path is not
	 * supported.
	 */
	GUARD_PATHSPEC(&opt->pathspec, PATHSPEC_FROMTOP | PATHSPEC_LITERAL);
#if 0
	/*
	 * We should reject wildcards as well. Unfortunately we
	 * haven't got a reliable way to detect that 'foo\*bar' in
	 * fact has no wildcards. nowildcard_len is merely a hint for
	 * optimization. Let it slip for now until wildmatch is taught
	 * about dry-run mode and returns wildcard info.
	 */
	if (opt->pathspec.has_wildcard)
		die("BUG:%s:%d: wildcards are not supported",
		    __FILE__, __LINE__);
#endif

	/* Remove the file creation entry from the diff queue, and remember it */
	choice = q->queue[0];
	q->nr = 0;

	repo_diff_setup(opt->repo, &diff_opts);
	diff_opts.flags.recursive = 1;
	diff_opts.flags.find_copies_harder = 1;
	diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT;
	diff_opts.single_follow = opt->pathspec.items[0].match;
	diff_opts.break_opt = opt->break_opt;
	diff_opts.rename_score = opt->rename_score;
	diff_setup_done(&diff_opts);
	ll_diff_tree_oid(old_oid, new_oid, base, &diff_opts);
	diffcore_std(&diff_opts);
	clear_pathspec(&diff_opts.pathspec);

	/* Go through the new set of file pairs and see if we find a more interesting one */
	opt->found_follow = 0;
	for (i = 0; i < q->nr; i++) {
		struct diff_filepair *p = q->queue[i];

		/*
		 * Found a source? Not only do we use that for the new
		 * diff_queued_diff, we will also use that as the path in
		 * the future!
		 */
		if ((p->status == 'R' || p->status == 'C') &&
		    !strcmp(p->two->path, opt->pathspec.items[0].match)) {
			const char *path[2];

			/* Switch the file-pairs around */
			q->queue[i] = choice;
			choice = p;

			/* Update the path we use from now on.. */
			path[0] = p->one->path;
			path[1] = NULL;
			clear_pathspec(&opt->pathspec);
			parse_pathspec(&opt->pathspec,
				       PATHSPEC_ALL_MAGIC & ~PATHSPEC_LITERAL,
				       PATHSPEC_LITERAL_PATH, "", path);

			/*
			 * The caller expects us to return a set of vanilla
			 * filepairs and let a later call it makes to
			 * diffcore_std() sort the renames out (among other
			 * things), but we have already found renames
			 * ourselves; signal diffcore_std() not to muck with
			 * rename information.
			 */
			opt->found_follow = 1;
			break;
		}
	}

	/*
	 * Then, discard all the non-relevant file pairs...
	 */
	for (i = 0; i < q->nr; i++) {
		struct diff_filepair *p = q->queue[i];
		diff_free_filepair(p);
	}

	/*
	 * .. and re-instate the one we want (which might be either the
	 * original one, or the rename/copy we found)
	 */
	q->queue[0] = choice;
	q->nr = 1;
}
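
/*
 * A plain two-tree diff is a combined diff with a single parent: the
 * walker below runs with nparent == 1 and emit_diff_first_parent_only()
 * installed as the pathchange callback, which turns each resulting path
 * into the classic change()/add_remove() callbacks.
 */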

static int ll_diff_tree_oid(const struct object_id *old_oid,
			    const struct object_id *new_oid,
			    struct strbuf *base, struct diff_options *opt)
{
	struct combine_diff_path phead, *p;
	pathchange_fn_t pathchange_old = opt->pathchange;

	phead.next = NULL;
	opt->pathchange = emit_diff_first_parent_only;
	diff_tree_paths(&phead, new_oid, &old_oid, 1, base, opt);

	for (p = phead.next; p;) {
		struct combine_diff_path *pprev = p;
		p = p->next;
		free(pprev);
	}

	opt->pathchange = pathchange_old;
	return 0;
}

int diff_tree_oid(const struct object_id *old_oid,
		  const struct object_id *new_oid,
		  const char *base_str, struct diff_options *opt)
{
	struct strbuf base;
	int retval;

	strbuf_init(&base, PATH_MAX);
	strbuf_addstr(&base, base_str);

	retval = ll_diff_tree_oid(old_oid, new_oid, &base, opt);
	if (!*base_str && opt->flags.follow_renames && diff_might_be_rename())
		try_to_follow_renames(old_oid, new_oid, &base, opt);

	strbuf_release(&base);

	return retval;
}

int diff_root_tree_oid(const struct object_id *new_oid, const char *base, struct diff_options *opt)
{
	return diff_tree_oid(NULL, new_oid, base, opt);
}
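
/*
 * Rough caller sketch (illustration only; real callers such as
 * log_tree_diff() in log-tree.c do more setup): comparing a commit
 * against one parent goes approximately like
 *
 *	struct diff_options opt;
 *
 *	repo_diff_setup(the_repository, &opt);
 *	opt.flags.recursive = 1;
 *	diff_setup_done(&opt);
 *
 *	diff_tree_oid(&parent_oid, &commit_oid, "", &opt);
 *	diffcore_std(&opt);
 *	diff_flush(&opt);
 *
 * with diff_root_tree_oid() taking the place of diff_tree_oid() for a
 * root commit that has no parent.
 */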