1 #define NO_THE_INDEX_COMPATIBILITY_MACROS
2 #include "cache.h"
3 #include "dir.h"
4 #include "tree.h"
5 #include "tree-walk.h"
6 #include "cache-tree.h"
7 #include "unpack-trees.h"
8 #include "progress.h"
9 #include "refs.h"
10 #include "attr.h"
11
12 /*
13  * Error messages expected by scripts out of plumbing commands such as
14  * read-tree.  Non-scripted Porcelain is not required to use these messages
15  * and in fact is encouraged to reword them to better suit its particular
16  * situation.  See how "git checkout" and "git merge" replace
17  * them using set_porcelain_error_msgs(), for example.
18  */
19 const char *unpack_plumbing_errors[NB_UNPACK_TREES_ERROR_TYPES] = {
20         /* ERROR_WOULD_OVERWRITE */
21         "Entry '%s' would be overwritten by merge. Cannot merge.",
22
23         /* ERROR_NOT_UPTODATE_FILE */
24         "Entry '%s' not uptodate. Cannot merge.",
25
26         /* ERROR_NOT_UPTODATE_DIR */
27         "Updating '%s' would lose untracked files in it",
28
29         /* ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN */
30         "Untracked working tree file '%s' would be overwritten by merge.",
31
32         /* ERROR_WOULD_LOSE_UNTRACKED_REMOVED */
33         "Untracked working tree file '%s' would be removed by merge.",
34
35         /* ERROR_BIND_OVERLAP */
36         "Entry '%s' overlaps with '%s'.  Cannot bind.",
37
38         /* ERROR_SPARSE_NOT_UPTODATE_FILE */
39         "Entry '%s' not uptodate. Cannot update sparse checkout.",
40
41         /* ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN */
42         "Working tree file '%s' would be overwritten by sparse checkout update.",
43
44         /* ERROR_WOULD_LOSE_ORPHANED_REMOVED */
45         "Working tree file '%s' would be removed by sparse checkout update.",
46 };
47
48 #define ERRORMSG(o,type) \
49         ( ((o) && (o)->msgs[(type)]) \
50           ? ((o)->msgs[(type)])      \
51           : (unpack_plumbing_errors[(type)]) )
52
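/*
 * Add a copy of <ce> to the result index, with the flags in <set> added
 * and the flags in <clear> removed.  The hash flags are always cleared,
 * since the copy has not been put in any name hash yet.
 */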
53 static void add_entry(struct unpack_trees_options *o, struct cache_entry *ce,
54         unsigned int set, unsigned int clear)
55 {
56         unsigned int size = ce_size(ce);
57         struct cache_entry *new = xmalloc(size);
58
59         clear |= CE_HASHED | CE_UNHASHED;
60
61         memcpy(new, ce, size);
62         new->next = NULL;
63         new->ce_flags = (new->ce_flags & ~clear) | set;
64         add_index_entry(&o->result, new, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
65 }
66
67 /*
68  * Record an error on path <path> of type <e>.  In plumbing mode, or when
69  * o->show_all_errors is not set, the error is reported right away; otherwise
70  * it is queued so that display_error_msgs() can show all errors at once.
71  */
72 static int add_rejected_path(struct unpack_trees_options *o,
73                              enum unpack_trees_error_types e,
74                              const char *path)
75 {
76         struct rejected_paths_list *newentry;
77         int porcelain = o && (o)->msgs[e];
78         /*
79          * simply display the given error message if in plumbing mode
80          */
81         if (!porcelain)
82                 o->show_all_errors = 0;
83         if (!o->show_all_errors)
84                 return error(ERRORMSG(o, e), path);
85
86         /*
87          * Otherwise, insert in a list for future display by
88          * display_error_msgs()
89          */
90         newentry = xmalloc(sizeof(struct rejected_paths_list));
91         newentry->path = (char *)path;
92         newentry->next = o->unpack_rejects[e];
93         o->unpack_rejects[e] = newentry;
94         return -1;
95 }
96
97 /*
98  * free all the structures allocated for the error <e>
99  */
100 static void free_rejected_paths(struct unpack_trees_options *o,
101                                 enum unpack_trees_error_types e)
102 {
103         while (o->unpack_rejects[e]) {
104                 struct rejected_paths_list *del = o->unpack_rejects[e];
105                 o->unpack_rejects[e] = o->unpack_rejects[e]->next;
106                 free(del);
107         }
108         free(o->unpack_rejects[e]);
109 }
110
111 /*
112  * Display all the recorded error messages, grouped by type.
113  */
114 static void display_error_msgs(struct unpack_trees_options *o)
115 {
116         int e;
117         int something_displayed = 0;
118         for (e = 0; e < NB_UNPACK_TREES_ERROR_TYPES; e++) {
119                 if (o->unpack_rejects[e]) {
120                         struct rejected_paths_list *rp;
121                         struct strbuf path = STRBUF_INIT;
122                         something_displayed = 1;
123                         for (rp = o->unpack_rejects[e]; rp; rp = rp->next)
124                                 strbuf_addf(&path, "\t%s\n", rp->path);
125                         error(ERRORMSG(o, e), path.buf);
126                         strbuf_release(&path);
127                         free_rejected_paths(o, e);
128                 }
129         }
130         if (something_displayed)
131                 printf("Aborting\n");
132 }
133
134 /*
135  * Unlink the last component and schedule the leading directories for
136  * removal, such that empty directories get removed.
137  */
138 static void unlink_entry(struct cache_entry *ce)
139 {
140         if (has_symlink_or_noent_leading_path(ce->name, ce_namelen(ce)))
141                 return;
142         if (remove_or_warn(ce->ce_mode, ce->name))
143                 return;
144         schedule_dir_for_removal(ce->name, ce_namelen(ce));
145 }
146
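/*
 * check_updates() carries out the working tree updates scheduled in
 * o->result: when o->update is set, it removes files for entries
 * flagged CE_WT_REMOVE or CE_REMOVE and checks out entries flagged
 * CE_UPDATE, showing progress if o->verbose_update is also set.
 * It returns non-zero if any checkout_entry() call failed.
 */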
147 static struct checkout state;
148 static int check_updates(struct unpack_trees_options *o)
149 {
150         unsigned cnt = 0, total = 0;
151         struct progress *progress = NULL;
152         struct index_state *index = &o->result;
153         int i;
154         int errs = 0;
155
156         if (o->update && o->verbose_update) {
157                 for (total = cnt = 0; cnt < index->cache_nr; cnt++) {
158                         struct cache_entry *ce = index->cache[cnt];
159                         if (ce->ce_flags & (CE_UPDATE | CE_REMOVE | CE_WT_REMOVE))
160                                 total++;
161                 }
162
163                 progress = start_progress_delay("Checking out files",
164                                                 total, 50, 1);
165                 cnt = 0;
166         }
167
168         if (o->update)
169                 git_attr_set_direction(GIT_ATTR_CHECKOUT, &o->result);
170         for (i = 0; i < index->cache_nr; i++) {
171                 struct cache_entry *ce = index->cache[i];
172
173                 if (ce->ce_flags & CE_WT_REMOVE) {
174                         display_progress(progress, ++cnt);
175                         if (o->update)
176                                 unlink_entry(ce);
177                         continue;
178                 }
179
180                 if (ce->ce_flags & CE_REMOVE) {
181                         display_progress(progress, ++cnt);
182                         if (o->update)
183                                 unlink_entry(ce);
184                 }
185         }
186         remove_marked_cache_entries(&o->result);
187         remove_scheduled_dirs();
188
189         for (i = 0; i < index->cache_nr; i++) {
190                 struct cache_entry *ce = index->cache[i];
191
192                 if (ce->ce_flags & CE_UPDATE) {
193                         display_progress(progress, ++cnt);
194                         ce->ce_flags &= ~CE_UPDATE;
195                         if (o->update) {
196                                 errs |= checkout_entry(ce, &state, NULL);
197                         }
198                 }
199         }
200         stop_progress(&progress);
201         if (o->update)
202                 git_attr_set_direction(GIT_ATTR_CHECKIN, NULL);
203         return errs != 0;
204 }
205
206 static int verify_uptodate_sparse(struct cache_entry *ce, struct unpack_trees_options *o);
207 static int verify_absent_sparse(struct cache_entry *ce, enum unpack_trees_error_types, struct unpack_trees_options *o);
208
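/*
 * Return 1 if <ce> falls outside the area selected by the sparse-checkout
 * patterns in o->el, i.e. the entry is going to have CE_SKIP_WORKTREE
 * set.  Unmerged entries are never skipped.
 */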
209 static int will_have_skip_worktree(const struct cache_entry *ce, struct unpack_trees_options *o)
210 {
211         const char *basename;
212
213         if (ce_stage(ce))
214                 return 0;
215
216         basename = strrchr(ce->name, '/');
217         basename = basename ? basename+1 : ce->name;
218         return excluded_from_list(ce->name, ce_namelen(ce), basename, NULL, o->el) <= 0;
219 }
220
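/*
 * Re-evaluate CE_SKIP_WORKTREE for <ce> against the current sparse
 * patterns.  An entry that is leaving the checkout area is scheduled
 * for removal from the working tree (CE_WT_REMOVE); one that is
 * entering it is scheduled to be checked out (CE_UPDATE).
 */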
221 static int apply_sparse_checkout(struct cache_entry *ce, struct unpack_trees_options *o)
222 {
223         int was_skip_worktree = ce_skip_worktree(ce);
224
225         if (will_have_skip_worktree(ce, o))
226                 ce->ce_flags |= CE_SKIP_WORKTREE;
227         else
228                 ce->ce_flags &= ~CE_SKIP_WORKTREE;
229
230         /*
231          * We only care about files getting into the checkout area.
232          * If merge strategies want to remove some, go ahead; the
233          * flag will be cleared eventually in unpack_trees() if the
234          * entry ends up outside the checkout area.
235          */
236         if (ce->ce_flags & CE_REMOVE)
237                 return 0;
238
239         if (!was_skip_worktree && ce_skip_worktree(ce)) {
240                 /*
241                  * If CE_UPDATE is set, verify_uptodate() must have been
242                  * called already; also, stat info may have been lost after
243                  * merged_entry(), so calling verify_uptodate() again may fail.
244                  */
245                 if (!(ce->ce_flags & CE_UPDATE) && verify_uptodate_sparse(ce, o))
246                         return -1;
247                 ce->ce_flags |= CE_WT_REMOVE;
248         }
249         if (was_skip_worktree && !ce_skip_worktree(ce)) {
250                 if (verify_absent_sparse(ce, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o))
251                         return -1;
252                 ce->ce_flags |= CE_UPDATE;
253         }
254         return 0;
255 }
256
257 static inline int call_unpack_fn(struct cache_entry **src, struct unpack_trees_options *o)
258 {
259         int ret = o->fn(src, o);
260         if (ret > 0)
261                 ret = 0;
262         return ret;
263 }
264
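/*
 * Mark a source index entry as unpacked (already handled by the
 * traversal) and, when it sits at the current cache_bottom, advance
 * cache_bottom past any run of already-unpacked entries.
 */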
265 static void mark_ce_used(struct cache_entry *ce, struct unpack_trees_options *o)
266 {
267         ce->ce_flags |= CE_UNPACKED;
268
269         if (o->cache_bottom < o->src_index->cache_nr &&
270             o->src_index->cache[o->cache_bottom] == ce) {
271                 int bottom = o->cache_bottom;
272                 while (bottom < o->src_index->cache_nr &&
273                        o->src_index->cache[bottom]->ce_flags & CE_UNPACKED)
274                         bottom++;
275                 o->cache_bottom = bottom;
276         }
277 }
278
279 static void mark_all_ce_unused(struct index_state *index)
280 {
281         int i;
282         for (i = 0; i < index->cache_nr; i++)
283                 index->cache[i]->ce_flags &= ~CE_UNPACKED;
284 }
285
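/*
 * Return the position of <ce> in the source index, or the position
 * where it would be inserted if it is not there.
 */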
286 static int locate_in_src_index(struct cache_entry *ce,
287                                struct unpack_trees_options *o)
288 {
289         struct index_state *index = o->src_index;
290         int len = ce_namelen(ce);
291         int pos = index_name_pos(index, ce->name, len);
292         if (pos < 0)
293                 pos = -1 - pos;
294         return pos;
295 }
296
297 /*
298  * We call unpack_index_entry() with an unmerged cache entry
299  * only in diff-index, and it wants a single callback.  Skip
300  * the other unmerged entry with the same name.
301  */
302 static void mark_ce_used_same_name(struct cache_entry *ce,
303                                    struct unpack_trees_options *o)
304 {
305         struct index_state *index = o->src_index;
306         int len = ce_namelen(ce);
307         int pos;
308
309         for (pos = locate_in_src_index(ce, o); pos < index->cache_nr; pos++) {
310                 struct cache_entry *next = index->cache[pos];
311                 if (len != ce_namelen(next) ||
312                     memcmp(ce->name, next->name, len))
313                         break;
314                 mark_ce_used(next, o);
315         }
316 }
317
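/*
 * Return the first source index entry at or after cache_bottom that has
 * not been unpacked yet, or NULL when none remain.
 */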
318 static struct cache_entry *next_cache_entry(struct unpack_trees_options *o)
319 {
320         const struct index_state *index = o->src_index;
321         int pos = o->cache_bottom;
322
323         while (pos < index->cache_nr) {
324                 struct cache_entry *ce = index->cache[pos];
325                 if (!(ce->ce_flags & CE_UNPACKED))
326                         return ce;
327                 pos++;
328         }
329         return NULL;
330 }
331
332 static void add_same_unmerged(struct cache_entry *ce,
333                               struct unpack_trees_options *o)
334 {
335         struct index_state *index = o->src_index;
336         int len = ce_namelen(ce);
337         int pos = index_name_pos(index, ce->name, len);
338
339         if (0 <= pos)
340                 die("programming error in a caller of mark_ce_used_same_name");
341         for (pos = -pos - 1; pos < index->cache_nr; pos++) {
342                 struct cache_entry *next = index->cache[pos];
343                 if (len != ce_namelen(next) ||
344                     memcmp(ce->name, next->name, len))
345                         break;
346                 add_entry(o, next, 0, 0);
347                 mark_ce_used(next, o);
348         }
349 }
350
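/*
 * Feed a single index entry to the merge function as src[0].  An
 * unmerged entry is either kept as-is (when o->skip_unmerged is set)
 * or passed through, and its sibling stages are marked as used.
 */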
351 static int unpack_index_entry(struct cache_entry *ce,
352                               struct unpack_trees_options *o)
353 {
354         struct cache_entry *src[5] = { NULL };
355         int ret;
356
357         src[0] = ce;
358
359         mark_ce_used(ce, o);
360         if (ce_stage(ce)) {
361                 if (o->skip_unmerged) {
362                         add_entry(o, ce, 0, 0);
363                         return 0;
364                 }
365         }
366         ret = call_unpack_fn(src, o);
367         if (ce_stage(ce))
368                 mark_ce_used_same_name(ce, o);
369         return ret;
370 }
371
372 static int find_cache_pos(struct traverse_info *, const struct name_entry *);
373
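/*
 * switch_cache_bottom() adjusts o->cache_bottom for the subdirectory
 * about to be traversed (so index lookups start at the right place)
 * and returns the previous value; restore_cache_bottom() puts that
 * value back when the traversal of the subdirectory is done.  Both
 * are no-ops when o->diff_index_cached is set.
 */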
374 static void restore_cache_bottom(struct traverse_info *info, int bottom)
375 {
376         struct unpack_trees_options *o = info->data;
377
378         if (o->diff_index_cached)
379                 return;
380         o->cache_bottom = bottom;
381 }
382
383 static int switch_cache_bottom(struct traverse_info *info)
384 {
385         struct unpack_trees_options *o = info->data;
386         int ret, pos;
387
388         if (o->diff_index_cached)
389                 return 0;
390         ret = o->cache_bottom;
391         pos = find_cache_pos(info->prev, &info->name);
392
393         if (pos < -1)
394                 o->cache_bottom = -2 - pos;
395         else if (pos < 0)
396                 o->cache_bottom = o->src_index->cache_nr;
397         return ret;
398 }
399
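/*
 * Descend into the subtrees selected by <dirmask>: fill a tree
 * descriptor for each of the <n> trees (an empty one where that tree
 * does not have this directory), propagate D/F conflicts via
 * <df_conflicts>, and run traverse_trees() on the result with a child
 * traverse_info.
 */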
400 static int traverse_trees_recursive(int n, unsigned long dirmask, unsigned long df_conflicts, struct name_entry *names, struct traverse_info *info)
401 {
402         int i, ret, bottom;
403         struct tree_desc t[MAX_UNPACK_TREES];
404         void *buf[MAX_UNPACK_TREES];
405         struct traverse_info newinfo;
406         struct name_entry *p;
407
408         p = names;
409         while (!p->mode)
410                 p++;
411
412         newinfo = *info;
413         newinfo.prev = info;
414         newinfo.name = *p;
415         newinfo.pathlen += tree_entry_len(p->path, p->sha1) + 1;
416         newinfo.conflicts |= df_conflicts;
417
418         for (i = 0; i < n; i++, dirmask >>= 1) {
419                 const unsigned char *sha1 = NULL;
420                 if (dirmask & 1)
421                         sha1 = names[i].sha1;
422                 buf[i] = fill_tree_descriptor(t+i, sha1);
423         }
424
425         bottom = switch_cache_bottom(&newinfo);
426         ret = traverse_trees(n, t, &newinfo);
427         restore_cache_bottom(&newinfo, bottom);
428
429         for (i = 0; i < n; i++)
430                 free(buf[i]);
431
432         return ret;
433 }
434
435 /*
436  * Compare the traverse-path to the cache entry without actually
437  * having to generate the textual representation of the traverse
438  * path.
439  *
440  * NOTE! This *only* compares up to the size of the traverse path
441  * itself - the caller needs to do the final check for the cache
442  * entry having more data at the end!
443  */
444 static int do_compare_entry(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n)
445 {
446         int len, pathlen, ce_len;
447         const char *ce_name;
448
449         if (info->prev) {
450                 int cmp = do_compare_entry(ce, info->prev, &info->name);
451                 if (cmp)
452                         return cmp;
453         }
454         pathlen = info->pathlen;
455         ce_len = ce_namelen(ce);
456
457         /* If ce_len < pathlen then we must have previously hit "name == directory" entry */
458         if (ce_len < pathlen)
459                 return -1;
460
461         ce_len -= pathlen;
462         ce_name = ce->name + pathlen;
463
464         len = tree_entry_len(n->path, n->sha1);
465         return df_name_compare(ce_name, ce_len, S_IFREG, n->path, len, n->mode);
466 }
467
468 static int compare_entry(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n)
469 {
470         int cmp = do_compare_entry(ce, info, n);
471         if (cmp)
472                 return cmp;
473
474         /*
475          * Even if the beginning compared identically, the ce should
476          * compare as bigger than a directory leading up to it!
477          */
478         return ce_namelen(ce) > traverse_path_len(info, n);
479 }
480
481 static int ce_in_traverse_path(const struct cache_entry *ce,
482                                const struct traverse_info *info)
483 {
484         if (!info->prev)
485                 return 1;
486         if (do_compare_entry(ce, info->prev, &info->name))
487                 return 0;
488         /*
489          * If ce (blob) has the same name as the path (which is a tree
490          * we will be descending into), it won't be inside it.
491          */
492         return (info->pathlen < ce_namelen(ce));
493 }
494
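/*
 * Create a new cache entry for tree entry <n> at the path built up by
 * the traversal so far, at stage <stage>.
 */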
495 static struct cache_entry *create_ce_entry(const struct traverse_info *info, const struct name_entry *n, int stage)
496 {
497         int len = traverse_path_len(info, n);
498         struct cache_entry *ce = xcalloc(1, cache_entry_size(len));
499
500         ce->ce_mode = create_ce_mode(n->mode);
501         ce->ce_flags = create_ce_flags(len, stage);
502         hashcpy(ce->sha1, n->sha1);
503         make_traverse_path(ce->name, info, n);
504
505         return ce;
506 }
507
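/*
 * Unpack the non-directory entries at the current traversal position:
 * create a cache entry for each tree that has one (stage 0 for a plain
 * read, stage 1/2/3 relative to o->head_idx for a merge), substitute
 * o->df_conflict_entry where a directory/file conflict is involved, and
 * either hand the result to the merge function or add it to the result
 * index.
 */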
508 static int unpack_nondirectories(int n, unsigned long mask,
509                                  unsigned long dirmask,
510                                  struct cache_entry **src,
511                                  const struct name_entry *names,
512                                  const struct traverse_info *info)
513 {
514         int i;
515         struct unpack_trees_options *o = info->data;
516         unsigned long conflicts;
517
518         /* Do we have *only* directories? Nothing to do */
519         if (mask == dirmask && !src[0])
520                 return 0;
521
522         conflicts = info->conflicts;
523         if (o->merge)
524                 conflicts >>= 1;
525         conflicts |= dirmask;
526
527         /*
528          * Ok, we've filled in up to any potential index entry in src[0],
529          * now do the rest.
530          */
531         for (i = 0; i < n; i++) {
532                 int stage;
533                 unsigned int bit = 1ul << i;
534                 if (conflicts & bit) {
535                         src[i + o->merge] = o->df_conflict_entry;
536                         continue;
537                 }
538                 if (!(mask & bit))
539                         continue;
540                 if (!o->merge)
541                         stage = 0;
542                 else if (i + 1 < o->head_idx)
543                         stage = 1;
544                 else if (i + 1 > o->head_idx)
545                         stage = 3;
546                 else
547                         stage = 2;
548                 src[i + o->merge] = create_ce_entry(info, names + i, stage);
549         }
550
551         if (o->merge)
552                 return call_unpack_fn(src, o);
553
554         for (i = 0; i < n; i++)
555                 if (src[i] && src[i] != o->df_conflict_entry)
556                         add_entry(o, src[i], 0, 0);
557         return 0;
558 }
559
560 static int unpack_failed(struct unpack_trees_options *o, const char *message)
561 {
562         discard_index(&o->result);
563         if (!o->gently) {
564                 if (message)
565                         return error("%s", message);
566                 return -1;
567         }
568         return -1;
569 }
570
571 /* NEEDSWORK: give this a better name and share with tree-walk.c */
572 static int name_compare(const char *a, int a_len,
573                         const char *b, int b_len)
574 {
575         int len = (a_len < b_len) ? a_len : b_len;
576         int cmp = memcmp(a, b, len);
577         if (cmp)
578                 return cmp;
579         return (a_len - b_len);
580 }
581
582 /*
583  * The tree traversal is looking at name p.  If we have a matching entry,
584  * return it.  If name p is a directory in the index, do not return
585  * anything, as we will want to match it when the traversal descends into
586  * the directory.
587  */
588 static int find_cache_pos(struct traverse_info *info,
589                           const struct name_entry *p)
590 {
591         int pos;
592         struct unpack_trees_options *o = info->data;
593         struct index_state *index = o->src_index;
594         int pfxlen = info->pathlen;
595         int p_len = tree_entry_len(p->path, p->sha1);
596
597         for (pos = o->cache_bottom; pos < index->cache_nr; pos++) {
598                 struct cache_entry *ce = index->cache[pos];
599                 const char *ce_name, *ce_slash;
600                 int cmp, ce_len;
601
602                 if (ce->ce_flags & CE_UNPACKED) {
603                         /*
604                          * cache_bottom entry is already unpacked, so
605                          * we can never match it; don't check it
606                          * again.
607                          */
608                         if (pos == o->cache_bottom)
609                                 ++o->cache_bottom;
610                         continue;
611                 }
612                 if (!ce_in_traverse_path(ce, info))
613                         continue;
614                 ce_name = ce->name + pfxlen;
615                 ce_slash = strchr(ce_name, '/');
616                 if (ce_slash)
617                         ce_len = ce_slash - ce_name;
618                 else
619                         ce_len = ce_namelen(ce) - pfxlen;
620                 cmp = name_compare(p->path, p_len, ce_name, ce_len);
621                 /*
622                  * Exact match; if we have a directory we need to
623                  * delay returning it.
624                  */
625                 if (!cmp)
626                         return ce_slash ? -2 - pos : pos;
627                 if (0 < cmp)
628                         continue; /* keep looking */
629                 /*
630                  * ce_name sorts after p->path; could it be that we
631                  * have files under p->path directory in the index?
632                  * E.g.  ce_name == "t-i", and p->path == "t"; we may
633                  * have "t/a" in the index.
634                  */
635                 if (p_len < ce_len && !memcmp(ce_name, p->path, p_len) &&
636                     ce_name[p_len] < '/')
637                         continue; /* keep looking */
638                 break;
639         }
640         return -1;
641 }
642
643 static struct cache_entry *find_cache_entry(struct traverse_info *info,
644                                             const struct name_entry *p)
645 {
646         int pos = find_cache_pos(info, p);
647         struct unpack_trees_options *o = info->data;
648
649         if (0 <= pos)
650                 return o->src_index->cache[pos];
651         else
652                 return NULL;
653 }
654
655 static void debug_path(struct traverse_info *info)
656 {
657         if (info->prev) {
658                 debug_path(info->prev);
659                 if (*info->prev->name.path)
660                         putchar('/');
661         }
662         printf("%s", info->name.path);
663 }
664
665 static void debug_name_entry(int i, struct name_entry *n)
666 {
667         printf("ent#%d %06o %s\n", i,
668                n->path ? n->mode : 0,
669                n->path ? n->path : "(missing)");
670 }
671
672 static void debug_unpack_callback(int n,
673                                   unsigned long mask,
674                                   unsigned long dirmask,
675                                   struct name_entry *names,
676                                   struct traverse_info *info)
677 {
678         int i;
679         printf("* unpack mask %lu, dirmask %lu, cnt %d ",
680                mask, dirmask, n);
681         debug_path(info);
682         putchar('\n');
683         for (i = 0; i < n; i++)
684                 debug_name_entry(i, names + i);
685 }
686
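/*
 * The traverse_trees() callback.  When o->merge is set, index entries
 * that sort before the current tree entries are unpacked first, and a
 * matching index entry, if any, is placed in src[0].  Non-directory
 * entries are then handled by unpack_nondirectories(), and directories
 * are recursed into (with a cache-tree shortcut for "diff-index
 * --cached").
 */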
687 static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, struct name_entry *names, struct traverse_info *info)
688 {
689         struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
690         struct unpack_trees_options *o = info->data;
691         const struct name_entry *p = names;
692
693         /* Find first entry with a real name (we could use "mask" too) */
694         while (!p->mode)
695                 p++;
696
697         if (o->debug_unpack)
698                 debug_unpack_callback(n, mask, dirmask, names, info);
699
700         /* Are we supposed to look at the index too? */
701         if (o->merge) {
702                 while (1) {
703                         int cmp;
704                         struct cache_entry *ce;
705
706                         if (o->diff_index_cached)
707                                 ce = next_cache_entry(o);
708                         else
709                                 ce = find_cache_entry(info, p);
710
711                         if (!ce)
712                                 break;
713                         cmp = compare_entry(ce, info, p);
714                         if (cmp < 0) {
715                                 if (unpack_index_entry(ce, o) < 0)
716                                         return unpack_failed(o, NULL);
717                                 continue;
718                         }
719                         if (!cmp) {
720                                 if (ce_stage(ce)) {
721                                         /*
722                                          * If we skip unmerged index
723                                          * entries, we'll skip this
724                                          * entry *and* the tree
725                                          * entries associated with it!
726                                          */
727                                         if (o->skip_unmerged) {
728                                                 add_same_unmerged(ce, o);
729                                                 return mask;
730                                         }
731                                 }
732                                 src[0] = ce;
733                         }
734                         break;
735                 }
736         }
737
738         if (unpack_nondirectories(n, mask, dirmask, src, names, info) < 0)
739                 return -1;
740
741         if (src[0]) {
742                 if (ce_stage(src[0]))
743                         mark_ce_used_same_name(src[0], o);
744                 else
745                         mark_ce_used(src[0], o);
746         }
747
748         /* Now handle any directories.. */
749         if (dirmask) {
750                 unsigned long conflicts = mask & ~dirmask;
751                 if (o->merge) {
752                         conflicts <<= 1;
753                         if (src[0])
754                                 conflicts |= 1;
755                 }
756
757                 /* special case: "diff-index --cached" looking at a tree */
758                 if (o->diff_index_cached &&
759                     n == 1 && dirmask == 1 && S_ISDIR(names->mode)) {
760                         int matches;
761                         matches = cache_tree_matches_traversal(o->src_index->cache_tree,
762                                                                names, info);
763                         /*
764                          * Everything under the name matches; skip the
765                          * entire hierarchy.  diff_index_cached codepath
766                          * special cases D/F conflicts in such a way that
767                          * it does not do any look-ahead, so this is safe.
768                          */
769                         if (matches) {
770                                 o->cache_bottom += matches;
771                                 return mask;
772                         }
773                 }
774
775                 if (traverse_trees_recursive(n, dirmask, conflicts,
776                                              names, info) < 0)
777                         return -1;
778                 return mask;
779         }
780
781         return mask;
782 }
783
784 /*
785  * N-way merge "len" trees.  Returns 0 on success, -1 on failure to manipulate the
786  * resulting index, -2 on failure to reflect the changes to the work tree.
787  */
788 int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options *o)
789 {
790         int i, ret;
791         static struct cache_entry *dfc;
792         struct exclude_list el;
793
794         if (len > MAX_UNPACK_TREES)
795                 die("unpack_trees takes at most %d trees", MAX_UNPACK_TREES);
796         memset(&state, 0, sizeof(state));
797         state.base_dir = "";
798         state.force = 1;
799         state.quiet = 1;
800         state.refresh_cache = 1;
801
802         memset(&el, 0, sizeof(el));
803         if (!core_apply_sparse_checkout || !o->update)
804                 o->skip_sparse_checkout = 1;
805         if (!o->skip_sparse_checkout) {
806                 if (add_excludes_from_file_to_list(git_path("info/sparse-checkout"), "", 0, NULL, &el, 0) < 0)
807                         o->skip_sparse_checkout = 1;
808                 else
809                         o->el = &el;
810         }
811
812         memset(&o->result, 0, sizeof(o->result));
813         o->result.initialized = 1;
814         o->result.timestamp.sec = o->src_index->timestamp.sec;
815         o->result.timestamp.nsec = o->src_index->timestamp.nsec;
816         o->merge_size = len;
817         mark_all_ce_unused(o->src_index);
818
819         if (!dfc)
820                 dfc = xcalloc(1, cache_entry_size(0));
821         o->df_conflict_entry = dfc;
822
823         if (len) {
824                 const char *prefix = o->prefix ? o->prefix : "";
825                 struct traverse_info info;
826
827                 setup_traverse_info(&info, prefix);
828                 info.fn = unpack_callback;
829                 info.data = o;
830                 info.show_all_errors = o->show_all_errors;
831
832                 if (o->prefix) {
833                         /*
834                          * Unpack existing index entries that sort before the
835                          * prefix the tree is spliced into.  Note that o->merge
836                          * is always true in this case.
837                          */
838                         while (1) {
839                                 struct cache_entry *ce = next_cache_entry(o);
840                                 if (!ce)
841                                         break;
842                                 if (ce_in_traverse_path(ce, &info))
843                                         break;
844                                 if (unpack_index_entry(ce, o) < 0)
845                                         goto return_failed;
846                         }
847                 }
848
849                 if (traverse_trees(len, t, &info) < 0)
850                         goto return_failed;
851         }
852
853         /* Any left-over entries in the index? */
854         if (o->merge) {
855                 while (1) {
856                         struct cache_entry *ce = next_cache_entry(o);
857                         if (!ce)
858                                 break;
859                         if (unpack_index_entry(ce, o) < 0)
860                                 goto return_failed;
861                 }
862         }
863         mark_all_ce_unused(o->src_index);
864
865         if (o->trivial_merges_only && o->nontrivial_merge) {
866                 ret = unpack_failed(o, "Merge requires file-level merging");
867                 goto done;
868         }
869
870         if (!o->skip_sparse_checkout) {
871                 int empty_worktree = 1;
872                 for (i = 0; i < o->result.cache_nr; i++) {
873                         struct cache_entry *ce = o->result.cache[i];
874
875                         if (apply_sparse_checkout(ce, o)) {
876                                 ret = -1;
877                                 goto done;
878                         }
879                         /*
880                          * Merge strategies may set CE_UPDATE|CE_REMOVE outside checkout
881                          * area as a result of ce_skip_worktree() shortcuts in
882                          * verify_absent() and verify_uptodate(). Clear them.
883                          */
884                         if (ce_skip_worktree(ce))
885                                 ce->ce_flags &= ~(CE_UPDATE | CE_REMOVE);
886                         else
887                                 empty_worktree = 0;
888
889                 }
890                 if (o->result.cache_nr && empty_worktree) {
891                         ret = unpack_failed(o, "Sparse checkout leaves no entry on working directory");
892                         goto done;
893                 }
894         }
895
896         o->src_index = NULL;
897         ret = check_updates(o) ? (-2) : 0;
898         if (o->dst_index)
899                 *o->dst_index = o->result;
900
901 done:
902         for (i = 0; i < el.nr; i++)
903                 free(el.excludes[i]);
904         if (el.excludes)
905                 free(el.excludes);
906
907         return ret;
908
909 return_failed:
910         if (o->show_all_errors)
911                 display_error_msgs(o);
912         mark_all_ce_unused(o->src_index);
913         ret = unpack_failed(o, NULL);
914         goto done;
915 }
916
917 /* Here come the merge functions */
918
919 static int reject_merge(struct cache_entry *ce, struct unpack_trees_options *o)
920 {
921         return add_rejected_path(o, ERROR_WOULD_OVERWRITE, ce->name);
922 }
923
924 static int same(struct cache_entry *a, struct cache_entry *b)
925 {
926         if (!!a != !!b)
927                 return 0;
928         if (!a && !b)
929                 return 1;
930         if ((a->ce_flags | b->ce_flags) & CE_CONFLICTED)
931                 return 0;
932         return a->ce_mode == b->ce_mode &&
933                !hashcmp(a->sha1, b->sha1);
934 }
935
936
937 /*
938  * When a CE gets turned into an unmerged entry, we
939  * want it to be up-to-date
940  */
941 static int verify_uptodate_1(struct cache_entry *ce,
942                                    struct unpack_trees_options *o,
943                                    enum unpack_trees_error_types error_type)
944 {
945         struct stat st;
946
947         if (o->index_only || (!((ce->ce_flags & CE_VALID) || ce_skip_worktree(ce)) && (o->reset || ce_uptodate(ce))))
948                 return 0;
949
950         if (!lstat(ce->name, &st)) {
951                 unsigned changed = ie_match_stat(o->src_index, ce, &st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE);
952                 if (!changed)
953                         return 0;
954                 /*
955                  * NEEDSWORK: the current default policy is to allow
956                  * a submodule to be out of sync wrt the supermodule
957                  * index.  This needs to be tightened later for
958                  * submodules that are marked to be automatically
959                  * checked out.
960                  */
961                 if (S_ISGITLINK(ce->ce_mode))
962                         return 0;
963                 errno = 0;
964         }
965         if (errno == ENOENT)
966                 return 0;
967         return o->gently ? -1 :
968                 add_rejected_path(o, error_type, ce->name);
969 }
970
971 static int verify_uptodate(struct cache_entry *ce,
972                            struct unpack_trees_options *o)
973 {
974         if (!o->skip_sparse_checkout && will_have_skip_worktree(ce, o))
975                 return 0;
976         return verify_uptodate_1(ce, o, ERROR_NOT_UPTODATE_FILE);
977 }
978
979 static int verify_uptodate_sparse(struct cache_entry *ce,
980                                   struct unpack_trees_options *o)
981 {
982         return verify_uptodate_1(ce, o, ERROR_SPARSE_NOT_UPTODATE_FILE);
983 }
984
985 static void invalidate_ce_path(struct cache_entry *ce, struct unpack_trees_options *o)
986 {
987         if (ce)
988                 cache_tree_invalidate_path(o->src_index->cache_tree, ce->name);
989 }
990
991 /*
992  * Check that checking out ce->sha1 in subdir ce->name is not
993  * going to overwrite any working files.
994  *
995  * Currently, git does not check out subprojects during a superproject
996  * checkout, so it is not going to overwrite anything.
997  */
998 static int verify_clean_submodule(struct cache_entry *ce,
999                                       enum unpack_trees_error_types error_type,
1000                                       struct unpack_trees_options *o)
1001 {
1002         return 0;
1003 }
1004
1005 static int verify_clean_subdirectory(struct cache_entry *ce,
1006                                       enum unpack_trees_error_types error_type,
1007                                       struct unpack_trees_options *o)
1008 {
1009         /*
1010          * we are about to extract "ce->name"; we would not want to lose
1011          * anything in the existing directory there.
1012          */
1013         int namelen;
1014         int i;
1015         struct dir_struct d;
1016         char *pathbuf;
1017         int cnt = 0;
1018         unsigned char sha1[20];
1019
1020         if (S_ISGITLINK(ce->ce_mode) &&
1021             resolve_gitlink_ref(ce->name, "HEAD", sha1) == 0) {
1022                 /* If we are not going to update the submodule, then
1023                  * we don't care.
1024                  */
1025                 if (!hashcmp(sha1, ce->sha1))
1026                         return 0;
1027                 return verify_clean_submodule(ce, error_type, o);
1028         }
1029
1030         /*
1031          * First let's make sure we do not have a local modification
1032          * in that directory.
1033          */
1034         namelen = strlen(ce->name);
1035         for (i = locate_in_src_index(ce, o);
1036              i < o->src_index->cache_nr;
1037              i++) {
1038                 struct cache_entry *ce2 = o->src_index->cache[i];
1039                 int len = ce_namelen(ce2);
1040                 if (len < namelen ||
1041                     strncmp(ce->name, ce2->name, namelen) ||
1042                     ce2->name[namelen] != '/')
1043                         break;
1044                 /*
1045                  * ce2->name is an entry in the subdirectory to be
1046                  * removed.
1047                  */
1048                 if (!ce_stage(ce2)) {
1049                         if (verify_uptodate(ce2, o))
1050                                 return -1;
1051                         add_entry(o, ce2, CE_REMOVE, 0);
1052                         mark_ce_used(ce2, o);
1053                 }
1054                 cnt++;
1055         }
1056
1057         /*
1058          * Then we need to make sure that we do not lose a locally
1059          * present file that is not ignored.
1060          */
1061         pathbuf = xmalloc(namelen + 2);
1062         memcpy(pathbuf, ce->name, namelen);
1063         strcpy(pathbuf+namelen, "/");
1064
1065         memset(&d, 0, sizeof(d));
1066         if (o->dir)
1067                 d.exclude_per_dir = o->dir->exclude_per_dir;
1068         i = read_directory(&d, pathbuf, namelen+1, NULL);
1069         free(pathbuf);
1070         if (i)
1071                 return o->gently ? -1 :
1072                         add_rejected_path(o, ERROR_NOT_UPTODATE_DIR, ce->name);
1073         return cnt;
1074 }
1075
1076 /*
1077  * This gets called when there was no index entry for the tree entry 'dst',
1078  * but we found a file in the working tree that 'lstat()' said was fine,
1079  * and we're on a case-insensitive filesystem.
1080  *
1081  * See if we can find a case-insensitive match in the index that also
1082  * matches the stat information, and assume it's that other file!
1083  */
1084 static int icase_exists(struct unpack_trees_options *o, struct cache_entry *dst, struct stat *st)
1085 {
1086         struct cache_entry *src;
1087
1088         src = index_name_exists(o->src_index, dst->name, ce_namelen(dst), 1);
1089         return src && !ie_match_stat(o->src_index, src, st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE);
1090 }
1091
1092 /*
1093  * We do not want to remove or overwrite a working tree file that
1094  * is not tracked, unless it is ignored.
1095  */
1096 static int verify_absent_1(struct cache_entry *ce,
1097                                  enum unpack_trees_error_types error_type,
1098                                  struct unpack_trees_options *o)
1099 {
1100         struct stat st;
1101
1102         if (o->index_only || o->reset || !o->update)
1103                 return 0;
1104
1105         if (has_symlink_or_noent_leading_path(ce->name, ce_namelen(ce)))
1106                 return 0;
1107
1108         if (!lstat(ce->name, &st)) {
1109                 int dtype = ce_to_dtype(ce);
1110                 struct cache_entry *result;
1111
1112                 /*
1113                  * It may be that the 'lstat()' succeeded even though
1114                  * target 'ce' was absent, because there is an old
1115                  * entry that is different only in case..
1116                  *
1117                  * Ignore that lstat() if it matches.
1118                  */
1119                 if (ignore_case && icase_exists(o, ce, &st))
1120                         return 0;
1121
1122                 if (o->dir && excluded(o->dir, ce->name, &dtype))
1123                         /*
1124                          * ce->name is explicitly excluded, so it is Ok to
1125                          * overwrite it.
1126                          */
1127                         return 0;
1128                 if (S_ISDIR(st.st_mode)) {
1129                         /*
1130                          * We are checking out path "foo" and
1131                          * found "foo/." in the working tree.
1132                          * This is tricky -- if we have modified
1133                          * files that are in "foo/" we would lose
1134                          * them.
1135                          */
1136                         if (verify_clean_subdirectory(ce, error_type, o) < 0)
1137                                 return -1;
1138                         return 0;
1139                 }
1140
1141                 /*
1142                  * The previous round may already have decided to
1143                  * delete this path, which is in a subdirectory that
1144                  * is being replaced with a blob.
1145                  */
1146                 result = index_name_exists(&o->result, ce->name, ce_namelen(ce), 0);
1147                 if (result) {
1148                         if (result->ce_flags & CE_REMOVE)
1149                                 return 0;
1150                 }
1151
1152                 return o->gently ? -1 :
1153                         add_rejected_path(o, error_type, ce->name);
1154         }
1155         return 0;
1156 }
1157 static int verify_absent(struct cache_entry *ce,
1158                          enum unpack_trees_error_types error_type,
1159                          struct unpack_trees_options *o)
1160 {
1161         if (!o->skip_sparse_checkout && will_have_skip_worktree(ce, o))
1162                 return 0;
1163         return verify_absent_1(ce, error_type, o);
1164 }
1165
1166 static int verify_absent_sparse(struct cache_entry *ce,
1167                          enum unpack_trees_error_types error_type,
1168                          struct unpack_trees_options *o)
1169 {
1170         enum unpack_trees_error_types orphaned_error = error_type;
1171         if (orphaned_error == ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN)
1172                 orphaned_error = ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN;
1173
1174         return verify_absent_1(ce, orphaned_error, o);
1175 }
1176
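/*
 * Take <merge> as the result for this path.  When the existing <old>
 * entry already matches it, the old stat information is carried over so
 * the entry stays up to date; otherwise the entry is flagged CE_UPDATE
 * so that it gets checked out.
 */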
1177 static int merged_entry(struct cache_entry *merge, struct cache_entry *old,
1178                 struct unpack_trees_options *o)
1179 {
1180         int update = CE_UPDATE;
1181
1182         if (!old) {
1183                 if (verify_absent(merge, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o))
1184                         return -1;
1185                 invalidate_ce_path(merge, o);
1186         } else if (!(old->ce_flags & CE_CONFLICTED)) {
1187                 /*
1188                  * See if we can re-use the old CE directly?
1189                  * That way we get the uptodate stat info.
1190                  *
1191                  * This also removes the UPDATE flag on a match; otherwise
1192                  * we will end up overwriting local changes in the work tree.
1193                  */
1194                 if (same(old, merge)) {
1195                         copy_cache_entry(merge, old);
1196                         update = 0;
1197                 } else {
1198                         if (verify_uptodate(old, o))
1199                                 return -1;
1200                         if (ce_skip_worktree(old))
1201                                 update |= CE_SKIP_WORKTREE;
1202                         invalidate_ce_path(old, o);
1203                 }
1204         } else {
1205                 /*
1206                  * Previously unmerged entry left as an existence
1207                  * marker by read_index_unmerged();
1208                  */
1209                 invalidate_ce_path(old, o);
1210         }
1211
1212         add_entry(o, merge, update, CE_STAGEMASK);
1213         return 1;
1214 }
1215
1216 static int deleted_entry(struct cache_entry *ce, struct cache_entry *old,
1217                 struct unpack_trees_options *o)
1218 {
1219         /* Did it exist in the index? */
1220         if (!old) {
1221                 if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o))
1222                         return -1;
1223                 return 0;
1224         }
1225         if (!(old->ce_flags & CE_CONFLICTED) && verify_uptodate(old, o))
1226                 return -1;
1227         add_entry(o, ce, CE_REMOVE, 0);
1228         invalidate_ce_path(ce, o);
1229         return 1;
1230 }
1231
1232 static int keep_entry(struct cache_entry *ce, struct unpack_trees_options *o)
1233 {
1234         add_entry(o, ce, 0, 0);
1235         return 1;
1236 }
1237
1238 #if DBRT_DEBUG
1239 static void show_stage_entry(FILE *o,
1240                              const char *label, const struct cache_entry *ce)
1241 {
1242         if (!ce)
1243                 fprintf(o, "%s (missing)\n", label);
1244         else
1245                 fprintf(o, "%s%06o %s %d\t%s\n",
1246                         label,
1247                         ce->ce_mode,
1248                         sha1_to_hex(ce->sha1),
1249                         ce_stage(ce),
1250                         ce->name);
1251 }
1252 #endif
1253
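/*
 * Three-way merge of the stages collected for one path.  <stages> holds
 * the current index entry at [0] and one entry per merged tree after
 * that; o->head_idx selects which of those trees is HEAD.
 */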
1254 int threeway_merge(struct cache_entry **stages, struct unpack_trees_options *o)
1255 {
1256         struct cache_entry *index;
1257         struct cache_entry *head;
1258         struct cache_entry *remote = stages[o->head_idx + 1];
1259         int count;
1260         int head_match = 0;
1261         int remote_match = 0;
1262
1263         int df_conflict_head = 0;
1264         int df_conflict_remote = 0;
1265
1266         int any_anc_missing = 0;
1267         int no_anc_exists = 1;
1268         int i;
1269
1270         for (i = 1; i < o->head_idx; i++) {
1271                 if (!stages[i] || stages[i] == o->df_conflict_entry)
1272                         any_anc_missing = 1;
1273                 else
1274                         no_anc_exists = 0;
1275         }
1276
1277         index = stages[0];
1278         head = stages[o->head_idx];
1279
1280         if (head == o->df_conflict_entry) {
1281                 df_conflict_head = 1;
1282                 head = NULL;
1283         }
1284
1285         if (remote == o->df_conflict_entry) {
1286                 df_conflict_remote = 1;
1287                 remote = NULL;
1288         }
1289
1290         /*
1291          * First, if there's a #16 situation, note that to prevent #13
1292          * and #14.
1293          */
1294         if (!same(remote, head)) {
1295                 for (i = 1; i < o->head_idx; i++) {
1296                         if (same(stages[i], head)) {
1297                                 head_match = i;
1298                         }
1299                         if (same(stages[i], remote)) {
1300                                 remote_match = i;
1301                         }
1302                 }
1303         }
1304
1305         /*
1306          * We start with cases where the index is allowed to match
1307          * something other than the head: #14(ALT) and #2ALT, where it
1308          * is permitted to match the result instead.
1309          */
1310         /* #14, #14ALT, #2ALT */
1311         if (remote && !df_conflict_head && head_match && !remote_match) {
1312                 if (index && !same(index, remote) && !same(index, head))
1313                         return o->gently ? -1 : reject_merge(index, o);
1314                 return merged_entry(remote, index, o);
1315         }
1316         /*
1317          * If we have an entry in the index cache, then we want to
1318          * make sure that it matches head.
1319          */
1320         if (index && !same(index, head))
1321                 return o->gently ? -1 : reject_merge(index, o);
1322
1323         if (head) {
1324                 /* #5ALT, #15 */
1325                 if (same(head, remote))
1326                         return merged_entry(head, index, o);
1327                 /* #13, #3ALT */
1328                 if (!df_conflict_remote && remote_match && !head_match)
1329                         return merged_entry(head, index, o);
1330         }
1331
1332         /* #1 */
1333         if (!head && !remote && any_anc_missing)
1334                 return 0;
1335
1336         /*
1337          * Under the "aggressive" rule, we resolve mostly trivial
1338          * cases that we historically had git-merge-one-file resolve.
1339          */
1340         if (o->aggressive) {
1341                 int head_deleted = !head;
1342                 int remote_deleted = !remote;
1343                 struct cache_entry *ce = NULL;
1344
1345                 if (index)
1346                         ce = index;
1347                 else if (head)
1348                         ce = head;
1349                 else if (remote)
1350                         ce = remote;
1351                 else {
1352                         for (i = 1; i < o->head_idx; i++) {
1353                                 if (stages[i] && stages[i] != o->df_conflict_entry) {
1354                                         ce = stages[i];
1355                                         break;
1356                                 }
1357                         }
1358                 }
1359
1360                 /*
1361                  * Deleted in both.
1362                  * Deleted in one and unchanged in the other.
1363                  */
1364                 if ((head_deleted && remote_deleted) ||
1365                     (head_deleted && remote && remote_match) ||
1366                     (remote_deleted && head && head_match)) {
1367                         if (index)
1368                                 return deleted_entry(index, index, o);
1369                         if (ce && !head_deleted) {
1370                                 if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o))
1371                                         return -1;
1372                         }
1373                         return 0;
1374                 }
1375                 /*
1376                  * Added in both, identically.
1377                  */
1378                 if (no_anc_exists && head && remote && same(head, remote))
1379                         return merged_entry(head, index, o);
1380
1381         }
1382
1383         /* Below are "no merge" cases, which require that the index be
1384          * up-to-date to avoid the files getting overwritten with
1385          * conflict resolution files.
1386          */
1387         if (index) {
1388                 if (verify_uptodate(index, o))
1389                         return -1;
1390         }
1391
1392         o->nontrivial_merge = 1;
1393
1394         /* #2, #3, #4, #6, #7, #9, #10, #11. */
1395         count = 0;
1396         if (!head_match || !remote_match) {
1397                 for (i = 1; i < o->head_idx; i++) {
1398                         if (stages[i] && stages[i] != o->df_conflict_entry) {
1399                                 keep_entry(stages[i], o);
1400                                 count++;
1401                                 break;
1402                         }
1403                 }
1404         }
1405 #if DBRT_DEBUG
1406         else {
1407                 fprintf(stderr, "read-tree: warning #16 detected\n");
1408                 show_stage_entry(stderr, "head   ", stages[head_match]);
1409                 show_stage_entry(stderr, "remote ", stages[remote_match]);
1410         }
1411 #endif
1412         if (head) { count += keep_entry(head, o); }
1413         if (remote) { count += keep_entry(remote, o); }
1414         return count;
1415 }
1416
1417 /*
1418  * Two-way merge.
1419  *
1420  * The rule is to "carry forward" what is in the index without losing
1421  * information across a "fast-forward", favoring a successful merge
1422  * over a merge failure when it makes sense.  For details of the
1423  * "carry forward" rule, please see <Documentation/git-read-tree.txt>.
1424  *
1425  */
1426 int twoway_merge(struct cache_entry **src, struct unpack_trees_options *o)
1427 {
1428         struct cache_entry *current = src[0];
1429         struct cache_entry *oldtree = src[1];
1430         struct cache_entry *newtree = src[2];
1431
1432         if (o->merge_size != 2)
1433                 return error("Cannot do a twoway merge of %d trees",
1434                              o->merge_size);
1435
1436         if (oldtree == o->df_conflict_entry)
1437                 oldtree = NULL;
1438         if (newtree == o->df_conflict_entry)
1439                 newtree = NULL;
1440
1441         if (current) {
1442                 if ((!oldtree && !newtree) || /* 4 and 5 */
1443                     (!oldtree && newtree &&
1444                      same(current, newtree)) || /* 6 and 7 */
1445                     (oldtree && newtree &&
1446                      same(oldtree, newtree)) || /* 14 and 15 */
1447                     (oldtree && newtree &&
1448                      !same(oldtree, newtree) && /* 18 and 19 */
1449                      same(current, newtree))) {
1450                         return keep_entry(current, o);
1451                 }
1452                 else if (oldtree && !newtree && same(current, oldtree)) {
1453                         /* 10 or 11 */
1454                         return deleted_entry(oldtree, current, o);
1455                 }
1456                 else if (oldtree && newtree &&
1457                          same(current, oldtree) && !same(current, newtree)) {
1458                         /* 20 or 21 */
1459                         return merged_entry(newtree, current, o);
1460                 }
1461                 else {
1462                         /* all other failures */
1463                         if (oldtree)
1464                                 return o->gently ? -1 : reject_merge(oldtree, o);
1465                         if (current)
1466                                 return o->gently ? -1 : reject_merge(current, o);
1467                         if (newtree)
1468                                 return o->gently ? -1 : reject_merge(newtree, o);
1469                         return -1;
1470                 }
1471         }
1472         else if (newtree) {
1473                 if (oldtree && !o->initial_checkout) {
1474                         /*
1475                          * deletion of the path was staged;
1476                          */
1477                         if (same(oldtree, newtree))
1478                                 return 1;
1479                         return reject_merge(oldtree, o);
1480                 }
1481                 return merged_entry(newtree, current, o);
1482         }
1483         return deleted_entry(oldtree, current, o);
1484 }
1485
1486 /*
1487  * Bind merge.
1488  *
1489  * Keep the index entries at stage0, collapse stage1 but make sure
1490  * stage0 does not have anything there.
1491  */
1492 int bind_merge(struct cache_entry **src,
1493                 struct unpack_trees_options *o)
1494 {
1495         struct cache_entry *old = src[0];
1496         struct cache_entry *a = src[1];
1497
1498         if (o->merge_size != 1)
1499                 return error("Cannot do a bind merge of %d trees",
1500                              o->merge_size);
1501         if (a && old)
1502                 return o->gently ? -1 :
1503                         error(ERRORMSG(o, ERROR_BIND_OVERLAP), a->name, old->name);
1504         if (!a)
1505                 return keep_entry(old, o);
1506         else
1507                 return merged_entry(a, NULL, o);
1508 }
1509
1510 /*
1511  * One-way merge.
1512  *
1513  * The rule is:
1514  * - take the stat information from stage0, take the data from stage1
1515  */
1516 int oneway_merge(struct cache_entry **src, struct unpack_trees_options *o)
1517 {
1518         struct cache_entry *old = src[0];
1519         struct cache_entry *a = src[1];
1520
1521         if (o->merge_size != 1)
1522                 return error("Cannot do a oneway merge of %d trees",
1523                              o->merge_size);
1524
1525         if (!a || a == o->df_conflict_entry)
1526                 return deleted_entry(old, old, o);
1527
1528         if (old && same(old, a)) {
1529                 int update = 0;
1530                 if (o->reset && !ce_uptodate(old) && !ce_skip_worktree(old)) {
1531                         struct stat st;
1532                         if (lstat(old->name, &st) ||
1533                             ie_match_stat(o->src_index, old, &st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE))
1534                                 update |= CE_UPDATE;
1535                 }
1536                 add_entry(o, old, update, 0);
1537                 return 0;
1538         }
1539         return merged_entry(a, old, o);
1540 }