Pileus Git - ~andy/linux/commitdiff
x86, mm: use limit_pfn for end pfn
author: Yinghai Lu <yinghai@kernel.org>
Sat, 17 Nov 2012 03:39:15 +0000 (19:39 -0800)
committer: H. Peter Anvin <hpa@linux.intel.com>
Sat, 17 Nov 2012 19:59:43 +0000 (11:59 -0800)
instead of shifting end to get that.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1353123563-3103-39-git-send-email-yinghai@kernel.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
arch/x86/mm/init.c

index 4bf1c5374928b678e9c158748533b679bdb40da9..f410dc6f843eb6b55f95ab2de85cee64bd01d5ce 100644 (file)
@@ -203,10 +203,12 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
                                     unsigned long start,
                                     unsigned long end)
 {
-       unsigned long start_pfn, end_pfn;
+       unsigned long start_pfn, end_pfn, limit_pfn;
        unsigned long pfn;
        int i;
 
+       limit_pfn = PFN_DOWN(end);
+
        /* head if not big page alignment ? */
        pfn = start_pfn = PFN_DOWN(start);
 #ifdef CONFIG_X86_32
@@ -223,8 +225,8 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
 #else /* CONFIG_X86_64 */
        end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
 #endif
-       if (end_pfn > PFN_DOWN(end))
-               end_pfn = PFN_DOWN(end);
+       if (end_pfn > limit_pfn)
+               end_pfn = limit_pfn;
        if (start_pfn < end_pfn) {
                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
                pfn = end_pfn;
@@ -233,11 +235,11 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
        /* big page (2M) range */
        start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
 #ifdef CONFIG_X86_32
-       end_pfn = PFN_DOWN(round_down(end, PMD_SIZE));
+       end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
 #else /* CONFIG_X86_64 */
        end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
-       if (end_pfn > PFN_DOWN(round_down(end, PMD_SIZE)))
-               end_pfn = PFN_DOWN(round_down(end, PMD_SIZE));
+       if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE)))
+               end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
 #endif
 
        if (start_pfn < end_pfn) {
@@ -249,7 +251,7 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
 #ifdef CONFIG_X86_64
        /* big page (1G) range */
        start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
-       end_pfn = PFN_DOWN(round_down(end, PUD_SIZE));
+       end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE));
        if (start_pfn < end_pfn) {
                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                                page_size_mask &
@@ -259,7 +261,7 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
 
        /* tail is not big page (1G) alignment */
        start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
-       end_pfn = PFN_DOWN(round_down(end, PMD_SIZE));
+       end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
        if (start_pfn < end_pfn) {
                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                                page_size_mask & (1<<PG_LEVEL_2M));
@@ -269,7 +271,7 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
 
        /* tail is not big page (2M) alignment */
        start_pfn = pfn;
-       end_pfn = PFN_DOWN(end);
+       end_pfn = limit_pfn;
        nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
 
        /* try to merge same page size and continuous */