memcg: do not recalculate section unnecessarily in init_section_page_cgroup
author Fernando Luis Vazquez Cao <fernando@oss.ntt.co.jp>
Thu, 8 Jan 2009 02:07:51 +0000 (18:07 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 8 Jan 2009 16:31:04 +0000 (08:31 -0800)
In init_section_page_cgroup() the section that a given pfn belongs to is
computed at the top of the function and, although the pfn/section
correspondence never changes, recomputed further down in the same
function.  By computing it just once and reusing that value we save a few
bytes in the object file and avoid redundant CPU cycles.  (A condensed
sketch of the resulting function follows the diff below.)

Signed-off-by: Fernando Luis Vazquez Cao <fernando@oss.ntt.co.jp>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/page_cgroup.c

index d6507a660ed64bb1a15129b9336457a616bd9008..df1e54a5ed1956a44854ca3320a8a8ae905c9db5 100644
@@ -103,13 +103,11 @@ struct page_cgroup *lookup_page_cgroup(struct page *page)
 /* __alloc_bootmem...() is protected by !slab_available() */
 static int __init_refok init_section_page_cgroup(unsigned long pfn)
 {
-       struct mem_section *section;
+       struct mem_section *section = __pfn_to_section(pfn);
        struct page_cgroup *base, *pc;
        unsigned long table_size;
        int nid, index;
 
-       section = __pfn_to_section(pfn);
-
        if (!section->page_cgroup) {
                nid = page_to_nid(pfn_to_page(pfn));
                table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
@@ -145,7 +143,6 @@ static int __init_refok init_section_page_cgroup(unsigned long pfn)
                __init_page_cgroup(pc, pfn + index);
        }
 
-       section = __pfn_to_section(pfn);
        section->page_cgroup = base - pfn;
        total_usage += table_size;
        return 0;
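
For reference, here is a condensed sketch of init_section_page_cgroup() as it
reads once both hunks are applied.  It is reconstructed only from the lines
visible in the diff above; the allocation and error-handling code between the
two hunks is elided:

    /* __alloc_bootmem...() is protected by !slab_available() */
    static int __init_refok init_section_page_cgroup(unsigned long pfn)
    {
            struct mem_section *section = __pfn_to_section(pfn); /* looked up once */
            struct page_cgroup *base, *pc;
            unsigned long table_size;
            int nid, index;

            if (!section->page_cgroup) {
                    nid = page_to_nid(pfn_to_page(pfn));
                    table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
                    /* ... table allocation, elided ... */
            }

            /* ... error handling and the per-page __init_page_cgroup() loop,
               elided ... */

            /* the pointer computed at the top is reused here; no second
               __pfn_to_section() call */
            section->page_cgroup = base - pfn;
            total_usage += table_size;
            return 0;
    }

Since __pfn_to_section() maps a given pfn to the same section every time, and
pfn is never modified inside this function, hoisting the lookup into the
declaration is safe.  Presumably the compiler could not eliminate the second
lookup on its own across the intervening function calls, which is why the
change also shrinks the object file.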