/*
 * kexec.c - kexec system call
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>

#include <asm/uaccess.h>
#include <asm/sections.h>
/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;

/* vmcoreinfo stuff */
static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
        .name  = "Crash kernel",
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM

struct resource crashk_low_res = {
        .name  = "Crash kernel low",
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM
int kexec_should_crash(struct task_struct *p)
        if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial and easy.  For
 * others it is still a simple predictable page table to set up.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *
 * KIMAGE_NO_DEST is an impossible destination address..., for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)
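
/*
 * A rough sketch of the entry list the assembly stub consumes (the
 * physical addresses are illustrative only; see kimage_add_entry(),
 * kimage_set_destination(), kimage_add_page() and kimage_terminate()
 * below for how it is built):
 *
 *      0x01000000 | IND_DESTINATION   start copying to 0x01000000
 *      0x37c52000 | IND_SOURCE        copy this page there, dest += PAGE_SIZE
 *      0x37c8f000 | IND_SOURCE        next source page, dest += PAGE_SIZE
 *      0x38a10000 | IND_INDIRECTION   continue with the entries in this page
 *      ...
 *      IND_DONE                       end of the list
 */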
static int kimage_is_destination_range(struct kimage *image,
                                       unsigned long start, unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
                           unsigned long nr_segments,
                           struct kexec_segment __user *segments)
        size_t segment_bytes;
        struct kimage *image;

        /* Allocate a controlling structure */
        image = kzalloc(sizeof(*image), GFP_KERNEL);

        image->entry = &image->head;
        image->last_entry = &image->head;
        image->control_page = ~0; /* By default this does not apply */
        image->start = entry;
        image->type = KEXEC_TYPE_DEFAULT;

        /* Initialize the list of control pages */
        INIT_LIST_HEAD(&image->control_pages);

        /* Initialize the list of destination pages */
        INIT_LIST_HEAD(&image->dest_pages);

        /* Initialize the list of unusable pages */
        INIT_LIST_HEAD(&image->unuseable_pages);

        /* Read in the segments */
        image->nr_segments = nr_segments;
        segment_bytes = nr_segments * sizeof(*segments);
        result = copy_from_user(image->segment, segments, segment_bytes);
        /*
         * Verify we have good destination addresses.  The caller is
         * responsible for making certain we don't attempt to load
         * the new image into invalid or reserved areas of RAM.  This
         * just verifies it is an address we can use.
         *
         * Since the kernel does everything in page size chunks ensure
         * the destination addresses are page aligned.  Too many
         * special cases crop up when we don't do this.  The most
         * insidious is getting overlapping destination addresses
         * simply because addresses are changed to page size
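         *
         * For example (illustrative numbers only, assuming 4 KiB pages):
         * a segment with mem = 0x100800 and memsz = 0x1000 fails the
         * (mstart & ~PAGE_MASK) check below and is rejected with
         * -EADDRNOTAVAIL; had it been accepted and silently rounded to
         * page granularity it would span 0x100000-0x101fff and could
         * collide with a second segment legitimately placed at 0x101000.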
         */
        result = -EADDRNOTAVAIL;
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend   = mstart + image->segment[i].memsz;
                if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
                if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
        /* Verify our destination addresses do not overlap.
         * If we allow overlapping destination addresses through,
         * very weird things can happen with no easy explanation
         * as one segment stops on another.
         */
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend   = mstart + image->segment[i].memsz;
                for (j = 0; j < i; j++) {
                        unsigned long pstart, pend;
                        pstart = image->segment[j].mem;
                        pend   = pstart + image->segment[j].memsz;
                        /* Do the segments overlap ? */
                        if ((mend > pstart) && (mstart < pend))
        /* Ensure our buffer sizes are strictly less than
         * our memory sizes.  This should always be the case,
         * and it is easier to check up front than to be surprised
         */
        for (i = 0; i < nr_segments; i++) {
                if (image->segment[i].bufsz > image->segment[i].memsz)
static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
                               unsigned long nr_segments,
                               struct kexec_segment __user *segments)
        struct kimage *image;

        /* Allocate and initialize a controlling structure */
        result = do_kimage_alloc(&image, entry, nr_segments, segments);

        /*
         * Find a location for the control code buffer, and add it to
         * the vector of segments so that its pages will also be
         * counted as destination pages.
         */
        image->control_code_page = kimage_alloc_control_pages(image,
                                           get_order(KEXEC_CONTROL_PAGE_SIZE));
        if (!image->control_code_page) {
                printk(KERN_ERR "Could not allocate control_code_buffer\n");

        image->swap_page = kimage_alloc_control_pages(image, 0);
        if (!image->swap_page) {
                printk(KERN_ERR "Could not allocate swap buffer\n");
static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
                              unsigned long nr_segments,
                              struct kexec_segment __user *segments)
        struct kimage *image;

        /* Verify we have a valid entry point */
        if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
                result = -EADDRNOTAVAIL;

        /* Allocate and initialize a controlling structure */
        result = do_kimage_alloc(&image, entry, nr_segments, segments);

        /* Enable the special crash kernel control page */
        image->control_page = crashk_res.start;
        image->type = KEXEC_TYPE_CRASH;

        /*
         * Verify we have good destination addresses.  Normally
         * the caller is responsible for making certain we don't
         * attempt to load the new image into invalid or reserved
         * areas of RAM.  But crash kernels are preloaded into a
         * reserved area of RAM.  We must ensure the addresses
         * are in the reserved area otherwise preloading the
         * kernel could corrupt things.
         */
        result = -EADDRNOTAVAIL;
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend   = mstart + image->segment[i].memsz - 1;
                /* Ensure we are within the crash kernel limits */
                if ((mstart < crashk_res.start) || (mend > crashk_res.end))

        /*
         * Find a location for the control code buffer, and add it
         * to the vector of segments so that its pages will also be
         * counted as destination pages.
         */
        image->control_code_page = kimage_alloc_control_pages(image,
                                           get_order(KEXEC_CONTROL_PAGE_SIZE));
        if (!image->control_code_page) {
                printk(KERN_ERR "Could not allocate control_code_buffer\n");
static int kimage_is_destination_range(struct kimage *image,
        for (i = 0; i < image->nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend   = mstart + image->segment[i].memsz;
                if ((end > mstart) && (start < mend))
static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
        pages = alloc_pages(gfp_mask, order);
                unsigned int count, i;

                pages->mapping = NULL;
                set_page_private(pages, order);
                for (i = 0; i < count; i++)
                        SetPageReserved(pages + i);

static void kimage_free_pages(struct page *page)
        unsigned int order, count, i;

        order = page_private(page);
        for (i = 0; i < count; i++)
                ClearPageReserved(page + i);
        __free_pages(page, order);
static void kimage_free_page_list(struct list_head *list)
        struct list_head *pos, *next;

        list_for_each_safe(pos, next, list) {
                page = list_entry(pos, struct page, lru);
                list_del(&page->lru);
                kimage_free_pages(page);
static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
        /* Control pages are special: they are the intermediaries
         * that are needed while we copy the rest of the pages
         * to their final resting place.  As such they must
         * not conflict with either the destination addresses
         * or memory the kernel is already using.
         *
         * The only case where we really need more than one of
         * these is for architectures where we cannot disable
         * the MMU and must instead generate an identity mapped
         * page table for all of the memory.
         *
         * At worst this runs in O(N) of the image size.
         */
        struct list_head extra_pages;

        INIT_LIST_HEAD(&extra_pages);

        /* Loop while I can allocate a page and the page allocated
         * is a destination page.
         */
                unsigned long pfn, epfn, addr, eaddr;

                pages = kimage_alloc_pages(GFP_KERNEL, order);
                pfn   = page_to_pfn(pages);
                addr  = pfn << PAGE_SHIFT;
                eaddr = epfn << PAGE_SHIFT;
                if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
                    kimage_is_destination_range(image, addr, eaddr)) {
                        list_add(&pages->lru, &extra_pages);

                /* Remember the allocated page... */
                list_add(&pages->lru, &image->control_pages);
                /* Because the page is already in its destination
                 * location we will never allocate another page at
                 * that address.  Therefore kimage_alloc_pages
                 * will not return it (again) and we don't need
                 * to give it an entry in image->segment[].
                 */

        /* Deal with the destination pages I have inadvertently allocated.
         *
         * Ideally I would convert multi-page allocations into single
         * page allocations, and add everything to image->dest_pages.
         *
         * For now it is simpler to just free the pages.
         */
        kimage_free_page_list(&extra_pages);
static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
        /* Control pages are special: they are the intermediaries
         * that are needed while we copy the rest of the pages
         * to their final resting place.  As such they must
         * not conflict with either the destination addresses
         * or memory the kernel is already using.
         *
         * Control pages are also the only pages we must allocate
         * when loading a crash kernel.  All of the other pages
         * are specified by the segments and we just memcpy
         * into them directly.
         *
         * The only case where we really need more than one of
         * these is for architectures where we cannot disable
         * the MMU and must instead generate an identity mapped
         * page table for all of the memory.
         *
         * Given the low demand this implements a very simple
         * allocator that finds the first hole of the appropriate
         * size in the reserved memory region, and allocates all
         * of the memory up to and including the hole.
         */
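        /*
         * Worked example (illustrative numbers only): with 4 KiB pages,
         * order = 1 gives size = 0x2000, so if image->control_page is
         * 0x2000400 the first candidate hole is
         *   hole_start = (0x2000400 + 0x1fff) & ~0x1fff = 0x2002000
         *   hole_end   = 0x2002000 + 0x2000 - 1         = 0x2003fff
         * and the loop below slides this window past any overlapping
         * segment until it fits inside crashk_res.
         */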
        unsigned long hole_start, hole_end, size;

        size = (1 << order) << PAGE_SHIFT;
        hole_start = (image->control_page + (size - 1)) & ~(size - 1);
        hole_end   = hole_start + size - 1;
        while (hole_end <= crashk_res.end) {
                if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
                /* See if I overlap any of the segments */
                for (i = 0; i < image->nr_segments; i++) {
                        unsigned long mstart, mend;

                        mstart = image->segment[i].mem;
                        mend   = mstart + image->segment[i].memsz - 1;
                        if ((hole_end >= mstart) && (hole_start <= mend)) {
                                /* Advance the hole to the end of the segment */
                                hole_start = (mend + (size - 1)) & ~(size - 1);
                                hole_end   = hole_start + size - 1;
                /* If I don't overlap any segments I have found my hole! */
                if (i == image->nr_segments) {
                        pages = pfn_to_page(hole_start >> PAGE_SHIFT);
                        image->control_page = hole_end;
struct page *kimage_alloc_control_pages(struct kimage *image,
        struct page *pages = NULL;

        switch (image->type) {
        case KEXEC_TYPE_DEFAULT:
                pages = kimage_alloc_normal_control_pages(image, order);
        case KEXEC_TYPE_CRASH:
                pages = kimage_alloc_crash_control_pages(image, order);
static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
        if (*image->entry != 0)

        if (image->entry == image->last_entry) {
                kimage_entry_t *ind_page;

                page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
                ind_page = page_address(page);
                *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
                image->entry = ind_page;
                image->last_entry = ind_page +
                                    ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);

        *image->entry = entry;
static int kimage_set_destination(struct kimage *image,
                                  unsigned long destination)
        destination &= PAGE_MASK;
        result = kimage_add_entry(image, destination | IND_DESTINATION);
                image->destination = destination;

static int kimage_add_page(struct kimage *image, unsigned long page)
        result = kimage_add_entry(image, page | IND_SOURCE);
                image->destination += PAGE_SIZE;

static void kimage_free_extra_pages(struct kimage *image)
        /* Walk through and free any extra destination pages I may have */
        kimage_free_page_list(&image->dest_pages);

        /* Walk through and free any unusable pages I have cached */
        kimage_free_page_list(&image->unuseable_pages);

static void kimage_terminate(struct kimage *image)
        if (*image->entry != 0)

        *image->entry = IND_DONE;
#define for_each_kimage_entry(image, ptr, entry) \
        for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
             ptr = (entry & IND_INDIRECTION) ? \
                   phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
static void kimage_free_entry(kimage_entry_t entry)
        page = pfn_to_page(entry >> PAGE_SHIFT);
        kimage_free_pages(page);

static void kimage_free(struct kimage *image)
        kimage_entry_t *ptr, entry;
        kimage_entry_t ind = 0;

        kimage_free_extra_pages(image);
        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_INDIRECTION) {
                        /* Free the previous indirection page */
                        if (ind & IND_INDIRECTION)
                                kimage_free_entry(ind);
                        /* Save this indirection page until we are
                else if (entry & IND_SOURCE)
                        kimage_free_entry(entry);

        /* Free the final indirection page */
        if (ind & IND_INDIRECTION)
                kimage_free_entry(ind);

        /* Handle any machine specific cleanup */
        machine_kexec_cleanup(image);

        /* Free the kexec control pages... */
        kimage_free_page_list(&image->control_pages);
static kimage_entry_t *kimage_dst_used(struct kimage *image,
        kimage_entry_t *ptr, entry;
        unsigned long destination = 0;

        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_DESTINATION)
                        destination = entry & PAGE_MASK;
                else if (entry & IND_SOURCE) {
                        if (page == destination)
                        destination += PAGE_SIZE;
static struct page *kimage_alloc_page(struct kimage *image,
                                      unsigned long destination)
        /*
         * Here we implement safeguards to ensure that a source page
         * is not copied to its destination page before the data on
         * the destination page is no longer useful.
         *
         * To do this we maintain the invariant that a source page is
         * either its own destination page, or it is not a
         * destination page at all.
         *
         * That is slightly stronger than required, but the proof
         * that no problems will occur is trivial, and the
         * implementation is simple to verify.
         *
         * When allocating all pages normally this algorithm will run
         * in O(N) time, but in the worst case it will run in O(N^2)
         * time.  If the runtime is a problem the data structures can
        /*
         * Walk through the list of destination pages, and see if I
         */
        list_for_each_entry(page, &image->dest_pages, lru) {
                addr = page_to_pfn(page) << PAGE_SHIFT;
                if (addr == destination) {
                        list_del(&page->lru);

                /* Allocate a page, if we run out of memory give up */
                page = kimage_alloc_pages(gfp_mask, 0);

                /* If the page cannot be used file it away */
                if (page_to_pfn(page) >
                                (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
                        list_add(&page->lru, &image->unuseable_pages);
                addr = page_to_pfn(page) << PAGE_SHIFT;

                /* If it is the destination page we want, use it */
                if (addr == destination)

                /* If the page is not a destination page use it */
                if (!kimage_is_destination_range(image, addr,

                /*
                 * I know that the page is someone's destination page.
                 * See if there is already a source page for this
                 * destination page.  And if so swap the source pages.
                 */
                old = kimage_dst_used(image, addr);
                        unsigned long old_addr;
                        struct page *old_page;

                        old_addr = *old & PAGE_MASK;
                        old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
                        copy_highpage(page, old_page);
                        *old = addr | (*old & ~PAGE_MASK);

                        /* The old page I have found cannot be a
                         * destination page, so return it if its
                         * gfp_flags honor the ones passed in.
                         */
                        if (!(gfp_mask & __GFP_HIGHMEM) &&
                            PageHighMem(old_page)) {
                                kimage_free_pages(old_page);

                /* Place the page on the destination list I
                list_add(&page->lru, &image->dest_pages);
static int kimage_load_normal_segment(struct kimage *image,
                                      struct kexec_segment *segment)
        unsigned long ubytes, mbytes;
        unsigned char __user *buf;

        ubytes = segment->bufsz;
        mbytes = segment->memsz;
        maddr  = segment->mem;

        result = kimage_set_destination(image, maddr);

                size_t uchunk, mchunk;

                page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
                result = kimage_add_page(image, page_to_pfn(page)

                /* Start with a clear page */
                ptr += maddr & ~PAGE_MASK;
                mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
                result = copy_from_user(ptr, buf, uchunk);
static int kimage_load_crash_segment(struct kimage *image,
                                     struct kexec_segment *segment)
        /* For crash dump kernels we simply copy the data from
         * user space to its destination.
         * We do things a page at a time for the sake of kmap.
         */
        unsigned long ubytes, mbytes;
        unsigned char __user *buf;

        ubytes = segment->bufsz;
        mbytes = segment->memsz;
        maddr  = segment->mem;

                size_t uchunk, mchunk;

                page = pfn_to_page(maddr >> PAGE_SHIFT);
                ptr += maddr & ~PAGE_MASK;
                mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
                if (uchunk > ubytes) {
                        /* Zero the trailing part of the page */
                        memset(ptr + uchunk, 0, mchunk - uchunk);
                result = copy_from_user(ptr, buf, uchunk);
                kexec_flush_icache_page(page);
static int kimage_load_segment(struct kimage *image,
                               struct kexec_segment *segment)
        int result = -ENOMEM;

        switch (image->type) {
        case KEXEC_TYPE_DEFAULT:
                result = kimage_load_normal_segment(image, segment);
        case KEXEC_TYPE_CRASH:
                result = kimage_load_crash_segment(image, segment);
/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down, preventing ongoing DMAs and placing
 *   the devices in a consistent state so a later kernel can
 *
 * - A machine specific part that includes the syscall number
 *   and then copies the image to its final destination.  And
 *   jumps into the image at entry.
 *
 * kexec does not sync or unmount filesystems, so if you need
 * that to happen you need to do that yourself.
 */
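
/*
 * For illustration only: a minimal user-space sketch of driving this
 * syscall via syscall(2) (glibc has no wrapper).  The addresses and
 * sizes below are made up; real loaders such as kexec-tools build the
 * segment list from the kernel image and boot parameters.
 *
 *      #include <linux/kexec.h>
 *      #include <sys/syscall.h>
 *      #include <unistd.h>
 *
 *      struct kexec_segment seg = {
 *              .buf   = kernel_buf,            image bytes in user memory
 *              .bufsz = kernel_len,
 *              .mem   = (void *)0x100000,      example physical destination
 *              .memsz = (kernel_len + 4095) & ~4095UL,
 *      };
 *      long err = syscall(__NR_kexec_load, entry_pa, 1UL, &seg,
 *                         KEXEC_ARCH_DEFAULT);
 */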
struct kimage *kexec_image;
struct kimage *kexec_crash_image;

static DEFINE_MUTEX(kexec_mutex);

SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
                struct kexec_segment __user *, segments, unsigned long, flags)
        struct kimage **dest_image, *image;

        /* We only trust the superuser with rebooting the system. */
        if (!capable(CAP_SYS_BOOT))

        /*
         * Verify we have a legal set of flags.
         * This leaves us room for future extensions.
         */
        if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))

        /* Verify we are on the appropriate architecture */
        if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
            ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))

        /* Put an artificial cap on the number
         * of segments passed to kexec_load.
         */
        if (nr_segments > KEXEC_SEGMENT_MAX)

        /* Because we write directly to the reserved memory
         * region when loading crash kernels we need a mutex here to
         * prevent multiple crash kernels from attempting to load
         * simultaneously, and to prevent a crash kernel from loading
         * over the top of an in-use crash kernel.
         *
         * KISS: always take the mutex.
         */
        if (!mutex_trylock(&kexec_mutex))
        dest_image = &kexec_image;
        if (flags & KEXEC_ON_CRASH)
                dest_image = &kexec_crash_image;
        if (nr_segments > 0) {
                /* Loading another kernel to reboot into */
                if ((flags & KEXEC_ON_CRASH) == 0)
                        result = kimage_normal_alloc(&image, entry,
                                                     nr_segments, segments);
                /* Loading another kernel to switch to if this one crashes */
                else if (flags & KEXEC_ON_CRASH) {
                        /* Free any current crash dump kernel before
                        kimage_free(xchg(&kexec_crash_image, NULL));
                        result = kimage_crash_alloc(&image, entry,
                                                    nr_segments, segments);
                        crash_map_reserved_pages();

                if (flags & KEXEC_PRESERVE_CONTEXT)
                        image->preserve_context = 1;
                result = machine_kexec_prepare(image);

                for (i = 0; i < nr_segments; i++) {
                        result = kimage_load_segment(image, &image->segment[i]);

                kimage_terminate(image);
                if (flags & KEXEC_ON_CRASH)
                        crash_unmap_reserved_pages();

        /* Install the new kernel, and uninstall the old */
        image = xchg(dest_image, image);

        mutex_unlock(&kexec_mutex);
/*
 * Add and remove page tables for crashkernel memory
 *
 * Provide an empty default implementation here -- architecture
 * code may override this
 */
void __weak crash_map_reserved_pages(void)

void __weak crash_unmap_reserved_pages(void)
#ifdef CONFIG_COMPAT
asmlinkage long compat_sys_kexec_load(unsigned long entry,
                                      unsigned long nr_segments,
                                      struct compat_kexec_segment __user *segments,
                                      unsigned long flags)
        struct compat_kexec_segment in;
        struct kexec_segment out, __user *ksegments;
        unsigned long i, result;

        /* Don't allow clients that don't understand the native
         * architecture to do anything.
         */
        if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)

        if (nr_segments > KEXEC_SEGMENT_MAX)

        ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
        for (i = 0; i < nr_segments; i++) {
                result = copy_from_user(&in, &segments[i], sizeof(in));

                out.buf   = compat_ptr(in.buf);
                out.bufsz = in.bufsz;
                out.memsz = in.memsz;

                result = copy_to_user(&ksegments[i], &out, sizeof(out));

        return sys_kexec_load(entry, nr_segments, ksegments, flags);
void crash_kexec(struct pt_regs *regs)
        /* Take the kexec_mutex here to prevent sys_kexec_load
         * running on one cpu from replacing the crash kernel
         * we are using after a panic on a different cpu.
         *
         * If the crash kernel was not located in a fixed area
         * of memory the xchg(&kexec_crash_image) would be
         * sufficient.  But since I reuse the memory...
         */
        if (mutex_trylock(&kexec_mutex)) {
                if (kexec_crash_image) {
                        struct pt_regs fixed_regs;

                        crash_setup_regs(&fixed_regs, regs);
                        crash_save_vmcoreinfo();
                        machine_crash_shutdown(&fixed_regs);
                        machine_kexec(kexec_crash_image);
                mutex_unlock(&kexec_mutex);
size_t crash_get_memory_size(void)
        mutex_lock(&kexec_mutex);
        if (crashk_res.end != crashk_res.start)
                size = resource_size(&crashk_res);
        mutex_unlock(&kexec_mutex);

void __weak crash_free_reserved_phys_range(unsigned long begin,
        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
                init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
                free_page((unsigned long)__va(addr));
int crash_shrink_memory(unsigned long new_size)
        unsigned long start, end;
        unsigned long old_size;
        struct resource *ram_res;

        mutex_lock(&kexec_mutex);

        if (kexec_crash_image) {
        start = crashk_res.start;
        end   = crashk_res.end;
        old_size = (end == 0) ? 0 : end - start + 1;
        if (new_size >= old_size) {
                ret = (new_size == old_size) ? 0 : -EINVAL;

        ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);

        start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
        end   = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);

        crash_map_reserved_pages();
        crash_free_reserved_phys_range(end, crashk_res.end);

        if ((start == end) && (crashk_res.parent != NULL))
                release_resource(&crashk_res);

        ram_res->start = end;
        ram_res->end   = crashk_res.end;
        ram_res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
        ram_res->name  = "System RAM";

        crashk_res.end = end - 1;

        insert_resource(&iomem_resource, ram_res);
        crash_unmap_reserved_pages();

        mutex_unlock(&kexec_mutex);
static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
        struct elf_note note;

        note.n_namesz = strlen(name) + 1;
        note.n_descsz = data_len;
        memcpy(buf, &note, sizeof(note));
        buf += (sizeof(note) + 3)/4;
        memcpy(buf, name, note.n_namesz);
        buf += (note.n_namesz + 3)/4;
        memcpy(buf, data, note.n_descsz);
        buf += (note.n_descsz + 3)/4;

static void final_note(u32 *buf)
        struct elf_note note;

        memcpy(buf, &note, sizeof(note));
void crash_save_cpu(struct pt_regs *regs, int cpu)
        struct elf_prstatus prstatus;

        if ((cpu < 0) || (cpu >= nr_cpu_ids))

        /* Using ELF notes here is opportunistic.
         * I need a well-defined structure format
         * for the data I pass, and I need tags
         * on the data to indicate what information I have
         * squirrelled away.  ELF notes happen to provide
         * all of that, so there is no need to invent something new.
         */
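        /*
         * Sketch of the resulting per-cpu buffer as laid out by
         * append_elf_note()/final_note() above, assuming the default
         * KEXEC_CORE_NOTE_NAME of "CORE" (everything padded to 4-byte
         * boundaries):
         *
         *   struct elf_note { n_namesz = 5, n_descsz = sizeof(prstatus),
         *                     n_type = NT_PRSTATUS }
         *   "CORE\0" padded to 8 bytes
         *   struct elf_prstatus payload, padded up to a 4-byte boundary
         *   empty terminating note written by final_note()
         */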
        buf = (u32 *)per_cpu_ptr(crash_notes, cpu);

        memset(&prstatus, 0, sizeof(prstatus));
        prstatus.pr_pid = current->pid;
        elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
        buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
                              &prstatus, sizeof(prstatus));

static int __init crash_notes_memory_init(void)
        /* Allocate memory for saving cpu registers. */
        crash_notes = alloc_percpu(note_buf_t);
                printk("Kexec: Memory allocation for saving cpu register"
                       " states failed\n");
module_init(crash_notes_memory_init)
/*
 * parsing the "crashkernel" commandline
 *
 * this code is intended to be called from architecture specific code
 */

/*
 * This function parses command lines in the format
 *
 *    crashkernel=ramsize-range:size[,...][@offset]
 *
 * The function returns 0 on success and -EINVAL on failure.
 */
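
/*
 * Example (values are illustrative):
 *
 *    crashkernel=512M-2G:64M,2G-:128M
 *
 * reserves 64M of RAM if the system has between 512M and 2G of memory,
 * 128M if it has 2G or more, and nothing below 512M; an optional
 * trailing @offset pins the reservation to a fixed physical address.
 */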
static int __init parse_crashkernel_mem(char *cmdline,
                                        unsigned long long system_ram,
                                        unsigned long long *crash_size,
                                        unsigned long long *crash_base)
        char *cur = cmdline, *tmp;

        /* for each entry of the comma-separated list */
                unsigned long long start, end = ULLONG_MAX, size;

                /* get the start of the range */
                start = memparse(cur, &tmp);
                        pr_warning("crashkernel: Memory value expected\n");
                        pr_warning("crashkernel: '-' expected\n");

                /* if no ':' is here, then we read the end */
                        end = memparse(cur, &tmp);
                                pr_warning("crashkernel: Memory "
                                           "value expected\n");
                                pr_warning("crashkernel: end <= start\n");
                        pr_warning("crashkernel: ':' expected\n");

                size = memparse(cur, &tmp);
                        pr_warning("Memory value expected\n");
                if (size >= system_ram) {
                        pr_warning("crashkernel: invalid size\n");

                if (system_ram >= start && system_ram < end) {
        } while (*cur++ == ',');

        if (*crash_size > 0) {
                while (*cur && *cur != ' ' && *cur != '@')
                *crash_base = memparse(cur, &tmp);
                        pr_warning("Memory value expected "
/*
 * This function parses "simple" (old) crashkernel command lines like
 *
 *    crashkernel=size[@offset]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
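
/*
 * Example (values are illustrative): crashkernel=128M@16M reserves 128M
 * of RAM for the crash kernel starting at physical address 16M; when the
 * @offset part is omitted the architecture code picks the base address.
 */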
static int __init parse_crashkernel_simple(char *cmdline,
                                           unsigned long long *crash_size,
                                           unsigned long long *crash_base)
        char *cur = cmdline;

        *crash_size = memparse(cmdline, &cur);
        if (cmdline == cur) {
                pr_warning("crashkernel: memory value expected\n");

                *crash_base = memparse(cur+1, &cur);
        else if (*cur != ' ' && *cur != '\0') {
                pr_warning("crashkernel: unrecognized char\n");
/*
 * This function is the entry point for command line parsing and should be
 * called from the arch-specific code.
 */
static int __init __parse_crashkernel(char *cmdline,
                                      unsigned long long system_ram,
                                      unsigned long long *crash_size,
                                      unsigned long long *crash_base,
        char *p = cmdline, *ck_cmdline = NULL;
        char *first_colon, *first_space;

        BUG_ON(!crash_size || !crash_base);

        /* find crashkernel and use the last one if there are more */
        p = strstr(p, name);
                p = strstr(p+1, name);

        ck_cmdline += strlen(name);

        /*
         * if the commandline contains a ':', then that's the extended
         * syntax -- if not, it must be the classic syntax
         */
        first_colon = strchr(ck_cmdline, ':');
        first_space = strchr(ck_cmdline, ' ');
        if (first_colon && (!first_space || first_colon < first_space))
                return parse_crashkernel_mem(ck_cmdline, system_ram,
                                             crash_size, crash_base);

        return parse_crashkernel_simple(ck_cmdline, crash_size,
int __init parse_crashkernel(char *cmdline,
                             unsigned long long system_ram,
                             unsigned long long *crash_size,
                             unsigned long long *crash_base)
        return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,

int __init parse_crashkernel_low(char *cmdline,
                                 unsigned long long system_ram,
                                 unsigned long long *crash_size,
                                 unsigned long long *crash_base)
        return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
                                   "crashkernel_low=");
static void update_vmcoreinfo_note(void)
        u32 *buf = vmcoreinfo_note;

        if (!vmcoreinfo_size)
        buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,

void crash_save_vmcoreinfo(void)
        vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds());
        update_vmcoreinfo_note();

void vmcoreinfo_append_str(const char *fmt, ...)
        va_start(args, fmt);
        r = vsnprintf(buf, sizeof(buf), fmt, args);

        if (r + vmcoreinfo_size > vmcoreinfo_max_size)
                r = vmcoreinfo_max_size - vmcoreinfo_size;

        memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);

        vmcoreinfo_size += r;
/*
 * provide an empty default implementation here -- architecture
 * code may override this
 */
void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void)

unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void)
        return __pa((unsigned long)(char *)&vmcoreinfo_note);
static int __init crash_save_vmcoreinfo_init(void)
        VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
        VMCOREINFO_PAGESIZE(PAGE_SIZE);

        VMCOREINFO_SYMBOL(init_uts_ns);
        VMCOREINFO_SYMBOL(node_online_map);
        VMCOREINFO_SYMBOL(swapper_pg_dir);
        VMCOREINFO_SYMBOL(_stext);
        VMCOREINFO_SYMBOL(vmlist);

#ifndef CONFIG_NEED_MULTIPLE_NODES
        VMCOREINFO_SYMBOL(mem_map);
        VMCOREINFO_SYMBOL(contig_page_data);
#ifdef CONFIG_SPARSEMEM
        VMCOREINFO_SYMBOL(mem_section);
        VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
        VMCOREINFO_STRUCT_SIZE(mem_section);
        VMCOREINFO_OFFSET(mem_section, section_mem_map);
        VMCOREINFO_STRUCT_SIZE(page);
        VMCOREINFO_STRUCT_SIZE(pglist_data);
        VMCOREINFO_STRUCT_SIZE(zone);
        VMCOREINFO_STRUCT_SIZE(free_area);
        VMCOREINFO_STRUCT_SIZE(list_head);
        VMCOREINFO_SIZE(nodemask_t);
        VMCOREINFO_OFFSET(page, flags);
        VMCOREINFO_OFFSET(page, _count);
        VMCOREINFO_OFFSET(page, mapping);
        VMCOREINFO_OFFSET(page, lru);
        VMCOREINFO_OFFSET(page, _mapcount);
        VMCOREINFO_OFFSET(page, private);
        VMCOREINFO_OFFSET(pglist_data, node_zones);
        VMCOREINFO_OFFSET(pglist_data, nr_zones);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
        VMCOREINFO_OFFSET(pglist_data, node_mem_map);
        VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
        VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
        VMCOREINFO_OFFSET(pglist_data, node_id);
        VMCOREINFO_OFFSET(zone, free_area);
        VMCOREINFO_OFFSET(zone, vm_stat);
        VMCOREINFO_OFFSET(zone, spanned_pages);
        VMCOREINFO_OFFSET(free_area, free_list);
        VMCOREINFO_OFFSET(list_head, next);
        VMCOREINFO_OFFSET(list_head, prev);
        VMCOREINFO_OFFSET(vm_struct, addr);
        VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
        log_buf_kexec_setup();
        VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
        VMCOREINFO_NUMBER(NR_FREE_PAGES);
        VMCOREINFO_NUMBER(PG_lru);
        VMCOREINFO_NUMBER(PG_private);
        VMCOREINFO_NUMBER(PG_swapcache);
        VMCOREINFO_NUMBER(PG_slab);
#ifdef CONFIG_MEMORY_FAILURE
        VMCOREINFO_NUMBER(PG_hwpoison);
        VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);

        arch_crash_save_vmcoreinfo();
        update_vmcoreinfo_note();

module_init(crash_save_vmcoreinfo_init)
/*
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
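
/*
 * Illustrative only: this path is normally reached from sys_reboot()
 * with LINUX_REBOOT_CMD_KEXEC, e.g. from user space via
 *
 *      syscall(__NR_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
 *              LINUX_REBOOT_CMD_KEXEC, NULL);
 *
 * which is what "kexec -e" does after a successful kexec_load().
 */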
int kernel_kexec(void)
        if (!mutex_trylock(&kexec_mutex))

#ifdef CONFIG_KEXEC_JUMP
        if (kexec_image->preserve_context) {
                lock_system_sleep();
                pm_prepare_console();
                error = freeze_processes();
                        goto Restore_console;
                error = dpm_suspend_start(PMSG_FREEZE);
                        goto Resume_console;
                /* At this point, dpm_suspend_start() has been called,
                 * but *not* dpm_suspend_end().  We *must* call
                 * dpm_suspend_end() now.  Otherwise, drivers for
                 * some devices (e.g. interrupt controllers) become
                 * desynchronized with the actual state of the
                 * hardware at resume time, and evil weirdness ensues.
                 */
                error = dpm_suspend_end(PMSG_FREEZE);
                        goto Resume_devices;
                error = disable_nonboot_cpus();
                local_irq_disable();
                error = syscore_suspend();

                kernel_restart_prepare(NULL);
                printk(KERN_EMERG "Starting new kernel\n");

        machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
        if (kexec_image->preserve_context) {
                enable_nonboot_cpus();
                dpm_resume_start(PMSG_RESTORE);
                dpm_resume_end(PMSG_RESTORE);
                pm_restore_console();
                unlock_system_sleep();

        mutex_unlock(&kexec_mutex);