Advertisement
Guest User

Untitled

a guest
Dec 16th, 2017
199
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
C 12.92 KB | None | 0 0
  1. #include "stdlib/string.h"
  2. #include "stdlib/assert.h"
  3.  
  4. #include "kernel/lib/disk/ata.h"
  5. #include "kernel/lib/memory/map.h"
  6. #include "kernel/lib/memory/layout.h"
  7. #include "kernel/lib/console/terminal.h"
  8.  
  9. #include "kernel/asm.h"
  10. #include "kernel/cpu.h"
  11. #include "kernel/misc/gdt.h"
  12. #include "kernel/misc/elf.h"
  13. #include "kernel/misc/util.h"
  14. #include "kernel/loader/config.h"
  15.  
// One entry of the BIOS memory map written by the first-stage loader at
// BOOT_MMAP_ADDR (presumably an E820 map — layout matches; TODO confirm
// against boot/boot.S). Field order and sizes must match what the boot
// code stored — do not reorder or repack.
struct bios_mmap_entry {
    uint64_t base_addr;  // physical start address of the region
    uint64_t addr_len;   // region length in bytes
    uint32_t type;       // region type; MEMORY_TYPE_FREE (1) = usable RAM
    uint32_t acpi_attrs; // extended attributes (unused by this loader)
};
  22.  
// Describe gdtr for long mode.
// Pseudo-descriptor consumed by the `lgdt' instruction: 16-bit limit
// followed by the table base. The loader runs with 32-bit addresses, so
// the 64-bit base is expressed as a 32-bit value plus an explicit zero
// high half. `packed' prevents the compiler from padding after `limit'.
struct gdtr {
    uint16_t limit; // size of the GDT in bytes, minus one
    uint32_t base;  // low 32 bits of the GDT physical address
    uint32_t zero;  // high 32 bits, always zero in the loader
} __attribute__((packed));
  29.  
// Some help from linker script:
// `boot_stack' is the stack set up by the boot stub; `end' marks the first
// byte past the loaded loader image (start of allocatable memory).
extern uint8_t boot_stack[], end[];

// XXX: Now (in loader) virtual address is equal to physical one
static uint64_t max_physical_address;  // top of free RAM found in the BIOS map
static uint8_t *free_memory = end;     // bump pointer used by loader_alloc()

// Physical page descriptor array and its length; filled in by
// loader_detect_memory()/loader_init_memory() and handed to the kernel
static struct page *pages;
static uint64_t pages_cnt;

// Root of the long-mode page-table hierarchy built by the loader
pml4e_t *pml4;

// Long-mode GDT and the pseudo-descriptor loaded with `lgdt'
struct descriptor *gdt;
struct gdtr gdtr;

// Loader-local panic implementation: print the message and hang
void loader_panic(const char *fmt, ...);
panic_t panic = loader_panic;

// Loader uses this struct to pass some
// useful information to kernel
struct kernel_config *config;


// Forward declarations (definitions below)
void *loader_alloc(uint64_t size, uint32_t align);
void loader_detect_memory(struct bios_mmap_entry *mm, uint32_t cnt);
int loader_init_memory(struct bios_mmap_entry *mm, uint32_t cnt);
int loader_map_section(uint64_t va, uintptr_t pa, uint64_t len, bool hard);

bool page_is_available(uint64_t paddr, struct bios_mmap_entry *mm, uint32_t cnt);

struct descriptor *loader_init_gdt(void);

int loader_read_kernel(uint64_t *kernel_entry_point);
void loader_enter_long_mode(uint64_t kernel_entry_point);
  64.  
  65. // Why this address? See `boot/boot.S'
  66. #define BOOT_MMAP_ADDR      0x7e00
  67. void loader_main(void)
  68. {
  69.     terminal_init();
  70.  
  71. #if LAB >= 2
  72.     // Next two parameters are prepared by the first loader
  73.     struct bios_mmap_entry *mm = (struct bios_mmap_entry *)BOOT_MMAP_ADDR;
  74.     uint32_t cnt = *((uint32_t *)BOOT_MMAP_ADDR - 1);
  75.  
  76.     uint64_t kernel_entry_point;
  77.     if (loader_read_kernel(&kernel_entry_point) != 0)
  78.         goto something_bad;
  79.  
  80.     loader_detect_memory(mm, cnt);
  81.     if (loader_init_memory(mm, cnt) != 0)
  82.         goto something_bad;
  83.  
  84.     loader_enter_long_mode(kernel_entry_point);
  85.  
  86. something_bad:
  87. #endif
  88.     terminal_printf("Stop loading, hang\n");
  89.  
  90. //  while (1) {
  91.         /*do nothing*/
  92. //  }
  93. }
  94.  
  95. // LAB2 Instruction:
  96. // - use `free_memory' as a pointer to memory, wich may be allocated
  97. void *loader_alloc(uint64_t size, uint32_t align)
  98. {
  99.     // LAB 2: Your code here:
  100.     //  Step 1: round loader_freemem up to be aligned properly
  101.     //  Step 2: save current value of loader_freemem as allocated chunk
  102.     //  Step 3: increase free_memory to record allocation
  103.     //  Step 4: return allocated loader_freemem
  104.  
  105.     // округляем free_memory до границы align
  106.  
  107.     uint8_t *loader_freemem = (void*) ROUND_UP((uint32_t)free_memory, align);
  108.     terminal_printf("Allocate %u with align %u fm_ptr %x res_ptr %x\n", (uint32_t)size, align, free_memory, loader_freemem);
  109.  
  110.     // сдвигаем указатель на область свободной памяти
  111.     free_memory = loader_freemem + size;
  112.  
  113.     return loader_freemem;
  114. }
  115.  
// LAB2 Instruction:
// - read elf header (see boot/main.c, but use `elf64_*' here) (for error use terminal_printf)
// - check magic ELF_MAGIC
// - store `kernel_entry_point'
// - read other segments:
// -- shift `free_memory' if needed to avoid overlaps in future
// -- load kernel into physical addresses instead of virtual (drop >4Gb part of virtual address)
// -- use loader_alloc
#define KERNEL_BASE_DISK_SECTOR 2048 // 1Mb = 1024*1024/512
int loader_read_kernel(uint64_t *kernel_entry_point)
{
    *kernel_entry_point = 0;

    // Read one 512-byte sector holding the ELF header; the kernel image
    // starts at the 1Mb mark on disk (the second-stage loader lives below it).
    struct elf64_header *elf_header = (struct elf64_header *)loader_alloc(sizeof(*elf_header), PAGE_SIZE);
    if (disk_io_read_segment((uint32_t)elf_header, ATA_SECTOR_SIZE, KERNEL_BASE_DISK_SECTOR) != 0) {
        terminal_printf("Can't read elf header\n");
        return -1;
    }
    // check magic ELF_MAGIC
    if (elf_header->e_magic != ELF_MAGIC) {
        terminal_printf("Elf header must equals ELF_MAGIC, but  (%i)", elf_header->e_magic);
        return -1;
    }

    // store `kernel_entry_point'
    *kernel_entry_point = elf_header->e_entry;

    // read other segments
    for (struct elf64_program_header *ph = ELF64_PHEADER_FIRST(elf_header);
         ph < ELF64_PHEADER_LAST(elf_header); ph++) {
        // Drop the high half of the virtual address, mapping
        // [KERNBASE; KERNBASE+FREEMEM) down to [0; FREEMEM)
        ph->p_va &= 0xFFFFFFFFull;

        // NOTE(review): this reads p_memsz bytes from disk. For a segment with
        // .bss (p_memsz > p_filesz) that pulls in bytes that are not part of
        // the file and never zero-fills the bss — confirm against the ELF spec
        // and disk_io_read_segment's contract. Also assumes p_offset is
        // sector-aligned: the remainder of p_offset / ATA_SECTOR_SIZE is
        // silently discarded — TODO confirm the kernel image guarantees this.
        uint32_t lba = (ph->p_offset / ATA_SECTOR_SIZE) + KERNEL_BASE_DISK_SECTOR;
        if (disk_io_read_segment(ph->p_va, ph->p_memsz, lba) != 0) {
            terminal_printf("Error reading segment %i", lba);
            return -1;
        }
        // `free_memory' grows upward; push it past the highest loaded segment
        // end so later loader_alloc() calls don't overlap kernel data.
        // PADDR is effectively an identity here since VADDR_BASE = 0 in the loader.
        if (PADDR(free_memory) < PADDR(ph->p_va + ph->p_memsz))
            free_memory = (uint8_t *)(uintptr_t)(ph->p_va + ph->p_memsz);
    }
    return 0;

}
  165.  
  166. // LAB2 Instruction:
  167. // - check all entry points with type `free' and detect `max_physical_address'
  168. // - also detect total `pages_cnt', using `max_physical_address' and `PAGE_SIZE'
  169. #define MEMORY_TYPE_FREE 1
  170. void loader_detect_memory(struct bios_mmap_entry *memory_map, uint32_t cnt)
  171. {
  172.     max_physical_address = 0;
  173.     for (uint32_t i = 0; i < cnt; i++) {
  174.         terminal_printf("Memory_map entry base_addr %u addr_len %u mem_type %u\n",
  175.                         (uint32_t)memory_map[i].base_addr, (uint32_t)memory_map[i].addr_len, (uint32_t)memory_map[i].type);
  176.         /** итерируемся и проверяем тип памяти проставляя по ходу дела max_physical_address */
  177.         if (memory_map[i].type == MEMORY_TYPE_FREE
  178.             && memory_map[i].base_addr + memory_map[i].addr_len >= max_physical_address)
  179.         {
  180.             max_physical_address = memory_map[i].base_addr + memory_map[i].addr_len;
  181.             terminal_printf("Current max_physical_address: %u\n", (uint32_t)max_physical_address);
  182.         }
  183.     }
  184.  
  185.     /** выравниваем вниз, чтобы последняя страница содержала целиком в себе страницу памяти */
  186.     pages_cnt = ROUND_DOWN(max_physical_address, PAGE_SIZE) / PAGE_SIZE;
  187.  
  188.     terminal_printf("Available memory: %u Kb (%u pages)\n",
  189.             (uint32_t)(max_physical_address / 1024), (uint32_t)pages_cnt);
  190. }
  191.  
// Build all memory structures the kernel needs before long mode:
// kernel_config, GDT, PML4, the physical page array and the free-page list,
// then establish the loader's and kernel's virtual mappings.
// Returns 0 on success, -1 if any mapping fails.
// NOTE: ordering matters — every loader_alloc() here moves `free_memory',
// and the identity mapping near the end must cover everything allocated so far.
int loader_init_memory(struct bios_mmap_entry *mm, uint32_t cnt)
{
    static struct mmap_state state;

    config = loader_alloc(sizeof(*config), PAGE_SIZE);
    memset(config, 0, sizeof(*config));

    gdt = loader_init_gdt();

    // Allocate and init PML4
    pml4 = loader_alloc(PAGE_SIZE, PAGE_SIZE);
    memset(pml4, 0, PAGE_SIZE);

    // Allocate and initialize physical pages array
    // (SIZEOF_PAGE64 is used rather than sizeof(struct page) — presumably
    // the 64-bit kernel's page struct size differs from the 32-bit loader's)
    pages = loader_alloc(SIZEOF_PAGE64 * pages_cnt, PAGE_SIZE);
    memset(pages, 0, SIZEOF_PAGE64 * pages_cnt);

    // Initialize config
    config->pages_cnt = pages_cnt;
    config->pages.ptr = pages;
    config->pml4.ptr = pml4;
    config->gdt.ptr = gdt;

    // Initialize `mmap_state'
    state.free = (struct mmap_free_pages){ NULL };
    state.pages_cnt = pages_cnt;
    state.pages = pages;
    mmap_init(&state);

    // Fill in free pages list, skip ones used by kernel or hardware
    for (uint32_t i = 0; i < pages_cnt; i++) {
        uint64_t page_addr = (uint64_t)i * PAGE_SIZE;

        if (page_is_available(page_addr, mm, cnt) == false) {
            // Mark reserved pages as referenced so they are never handed out
            pages[i].ref = 1;
            continue;
        }

        // Insert head is important, it guarantees that high physical
        // addresses will be used before low ones.
        LIST_INSERT_HEAD(&state.free, &pages[i], link);
    }

    // Map kernel stack
    if (loader_map_section(KERNEL_STACK_TOP - KERNEL_STACK_SIZE,
            (uintptr_t)boot_stack, KERNEL_STACK_SIZE, true) != 0)
        return -1;

    // Pass some information to kernel
    if (loader_map_section(KERNEL_INFO, (uintptr_t)config, PAGE_SIZE, true) != 0)
        return -1;

    // Make APIC registers available for the kernel
    if (loader_map_section(APIC_BASE, APIC_BASE_PA, PAGE_SIZE, true) != 0)
        return -1;

    // Make IO APIC registers available for the kernel
    if (loader_map_section(IOAPIC_BASE, IOAPIC_BASE_PA, PAGE_SIZE, true) != 0)
        return -1;

    // Map loader to make all addresses valid after paging enable
    // (before jump to kernel entry point). We must map all until
    // `free_memory' not just `end', because `pml4' located after `end'
    if (loader_map_section(0x0, 0x0, (uintptr_t)free_memory, true) != 0)
        return -1;

    // Make continuous mapping [KERNEL_BASE, KERNEL_BASE + FREE_MEM) -> [0, FREE_MEM)
    // Without this mapping we can't compute virtual address from physical one
    if (loader_map_section(KERNEL_BASE, 0x0, ROUND_DOWN(max_physical_address, PAGE_SIZE), false) != 0)
        return -1;

    return 0;
}
  265.  
  266. #define NGDT_ENTRIES    5
  267. struct descriptor *loader_init_gdt(void)
  268. {
  269.     uint16_t system_segmnets_size = sizeof(struct descriptor64) * CPU_MAX_CNT;
  270.     uint16_t user_segments_size = sizeof(struct descriptor) * NGDT_ENTRIES;
  271.     uint16_t gdt_size = user_segments_size + system_segmnets_size;
  272.     struct descriptor *gdt = loader_alloc(gdt_size, 16);
  273.  
  274.     gdtr.base = (uintptr_t)gdt;
  275.     gdtr.limit = gdt_size - 1;
  276.     gdtr.zero = 0;
  277.  
  278.     // XXX: according to AMD64 documentation, in 64-bit mode all most
  279.     // fields, like `UST_W' or `DPL' for data segment are ignored,
  280.     // but this is not true inside QEMU and Bochs
  281.  
  282.     // Null descriptor - just in case
  283.     gdt[0] = SEGMENT_DESC(0, 0x0, 0x0);
  284.  
  285.     // Kernel text
  286.     gdt[GD_KT >> 3] = SEGMENT_DESC(USF_L|USF_P|DPL_S|USF_S|UST_X, 0x0, 0x0);
  287.  
  288.     // Kernel data
  289.     gdt[GD_KD >> 3] = SEGMENT_DESC(USF_P|USF_S|DPL_S|UST_W, 0x0, 0x0);
  290.  
  291.     // User text
  292.     gdt[GD_UT >> 3] = SEGMENT_DESC(USF_L|USF_P|DPL_U|USF_S|UST_X, 0x0, 0x0);
  293.  
  294.     // User data
  295.     gdt[GD_UD >> 3] = SEGMENT_DESC(USF_P|USF_S|DPL_U|UST_W, 0x0, 0x0);
  296.  
  297.     return gdt;
  298. }
  299.  
  300. bool page_is_available(uint64_t paddr, struct bios_mmap_entry *mm, uint32_t cnt)
  301. {
  302.     if (paddr == 0)
  303.         // The first page contain some useful bios data strutures.
  304.         // Reserve it just in case.
  305.         return false;
  306.  
  307.     if (paddr >= APIC_BASE_PA && paddr < APIC_BASE_PA + PAGE_SIZE)
  308.         // APIC registers mapped here
  309.         return false;
  310.  
  311.     if (paddr >= IOAPIC_BASE_PA && paddr < IOAPIC_BASE_PA + PAGE_SIZE)
  312.         // IO APIC registers mapped here
  313.         return false;
  314.  
  315.     if (paddr >= (uint64_t)(uintptr_t)end &&
  316.         paddr  < (uint64_t)(uintptr_t)free_memory)
  317.         // This address range contains kernel
  318.         // and data allocated with `loader_alloc()'
  319.         return false;
  320.  
  321.     bool page_is_available = true;
  322.     for (uint32_t i = 0; i < cnt; i++) {
  323.         if (mm->base_addr > paddr)
  324.             continue;
  325.         if (paddr+PAGE_SIZE >= mm->base_addr+mm->addr_len)
  326.             continue;
  327.  
  328.         // Memory areas from bios may be overlapped, so we must check
  329.         // all areas, before we can consider that page is free.
  330.         page_is_available &= mm->type == MEMORY_TYPE_FREE;
  331.     }
  332.  
  333.     return page_is_available;
  334. }
  335.  
// Map the physically contiguous range [pa, pa+len) at virtual [va, va+len)
// in `pml4' with present+writable PTEs. `va' is rounded down and `len' up
// to page granularity. When `hard' is true the backing pages are removed
// from the free list so later allocations cannot reuse them.
// Returns 0 on success, -1 if a page-table page could not be allocated.
int loader_map_section(uint64_t va, uintptr_t pa, uint64_t len, bool hard)
{
    uint64_t va_aligned = ROUND_DOWN(va, PAGE_SIZE);
    uint64_t len_aligned = ROUND_UP(len, PAGE_SIZE);

    for (uint64_t i = 0; i < len_aligned; i += PAGE_SIZE) {
        // `true' asks mmap_lookup to allocate intermediate tables on demand
        pte_t *pte = mmap_lookup(pml4, va_aligned + i, true);
        struct page *page;

        if (pte == NULL)
            return -1;
        // Double mapping of the same virtual page is a loader logic error
        assert((*pte & PTE_P) == 0);

        *pte = PTE_ADDR(pa + i) | PTE_P | PTE_W;

        page = pa2page(PTE_ADDR(pa + i));
        if (page->ref != 0)
            // Page already has been removed from free list
            continue;

        page_incref(page);
        if (hard == true) {
            // We must remove some pages from free list, to avoid
            // overriding them later
            LIST_REMOVE(page, link);
            page->link.le_next = NULL;
            page->link.le_prev = NULL;
        }
    }

    return 0;
}
  368.  
// Perform the canonical 32->64-bit switch sequence and jump to the kernel.
// Order is mandated by the architecture: load GDT, set CR4.PAE, point CR3
// at the PML4, set EFER.LME, then enable CR0.PG — only after that is the
// CPU in (compatibility) long mode and the far jump in entry_long_mode_asm
// can load a 64-bit code segment. Does not return.
void loader_enter_long_mode(uint64_t kernel_entry_point)
{
    // Reload gdt (references the global `gdtr' by symbol name)
    asm volatile("lgdt gdtr");

    // Enable PAE (CR4 bit 5) — required for 4-level paging
    asm volatile(
        "movl %cr4, %eax\n\t"
        "btsl $5, %eax\n\t"
        "movl %eax, %cr4\n"
    );

    // Setup CR3 with the physical address of the PML4
    asm volatile ("movl %%eax, %%cr3" :: "a" (PADDR(pml4)));

    // Enable long mode (set EFER.LME=1)
    asm volatile (
        "movl $0xc0000080, %ecx\n\t"    // EFER MSR number
        "rdmsr\n\t"         // Read EFER
        "btsl $8, %eax\n\t"     // Set LME=1
        "wrmsr\n"           // Write EFER
    );

    // Enable paging (CR0 bit 31) to activate long mode
    asm volatile (
        "movl %cr0, %eax\n\t"
        "btsl $31, %eax\n\t"
        "movl %eax, %cr0\n"
    );

    // Assembly stub performs the far jump into 64-bit code and
    // transfers control to the kernel entry point
    extern void entry_long_mode_asm(uint64_t kernel_entry);
    entry_long_mode_asm(kernel_entry_point);
}
  402.  
  403. void loader_panic(const char *fmt, ...)
  404. {
  405.     va_list ap;
  406.  
  407.     va_start(ap, fmt);
  408.     terminal_vprintf(fmt, ap);
  409.     va_end(ap);
  410.  
  411.     while (1) {
  412.         /*do nothing*/;
  413.     }
  414. }
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement