В предыдущей статье мы рассмотрели основы работы в защищенном режиме IA-32. Сегодня пришла пора научиться работать с виртуальным адресным пространством.
/*
 * IA-32 page directory entry for 4KB pages (PS = 0): one flags byte,
 * 4 soft bits, then the 20-bit frame number of the page table.
 * NOTE(review): `attribute` is presumably a project macro expanding to
 * __attribute__((...)) — confirm it is defined before this point.
 */
struct page_directory_entry_t {
u8 present : 1;        /* 1 = page table is resident in memory */
u8 read_write : 1;     /* 1 = writable, 0 = read-only */
u8 user_supervisor : 1; /* 1 = accessible from user mode */
u8 write_through : 1;  /* 1 = write-through caching for the table */
u8 cache_disabled : 1; /* 1 = caching disabled for the table */
u8 accessed : 1;       /* set by the CPU when the entry is used */
u8 zero : 1;           /* reserved — kept 0 for a 4KB entry */
u8 page_size : 1;      /* 0 = 4KB pages, 1 = 4MB page */
u8 ignored : 1;
u8 available : 3;      /* free for OS bookkeeping */
u32 page_table_addr : 20; /* physical address of the page table >> 12 */
} attribute(packed);
/*
 * IA-32 page table entry — maps one 4KB page: one flags byte,
 * 3 soft bits, then the 20-bit physical frame number.
 */
struct page_table_entry_t {
u8 present : 1;        /* 1 = page is resident in memory */
u8 read_write : 1;     /* 1 = writable, 0 = read-only */
u8 user_supervisor : 1; /* 1 = accessible from user mode */
u8 write_through : 1;  /* 1 = write-through caching */
u8 cache_disabled : 1; /* 1 = caching disabled */
u8 accessed : 1;       /* set by the CPU on access */
u8 dirty : 1;          /* set by the CPU on write */
u8 zero : 1;           /* reserved — kept 0 */
u8 global : 1;         /* 1 = translation survives CR3 reloads (with CR4.PGE) */
u8 available : 3;      /* free for OS bookkeeping */
u32 page_phys_addr : 20; /* physical address of the page >> 12 */
} attribute(packed);
/*
 * Kernel page directory, 4KB-aligned as required for CR3.
 * NOTE(review): this is a SINGLE entry, but the CPU reads a full
 * 1024-entry (4KB) directory at the CR3 base — entries 1..1023 here
 * are whatever follows in memory. Looks intentional for this demo
 * (only the first 4MB are mapped), but verify.
 */
static struct page_directory_entry_t kpage_directory attribute(aligned(4096));
/* Kernel page table: identity-maps the first MMU_PAGE_TABLE_ENTRIES_COUNT * 4KB of physical memory. */
static struct page_table_entry_t kpage_table[MMU_PAGE_TABLE_ENTRIES_COUNT] attribute(aligned(4096));
/*
 * Api - init kernel page directory
 * Here assumed each entry addresses 4Kb
 *
 * Builds a single directory entry pointing at kpage_table, which
 * identity-maps the first MMU_PAGE_TABLE_ENTRIES_COUNT pages (4MB
 * for a 1024-entry table). Does not load CR3 or enable paging.
 */
extern void mmu_init()
{
    memset(&kpage_directory, 0, sizeof(struct page_directory_entry_t));
    /* set kernel page directory entry */
    kpage_directory.zero = 0; /* fixed: reserved bit was set to 1; keep it clear like the table entries */
    kpage_directory.accessed = 0;
    kpage_directory.available = 0;
    kpage_directory.cache_disabled = 0;
    kpage_directory.ignored = 0;
    kpage_directory.page_size = 0; /* 4KB pages */
    kpage_directory.present = 1; /* kernel pages always in memory */
    kpage_directory.read_write = 1; /* read & write */
    kpage_directory.user_supervisor = 1; /* NOTE(review): 1 = user-accessible; user processes share these pages — confirm intended */
    kpage_directory.write_through = 1;
    kpage_directory.page_table_addr = (size_t)kpage_table >> 12; /* frame number of the kernel table */
    /* identity-map the kernel table */
    for (int i = 0; i < MMU_PAGE_TABLE_ENTRIES_COUNT; ++i) {
        kpage_table[i].zero = 0;
        kpage_table[i].accessed = 0;
        kpage_table[i].available = 0;
        kpage_table[i].cache_disabled = 0;
        kpage_table[i].dirty = 0;
        kpage_table[i].global = 1; /* kernel mappings survive CR3 reloads */
        kpage_table[i].present = 1; /* kernel pages always in memory */
        kpage_table[i].read_write = 1; /* read & write */
        kpage_table[i].user_supervisor = 1; /* NOTE(review): see directory entry above */
        kpage_table[i].write_through = 1;
        kpage_table[i].page_phys_addr = i; /* identity map: frame i == (i * 4096) >> 12 */
    }
}
/*
* Api - Create user page directory
*/
extern struct page_directory_entry_t* mmu_create_user_page_directory(struct page_table_entry_t* page_table)
{
struct page_directory_entry_t* upage_dir;
upage_dir = malloc_a(sizeof(struct page_directory_entry_t), 4096);
upage_dir->zero = 1;
upage_dir->accessed = 0;
upage_dir->available = 0;
upage_dir->cache_disabled = 0;
upage_dir->ignored = 0;
upage_dir->page_size = 0; /* 4KB */
upage_dir->present = 1;
upage_dir->read_write = 1; /* read & write */
upage_dir->user_supervisor = 0; /* user mode pages */
upage_dir->write_through = 1;
upage_dir->page_table_addr = (size_t)page_table >> 12; /* assume 4Kb pages */
return upage_dir;
}
/*
* Api - Create user page table
*/
extern struct page_table_entry_t* mmu_create_user_page_table()
{
struct page_table_entry_t* upage_table;
upage_table = malloc_a(sizeof(struct page_table_entry_t) * MMU_PAGE_TABLE_ENTRIES_COUNT, 4096);
/* share kernel pages */
memcpy(upage_table, kpage_table, sizeof(struct page_table_entry_t) * MMU_KERNEL_PAGES_COUNT);
/* fill user pages */
for (int i = MMU_KERNEL_PAGES_COUNT; i < MMU_PAGE_TABLE_ENTRIES_COUNT; ++i) {
struct page_table_entry_t* current;
current = upage_table + i;
current->zero = 0;
current->accessed = 0;
current->available = 0;
current->cache_disabled = 0;
current->dirty = 0;
current->global = 1;
current->present = 0; /* not present as so as there is no user pages yet */
current->read_write = 1; /* read & write */
current->user_supervisor = 0; /* user mode page */
current->write_through = 1;
current->page_phys_addr = 0; /* page is not present */
}
return upage_table;
}
/*
 * Api - Occupy user page
 *
 * Finds the first non-present entry in the user range of the table and
 * points it at the given physical frame. Returns false when every user
 * slot is already in use.
 */
extern bool mmu_occupy_user_page(struct page_table_entry_t* upage_table, void* phys_addr)
{
    for (int index = MMU_KERNEL_PAGES_COUNT; index < MMU_PAGE_TABLE_ENTRIES_COUNT; ++index) {
        struct page_table_entry_t* entry = &upage_table[index];
        if (entry->present) {
            /* slot already in use, keep looking */
            continue;
        }
        entry->zero = 0;
        entry->accessed = 0;
        entry->available = 0;
        entry->cache_disabled = 0;
        entry->dirty = 0;
        entry->global = 1; /* NOTE(review): global user pages survive CR3 reloads — verify CR4.PGE stays disabled */
        entry->present = 1;
        entry->read_write = 1; /* read & write */
        entry->user_supervisor = 0; /* NOTE(review): 0 = supervisor-only — confirm intended for a user page */
        entry->write_through = 1;
        entry->page_phys_addr = (size_t)phys_addr >> 12; /* 4KB frame number */
        return true;
    }
    return false;
}
/*
 * Enable paging
 * void asm_enable_paging(void *page_directory)
 *
 * Loads the (physical) page directory base into CR3, then sets
 * CR0.PG (bit 31) and CR0.PE (bit 0). PE is normally already set in
 * protected mode; OR-ing it again is harmless.
 */
asm_enable_paging:
mov 4(%esp),%eax # first stack argument: page_directory base
mov %eax,%cr3 # CR3 = page directory physical address
mov %cr0,%eax
or $0x80000001,%eax # set PG (bit 31) and PE (bit 0)
mov %eax,%cr0 # paging is active from the next instruction
ret
/*
 * Disable paging
 * void asm_disable_paging()
 *
 * Clears CR0.PG. Fixes two defects in the original:
 *  - it executed `mov %eax,%cr3` first, loading whatever garbage the
 *    caller left in EAX into CR3 and clobbering the directory base;
 *  - it used `xor $0x80000000`, which TOGGLES PG (re-enabling paging
 *    if called twice) instead of clearing it.
 */
asm_disable_paging:
mov %cr0,%eax
and $0x7fffffff,%eax # clear PG (bit 31) unconditionally
mov %eax,%cr0
ret
/* Physical page allocator state: one bit per dynamic page (set = allocated). */
static u32 bitmap[MM_BITMAP_SIZE];
/*
 * Api - allocate pages
 *
 * Finds `count` consecutive free pages in the bitmap, marks them
 * allocated and returns the physical address of the first one.
 * Returns null when no run of `count` free pages exists (or count is 0).
 */
extern void* mm_phys_alloc_pages(u_int count)
{
    if (count == 0) {
        return null; /* fixed: previously "found" a run of zero pages and returned page 0 */
    }
    /* scan only start positions where the whole run fits in the bitmap
       (fixed: the old bound let mm_get_bit read past MM_DYNAMIC_PAGES_COUNT) */
    for (int i = 0; i + count <= MM_DYNAMIC_PAGES_COUNT; ++i) {
        bool is_found = true;
        for (u_int j = 0; j < count; ++j) {
            if (mm_get_bit(i + j)) {
                is_found = false;
                break; /* run is broken, no need to scan the rest */
            }
        }
        if (is_found) {
            /* occupy the run */
            for (u_int j = 0; j < count; ++j) {
                assert(!mm_get_bit(i + j));
                mm_set_bit(i + j);
            }
            return (void *)mm_get_addr(i);
        }
    }
    return null;
}
/*
 * Api - free page
 *
 * Releases `count` consecutive pages starting at `ptr`. The address
 * must be page-aligned and inside the managed area. Returns false
 * when `ptr` does not match any managed page.
 */
extern bool mm_phys_free_pages(void* ptr, u_int count)
{
    size_t address = (size_t)ptr;
    assert(address >= MM_AREA_START);
    assert(address % MM_PAGE_SIZE == 0);
    /* locate the page index for this address */
    for (int i = 0; i < MM_DYNAMIC_PAGES_COUNT; ++i) {
        size_t addr = mm_get_addr(i);
        if (addr == address) {
            /* the whole run must stay inside the bitmap
               (fixed: mm_clear_bit could be called past the end) */
            assert(i + count <= MM_DYNAMIC_PAGES_COUNT);
            /* free pages — each must currently be allocated */
            for (u_int j = 0; j < count; ++j) {
                assert(mm_get_bit(i + j));
                mm_clear_bit(i + j);
            }
            return true;
        }
    }
    return false;
}
К сожалению, сервер MySQL недоступен.