-
Notifications
You must be signed in to change notification settings - Fork 0
/
memory_manager.c
1481 lines (1250 loc) · 52.5 KB
/
memory_manager.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/* See COPYRIGHT for copyright information. */
/*
KEY WORDS
==========
MACROS: STATIC_KERNEL_PHYSICAL_ADDRESS, STATIC_KERNEL_VIRTUAL_ADDRESS, PDX, PTX, CONSTRUCT_ENTRY, EXTRACT_ADDRESS, ROUNDUP, ROUNDDOWN, LIST_INIT, LIST_INSERT_HEAD, LIST_FIRST, LIST_REMOVE
CONSTANTS: PAGE_SIZE, PERM_PRESENT, PERM_WRITEABLE, PERM_USER, KERNEL_STACK_TOP, KERNEL_STACK_SIZE, KERNEL_BASE, READ_ONLY_FRAMES_INFO, PHYS_IO_MEM, PHYS_EXTENDED_MEM, E_NO_MEM
VARIABLES: ptr_free_mem, ptr_page_directory, phys_page_directory, phys_stack_bottom, Frame_Info, frames_info, free_frame_list, references, prev_next_info, size_of_extended_mem, number_of_frames, ptr_frame_info ,create, perm, va
FUNCTIONS: to_physical_address, get_frame_info, tlb_invalidate
=====================================================================================================================================================================================================
*/
#include <kern/memory_manager.h>
#include <kern/file_manager.h>
#include <inc/x86.h>
#include <inc/mmu.h>
#include <inc/error.h>
#include <inc/string.h>
#include <inc/assert.h>
#include <kern/trap.h>
#include <kern/kclock.h>
#include <kern/user_environment.h>
#include <kern/sched.h>
#include <kern/kheap.h>
#include <kern/file_manager.h>
extern uint32 number_of_frames; // Amount of physical memory (in frames_info)
extern uint32 size_of_base_mem; // Amount of base memory (in bytes)
extern uint32 size_of_extended_mem; // Amount of extended memory (in bytes)
inline uint32 env_table_ws_get_size(struct Env *e);
inline void env_table_ws_invalidate(struct Env* e, uint32 virtual_address);
inline void env_table_ws_set_entry(struct Env* e, uint32 entry_index, uint32 virtual_address);
inline void env_table_ws_clear_entry(struct Env* e, uint32 entry_index);
inline uint32 env_table_ws_get_virtual_address(struct Env* e, uint32 entry_index);
inline uint32 env_table_ws_get_time_stamp(struct Env* e, uint32 entry_index);
inline uint32 env_table_ws_is_entry_empty(struct Env* e, uint32 entry_index);
void env_table_ws_print(struct Env *curenv);
inline uint32 pd_is_table_used(struct Env *e, uint32 virtual_address);
inline void pd_set_table_unused(struct Env *e, uint32 virtual_address);
inline void pd_clear_page_dir_entry(struct Env *e, uint32 virtual_address);
// These variables are set in initialize_kernel_VM()
uint32* ptr_page_directory; // Virtual address of boot time page directory
uint8* ptr_zero_page; // Virtual address of zero page used by program loader to initialize extra segment zero memory (bss section) it to zero
uint8* ptr_temp_page; // Virtual address of a page used by program loader to initialize segment last page fraction
uint32 phys_page_directory; // Physical address of boot time page directory
char* ptr_free_mem; // Pointer to next byte of free mem
struct Frame_Info* frames_info; // Virtual address of physical frames_info array
struct Frame_Info* disk_frames_info; // Virtual address of physical frames_info array
struct Linked_List free_frame_list; // Free list of physical frames_info
struct Linked_List modified_frame_list;
///**************************** MAPPING KERNEL SPACE *******************************
// Set up a two-level page table:
// ptr_page_directory is the virtual address of the page directory
// phys_page_directory is the physical adresss of the page directory
// Then turn on paging. Then effectively turn off segmentation.
// (i.e., the segment base addrs are set to zero).
//
// This function only sets up the kernel part of the address space
// (ie. addresses >= USER_TOP). The user part of the address space
// will be setup later.
//
// From USER_TOP to USER_LIMIT, the user is allowed to read but not write.
// Above USER_LIMIT the user cannot read (or write).
// Build the boot-time kernel address space, then enable paging.
// Order matters: everything here uses the boot bump allocator
// (boot_allocate_space), which must not be used after free_frame_list
// is initialized in initialize_paging().
void initialize_kernel_VM()
{
	//////////////////////////////////////////////////////////////////////
	// Create the initial (empty) page directory and record its physical
	// address for loading into CR3 later.
	ptr_page_directory = boot_allocate_space(PAGE_SIZE, PAGE_SIZE);
	memset(ptr_page_directory, 0, PAGE_SIZE);
	phys_page_directory = STATIC_KERNEL_PHYSICAL_ADDRESS(ptr_page_directory);

	//////////////////////////////////////////////////////////////////////
	// Map the kernel stack with VA range:
	//   [KERNEL_STACK_TOP-KERNEL_STACK_SIZE, KERNEL_STACK_TOP)
	// to the physical frames backing "ptr_stack_bottom".
	// Permissions: kernel RW, user NONE.
	boot_map_range(ptr_page_directory, KERNEL_STACK_TOP - KERNEL_STACK_SIZE, KERNEL_STACK_SIZE, STATIC_KERNEL_PHYSICAL_ADDRESS(ptr_stack_bottom), PERM_WRITEABLE) ;

	//////////////////////////////////////////////////////////////////////
	// Pre-create a page TABLE for every directory slot covering
	// [KERNEL_BASE, 2^32). Only the tables are created here; the actual
	// PTE mappings are written further below by boot_map_range().
	// NOTE: sva is 64-bit so "sva += PTSIZE" cannot wrap around below
	// 2^32 and loop forever.
	unsigned long long sva = KERNEL_BASE;
	unsigned int nTables=0;
	for (;sva < 0xFFFFFFFF; sva += PTSIZE)
	{
		++nTables;
		boot_get_page_table(ptr_page_directory, (uint32)sva, 1);
	}
	//cprintf("nTables = %d\n", nTables);

	//////////////////////////////////////////////////////////////////////
	// Allocate and zero the 'frames_info' array: one struct Frame_Info
	// per physical frame ('number_of_frames' of them).
	// 2016: READ_ONLY_FRAMES_INFO user mapping is no longer valid since
	// the array can exceed its 4 MB window, so it is NOT mapped for user.
	uint32 array_size;
	array_size = number_of_frames * sizeof(struct Frame_Info) ;
	frames_info = boot_allocate_space(array_size, PAGE_SIZE);
	memset(frames_info, 0, array_size);
	//boot_map_range(ptr_page_directory, READ_ONLY_FRAMES_INFO, array_size, STATIC_KERNEL_PHYSICAL_ADDRESS(frames_info),PERM_USER) ;

	// Same bookkeeping array, but for pages stored in the disk page file.
	uint32 disk_array_size = PAGES_PER_FILE * sizeof(struct Frame_Info);
	disk_frames_info = boot_allocate_space(disk_array_size , PAGE_SIZE);
	memset(disk_frames_info , 0, disk_array_size);

	// Install the recursive/self mappings (VPT for kernel, UVPT for user)
	// that expose every page-table entry at a fixed VA.
	setup_listing_to_all_page_tables_entries();

	//////////////////////////////////////////////////////////////////////
	// Allocate the 'envs' array (NENV entries) and map it read-only for
	// the user at UENVS.
	// Permissions:
	//    - envs itself           -- kernel RW, user NONE
	//    - the image at UENVS    -- kernel R,  user R
	cprintf("Max Envs = %d\n",NENV);
	int envs_size = NENV * sizeof(struct Env) ;
	//allocate space for "envs" array aligned on 4KB boundary
	envs = boot_allocate_space(envs_size, PAGE_SIZE);
	memset(envs , 0, envs_size);
	// Map it at UENVS so user code can read it...
	boot_map_range(ptr_page_directory, UENVS, envs_size, STATIC_KERNEL_PHYSICAL_ADDRESS(envs), PERM_USER) ;
	// ...and force the directory entry itself to user/read-only.
	// NOTE(review): "& ~PERM_WRITEABLE" inside an OR cannot CLEAR a
	// writable bit already set in the entry - presumably the entry was
	// not writable to begin with; confirm against boot_map_range's perms.
	ptr_page_directory[PDX(UENVS)] = ptr_page_directory[PDX(UENVS)]|(PERM_USER|(PERM_PRESENT & (~PERM_WRITEABLE)));

	if(USE_KHEAP)
	{
		// MAKE SURE THAT THIS MAPPING HAPPENS AFTER ALL BOOT ALLOCATIONS
		// (boot_allocate_space) calls are finished, and no remaining data
		// is to be allocated for the kernel: only the pages actually used
		// so far ([KERNEL_BASE, ptr_free_mem)) are identity-mapped.
		boot_map_range(ptr_page_directory, KERNEL_BASE, (uint32)ptr_free_mem - KERNEL_BASE, 0, PERM_WRITEABLE) ;
	}
	else
	{
		// No kernel heap: map ALL of [KERNEL_BASE, 2^32) onto physical
		// [0, 2^32-KERNEL_BASE), whether that much RAM exists or not.
		boot_map_range(ptr_page_directory, KERNEL_BASE, 0xFFFFFFFF - KERNEL_BASE, 0, PERM_WRITEABLE) ;
	}

	// Check that the initial page directory has been set up correctly.
	check_boot_pgdir();

	// Memory is considered "scarce" once the free-frame percentage drops
	// below this threshold (default value defined elsewhere).
	memory_scarce_threshold_percentage = DEFAULT_MEM_SCARCE_PERCENTAGE;

	/*
	NOW: Turn off the segmentation by setting the segments' base to 0, and
	turn on the paging by setting the corresponding flags in control register 0 (cr0)
	*/
	turn_on_paging() ;
}
//
// Allocate "size" bytes of physical memory aligned on an
// "align"-byte boundary. Align must be a power of two.
// Return the start kernel virtual address of the allocated space.
// Returned memory is uninitialized.
//
// If we're out of memory, boot_allocate_space should panic.
// It's too early to run out of memory.
// This function may ONLY be used during boot time,
// before the free_frame_list has been set up.
//
void* boot_allocate_space(uint32 size, uint32 align)
{
	extern char end_of_kernel[];

	// Seed the bump pointer on first use: 'end_of_kernel' is emitted by
	// the linker and marks the first virtual address past the kernel's
	// code and global data.
	if (ptr_free_mem == 0)
		ptr_free_mem = end_of_kernel;

	// Align the bump pointer up, remember where this allocation starts,
	// then advance past the requested size. Contents are deliberately
	// left uninitialized.
	ptr_free_mem = ROUNDUP(ptr_free_mem, align);
	void *start_of_allocation = ptr_free_mem;
	ptr_free_mem += size;

	return start_of_allocation;
}
//
// Map [virtual_address, virtual_address+size) of virtual address space to
// physical [physical_address, physical_address+size)
// in the page table rooted at ptr_page_directory.
// "size" is a multiple of PAGE_SIZE.
// Use permission bits perm|PERM_PRESENT for the entries.
//
// This function may ONLY be used during boot time,
// before the free_frame_list has been set up.
//
void boot_map_range(uint32 *ptr_page_directory, uint32 virtual_address, uint32 size, uint32 physical_address, int perm)
{
	// BUGFIX: the byte offset must be uint32, matching "size". The
	// original 'int i' overflowed (undefined behavior) for ranges of
	// 2 GB or more and mixed signed/unsigned in the loop comparison.
	uint32 i = 0 ;
	// We assume all addresses are page-aligned (see boot_allocate_space);
	// "size" is a multiple of PAGE_SIZE.
	for (i = 0 ; i < size ; i += PAGE_SIZE)
	{
		// Get (or create) the page table covering this VA, then write
		// the frame address plus perm|PERM_PRESENT into its entry.
		uint32 *ptr_page_table = boot_get_page_table(ptr_page_directory, virtual_address, 1) ;
		uint32 index_page_table = PTX(virtual_address);
		ptr_page_table[index_page_table] = CONSTRUCT_ENTRY(physical_address, perm | PERM_PRESENT) ;
		physical_address += PAGE_SIZE ;
		virtual_address += PAGE_SIZE ;
	}
}
//
// Given ptr_page_directory, a pointer to a page directory,
// traverse the 2-level page table structure to find
// the page table for "virtual_address".
// Return a pointer to the table.
//
// If the relevant page table doesn't exist in the page directory:
// - If create == 0, return 0.
// - Otherwise allocate a new page table, install it into ptr_page_directory,
// and return a pointer into it.
// (Questions: What data should the new page table contain?
// And what permissions should the new ptr_page_directory entry have?)
//
// This function allocates new page tables as needed.
//
// boot_get_page_table cannot fail. It's too early to fail.
// This function may ONLY be used during boot time,
// before the free_frame_list has been set up.
//
uint32* boot_get_page_table(uint32 *ptr_page_directory, uint32 virtual_address, int create)
{
	uint32 dir_index = PDX(virtual_address);
	uint32 dir_entry = ptr_page_directory[dir_index];
	uint32 table_phys = EXTRACT_ADDRESS(dir_entry);

	// A table is already installed for this VA: return its kernel VA.
	if (table_phys != 0)
		return STATIC_KERNEL_VIRTUAL_ADDRESS(table_phys);

	// No table, and the caller did not ask for one to be created.
	if (!create)
		return 0;

	// Carve a fresh page for the new table (boot_get_page_table cannot
	// fail - boot_allocate_space panics on OOM) and hook it into the
	// directory as present + writable.
	uint32 *new_table = boot_allocate_space(PAGE_SIZE, PAGE_SIZE);
	ptr_page_directory[dir_index] = CONSTRUCT_ENTRY(STATIC_KERNEL_PHYSICAL_ADDRESS(new_table), PERM_PRESENT | PERM_WRITEABLE);
	return new_table;
}
///******************************* END of MAPPING KERNEL SPACE *******************************
///******************************* MAPPING USER SPACE *******************************
// --------------------------------------------------------------
// Tracking of physical frames.
// The 'frames_info' array has one 'struct Frame_Info' entry per physical frame.
// frames_info are reference counted, and free frames are kept on a linked list.
// --------------------------------------------------------------
// Initialize paging structure and free_frame_list.
// After this point, ONLY use the functions below
// to allocate and deallocate physical memory via the free_frame_list,
// and NEVER use boot_allocate_space() or the related boot-time functions above.
//
extern void initialize_disk_page_file();
void initialize_paging()
{
	// Build free_frame_list to reflect which physical frames are truly
	// free:
	//   1) frames 0..2 reserved (real-mode IDT/BIOS area + the two
	//      loader helper pages below),
	//   2) rest of base memory free,
	//   3) the IO hole [PHYS_IO_MEM, PHYS_EXTENDED_MEM) never allocated,
	//   4) extended memory up to ptr_free_mem (kernel image + boot
	//      allocations) in use; everything above it free.
	int i;
	LIST_INIT(&free_frame_list);
	LIST_INIT(&modified_frame_list);

	// Frames 0-2 are permanently reserved (references forced to 1 so
	// they are never freed).
	frames_info[0].references = 1;
	frames_info[1].references = 1;
	frames_info[2].references = 1;

	// Frames 1 and 2 double as loader helper pages: a zero page for bss
	// initialization and a temp page for partial last segments.
	ptr_zero_page = (uint8*) KERNEL_BASE+PAGE_SIZE;
	ptr_temp_page = (uint8*) KERNEL_BASE+2*PAGE_SIZE;
	i =0;
	// NOTE(review): only the first 1024 bytes of each page are cleared
	// here, although a page is PAGE_SIZE bytes - confirm whether the
	// bound was meant to be PAGE_SIZE.
	for(;i<1024; i++)
	{
		ptr_zero_page[i]=0;
		ptr_temp_page[i]=0;
	}

	// (2) Base memory below the IO hole: free.
	int range_end = ROUNDUP(PHYS_IO_MEM,PAGE_SIZE);
	for (i = 3; i < range_end/PAGE_SIZE; i++)
	{
		initialize_frame_info(&(frames_info[i]));
		LIST_INSERT_HEAD(&free_frame_list, &frames_info[i]);
	}

	// (3) The IO hole: marked in-use so it can never be allocated.
	for (i = PHYS_IO_MEM/PAGE_SIZE ; i < PHYS_EXTENDED_MEM/PAGE_SIZE; i++)
	{
		frames_info[i].references = 1;
	}

	// (4a) Extended memory already consumed by the kernel image and the
	// boot allocator (everything below ptr_free_mem): in use.
	range_end = ROUNDUP(STATIC_KERNEL_PHYSICAL_ADDRESS(ptr_free_mem), PAGE_SIZE);
	for (i = PHYS_EXTENDED_MEM/PAGE_SIZE ; i < range_end/PAGE_SIZE; i++)
	{
		frames_info[i].references = 1;
	}

	// (4b) Remaining extended memory: free.
	for (i = range_end/PAGE_SIZE ; i < number_of_frames; i++)
	{
		initialize_frame_info(&(frames_info[i]));
		LIST_INSERT_HEAD(&free_frame_list, &frames_info[i]);
	}

	// Finally set up the secondary-storage page file.
	initialize_disk_page_file();
}
//
// Initialize a Frame_Info structure.
// The result has null links and 0 references.
// Note that the corresponding physical frame is NOT initialized!
//
// Reset a Frame_Info record: zero every field, leaving null list links
// and a reference count of 0. The corresponding PHYSICAL frame's
// contents are NOT touched.
void initialize_frame_info(struct Frame_Info *ptr_frame_info)
{
	memset(ptr_frame_info, 0, sizeof(*ptr_frame_info));
}
//
// Allocates a physical frame.
// Does NOT set the contents of the physical frame to zero -
// the caller must do that if necessary.
//
// *ptr_frame_info -- is set to point to the Frame_Info struct of the
// newly allocated frame
//
// RETURNS
// 0 -- on success
// If failed, it panic.
//
// Hint: use LIST_FIRST, LIST_REMOVE, and initialize_frame_info
// Hint: references should not be incremented
extern void env_free(struct Env *e);
// Allocate a physical frame off the head of free_frame_list.
// Does NOT zero the frame's contents - callers do that if needed.
// *ptr_frame_info is set to the Frame_Info of the allocated frame; its
// reference count is left at 0 (mapping it is the caller's job).
// Returns 0 on success; panics if no free frame exists.
int allocate_frame(struct Frame_Info **ptr_frame_info)
{
	// (Removed an unused local 'int c = 0;' from the original.)
	*ptr_frame_info = LIST_FIRST(&free_frame_list);
	if (*ptr_frame_info == NULL)
	{
		// Running out of frames at kernel level is fatal.
		panic("ERROR: Kernel run out of memory... allocate_frame cannot find a free frame.\n");
	}
	LIST_REMOVE(&free_frame_list, *ptr_frame_info);

	/******************* PAGE BUFFERING CODE *******************
	 ***********************************************************/
	// If this frame was sitting on a buffer list for some environment,
	// clear that environment's stale page-table entry before reuse.
	if ((*ptr_frame_info)->isBuffered)
	{
		pt_clear_page_table_entry((*ptr_frame_info)->environment, (*ptr_frame_info)->va);
	}
	/**********************************************************
	 ***********************************************************/

	// Wipe bookkeeping (references stays 0; see map_frame).
	initialize_frame_info(*ptr_frame_info);
	return 0;
}
//
// Return a frame to the free_frame_list.
// (This function should only be called when ptr_frame_info->references reaches 0.)
//
void free_frame(struct Frame_Info *ptr_frame_info)
{
	// Wipe all bookkeeping fields (environment, isBuffered, links, ...)
	// so the frame carries no stale state back onto the free list.
	// Callers must only invoke this once 'references' has reached 0.
	initialize_frame_info(ptr_frame_info);

	// Return the frame to the head of the free list.
	LIST_INSERT_HEAD(&free_frame_list, ptr_frame_info);
}
//
// Decrement the reference count on a frame
// freeing it if there are no more references.
//
void decrement_references(struct Frame_Info* ptr_frame_info)
{
	// Drop one reference; once nothing maps the frame, recycle it.
	ptr_frame_info->references--;
	if (ptr_frame_info->references == 0)
	{
		free_frame(ptr_frame_info);
	}
}
//
// Stores address of page table entry in *ptr_page_table .
// Stores 0 if there is no such entry or on error.
//
// IT RETURNS:
// TABLE_IN_MEMORY : if page table exists in main memory
// TABLE_NOT_EXIST : if page table doesn't exist,
//
// Resolve the page table covering 'virtual_address' and store its
// kernel-visible address in *ptr_page_table (0 if none exists).
// Returns TABLE_IN_MEMORY if the table is (or has been brought) in
// main memory, TABLE_NOT_EXIST if there is no table anywhere.
// Side effect: if the table exists only in secondary storage, this
// fakes a page fault (via CR2 + fault_handler) to page it in.
int get_page_table(uint32 *ptr_page_directory, const void *virtual_address, uint32 **ptr_page_table)
{
	uint32 page_directory_entry = ptr_page_directory[PDX(virtual_address)];

	// Translate the table's physical address to a VA the kernel can use:
	// kheap-allocated tables (user space, USE_KHEAP) live on the kernel
	// heap; otherwise the static KERNEL_BASE mapping applies.
	if(USE_KHEAP && !CHECK_IF_KERNEL_ADDRESS(virtual_address))
	{
		*ptr_page_table = (void *)kheap_virtual_address(EXTRACT_ADDRESS(page_directory_entry)) ;
	}
	else
	{
		*ptr_page_table = STATIC_KERNEL_VIRTUAL_ADDRESS(EXTRACT_ADDRESS(page_directory_entry)) ;
	}

	if ( (page_directory_entry & PERM_PRESENT) == PERM_PRESENT)
	{
		// Table is present in main memory.
		return TABLE_IN_MEMORY;
	}
	else if (page_directory_entry != 0) //the table exists but not in main mem, so it must be in sec mem
	{
		// Simulate the page fault: put the faulted address in CR2, then
		// call fault_handler() to load the table into memory for us.
		lcr2((uint32)virtual_address) ;
		fault_handler(NULL);

		// fault_handler() should have updated the directory with the new
		// table frame; re-read the entry and re-translate its address.
		page_directory_entry = ptr_page_directory[PDX(virtual_address)];
		if(USE_KHEAP && !CHECK_IF_KERNEL_ADDRESS(virtual_address))
		{
			*ptr_page_table = (void *)kheap_virtual_address(EXTRACT_ADDRESS(page_directory_entry)) ;
		}
		else
		{
			*ptr_page_table = STATIC_KERNEL_VIRTUAL_ADDRESS(EXTRACT_ADDRESS(page_directory_entry)) ;
		}
		return TABLE_IN_MEMORY;
	}
	else // there is no table for this va anywhere. This is a new table required, so check if the user want creation
	{
		*ptr_page_table = 0;
		return TABLE_NOT_EXIST;
	}
}
// Create a new page TABLE (via kmalloc) for the given virtual address,
// link it into the given directory as user/writable/present, and return
// the kernel VA of the created table (NULL if kmalloc fails).
// Clears all entries and flushes the TLB, as required.
void * create_page_table(uint32 *ptr_page_directory, const uint32 virtual_address)
{
	uint32 *ptr_page_table = (uint32 *) kmalloc(PAGE_SIZE);
	// BUGFIX: check for allocation failure BEFORE using the address -
	// the original passed it to kheap_physical_address() first.
	if (ptr_page_table == NULL)
		return NULL;

	// a. clear all entries (the heap page may contain garbage data)
	for (int i = 0; i < 1024; i++)
	{
		ptr_page_table[i] = 0;
	}

	// Link the table into the directory. Use CONSTRUCT_ENTRY with plain
	// assignment rather than the original's "|= (PA/PAGE_SIZE)<<12":
	// callers only invoke this when the directory entry is 0 (see
	// map_frame/loadtime_map_frame), so OR-ing added nothing but risked
	// keeping stale bits, and the hand-rolled shift duplicated the macro.
	uint32 phys_page_table = kheap_physical_address((uint32)ptr_page_table);
	ptr_page_directory[PDX(virtual_address)] = CONSTRUCT_ENTRY(phys_page_table, PERM_USER | PERM_PRESENT | PERM_WRITEABLE);

	// b. clear the TLB cache so no stale translations survive.
	tlbflush();
	return (void*)ptr_page_table;
}
// Static (non-kheap) page-table creation hook. Not implemented in this
// configuration: reaching it is a hard error.
void __static_cpt(uint32 *ptr_page_directory, const uint32 virtual_address, uint32 **ptr_page_table)
{
	panic("this function is not required...!!");
}
//
// Map the physical frame 'ptr_frame_info' at 'virtual_address'.
// The permissions (the low 12 bits) of the page table
// entry should be set to 'perm|PERM_PRESENT'.
//
// Details
// - If there is already a frame mapped at 'virtual_address', it should be unmaped
// using unmap_frame().
// - If necessary, on demand, allocates a page table and inserts it into 'ptr_page_directory'.
// - ptr_frame_info->references should be incremented if the insertion succeeds
//
// RETURNS:
// 0 on success
//
// Hint: implement using get_page_table() and unmap_frame().
//
// Map the physical frame 'ptr_frame_info' at 'virtual_address' with
// permissions perm|PERM_PRESENT, creating the page table on demand.
// If another frame is already mapped there it is unmapped first; the
// frame's reference count is incremented on successful insertion.
// Returns 0 on success.
int map_frame(uint32 *ptr_page_directory, struct Frame_Info *ptr_frame_info, void *virtual_address, int perm)
{
	uint32 frame_pa = to_physical_address(ptr_frame_info);
	uint32 *ptr_page_table;

	// Make sure a page table covers this VA, creating one on demand.
	int table_state = get_page_table(ptr_page_directory, virtual_address, &ptr_page_table);
	if (table_state == TABLE_NOT_EXIST)
	{
		if (USE_KHEAP)
		{
			ptr_page_table = create_page_table(ptr_page_directory, (uint32)virtual_address);
		}
		else
		{
			__static_cpt(ptr_page_directory, (uint32)virtual_address, &ptr_page_table);
		}
	}

	uint32 entry = ptr_page_table[PTX(virtual_address)];
	if ((entry & PERM_PRESENT) == PERM_PRESENT)
	{
		// Already mapped to this very frame: nothing to do.
		if (EXTRACT_ADDRESS(entry) == frame_pa)
			return 0;
		// Mapped to a different frame: tear that mapping down first.
		unmap_frame(ptr_page_directory, virtual_address);
	}

	// Install the mapping and account for the new reference.
	ptr_frame_info->references++;
	ptr_page_table[PTX(virtual_address)] = CONSTRUCT_ENTRY(frame_pa, perm | PERM_PRESENT);
	return 0;
}
//
// Return the frame mapped at 'virtual_address'.
// If the page table entry corresponding to 'virtual_address' exists, then we store a pointer to the table in 'ptr_page_table'
// This is used by 'unmap_frame()'
// but should not be used by other callers.
//
// Return 0 if there is no frame mapped at virtual_address.
//
// Hint: implement using get_page_table() and get_frame_info().
//
// Return the Frame_Info of the frame mapped at 'virtual_address', or 0
// if nothing is mapped there. Also stores the page table's address in
// *ptr_page_table (used by unmap_frame(); other callers should not rely
// on it).
struct Frame_Info * get_frame_info(uint32 *ptr_page_directory, void *virtual_address, uint32 **ptr_page_table)
{
	// Locate the page table (this may page it in from secondary storage
	// as a side effect). The original stored the return status in an
	// unused local 'ret' - a null *ptr_page_table already signals
	// "no table", so the status is simply ignored here.
	get_page_table(ptr_page_directory, virtual_address, ptr_page_table);
	if ((*ptr_page_table) == 0)
		return 0;

	uint32 page_table_entry = (*ptr_page_table)[PTX(virtual_address)];
	if (page_table_entry == 0)
		return 0;

	return to_frame_info(EXTRACT_ADDRESS(page_table_entry));
}
//
// Unmaps the physical frame at 'virtual_address'.
//
// Details:
// - The references count on the physical frame should decrement.
// - The physical frame should be freed if the 'references' reaches 0.
// - The page table entry corresponding to 'virtual_address' should be set to 0.
// (if such a page table exists)
// - The TLB must be invalidated if you remove an entry from
// the page directory/page table.
//
// Hint: implement using get_frame_info(),
// tlb_invalidate(), and decrement_references().
//
void unmap_frame(uint32 *ptr_page_directory, void *virtual_address)
{
	uint32 *ptr_page_table;
	struct Frame_Info *mapped_frame = get_frame_info(ptr_page_directory, virtual_address, &ptr_page_table);

	// Nothing mapped at this VA: nothing to undo.
	if (mapped_frame == 0)
		return;

	// Warn when tearing down a user frame that is still buffered.
	if (mapped_frame->isBuffered && !CHECK_IF_KERNEL_ADDRESS((uint32)virtual_address))
		cprintf("Freeing BUFFERED frame at va %x!!!\n", virtual_address);

	// Drop the reference (frees the frame when it reaches 0), clear the
	// page-table entry, and invalidate the stale TLB translation.
	decrement_references(mapped_frame);
	ptr_page_table[PTX(virtual_address)] = 0;
	tlb_invalidate(ptr_page_directory, virtual_address);
}
/*/this function should be called only in the env_create() for creating the page table if not exist
* (without causing page fault as the normal map_frame())*/
// Map the physical frame 'ptr_frame_info' at 'virtual_address'.
// The permissions (the low 12 bits) of the page table
// entry should be set to 'perm|PERM_PRESENT'.
//
// Details
// - If there is already a frame mapped at 'virtual_address', it should be unmaped
// using unmap_frame().
// - If necessary, on demand, allocates a page table and inserts it into 'ptr_page_directory'.
// - ptr_frame_info->references should be incremented if the insertion succeeds
//
// RETURNS:
// 0 on success
//
//
// Load-time variant of map_frame(), called from env_create(): maps
// 'ptr_frame_info' at 'virtual_address' with perm|PERM_PRESENT,
// creating the page table directly (no page-fault machinery).
// Increments the frame's reference count; returns 0.
int loadtime_map_frame(uint32 *ptr_page_directory, struct Frame_Info *ptr_frame_info, void *virtual_address, int perm)
{
	uint32 frame_pa = to_physical_address(ptr_frame_info);
	uint32 dir_entry = ptr_page_directory[PDX(virtual_address)];
	uint32 *ptr_page_table;

	// Resolve the (possibly existing) table's kernel-visible address:
	// kheap tables for user VAs, static KERNEL_BASE mapping otherwise.
	if (USE_KHEAP && !CHECK_IF_KERNEL_ADDRESS(virtual_address))
	{
		ptr_page_table = (uint32*)kheap_virtual_address(EXTRACT_ADDRESS(dir_entry));
	}
	else
	{
		ptr_page_table = STATIC_KERNEL_VIRTUAL_ADDRESS(EXTRACT_ADDRESS(dir_entry));
	}

	// Empty directory slot: create the table and link it in.
	if (dir_entry == 0)
	{
		if (USE_KHEAP)
		{
			ptr_page_table = create_page_table(ptr_page_directory, (uint32)virtual_address);
		}
		else
		{
			__static_cpt(ptr_page_directory, (uint32)virtual_address, &ptr_page_table);
		}
	}

	// Install the mapping and account for the new reference.
	ptr_frame_info->references++;
	ptr_page_table[PTX(virtual_address)] = CONSTRUCT_ENTRY(frame_pa, perm | PERM_PRESENT);
	return 0;
}
///****************************************************************************************///
///******************************* END OF MAPPING USER SPACE ******************************///
///****************************************************************************************///
//======================================================
/// functions used for malloc() and freeHeap()
//======================================================
// [1] allocateMem
// [Kernel side of user malloc] Reserve ALL pages of the given range in
// the PAGE FILE only - nothing is allocated in main memory.
void allocateMem(struct Env* e, uint32 virtual_address, uint32 size)
{
	// Number of pages = size rounded up to a page multiple.
	uint32 required_num_pages = size/PAGE_SIZE + (size % PAGE_SIZE != 0);
	virtual_address = ROUNDDOWN(virtual_address,PAGE_SIZE);

	// BUGFIX: the original declared the loop VA as 'int' ("int i, va"),
	// so any address at or above 0x80000000 was handled with signed
	// arithmetic and could overflow (undefined behavior). Use uint32.
	uint32 va = virtual_address;
	for (uint32 i = 0; i < required_num_pages; i++, va += PAGE_SIZE)
	{
		// Reserve an empty page-file slot for this VA.
		pf_add_empty_env_page(e, va, 0);
	}
}
// [2] freeMem
void freeMem(struct Env* e, uint32 virtual_address, uint32 size)
{
	// Intentionally unimplemented in this build: the buffering-aware variant
	// __freeMem_with_buffering() below is used instead. Any call here is a
	// fatal error by design.
	panic("This function is not required");
}
// __freeMem_with_buffering:
// Releases an allocated user-heap range of env 'e': frees buffered copies,
// evicts resident pages from memory and the working set, frees page tables
// that become empty, and removes the pages from the page file.
// NOTE(review): the loop below performs 'size' iterations of one page each,
// so 'size' is treated here as a PAGE COUNT — unlike allocateMem(), which
// takes a byte count. Confirm against callers.
void __freeMem_with_buffering(struct Env* e, uint32 virtual_address, uint32 size)
{
	//TODO: [PROJECT 2019 - MS2 - [5] User Heap] freeMem() [Kernel Side]
	//This function should:
	uint32 *ptr_table;
	virtual_address = ROUNDDOWN(virtual_address,PAGE_SIZE);
	// NOTE(review): 'va' is a signed int holding a virtual address; values
	// >= 0x80000000 would become negative — verify the user heap range keeps
	// this safe.
	for(int i = 0, va = virtual_address; i < size; i++, va += PAGE_SIZE){
		struct Frame_Info *ptr_frame_info = get_frame_info(e->env_page_directory, (void *)va, &ptr_table);
		int permissions = pt_get_page_permissions(e, va);
		//3. Free any BUFFERED pages in the given range
		if((permissions&PERM_BUFFERED) == PERM_BUFFERED){
			// A buffered page sits on either the modified list (dirty) or the
			// free list (clean); unlink it from the correct one before freeing.
			if((permissions&PERM_MODIFIED) == PERM_MODIFIED)
				bufferlist_remove_page(&modified_frame_list, ptr_frame_info);
			else
				bufferlist_remove_page(&free_frame_list, ptr_frame_info);
			ptr_frame_info->isBuffered = 0;
			ptr_frame_info->environment = NULL;
			free_frame(ptr_frame_info);
			pt_clear_page_table_entry(e, va);
		}
		//2. Free ONLY pages that are resident in the working set from the memory
		// Re-fetch the table pointer: the buffered branch above may have
		// cleared the entry, and 'ptr_table' is reused below.
		get_page_table(e->env_page_directory,(void*)va, &ptr_table);
		if((permissions&PERM_PRESENT) == PERM_PRESENT && ptr_table != NULL){
			unmap_frame(e->env_page_directory, (void*)va);
			env_page_ws_invalidate(e, va);
		}
		//4. Removes ONLY the empty page tables (i.e. not used) (no pages are mapped in the table)
		// Scan all 1024 entries; the table's frame is released only when every
		// entry is zero.
		bool ok = 1;
		if(ptr_table != NULL){
			for(int j = 0; j < 1024; j++)
				if(ptr_table[j])
					ok = 0;
			if(ok){
				uint32 physical=e->env_page_directory[PDX(va)];
				// NOTE(review): 'physical' still carries the directory entry's
				// permission bits; to_frame_info() presumably masks them off —
				// confirm.
				to_frame_info(physical)->references = 0;
				free_frame(to_frame_info(physical));
				pd_clear_page_dir_entry(e, va);
			}
		}
		//1. Free ALL pages of the given range from the Page File
		pf_remove_env_page(e, va);
	}
}
//================= [BONUS] =====================
// [3] moveMem
// moveMem (unimplemented bonus):
// Would relocate 'size' bytes worth of pages belonging to env 'e' from
// 'src_virtual_address' to 'dst_virtual_address', removing every trace of
// the source range from both the page file and main memory.
// Currently always panics.
void moveMem(struct Env* e, uint32 src_virtual_address, uint32 dst_virtual_address, uint32 size)
{
	//TODO: [PROJECT 2019 - BONUS3] User Heap Realloc [Kernel Side]
	//your code is here, remove the panic and write your code
	panic("moveMem() is not implemented yet...!!");
	// This function should move all pages from "src_virtual_address" to "dst_virtual_address"
	// with the given size
	// After finished, the src_virtual_address must no longer be accessed/exist in either page file
	// or main memory
}
//==================================================================================================
//==================================================================================================
//==================================================================================================
// calculate_required_frames:
// calculates the new allocation size required for the given address+size,
// we are not interested in knowing if pages or tables actually exist in memory or the page file,
// we are interested in knowing whether they are allocated or not.
// Counts the frames a new allocation would consume: one per page table that
// does not exist yet, plus one per page that has no frame mapped.
uint32 calculate_required_frames(uint32* ptr_page_directory, uint32 start_virtual_address, uint32 size)
{
	LOG_STATMENT(cprintf("calculate_required_frames: Starting at address %x",start_virtual_address));

	uint32 end_virtual_address = start_virtual_address + size;
	uint32 va;

	// Pass 1: walk the range in 4MB (table-sized) steps and count the page
	// tables that are still unallocated.
	uint32 missing_tables = 0;
	for (va = ROUNDDOWN(start_virtual_address, PAGE_SIZE*1024); va < end_virtual_address; va += PAGE_SIZE*1024)
	{
		uint32 *table;
		get_page_table(ptr_page_directory, (void*) va, &table);
		if (table == 0)
			missing_tables++;
	}

	// Pass 2: walk the range page by page and count the pages with no frame.
	uint32 missing_pages = 0;
	for (va = ROUNDDOWN(start_virtual_address, PAGE_SIZE); va < end_virtual_address; va += PAGE_SIZE)
	{
		uint32 *table;
		if (get_frame_info(ptr_page_directory, (void*) va, &table) == 0)
			missing_pages++;
	}

	LOG_STATMENT(cprintf("calculate_required_frames: Done!"));
	return missing_tables + missing_pages;
}
// calculate_available_frames:
// Walks the free and modified frame lists and returns three tallies:
// free-but-buffered frames, free unbuffered frames, and modified frames.
// (A commented-out Floyd cycle-detection debug helper was removed here.)
struct freeFramesCounters calculate_available_frames()
{
	uint32 freeBufferedCount = 0;
	uint32 freeUnbufferedCount = 0;
	uint32 modifiedCount = 0;
	struct Frame_Info *frame;

	// Free list holds both buffered and unbuffered frames; split the count.
	LIST_FOREACH(frame, &free_frame_list)
	{
		if (frame->isBuffered)
			freeBufferedCount++;
		else
			freeUnbufferedCount++;
	}

	// Every frame on the modified list counts as modified.
	LIST_FOREACH(frame, &modified_frame_list)
	{
		modifiedCount++;
	}

	struct freeFramesCounters counters;
	counters.freeBuffered = freeBufferedCount;
	counters.freeNotBuffered = freeUnbufferedCount;
	counters.modified = modifiedCount;
	return counters;
}
//2018
// calculate_free_frames:
uint32 calculate_free_frames()
{
	// Total entries currently on the free-frame list (buffered + unbuffered).
	uint32 free_count = LIST_SIZE(&free_frame_list);
	return free_count;
}
///============================================================================================
/// Dealing with environment working set
// env_page_ws_get_size:
// Returns the number of occupied working-set slots of env 'e'
// (slots whose 'empty' flag is 0).
inline uint32 env_page_ws_get_size(struct Env *e)
{
	uint32 used_slots = 0;
	for (int idx = 0; idx < e->page_WS_max_size; idx++)
	{
		if (e->ptr_pageWorkingSet[idx].empty == 0)
			used_slots++;
	}
	return used_slots;
}
// env_page_ws_invalidate:
// Clears the working-set entry of env 'e' whose page contains
// 'virtual_address', if one exists. Only the first match is cleared.
inline void env_page_ws_invalidate(struct Env* e, uint32 virtual_address)
{
	uint32 target_page = ROUNDDOWN(virtual_address, PAGE_SIZE);
	for (int idx = 0; idx < e->page_WS_max_size; idx++)
	{
		if (ROUNDDOWN(e->ptr_pageWorkingSet[idx].virtual_address, PAGE_SIZE) == target_page)
		{
			env_page_ws_clear_entry(e, idx);
			return;
		}
	}
}
inline void env_page_ws_set_entry(struct Env* e, uint32 entry_index, uint32 virtual_address)
{
assert(entry_index >= 0 && entry_index < e->page_WS_max_size);
assert(virtual_address >= 0 && virtual_address < USER_TOP);