path: root/sys/src/9/pc64/mmu.c
author	cinap_lenrek <cinap_lenrek@felloff.net>	2019-06-28 18:12:13 +0200
committer	cinap_lenrek <cinap_lenrek@felloff.net>	2019-06-28 18:12:13 +0200
commit	b2c7a8d84a0075fea104009115d0c34bffc39e06 (patch)
tree	915ff021b261043e3b04b695418f1fe86d4ea52f /sys/src/9/pc64/mmu.c
parent	6118d778581b44e5c41a04a60d4aa8e6cad4a37c (diff)
pc64: preallocate mmupool page tables
preallocate 2% of user pages for page tables and MMU structures and keep them mapped in the VMAP range. this leaves more space in the KZERO window and avoids running out of kernel memory on machines with large amounts of memory.
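As a rough illustration of what the 2% amounts to (my arithmetic, not part of the commit; it assumes 4K pages and PTSZ == 4096, and ignores sizeof(MMU), which is small next to a page table):

	/*
	 * 8GB of user memory   -> np = 8GB/4KB = 2097152 pages
	 * 2% for the mmu pool  -> nt = np/50   = 41943 pages = ~164MB
	 * pool entries         -> nt*BY2PG / (sizeof(MMU)+PTSZ),
	 *                         i.e. just under 41943 page tables,
	 *                         each paired with its MMU bookkeeping struct
	 */

Each pool entry is one page-table page plus its MMU header, so on such a machine the kernel keeps on the order of forty thousand page tables permanently mapped in the VMAP range instead of carving them out of the KZERO window at run time.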
Diffstat (limited to 'sys/src/9/pc64/mmu.c')
-rw-r--r--	sys/src/9/pc64/mmu.c	65
1 file changed, 64 insertions(+), 1 deletion(-)
diff --git a/sys/src/9/pc64/mmu.c b/sys/src/9/pc64/mmu.c
index 9261354e8..dfb165ef5 100644
--- a/sys/src/9/pc64/mmu.c
+++ b/sys/src/9/pc64/mmu.c
@@ -271,7 +271,11 @@ mmuwalk(uintptr* table, uintptr va, int level, int create)
if(pte & PTEVALID){
if(pte & PTESIZE)
return 0;
- table = KADDR(PPN(pte));
+ pte = PPN(pte);
+ if(pte >= (uintptr)-KZERO)
+ table = (void*)(pte + VMAP);
+ else
+ table = (void*)(pte + KZERO);
} else {
if(!create)
return 0;
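The hunk above changes how mmuwalk turns the physical address stored in a PTE back into a kernel pointer: a page table that sits inside the KZERO window is still reached through KZERO (what KADDR() did before), while the preallocated tables, which now live above that window, are reached through the permanent VMAP mapping set up further down. A minimal sketch of that decision, with a hypothetical helper name that is not in the patch (it assumes (uintptr)-KZERO equals the size of the KZERO window, 2GB on pc64 per the comment in the next hunk):

	/* hypothetical helper, for illustration only */
	static uintptr*
	ptkaddr(uintptr pa)
	{
		if(pa >= (uintptr)-KZERO)		/* above the KZERO window */
			return (uintptr*)(pa + VMAP);
		return (uintptr*)(pa + KZERO);		/* the old KADDR(PPN(pte)) case */
	}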
@@ -570,3 +574,62 @@ patwc(void *a, int n)
*pte = (*pte & ~mask) | (attr & mask);
}
}
+
+/*
+ * The palloc.pages array and mmupool can be a large chunk
+ * out of the 2GB window above KZERO, so we allocate from
+ * upages and map in the VMAP window before pageinit()
+ */
+void
+preallocpages(void)
+{
+ Pallocmem *pm;
+ uintptr va, base, top;
+ vlong tsize, psize;
+ ulong np, nt;
+ int i;
+
+ np = 0;
+ for(i=0; i<nelem(palloc.mem); i++){
+ pm = &palloc.mem[i];
+ np += pm->npage;
+ }
+ nt = np / 50; /* 2% for mmupool */
+ np -= nt;
+
+ nt = (uvlong)nt*BY2PG / (sizeof(MMU)+PTSZ);
+ tsize = (uvlong)nt * (sizeof(MMU)+PTSZ);
+
+ psize = (uvlong)np * BY2PG;
+ psize += sizeof(Page) + BY2PG;
+ psize = (psize / (sizeof(Page)+BY2PG)) * sizeof(Page);
+
+ psize += tsize;
+ psize = ROUND(psize, PGLSZ(1));
+
+ for(i=0; i<nelem(palloc.mem); i++){
+ pm = &palloc.mem[i];
+ base = ROUND(pm->base, PGLSZ(1));
+ top = pm->base + (uvlong)pm->npage * BY2PG;
+ if((base + psize) <= VMAPSIZE && (vlong)(top - base) >= psize){
+ pm->base = base + psize;
+ pm->npage = (top - pm->base)/BY2PG;
+
+ va = base + VMAP;
+ pmap(m->pml4, base | PTEGLOBAL|PTEWRITE|PTEVALID, va, psize);
+
+ palloc.pages = (void*)(va + tsize);
+
+ mmupool.nfree = mmupool.nalloc = nt;
+ mmupool.free = (void*)(va + (uvlong)nt*PTSZ);
+ for(i=0; i<nt; i++){
+ mmupool.free[i].page = (uintptr*)va;
+ mmupool.free[i].next = &mmupool.free[i+1];
+ va += PTSZ;
+ }
+ mmupool.free[i-1].next = nil;
+
+ break;
+ }
+ }
+}
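Putting the pieces of preallocpages() together, the region stolen from the front of the chosen memory bank and mapped at base+VMAP is laid out as follows (the names are the ones from the code above; the diagram itself is only a sketch):

	/*
	 * base+VMAP           base+VMAP+nt*PTSZ    base+VMAP+tsize
	 * +-------------------+--------------------+---------------------+
	 * | nt page tables    | nt MMU structs     | palloc.pages array  |
	 * | (PTSZ bytes each) | (mmupool.free)     |                     |
	 * +-------------------+--------------------+---------------------+
	 *                                      (psize bytes total, rounded to PGLSZ(1))
	 *
	 * mmupool.free[i].page points at the i-th page table and the MMU
	 * structs are chained into a free list; pageinit() later fills in
	 * the Page array starting at palloc.pages.
	 */

The comment at the top of the function gives the ordering requirement: this has to run before pageinit(), so the rest of the commit (not shown here, the diff being limited to mmu.c) presumably adds a call to preallocpages() at the appropriate point during boot.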