author    cinap_lenrek <cinap_lenrek@felloff.net>  2018-01-29 08:26:42 +0100
committer cinap_lenrek <cinap_lenrek@felloff.net>  2018-01-29 08:26:42 +0100
commit    83d8a24215ddf66ee64fc4704151571b2e952685
tree      920f49c8c22603666b047c259932aced3224de66 /sys/src/9/pc64/mmu.c
parent    b5362dc72220a4ac80678cc00e4289befae337e3
pc64: fix kmap() and invlpg()
flushing the tlb once the index wraps around is not enough, as in-use PTEs can still be speculatively loaded. so instead use invlpg() and explicitly invalidate the tlb entry of the page being mapped. this fixes wired mount cache corruption for reads approaching 2MB, which is the size of the KMAP window. invlpg() was broken, using the wrong operand type.
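
For illustration, here is a minimal sketch of what a per-page invalidation primitive like invlpg() does, as opposed to a full mmuflushtlb(). This is an assumed stand-in written with GCC-style inline assembly; the kernel's real invlpg() lives in its own assembly files and is not shown here. The x86 INVLPG instruction takes a memory operand and invalidates the TLB entry for the page containing that linear address.

#include <stdint.h>

/* sketch only, not the kernel's implementation: invalidate the TLB
 * entry for the page containing va. INVLPG takes a memory operand;
 * the CPU drops the cached translation for that one page instead of
 * flushing the whole TLB. */
static inline void
invlpg(uintptr_t va)
{
	asm volatile("invlpg (%0)" : : "r"(va) : "memory");
}

With a per-page primitive like this, kmap() can invalidate exactly the slot it is about to reuse right after installing the new pte, so the old wrap-around check and full tlb flush are no longer needed.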
Diffstat (limited to 'sys/src/9/pc64/mmu.c')
-rw-r--r--  sys/src/9/pc64/mmu.c  8
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/sys/src/9/pc64/mmu.c b/sys/src/9/pc64/mmu.c
index 6f27ac25e..02f96d55a 100644
--- a/sys/src/9/pc64/mmu.c
+++ b/sys/src/9/pc64/mmu.c
@@ -485,15 +485,13 @@ kmap(Page *page)
 		return (KMap*)KADDR(pa);
 
 	x = splhi();
-	va = KMAP + ((uintptr)up->kmapindex << PGSHIFT);
+	va = KMAP + (((uintptr)up->kmapindex++ << PGSHIFT) & (KMAPSIZE-1));
 	pte = mmuwalk(m->pml4, va, 0, 1);
-	if(pte == 0 || *pte & PTEVALID)
+	if(pte == 0 || (*pte & PTEVALID) != 0)
 		panic("kmap: pa=%#p va=%#p", pa, va);
 	*pte = pa | PTEWRITE|PTEVALID;
-	up->kmapindex = (up->kmapindex + 1) % (1<<PTSHIFT);
-	if(up->kmapindex == 0)
-		mmuflushtlb();
 	splx(x);
+	invlpg(va);
 	return (KMap*)va;
 }
 
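
As a side note on the new index arithmetic above, a small user-space sketch (assumed constants: PGSHIFT = 12 for 4K pages, KMAPSIZE = 0x200000 for the 2MB window mentioned in the commit message; not kernel code) shows that masking the shifted index with KMAPSIZE-1 walks the same 512 page slots as the old modulo by 1<<PTSHIFT and wraps automatically, which is why the explicit wrap check could be dropped.

#include <stdio.h>
#include <stdint.h>

enum {
	PGSHIFT  = 12,		/* 4K pages (assumed) */
	KMAPSIZE = 0x200000,	/* 2MB KMAP window, per the commit message */
};

int
main(void)
{
	unsigned kmapindex = 0;
	int i;

	/* print the window offset produced for a few successive mappings */
	for(i = 0; i < 515; i++){
		uint64_t off = ((uint64_t)kmapindex++ << PGSHIFT) & (KMAPSIZE-1);
		if(i < 3 || i > 510)
			printf("mapping %3d -> window offset %#llx\n", i, (unsigned long long)off);
	}
	return 0;
}

The offsets step by 0x1000 up to 0x1ff000 and wrap back to 0 at mapping 512 (2MB of 4KB pages), matching the old % (1<<PTSHIFT) wrap.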