author	cinap_lenrek <cinap_lenrek@felloff.net>	2019-09-14 14:02:34 +0200
committer	cinap_lenrek <cinap_lenrek@felloff.net>	2019-09-14 14:02:34 +0200
commit	acab8881bc2c0befcda62c2e6cc479cd7529dbe3 (patch)
tree	8e5013e2fd5890fd63544f422b8f05fe0bbfd0f3 /sys/src/9/bcm64/mmu.c
parent	ca2f1c07f2a6bb48582b990868976b45d3663605 (diff)
bcm64: enter page tables in mmutop *AFTER* switching asid in mmuswitch()
there was a small window between modifying mmutop and switching the asid in which the core could bring the new entries into the tlb under the old asid due to speculation / prefetching. this change moves the entering of the page tables into mmutop to after setttbr() to close that window. because procsave()->putasid() switches to the reserved asid 0, the only asid that could have been poisoned was asid 0, which has no user mappings, so this did not show any noticeable effect.
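for illustration, here is a minimal annotated sketch of the reordered tail of mmuswitch(); the helper and type names (Page, allocasid(), flushasid(), setttbr(), PTLX(), PADDR()) are taken from the diff below, everything else, including the function name, is assumed. on arm64 the asid is carried in ttbr bits 63:48, which is why it is shifted left by 48 before being passed to flushasid() and setttbr():

	/*
	 * sketch only, not the verbatim kernel source: the tail of
	 * mmuswitch() after this change, with the ordering argument
	 * spelled out as comments.
	 */
	void
	mmuswitchtail(Proc *p)	/* hypothetical name */
	{
		Page *t;
		uintptr va;

		/* switch the asid first: allocate one if the process has
		 * none, flush stale tlb entries tagged with it, then load
		 * ttbr with the asid and the top-level table address. */
		if(allocasid(p))
			flushasid((uvlong)p->asid<<48);
		setttbr((uvlong)p->asid<<48 | PADDR(m->mmutop));

		/* only now enter the per-process tables into mmutop: any
		 * speculative tlb fill triggered by these stores is tagged
		 * with the new asid, not the old one. */
		for(t = p->mmuhead[PTLEVELS-1]; t != nil; t = t->next){
			va = t->va;
			m->mmutop[PTLX(va, PTLEVELS-1)] = t->pa | PTEVALID | PTETABLE;
		}
	}

the design point is purely one of ordering: hardware table walks can happen speculatively at any time, so the window in which mmutop holds entries reachable under the wrong asid has to be eliminated rather than merely shortened.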
Diffstat (limited to 'sys/src/9/bcm64/mmu.c')
-rw-r--r--	sys/src/9/bcm64/mmu.c	10
1 file changed, 5 insertions, 5 deletions
diff --git a/sys/src/9/bcm64/mmu.c b/sys/src/9/bcm64/mmu.c
index ca8186322..e53270af9 100644
--- a/sys/src/9/bcm64/mmu.c
+++ b/sys/src/9/bcm64/mmu.c
@@ -499,15 +499,15 @@ mmuswitch(Proc *p)
 		p->newtlb = 0;
 	}
 
-	for(t = p->mmuhead[PTLEVELS-1]; t != nil; t = t->next){
-		va = t->va;
-		m->mmutop[PTLX(va, PTLEVELS-1)] = t->pa | PTEVALID | PTETABLE;
-	}
-
 	if(allocasid(p))
 		flushasid((uvlong)p->asid<<48);
 
 	setttbr((uvlong)p->asid<<48 | PADDR(m->mmutop));
+
+	for(t = p->mmuhead[PTLEVELS-1]; t != nil; t = t->next){
+		va = t->va;
+		m->mmutop[PTLX(va, PTLEVELS-1)] = t->pa | PTEVALID | PTETABLE;
+	}
 }
 
 void