## diffname mtx/mmu.c 2001/1122
## diff -e /dev/null /n/emeliedump/2001/1122/sys/src/9/mtx/mmu.c
0a
#include "u.h"
#include "../port/lib.h"
#include "mem.h"
#include "dat.h"
#include "fns.h"
#include "io.h"
#define TLBINVALID KZERO
void
mmuinit(void)
{
int i;
print("mmuinit\n");
for(i=0; i<STLBSIZE; i++)
m->stb[i].virt = TLBINVALID;
}
void
flushmmu(void)
{
int x;
if(0)print("flushmmu(%ld)\n", up->pid);
x = splhi();
up->newtlb = 1;
mmuswitch(up);
splx(x);
}
/*
* called with splhi
*/
void
mmuswitch(Proc *p)
{
int tp;
if(0)print("mmuswitch()\n");
if(p->newtlb) {
memset(p->pidonmach, 0, sizeof p->pidonmach);
p->newtlb = 0;
}
tp = p->pidonmach[m->machno];
putcasid(tp);
}
void
mmurelease(Proc* p)
{
if(0)print("mmurelease(%ld)\n", p->pid);
memset(p->pidonmach, 0, sizeof p->pidonmach);
}
void
purgetlb(int pid)
{
int i, mno;
Proc *sp, **pidproc;
Softtlb *entry, *etab;
if(0)print("purgetlb: pid = %d\n", pid);
m->tlbpurge++;
/*
* find all pid entries that are no longer used by processes
*/
mno = m->machno;
pidproc = m->pidproc;
for(i=1; i<NTLBPID; i++) {
sp = pidproc[i];
if(sp && sp->pidonmach[mno] != i)
pidproc[i] = 0;
}
/*
* shoot down the one we want
*/
sp = pidproc[pid];
if(sp != 0)
sp->pidonmach[mno] = 0;
pidproc[pid] = 0;
/*
* clean out all dead pids from the stlb;
*/
entry = m->stb;
for(etab = &entry[STLBSIZE]; entry < etab; entry++)
if(pidproc[TLBPID(entry->virt)] == 0)
entry->virt = TLBINVALID;
/*
* clean up the hardware
*/
tlbflushall();
}
int
newtlbpid(Proc *p)
{
int i, s;
Proc **h;
i = m->lastpid;
h = m->pidproc;
for(s = 0; s < NTLBPID; s++) {
i++;
if(i >= NTLBPID)
i = 1;
if(h[i] == 0)
break;
}
if(h[i]) {
i = m->purgepid+1;
if(i >= NTLBPID)
i = 1;
m->purgepid = i;
purgetlb(i);
}
if(h[i] != 0)
panic("newtlb");
m->pidproc[i] = p;
p->pidonmach[m->machno] = i;
m->lastpid = i;
if(0)print("newtlbpid: pid=%ld = tlbpid = %d\n", p->pid, i);
return i;
}
void
putmmu(ulong va, ulong pa, Page *pg)
{
char *ctl;
int tp;
ulong h;
qlock(&m->stlblk);
tp = up->pidonmach[m->machno];
if(tp == 0) {
tp = newtlbpid(up);
putcasid(tp);
}
h = ((va>>12)^(va>>24)^(tp<<8)) & 0xfff;
m->stb[h].virt = va|tp;
m->stb[h].phys = pa;
tlbflush(va);
qunlock(&m->stlblk);
ctl = &pg->cachectl[m->machno];
if(0)print("putmmu tp=%d h=%ld va=%lux pa=%lux ctl=%x\n", tp, h,va, pa, *ctl);
switch(*ctl) {
default:
panic("putmmu: %d\n", *ctl);
break;
case PG_NOFLUSH:
break;
case PG_TXTFLUSH:
dcflush((void*)pg->va, BY2PG);
icflush((void*)pg->va, BY2PG);
*ctl = PG_NOFLUSH;
break;
case PG_NEWCOL:
print("PG_NEWCOL!!\n");
*ctl = PG_NOFLUSH;
break;
}
}
.
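
The 1122 version keeps a per-processor software TLB: putmmu hashes the faulting
address together with the process's tlb pid into a slot of m->stb, and the pid
machinery (newtlbpid/purgetlb) recycles tlb pids when they run out. Below is a
minimal stand-alone sketch of that slot calculation, not taken from the tree: it
assumes STLBSIZE is 4096 (which is what the 0xfff mask implies), and the address
and pid in main are invented for illustration.

	#include <stdio.h>

	enum { STLBSIZE = 4096 };	/* assumed size, implied by the 0xfff mask */

	static unsigned long
	stlbhash(unsigned long va, int tlbpid)
	{
		/* fold the low and high parts of the page number with the tlb pid,
		 * so the same va in different address spaces lands in different slots */
		return ((va>>12) ^ (va>>24) ^ ((unsigned long)tlbpid<<8)) & (STLBSIZE-1);
	}

	int
	main(void)
	{
		unsigned long va = 0x00403000UL;	/* made-up user address */
		int tlbpid = 3;				/* made-up tlb pid */

		printf("va %#lx tlbpid %d -> stlb slot %#lx\n",
			va, tlbpid, stlbhash(va, tlbpid));
		return 0;
	}
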
## diffname mtx/mmu.c 2001/1207
## diff -e /n/emeliedump/2001/1122/sys/src/9/mtx/mmu.c /n/emeliedump/2001/1207/sys/src/9/mtx/mmu.c
135,171d
57,132d
46c
// putcasid(tp);
.
16,17d
## diffname mtx/mmu.c 2001/1212
## diff -e /n/emeliedump/2001/1207/sys/src/9/mtx/mmu.c /n/emeliedump/2001/1212/sys/src/9/mtx/mmu.c
8,9d
## diffname mtx/mmu.c 2001/1219
## diff -e /n/emeliedump/2001/1212/sys/src/9/mtx/mmu.c /n/emeliedump/2001/1219/sys/src/9/mtx/mmu.c
48d
36d
21d
11,13d
## diffname mtx/mmu.c 2001/1222
## diff -e /n/emeliedump/2001/1219/sys/src/9/mtx/mmu.c /n/emeliedump/2001/1222/sys/src/9/mtx/mmu.c
48a
}
int
newmmupid(void)
{
return -1;
.
43c
p->mmupid = 0;
.
36,37c
mp = p->mmupid;
if(mp == 0)
mp = newmmupid();
// for(i = 0; i < 8; i++)
// putsr(i,
.
33c
p->mmupid = 0;
.
30c
int mp;
.
10a
int lhash, mem;
extern ulong memsize; /* passed in from ROM monitor */
/* heuristically size the hash table */
lhash = 10; /* log of hash table size */
mem = (1<<23);
while(mem < memsize) {
lhash++;
mem <<= 1;
}
ptabsize = (1<<(lhash+6));
ptab = xspanalloc(ptabsize, 0, ptabsize);
putsdr1(PADDR(ptab) | ((1<<(lhash-10))-1));
.
7a
void *ptab;
ulong ptabsize;
.
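
The 1222 revision starts the move to the PowerPC hashed page table: the table is
sized from the memsize passed in by the ROM monitor, allocated with xspanalloc
aligned to its own size, and its base and mask are loaded into SDR1. The sketch
below pulls that sizing heuristic out so it can be run on its own; the 128MB
memsize is a made-up example and the names beyond those in the diff are mine.

	#include <stdio.h>

	int
	main(void)
	{
		unsigned long memsize = 128*1024*1024;	/* hypothetical ROM monitor figure */
		int lhash = 10;				/* log2 of the number of hash buckets */
		unsigned long mem = 1UL<<23;		/* heuristic floor of 8MB */
		unsigned long ptabsize, sdr1mask;

		/* grow the table until it covers the reported memory */
		while(mem < memsize){
			lhash++;
			mem <<= 1;
		}
		ptabsize = 1UL<<(lhash+6);		/* 64 bytes per hash bucket */
		sdr1mask = (1UL<<(lhash-10)) - 1;	/* or'ed into SDR1 with the table base */

		printf("lhash %d ptabsize %#lx sdr1 mask %#lx\n", lhash, ptabsize, sdr1mask);
		return 0;
	}

For 128MB the loop settles on lhash 14, i.e. a 1MB table and an SDR1 mask of 0xf.
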
## diffname mtx/mmu.c 2002/0104
## diff -e /n/emeliedump/2001/1222/sys/src/9/mtx/mmu.c /n/emeliedump/2002/0104/sys/src/9/mtx/mmu.c
25,27c
ptab.size = (1<<(lhash+6));
ptab.base = xspanalloc(ptab.size, 0, ptab.size);
putsdr1(PADDR(ptab.base) | ((1<<(lhash-10))-1));
.
10a
static struct {
Lock;
void *base; /* start of page table in kernel virtual space */
ulong size; /* number of bytes in page table */
int slotgen; /* used to choose which pte to allocate in pteg */
} ptab;
.
8,9c
/*
* The page table is shared across all processes and processors
* (hence needs to be locked for updates on a multiprocessor).
* Different processes are distinguished via the VSID field in
* the segment registers. As flushing the entire page table is an
* expensive operation, we implement an aging algorithm for
* mmu pids, with a background kproc to purge stale pids en masse.
*/
.
## diffname mtx/mmu.c 2002/0108
## diff -e /n/emeliedump/2002/0104/sys/src/9/mtx/mmu.c /n/emeliedump/2002/0108/sys/src/9/mtx/mmu.c
88c
int pid;
lock(&ptab);
pid = ptab.pidgen++;
unlock(&ptab);
if(pid > PIDMAX)
panic("newmmupid");
up->mmupid = pid;
return pid;
.
82a
int mp;
ulong *p, *ep, *q, pteg;
ulong vsid, ptehi, x, hash;
mp = up->mmupid;
if(mp == 0)
panic("putmmu pid");
vsid = VSID(mp, va>>28);
hash = (vsid ^ (va>>12)&0xffff) & ptab.mask;
ptehi = BIT(0)|(vsid<<7)|((va>>22)&0x3f);
pteg = ptab.base + 64*hash;
p = (ulong*)pteg;
ep = (ulong*)(pteg+64);
q = nil;
lock(&ptab);
tlbflush(va);
while(p < ep) {
x = p[0];
if(x == ptehi) {
q = p;
if(q[1] == pa) panic("putmmu already set pte");
break;
}
if(q == nil && (x & BIT(0)) == 0)
q = p;
p += 2;
}
if(q == nil) {
q = (ulong*)(pteg+ptab.slotgen);
ptab.slotgen = (ptab.slotgen + 8) & 0x3f;
}
q[0] = ptehi;
q[1] = pa;
sync();
unlock(&ptab);
.
81c
putmmu(ulong va, ulong pa, Page*)
.
70,71c
for(i = 0; i < 8; i++)
putsr(i<<28, VSID(mp, i)|BIT(1)|BIT(2));
.
60c
int i, mp;
.
40a
ptab.pidgen = PIDBASE;
ptab.mask = (1<<lhash)-1;
.
39c
ptab.base = (ulong)xspanalloc(ptab.size, 0, ptab.size);
.
23a
/*
* VSID is 24 bits. 3 are required to distinguish segments in user
* space (kernel space only uses the BATs).
*/
#define VSID(pid, i) (((pid)<<3)|i)
enum {
PIDBASE = 1,
PIDMAX = ((1<<21)-1),
};
.
21c
ulong mask; /* hash mask */
int slotgen; /* next pte (byte offset) when pteg is full */
int pidgen; /* next mmu pid to use */
.
19c
ulong base; /* start of page table in kernel virtual space */
.
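
The 0108 revision fills the hash table in. Each process gets an mmu pid;
VSID(pid, segment) turns it into eight VSIDs, one per 256MB user segment, which
mmuswitch loads into the segment registers; putmmu then walks the PTEG chosen by
the primary hash, reusing a matching or invalid slot before falling back to
slotgen. A sketch of the address arithmetic follows; the pid, address and table
mask are invented, and everything except the VSID macro is mine.

	#include <stdio.h>

	#define VSID(pid, i)	(((pid)<<3)|(i))	/* as in the diff */

	int
	main(void)
	{
		unsigned long va = 0x00403000UL;	/* made-up user address */
		int mmupid = 7;				/* made-up mmu pid */
		unsigned long mask = 0x3fff;		/* ptab.mask for a 1MB table */
		unsigned long vsid, hash, ptegoff;

		vsid = VSID(mmupid, va>>28);		/* one vsid per 256MB segment */
		/* primary hash: vsid xor page index, masked to the table size
		 * (parenthesised here; & binds tighter than ^, so it matches the diff) */
		hash = (vsid ^ ((va>>12) & 0xffff)) & mask;
		ptegoff = 64*hash;			/* byte offset of the 8-entry, 64-byte PTEG */

		printf("vsid %#lx hash %#lx pteg offset %#lx\n", vsid, hash, ptegoff);
		return 0;
	}
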
## diffname mtx/mmu.c 2002/0112
## diff -e /n/emeliedump/2002/0108/sys/src/9/mtx/mmu.c /n/emeliedump/2002/0112/sys/src/9/mtx/mmu.c
130c
ptab.slotgen = (ptab.slotgen + BY2PTE) & (BY2PTEG-1);
.
113c
ep = (ulong*)(pteg+BY2PTEG);
.
111c
pteg = ptab.base + BY2PTEG*hash;
.
109c
ptehi = PTE0(1, vsid, 0, va);
.
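
The 0112 revision only swaps the literal 8s and 64s for BY2PTE, BY2PTEG and a
PTE0 macro, but it makes the eviction policy easier to read: when a PTEG has no
free or matching slot, the entry at byte offset slotgen is overwritten, and the
single shared counter then advances by one PTE and wraps within the group. A toy
illustration, taking BY2PTE and BY2PTEG to be 8 and 64 as the earlier literals
imply:

	#include <stdio.h>

	enum { BY2PTE = 8, BY2PTEG = 64 };	/* assumed values */

	int
	main(void)
	{
		int slotgen = 0, i;

		/* one counter cycles through the eight PTE slots of a group; whichever
		 * PTEG overflows next has the slot at this offset replaced */
		for(i = 0; i < 10; i++){
			printf("next full-PTEG insert overwrites byte offset %d\n", slotgen);
			slotgen = (slotgen + BY2PTE) & (BY2PTEG-1);
		}
		return 0;
	}
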
## diffname mtx/mmu.c 2002/0116
## diff -e /n/emeliedump/2002/0112/sys/src/9/mtx/mmu.c /n/emeliedump/2002/0116/sys/src/9/mtx/mmu.c
135a
ctl = &pg->cachectl[m->machno];
switch(*ctl) {
case PG_NEWCOL:
default:
panic("putmmu: %d\n", *ctl);
break;
case PG_NOFLUSH:
break;
case PG_TXTFLUSH:
dcflush((void*)pg->va, BY2PG);
icflush((void*)pg->va, BY2PG);
*ctl = PG_NOFLUSH;
break;
}
.
99a
char *ctl;
.
97c
putmmu(ulong va, ulong pa, Page *pg)
.
## diffname mtx/mmu.c 2002/0124
## diff -e /n/emeliedump/2002/0116/sys/src/9/mtx/mmu.c /n/emeliedump/2002/0124/sys/src/9/mtx/mmu.c
163c
pid = PIDBASE;
ptab.pidlock = 0;
coherence();
.
161d
159c
/* can't use lock() here as we're called from within sched() */
while(!tas(&ptab.pidlock))
;
.
136c
iunlock(&ptab);
.
122c
if(q[1] == pa) print("putmmu already set pte\n");
.
116c
ilock(&ptab);
.
77a
if(p->kp) {
for(i = 0; i < 8; i++)
putsr(i<<28, 0);
return;
}
.
22a
int pidlock;
.
## diffname mtx/mmu.c 2002/0126
## diff -e /n/emeliedump/2002/0124/sys/src/9/mtx/mmu.c /n/emeliedump/2002/0126/sys/src/9/mtx/mmu.c
167,174c
pid = m->mmupid++;
if(m->mmupid > PIDMAX)
panic("ran out of mmu pids");
// m->mmupid = PIDBASE;
.
144d
138,139c
q = (ulong*)(pteg+m->slotgen);
m->slotgen = (m->slotgen + BY2PTE) & (BY2PTEG-1);
.
124d
120c
pteg = m->ptabbase + BY2PTEG*hash;
.
117c
hash = (vsid ^ (va>>12)&0xffff) & ptabmask;
.
54,58c
m->ptabbase = (ulong)xspanalloc(ptabsize, 0, ptabsize);
putsdr1(PADDR(m->ptabbase) | (ptabmask>>10));
m->mmupid = PIDBASE;
v = getdec();
memset((void*)m->ptabbase, 0, ptabsize);
v -= getdec();
print("memset took %lud cycles, dechz %lud\n", v, m->dechz);
.
46,51c
if(ptabsize == 0) {
/* heuristically size the hash table */
lhash = 10;
mem = (1<<23);
while(mem < memsize) {
lhash++;
mem <<= 1;
}
ptabsize = (1<<(lhash+6));
ptabmask = (1<<lhash)-1;
.
44a
ulong v;
.
24,27d
17,22c
static ulong ptabsize; /* number of bytes in page table */
static ulong ptabmask; /* hash mask */
.
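
Besides moving the page table and the pid counter into the Mach structure, the
0126 revision times the table memset with the decrementer. The PowerPC
decrementer counts down, so sampling it before and after and subtracting the
second sample from the first gives the elapsed ticks; strictly these are
decrementer ticks at m->dechz per second, not processor cycles as the print
says. A tiny sketch of that arithmetic with invented numbers:

	#include <stdio.h>

	int
	main(void)
	{
		unsigned long start = 9000000;	/* hypothetical getdec() before the memset */
		unsigned long end = 8900000;	/* hypothetical getdec() afterwards */
		unsigned long dechz = 25000000;	/* hypothetical decrementer frequency */
		unsigned long v;

		/* same shape as "v = getdec(); ... v -= getdec();" in the diff:
		 * start - end is how far the down-counter fell */
		v = start;
		v -= end;
		printf("elapsed %lu ticks, about %lu microseconds at %lu Hz\n",
			v, v/(dechz/1000000), dechz);
		return 0;
	}
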
## diffname mtx/mmu.c 2002/0201
## diff -e /n/emeliedump/2002/0126/sys/src/9/mtx/mmu.c /n/emeliedump/2002/0201/sys/src/9/mtx/mmu.c
54,58d
37d
## diffname mtx/mmu.c 2002/0212
## diff -e /n/emeliedump/2002/0201/sys/src/9/mtx/mmu.c /n/emeliedump/2002/0212/sys/src/9/mtx/mmu.c
152,164d
55a
mmusweep(void*)
{
Proc *p;
int i, x, sweepcolor;
ulong *ptab, *ptabend, ptecol;
for(;;) {
if(PIDCOLOR(m->mmupid) != m->trigcolor)
sleep(&m->sweepr, work, nil);
sweepcolor = m->sweepcolor;
//print("sweep %d trig %d\n", sweepcolor, m->trigcolor);
x = splhi();
p = proctab(0);
for(i = 0; i < conf.nproc; i++, p++)
if(PIDCOLOR(p->mmupid) == sweepcolor)
p->mmupid = 0;
splx(x);
ptab = (ulong*)m->ptabbase;
ptabend = (ulong*)(m->ptabbase+ptabsize);
ptecol = PTECOL(sweepcolor);
while(ptab < ptabend) {
if((*ptab & PTECOL(3)) == ptecol)
*ptab = 0;
ptab += 2;
}
//print("swept %d\n", sweepcolor);
m->sweepcolor = (sweepcolor+1) & COLMASK;
m->trigcolor = (m->trigcolor+1) & COLMASK;
}
}
int
newmmupid(void)
{
int pid, newcolor;
pid = m->mmupid++;
if(m->mmupid > PIDMAX)
m->mmupid = PIDBASE;
newcolor = PIDCOLOR(m->mmupid);
if(newcolor != PIDCOLOR(pid)) {
if(newcolor == m->sweepcolor)
panic("ran out of pids");
else if(newcolor == m->trigcolor)
wakeup(&m->sweepr);
}
up->mmupid = pid;
return pid;
}
void
.
54a
static int
work(void*)
{
return PIDCOLOR(m->mmupid) == m->trigcolor;
}
.
52a
m->sweepcolor = 0;
m->trigcolor = 2;
.
31a
#define VSID(pid, i) (((pid)<<3)|i)
#define PIDCOLOR(pid) ((pid)>>(PIDBITS-COLBITS))
#define PTECOL(color) PTE0(1, VSID(((color)<<(PIDBITS-COLBITS)), 0), 0, 0)
.
29c
PIDBITS = 21,
COLBITS = 2,
PIDMAX = ((1<<PIDBITS)-1),
COLMASK = ((1<<COLBITS)-1),
.
25,26d
22c
* space (kernel space only uses the BATs). pid 0 is reserved.
* The top 2 bits of the pid are used as a `color' for the background
* pid reclamation algorithm.
.
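
The 0212 revision adds the aging scheme promised by the earlier comment: mmu
pids are 21 bits, the top two bits are a color, and the mmusweep kproc, woken
when allocation reaches trigcolor, clears every process and page table entry
carrying sweepcolor, the band the allocator will enter next, then advances both
colors. The constants below mirror the diff; main is only a demonstration of how
the pid space divides into bands.

	#include <stdio.h>

	enum {
		PIDBASE = 1,
		PIDBITS = 21,
		COLBITS = 2,
		PIDMAX = (1<<PIDBITS)-1,
		COLMASK = (1<<COLBITS)-1,
	};

	#define PIDCOLOR(pid)	((pid)>>(PIDBITS-COLBITS))

	int
	main(void)
	{
		int color, first, last;

		/* four bands of 1<<19 pids each; pid 0 stays reserved */
		for(color = 0; color <= COLMASK; color++){
			first = color==0? PIDBASE: color<<(PIDBITS-COLBITS);
			last = ((color+1)<<(PIDBITS-COLBITS)) - 1;
			printf("color %d: pids %d to %d, PIDCOLOR(%d) == %d\n",
				color, first, last, last, PIDCOLOR(last));
		}
		return 0;
	}
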
## diffname mtx/mmu.c 2002/0213
## diff -e /n/emeliedump/2002/0212/sys/src/9/mtx/mmu.c /n/emeliedump/2002/0213/sys/src/9/mtx/mmu.c
192d
175,177c
/*
* If mmupid is 0, mmuswitch/newmmupid was unable to assign us
* a pid, hence we faulted. Keep calling sched() until the mmusweep
* proc catches up, and we are able to get a pid.
*/
while((mp = up->mmupid) == 0)
sched();
.
115,116c
if(newcolor == m->sweepcolor) {
/* desperation time. can't block here. punt to fault/putmmu */
print("newmmupid: %uld: no free mmu pids\n", up->pid);
if(m->mmupid == PIDBASE)
m->mmupid = PIDMAX;
else
m->mmupid--;
pid = 0;
}
.
98c
tlbflushall();
.
82d
61c
m->trigcolor = COLMASK;
.
14a
*
* This needs modifications to run on a multiprocessor.
.
9,10c
* We have one page table per processor.
*
.
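
The final (0213) revision accepts that pid allocation can outrun the sweeper:
instead of panicking, newmmupid undoes its increment and hands back pid 0, and
putmmu keeps calling sched() until a pid can be assigned. The sketch below
mirrors that fallback with stand-in variables for m->mmupid and m->sweepcolor;
the trigcolor wakeup branch and the diagnostic print are left out, and the
numbers in main are chosen only to show the failure case.

	#include <stdio.h>

	enum { PIDBASE = 1, PIDBITS = 21, COLBITS = 2, PIDMAX = (1<<PIDBITS)-1 };

	#define PIDCOLOR(pid)	((pid)>>(PIDBITS-COLBITS))

	static int mmupid, sweepcolor;	/* stand-ins for the per-processor fields */

	/* if the next pid would land in the band the sweeper has not cleared yet,
	 * back the counter off and return 0 so the caller faults and retries */
	static int
	trynewpid(void)
	{
		int pid;

		pid = mmupid++;
		if(mmupid > PIDMAX)
			mmupid = PIDBASE;
		if(PIDCOLOR(mmupid) != PIDCOLOR(pid) && PIDCOLOR(mmupid) == sweepcolor){
			if(mmupid == PIDBASE)
				mmupid = PIDMAX;
			else
				mmupid--;
			pid = 0;
		}
		return pid;
	}

	int
	main(void)
	{
		sweepcolor = 1;				/* pretend color 1 is still being swept */
		mmupid = (1<<(PIDBITS-COLBITS)) - 2;	/* two pids left in color 0 */

		printf("first allocation: pid %d\n", trynewpid());	/* succeeds */
		printf("second allocation: pid %d\n", trynewpid());	/* 0: next band not swept */
		return 0;
	}
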