[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

UVM (Re: MNN)



<199805300634.PAA19219@ruri.iri.co.jp>の記事において
1998年05月30日15時34分頃、tsubai@iri.co.jpさんは書きました。

 > じゃあ次は UVM ですね。:-)

ちゅーことでUVM対応にしてみました。

#include "opt_uvm.h" を該当ファイルに追加。

kmem_alloc_wait		uvm_km_valloc_wait
kmem_free_wakeup	uvm_km_free_wakeup
vmspace_free		uvmspace_free
cnt			uvmexp
vm_fault		uvm_fault   (arg 3<->4)
vm_page_physload	uvm_page_physload
kmem_alloc		uvm_km_alloc
			uvm_km_zalloc (zeros memory)
kmem_free		uvm_km_free
kmem_alloc_pageable	uvm_km_valloc
vm_page_alloc		uvm_pagealloc (add arg 3)
vm_page_free		uvm_pagefree
vm_page_alloc_memory	uvm_pglistalloc
vm_page_free_memory	uvm_pglistfree
vm_set_page_size	uvm_setpagesize
kmem_suballoc		uvm_map (add arg 6,7)
と変更。

kcopy 追加。


 > 基本的には vm_... のいくつかを対応する uvm_... に書き換えるだけ
 > なんですが、一発でうごかなかったら楽しいデバッグが待ってます。
 > 
 > UVM って MD 側での整合性チェック(のようなもの)が MACH VM にくらべ
 > て*とても*甘くなっているので、pmap.c が変なことしてるとなかなか
 > うごいてくれないんです。

多分動いてます。
とりあえず30分くらいはpanicせずに動いています > bebox :-)

以下はpowerpc, beboxへのpatchです。
本当はcurrentからのpatchにしたかったのですが、
会社で動かしているNetBSD/i386が死んでしまった :-( ようなので、
tsubaiさんのMNN patchがあたった後のdiffになっています。

足りない部分や間違いがあったら教えてください。
uvm関係のheaderのincludeをまったくしていないのと、
kcopyの所がちと不安。

# 速くなったようななっていないような...

sakamoto@cec.co.jp

===========POWERPC
=====./powerpc/pmap.c
*** ../powerpc.org/./powerpc/pmap.c	Thu May 28 22:21:23 1998
--- ./powerpc/pmap.c	Sun May 31 03:06:13 1998
***************
*** 30,35 ****
--- 30,37 ----
   * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
   * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   */
+ #include "opt_uvm.h"
+ 
  #include <sys/param.h>
  #include <sys/malloc.h>
  #include <sys/proc.h>
***************
*** 492,500 ****
--- 494,507 ----
  
  #if defined(MACHINE_NEW_NONCONTIG)
  	for (mp = avail; mp->size; mp++)
+ #if defined(UVM)
+ 		uvm_page_physload(atop(mp->start), atop(mp->start + mp->size),
+ 			atop(mp->start), atop(mp->start + mp->size));
+ #else
  		vm_page_physload(atop(mp->start), atop(mp->start + mp->size),
  			atop(mp->start), atop(mp->start + mp->size));
  #endif
+ #endif
  
  	/*
  	 * Initialize kernel pmap and hardware.
***************
*** 560,566 ****
--- 567,579 ----
  
  	sz = (vm_size_t)((sizeof(struct pv_entry) + 1) * npgs);
  	sz = round_page(sz);
+ #if defined(UVM)
+ 	/* XXXCDC: ABSOLUTELY WRONG!   uvm_km_alloc() _CAN_
+ 		return 0 if out of VM */
+ 	addr = (vm_offset_t)uvm_km_alloc(kernel_map, sz);
+ #else
  	addr = (vm_offset_t)kmem_alloc(kernel_map, sz);
+ #endif
  	s = splimp();
  	pv = pv_table = (struct pv_entry *)addr;
  	for (i = npgs; --i >= 0;)
***************
*** 816,823 ****
--- 829,841 ----
  	int i;
  	
  	if (pv_nfree == 0) {
+ #if defined(UVM)
+ 		if (!(pvp = (struct pv_page *)uvm_km_alloc(kernel_map, NBPG)))
+ 			panic("pmap_alloc_pv: uvm_km_alloc() failed");
+ #else
  		if (!(pvp = (struct pv_page *)kmem_alloc(kernel_map, NBPG)))
  			panic("pmap_alloc_pv: kmem_alloc() failed");
+ #endif
  		pv_pcnt++;
  		pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
  		for (i = NPVPPG - 2; --i >= 0; pv++)
***************
*** 856,862 ****
--- 874,884 ----
  		pv_nfree -= NPVPPG - 1;
  		pv_pcnt--;
  		LIST_REMOVE(pvp, pvp_pgi.pgi_list);
+ #if defined(UVM)
+ 		uvm_km_free(kernel_map, (vm_offset_t)pvp, NBPG);
+ #else
  		kmem_free(kernel_map, (vm_offset_t)pvp, NBPG);
+ #endif
  		break;
  	}
  }
***************
*** 881,887 ****
--- 903,913 ----
  		 * Since we cannot use maps for potable allocation,
  		 * we have to steal some memory from the VM system.			XXX
  		 */
+ #if defined(UVM)
+ 		mem = uvm_pagealloc(NULL, 0, NULL);
+ #else
  		mem = vm_page_alloc(NULL, NULL);
+ #endif
  		po_pcnt++;
  		pop = (struct po_page *)VM_PAGE_TO_PHYS(mem);
  		pop->pop_pgi.pgi_page = mem;
***************
*** 917,923 ****
--- 943,953 ----
  		po_nfree -= NPOPPG - 1;
  		po_pcnt--;
  		LIST_REMOVE(pop, pop_pgi.pgi_list);
+ #if defined(UVM)
+ 		uvm_pagefree(pop->pop_pgi.pgi_page);
+ #else
  		vm_page_free(pop->pop_pgi.pgi_page);
+ #endif
  		return;
  	case 1:
  		LIST_INSERT_HEAD(&po_page_freelist, pop, pop_pgi.pgi_list);
=====./powerpc/trap.c
*** ../powerpc.org/./powerpc/trap.c	Thu May 28 17:18:23 1998
--- ./powerpc/trap.c	Sun May 31 02:52:02 1998
***************
*** 30,35 ****
--- 30,37 ----
   * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
   * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   */
+ #include "opt_uvm.h"
+ 
  #include <sys/param.h>
  #include <sys/proc.h>
  #include <sys/reboot.h>
***************
*** 95,103 ****
--- 97,111 ----
  				ftype = VM_PROT_READ | VM_PROT_WRITE;
  			else
  				ftype = VM_PROT_READ;
+ #if defined(UVM)
+ 			if (uvm_fault(map, trunc_page(va), FALSE, ftype)
+ 			    == KERN_SUCCESS)
+ 				break;
+ #else
  			if (vm_fault(map, trunc_page(va), ftype, FALSE)
  			    == KERN_SUCCESS)
  				break;
+ #endif
  			if (fb = p->p_addr->u_pcb.pcb_onfault) {
  				frame->srr0 = (*fb)[0];
  				frame->fixreg[1] = (*fb)[1];
***************
*** 116,125 ****
--- 124,140 ----
  				ftype = VM_PROT_READ | VM_PROT_WRITE;
  			else
  				ftype = VM_PROT_READ;
+ #if defined(UVM)
+ 			if (uvm_fault(&p->p_vmspace->vm_map,
+ 				     trunc_page(frame->dar), FALSE, ftype)
+ 			    == KERN_SUCCESS)
+ 				break;
+ #else
  			if (vm_fault(&p->p_vmspace->vm_map,
  				     trunc_page(frame->dar), ftype, FALSE)
  			    == KERN_SUCCESS)
  				break;
+ #endif
  		}
  		trapsignal(p, SIGSEGV, EXC_DSI);
  		break;
***************
*** 128,137 ****
--- 143,159 ----
  			int ftype;
  			
  			ftype = VM_PROT_READ | VM_PROT_EXECUTE;
+ #if defined(UVM)
+ 			if (uvm_fault(&p->p_vmspace->vm_map,
+ 				     trunc_page(frame->srr0), FALSE, ftype)
+ 			    == KERN_SUCCESS)
+ 				break;
+ #else
  			if (vm_fault(&p->p_vmspace->vm_map,
  				     trunc_page(frame->srr0), ftype, FALSE)
  			    == KERN_SUCCESS)
  				break;
+ #endif
  		}
  		trapsignal(p, SIGSEGV, EXC_ISI);
  		break;
***************
*** 144,150 ****
--- 166,176 ----
  			int nsys, n;
  			register_t args[10];
  			
+ #if defined(UVM)
+ 			uvmexp.syscalls++;
+ #else
  			cnt.v_syscall++;
+ #endif
  			
  			nsys = p->p_emul->e_nsysent;
  			callp = p->p_emul->e_sysent;
***************
*** 271,277 ****
--- 297,307 ----
  
  	astpending = 0;		/* we are about to do it */
  
+ #if defined(UVM)
+ 	uvmexp.softs++;
+ #else
  	cnt.v_soft++;
+ #endif
  
  	if (p->p_flag & P_OWEUPC) {
  		p->p_flag &= ~P_OWEUPC;
***************
*** 401,405 ****
--- 431,463 ----
  		len -= l;
  	}
  	curpcb->pcb_onfault = 0;
+ 	return 0;
+ }
+ 
+ /*
+  * kcopy(const void *src, void *dst, size_t len);
+  *
+  * Copy len bytes from src to dst, aborting if we encounter a fatal
+  * page fault.
+  *
+  * kcopy() _must_ save and restore the old fault handler since it is
+  * called by uiomove(), which may be in the path of servicing a non-fatal
+  * page fault.
+  */
+ int
+ kcopy(src, dst, len)
+ 	const void *src;
+ 	void *dst;
+ 	size_t len;
+ {
+ 	faultbuf env, *oldfault;
+ 
+ 	oldfault = curpcb->pcb_onfault;
+ 	if (setfault(env)) {
+ 		curpcb->pcb_onfault = oldfault;
+ 		return EFAULT;
+ 	}
+ 	bcopy(src, dst, len);
+ 	curpcb->pcb_onfault = oldfault;
  	return 0;
  }
=====./powerpc/vm_machdep.c
*** ../powerpc.org/./powerpc/vm_machdep.c	Thu May 28 17:18:25 1998
--- ./powerpc/vm_machdep.c	Sun May 31 00:39:49 1998
***************
*** 30,35 ****
--- 30,37 ----
   * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
   * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   */
+ #include "opt_uvm.h"
+ 
  #include <sys/param.h>
  #include <sys/core.h>
  #include <sys/exec.h>
***************
*** 159,165 ****
--- 161,171 ----
  	if (p == fpuproc)	/* release the fpu */
  		fpuproc = 0;
  	
+ #if defined(UVM)
+ 	uvmspace_free(p->p_vmspace);
+ #else
  	vmspace_free(p->p_vmspace);
+ #endif
  	switchexit(kernel_map, p->p_addr, USPACE);
  }
  
***************
*** 221,227 ****
--- 227,237 ----
  	faddr = trunc_page(bp->b_saveaddr = bp->b_data);
  	off = (vm_offset_t)bp->b_data - faddr;
  	len = round_page(off + len);
+ #if defined(UVM)
+ 	taddr = uvm_km_valloc_wait(phys_map, len);
+ #else
  	taddr = kmem_alloc_wait(phys_map, len);
+ #endif
  	bp->b_data = (caddr_t)(taddr + off);
  	for (; len > 0; len -= NBPG) {
  		pa = pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map), faddr);
***************
*** 249,255 ****
--- 259,269 ----
  	addr = trunc_page(bp->b_data);
  	off = (vm_offset_t)bp->b_data - addr;
  	len = round_page(off + len);
+ #if defined(UVM)
+ 	uvm_km_free_wakeup(phys_map, addr, len);
+ #else
  	kmem_free_wakeup(phys_map, addr, len);
+ #endif
  	bp->b_data = bp->b_saveaddr;
  	bp->b_saveaddr = 0;
  }

===========BEBOX
=====./bebox/machdep.c
*** ../bebox.org/./bebox/machdep.c	Thu May 28 17:42:05 1998
--- ./bebox/machdep.c	Sun May 31 01:53:52 1998
***************
*** 30,35 ****
--- 30,36 ----
   * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
   * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   */
+ #include "opt_uvm.h"
  #include "ipkdb.h"
  
  #include <sys/param.h>
***************
*** 97,102 ****
--- 98,109 ----
  /*
   * Global variables used here and there
   */
+ #if defined(UVM)
+ vm_map_t exec_map = NULL;
+ vm_map_t mb_map = NULL;
+ vm_map_t phys_map = NULL;
+ #endif
+ 
  char bootinfo[BOOTINFO_MAXSIZE];
  
  char machine[] = MACHINE;		/* machine */
***************
*** 317,323 ****
--- 324,334 ----
          /*
  	 * Set the page size.
  	 */
+ #if defined(UVM)
+ 	uvm_setpagesize();
+ #else
  	vm_set_page_size();
+ #endif
  
  	/*
  	 * Initialize pmap module.
***************
*** 435,450 ****
--- 446,471 ----
  	/*
  	 * BeBox Mother Board's Register Mapping
  	 */
+ #if defined(UVM)
+ 	if (!(bebox_mb_reg = uvm_km_alloc(kernel_map, round_page(NBPG))))
+ 		panic("initppc: no room for interrupt register");
+ #else
  	if (!(bebox_mb_reg = kmem_alloc(kernel_map, round_page(NBPG))))
  		panic("initppc: no room for interrupt register");
+ #endif
  	pmap_enter(pmap_kernel(), bebox_mb_reg, MOTHER_BOARD_REG,
  		VM_PROT_ALL, TRUE);
  
  	/*
  	 * Initialize error message buffer (at end of core).
  	 */
+ #if defined(UVM)
+ 	if (!(msgbuf_vaddr = uvm_km_alloc(kernel_map, round_page(MSGBUFSIZE))))
+ 		panic("startup: no room for message buffer");
+ #else
  	if (!(msgbuf_vaddr = kmem_alloc(kernel_map, round_page(MSGBUFSIZE))))
  		panic("startup: no room for message buffer");
+ #endif
  	for (i = 0; i < btoc(MSGBUFSIZE); i++)
  		pmap_enter(pmap_kernel(), msgbuf_vaddr + i * NBPG,
  		    msgbuf_paddr + i * NBPG, VM_PROT_ALL, TRUE);
***************
*** 461,468 ****
--- 482,494 ----
  	 * and then give everything true virtual addresses.
  	 */
  	sz = (int)allocsys((caddr_t)0);
+ #if defined(UVM)
+ 	if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(sz))) == 0)
+ 		panic("startup: no room for tables");
+ #else
  	if ((v = (caddr_t)kmem_alloc(kernel_map, round_page(sz))) == 0)
  		panic("startup: no room for tables");
+ #endif
  	if (allocsys(v) - v != sz)
  		panic("startup: table size inconsistency");
  
***************
*** 471,481 ****
--- 497,515 ----
  	 * in that they usually occupy more virtual memory than physical.
  	 */
  	sz = MAXBSIZE * nbuf;
+ #if defined(UVM)
+ 	if (uvm_map(kernel_map, (vm_offset_t *)&buffers, round_page(sz),
+ 		    NULL, UVM_UNKNOWN_OFFSET,
+ 		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
+ 				UVM_ADV_NORMAL, 0)) != KERN_SUCCESS)
+ 		panic("startup: cannot allocate VM for buffers");
+ #else
  	buffer_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, sz, TRUE);
  	buffers = (char *)minaddr;
  	if (vm_map_find(buffer_map, vm_object_allocate(sz), (vm_offset_t)0,
  			&minaddr, sz, FALSE) != KERN_SUCCESS)
  		panic("startup: cannot allocate buffers");
+ #endif
  	base = bufpages / nbuf;
  	residual = bufpages % nbuf;
  	if (base >= MAXBSIZE) {
***************
*** 484,489 ****
--- 518,548 ----
  		residual = 0;
  	}
  	for (i = 0; i < nbuf; i++) {
+ #if defined(UVM)
+ 		vm_size_t curbufsize;
+ 		vm_offset_t curbuf;
+ 		struct vm_page *pg;
+ 
+ 		/*
+ 		 * Each buffer has MAXBSIZE bytes of VM space allocated.  Of
+ 		 * that MAXBSIZE space, we allocate and map (base+1) pages
+ 		 * for the first "residual" buffers, and then we allocate
+ 		 * "base" pages for the rest.
+ 		 */
+ 		curbuf = (vm_offset_t) buffers + (i * MAXBSIZE);
+ 		curbufsize = CLBYTES * ((i < residual) ? (base+1) : base);
+ 
+ 		while (curbufsize) {
+ 			pg = uvm_pagealloc(NULL, 0, NULL);
+ 			if (pg == NULL)
+ 				panic("startup: not enough memory for "
+ 					"buffer cache");
+ 			pmap_enter(kernel_map->pmap, curbuf,
+ 				   VM_PAGE_TO_PHYS(pg), VM_PROT_ALL, TRUE);
+ 			curbuf += PAGE_SIZE;
+ 			curbufsize -= PAGE_SIZE;
+ 		}
+ #else
  		vm_size_t curbufsize;
  		vm_offset_t curbuf;
  		
***************
*** 491,516 ****
--- 550,591 ----
  		curbufsize = CLBYTES * (i < residual ? base + 1 : base);
  		vm_map_pageable(buffer_map, curbuf, curbuf + curbufsize, FALSE);
  		vm_map_simplify(buffer_map, curbuf);
+ #endif
  	}
  
  	/*
  	 * Allocate a submap for exec arguments.  This map effectively
  	 * limits the number of processes exec'ing at any time.
  	 */
+ #if defined(UVM)
+ 	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
+ 				 16*NCARGS, TRUE, FALSE, NULL);
+ #else
  	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
  				 16*NCARGS, TRUE);
+ #endif
  
  	/*
  	 * Allocate a submap for physio
  	 */
+ #if defined(UVM)
+ 	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
+ 				 VM_PHYS_SIZE, TRUE, FALSE, NULL);
+ #else
  	phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
  				 VM_PHYS_SIZE, TRUE);
+ #endif
  
  	/*
  	 * Finally, allocate mbuf cluster submap.
  	 */
+ #if defined(UVM)
+ 	mb_map = uvm_km_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
+ 			       VM_MBUF_SIZE, FALSE, FALSE, NULL);
+ #else
  	mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
  			       VM_MBUF_SIZE, FALSE);
+ #endif
  
  	/*
  	 * Initialize callouts.
***************
*** 519,526 ****
--- 594,606 ----
  	for (i = 1; i < ncallout; i++)
  		callout[i - 1].c_next = &callout[i];
  
+ #if defined(UVM)
+ 	printf("avail memory = %d (%dK bytes)\n",
+ 		ptoa(uvmexp.free), ptoa(uvmexp.free) / 1024);
+ #else
  	printf("avail memory = %d (%dK bytes)\n",
  		ptoa(cnt.v_free_count), ptoa(cnt.v_free_count) / 1024);
+ #endif
  	printf("using %d buffers containing %d bytes of memory\n",
  	       nbuf, bufpages * CLBYTES);
  
***************
*** 587,593 ****
--- 667,675 ----
  		if (nswbuf > 256)
  			nswbuf = 256;
  	}
+ #if !defined(UVM)
  	valloc(swbuf, struct buf, nswbuf);
+ #endif
  	valloc(buf, struct buf, nbuf);
  	
  	return v;
=====./bebox/bus_dma.c
*** ../bebox.org/./bebox/bus_dma.c	Tue Feb 24 19:20:57 1998
--- ./bebox/bus_dma.c	Sun May 31 00:53:39 1998
***************
*** 76,81 ****
--- 76,82 ----
   *
   *	@(#)machdep.c	7.4 (Berkeley) 6/3/91
   */
+ #include "opt_uvm.h"
  
  #include <sys/param.h>
  #include <sys/systm.h>
***************
*** 392,398 ****
--- 393,403 ----
  		}
  	}
  
+ #if defined(UVM)
+ 	uvm_pglistfree(&mlist);
+ #else
  	vm_page_free_memory(&mlist);
+ #endif
  }
  
  /*
***************
*** 414,420 ****
--- 419,429 ----
  
  	size = round_page(size);
  	s = splimp();
+ #if defined(UVM)
+ 	va = uvm_km_valloc(kmem_map, size);
+ #else
  	va = kmem_alloc_pageable(kmem_map, size);
+ #endif
  	splx(s);
  
  	if (va == 0)
***************
*** 462,468 ****
--- 471,481 ----
  
  	size = round_page(size);
  	s = splimp();
+ #if defined(UVM)
+ 	uvm_km_free(kmem_map, (vm_offset_t)kva, size);
+ #else
  	kmem_free(kmem_map, (vm_offset_t)kva, size);
+ #endif
  	splx(s);
  }
  
***************
*** 612,619 ****
--- 625,637 ----
  	 * Allocate pages from the VM system.
  	 */
  	TAILQ_INIT(&mlist);
+ #if defined(UVM)
+ 	error = uvm_pglistalloc(size, low, high,
+ 	    alignment, boundary, &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
+ #else
  	error = vm_page_alloc_memory(size, low, high,
  	    alignment, boundary, &mlist, nsegs, (flags & BUS_DMA_NOWAIT) == 0);
+ #endif
  	if (error)
  		return (error);
  
=====./bebox/locore.s
*** ../bebox.org/./bebox/locore.s	Thu Feb 19 13:00:09 1998
--- ./bebox/locore.s	Sun May 31 02:49:13 1998
***************
*** 31,41 ****
   * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
   * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   */
! 
  #include "fs_kernfs.h"
- 
  #include "ipkdb.h"
- 
  #include "assym.h"
  
  #include <sys/syscall.h>
--- 31,39 ----
   * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
   * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   */
! #include "opt_uvm.h"
  #include "fs_kernfs.h"
  #include "ipkdb.h"
  #include "assym.h"
  
  #include <sys/syscall.h>
***************
*** 206,212 ****
--- 204,214 ----
  	stw	6,_C_LABEL(curpcb)@l(7)
  	addi	1,6,USPACE-16		/* 16 bytes are reserved at stack top */
  /* Now free the old user structure (args are already in r3, r4, r5) */
+ #if defined(UVM)
+ 	bl	_C_LABEL(uvm_km_free)
+ #else
  	bl	_C_LABEL(kmem_free)
+ #endif
  /* Fall through to cpu_switch to actually select another proc */
  	li	3,0			/* indicate exited process */
  
=====./conf/GENERIC
*** ../bebox.org/./conf/GENERIC	Fri May 29 23:03:13 1998
--- ./conf/GENERIC	Sun May 31 01:27:23 1998
***************
*** 20,25 ****
--- 20,26 ----
  options 	DDB
  #options 	DDB_HISTORY_SIZE=100	# Enable history editing in DDB
  options 	KTRACE
+ options		UVM			# Use UVM instead of Mach VM.
  
  options 	TCP_COMPAT_42
  options 	COMPAT_43
=====./include/vmparam.h
*** ../bebox.org/./include/vmparam.h	Thu May 28 22:34:21 1998
--- ./include/vmparam.h	Sun May 31 01:49:27 1998
***************
*** 31,36 ****
--- 31,39 ----
   * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   */
  
+ #ifndef	_MACHINE_VMPARAM_H_
+ #define	_MACHINE_VMPARAM_H_
+ 
  #define	USRTEXT		CLBYTES
  #define	USRSTACK	VM_MAXUSER_ADDRESS
  
***************
*** 84,89 ****
--- 87,94 ----
  #define	VM_MAXUSER_ADDRESS	((vm_offset_t)0xfffff000)
  #define	VM_MAX_ADDRESS		VM_MAXUSER_ADDRESS
  #define	VM_MIN_KERNEL_ADDRESS	((vm_offset_t)(KERNEL_SR << ADDR_SR_SHFT))
+ #define	VM_MAX_KERNEL_ADDRESS	\
+ 	((vm_offset_t)((KERNEL_SR << ADDR_SR_SHFT) + SEGMENT_LENGTH - 1))
  
  #define	VM_KMEM_SIZE		(NKMEMCLUSTERS * CLBYTES)
  #define	VM_MBUF_SIZE		(NMBCLUSTERS * CLBYTES)
***************
*** 99,101 ****
--- 104,108 ----
  #define VM_PHYSSEG_MAX		32
  #define VM_PHYSSEG_STRAT	VM_PSTRAT_BSEARCH
  #define VM_PHYSSEG_NOADD		/* can't add RAM after vm_mem_init */
+ 
+ #endif /* _MACHINE_VMPARAM_H_ */