diff -purN -X /home/mbligh/.diff.exclude 340-schedstat/kernel/sysctl.c 350-autoswap/kernel/sysctl.c
--- 340-schedstat/kernel/sysctl.c	2003-12-02 14:54:18.000000000 -0800
+++ 350-autoswap/kernel/sysctl.c	2003-12-02 14:58:13.000000000 -0800
@@ -682,11 +682,8 @@ static ctl_table vm_table[] = {
 		.procname	= "swappiness",
 		.data		= &vm_swappiness,
 		.maxlen		= sizeof(vm_swappiness),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_minmax,
-		.strategy	= &sysctl_intvec,
-		.extra1		= &zero,
-		.extra2		= &one_hundred,
+		.mode		= 0444 /* read-only */,
+		.proc_handler	= &proc_dointvec,
 	},
 #ifdef CONFIG_HUGETLB_PAGE
 	 {
diff -purN -X /home/mbligh/.diff.exclude 340-schedstat/mm/vmscan.c 350-autoswap/mm/vmscan.c
--- 340-schedstat/mm/vmscan.c	2003-12-02 14:57:40.000000000 -0800
+++ 350-autoswap/mm/vmscan.c	2003-12-02 14:58:13.000000000 -0800
@@ -47,7 +47,7 @@
 /*
  * From 0 .. 100.  Higher means more swappy.
  */
-int vm_swappiness = 60;
+int vm_swappiness = 0;
 static long total_memory;
 
 #ifdef ARCH_HAS_PREFETCH
@@ -600,6 +600,7 @@ refill_inactive_zone(struct zone *zone, 
 	LIST_HEAD(l_active);	/* Pages to go onto the active_list */
 	struct page *page;
 	struct pagevec pvec;
+	struct sysinfo i;
 	int reclaim_mapped = 0;
 	long mapped_ratio;
 	long distress;
@@ -642,6 +643,14 @@ refill_inactive_zone(struct zone *zone, 
 	mapped_ratio = (ps->nr_mapped * 100) / total_memory;
 
 	/*
+	 * Autoregulate vm_swappiness to be equal to the percentage of
+	 * pages in physical ram that are application pages. -ck
+	 */
+	si_meminfo(&i);
+	vm_swappiness = 100 - (((i.freeram + get_page_cache_size() -
+		swapper_space.nrpages) * 100) / i.totalram);
+
+	/*
 	 * Now decide how much we really want to unmap some pages.  The mapped
 	 * ratio is downgraded - just because there's a lot of mapped memory
 	 * doesn't necessarily mean that page reclaim isn't succeeding.