patch-2.4.25 linux-2.4.25/arch/mips/mm/tlb-sb1.c

diff -urN linux-2.4.24/arch/mips/mm/tlb-sb1.c linux-2.4.25/arch/mips/mm/tlb-sb1.c
@@ -1,7 +1,7 @@
 /*
  * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
  * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org)
- * Copyright (C) 2000, 2001 Broadcom Corporation
+ * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -18,12 +18,15 @@
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
  */
 #include <linux/config.h>
+#include <linux/init.h>
 #include <asm/mmu_context.h>
 #include <asm/bootinfo.h>
 #include <asm/cpu.h>
 
 extern char except_vec0_sb1[];
 
+#define UNIQUE_ENTRYHI(idx) (KSEG0 + ((idx) << (PAGE_SHIFT + 1)))
+
 /* Dump the current entry* and pagemask registers */
 static inline void dump_cur_tlb_regs(void)
 {
@@ -35,6 +38,7 @@
 		".set noreorder        \n"
 		".set mips64           \n"
 		".set noat             \n"
+		"     tlbr             \n"
 		"     dmfc0  $1, $10   \n"
 		"     dsrl32 %0, $1, 0 \n"
 		"     sll    %1, $1, 0 \n"
@@ -76,7 +80,6 @@
 	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
 		write_c0_index(entry);
 		printk("\n%02i ", entry);
-		tlb_read();
 		dump_cur_tlb_regs();
 	}
 	printk("\n");
@@ -95,10 +98,13 @@
 	old_ctx = read_c0_entryhi() & ASID_MASK;
 	write_c0_entrylo0(0);
 	write_c0_entrylo1(0);
-	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
-		write_c0_entryhi(KSEG0 + (PAGE_SIZE << 1) * entry);
+
+	entry = read_c0_wired();
+	while (entry < current_cpu_data.tlbsize) {
+		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
 		write_c0_index(entry);
 		tlb_write_indexed();
+		entry++;
 	}
 	write_c0_entryhi(old_ctx);
 	local_irq_restore(flags);
@@ -110,7 +116,7 @@
  * Use increments of the maximum page size (16MB), and check for duplicate
  * entries before doing a given write.  Then, when we're safe from collisions
  * with the firmware, go back and give all the entries invalid addresses with
- * the normal flush routine.
+ * the normal flush routine.  Wired entries will be killed as well!
  */
 void sb1_sanitize_tlb(void)
 {
@@ -164,7 +170,7 @@
 				idx = read_c0_index();
 				write_c0_entrylo0(0);
 				write_c0_entrylo1(0);
-				write_c0_entryhi(KSEG0 + (idx << (PAGE_SHIFT+1)));
+				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
 				if (idx < 0)
 					continue;
 				tlb_write_indexed();
@@ -196,7 +202,7 @@
 		if (idx < 0)
 			goto finish;
 		/* Make sure all entries differ. */
-		write_c0_entryhi(KSEG0+(idx<<(PAGE_SHIFT+1)));
+		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
 		tlb_write_indexed();
 	finish:
 		write_c0_entryhi(oldpid);
@@ -204,13 +210,39 @@
 	local_irq_restore(flags);
 }
 
+/*
+ * Remove one kernel space TLB entry.  This entry is assumed to be marked
+ * global so we don't do the ASID thing.
+ */
+void local_flush_tlb_one(unsigned long page)
+{
+	unsigned long flags;
+	int oldpid, idx;
+
+	page &= (PAGE_MASK << 1);
+	oldpid = read_c0_entryhi() & ASID_MASK;
+
+	local_irq_save(flags);
+	write_c0_entryhi(page);
+	tlb_probe();
+	idx = read_c0_index();
+	if (idx >= 0) {
+		/* Make sure all entries differ. */
+		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
+		write_c0_entrylo0(0);
+		write_c0_entrylo1(0);
+		tlb_write_indexed();
+	}
+
+	write_c0_entryhi(oldpid);
+	local_irq_restore(flags);
+}
 
 /* All entries common to a mm share an asid.  To effectively flush
    these entries, we just bump the asid. */
 void local_flush_tlb_mm(struct mm_struct *mm)
 {
 	int cpu = smp_processor_id();
-
 	if (cpu_context(cpu, mm) != 0) {
 		drop_mmu_context(mm, cpu);
 	}
@@ -252,6 +284,34 @@
 	local_irq_restore(flags);
 }
 
+void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
+	unsigned long entryhi, unsigned long pagemask)
+{
+	unsigned long flags;
+	unsigned long wired;
+	unsigned long old_pagemask;
+	unsigned long old_ctx;
+
+	local_irq_save(flags);
+	old_ctx = read_c0_entryhi() & 0xff;
+	old_pagemask = read_c0_pagemask();
+	wired = read_c0_wired();
+	write_c0_wired(wired + 1);
+	write_c0_index(wired);
+
+	write_c0_pagemask(pagemask);
+	write_c0_entryhi(entryhi);
+	write_c0_entrylo0(entrylo0);
+	write_c0_entrylo1(entrylo1);
+	tlb_write_indexed();
+
+	write_c0_entryhi(old_ctx);
+	write_c0_pagemask(old_pagemask);
+
+	local_flush_tlb_all();
+	local_irq_restore(flags);
+}
+
 /*
  * This is called from loadmmu.c.  We have to set up all the
  * memory management function pointers, as well as initialize
@@ -259,7 +319,8 @@
  */
 void sb1_tlb_init(void)
 {
-	write_c0_pagemask(PM_4K);
+	write_c0_pagemask(PM_DEFAULT_MASK);
+	write_c0_wired(0);
 
 	/*
 	 * We don't know what state the firmware left the TLB's in, so this is

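Note on UNIQUE_ENTRYHI(): the macro introduced by this patch replaces the open-coded KSEG0 + (idx << (PAGE_SHIFT + 1)) expressions in the flush paths. Because each R4K-style TLB entry maps an even/odd pair of pages, spacing invalidated entries two pages apart guarantees that no two entries ever share a VPN2, which would otherwise trigger a machine check for duplicate TLB entries. A minimal userspace sketch of the arithmetic, assuming the usual KSEG0 base of 0x80000000 and 4KB pages (neither value is spelled out in the patch itself):

#include <stdio.h>

#define KSEG0       0x80000000UL
#define PAGE_SHIFT  12		/* 4KB pages assumed for illustration */
#define UNIQUE_ENTRYHI(idx) (KSEG0 + ((unsigned long)(idx) << (PAGE_SHIFT + 1)))

int main(void)
{
	/* Consecutive indices land 8KB apart, so every invalidated TLB
	 * entry gets a distinct VPN2. */
	int idx;

	for (idx = 0; idx < 4; idx++)
		printf("entry %2d -> entryhi 0x%08lx\n", idx, UNIQUE_ENTRYHI(idx));
	return 0;
}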
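Note on add_wired_entry(): a hypothetical board-setup caller might use the new routine to pin an uncached device mapping at boot. The EntryLo encoding sketched below follows the standard R4K-style layout (PFN << 6 | C << 3 | D | V | G); the helper name, the bit constants, and the physical address are illustrative assumptions, not part of the patch:

#include <stdio.h>

#define PAGE_SHIFT       12
#define ENTRYLO_G        (1 << 0)	/* global: match any ASID   */
#define ENTRYLO_V        (1 << 1)	/* valid                    */
#define ENTRYLO_D        (1 << 2)	/* dirty, i.e. writable     */
#define ENTRYLO_UNCACHED (2 << 3)	/* cache coherency attr = 2 */

/* Build one EntryLo word for the physical page at 'pa'. */
static unsigned long mk_entrylo(unsigned long pa)
{
	return ((pa >> PAGE_SHIFT) << 6) |
	       ENTRYLO_UNCACHED | ENTRYLO_D | ENTRYLO_V | ENTRYLO_G;
}

int main(void)
{
	/* One wired slot covers an even/odd page pair: pa and pa + 4KB.
	 * In the kernel this pair would be handed to add_wired_entry()
	 * along with the virtual address (entryhi) and a PM_* page mask. */
	unsigned long pa = 0x1fc00000UL;	/* illustrative only */

	printf("entrylo0=0x%08lx entrylo1=0x%08lx\n",
	       mk_entrylo(pa), mk_entrylo(pa + (1UL << PAGE_SHIFT)));
	return 0;
}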