patch-2.1.121 linux/include/asm-alpha/smplock.h
- Lines: 61
- Date: Sun Sep 6 10:34:33 1998
- Orig file: v2.1.120/linux/include/asm-alpha/smplock.h
- Orig date: Mon Aug 3 17:48:27 1998
diff -u --recursive --new-file v2.1.120/linux/include/asm-alpha/smplock.h linux/include/asm-alpha/smplock.h
@@ -3,6 +3,8 @@
  *
  * Default SMP lock implementation
  */
+
+#include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <asm/spinlock.h>
 
@@ -11,23 +13,22 @@
 /*
  * Release global kernel lock and global interrupt lock
  */
-#define release_kernel_lock(task, cpu) \
-do { \
-	if (task->lock_depth >= 0) \
-		spin_unlock(&kernel_flag); \
-	release_irqlock(cpu); \
-	__sti(); \
-} while (0)
+static __inline__ void release_kernel_lock(struct task_struct *task, int cpu)
+{
+	if (task->lock_depth >= 0)
+		spin_unlock(&kernel_flag);
+	release_irqlock(cpu);
+	__sti();
+}
 
 /*
  * Re-acquire the kernel lock
  */
-#define reacquire_kernel_lock(task) \
-do { \
-	if (task->lock_depth >= 0) \
-		spin_lock(&kernel_flag); \
-} while (0)
-
+static __inline__ void reacquire_kernel_lock(struct task_struct *task)
+{
+	if (task->lock_depth >= 0)
+		spin_lock(&kernel_flag);
+}
 /*
  * Getting the big kernel lock.
@@ -36,13 +37,13 @@
  * so we only need to worry about other
  * CPU's.
  */
-extern __inline__ void lock_kernel(void)
+static __inline__ void lock_kernel(void)
 {
 	if (!++current->lock_depth)
 		spin_lock(&kernel_flag);
 }
 
-extern __inline__ void unlock_kernel(void)
+static __inline__ void unlock_kernel(void)
 {
 	if (--current->lock_depth < 0)
 		spin_unlock(&kernel_flag);
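
Note: the patch changes packaging, not behavior. The big-kernel-lock macros become static inline functions (and extern inline becomes static inline, which with gcc's traditional extern-inline semantics gives the compiler a local definition to fall back on if it declines to inline a call), and <linux/sched.h> is now included because the functions dereference struct task_struct. The counting scheme itself is unchanged: a task's lock_depth starts at -1, the first lock_kernel() takes the spinlock, and nested calls only bump the counter. The following is a minimal single-threaded user-space sketch of that scheme; kernel_flag_held and the file-scope lock_depth are stand-ins for the kernel's spinlock and per-task state, not the real API.

#include <assert.h>
#include <stdio.h>

static int kernel_flag_held;	/* stand-in for spin_lock/spin_unlock on kernel_flag */
static int lock_depth = -1;	/* stand-in for task->lock_depth; -1 means not held */

static void lock_kernel(void)
{
	/* depth -1 -> 0 on first acquisition: take the real lock */
	if (!++lock_depth)
		kernel_flag_held = 1;
}

static void unlock_kernel(void)
{
	/* depth 0 -> -1 on last release: drop the real lock */
	if (--lock_depth < 0)
		kernel_flag_held = 0;
}

int main(void)
{
	lock_kernel();			/* outer acquisition takes the lock */
	lock_kernel();			/* nested acquisition only counts */
	unlock_kernel();
	assert(kernel_flag_held);	/* still held after the inner unlock */
	unlock_kernel();
	assert(!kernel_flag_held);	/* fully released */
	printf("depth back to %d\n", lock_depth);
	return 0;
}

This counting is also why release_kernel_lock()/reacquire_kernel_lock() test lock_depth >= 0: the scheduler drops the spinlock across a context switch without resetting the count, so the lock is transparently re-taken when the task runs again.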